Added det of TensorN
This commit is contained in:
parent 38cc58893b
commit 671a2708ff

3 changed files with 152 additions and 165 deletions
@@ -36,6 +36,7 @@ License
     sphericalTensorType,vectorType,CmptType, \
     args...) \
 \
+UNARY_FUNCTION(CmptType, tensorType, det) \
 UNARY_FUNCTION(tensorType, tensorType, inv) \
 UNARY_FUNCTION(diagTensorType, tensorType, diag) \
 UNARY_FUNCTION(tensorType, tensorType, negSumDiag) \

@@ -47,6 +47,7 @@ SourceFiles
     sphericalTensorType,vectorType,CmptType, \
     args...) \
 \
+UNARY_FUNCTION(CmptType, tensorType, det) \
 UNARY_FUNCTION(tensorType, tensorType, inv) \
 UNARY_FUNCTION(diagTensorType, tensorType, diag) \
 UNARY_FUNCTION(tensorType, tensorType, negSumDiag) \
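Reading note: these two hunks register det in the macro list that generates the field-level wrappers for the TensorN family, alongside inv, diag and negSumDiag. As a rough illustration only (not the actual macro expansion; std::vector stands in for the field container, and the det(TensorN) overload added later in this commit is assumed), the kind of element-wise wrapper such an entry provides looks like:

    #include <vector>

    // Illustrative sketch: apply det to every element of a field of tensors,
    // yielding a field of component (scalar) values.
    template<class CmptType, class TensorType>
    std::vector<CmptType> detField(const std::vector<TensorType>& tf)
    {
        std::vector<CmptType> result;
        result.reserve(tf.size());

        for (const TensorType& t : tf)
        {
            result.push_back(det(t));   // per-element determinant
        }

        return result;
    }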
@@ -89,11 +89,11 @@ inline TensorN<Cmpt, length> TensorN<Cmpt, length>::T() const
 {
     TensorN<Cmpt, length> transpose;

-    int i = 0;
-    for (int row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
+    label i = 0;
+    for (label row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
     {
-        int j=row;
-        for (int col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
+        label j = row;
+        for (label col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
         {
             transpose.v_[i] = this->v_[j];
             i++;
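Reading note, with N = rowLength and row-major storage v_[row*N + col]: the loop above implements the plain transpose relation

    T^{\mathsf{T}}_{rc} = T_{cr}, \quad \text{i.e.}\quad \texttt{transpose.v\_}[rN + c] = \texttt{v\_}[cN + r],

where i walks the destination sequentially and j starts at row and moves down the source column (its stride lies outside the lines shown in this hunk).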
@@ -110,8 +110,8 @@ inline DiagTensorN<Cmpt, length> TensorN<Cmpt, length>::diag() const
 {
     DiagTensorN<Cmpt, length> dt;

-    int diagI=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diagI=0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         dt[i] = this->v_[diagI];
         diagI += TensorN<Cmpt, length>::rowLength + 1;

@@ -127,18 +127,18 @@ inline TensorN<Cmpt, length> TensorN<Cmpt, length>::negSumDiag() const
     TensorN<Cmpt, length> negsumdiag;

     // Zero main diagonal
-    int diagI=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diagI = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         negsumdiag.v_[diagI] = 0.0;
         diagI += TensorN<Cmpt, length>::rowLength + 1;
     }

-    int k=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label k = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         diagI = 0;
-        for (int j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
+        for (label j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
         {
             if (k != diagI)
             {

@@ -158,8 +158,8 @@ template <class Cmpt, int length>
 inline void
 TensorN<Cmpt, length>::operator=(const SphericalTensorN<Cmpt, length>& st)
 {
-    int diag=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::nComponents; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::nComponents; i++)
     {
         if (i == diag)
         {

@@ -179,11 +179,11 @@ template <class Cmpt, int length>
 inline void
 TensorN<Cmpt, length>::operator=(const DiagTensorN<Cmpt, length>& dt)
 {
-    int diag=0;
-    int k=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    label k = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
-        for (int j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
+        for (label j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
         {
             if (j == diag)
             {

@@ -278,17 +278,25 @@ operator&(const TensorN<Cmpt, length>& t1, const TensorN<Cmpt, length>& t2)
 {
     TensorN<Cmpt, length> result(TensorN<Cmpt, length>::zero);

-    int i = 0;
-    int j = 0;
-    for (int row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
+    label i = 0;
+    label j = 0;
+
+    label m, n;
+
+    for (label row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
     {
-        for (int col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
+        for (label col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
         {
             Cmpt& r = result.v_[i];
-            int m = j;
-            int n = col;
+            m = j;
+            n = col;

-            for (int row2=0; row2 < TensorN<Cmpt, length>::rowLength; row2++)
+            for
+            (
+                label row2 = 0;
+                row2 < TensorN<Cmpt, length>::rowLength;
+                row2++
+            )
             {
                 r += t1.v_[m]*t2.v_[n];
                 m++;
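Reading note: with N = rowLength and row-major storage v_[row*N + col], the loop nest above accumulates the inner (single-dot) product

    (T_1 \cdot T_2)_{rc} = \sum_{k=0}^{N-1} (T_1)_{rk}\,(T_2)_{kc}

into result.v_[i]; m starts at j (which tracks the beginning of row 'row' of t1) and steps along that row, while n starts at col and walks down column 'col' of t2 (its per-iteration stride lies outside the lines shown in this hunk).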
@@ -314,12 +322,12 @@ operator&
 {
     TensorN<Cmpt, length> result;

-    int k=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label k = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         const Cmpt& xx = dt1.v_[i];

-        for (int j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
+        for (label j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
         {
             result.v_[k] = xx*t2.v_[k];
             k++;

@@ -341,10 +349,10 @@ operator&
 {
     TensorN<Cmpt, length> result;

-    int k=0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label k = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
-        for (int j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
+        for (label j = 0; j < TensorN<Cmpt, length>::rowLength; j++)
         {
             result.v_[k] = t1.v_[k]*dt2.v_[j];
             k++;

@@ -411,12 +419,12 @@ operator&(const TensorN<Cmpt, length>& t, const VectorN<Cmpt, length>& v)
 {
     VectorN<Cmpt, length> result(VectorN<Cmpt, length>::zero);

-    int i=0;
-    for (int row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
+    label i = 0;
+    for (label row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
     {
         Cmpt& r = result.v_[row];

-        for (int col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
+        for (label col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
         {
             r += t.v_[i]*v.v_[col];
             i++;

@@ -435,12 +443,12 @@ operator&(const VectorN<Cmpt, length>& v, const TensorN<Cmpt, length>& t)
 {
     VectorN<Cmpt, length> result(VectorN<Cmpt, length>::zero);

-    for (int col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
+    for (label col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
     {
-        int j=col;
+        label j = col;
         Cmpt& r = result.v_[col];

-        for (int row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
+        for (label row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
         {
             r += v.v_[row]*t.v_[j];
             j += TensorN<Cmpt, length>::rowLength;

@@ -459,10 +467,10 @@ operator*(const VectorN<Cmpt, length>& v1, const VectorN<Cmpt, length>& v2)
 {
     TensorN<Cmpt, length> result(TensorN<Cmpt, length>::zero);

-    int i=0;
-    for (int row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
+    label i = 0;
+    for (label row = 0; row < TensorN<Cmpt, length>::rowLength; row++)
     {
-        for (int col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
+        for (label col = 0; col < TensorN<Cmpt, length>::rowLength; col++)
        {
             result.v_[i] = v1.v_[row]*v2.v_[col];
             i++;
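Reading note: this is the dyadic (outer) product written over the flat storage,

    (v_1 v_2)_{rc} = (v_1)_r\,(v_2)_c, \quad \text{stored at } \texttt{result.v\_}[rN + c],

which is exactly the element the running index i points to.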
@@ -498,8 +506,8 @@ operator+(const TensorN<Cmpt,length>& t1, const DiagTensorN<Cmpt,length>& dt2)
 {
     TensorN<Cmpt, length> result(t1);

-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] += dt2.v_[i];
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -516,8 +524,8 @@ operator+(const DiagTensorN<Cmpt,length>& dt1, const TensorN<Cmpt,length>& t2)
 {
     TensorN<Cmpt, length> result(t2);

-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] += dt1.v_[i];
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -539,8 +547,8 @@ operator+
     TensorN<Cmpt, length> result(t1);

     const Cmpt& s = st2.v_[0];
-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] += s;
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -562,8 +570,8 @@ operator+
     TensorN<Cmpt, length> result(t2);

     const Cmpt& s = st1.v_[0];
-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] += s;
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -598,8 +606,8 @@ operator-(const TensorN<Cmpt,length>& t1, const DiagTensorN<Cmpt,length>& dt2)
 {
     TensorN<Cmpt, length> result(t1);

-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] -= dt2.v_[i];
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -616,8 +624,8 @@ operator-(const DiagTensorN<Cmpt,length>& dt1, const TensorN<Cmpt,length>& t2)
 {
     TensorN<Cmpt, length> result(-t2);

-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] += dt1.v_[i];
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -639,8 +647,8 @@ operator-
     TensorN<Cmpt, length> result(t1);

     const Cmpt& s = st2.v_[0];
-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] -= s;
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -662,8 +670,8 @@ operator-
     TensorN<Cmpt, length> result(-t2);

     const Cmpt& s = st1.v_[0];
-    int diag = 0;
-    for (int i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
+    label diag = 0;
+    for (label i = 0; i < TensorN<Cmpt, length>::rowLength; i++)
     {
         result.v_[diag] += s;
         diag += TensorN<Cmpt, length>::rowLength + 1;

@@ -741,7 +749,7 @@ operator/
     TensorN<Cmpt, length> result;

     const Cmpt& s = st2[0];
-    for (int i = 0; i < TensorN<Cmpt, length>::nComponents; i++)
+    for (label i = 0; i < TensorN<Cmpt, length>::nComponents; i++)
     {
         result.v_[i] = t1.v_[i]/s;
     }
@@ -750,109 +758,55 @@ operator/
 }


-// UNOPTIMIZED VERSION
-/*
-//- Return the inverse of a tensor give the determinant
-// Uses Gauss-Jordan Elimination with full pivoting
 template <class Cmpt, int length>
-inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
+inline Cmpt det(const TensorN<Cmpt, length>& t)
 {
-    TensorN<Cmpt, length> result(t);
+    // Calculate determinant via sub-determinants
+    Cmpt result = pTraits<Cmpt>::zero;

-    label i, j, k, iRow=0, iCol=0;
+    TensorN<Cmpt, length - 1> subMatrix;
-    Cmpt bigValue, temp, pivotInv;

-    // Lists used for bookkeeping on the pivoting
+    for (label i = 0; i < length; i++)
-    List<label> indexCol(length), indexRow(length), iPivot(length);

-    iPivot=0;

-    // Main loop over columns to be reduced
-    for (i=0; i<length; i++)
     {
-        bigValue = pTraits<Cmpt>::zero;
+        label nj = 0;

-        //Search for pivot element
+        // Build sub-matrix, skipping the
-        for (j=0; j<length; j++)
+        for (label j = 0; j < length; j++)
         {
-            if (iPivot[j] != 1)
+            // Skip i-th column
+            if (j == i) continue;
+
+            for (label k = 1; k < length; k++)
             {
-                for (k=0; k<length; k++)
+                subMatrix(nj, k) = t(j, k);
-                {
-                    if (iPivot[k] == 0)
-                    {
-                        if (Foam::mag(result(j,k)) >= bigValue)
-                        {
-                            bigValue = Foam::mag(result(j,k));
-                            iRow = j;
-                            iCol = k;
-                        }
-                    }
-                }
             }
-        }
-        ++(iPivot[iCol]);

-        // We now have the pivot element
+            nj++;
-        // Interchange rows if needed
-        if (iRow != iCol)
-        {
-            for (j=0; j<length; j++)
-            {
-                Swap(result(iRow,j), result(iCol,j));
-            }
-        }
-        indexRow[i] = iRow;
-        indexCol[i] = iCol;

-        //Check for singularity
-        if (result(iCol, iCol) == 0.0)
-        {
-            FatalErrorIn("inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)")
-                << "Singular tensor" << length << Foam::abort(FatalError);
         }

-        // Divide the pivot row by pivot element
+        // Handle +/- sign switch
-        pivotInv = pTraits<Cmpt>::one/result(iCol, iCol);
+        result += pow(-1, i)*t(i, 0)*det(subMatrix);
-        result(iCol, iCol) = pTraits<Cmpt>::one;

-        // Multiply all row elements by inverse
-        for (j=0; j<length; j++)
-        {
-            result(iCol,j) *= pivotInv;
-        }

-        // Reduce the rows
-        for (j=0; j<length; j++)
-        {
-            if (j != iCol)
-            {
-                temp=result(j,iCol);
-                result(j,iCol) = pTraits<Cmpt>::zero;

-                for (k=0; k<length; k++)
-                {
-                    result(j,k) -= result(iCol,k)*temp;
-                }
-            }
-        }
-    }

-    // Unscamble the solution
-    for (i=length-1; i>=0; i--)
-    {
-        if (indexRow[i] != indexCol[i])
-        {
-            for (j=0; j<length; j++)
-            {
-                Swap(result(j,indexRow[i]), result(j,indexCol[i]));
-            }
-        }
     }

     return result;
 }
-*/

+// Determinant: partial specialisation for rank 1
+template<class Cmpt>
+inline Cmpt det(const TensorN<Cmpt, 1>& t)
+{
+    return t(0, 0);
+}
+
+
+// Determinant: partial specialisation for rank 2
+template<class Cmpt>
+inline Cmpt det(const TensorN<Cmpt, 2>& t)
+{
+    return t(0, 0)*t(1, 1) - t(0, 1)*t(1, 0);
+}
+
+
 //- Return the inverse of a tensor give the determinant
 // Uses Gauss-Jordan Elimination with full pivoting
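Reading note on the hunk above: the commented-out, unoptimised Gauss-Jordan inv() is dropped and a general det() is added in its place. It is a Laplace (cofactor) expansion along the first column, recursing on (length-1) x (length-1) sub-tensors until the rank-1 and rank-2 specialisations terminate the recursion:

    \det T = \sum_{i=0}^{N-1} (-1)^{i}\, T_{i0}\, \det(M_{i0}),

where M_{i0} denotes T with row i and column 0 removed; this is the expression behind result += pow(-1, i)*t(i, 0)*det(subMatrix). The expansion scales factorially with N, so it is intended for the small fixed sizes TensorN targets.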
@@ -861,7 +815,7 @@ inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
 {
     TensorN<Cmpt, length> result(t);

-    label iRow=0, iCol=0;
+    label iRow = 0, iCol = 0;
     Cmpt largestCoeff, temp;
     Cmpt* __restrict__ srcIter;
     Cmpt* __restrict__ destIter;

@@ -869,20 +823,23 @@ inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
     // Lists used for bookkeeping on the pivoting
     List<label> indexCol(length), indexRow(length), iPivot(length);

-    iPivot=0;
+    iPivot = 0;

+    label curRowOffset;
+
     // Main loop over columns to be reduced
-    for (int i=0; i<length; i++)
+    for (label i = 0; i < length; i++)
     {
         largestCoeff = pTraits<Cmpt>::zero;

-        //Search for pivot element
-        int curRowOffset = 0;
-        for (int j=0; j<length; j++)
+        // Search for pivot element
+        curRowOffset = 0;
+
+        for (label j = 0; j < length; j++)
         {
             if (iPivot[j] != 1)
             {
-                for (int k=0; k<length; k++)
+                for (int k = 0; k < length; k++)
                 {
                     if (iPivot[k] == 0)
                     {

@@ -907,10 +864,10 @@ inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
         // Interchange rows if needed
         if (iRow != iCol)
         {
-            srcIter = &result(iRow,0);
-            destIter = &result(iCol,0);
+            srcIter = &result(iRow, 0);
+            destIter = &result(iCol, 0);

-            for (int j=0; j<length; j++)
+            for (label j = 0; j < length; j++)
             {
                 Swap((*srcIter), (*destIter));
                 srcIter++;

@@ -921,36 +878,39 @@ inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
         indexCol[i] = iCol;

         //Check for singularity
-        srcIter = &result(iCol, iCol); //Dummy pointer to reduce indexing
-        if ((*srcIter) == Cmpt(0.0))
+        srcIter = &result(iCol, iCol); // Dummy pointer to reduce indexing
+        if ((*srcIter) == Cmpt(0))
         {
-            FatalErrorIn("inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)")
-                << "Singular tensor" << length << Foam::abort(FatalError);
+            FatalErrorIn
+            (
+                "inline TensorN<Cmpt, length> inv("
+                "const TensorN<Cmpt, length>& t)"
+            ) << "Singular tensor" << length << Foam::abort(FatalError);
         }

         // Divide the pivot row by pivot element
         temp = pTraits<Cmpt>::one/(*srcIter);
         (*srcIter) = pTraits<Cmpt>::one;

-        srcIter = &result(iCol,0);
-        for (int j=0; j<length; j++)
+        srcIter = &result(iCol, 0);
+        for (label j = 0; j < length; j++)
         {
             (*srcIter) *= temp;
             srcIter++;
         }

         // Reduce the rows, excluding the pivot row
-        for (int j=0; j<length; j++)
+        for (label j = 0; j < length; j++)
         {
             if (j != iCol)
             {
-                destIter = &result(j,0);
-                srcIter = &result(iCol,0);
+                destIter = &result(j, 0);
+                srcIter = &result(iCol, 0);

                 temp=destIter[iCol];
                 destIter[iCol] = pTraits<Cmpt>::zero;

-                for (int k=0; k<length; k++)
+                for (label k = 0; k < length; k++)
                 {
                     (*destIter) -= (*srcIter)*temp;
                     srcIter++;

@@ -961,13 +921,13 @@ inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
     }

     // Unscamble the solution
-    for (int i=length-1; i>=0; i--)
+    for (label i = length - 1; i >= 0; i--)
     {
         if (indexRow[i] != indexCol[i])
         {
             srcIter = &result[indexRow[i]];
             destIter = &result[indexCol[i]];
-            for (int j=0; j<length; j++)
+            for (label j = 0; j < length; j++)
             {
                 Swap((*srcIter), (*destIter));
                 srcIter += length;
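Reading note on the unscrambling step: with full pivoting, every row interchange applied to the matrix during elimination has to be undone as a column interchange on the inverse, because for a transposition matrix P (with P^{-1} = P)

    (P A)^{-1} = A^{-1} P^{-1} = A^{-1} P,

and right-multiplication by P swaps the corresponding columns. The srcIter += length stride walks one column of the row-major result, so the loop swaps two whole columns element by element.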
@@ -980,6 +940,31 @@ inline TensorN<Cmpt, length> inv(const TensorN<Cmpt, length>& t)
 }


+//- Inverse: partial template specialisation for rank 1
+template <class Cmpt>
+inline TensorN<Cmpt, 1> inv(const TensorN<Cmpt, 1>& t)
+{
+    return TensorN<Cmpt, 1>(1/t(0, 0));
+}
+
+
+//- Inverse: partial template specialisation for rank 2
+template <class Cmpt>
+inline TensorN<Cmpt, 2> inv(const TensorN<Cmpt, 2>& t)
+{
+    TensorN<Cmpt, 2> result(t);
+
+    Cmpt oneOverDet = 1/(t(0, 0)*t(1, 1) - t(0, 1)*t(1, 0));
+
+    result(0, 0) = oneOverDet*t(1, 1);
+    result(0, 1) = -oneOverDet*t(0, 1);
+    result(1, 0) = -oneOverDet*t(1, 0);
+    result(1, 1) = oneOverDet*t(0, 0);
+
+    return result;
+}
+
+
 //- Return tensor diagonal
 template <class Cmpt, int length>
 inline DiagTensorN<Cmpt, length> diag(const TensorN<Cmpt, length>& t)
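Reading note: the rank-2 inv specialisation above is the closed-form inverse via the adjugate,

    \begin{pmatrix} a & b \\ c & d \end{pmatrix}^{-1}
        = \frac{1}{ad - bc}
          \begin{pmatrix} d & -b \\ -c & a \end{pmatrix},

with ad - bc the same determinant returned by the new det(TensorN<Cmpt, 2>); the rank-1 case reduces to 1/t(0, 0).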