Bugfix: Cumulative bug fixes for SP Windows and Misc. Author: Inno Gatin and Hrvoje Jasak. Merge: Hrvoje Jasak.

This commit is contained in:
Hrvoje Jasak 2015-10-30 15:31:57 +00:00
commit bc02b33f4a
27 changed files with 293 additions and 183 deletions

View file

@ -90,6 +90,16 @@ echo
( rpm_make -p gcc-4.6.3 -s gcc-4.6.3.spec -u http://ftpmirror.gnu.org/gcc/gcc-4.6.3/gcc-4.6.3.tar.gz )
}
# Gcc 4.7.4 and companion libraries
#
# Build gcc-4.7.4 together with its prerequisite libraries (GMP, MPFR, MPC)
# when the user has opted in via WM_THIRD_PARTY_USE_GCC_474.
[ ! -z "$WM_THIRD_PARTY_USE_GCC_474" ] && {
    # Bugfix: the message previously reported the gcc-4.6.3 package set
    # (gmp-5.0.5 mpfr-3.1.0 mpc-0.9) although this stanza builds the
    # 4.7.4 tool chain below.
    echo "Building gmp-5.1.2 mpfr-3.1.2 mpc-1.0.1 gcc-4.7.4"
    ( rpm_make -p gmp-5.1.2 -s gmp-5.1.2.spec -u ftp://ftp.gnu.org/gnu/gmp/gmp-5.1.2.tar.bz2 )
    ( rpm_make -p mpfr-3.1.2 -s mpfr-3.1.2.spec -u ftp://ftp.gnu.org/gnu/mpfr/mpfr-3.1.2.tar.gz )
    ( rpm_make -p mpc-1.0.1 -s mpc-1.0.1.spec -u http://www.multiprecision.org/mpc/download/mpc-1.0.1.tar.gz )
    ( rpm_make -p gcc-4.7.4 -s gcc-4.7.4.spec -u ftp://ftp.gnu.org/gnu/gcc/gcc-4.7.4/gcc-4.7.4.tar.gz )
}
# Gcc 4.8.4 and companion libraries
#
[ ! -z "$WM_THIRD_PARTY_USE_GCC_484" ] && {

View file

@ -1,5 +1,5 @@
// Momentum equation
fvVectorMatrix UEqn
tmp<fvVectorMatrix> UEqn
(
fvm::ddt(U)
+ fvm::div(phi, U)
@ -7,9 +7,9 @@
);
// Add MRF and porous sources
mrfZones.addCoriolis(UEqn);
pZones.addResistance(UEqn);
mrfZones.addCoriolis(UEqn());
pZones.addResistance(UEqn());
UEqn.relax();
UEqn().relax();
UpEqn.insertEquation(0, UEqn);
UpEqn.insertEquation(0, UEqn());

View file

@ -2,9 +2,11 @@
surfaceScalarField rUAf
(
"rUAf",
fvc::interpolate(1.0/UEqn.A())
fvc::interpolate(1.0/UEqn().A())
);
UEqn.clear();
surfaceScalarField presSource
(
"presSource",

View file

@ -10,6 +10,7 @@ print 'Reading file', logfilename
import re
UpRegex=r"([A-Z,a-z]*):*.*Solving for Up, Initial residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), Final residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), No Iterations ([0-9]*)"
kepsilonRegex=r"([A-Z,a-z]*):*.*Solving for kEpsilon, Initial residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), Final residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), No Iterations ([0-9]*)"
komegaRegex=r"([A-Z,a-z]*):*.*Solving for kOmega, Initial residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), Final residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), No Iterations ([0-9]*)"
tUp = []
@ -24,6 +25,11 @@ k = []
omega = []
ikomega = 0
tkepsilon = []
k = []
epsilon = []
ikepsilon = 0
#HJ take name of log file as script argument
pipefile=open(logfilename,'r')
lines = pipefile.readlines()
@ -37,6 +43,12 @@ for line in lines:
Uy.append(float(matchUp.group(3)))
Uz.append(float(matchUp.group(4)))
p.append(float(matchUp.group(5)))
matchkepsilon=re.search(kepsilonRegex,line)
if matchkepsilon:
ikepsilon = ikepsilon + 1
tkepsilon.append(ikepsilon)
k.append(float(matchkepsilon.group(2)))
epsilon.append(float(matchkepsilon.group(3)))
matchkomega=re.search(komegaRegex,line)
if matchkomega:
ikomega = ikomega + 1
@ -46,15 +58,16 @@ for line in lines:
outfile=open('residual.dat','w')
print 'hits = ', ikomega
#HJ need better way of combining lists
if ikomega > 0:
for index in range(0,ikomega):
outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+' '+str(k[index])+' '+str(omega[index])+'\n')
for data in zip(tUp,Ux,Uy,Uz,p,k,omega):
outfile.write(' '.join([str(d) for d in data])+'\n')
elif ikepsilon > 0:
for data in zip(tUp,Ux,Uy,Uz,p,k,epsilon):
outfile.write(' '.join([str(d) for d in data])+'\n')
elif iUp > 0:
for index in range(0,iUp):
outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+'\n')
for data in zip(tUp,Ux,Uy,Uz,p):
outfile.write(' '.join([str(d) for d in data])+'\n')
outfile.close()
@ -71,6 +84,10 @@ if iUp > 0:
pylab.semilogy(tUp,Uz,'-',label="Uz")
pylab.semilogy(tUp,p,'-',label="p")
if ikepsilon > 0:
pylab.semilogy(tkepsilon,k,'-',label="k")
pylab.semilogy(tkepsilon,epsilon,'-',label="epsilon")
if ikomega > 0:
pylab.semilogy(tkomega,k,'-',label="k")
pylab.semilogy(tkomega,omega,'-',label="omega")

View file

@ -64,16 +64,15 @@ for line in lines:
outfile=open('residual.dat','w')
#HJ need better way of combining lists
if iomega > 0:
for index in range(0,iomega):
outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+' '+str(k[index])+' '+str(omega[index])+'\n')
for data in zip(tUp,Ux,Uy,Uz,p,k,omega):
outfile.write(' '.join([str(d) for d in data])+'\n')
elif iepsilon > 0:
for index in range(0,iepsilon):
outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+' '+str(k[index])+' '+str(epsilon[index])+'\n')
for data in zip(tUp,Ux,Uy,Uz,p,k,epsilon):
outfile.write(' '.join([str(d) for d in data])+'\n')
elif iUp > 0:
for index in range(0,iUp):
outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+'\n')
for data in zip(tUp,Ux,Uy,Uz,p):
outfile.write(' '.join([str(d) for d in data])+'\n')
outfile.close()

View file

@ -219,6 +219,24 @@ MINGW32_NT-6.2)
export WM_LDFLAGS='-m64'
;;
MINGW64_NT-6.1)
WM_ARCH=mingw
export WM_ARCH_BASE=mingw
export WM_COMPILER_LIB_ARCH=64
export WM_CFLAGS='-m64 -fPIC'
export WM_CXXFLAGS='-m64 -fPIC'
export WM_LDFLAGS='-m64'
;;
MINGW64_NT-6.2)
WM_ARCH=mingw
export WM_ARCH_BASE=mingw
export WM_COMPILER_LIB_ARCH=64
export WM_CFLAGS='-m64 -fPIC'
export WM_CXXFLAGS='-m64 -fPIC'
export WM_LDFLAGS='-m64'
;;
Linux)
WM_ARCH=linux

View file

@ -191,6 +191,7 @@
# For AllMake.stage1
#setenv WM_THIRD_PARTY_USE_GCC_492 1
#setenv WM_THIRD_PARTY_USE_GCC_484 1
#setenv WM_THIRD_PARTY_USE_GCC_474 1
#setenv WM_THIRD_PARTY_USE_GCC_463 1
#setenv WM_THIRD_PARTY_USE_GCC_451 1
#setenv WM_THIRD_PARTY_USE_GCC_445 1

View file

@ -201,6 +201,7 @@ export FOAM_VERBOSE=1
# For AllMake.stage1
#export WM_THIRD_PARTY_USE_GCC_492=1
#export WM_THIRD_PARTY_USE_GCC_484=1
#export WM_THIRD_PARTY_USE_GCC_474=1
#export WM_THIRD_PARTY_USE_GCC_463=1
#export WM_THIRD_PARTY_USE_GCC_451=1
#export WM_THIRD_PARTY_USE_GCC_445=1

View file

@ -105,7 +105,7 @@ export PARMGRIDGEN_INCLUDE_DIR=$PARMGRIDGEN_DIR/include
# System installed Scotch
#export SCOTCH_SYSTEM=1
export SCOTCH_DIR=$WM_THIRD_PARTY_DIR/packages/scotch_6.0.0
export SCOTCH_DIR=$WM_THIRD_PARTY_DIR/packages/scotch_6.0.4
export SCOTCH_BIN_DIR=$SCOTCH_DIR/bin
export SCOTCH_LIB_DIR=$SCOTCH_DIR/lib
export SCOTCH_INCLUDE_DIR=$SCOTCH_DIR/include

View file

@ -103,6 +103,10 @@ case FOAM:
setenv WM_COMPILER_DIR $WM_THIRD_PARTY_DIR/packages/gcc-4.9.2/platforms/$WM_OPTIONS
_foamSource $WM_THIRD_PARTY_DIR/packages/gcc-4.9.2/platforms/$WM_OPTIONS/etc/gcc-4.9.2.csh
breaksw
case Gcc48:
setenv WM_COMPILER_DIR $WM_THIRD_PARTY_DIR/packages/gcc-4.8.4/platforms/$WM_OPTIONS
_foamSource $WM_THIRD_PARTY_DIR/packages/gcc-4.8.4/platforms/$WM_OPTIONS/etc/gcc-4.8.4.csh
breaksw
case Gcc47:
setenv WM_COMPILER_DIR $WM_THIRD_PARTY_DIR/packages/gcc-4.7.4/platforms/$WM_OPTIONS
_foamSource $WM_THIRD_PARTY_DIR/packages/gmp-5.1.2/platforms/$WM_OPTIONS/etc/gmp-5.1.2.csh

View file

@ -161,7 +161,7 @@ void Foam::mgMeshLevel::makeChild() const
options[3] = nGeometricD(); // Dimensionality of the grid
// Output: cell to coarse clusted addressing
label nCoarseCells = 0;
int nCoarseCells = 0;
child_.setSize(nCells());
int nMoves = -1;
@ -214,8 +214,8 @@ void Foam::mgMeshLevel::makeChild() const
dblAreas.begin(),
cellCells.begin(),
dblFaceWeights.begin(),
mgMinClusterSize_,
mgMaxClusterSize_,
mgMinClusterSize_(),
mgMaxClusterSize_(),
options.begin(),
&nMoves,
&nCoarseCells_,

View file

@ -17,6 +17,7 @@ LIB_LIBS = \
-ldecompositionMethods \
-L$(SCOTCH_LIB_DIR) -lscotch \
-L$(SCOTCH_LIB_DIR) -lscotcherrexit \
-L$(WM_THIRD_PARTY_DIR)/packages/pthreads-w32-2-9-1-release/Pre-built.2/lib/x64 -lpthreadGC2 \
-L$(OPENMPI_DIR)/lib -lmpi
#else

View file

@ -77,7 +77,7 @@ void directMappedFixedValueFvPatchField<Type>::mapField()
}
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(),
@ -112,7 +112,7 @@ void directMappedFixedValueFvPatchField<Type>::mapField()
newValues_ = nbrField.boundaryField()[nbrPatchID];
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(),
@ -144,7 +144,7 @@ void directMappedFixedValueFvPatchField<Type>::mapField()
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(),

View file

@ -72,33 +72,9 @@ public:
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
//- Hash specialization for hashing pointer addresses.
// Treat a pointer like a intptr_t.
// This should work for both 32-bit and 64-bit pointers.
template<>
class Hash<void*>
{
public:
Hash()
{}
unsigned operator()(const void* const& p, unsigned seed) const
{
return Hash<intptr_t>()(intptr_t(p), seed);
}
unsigned operator()(const void* const& p) const
{
return Hash<intptr_t>()(intptr_t(p));
}
};
//- Hash specialization for hashing labels
template<>
class Hash<Foam::label>
class Hash<label>
{
public:
@ -123,6 +99,30 @@ public:
};
//- Hash specialization for hashing pointer addresses.
// Treat a pointer like a intptr_t.
// This should work for both 32-bit and 64-bit pointers.
template<>
class Hash<void*>
{
public:
Hash()
{}
unsigned operator()(const void* const& p, unsigned seed) const
{
return Hash<intptr_t>()(intptr_t(p), seed);
}
unsigned operator()(const void* const& p) const
{
return Hash<intptr_t>()(intptr_t(p));
}
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam

View file

@ -118,7 +118,8 @@ immersedBoundaryOmegaWallFunctionFvPatchScalarField
nutName_(owfpsf.nutName_),
Cmu_(owfpsf.Cmu_),
kappa_(owfpsf.kappa_),
E_(owfpsf.E_)
E_(owfpsf.E_),
beta1_(owfpsf.beta1_)
{}
@ -137,7 +138,8 @@ immersedBoundaryOmegaWallFunctionFvPatchScalarField
nutName_(owfpsf.nutName_),
Cmu_(owfpsf.Cmu_),
kappa_(owfpsf.kappa_),
E_(owfpsf.E_)
E_(owfpsf.E_),
beta1_(owfpsf.beta1_)
{}
@ -190,11 +192,16 @@ void immersedBoundaryOmegaWallFunctionFvPatchScalarField::updateCoeffs()
);
// Calculate tangential component, taking into account wall velocity
const scalarField UtanOld =
mag((I - sqr(n)) & (Uw.ibSamplingPointValue() - Uw.ibValue()));
const vectorField UtanOld =
(I - sqr(n)) & (Uw.ibSamplingPointValue() - Uw.ibValue());
const scalarField magUtanOld = mag(UtanOld);
// Tangential velocity component
scalarField& UTangentialNew = Uw.wallTangentialValue();
// Wall shear stress
vectorField& tauWall = Uw.tauWall();
// Turbulence kinetic energy
const fvPatchScalarField& kg =
patch().lookupPatchField<volScalarField, scalar>(kName_);
@ -247,7 +254,6 @@ void immersedBoundaryOmegaWallFunctionFvPatchScalarField::updateCoeffs()
// Calculate wall function conditions
forAll (ibc, ibCellI)
{
const scalar nuLam = nu[ibCellI];
// Calculate yPlus from k and laminar viscosity for the IB point
const scalar yPlusSample = ypd[ibCellI];
@ -258,18 +264,20 @@ void immersedBoundaryOmegaWallFunctionFvPatchScalarField::updateCoeffs()
{
// Calculate tauW from log-law using k and U at sampling point
tauW = UtanOld[ibCellI]*Cmu25*sqrt(k[ibCellI])*kappa_
tauW = magUtanOld[ibCellI]*Cmu25*sqrt(k[ibCellI])*kappa_
/log(E_*yPlusSample);
}
else
{
// Sampling point is in laminar sublayer
tauW = UtanOld[ibCellI]*Cmu25*sqrt(k[ibCellI])/yPlusSample;
tauW = magUtanOld[ibCellI]*Cmu25*sqrt(k[ibCellI])/yPlusSample;
}
// friction velocity computed from k and U at sampling point
uTau = sqrt(tauW);
tauWall[ibCellI] = tauW*UtanOld[ibCellI]/(magUtanOld[ibCellI] + SMALL);
// Calculate yPlus for IB point
scalar yPlusIB = yPlusSample*y[ibCellI]/ySample[ibCellI];
@ -277,6 +285,7 @@ void immersedBoundaryOmegaWallFunctionFvPatchScalarField::updateCoeffs()
// Calculate wall function data in the immersed boundary point
if (yPlusIB > yPlusLam)
{
const scalar nuLam = nu[ibCellI];
// Logarithmic region
wf[ibCellI] = true;
@ -315,12 +324,19 @@ void immersedBoundaryOmegaWallFunctionFvPatchScalarField::updateCoeffs()
// Compute omega at the IB cell
omegaNew[ibCellI] = 6.0*nu[ibCellI]/(beta1_*sqr(y[ibCellI]));
// Bugfix - set zeroGradient bc for large omega values at ib boundary
// to avoid k unboundedness (IG 30/OCT/2015), not
// sure if this is a good criteria
if(omegaNew[ibCellI] > 10.0)
{
wf[ibCellI] = true;
}
// Laminar sub-layer for tangential velocity: uPlus = yPlus
UTangentialNew[ibCellI] = uTau*yPlusIB;
// Turbulent viscosity is zero
nutNew[ibCellI] = SMALL;
}
}

View file

@ -205,6 +205,77 @@ immersedBoundaryVelocityWallFunctionFvPatchVectorField::wallShearStress() const
return tauWall_;
}
//- Return non-const access to the cached wall-tangential velocity value
//  (one entry per immersed-boundary cell). The cache is (re-)dimensioned
//  on first use, or whenever the immersed boundary or the mesh moves.
Foam::scalarField&
immersedBoundaryVelocityWallFunctionFvPatchVectorField::wallTangentialValue() const
{
// Bugfix 30/OCT/2015 - check if the mesh is moving
const immersedBoundaryFvPatch& ibFvP =
immersedBoundaryFvPatchVectorField::ibPatch();
// On a moving mesh the IB intersections change, so a stale cache must
// not be reused: resize when empty or when either the immersed
// boundary or the underlying mesh is in motion
if
(
wallTangentialValue_.empty()
|| (ibFvP.movingIb() || ibFvP.boundaryMesh().mesh().moving())
)
{
// NOTE(review): setSize presumably value-initialises only newly added
// elements - confirm existing entries are reset as intended when the
// number of IB cells is unchanged on a moving mesh
wallTangentialValue_.setSize
(
this->ibPatch().ibCells().size(),
0
);
}
return wallTangentialValue_;
}
//- Return non-const access to the cached wall shear stress vectors
//  (one entry per immersed-boundary cell). The cache is (re-)dimensioned
//  on first use, or whenever the immersed boundary or the mesh moves.
Foam::vectorField&
immersedBoundaryVelocityWallFunctionFvPatchVectorField::tauWall() const
{
// Bugfix 30/OCT/2015 - check if the mesh is moving
const immersedBoundaryFvPatch& ibFvP =
immersedBoundaryFvPatchVectorField::ibPatch();
// Resize when the cache is empty or the IB/mesh is moving, so stale
// per-cell shear stresses from a previous configuration are not reused
if
(
tauWall_.empty()
|| (ibFvP.movingIb() || ibFvP.boundaryMesh().mesh().moving())
)
{
// NOTE(review): setSize presumably initialises only newly added
// elements to vector::zero - verify behaviour when the size is
// unchanged on a moving mesh
tauWall_.setSize
(
this->ibPatch().ibCells().size(),
vector::zero
);
}
return tauWall_;
}
//- Return non-const access to the fixed-value indicator mask
//  (one flag per immersed-boundary cell, initialised to false).
//  The cache is (re-)dimensioned on first use, or whenever the
//  immersed boundary or the mesh moves.
Foam::boolList&
immersedBoundaryVelocityWallFunctionFvPatchVectorField::wallMask() const
{
// Bugfix 30/OCT/2015 - check if the mesh is moving
const immersedBoundaryFvPatch& ibFvP =
immersedBoundaryFvPatchVectorField::ibPatch();
// Resize when the cache is empty or the IB/mesh is moving, so a mask
// built for a previous mesh configuration is not reused
if
(
wallMask_.empty()
|| (ibFvP.movingIb() || ibFvP.boundaryMesh().mesh().moving())
)
{
// NOTE(review): setSize presumably initialises only newly added
// elements to false - verify existing flags are reset as intended
// when the size is unchanged on a moving mesh
wallMask_.setSize
(
this->ibPatch().ibCells().size(),
false
);
}
return wallMask_;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View file

@ -165,53 +165,17 @@ public:
//- Access to tangential velocity value to fix in IB cell
// Note non-const access
scalarField& wallTangentialValue() const
{
if (wallTangentialValue_.empty())
{
wallTangentialValue_.setSize
(
this->ibPatch().ibCells().size(),
0
);
}
return wallTangentialValue_;
}
scalarField& wallTangentialValue() const;
//- Return wall shear stress
const vectorField& wallShearStress() const;
//- Access to wall shear stress in IB cell
// Note non-const access
vectorField& tauWall() const
{
if (tauWall_.empty())
{
tauWall_.setSize
(
this->ibPatch().ibCells().size(),
vector::zero
);
}
return tauWall_;
}
vectorField& tauWall() const;
//- Access to indicator on fixed values. Note non-const access
boolList& wallMask() const
{
if (wallMask_.empty())
{
wallMask_.setSize
(
this->ibPatch().ibCells().size(),
false
);
}
return wallMask_;
}
boolList& wallMask() const;
};

View file

@ -39,14 +39,6 @@ namespace RASModels
// * * * * * * * * * * * * * Protected Member Functions * * * * * * * * * * //
template<class Type>
void immersedBoundaryWallFunctionFvPatchField<Type>::motionUpdate() const
{
wallValue_.clear();
wallMask_.clear();
immersedBoundaryFvPatchField<Type>::motionUpdate();
}
template<class Type>
@ -173,6 +165,65 @@ immersedBoundaryWallFunctionFvPatchField
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
//- Return non-const access to the cached wall values to fix in IB cells
//  (one entry per immersed-boundary cell, zero-initialised).
//  The cache is (re-)dimensioned on first use, or whenever the immersed
//  boundary or the mesh moves, so values from a stale intersection are
//  not reused.
template<class Type>
Foam::Field<Type>& immersedBoundaryWallFunctionFvPatchField<Type>::wallValue() const
{
// Note: on a moving mesh, the intersection has changed and
// wallValue fields should be cleared and recalculated.
// This should happen only once, but I cannot see the mechanism
// HJ, 17/Oct/2012
// Bugfix 30/OCT/2015 - check if the mesh is moving
const immersedBoundaryFvPatch& ibFvP =
immersedBoundaryFvPatchField<Type>::ibPatch();
if
(
wallValue_.empty()
|| (ibFvP.movingIb() || ibFvP.boundaryMesh().mesh().moving())
)
{
// NOTE(review): setSize presumably zero-initialises only newly added
// elements - confirm existing entries are reset when the number of
// IB cells is unchanged on a moving mesh
wallValue_.setSize
(
this->ibPatch().ibCells().size(),
pTraits<Type>::zero
);
}
return wallValue_;
}
//- Return non-const access to the fixed-value indicator mask
//  (one flag per immersed-boundary cell, initialised to false).
//  The cache is (re-)dimensioned on first use, or whenever the immersed
//  boundary or the mesh moves, so a mask from a stale intersection is
//  not reused.
template<class Type>
Foam::boolList& immersedBoundaryWallFunctionFvPatchField<Type>::wallMask() const
{
// Note: on a moving mesh, the intersection has changed and
// wallValue fields should be cleared and recalculated.
// This should happen only once, but I cannot see the mechanism
// HJ, 17/Oct/2012
// Bugfix 30/OCT/2015 - check if the mesh is moving
const immersedBoundaryFvPatch& ibFvP =
immersedBoundaryFvPatchField<Type>::ibPatch();
if
(
wallMask_.empty()
|| (ibFvP.movingIb() || ibFvP.boundaryMesh().mesh().moving())
)
{
// NOTE(review): setSize presumably initialises only newly added
// elements to false - confirm existing flags are reset when the
// size is unchanged on a moving mesh
wallMask_.setSize
(
this->ibPatch().ibCells().size(),
false
);
}
return wallMask_;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace RASModels

View file

@ -86,9 +86,6 @@ protected:
// Protected Member Functions
//- Execute immersed boundary patch field motion update
virtual void motionUpdate() const;
//- Set IB cell values: contains data manipulation
virtual void setIbCellValues(const Field<Type>&) const;
@ -172,38 +169,10 @@ public:
// Access
//- Access to value to fix in IB cell. Note non-const access
Field<Type>& wallValue() const
{
// Note: on a moving mesh, the intersection has changed and
// wallValue fields should be cleared and recalculated.
// This should happen only once, but I cannot see the mechanism
// HJ, 17/Oct/2012
if (wallValue_.empty())
{
wallValue_.setSize
(
this->ibPatch().ibCells().size(),
pTraits<Type>::zero
);
}
return wallValue_;
}
Field<Type>& wallValue() const;
//- Access to indicator on fixed values. Note non-const access
boolList& wallMask() const
{
if (wallMask_.empty())
{
wallMask_.setSize
(
this->ibPatch().ibCells().size(),
false
);
}
return wallMask_;
}
boolList& wallMask() const;
};

View file

@ -203,11 +203,7 @@ void Foam::symGaussSeidelPrecon::precondition
// Finish current x
curX /= diagPtr[rowI];
// Distribute the neighbour side using current x
for (register label curCoeff = fStart; curCoeff < fEnd; curCoeff++)
{
bPrimePtr[uPtr[curCoeff]] -= lowerPtr[curCoeff]*curX;
}
// No need to update bPrime on reverse sweep. VV, 20/May/2015.
}
}
}
@ -325,12 +321,7 @@ void Foam::symGaussSeidelPrecon::preconditionT
// Finish current x
curX /= diagPtr[rowI];
// Distribute the neighbour side using current x
for (register label curCoeff = fStart; curCoeff < fEnd; curCoeff++)
{
// Transpose multiplication. HJ, 10/Jul/2007
bPrimePtr[uPtr[curCoeff]] -= upperPtr[curCoeff]*curX;
}
// No need to update bPrime on reverse sweep. VV, 20/May/2015.
}
}
}

View file

@ -46,17 +46,12 @@ bool triSurface::stitchTriangles
pointField newPoints;
bool hasMerged = mergePoints(rawPoints, tol, verbose, pointMap, newPoints);
pointField& ps = storedPoints();
// Set the coordinates to the merged ones
ps.transfer(newPoints);
if (hasMerged)
{
if (verbose)
{
Pout<< "stitchTriangles : Merged from " << rawPoints.size()
<< " points down to " << ps.size() << endl;
<< " points down to " << newPoints.size() << endl;
}
// Reset the triangle point labels to the unique points array
@ -72,7 +67,12 @@ bool triSurface::stitchTriangles
tri.region()
);
if ((newTri[0] != newTri[1]) && (newTri[0] != newTri[2]) && (newTri[1] != newTri[2]))
if
(
(newTri[0] != newTri[1])
&& (newTri[0] != newTri[2])
&& (newTri[1] != newTri[2])
)
{
operator[](newTriangleI++) = newTri;
}
@ -82,11 +82,12 @@ bool triSurface::stitchTriangles
<< "Removing triangle " << i
<< " with non-unique vertices." << endl
<< " vertices :" << newTri << endl
<< " coordinates:" << newTri.points(ps)
<< " coordinates:" << newTri.points(newPoints)
<< endl;
}
}
// If empty triangles are detected, remove them from the list
if (newTriangleI != size())
{
if (verbose)
@ -97,12 +98,12 @@ bool triSurface::stitchTriangles
}
setSize(newTriangleI);
// And possibly compact out any unused points (since used only
// Possibly compact out any unused points (since used only
// by triangles that have just been deleted)
// Done in two passes to save memory (pointField)
// 1. Detect only
PackedBoolList pointIsUsed(ps.size());
PackedBoolList pointIsUsed(newPoints.size());
label nPoints = 0;
@ -120,20 +121,20 @@ bool triSurface::stitchTriangles
}
}
if (nPoints != ps.size())
if (nPoints != newPoints.size())
{
// 2. Compact.
pointMap.setSize(ps.size());
pointMap.setSize(newPoints.size());
label newPointI = 0;
forAll(pointIsUsed, pointI)
{
if (pointIsUsed[pointI])
{
ps[newPointI] = ps[pointI];
newPoints[newPointI] = newPoints[pointI];
pointMap[pointI] = newPointI++;
}
}
ps.setSize(newPointI);
newPoints.setSize(newPointI);
newTriangleI = 0;
forAll(*this, i)
@ -149,6 +150,9 @@ bool triSurface::stitchTriangles
}
}
}
// Set the coordinates to the merged ones
storedPoints().transfer(newPoints);
}
return hasMerged;

View file

@ -274,7 +274,7 @@ void Foam::turbulentTemperatureCoupledBaffleFvPatchScalarField::updateCoeffs()
scalarField nbrIntFld = nbrField.patchInternalField();
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(), // what to send
@ -286,7 +286,7 @@ void Foam::turbulentTemperatureCoupledBaffleFvPatchScalarField::updateCoeffs()
scalarField nbrKappaDelta = nbrField.Kappa()*nbrPatch.deltaCoeffs();
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(), // what to send
@ -296,7 +296,8 @@ void Foam::turbulentTemperatureCoupledBaffleFvPatchScalarField::updateCoeffs()
tmp<scalarField> myKappaDelta = Kappa()*patch().deltaCoeffs();
// Calculate common wall temperature. Reuse *this to store common value.
// Calculate common wall temperature.
// Reuse *this to store common value.
scalarField Twall
(
(myKappaDelta()*intFld() + nbrKappaDelta*nbrIntFld)
@ -307,7 +308,7 @@ void Foam::turbulentTemperatureCoupledBaffleFvPatchScalarField::updateCoeffs()
// Distribute back and assign to neighbour
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
nbrField.size(),
distMap.constructMap(), // reverse : what to send

View file

@ -233,7 +233,7 @@ void turbulentTemperatureCoupledBaffleMixedFvPatchScalarField::updateCoeffs()
scalarField nbrIntFld = nbrField.patchInternalField();
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(), // what to send
@ -245,7 +245,7 @@ void turbulentTemperatureCoupledBaffleMixedFvPatchScalarField::updateCoeffs()
scalarField nbrKappaDelta = nbrField.Kappa()*nbrPatch.deltaCoeffs();
mapDistribute::distribute
(
static_cast<Pstream::commsTypes>(Pstream::defaultCommsType()),
Pstream::defaultComms(),
distMap.schedule(),
distMap.constructSize(),
distMap.subMap(), // what to send

View file

@ -302,16 +302,6 @@ void coupledKEpsilon::correct()
keEqn.insertEquation(0, kEqn);
}
// Add coupling term: C1*Cmu*(symm(grad(U))) k but with wall function
// corrections: must be calculated from G. HJ, 27/Apr/2015
// Add coupling term: epsilon source depends on k
// k, e sink terms cannot be changed because of boundedness
keEqn.insertEquationCoupling
(
1, 0, -C1_*G*epsilon_/sqr(k_)
);
// Update source coupling: coupling terms eliminated from source
keEqn.updateSourceCoupling();

View file

@ -3,7 +3,7 @@
| \\ / F ield | foam-extend: Open Source CFD |
| \\ / O peration | Version: 3.2 |
| \\ / A nd | Web: http://www.foam-extend.org |
| \\/ M anipulation | |
| \\/ M anipulation | For copyright notice see file Copyright |
\*---------------------------------------------------------------------------*/
FoamFile
{

View file

@ -3,7 +3,7 @@
| \\ / F ield | foam-extend: Open Source CFD |
| \\ / O peration | Version: 3.2 |
| \\ / A nd | Web: http://www.foam-extend.org |
| \\/ M anipulation | |
| \\/ M anipulation | For copyright notice see file Copyright |
\*---------------------------------------------------------------------------*/
FoamFile
{

View file

@ -3,7 +3,7 @@
| \\ / F ield | foam-extend: Open Source CFD |
| \\ / O peration | Version: 3.2 |
| \\ / A nd | Web: http://www.foam-extend.org |
| \\/ M anipulation | |
| \\/ M anipulation | For copyright notice see file Copyright |
\*---------------------------------------------------------------------------*/
FoamFile
{