FEATURE: Cumulative development including long double and coupled solver updates. Author: Hrvoje Jasak. Merge: Dominik Christ.
Commit 9dc306a1f5: 90 changed files with 2343 additions and 1002 deletions
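The long double part of this changeset is switched on through a new WM_PRECISION_OPTION value (LDP, next to DP and SP) and matching MPI and CUDA type mappings further down in the diff. A minimal sketch of the compile-time dispatch involved is shown below; the typedef name solverScalar is illustrative only and is not the actual foam-extend symbol.

    // Sketch only: illustrates the WM_SP / WM_DP / WM_LDP precision dispatch
    // that this commit extends (see the Pstream and cudaTypes.H hunks below).
    // "solverScalar" is a hypothetical name used for illustration.
    #include <cstdio>

    #if defined(WM_SP)
    typedef float solverScalar;          // single precision build
    #elif defined(WM_LDP)
    typedef long double solverScalar;    // new: long double precision build
    #else                                // default: WM_DP, double precision
    typedef double solverScalar;
    #endif

    int main()
    {
        std::printf("sizeof(solverScalar) = %zu bytes\n", sizeof(solverScalar));
        return 0;
    }

Compiled with -DWM_LDP this reports the platform's long double size; the real build additionally maps MPI_SCALAR to MPI_LONG_DOUBLE, as the Pstream hunk below shows.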
@@ -8,6 +8,6 @@ wmake blockCoupledScalarTransportFoam
wmake conjugateHeatFoam
wmake conjugateHeatSimpleFoam
wmake pUCoupledFoam
wmake pUCoupledFullPicardFoam
wmake pUCoupledSemiPicardFoam
wmake MRFPorousFoam

# ----------------------------------------------------------------- end-of-file
applications/solvers/coupled/MRFPorousFoam/MRFPorousFoam.C (new file, 118 lines)
@@ -0,0 +1,118 @@
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright held by original author
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

Application
    MRFPorousFoam

Description
    Steady-state solver for incompressible, turbulent flow, with implicit
    coupling between pressure and velocity achieved by fvBlockMatrix.
    Turbulence is in this version solved using the existing turbulence
    structure.

    Added support for MRF and porous zones

Authors
    Hrvoje Jasak, Wikki Ltd.

\*---------------------------------------------------------------------------*/

#include "fvCFD.H"
#include "fvBlockMatrix.H"
#include "singlePhaseTransportModel.H"
#include "RASModel.H"
#include "MRFZones.H"
#include "porousZones.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

int main(int argc, char *argv[])
{

#   include "setRootCase.H"
#   include "createTime.H"
#   include "createMesh.H"
#   include "createFields.H"
#   include "createMRF.H"
#   include "createPorous.H"
#   include "initContinuityErrs.H"
#   include "initConvergenceCheck.H"

    Info<< "\nStarting time loop\n" << endl;
    while (runTime.loop())
    {
#       include "readBlockSolverControls.H"
#       include "readFieldBounds.H"

        Info<< "Time = " << runTime.timeName() << nl << endl;

        p.storePrevIter();

        // Initialize the Up block system (matrix, source and reference to Up)
        fvBlockMatrix<vector4> UpEqn(Up);

        // Assemble and insert momentum equation
#       include "UEqn.H"

        // Assemble and insert pressure equation
#       include "pEqn.H"

        // Assemble and insert coupling terms
#       include "couplingTerms.H"

        // Solve the block matrix
        maxResidual = cmptMax(UpEqn.solve().initialResidual());

        // Retrieve solution
        UpEqn.retrieveSolution(0, U.internalField());
        UpEqn.retrieveSolution(3, p.internalField());

        U.correctBoundaryConditions();
        p.correctBoundaryConditions();

        phi = (fvc::interpolate(U) & mesh.Sf()) + pEqn.flux() + presSource;

        // Make flux relative in rotating zones
        mrfZones.relativeFlux(phi);

#       include "continuityErrs.H"

#       include "boundPU.H"

        p.relax();

        turbulence->correct();
        runTime.write();

        Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
            << " ClockTime = " << runTime.elapsedClockTime() << " s"
            << nl << endl;

#       include "convergenceCheck.H"
    }

    Info<< "End\n" << endl;

    return 0;
}
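For orientation before the build files: the solver above packs velocity and pressure into a single vector4 unknown per cell (U in slots 0 to 2, p in slot 3) and assembles momentum, pressure and the grad(p)/div(U) coupling blocks into one fvBlockMatrix before a single coupled solve. The essential call sequence, condensed from the file above and its included headers, is sketched here; it is not a standalone program and relies on the foam-extend headers already included in MRFPorousFoam.C.

    // Condensed from MRFPorousFoam.C, UEqn.H, pEqn.H and couplingTerms.H above.
    fvBlockMatrix<vector4> UpEqn(Up);              // block system + reference to Up

    UpEqn.insertEquation(0, UEqn);                 // momentum into rows/columns 0-2
    UpEqn.insertEquation(3, pEqn);                 // pressure into row/column 3

    UpEqn.insertBlockCoupling(0, 3, pInU, true);   // grad(p) coupling into momentum
    UpEqn.insertBlockCoupling(3, 0, UInp, false);  // div(U) coupling into pressure

    maxResidual = cmptMax(UpEqn.solve().initialResidual());

    UpEqn.retrieveSolution(0, U.internalField());  // unpack velocity
    UpEqn.retrieveSolution(3, p.internalField());  // unpack pressure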
applications/solvers/coupled/MRFPorousFoam/Make/files (new file, 3 lines)
@@ -0,0 +1,3 @@
MRFPorousFoam.C

EXE = $(FOAM_APPBIN)/MRFPorousFoam
applications/solvers/coupled/MRFPorousFoam/Make/options (new file, 12 lines)
@@ -0,0 +1,12 @@
EXE_INC = \
    -I$(LIB_SRC)/turbulenceModels \
    -I$(LIB_SRC)/turbulenceModels/incompressible/RAS/RASModel \
    -I$(LIB_SRC)/transportModels \
    -I$(LIB_SRC)/transportModels/incompressible/singlePhaseTransportModel \
    -I$(LIB_SRC)/finiteVolume/lnInclude

EXE_LIBS = \
    -lincompressibleRASModels \
    -lincompressibleTransportModels \
    -lfiniteVolume \
    -llduSolvers
applications/solvers/coupled/MRFPorousFoam/UEqn.H (new file, 15 lines)
@@ -0,0 +1,15 @@
// Momentum equation
fvVectorMatrix UEqn
(
    fvm::ddt(U)
  + fvm::div(phi, U)
  + turbulence->divDevReff(U)
);

// Add MRF and porous sources
mrfZones.addCoriolis(UEqn);
pZones.addResistance(UEqn);

UEqn.relax();

UpEqn.insertEquation(0, UEqn);
applications/solvers/coupled/MRFPorousFoam/boundPU.H (new file, 32 lines)
@@ -0,0 +1,32 @@
{
    // Bound the pressure
    dimensionedScalar p1 = min(p);
    dimensionedScalar p2 = max(p);

    if (p1 < pMin || p2 > pMax)
    {
        Info<< "p: " << p1.value() << " " << p2.value()
            << ". Bounding." << endl;

        p.max(pMin);
        p.min(pMax);
        p.correctBoundaryConditions();
    }

    // Bound the velocity
    volScalarField magU = mag(U);
    dimensionedScalar U1 = max(magU);

    if (U1 > UMax)
    {
        Info<< "U: " << U1.value() << ". Bounding." << endl;

        volScalarField Ulimiter = pos(magU - UMax)*UMax/(magU + smallU)
            + neg(magU - UMax);
        Ulimiter.max(scalar(0));
        Ulimiter.min(scalar(1));

        U *= Ulimiter;
        U.correctBoundaryConditions();
    }
}
@@ -0,0 +1,8 @@
// Check convergence
if (maxResidual < convergenceCriterion)
{
    Info<< "reached convergence criterion: " << convergenceCriterion << endl;
    runTime.writeAndEnd();
    Info<< "latestTime = " << runTime.timeName() << endl;
}
applications/solvers/coupled/MRFPorousFoam/couplingTerms.H (new file, 15 lines)
@@ -0,0 +1,15 @@
{
    // Calculate grad p coupling matrix. Needs to be here if one uses
    // gradient schemes with limiters. VV, 9/June/2014
    BlockLduSystem<vector, vector> pInU(fvm::grad(p));

    // Calculate div U coupling. Could be calculated only once since
    // it is only geometry dependent. VV, 9/June/2014
    BlockLduSystem<vector, scalar> UInp(fvm::UDiv(U));

    // Last argument in insertBlockCoupling says if the column direction
    // should be incremented. This is needed for arbitrary positioning
    // of U and p in the system. This could be better. VV, 30/April/2014
    UpEqn.insertBlockCoupling(0, 3, pInU, true);
    UpEqn.insertBlockCoupling(3, 0, UInp, false);
}
applications/solvers/coupled/MRFPorousFoam/createFields.H (new file, 52 lines)
@@ -0,0 +1,52 @@
Info << "Reading field p\n" << endl;
volScalarField p
(
    IOobject
    (
        "p",
        runTime.timeName(),
        mesh,
        IOobject::MUST_READ,
        IOobject::AUTO_WRITE
    ),
    mesh
);

Info << "Reading field U\n" << endl;
volVectorField U
(
    IOobject
    (
        "U",
        runTime.timeName(),
        mesh,
        IOobject::MUST_READ,
        IOobject::AUTO_WRITE
    ),
    mesh
);

#include "createPhi.H"

singlePhaseTransportModel laminarTransport(U, phi);
autoPtr<incompressible::RASModel> turbulence
(
    incompressible::RASModel::New(U, phi, laminarTransport)
);

// Block vector field for velocity (first entry) and pressure (second
// entry).
Info << "Creating field Up\n" << endl;
volVector4Field Up
(
    IOobject
    (
        "Up",
        runTime.timeName(),
        mesh,
        IOobject::NO_READ,
        IOobject::NO_WRITE
    ),
    mesh,
    dimensionedVector4("zero", dimless, vector4::zero)
);
applications/solvers/coupled/MRFPorousFoam/createMRF.H (new file, 2 lines)
@@ -0,0 +1,2 @@
MRFZones mrfZones(mesh);
mrfZones.correctBoundaryVelocity(U);
@@ -0,0 +1 @@
porousZones pZones(mesh);
@@ -0,0 +1,4 @@
// initialize values for convergence checks

scalar maxResidual = 0;
scalar convergenceCriterion = 0;
applications/solvers/coupled/MRFPorousFoam/pEqn.H (new file, 23 lines)
@@ -0,0 +1,23 @@
// Pressure parts of the continuity equation
surfaceScalarField rUAf
(
    "rUAf",
    fvc::interpolate(1.0/UEqn.A())
);

surfaceScalarField presSource
(
    "presSource",
    rUAf*(fvc::interpolate(fvc::grad(p, "grad(pSource)")) & mesh.Sf())
);

fvScalarMatrix pEqn
(
  - fvm::laplacian(rUAf, p)
 ==
  - fvc::div(presSource)
);

pEqn.setReference(pRefCell, pRefValue);

UpEqn.insertEquation(3, pEqn);
@@ -0,0 +1,15 @@
label pRefCell = 0;
scalar pRefValue = 0;
setRefCell
(
    p,
    mesh.solutionDict().subDict("blockSolver"),
    pRefCell,
    pRefValue
);

mesh.solutionDict().subDict("blockSolver").readIfPresent
(
    "convergence",
    convergenceCriterion
);
applications/solvers/coupled/MRFPorousFoam/readFieldBounds.H (new file, 14 lines)
@@ -0,0 +1,14 @@
// Read field bounds
dictionary fieldBounds = mesh.solutionDict().subDict("fieldBounds");

// Pressure bounds
dimensionedScalar pMin("pMin", p.dimensions(), 0);
dimensionedScalar pMax("pMax", p.dimensions(), 0);

fieldBounds.lookup(p.name()) >> pMin.value() >> pMax.value();

// Velocity bound
dimensionedScalar UMax("UMax", U.dimensions(), 0);

fieldBounds.lookup(U.name()) >> UMax.value();
dimensionedScalar smallU("smallU", dimVelocity, 1e-10);
@@ -38,13 +38,8 @@ Description
#include "fieldTypes.H"
#include "Time.H"
#include "fvMesh.H"
#include "fvBlockMatrix.H"

#include "blockLduSolvers.H"
#include "VectorNFieldTypes.H"
#include "volVectorNFields.H"
#include "blockVectorNMatrices.H"

#include "blockMatrixTools.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

@@ -92,53 +87,27 @@ int main(int argc, char *argv[])
TsEqn.relax();

// Prepare block system
BlockLduMatrix<vector2> blockM(mesh);

//- Transfer the coupled interface list for processor/cyclic/etc.
// boundaries
blockM.interfaces() = blockT.boundaryField().blockInterfaces();

// Grab block diagonal and set it to zero
Field<tensor2>& d = blockM.diag().asSquare();
d = tensor2::zero;

// Grab linear off-diagonal and set it to zero
Field<vector2>& l = blockM.lower().asLinear();
Field<vector2>& u = blockM.upper().asLinear();
u = vector2::zero;
l = vector2::zero;

vector2Field& blockX = blockT.internalField();
vector2Field blockB(mesh.nCells(), vector2::zero);
fvBlockMatrix<vector2> blockM(blockT);

//- Inset equations into block Matrix
blockMatrixTools::insertEquation(0, TEqn, blockM, blockX, blockB);
blockMatrixTools::insertEquation(1, TsEqn, blockM, blockX, blockB);
blockM.insertEquation(0, TEqn);
blockM.insertEquation(1, TsEqn);

//- Add off-diagonal terms and remove from block source
forAll(d, i)
{
d[i](0, 1) = -alpha.value()*mesh.V()[i];
d[i](1, 0) = -alpha.value()*mesh.V()[i];
//- Add off-diagonal coupling terms
scalarField coupling(mesh.nCells(), -alpha.value());

blockB[i][0] -= alpha.value()*blockX[i][1]*mesh.V()[i];
blockB[i][1] -= alpha.value()*blockX[i][0]*mesh.V()[i];
}
blockM.insertEquationCoupling(0, 1, coupling);
blockM.insertEquationCoupling(1, 0, coupling);

// Update source coupling: coupling terms eliminated from source
blockM.updateSourceCoupling();

//- Block coupled solver call
BlockSolverPerformance<vector2> solverPerf =
BlockLduSolver<vector2>::New
(
blockT.name(),
blockM,
mesh.solutionDict().solver(blockT.name())
)->solve(blockX, blockB);

solverPerf.print();
blockM.solve();

// Retrieve solution
blockMatrixTools::blockRetrieve(0, T.internalField(), blockX);
blockMatrixTools::blockRetrieve(1, Ts.internalField(), blockX);
blockM.retrieveSolution(0, T.internalField());
blockM.retrieveSolution(1, Ts.internalField());

T.correctBoundaryConditions();
Ts.correctBoundaryConditions();
@@ -44,15 +44,6 @@
dimensionedVector2("zero", dimless, vector2::zero)
);

{
vector2Field& blockX = blockT.internalField();

blockMatrixTools::blockInsert(0, T.internalField(), blockX);
blockMatrixTools::blockInsert(1, Ts.internalField(), blockX);
}

blockT.write();

Info<< "Reading field U\n" << endl;

volVectorField U
@@ -46,7 +46,7 @@ ersPointSource::ersPointSource
alpha_(readScalar(dict.lookup("alpha"))),
direction_(dict.lookup("direction"))
{
q() = -alpha_*qmax_*min(direction_ & p.Sf()/p.magSf(), 0.0);
q() = -alpha_*qmax_*min(direction_ & p.Sf()/p.magSf(), scalar(0));
}

@@ -30,18 +30,10 @@ Description
#include "contactPatchPair.H"
#include "volFields.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{

// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //

// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //

// Construct from components
contactPatchPair::contactPatchPair
Foam::contactPatchPair::contactPatchPair
(
const fvMesh& m,
const label master,

@@ -75,6 +67,4 @@ contactPatchPair::contactPatchPair
{}

} // End namespace Foam

// ************************************************************************* //

@@ -302,7 +302,7 @@ void Foam::contactPatchPair::correct
(
slavePressure
),
0.0
scalar(0)
);

// Calculate master traction, using the positive part of

@@ -6,13 +6,15 @@ nCoupledFacesToBreak = 0;
// scalarField effTraction =
// cohesiveZone.internalField() *
// mag(traction.internalField());
scalarField normalTraction =
cohesiveZone.internalField() *
( n.internalField() & traction.internalField() );
normalTraction = max(normalTraction, 0.0); // only consider tensile tractions
scalarField shearTraction =
cohesiveZone.internalField() *
mag( (I - Foam::sqr(n.internalField())) & traction.internalField() );
scalarField normalTraction =
cohesiveZone.internalField()*
( n.internalField() & traction.internalField() );

// only consider tensile tractions
normalTraction = max(normalTraction, scalar(0));
scalarField shearTraction =
cohesiveZone.internalField() *
mag( (I - Foam::sqr(n.internalField())) & traction.internalField() );

// the traction fraction is monitored to decide which faces to break:
// ie (tN/tNC)^2 + (tS/tSC)^2 >1 to crack a face
@@ -27,16 +29,18 @@ nCoupledFacesToBreak = 0;
scalarField effTractionFraction(normalTraction.size(), 0.0);

if(cohesivePatchUPtr)
{
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI) + (shearTraction/tauMaxI)*(shearTraction/tauMaxI);
}
{
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI)
+ (shearTraction/tauMaxI)*(shearTraction/tauMaxI);
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI) + (shearTraction/sigmaMaxI)*(shearTraction/sigmaMaxI);
}
{
// solidCohesiveFixedModeMix only uses sigmaMax
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI)
+ (shearTraction/sigmaMaxI)*(shearTraction/sigmaMaxI);
}
maxEffTractionFraction = gMax(effTractionFraction);

SLList<label> facesToBreakList;

@@ -44,15 +48,21 @@ nCoupledFacesToBreak = 0;

forAll(effTractionFraction, faceI)
{
if (effTractionFraction[faceI] > 1.0)
if (effTractionFraction[faceI] > 1.0)
{
facesToBreakList.insert(faceI);
facesToBreakEffTractionFractionList.insert(effTractionFraction[faceI]);
facesToBreakEffTractionFractionList.insert
(
effTractionFraction[faceI]
);
}
}

labelList facesToBreak(facesToBreakList);
List<scalar> facesToBreakEffTractionFraction(facesToBreakEffTractionFractionList);
List<scalar> facesToBreakEffTractionFraction
(
facesToBreakEffTractionFractionList
);

nFacesToBreak = facesToBreak.size();

@@ -122,7 +132,7 @@ nCoupledFacesToBreak = 0;
scalarField pNormalTraction =
cohesiveZone.boundaryField()[patchI] *
( n.boundaryField()[patchI] & traction.boundaryField()[patchI] );
pNormalTraction = max(pNormalTraction, 0.0); // only consider tensile tractions
pNormalTraction = max(pNormalTraction, scalar(0)); // only consider tensile tractions
scalarField pShearTraction =
cohesiveZone.boundaryField()[patchI] *
mag( (I - Foam::sqr(n.boundaryField()[patchI])) & traction.boundaryField()[patchI] );

@@ -6,13 +6,16 @@ nCoupledFacesToBreak = 0;
// scalarField effTraction =
// cohesiveZone.internalField() *
// mag(traction.internalField());
scalarField normalTraction =
cohesiveZone.internalField() *
( n.internalField() & traction.internalField() );
normalTraction = max(normalTraction, 0.0); // only consider tensile tractions
scalarField shearTraction =
cohesiveZone.internalField() *
mag( (I - Foam::sqr(n.internalField())) & traction.internalField() );
scalarField normalTraction =
cohesiveZone.internalField()*
( n.internalField() & traction.internalField() );

// only consider tensile tractions
normalTraction = max(normalTraction, scalar(0));

scalarField shearTraction =
cohesiveZone.internalField() *
mag( (I - Foam::sqr(n.internalField())) & traction.internalField() );

// the traction fraction is monitored to decide which faces to break:
// ie (tN/tNC)^2 + (tS/tSC)^2 >1 to crack a face
@@ -28,16 +31,18 @@ nCoupledFacesToBreak = 0;
scalarField effTractionFraction(normalTraction.size(), 0.0);

if(cohesivePatchDUPtr)
{
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI) + (shearTraction/tauMaxI)*(shearTraction/tauMaxI);
}
{
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI)
+ (shearTraction/tauMaxI)*(shearTraction/tauMaxI);
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI) + (shearTraction/sigmaMaxI)*(shearTraction/sigmaMaxI);
}
{
// solidCohesiveFixedModeMix only uses sigmaMax
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI)
+ (shearTraction/sigmaMaxI)*(shearTraction/sigmaMaxI);
}

maxEffTractionFraction = gMax(effTractionFraction);

@@ -46,15 +51,21 @@ nCoupledFacesToBreak = 0;

forAll(effTractionFraction, faceI)
{
if (effTractionFraction[faceI] > 1.0)
if (effTractionFraction[faceI] > 1.0)
{
facesToBreakList.insert(faceI);
facesToBreakEffTractionFractionList.insert(effTractionFraction[faceI]);
facesToBreakEffTractionFractionList.insert
(
effTractionFraction[faceI]
);
}
}

labelList facesToBreak(facesToBreakList);
List<scalar> facesToBreakEffTractionFraction(facesToBreakEffTractionFractionList);
List<scalar> facesToBreakEffTractionFraction
(
facesToBreakEffTractionFractionList
);

nFacesToBreak = facesToBreak.size();

@@ -69,9 +80,14 @@ nCoupledFacesToBreak = 0;
scalar faceToBreakEffTractionFraction = 0;
forAll(facesToBreakEffTractionFraction, faceI)
{
if (facesToBreakEffTractionFraction[faceI] > faceToBreakEffTractionFraction)
if
(
facesToBreakEffTractionFraction[faceI]
> faceToBreakEffTractionFraction
)
{
faceToBreakEffTractionFraction = facesToBreakEffTractionFraction[faceI];
faceToBreakEffTractionFraction =
facesToBreakEffTractionFraction[faceI];
faceToBreakIndex = facesToBreak[faceI];
}
}
@@ -83,7 +99,11 @@ nCoupledFacesToBreak = 0;
bool procHasFaceToBreak = false;
if (nFacesToBreak > 0)
{
if ( mag(gMaxEffTractionFraction - faceToBreakEffTractionFraction) < SMALL )
if
(
mag(gMaxEffTractionFraction - faceToBreakEffTractionFraction)
< SMALL
)
{
// philipc - Maximum traction fraction is on this processor
procHasFaceToBreak = true;
@@ -123,9 +143,12 @@ nCoupledFacesToBreak = 0;
// scalarField pEffTractionFraction = pEffTraction/sigmaMax.boundaryField()[patchI];

scalarField pNormalTraction =
cohesiveZone.boundaryField()[patchI] *
( n.boundaryField()[patchI] & traction.boundaryField()[patchI] );
pNormalTraction = max(pNormalTraction, 0.0); // only consider tensile tractions
cohesiveZone.boundaryField()[patchI]*
( n.boundaryField()[patchI] & traction.boundaryField()[patchI] );

// only consider tensile tractions
pNormalTraction = max(pNormalTraction, scalar(0));

scalarField pShearTraction =
cohesiveZone.boundaryField()[patchI] *
mag( (I - Foam::sqr(n.boundaryField()[patchI])) & traction.boundaryField()[patchI] );
@@ -139,24 +162,24 @@ nCoupledFacesToBreak = 0;
// (pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax) + (pShearTraction/pTauMax)*(pShearTraction/pTauMax);
scalarField pEffTractionFraction(pNormalTraction.size(), 0.0);
if(cohesivePatchDUPtr)
{
{
pEffTractionFraction =
(pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax) + (pShearTraction/pTauMax)*(pShearTraction/pTauMax);
}
}
else
{
{
// solidCohesiveFixedModeMix only uses sigmaMax
pEffTractionFraction =
(pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax) + (pShearTraction/pSigmaMax)*(pShearTraction/pSigmaMax);
}
}

label start = mesh.boundaryMesh()[patchI].start();

forAll(pEffTractionFraction, faceI)
{
if (pEffTractionFraction[faceI] > maxEffTractionFraction)
{
maxEffTractionFraction = pEffTractionFraction[faceI];
if (pEffTractionFraction[faceI] > maxEffTractionFraction)
{
maxEffTractionFraction = pEffTractionFraction[faceI];
}

if (pEffTractionFraction[faceI] > 1.0)
@@ -368,30 +391,41 @@ nCoupledFacesToBreak = 0;
faceToBreakSigmaMax = sigmaMax.boundaryField()[patchID][localIndex];
faceToBreakTauMax = tauMax.boundaryField()[patchID][localIndex];
scalar normalTrac = faceToBreakNormal & faceToBreakTraction;
normalTrac = max(normalTrac, 0.0);
normalTrac = max(normalTrac, scalar(0));
scalar shearTrac = mag( (I - sqr(faceToBreakNormal)) & faceToBreakTraction );
scalar scaleFactor = 1;
if(cohesivePatchDUPtr)
{
{
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakTauMax)*(shearTrac/faceToBreakTauMax)
) );
}
Foam::sqrt
(
1/
(
(normalTrac/faceToBreakSigmaMax)*
(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakTauMax)*(shearTrac/faceToBreakTauMax)
)
);
}
else
{
{
// solidCohesiveFixedModeMix only uses sigmaMax
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakSigmaMax)*(shearTrac/faceToBreakSigmaMax)
) );
}
Foam::sqrt
(
1/
(
(normalTrac/faceToBreakSigmaMax)*
(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakSigmaMax)*
(shearTrac/faceToBreakSigmaMax)
)
);
}

faceToBreakTraction *= scaleFactor;

topoChange = true;
faceToBreakTraction *= scaleFactor;

topoChange = true;
}

reduce(topoChange, orOp<bool>());

@@ -6,13 +6,15 @@ nCoupledFacesToBreak = 0;
// scalarField effTraction =
// cohesiveZone.internalField() *
// mag(traction.internalField());
scalarField normalTraction =
cohesiveZone.internalField() *
( n.internalField() & traction.internalField() );
normalTraction = max(normalTraction, 0.0); // only consider tensile tractions
scalarField shearTraction =
cohesiveZone.internalField() *
mag( (I - Foam::sqr(n.internalField())) & traction.internalField() );
scalarField normalTraction =
cohesiveZone.internalField()*
( n.internalField() & traction.internalField() );

// only consider tensile tractions
normalTraction = max(normalTraction, scalar(0));
scalarField shearTraction =
cohesiveZone.internalField()*
mag( (I - Foam::sqr(n.internalField())) & traction.internalField() );

// the traction fraction is monitored to decide which faces to break:
// ie (tN/tNC)^2 + (tS/tSC)^2 >1 to crack a face
@@ -26,17 +28,19 @@ nCoupledFacesToBreak = 0;
//scalarField effTractionFraction = effTraction/sigmaMax;
scalarField effTractionFraction(normalTraction.size(), 0.0);

if(cohesivePatchUPtr)
{
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI) + (shearTraction/tauMaxI)*(shearTraction/tauMaxI);
}
if (cohesivePatchUPtr)
{
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI)
+ (shearTraction/tauMaxI)*(shearTraction/tauMaxI);
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI) + (shearTraction/sigmaMaxI)*(shearTraction/sigmaMaxI);
}
{
// solidCohesiveFixedModeMix only uses sigmaMax
effTractionFraction =
(normalTraction/sigmaMaxI)*(normalTraction/sigmaMaxI)
+ (shearTraction/sigmaMaxI)*(shearTraction/sigmaMaxI);
}
maxEffTractionFraction = gMax(effTractionFraction);

SLList<label> facesToBreakList;

@@ -44,15 +48,21 @@ nCoupledFacesToBreak = 0;

forAll(effTractionFraction, faceI)
{
if (effTractionFraction[faceI] > 1.0)
if (effTractionFraction[faceI] > 1.0)
{
facesToBreakList.insert(faceI);
facesToBreakEffTractionFractionList.insert(effTractionFraction[faceI]);
facesToBreakEffTractionFractionList.insert
(
effTractionFraction[faceI]
);
}
}

labelList facesToBreak(facesToBreakList);
List<scalar> facesToBreakEffTractionFraction(facesToBreakEffTractionFractionList);
List<scalar> facesToBreakEffTractionFraction
(
facesToBreakEffTractionFractionList
);

nFacesToBreak = facesToBreak.size();

@@ -67,9 +77,15 @@ nCoupledFacesToBreak = 0;
scalar faceToBreakEffTractionFraction = 0;
forAll(facesToBreakEffTractionFraction, faceI)
{
if (facesToBreakEffTractionFraction[faceI] > faceToBreakEffTractionFraction)
if
(
facesToBreakEffTractionFraction[faceI]
> faceToBreakEffTractionFraction
)
{
faceToBreakEffTractionFraction = facesToBreakEffTractionFraction[faceI];
faceToBreakEffTractionFraction =
facesToBreakEffTractionFraction[faceI];

faceToBreakIndex = facesToBreak[faceI];
}
}
@@ -82,7 +98,11 @@ nCoupledFacesToBreak = 0;
bool procHasFaceToBreak = false;
if (nFacesToBreak > 0)
{
if ( mag(gMaxEffTractionFraction - faceToBreakEffTractionFraction) < SMALL )
if
(
mag(gMaxEffTractionFraction - faceToBreakEffTractionFraction)
< SMALL
)
{
// philipc - Maximum traction fraction is on this processor
procHasFaceToBreak = true;
@@ -90,7 +110,7 @@ nCoupledFacesToBreak = 0;
}

// Check if maximum is present on more then one processors
label procID = Pstream::nProcs();
label procID = Pstream::nProcs();
if (procHasFaceToBreak)
{
procID = Pstream::myProcNo();
@@ -115,54 +135,59 @@ nCoupledFacesToBreak = 0;
if (mesh.boundary()[patchI].coupled())
{
// scalarField pEffTraction =
// cohesiveZone.boundaryField()[patchI] *
// mag(traction.boundaryField()[patchI]);
// scalarField pEffTractionFraction = pEffTraction/sigmaMax.boundaryField()[patchI];
// cohesiveZone.boundaryField()[patchI] *
// mag(traction.boundaryField()[patchI]);
// scalarField pEffTractionFraction = pEffTraction/sigmaMax.boundaryField()[patchI];

scalarField pNormalTraction =
cohesiveZone.boundaryField()[patchI] *
( n.boundaryField()[patchI] & traction.boundaryField()[patchI] );
pNormalTraction = max(pNormalTraction, 0.0); // only consider tensile tractions
scalarField pShearTraction =
cohesiveZone.boundaryField()[patchI] *
mag( (I - Foam::sqr(n.boundaryField()[patchI])) & traction.boundaryField()[patchI] );
scalarField pNormalTraction =
cohesiveZone.boundaryField()[patchI] *
( n.boundaryField()[patchI] & traction.boundaryField()[patchI] );

// the traction fraction is monitored to decide which faces to break:
// ie (tN/tNC)^2 + (tS/tSC)^2 >1 to crack a face
const scalarField& pSigmaMax = sigmaMax.boundaryField()[patchI];
const scalarField& pTauMax = tauMax.boundaryField()[patchI];
// only consider tensile tractions
pNormalTraction = max(pNormalTraction, scalar(0));
scalarField pShearTraction =
cohesiveZone.boundaryField()[patchI] *
mag( (I - Foam::sqr(n.boundaryField()[patchI])) & traction.boundaryField()[patchI] );

scalarField pEffTractionFraction(pNormalTraction.size(), 0.0);
if(cohesivePatchUPtr)
{
pEffTractionFraction =
(pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax) + (pShearTraction/pTauMax)*(pShearTraction/pTauMax);
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
pEffTractionFraction =
(pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax) + (pShearTraction/pSigmaMax)*(pShearTraction/pSigmaMax);
}
// the traction fraction is monitored to decide which faces to break:
// ie (tN/tNC)^2 + (tS/tSC)^2 >1 to crack a face
const scalarField& pSigmaMax = sigmaMax.boundaryField()[patchI];
const scalarField& pTauMax = tauMax.boundaryField()[patchI];

label start = mesh.boundaryMesh()[patchI].start();
scalarField pEffTractionFraction(pNormalTraction.size(), 0);

forAll(pEffTractionFraction, faceI)
{
if (pEffTractionFraction[faceI] > maxEffTractionFraction)
{
maxEffTractionFraction = pEffTractionFraction[faceI];
}
if(cohesivePatchUPtr)
{
pEffTractionFraction =
(pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax)
+ (pShearTraction/pTauMax)*(pShearTraction/pTauMax);
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
pEffTractionFraction =
(pNormalTraction/pSigmaMax)*(pNormalTraction/pSigmaMax)
+ (pShearTraction/pSigmaMax)*(pShearTraction/pSigmaMax);
}

if (pEffTractionFraction[faceI] > 1.0)
{
coupledFacesToBreakList.insert(start + faceI);
coupledFacesToBreakEffTractionFractionList.insert
(
pEffTractionFraction[faceI]
);
}
}
label start = mesh.boundaryMesh()[patchI].start();

forAll(pEffTractionFraction, faceI)
{
if (pEffTractionFraction[faceI] > maxEffTractionFraction)
{
maxEffTractionFraction = pEffTractionFraction[faceI];
}

if (pEffTractionFraction[faceI] > 1.0)
{
coupledFacesToBreakList.insert(start + faceI);
coupledFacesToBreakEffTractionFractionList.insert
(
pEffTractionFraction[faceI]
);
}
}
}
}

@@ -250,7 +275,7 @@ nCoupledFacesToBreak = 0;
if (nCoupledFacesToBreak)
{
label patchID =
mesh.boundaryMesh().whichPatch(coupledFaceToBreakIndex);
mesh.boundaryMesh().whichPatch(coupledFaceToBreakIndex);

label start = mesh.boundaryMesh()[patchID].start();
label localIndex = coupledFaceToBreakIndex - start;
@@ -315,31 +340,31 @@ nCoupledFacesToBreak = 0;
faceToBreakNormal = n.internalField()[faceToBreakIndex];

// Scale broken face traction
faceToBreakSigmaMax = sigmaMaxI[faceToBreakIndex];
faceToBreakTauMax = tauMaxI[faceToBreakIndex];
scalar normalTrac = faceToBreakNormal & faceToBreakTraction;
normalTrac = max(normalTrac, 0.0);
scalar shearTrac = mag( (I - sqr(faceToBreakNormal)) & faceToBreakTraction );
scalar scaleFactor = 1;
if(cohesivePatchUPtr)
{
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakTauMax)*(shearTrac/faceToBreakTauMax)
) );
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakSigmaMax)*(shearTrac/faceToBreakSigmaMax)
) );
}
faceToBreakSigmaMax = sigmaMaxI[faceToBreakIndex];
faceToBreakTauMax = tauMaxI[faceToBreakIndex];
scalar normalTrac = faceToBreakNormal & faceToBreakTraction;
normalTrac = max(normalTrac, 0.0);
scalar shearTrac = mag( (I - sqr(faceToBreakNormal)) & faceToBreakTraction );
scalar scaleFactor = 1;
if(cohesivePatchUPtr)
{
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakTauMax)*(shearTrac/faceToBreakTauMax)
) );
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakSigmaMax)*(shearTrac/faceToBreakSigmaMax)
) );
}

faceToBreakTraction *= scaleFactor;
faceToBreakTraction *= scaleFactor;

topoChange = true;
}
@@ -354,29 +379,29 @@ nCoupledFacesToBreak = 0;
faceToBreakNormal = n.boundaryField()[patchID][localIndex];

// Scale broken face traction
faceToBreakSigmaMax = sigmaMax.boundaryField()[patchID][localIndex];
faceToBreakTauMax = tauMax.boundaryField()[patchID][localIndex];
scalar normalTrac = faceToBreakNormal & faceToBreakTraction;
normalTrac = max(normalTrac, 0.0);
scalar shearTrac = mag( (I - sqr(faceToBreakNormal)) & faceToBreakTraction );
scalar scaleFactor = 1;
if(cohesivePatchUPtr)
{
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakTauMax)*(shearTrac/faceToBreakTauMax)
) );
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakSigmaMax)*(shearTrac/faceToBreakSigmaMax)
) );
}
faceToBreakSigmaMax = sigmaMax.boundaryField()[patchID][localIndex];
faceToBreakTauMax = tauMax.boundaryField()[patchID][localIndex];
scalar normalTrac = faceToBreakNormal & faceToBreakTraction;
normalTrac = max(normalTrac, 0.0);
scalar shearTrac = mag( (I - sqr(faceToBreakNormal)) & faceToBreakTraction );
scalar scaleFactor = 1;
if(cohesivePatchUPtr)
{
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakTauMax)*(shearTrac/faceToBreakTauMax)
) );
}
else
{
// solidCohesiveFixedModeMix only uses sigmaMax
scaleFactor =
::sqrt(1 / (
(normalTrac/faceToBreakSigmaMax)*(normalTrac/faceToBreakSigmaMax)
+ (shearTrac/faceToBreakSigmaMax)*(shearTrac/faceToBreakSigmaMax)
) );
}

faceToBreakTraction *= scaleFactor;

@@ -416,20 +441,20 @@ nCoupledFacesToBreak = 0;
Cf = fvc::interpolate(C);
Kf = fvc::interpolate(K);

// we need to modify propertiess after cracking otherwise momentum equation is wrong
// but solidInterface seems to hold some information about old mesh
// so we will delete it and make another
// we could probably add a public clearout function
// create new solidInterface
//Pout << "Creating new solidInterface" << endl;
//delete solidInterfacePtr;
//solidInterfacePtr = new solidInterface(mesh, rheology);
// delete demand driven data as the mesh has changed
if(rheology.solidInterfaceActive())
{
rheology.solInterface().clearOut();
solidInterfacePtr->modifyProperties(Cf, Kf);
}
// we need to modify propertiess after cracking otherwise momentum equation is wrong
// but solidInterface seems to hold some information about old mesh
// so we will delete it and make another
// we could probably add a public clearout function
// create new solidInterface
//Pout << "Creating new solidInterface" << endl;
//delete solidInterfacePtr;
//solidInterfacePtr = new solidInterface(mesh, rheology);
// delete demand driven data as the mesh has changed
if(rheology.solidInterfaceActive())
{
rheology.solInterface().clearOut();
solidInterfacePtr->modifyProperties(Cf, Kf);
}

// Local crack displacement
vectorField UpI =
@@ -441,21 +466,21 @@ nCoupledFacesToBreak = 0;
vectorField globalUpI = mesh.globalCrackField(UpI);
vectorField globalOldUpI = mesh.globalCrackField(oldUpI);

// C and K field on new crack faces must be updated
// C and K field on new crack faces must be updated
symmTensor4thOrderField CPI = C.boundaryField()[cohesivePatchID].patchInternalField();
diagTensorField KPI = K.boundaryField()[cohesivePatchID].patchInternalField();
symmTensor4thOrderField globalCPI = mesh.globalCrackField(CPI);
diagTensorField globalKPI = mesh.globalCrackField(KPI);

// cohesivePatchU.size()
int cohesivePatchSize(cohesivePatchUPtr ? cohesivePatchUPtr->size() : cohesivePatchUFixedModePtr->size());
// cohesivePatchU.size()
int cohesivePatchSize(cohesivePatchUPtr ? cohesivePatchUPtr->size() : cohesivePatchUFixedModePtr->size());

// Initialise U for new cohesive face
const labelList& gcfa = mesh.globalCrackFaceAddressing();
label globalIndex = mesh.localCrackStart();
// for (label i=0; i<cohesivePatchU.size(); i++)
for (label i=0; i<cohesivePatchSize; i++)
{
{
label oldFaceIndex = faceMap[start+i];

// If new face
@@ -474,10 +499,10 @@ nCoupledFacesToBreak = 0;
+ globalOldUpI[gcfa[globalIndex]]
);

// initialise C and K on new faces
// set new face value to value of internal cell
Cf.boundaryField()[cohesivePatchID][i] = globalCPI[globalIndex];
Kf.boundaryField()[cohesivePatchID][i] = globalKPI[globalIndex];
// initialise C and K on new faces
// set new face value to value of internal cell
Cf.boundaryField()[cohesivePatchID][i] = globalCPI[globalIndex];
Kf.boundaryField()[cohesivePatchID][i] = globalKPI[globalIndex];

globalIndex++;
}
@@ -488,81 +513,87 @@ nCoupledFacesToBreak = 0;
}

// we must calculate grad using interface
// U at the interface has not been calculated yet as interface.correct()
// has not been called yet
// not really a problem as gradU is correct in second outer iteration
// as long as this does not cause convergence problems for the first iterations.
// we should be able to calculate the interface displacements without
// having to call interface.correct()
// todo: add calculateInterfaceU() function
// interface grad uses Gauss, we need least squares
//gradU = solidInterfacePtr->grad(U);
// U at the interface has not been calculated yet as interface.correct()
// has not been called yet
// not really a problem as gradU is correct in second outer iteration
// as long as this does not cause convergence problems for the first iterations.
// we should be able to calculate the interface displacements without
// having to call interface.correct()
// todo: add calculateInterfaceU() function
// interface grad uses Gauss, we need least squares
//gradU = solidInterfacePtr->grad(U);
gradU = fvc::grad(U); // leastSquaresSolidInterface grad scheme
//snGradU = fvc::snGrad(U);

# include "calculateTraction.H"
//if (nFacesToBreak || nCoupledFacesToBreak) mesh.write(); traction.write();
//if (nFacesToBreak || nCoupledFacesToBreak) mesh.write(); traction.write();

// Initialise initiation traction for new cohesive patch face
// for (label i=0; i<cohesivePatchU.size(); i++)
for (label i=0; i<cohesivePatchSize; i++)
for (label i=0; i<cohesivePatchSize; i++)
{
label oldFaceIndex = faceMap[start+i];

// If new face
if
(
(oldFaceIndex == faceToBreakIndex)
|| (oldFaceIndex == coupledFaceToBreakIndex)
)
{
label oldFaceIndex = faceMap[start+i];
vector n0 =
mesh.Sf().boundaryField()[cohesivePatchID][i]
/mesh.magSf().boundaryField()[cohesivePatchID][i];
//vector n1 = -n0;

// If new face
if
(
(oldFaceIndex == faceToBreakIndex)
|| (oldFaceIndex == coupledFaceToBreakIndex)
)
if ((n0&faceToBreakNormal) > SMALL)
{
vector n0 =
mesh.Sf().boundaryField()[cohesivePatchID][i]
/mesh.magSf().boundaryField()[cohesivePatchID][i];
//vector n1 = -n0;

if ((n0&faceToBreakNormal) > SMALL)
{
traction.boundaryField()[cohesivePatchID][i] =
traction.boundaryField()[cohesivePatchID][i] =
faceToBreakTraction;

traction.oldTime().boundaryField()[cohesivePatchID][i] =
faceToBreakTraction;
traction.oldTime().boundaryField()[cohesivePatchID][i] =
faceToBreakTraction;

if(cohesivePatchUPtr)
{
cohesivePatchUPtr->traction()[i] = faceToBreakTraction;
}
else
{
cohesivePatchUFixedModePtr->traction()[i] = faceToBreakTraction;
cohesivePatchUFixedModePtr->initiationTraction()[i] = faceToBreakTraction;
}
if(cohesivePatchUPtr)
{
cohesivePatchUPtr->traction()[i] = faceToBreakTraction;
}
else
{
traction.boundaryField()[cohesivePatchID][i] =
cohesivePatchUFixedModePtr->traction()[i] =
faceToBreakTraction;

cohesivePatchUFixedModePtr->initiationTraction()[i] =
faceToBreakTraction;
}
}
else
{
traction.boundaryField()[cohesivePatchID][i] =
-faceToBreakTraction;

traction.oldTime().boundaryField()[cohesivePatchID][i] =
-faceToBreakTraction;
traction.oldTime().boundaryField()[cohesivePatchID][i] =
-faceToBreakTraction;

//cohesivePatchU.traction()[i] = -faceToBreakTraction;
if(cohesivePatchUPtr)
{
cohesivePatchUPtr->traction()[i] = -faceToBreakTraction;
}
else
{
cohesivePatchUFixedModePtr->traction()[i] = -faceToBreakTraction;
cohesivePatchUFixedModePtr->initiationTraction()[i] = -faceToBreakTraction;
}
//cohesivePatchU.traction()[i] = -faceToBreakTraction;
if(cohesivePatchUPtr)
{
cohesivePatchUPtr->traction()[i] = -faceToBreakTraction;
}
else
{
cohesivePatchUFixedModePtr->traction()[i] =
-faceToBreakTraction;

cohesivePatchUFixedModePtr->initiationTraction()[i] =
-faceToBreakTraction;
}
}
}
}

// hmmnn we only need a reference for very small groups of cells
// turn off for now
//# include "updateReference.H"
// hmmnn we only need a reference for very small groups of cells
// turn off for now
//# include "updateReference.H"
}
}
@@ -43,7 +43,7 @@ int main(int argc, char *argv[])
Info << "tf value: " << FadOneValue(tf) << endl;
Info << "tf deriv: " << FadOneDeriv(tf, 0) << endl;

FadOneSetDeriv(tf, 0, Field<double>(3, 1));
FadOneSetDeriv(tf, 0, scalarField(3, 1));

fadScalar ten(10.0);
fadScalar bs = tf[0] - ten;

bin/plotForces.py (new executable file, 71 lines)
@@ -0,0 +1,71 @@
#!/usr/bin/python

forcesfilename = 'forces/0/forces.dat'

import sys
if len(sys.argv) != 1:
    print 'script assumes forces file ', forcesfilename
    sys.exit()

import re
forceRegex=r"([0-9.Ee\-+]+)\s+\(+([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\)\s\(([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\s([0-9 .Ee\-+]+)\)+\s\(+([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\)\s\(([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\s([0-9.Ee\-+]+)\)+"
t = []
fpx = []; fpy = []; fpz = []
fvx = []; fvy = []; fvz = []
mpx = []; mpy = []; mpz = []
mvx = []; mvy = []; mvz = []

pipefile=open(forcesfilename,'r')
lines = pipefile.readlines()

for line in lines:
    match=re.search(forceRegex,line)
    if match:
        t.append(float(match.group(1)))
        fpx.append(float(match.group(2)))
        fpy.append(float(match.group(3)))
        fpz.append(float(match.group(4)))
        fvx.append(float(match.group(5)))
        fvy.append(float(match.group(6)))
        fvz.append(float(match.group(7)))
        mpx.append(float(match.group(8)))
        mpy.append(float(match.group(9)))
        mpz.append(float(match.group(10)))
        mvx.append(float(match.group(11)))
        mvy.append(float(match.group(12)))
        mvz.append(float(match.group(13)))


# combine pressure and viscous forces and moments
fx = fpx
fy = fpy
fz = fpz

mx = mpx
my = mpy
mz = mpz

for i in range(1,len(t)):
    fx[i] += fvx[i]
    fy[i] += fvy[i]
    fz[i] += fvz[i]
    mx[i] += mvx[i]
    my[i] += mvy[i]
    mz[i] += mvz[i]

# plot forces
import pylab
pylab.xlabel('iteration')
pylab.ylabel('force (N)')
pylab.grid(True)
#
pylab.plot(t,fx,'-',label="fx")
pylab.plot(t,fy,'-',label="fy")
pylab.plot(t,fz,'-',label="fz")

#pylab.plot(t,mx,'-',label="mx")
#pylab.plot(t,my,'-',label="my")
#pylab.plot(t,mz,'-',label="mz")

pylab.legend()
pylab.show()
bin/plotResCoupled.py (new executable file, 79 lines)
@@ -0,0 +1,79 @@
#!/usr/bin/python

import sys
if len(sys.argv) != 2:
    print 'script requires name of log file'
    sys.exit()

logfilename = sys.argv[1]
print 'Reading file', logfilename

import re
UpRegex=r"([A-Z,a-z]*):*.*Solving for Up, Initial residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), Final residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), No Iterations ([0-9]*)"
komegaRegex=r"([A-Z,a-z]*):*.*Solving for kOmega, Initial residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), Final residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), No Iterations ([0-9]*)"

tUp = []
Ux = []
Uy = []
Uz = []
p = []
iUp = 0

tkomega = []
k = []
omega = []
ikomega = 0

#HJ take name of log file as script argument
pipefile=open(logfilename,'r')
lines = pipefile.readlines()

for line in lines:
    matchUp=re.search(UpRegex,line)
    if matchUp:
        iUp = iUp + 1
        tUp.append(iUp)
        Ux.append(float(matchUp.group(2)))
        Uy.append(float(matchUp.group(3)))
        Uz.append(float(matchUp.group(4)))
        p.append(float(matchUp.group(5)))
    matchkomega=re.search(komegaRegex,line)
    if matchkomega:
        ikomega = ikomega + 1
        tkomega.append(ikomega)
        k.append(float(matchkomega.group(2)))
        omega.append(float(matchkomega.group(3)))

outfile=open('residual.dat','w')

print 'hits = ', ikomega

#HJ need better way of combining lists
if ikomega > 0:
    for index in range(0,ikomega):
        outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+' '+str(k[index])+' '+str(omega[index])+'\n')
elif iUp > 0:
    for index in range(0,iUp):
        outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+'\n')

outfile.close()

# prepare plot
import pylab
pylab.xlabel('iteration')
pylab.ylabel('residual')
pylab.grid(True)

# plot in log scale
if iUp > 0:
    pylab.semilogy(tUp,Ux,'-',label="Ux")
    pylab.semilogy(tUp,Uy,'-',label="Uy")
    pylab.semilogy(tUp,Uz,'-',label="Uz")
    pylab.semilogy(tUp,p,'-',label="p")

if ikomega > 0:
    pylab.semilogy(tkomega,k,'-',label="k")
    pylab.semilogy(tkomega,omega,'-',label="omega")

pylab.legend()
pylab.show()
bin/plotResidual.py (new executable file, 103 lines)
@@ -0,0 +1,103 @@
#!/usr/bin/python

import sys
if len(sys.argv) != 2:
    print 'script requires name of log file'
    sys.exit()

logfilename = sys.argv[1]
print 'Reading file', logfilename

import re
UpRegex=r"([A-Z,a-z]*):*.*Solving for Up, Initial residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), Final residual = \(([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\s([0-9.Ee\-+]*)\), No Iterations ([0-9]*)"
kRegex=r"([A-Z,a-z]*):*.*Solving for k, Initial residual = ([0-9.Ee\-+]*), Final residual = ([0-9.Ee\-+]*), No Iterations ([0-9]*)"
omegaRegex=r"([A-Z,a-z]*):*.*Solving for omega, Initial residual = ([0-9.Ee\-+]*), Final residual = ([0-9.Ee\-+]*), No Iterations ([0-9]*)"
epsilonRegex=r"([A-Z,a-z]*):*.*Solving for epsilon, Initial residual = ([0-9.Ee\-+]*), Final residual = ([0-9.Ee\-+]*), No Iterations ([0-9]*)"

tUp = []
Ux = []
Uy = []
Uz = []
p = []
iUp = 0

tk = []
k = []
ik = 0

tomega = []
omega = []
iomega = 0

tepsilon = []
epsilon = []
iepsilon = 0

#HJ take name of log file as script argument
pipefile=open(logfilename,'r')
lines = pipefile.readlines()

for line in lines:
    matchUp=re.search(UpRegex,line)
    if matchUp:
        iUp = iUp + 1
        tUp.append(iUp)
        Ux.append(float(matchUp.group(2)))
        Uy.append(float(matchUp.group(3)))
        Uz.append(float(matchUp.group(4)))
        p.append(float(matchUp.group(5)))
    matchk=re.search(kRegex,line)
    if matchk:
        ik = ik + 1
        tk.append(ik)
        k.append(float(matchk.group(2)))
    matchomega=re.search(omegaRegex,line)
    if matchomega:
        iomega = iomega + 1
        tomega.append(iomega)
        omega.append(float(matchomega.group(2)))
    matchepsilon=re.search(epsilonRegex,line)
    if matchepsilon:
        iepsilon = iepsilon + 1
        tepsilon.append(iepsilon)
        epsilon.append(float(matchepsilon.group(2)))

outfile=open('residual.dat','w')

#HJ need better way of combining lists
if iomega > 0:
    for index in range(0,iomega):
        outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+' '+str(k[index])+' '+str(omega[index])+'\n')
elif iepsilon > 0:
    for index in range(0,iepsilon):
        outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+' '+str(k[index])+' '+str(epsilon[index])+'\n')
elif iUp > 0:
    for index in range(0,iUp):
        outfile.write(str(tUp[index])+' '+str(Ux[index])+' '+str(Uy[index])+' '+str(Uz[index])+' '+str(p[index])+'\n')

outfile.close()

# prepare plot
import pylab
pylab.xlabel('iteration')
pylab.ylabel('residual')
pylab.grid(True)

# plot in log scale
if iUp > 0:
    pylab.semilogy(tUp,Ux,'-',label="Ux")
    pylab.semilogy(tUp,Uy,'-',label="Uy")
    pylab.semilogy(tUp,Uz,'-',label="Uz")
    pylab.semilogy(tUp,p,'-',label="p")

if ik > 0:
    pylab.semilogy(tk,k,'-',label="k")

if iomega > 0:
    pylab.semilogy(tomega,omega,'-',label="omega")

if iepsilon > 0:
    pylab.semilogy(tepsilon,epsilon,'-',label="epsilon")

pylab.legend()
pylab.show()
@ -138,7 +138,7 @@ export WM_COMPILER_LIB_ARCH=
|
|||
# WM_ARCH_OPTION = 32 | 64
|
||||
: ${WM_ARCH_OPTION:=64}; export WM_ARCH_OPTION
|
||||
|
||||
# WM_PRECISION_OPTION = DP | SP
|
||||
# WM_PRECISION_OPTION = LDP | DP | SP
|
||||
: ${WM_PRECISION_OPTION:=DP}; export WM_PRECISION_OPTION
|
||||
|
||||
# WM_COMPILE_OPTION = Opt | Debug | Prof
|
||||
|
|
|
@ -120,7 +120,7 @@ setenv WM_COMPILER_LIB_ARCH
|
|||
# WM_ARCH_OPTION = 32 | 64
|
||||
if ( ! $?WM_ARCH_OPTION ) setenv WM_ARCH_OPTION 64
|
||||
|
||||
# WM_PRECISION_OPTION = DP | SP
|
||||
# WM_PRECISION_OPTION = LDP | DP | SP
|
||||
if ( ! $?WM_PRECISION_OPTION ) setenv WM_PRECISION_OPTION DP
|
||||
|
||||
# WM_COMPILE_OPTION = Opt | Debug | Prof
|
||||
|
|
|
@ -37,6 +37,8 @@ License
|
|||
# define MPI_SCALAR MPI_FLOAT
|
||||
#elif defined(WM_DP)
|
||||
# define MPI_SCALAR MPI_DOUBLE
|
||||
#elif defined(WM_LDP)
|
||||
# define MPI_SCALAR MPI_LONG_DOUBLE
|
||||
#endif
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
|
|
@ -38,15 +38,19 @@ Author
|
|||
#ifndef cudaTypes_H
|
||||
#define cudaTypes_H
|
||||
|
||||
#if defined(WM_DP)
|
||||
#if defined(WM_SP)
|
||||
|
||||
#define MPI_SCALAR MPI_DOUBLE
|
||||
# define MPI_SCALAR MPI_FLOAT
|
||||
typedef float ValueType;
|
||||
|
||||
#elif defined(WM_DP)
|
||||
|
||||
# define MPI_SCALAR MPI_DOUBLE
|
||||
typedef double ValueType;
|
||||
|
||||
#elif defined(WM_SP)
|
||||
#elif defined(WM_LDP)
|
||||
|
||||
#define MPI_SCALAR MPI_FLOAT
|
||||
typedef float ValueType;
|
||||
# error Long double not supported in CUDA
|
||||
|
||||
#endif
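
The WM_SP / WM_DP / WM_LDP branch above is the same three-way precision selection that now appears in the Pstream MPI layer, with the CUDA path deliberately refusing a long double build. A minimal standalone sketch of the pattern, with illustrative names only (the WM_* macros are the real build flags, solverScalar is hypothetical):

#if defined(WM_SP)
    typedef float solverScalar;        // single precision build
#elif defined(WM_DP)
    typedef double solverScalar;       // double precision build
#elif defined(WM_LDP)
#   error "long double builds are not supported by this (CUDA-style) path"
#else
#   error "WM_PRECISION_OPTION must be SP, DP or LDP"
#endif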
|
||||
|
||||
|
|
|
@ -163,6 +163,8 @@ void Foam::mgMeshLevel::makeChild() const
|
|||
child_.setSize(nCells());
|
||||
int nMoves = -1;
|
||||
|
||||
# ifdef WM_DP
|
||||
|
||||
MGridGen
|
||||
(
|
||||
nCells(),
|
||||
|
@ -175,10 +177,51 @@ void Foam::mgMeshLevel::makeChild() const
|
|||
mgMaxClusterSize_,
|
||||
options.begin(),
|
||||
&nMoves,
|
||||
&nCoarseCells,
|
||||
child_.begin()
|
||||
);
|
||||
|
||||
# else
|
||||
|
||||
// Conversion of type for MGridGen interface
|
||||
const scalarField& vols = cellVolumes();
|
||||
|
||||
Field<double> dblVols(vols.size());
|
||||
forAll (dblVols, i)
|
||||
{
|
||||
dblVols[i] = vols[i];
|
||||
}
|
||||
|
||||
Field<double> dblAreas(boundaryAreas.size());
|
||||
forAll (dblAreas, i)
|
||||
{
|
||||
dblAreas[i] = boundaryAreas[i];
|
||||
}
|
||||
|
||||
Field<double> dblFaceWeights(faceWeights.size());
|
||||
forAll (dblFaceWeights, i)
|
||||
{
|
||||
dblFaceWeights[i] = faceWeights[i];
|
||||
}
|
||||
|
||||
MGridGen
|
||||
(
|
||||
nCells(),
|
||||
cellCellOffsets.begin(),
|
||||
dblVols.begin(),
|
||||
dblAreas.begin(),
|
||||
cellCells.begin(),
|
||||
dblFaceWeights.begin(),
|
||||
mgMinClusterSize_,
|
||||
mgMaxClusterSize_,
|
||||
options.begin(),
|
||||
&nMoves,
|
||||
&nCoarseCells_,
|
||||
child_.begin()
|
||||
);
|
||||
|
||||
# endif
|
||||
|
||||
Info<< "Number of coarse cells = " << nCoarseCells_ << endl;
|
||||
}
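
The # else branch above exists because MGridGen is a plain C library compiled for double: when the foam-extend scalar is not double (single precision, or the new long double option), the cell volumes, boundary areas and face weights are first copied into temporary double fields. A generic sketch of that down-conversion pattern, with hypothetical names (the real call site is the MGridGen invocation above):

#include <cstddef>
#include <vector>

extern "C" void legacyCApi(int n, const double* weights);   // hypothetical double-only C interface

void callWithWideScalars(const std::vector<long double>& w)
{
    // Copy into a double buffer; the precision loss is accepted at this interface
    std::vector<double> dblW(w.size());
    for (std::size_t i = 0; i < w.size(); ++i)
    {
        dblW[i] = static_cast<double>(w[i]);
    }

    legacyCApi(static_cast<int>(dblW.size()), dblW.data());
}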
|
||||
|
||||
|
|
|
@ -96,7 +96,7 @@ class MRFZone
|
|||
//- Divide faces in frame according to patch
|
||||
void setMRFFaces();
|
||||
|
||||
//- Make the given absolute mass/vol flux relative within the MRF region
|
||||
//- Make the given absolute mass/vol flux relative within MRF region
|
||||
template<class RhoFieldType>
|
||||
void relativeRhoFlux
|
||||
(
|
||||
|
@ -104,7 +104,7 @@ class MRFZone
|
|||
surfaceScalarField& phi
|
||||
) const;
|
||||
|
||||
//- Make the given relative mass/vol flux absolute within the MRF region
|
||||
//- Make the given relative mass/vol flux absolute within MRF region
|
||||
template<class RhoFieldType>
|
||||
void absoluteRhoFlux
|
||||
(
|
||||
|
|
|
@ -85,6 +85,7 @@ Foam::tmp<Foam::surfaceScalarField> Foam::MRFZones::fluxCorrection() const
|
|||
return tMRFZonesPhiCorr;
|
||||
}
|
||||
|
||||
|
||||
Foam::tmp<Foam::surfaceVectorField> Foam::MRFZones::meshPhi() const
|
||||
{
|
||||
tmp<surfaceVectorField> tMRFZonesPhiCorr
|
||||
|
@ -113,6 +114,7 @@ Foam::tmp<Foam::surfaceVectorField> Foam::MRFZones::meshPhi() const
|
|||
return tMRFZonesPhiCorr;
|
||||
}
|
||||
|
||||
|
||||
void Foam::MRFZones::addCoriolis(fvVectorMatrix& UEqn) const
|
||||
{
|
||||
forAll(*this, i)
|
||||
|
@ -275,4 +277,5 @@ Foam::tmp<Foam::volVectorField> Foam::MRFZones::Su
|
|||
return tPhiSource;
|
||||
}
|
||||
|
||||
|
||||
// ************************************************************************* //
|
||||
|
|
|
@ -50,7 +50,7 @@ void basicSymmetryFvPatchField<scalar>::evaluate(const Pstream::commsTypes)
|
|||
{
|
||||
// Local typedefs
|
||||
typedef scalar Type;
|
||||
typedef typename outerProduct<vector, Type>::type gradType;
|
||||
typedef outerProduct<vector, Type>::type gradType;
|
||||
typedef GeometricField<gradType, fvPatchField, volMesh> gradFieldType;
|
||||
|
||||
if (!updated())
|
||||
|
@ -140,7 +140,7 @@ tmp<vectorField> basicSymmetryFvPatchField<vector>::snGrad() const
|
|||
{
|
||||
// Local typedefs
|
||||
typedef vector Type;
|
||||
typedef typename outerProduct<vector, Type>::type gradType;
|
||||
typedef outerProduct<vector, Type>::type gradType;
|
||||
typedef GeometricField<gradType, fvPatchField, volMesh> gradFieldType;
|
||||
|
||||
vectorField nHat = this->patch().nf();
|
||||
|
@ -213,7 +213,7 @@ void basicSymmetryFvPatchField<vector>::evaluate(const Pstream::commsTypes)
|
|||
{
|
||||
// Local typedefs
|
||||
typedef vector Type;
|
||||
typedef typename outerProduct<vector, Type>::type gradType;
|
||||
typedef outerProduct<vector, Type>::type gradType;
|
||||
typedef GeometricField<gradType, fvPatchField, volMesh> gradFieldType;
|
||||
|
||||
if (!updated())
|
||||
|
|
|
@ -315,8 +315,7 @@ void cyclicGgiFvPatchField<Type>::updateInterfaceMatrix
|
|||
const CoeffField<Type>&,
|
||||
const Pstream::commsTypes commsType
|
||||
) const
|
||||
{
|
||||
}
|
||||
{}
|
||||
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
|
|
@ -47,20 +47,6 @@ translatingWallVelocityFvPatchVectorField
|
|||
{}
|
||||
|
||||
|
||||
translatingWallVelocityFvPatchVectorField::
|
||||
translatingWallVelocityFvPatchVectorField
|
||||
(
|
||||
const translatingWallVelocityFvPatchVectorField& ptf,
|
||||
const fvPatch& p,
|
||||
const DimensionedField<vector, volMesh>& iF,
|
||||
const fvPatchFieldMapper& mapper
|
||||
)
|
||||
:
|
||||
fixedValueFvPatchField<vector>(ptf, p, iF, mapper),
|
||||
U_(ptf.U_)
|
||||
{}
|
||||
|
||||
|
||||
translatingWallVelocityFvPatchVectorField::
|
||||
translatingWallVelocityFvPatchVectorField
|
||||
(
|
||||
|
@ -77,6 +63,20 @@ translatingWallVelocityFvPatchVectorField
|
|||
}
|
||||
|
||||
|
||||
translatingWallVelocityFvPatchVectorField::
|
||||
translatingWallVelocityFvPatchVectorField
|
||||
(
|
||||
const translatingWallVelocityFvPatchVectorField& ptf,
|
||||
const fvPatch& p,
|
||||
const DimensionedField<vector, volMesh>& iF,
|
||||
const fvPatchFieldMapper& mapper
|
||||
)
|
||||
:
|
||||
fixedValueFvPatchField<vector>(ptf, p, iF, mapper),
|
||||
U_(ptf.U_)
|
||||
{}
|
||||
|
||||
|
||||
translatingWallVelocityFvPatchVectorField::
|
||||
translatingWallVelocityFvPatchVectorField
|
||||
(
|
||||
|
|
|
@ -43,7 +43,7 @@ namespace Foam
|
|||
{
|
||||
|
||||
/*---------------------------------------------------------------------------*\
|
||||
Class translatingWallVelocityFvPatchField Declaration
|
||||
Class translatingWallVelocityFvPatchField Declaration
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
class translatingWallVelocityFvPatchVectorField
|
||||
|
|
|
@ -30,7 +30,7 @@ License
|
|||
|
||||
template<class Type>
|
||||
template<class fieldType>
|
||||
void fvBlockMatrix<Type>::insertSolutionVector
|
||||
void Foam::fvBlockMatrix<Type>::insertSolutionVector
|
||||
(
|
||||
const direction dir,
|
||||
const Field<fieldType>& xSingle
|
||||
|
@ -59,7 +59,7 @@ void fvBlockMatrix<Type>::insertSolutionVector
|
|||
|
||||
template<class Type>
|
||||
template<class matrixType>
|
||||
void fvBlockMatrix<Type>::insertDiagSource
|
||||
void Foam::fvBlockMatrix<Type>::insertDiagSource
|
||||
(
|
||||
const direction dir,
|
||||
fvMatrix<matrixType>& matrix
|
||||
|
@ -162,7 +162,7 @@ void fvBlockMatrix<Type>::insertDiagSource
|
|||
|
||||
template<class Type>
|
||||
template<class matrixType>
|
||||
void fvBlockMatrix<Type>::insertUpperLower
|
||||
void Foam::fvBlockMatrix<Type>::insertUpperLower
|
||||
(
|
||||
const direction dir,
|
||||
const fvMatrix<matrixType>& matrix
|
||||
|
@ -290,7 +290,7 @@ void fvBlockMatrix<Type>::insertUpperLower
|
|||
|
||||
template<class Type>
|
||||
template<class matrixType>
|
||||
void fvBlockMatrix<Type>::updateCouplingCoeffs
|
||||
void Foam::fvBlockMatrix<Type>::updateCouplingCoeffs
|
||||
(
|
||||
const direction dir,
|
||||
const fvMatrix<matrixType>& matrix
|
||||
|
@ -299,7 +299,9 @@ void fvBlockMatrix<Type>::updateCouplingCoeffs
|
|||
const direction nCmpts = pTraits<matrixType>::nComponents;
|
||||
direction localDir = dir;
|
||||
|
||||
const GeometricField<matrixType, fvPatchField, volMesh>& psi = matrix.psi();
|
||||
const GeometricField<matrixType, fvPatchField, volMesh>& psi =
|
||||
matrix.psi();
|
||||
|
||||
forAll(psi.boundaryField(), patchI)
|
||||
{
|
||||
const fvPatchField<matrixType>& pf = psi.boundaryField()[patchI];
|
||||
|
@ -373,7 +375,7 @@ void fvBlockMatrix<Type>::updateCouplingCoeffs
|
|||
|
||||
template<class Type>
|
||||
template<class blockType, class fieldType>
|
||||
void fvBlockMatrix<Type>::insertBlock
|
||||
void Foam::fvBlockMatrix<Type>::insertBlock
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
|
@ -471,7 +473,7 @@ void fvBlockMatrix<Type>::insertBlock
|
|||
|
||||
template<class Type>
|
||||
template<class blockType, class fieldType>
|
||||
void fvBlockMatrix<Type>::insertBoundaryContributions
|
||||
void Foam::fvBlockMatrix<Type>::insertBoundaryContributions
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
|
@ -561,39 +563,30 @@ void fvBlockMatrix<Type>::insertBoundaryContributions
|
|||
|
||||
|
||||
template<class Type>
|
||||
void fvBlockMatrix<Type>::insertCouplingDiagSource
|
||||
void Foam::fvBlockMatrix<Type>::insertCouplingDiag
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
const fvScalarMatrix& matrix
|
||||
const scalarField& coeffIJ
|
||||
)
|
||||
{
|
||||
// Prepare the diagonal and source
|
||||
scalarField diag = matrix.diag();
|
||||
scalarField source = matrix.source();
|
||||
|
||||
// Add boundary source contribution
|
||||
matrix.addBoundaryDiag(diag, 0);
|
||||
matrix.addBoundarySource(source, false);
|
||||
|
||||
// Get reference to block diagonal of the block system
|
||||
typename CoeffField<Type>::squareTypeField& blockDiag =
|
||||
this->diag().asSquare();
|
||||
|
||||
// Get reference to this source field of the block system
|
||||
Field<Type>& b = this->source();
|
||||
|
||||
// Set off-diagonal coefficient
|
||||
forAll(diag, cellI)
|
||||
forAll(coeffIJ, cellI)
|
||||
{
|
||||
blockDiag[cellI](dirI, dirJ) += diag[cellI];
|
||||
b[cellI](dirI) += source[cellI];
|
||||
blockDiag[cellI](dirI, dirJ) += coeffIJ[cellI];
|
||||
}
|
||||
|
||||
// Source compensation is done in function updateSourceCoupling()
|
||||
// after all coupling terms are added. HJ, 27/Apr/2015
|
||||
}
|
||||
|
||||
|
||||
template<class Type>
|
||||
void fvBlockMatrix<Type>::insertCouplingUpperLower
|
||||
void Foam::fvBlockMatrix<Type>::insertCouplingUpperLower
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
|
@ -707,7 +700,7 @@ Foam::fvBlockMatrix<Type>::fvBlockMatrix
|
|||
|
||||
template<class Type>
|
||||
template<class fieldType>
|
||||
void fvBlockMatrix<Type>::retrieveSolution
|
||||
void Foam::fvBlockMatrix<Type>::retrieveSolution
|
||||
(
|
||||
const direction dir,
|
||||
Field<fieldType>& xSingle
|
||||
|
@ -736,7 +729,7 @@ void fvBlockMatrix<Type>::retrieveSolution
|
|||
|
||||
template<class Type>
|
||||
template<class matrixType>
|
||||
void fvBlockMatrix<Type>::insertEquation
|
||||
void Foam::fvBlockMatrix<Type>::insertEquation
|
||||
(
|
||||
const direction dir,
|
||||
fvMatrix<matrixType>& matrix
|
||||
|
@ -751,7 +744,7 @@ void fvBlockMatrix<Type>::insertEquation
|
|||
|
||||
template<class Type>
|
||||
template<class blockType, class fieldType>
|
||||
void fvBlockMatrix<Type>::insertBlockCoupling
|
||||
void Foam::fvBlockMatrix<Type>::insertBlockCoupling
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
|
@ -765,20 +758,22 @@ void fvBlockMatrix<Type>::insertBlockCoupling
|
|||
|
||||
|
||||
template<class Type>
|
||||
void fvBlockMatrix<Type>::insertEquationCoupling
|
||||
void Foam::fvBlockMatrix<Type>::insertEquationCoupling
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
const fvScalarMatrix& matrix
|
||||
const scalarField& coeffIJ
|
||||
)
|
||||
{
|
||||
insertCouplingDiagSource(dirI, dirJ, matrix);
|
||||
insertCouplingUpperLower(dirI, dirJ, matrix);
|
||||
// Multiply coefficients by volume
|
||||
scalarField coeffIJVol = coeffIJ*psi_.mesh().V();
|
||||
|
||||
insertCouplingDiag(dirI, dirJ, coeffIJVol);
|
||||
}
|
||||
|
||||
|
||||
template<class Type>
|
||||
void fvBlockMatrix<Type>::blockAdd
|
||||
void Foam::fvBlockMatrix<Type>::blockAdd
|
||||
(
|
||||
const direction dir,
|
||||
const scalarField& xSingle,
|
||||
|
@ -793,7 +788,7 @@ void fvBlockMatrix<Type>::blockAdd
|
|||
|
||||
|
||||
template<class Type>
|
||||
void fvBlockMatrix<Type>::updateSourceCoupling()
|
||||
void Foam::fvBlockMatrix<Type>::updateSourceCoupling()
|
||||
{
|
||||
// Eliminate off-diagonal block coefficients from the square diagonal
|
||||
// With this change, the segregated matrix can be assembled with complete
|
||||
|
@ -827,215 +822,7 @@ void fvBlockMatrix<Type>::updateSourceCoupling()
|
|||
|
||||
|
||||
template<class Type>
|
||||
void fvBlockMatrix<Type>::insertPicardTensor
|
||||
(
|
||||
const direction UEqnDir,
|
||||
const volVectorField& U,
|
||||
const surfaceScalarField& phi
|
||||
)
|
||||
{
|
||||
const direction nCmpts = pTraits<vector>::nComponents;
|
||||
direction localDirI = UEqnDir;
|
||||
direction localDirJ = UEqnDir;
|
||||
|
||||
// Sanity check
|
||||
{
|
||||
const direction blockMatrixSize = pTraits<Type>::nComponents;
|
||||
|
||||
if (nCmpts > blockMatrixSize)
|
||||
{
|
||||
FatalErrorIn
|
||||
(
|
||||
"void fvBlockMatrix<Type>::insertPicardTensor\n"
|
||||
"(\n"
|
||||
" const direction UEqnDir,\n"
|
||||
" const volVectorField& U,\n"
|
||||
" const surfaceScalarField& phi\n"
|
||||
")"
|
||||
) << "Trying to insert Picard tensor term into smaller "
|
||||
<< "fvBlockMatrix. Do you have momentum equation?"
|
||||
<< abort(FatalError);
|
||||
}
|
||||
}
|
||||
|
||||
// Get weights for U which makes the implicit flux part
|
||||
const fvMesh& mesh = U.mesh();
|
||||
|
||||
tmp<surfaceInterpolationScheme<vector> > tinterpScheme =
|
||||
fvc::scheme<vector>(mesh, U.name());
|
||||
|
||||
tmp<surfaceScalarField> tweights = tinterpScheme().weights(U);
|
||||
const scalarField& wIn = tweights().internalField();
|
||||
|
||||
// Calculate the Pi tensor. Consider hard coding the interpolation scheme to
|
||||
// correspond to the div(U, phi) interpolation scheme for consistency.
|
||||
// VV, 21/July/2014.
|
||||
const surfaceTensorField pi(mesh.Sf()*fvc::interpolate(U, phi, "(phi,U)"));
|
||||
// const surfaceTensorField pi(fvc::interpolate(U, phi, "(phi,U)")*mesh.Sf());
|
||||
const tensorField& piIn = pi.internalField();
|
||||
|
||||
BlockLduSystem<vector, vector> piSystem(mesh);
|
||||
|
||||
// Get references to ldu fields of pi block system always as square
|
||||
typename CoeffField<vector>::squareTypeField& piDiag =
|
||||
piSystem.diag().asSquare();
|
||||
typename CoeffField<vector>::squareTypeField& piUpper =
|
||||
piSystem.upper().asSquare();
|
||||
typename CoeffField<vector>::squareTypeField& piLower =
|
||||
piSystem.lower().asSquare();
|
||||
|
||||
vectorField& piSource = piSystem.source();
|
||||
|
||||
piLower = -wIn*piIn;
|
||||
piUpper = piLower + piIn;
|
||||
piSystem.negSumDiag();
|
||||
|
||||
Info << "Diag coeff: " << piDiag[125] << nl
|
||||
<< "Lower coeff: " << piLower[125] << nl
|
||||
<< "Upper coeff: " << piUpper[125] << endl;
|
||||
|
||||
// Boundary contributions - treated explicitly because of the problem with
|
||||
// inconsistent return type of internalCoeffs. VV, 21/July/2014.
|
||||
forAll(U.boundaryField(), patchI)
|
||||
{
|
||||
const fvPatchVectorField& Ub = U.boundaryField()[patchI];
|
||||
const fvPatch& patch = Ub.patch();
|
||||
const tensorField& pib = pi.boundaryField()[patchI];
|
||||
const fvsPatchScalarField& wb = tweights().boundaryField()[patchI];
|
||||
const unallocLabelList& fc = patch.faceCells();
|
||||
|
||||
// const vectorField internalCoeffs(Ub.valueInternalCoeffs(wb));
|
||||
|
||||
// Explicit diag boundary contribution
|
||||
// forAll(Ub, faceI)
|
||||
// {
|
||||
// piSource[fc[faceI]] -= pib[faceI] & Ub[faceI];
|
||||
// piSource[fc[faceI]] -= Ub[faceI] & pib[faceI];
|
||||
// }
|
||||
|
||||
// Hard coded zeroGradient if patch does not fix value
|
||||
if (!U.boundaryField()[patchI].fixesValue())
|
||||
{
|
||||
forAll(Ub, faceI)
|
||||
{
|
||||
piDiag[fc[faceI]] += pib[faceI];
|
||||
}
|
||||
}
|
||||
// Coupled patches treated implicitly
|
||||
else if (patch.coupled())
|
||||
{
|
||||
typename CoeffField<vector>::squareTypeField& pipCoupleUpper =
|
||||
piSystem.coupleUpper()[patchI].asSquare();
|
||||
typename CoeffField<vector>::squareTypeField& pipCoupleLower =
|
||||
piSystem.coupleLower()[patchI].asSquare();
|
||||
|
||||
const tensorField pcl = -wb*pib;
|
||||
const tensorField pcu = pcl + pib;
|
||||
|
||||
// Coupling contributions
|
||||
pipCoupleLower -= pcl;
|
||||
pipCoupleUpper -= pcu;
|
||||
}
|
||||
else
|
||||
{
|
||||
const vectorField boundaryCoeffs(Ub.valueBoundaryCoeffs(wb));
|
||||
|
||||
// Boundary contribution
|
||||
forAll(Ub, faceI)
|
||||
{
|
||||
piSource[fc[faceI]] -= pib[faceI] & boundaryCoeffs[faceI];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Consider chucking the above code into fvm::picardTensor operator.
|
||||
// VV, 21/July/2014.
|
||||
|
||||
// Finally add Picard piSystem into this fvBlockMatrix
|
||||
typename CoeffField<Type>::squareTypeField& blockDiag =
|
||||
this->diag().asSquare();
|
||||
typename CoeffField<Type>::squareTypeField& blockUpper =
|
||||
this->upper().asSquare();
|
||||
typename CoeffField<Type>::squareTypeField& blockLower =
|
||||
this->lower().asSquare();
|
||||
|
||||
Field<Type>& blockSource = this->source();
|
||||
|
||||
// Add diagonal, source, lower and upper
|
||||
for (direction cmptI = 0; cmptI < nCmpts; cmptI++)
|
||||
{
|
||||
for (direction cmptJ = 0; cmptJ < nCmpts; cmptJ++)
|
||||
{
|
||||
forAll(piDiag, cellI)
|
||||
{
|
||||
blockDiag[cellI](localDirI, localDirJ) +=
|
||||
piDiag[cellI](cmptI, cmptJ);
|
||||
// blockSource[cellI](localDirI, localDirJ) +=
|
||||
// piSource[cellI](cmptI, cmptJ);
|
||||
}
|
||||
|
||||
forAll(piUpper, faceI)
|
||||
{
|
||||
blockUpper[faceI](localDirI, localDirJ) +=
|
||||
piUpper[faceI](cmptI, cmptJ);
|
||||
blockLower[faceI](localDirI, localDirJ) +=
|
||||
piLower[faceI](cmptI, cmptJ);
|
||||
}
|
||||
|
||||
localDirJ++;
|
||||
}
|
||||
|
||||
forAll(piSource, cellI)
|
||||
{
|
||||
blockSource[cellI](localDirI) += piSource[cellI](cmptI);
|
||||
}
|
||||
|
||||
localDirI++;
|
||||
}
|
||||
|
||||
// Reset local direction for coupling contributions
|
||||
localDirI = UEqnDir;
|
||||
localDirJ = UEqnDir;
|
||||
|
||||
// Add coupling contributions
|
||||
forAll(U.boundaryField(), patchI)
|
||||
{
|
||||
if (U.boundaryField()[patchI].patch().coupled())
|
||||
{
|
||||
typename CoeffField<Type>::squareTypeField& pcoupleUpper =
|
||||
this->coupleUpper()[patchI].asSquare();
|
||||
typename CoeffField<Type>::squareTypeField& pcoupleLower =
|
||||
this->coupleLower()[patchI].asSquare();
|
||||
|
||||
const typename CoeffField<vector>::squareTypeField& pipcu =
|
||||
piSystem.coupleUpper()[patchI].asSquare();
|
||||
const typename CoeffField<vector>::squareTypeField& pipcl =
|
||||
piSystem.coupleLower()[patchI].asSquare();
|
||||
|
||||
for (direction cmptI = 0; cmptI < nCmpts; cmptI++)
|
||||
{
|
||||
for (direction cmptJ = 0; cmptJ < nCmpts; cmptJ++)
|
||||
{
|
||||
forAll(pipcu, faceI)
|
||||
{
|
||||
pcoupleUpper[faceI](localDirI, localDirJ) +=
|
||||
pipcu[faceI](cmptI, cmptJ);
|
||||
pcoupleLower[faceI](localDirI, localDirJ) +=
|
||||
pipcl[faceI](cmptI, cmptJ);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset local directions for other patches
|
||||
localDirI = UEqnDir;
|
||||
localDirJ = UEqnDir;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<class Type>
|
||||
Foam::BlockSolverPerformance<Type> fvBlockMatrix<Type>::solve
|
||||
Foam::BlockSolverPerformance<Type> Foam::fvBlockMatrix<Type>::solve
|
||||
(
|
||||
const dictionary& solverControls
|
||||
)
|
||||
|
@ -1057,7 +844,7 @@ Foam::BlockSolverPerformance<Type> fvBlockMatrix<Type>::solve
|
|||
|
||||
|
||||
template<class Type>
|
||||
Foam::BlockSolverPerformance<Type> fvBlockMatrix<Type>::solve()
|
||||
Foam::BlockSolverPerformance<Type> Foam::fvBlockMatrix<Type>::solve()
|
||||
{
|
||||
return solve(psi_.mesh().solutionDict().solverDict(psi_.name()));
|
||||
}
|
||||
|
|
|
@ -26,11 +26,12 @@ Class
|
|||
|
||||
Description
|
||||
fvBlockMatrix is an extension of fvMatrix for block coupled types. It holds
|
||||
general insertion and retrieval tools for block coupling along with specific
|
||||
general insertion and retrieval tools for block coupling and specific
|
||||
functions for p-U coupling.
|
||||
|
||||
Author
|
||||
Vuko Vukcevic, FMENA Zagreb.
|
||||
Update by Hrvoje Jasak
|
||||
|
||||
SourceFiles
|
||||
fvBlockMatrix.C
|
||||
|
@ -112,14 +113,13 @@ class fvBlockMatrix
|
|||
|
||||
// Insertion functions for fvScalarMatrix into off-diagonal positions
|
||||
// (coupling matrices)
|
||||
// These two functions are dubious. Reconsider. HJ, 17/July/2014.
|
||||
|
||||
//- Insert coupling matrix diag and source into this fvBlockMatrix
|
||||
void insertCouplingDiagSource
|
||||
//- Insert coupling matrix diag element into this fvBlockMatrix
|
||||
void insertCouplingDiag
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
const fvScalarMatrix& matrix
|
||||
const scalarField& coeffIJ
|
||||
);
|
||||
|
||||
//- Insert coupling matrix lower and upper into this fvBlockMatrix
|
||||
|
@ -144,8 +144,8 @@ class fvBlockMatrix
|
|||
const bool incrementColumnDir
|
||||
);
|
||||
|
||||
//- Insert source and coupling coeffs of BlockLduSystem (obtained by
|
||||
// implicit grad/div operator) into this fvBlockMatrix
|
||||
//- Insert source and coupling coeffs of BlockLduSystem
|
||||
// (eg. obtained by implicit grad/div operator)
|
||||
template<class blockType, class fieldType>
|
||||
void insertBoundaryContributions
|
||||
(
|
||||
|
@ -192,7 +192,7 @@ public:
|
|||
|
||||
// Insertion and retrieval public tools
|
||||
|
||||
//- Retrieve part of internal (solved) field from this fvBlockMatrix
|
||||
//- Retrieve part of internal field from this fvBlockMatrix
|
||||
template<class fieldType>
|
||||
void retrieveSolution
|
||||
(
|
||||
|
@ -220,12 +220,13 @@ public:
|
|||
);
|
||||
|
||||
//- Insert scalar equation coupling into this fvBlockMatrix
|
||||
// Not tested: VV, 10/July/2014
|
||||
// Source compensation is done in function updateSourceCoupling()
|
||||
// after all coupling terms are added. HJ, 27/Apr/2015
|
||||
void insertEquationCoupling
|
||||
(
|
||||
const direction dirI,
|
||||
const direction dirJ,
|
||||
const fvScalarMatrix& matrix
|
||||
const scalarField& coeffIJ
|
||||
);
|
||||
|
||||
//- Add field into block field
|
||||
|
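With this change the scalar-equation coupling is specified as a per-cell coefficient field instead of a full fvScalarMatrix; the coefficients are multiplied by cell volumes and added to the block diagonal, with the matching source correction deferred to updateSourceCoupling(). A hedged usage sketch, with a hypothetical coefficient field alpha and illustrative component indices:

#include "fvCFD.H"
#include "fvBlockMatrix.H"

void coupleComponents
(
    Foam::fvBlockMatrix<Foam::vector4>& blockEqn,  // block system holding U (0-2) and p (3)
    const Foam::volScalarField& alpha              // hypothetical linearised coupling coefficient
)
{
    // Insert alpha as the coupling between the pressure row and the Ux column;
    // insertEquationCoupling multiplies by the cell volumes internally
    blockEqn.insertEquationCoupling(3, 0, alpha.internalField());
}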
@ -237,20 +238,11 @@ public:
|
|||
);
|
||||
|
||||
//- Update coupling of block system.
|
||||
// Subtracts the block-coefficient coupling as specified by the user
|
||||
// from the source, leaving the implicit update given by
|
||||
// linearisation
|
||||
// Subtracts the block-coefficient coupling as specified by the
|
||||
// user from the source, leaving the implicit update given by
|
||||
// linearisation
|
||||
void updateSourceCoupling();
|
||||
|
||||
//- Insert Picard tensor term that comes from Picard linearisation
|
||||
// of convection term in momentum equation. VV, 21/July/2014.
|
||||
void insertPicardTensor
|
||||
(
|
||||
const direction UEqnDir,
|
||||
const volVectorField& U,
|
||||
const surfaceScalarField& phi
|
||||
);
|
||||
|
||||
|
||||
// Solver calls for fvBlockMatrix
|
||||
|
||||
|
|
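The new insertPicardTensor member adds the Picard-linearised convection tensor directly into the velocity sub-block of the coupled system, with the boundary treatment implemented above. A hedged sketch of the call a coupled solver might make; Up, U and phi are assumed to be the usual coupled-solver fields and are not taken from this commit:

#include "fvCFD.H"
#include "fvBlockMatrix.H"

void addPicardConvection
(
    Foam::fvBlockMatrix<Foam::vector4>& UpEqn,
    const Foam::volVectorField& U,
    const Foam::surfaceScalarField& phi
)
{
    // Inserts diag, upper, lower and processor-coupling coefficients of the
    // Picard tensor into the 3x3 velocity block starting at direction 0
    UpEqn.insertPicardTensor(0, U, phi);
}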
|
@ -26,6 +26,7 @@ $(ints)/label/label.C
|
|||
$(ints)/uLabel/uLabel.C
|
||||
|
||||
primitives/Scalar/doubleScalar/doubleScalar.C
|
||||
primitives/Scalar/longDoubleScalar/longDoubleScalar.C
|
||||
primitives/Scalar/floatScalar/floatScalar.C
|
||||
primitives/Scalar/scalar/scalar.C
|
||||
primitives/DiagTensor/diagTensor/diagTensor.C
|
||||
|
|
|
@ -109,8 +109,8 @@ public:
|
|||
IFstream
|
||||
(
|
||||
const fileName& pathname,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion
|
||||
);
|
||||
|
||||
|
||||
|
|
|
@ -75,8 +75,8 @@ public:
|
|||
//- Set stream status
|
||||
Istream
|
||||
(
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion,
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion,
|
||||
compressionType compression=UNCOMPRESSED
|
||||
)
|
||||
:
|
||||
|
@ -122,6 +122,9 @@ public:
|
|||
//- Read a doubleScalar
|
||||
virtual Istream& read(doubleScalar&) = 0;
|
||||
|
||||
//- Read a longDoubleScalar
|
||||
virtual Istream& read(longDoubleScalar&) = 0;
|
||||
|
||||
//- Read binary block
|
||||
virtual Istream& read(char*, std::streamsize) = 0;
|
||||
|
||||
|
|
|
@ -77,8 +77,8 @@ public:
|
|||
//- Set stream status
|
||||
Ostream
|
||||
(
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion,
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion,
|
||||
compressionType compression=UNCOMPRESSED
|
||||
)
|
||||
:
|
||||
|
@ -131,6 +131,9 @@ public:
|
|||
//- Write doubleScalar
|
||||
virtual Ostream& write(const doubleScalar) = 0;
|
||||
|
||||
//- Write longDoubleScalar
|
||||
virtual Ostream& write(const longDoubleScalar) = 0;
|
||||
|
||||
//- Write binary block
|
||||
virtual Ostream& write(const char*, std::streamsize) = 0;
|
||||
|
||||
|
|
|
@ -211,6 +211,21 @@ Foam::Istream& Foam::IPstream::read(token& t)
|
|||
return *this;
|
||||
}
|
||||
|
||||
// longDoubleScalar
|
||||
case token::LONG_DOUBLE_SCALAR :
|
||||
{
|
||||
longDoubleScalar val;
|
||||
if (read(val))
|
||||
{
|
||||
t = val;
|
||||
}
|
||||
else
|
||||
{
|
||||
t.setBad();
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
// Character (returned as a single character word) or error
|
||||
default:
|
||||
{
|
||||
|
@ -281,6 +296,13 @@ Foam::Istream& Foam::IPstream::read(doubleScalar& val)
|
|||
}
|
||||
|
||||
|
||||
Foam::Istream& Foam::IPstream::read(longDoubleScalar& val)
|
||||
{
|
||||
readFromBuffer(val);
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
Foam::Istream& Foam::IPstream::read(char* data, std::streamsize count)
|
||||
{
|
||||
if (format() != BINARY)
|
||||
|
|
|
@ -84,7 +84,7 @@ public:
|
|||
const int fromProcNo,
|
||||
const label bufSize = 0,
|
||||
streamFormat format=BINARY,
|
||||
versionNumber version=currentVersion
|
||||
versionNumber version = currentVersion
|
||||
);
|
||||
|
||||
|
||||
|
@ -143,6 +143,9 @@ public:
|
|||
//- Read a doubleScalar
|
||||
Istream& read(doubleScalar&);
|
||||
|
||||
//- Read a longDoubleScalar
|
||||
Istream& read(longDoubleScalar&);
|
||||
|
||||
//- Read binary block
|
||||
Istream& read(char*, std::streamsize);
|
||||
|
||||
|
|
|
@ -210,6 +210,14 @@ Foam::Ostream& Foam::OPstream::write(const doubleScalar val)
|
|||
}
|
||||
|
||||
|
||||
Foam::Ostream& Foam::OPstream::write(const longDoubleScalar val)
|
||||
{
|
||||
write(char(token::LONG_DOUBLE_SCALAR));
|
||||
writeToBuffer(val);
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
Foam::Ostream& Foam::OPstream::write(const char* data, std::streamsize count)
|
||||
{
|
||||
if (format() != BINARY)
|
||||
|
|
|
@ -83,7 +83,7 @@ public:
|
|||
const int toProcNo,
|
||||
const label bufSize = 0,
|
||||
streamFormat format=BINARY,
|
||||
versionNumber version=currentVersion
|
||||
versionNumber version = currentVersion
|
||||
);
|
||||
|
||||
|
||||
|
@ -152,6 +152,9 @@ public:
|
|||
//- Write doubleScalar
|
||||
Ostream& write(const doubleScalar);
|
||||
|
||||
//- Write longDoubleScalar
|
||||
Ostream& write(const longDoubleScalar);
|
||||
|
||||
//- Write binary block
|
||||
Ostream& write(const char*, std::streamsize);
|
||||
|
||||
|
|
|
@ -489,6 +489,14 @@ Foam::Istream& Foam::ISstream::read(doubleScalar& val)
|
|||
}
|
||||
|
||||
|
||||
Foam::Istream& Foam::ISstream::read(longDoubleScalar& val)
|
||||
{
|
||||
is_ >> val;
|
||||
setState(is_.rdstate());
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
// read binary block
|
||||
Foam::Istream& Foam::ISstream::read(char* buf, std::streamsize count)
|
||||
{
|
||||
|
|
|
@ -97,9 +97,9 @@ public:
|
|||
(
|
||||
istream& is,
|
||||
const string& name,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion,
|
||||
compressionType compression=UNCOMPRESSED
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion,
|
||||
compressionType compression = UNCOMPRESSED
|
||||
);
|
||||
|
||||
|
||||
|
@ -165,6 +165,9 @@ public:
|
|||
//- Read a doubleScalar
|
||||
virtual Istream& read(doubleScalar&);
|
||||
|
||||
//- Read a longDoubleScalar
|
||||
virtual Istream& read(longDoubleScalar&);
|
||||
|
||||
//- Read binary block
|
||||
virtual Istream& read(char*, std::streamsize);
|
||||
|
||||
|
|
|
@ -195,6 +195,14 @@ Foam::Ostream& Foam::OSstream::write(const doubleScalar val)
|
|||
}
|
||||
|
||||
|
||||
Foam::Ostream& Foam::OSstream::write(const longDoubleScalar val)
|
||||
{
|
||||
os_ << val;
|
||||
setState(os_.rdstate());
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
Foam::Ostream& Foam::OSstream::write(const char* buf, std::streamsize count)
|
||||
{
|
||||
if (format() != BINARY)
|
||||
|
|
|
@ -90,8 +90,8 @@ public:
|
|||
(
|
||||
ostream& os,
|
||||
const string& name,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion,
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion,
|
||||
compressionType compression=UNCOMPRESSED
|
||||
);
|
||||
|
||||
|
@ -155,6 +155,9 @@ public:
|
|||
//- Write doubleScalar
|
||||
virtual Ostream& write(const doubleScalar);
|
||||
|
||||
//- Write longDoubleScalar
|
||||
virtual Ostream& write(const longDoubleScalar);
|
||||
|
||||
//- Write binary block
|
||||
virtual Ostream& write(const char*, std::streamsize);
|
||||
|
||||
|
|
|
@ -146,6 +146,13 @@ Foam::Ostream& Foam::prefixOSstream::write(const doubleScalar val)
|
|||
}
|
||||
|
||||
|
||||
Foam::Ostream& Foam::prefixOSstream::write(const longDoubleScalar val)
|
||||
{
|
||||
checkWritePrefix();
|
||||
return OSstream::write(val);
|
||||
}
|
||||
|
||||
|
||||
Foam::Ostream& Foam::prefixOSstream::write
|
||||
(
|
||||
const char* buf,
|
||||
|
|
|
@ -73,8 +73,8 @@ public:
|
|||
(
|
||||
ostream& os,
|
||||
const string& name,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion,
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion,
|
||||
compressionType compression=UNCOMPRESSED
|
||||
);
|
||||
|
||||
|
@ -130,6 +130,9 @@ public:
|
|||
//- Write doubleScalar
|
||||
virtual Ostream& write(const doubleScalar);
|
||||
|
||||
//- Write longDoubleScalar
|
||||
virtual Ostream& write(const longDoubleScalar);
|
||||
|
||||
//- Write binary block
|
||||
virtual Ostream& write(const char*, std::streamsize);
|
||||
|
||||
|
|
|
@ -60,8 +60,8 @@ public:
|
|||
IStringStream
|
||||
(
|
||||
const string& buffer,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion
|
||||
)
|
||||
:
|
||||
ISstream
|
||||
|
@ -78,8 +78,8 @@ public:
|
|||
IStringStream
|
||||
(
|
||||
const char* buffer,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion
|
||||
)
|
||||
:
|
||||
ISstream
|
||||
|
|
|
@ -59,8 +59,8 @@ public:
|
|||
//- Construct and set stream status
|
||||
OStringStream
|
||||
(
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion
|
||||
)
|
||||
:
|
||||
OSstream
|
||||
|
|
|
@ -149,6 +149,13 @@ Foam::Istream& Foam::ITstream::read(doubleScalar&)
|
|||
}
|
||||
|
||||
|
||||
Foam::Istream& Foam::ITstream::read(longDoubleScalar&)
|
||||
{
|
||||
notImplemented("Istream& ITstream::read(longDoubleScalar&)");
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
Foam::Istream& Foam::ITstream::read(char*, std::streamsize)
|
||||
{
|
||||
notImplemented("Istream& ITstream::read(char*, std::streamsize)");
|
||||
|
|
|
@ -70,8 +70,8 @@ public:
|
|||
(
|
||||
const string& name,
|
||||
const tokenList& tokens,
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion
|
||||
)
|
||||
:
|
||||
Istream(format, version),
|
||||
|
@ -167,6 +167,9 @@ public:
|
|||
//- Read a doubleScalar
|
||||
virtual Istream& read(doubleScalar&);
|
||||
|
||||
//- Read a longDoubleScalar
|
||||
virtual Istream& read(longDoubleScalar&);
|
||||
|
||||
//- Read binary block
|
||||
virtual Istream& read(char*, std::streamsize);
|
||||
|
||||
|
|
|
@ -151,8 +151,8 @@ public:
|
|||
//- Construct and set stream status
|
||||
OSHA1stream
|
||||
(
|
||||
streamFormat format=ASCII,
|
||||
versionNumber version=currentVersion
|
||||
streamFormat format = ASCII,
|
||||
versionNumber version = currentVersion
|
||||
)
|
||||
:
|
||||
OSstream
|
||||
|
|
|
@ -81,6 +81,7 @@ public:
|
|||
LABEL,
|
||||
FLOAT_SCALAR,
|
||||
DOUBLE_SCALAR,
|
||||
LONG_DOUBLE_SCALAR,
|
||||
COMPOUND,
|
||||
|
||||
ERROR
|
||||
|
@ -258,6 +259,7 @@ private:
|
|||
label labelToken_;
|
||||
floatScalar floatScalarToken_;
|
||||
doubleScalar doubleScalarToken_;
|
||||
longDoubleScalar longDoubleScalarToken_;
|
||||
mutable compound* compoundTokenPtr_;
|
||||
};
|
||||
|
||||
|
@ -307,6 +309,9 @@ public:
|
|||
//- Construct doubleScalar token
|
||||
inline token(const doubleScalar, label lineNumber=0);
|
||||
|
||||
//- Construct longDoubleScalar token
|
||||
inline token(const longDoubleScalar, label lineNumber=0);
|
||||
|
||||
//- Construct from Istream
|
||||
token(Istream&);
|
||||
|
||||
|
@ -344,6 +349,9 @@ public:
|
|||
inline bool isDoubleScalar() const;
|
||||
inline doubleScalar doubleScalarToken() const;
|
||||
|
||||
inline bool isLongDoubleScalar() const;
|
||||
inline longDoubleScalar longDoubleScalarToken() const;
|
||||
|
||||
inline bool isScalar() const;
|
||||
inline scalar scalarToken() const;
|
||||
|
||||
|
@ -391,6 +399,7 @@ public:
|
|||
inline void operator=(const label);
|
||||
inline void operator=(const floatScalar);
|
||||
inline void operator=(const doubleScalar);
|
||||
inline void operator=(const longDoubleScalar);
|
||||
|
||||
inline void operator=(compound*);
|
||||
|
||||
|
@ -404,6 +413,7 @@ public:
|
|||
inline bool operator==(const label) const;
|
||||
inline bool operator==(const floatScalar) const;
|
||||
inline bool operator==(const doubleScalar) const;
|
||||
inline bool operator==(const longDoubleScalar) const;
|
||||
|
||||
|
||||
// Inequality
|
||||
|
@ -415,6 +425,7 @@ public:
|
|||
inline bool operator!=(const label) const;
|
||||
inline bool operator!=(const floatScalar) const;
|
||||
inline bool operator!=(const doubleScalar) const;
|
||||
inline bool operator!=(const longDoubleScalar) const;
|
||||
|
||||
|
||||
// IOstream operators
|
||||
|
|
|
@ -103,6 +103,10 @@ inline token::token(const token& t)
|
|||
doubleScalarToken_ = t.doubleScalarToken_;
|
||||
break;
|
||||
|
||||
case LONG_DOUBLE_SCALAR:
|
||||
longDoubleScalarToken_ = t.longDoubleScalarToken_;
|
||||
break;
|
||||
|
||||
case COMPOUND:
|
||||
compoundTokenPtr_ = t.compoundTokenPtr_;
|
||||
compoundTokenPtr_->refCount::operator++();
|
||||
|
@ -162,6 +166,15 @@ inline token::token(const doubleScalar s, label lineNumber)
|
|||
{}
|
||||
|
||||
|
||||
// Construct longDoubleScalar token
|
||||
inline token::token(const longDoubleScalar s, label lineNumber)
|
||||
:
|
||||
type_(LONG_DOUBLE_SCALAR),
|
||||
longDoubleScalarToken_(s),
|
||||
lineNumber_(lineNumber)
|
||||
{}
|
||||
|
||||
|
||||
// * * * * * * * * * * * * * * * * Destructors * * * * * * * * * * * * * * * //
|
||||
|
||||
// Delete token clearing the storage used by word or string
|
||||
|
@ -289,6 +302,7 @@ inline bool token::isDoubleScalar() const
|
|||
return (type_ == DOUBLE_SCALAR);
|
||||
}
|
||||
|
||||
|
||||
inline doubleScalar token::doubleScalarToken() const
|
||||
{
|
||||
if (type_ == DOUBLE_SCALAR)
|
||||
|
@ -303,9 +317,34 @@ inline doubleScalar token::doubleScalarToken() const
|
|||
}
|
||||
|
||||
|
||||
inline bool token::isLongDoubleScalar() const
|
||||
{
|
||||
return (type_ == LONG_DOUBLE_SCALAR);
|
||||
}
|
||||
|
||||
|
||||
inline longDoubleScalar token::longDoubleScalarToken() const
|
||||
{
|
||||
if (type_ == LONG_DOUBLE_SCALAR)
|
||||
{
|
||||
return longDoubleScalarToken_;
|
||||
}
|
||||
else
|
||||
{
|
||||
parseError("longDoubleScalar");
|
||||
return 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
inline bool token::isScalar() const
|
||||
{
|
||||
return (type_ == FLOAT_SCALAR || type_ == DOUBLE_SCALAR);
|
||||
return
|
||||
(
|
||||
type_ == FLOAT_SCALAR
|
||||
|| type_ == DOUBLE_SCALAR
|
||||
|| type_ == LONG_DOUBLE_SCALAR
|
||||
);
|
||||
}
|
||||
|
||||
inline scalar token::scalarToken() const
|
||||
|
@ -318,6 +357,10 @@ inline scalar token::scalarToken() const
|
|||
{
|
||||
return doubleScalarToken_;
|
||||
}
|
||||
else if (type_ == LONG_DOUBLE_SCALAR)
|
||||
{
|
||||
return longDoubleScalarToken_;
|
||||
}
|
||||
else
|
||||
{
|
||||
parseError("scalar");
|
||||
|
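Taken together, the additions above let a token carry a long double value end to end: it can be constructed from a longDoubleScalar, queried with isLongDoubleScalar(), and it still satisfies the generic isScalar()/scalarToken() interface. A hedged sketch using only the members added in this commit:

#include "token.H"

Foam::scalar readBackLongDouble()
{
    Foam::longDoubleScalar x = 3.14159265358979323846L;

    Foam::token t(x);                 // constructs a LONG_DOUBLE_SCALAR token

    if (t.isLongDoubleScalar())
    {
        // scalarToken() now also accepts long double tokens, so callers that
        // only ask for a generic scalar keep working
        return t.scalarToken();
    }

    return 0;
}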
@ -420,6 +463,10 @@ inline void token::operator=(const token& t)
|
|||
doubleScalarToken_ = t.doubleScalarToken_;
|
||||
break;
|
||||
|
||||
case LONG_DOUBLE_SCALAR:
|
||||
longDoubleScalarToken_ = t.longDoubleScalarToken_;
|
||||
break;
|
||||
|
||||
case COMPOUND:
|
||||
compoundTokenPtr_ = t.compoundTokenPtr_;
|
||||
compoundTokenPtr_->refCount::operator++();
|
||||
|
@ -432,6 +479,7 @@ inline void token::operator=(const token& t)
|
|||
lineNumber_ = t.lineNumber_;
|
||||
}
|
||||
|
||||
|
||||
inline void token::operator=(const punctuationToken p)
|
||||
{
|
||||
clear();
|
||||
|
@ -484,6 +532,13 @@ inline void token::operator=(const doubleScalar s)
|
|||
doubleScalarToken_ = s;
|
||||
}
|
||||
|
||||
inline void token::operator=(const longDoubleScalar s)
|
||||
{
|
||||
clear();
|
||||
type_ = LONG_DOUBLE_SCALAR;
|
||||
longDoubleScalarToken_ = s;
|
||||
}
|
||||
|
||||
inline void token::operator=(token::compound* cPtr)
|
||||
{
|
||||
clear();
|
||||
|
@ -522,6 +577,9 @@ inline bool token::operator==(const token& t) const
|
|||
case DOUBLE_SCALAR:
|
||||
return equal(doubleScalarToken_, t.doubleScalarToken_);
|
||||
|
||||
case LONG_DOUBLE_SCALAR:
|
||||
return equal(longDoubleScalarToken_, t.longDoubleScalarToken_);
|
||||
|
||||
case COMPOUND:
|
||||
return compoundTokenPtr_ == t.compoundTokenPtr_;
|
||||
|
||||
|
@ -562,6 +620,11 @@ inline bool token::operator==(const doubleScalar s) const
|
|||
return (type_ == DOUBLE_SCALAR && equal(doubleScalarToken_, s));
|
||||
}
|
||||
|
||||
inline bool token::operator==(const longDoubleScalar s) const
|
||||
{
|
||||
return (type_ == LONG_DOUBLE_SCALAR && equal(longDoubleScalarToken_, s));
|
||||
}
|
||||
|
||||
inline bool token::operator!=(const token& t) const
|
||||
{
|
||||
return !operator==(t);
|
||||
|
@ -592,6 +655,11 @@ inline bool token::operator!=(const doubleScalar s) const
|
|||
return !operator==(s);
|
||||
}
|
||||
|
||||
inline bool token::operator!=(const longDoubleScalar s) const
|
||||
{
|
||||
return !operator==(s);
|
||||
}
|
||||
|
||||
inline bool token::operator!=(const label l) const
|
||||
{
|
||||
return !operator==(l);
|
||||
|
|
|
@ -85,6 +85,10 @@ Foam::Ostream& Foam::operator<<(Ostream& os, const token& t)
|
|||
os << t.doubleScalarToken_;
|
||||
break;
|
||||
|
||||
case token::LONG_DOUBLE_SCALAR:
|
||||
os << t.longDoubleScalarToken_;
|
||||
break;
|
||||
|
||||
case token::COMPOUND:
|
||||
os << *t.compoundTokenPtr_;
|
||||
break;
|
||||
|
@ -168,6 +172,10 @@ ostream& Foam::operator<<(ostream& os, const InfoProxy<token>& ip)
|
|||
os << " the doubleScalar " << t.doubleScalarToken();
|
||||
break;
|
||||
|
||||
case token::LONG_DOUBLE_SCALAR:
|
||||
os << " the longDoubleScalar " << t.doubleScalarToken();
|
||||
break;
|
||||
|
||||
case token::COMPOUND:
|
||||
{
|
||||
if (t.compoundToken().empty())
|
||||
|
@ -238,6 +246,10 @@ Ostream& operator<<(Ostream& os, const InfoProxy<token>& ip)
|
|||
os << " the doubleScalar " << t.doubleScalarToken();
|
||||
break;
|
||||
|
||||
case token::LONG_DOUBLE_SCALAR:
|
||||
os << " the longDoubleScalar " << t.longDoubleScalarToken();
|
||||
break;
|
||||
|
||||
case token::COMPOUND:
|
||||
{
|
||||
if (t.compoundToken().empty())
|
||||
|
|
|
@ -75,7 +75,7 @@ void Foam::Time::adjustDeltaT()
|
|||
{
|
||||
scalar timeToNextWrite = max
|
||||
(
|
||||
0.0,
|
||||
scalar(0),
|
||||
(outputTimeIndex_ + 1)*writeInterval_ - (value() - startTime_)
|
||||
);
|
||||
|
||||
|
|
|
@ -516,8 +516,8 @@ evaluate
|
|||
|
||||
// ZT, 22-07-2014 - point ordering is not same
|
||||
// at master and slave side after topology change
|
||||
const labelList& neiPoints =
|
||||
procPatch_.procPolyPatch().neighbPoints();
|
||||
// const labelList& neiPoints =
|
||||
// procPatch_.procPolyPatch().neighbPoints();
|
||||
|
||||
// VS, 2015-04-12 - This doesn't work in parallel!
|
||||
// tpn =
|
||||
|
|
|
@ -28,7 +28,7 @@ Description
|
|||
|
||||
Author
|
||||
|
||||
\*----------------------------------------------------------------------------*/
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
#include "BlockLduInterfaceField.H"
|
||||
#include "processorBlockLduInterfaceField.H"
|
||||
|
@ -41,10 +41,10 @@ namespace Foam
|
|||
|
||||
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
|
||||
|
||||
#define makeTemplateTypeNameAndDebug(type, Type, args...) \
|
||||
defineTemplateTypeNameAndDebug(BlockLduInterfaceField<type>, 0); \
|
||||
\
|
||||
defineTemplateTypeNameAndDebug(processorBlockLduInterfaceField<type>, 0); \
|
||||
#define makeTemplateTypeNameAndDebug(type, Type, args...) \
|
||||
defineTemplateTypeNameAndDebug(BlockLduInterfaceField<type>, 0); \
|
||||
\
|
||||
defineTemplateTypeNameAndDebug(processorBlockLduInterfaceField<type>, 0);
|
||||
|
||||
forAllVectorNTypes(makeTemplateTypeNameAndDebug);
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ namespace Foam
|
|||
{
|
||||
|
||||
/*---------------------------------------------------------------------------*\
|
||||
Class processorBlockLduInterfaceField Declaration
|
||||
Class processorBlockLduInterfaceField Declaration
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
template<class Type>
|
||||
|
|
|
@ -225,7 +225,9 @@ Foam::label Foam::face::split
|
|||
splitEdge /= Foam::mag(splitEdge) + VSMALL;
|
||||
|
||||
const scalar splitCos = splitEdge & rightEdge;
|
||||
const scalar splitAngle = acos(max(-1.0, min(1.0, splitCos)));
|
||||
const scalar splitAngle =
|
||||
acos(max(scalar(-1), min(scalar(1), splitCos)));
|
||||
|
||||
const scalar angleDiff = fabs(splitAngle - bisectAngle);
|
||||
|
||||
if (angleDiff < minDiff)
|
||||
|
|
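The substitution of 0.0 and 1.0 by scalar(0) and scalar(1) here and in the following triangle and eigenValues hunks is presumably required because, with scalar now possibly defined as long double, min/max calls mixing a double literal with a scalar argument no longer resolve to a single type. A minimal standalone illustration of the problem, using std::max in place of Foam's max:

#include <algorithm>

typedef long double scalar;            // what a WM_LDP build effectively selects

scalar clampPositive(scalar disc)
{
    // return std::max(0.0, disc);     // would not compile: double vs long double
    return std::max(scalar(0), disc);  // fine: both arguments have the same type
}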
|
@ -400,7 +400,7 @@ inline scalar triangle<Point, PointRef>::circumRadius() const
|
|||
{
|
||||
scalar a = (d1 + d2)*(d2 + d3)*(d3 + d1) / denom;
|
||||
|
||||
return 0.5*Foam::sqrt(min(GREAT, max(0, a)));
|
||||
return 0.5*Foam::sqrt(min(GREAT, max(scalar(0), a)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -411,9 +411,7 @@ inline scalar triangle<Point, PointRef>::quality() const
|
|||
return
|
||||
mag()
|
||||
/(
|
||||
mathematicalConstant::pi
|
||||
*Foam::sqr(circumRadius())
|
||||
*0.413497
|
||||
0.413497*mathematicalConstant::pi*sqr(circumRadius())
|
||||
+ VSMALL
|
||||
);
|
||||
}
|
||||
|
@ -775,8 +773,8 @@ inline bool triangle<Point, PointRef>::classify
|
|||
if (hit)
|
||||
{
|
||||
// alpha,beta might get negative due to precision errors
|
||||
alpha = max(0.0, min(1.0, alpha));
|
||||
beta = max(0.0, min(1.0, beta));
|
||||
alpha = max(scalar(0), min(scalar(1), alpha));
|
||||
beta = max(scalar(0), min(scalar(1), beta));
|
||||
}
|
||||
|
||||
nearType = NONE;
|
||||
|
|
|
@ -161,11 +161,11 @@ inline Scalar sqrtSumSqr(const Scalar a, const Scalar b)
|
|||
|
||||
if (maga > magb)
|
||||
{
|
||||
return maga*sqrt(1.0 + sqr(magb/maga));
|
||||
return maga*::sqrt(1.0 + sqr(magb/maga));
|
||||
}
|
||||
else
|
||||
{
|
||||
return magb < ScalarVSMALL ? 0.0 : magb*sqrt(1.0 + sqr(maga/magb));
|
||||
return magb < ScalarVSMALL ? 0.0 : magb*::sqrt(1.0 + sqr(maga/magb));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -84,6 +84,13 @@ inline double pow(const type1 s, const type2 e) \
|
|||
return ::pow(double(s), double(e)); \
|
||||
}
|
||||
|
||||
MAXMINPOW(long double, long double, long double)
|
||||
MAXMINPOW(long double, long double, double)
|
||||
MAXMINPOW(long double, long double, float)
|
||||
MAXMINPOW(long double, long double, int)
|
||||
MAXMINPOW(long double, double, long double)
|
||||
MAXMINPOW(long double, float, long double)
|
||||
MAXMINPOW(long double, int, long double)
|
||||
|
||||
MAXMINPOW(double, double, double)
|
||||
MAXMINPOW(double, double, float)
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
/*---------------------------------------------------------------------------*\
|
||||
========= |
|
||||
\\ / F ield | foam-extend: Open Source CFD
|
||||
\\ / O peration |
|
||||
\\ / A nd | For copyright notice see file Copyright
|
||||
\\/ M anipulation |
|
||||
-------------------------------------------------------------------------------
|
||||
License
|
||||
This file is part of foam-extend.
|
||||
|
||||
foam-extend is free software: you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the
|
||||
Free Software Foundation, either version 3 of the License, or (at your
|
||||
option) any later version.
|
||||
|
||||
foam-extend is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
#include "longDoubleScalar.H"
|
||||
#include "IOstreams.H"
|
||||
|
||||
#include <sstream>
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
#define Scalar longDoubleScalar
|
||||
#define ScalarVGREAT longDoubleScalarVGREAT
|
||||
#define ScalarVSMALL longDoubleScalarVSMALL
|
||||
#define readScalar readLongDoubleScalar
|
||||
#include "Scalar.C"
|
||||
#undef Scalar
|
||||
#undef ScalarVGREAT
|
||||
#undef ScalarVSMALL
|
||||
#undef readScalar
|
||||
|
||||
// ************************************************************************* //
|
115
src/foam/primitives/Scalar/longDoubleScalar/longDoubleScalar.H
Normal file
|
@ -0,0 +1,115 @@
|
|||
/*---------------------------------------------------------------------------*\
|
||||
========= |
|
||||
\\ / F ield | foam-extend: Open Source CFD
|
||||
\\ / O peration |
|
||||
\\ / A nd | For copyright notice see file Copyright
|
||||
\\/ M anipulation |
|
||||
-------------------------------------------------------------------------------
|
||||
License
|
||||
This file is part of foam-extend.
|
||||
|
||||
foam-extend is free software: you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the
|
||||
Free Software Foundation, either version 3 of the License, or (at your
|
||||
option) any later version.
|
||||
|
||||
foam-extend is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Typedef
|
||||
Foam::longDoubleScalar
|
||||
|
||||
Description
|
||||
Long double precision floating point scalar type.
|
||||
|
||||
SourceFiles
|
||||
longDoubleScalar.C
|
||||
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef longDoubleScalar_H
|
||||
#define longDoubleScalar_H
|
||||
|
||||
#include "doubleFloat.H"
|
||||
#include "direction.H"
|
||||
#include "word.H"
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
namespace Foam
|
||||
{
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
typedef long double longDoubleScalar;
|
||||
|
||||
// Largest and smallest scalar values allowed in certain parts of the code.
|
||||
// (15 is the number of significant figures in an
|
||||
// IEEE double precision number. See limits.h or float.h)
|
||||
static const longDoubleScalar longDoubleScalarGREAT = 1.0e+15;
|
||||
static const longDoubleScalar longDoubleScalarVGREAT = 1.0e+300;
|
||||
static const longDoubleScalar longDoubleScalarROOTVGREAT = 1.0e+150;
|
||||
static const longDoubleScalar longDoubleScalarSMALL = 1.0e-15;
|
||||
static const longDoubleScalar longDoubleScalarVSMALL = 1.0e-300;
|
||||
static const longDoubleScalar longDoubleScalarROOTVSMALL = 1.0e-150;
|
||||
|
||||
|
||||
#define Scalar longDoubleScalar
|
||||
#define ScalarVGREAT longDoubleScalarVGREAT
|
||||
#define ScalarVSMALL longDoubleScalarVSMALL
|
||||
#define readScalar readLongDoubleScalar
|
||||
|
||||
inline Scalar mag(const Scalar s)
|
||||
{
|
||||
return ::fabs(s);
|
||||
}
|
||||
|
||||
#define transFunc(func) \
|
||||
inline Scalar func(const Scalar s) \
|
||||
{ \
|
||||
return ::func(s); \
|
||||
}
|
||||
|
||||
#include "Scalar.H"
|
||||
|
||||
inline Scalar hypot(const Scalar x, const Scalar y)
|
||||
{
|
||||
return ::hypot(x, y);
|
||||
}
|
||||
|
||||
inline Scalar atan2(const Scalar y, const Scalar x)
|
||||
{
|
||||
return ::atan2(y, x);
|
||||
}
|
||||
|
||||
inline Scalar jn(const int n, const Scalar s)
|
||||
{
|
||||
return ::jn(n, s);
|
||||
}
|
||||
|
||||
inline Scalar yn(const int n, const Scalar s)
|
||||
{
|
||||
return ::yn(n, s);
|
||||
}
|
||||
|
||||
#undef Scalar
|
||||
#undef ScalarVGREAT
|
||||
#undef ScalarVSMALL
|
||||
#undef readScalar
|
||||
#undef transFunc
|
||||
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
} // End namespace Foam
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
#endif
|
||||
|
||||
// ************************************************************************* //
|
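As with the float and double variants, longDoubleScalar.H instantiates the shared Scalar.H/Scalar.C templates by redefining the Scalar, ScalarVGREAT, ScalarVSMALL, readScalar and transFunc macros before including them. A stripped-down sketch of that include-based instantiation trick; the names here are illustrative, not the foam-extend headers (and the wrappers forward to std:: rather than the global C functions used above):

#include <cmath>

namespace demo
{

#define Scalar long double
#define transFunc(func)                                                       \
    inline Scalar func(const Scalar s)                                        \
    {                                                                         \
        return std::func(s);   /* long double overload from <cmath> */        \
    }

// In foam-extend the shared Scalar.H would be included at this point;
// a few wrappers stand in for it here:
transFunc(sqrt)
transFunc(exp)
transFunc(log)

#undef Scalar
#undef transFunc

} // namespace demo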
|
@ -38,6 +38,7 @@ SourceFiles
|
|||
|
||||
#include "floatScalar.H"
|
||||
#include "doubleScalar.H"
|
||||
#include "longDoubleScalar.H"
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
|
@ -77,6 +78,24 @@ namespace Foam
|
|||
scalar readScalar(Istream& is);
|
||||
}
|
||||
|
||||
#elif defined(WM_LDP)
|
||||
|
||||
// Define scalar as a long double
|
||||
|
||||
namespace Foam
|
||||
{
|
||||
typedef longDoubleScalar scalar;
|
||||
|
||||
static const scalar GREAT = longDoubleScalarGREAT;
|
||||
static const scalar VGREAT = longDoubleScalarVGREAT;
|
||||
static const scalar ROOTVGREAT = longDoubleScalarROOTVGREAT;
|
||||
static const scalar SMALL = longDoubleScalarSMALL;
|
||||
static const scalar VSMALL = longDoubleScalarVSMALL;
|
||||
static const scalar ROOTVSMALL = longDoubleScalarROOTVSMALL;
|
||||
|
||||
scalar readScalar(Istream& is);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
|
|
@ -118,7 +118,7 @@ vector eigenValues(const tensor& t)
|
|||
|
||||
if (disc >= -SMALL)
|
||||
{
|
||||
scalar q = -0.5*sqrt(max(0.0, disc));
|
||||
scalar q = -0.5*sqrt(max(scalar(0), disc));
|
||||
|
||||
i = 0;
|
||||
ii = -0.5*a + q;
|
||||
|
@ -381,7 +381,7 @@ vector eigenValues(const symmTensor& t)
|
|||
// If there is a zero root
|
||||
if (mag(c) < SMALL)
|
||||
{
|
||||
const scalar disc = Foam::max(sqr(a) - 4*b, 0.0);
|
||||
const scalar disc = Foam::max(sqr(a) - 4*b, scalar(0));
|
||||
|
||||
scalar q = -0.5*sqrt(max(scalar(0), disc));
|
||||
|
||||
|
|
|
@ -117,163 +117,163 @@ inline SymmTensor4thOrder<Cmpt> transform
|
|||
const SymmTensor4thOrder<Cmpt>& C
|
||||
)
|
||||
{
|
||||
//- represent fourth order tensors in 6x6 notation then rotation is given by
|
||||
//- C_rotated_af = A_ba * C_cd * A_ef
|
||||
//- where A is a function of stt
|
||||
//- represent fourth order tensors in 6x6 notation. Rotation is given by
|
||||
//- C_rotated_af = A_ba * C_cd * A_ef
|
||||
//- where A is a function of stt
|
||||
|
||||
const double s = ::sqrt(2);
|
||||
const double A[6][6] =
|
||||
const scalar s = ::sqrt(2);
|
||||
const scalar A[6][6] =
|
||||
{
|
||||
{ stt.xx()*stt.xx(), stt.xy()*stt.xy(), stt.xz()*stt.xz(), s*stt.xx()*stt.xy(), s*stt.xy()*stt.xz(), s*stt.xz()*stt.xx() },
|
||||
{ stt.xy()*stt.xy(), stt.yy()*stt.yy(), stt.yz()*stt.yz(), s*stt.xy()*stt.yy(), s*stt.yy()*stt.yz(), s*stt.yz()*stt.xy() },
|
||||
{ stt.xz()*stt.xz(), stt.yz()*stt.yz(), stt.zz()*stt.zz(), s*stt.xz()*stt.yz(), s*stt.yz()*stt.zz(), s*stt.zz()*stt.xz() },
|
||||
{ s*stt.xx()*stt.xy(), s*stt.xy()*stt.yy(), s*stt.xz()*stt.yz(),
|
||||
(stt.xy()*stt.xy()+stt.xx()*stt.yy()), (stt.xz()*stt.yy()+stt.xy()*stt.yz()), (stt.xx()*stt.yz()+stt.xz()*stt.xy()) },
|
||||
{ s*stt.xy()*stt.xz(), s*stt.yy()*stt.yz(), s*stt.yz()*stt.zz(),
|
||||
(stt.yy()*stt.xz()+stt.xy()*stt.yz()), (stt.yz()*stt.yz()+stt.yy()*stt.zz()), (stt.xy()*stt.zz()+stt.yz()*stt.xz()) },
|
||||
{ s*stt.xz()*stt.xx(), s*stt.yz()*stt.xy(), s*stt.zz()*stt.xz(),
|
||||
(stt.yz()*stt.xx()+stt.xz()*stt.xy()), (stt.zz()*stt.xy()+stt.yz()*stt.xz()), (stt.xz()*stt.xz()+stt.zz()*stt.xx()) }
|
||||
{ stt.xx()*stt.xx(), stt.xy()*stt.xy(), stt.xz()*stt.xz(), s*stt.xx()*stt.xy(), s*stt.xy()*stt.xz(), s*stt.xz()*stt.xx() },
|
||||
{ stt.xy()*stt.xy(), stt.yy()*stt.yy(), stt.yz()*stt.yz(), s*stt.xy()*stt.yy(), s*stt.yy()*stt.yz(), s*stt.yz()*stt.xy() },
|
||||
{ stt.xz()*stt.xz(), stt.yz()*stt.yz(), stt.zz()*stt.zz(), s*stt.xz()*stt.yz(), s*stt.yz()*stt.zz(), s*stt.zz()*stt.xz() },
|
||||
{ s*stt.xx()*stt.xy(), s*stt.xy()*stt.yy(), s*stt.xz()*stt.yz(),
|
||||
(stt.xy()*stt.xy()+stt.xx()*stt.yy()), (stt.xz()*stt.yy()+stt.xy()*stt.yz()), (stt.xx()*stt.yz()+stt.xz()*stt.xy()) },
|
||||
{ s*stt.xy()*stt.xz(), s*stt.yy()*stt.yz(), s*stt.yz()*stt.zz(),
|
||||
(stt.yy()*stt.xz()+stt.xy()*stt.yz()), (stt.yz()*stt.yz()+stt.yy()*stt.zz()), (stt.xy()*stt.zz()+stt.yz()*stt.xz()) },
|
||||
{ s*stt.xz()*stt.xx(), s*stt.yz()*stt.xy(), s*stt.zz()*stt.xz(),
|
||||
(stt.yz()*stt.xx()+stt.xz()*stt.xy()), (stt.zz()*stt.xy()+stt.yz()*stt.xz()), (stt.xz()*stt.xz()+stt.zz()*stt.xx()) }
|
||||
};
|
||||
|
||||
return symmTensor4thOrder
|
||||
return symmTensor4thOrder
|
||||
(
|
||||
// xxxx
|
||||
A[0][0] * C.xxxx() * A[0][0] +
|
||||
A[1][0] * C.xxyy() * A[0][0] +
|
||||
A[2][0] * C.xxzz() * A[0][0] +
|
||||
A[0][0] * C.xxyy() * A[1][0] +
|
||||
A[1][0] * C.yyyy() * A[1][0] +
|
||||
A[2][0] * C.yyzz() * A[1][0] +
|
||||
A[0][0] * C.xxzz() * A[2][0] +
|
||||
A[1][0] * C.yyzz() * A[2][0] +
|
||||
A[2][0] * C.zzzz() * A[2][0] +
|
||||
A[3][0] * C.xyxy() * A[3][0] +
|
||||
A[4][0] * C.yzyz() * A[4][0] +
|
||||
A[5][0] * C.zxzx() * A[5][0],
|
||||
// xxxx
|
||||
A[0][0] * C.xxxx() * A[0][0] +
|
||||
A[1][0] * C.xxyy() * A[0][0] +
|
||||
A[2][0] * C.xxzz() * A[0][0] +
|
||||
A[0][0] * C.xxyy() * A[1][0] +
|
||||
A[1][0] * C.yyyy() * A[1][0] +
|
||||
A[2][0] * C.yyzz() * A[1][0] +
|
||||
A[0][0] * C.xxzz() * A[2][0] +
|
||||
A[1][0] * C.yyzz() * A[2][0] +
|
||||
A[2][0] * C.zzzz() * A[2][0] +
|
||||
A[3][0] * C.xyxy() * A[3][0] +
|
||||
A[4][0] * C.yzyz() * A[4][0] +
|
||||
A[5][0] * C.zxzx() * A[5][0],
|
||||
|
||||
// xxyy
|
||||
A[0][0] * C.xxxx() * A[0][1] +
|
||||
A[1][0] * C.xxyy() * A[0][1] +
|
||||
A[2][0] * C.xxzz() * A[0][1] +
|
||||
A[0][0] * C.xxyy() * A[1][1] +
|
||||
A[1][0] * C.yyyy() * A[1][1] +
|
||||
A[2][0] * C.yyzz() * A[1][1] +
|
||||
A[0][0] * C.xxzz() * A[2][1] +
|
||||
A[1][0] * C.yyzz() * A[2][1] +
|
||||
A[2][0] * C.zzzz() * A[2][1] +
|
||||
A[3][0] * C.xyxy() * A[3][1] +
|
||||
A[4][0] * C.yzyz() * A[4][1] +
|
||||
A[5][0] * C.zxzx() * A[5][1],
|
||||
// xxyy
|
||||
A[0][0] * C.xxxx() * A[0][1] +
|
||||
A[1][0] * C.xxyy() * A[0][1] +
|
||||
A[2][0] * C.xxzz() * A[0][1] +
|
||||
A[0][0] * C.xxyy() * A[1][1] +
|
||||
A[1][0] * C.yyyy() * A[1][1] +
|
||||
A[2][0] * C.yyzz() * A[1][1] +
|
||||
A[0][0] * C.xxzz() * A[2][1] +
|
||||
A[1][0] * C.yyzz() * A[2][1] +
|
||||
A[2][0] * C.zzzz() * A[2][1] +
|
||||
A[3][0] * C.xyxy() * A[3][1] +
|
||||
A[4][0] * C.yzyz() * A[4][1] +
|
||||
A[5][0] * C.zxzx() * A[5][1],
|
||||
|
||||
// xxzz
|
||||
A[0][0] * C.xxxx() * A[0][2] +
|
||||
A[1][0] * C.xxyy() * A[0][2] +
|
||||
A[2][0] * C.xxzz() * A[0][2] +
|
||||
A[0][0] * C.xxyy() * A[1][2] +
|
||||
A[1][0] * C.yyyy() * A[1][2] +
|
||||
A[2][0] * C.yyzz() * A[1][2] +
|
||||
A[0][0] * C.xxzz() * A[2][2] +
|
||||
A[1][0] * C.yyzz() * A[2][2] +
|
||||
A[2][0] * C.zzzz() * A[2][2] +
|
||||
A[3][0] * C.xyxy() * A[3][2] +
|
||||
A[4][0] * C.yzyz() * A[4][2] +
|
||||
A[5][0] * C.zxzx() * A[5][2],
|
||||
// xxzz
|
||||
A[0][0] * C.xxxx() * A[0][2] +
|
||||
A[1][0] * C.xxyy() * A[0][2] +
|
||||
A[2][0] * C.xxzz() * A[0][2] +
|
||||
A[0][0] * C.xxyy() * A[1][2] +
|
||||
A[1][0] * C.yyyy() * A[1][2] +
|
||||
A[2][0] * C.yyzz() * A[1][2] +
|
||||
A[0][0] * C.xxzz() * A[2][2] +
|
||||
A[1][0] * C.yyzz() * A[2][2] +
|
||||
A[2][0] * C.zzzz() * A[2][2] +
|
||||
A[3][0] * C.xyxy() * A[3][2] +
|
||||
A[4][0] * C.yzyz() * A[4][2] +
|
||||
A[5][0] * C.zxzx() * A[5][2],
|
||||
|
||||
// yyyy
|
||||
A[0][1] * C.xxxx() * A[0][1] +
|
||||
A[1][1] * C.xxyy() * A[0][1] +
|
||||
A[2][1] * C.xxzz() * A[0][1] +
|
||||
A[0][1] * C.xxyy() * A[1][1] +
|
||||
A[1][1] * C.yyyy() * A[1][1] +
|
||||
A[2][1] * C.yyzz() * A[1][1] +
|
||||
A[0][1] * C.xxzz() * A[2][1] +
|
||||
A[1][1] * C.yyzz() * A[2][1] +
|
||||
A[2][1] * C.zzzz() * A[2][1] +
|
||||
A[3][1] * C.xyxy() * A[3][1] +
|
||||
A[4][1] * C.yzyz() * A[4][1] +
|
||||
A[5][1] * C.zxzx() * A[5][1],
|
||||
// yyyy
|
||||
A[0][1] * C.xxxx() * A[0][1] +
|
||||
A[1][1] * C.xxyy() * A[0][1] +
|
||||
A[2][1] * C.xxzz() * A[0][1] +
|
||||
A[0][1] * C.xxyy() * A[1][1] +
|
||||
A[1][1] * C.yyyy() * A[1][1] +
|
||||
A[2][1] * C.yyzz() * A[1][1] +
|
||||
A[0][1] * C.xxzz() * A[2][1] +
|
||||
A[1][1] * C.yyzz() * A[2][1] +
|
||||
A[2][1] * C.zzzz() * A[2][1] +
|
||||
A[3][1] * C.xyxy() * A[3][1] +
|
||||
A[4][1] * C.yzyz() * A[4][1] +
|
||||
A[5][1] * C.zxzx() * A[5][1],
|
||||
|
||||
// yyzz
|
||||
A[0][1] * C.xxxx() * A[0][2] +
|
||||
A[1][1] * C.xxyy() * A[0][2] +
|
||||
A[2][1] * C.xxzz() * A[0][2] +
|
||||
A[0][1] * C.xxyy() * A[1][2] +
|
||||
A[1][1] * C.yyyy() * A[1][2] +
|
||||
A[2][1] * C.yyzz() * A[1][2] +
|
||||
A[0][1] * C.xxzz() * A[2][2] +
|
||||
A[1][1] * C.yyzz() * A[2][2] +
|
||||
A[2][1] * C.zzzz() * A[2][2] +
|
||||
A[3][1] * C.xyxy() * A[3][2] +
|
||||
A[4][1] * C.yzyz() * A[4][2] +
|
||||
A[5][1] * C.zxzx() * A[5][2],
|
||||
// yyzz
|
||||
A[0][1] * C.xxxx() * A[0][2] +
|
||||
A[1][1] * C.xxyy() * A[0][2] +
|
||||
A[2][1] * C.xxzz() * A[0][2] +
|
||||
A[0][1] * C.xxyy() * A[1][2] +
|
||||
A[1][1] * C.yyyy() * A[1][2] +
|
||||
A[2][1] * C.yyzz() * A[1][2] +
|
||||
A[0][1] * C.xxzz() * A[2][2] +
|
||||
A[1][1] * C.yyzz() * A[2][2] +
|
||||
A[2][1] * C.zzzz() * A[2][2] +
|
||||
A[3][1] * C.xyxy() * A[3][2] +
|
||||
A[4][1] * C.yzyz() * A[4][2] +
|
||||
A[5][1] * C.zxzx() * A[5][2],
|
||||
|
||||
// zzzz
|
||||
A[0][2] * C.xxxx() * A[0][2] +
|
||||
A[1][2] * C.xxyy() * A[0][2] +
|
||||
A[2][2] * C.xxzz() * A[0][2] +
|
||||
A[0][2] * C.xxyy() * A[1][2] +
|
||||
A[1][2] * C.yyyy() * A[1][2] +
|
||||
A[2][2] * C.yyzz() * A[1][2] +
|
||||
A[0][2] * C.xxzz() * A[2][2] +
|
||||
A[1][2] * C.yyzz() * A[2][2] +
|
||||
A[2][2] * C.zzzz() * A[2][2] +
|
||||
A[3][2] * C.xyxy() * A[3][2] +
|
||||
A[4][2] * C.yzyz() * A[4][2] +
|
||||
A[5][2] * C.zxzx() * A[5][2],
|
||||
// zzzz
|
||||
A[0][2] * C.xxxx() * A[0][2] +
|
||||
A[1][2] * C.xxyy() * A[0][2] +
|
||||
A[2][2] * C.xxzz() * A[0][2] +
|
||||
A[0][2] * C.xxyy() * A[1][2] +
|
||||
A[1][2] * C.yyyy() * A[1][2] +
|
||||
A[2][2] * C.yyzz() * A[1][2] +
|
||||
A[0][2] * C.xxzz() * A[2][2] +
|
||||
A[1][2] * C.yyzz() * A[2][2] +
|
||||
A[2][2] * C.zzzz() * A[2][2] +
|
||||
A[3][2] * C.xyxy() * A[3][2] +
|
||||
A[4][2] * C.yzyz() * A[4][2] +
|
||||
A[5][2] * C.zxzx() * A[5][2],
|
||||
|
||||
// xyxy
|
||||
A[0][3] * C.xxxx() * A[0][3] +
|
||||
A[1][3] * C.xxyy() * A[0][3] +
|
||||
A[2][3] * C.xxzz() * A[0][3] +
|
||||
A[0][3] * C.xxyy() * A[1][3] +
|
||||
A[1][3] * C.yyyy() * A[1][3] +
|
||||
A[2][3] * C.yyzz() * A[1][3] +
|
||||
A[0][3] * C.xxzz() * A[2][3] +
|
||||
A[1][3] * C.yyzz() * A[2][3] +
|
||||
A[2][3] * C.zzzz() * A[2][3] +
|
||||
A[3][3] * C.xyxy() * A[3][3] +
|
||||
A[4][3] * C.yzyz() * A[4][3] +
|
||||
A[5][3] * C.zxzx() * A[5][3],
|
||||
// xyxy
|
||||
A[0][3] * C.xxxx() * A[0][3] +
|
||||
A[1][3] * C.xxyy() * A[0][3] +
|
||||
A[2][3] * C.xxzz() * A[0][3] +
|
||||
A[0][3] * C.xxyy() * A[1][3] +
|
||||
A[1][3] * C.yyyy() * A[1][3] +
|
||||
A[2][3] * C.yyzz() * A[1][3] +
|
||||
A[0][3] * C.xxzz() * A[2][3] +
|
||||
A[1][3] * C.yyzz() * A[2][3] +
|
||||
A[2][3] * C.zzzz() * A[2][3] +
|
||||
A[3][3] * C.xyxy() * A[3][3] +
|
||||
A[4][3] * C.yzyz() * A[4][3] +
|
||||
A[5][3] * C.zxzx() * A[5][3],
|
||||
|
||||
// yzyz
|
||||
A[0][4] * C.xxxx() * A[0][4] +
|
||||
A[1][4] * C.xxyy() * A[0][4] +
|
||||
A[2][4] * C.xxzz() * A[0][4] +
|
||||
A[0][4] * C.xxyy() * A[1][4] +
|
||||
A[1][4] * C.yyyy() * A[1][4] +
|
||||
A[2][4] * C.yyzz() * A[1][4] +
|
||||
A[0][4] * C.xxzz() * A[2][4] +
|
||||
A[1][4] * C.yyzz() * A[2][4] +
|
||||
A[2][4] * C.zzzz() * A[2][4] +
|
||||
A[3][4] * C.xyxy() * A[3][4] +
|
||||
A[4][4] * C.yzyz() * A[4][4] +
|
||||
A[5][4] * C.zxzx() * A[5][4],
|
||||
// yzyz
|
||||
A[0][4] * C.xxxx() * A[0][4] +
|
||||
A[1][4] * C.xxyy() * A[0][4] +
|
||||
A[2][4] * C.xxzz() * A[0][4] +
|
||||
A[0][4] * C.xxyy() * A[1][4] +
|
||||
A[1][4] * C.yyyy() * A[1][4] +
|
||||
A[2][4] * C.yyzz() * A[1][4] +
|
||||
A[0][4] * C.xxzz() * A[2][4] +
|
||||
A[1][4] * C.yyzz() * A[2][4] +
|
||||
A[2][4] * C.zzzz() * A[2][4] +
|
||||
A[3][4] * C.xyxy() * A[3][4] +
|
||||
A[4][4] * C.yzyz() * A[4][4] +
|
||||
A[5][4] * C.zxzx() * A[5][4],
|
||||
|
||||
// zxzx
|
||||
A[0][5] * C.xxxx() * A[0][5] +
|
||||
A[1][5] * C.xxyy() * A[0][5] +
|
||||
A[2][5] * C.xxzz() * A[0][5] +
|
||||
A[0][5] * C.xxyy() * A[1][5] +
|
||||
A[1][5] * C.yyyy() * A[1][5] +
|
||||
A[2][5] * C.yyzz() * A[1][5] +
|
||||
A[0][5] * C.xxzz() * A[2][5] +
|
||||
A[1][5] * C.yyzz() * A[2][5] +
|
||||
A[2][5] * C.zzzz() * A[2][5] +
|
||||
A[3][5] * C.xyxy() * A[3][5] +
|
||||
A[4][5] * C.yzyz() * A[4][5] +
|
||||
A[5][5] * C.zxzx() * A[5][5]
|
||||
);
|
||||
// zxzx
|
||||
A[0][5] * C.xxxx() * A[0][5] +
|
||||
A[1][5] * C.xxyy() * A[0][5] +
|
||||
A[2][5] * C.xxzz() * A[0][5] +
|
||||
A[0][5] * C.xxyy() * A[1][5] +
|
||||
A[1][5] * C.yyyy() * A[1][5] +
|
||||
A[2][5] * C.yyzz() * A[1][5] +
|
||||
A[0][5] * C.xxzz() * A[2][5] +
|
||||
A[1][5] * C.yyzz() * A[2][5] +
|
||||
A[2][5] * C.zzzz() * A[2][5] +
|
||||
A[3][5] * C.xyxy() * A[3][5] +
|
||||
A[4][5] * C.yzyz() * A[4][5] +
|
||||
A[5][5] * C.zxzx() * A[5][5]
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
template<class Cmpt>
|
||||
inline DiagTensor<Cmpt> transform
|
||||
(
|
||||
const symmTensor& stt,
|
||||
const DiagTensor<Cmpt>& dt
|
||||
const symmTensor& stt,
|
||||
const DiagTensor<Cmpt>& dt
|
||||
)
|
||||
{
|
||||
return dt;
|
||||
return dt;
|
||||
}
|
||||
|
||||
|
||||
|
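The SymmTensor4thOrder transform above rotates the orthotropic stiffness by packing it into 6x6 notation; the sqrt(2) weights on the shear rows and columns are what make the 6x6 contraction reproduce the full fourth-order one. Schematically, in one common Mandel-type convention (a summary of the rule the table of A coefficients implements, not a line-by-line transcription of the code):

    % 6x6 rotation of a fourth-order stiffness:
    % pack C into a symmetric 6x6 matrix \tilde{C}, build A from products of
    % the transformation components (sqrt(2) on the shear entries), then
    \tilde{C}'_{af} \;=\; A_{ba}\,\tilde{C}_{bd}\,A_{df}
    \qquad\Longleftrightarrow\qquad
    \tilde{C}' \;=\; A^{\mathsf{T}}\,\tilde{C}\,A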
@ -328,21 +328,26 @@ inline symmTensor transformMask<symmTensor>(const symmTensor& st)
|
|||
|
||||
|
||||
template<>
|
||||
inline symmTensor4thOrder transformMask<symmTensor4thOrder>(const symmTensor& st)
|
||||
inline symmTensor4thOrder transformMask<symmTensor4thOrder>
|
||||
(
|
||||
const symmTensor& st
|
||||
)
|
||||
{
|
||||
notImplemented("symmTransform.H\n"
|
||||
"template<>\n"
|
||||
"inline symmTensor4thOrder transformMask<symmTensor4thOrder>(const symmTensor& st)\n"
|
||||
"not implemented");
|
||||
notImplemented
|
||||
(
|
||||
"template<>\n"
|
||||
"inline symmTensor4thOrder transformMask<symmTensor4thOrder>"
|
||||
"(const symmTensor& st)"
|
||||
);
|
||||
|
||||
return symmTensor4thOrder::zero;
|
||||
return symmTensor4thOrder::zero;
|
||||
}
|
||||
|
||||
|
||||
template<>
|
||||
inline diagTensor transformMask<diagTensor>(const symmTensor& st)
|
||||
{
|
||||
return diagTensor(st.xx(), st.yy(), st.zz());
|
||||
return diagTensor(st.xx(), st.yy(), st.zz());
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -146,151 +146,156 @@ inline SphericalTensor<Cmpt> transform
|
|||
|
||||
|
||||
template<class Cmpt>
|
||||
inline SymmTensor4thOrder<Cmpt> transform(const tensor& L, const SymmTensor4thOrder<Cmpt>& C)
|
||||
inline SymmTensor4thOrder<Cmpt> transform
|
||||
(
|
||||
const tensor& L,
|
||||
const SymmTensor4thOrder<Cmpt>& C
|
||||
)
|
||||
{
|
||||
//- represent fourth order tensors in 6x6 notation then rotation is given by
|
||||
//- C_rotated_af = A_ba * C_cd * A_ef
|
||||
//- where A is a function of L
|
||||
//- represent fourth order tensors in 6x6 notation. Rotation is given by
|
||||
//- C_rotated_af = A_ba * C_cd * A_ef
|
||||
//- where A is a function of L
|
||||
|
||||
const double s = ::sqrt(2);
|
||||
const double A[6][6] =
|
||||
const scalar s = ::sqrt(2);
|
||||
const scalar A[6][6] =
|
||||
{
|
||||
{ L.xx()*L.xx(), L.xy()*L.xy(), L.xz()*L.xz(), s*L.xx()*L.xy(), s*L.xy()*L.xz(), s*L.xz()*L.xx() },
|
||||
{ L.yx()*L.yx(), L.yy()*L.yy(), L.yz()*L.yz(), s*L.yx()*L.yy(), s*L.yy()*L.yz(), s*L.yz()*L.yx() },
|
||||
{ L.zx()*L.zx(), L.zy()*L.zy(), L.zz()*L.zz(), s*L.zx()*L.zy(), s*L.zy()*L.zz(), s*L.zz()*L.zx() },
|
||||
{ s*L.xx()*L.yx(), s*L.xy()*L.yy(), s*L.xz()*L.yz(), (L.xy()*L.yx()+L.xx()*L.yy()), (L.xz()*L.yy()+L.xy()*L.yz()), (L.xx()*L.yz()+L.xz()*L.yx()) },
|
||||
{ s*L.yx()*L.zx(), s*L.yy()*L.zy(), s*L.yz()*L.zz(), (L.yy()*L.zx()+L.yx()*L.zy()), (L.yz()*L.zy()+L.yy()*L.zz()), (L.yx()*L.zz()+L.yz()*L.zx()) },
|
||||
{ s*L.zx()*L.xx(), s*L.zy()*L.xy(), s*L.zz()*L.xz(), (L.zy()*L.xx()+L.zx()*L.xy()), (L.zz()*L.xy()+L.zy()*L.xz()), (L.zx()*L.xz()+L.zz()*L.xx()) }
|
||||
{ L.xx()*L.xx(), L.xy()*L.xy(), L.xz()*L.xz(), s*L.xx()*L.xy(), s*L.xy()*L.xz(), s*L.xz()*L.xx() },
|
||||
{ L.yx()*L.yx(), L.yy()*L.yy(), L.yz()*L.yz(), s*L.yx()*L.yy(), s*L.yy()*L.yz(), s*L.yz()*L.yx() },
|
||||
{ L.zx()*L.zx(), L.zy()*L.zy(), L.zz()*L.zz(), s*L.zx()*L.zy(), s*L.zy()*L.zz(), s*L.zz()*L.zx() },
|
||||
{ s*L.xx()*L.yx(), s*L.xy()*L.yy(), s*L.xz()*L.yz(), (L.xy()*L.yx()+L.xx()*L.yy()), (L.xz()*L.yy()+L.xy()*L.yz()), (L.xx()*L.yz()+L.xz()*L.yx()) },
|
||||
{ s*L.yx()*L.zx(), s*L.yy()*L.zy(), s*L.yz()*L.zz(), (L.yy()*L.zx()+L.yx()*L.zy()), (L.yz()*L.zy()+L.yy()*L.zz()), (L.yx()*L.zz()+L.yz()*L.zx()) },
|
||||
{ s*L.zx()*L.xx(), s*L.zy()*L.xy(), s*L.zz()*L.xz(), (L.zy()*L.xx()+L.zx()*L.xy()), (L.zz()*L.xy()+L.zy()*L.xz()), (L.zx()*L.xz()+L.zz()*L.xx()) }
|
||||
};
|
||||
|
||||
return symmTensor4thOrder
|
||||
return symmTensor4thOrder
|
||||
(
|
||||
// xxxx
|
||||
A[0][0] * C.xxxx() * A[0][0] +
|
||||
A[1][0] * C.xxyy() * A[0][0] +
|
||||
A[2][0] * C.xxzz() * A[0][0] +
|
||||
A[0][0] * C.xxyy() * A[1][0] +
|
||||
A[1][0] * C.yyyy() * A[1][0] +
|
||||
A[2][0] * C.yyzz() * A[1][0] +
|
||||
A[0][0] * C.xxzz() * A[2][0] +
|
||||
A[1][0] * C.yyzz() * A[2][0] +
|
||||
A[2][0] * C.zzzz() * A[2][0] +
|
||||
A[3][0] * C.xyxy() * A[3][0] +
|
||||
A[4][0] * C.yzyz() * A[4][0] +
|
||||
A[5][0] * C.zxzx() * A[5][0],
|
||||
// xxxx
|
||||
A[0][0] * C.xxxx() * A[0][0] +
|
||||
A[1][0] * C.xxyy() * A[0][0] +
|
||||
A[2][0] * C.xxzz() * A[0][0] +
|
||||
A[0][0] * C.xxyy() * A[1][0] +
|
||||
A[1][0] * C.yyyy() * A[1][0] +
|
||||
A[2][0] * C.yyzz() * A[1][0] +
|
||||
A[0][0] * C.xxzz() * A[2][0] +
|
||||
A[1][0] * C.yyzz() * A[2][0] +
|
||||
A[2][0] * C.zzzz() * A[2][0] +
|
||||
A[3][0] * C.xyxy() * A[3][0] +
|
||||
A[4][0] * C.yzyz() * A[4][0] +
|
||||
A[5][0] * C.zxzx() * A[5][0],
|
||||
|
||||
// xxyy
|
||||
A[0][0] * C.xxxx() * A[0][1] +
|
||||
A[1][0] * C.xxyy() * A[0][1] +
|
||||
A[2][0] * C.xxzz() * A[0][1] +
|
||||
A[0][0] * C.xxyy() * A[1][1] +
|
||||
A[1][0] * C.yyyy() * A[1][1] +
|
||||
A[2][0] * C.yyzz() * A[1][1] +
|
||||
A[0][0] * C.xxzz() * A[2][1] +
|
||||
A[1][0] * C.yyzz() * A[2][1] +
|
||||
A[2][0] * C.zzzz() * A[2][1] +
|
||||
A[3][0] * C.xyxy() * A[3][1] +
|
||||
A[4][0] * C.yzyz() * A[4][1] +
|
||||
A[5][0] * C.zxzx() * A[5][1],
|
||||
// xxyy
|
||||
A[0][0] * C.xxxx() * A[0][1] +
|
||||
A[1][0] * C.xxyy() * A[0][1] +
|
||||
A[2][0] * C.xxzz() * A[0][1] +
|
||||
A[0][0] * C.xxyy() * A[1][1] +
|
||||
A[1][0] * C.yyyy() * A[1][1] +
|
||||
A[2][0] * C.yyzz() * A[1][1] +
|
||||
A[0][0] * C.xxzz() * A[2][1] +
|
||||
A[1][0] * C.yyzz() * A[2][1] +
|
||||
A[2][0] * C.zzzz() * A[2][1] +
|
||||
A[3][0] * C.xyxy() * A[3][1] +
|
||||
A[4][0] * C.yzyz() * A[4][1] +
|
||||
A[5][0] * C.zxzx() * A[5][1],
|
||||
|
||||
// xxzz
|
||||
A[0][0] * C.xxxx() * A[0][2] +
|
||||
A[1][0] * C.xxyy() * A[0][2] +
|
||||
A[2][0] * C.xxzz() * A[0][2] +
|
||||
A[0][0] * C.xxyy() * A[1][2] +
|
||||
A[1][0] * C.yyyy() * A[1][2] +
|
||||
A[2][0] * C.yyzz() * A[1][2] +
|
||||
A[0][0] * C.xxzz() * A[2][2] +
|
||||
A[1][0] * C.yyzz() * A[2][2] +
|
||||
A[2][0] * C.zzzz() * A[2][2] +
|
||||
A[3][0] * C.xyxy() * A[3][2] +
|
||||
A[4][0] * C.yzyz() * A[4][2] +
|
||||
A[5][0] * C.zxzx() * A[5][2],
|
||||
// xxzz
|
||||
A[0][0] * C.xxxx() * A[0][2] +
|
||||
A[1][0] * C.xxyy() * A[0][2] +
|
||||
A[2][0] * C.xxzz() * A[0][2] +
|
||||
A[0][0] * C.xxyy() * A[1][2] +
|
||||
A[1][0] * C.yyyy() * A[1][2] +
|
||||
A[2][0] * C.yyzz() * A[1][2] +
|
||||
A[0][0] * C.xxzz() * A[2][2] +
|
||||
A[1][0] * C.yyzz() * A[2][2] +
|
||||
A[2][0] * C.zzzz() * A[2][2] +
|
||||
A[3][0] * C.xyxy() * A[3][2] +
|
||||
A[4][0] * C.yzyz() * A[4][2] +
|
||||
A[5][0] * C.zxzx() * A[5][2],
|
||||
|
||||
// yyyy
|
||||
A[0][1] * C.xxxx() * A[0][1] +
|
||||
A[1][1] * C.xxyy() * A[0][1] +
|
||||
A[2][1] * C.xxzz() * A[0][1] +
|
||||
A[0][1] * C.xxyy() * A[1][1] +
|
||||
A[1][1] * C.yyyy() * A[1][1] +
|
||||
A[2][1] * C.yyzz() * A[1][1] +
|
||||
A[0][1] * C.xxzz() * A[2][1] +
|
||||
A[1][1] * C.yyzz() * A[2][1] +
|
||||
A[2][1] * C.zzzz() * A[2][1] +
|
||||
A[3][1] * C.xyxy() * A[3][1] +
|
||||
A[4][1] * C.yzyz() * A[4][1] +
|
||||
A[5][1] * C.zxzx() * A[5][1],
|
||||
// yyyy
|
||||
A[0][1] * C.xxxx() * A[0][1] +
|
||||
A[1][1] * C.xxyy() * A[0][1] +
|
||||
A[2][1] * C.xxzz() * A[0][1] +
|
||||
A[0][1] * C.xxyy() * A[1][1] +
|
||||
A[1][1] * C.yyyy() * A[1][1] +
|
||||
A[2][1] * C.yyzz() * A[1][1] +
|
||||
A[0][1] * C.xxzz() * A[2][1] +
|
||||
A[1][1] * C.yyzz() * A[2][1] +
|
||||
A[2][1] * C.zzzz() * A[2][1] +
|
||||
A[3][1] * C.xyxy() * A[3][1] +
|
||||
A[4][1] * C.yzyz() * A[4][1] +
|
||||
A[5][1] * C.zxzx() * A[5][1],
|
||||
|
||||
// yyzz
|
||||
A[0][1] * C.xxxx() * A[0][2] +
|
||||
A[1][1] * C.xxyy() * A[0][2] +
|
||||
A[2][1] * C.xxzz() * A[0][2] +
|
||||
A[0][1] * C.xxyy() * A[1][2] +
|
||||
A[1][1] * C.yyyy() * A[1][2] +
|
||||
A[2][1] * C.yyzz() * A[1][2] +
|
||||
A[0][1] * C.xxzz() * A[2][2] +
|
||||
A[1][1] * C.yyzz() * A[2][2] +
|
||||
A[2][1] * C.zzzz() * A[2][2] +
|
||||
A[3][1] * C.xyxy() * A[3][2] +
|
||||
A[4][1] * C.yzyz() * A[4][2] +
|
||||
A[5][1] * C.zxzx() * A[5][2],
|
||||
// yyzz
|
||||
A[0][1] * C.xxxx() * A[0][2] +
|
||||
A[1][1] * C.xxyy() * A[0][2] +
|
||||
A[2][1] * C.xxzz() * A[0][2] +
|
||||
A[0][1] * C.xxyy() * A[1][2] +
|
||||
A[1][1] * C.yyyy() * A[1][2] +
|
||||
A[2][1] * C.yyzz() * A[1][2] +
|
||||
A[0][1] * C.xxzz() * A[2][2] +
|
||||
A[1][1] * C.yyzz() * A[2][2] +
|
||||
A[2][1] * C.zzzz() * A[2][2] +
|
||||
A[3][1] * C.xyxy() * A[3][2] +
|
||||
A[4][1] * C.yzyz() * A[4][2] +
|
||||
A[5][1] * C.zxzx() * A[5][2],
|
||||
|
||||
// zzzz
|
||||
A[0][2] * C.xxxx() * A[0][2] +
|
||||
A[1][2] * C.xxyy() * A[0][2] +
|
||||
A[2][2] * C.xxzz() * A[0][2] +
|
||||
A[0][2] * C.xxyy() * A[1][2] +
|
||||
A[1][2] * C.yyyy() * A[1][2] +
|
||||
A[2][2] * C.yyzz() * A[1][2] +
|
||||
A[0][2] * C.xxzz() * A[2][2] +
|
||||
A[1][2] * C.yyzz() * A[2][2] +
|
||||
A[2][2] * C.zzzz() * A[2][2] +
|
||||
A[3][2] * C.xyxy() * A[3][2] +
|
||||
A[4][2] * C.yzyz() * A[4][2] +
|
||||
A[5][2] * C.zxzx() * A[5][2],
|
||||
// zzzz
|
||||
A[0][2] * C.xxxx() * A[0][2] +
|
||||
A[1][2] * C.xxyy() * A[0][2] +
|
||||
A[2][2] * C.xxzz() * A[0][2] +
|
||||
A[0][2] * C.xxyy() * A[1][2] +
|
||||
A[1][2] * C.yyyy() * A[1][2] +
|
||||
A[2][2] * C.yyzz() * A[1][2] +
|
||||
A[0][2] * C.xxzz() * A[2][2] +
|
||||
A[1][2] * C.yyzz() * A[2][2] +
|
||||
A[2][2] * C.zzzz() * A[2][2] +
|
||||
A[3][2] * C.xyxy() * A[3][2] +
|
||||
A[4][2] * C.yzyz() * A[4][2] +
|
||||
A[5][2] * C.zxzx() * A[5][2],
|
||||
|
||||
// xyxy
|
||||
A[0][3] * C.xxxx() * A[0][3] +
|
||||
A[1][3] * C.xxyy() * A[0][3] +
|
||||
A[2][3] * C.xxzz() * A[0][3] +
|
||||
A[0][3] * C.xxyy() * A[1][3] +
|
||||
A[1][3] * C.yyyy() * A[1][3] +
|
||||
A[2][3] * C.yyzz() * A[1][3] +
|
||||
A[0][3] * C.xxzz() * A[2][3] +
|
||||
A[1][3] * C.yyzz() * A[2][3] +
|
||||
A[2][3] * C.zzzz() * A[2][3] +
|
||||
A[3][3] * C.xyxy() * A[3][3] +
|
||||
A[4][3] * C.yzyz() * A[4][3] +
|
||||
A[5][3] * C.zxzx() * A[5][3],
|
||||
// xyxy
|
||||
A[0][3] * C.xxxx() * A[0][3] +
|
||||
A[1][3] * C.xxyy() * A[0][3] +
|
||||
A[2][3] * C.xxzz() * A[0][3] +
|
||||
A[0][3] * C.xxyy() * A[1][3] +
|
||||
A[1][3] * C.yyyy() * A[1][3] +
|
||||
A[2][3] * C.yyzz() * A[1][3] +
|
||||
A[0][3] * C.xxzz() * A[2][3] +
|
||||
A[1][3] * C.yyzz() * A[2][3] +
|
||||
A[2][3] * C.zzzz() * A[2][3] +
|
||||
A[3][3] * C.xyxy() * A[3][3] +
|
||||
A[4][3] * C.yzyz() * A[4][3] +
|
||||
A[5][3] * C.zxzx() * A[5][3],
|
||||
|
||||
// yzyz
|
||||
A[0][4] * C.xxxx() * A[0][4] +
|
||||
A[1][4] * C.xxyy() * A[0][4] +
|
||||
A[2][4] * C.xxzz() * A[0][4] +
|
||||
A[0][4] * C.xxyy() * A[1][4] +
|
||||
A[1][4] * C.yyyy() * A[1][4] +
|
||||
A[2][4] * C.yyzz() * A[1][4] +
|
||||
A[0][4] * C.xxzz() * A[2][4] +
|
||||
A[1][4] * C.yyzz() * A[2][4] +
|
||||
A[2][4] * C.zzzz() * A[2][4] +
|
||||
A[3][4] * C.xyxy() * A[3][4] +
|
||||
A[4][4] * C.yzyz() * A[4][4] +
|
||||
A[5][4] * C.zxzx() * A[5][4],
|
||||
// yzyz
|
||||
A[0][4] * C.xxxx() * A[0][4] +
|
||||
A[1][4] * C.xxyy() * A[0][4] +
|
||||
A[2][4] * C.xxzz() * A[0][4] +
|
||||
A[0][4] * C.xxyy() * A[1][4] +
|
||||
A[1][4] * C.yyyy() * A[1][4] +
|
||||
A[2][4] * C.yyzz() * A[1][4] +
|
||||
A[0][4] * C.xxzz() * A[2][4] +
|
||||
A[1][4] * C.yyzz() * A[2][4] +
|
||||
A[2][4] * C.zzzz() * A[2][4] +
|
||||
A[3][4] * C.xyxy() * A[3][4] +
|
||||
A[4][4] * C.yzyz() * A[4][4] +
|
||||
A[5][4] * C.zxzx() * A[5][4],
|
||||
|
||||
// zxzx
|
||||
A[0][5] * C.xxxx() * A[0][5] +
|
||||
A[1][5] * C.xxyy() * A[0][5] +
|
||||
A[2][5] * C.xxzz() * A[0][5] +
|
||||
A[0][5] * C.xxyy() * A[1][5] +
|
||||
A[1][5] * C.yyyy() * A[1][5] +
|
||||
A[2][5] * C.yyzz() * A[1][5] +
|
||||
A[0][5] * C.xxzz() * A[2][5] +
|
||||
A[1][5] * C.yyzz() * A[2][5] +
|
||||
A[2][5] * C.zzzz() * A[2][5] +
|
||||
A[3][5] * C.xyxy() * A[3][5] +
|
||||
A[4][5] * C.yzyz() * A[4][5] +
|
||||
A[5][5] * C.zxzx() * A[5][5]
|
||||
);
|
||||
// zxzx
|
||||
A[0][5] * C.xxxx() * A[0][5] +
|
||||
A[1][5] * C.xxyy() * A[0][5] +
|
||||
A[2][5] * C.xxzz() * A[0][5] +
|
||||
A[0][5] * C.xxyy() * A[1][5] +
|
||||
A[1][5] * C.yyyy() * A[1][5] +
|
||||
A[2][5] * C.yyzz() * A[1][5] +
|
||||
A[0][5] * C.xxzz() * A[2][5] +
|
||||
A[1][5] * C.yyzz() * A[2][5] +
|
||||
A[2][5] * C.zzzz() * A[2][5] +
|
||||
A[3][5] * C.xyxy() * A[3][5] +
|
||||
A[4][5] * C.yzyz() * A[4][5] +
|
||||
A[5][5] * C.zxzx() * A[5][5]
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -146,6 +146,8 @@ Foam::tmp<Foam::labelField> Foam::MGridGenGAMGAgglomeration::agglomerate
|
|||
List<int> finalAgglom(nFineCells);
|
||||
int nMoves = -1;
|
||||
|
||||
# ifdef WM_DP
|
||||
|
||||
MGridGen
|
||||
(
|
||||
nFineCells,
|
||||
|
@ -162,6 +164,46 @@ Foam::tmp<Foam::labelField> Foam::MGridGenGAMGAgglomeration::agglomerate
|
|||
finalAgglom.begin()
|
||||
);
|
||||
|
||||
# else
|
||||
|
||||
// Conversion of type for MGridGen interface
|
||||
|
||||
Field<realtype> dblVols(V.size());
|
||||
forAll (dblVols, i)
|
||||
{
|
||||
dblVols[i] = V[i];
|
||||
}
|
||||
|
||||
Field<realtype> dblAreas(Sb.size());
|
||||
forAll (dblAreas, i)
|
||||
{
|
||||
dblAreas[i] = Sb[i];
|
||||
}
|
||||
|
||||
Field<realtype> dblFaceWeights(faceWeights.size());
|
||||
forAll (dblFaceWeights, i)
|
||||
{
|
||||
dblFaceWeights[i] = faceWeights[i];
|
||||
}
|
||||
|
||||
MGridGen
|
||||
(
|
||||
nFineCells,
|
||||
cellCellOffsets.begin(),
|
||||
dblVols.begin(),
|
||||
dblAreas.begin(),
|
||||
cellCells.begin(),
|
||||
dblFaceWeights.begin(),
|
||||
minSize,
|
||||
maxSize,
|
||||
options.begin(),
|
||||
&nMoves,
|
||||
&nCoarseCells,
|
||||
finalAgglom.begin()
|
||||
);
|
||||
|
||||
# endif
|
||||
|
||||
return tmp<labelField>(new labelField(finalAgglom));
|
||||
}
|
||||
|
||||
|
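The new #else branch above copies the cell volumes, boundary areas and face weights into Field<realtype> buffers before handing them to MGridGen: the external C library is built for plain double (realtype), so in a long double or single precision build the scalar fields cannot be passed straight through. The same conversion pattern with standard containers, as a sketch (names are assumptions, not the library interface):

    // Sketch: explicit copy from the build's scalar type into the double
    // buffer an external C API expects.
    #include <cstddef>
    #include <vector>

    typedef long double scalar;   // e.g. a long double build
    typedef double realtype;      // precision the external library was built for

    std::vector<realtype> toRealtype(const std::vector<scalar>& field)
    {
        std::vector<realtype> buffer(field.size());

        for (std::size_t i = 0; i < field.size(); ++i)
        {
            buffer[i] = static_cast<realtype>(field[i]);   // explicit conversion
        }

        return buffer;
    }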
|
|
@ -1,4 +1,4 @@
|
|||
/*---------------------------------------------------------------------------*\
|
||||
/*---------------------------------------------------------------------------*\
|
||||
========= |
|
||||
\\ / F ield | foam-extend: Open Source CFD
|
||||
\\ / O peration |
|
||||
|
|
|
@ -729,7 +729,7 @@ void solidCohesiveFvPatchVectorField::updateCoeffs()
|
|||
unloadingDeltaEff_,
|
||||
Foam::sqrt
|
||||
(
|
||||
max(deltaN_, 0.0)*max(deltaN_, 0.0) + deltaS_*deltaS_
|
||||
sqr(max(deltaN_, scalar(0))) + sqr(deltaS_)
|
||||
)
|
||||
);
|
||||
|
||||
|
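The rewritten argument of Foam::sqrt computes the same effective opening as before, now written with sqr() and a scalar(0) bound so that the expression stays well-typed in long double builds. In symbols, the quantity compared against unloadingDeltaEff_ is

    \delta_{\mathrm{eff}} \;=\; \sqrt{\,\max(\delta_N,\,0)^{2} \;+\; \delta_S^{2}\,}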
|
|
@ -261,8 +261,8 @@ void dirichletNeumann::correct
|
|||
slaveDispMag =
|
||||
localSlaveInterpolator.pointToFaceInterpolate<scalar>
|
||||
(
|
||||
min(slavePointPenetration, 0.0)
|
||||
);
|
||||
min(slavePointPenetration, scalar(0))
|
||||
);
|
||||
|
||||
// for visualisation
|
||||
slaveContactPointGap() = slavePointPenetration;
|
||||
|
@ -277,11 +277,14 @@ void dirichletNeumann::correct
|
|||
= mesh.boundaryMesh()[slavePatchIndex].start();
|
||||
forAll(slaveDispMag, facei)
|
||||
{
|
||||
slaveDispMag[facei] =
|
||||
globalSlavePenetration
|
||||
[
|
||||
mesh.faceZones()[slaveFaceZoneID()].whichFace(slavePatchStart + facei)
|
||||
];
|
||||
slaveDispMag[facei] =
|
||||
globalSlavePenetration
|
||||
[
|
||||
mesh.faceZones()[slaveFaceZoneID()].whichFace
|
||||
(
|
||||
slavePatchStart + facei
|
||||
)
|
||||
];
|
||||
|
||||
//- when the master surface surrounds the slave (like the pelvis
|
||||
// and femur head) then
|
||||
|
@ -317,7 +320,7 @@ void dirichletNeumann::correct
|
|||
slaveContactPointGap() = slavePointPenetration;
|
||||
|
||||
// set all positive penetrations to zero
|
||||
slaveDispMag = min(slaveDispMag,0.0);
|
||||
slaveDispMag = min(slaveDispMag, scalar(0));
|
||||
|
||||
minSlavePointPenetration = min(slavePointPenetration);
|
||||
reduce(minSlavePointPenetration, minOp<scalar>());
|
||||
|
|
|
@ -77,7 +77,6 @@ Foam::tmp<Foam::fvVectorMatrix> Foam::Maxwell::divTau(volVectorField& U) const
|
|||
- fvc::laplacian(etaPEff/rho_, U, "laplacian(etaPEff,U)")
|
||||
+ fvm::laplacian( (etaPEff + etaS_)/rho_, U, "laplacian(etaPEff+etaS,U)")
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -86,6 +85,9 @@ void Foam::Maxwell::correct()
|
|||
// Velocity gradient tensor
|
||||
volTensorField L = fvc::grad(U());
|
||||
|
||||
// Convected derivative term
|
||||
volTensorField C = tau_ & L;
|
||||
|
||||
// Twice the rate of deformation tensor
|
||||
volSymmTensorField twoD = twoSymm(L);
|
||||
|
||||
|
@ -93,9 +95,11 @@ void Foam::Maxwell::correct()
|
|||
fvSymmTensorMatrix tauEqn
|
||||
(
|
||||
fvm::ddt(tau_)
|
||||
+ fvm::div(phi(), tau_)
|
||||
==
|
||||
etaP_/lambda_*twoD
|
||||
- fvm::Sp( 1/lambda_, tau_ )
|
||||
+ twoSymm(C)
|
||||
- fvm::Sp( 1/lambda_, tau_)
|
||||
);
|
||||
|
||||
tauEqn.relax();
|
||||
|
|
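The tau equation above assembles the upper-convected Maxwell model: the relaxation term is implicit (fvm::Sp), while the convected-derivative contribution twoSymm(tau & L) stays explicit. Written as a continuous equation it reads, roughly and in one common convention for the velocity gradient,

    % Upper-convected Maxwell constitutive relation (schematic):
    \frac{\partial \boldsymbol{\tau}}{\partial t}
      + \nabla\!\cdot(\mathbf{u}\,\boldsymbol{\tau})
      - \bigl(\boldsymbol{\tau}\cdot\nabla\mathbf{u}
            + \nabla\mathbf{u}^{\mathsf T}\!\cdot\boldsymbol{\tau}\bigr)
      \;=\;
      \frac{\eta_P}{\lambda}\bigl(\nabla\mathbf{u} + \nabla\mathbf{u}^{\mathsf T}\bigr)
      \;-\; \frac{1}{\lambda}\,\boldsymbol{\tau}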
|
@ -18,6 +18,8 @@ NonlinearKEShih/NonlinearKEShih.C
|
|||
LienLeschzinerLowRe/LienLeschzinerLowRe.C
|
||||
LamBremhorstKE/LamBremhorstKE.C
|
||||
|
||||
coupledKEpsilon/coupledKEpsilon.C
|
||||
|
||||
/* Wall functions */
|
||||
wallFunctions = derivedFvPatchFields/wallFunctions
|
||||
|
||||
|
|
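Adding coupledKEpsilon/coupledKEpsilon.C to Make/files compiles the new model into the RAS library; the addToRunTimeSelectionTable call in the source added below is what makes it selectable by name from the case dictionaries. A compact sketch of that factory idea in plain C++ (stand-in names, not the OpenFOAM macros):

    // Sketch of run-time selection: constructors registered by name,
    // looked up from a keyword when the solver starts.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct RASModelBase
    {
        virtual ~RASModelBase() {}
        virtual void correct() = 0;
    };

    typedef std::function<std::unique_ptr<RASModelBase>()> modelCtor;

    std::map<std::string, modelCtor>& modelTable()
    {
        static std::map<std::string, modelCtor> table;
        return table;
    }

    struct CoupledKE : RASModelBase
    {
        void correct() { std::cout << "solving coupled k-epsilon\n"; }
    };

    // Stand-in for addToRunTimeSelectionTable(RASModel, coupledKEpsilon, ...)
    const bool registered =
        (modelTable()["coupledKEpsilon"] =
             [] { return std::unique_ptr<RASModelBase>(new CoupledKE); },
         true);

    int main()
    {
        // Stand-in for reading the RASModel keyword from RASProperties
        std::unique_ptr<RASModelBase> model = modelTable().at("coupledKEpsilon")();
        model->correct();
        return 0;
    }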
|
@ -0,0 +1,323 @@
|
|||
/*---------------------------------------------------------------------------*\
|
||||
========= |
|
||||
\\ / F ield | foam-extend: Open Source CFD
|
||||
\\ / O peration |
|
||||
\\ / A nd | For copyright notice see file Copyright
|
||||
\\/ M anipulation |
|
||||
-------------------------------------------------------------------------------
|
||||
License
|
||||
This file is part of foam-extend.
|
||||
|
||||
foam-extend is free software: you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the
|
||||
Free Software Foundation, either version 3 of the License, or (at your
|
||||
option) any later version.
|
||||
|
||||
foam-extend is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
#include "coupledKEpsilon.H"
|
||||
#include "fvBlockMatrix.H"
|
||||
#include "addToRunTimeSelectionTable.H"
|
||||
|
||||
#include "backwardsCompatibilityWallFunctions.H"
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
namespace Foam
|
||||
{
|
||||
namespace incompressible
|
||||
{
|
||||
namespace RASModels
|
||||
{
|
||||
|
||||
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
|
||||
|
||||
defineTypeNameAndDebug(coupledKEpsilon, 0);
|
||||
addToRunTimeSelectionTable(RASModel, coupledKEpsilon, dictionary);
|
||||
|
||||
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
|
||||
|
||||
coupledKEpsilon::coupledKEpsilon
|
||||
(
|
||||
const volVectorField& U,
|
||||
const surfaceScalarField& phi,
|
||||
transportModel& lamTransportModel
|
||||
)
|
||||
:
|
||||
RASModel(typeName, U, phi, lamTransportModel),
|
||||
|
||||
Cmu_
|
||||
(
|
||||
dimensioned<scalar>::lookupOrAddToDict
|
||||
(
|
||||
"Cmu",
|
||||
coeffDict_,
|
||||
0.09
|
||||
)
|
||||
),
|
||||
C1_
|
||||
(
|
||||
dimensioned<scalar>::lookupOrAddToDict
|
||||
(
|
||||
"C1",
|
||||
coeffDict_,
|
||||
1.44
|
||||
)
|
||||
),
|
||||
C2_
|
||||
(
|
||||
dimensioned<scalar>::lookupOrAddToDict
|
||||
(
|
||||
"C2",
|
||||
coeffDict_,
|
||||
1.92
|
||||
)
|
||||
),
|
||||
sigmaEps_
|
||||
(
|
||||
dimensioned<scalar>::lookupOrAddToDict
|
||||
(
|
||||
"sigmaEps",
|
||||
coeffDict_,
|
||||
1.3
|
||||
)
|
||||
),
|
||||
|
||||
k_
|
||||
(
|
||||
IOobject
|
||||
(
|
||||
"k",
|
||||
runTime_.timeName(),
|
||||
U_.db(),
|
||||
IOobject::NO_READ,
|
||||
IOobject::AUTO_WRITE
|
||||
),
|
||||
autoCreateK("k", mesh_)
|
||||
),
|
||||
epsilon_
|
||||
(
|
||||
IOobject
|
||||
(
|
||||
"epsilon",
|
||||
runTime_.timeName(),
|
||||
U_.db(),
|
||||
IOobject::NO_READ,
|
||||
IOobject::AUTO_WRITE
|
||||
),
|
||||
autoCreateEpsilon("epsilon", mesh_)
|
||||
),
|
||||
nut_
|
||||
(
|
||||
IOobject
|
||||
(
|
||||
"nut",
|
||||
runTime_.timeName(),
|
||||
U_.db(),
|
||||
IOobject::NO_READ,
|
||||
IOobject::AUTO_WRITE
|
||||
),
|
||||
autoCreateNut("nut", mesh_)
|
||||
),
|
||||
ke_
|
||||
(
|
||||
IOobject
|
||||
(
|
||||
"ke",
|
||||
runTime_.timeName(),
|
||||
U_.db(),
|
||||
IOobject::NO_READ,
|
||||
IOobject::NO_WRITE
|
||||
),
|
||||
mesh_,
|
||||
dimensionedVector2("zero", dimless, vector2::zero)
|
||||
)
|
||||
{
|
||||
nut_ = Cmu_*sqr(k_)/(epsilon_ + epsilonSmall_);
|
||||
nut_ = min(nut_, nuRatio()*nu());
|
||||
nut_.correctBoundaryConditions();
|
||||
|
||||
printCoeffs();
|
||||
}
|
||||
|
||||
|
||||
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
|
||||
|
||||
tmp<volSymmTensorField> coupledKEpsilon::R() const
|
||||
{
|
||||
return tmp<volSymmTensorField>
|
||||
(
|
||||
new volSymmTensorField
|
||||
(
|
||||
IOobject
|
||||
(
|
||||
"R",
|
||||
runTime_.timeName(),
|
||||
U_.db(),
|
||||
IOobject::NO_READ,
|
||||
IOobject::NO_WRITE
|
||||
),
|
||||
((2.0/3.0)*I)*k_ - nut_*twoSymm(fvc::grad(U_)),
|
||||
k_.boundaryField().types()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
tmp<volSymmTensorField> coupledKEpsilon::devReff() const
|
||||
{
|
||||
return tmp<volSymmTensorField>
|
||||
(
|
||||
new volSymmTensorField
|
||||
(
|
||||
IOobject
|
||||
(
|
||||
"devRhoReff",
|
||||
runTime_.timeName(),
|
||||
U_.db(),
|
||||
IOobject::NO_READ,
|
||||
IOobject::NO_WRITE
|
||||
),
|
||||
-nuEff()*dev(twoSymm(fvc::grad(U_)))
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
tmp<fvVectorMatrix> coupledKEpsilon::divDevReff(volVectorField& U) const
|
||||
{
|
||||
return
|
||||
(
|
||||
- fvm::laplacian(nuEff(), U)
|
||||
- fvc::div(nuEff()*dev(fvc::grad(U)().T()))
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
bool coupledKEpsilon::read()
|
||||
{
|
||||
if (RASModel::read())
|
||||
{
|
||||
Cmu_.readIfPresent(coeffDict());
|
||||
C1_.readIfPresent(coeffDict());
|
||||
C2_.readIfPresent(coeffDict());
|
||||
sigmaEps_.readIfPresent(coeffDict());
|
||||
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void coupledKEpsilon::correct()
|
||||
{
|
||||
// Bound in case of topological change
|
||||
// HJ, 22/Aug/2007
|
||||
if (mesh_.changing())
|
||||
{
|
||||
bound(k_, k0_);
|
||||
bound(epsilon_, epsilon0_);
|
||||
}
|
||||
|
||||
RASModel::correct();
|
||||
|
||||
if (!turbulence_)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
fvBlockMatrix<vector2> keEqn(ke_);
|
||||
|
||||
volScalarField G("RASModel::G", nut_*2*magSqr(symm(fvc::grad(U_))));
|
||||
|
||||
// Update epsilon and G at the wall
|
||||
epsilon_.boundaryField().updateCoeffs();
|
||||
|
||||
// Dissipation equation
|
||||
{
|
||||
fvScalarMatrix epsEqn
|
||||
(
|
||||
fvm::ddt(epsilon_)
|
||||
+ fvm::div(phi_, epsilon_)
|
||||
+ fvm::SuSp(-fvc::div(phi_), epsilon_)
|
||||
- fvm::laplacian(DepsilonEff(), epsilon_)
|
||||
==
|
||||
C1_*G*epsilon_/k_
|
||||
- fvm::Sp(C2_*epsilon_/k_, epsilon_)
|
||||
);
|
||||
|
||||
epsEqn.relax();
|
||||
epsEqn.completeAssembly();
|
||||
|
||||
keEqn.insertEquation(1, epsEqn);
|
||||
}
|
||||
|
||||
// Turbulent kinetic energy equation
|
||||
{
|
||||
fvScalarMatrix kEqn
|
||||
(
|
||||
fvm::ddt(k_)
|
||||
+ fvm::div(phi_, k_)
|
||||
+ fvm::SuSp(-fvc::div(phi_), k_)
|
||||
- fvm::laplacian(DkEff(), k_)
|
||||
==
|
||||
G
|
||||
- fvm::Sp(epsilon_/k_, k_)
|
||||
);
|
||||
|
||||
kEqn.relax();
|
||||
|
||||
keEqn.insertEquation(0, kEqn);
|
||||
}
|
||||
|
||||
// Add coupling term: C1*Cmu*(symm(grad(U))) k but with wall function
|
||||
// corrections: must be calculated from G. HJ, 27/Apr/2015
|
||||
|
||||
// Add coupling term: epsilon source depends on k
|
||||
// k, e sink terms cannot be changed because of boundedness
|
||||
keEqn.insertEquationCoupling
|
||||
(
|
||||
1, 0, -C1_*G*epsilon_/sqr(k_)
|
||||
);
|
||||
|
||||
// Update source coupling: coupling terms eliminated from source
|
||||
keEqn.updateSourceCoupling();
|
||||
|
||||
keEqn.solve();
|
||||
|
||||
// Retrieve solution
|
||||
keEqn.retrieveSolution(0, k_.internalField());
|
||||
keEqn.retrieveSolution(1, epsilon_.internalField());
|
||||
|
||||
k_.correctBoundaryConditions();
|
||||
epsilon_.correctBoundaryConditions();
|
||||
|
||||
bound(epsilon_, epsilon0_);
|
||||
bound(k_, k0_);
|
||||
|
||||
// Re-calculate viscosity
|
||||
nut_ = Cmu_*sqr(k_)/epsilon_;
|
||||
nut_ = min(nut_, nuRatio()*nu());
|
||||
nut_.correctBoundaryConditions();
|
||||
}
|
||||
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
} // End namespace RASModels
|
||||
} // End namespace incompressible
|
||||
} // End namespace Foam
|
||||
|
||||
// ************************************************************************* //
|
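Compared with the segregated kEpsilon model, the essential change is that both transport equations are inserted into a single fvBlockMatrix<vector2> and solved together, with one off-diagonal block linking them. The coefficient handed to insertEquationCoupling at position (1, 0) is the derivative of the epsilon production term with respect to k:

    S_\varepsilon(k) \;=\; C_1\,G\,\frac{\varepsilon}{k}
    \qquad\Longrightarrow\qquad
    \frac{\partial S_\varepsilon}{\partial k}
      \;=\; -\,C_1\,G\,\frac{\varepsilon}{k^{2}}

which is the -C1_*G*epsilon_/sqr(k_) entry above; the k and epsilon sink terms are left on the diagonal so that the boundedness of the original formulation is preserved.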
|
@ -0,0 +1,175 @@
|
|||
/*---------------------------------------------------------------------------*\
|
||||
========= |
|
||||
\\ / F ield | foam-extend: Open Source CFD
|
||||
\\ / O peration |
|
||||
\\ / A nd | For copyright notice see file Copyright
|
||||
\\/ M anipulation |
|
||||
-------------------------------------------------------------------------------
|
||||
License
|
||||
This file is part of foam-extend.
|
||||
|
||||
foam-extend is free software: you can redistribute it and/or modify it
|
||||
under the terms of the GNU General Public License as published by the
|
||||
Free Software Foundation, either version 3 of the License, or (at your
|
||||
option) any later version.
|
||||
|
||||
foam-extend is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Class
|
||||
Foam::incompressible::RASModels::coupledKEpsilon
|
||||
|
||||
Description
|
||||
Standard k-epsilon turbulence model for incompressible flows using
|
||||
a block-coupled solution.
|
||||
|
||||
The default model coefficients correspond to the following:
|
||||
@verbatim
|
||||
coupledKEpsilonCoeffs
|
||||
{
|
||||
Cmu 0.09;
|
||||
C1 1.44;
|
||||
C2 1.92;
|
||||
sigmaEps 1.3;
|
||||
}
|
||||
@endverbatim
|
||||
|
||||
SourceFiles
|
||||
coupledKEpsilon.C
|
||||
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef coupledKEpsilon_H
|
||||
#define coupledKEpsilon_H
|
||||
|
||||
#include "RASModel.H"
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
namespace Foam
|
||||
{
|
||||
namespace incompressible
|
||||
{
|
||||
namespace RASModels
|
||||
{
|
||||
|
||||
/*---------------------------------------------------------------------------*\
|
||||
Class coupledKEpsilon Declaration
|
||||
\*---------------------------------------------------------------------------*/
|
||||
|
||||
class coupledKEpsilon
|
||||
:
|
||||
public RASModel
|
||||
{
|
||||
// Private data
|
||||
|
||||
// Model coefficients
|
||||
|
||||
dimensionedScalar Cmu_;
|
||||
dimensionedScalar C1_;
|
||||
dimensionedScalar C2_;
|
||||
dimensionedScalar sigmaEps_;
|
||||
|
||||
|
||||
// Fields
|
||||
|
||||
volScalarField k_;
|
||||
volScalarField epsilon_;
|
||||
volScalarField nut_;
|
||||
|
||||
// Block vector field for k-epsilon
|
||||
volVector2Field ke_;
|
||||
|
||||
|
||||
public:
|
||||
|
||||
//- Runtime type information
|
||||
TypeName("coupledKEpsilon");
|
||||
|
||||
// Constructors
|
||||
|
||||
//- Construct from components
|
||||
coupledKEpsilon
|
||||
(
|
||||
const volVectorField& U,
|
||||
const surfaceScalarField& phi,
|
||||
transportModel& transport
|
||||
);
|
||||
|
||||
|
||||
//- Destructor
|
||||
virtual ~coupledKEpsilon()
|
||||
{}
|
||||
|
||||
|
||||
// Member Functions
|
||||
|
||||
//- Return the turbulence viscosity
|
||||
virtual tmp<volScalarField> nut() const
|
||||
{
|
||||
return nut_;
|
||||
}
|
||||
|
||||
//- Return the effective diffusivity for k
|
||||
tmp<volScalarField> DkEff() const
|
||||
{
|
||||
return tmp<volScalarField>
|
||||
(
|
||||
new volScalarField("DkEff", nut_ + nu())
|
||||
);
|
||||
}
|
||||
|
||||
//- Return the effective diffusivity for epsilon
|
||||
tmp<volScalarField> DepsilonEff() const
|
||||
{
|
||||
return tmp<volScalarField>
|
||||
(
|
||||
new volScalarField("DepsilonEff", nut_/sigmaEps_ + nu())
|
||||
);
|
||||
}
|
||||
|
||||
//- Return the turbulence kinetic energy
|
||||
virtual tmp<volScalarField> k() const
|
||||
{
|
||||
return k_;
|
||||
}
|
||||
|
||||
//- Return the turbulence kinetic energy dissipation rate
|
||||
virtual tmp<volScalarField> epsilon() const
|
||||
{
|
||||
return epsilon_;
|
||||
}
|
||||
|
||||
//- Return the Reynolds stress tensor
|
||||
virtual tmp<volSymmTensorField> R() const;
|
||||
|
||||
//- Return the effective stress tensor including the laminar stress
|
||||
virtual tmp<volSymmTensorField> devReff() const;
|
||||
|
||||
//- Return the source term for the momentum equation
|
||||
virtual tmp<fvVectorMatrix> divDevReff(volVectorField& U) const;
|
||||
|
||||
//- Solve the turbulence equations and correct the turbulence viscosity
|
||||
virtual void correct();
|
||||
|
||||
//- Read RASProperties dictionary
|
||||
virtual bool read();
|
||||
};
|
||||
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
} // End namespace RASModels
|
||||
} // End namespace incompressible
|
||||
} // End namespace Foam
|
||||
|
||||
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
|
||||
|
||||
#endif
|
||||
|
||||
// ************************************************************************* //
|
|
@ -69,6 +69,7 @@ tmp<volScalarField> kOmegaSST::F1(const volScalarField& CDkOmega) const
|
|||
return tanh(pow4(arg1));
|
||||
}
|
||||
|
||||
|
||||
tmp<volScalarField> kOmegaSST::F2() const
|
||||
{
|
||||
volScalarField arg2 = min
|
||||
|
|
|
@ -37,7 +37,7 @@ purgeWrite 0;
|
|||
|
||||
writeFormat ascii;
|
||||
|
||||
writePrecision 6;
|
||||
writePrecision 10;
|
||||
|
||||
writeCompression uncompressed;
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ purgeWrite 0;
|
|||
|
||||
writeFormat ascii;
|
||||
|
||||
writePrecision 6;
|
||||
writePrecision 10;
|
||||
|
||||
writeCompression uncompressed;
|
||||
|
||||
|
|
|
@ -36,7 +36,7 @@ purgeWrite 0;
|
|||
|
||||
writeFormat ascii;
|
||||
|
||||
writePrecision 6;
|
||||
writePrecision 10;
|
||||
|
||||
writeCompression compressed;
|
||||
|
||||
|
|
|
@ -33,56 +33,55 @@ Tr = 170 C; activation energy: E0 = 48.2 kJ/mol; WLF-shift parameters: C1 = 14.3
|
|||
|
||||
rheology
|
||||
{
|
||||
type multiMode;
|
||||
|
||||
type multiMode;
|
||||
models
|
||||
(
|
||||
first
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 280.43457;
|
||||
lambda lambda [0 0 1 0 0 0 0] 3.8946e-3;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.3;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
|
||||
models
|
||||
(
|
||||
first
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 280.43457;
|
||||
lambda lambda [0 0 1 0 0 0 0] 3.8946e-3;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.3;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
second
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 810.4203;
|
||||
lambda lambda [0 0 1 0 0 0 0] 5.1390e-2;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.2;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
|
||||
second
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 810.4203;
|
||||
lambda lambda [0 0 1 0 0 0 0] 5.1390e-2;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.2;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
|
||||
third
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 1678.6357;
|
||||
lambda lambda [0 0 1 0 0 0 0] 5.0349e-1;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.02;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
|
||||
fourth
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 1381.0029;
|
||||
lambda lambda [0 0 1 0 0 0 0] 4.5911e0;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.02;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
);
|
||||
third
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 1678.6357;
|
||||
lambda lambda [0 0 1 0 0 0 0] 5.0349e-1;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.02;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
|
||||
fourth
|
||||
{
|
||||
type PTT-Exponential;
|
||||
rho rho [1 -3 0 0 0 0 0] 850;
|
||||
etaS etaS [1 -1 -1 0 0 0 0] 0.0;
|
||||
etaP etaP [1 -1 -1 0 0 0 0] 1381.0029;
|
||||
lambda lambda [0 0 1 0 0 0 0] 4.5911e0;
|
||||
epsilon epsilon [0 0 0 0 0 0 0] 0.02;
|
||||
zeta zeta [0 0 0 0 0 0 0] 0.08;
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
// ************************************************************************* //
|
||||
|
|