--HG--
branch : bgschaid/minorAdditionsBranch
Author: Bernhard F.W. Gschaider (2013-07-19 00:36:27 +02:00)
commit 72c243a96f
1416 changed files with 18017 additions and 18012 deletions

View file

@ -5,33 +5,33 @@ if(divDSigmaExpMethod == "standard")
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU = ((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
@ -40,8 +40,8 @@ if(divDSigmaExpMethod == "standard")
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
}
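
The four branches above are alternative discretisations of the same explicit remainder of the stress divergence. Assuming the segregated split used elsewhere in these solvers, where fvm::laplacian(2*mu + lambda, DU) is the implicit part, the term being assembled here is (written out in the notation of the "standard" branch, with gradDU = ∇ΔU):

$$\nabla\cdot\Delta\boldsymbol{\sigma} \;\approx\; \underbrace{\nabla\cdot\big[(2\mu+\lambda)\,\nabla\Delta\mathbf{U}\big]}_{\text{implicit}} \;+\; \underbrace{\nabla\cdot\big[\mu\,(\nabla\Delta\mathbf{U})^{T} + \lambda\,\mathrm{tr}(\nabla\Delta\mathbf{U})\,\mathbf{I} - (\mu+\lambda)\,\nabla\Delta\mathbf{U}\big]}_{\text{explicit: divDSigmaExp}}$$

The "surface", "decompose" and "laplacian" branches evaluate the same explicit term from face-interpolated quantities rather than cell-centred gradients; the sum of the two underbraced terms is simply the divergence of the incremental Hooke's-law stress.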

View file

@ -23,7 +23,7 @@ philipc
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
//***** FIX INCORRECT POINT ON PATCHES WITH FACEZONE *****//
contactPatchPairList& contacts = contact;
@ -33,9 +33,13 @@ if(Pstream::parRun())
label slaveID = contacts[contactI].slavePatch().index();
primitivePatchInterpolation masterInterpolator
(mesh.boundaryMesh()[masterID]);
(
mesh.boundaryMesh()[masterID]
);
primitivePatchInterpolation slaveInterpolator
(mesh.boundaryMesh()[slaveID]);
(
mesh.boundaryMesh()[slaveID]
);
//- U must be interpolated to the vertices, this ignores the faceZone
//- points with no U (unlike volPointInterpolation)
@ -65,23 +69,17 @@ if(Pstream::parRun())
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI]
+
correctMasterPointU[pointI];
oldMasterPoints[pointI] + correctMasterPointU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI]
+
correctSlavePointU[pointI];
oldSlavePoints[pointI] + correctSlavePointU[pointI];
}
}
//***** NOW FIX AND SYNCHRONISE ALL THE FACEZONE POINTS *****//
forAll(mesh.faceZones(), faceZoneI)
@ -117,8 +115,7 @@ if(Pstream::parRun())
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] =
newPoints[procPoint];
globalFZnewPoints[globalPointI] = newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
@ -141,20 +138,16 @@ if(Pstream::parRun())
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] =
globalFZnewPoints[globalPointI];
procFZnewPoints[localPoint] = globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints =
mesh.faceZones()[faceZoneI]().meshPoints();
labelList procFZmeshPoints = mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] =
procFZnewPoints[pointI];
}
newPoints[procPoint] = procFZnewPoints[pointI];
}
}
}

View file

@ -28,7 +28,7 @@ IOList<labelList> procToGlobalFZmap
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
);
IOList<labelList> pointOnLocalProcPatch
(
@ -46,20 +46,20 @@ IOList<labelList> pointOnLocalProcPatch
//- if they have been read then don't recalculate it
bool globalFaceZoneMappingSet = false;
if(gMax(procToGlobalFZmap[0]) > 0 && gMax(pointOnLocalProcPatch[0]) > 0)
{
{
Info << "Reading procToGlobalFZmap and pointOnLocalProcPatch allowing restart of contact cases"
<< endl;
globalFaceZoneMappingSet = true;
}
else
{
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as it has not been found" << nl
<< "this message should only appear starting a new analysis" << endl;
}
}
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
if(!globalFaceZoneMappingSet)
{
forAll(mesh.faceZones(), faceZoneI)
@ -70,7 +70,9 @@ if(Pstream::parRun())
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
{
globalFZpoints *= 0.0;
}
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
@ -93,6 +95,7 @@ if(Pstream::parRun())
}
}
}
//- procToGlobalFZmap now contains the local FZpoint label for each
//- global FZ point label - for each faceZone
@ -123,13 +126,13 @@ if(Pstream::parRun())
}
}
} //- end if(!globalFaceZoneMappingSet)
}
}
//- write to disk to allow restart of cases
//- because it is not possible to calculate the
//- mapping after the meshes have moved
if(!globalFaceZoneMappingSet && Pstream::parRun())
{
{
procToGlobalFZmap.write();
pointOnLocalProcPatch.write();
}
}
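
The comments in this file outline how the mapping is built and why it is persisted. Condensed to its core, and using only calls visible above (any other names are placeholders), the gather pattern is:

    //- each processor builds a copy of the zone point field, the slave
    //- copies are zeroed (only the master's ordering is used), and a
    //- sum-reduction leaves the master's values on every processor
    if (!Pstream::master())
    {
        globalFZpoints *= 0.0;
    }
    reduce(globalFZpoints, sumOp<vectorField>());

procToGlobalFZmap[faceZoneI] then stores, for every global faceZone point, the matching local zone point label on this processor, and both lists are written to disk because, as noted above, the mapping cannot be recomputed once the meshes have moved.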

View file

@ -15,7 +15,7 @@ solidInterface* solidInterfacePtr(NULL);
solidInterfacePtr->modifyProperties(muf, lambdaf);
gradDU = solidInterfacePtr->grad(DU);
//- solidInterface needs muf and lambdaf to be used for divSigmaExp
//- solidInterface needs muf and lambdaf to be used for divDSigmaExp
if(divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose")
{
FatalError << "divDSigmaExp must be decompose or surface when solidInterface is on"

View file

@ -75,7 +75,7 @@ int main(int argc, char *argv[])
# include "createSolidInterface.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
@ -101,7 +101,9 @@ int main(int argc, char *argv[])
//- reset DU to zero at the start of the time-step if
//- a predictor is not required
if(!predictor)
{
DU = dimensionedVector("zero", dimLength, vector::zero);
}
do //- start of momentum loop
{
@ -114,7 +116,7 @@ int main(int argc, char *argv[])
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
//# include "moveMeshLeastSquares.H"
# include "moveSolidMesh.H"
contact.correct();
mesh.movePoints(oldMeshPoints);

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
@ -47,10 +46,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
// FAILS IN PARALLEL - FIX
// Info << "Print contact area" << endl;
//volScalarField ca = contact.contactArea();
@ -52,4 +52,4 @@ if (runTime.outputTime())
//- SHOULD THIS BE A REF TO A TMP...?
volScalarField cPressure = contact.contactPressure();
cPressure.write();
}
}

View file

@ -1,9 +1,15 @@
//- how the explicit component of sigma is to be calculated
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << divDSigmaExpMethod << " method chosen for calculation of sigmaExp" << endl;
if(divDSigmaExpMethod != "standard" && divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose" && divDSigmaExpMethod != "laplacian")
{
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}
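
For reference, the lookup above reads the method name from the case's fvSolution file; a minimal sketch of the entry it expects (the sub-dictionary and keyword names come from the code above, the chosen value is just an example):

    // system/fvSolution
    stressedFoam
    {
        // one of: standard | surface | decompose | laplacian
        divDSigmaExp    decompose;
    }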

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -44,6 +44,7 @@ if (runTime.outputTime())
),
tr(sigma)/3.0
);
//- boundary surface pressure
forAll(pressure.boundaryField(), patchi)
{
@ -74,4 +75,4 @@ if (runTime.outputTime())
mesh.movePoints(oldMeshPoints);
runTime.write();
}
}

View file

@ -1,22 +1,22 @@
if(divDSigmaExpMethod == "standard")
{
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
@ -29,9 +29,9 @@ if(divDSigmaExpMethod == "standard")
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
@ -40,8 +40,8 @@ if(divDSigmaExpMethod == "standard")
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -2,7 +2,7 @@
//- sigma explicit large strain explicit terms
//----------------------------------------------------//
if(divDSigmaLargeStrainExpMethod == "standard")
{
{
divDSigmaLargeStrainExp =
fvc::div
(
@ -11,9 +11,9 @@ if(divDSigmaLargeStrainExpMethod == "standard")
+ ((sigma + DSigma) & DF.T()),
"div(sigma)"
);
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
divDSigmaLargeStrainExp =
fvc::div
(
@ -22,13 +22,13 @@ if(divDSigmaLargeStrainExpMethod == "standard")
+ (mesh.Sf() & fvc::interpolate( sigma & DF.T() ))
+ (mesh.Sf() & fvc::interpolate(DSigma & DF.T() ))
);
}
else
{
}
else
{
FatalError
<< "divDSigmaLargeStrainExp not found!"
<< exit(FatalError);
}
}
//- relax large strain component
divDSigmaLargeStrainExp.relax();

View file

@ -69,5 +69,4 @@ FieldField<Field, vector> extraVecs(ptc.size());
curExtraVectors.setSize(nFacesAroundPoint);
}
}

View file

@ -91,7 +91,8 @@ FieldField<Field, scalar> w(ptc.size());
// Update coupled boundaries
// Work-around for cyclic parallels.
/*if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
/*
if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
{
forAll (volPointSumWeights.boundaryField(), patchI)
{
@ -111,7 +112,8 @@ FieldField<Field, scalar> w(ptc.size());
);
}
}
}*/
}
*/
// Re-scale the weights for the current point
forAll (ptc, pointI)

View file

@ -23,7 +23,7 @@ philipc
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
//***** FIX INCORRECT POINT ON PATCHES WITH FACEZONE *****//
contactPatchPairList& contacts = contact;
@ -33,9 +33,13 @@ if(Pstream::parRun())
label slaveID = contacts[contactI].slavePatch().index();
primitivePatchInterpolation masterInterpolator
(mesh.boundaryMesh()[masterID]);
(
mesh.boundaryMesh()[masterID]
);
primitivePatchInterpolation slaveInterpolator
(mesh.boundaryMesh()[slaveID]);
(
mesh.boundaryMesh()[slaveID]
);
//- DU must be interpolated to the vertices, this ignores the faceZone
//- points with no DU (unlike volPointInterpolation)
@ -65,23 +69,17 @@ if(Pstream::parRun())
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI]
+
correctMasterPointDU[pointI];
oldMasterPoints[pointI] + correctMasterPointDU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI]
+
correctSlavePointDU[pointI];
oldSlavePoints[pointI] + correctSlavePointDU[pointI];
}
}
//***** NOW FIX AND SYNCHRONISE ALL THE FACEZONE POINTS *****//
forAll(mesh.faceZones(), faceZoneI)
@ -117,8 +115,7 @@ if(Pstream::parRun())
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] =
newPoints[procPoint];
globalFZnewPoints[globalPointI] = newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
@ -141,20 +138,16 @@ if(Pstream::parRun())
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] =
globalFZnewPoints[globalPointI];
procFZnewPoints[localPoint] = globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints =
mesh.faceZones()[faceZoneI]().meshPoints();
labelList procFZmeshPoints = mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] =
procFZnewPoints[pointI];
}
newPoints[procPoint] = procFZnewPoints[pointI];
}
}
}

View file

@ -28,7 +28,7 @@ IOList<labelList> procToGlobalFZmap
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
);
IOList<labelList> pointOnLocalProcPatch
(
@ -46,20 +46,20 @@ IOList<labelList> pointOnLocalProcPatch
//- if they have been read then don't recalculate it
bool globalFaceZoneMappingSet = false;
if(gMax(procToGlobalFZmap[0]) > 0 && gMax(pointOnLocalProcPatch[0]) > 0)
{
{
Info << "Reading procToGlobalFZmap and pointOnLocalProcPatch allowing restart of contact cases"
<< endl;
globalFaceZoneMappingSet = true;
}
else
{
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as it has not been found" << nl
<< "this message should only appear starting a new analysis" << endl;
}
}
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
if(!globalFaceZoneMappingSet)
{
forAll(mesh.faceZones(), faceZoneI)
@ -70,7 +70,9 @@ if(Pstream::parRun())
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
{
globalFZpoints *= 0.0;
}
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
@ -93,8 +95,6 @@ if(Pstream::parRun())
}
}
}
//- procToGlobalFZmap now contains the local FZpoint label for each
//- global FZ point label - for each faceZone
//- check what points are on the current proc patch
pointOnLocalProcPatch[faceZoneI].setSize(globalFZpoints.size(), 0);
@ -123,13 +123,13 @@ if(Pstream::parRun())
}
}
} //- end if(!globalFaceZoneMappingSet)
}
}
//- write to disk to allow restart of cases
//- because it is not possible to calculate the
//- mapping after the meshes have moved
if(!globalFaceZoneMappingSet)
{
{
procToGlobalFZmap.write();
pointOnLocalProcPatch.write();
}
}

View file

@ -82,7 +82,7 @@ int main(int argc, char *argv[])
# include "createGlobalToLocalFaceZonePointMap.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
@ -118,7 +118,7 @@ int main(int argc, char *argv[])
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
//# include "moveMeshLeastSquares.H"
# include "moveSolidMeshForContact.H"
contact.correct();
mesh.movePoints(oldMeshPoints);
@ -135,7 +135,6 @@ int main(int argc, char *argv[])
fvm::laplacian(2*mu + lambda, DU, "laplacian(DDU,DU)")
+ divDSigmaExp
+ divDSigmaLargeStrainExp
);
solverPerf = DUEqn.solve();

View file

@ -1,15 +1,15 @@
if(moveMeshMethod == "inverseDistance")
{
{
# include "moveMeshInverseDistance.H"
}
else if(moveMeshMethod == "leastSquares")
{
}
else if(moveMeshMethod == "leastSquares")
{
# include "moveMeshLeastSquares.H"
}
else
{
}
else
{
FatalError << "move mesh method " << moveMeshMethod << " not recognised" << nl
<< "available methods are:" << nl
<< "inverseDistance" << nl
<< "leastSquares" << exit(FatalError);
}
}

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Move solid mesh using inverse distance interpolation" << endl;
// Create point mesh
@ -41,8 +41,7 @@ if(min(J.internalField()) > 0)
//pointDU.write();
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
// Move mesh
vectorField newPoints = mesh.allPoints();
@ -57,10 +56,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
@ -47,10 +46,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -7,7 +7,8 @@ pointVectorField& pf = pointDU;
// Do the correction
//GeometricField<Type, pointPatchField, pointMesh> pfCorr
/*pointVectorField pfCorr
/*
pointVectorField pfCorr
(
IOobject
(
@ -23,7 +24,8 @@ pointVectorField& pf = pointDU;
//dimensioned<Type>("zero", pf.dimensions(), pTraits<Type>::zero),
dimensionedVector("zero", pf.dimensions(), vector::zero),
pf.boundaryField().types()
);*/
);
*/
pointVectorField pfCorr
(
@ -38,7 +40,7 @@ pointVectorField pfCorr
pMesh,
dimensionedVector("vector", dimLength, vector::zero),
"calculated"
);
);
//const labelList& ptc = boundaryPoints();
#include "findBoundaryPoints.H"
@ -96,25 +98,29 @@ forAll (ptc, pointI)
}
// Update coupled boundaries
/*forAll (pfCorr.boundaryField(), patchI)
/*
forAll (pfCorr.boundaryField(), patchI)
{
if (pfCorr.boundaryField()[patchI].coupled())
{
pfCorr.boundaryField()[patchI].initAddField();
}
}*/
}
*/
/*forAll (pfCorr.boundaryField(), patchI)
/*
forAll (pfCorr.boundaryField(), patchI)
{
if (pfCorr.boundaryField()[patchI].coupled())
{
pfCorr.boundaryField()[patchI].addField(pfCorr.internalField());
}
}*/
}
*/
//Info << "pfCorr: " << pfCorr << endl;
pfCorr.correctBoundaryConditions();
//Info << "pfCorr: " << pfCorr << endl;
pfCorr.correctBoundaryConditions();
//pfCorr.write();

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
// FAILS IN PARALLEL - FIX
// Info << "Print contact area" << endl;
//volScalarField ca = contact.contactArea();
@ -52,4 +52,4 @@ if (runTime.outputTime())
//- SHOULD THIS BE A REF TO A TMP...?
volScalarField cPressure = contact.contactPressure();
cPressure.write();
}
}

View file

@ -1,9 +1,15 @@
//- how the explicit component of sigma is to be calculated
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << divDSigmaExpMethod << " method chosen for calculation of DSigmaExp" << endl;
if(divDSigmaExpMethod != "standard" && divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose" && divDSigmaExpMethod != "laplacian")
{
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,24 +1,23 @@
if(divSigmaExpMethod == "standard")
{
{
divSigmaExp = fvc::div
(
mu*gradU.T() + lambda*(I*tr(gradU)) - (mu + lambda)*gradU,
"div(sigma)"
);
}
else if(divSigmaExpMethod == "surface")
{
}
else if(divSigmaExpMethod == "surface")
{
divSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradU))
);
}
else if(divSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU =
((I - n*n)&fvc::interpolate(gradU));
}
else if(divSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU = ((I - n*n)&fvc::interpolate(gradU));
divSigmaExp = fvc::div
(
@ -29,9 +28,9 @@ if(divSigmaExpMethod == "standard")
+ muf*(shearGradU&n)
)
);
}
else if(divSigmaExpMethod == "expLaplacian")
{
}
else if(divSigmaExpMethod == "expLaplacian")
{
divSigmaExp =
- fvc::laplacian(mu + lambda, U, "laplacian(U,U)")
+ fvc::div
@ -40,8 +39,8 @@ if(divSigmaExpMethod == "standard")
+ lambda*(I*tr(gradU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divSigmaExp method " << divSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -23,7 +23,7 @@ philipc
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
//***** FIX INCORRECT POINT ON PATCHES WITH FACEZONE *****//
contactPatchPairList& contacts = contact;
@ -33,9 +33,13 @@ if(Pstream::parRun())
label slaveID = contacts[contactI].slavePatch().index();
primitivePatchInterpolation masterInterpolator
(mesh.boundaryMesh()[masterID]);
(
mesh.boundaryMesh()[masterID]
);
primitivePatchInterpolation slaveInterpolator
(mesh.boundaryMesh()[slaveID]);
(
mesh.boundaryMesh()[slaveID]
);
//- U must be interpolated to the vertices, this ignores the faceZone
//- points with no U (unlike volPointInterpolation)
@ -65,23 +69,17 @@ if(Pstream::parRun())
{
label pointGlobalLabel = masterPointLabels[pointI];
newPoints[pointGlobalLabel] =
oldMasterPoints[pointI]
+
correctMasterPointU[pointI];
oldMasterPoints[pointI] + correctMasterPointU[pointI];
}
forAll(slavePointLabels, pointI)
{
label pointGlobalLabel = slavePointLabels[pointI];
newPoints[pointGlobalLabel] =
oldSlavePoints[pointI]
+
correctSlavePointU[pointI];
oldSlavePoints[pointI] + correctSlavePointU[pointI];
}
}
//***** NOW FIX AND SYNCHRONISE ALL THE FACEZONE POINTS *****//
forAll(mesh.faceZones(), faceZoneI)
@ -117,8 +115,7 @@ if(Pstream::parRun())
{
label procPoint =
mesh.faceZones()[faceZoneI]().meshPoints()[localPoint];
globalFZnewPoints[globalPointI] =
newPoints[procPoint];
globalFZnewPoints[globalPointI] = newPoints[procPoint];
pointNumProcs[globalPointI] = 1;
}
}
@ -141,20 +138,16 @@ if(Pstream::parRun())
forAll(globalFZnewPoints, globalPointI)
{
label localPoint = procToGlobalFZmap[faceZoneI][globalPointI];
procFZnewPoints[localPoint] =
globalFZnewPoints[globalPointI];
procFZnewPoints[localPoint] = globalFZnewPoints[globalPointI];
}
//- now fix the newPoints points on the globalFaceZones
labelList procFZmeshPoints =
mesh.faceZones()[faceZoneI]().meshPoints();
labelList procFZmeshPoints = mesh.faceZones()[faceZoneI]().meshPoints();
forAll(procFZmeshPoints, pointI)
{
label procPoint = procFZmeshPoints[pointI];
newPoints[procPoint] =
procFZnewPoints[pointI];
}
newPoints[procPoint] = procFZnewPoints[pointI];
}
}
}

View file

@ -28,7 +28,7 @@ IOList<labelList> procToGlobalFZmap
IOobject::AUTO_WRITE
),
mesh.faceZones().size()
);
);
IOList<labelList> pointOnLocalProcPatch
(
@ -46,20 +46,20 @@ IOList<labelList> pointOnLocalProcPatch
//- if they have been read then don't recalculate it
bool globalFaceZoneMappingSet = false;
if(gMax(procToGlobalFZmap[0]) > 0 && gMax(pointOnLocalProcPatch[0]) > 0)
{
{
Info << "Reading procToGlobalFZmap and pointOnLocalProcPatch allowing restart of contact cases"
<< endl;
globalFaceZoneMappingSet = true;
}
else
{
}
else
{
Info << "procToGlobalFZmap and pointOnLocalProcPatch will be calculated as it has not been found" << nl
<< "this message should only appear starting a new analysis" << endl;
}
}
//- this is only needed in parallel runs
if(Pstream::parRun())
{
{
if(!globalFaceZoneMappingSet)
{
forAll(mesh.faceZones(), faceZoneI)
@ -70,7 +70,9 @@ if(Pstream::parRun())
//- set all slave points to zero because only the master order is used
if(!Pstream::master())
{
globalFZpoints *= 0.0;
}
//- pass points to all procs
reduce(globalFZpoints, sumOp<vectorField>());
@ -93,8 +95,6 @@ if(Pstream::parRun())
}
}
}
//- procToGlobalFZmap now contains the local FZpoint label for each
//- global FZ point label - for each faceZone
//- check what points are on the current proc patch
pointOnLocalProcPatch[faceZoneI].setSize(globalFZpoints.size(), 0);
@ -123,13 +123,13 @@ if(Pstream::parRun())
}
}
} //- end if(!globalFaceZoneMappingSet)
}
}
//- write to disk to allow restart of cases
//- because it is not possible to calculate the
//- mapping after the meshes have moved
if(!globalFaceZoneMappingSet)
{
{
procToGlobalFZmap.write();
pointOnLocalProcPatch.write();
}
}

View file

@ -78,7 +78,7 @@ int main(int argc, char *argv[])
# include "createGlobalToLocalFaceZonePointMap.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
@ -121,7 +121,7 @@ int main(int argc, char *argv[])
<< "iteration: " << iCorr
<< ", residual: " << residual
<< endl;
//# include "moveMeshLeastSquares.H"
//# include "moveMeshLeastSquares.H"
# include "moveSolidMesh.H"
contact.correct();
mesh.movePoints(oldMeshPoints);
@ -187,10 +187,10 @@ int main(int argc, char *argv[])
# include "writeFields.H"
//# include "moveMeshLeastSquares.H"
//# include "moveSolidMesh.H"
//# include "printContactResults.H"
//mesh.movePoints(oldMeshPoints);
//# include "moveMeshLeastSquares.H"
//# include "moveSolidMesh.H"
//# include "printContactResults.H"
// mesh.movePoints(oldMeshPoints);
Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
@ -47,10 +46,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
// FAILS IN PARALLEL - FIX
// Info << "Print contact area" << endl;
//volScalarField ca = contact.contactArea();
@ -52,4 +52,4 @@ if (runTime.outputTime())
//- SHOULD THIS BE A REF TO A TMP...?
volScalarField cPressure = contact.contactPressure();
cPressure.write();
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,24 +1,23 @@
if(divSigmaExpMethod == "standard")
{
{
divSigmaExp = fvc::div
(
mu*gradU.T() + lambda*(I*tr(gradU)) - (mu + lambda)*gradU,
"div(sigma)"
);
}
else if(divSigmaExpMethod == "surface")
{
}
else if(divSigmaExpMethod == "surface")
{
divSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradU))
);
}
else if(divSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU =
((I - n*n)&fvc::interpolate(gradU));
}
else if(divSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU = ((I - n*n)&fvc::interpolate(gradU));
divSigmaExp = fvc::div
(
@ -29,9 +28,9 @@ if(divSigmaExpMethod == "standard")
+ muf*(shearGradU&n)
)
);
}
else if(divSigmaExpMethod == "expLaplacian")
{
}
else if(divSigmaExpMethod == "expLaplacian")
{
divSigmaExp =
- fvc::laplacian(mu + lambda, U, "laplacian(DU,U)")
+ fvc::div
@ -40,8 +39,8 @@ if(divSigmaExpMethod == "standard")
+ lambda*(I*tr(gradU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divSigmaExp method " << divSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -1,9 +1,15 @@
//- how explicit component of sigma is to be calculated
word divSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divSigmaExp"));
Info << "Selecting divSigmaExp calculation method " << divSigmaExpMethod << endl;
if(divSigmaExpMethod != "standard" && divSigmaExpMethod != "surface" && divSigmaExpMethod != "decompose" && divSigmaExpMethod != "laplacian")
{
if
(
divSigmaExpMethod != "standard"
&& divSigmaExpMethod != "surface"
&& divSigmaExpMethod != "decompose"
&& divSigmaExpMethod != "laplacian"
)
{
FatalError << "divSigmaExp method " << divSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,37 +1,37 @@
if(divDSigmaExpMethod == "standard")
{
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU = ((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
@ -40,8 +40,8 @@ if(divDSigmaExpMethod == "standard")
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -1,9 +1,15 @@
//- how the explicit component of sigma is to be calculated
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << "Selecting divDSigmaExp calculation method " << divDSigmaExpMethod << endl;
if(divDSigmaExpMethod != "standard" && divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose" && divDSigmaExpMethod != "laplacian")
{
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -2,15 +2,16 @@
label leftPatchID = mesh.boundaryMesh().findPatchID("leftClamp");
if(leftPatchID == -1)
{
{
FatalError << "Cannot find patch left for calculating force" << endl;
}
}
//- calculate force in x direction on leftClamp patch
scalar leftForce = gSum(
vector(1, 0, 0) &
(mesh.boundary()[leftPatchID].Sf() & sigma.boundaryField()[leftPatchID])
);
scalar leftForce = gSum
(
vector(1, 0, 0)
& (mesh.boundary()[leftPatchID].Sf() & sigma.boundaryField()[leftPatchID])
);
//- patchIntegrate utility integrates it this way but this is wrong because the sigma tensor should
//- be dotted with the surface normal to give the actual traction/force
@ -23,13 +24,13 @@ scalar leftForce = gSum(
vector gaugeU1 = vector::zero;
vector gaugeU2 = vector::zero;
if(gaugeFaceID1 != -1)
{
{
gaugeU1 = U.boundaryField()[gaugeFacePatchID1][gaugeFaceID1];
}
}
if(gaugeFaceID2 != -1)
{
{
gaugeU2 = U.boundaryField()[gaugeFacePatchID2][gaugeFaceID2];
}
}
//- reduce across procs
reduce(gaugeU1, sumOp<vector>());
@ -42,7 +43,7 @@ scalar gaugeDisp = mag(gaugeU1 - gaugeU2);
//- write to file
if(Pstream::master())
{
{
OFstream& forceDispFile = *filePtr;
forceDispFile << 1000*gaugeDisp << "\t" << -1*leftForce << endl;
}
}
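
Written out, the quantity reported above is the x-component of the integrated traction on the leftClamp patch,

$$F_x \;=\; \mathbf{e}_x\cdot\sum_{f\,\in\,\text{leftClamp}} \mathbf{S}_f\cdot\boldsymbol{\sigma}_f,$$

which is also the point of the warning in the comment: each face's area vector has to be dotted with the stress tensor before summing, so a plain patchIntegrate of sigma does not yield a force.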

View file

@ -1,7 +1,9 @@
{
forAll(mesh.boundary(), patchID)
{
if(U.boundaryField()[patchID].type()
if
(
U.boundaryField()[patchID].type()
== solidDirectionMixedFvPatchVectorField::typeName
)
{

View file

@ -83,8 +83,9 @@ int main(int argc, char *argv[])
fvm::d2dt2(rho, U)
==
fvm::laplacian(2*mu + lambda, U, "laplacian(DU,U)")
+ fvc::div(
-( (mu + lambda) * gradU )
+ fvc::div
(
- ( (mu + lambda) * gradU )
+ ( mu * gradU.T() )
+ ( mu * (gradU & gradU.T()) )
+ ( lambda * tr(gradU) * I )

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(U, pointU);
const vectorField& pointUI =
pointU.internalField();
const vectorField& pointUI = pointU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
@ -47,10 +46,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -95,10 +95,10 @@ if (runTime.outputTime())
//- move mesh for visualisation and move it back after writing
vectorField oldPoints = mesh.allPoints();
#include "moveMeshLeastSquares.H"
# include "moveMeshLeastSquares.H"
runTime.write();
//- move mesh back
mesh.movePoints(oldPoints);
}
}

View file

@ -1,37 +1,37 @@
if(divDSigmaExpMethod == "standard")
{
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU = ((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
@ -40,8 +40,8 @@ if(divDSigmaExpMethod == "standard")
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -2,7 +2,7 @@
//- sigma explicit large strain explicit terms
//----------------------------------------------------//
if(divDSigmaLargeStrainExpMethod == "standard")
{
{
divDSigmaLargeStrainExp =
fvc::div
(
@ -11,9 +11,9 @@ if(divDSigmaLargeStrainExpMethod == "standard")
+ ((sigma + DSigma) & gradDU),
"div(sigma)"
);
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
divDSigmaLargeStrainExp =
fvc::div
(
@ -21,13 +21,13 @@ if(divDSigmaLargeStrainExpMethod == "standard")
+ 0.5*lambdaf * (mesh.Sf() & (fvc::interpolate(gradDU && gradDU)*I))
+ (mesh.Sf() & fvc::interpolate( (sigma + DSigma) & gradDU ))
);
}
else
{
}
else
{
FatalError
<< "divDSigmaLargeStrainMethod not found!"
<< exit(FatalError);
}
}
//- relax
divDSigmaLargeStrainExp.relax();

View file

@ -69,5 +69,4 @@ FieldField<Field, vector> extraVecs(ptc.size());
curExtraVectors.setSize(nFacesAroundPoint);
}
}

View file

@ -91,7 +91,8 @@ FieldField<Field, scalar> w(ptc.size());
// Update coupled boundaries
// Work-around for cyclic parallels.
/*if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
/*
if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
{
forAll (volPointSumWeights.boundaryField(), patchI)
{
@ -111,7 +112,8 @@ FieldField<Field, scalar> w(ptc.size());
);
}
}
}*/
}
*/
// Re-scale the weights for the current point
forAll (ptc, pointI)

View file

@ -1,13 +1,13 @@
if(moveMeshMethod == "inverseDistance")
{
{
# include "moveMeshInverseDistance.H"
}
else if(moveMeshMethod == "leastSquares")
{
}
else if(moveMeshMethod == "leastSquares")
{
# include "moveMeshLeastSquares.H"
}
else
{
}
else
{
FatalError << "move mesh method " << moveMeshMethod << " not recognised" << nl
<< "available methods are:" << nl
<< "inverseDistance" << nl

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Move solid mesh using inverse distance interpolation" << endl;
// Create point mesh
@ -36,10 +36,9 @@ if(min(J.internalField()) > 0)
//- correct edge interpolation
//- this is the stuff from edgeCorrectedVolPointInterpolation but
//- that class no longer works
# include "performEdgeCorrectedVolPointInterpolation.H"
# include "performEdgeCorrectedVolPointInterpolation.H"
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- see the effect of correctBCs
@ -56,10 +55,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
@ -47,10 +46,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -7,7 +7,8 @@ pointVectorField& pf = pointDU;
// Do the correction
//GeometricField<Type, pointPatchField, pointMesh> pfCorr
/*pointVectorField pfCorr
/*
pointVectorField pfCorr
(
IOobject
(
@ -23,7 +24,8 @@ pointVectorField& pf = pointDU;
//dimensioned<Type>("zero", pf.dimensions(), pTraits<Type>::zero),
dimensionedVector("zero", pf.dimensions(), vector::zero),
pf.boundaryField().types()
);*/
);
*/
pointVectorField pfCorr
(
@ -38,7 +40,7 @@ pointVectorField pfCorr
pMesh,
dimensionedVector("vector", dimLength, vector::zero),
"calculated"
);
);
//const labelList& ptc = boundaryPoints();
#include "findBoundaryPoints.H"
@ -96,25 +98,29 @@ forAll (ptc, pointI)
}
// Update coupled boundaries
/*forAll (pfCorr.boundaryField(), patchI)
/*
forAll (pfCorr.boundaryField(), patchI)
{
if (pfCorr.boundaryField()[patchI].coupled())
{
pfCorr.boundaryField()[patchI].initAddField();
}
}*/
}
*/
/*forAll (pfCorr.boundaryField(), patchI)
/*
forAll (pfCorr.boundaryField(), patchI)
{
if (pfCorr.boundaryField()[patchI].coupled())
{
pfCorr.boundaryField()[patchI].addField(pfCorr.internalField());
}
}*/
}
*/
//Info << "pfCorr: " << pfCorr << endl;
pfCorr.correctBoundaryConditions();
//Info << "pfCorr: " << pfCorr << endl;
pfCorr.correctBoundaryConditions();
//pfCorr.write();

View file

@ -1,12 +1,15 @@
//- the method used to calculate the explicit component of sigma
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << "Calculation of divDSigmaExp method: " << divDSigmaExpMethod << endl;
if(divDSigmaExpMethod != "standard"
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian")
{
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,37 +1,37 @@
if(divDSigmaExpMethod == "standard")
{
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU =
((I - n*n)&fvc::interpolate(gradDU));
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradDU = ((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(DU)&(I - n*n))
+ lambdaf*tr(shearGradDU&(I - n*n))*n
+ muf*(shearGradDU&n)
)
);
}
else if(divDSigmaExpMethod == "laplacian")
{
}
else if(divDSigmaExpMethod == "laplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, DU, "laplacian(DDU,DU)")
+ fvc::div
@ -40,8 +40,8 @@ if(divDSigmaExpMethod == "standard")
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -2,7 +2,7 @@
//- sigma explicit large strain explicit terms
//----------------------------------------------------//
if(divDSigmaLargeStrainExpMethod == "standard")
{
{
divDSigmaLargeStrainExp =
fvc::div
(
@ -11,9 +11,9 @@ if(divDSigmaLargeStrainExpMethod == "standard")
+ ((sigma + DSigma) & DF.T()),
"div(sigma)"
);
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
}
else if(divDSigmaLargeStrainExpMethod == "surface")
{
divDSigmaLargeStrainExp =
fvc::div
(
@ -22,13 +22,13 @@ if(divDSigmaLargeStrainExpMethod == "standard")
+ (mesh.Sf() & fvc::interpolate( sigma & DF.T() ))
+ (mesh.Sf() & fvc::interpolate(DSigma & DF.T() ))
);
}
else
{
}
else
{
FatalError
<< "divDSigmaLargeStrainMethod not found!"
<< exit(FatalError);
}
}
//- relax
divDSigmaLargeStrainExp.relax();

View file

@ -69,5 +69,4 @@ FieldField<Field, vector> extraVecs(ptc.size());
curExtraVectors.setSize(nFacesAroundPoint);
}
}

View file

@ -91,7 +91,8 @@ FieldField<Field, scalar> w(ptc.size());
// Update coupled boundaries
// Work-around for cyclic parallels.
/*if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
/*
if (Pstream::parRun() && !mesh.parallelData().cyclicParallel())
{
forAll (volPointSumWeights.boundaryField(), patchI)
{
@ -111,7 +112,8 @@ FieldField<Field, scalar> w(ptc.size());
);
}
}
}*/
}
*/
// Re-scale the weights for the current point
forAll (ptc, pointI)

View file

@ -1,15 +1,15 @@
if(moveMeshMethod == "inverseDistance")
{
{
# include "moveMeshInverseDistance.H"
}
else if(moveMeshMethod == "leastSquares")
{
}
else if(moveMeshMethod == "leastSquares")
{
# include "moveMeshLeastSquares.H"
}
else
{
}
else
{
FatalError << "move mesh method " << moveMeshMethod << " not recognised" << nl
<< "available methods are:" << nl
<< "inverseDistance" << nl
<< "leastSquares" << exit(FatalError);
}
}

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Move solid mesh using inverse distance interpolation" << endl;
// Create point mesh
@ -41,8 +41,7 @@ if(min(J.internalField()) > 0)
//pointDU.write();
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
// Move mesh
vectorField newPoints = mesh.allPoints();
@ -57,10 +56,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(mesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = mesh.allPoints();
@ -47,10 +46,10 @@ if(min(J.internalField()) > 0)
mesh.movePoints(newPoints);
mesh.V00();
mesh.moving(false);
}
else
{
}
else
{
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -7,7 +7,8 @@ pointVectorField& pf = pointDU;
// Do the correction
//GeometricField<Type, pointPatchField, pointMesh> pfCorr
/*pointVectorField pfCorr
/*
pointVectorField pfCorr
(
IOobject
(
@ -23,7 +24,8 @@ pointVectorField& pf = pointDU;
//dimensioned<Type>("zero", pf.dimensions(), pTraits<Type>::zero),
dimensionedVector("zero", pf.dimensions(), vector::zero),
pf.boundaryField().types()
);*/
);
*/
pointVectorField pfCorr
(
@ -38,7 +40,7 @@ pointVectorField pfCorr
pMesh,
dimensionedVector("vector", dimLength, vector::zero),
"calculated"
);
);
//const labelList& ptc = boundaryPoints();
#include "findBoundaryPoints.H"
@ -96,25 +98,29 @@ forAll (ptc, pointI)
}
// Update coupled boundaries
/*forAll (pfCorr.boundaryField(), patchI)
/*
forAll (pfCorr.boundaryField(), patchI)
{
if (pfCorr.boundaryField()[patchI].coupled())
{
pfCorr.boundaryField()[patchI].initAddField();
}
}*/
}
*/
/*forAll (pfCorr.boundaryField(), patchI)
/*
forAll (pfCorr.boundaryField(), patchI)
{
if (pfCorr.boundaryField()[patchI].coupled())
{
pfCorr.boundaryField()[patchI].addField(pfCorr.internalField());
}
}*/
}
*/
//Info << "pfCorr: " << pfCorr << endl;
pfCorr.correctBoundaryConditions();
//Info << "pfCorr: " << pfCorr << endl;
pfCorr.correctBoundaryConditions();
//pfCorr.write();

View file

@ -1,12 +1,15 @@
//- the method used to calculate the explicit component of sigma
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << "Calculation of divDSigmaExp method: " << divDSigmaExpMethod << endl;
if(divDSigmaExpMethod != "standard"
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian")
{
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -53,4 +53,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,37 +1,37 @@
if(divDSigmaExpMethod == "standard")
{
{
divDSigmaExp = fvc::div
(
mu*gradDU.T() + lambda*(I*tr(gradDU)) - (mu + lambda)*gradDU,
"div(sigma)"
);
}
else if(divDSigmaExpMethod == "surface")
{
}
else if(divDSigmaExpMethod == "surface")
{
divDSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradDU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradDU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradDU))
);
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU =
((I - n*n)&fvc::interpolate(gradDU));
}
else if(divDSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU = ((I - n*n)&fvc::interpolate(gradDU));
divDSigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(U)&(I - n*n))
+ lambdaf*tr(shearGradU&(I - n*n))*n
+ muf*(shearGradU&n)
)
);
}
else if(divDSigmaExpMethod == "expLaplacian")
{
}
else if(divDSigmaExpMethod == "expLaplacian")
{
divDSigmaExp =
- fvc::laplacian(mu + lambda, U, "laplacian(DU,U)")
+ fvc::div
@ -40,8 +40,8 @@ if(divDSigmaExpMethod == "standard")
+ lambda*(I*tr(gradDU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -1,9 +1,15 @@
//- how the explicit component of sigma is to be calculated
word divDSigmaExpMethod(mesh.solutionDict().subDict("stressedFoam").lookup("divDSigmaExp"));
Info << "Selecting divDSigmaExp calculation method " << divDSigmaExpMethod << endl;
if(divDSigmaExpMethod != "standard" && divDSigmaExpMethod != "surface" && divDSigmaExpMethod != "decompose" && divDSigmaExpMethod != "laplacian")
{
if
(
divDSigmaExpMethod != "standard"
&& divDSigmaExpMethod != "surface"
&& divDSigmaExpMethod != "decompose"
&& divDSigmaExpMethod != "laplacian"
)
{
FatalError << "divDSigmaExp method " << divDSigmaExpMethod << " not found!" << nl
<< "valid methods are:\nstandard\nsurface\ndecompose\nlaplacian"
<< exit(FatalError);
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -53,4 +53,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,37 +1,37 @@
if(divSigmaExpMethod == "standard")
{
{
divSigmaExp = fvc::div
(
mu*gradU.T() + lambda*(I*tr(gradU)) - (mu + lambda)*gradU,
"div(sigma)"
);
}
else if(divSigmaExpMethod == "surface")
{
}
else if(divSigmaExpMethod == "surface")
{
divSigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradU))
);
}
else if(divSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU =
((I - n*n)&fvc::interpolate(gradU));
}
else if(divSigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU = ((I - n*n)&fvc::interpolate(gradU));
divSigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(U)&(I - n*n))
+ lambdaf*tr(shearGradU&(I - n*n))*n
+ muf*(shearGradU&n)
)
);
}
else if(divSigmaExpMethod == "expLaplacian")
{
}
else if(divSigmaExpMethod == "expLaplacian")
{
divSigmaExp =
- fvc::laplacian(mu + lambda, U, "laplacian(DU,U)")
+ fvc::div
@ -40,8 +40,8 @@ if(divSigmaExpMethod == "standard")
+ lambda*(I*tr(gradU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "divSigmaExp method " << divSigmaExpMethod << " not found!" << endl;
}
}

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -1,37 +1,37 @@
if(sigmaExpMethod == "standard")
{
{
sigmaExp = fvc::div
(
mu*gradU.T() + lambda*(I*tr(gradU)) - (mu + lambda)*gradU,
"div(sigma)"
);
}
else if(sigmaExpMethod == "surface")
{
}
else if(sigmaExpMethod == "surface")
{
sigmaExp = fvc::div
(
muf*(mesh.Sf() & fvc::interpolate(gradU.T()))
+ lambdaf*(mesh.Sf() & I*fvc::interpolate(tr(gradU)))
- (muf + lambdaf)*(mesh.Sf() & fvc::interpolate(gradU))
);
}
else if(sigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU =
((I - n*n)&fvc::interpolate(gradU));
}
else if(sigmaExpMethod == "decompose")
{
surfaceTensorField shearGradU = ((I - n*n)&fvc::interpolate(gradU));
sigmaExp = fvc::div
(
mesh.magSf()
*(
*
(
- (muf + lambdaf)*(fvc::snGrad(U)&(I - n*n))
+ lambdaf*tr(shearGradU&(I - n*n))*n
+ muf*(shearGradU&n)
)
);
}
else if(sigmaExpMethod == "expLaplacian")
{
}
else if(sigmaExpMethod == "expLaplacian")
{
sigmaExp =
- fvc::laplacian(mu + lambda, U, "laplacian(DU,U)")
+ fvc::div
@ -40,8 +40,8 @@ if(sigmaExpMethod == "standard")
+ lambda*(I*tr(gradU)),
"div(sigma)"
);
}
else
{
}
else
{
FatalError << "sigmaExp method " << sigmaExpMethod << " not found!" << endl;
}
}

View file

@ -13,7 +13,8 @@
}
//- update patch
if(
if
(
U.boundaryField()[patchID].type()
== fixedValueFvPatchVectorField::typeName
)
@ -27,7 +28,6 @@
<< " to " << disp
<< endl;
}
else
{
SeriousError << "Loading Patch " << patchName << " is type "

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -33,4 +33,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -4,16 +4,16 @@ label plateID = mesh.boundaryMesh().findPatchID("plate");
label cylinderID = mesh.boundaryMesh().findPatchID("cylinder");
if(plateID == -1 || cylinderID == -1)
{
{
FatalError << "\n Cannot find the plate patch or the cylinder"
<< " patch to calculate lift and drag!"
<< exit(FatalError);
}
}
scalar lift = 0;
scalar drag = 0;
const vectorField& Sfp = mesh.boundary()[plateID].Sf();
const vectorField& Sfp = mesh.boundary()[plateID].Sf();
forAll(p.boundaryField()[plateID], facei)
{
vector faceForce = p.boundaryField()[plateID][facei] * Sfp[facei];
@ -21,7 +21,7 @@ forAll(p.boundaryField()[plateID], facei)
drag += vector(1,0,0) & faceForce;
}
const vectorField& Sfc = mesh.boundary()[cylinderID].Sf();
const vectorField& Sfc = mesh.boundary()[cylinderID].Sf();
forAll(p.boundaryField()[cylinderID], facei)
{
vector faceForce = p.boundaryField()[cylinderID][facei] * Sfc[facei];
@ -29,9 +29,9 @@ forAll(p.boundaryField()[cylinderID], facei)
drag += vector(1,0,0) & faceForce;
}
scalar width = 0.050668;
scalar width = 0.050668;
Info << "Total lift on the cylinder and plate boundaries is " << lift << " N, per unit width is " << (lift/width) << " N\n"
Info << "Total lift on the cylinder and plate boundaries is " << lift << " N, per unit width is " << (lift/width) << " N\n"
<< "Total drag on the cylinder and plate boundaries is " << drag << " N, per unit width is " << (drag/width) << " N\n"
<< endl;
}
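
In the same spirit, the loops above accumulate the pressure force over the plate and cylinder patches (as far as the lines shown here go, only the pressure contribution is summed), and the per-unit-width figures divide by the hard-coded spanwise width:

$$\mathbf{F} \;=\; \sum_{f\,\in\,\text{plate}\,\cup\,\text{cylinder}} p_f\,\mathbf{S}_f, \qquad \text{drag} = \mathbf{e}_x\cdot\mathbf{F}, \qquad \text{drag per unit width} = \frac{\mathbf{e}_x\cdot\mathbf{F}}{0.050668\ \text{m}},$$

with lift accumulated the same way from the transverse component.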

View file

@ -103,7 +103,7 @@
word solidDdtScheme
(
stressMesh.ddtScheme("ddt(" + DU.name() +')')
stressMesh.schemesDict().ddtScheme("ddt(" + DU.name() +')')
);
// if

View file

@ -131,12 +131,12 @@ int main(int argc, char *argv[])
# include "rotateSolidFields.H"
//# include "moveSolidMesh.H"
//# include "moveSolidMesh.H"
# include "moveSolidMeshLeastSquares.H"
# include "calculateStress.H"
//# include "calculateLiftAndDrag.H"
//# include "calculateLiftAndDrag.H"
Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"

View file

@ -2,7 +2,7 @@
//- move mesh
//--------------------------------------------------//
if(min(J.internalField()) > 0)
{
{
Info << "Moving mesh using least squares interpolation" << endl;
leastSquaresVolPointInterpolation pointInterpolation(stressMesh);
@ -31,8 +31,7 @@ if(min(J.internalField()) > 0)
pointInterpolation.interpolate(DU, pointDU);
const vectorField& pointDUI =
pointDU.internalField();
const vectorField& pointDUI = pointDU.internalField();
//- Move mesh
vectorField newPoints = stressMesh.allPoints();
@ -53,4 +52,4 @@ if(min(J.internalField()) > 0)
FatalErrorIn(args.executable())
<< "Negative Jacobian"
<< exit(FatalError);
}
}

View file

@ -68,23 +68,28 @@ Foam::contactPatchPair::contactPatchPair
// ),
// vectorField(cp_.mesh().boundaryMesh()[slavePatch_.index()].pointNormals().size(), vector::zero)
// ),
slavePointPenetration_(
slavePointPenetration_
(
cp_.mesh().boundaryMesh()[slavePatch_.index()].pointNormals().size(),
0.0
),
masterPointPenetration_(
masterPointPenetration_
(
cp_.mesh().boundaryMesh()[masterPatch_.index()].pointNormals().size(),
0.0
),
globalSlavePointPenetration_(
globalSlavePointPenetration_
(
cp_.mesh().pointZones()[cp_.mesh().faceZones().findZoneID(slaveFaceZoneName_)].size(),
0.0
),
globalMasterPointPenetration_(
globalMasterPointPenetration_
(
cp_.mesh().pointZones()[cp_.mesh().faceZones().findZoneID(masterFaceZoneName_)].size(),
0.0
),
oldTotalSlavePointForce_(
oldTotalSlavePointForce_
(
cp_.mesh().boundaryMesh()[slavePatch_.index()].pointNormals().size(),
vector::zero
),
@ -92,7 +97,8 @@ Foam::contactPatchPair::contactPatchPair
penetrationLimit_(readScalar(dict.lookup("penetrationLimit"))),
rigidMaster_(dict.lookup("rigidMaster")),
interpolationMethod_(dict.lookup("interpolationMethod")),
faceZoneMasterToSlaveInterpolator_(
faceZoneMasterToSlaveInterpolator_
(
cp_.mesh().faceZones()[cp_.mesh().faceZones().findZoneID(masterFaceZoneName_)](), // from
cp_.mesh().faceZones()[cp_.mesh().faceZones().findZoneID(slaveFaceZoneName_)](), // to zone
alg_,

View file

@ -34,7 +34,6 @@ Description
void Foam::contactPatchPair::correct()
{
//---------------------PRELIMINARIES---------------------------------//
const fvMesh& mesh = cp_.mesh();
const label& masterIndex = masterPatch_.index();
@ -42,28 +41,24 @@ void Foam::contactPatchPair::correct()
scalar maxMagSlaveTraction = 0.0;
contactIterNum_++;
//--------CALCULATE MASTER AND SLAVE PENETRATIONS----------------------//
scalarField& globalSlavePointPenetration = globalSlavePointPenetration_;
//scalarField& globalMasterPointPenetration = globalMasterPointPenetration_;
//- tell zoneToZone that mesh has moved, so the intersection will be recalculated
faceZoneMasterToSlaveInterpolator_.movePoints();
//- calculate intersection distances
//- this is the slowest part of the contact correction especially when the slavePatch
//- has many points. parallelisation of this step should be considered.
globalSlavePointPenetration
= faceZoneMasterToSlaveInterpolator_.pointDistanceToIntersection();
//globalMasterPointPenetration
//= faceZoneSlaveToMasterInterpolator.pointDistanceToIntersection();
globalSlavePointPenetration =
faceZoneMasterToSlaveInterpolator_.pointDistanceToIntersection();
//globalMasterPointPenetration =
// faceZoneSlaveToMasterInterpolator.pointDistanceToIntersection();
scalarField& slavePointPenetration = slavePointPenetration_;
//scalarField& masterPointPenetration = masterPointPenetration_;
forAll(slavePointPenetration, pointI)
{
//label pointGlobalLabel = slavePointLabels[pointI];
@ -95,8 +90,6 @@ void Foam::contactPatchPair::correct()
// }
//------CALCULATE SLAVE VERTEX FORCES BASED ON PENETRATION-------------//
//- approximation of penaltyFactor
//- this should be automatic, these numbers don't really matter, the scaleFactor
@ -111,9 +104,9 @@ void Foam::contactPatchPair::correct()
const vectorField& slavePointNormals = mesh.boundaryMesh()[slaveIndex].pointNormals();
vectorField& totalSlavePointForce = totalSlavePointForce_;
int numSlaveContactPoints = 0;
int numSlaveContactPointsReducing = 0;
int numSlavesUpdated = 0;
label numSlaveContactPoints = 0;
label numSlaveContactPointsReducing = 0;
label numSlavesUpdated = 0;
//- so the procs know the global min
//scalar minSlavePointPenetration = gMin(slavePointPenetration);
@ -134,7 +127,9 @@ void Foam::contactPatchPair::correct()
numSlavesUpdated++;
//- force is linearly dependent on penetration
totalSlavePointForce[pointI] +=
( slavePointNormals[pointI] * penaltyFactor * slavePointPenetration[pointI] );
(
slavePointNormals[pointI]*penaltyFactor*slavePointPenetration[pointI]
);
}
//- else if point is within contact tolerance then don't add any more force
else if(slavePointPenetration[pointI] < 0.0)
@ -151,7 +146,9 @@ void Foam::contactPatchPair::correct()
// point forces must be reduced slowly
totalSlavePointForce[pointI] +=
( slavePointNormals[pointI] * returnPenaltyFactor * slavePointPenetration[pointI] );
(
slavePointNormals[pointI]*returnPenaltyFactor*slavePointPenetration[pointI]
);
// if a tensile force develops
if((totalSlavePointForce[pointI] & slavePointNormals[pointI]) > 0.0)
@ -185,10 +182,10 @@ void Foam::contactPatchPair::correct()
//- for a deformable master
if(!rigidMaster_)
{
const label slaveFaceZoneID
= mesh.faceZones().findZoneID(slaveFaceZoneName_);
const label slavePatchStart
= mesh.boundaryMesh()[slaveIndex].start();
const label slaveFaceZoneID =
mesh.faceZones().findZoneID(slaveFaceZoneName_);
const label slavePatchStart =
mesh.boundaryMesh()[slaveIndex].start();
scalarField globalSlavePressure
(
@ -231,14 +228,13 @@ void Foam::contactPatchPair::correct()
);
}
//- exchange parallel data
reduce(globalMasterPressure, maxOp<scalarField>());
//Pout << "The max global master trac is " << max(globalMasterPressure) << endl;
const label masterPatchStart
= mesh.boundaryMesh()[masterIndex].start();
const label masterPatchStart =
mesh.boundaryMesh()[masterIndex].start();
scalarField masterPressure(mesh.boundaryMesh()[masterIndex].size(), 0.0);
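
The correction above is a penalty method: wherever a slave point penetrates the master face zone, a point force proportional to the penetration is added along the point normal (penaltyFactor), and when a point separates again its force is relaxed back more slowly (returnPenaltyFactor) and clipped so no tensile force remains. A simplified standalone sketch of that update for a few points, with illustrative factor values rather than the ones the solver derives:

    // Simplified penalty-contact update for slave points (1-D along each normal).
    #include <iostream>
    #include <vector>

    int main()
    {
        const double penaltyFactor = 1.0e9;        // illustrative contact stiffness
        const double returnPenaltyFactor = 1.0e8;  // slower relaxation when separating
        const double contactTol = -1.0e-6;         // small band treated as "in contact"

        // signed penetration for a few slave points (negative = penetrating)
        std::vector<double> penetration = {-2.0e-5, -5.0e-7, 1.0e-4};
        std::vector<double> normalForce(penetration.size(), 0.0);

        for (std::size_t i = 0; i < penetration.size(); ++i)
        {
            if (penetration[i] < contactTol)
            {
                // force grows linearly with penetration
                normalForce[i] += penaltyFactor*penetration[i];
            }
            else if (penetration[i] > 0.0 && normalForce[i] < 0.0)
            {
                // point has separated: reduce the force slowly, clip any tension
                normalForce[i] += returnPenaltyFactor*penetration[i];
                if (normalForce[i] > 0.0) normalForce[i] = 0.0;
            }
            // points inside the tolerance band keep their current force

            std::cout << "point " << i << " normal force " << normalForce[i] << "\n";
        }
    }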

View file

@ -147,8 +147,6 @@ void contactProblem::correct()
}
//**********************CONTACT AREA FUNCTION***********************************//
tmp<volScalarField> contactProblem::contactArea() const
{
@ -189,19 +187,19 @@ tmp<volScalarField> contactProblem::contactArea() const
label slaveIndex = contacts[contactI].slavePatch().index();
scalarField masterFrac = contacts[contactI].masterTouchFraction();
scalarField slaveFrac = contacts[contactI].slaveTouchFraction();
scalar contactAreaMaster =
gSum
scalar contactAreaMaster = gSum
(
masterFrac *
mag(
mag
(
mesh().Sf().boundaryField()[masterIndex]
)
);
scalar contactAreaSlave =
gSum
scalar contactAreaSlave = gSum
(
slaveFrac *
mag(
mag
(
mesh().Sf().boundaryField()[slaveIndex]
)
);
@ -275,8 +273,6 @@ void contactProblem::contactPointForce(pointVectorField& cPointForce)
}
tmp<volScalarField> contactProblem::contactPressure() const
{
tmp<volScalarField> tcPress
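
The contactArea() member shown earlier in this file weights each boundary face area magnitude by the local touch fraction and sums over the patch (gSum also reduces across processors). A standalone sketch of the same sum with invented fractions and face areas:

    // Sketch: contact area as sum of (touch fraction * face area magnitude).
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<double> touchFraction = {1.0, 0.5, 0.0, 0.25};    // per face, 0..1
        std::vector<double> faceAreaMag   = {2e-4, 2e-4, 3e-4, 1e-4}; // |Sf|, m^2

        double contactArea = 0.0;
        for (std::size_t i = 0; i < touchFraction.size(); ++i)
        {
            contactArea += touchFraction[i]*faceAreaMag[i];
        }
        std::cout << "contact area " << contactArea << " m^2\n";
    }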

View file

@ -317,11 +317,11 @@ void solidTractionFreeFvPatchVectorField::write(Ostream& os) const
template<>
const char* Foam::NamedEnum<Foam::solidTractionFreeFvPatchVectorField::nonLinearType, 3>::names[] =
{
{
"off",
"updatedLagrangian",
"totalLagrangian"
};
};
const Foam::NamedEnum<Foam::solidTractionFreeFvPatchVectorField::nonLinearType, 3>
Foam::solidTractionFreeFvPatchVectorField::nonLinearNames_;

View file

@ -38,8 +38,8 @@ defineTypeNameAndDebug(leastSquaresVolPointInterpolation, 0);
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void leastSquaresVolPointInterpolation::calcA(List<scalarSquareMatrix>& A) const
{
void leastSquaresVolPointInterpolation::calcA(List<scalarSquareMatrix>& A) const
{
//Info << "leastSquaresVolPointInterpolation calcA" << endl;
const fvMesh& mesh = mesh_;
@ -180,11 +180,11 @@ defineTypeNameAndDebug(leastSquaresVolPointInterpolation, 0);
}
} //- end of else
} //- end of forAll boundary
}
}
void leastSquaresVolPointInterpolation::calcB(List<Field<vector> >& B, const GeometricField<vector, fvPatchField, volMesh>& vf) const
{
void leastSquaresVolPointInterpolation::calcB(List<Field<vector> >& B, const GeometricField<vector, fvPatchField, volMesh>& vf) const
{
//Info << "leastSquaresVolPointInterpolation calcB" << endl;
const fvMesh& mesh = mesh_;
@ -298,15 +298,15 @@ defineTypeNameAndDebug(leastSquaresVolPointInterpolation, 0);
}
} //- end of forAll boundary
} //- end of for all components
}
}
void leastSquaresVolPointInterpolation::interpolate
(
void leastSquaresVolPointInterpolation::interpolate
(
const GeometricField<vector, fvPatchField, volMesh>& vf,
GeometricField<vector, pointPatchField, pointMesh>& pf //Field<vector>& pf
) const
{
) const
{
//Info << "Interpolating cell to point using leastSquaresVolPointInterpolation" << endl;
const fvMesh& mesh = mesh_;
@ -361,19 +361,19 @@ defineTypeNameAndDebug(leastSquaresVolPointInterpolation, 0);
// * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * * //
leastSquaresVolPointInterpolation::leastSquaresVolPointInterpolation(const fvMesh& vm)
:
leastSquaresVolPointInterpolation::leastSquaresVolPointInterpolation(const fvMesh& vm)
:
mesh_(vm) //,
//A_(vm.points().size(), scalarSquareMatrix(4, 0.0)),
//B_(vm.points().size(), Field<vector>(4, vector::zero))
{
{
//calcA();
}
}
// * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * * //
leastSquaresVolPointInterpolation::~leastSquaresVolPointInterpolation()
{}
leastSquaresVolPointInterpolation::~leastSquaresVolPointInterpolation()
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
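
leastSquaresVolPointInterpolation fits, for every mesh point, a small least-squares polynomial through the surrounding cell-centre values and evaluates it at the point; the 4x4 matrices A and 4-component fields B above suggest a fit with a constant plus linear terms in x, y and z. A standalone sketch of that idea for one point and one scalar component, assembling and solving the normal equations directly (stencil offsets and values are invented):

    // Sketch: least-squares fit v ~ a0 + a1*dx + a2*dy + a3*dz through
    // neighbouring cell-centre values, evaluated at the point (dx=dy=dz=0).
    #include <array>
    #include <iostream>
    #include <vector>

    int main()
    {
        struct Sample { double dx, dy, dz, v; };   // offsets from the point, value

        std::vector<Sample> cells = {
            { 0.01, 0.00, 0.00, 1.0}, {-0.01, 0.00, 0.00, 0.8},
            { 0.00, 0.01, 0.00, 1.1}, { 0.00,-0.01, 0.00, 0.9},
            { 0.00, 0.00, 0.01, 1.0}, { 0.00, 0.00,-0.01, 1.0}
        };

        std::array<std::array<double, 5>, 4> M{};  // augmented system [A | b]

        for (const Sample& s : cells)
        {
            const std::array<double, 4> phi = {1.0, s.dx, s.dy, s.dz};
            for (int i = 0; i < 4; ++i)
            {
                for (int j = 0; j < 4; ++j) M[i][j] += phi[i]*phi[j];
                M[i][4] += phi[i]*s.v;
            }
        }

        // Gaussian elimination (no pivoting; fine for this well-posed sketch)
        for (int k = 0; k < 4; ++k)
            for (int i = k + 1; i < 4; ++i)
            {
                const double f = M[i][k]/M[k][k];
                for (int j = k; j < 5; ++j) M[i][j] -= f*M[k][j];
            }

        std::array<double, 4> a{};
        for (int i = 3; i >= 0; --i)
        {
            double s = M[i][4];
            for (int j = i + 1; j < 4; ++j) s -= M[i][j]*a[j];
            a[i] = s/M[i][i];
        }

        std::cout << "interpolated point value = " << a[0] << "\n";  // a0 = fit at the point
    }

In the library the per-point matrices are precomputed and reused; here the system is assembled and solved on the fly only to show the fit.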

View file

@ -312,7 +312,6 @@ void plasticityModel::correct()
{
betaPatch[faceI] = 0;
curDEpsEPred = DEpsilonPatch[faceI];
if
(
(DEpsilonEqPatch[faceI] >= 0)
@ -448,7 +447,6 @@ void plasticityModel::updateYieldStress()
bool plasticityModel::read()
{
if (regIOobject::read())
{
return true;

View file

@ -37,13 +37,15 @@ solvers
}
/* U
/*
U
{
solver PCG;
preconditioner DIC;
tolerance 1e-09;
relTol 0.99;
}*/
}
*/
}
stressedFoam

View file

@ -31,7 +31,6 @@ boundaryField
cohesive
{
type cohesiveZone;
cohesiveLaw Dugdale;
relaxationFactor 0.9;

View file

@ -273,6 +273,7 @@ int main(int argc, char *argv[])
mesh,
dimensionedScalar("zero", dimless, 0.0)
);
forAll(theta.internalField(), celli)
{
const scalar& x = mesh.C().internalField()[celli][vector::X];
@ -311,9 +312,12 @@ int main(int argc, char *argv[])
{
const scalar& t = theta.internalField()[celli];
rotMat.internalField()[celli] = tensor(::cos(t), ::sin(t), 0,
rotMat.internalField()[celli] = tensor
(
::cos(t), ::sin(t), 0,
-::sin(t), ::cos(t), 0,
0, 0, 1);
0, 0, 1
);
}
forAll(rotMat.boundaryField(), patchi)
@ -322,9 +326,12 @@ int main(int argc, char *argv[])
{
const scalar& t = theta.boundaryField()[patchi][facei];
rotMat.boundaryField()[patchi][facei] = tensor(::cos(t), ::sin(t), 0,
rotMat.boundaryField()[patchi][facei] = tensor
(
::cos(t), ::sin(t), 0,
-::sin(t), ::cos(t), 0,
0, 0, 1);
0, 0, 1
);
}
}
@ -350,18 +357,16 @@ int main(int argc, char *argv[])
const tensor& rot = rotMat.internalField()[celli];
symmTensor sigmaCart(r, 0, 0,
t, 0,
z);
symmTensor sigmaCart(r, 0, 0, t, 0, z);
sigma.internalField()[celli] =
symm(rot.T() & sigmaCart & rot);
//- for general 2-D plane strain problems, the axial stress is given by this:
//- (which is not equal to the solution by Timoshenko... hmmmnn)
// sigma.internalField()[celli][symmTensor::ZZ] =
// 0.3*(sigma.internalField()[celli][symmTensor::XX] + sigma.internalField()[celli][symmTensor::YY])
// - E*alpha*(T.internalField()[celli]);
// sigma.internalField()[celli][symmTensor::ZZ] =
// 0.3*(sigma.internalField()[celli][symmTensor::XX] + sigma.internalField()[celli][symmTensor::YY])
// - E*alpha*(T.internalField()[celli]);
}
forAll(sigma.boundaryField(), patchi)
@ -374,16 +379,12 @@ int main(int argc, char *argv[])
const tensor& rot = rotMat.boundaryField()[patchi][facei];
symmTensor sigmaCart(r, 0, 0,
t, 0,
z);
symmTensor sigmaCart(r, 0, 0, t, 0, z);
sigma.boundaryField()[patchi][facei] =
symm(rot.T() & sigmaCart & rot);
}
}
Info << "\nWriting analytical sigma tensor" << endl;
sigma.write();
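
The analytical field is assembled from the cylindrical components (sigma_rr, sigma_theta, sigma_zz on the diagonal) and rotated into Cartesian axes as sigma = R^T . sigma_cyl . R, with R the rotation through theta about z built from the cell-centre coordinates above. A standalone sketch of that transform for one cell, with placeholder component values:

    // Sketch: rotate a stress tensor given in cylindrical (r, theta, z) components
    // into Cartesian axes: sigmaCart = R^T * sigmaCyl * R, R = rotation about z.
    #include <array>
    #include <cmath>
    #include <iostream>

    using Mat3 = std::array<std::array<double, 3>, 3>;

    Mat3 mul(const Mat3& a, const Mat3& b)
    {
        Mat3 c{};
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                for (int k = 0; k < 3; ++k)
                    c[i][j] += a[i][k]*b[k][j];
        return c;
    }

    Mat3 transpose(const Mat3& a)
    {
        Mat3 t{};
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                t[i][j] = a[j][i];
        return t;
    }

    int main()
    {
        const double t = std::atan2(0.3, 0.4);       // theta of this cell centre
        const double r = 50.0, th = -20.0, z = 9.0;  // placeholder sigma_rr, sigma_tt, sigma_zz

        const Mat3 R = {{{ std::cos(t), std::sin(t), 0.0},
                         {-std::sin(t), std::cos(t), 0.0},
                         { 0.0,         0.0,         1.0}}};

        const Mat3 sigmaCyl = {{{r,   0.0, 0.0},
                                {0.0, th,  0.0},
                                {0.0, 0.0, z  }}};

        const Mat3 sigmaCart = mul(mul(transpose(R), sigmaCyl), R);

        for (const auto& row : sigmaCart)
            std::cout << row[0] << " " << row[1] << " " << row[2] << "\n";
    }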

View file

@ -110,14 +110,10 @@ int main(int argc, char *argv[])
label refFace = cells[celli][0];
//- insert first four abaqusCellPoints
abaqusCellPoints[celli][0]
= (faces[refFace][3] + 1);
abaqusCellPoints[celli][1]
= (faces[refFace][2] + 1);
abaqusCellPoints[celli][2]
= (faces[refFace][1] + 1);
abaqusCellPoints[celli][3]
= (faces[refFace][0] + 1);
abaqusCellPoints[celli][0] = (faces[refFace][3] + 1);
abaqusCellPoints[celli][1] = (faces[refFace][2] + 1);
abaqusCellPoints[celli][2] = (faces[refFace][1] + 1);
abaqusCellPoints[celli][3] = (faces[refFace][0] + 1);
//- now find the opposite face in the cell
//Info << "Finding oppFace" << endl << endl;
@ -181,29 +177,25 @@ int main(int argc, char *argv[])
label globalPpi = oppFacePPs[oppFacePointi];
if(globalPpi == faces[refFace][0])
{
abaqusCellPoints[celli][7]
= globalPointi + 1;
abaqusCellPoints[celli][7] = globalPointi + 1;
ppFound = true;
break;
}
else if(globalPpi == faces[refFace][1])
{
abaqusCellPoints[celli][6]
= globalPointi + 1;
abaqusCellPoints[celli][6] = globalPointi + 1;
ppFound = true;
break;
}
else if(globalPpi == faces[refFace][2])
{
abaqusCellPoints[celli][5]
= globalPointi + 1;
abaqusCellPoints[celli][5] = globalPointi + 1;
ppFound = true;
break;
}
else if(globalPpi == faces[refFace][3])
{
abaqusCellPoints[celli][4]
= globalPointi + 1;
abaqusCellPoints[celli][4] = globalPointi + 1;
ppFound = true;
break;
}
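
The exporter builds each hexahedral element's connectivity by taking the reference face vertices in reverse order and shifting them to 1-based numbering for Abaqus, then matching the opposite face's points back to the reference face to fill positions 5 to 8. A small sketch of the index manipulation for the first four positions, with an invented face:

    // Sketch: first four Abaqus element nodes from a reference face:
    // reversed vertex order, converted from 0-based to 1-based numbering.
    #include <array>
    #include <iostream>

    int main()
    {
        const std::array<int, 4> refFace = {12, 45, 46, 13};   // 0-based point labels

        std::array<int, 8> abaqusCellPoints{};
        for (int i = 0; i < 4; ++i)
        {
            abaqusCellPoints[i] = refFace[3 - i] + 1;           // reverse + 1-based
        }

        for (int i = 0; i < 4; ++i) std::cout << abaqusCellPoints[i] << " ";
        std::cout << "\n";   // prints: 14 47 46 13
    }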

View file

@ -110,7 +110,6 @@ int main(int argc, char *argv[])
gradDU = fvc::grad(DU);
# include "calculateDEpsilonDSigma.H"
}
while
(

View file

@ -1,5 +1,5 @@
if (runTime.outputTime())
{
{
volScalarField epsilonEq
(
IOobject
@ -53,4 +53,4 @@ if (runTime.outputTime())
<< endl;
runTime.write();
}
}

View file

@ -112,12 +112,6 @@ Foam::axisCoordinateRotation::axisCoordinateRotation
scalar theta = readScalar(dict.lookup("theta"));
scalar psi = readScalar(dict.lookup("psi"));
bool inDegrees = true;
if (dict.found("degrees"))
{
inDegrees = Switch(dict.lookup("degrees"));
}
calcTransform
(
phi,

View file

@ -55,7 +55,7 @@ modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
@ -111,7 +111,7 @@ modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
@ -158,7 +158,7 @@ Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
@ -216,7 +216,7 @@ instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
@ -267,7 +267,7 @@ Library will still fall under Section 6.)
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
@ -329,7 +329,7 @@ restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
@ -370,7 +370,7 @@ subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
@ -422,7 +422,7 @@ conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
@ -456,7 +456,7 @@ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest

View file

@ -27,6 +27,7 @@ License
#include "dimensionedScalar.H"
#include "equationReader.H"
#include "equationOperation.H"
#include "error.H"
//#include "equationOperationList.H"
class dimensionedScalar;
@ -396,6 +397,16 @@ Foam::word Foam::equationOperation::opName
return "min";
case otstabilise:
return "stabilise";
default:
FatalErrorIn
(
"Foam::word Foam::equationOperation::opName"
"(const Foam::equationOperation::operationType& op)"
)
<< "invalid operation"
<< exit(FatalError);
return "invalid";
}
}
@ -423,6 +434,16 @@ Foam::word Foam::equationOperation::sourceName
return "equation";
case slstorage:
return "memory";
default:
FatalErrorIn
(
"Foam::word Foam::equationOperation::opName"
"(const Foam::equationOperation::operationType& op)"
)
<< "invalid source"
<< exit(FatalError);
return "invalid";
}
}
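
Both switch statements gain a default branch that raises a fatal error and returns a dummy value, so an unexpected enum value can never fall off the end of the function silently. The same defensive pattern in a minimal standalone form, using a plain exception in place of OpenFOAM's FatalErrorIn:

    // Sketch: defensive default case in a switch over an enum.
    #include <iostream>
    #include <stdexcept>
    #include <string>

    enum class Operation { plus, minus, times };

    std::string opName(Operation op)
    {
        switch (op)
        {
            case Operation::plus:  return "plus";
            case Operation::minus: return "minus";
            case Operation::times: return "times";
            default:
                // unreachable for valid input; keeps every path returning a value
                throw std::invalid_argument("opName: invalid operation");
        }
    }

    int main()
    {
        std::cout << opName(Operation::minus) << "\n";
    }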

View file

@ -81,7 +81,6 @@ Foam::tmp<Foam::scalarField> Foam::mixingPlaneGAMGInterface::agglomerateCoeffs
// AMG agglomeration missing
notImplemented("mixingPlaneGAMGInterface::agglomerateCoeffs");
tmp<scalarField> tcoarseCoeffs(new scalarField(size(), 0.0));
scalarField& coarseCoeffs = tcoarseCoeffs();
return tcoarseCoeffs;
}

View file

@ -102,7 +102,6 @@ void Foam::polyMesh::initMesh()
nCells++;
label nUsedFaces = 0;
label nIntFaces = 0;
// Use patch info if provided, use all faces otherwise
if (boundary_.size())
@ -110,13 +109,11 @@ void Foam::polyMesh::initMesh()
nUsedFaces =
boundary_[boundary_.size() - 1].start()
+ boundary_[boundary_.size() - 1].size();
nIntFaces = boundary_[0].start();
}
else
{
// No patch info. Assume all faces are used.
nUsedFaces = owner_.size();
nIntFaces = neighbour_.size();
}

View file

@ -57,7 +57,8 @@ bool dynamicTopoFvMesh::meshQuality
label nCells = 0, minCell = -1;
scalar maxQuality = -GREAT;
scalar minQuality = GREAT;
scalar cQuality, meanQuality = 0.0;
scalar cQuality = 0.0;
scalar meanQuality = 0.0;
// Track slivers
bool sliversAbsent = true;
@ -1682,7 +1683,8 @@ bool dynamicTopoFvMesh::checkCollapse
) const
{
label faceIndex = -1;
scalar cQuality = 0.0, oldVolume = 0.0;
scalar cQuality = 0.0;
scalar oldVolume = 0.0;
const cell& cellToCheck = cells_[cellIndex];
// Look for a face that doesn't contain 'pointIndex'
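
Splitting the declarations is more than a style change: in a combined declaration such as 'scalar cQuality, meanQuality = 0.0;' only meanQuality is initialised and cQuality starts with an indeterminate value. A tiny sketch of the pitfall and of the split form used above:

    // Sketch: an initialiser in a combined declaration applies to one variable only.
    #include <iostream>

    int main()
    {
        // double a, b = 0.0;   // would leave 'a' uninitialised (indeterminate)

        double a = 0.0;         // the split form initialises both explicitly
        double b = 0.0;

        std::cout << a << " " << b << "\n";
    }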

View file

@ -180,7 +180,8 @@ void eMesh::calcEdgePoints() const
// NOTE: Will work only on tetrahedral meshes!
bool found;
label faceIndex = -1, cellIndex = -1;
label faceIndex = -1;
label cellIndex = -1;
const labelList& owner = mesh_.faceOwner();
const labelList& neighbour = mesh_.faceNeighbour();
const cellList& cells = mesh_.cells();

View file

@ -126,16 +126,14 @@ Foam::tmp<Foam::scalarField> Foam::cellQuality::skewness() const
forAll (nei, faceI)
{
scalar dOwn =
mag
scalar dOwn = mag
(
(faceCtrs[faceI] - cellCtrs[own[faceI]])
&areas[faceI]
)
/mag(areas[faceI]);
scalar dNei =
mag
scalar dNei = mag
(
(cellCtrs[nei[faceI]] - faceCtrs[faceI])
&areas[faceI]
@ -176,8 +174,8 @@ Foam::tmp<Foam::scalarField> Foam::cellQuality::skewness() const
+ ((faceCentres[faceI] - cellCtrs[faceCells[faceI]])&n)*n;
scalar skewness =
mag(faceCentres[faceI] - faceIntersection)/
(
mag(faceCentres[faceI] - faceIntersection)
/(
mag(faceCentres[faceI] - cellCtrs[faceCells[faceI]])
+ VSMALL
);
@ -273,16 +271,14 @@ Foam::tmp<Foam::scalarField> Foam::cellQuality::faceSkewness() const
forAll (nei, faceI)
{
scalar dOwn =
mag
scalar dOwn = mag
(
(faceCtrs[faceI] - cellCtrs[own[faceI]])
&areas[faceI]
)
/mag(areas[faceI]);
scalar dNei =
mag
scalar dNei = mag
(
(cellCtrs[nei[faceI]] - faceCtrs[faceI])
&areas[faceI]
@ -322,8 +318,8 @@ Foam::tmp<Foam::scalarField> Foam::cellQuality::faceSkewness() const
+ ((faceCentres[faceI] - cellCtrs[faceCells[faceI]])&n)*n;
result[globalFaceI++] =
mag(faceCentres[faceI] - faceIntersection)/
(
mag(faceCentres[faceI] - faceIntersection)
/(
mag(faceCentres[faceI] - cellCtrs[faceCells[faceI]])
+ VSMALL
);

View file

@ -14,13 +14,10 @@
if(valves_[valveI].curLift() >= valves_[valveI].deformationLift())
{
bool isMoving(false);
if(mag(valves_[valveI].curVelocity()) > 0)
{
Info<< "Valve n. " << valveI << " is moving with velocity = "
<< valves_[valveI].curVelocity() << endl;
isMoving = true;
}
Info<< "Valve displacement for valve " << valveI << " = "

View file

@ -94,7 +94,6 @@ void Foam::layerAR::addZonesAndModifiers()
bool foundAtLeastOne = false;
scalar zHigher = GREAT;
scalar zLower = GREAT;
scalar dh = GREAT;
scalar dl = GREAT;
@ -108,7 +107,6 @@ void Foam::layerAR::addZonesAndModifiers()
{
if (zPistV - zc > 0 && zPistV - zc < dl)
{
zLower = zc;
dl = zPistV - zc;
}

View file

@ -95,7 +95,6 @@ void Foam::layerARGambit::addZonesAndModifiers()
bool foundAtLeastOne = false;
scalar zHigher = GREAT;
scalar zLower = GREAT;
scalar dh = GREAT;
scalar dl = GREAT;
@ -109,7 +108,6 @@ void Foam::layerARGambit::addZonesAndModifiers()
{
if (zPistV - zc > 0 && zPistV - zc < dl)
{
zLower = zc;
dl = zPistV - zc;
}

Some files were not shown because too many files have changed in this diff