Fixes to previous merge commits

Henrik Rusche 2013-07-03 19:34:16 +02:00
parent 031d180f63
commit 09fbf2ef28
20 changed files with 935 additions and 398 deletions

ThirdParty/.gitignore vendored Normal file
View file

@ -0,0 +1,19 @@
# git-ls-files --others --exclude-from=.git/info/exclude
# Lines that start with '#' are comments.
# editor and misc backup files - anywhere
*~
.*~
*.bak
*.bak[0-9][0-9]
*.orig
*.orig[0-9][0-9]
\#*\#
# ignore the build
rpmBuild/BUILD
rpmBuild/RPMS
rpmBuild/rpmDB
rpmBuild/tmp
# end-of-file

View file

@ -79,7 +79,7 @@ alias _foamSource 'if ($?FOAM_VERBOSE && $?prompt) echo "Sourcing: \!*"; source
# Add in preset user or site preferences:
set foamPrefs=`$WM_PROJECT_DIR/bin/foamEtcFile prefs.csh`
if ( $status == 0 ) then
if ($foamPrefs) then
if ( $?foamPrefs ) then
_foamSource $foamPrefs
endif
endif
@ -110,7 +110,7 @@ if ( ! $?WM_OSTYPE ) setenv WM_OSTYPE POSIX
# Compiler: set to Gcc, Gcc43 or Icc (for Intel's icc)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if ( ! $?WM_COMPILER ) setenv WM_COMPILER Gcc
setenv WM_COMPILER Gcc
#setenv WM_COMPILER Gcc
#setenv WM_COMPILER Icc
setenv WM_COMPILER_ARCH
@ -129,7 +129,7 @@ if ( ! $?WM_PRECISION_OPTION ) setenv WM_PRECISION_OPTION DP
# WM_COMPILE_OPTION = Opt | Debug | Prof
if ( ! $?WM_COMPILE_OPTION ) setenv WM_COMPILE_OPTION Opt
setenv WM_COMPILE_OPTION Opt
#setenv WM_COMPILE_OPTION Opt
#setenv WM_COMPILE_OPTION Debug
# WM_MPLIB = | OPENMPI | MPICH | MPICH-GM | HPMPI | GAMMA | MPI | QSMPI

View file

@ -140,26 +140,13 @@
#setenv PARAVIEW_DIR path_to_system_installed_paraview
#setenv PARAVIEW_BIN_DIR $PARAVIEW_DIR/bin
# System installed bison
#setenv BISON_SYSTEM 1
# System installed flex
#setenv FLEX_SYSTEM 1
# System installed m4
#setenv M4_SYSTEM 1
# System installed PyFoam
#setenv PYFOAM_SYSTEM 1
# System installed swak4Foam
#setenv SWAK4FOAM_SYSTEM 1
# Specify ParaView version
# ~~~~~~~~~~~~~~~~~~~~~~~~
#setenv ParaView_VERSION git # eg, cvs/git version
#setenv ParaView_MAJOR 3.7
# System identifier for the OpenFOAM CDash test harness on openfoam-extend
#
# By default, your system FQN/hostname will be used as the system identifier

View file

@ -151,17 +151,12 @@ compilerInstall=System
# System installed m4
#export M4_SYSTEM=1
# System installed PyFoam
#export PYFOAM_SYSTEM=1
# System installed swak4Foam
#export SWAK4FOAM_SYSTEM=1
# Specify ParaView version
# ~~~~~~~~~~~~~~~~~~~~~~~~
#export ParaView_VERSION=git # eg, cvs/git version
#export ParaView_MAJOR=3.7
# System identifier for the OpenFOAM CDash test harness on openfoam-extend
#
# By default, your system FQN/hostname will be used as the system identifier

View file

@ -450,21 +450,23 @@ endif
# QT
# ~~~~~
if ( $?QT_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/qt-everywhere-opensource-src-4.7.0/platforms/$WM_OPTIONS )then
_foamSource $WM_THIRD_PARTY_DIR/packages/qt-everywhere-opensource-src-4.7.0/platforms/$WM_OPTIONS/etc/qt-everywhere-opensource-src-4.7.0.csh
if ( $?QT_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/qt-everywhere-opensource-src-4.7.4/platforms/$WM_OPTIONS )then
_foamSource $WM_THIRD_PARTY_DIR/packages/qt-everywhere-opensource-src-4.7.4/platforms/$WM_OPTIONS/etc/qt-everywhere-opensource-src-4.7.4.csh
#if ( $?QT_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/qt-everywhere-opensource-src-4.7.0/platforms/$WM_OPTIONS )then
# _foamSource $WM_THIRD_PARTY_DIR/packages/qt-everywhere-opensource-src-4.7.0/platforms/$WM_OPTIONS/etc/qt-everywhere-opensource-src-4.7.0.csh
endif
# PARAVIEW
# ~~~~~
if ( $?PARAVIEW_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/ParaView-3.12.0/platforms/$WM_OPTIONS ) then
_foamSource $WM_THIRD_PARTY_DIR/packages/ParaView-3.12.0/platforms/$WM_OPTIONS/etc/ParaView-3.12.0.csh
#if ( $?PARAVIEW_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/ParaView-3.8.1/platforms/$WM_OPTIONS ) then
# _foamSource $WM_THIRD_PARTY_DIR/packages/ParaView-3.8.1/platforms/$WM_OPTIONS/etc/ParaView-3.8.1.csh
endif
#if ( $?PARAVIEW_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/ParaView-3.14.1 ) then
#if ( $?PARAVIEW_SYSTEM == 0 && -e "$WM_THIRD_PARTY_DIR"/packages/ParaView-3.14.1/platforms/$WM_OPTIONS ) then
# _foamSource $WM_THIRD_PARTY_DIR/packages/ParaView-3.14.1/platforms/$WM_OPTIONS/etc/ParaView-3.14.1.csh
#endif
endif
if ( $WM_ARCH == "darwinIntel" || $WM_ARCH == "darwinIntel64" ) then
setenv DYLD_LIBRARY_PATH ${LD_LIBRARY_PATH}

View file

@ -487,13 +487,10 @@ export MPI_BUFFER_SIZE
# Load cmake
# ~~~~~~~~~~
<<<<<<< Temporary merge branch 1
[ -z "$CMAKE_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/cmake-2.8.8/platforms/$WM_OPTIONS ] && {
_foamSource $WM_THIRD_PARTY_DIR/packages/cmake-2.8.8/platforms/$WM_OPTIONS/etc/cmake-2.8.8.sh
=======
[ -z "$CMAKE_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/cmake-2.8.6/platforms/$WM_OPTIONS ] && {
_foamSource $WM_THIRD_PARTY_DIR/packages/cmake-2.8.6/platforms/$WM_OPTIONS/etc/cmake-2.8.6.sh
>>>>>>> Temporary merge branch 2
#[ -z "$CMAKE_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/cmake-2.8.6/platforms/$WM_OPTIONS ] && {
# _foamSource $WM_THIRD_PARTY_DIR/packages/cmake-2.8.6/platforms/$WM_OPTIONS/etc/cmake-2.8.6.sh
}
[ "$FOAM_VERBOSE" -a "$PS1" ] && echo " CMAKE_DIR is initialized to: $CMAKE_DIR"
@ -521,8 +518,8 @@ export MPI_BUFFER_SIZE
# Load zoltan
# ~~~~~~~~~~
[ -z "$ZOLTAN_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/zoltan-3.5/platforms/$WM_OPTIONS ] && {
_foamSource $WM_THIRD_PARTY_DIR/packages/zoltan-3.5/platforms/$WM_OPTIONS/etc/zoltan-3.5.sh
[ -z "$ZOLTAN_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/zoltan_3.5 ] && {
_foamSource $WM_THIRD_PARTY_DIR/packages/zoltan_3.5/platforms/$WM_OPTIONS/etc/zoltan_3.5.sh
}
[ "$FOAM_VERBOSE" -a "$PS1" ] && echo " ZOLTAN_DIR is initialized to: $ZOLTAN_DIR"
@ -554,6 +551,8 @@ export MPI_BUFFER_SIZE
# ~~~~~~~~~~~~~
[ -z "$PARAVIEW_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/ParaView-3.12.0/platforms/$WM_OPTIONS ] && {
_foamSource $WM_THIRD_PARTY_DIR/packages/ParaView-3.12.0/platforms/$WM_OPTIONS/etc/ParaView-3.12.0.sh
#[ -z "$PARAVIEW_SYSTEM" ] && [ -e $WM_THIRD_PARTY_DIR/packages/ParaView-3.8.1/platforms/$WM_OPTIONS ] && {
# _foamSource $WM_THIRD_PARTY_DIR/packages/ParaView-3.8.1/platforms/$WM_OPTIONS/etc/ParaView-3.8.1.sh
}
[ "$FOAM_VERBOSE" -a "$PS1" ] && echo " PARAVIEW_DIR is initialized to: $PARAVIEW_DIR"

View file

@ -48,7 +48,8 @@ template
class Type
>
void ProcessorPointPatchField
<PatchField, Mesh, PointPatch, ProcessorPointPatch, MatrixType, Type>::resizeBuf
<PatchField, Mesh, PointPatch, ProcessorPointPatch, MatrixType, Type>::
resizeBuf
(
List<char>& buf,
const label size
@ -961,7 +962,8 @@ initInterfaceMatrixUpdate
const lduMatrix& m,
const scalarField& coeffs,
const direction,
const Pstream::commsTypes commsType
const Pstream::commsTypes commsType,
const bool switchToLhs
) const
{
tmp<scalarField> tlocalMult(new scalarField(this->size(), 0));
@ -985,6 +987,100 @@ initInterfaceMatrixUpdate
// use the counter.
label coeffI = 0;
if (switchToLhs)
{
// Owner side
// ~~~~~~~~~~
{
const labelList& cutOwn = procPatch_.cutEdgeOwnerIndices();
const labelList& cutOwnStart = procPatch_.cutEdgeOwnerStart();
forAll (mp, pointI)
{
label ownIndex = cutOwnStart[pointI];
label endOwn = cutOwnStart[pointI + 1];
for (; ownIndex < endOwn; ownIndex++)
{
localMult[pointI] +=
coeffs[coeffI]*psiInternal[U[cutOwn[ownIndex]]];
// Multiply the internal side as well using the cut mask
result[U[cutOwn[ownIndex]]] -=
cutMask[coeffI]*coeffs[coeffI]*psiInternal[mp[pointI]];
coeffI++;
}
}
}
// Neighbour side
// ~~~~~~~~~~~~~~
{
const labelList& cutNei = procPatch_.cutEdgeNeighbourIndices();
const labelList& cutNeiStart = procPatch_.cutEdgeNeighbourStart();
forAll (mp, pointI)
{
label neiIndex = cutNeiStart[pointI];
label endNei = cutNeiStart[pointI + 1];
for (; neiIndex < endNei; neiIndex++)
{
localMult[pointI] +=
coeffs[coeffI]*psiInternal[L[cutNei[neiIndex]]];
// Multiply the internal side as well using the cut mask
result[L[cutNei[neiIndex]]] -=
cutMask[coeffI]*coeffs[coeffI]*psiInternal[mp[pointI]];
coeffI++;
}
}
}
// Doubly cut coefficients
// ~~~~~~~~~~~~~~~~~~~~~~~
// There exists a possibility of having an internal edge for a
// point on the processor patch which is in fact connected to
// another point of the same patch. This particular nastiness
// introduces a deformation in the solution because the edge is
// either multiplied twice or not at all. For this purpose, the
// offending edges need to be separated out and multiplied
// appropriately. This will only happen for cell tetrahedral
// decomposition and is generally nasty.
// No need for cut mask here
{
const labelList& doubleCut = procPatch_.doubleCutEdgeIndices();
const labelList& doubleCutOwner = procPatch_.doubleCutOwner();
const labelList& doubleCutNeighbour =
procPatch_.doubleCutNeighbour();
forAll (doubleCut, edgeI)
{
// Owner side
localMult[doubleCutOwner[edgeI]] +=
coeffs[coeffI]*psiInternal[U[doubleCut[edgeI]]];
coeffI++;
// Neighbour side
localMult[doubleCutNeighbour[edgeI]] +=
coeffs[coeffI]*psiInternal[L[doubleCut[edgeI]]];
coeffI++;
}
}
// Add the local multiplication to this side as well
forAll (mp, pointI)
{
result[mp[pointI]] -= localMult[pointI];
}
}
else
{
// Owner side
// ~~~~~~~~~~
{
@ -1051,7 +1147,8 @@ initInterfaceMatrixUpdate
const labelList& doubleCut = procPatch_.doubleCutEdgeIndices();
const labelList& doubleCutOwner = procPatch_.doubleCutOwner();
const labelList& doubleCutNeighbour = procPatch_.doubleCutNeighbour();
const labelList& doubleCutNeighbour =
procPatch_.doubleCutNeighbour();
forAll (doubleCut, edgeI)
{
@ -1073,6 +1170,7 @@ initInterfaceMatrixUpdate
{
result[mp[pointI]] += localMult[pointI];
}
}
// Send the localMult
sendField(tlocalMult, commsType);
@ -1099,9 +1197,13 @@ updateInterfaceMatrix
const lduMatrix&,
const scalarField&,
const direction,
const Pstream::commsTypes commsType
const Pstream::commsTypes commsType,
const bool switchToLhs
) const
{
// Switch to lhs handled in init
// HJ, 22/May/2013
// Get the neighbour side multiplication
tmp<scalarField> tneiMult = receivePointField<scalar>(commsType);
this->addToInternalField(result, tneiMult());
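
The loops above walk the cut edges of each patch point through CSR-style addressing: cutEdgeOwnerStart (and its neighbour counterpart) holds, for every patch point, the range of entries in the cut-edge index list, while the running coeffI counter consumes the matching off-diagonal coefficients in the same order. With switchToLhs the gathered product is subtracted from the result, i.e. the interface contribution is moved to the left-hand side; without it the contribution is added, as in the original branch. A small standalone sketch of that pattern, using plain containers and made-up data rather than the foam-extend types (the cut-mask factor and the mp[] patch-to-mesh addressing are omitted):

    // Sketch of the CSR-style cut-edge walk used above (hypothetical data).
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Edges of patch point i live in cutOwn[cutOwnStart[i] .. cutOwnStart[i+1]-1]
        std::vector<std::size_t> cutOwnStart = {0, 2, 3, 4};
        std::vector<std::size_t> cutOwn      = {5, 7, 2, 9};    // internal point per edge
        std::vector<double>      coeffs      = {0.1, 0.2, 0.3, 0.4};
        std::vector<double>      psiInternal(10, 1.0);
        std::vector<double>      localMult(3, 0.0);
        std::vector<double>      result(10, 0.0);

        const bool switchToLhs = true;
        const double sign = switchToLhs ? -1.0 : 1.0;   // lhs convention flips the sign

        std::size_t coeffI = 0;
        for (std::size_t pointI = 0; pointI + 1 < cutOwnStart.size(); ++pointI)
        {
            for (std::size_t e = cutOwnStart[pointI]; e < cutOwnStart[pointI + 1]; ++e)
            {
                // gather the off-diagonal product for this patch point
                localMult[pointI] += coeffs[coeffI]*psiInternal[cutOwn[e]];

                // mirror the multiplication onto the internal side
                result[cutOwn[e]] += sign*coeffs[coeffI]*psiInternal[pointI];
                ++coeffI;
            }
        }

        // add (or, when switched to the lhs, subtract) the local product
        for (std::size_t pointI = 0; pointI < localMult.size(); ++pointI)
        {
            result[pointI] += sign*localMult[pointI];
        }

        std::cout << "result[5] = " << result[5] << "\n";
        return 0;
    }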

View file

@ -1024,7 +1024,8 @@ void GlobalPointPatchField
const lduMatrix& m,
const scalarField& coeffs,
const direction,
const Pstream::commsTypes commsType
const Pstream::commsTypes commsType,
const bool switchToLhs
) const
{
tmp<scalarField> tlocalMult(new scalarField(this->size(), 0));
@ -1047,11 +1048,142 @@ void GlobalPointPatchField
label coeffI = 0;
scalarField sumOffDiag(this->size(), 0);
if (switchToLhs)
{
// Owner side
// ~~~~~~~~~~
{
const labelList& cutOwn = globalPointPatch_.cutEdgeOwnerIndices();
const labelList& cutOwnStart = globalPointPatch_.cutEdgeOwnerStart();
const labelList& cutOwnStart =
globalPointPatch_.cutEdgeOwnerStart();
forAll (mp, pointI)
{
label ownIndex = cutOwnStart[pointI];
label endOwn = cutOwnStart[pointI + 1];
for (; ownIndex < endOwn; ownIndex++)
{
localMult[pointI] +=
cutMask[coeffI]*coeffs[coeffI]
*psiInternal[U[cutOwn[ownIndex]]];
sumOffDiag[pointI] += cutMask[coeffI]*coeffs[coeffI];
// Multiply the internal side as well
result[U[cutOwn[ownIndex]]] -=
coeffs[coeffI]*psiInternal[mp[pointI]];
coeffI++;
}
}
}
// Neighbour side
// ~~~~~~~~~~~~~~
{
const labelList& cutNei =
globalPointPatch_.cutEdgeNeighbourIndices();
const labelList& cutNeiStart =
globalPointPatch_.cutEdgeNeighbourStart();
forAll (mp, pointI)
{
label neiIndex = cutNeiStart[pointI];
label endNei = cutNeiStart[pointI + 1];
for (; neiIndex < endNei; neiIndex++)
{
localMult[pointI] +=
cutMask[coeffI]*coeffs[coeffI]
*psiInternal[L[cutNei[neiIndex]]];
sumOffDiag[pointI] += cutMask[coeffI]*coeffs[coeffI];
// Multiply the internal side as well
result[L[cutNei[neiIndex]]] -=
coeffs[coeffI]*psiInternal[mp[pointI]];
coeffI++;
}
}
}
// Doubly cut coefficients
// ~~~~~~~~~~~~~~~~~~~~~~~
// There exists a possibility of having an internal edge for a
// point on the processor patch which is in fact connected to
// another point of the same patch. This particular nastiness
// introduces a deformation in the solution because the edge is
// either multiplied twice or not at all. For this purpose, the
// offending edges need to be separated out and multiplied
// appropriately.
{
const labelList& doubleCut =
globalPointPatch_.doubleCutEdgeIndices();
const labelList& doubleCutOwner =
globalPointPatch_.doubleCutOwner();
const labelList& doubleCutNeighbour =
globalPointPatch_.doubleCutNeighbour();
forAll (doubleCut, edgeI)
{
// Owner side
localMult[doubleCutOwner[edgeI]] +=
cutMask[coeffI]*coeffs[coeffI]*
psiInternal[U[doubleCut[edgeI]]];
sumOffDiag[doubleCutOwner[edgeI]] +=
cutMask[coeffI]*coeffs[coeffI];
coeffI++;
// Neighbour side
localMult[doubleCutNeighbour[edgeI]] +=
cutMask[coeffI]*coeffs[coeffI]*
psiInternal[L[doubleCut[edgeI]]];
sumOffDiag[doubleCutNeighbour[edgeI]] +=
cutMask[coeffI]*coeffs[coeffI];
coeffI++;
}
}
// Reduce/extract the result and enforce over all processors
// Requires global sync points to flush buffers before gather-scatter
// communications. Reconsider. HJ, 29/Mar/2011
if (Pstream::defaultCommsType == Pstream::nonBlocking)
{
IPstream::waitRequests();
OPstream::waitRequests();
}
tmp<Field<scalar> > trpf =
reduceExtractPoint<scalar>(localMult);
Field<scalar>& rpf = trpf();
// Get addressing
const labelList& addr = globalPointPatch_.meshPoints();
forAll (addr, i)
{
result[addr[i]] -= rpf[i];
}
}
else
{
// Owner side
// ~~~~~~~~~~
{
const labelList& cutOwn = globalPointPatch_.cutEdgeOwnerIndices();
const labelList& cutOwnStart =
globalPointPatch_.cutEdgeOwnerStart();
forAll (mp, pointI)
{
@ -1078,7 +1210,8 @@ void GlobalPointPatchField
// Neighbour side
// ~~~~~~~~~~~~~~
{
const labelList& cutNei = globalPointPatch_.cutEdgeNeighbourIndices();
const labelList& cutNei =
globalPointPatch_.cutEdgeNeighbourIndices();
const labelList& cutNeiStart =
globalPointPatch_.cutEdgeNeighbourStart();
@ -1115,9 +1248,12 @@ void GlobalPointPatchField
// offending edges need to be separated out and multiplied
// appropriately.
{
const labelList& doubleCut = globalPointPatch_.doubleCutEdgeIndices();
const labelList& doubleCut =
globalPointPatch_.doubleCutEdgeIndices();
const labelList& doubleCutOwner =
globalPointPatch_.doubleCutOwner();
const labelList& doubleCutOwner = globalPointPatch_.doubleCutOwner();
const labelList& doubleCutNeighbour =
globalPointPatch_.doubleCutNeighbour();
@ -1168,6 +1304,7 @@ void GlobalPointPatchField
result[addr[i]] += rpf[i];
}
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
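
When the default communications type is nonBlocking, the branch above first completes all outstanding point-to-point requests (IPstream::waitRequests and OPstream::waitRequests) before the reduceExtractPoint gather-scatter, as the comment from 29/Mar/2011 notes. The same ordering applies to raw MPI: nonblocking exchanges must be waited on before entering a collective. A generic sketch of that pattern in plain MPI, with a hypothetical ring exchange rather than the Pstream API:

    // Complete outstanding nonblocking exchanges before a collective.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        double sendBuf = rank, recvBuf = 0.0;
        std::vector<MPI_Request> requests(2);

        // hypothetical neighbour exchange around a ring
        MPI_Isend(&sendBuf, 1, MPI_DOUBLE, (rank + 1) % size, 0,
                  MPI_COMM_WORLD, &requests[0]);
        MPI_Irecv(&recvBuf, 1, MPI_DOUBLE, (rank + size - 1) % size, 0,
                  MPI_COMM_WORLD, &requests[1]);

        // flush point-to-point traffic before the gather-scatter style collective
        MPI_Waitall(2, requests.data(), MPI_STATUSES_IGNORE);

        double localMult = recvBuf, globalSum = 0.0;
        MPI_Allreduce(&localMult, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        MPI_Finalize();
        return 0;
    }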

View file

@ -33,10 +33,6 @@ Contributor
\*---------------------------------------------------------------------------*/
#include <sys/time.h>
#include <sstream>
#include <iomanip>
namespace Foam
{
@ -44,17 +40,13 @@ namespace Foam
template<class MasterPatch, class SlavePatch>
template<class Type>
void
MixingPlaneInterpolation<MasterPatch, SlavePatch>::
interpolate
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::toProfile
(
const Field<Type>& srcF,
const labelListList& srcAddr,
const scalarListList& srcWeights,
const labelListList& dstAddr,
const scalarListList& dstWeights,
Field<Type>& dstResultF
) const
Field<Type>& profileBandValues
)
{
// The src to profile transfer is done using weighted averaging
// evaluation of srcF.
@ -76,119 +68,131 @@ interpolate
//
// average(phi) == sum(w*phi)
int nbrProfileBands = interpolationProfile_.size() - 1;
List<Type> profileBandValues(nbrProfileBands, pTraits<Type>::zero);
scalarField srcScalingValues(nbrProfileBands, 0.0);
forAll (srcAddr, bandI)
{
forAll (srcAddr[bandI], faceI)
{
profileBandValues[bandI] +=
srcF[srcAddr[bandI][faceI]]*srcWeights[bandI][faceI];
}
}
}
// NB: The next operation should be computed only
// once... and should sum up to 1.0 Let's keep this
// operation for now, until the mixingPlane interface is
// fully debugged (MB, 07/2010)
srcScalingValues[bandI] += srcWeights[bandI][faceI];
if (debug <= -200)
template<class MasterPatch, class SlavePatch>
template<class Type>
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::fromProfile
(
const Field<Type>& profileBandValues,
const labelListList& dstAddr,
const scalarListList& dstWeights,
Field<Type>& dstResultF
)
{
Info << "bande: " << bandI
<< " src valeur: " << srcF[srcAddr[bandI][faceI]]
<< " src weight: " << srcWeights[bandI][faceI] << endl;
}
}
}
// We don't need to divide the profileBandValues by the
// srcScalingValues because the srcScalingValues are identically
// equal to 1.0, thanks to the conservativeness of the GGI
// weighting factors
//profileBandValues = profileBandValues/srcScalingValues;
// profileBandValues are now the circumferentially averaged values
// The profile to dst transfer is done by simply distributing the
// The profile to dst transfer is done by distributing the
// average value accordingly to the dst weighting factors
forAll (dstAddr, faceI)
{
const labelList& curAddr = dstAddr[faceI];
const scalarList& curW = dstWeights[faceI];
dstResultF[faceI] = pTraits<Type>::zero;
forAll (curAddr, bandI)
{
dstResultF[faceI] += profileBandValues[curAddr[bandI]]*curW[bandI];
if (debug <= -200)
{
Info<< "bande: " << dstAddr[faceI][bandI]
<< " dst valeur: " << dstResultF[faceI]
<< " dst weight: " << dstWeights[faceI][bandI] << endl;
}
}
}
if (debug <= -500)
{
error::printStack(Info);
Info<< "srcF : " << srcF << nl
<< "srcAddr : " << srcAddr << nl
<< "srcWeights: " << srcWeights << nl
<< "profileBandValues: " << profileBandValues << nl
<< "srcScalingValues: " << srcScalingValues << nl
<< "dstAddr : " << dstAddr << nl
<< "dstWeights: " << dstWeights << nl
<< "dstResultF: " << dstResultF << nl
<< "srcScalingValues: " << srcScalingValues << endl;
}
if (debug <= -999)
{
fileName traceFileDir("./mixingPlaneTraceFiles");
if (!exists(traceFileDir))
{
mkDir(traceFileDir);
}
struct timeval tod;
gettimeofday(&tod, NULL);
//struct timespec tp;
//clock_gettime(CLOCK_MONOTONIC, &tp);
std::ostringstream osBuffer;
osBuffer
<< Foam::name(tod.tv_sec)
<< "." << std::setfill('0') << std::setw(6)
<< Foam::name(tod.tv_usec);
fileName traceFileName(traceFileDir/"profileValues_" + osBuffer.str());
OFstream dumpFileSrc(traceFileName + "_orig");
OFstream dumpFileDst(traceFileName + "_interpolated");
OFstream dumpFileProfile(traceFileName + "_profile");
//Foam::error::printStack(Info);
InfoIn
template<class MasterPatch, class SlavePatch>
template<class Type>
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::maskedFromProfile
(
"MixingPlaneInterpolation::interpolate"
) << "Dumping src profiles to: " << traceFileName + "_orig" << nl
<< "Dumping interpolated profiles to : "
<< traceFileName + "_interpolated" << nl
<< "Dumping profile values to: "
<< traceFileName + "_profile" << endl;
const Field<Type>& profileBandValues,
const labelListList& dstAddr,
const scalarListList& dstWeights,
Field<Type>& dstResultF,
const labelList& mask
)
{
// The profile to dst transfer is done by distributing the
// average value accordingly to the dst weighting factors
forAll (mask, maskI)
{
// Pick the masked face
const label faceI = mask[maskI];
dumpFileDst << dstResultF << endl;
dumpFileSrc << srcF << endl;
dumpFileProfile << profileBandValues << endl;
const labelList& curAddr = dstAddr[faceI];
const scalarList& curW = dstWeights[faceI];
dstResultF[maskI] = pTraits<Type>::zero;
forAll (curAddr, bandI)
{
dstResultF[maskI] += profileBandValues[curAddr[bandI]]*curW[bandI];
}
}
}
template<class MasterPatch, class SlavePatch>
template<class Type>
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::maskedTransform
(
Field<Type>& transField,
const tensorField& t,
const Field<Type>& inField,
const labelList& mask
)
{
// The profile to dst transfer is done by distributing the
// average value accordingly to the dst weighting factors
forAll (mask, maskI)
{
// Pick the masked face
const label faceI = mask[maskI];
transField[maskI] = transform(t[faceI], inField[maskI]);
}
}
template<class MasterPatch, class SlavePatch>
template<class Type>
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::interpolate
(
const Field<Type>& srcF,
const labelListList& srcAddr,
const scalarListList& srcWeights,
const labelListList& dstAddr,
const scalarListList& dstWeights,
Field<Type>& dstResultF
) const
{
Field<Type> profileBandValues(nProfileBands(), pTraits<Type>::zero);
// Interpolate from patch to profile
toProfile
(
srcF,
srcAddr,
srcWeights,
profileBandValues
);
// profileBandValues are now the circumferentially averaged values
// Collect from profile to patch
fromProfile
(
profileBandValues,
dstAddr,
dstWeights,
dstResultF
);
}
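
interpolate() now simply chains the two helpers introduced above: toProfile averages the source faces of every circumferential band with weights that sum to one per band (average(phi) == sum(w*phi)), and fromProfile gives each destination face a weighted combination of the band averages it overlaps. A small standalone sketch of the two passes with plain containers and made-up addressing and weights:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        // srcAddr[band] lists the source faces in that band; srcWeights[band]
        // holds the matching weights, assumed to sum to 1 within each band.
        std::vector<std::vector<std::size_t>> srcAddr    = {{0, 1}, {2, 3}};
        std::vector<std::vector<double>>      srcWeights = {{0.5, 0.5}, {0.25, 0.75}};
        std::vector<double> srcF = {1.0, 3.0, 2.0, 6.0};

        // toProfile: circumferential average per band
        std::vector<double> profile(srcAddr.size(), 0.0);
        for (std::size_t band = 0; band < srcAddr.size(); ++band)
        {
            for (std::size_t i = 0; i < srcAddr[band].size(); ++i)
            {
                profile[band] += srcF[srcAddr[band][i]]*srcWeights[band][i];
            }
        }

        // fromProfile: distribute the band averages to the destination faces,
        // weighted by how much of each face overlaps each band.
        std::vector<std::vector<std::size_t>> dstAddr    = {{0}, {0, 1}, {1}};
        std::vector<std::vector<double>>      dstWeights = {{1.0}, {0.4, 0.6}, {1.0}};
        std::vector<double> dstF(dstAddr.size(), 0.0);
        for (std::size_t face = 0; face < dstAddr.size(); ++face)
        {
            for (std::size_t b = 0; b < dstAddr[face].size(); ++b)
            {
                dstF[face] += profile[dstAddr[face][b]]*dstWeights[face][b];
            }
        }

        for (double v : dstF)
        {
            std::cout << v << "\n";   // 2, 3.8, 5
        }

        return 0;
    }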
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
@ -206,7 +210,7 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToSlave
FatalErrorIn
(
"MixingPlaneInterpolation::masterToSlave("
"const Field<Type> ff)"
"const Field<Type> ff) const"
) << "given field does not correspond to patch. Patch size: "
<< masterPatch_.size() << " field size: " << patchFF.size()
<< abort(FatalError);
@ -216,16 +220,6 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToSlave
// MB: We need this back
Field<Type> profileFF = transform(masterPatchToProfileT(), patchFF);
if (debug > 1)
{
Info << "MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToSlave: "
<< "patchFF: " << patchFF << endl
<< "profileFF: " << profileFF << endl
<< "masterPatchToProfileT(): " << masterPatchToProfileT() << endl
<< "slaveProfileToPatchT(): " << slaveProfileToPatchT() << endl
<< endl;
}
// Do interpolation
tmp<Field<Type> > tresult
(
@ -243,14 +237,14 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToSlave
profileFF, // Master data in 'profile space'
masterPatchToProfileAddr(), // From master: compute the average
masterPatchToProfileWeights(),
slaveProfileToPatchAddr(), // To slave we distribute the average from
slaveProfileToPatchAddr(), // To slave distribute average from
slaveProfileToPatchWeights(), // profile to patch
result
);
// Apply transform to bring the slave field back from 'profile space'
// to 'patch space'
transform(result, slaveProfileToPatchT(), result); // MB: We need this back
transform(result, slaveProfileToPatchT(), result);
return tresult;
}
@ -283,7 +277,7 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToMaster
FatalErrorIn
(
"MixingPlaneInterpolation::slaveToMaster("
"const Field<Type> ff)"
"const Field<Type> ff) const"
) << "given field does not correspond to patch. Patch size: "
<< slavePatch_.size() << " field size: " << patchFF.size()
<< abort(FatalError);
@ -292,16 +286,6 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToMaster
// Move slave data from 'patch space' to 'profile space'
Field<Type> profileFF = transform(slavePatchToProfileT(), patchFF);
if (debug > 1)
{
Info << "MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToMaster: "
<< "patchFF: " << patchFF << endl
<< "profileFF: " << profileFF << endl
<< "slavePatchToProfileT(): " << slavePatchToProfileT() << endl
<< "masterProfileToPatchT(): " << masterProfileToPatchT() << endl
<< endl;
}
// Do interpolation
tmp<Field<Type> > tresult
(
@ -345,6 +329,334 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToMaster
return tint;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToProfile
(
const Field<Type>& patchFF
) const
{
if (patchFF.size() != masterPatch_.size())
{
FatalErrorIn
(
"MixingPlaneInterpolation::masterToProfile("
"const Field<Type> ff) const"
) << "given field does not correspond to patch. Patch size: "
<< masterPatch_.size() << " field size: " << patchFF.size()
<< abort(FatalError);
}
// Move master data from 'patch space' to 'profile space'
Field<Type> profileFF = transform(masterPatchToProfileT(), patchFF);
// Do interpolation
tmp<Field<Type> > tresult
(
new Field<Type>
(
nProfileBands(),
pTraits<Type>::zero
)
);
Field<Type>& result = tresult();
toProfile
(
profileFF, // Master data in 'profile space'
masterPatchToProfileAddr(), // From master: compute the average
masterPatchToProfileWeights(),
result
);
return tresult;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToProfile
(
const tmp<Field<Type> >& tff
) const
{
tmp<Field<Type> > tint = masterToProfile(tff());
tff.clear();
return tint;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToProfile
(
const Field<Type>& patchFF
) const
{
if (patchFF.size() != slavePatch_.size())
{
FatalErrorIn
(
"MixingPlaneInterpolation::slaveToProfile("
"const Field<Type> ff) const"
) << "given field does not correspond to patch. Patch size: "
<< slavePatch_.size() << " field size: " << patchFF.size()
<< abort(FatalError);
}
// Move slave data from 'patch space' to 'profile space'
Field<Type> profileFF = transform(slavePatchToProfileT(), patchFF);
// Do interpolation
tmp<Field<Type> > tresult
(
new Field<Type>
(
nProfileBands(),
pTraits<Type>::zero
)
);
Field<Type>& result = tresult();
toProfile
(
profileFF, // Slave data in 'profile space'
slavePatchToProfileAddr(), // From slave: compute the average
slavePatchToProfileWeights(),
result
);
return tresult;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToProfile
(
const tmp<Field<Type> >& tff
) const
{
tmp<Field<Type> > tint = slaveToProfile(tff());
tff.clear();
return tint;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::profileToMaster
(
const Field<Type>& profileFF
) const
{
if (profileFF.size() != nProfileBands())
{
FatalErrorIn
(
"MixingPlaneInterpolation::profileToMaster("
"const Field<Type> ff) const"
) << "given field does not correspond to profile. Profile size: "
<< nProfileBands() << " field size: " << profileFF.size()
<< abort(FatalError);
}
// Do interpolation
tmp<Field<Type> > tresult
(
new Field<Type>
(
masterPatch_.size(),
pTraits<Type>::zero
)
);
Field<Type>& result = tresult();
fromProfile
(
profileFF, // Master data in 'profile space'
masterProfileToPatchAddr(), // To master: distribute the average
masterProfileToPatchWeights(),
result
);
// Apply transform to bring the master field back from 'profile space'
// to 'patch space'
transform(result, masterProfileToPatchT(), result);
return tresult;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::profileToMaster
(
const tmp<Field<Type> >& tff
) const
{
tmp<Field<Type> > tint = profileToMaster(tff());
tff.clear();
return tint;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::maskedProfileToMaster
(
const Field<Type>& profileFF,
Field<Type>& result,
const labelList& mask
) const
{
if (profileFF.size() != nProfileBands() || result.size() != mask.size())
{
FatalErrorIn
(
"bvoid MixingPlaneInterpolation<MasterPatch, SlavePatch>::"
"maskedProfileToMaster\n"
"(\n"
" const Field<Type>& profileFF,\n"
" Field<Type>& result,\n"
" const labelList& mask\n"
") const"
) << "given field does not correspond to profile. Profile size: "
<< nProfileBands() << " field size: " << profileFF.size()
<< " result size: " << result.size()
<< " mask size: " << mask.size()
<< abort(FatalError);
}
// Do interpolation
maskedFromProfile
(
profileFF, // Master data in 'profile space'
masterProfileToPatchAddr(), // To master: distribute the average
masterProfileToPatchWeights(),
result,
mask
);
// Apply transform to bring the master field back from 'profile space'
// to 'patch space'
maskedTransform(result, masterProfileToPatchT(), result, mask);
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::profileToSlave
(
const Field<Type>& profileFF
) const
{
if (profileFF.size() != nProfileBands())
{
FatalErrorIn
(
"MixingPlaneInterpolation::profileToSlave("
"const Field<Type> ff) const"
) << "given field does not correspond to profile. Profile size: "
<< nProfileBands() << " field size: " << profileFF.size()
<< abort(FatalError);
}
// Do interpolation
tmp<Field<Type> > tresult
(
new Field<Type>
(
slavePatch_.size(),
pTraits<Type>::zero
)
);
Field<Type>& result = tresult();
fromProfile
(
profileFF, // Slave data in 'profile space'
slaveProfileToPatchAddr(), // To slave distribute average from
slaveProfileToPatchWeights(), // profile to patch
result
);
// Apply transform to bring the slave field back from 'profile space'
// to 'patch space'
transform(result, slaveProfileToPatchT(), result);
return tresult;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
MixingPlaneInterpolation<MasterPatch, SlavePatch>::profileToSlave
(
const tmp<Field<Type> >& tff
) const
{
tmp<Field<Type> > tint = profileToSlave(tff());
tff.clear();
return tint;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
void MixingPlaneInterpolation<MasterPatch, SlavePatch>::maskedProfileToSlave
(
const Field<Type>& profileFF,
Field<Type>& result,
const labelList& mask
) const
{
if (profileFF.size() != nProfileBands() || result.size() != mask.size())
{
FatalErrorIn
(
"void MixingPlaneInterpolation<MasterPatch, SlavePatch>::"
"maskedProfileToSlave\n"
"(\n"
" const Field<Type>& profileFF,\n"
" Field<Type>& result,\n"
" const labelList& mask\n"
") const"
) << "given field does not correspond to profile. Profile size: "
<< nProfileBands() << " field size: " << profileFF.size()
<< " result size: " << result.size()
<< " mask size: " << mask.size()
<< abort(FatalError);
}
maskedFromProfile
(
profileFF, // Slave data in 'profile space'
slaveProfileToPatchAddr(), // To slave distribute average from
slaveProfileToPatchWeights(), // profile to patch
result,
mask
);
// Apply transform to bring the slave field back from 'profile space'
// to 'patch space'
maskedTransform(result, slaveProfileToPatchT(), result, mask);
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
@ -367,15 +679,6 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToMaster
// Move master data from 'patch space' to 'profile space'
Field<Type> profileFF = transform(masterPatchToProfileT(), patchFF);
if (debug > 1)
{
Info << "MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToMaster: "
<< "patchFF: " << patchFF << endl
<< "profileFF: " << profileFF << endl
<< "masterPatchToProfileT(): " << masterPatchToProfileT() << endl
<< endl;
}
// Do interpolation
tmp<Field<Type> > tresult
(
@ -405,6 +708,7 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToMaster
return tresult;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
@ -418,6 +722,7 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::masterToMaster
return tint;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >
@ -440,15 +745,6 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToSlave
// Move slave data from 'patch space' to 'profile space'
Field<Type> profileFF = transform(slavePatchToProfileT(), patchFF);
if (debug > 1)
{
Info << "MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToSlave: "
<< "patchFF: " << patchFF << endl
<< "profileFF: " << profileFF << endl
<< "slavePatchToProfileT(): " << slavePatchToProfileT() << endl
<< endl;
}
// Do interpolation
tmp<Field<Type> > tresult
(
@ -478,6 +774,7 @@ MixingPlaneInterpolation<MasterPatch, SlavePatch>::slaveToSlave
return tresult;
}
template<class MasterPatch, class SlavePatch>
template<class Type>
tmp<Field<Type> >

View file

@ -29,8 +29,8 @@ Description
Radial basis function interpolation class
Description
Interpolation class which uses Radial Basis Functions to interpolate the
fluid displacements for given boundary displacements.
Interpolation class which uses Radial Basis Functions to interpolate
field from given data points to arbitrary set of points.
The coefficient vectors, alpha and beta are determined by solving
the system:
@ -38,25 +38,17 @@ Description
| db |   | Mbb Pb | | alpha |
|    | = |        | |       |
| 0  |   | Pb  0  | | beta  |
where db are the given boundary displacements,
where db are the given field values at data carrier points.
Mbb the boundary RBF correlation matrix (NbxNb), containing RBF evaluations
at the boundary nodes, and Pb some linear polynomial matrix (Nbx4).
Those coefficients are calculated every timestep, with the current
boundary displacements db, with the inverse of Mbb. Using those
coefficients, the RBF is evaluated at all fluid points every
timestep.
The efficiency of this method is increased by:
1) using control points which is a subset of the moving
boundary points. Those control points are selected by
a coarsening function.
2) The outer boundary points are neglected since a cutoff function
is used toward the outer boundaries.
In cases where far field data is not of interest, a cutoff function
is used to eliminate unnecessary data points in the far field
Author
Frank Bos, TU Delft. All rights reserved.
Dubravko Matijasevic, FSB Zagreb.
Reorganisation by Hrvoje Jasak, Wikki Ltd.
SourceFiles
RBFInterpolation.C
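
The system in the Description couples the RBF correlation matrix Mbb (Nb x Nb evaluations of the basis function between the data points) with the linear polynomial matrix Pb (Nb x 4, one row [1 x y z] per point, appearing transposed in the bottom block row); solving it gives the alpha and beta coefficients that are then evaluated at the target points. A minimal sketch of assembling that (Nb+4) x (Nb+4) system with a Gaussian basis, an assumed kernel choice rather than whatever is configured through RBFFunction:

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Vec { double x, y, z; };

    // Gaussian radial basis function, phi(r) = exp(-(r/r0)^2)
    double rbf(const Vec& a, const Vec& b, double r0)
    {
        const double dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
        return std::exp(-(dx*dx + dy*dy + dz*dz)/(r0*r0));
    }

    int main()
    {
        // Hypothetical data (control) points and the values db carried on them
        std::vector<Vec> pts = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}};
        std::vector<double> db = {1.0, 2.0, 0.5};
        const std::size_t n = pts.size();

        // Assemble A = | Mbb Pb ; Pb^T 0 | and rhs = | db ; 0 |
        std::vector<std::vector<double>> A(n + 4, std::vector<double>(n + 4, 0.0));
        std::vector<double> rhs(n + 4, 0.0);

        for (std::size_t i = 0; i < n; ++i)
        {
            rhs[i] = db[i];

            for (std::size_t j = 0; j < n; ++j)
            {
                A[i][j] = rbf(pts[i], pts[j], 1.0);           // Mbb
            }

            const double P[4] = {1.0, pts[i].x, pts[i].y, pts[i].z};
            for (std::size_t k = 0; k < 4; ++k)
            {
                A[i][n + k] = P[k];                           // Pb
                A[n + k][i] = P[k];                           // Pb transposed
            }
        }

        // Solving A*[alpha; beta] = rhs (e.g. by LU decomposition) and evaluating
        // s(x) = sum_i alpha_i*phi(|x - x_i|) + beta.(1, x, y, z) is left out here.
        std::cout << "assembled " << n + 4 << "x" << n + 4 << " RBF system\n";
        return 0;
    }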
@ -90,7 +82,7 @@ class RBFInterpolation
const vectorField& controlPoints_;
//- Reference to all points
const vectorField& allPoints_;
const vectorField& dataPoints_;
//- RBF function
autoPtr<RBFFunction> RBF_;
@ -136,7 +128,7 @@ public:
(
const dictionary& dict,
const vectorField& controlPoints,
const vectorField& allPoints
const vectorField& dataPoints
);
//- Construct as copy
@ -150,6 +142,18 @@ public:
// Member Functions
//- Return reference to control points
const vectorField& controlPoints() const
{
return controlPoints_;
}
//- Reference to all points
const vectorField& dataPoints() const
{
return dataPoints_;
}
//- Interpolate
template<class Type>
tmp<Field<Type> > interpolate(const Field<Type>& ctrlField) const;

View file

@ -105,7 +105,7 @@ Foam::BlockAmgSolver<Type>::solve
solverPerf.finalResidual() = gSum(cmptMag(wA))/norm;
solverPerf.nIterations()++;
} while (!stop(solverPerf));
} while (!this->stop(solverPerf));
}
return solverPerf;
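
The change from stop(solverPerf) to this->stop(solverPerf) here, and in the BiCGStab, CG, GMRES, GaussSeidel and gaussGrad hunks below, looks like the usual fix for dependent-name lookup in class templates: a member inherited from a base class that depends on a template parameter is not found by unqualified lookup, so it has to be reached through this-> (or a using-declaration). A minimal illustration, independent of the foam-extend classes:

    // Dependent-name lookup: the base is a template, so 'stop' needs 'this->'.
    template<class Type>
    struct BaseSolver
    {
        bool stop(int nIter) const { return nIter > 10; }
    };

    template<class Type>
    struct DerivedSolver : BaseSolver<Type>
    {
        int solve() const
        {
            int nIter = 0;

            // plain 'stop(nIter)' would not compile with a conforming compiler,
            // because the dependent base is not searched by unqualified lookup
            while (!this->stop(nIter))   // or: using BaseSolver<Type>::stop;
            {
                ++nIter;
            }

            return nIter;
        }
    };

    int main()
    {
        DerivedSolver<double> s;
        return s.solve() == 11 ? 0 : 1;
    }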

View file

@ -93,7 +93,7 @@ Foam::BlockBiCGStabSolver<Type>::solve
// Check convergence, solve if not converged
if (!stop(solverPerf))
if (!this->stop(solverPerf))
{
scalar rho = this->great_;
scalar rhoOld = rho;

View file

@ -92,7 +92,7 @@ typename Foam::BlockSolverPerformance<Type> Foam::BlockCGSolver<Type>::solve
// Check convergence, solve if not converged
if (!stop(solverPerf))
if (!this->stop(solverPerf))
{
scalar rho = this->great_;
scalar rhoOld = rho;

View file

@ -128,7 +128,7 @@ Foam::BlockGMRESSolver<Type>::solve
// Check convergence, solve if not converged
if (!stop(solverPerf))
if (!this->stop(solverPerf))
{
// Create the Hesenberg matrix
scalarSquareMatrix H(nDirs_, 0);

View file

@ -86,7 +86,7 @@ Foam::BlockGaussSeidelSolver<Type>::solve
// Check convergence, solve if not converged
if (!stop(solverPerf))
if (!this->stop(solverPerf))
{
// Iteration loop

View file

@ -41,11 +41,6 @@ engineTopoChangerMesh/deformingEngineMesh/deformingEngineMeshInitialize.C
engineTopoChangerMesh/deformingEngineMesh/deformingEngineMeshMove.C
engineTopoChangerMesh/deformingEngineMesh/addDeformingEngineMeshZones.C
engineTopoChangerMesh/deformingEngineMesh/deformingEngineMesh.C
engineTopoChangerMesh/deformingEngineMesh/deformingEngineMeshInitialize.C
engineTopoChangerMesh/deformingEngineMesh/deformingEngineMeshMove.C
engineTopoChangerMesh/deformingEngineMesh/addDeformingEngineMeshZones.C
engineTopoChangerMesh/engineValveSliding/addEngineValveSlidingMeshModifiers.C
engineTopoChangerMesh/engineValveSliding/engineValveSliding.C
engineTopoChangerMesh/engineValveSliding/engineValveSlidingInitialize.C

View file

@ -140,7 +140,7 @@ gaussGrad<Type>::grad
GeometricField<GradType, fvPatchField, volMesh>& gGrad = tgGrad();
gGrad.rename("grad(" + vsf.name() + ')');
correctBoundaryConditions(vsf, gGrad);
this->correctBoundaryConditions(vsf, gGrad);
return tgGrad;
}

View file

@ -2,7 +2,7 @@
c++WARN = -Wall -Wextra -Wno-unused-parameter -Wold-style-cast -Wnon-virtual-dtor
CC = ccache g++ -m64
CC = g++ -m64
include $(RULES)/c++$(WM_COMPILE_OPTION)