Intermediate: direct comms in parallel GGI with fast cutting
Commit 8d2bae6e31, parent 2b24157b77.
15 changed files with 692 additions and 234 deletions.
@@ -120,8 +120,8 @@ void GGIInterpolation<MasterPatch, SlavePatch>::maskedBridge
     // Note: tricky algorithm
     // In order for a face to be bridged it needs to be both in the
     // mask and in selection of faces that are bridged (addr).
-    // This implies an n-squared search, but we can use the fact that
-    // both lists are ordered.
+    // This implies an n-squared search, but we can avoid it by
+    // using the fact that both lists are ordered.

     label maskAddrI = 0;
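The ordered-lists remark above is the whole trick: because both the mask and the bridged-face selection are sorted, a single forward-moving cursor replaces the n-squared scan. A minimal standalone sketch of that pattern (plain C++, not the library code itself):

    #include <iostream>
    #include <vector>

    // Intersect two ascending index lists in one pass; maskI plays the
    // role of maskAddrI in maskedBridge above.
    std::vector<int> orderedIntersect(const std::vector<int>& mask,
                                      const std::vector<int>& addr)
    {
        std::vector<int> bridged;
        std::size_t maskI = 0;

        for (int face : addr)
        {
            while (maskI < mask.size() && mask[maskI] < face) ++maskI;
            if (maskI < mask.size() && mask[maskI] == face)
            {
                bridged.push_back(face);
            }
        }

        return bridged;
    }

    int main()
    {
        for (int f : orderedIntersect({1, 4, 5, 9}, {2, 4, 9, 11}))
        {
            std::cout << f << '\n';   // prints 4 and 9
        }
    }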
@@ -43,6 +43,50 @@ namespace Foam

 // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //

+template<class MasterPatch, class SlavePatch>
+label GGIInterpolation<MasterPatch, SlavePatch>::parMasterStart() const
+{
+    if (globalData())
+    {
+        // Integer division intended
+        return Pstream::myProcNo()*(masterPatch_.size()/Pstream::nProcs() + 1);
+    }
+    else
+    {
+        // No parallel search: do complete patch
+        return 0;
+    }
+}
+
+
+template<class MasterPatch, class SlavePatch>
+label GGIInterpolation<MasterPatch, SlavePatch>::parMasterEnd() const
+{
+    if (globalData())
+    {
+        // Integer division intended
+        return Foam::min
+        (
+            masterPatch_.size(),
+            (Pstream::myProcNo() + 1)*
+            (masterPatch_.size()/Pstream::nProcs() + 1)
+        );
+    }
+    else
+    {
+        // No parallel search: do complete patch
+        return masterPatch_.size();
+    }
+}
+
+
+template<class MasterPatch, class SlavePatch>
+label GGIInterpolation<MasterPatch, SlavePatch>::parMasterSize() const
+{
+    return this->parMasterEnd() - this->parMasterStart();
+}
+
+
 template<class MasterPatch, class SlavePatch>
 void GGIInterpolation<MasterPatch, SlavePatch>::clearOut()
 {
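The slice arithmetic in parMasterStart()/parMasterEnd() is easiest to see with numbers. A standalone check (plain C++) using the same chunk = size/nProcs + 1 integer division and min() clamp as above:

    #include <algorithm>
    #include <iostream>

    int main()
    {
        const int size = 10;               // masterPatch_.size()
        const int nProcs = 4;              // Pstream::nProcs()
        const int chunk = size/nProcs + 1; // integer division intended: 3

        for (int proc = 0; proc < nProcs; ++proc)
        {
            const int start = proc*chunk;
            const int end = std::min(size, (proc + 1)*chunk);
            std::cout << "proc " << proc << ": [" << start << ", " << end << ")\n";
        }
        // proc 0: [0, 3)   proc 1: [3, 6)   proc 2: [6, 9)   proc 3: [9, 10)
        // For patches that are small relative to nProcs the trailing slices
        // degenerate, which is why the parallel split is only enabled when
        // masterPatch_.size() > 10*Pstream::nProcs() (see globalData() below).
    }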
@@ -67,6 +111,7 @@ GGIInterpolation<MasterPatch, SlavePatch>::GGIInterpolation
     const tensorField& forwardT,
     const tensorField& reverseT,
     const vectorField& forwardSep,
+    const bool globalData,
     const scalar masterNonOverlapFaceTol,
     const scalar slaveNonOverlapFaceTol,
     const bool rescaleGGIWeightingFactors,
@@ -78,6 +123,7 @@ GGIInterpolation<MasterPatch, SlavePatch>::GGIInterpolation
     forwardT_(forwardT),
     reverseT_(reverseT),
     forwardSep_(forwardSep),
+    globalData_(globalData),
     masterNonOverlapFaceTol_(masterNonOverlapFaceTol),
     slaveNonOverlapFaceTol_(slaveNonOverlapFaceTol),
    rescaleGGIWeightingFactors_(rescaleGGIWeightingFactors),
@@ -38,17 +38,19 @@ Contributor:

 defineTypeNameAndDebug(Foam::GGIInterpolationName, 0);


 template<>
 const char*
-Foam::NamedEnum<Foam::GGIInterpolationName::quickReject, 4>::names[] =
+Foam::NamedEnum<Foam::GGIInterpolationName::quickReject, 3>::names[] =
 {
     "distance3D",
     "AABB",
-    "bbOctree",
-    "nSquared"
+    "bbOctree"
 };

-const Foam::NamedEnum<Foam::GGIInterpolationName::quickReject, 4>
+const Foam::NamedEnum<Foam::GGIInterpolationName::quickReject, 3>
 Foam::GGIInterpolationName::quickRejectNames_;


 // ************************************************************************* //
@@ -53,31 +53,37 @@ GGIInterpolation<MasterPatch, SlavePatch>::faceBoundBoxExtendSpanFraction_
     "Add robustness for quick-search algo. Keep it to a few percent."
 );


 template<class MasterPatch, class SlavePatch>
 const Foam::debug::optimisationSwitch
 GGIInterpolation<MasterPatch, SlavePatch>::octreeSearchMinNLevel_
 (
     "GGIOctreeSearchMinNLevel",
     3,
-    "GGI neighbouring facets octree-based search: minNlevel parameter for octree"
+    "GGI neighbouring facets octree-based search: "
+    "minNlevel parameter for octree"
 );


 template<class MasterPatch, class SlavePatch>
 const Foam::debug::optimisationSwitch
 GGIInterpolation<MasterPatch, SlavePatch>::octreeSearchMaxLeafRatio_
 (
     "GGIOctreeSearchMaxLeafRatio",
     3,
-    "GGI neighbouring facets octree-based search: maxLeafRatio parameter for octree"
+    "GGI neighbouring facets octree-based search: "
+    "maxLeafRatio parameter for octree"
 );


 template<class MasterPatch, class SlavePatch>
 const Foam::debug::optimisationSwitch
 GGIInterpolation<MasterPatch, SlavePatch>::octreeSearchMaxShapeRatio_
 (
     "GGIOctreeSearchMaxShapeRatio",
     1,
-    "GGI neighbouring facets octree-based search: maxShapeRatio parameter for octree"
+    "GGI neighbouring facets octree-based search: "
+    "maxShapeRatio parameter for octree"
 );

@@ -87,7 +93,7 @@ GGIInterpolation<MasterPatch, SlavePatch>::octreeSearchMaxShapeRatio_
 // One of the most primitive ways of doing collision detection is to
 // approximate each object or a part of the object with a sphere, and
 // then check whether spheres intersect each other. This method is
-// widely used even today because it’s computationally inexpensive.
+// widely used even today because it's computationally inexpensive.
 // We merely check whether the distance between the centers of two
 // spheres is less than the sum of the two radii (which indicates that
 // a collision has occurred). Even better, if we calculate whether the
@@ -123,7 +129,8 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighbours3D
     labelListList& result
 ) const
 {
-    List<DynamicList<label, 8> > candidateMasterNeighbors(masterPatch_.size());
+    // Allocation to local size. HJ, 27/Apr/2016
+    List<DynamicList<label, 8> > candidateMasterNeighbors(parMasterSize());

     // First, compute the face center and the sphere radius (squared)
     // of the slave patch faces so we will not have to recompute this
@@ -131,6 +138,8 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighbours3D
     vectorField slaveFaceCentre(slavePatch_.size());
     scalarField slaveRadius2(slavePatch_.size());

+    // Since the parallelised search is done on the master, all of
+    // slave face data is needed. HJ, 27/Apr/2016
     forAll (slavePatch_, faceSi)
     {
         // Grab points from slave face
@@ -179,10 +188,10 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighbours3D

         boundBox bbSlave(curFacePoints, false);

-        scalar tmpValue = Foam::magSqr(bbSlave.max() - bbSlave.min())/4.0;
+        scalar tmpValue = 0.25*Foam::magSqr(bbSlave.max() - bbSlave.min());

-        // We compare squared distances, so save the sqrt() if value > 1.0.
-        if (tmpValue < 1.0)
+        // We compare squared distances, so save the sqrt() if value > 1.
+        if (tmpValue < 1)
         {
             // Take the sqrt, otherwise, we underestimate the radius
             slaveRadius2[faceSi] = sqrt(tmpValue);
@@ -194,7 +203,17 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighbours3D
     }

     // Next, we search for each master face a list of potential neighbors
-    forAll (masterPatch_, faceMi)
+
+    // Parallel search split. HJ, 27/Apr/2016
+    const label pmStart = this->parMasterStart();
+
+    for
+    (
+        label faceMi = pmStart;
+        faceMi < this->parMasterEnd();
+        faceMi++
+    )
+    // forAll (masterPatch_, faceMi)
     {
         // For each masterPatch faces, compute the bounding box. With
         // this, we compute the radius of the bounding sphere for this
@@ -238,18 +257,24 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighbours3D

             if (distFaceCenters < (masterRadius2 + slaveRadius2[faceSi]))
             {
-                candidateMasterNeighbors[faceMi].append(faceSi);
+                candidateMasterNeighbors[faceMi - pmStart].append(faceSi);
             }
         }
     }

-    // Repack the list
-    result.setSize(masterPatch_.size());
+    // Repack the list. Local size
+    result.setSize(parMasterSize());

+    // Parallel search split: local size. HJ, 27/Apr/2016
     forAll (result, i)
     {
         result[i].transfer(candidateMasterNeighbors[i].shrink());
     }
+
+    // Note: since the neighbours are used to perform a search, there is
+    // no need to do a global reduce of the candidates. The rest of the list
+    // (searched on other processors) will remain unused.
+    // HJ, 27/Apr/2016
 }
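Note the faceMi - pmStart shift that now accompanies every candidate-list access: the list is allocated to the local slice size, so the global loop index must be rebased onto the slice before indexing. The same pattern in miniature (plain C++):

    #include <cassert>
    #include <vector>

    int main()
    {
        const int start = 6, end = 9;   // this processor's master slice
        std::vector<std::vector<int> > local(end - start); // local-size storage

        for (int faceMi = start; faceMi < end; ++faceMi)   // global index
        {
            local[faceMi - start].push_back(faceMi);       // local index
        }

        assert(local[0].front() == 6);  // global face 6 sits in local slot 0
        return 0;
    }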
@@ -276,20 +301,28 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursAABB
     labelListList& result
 ) const
 {
-    List<DynamicList<label, 8> > candidateMasterNeighbors(masterPatch_.size());
+    // Allocation to local size. HJ, 27/Apr/2016
+    List<DynamicList<label, 8> > candidateMasterNeighbors(parMasterSize());

     // Grab the master patch faces bounding boxes
-    List<boundBox> masterPatchBB(masterPatch_.size());
+    // Allocation to local size. HJ, 27/Apr/2016
+    List<boundBox> masterPatchBB(parMasterSize());

-    forAll (masterPatch_, faceMi)
+    // Parallel search split. HJ, 27/Apr/2016
+    const label pmStart = this->parMasterStart();
+
+    for
+    (
+        label faceMi = this->parMasterStart();
+        faceMi < this->parMasterEnd();
+        faceMi++
+    )
+    // forAll (masterPatch_, faceMi)
     {
-        boundBox bbMaster
+        masterPatchBB[faceMi - pmStart] = boundBox
         (
             masterPatch_[faceMi].points(masterPatch_.points()),
             false
         );
-
-        masterPatchBB[faceMi] = bbMaster;
     }

     // Grab the slave patch faces bounding boxes, plus compute its
@@ -388,15 +421,22 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursAABB
     // then, compute the intersection
     const vectorField& masterFaceNormals = masterPatch_.faceNormals();

-    forAll (masterPatchBB, faceMi)
+    // Parallel search split. HJ, 27/Apr/2016
+    for
+    (
+        label faceMi = this->parMasterStart();
+        faceMi < this->parMasterEnd();
+        faceMi++
+    )
+    // forAll (masterPatch_, faceMi)
     {
         forAll (slavePatchBB, faceSi)
         {
             // Compute the augmented AABB
             boundBox augmentedBBMaster
             (
-                masterPatchBB[faceMi].min() - deltaBBSlave[faceSi],
-                masterPatchBB[faceMi].max() + deltaBBSlave[faceSi]
+                masterPatchBB[faceMi - pmStart].min() - deltaBBSlave[faceSi],
+                masterPatchBB[faceMi - pmStart].max() + deltaBBSlave[faceSi]
             );

             if (augmentedBBMaster.overlaps(slavePatchBB[faceSi]))
@@ -408,21 +448,28 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursAABB

                 if (mag(featureCos) > featureCosTol_)
                 {
-                    candidateMasterNeighbors[faceMi].append(faceSi);
+                    candidateMasterNeighbors[faceMi - pmStart].append(faceSi);
                 }
             }
         }
     }

-    // Repack the list
-    result.setSize(masterPatch_.size());
+    // Repack the list. Local size
+    result.setSize(parMasterSize());

+    // Parallel search split: local size. HJ, 27/Apr/2016
     forAll (result, i)
     {
         result[i].transfer(candidateMasterNeighbors[i].shrink());
     }
+
+    // Note: since the neighbours are used to perform a search, there is
+    // no need to do a global reduce of the candidates. The rest of the list
+    // (searched on other processors) will remain unused.
+    // HJ, 27/Apr/2016
 }


 // This algorithm find the faces in proximity of another face based
 // on the face BB (Bounding Box) and an octree of bounding boxes.
 template<class MasterPatch, class SlavePatch>
@@ -431,12 +478,22 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursBBOctree
     labelListList& result
 ) const
 {
-    List<DynamicList<label, 8> > candidateMasterNeighbors(masterPatch_.size());
+    // Allocation to local size. HJ, 27/Apr/2016
+    List<DynamicList<label, 8> > candidateMasterNeighbors(parMasterSize());

     // Initialize the list of master patch faces bounding box
-    treeBoundBoxList lmasterFaceBB(masterPatch_.size());
+    // Allocation to local size. HJ, 27/Apr/2016
+    treeBoundBoxList lmasterFaceBB(parMasterSize());

-    forAll (masterPatch_, faceMi)
+    // Parallel search split. HJ, 27/Apr/2016
+    const label pmStart = this->parMasterStart();
+
+    for
+    (
+        label faceMi = this->parMasterStart();
+        faceMi < this->parMasterEnd();
+        faceMi++
+    )
+    // forAll (masterPatch_, faceMi)
     {
         pointField facePoints
         (
@@ -448,7 +505,7 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursBBOctree
         // (1% by default)
         treeBoundBox bbFaceMaster(facePoints);

-        lmasterFaceBB[faceMi] =
+        lmasterFaceBB[faceMi - pmStart] =
             bbFaceMaster.extend(faceBoundBoxExtendSpanFraction_());
     }

@@ -532,11 +589,18 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursBBOctree
     // Visit each master patch face BB and find the potential neighbours
     // using the slave patch octree

-    forAll (lmasterFaceBB, faceMi)
+    // Parallel search split. HJ, 27/Apr/2016
+    for
+    (
+        label faceMi = this->parMasterStart();
+        faceMi < this->parMasterEnd();
+        faceMi++
+    )
+    // forAll (masterPatch_, faceMi)
     {
         // List of candidate neighbours
         labelList overlappedFaces =
-            slavePatchOctree.findBox(lmasterFaceBB[faceMi]);
+            slavePatchOctree.findBox(lmasterFaceBB[faceMi - pmStart]);

         forAll (overlappedFaces, ovFi)
         {
@@ -549,18 +613,24 @@ void GGIInterpolation<MasterPatch, SlavePatch>::findNeighboursBBOctree

             if (mag(featureCos) > featureCosTol_)
             {
-                candidateMasterNeighbors[faceMi].append(faceSi);
+                candidateMasterNeighbors[faceMi - pmStart].append(faceSi);
             }
         }
     }

-    // Repack the list
-    result.setSize(masterPatch_.size());
+    // Repack the list. Local size
+    result.setSize(parMasterSize());

+    // Parallel search split: local size. HJ, 27/Apr/2016
     forAll (result, i)
     {
         result[i].transfer(candidateMasterNeighbors[i].shrink());
     }
+
+    // Note: since the neighbours are used to perform a search, there is
+    // no need to do a global reduce of the candidates. The rest of the list
+    // (searched on other processors) will remain unused.
+    // HJ, 27/Apr/2016
 }
@@ -28,6 +28,21 @@ Description
     Mass-conservative face interpolation of face data between two
     primitivePatches

+    Note on parallelisation
+        GGI search and cutting is an expensive operation, typically used in
+        patch-to-patch interpolation. The current paradigm keeps primitive
+        patch data as a global zone on all processors for ease of manipulation
+        and to avoid global numbering.
+
+        GGIInterpolation uses a globalData flag to indicate that identical
+        patch data is available everywhere. In such cases, ALL processors
+        (not just the ones which hold a piece of the active GGI surface)
+        can be used for the cutting, with the resulting addressing and weights
+        data assembled by global communication at the end.
+
+        If the GGI patch data is not identical on all processors, set
+        globalData to false. HJ, 27/Apr/2016
+
 Author
     Hrvoje Jasak, Wikki Ltd. All rights reserved

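A sketch of what the new flag looks like at a call site, following the constructor signature extended by this commit (the leading patch arguments and names are illustrative; compare the ggiPolyPatch, MixingPlane, overlapGgi and regionCouple call sites updated below):

    // Illustrative only: patch data identical on all processors,
    // so parallel cutting is permitted
    ggiInterpolation patchToPatch
    (
        masterPatch,   // illustrative argument names
        slavePatch,
        forwardT,
        reverseT,
        forwardSep,
        true,          // globalData: identical patch data everywhere
        SMALL,         // master non-overlap face tolerance
        SMALL,         // slave non-overlap face tolerance
        true,          // rescale GGI weighting factors
        ggiInterpolation::BB_OCTREE   // new default quick-reject
    );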
@@ -86,8 +101,7 @@ public:
         {
             THREE_D_DISTANCE,
             AABB,
-            BB_OCTREE,
-            N_SQUARED
+            BB_OCTREE
         };

@@ -96,7 +110,7 @@ public:
     ClassName("GGIInterpolation");

     //- Quick reject names
-    static const NamedEnum<quickReject, 4> quickRejectNames_;
+    static const NamedEnum<quickReject, 3> quickRejectNames_;


     // Constructors
@@ -148,6 +162,10 @@ class GGIInterpolation
         //  indicates no translation. MB, 28/Jan/2009
         vectorField forwardSep_;

+        //- Global data: are master and slave present on all processors?
+        //  If true, cutting will be performed in parallel
+        bool globalData_;
+
         //- Master non-overlap face tolerance factor
         const scalar masterNonOverlapFaceTol_;

@@ -238,6 +256,27 @@ class GGIInterpolation
         void operator=(const GGIInterpolation&);


+        // Helper functions for parallel search
+
+            //- Is parallel cutting active?
+            bool globalData() const
+            {
+                return
+                    Pstream::parRun()
+                 && globalData_
+                 && masterPatch_.size() > 10*Pstream::nProcs();
+            }
+
+            //- Parallel master slice start
+            label parMasterStart() const;
+
+            //- Parallel master slice end
+            label parMasterEnd() const;
+
+            //- Parallel master slice size
+            label parMasterSize() const;
+
+
         // Helper functions for demand-driven data

             //- Evaluate faces neighborhood based of sphere defined by faces BB
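The globalData() guard combines three conditions; restated as a small standalone predicate (plain C++) to make the gating testable:

    #include <iostream>

    // Mirrors GGIInterpolation::globalData(): parallel cutting pays off only
    // in a parallel run, with identical patch data everywhere, and with
    // enough faces that each processor receives a meaningful slice.
    bool parallelCuttingActive
    (
        bool parRun,
        bool globalData,
        long patchSize,
        int nProcs
    )
    {
        return parRun && globalData && patchSize > 10L*nProcs;
    }

    int main()
    {
        std::cout << parallelCuttingActive(true, true, 100000, 8) << '\n'; // 1
        std::cout << parallelCuttingActive(true, true, 50, 8) << '\n';     // 0
        std::cout << parallelCuttingActive(false, true, 100000, 8) << '\n';// 0
    }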
@@ -423,16 +462,16 @@ public:
             const tensorField& forwardT,
             const tensorField& reverseT,
             const vectorField& forwardSep,
+            const bool globalData,
             const scalar masterFaceNonOverlapFaceTol = 0,
             const scalar slaveFaceNonOverlapFaceTol = 0,
             const bool rescaleGGIWeightingFactors = true,
-            const quickReject reject = AABB
+            const quickReject reject = BB_OCTREE
         );


     // Destructor

         ~GGIInterpolation();


     // Member Functions

@@ -52,9 +52,11 @@ GGIInterpolation<MasterPatch, SlavePatch>::areaErrorTol_
 (
     "GGIAreaErrorTol",
     1.0e-8,
-    "Minimum GGI face to face intersection area. The smallest accepted GGI weighting factor."
+    "Minimum GGI face to face intersection area. "
+    "The smallest accepted GGI weighting factor."
 );


 template<class MasterPatch, class SlavePatch>
 const Foam::debug::tolerancesSwitch
 GGIInterpolation<MasterPatch, SlavePatch>::featureCosTol_
@@ -64,6 +66,7 @@ GGIInterpolation<MasterPatch, SlavePatch>::featureCosTol_
     "Minimum cosine value between 2 GGI patch neighbouring facet normals."
 );

+
 // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //

 template<class MasterPatch, class SlavePatch>
@@ -98,14 +101,6 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const

     // Create the dynamic lists to hold the addressing

-    // The final master/slave list, after filtering out the "false" neighbours
-    List<DynamicList<label, 8> > masterNeighbors(masterPatch_.size());
-    List<DynamicList<label, 8> > slaveNeighbors(slavePatch_.size());
-
-    // The weights
-    List<DynamicList<scalar, 8> > masterNeighborsWeights(masterPatch_.size());
-    List<DynamicList<scalar, 8> > slaveNeighborsWeights(slavePatch_.size());
-
     // First, find a rough estimate of each slave and master facet
     // neighborhood by filtering out all the faces located outside of
     // an Axis-Aligned Bounding Box (AABB). Warning: This algorithm
@@ -120,7 +115,9 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
     //   1) Axis-aligned bounding box
     //   2) Octree search with bounding box
     //   3) 3-D vector distance
-    //   4) n-Squared search

+    // Note: Allocated to local size for parallel search. HJ, 27/Apr/2016
     labelListList candidateMasterNeighbors;

     if (reject_ == AABB)
@@ -135,21 +132,14 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
     {
         findNeighbours3D(candidateMasterNeighbors);
     }
-    else if (reject_ == N_SQUARED)
+    else
     {
-        candidateMasterNeighbors.setSize(masterPatch_.size());
-
-        // Mark N-squared search
-        labelList nSquaredList(slavePatch_.size());
-        forAll (nSquaredList, i)
-        {
-            nSquaredList[i] = i;
-        }
-
-        forAll (candidateMasterNeighbors, j)
-        {
-            candidateMasterNeighbors[j] = nSquaredList;
-        }
+        FatalErrorIn
+        (
+            "void GGIInterpolation<MasterPatch, SlavePatch>::"
+            "calcAddressing() const"
+        )   << "Unknown search"
+            << abort(FatalError);
     }

     // Next, we move to the 2D world. We project each slave and
@@ -167,12 +157,39 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
     // Store the polygon made by projecting the face points onto the
     // face normal
     // The master faces polygons
     List<pointField> masterFace2DPolygon(masterPatch_.size());

     // Tolerance factor for the Separation of Axes Theorem == distErrorTol_

-    forAll (masterPatch_, faceMi)
+    // The final master/slave list, after filtering out the "false" neighbours
+    // Note: rescale only the local ones. HJ, 27/Apr/2016
+    // Note: slave neighbours and weights delayed until master cutting
+    // is complete. HJ, 27/Apr/2016
+    List<DynamicList<label> > masterNeighbors(masterPatch_.size());
+    List<DynamicList<scalar> > masterNeighborsWeights(masterPatch_.size());
+
+    // Store slave neighbour weights at the same time, but under master
+    // addressing. The list will be redistributed to slaves in a separate
+    // loop. HJ, 27/Apr/2016
+    List<DynamicList<scalar> > slaveOnMasterNeighborsWeights
+    (
+        masterPatch_.size()
+    );
+
+    // Parallel search split. HJ, 27/Apr/2016
+    const label pmStart = this->parMasterStart();
+
+    for
+    (
+        label faceMi = this->parMasterStart();
+        faceMi < this->parMasterEnd();
+        faceMi++
+    )
+    // forAll (masterPatch_, faceMi)
     {
+        // Set capacity
+        masterNeighbors[faceMi].setCapacity(8);
+
         // First, we make sure that all the master faces points are
         // recomputed onto the 2D plane defined by the master faces
         // normals.
@@ -211,16 +228,16 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
 // normalized.
 //
 //
 // u = vector from face center to most distant projected master face point.
 // / .
 // ^y / | . .w = normal to master face
 // | / | . .
 // | / | . .
 // | | | .
 // | | / .
 // | | / .
 // | | / .
 // ---------> x |/ .
 // / v = w^u
 // /
 // /
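The (u, v, w) basis in the comment block above reduces each 3-D face to a 2-D polygon before intersection. A minimal standalone sketch of that projection (plain C++; w is the unit face normal, u a unit in-plane direction, and v = w ^ u completes the basis):

    #include <array>
    #include <cstdio>
    #include <vector>

    using Vec = std::array<double, 3>;

    Vec cross(const Vec& a, const Vec& b)
    {
        return
        {
            a[1]*b[2] - a[2]*b[1],
            a[2]*b[0] - a[0]*b[2],
            a[0]*b[1] - a[1]*b[0]
        };
    }

    double dot(const Vec& a, const Vec& b)
    {
        return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
    }

    int main()
    {
        const Vec w = {0, 0, 1};        // unit face normal
        const Vec u = {1, 0, 0};        // unit vector to most distant point
        const Vec v = cross(w, u);      // v = w ^ u

        const std::vector<Vec> face =
            {{0, 0, 0}, {1, 0, 0}, {1, 1, 0}, {0, 1, 0}};

        for (const Vec& p : face)       // UV coordinates of each face point
        {
            std::printf("(%g, %g)\n", dot(p, u), dot(p, v));
        }
    }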
@@ -254,9 +271,9 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
         // We need this for computing the weighting factors
         scalar surfaceAreaMasterPointsInUV = area2D(masterPointsInUV);

-        // Check if polygon is CW.. Should not, it should be CCW; but
+        // Check if polygon is CW. Should not, it should be CCW; but
         // better and cheaper to check here
-        if (surfaceAreaMasterPointsInUV < 0.0)
+        if (surfaceAreaMasterPointsInUV < 0)
         {
             reverse(masterPointsInUV);
             surfaceAreaMasterPointsInUV = -surfaceAreaMasterPointsInUV;
@@ -272,7 +289,8 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const

         // Next, project the candidate master neighbours faces points
         // onto the same plane using the new orthonormal basis
-        const labelList& curCMN = candidateMasterNeighbors[faceMi];
+        // Note: Allocated to local size for parallel search. HJ, 27/Apr/2016
+        const labelList& curCMN = candidateMasterNeighbors[faceMi - pmStart];

         forAll (curCMN, neighbI)
         {
@@ -400,18 +418,25 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
                     // faces projection as well. That way, we make
                     // sure all our factors will sum up to 1.0.

+                    // Add slave to master
                     masterNeighbors[faceMaster].append(faceSlave);
-                    slaveNeighbors[faceSlave].append(faceMaster);

+                    // Add master weight to master
                     masterNeighborsWeights[faceMaster].append
                     (
                         intersectionArea/surfaceAreaMasterPointsInUV
                     );

-                    slaveNeighborsWeights[faceSlave].append
+                    // Record slave weight on master to avoid recalculation
+                    // of projected areas. HJ, 27/Apr/2016
+                    slaveOnMasterNeighborsWeights[faceMaster].append
                     (
                         intersectionArea/surfaceAreaNeighbPointsInUV
                     );
+
+                    // Note: Slave side will be reconstructed after the
+                    // parallel cutting and reduce operations.
+                    // HJ, 27/Apr/2016
                 }
                 else
                 {
@@ -438,35 +463,112 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
     masterAddrPtr_ = new labelListList(masterPatch_.size());
     labelListList& ma = *masterAddrPtr_;

+    // Parallel search split. HJ, 27/Apr/2016
+    for (label mfI = this->parMasterStart(); mfI < this->parMasterEnd(); mfI++)
+    // forAll (ma, mfI)
+    {
+        ma[mfI].transfer(masterNeighbors[mfI].shrink());
+    }
+
+    // Parallel communication: reduce master addressing
+    if (globalData())
+    {
+        Pstream::combineGather(ma, Pstream::listEq());
+        Pstream::combineScatter(ma);
+    }
+
     masterWeightsPtr_ = new scalarListList(masterPatch_.size());
     scalarListList& maW = *masterWeightsPtr_;

-    forAll (ma, mfI)
+    // Parallel search split. HJ, 27/Apr/2016
+    for (label mfI = this->parMasterStart(); mfI < this->parMasterEnd(); mfI++)
+    // forAll (maW, mfI)
     {
-        ma[mfI].transfer(masterNeighbors[mfI].shrink());
         maW[mfI].transfer(masterNeighborsWeights[mfI].shrink());
     }

+    // Parallel communication: reduce master weights
+    if (globalData())
+    {
+        Pstream::combineGather(maW, Pstream::listEq());
+        Pstream::combineScatter(maW);
+    }
+
+    // Reduce slave on master weights
+    scalarListList smaW(masterPatch_.size());
+
+    for (label mfI = this->parMasterStart(); mfI < this->parMasterEnd(); mfI++)
+    // forAll (smaW, mfI)
+    {
+        smaW[mfI].transfer(slaveOnMasterNeighborsWeights[mfI].shrink());
+    }
+
+    // Parallel communication: reduce slave-on-master weights
+    if (globalData())
+    {
+        Pstream::combineGather(smaW, Pstream::listEq());
+        Pstream::combineScatter(smaW);
+    }
+
+    // Slave neighbours and weights
+    List<DynamicList<label, 8> > slaveNeighbors(slavePatch_.size());
+    List<DynamicList<scalar, 8> > slaveNeighborsWeights(slavePatch_.size());
+
+    // Note: slave side is not parallelised: book-keeping only
+    // HJ, 27/Apr/2016
+
+    // Loop through the complete patch and distribute slave neighbours
+    // and weights based on the known intersection area from the master.
+    // The code has been reorganised to allow master cutting to be
+    // performed in parallel and collect the slave side once the parallel
+    // reduction is complete. HJ, 27/Apr/2016
+
+    // Note: loop through the complete slave side
+    forAll (ma, mfI)
+    {
+        // Get master neighbours and weights
+        const labelList& curMa = ma[mfI];
+        const scalarList& cursmaW = smaW[mfI];
+
+        // Current master face index
+        const label faceMaster = mfI;
+
+        forAll (curMa, mAI)
+        {
+            const label faceSlave = curMa[mAI];
+
+            // Add this master as a neighbour to its slave
+            slaveNeighbors[faceSlave].append(faceMaster);
+
+            slaveNeighborsWeights[faceSlave].append(cursmaW[mAI]);
+        }
+    }
+
+
     slaveAddrPtr_ = new labelListList(slavePatch_.size());
     labelListList& sa = *slaveAddrPtr_;

     forAll (sa, sfI)
     {
         sa[sfI].transfer(slaveNeighbors[sfI].shrink());
     }

     slaveWeightsPtr_ = new scalarListList(slavePatch_.size());
     scalarListList& saW = *slaveWeightsPtr_;

     forAll (sa, sfI)
     {
-        sa[sfI].transfer(slaveNeighbors[sfI].shrink());
         saW[sfI].transfer(slaveNeighborsWeights[sfI].shrink());
     }

     // Now that the neighbourhood is known, let's go hunting for
     // non-overlapping faces

     uncoveredMasterAddrPtr_ =
         new labelList
         (
             findNonOverlappingFaces(maW, masterNonOverlapFaceTol_)
         );

+    // Not parallelised. HJ, 27/Apr/2016
     uncoveredSlaveAddrPtr_ =
         new labelList
         (
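The combineGather/combineScatter pairs merge the per-slice master lists: each processor filled only its own [parMasterStart(), parMasterEnd()) entries, so the gather must keep, slot by slot, whichever processor's entry is non-empty (that is the semantics assumed of Pstream::listEq here). A single-process sketch of that merge, with the MPI transport elided (plain C++):

    #include <cassert>
    #include <vector>

    using ListList = std::vector<std::vector<int> >;

    // listEq-style slot merge: an empty slot is filled from the other
    // processor; already-filled slots are left alone.
    void combineSlots(ListList& master, const ListList& fromProc)
    {
        for (std::size_t i = 0; i < master.size(); ++i)
        {
            if (master[i].empty() && !fromProc[i].empty())
            {
                master[i] = fromProc[i];
            }
        }
    }

    int main()
    {
        // Two "processors", each owning half of a four-slot list
        ListList proc0 = {{7}, {8, 9}, {}, {}};
        const ListList proc1 = {{}, {}, {1}, {2, 3}};

        combineSlots(proc0, proc1);        // gather onto the master copy
        const ListList& result = proc0;    // scatter hands it back to all

        assert(result[2].size() == 1 && result[3].size() == 2);
        return 0;
    }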
@@ -480,6 +582,7 @@ void GGIInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
     // then we need the brute values, so no rescaling in that
     // case. Hence the little flag rescaleGGIWeightingFactors_

+    // Not parallelised. HJ, 27/Apr/2016
     if (rescaleGGIWeightingFactors_)
     {
         rescaleWeightingFactors();
@@ -638,6 +741,7 @@ GGIInterpolation<MasterPatch, SlavePatch>::findNonOverlappingFaces
     return tpatchFaceNonOverlapAddr;
 }

+
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

 } // End namespace Foam
@@ -133,8 +133,9 @@ void MixingPlaneInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
         noTransform,
         noTransform,
         noTranslation,
-        0,
-        0,
+        true,   // Patch data is complete on all processors
+        SMALL,
+        SMALL,
         true    // Scale GGI weights
     );

@@ -146,8 +147,9 @@ void MixingPlaneInterpolation<MasterPatch, SlavePatch>::calcAddressing() const
         noTransform,
         noTransform,
         noTranslation,
-        0,
-        0,
+        true,   // Patch data is complete on all processors
+        SMALL,
+        SMALL,
         true    // Scale GGI weights
     );

@@ -74,7 +74,7 @@ Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
         slave++
     )
     {
-        IPstream fromSlave(Pstream::blocking, slave);
+        IPstream fromSlave(Pstream::scheduled, slave);
         List<labelPair> nbrData(fromSlave);

         forAll(nbrData, i)
@@ -95,18 +95,18 @@ Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
             slave++
         )
         {
-            OPstream toSlave(Pstream::blocking, slave);
+            OPstream toSlave(Pstream::scheduled, slave);
             toSlave << allComms;
         }
     }
     else
     {
         {
-            OPstream toMaster(Pstream::blocking, Pstream::masterNo());
+            OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
             toMaster << allComms;
         }
         {
-            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
+            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
             fromMaster >> allComms;
         }
     }
@@ -123,29 +123,28 @@ Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
     );

     // Processors involved in my schedule
-    return UIndirectList<labelPair>(allComms, mySchedule);
+    return List<labelPair>(UIndirectList<labelPair>(allComms, mySchedule));

-    // if (debug)
-    // {
-    //     Pout<< "I need to:" << endl;
-    //     const List<labelPair>& comms = schedule();
-    //     forAll(comms, i)
-    //     {
-    //         const labelPair& twoProcs = comms[i];
-    //         label sendProc = twoProcs[0];
-    //         label recvProc = twoProcs[1];
-    //         if (recvProc == Pstream::myProcNo())
-    //         {
-    //             Pout<< "    receive from " << sendProc << endl;
-    //         }
-    //         else
-    //         {
-    //             Pout<< "    send to " << recvProc << endl;
-    //         }
-    //     }
-    //}
+    //if (debug)
+    //{
+    //    Pout<< "I need to:" << endl;
+    //    const List<labelPair>& comms = schedule();
+    //    forAll(comms, i)
+    //    {
+    //        const labelPair& twoProcs = comms[i];
+    //        label sendProc = twoProcs[0];
+    //        label recvProc = twoProcs[1];
+    //
+    //        if (recvProc == Pstream::myProcNo())
+    //        {
+    //            Pout<< "    receive from " << sendProc << endl;
+    //        }
+    //        else
+    //        {
+    //            Pout<< "    send to " << recvProc << endl;
+    //        }
+    //    }
+    //}
 }

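The changed return statement makes the deep copy explicit: the old form handed back an indirect list referencing the function-local allComms, leaving the copy to an implicit conversion. The hazard in miniature (plain C++ analogue, not the Foam classes):

    #include <cstdio>
    #include <vector>

    // Analogue of the fix: materialise the selection into an owning
    // container before the referenced storage goes out of scope.
    std::vector<int> schedule()
    {
        std::vector<int> allComms = {10, 20, 30};        // local storage
        std::vector<std::size_t> mySchedule = {2, 0};    // my selection

        std::vector<int> out;
        for (std::size_t i : mySchedule)
        {
            out.push_back(allComms[i]);                  // deep copy
        }

        return out;   // safe: owns its elements
    }

    int main()
    {
        for (int s : schedule()) std::printf("%d\n", s); // 30 10
    }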
@@ -165,6 +164,32 @@ const Foam::List<Foam::labelPair>& Foam::mapDistribute::schedule() const
 }


+void Foam::mapDistribute::checkReceivedSize
+(
+    const label procI,
+    const label expectedSize,
+    const label receivedSize
+)
+{
+    if (receivedSize != expectedSize)
+    {
+        FatalErrorIn
+        (
+            "template<class T>\n"
+            "void mapDistribute::checkReceivedSize\n"
+            "(\n"
+            "    const label procI,\n"
+            "    const label expectedSize,\n"
+            "    const label receivedSize\n"
+            ")\n"
+        )   << "Expected from processor " << procI
+            << " " << expectedSize << " but received "
+            << receivedSize << " elements."
+            << abort(FatalError);
+    }
+}
+
+
 // * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

 //- Construct from components
@@ -79,6 +79,16 @@ class mapDistribute
         mutable autoPtr<List<labelPair> > schedulePtr_;


+    // Private Member Functions
+
+        static void checkReceivedSize
+        (
+            const label procI,
+            const label expectedSize,
+            const label receivedSize
+        );
+
+
 public:

     // Constructors
@@ -165,7 +175,7 @@ public:

         // Other

-            //- Compact maps. Gets per field a bool whether it is used (locally)
+            //- Compact maps. Gets per field a bool whether it is used locally
             //  and works out itself what this side and sender side can remove
             //  from maps.
             void compact(const boolList& elemIsUsed);
@@ -257,7 +267,6 @@ public:
     // Member Operators

         void operator=(const mapDistribute&);
-
 };

@@ -85,25 +85,7 @@ void Foam::mapDistribute::distribute
                 IPstream fromNbr(Pstream::blocking, domain);
                 List<T> subField(fromNbr);

-                if (subField.size() != map.size())
-                {
-                    FatalErrorIn
-                    (
-                        "template<class T>\n"
-                        "void mapDistribute::distribute\n"
-                        "(\n"
-                        "    const Pstream::commsTypes commsType,\n"
-                        "    const List<labelPair>& schedule,\n"
-                        "    const label constructSize,\n"
-                        "    const labelListList& subMap,\n"
-                        "    const labelListList& constructMap,\n"
-                        "    List<T>& field\n"
-                        ")\n"
-                    )   << "Expected from processor " << domain
-                        << " " << map.size() << " but received "
-                        << subField.size() << " elements."
-                        << abort(FatalError);
-                }
+                checkReceivedSize(domain, map.size(), subField.size());

                 forAll(map, i)
                 {
@@ -151,25 +133,7 @@ void Foam::mapDistribute::distribute

             const labelList& map = constructMap[sendProc];

-            if (subField.size() != map.size())
-            {
-                FatalErrorIn
-                (
-                    "template<class T>\n"
-                    "void mapDistribute::distribute\n"
-                    "(\n"
-                    "    const Pstream::commsTypes commsType,\n"
-                    "    const List<labelPair>& schedule,\n"
-                    "    const label constructSize,\n"
-                    "    const labelListList& subMap,\n"
-                    "    const labelListList& constructMap,\n"
-                    "    List<T>& field\n"
-                    ")\n"
-                )   << "Expected from processor " << sendProc
-                    << " " << map.size() << " but received "
-                    << subField.size() << " elements."
-                    << abort(FatalError);
-            }
+            checkReceivedSize(recvProc, map.size(), subField.size());

             forAll(map, i)
             {
@@ -292,25 +256,12 @@ void Foam::mapDistribute::distribute

             if (domain != Pstream::myProcNo() && map.size())
             {
-                if (recvFields[domain].size() != map.size())
-                {
-                    FatalErrorIn
-                    (
-                        "template<class T>\n"
-                        "void mapDistribute::distribute\n"
-                        "(\n"
-                        "    const Pstream::commsTypes commsType,\n"
-                        "    const List<labelPair>& schedule,\n"
-                        "    const label constructSize,\n"
-                        "    const labelListList& subMap,\n"
-                        "    const labelListList& constructMap,\n"
-                        "    List<T>& field\n"
-                        ")\n"
-                    )   << "Expected from processor " << domain
-                        << " " << map.size() << " but received "
-                        << recvFields[domain].size() << " elements."
-                        << abort(FatalError);
-                }
+                checkReceivedSize
+                (
+                    domain,
+                    map.size(),
+                    recvFields[domain].size()
+                );

                 forAll(map, i)
                 {
@@ -625,8 +576,9 @@ void Foam::mapDistribute::distribute
     }
     else
     {
+        // This needs to be cleaned up: temporary solution. HJ, 15/Jun/2014
         FatalErrorIn("mapDistribute::distribute(..)")
-            << "Unknown communication schedule " << commsType
+            << "Unknown communication schedule " << label(commsType)
             << abort(FatalError);
     }
 }
@@ -204,10 +204,11 @@ void Foam::ggiPolyPatch::calcPatchToPatch() const
             forwardT(),
             reverseT(),
             -separation(), // Slave-to-master separation: Use - localValue
+            true,          // Patch data is complete on all processors
             // Bug fix, delayed slave evaluation causes error
             // HJ, 30/Jun/2013
-            0,             // Non-overlapping face tolerances
-            0,             // HJ, 24/Oct/2008
+            SMALL,         // Non-overlapping face tolerances
+            SMALL,         // HJ, 24/Oct/2008
             true,          // Rescale weighting factors. Bug fix, MB.
             reject_        // Quick rejection algorithm, default BB_OCTREE
         );
@@ -258,7 +259,7 @@ void Foam::ggiPolyPatch::calcReconFaceCellCentres() const
     if (debug)
     {
         InfoIn("void ggiPolyPatch::calcReconFaceCellCentres() const")
-            << "Calculating recon centres for patch"
+            << "Calculating recon centres for patch "
             << name() << endl;
     }

@@ -355,12 +356,12 @@ void Foam::ggiPolyPatch::calcLocalParallel() const
 void Foam::ggiPolyPatch::calcSendReceive() const
 {
     // Note: all processors will execute calcSendReceive but only master will
-    //       hold the information. Therefore, pointers on slave processors
+    //       hold the complete information. Therefore, pointers on slave
+    //       processors
     //       will remain meaningless, but for purposes of consistency
     //       (of the calc-call) they will be set to zero-sized array
     //       HJ, 4/Jun/2011

-    if (receiveAddrPtr_ || sendAddrPtr_)
+    if (receiveAddrPtr_ || sendAddrPtr_ || mapPtr_)
     {
         FatalErrorIn("void ggiPolyPatch::calcSendReceive() const")
             << "Send-receive addressing already calculated"
@@ -381,52 +382,151 @@ void Foam::ggiPolyPatch::calcSendReceive() const
             << abort(FatalError);
     }

-    // Master will receive and store the maps
-    if (Pstream::master())
+    // Gather send and receive addressing (to master)
+
+    // Get patch-to-zone addressing
+    const labelList& za = zoneAddressing();
+
+    // Get remote patch-to-zone addressing
+    const labelList& rza = remoteZoneAddressing();
+
+    receiveAddrPtr_ = new labelListList(Pstream::nProcs());
+    labelListList& rAddr = *receiveAddrPtr_;
+
+    sendAddrPtr_ = new labelListList(Pstream::nProcs());
+    labelListList& sAddr = *sendAddrPtr_;
+
+    rAddr[Pstream::myProcNo()] = za;
+    sAddr[Pstream::myProcNo()] = rza;
+
+    // Perform gather operation to master
+    Pstream::gatherList(rAddr);
+    Pstream::gatherList(sAddr);
+
+    // Perform scatter operation to master
+    Pstream::scatterList(rAddr);
+    Pstream::scatterList(sAddr);
+
+    // Zone addressing and remote zone addressing is fixed. Determine the
+    // parallel communications pattern
+
+    // Make a zone-sized field and fill it in with proc markings for processor
+    // that holds and requires the data
+    labelField zoneProcID(zone().size(), -1);
+
+    forAll (za, zaI)
     {
-        receiveAddrPtr_ = new labelListList(Pstream::nProcs());
-        labelListList& rAddr = *receiveAddrPtr_;
+        zoneProcID[za[zaI]] = Pstream::myProcNo();
+    }

-        sendAddrPtr_ = new labelListList(Pstream::nProcs());
-        labelListList& sAddr = *sendAddrPtr_;
+    reduce(zoneProcID, maxOp<labelField>());

-        // Insert master
-        rAddr[0] = zoneAddressing();
+    // Find out where my zone data is coming from
+    labelList nRecv(Pstream::nProcs(), 0);

-        for (label procI = 1; procI < Pstream::nProcs(); procI++)
+    const labelList& shadowRza = shadow().remoteZoneAddressing();
+
+    // Note: only visit the data from the local zone
+    forAll (shadowRza, shadowRzaI)
+    {
+        nRecv[zoneProcID[shadowRza[shadowRzaI]]]++;
+    }
+
+    // Make a receiving sub-map
+    // It tells me which data I will receive from which processor and
+    // where I need to put it into the remoteZone data before the mapping
+    labelListList constructMap(Pstream::nProcs());
+
+    // Size the receiving list
+    forAll (nRecv, procI)
+    {
+        constructMap[procI].setSize(nRecv[procI]);
+    }
+
+    // Reset counters for processors
+    nRecv = 0;
+
+    forAll (shadowRza, shadowRzaI)
+    {
+        label recvProc = zoneProcID[shadowRza[shadowRzaI]];
+
+        constructMap[recvProc][nRecv[recvProc]] = shadowRza[shadowRzaI];
+
+        nRecv[recvProc]++;
+    }
+
+    // Make the sending sub-map
+    // It tells me which data is required from me to be sent to which
+    // processor
+
+    // Algorithm
+    // - expand the local zone faces with indices into a size of local zone
+    // - go through remote zone addressing on all processors
+    // - find out who hits my faces
+    labelList localZoneIndices(zone().size(), -1);
+
+    forAll (za, zaI)
+    {
+        localZoneIndices[za[zaI]] = zaI;
+    }
+
+    labelListList shadowToReceiveAddr(Pstream::nProcs());
+
+    // What my shadow needs to receive
+    shadowToReceiveAddr[Pstream::myProcNo()] = shadow().remoteZoneAddressing();
+    Pstream::gatherList(shadowToReceiveAddr);
+    Pstream::scatterList(shadowToReceiveAddr);
+
+    // Now local zone indices contain the index of a local face that will
+    // provide the data. For faces that are not local, the index will be -1
+
+    // Find out where my zone data is going to
+
+    // Make a sending sub-map
+    // It tells me which data I will send to which processor
+    labelListList sendMap(Pstream::nProcs());
+
+    // Collect local labels to be sent to each processor
+    forAll (shadowToReceiveAddr, procI)
+    {
+        const labelList& curProcSend = shadowToReceiveAddr[procI];
+
+        // Find out how much of my data is going to this processor
+        label nProcSend = 0;
+
+        forAll (curProcSend, sendI)
         {
-            // Note: must use normal comms because the size of the
-            // communicated lists is unknown on the receiving side
-            // HJ, 4/Jun/2011
+            if (localZoneIndices[curProcSend[sendI]] > -1)
+            {
+                nProcSend++;
+            }
+        }

-            // Opt: reconsider mode of communication
-            IPstream ip(Pstream::scheduled, procI);
+        if (nProcSend > 0)
+        {
+            // Collect the indices
+            labelList& curSendMap = sendMap[procI];

-            rAddr[procI] = labelList(ip);
+            curSendMap.setSize(nProcSend);

-            sAddr[procI] = labelList(ip);
+            // Reset counter
+            nProcSend = 0;
+
+            forAll (curProcSend, sendI)
+            {
+                if (localZoneIndices[curProcSend[sendI]] > -1)
+                {
+                    curSendMap[nProcSend] =
+                        localZoneIndices[curProcSend[sendI]];
+                    nProcSend++;
+                }
+            }
         }
     }
-    else
-    {
-        // Create dummy pointers: only master processor stores maps
-        receiveAddrPtr_ = new labelListList();
-        sendAddrPtr_ = new labelListList();

-        // Send information to master
-        const labelList& za = zoneAddressing();
-        const labelList& ra = remoteZoneAddressing();
-
-        // Note: must use normal comms because the size of the
-        // communicated lists is unknown on the receiving side
-        // HJ, 4/Jun/2011
-
-        // Opt: reconsider mode of communication
-        OPstream op(Pstream::scheduled, Pstream::masterNo());
-
-        // Send local and remote addressing to master
-        op << za << ra;
-    }
+    // Map will return the object of the size of remote zone
+    // HJ, 9/May/2016
+    mapPtr_ = new mapDistribute(zone().size(), sendMap, constructMap);
 }
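The two maps built here are the entire mapDistribute contract: sendMap[p] lists my local values bound for processor p, and constructMap[p] gives the zone slots where values arriving from p are placed. A single-process simulation of one exchange between two mock processors (plain C++):

    #include <cstdio>
    #include <vector>

    int main()
    {
        // Local data held by two mock processors
        const std::vector<std::vector<int> > field = {{100, 101}, {200, 201}};

        // sendMap[p][q]: local indices processor p sends to q
        // constructMap[p][q]: zone slots where p places data arriving from q
        const std::vector<std::vector<std::vector<int> > > sendMap =
            {{{0}, {1}}, {{1}, {0}}};
        const std::vector<std::vector<std::vector<int> > > constructMap =
            {{{0}, {3}}, {{2}, {1}}};

        const int zoneSize = 4;

        for (int me = 0; me < 2; ++me)
        {
            std::vector<int> zone(zoneSize, -1);

            for (int p = 0; p < 2; ++p)   // "receive" from processor p
            {
                const std::vector<int>& cm = constructMap[me][p];

                for (std::size_t i = 0; i < cm.size(); ++i)
                {
                    zone[cm[i]] = field[p][sendMap[p][me][i]];
                }
            }

            std::printf("proc %d zone:", me);
            for (int v : zone) std::printf(" %d", v);
            std::printf("\n");
        }
    }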
@@ -452,6 +552,17 @@ const Foam::labelListList& Foam::ggiPolyPatch::sendAddr() const
 }


+const Foam::mapDistribute& Foam::ggiPolyPatch::map() const
+{
+    if (!mapPtr_)
+    {
+        calcSendReceive();
+    }
+
+    return *mapPtr_;
+}
+
+
 void Foam::ggiPolyPatch::clearGeom()
 {
     deleteDemandDrivenData(reconFaceCellCentresPtr_);
@@ -464,6 +575,7 @@ void Foam::ggiPolyPatch::clearGeom()

     deleteDemandDrivenData(receiveAddrPtr_);
     deleteDemandDrivenData(sendAddrPtr_);
+    deleteDemandDrivenData(mapPtr_);
 }

@@ -504,7 +616,8 @@ Foam::ggiPolyPatch::ggiPolyPatch
     reconFaceCellCentresPtr_(NULL),
     localParallelPtr_(NULL),
     receiveAddrPtr_(NULL),
-    sendAddrPtr_(NULL)
+    sendAddrPtr_(NULL),
+    mapPtr_(NULL)
 {}

@@ -534,7 +647,8 @@ Foam::ggiPolyPatch::ggiPolyPatch
     reconFaceCellCentresPtr_(NULL),
     localParallelPtr_(NULL),
     receiveAddrPtr_(NULL),
-    sendAddrPtr_(NULL)
+    sendAddrPtr_(NULL),
+    mapPtr_(NULL)
 {}

@@ -559,7 +673,8 @@ Foam::ggiPolyPatch::ggiPolyPatch
     reconFaceCellCentresPtr_(NULL),
     localParallelPtr_(NULL),
     receiveAddrPtr_(NULL),
-    sendAddrPtr_(NULL)
+    sendAddrPtr_(NULL),
+    mapPtr_(NULL)
 {
     if (dict.found("quickReject"))
     {
@@ -590,7 +705,8 @@ Foam::ggiPolyPatch::ggiPolyPatch
     reconFaceCellCentresPtr_(NULL),
     localParallelPtr_(NULL),
     receiveAddrPtr_(NULL),
-    sendAddrPtr_(NULL)
+    sendAddrPtr_(NULL),
+    mapPtr_(NULL)
 {}

@@ -617,7 +733,8 @@ Foam::ggiPolyPatch::ggiPolyPatch
     reconFaceCellCentresPtr_(NULL),
     localParallelPtr_(NULL),
     receiveAddrPtr_(NULL),
-    sendAddrPtr_(NULL)
+    sendAddrPtr_(NULL),
+    mapPtr_(NULL)
 {}

@@ -947,7 +1064,8 @@ void Foam::ggiPolyPatch::calcTransforms() const

         fileName fvPath(mesh.time().path()/"VTK");
         mkDir(fvPath);
-        Pout<< "shadow().localFaces(): " << shadow().localFaces().size()
+        Pout<< "Patch " << name()
+            << " shadow().localFaces(): " << shadow().localFaces().size()
             << " patchToPatch().uncoveredSlaveFaces().size(): "
             << patchToPatch().uncoveredSlaveFaces().size()
             << " shadow().localPoints(): " << shadow().localPoints().size()
@@ -47,6 +47,7 @@ SourceFiles
 #include "word.H"
 #include "faceZone.H"
 #include "Switch.H"
+#include "mapDistribute.H"

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

@@ -110,6 +111,9 @@ class ggiPolyPatch
         //- List of zone faces indices to send to each processor
         mutable labelListList* sendAddrPtr_;

+        //- Map-distribute comms tool
+        mutable mapDistribute* mapPtr_;
+

     // Private member functions

@@ -147,6 +151,9 @@ class ggiPolyPatch
             //- Return send addressing
             const labelListList& sendAddr() const;

+            //- Return mapDistribute
+            const mapDistribute& map() const;
+

         // Memory management

@@ -89,7 +89,7 @@ Foam::tmp<Foam::Field<Type> > Foam::ggiPolyPatch::fastExpand
         shadow().receiveAddr();
     }

-    // Expand the field to zone size
+    // Prepare return field: expand the field to zone size
     tmp<Field<Type> > texpandField
     (
         new Field<Type>(zone().size(), pTraits<Type>::zero)
@@ -97,6 +97,8 @@ Foam::tmp<Foam::Field<Type> > Foam::ggiPolyPatch::fastExpand

     Field<Type>& expandField = texpandField();

+#if 1
+
     if (Pstream::master())
     {
         // Insert master processor
@@ -200,6 +202,88 @@ Foam::tmp<Foam::Field<Type> > Foam::ggiPolyPatch::fastExpand
         }
     }

+#else
+
+    if (Pstream::parRun())
+    {
+        // Make shadow send subField to neighbour
+        for (label domain = 0; domain < Pstream::nProcs(); domain++)
+        {
+            const labelList& curMap = map().subMap()[domain];
+
+            if (domain != Pstream::myProcNo() && curMap.size())
+            {
+                OPstream toNbr(Pstream::blocking, domain);
+                toNbr << UIndirectList<Type>(ff, curMap);
+            }
+        }
+
+        // Subset myself
+        {
+            const labelList& mySubMap = map().subMap()[Pstream::myProcNo()];
+
+            List<Type> subField(mySubMap.size());
+            forAll (mySubMap, i)
+            {
+                subField[i] = ff[mySubMap[i]];
+            }
+
+            // Receive sub field from myself (subField)
+            const labelList& curMap =
+                map().constructMap()[Pstream::myProcNo()];
+
+            forAll (curMap, i)
+            {
+                expandField[curMap[i]] = subField[i];
+            }
+        }
+
+        // Receive sub field from neighbour
+        for (label domain = 0; domain < Pstream::nProcs(); domain++)
+        {
+            const labelList& curMap = map().constructMap()[domain];
+
+            if (domain != Pstream::myProcNo() && curMap.size())
+            {
+                IPstream fromNbr(Pstream::blocking, domain);
+                List<Type> recvField(fromNbr);
+
+                if (curMap.size() != recvField.size())
+                {
+                    FatalErrorIn
+                    (
+                        "tmp<Field<Type> > ggiPolyPatch::fastExpand\n"
+                        "(\n"
+                        "    const Field<Type>& ff\n"
+                        ") const"
+                    )   << "Expected from processor " << domain << " size "
+                        << curMap.size() << " but received "
+                        << recvField.size() << " elements."
+                        << abort(FatalError);
+                }
+
+                forAll (curMap, i)
+                {
+                    expandField[curMap[i]] = recvField[i];
+                }
+            }
+        }
+    }
+    else
+    {
+        // Serial. Expand the field to zone size
+
+        const labelList& zAddr = zoneAddressing();
+
+        forAll (zAddr, i)
+        {
+            expandField[zAddr[i]] = ff[i];
+        }
+    }
+
+#endif
+
     return texpandField;
 }

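The #else branch above is the direct-comms path this intermediate commit is working towards: it performs by hand exactly the subMap/constructMap round-trip that mapDistribute encapsulates. Assuming a member distribute() wrapper over the static overloads edited earlier in this commit, the branch could presumably collapse to something like the following (hypothetical sketch, not part of the commit):

    // Hypothetical: start from the local patch data and let mapDistribute
    // resize and fill the zone-sized field via its stored maps
    List<Type> expanded(ff);
    map().distribute(expanded);   // assumed member wrapper; see mapDistribute.H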
@@ -167,10 +167,10 @@ void Foam::overlapGgiPolyPatch::calcPatchToPatch() const
             forwardT(),
             reverseT(),
             separation(),
-            0,     // master overlap tolerance
-            0,     // slave overlap tolerance
+            true,  // Patch data is complete on all processors
+            SMALL, // master overlap tolerance
+            SMALL, // slave overlap tolerance
             true,  // Rescale weighting factors. Bug fix, MB.
             // ggiInterpolation::AABB
             overlapGgiInterpolation::BB_OCTREE // Octree search, MB.

         );
@@ -222,10 +222,10 @@ void Foam::regionCouplePolyPatch::calcPatchToPatch() const
             forwardT(),
             reverseT(),
             shadow().separation(), // Slave-to-master separation. Bug fix
-            0,     // Non-overlapping face tolerances
-            0,
+            true,  // Patch data is complete on all processors
+            SMALL, // Non-overlapping face tolerances
+            SMALL,
             true,  // Rescale weighting factors
             // ggiInterpolation::AABB
             ggiInterpolation::BB_OCTREE // Octree search, MB.
         );