Work in progress

commit b32edafe89 (parent ac941215d5)

12 changed files with 387 additions and 138 deletions

@@ -271,6 +271,12 @@ const Foam::scalarListList& Foam::ggiFvPatch::weights() const
 }


+void Foam::ggiFvPatch::expandAddrToZone(labelField& lf) const
+{
+lf = ggiPolyPatch_.fastExpand(lf);
+}
+
+
 Foam::tmp<Foam::labelField> Foam::ggiFvPatch::interfaceInternalField
 (
 const unallocLabelList& internalData

@@ -306,6 +312,7 @@ void Foam::ggiFvPatch::initInternalFieldTransfer
 const unallocLabelList& iF
 ) const
 {
+// Label transfer is local without global reduction
 labelTransferBuffer_ = patchInternalField(iF);
 }

@@ -187,6 +187,11 @@ public:
 return coupledFvPatch::reverseT();
 }

+//- Expand addressing to zone
+// Used in optimised AMG coarsening
+virtual void expandAddrToZone(labelField&) const;
+
+
 //- Return the values of the given internal data adjacent to
 // the interface as a field
 virtual tmp<labelField> interfaceInternalField

@@ -315,6 +315,16 @@ const Foam::scalarListList& Foam::regionCoupleFvPatch::weights() const
 }


+void Foam::regionCoupleFvPatch::expandAddrToZone(labelField& lf) const
+{
+// Missing code. Activate for AMG solvers across regionCoupleFvPatch
+notImplemented
+(
+"void regionCoupleFvPatch::expandAddrToZone(labelField& lf) const"
+);
+}
+
+
 Foam::tmp<Foam::labelField> Foam::regionCoupleFvPatch::interfaceInternalField
 (
 const unallocLabelList& internalData

@@ -196,6 +196,11 @@ public:
 return coupledFvPatch::reverseT();
 }

+//- Expand addressing to zone
+// Used in optimised AMG coarsening
+virtual void expandAddrToZone(labelField&) const;
+
+
 //- Return the values of the given internal data adjacent to
 // the interface as a field
 virtual tmp<labelField> interfaceInternalField

@@ -121,6 +121,10 @@ public:
 //- Return face reverse transformation tensor
 virtual const tensorField& reverseT() const = 0;

+//- Expand addressing to zone
+// Used in optimised AMG coarsening
+virtual void expandAddrToZone(labelField&) const = 0;
+

 // Transfer buffer access

@@ -47,7 +47,7 @@ namespace Foam
 {

 /*---------------------------------------------------------------------------*\
 Class overlapGGILduInterface Declaration
 \*---------------------------------------------------------------------------*/

 class overlapGGILduInterface

@@ -82,9 +82,21 @@ protected:
 labelField faceCells_;

 //- Fine addressing
+// On GGI interfaces, a single fine coefficient may contribute to
+// multiple coarse coefficients using different weights.
+// To handle this, a fine coefficient may be visited multiple times,
+// which is recorded in fineAddressing.
+// For simple (matching) interfaces, fineAddressing_[i] = i
+// HJ, 21/Jun/2016
 labelField fineAddressing_;

 //- Restrict addressing
+// For each fine coefficient, list the coarse cluster index it will be
+// agglomerated into
+// For cases where the fineAddressing is used, restrict addressing
+// and weights are expanded to match multiple hits for a single
+// fine coefficient, as dictated by fineAddressing
+// HJ, 21/Jun/2016
 labelField restrictAddressing_;

 //- Fine level agglomeration weights

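The comments above describe how a single fine coefficient can be visited several times, each visit carrying its own coarse target and weight. As a rough illustration of how the three members fit together, here is a minimal standalone sketch (plain C++ with std::vector standing in for the Foam fields; the function and parameter names are illustrative only, not part of the library):

    #include <cstddef>
    #include <vector>

    // Restrict fine interface coefficients to coarse ones.  A fine face may
    // appear several times in fineAddressing (multiple hits), each time with
    // its own coarse target and weight; for matching interfaces
    // fineAddressing[i] == i.
    std::vector<double> agglomerateSketch
    (
        const std::vector<double>& fineCoeffs,       // one entry per fine face
        const std::vector<int>& fineAddressing,      // fine face visited by pair i
        const std::vector<int>& restrictAddressing,  // coarse face for pair i
        const std::vector<double>& restrictWeights,  // weight of this visit
        std::size_t nCoarseFaces
    )
    {
        std::vector<double> coarseCoeffs(nCoarseFaces, 0.0);

        for (std::size_t i = 0; i < fineAddressing.size(); ++i)
        {
            coarseCoeffs[restrictAddressing[i]] +=
                restrictWeights[i]*fineCoeffs[fineAddressing[i]];
        }

        return coarseCoeffs;
    }
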
@@ -203,11 +203,6 @@ Foam::ggiAMGInterface::ggiAMGInterface
 zoneAddressing_(),
 mapPtr_(NULL)
 {
-// Note.
-// All processors will do the same coarsening and then filter
-// the addressing to the local processor
-// HJ, 1/Apr/2009
-
 // Note.
 // Signalling in global clustering requires me to recognise clustering
 // from separate processors as separate. In the first phase, this will be

@@ -222,7 +217,15 @@ Foam::ggiAMGInterface::ggiAMGInterface
 // larger max int, which can be changed on request
 // HJ, 1/Apr/2009

-// Expand the local and neighbour addressing to full zone size
+// New algorithm will assemble local clusters and then create a global
+// zone ordering by collecting all faces (coarse pairs) from proc0,
+// followed by proc 1 etc. This avoids global communication and allows
+// each processor only to perform the analysis on locally created coarse
+// faces
+// HJ, 13/Jun/2016
+
+// To help with analysis, expand the local and neighbour addressing
+// to full zone size
 labelField localExpandAddressing(fineGgiInterface_.zoneSize(), 0);

 // Memory management, local

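The ordering described above (coarse pairs from proc0, then proc1, and so on) is what the coarseGlobalFaceOffset used later in this diff implements: a processor's first coarse face sits after all coarse faces of lower-ranked processors. A minimal standalone sketch, assuming nCoarseFacesPerProc and myProcNo stand in for the Pstream data used in the constructor:

    #include <vector>

    // Sum the coarse-face counts of all lower-ranked processors; a local
    // coarse face index nProcFaces then maps to global zone index
    // nProcFaces + offset.
    int coarseGlobalFaceOffsetSketch
    (
        const std::vector<int>& nCoarseFacesPerProc,
        int myProcNo
    )
    {
        int offset = 0;

        for (int proci = 0; proci < myProcNo; ++proci)
        {
            offset += nCoarseFacesPerProc[proci];
        }

        return offset;
    }
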
@@ -235,60 +238,70 @@ Foam::ggiAMGInterface::ggiAMGInterface
 localRestrictAddressing[i] + procOffset*Pstream::myProcNo();
 }

-if (!localParallel())
-{
-// Optimisation of this comms call is needed
-// HJ, 9/Jun/2016
-reduce(localExpandAddressing, sumOp<labelField>());
-}
+// Removed global reduce. Only local faces will be analysed.
+// HJ, 13/Jun/2016
 }

+// Create addressing for neighbour faces. Note: expandAddrToZone will
+// expand the addressing to zone size. HJ, 13/Jun/2016
 labelField neighbourExpandAddressing
 (
-fineGgiInterface_.shadowInterface().zoneSize(),
-0
+fineGgiInterface_.shadowInterface().interfaceSize()
 );

+// Fill local cluster ID with a combination of a local ID and processor
+// offset
 // Memory management, neighbour
 {
-const labelList& addr =
-fineGgiInterface_.shadowInterface().zoneAddressing();
-
-forAll (addr, i)
+forAll (neighbourExpandAddressing, i)
 {
-neighbourExpandAddressing[addr[i]] =
+neighbourExpandAddressing[i] =
 neighbourRestrictAddressing[i]
 + procOffset*Pstream::myProcNo();
 }

-if (!localParallel())
-{
-// Optimisation of this comms call is needed
-// HJ, 9/Jun/2016
-reduce(neighbourExpandAddressing, sumOp<labelField>());
-}
+// Expand neighbour side to get all the data required from other
+// processors. Note: neighbour is now the size of remote zone
+fineGgiInterface_.shadowInterface().expandAddrToZone
+(
+neighbourExpandAddressing
+);
 }

+// DEBUG: Check that all sizes are at zone size.
+Info<< "Sizes check: local zone size "
+<< fineGgiInterface_.zoneSize()
+<< " " << localExpandAddressing << nl
+<< "shadow zone size "
+<< fineGgiInterface_.shadowInterface().zoneSize()
+<< " " << neighbourExpandAddressing
+<< endl;
+
+
+// Note: neighbourExpandAddressing will be filled with NaNs for faces which
+// are not local
+
+Info<< "End of reduce" << endl;
 // Make a lookup table of entries for owner/neighbour.
 // All sizes are guessed at the size of fine interface
 // HJ, 19/Feb/2009

 HashTable<SLList<label>, label, Hash<label> > neighboursTable
 (
-localExpandAddressing.size()
+fineGgiInterface_.interfaceSize()
 );

 // Table of face-sets to be agglomerated
 HashTable<SLList<SLList<label> >, label, Hash<label> > faceFaceTable
 (
-localExpandAddressing.size()
+fineGgiInterface_.interfaceSize()
 );

 // Table of face-sets weights to be agglomerated
 HashTable<SLList<SLList<scalar> >, label, Hash<label> >
 faceFaceWeightsTable
 (
-localExpandAddressing.size()
+fineGgiInterface_.interfaceSize()
 );

 // Count the number of coarse faces

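The cluster IDs assembled above combine a local restrict (cluster) index with a per-processor offset, so clusters created on different processors stay distinct; the same offset is subtracted again when the local index is recovered (see contents[masterI] - procOffset*Pstream::myProcNo() further down). A minimal standalone sketch of that encoding, assuming a procOffset value chosen here purely for illustration:

    // procOffset is assumed to exceed any local cluster index; the real
    // constant used by the Foam code may differ.
    const int procOffset = 1000000;

    inline int encodeCluster(int localCluster, int procNo)
    {
        return localCluster + procOffset*procNo;
    }

    inline int decodeCluster(int globalCluster, int procNo)
    {
        return globalCluster - procOffset*procNo;
    }

    inline int clusterOwner(int globalCluster)
    {
        return globalCluster/procOffset;  // which processor created the cluster
    }
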
@@ -300,11 +313,24 @@ Foam::ggiAMGInterface::ggiAMGInterface
 // On the fine level, addressing is made in a labelListList
 if (fineGgiInterface_.fineLevel())
 {
+// This addressing defines how to interpolate for all zone faces
+// across the interface
 const labelListList& fineAddr = fineGgiInterface_.addressing();
 const scalarListList& fineWeights = fineGgiInterface_.weights();

-forAll (fineAddr, ffI)
+// Note: cluster only locally live faces
+// HJ, 13/Jun/2016
+
+// This addressing defines which faces from zone are local
+const labelList& fineZa = fineGgiInterface_.zoneAddressing();
+
+// Perform analysis only for local faces
+// HJ, 22/Jun/2016
+forAll (fineZa, fineZaI)
 {
+// Get the local face (from zone) to analyse
+const label ffI = fineZa[fineZaI];
+
 const labelList& curFineNbrs = fineAddr[ffI];
 const scalarList& curFineWeigts = fineWeights[ffI];

@@ -372,6 +398,9 @@ Foam::ggiAMGInterface::ggiAMGInterface
 nbrFound = true;
 faceFacesIter().append(ffI);
 faceFaceWeightsIter().append(curNW);
+
+// New agglomeration pair found in already
+// existing pair
 nAgglomPairs++;

 break;

@@ -384,7 +413,7 @@ Foam::ggiAMGInterface::ggiAMGInterface
 curFaceFaces.append(SLList<label>(ffI));
 curFaceWeights.append(SLList<scalar>(curNW));

-// New coarse face created
+// New coarse face created for an existing master
 nCoarseFaces++;
 nAgglomPairs++;
 }

@@ -407,7 +436,7 @@ Foam::ggiAMGInterface::ggiAMGInterface
 SLList<SLList<scalar> >(SLList<scalar>(curNW))
 );

-// New coarse face created
+// New coarse face created for a new master
 nCoarseFaces++;
 nAgglomPairs++;
 }

@@ -417,8 +446,16 @@ Foam::ggiAMGInterface::ggiAMGInterface
 else
 {
 // Coarse level, addressing is stored in faceCells
-forAll (localExpandAddressing, ffI)
+// This addressing defines which faces from zone are local
+const labelList& fineZa = fineGgiInterface_.zoneAddressing();
+
+// Perform analysis only for local faces
+// HJ, 22/Jun/2016
+forAll (fineZa, fineZaI)
 {
+// Get the local face (from zone) to analyse
+const label ffI = fineZa[fineZaI];
+
 label curMaster = -1;
 label curSlave = -1;

@@ -480,6 +517,9 @@ Foam::ggiAMGInterface::ggiAMGInterface
 faceFacesIter().append(ffI);
 // Add dummy weight
 faceFaceWeightsIter().append(1.0);
+
+// New agglomeration pair found in already
+// existing pair
 nAgglomPairs++;
 break;
 }

@@ -492,7 +532,7 @@ Foam::ggiAMGInterface::ggiAMGInterface
 // Add dummy weight
 curFaceWeights.append(SLList<scalar>(1.0));

-// New coarse face created
+// New coarse face created for an existing master
 nCoarseFaces++;
 nAgglomPairs++;
 }

@@ -516,116 +556,193 @@ Foam::ggiAMGInterface::ggiAMGInterface
 SLList<SLList<scalar> >(SLList<scalar>(1.0))
 );

-// New coarse face created
+// New coarse face created for a new master
 nCoarseFaces++;
 nAgglomPairs++;
 }
 } // end for all fine faces
 }

-faceCells_.setSize(nCoarseFaces, -1);
-fineAddressing_.setSize(nAgglomPairs, -1);
-restrictAddressing_.setSize(nAgglomPairs, -1);
-restrictWeights_.setSize(nAgglomPairs);
+// In order to assemble the coarse global face zone, find out
+// how many faces have been created on each processor.
+// Note that masters and slaves both count faces, so only the master
+// sizes are counted
+labelList nCoarseFacesPerProc(Pstream::nProcs(), 0);
+
+if (master())
+{
+nCoarseFacesPerProc[Pstream::myProcNo()] = nCoarseFaces;
+}
+
+reduce(nCoarseFacesPerProc, sumOp<labelList>());
+
+Info<< "Number of faces per processor: " << nCoarseFacesPerProc
+<< endl;
+
+// Coarse global face zone is assembled by adding all faces from proc0,
+// followed by all faces from proc1 etc.
+// Therefore, on procN, my master offset
+// will be equal to the sum of numbers of coarse faces on all processors
+// before mine
+// HJ, 13/Jun/2016
+
+label coarseGlobalFaceOffset = 0;
+
+for (label i = 0; i < Pstream::myProcNo(); i++)
+{
+coarseGlobalFaceOffset += nCoarseFacesPerProc[i];
+}
+
+Pout<< "coarseGlobalFaceOffset: " << coarseGlobalFaceOffset << endl;
+
+Info<< "End of contents assembly" << endl;
+labelField masterFaceCells(nCoarseFaces, -1);
+labelField masterZoneAddressing(nCoarseFaces, -1);
+labelField masterFineAddressing(nCoarseFaces, -1);
+labelField masterRestrictAddressing(nAgglomPairs, -1);
+scalarField masterRestrictWeights(nAgglomPairs);
+
+// Note: in multiple agglomeration
+
 labelList contents = neighboursTable.toc();

+// Global faces shall be assembled by the increasing label of master
+// cluster ID.
+
 // Sort makes sure the order is identical on both sides.
-// Since the global zone is defined by this sort, the neighboursTable
-// must be complete on all processors
 // HJ, 20/Feb/2009 and 6/Jun/2016
 sort(contents);

 // Grab zone size and create zone addressing
-zoneSize_ = nCoarseFaces;
+zoneSize_ = sum(nCoarseFacesPerProc);
+Info<< "zoneSize_: " << zoneSize_ << endl;

-zoneAddressing_.setSize(nCoarseFaces);
+// Note:
+// When I am agglomerating the master, I know faces are stacked up in order
+// but on the slave side, all I know is the master cluster index and
+// not a master coarse face index. Therefore:
+// - master needs to be agglomerated first
+// - once master is agglomerated, I need to signal to the slave side
+//   the global coarse face zone index
+
+
+// Note: zone addressing will be assembled only for local clusters
+// using the coarseGlobalFaceOffset
+// HJ, 13/Jun/2016
 label nProcFaces = 0;

 // Reset face counter for re-use
 nCoarseFaces = 0;
 nAgglomPairs = 0;

-if (master())
+// Note:
+// Since clustering has now happened only on local faces, addressing and
+// all other arrays work on local indices and not on the coarse global zone
+// HJ, 13/Jun/2016
+
+// Establish zone addressing on the master side and communicate
+// it to the shadow
+
+// On master side, the owner addressing is stored in table of contents
+forAll (contents, masterI)
 {
-// On master side, the owner addressing is stored in table of contents
-forAll (contents, masterI)
+SLList<label>& curNbrs =
+neighboursTable.find(contents[masterI])();

+// Note: neighbour processor index is irrelevant. HJ, 1/Apr/2009

+SLList<SLList<label> >& curFaceFaces =
+faceFaceTable.find(contents[masterI])();

+SLList<SLList<scalar> >& curFaceWeights =
+faceFaceWeightsTable.find(contents[masterI])();

+SLList<label>::iterator nbrsIter = curNbrs.begin();
+SLList<SLList<label> >::iterator faceFacesIter =
+curFaceFaces.begin();

+SLList<SLList<scalar> >::iterator faceFaceWeightsIter =
+curFaceWeights.begin();

+for
+(
+;
+nbrsIter != curNbrs.end()
+&& faceFacesIter != curFaceFaces.end()
+&& faceFaceWeightsIter != curFaceWeights.end();
+++nbrsIter, ++faceFacesIter, ++faceFaceWeightsIter
+)
 {
-SLList<label>& curNbrs =
-neighboursTable.find(contents[masterI])();
+// Check if master is on local processor: no longer needed,
+// as only local processor is being searched. HJ, 13/Jun/2016

-// Note: neighbour processor index is irrelevant. HJ, 1/Apr/2009
+// Record that this face belongs locally
+// Use offset to indicate its position in the list
+masterZoneAddressing[nProcFaces] =
+nProcFaces + coarseGlobalFaceOffset;

-SLList<SLList<label> >& curFaceFaces =
-faceFaceTable.find(contents[masterI])();
+masterFaceCells[nProcFaces] =
+contents[masterI] - procOffset*Pstream::myProcNo();

-SLList<SLList<scalar> >& curFaceWeights =
-faceFaceWeightsTable.find(contents[masterI])();
+SLList<label>::iterator facesIter =
+faceFacesIter().begin();

-SLList<label>::iterator nbrsIter = curNbrs.begin();
-SLList<SLList<label> >::iterator faceFacesIter =
-curFaceFaces.begin();
+SLList<scalar>::iterator weightsIter =
+faceFaceWeightsIter().begin();

-SLList<SLList<scalar> >::iterator faceFaceWeightsIter =
-curFaceWeights.begin();

 for
 (
 ;
-nbrsIter != curNbrs.end()
-&& faceFacesIter != curFaceFaces.end()
-&& faceFaceWeightsIter != curFaceWeights.end();
-++nbrsIter, ++faceFacesIter, ++faceFaceWeightsIter
+facesIter != faceFacesIter().end()
+&& weightsIter != faceFaceWeightsIter().end();
+++facesIter, ++weightsIter
 )
 {
-// Check if master is on local processor
-if
-(
-contents[masterI] >= procOffset*Pstream::myProcNo()
-&& contents[masterI] < procOffset*(Pstream::myProcNo() + 1)
-)
-{
-// Record that this face belongs locally
-zoneAddressing_[nProcFaces] = nCoarseFaces;
-faceCells_[nProcFaces] =
-contents[masterI] - procOffset*Pstream::myProcNo();
-nProcFaces++;
+masterFineAddressing[nAgglomPairs] = facesIter();

-SLList<label>::iterator facesIter =
-faceFacesIter().begin();
-SLList<scalar>::iterator weightsIter =
-faceFaceWeightsIter().begin();
-
-for
-(
-;
-facesIter != faceFacesIter().end()
-&& weightsIter != faceFaceWeightsIter().end();
-++facesIter, ++weightsIter
-)
-{
-fineAddressing_[nAgglomPairs] = facesIter();
-restrictAddressing_[nAgglomPairs] = nCoarseFaces;
-restrictWeights_[nAgglomPairs] = weightsIter();
-nAgglomPairs++;
-}
-}
-
-// Not a local face, but still created in global zone
-nCoarseFaces++;
+// Master processor zone face is calculated from the local face count
+// plus the global coarse face offset
+masterRestrictAddressing[nAgglomPairs] =
+nProcFaces + coarseGlobalFaceOffset;
+masterRestrictWeights[nAgglomPairs] = weightsIter();
+nAgglomPairs++;
 }

+nProcFaces++;
 }
+}

 // Resize arrays: not all of ggi is used locally
-faceCells_.setSize(nProcFaces);
-zoneAddressing_.setSize(nProcFaces);
+masterFaceCells.setSize(nProcFaces);
+masterZoneAddressing.setSize(nProcFaces);

-fineAddressing_.setSize(nAgglomPairs);
-restrictAddressing_.setSize(nAgglomPairs);
-restrictWeights_.setSize(nAgglomPairs);
+masterFineAddressing.setSize(nAgglomPairs);
+masterRestrictAddressing.setSize(nAgglomPairs);
+masterRestrictWeights.setSize(nAgglomPairs);
+
+// Note: Both master and slave have done the same agglomeration up to here
+
+if (master())
+{
+// Master has completed the clustering
+faceCells_ = masterFaceCells;
+zoneAddressing_ = masterZoneAddressing;
+fineAddressing_ = masterFineAddressing;
+restrictAddressing_ = masterRestrictAddressing;
+restrictWeights_ = masterRestrictWeights;
 }
 else
 {
+// Note: shadowRestrictAddressing contains the
+
+// Note: zone addressing will be assembled only for local clusters
+// using the coarseGlobalFaceOffset
+// HJ, 13/Jun/2016
+label nProcFaces = 0;
+
+// Reset face counter for re-use
+nCoarseFaces = 0;
+nAgglomPairs = 0;
+
 // On slave side, the owner addressing is stored in linked lists
 forAll (contents, masterI)
 {

@@ -646,6 +763,7 @@ Foam::ggiAMGInterface::ggiAMGInterface

 SLList<SLList<scalar> >::iterator faceFaceWeightsIter =
 curFaceWeights.begin();

 for
 (
 ;

|
@ -655,41 +773,37 @@ Foam::ggiAMGInterface::ggiAMGInterface
|
||||||
++nbrsIter, ++faceFacesIter, ++faceFaceWeightsIter
|
++nbrsIter, ++faceFacesIter, ++faceFaceWeightsIter
|
||||||
)
|
)
|
||||||
{
|
{
|
||||||
// Check if the face is on local processor
|
// Check if the face is on local processor: no longer needed,
|
||||||
if
|
// as only local processor is being searched. HJ, 13/Jun/2016
|
||||||
|
|
||||||
|
// Record that this face belongs locally.
|
||||||
|
|
||||||
|
//HJ, HERE: I need to find out the global face index for the face that was created from the master side
|
||||||
|
|
||||||
|
zoneAddressing_[nProcFaces] = nCoarseFaces;
|
||||||
|
faceCells_[nProcFaces] =
|
||||||
|
nbrsIter() - procOffset*Pstream::myProcNo();
|
||||||
|
nProcFaces++;
|
||||||
|
|
||||||
|
SLList<label>::iterator facesIter =
|
||||||
|
faceFacesIter().begin();
|
||||||
|
|
||||||
|
SLList<scalar>::iterator weightsIter =
|
||||||
|
faceFaceWeightsIter().begin();
|
||||||
|
|
||||||
|
for
|
||||||
(
|
(
|
||||||
nbrsIter() >= procOffset*Pstream::myProcNo()
|
;
|
||||||
&& nbrsIter() < procOffset*(Pstream::myProcNo() + 1)
|
facesIter != faceFacesIter().end()
|
||||||
|
&& weightsIter != faceFaceWeightsIter().end();
|
||||||
|
++facesIter, ++weightsIter
|
||||||
)
|
)
|
||||||
{
|
{
|
||||||
// Record that this face belongs locally.
|
fineAddressing_[nAgglomPairs] = facesIter();
|
||||||
zoneAddressing_[nProcFaces] = nCoarseFaces;
|
restrictAddressing_[nAgglomPairs] = nCoarseFaces;
|
||||||
faceCells_[nProcFaces] =
|
restrictWeights_[nAgglomPairs] = weightsIter();
|
||||||
nbrsIter() - procOffset*Pstream::myProcNo();
|
nAgglomPairs++;
|
||||||
nProcFaces++;
|
|
||||||
|
|
||||||
SLList<label>::iterator facesIter =
|
|
||||||
faceFacesIter().begin();
|
|
||||||
|
|
||||||
SLList<scalar>::iterator weightsIter =
|
|
||||||
faceFaceWeightsIter().begin();
|
|
||||||
|
|
||||||
for
|
|
||||||
(
|
|
||||||
;
|
|
||||||
facesIter != faceFacesIter().end()
|
|
||||||
&& weightsIter != faceFaceWeightsIter().end();
|
|
||||||
++facesIter, ++weightsIter
|
|
||||||
)
|
|
||||||
{
|
|
||||||
fineAddressing_[nAgglomPairs] = facesIter();
|
|
||||||
restrictAddressing_[nAgglomPairs] = nCoarseFaces;
|
|
||||||
restrictWeights_[nAgglomPairs] = weightsIter();
|
|
||||||
nAgglomPairs++;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
nCoarseFaces++;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -718,6 +832,8 @@ Foam::tmp<Foam::scalarField> Foam::ggiAMGInterface::agglomerateCoeffs
 const scalarField& fineCoeffs
 ) const
 {
+// HJ, HERE THIS SHOULD BE REMOVED: NO LONGER NEEDED BECAUSE ALL ADDRESSING IS LOCAL
+
 // Note: reconsider better parallel communication here.
 // Currently expanding to full zone size
 // HJ, 16/Mar/2016

@@ -917,4 +1033,10 @@ Foam::tmp<Foam::scalarField> Foam::ggiAMGInterface::internalFieldTransfer
 }


+void Foam::ggiAMGInterface::expandAddrToZone(labelField& lf) const
+{
+lf = fastExpand(lf);
+}
+
+
 // ************************************************************************* //

@@ -143,9 +143,23 @@ public:

 // Interface transfer functions

+//- Fast expand
+// Note: contains global communications
+// This function will expand data to zone size with fast comms
+// filling in only the part of the zone that is required
+// on the shadow side
+template<class Type>
+tmp<Field<Type> > fastExpand(const UList<Type>&) const;
+
 //- Fast reduce
 // Note: contains global communications
-// New, HJ, 24/Jun/2011
+// This function will expand data to zone size with fast comms
+// and filter the field to cover the need of a shadow zone
+// This makes sense on coarse levels because all addressing
+// is one-on-one (one master, one slave). On the fine level
+// each face in the zone may contribute to multiple faces, meaning
+// that the data needs to be expanded to zone size
+// HJ, 24/Jun/2011 and 13/Jun/2016
 template<class Type>
 tmp<Field<Type> > fastReduce(const UList<Type>&) const;

@@ -234,6 +248,10 @@ public:

 //- Return neighbour-cell transformation tensor
 virtual const tensorField& reverseT() const;

+//- Expand addressing to zone
+// Used in optimised AMG coarsening
+virtual void expandAddrToZone(labelField&) const;
 };


@@ -32,10 +32,61 @@ namespace Foam

 // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

+template<class Type>
+tmp<Field<Type> > ggiAMGInterface::fastExpand(const UList<Type>& ff) const
+{
+// Rewrite, 1/Jun/2016
+// To avoid creating zone-sized data and gather-scatter communication
+// to the master, the optimised map-distribute call is implemented.
+// The field is filled with local data which is then sent where needed
+// through map-distribute.
+// On return, the field is expanded to zone size but only filled with
+// the data which is needed for the shadow
+// HJ, 1/Jun/2016
+
+if (ff.size() != this->size())
+{
+FatalErrorIn
+(
+"tmp<Field<Type> > ggiAMGInterface::fastExpand"
+"("
+" const UList<Type>& ff"
+") const"
+) << "Wrong field size. ff: " << ff.size()
+<< " interface: " << this->size()
+<< abort(FatalError);
+}
+
+if (localParallel() || !Pstream::parRun())
+{
+// Field remains identical: no parallel communications required
+tmp<Field<Type> > tresult(new Field<Type>(ff));
+
+return tresult;
+}
+else
+{
+// Optimised mapDistribute
+
+// Execute init reduce to calculate addressing if not already done
+map();
+
+// Prepare for distribute. Note: field will be expanded to zone size
+// during the distribute operation
+tmp<Field<Type> > tresult(new Field<Type>(ff));
+List<Type>& expand = tresult();
+
+map().distribute(expand);
+
+return tresult;
+}
+}
+
+
 template<class Type>
 tmp<Field<Type> > ggiAMGInterface::fastReduce(const UList<Type>& ff) const
 {
-// Algorithm
+// Old algorithm: OBSOLETE
 // Local processor contains faceCells part of the zone and requires
 // zoneAddressing part.
 // For fast communications, each processor will send the faceCells and

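The fastExpand comments above describe taking an interface-sized field and returning a zone-sized field in which only the entries needed by the shadow side are guaranteed to be filled. A minimal standalone sketch of the serial part of that contract (plain C++ with std::vector standing in for the Foam fields; in parallel the remaining slots would be filled by the map-distribute call shown above):

    #include <cstddef>
    #include <vector>

    // Scatter local interface values into a zone-sized array at the positions
    // given by the zone addressing; untouched slots keep the default value.
    std::vector<int> expandToZoneSketch
    (
        const std::vector<int>& localValues,     // one value per local face
        const std::vector<int>& zoneAddressing,  // zone index of each local face
        std::size_t zoneSize
    )
    {
        std::vector<int> expanded(zoneSize, 0);

        for (std::size_t i = 0; i < localValues.size(); ++i)
        {
            expanded[zoneAddressing[i]] = localValues[i];
        }

        return expanded;
    }
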
@@ -44,6 +95,18 @@ tmp<Field<Type> > ggiAMGInterface::fastReduce(const UList<Type>& ff) const
 // the required data
 // HJ, 24/Jun/2011
+
+// Rewrite, 1/Jun/2016
+// To avoid creating zone-sized data and gather-scatter communication
+// to the master, the optimised map-distribute call is implemented.
+// The field is filled with local data which is then sent where needed
+// through map-distribute.
+// On return, the field is expanded to zone size but only filled with
+// the data which is needed for the shadow
+// Having received the zone data, shadow data is extracted from the
+// zone-sized field. Note: this works only on coarse levels, where one-on-one
+// mapping applies
+// HJ, 1/Jun/2016
+
 if (ff.size() != this->size())
 {
 FatalErrorIn

@@ -71,7 +134,8 @@ tmp<Field<Type> > ggiAMGInterface::fastReduce(const UList<Type>& ff) const
 // Execute init reduce to calculate addressing if not already done
 map();

-// Prepare for distribute: field will be expanded to zone size
+// Prepare for distribute. Note: field will be expanded to zone size
+// during the distribute operation
 List<Type> expand = ff;

 map().distribute(expand);

@@ -168,6 +168,7 @@ void Foam::mixingPlaneAMGInterface::initInternalFieldTransfer
 const unallocLabelList& iF
 ) const
 {
+// NOTE: Change this: requires fast reduce. HJ, 13/Jun/2016
 labelTransferBuffer_ = interfaceInternalField(iF);
 }

@@ -189,7 +189,8 @@ Foam::processorAMGInterface::processorAMGInterface

 for
 (
-SLList<label>::iterator facesIter = faceFacesIter().begin();
+SLList<label>::iterator facesIter =
+faceFacesIter().begin();
 facesIter != faceFacesIter().end();
 ++facesIter
 )