Pstream with a single comm, updated parallel ops

Hrvoje Jasak 2016-09-14 05:24:50 +01:00
parent aa518c6931
commit 7bab48776c
77 changed files with 3288 additions and 1354 deletions


@@ -1047,6 +1047,18 @@ void Foam::faMesh::addFaPatches(const List<faPatch*>& p)
}
Foam::label Foam::faMesh::comm() const
{
return comm_;
}
Foam::label& Foam::faMesh::comm()
{
return comm_;
}
const Foam::objectRegistry& Foam::faMesh::thisDb() const
{
return mesh().thisDb();
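The new comm() accessors expose the communicator attached to the mesh, so parallel reductions over finite area data no longer have to assume the world communicator. A minimal usage sketch, assuming a constructed faMesh named aMesh (an illustrative name) and the communicator-aware reduce() introduced later in this commit:

    // Sum the local face count across the processors that share this mesh.
    label nLocalFaces = aMesh.nFaces();
    reduce(nLocalFaces, sumOp<label>(), Pstream::msgType(), aMesh.comm());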


@@ -110,6 +110,12 @@ class faMesh
mutable label nFaces_;
// Communication support
//- Communicator used for parallel communication
label comm_;
// Demand-driven data
//- Primitive patch
@@ -287,9 +293,8 @@ public:
);
//- Destructor
virtual ~faMesh();
// Member Functions
@@ -369,6 +374,15 @@ public:
}
// Communication support
//- Return communicator used for parallel communication
label comm() const;
//- Return communicator used for parallel communication
label& comm();
// Access
//- Return reference to the mesh database


@@ -55,6 +55,18 @@ processorFaPatch::~processorFaPatch()
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
Foam::label Foam::processorFaPatch::comm() const
{
return boundaryMesh().mesh().comm();
}
int Foam::processorFaPatch::tag() const
{
return Pstream::msgType();
}
void processorFaPatch::makeNonGlobalPatchPoints() const
{
// If it is not running parallel or there are no global points


@@ -37,7 +37,6 @@ SourceFiles
#include "coupledFaPatch.H"
#include "processorLduInterface.H"
// #include "processorPolyPatch.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -55,7 +54,10 @@ class processorFaPatch
{
// Private data
//- My processor number
int myProcNo_;
//- Neighbour processor number
int neighbProcNo_;
//- Processor-neighbour patch edge centres
@@ -75,6 +77,7 @@ class processorFaPatch
// non-global, i.e. present in this processor patch
mutable labelList* nonGlobalPatchPointsPtr_;
protected:
// Protected Member functions
@@ -88,27 +91,27 @@ protected:
//- Find non-global patch points
void makeNonGlobalPatchPoints() const;
protected:
// Geometry functions
//- Initialise the calculation of the patch geometry
void initGeometry();
//- Calculate the patch geometry
void calcGeometry();
//- Initialise the patches for moving points
void initMovePoints(const pointField&);
//- Correct patches after moving points
void movePoints(const pointField&);
//- Initialise the update of the patch topology
virtual void initUpdateMesh();
//- Update of the patch topology
virtual void updateMesh();
public:
@@ -160,9 +163,9 @@ public:
nonGlobalPatchPointsPtr_(NULL)
{}
//- Destructor
virtual ~processorFaPatch();
// Member functions
@@ -192,6 +195,16 @@ public:
}
}
// Communications support
//- Return communicator used for communication
virtual label comm() const;
//- Return message tag to use for communication
virtual int tag() const;
//- Return face transformation tensor
virtual const tensorField& forwardT() const
{
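Together, comm() and tag() supply the two identifiers every point-to-point transfer needs. A hedged sketch of how a processor patch could drive a raw non-blocking send, where procPatch and sendBuf are illustrative stand-ins rather than names from this commit:

    // Send a contiguous scalar buffer to the neighbouring processor,
    // using the patch's own communicator and message tag.
    OPstream::write
    (
        Pstream::nonBlocking,
        procPatch.neighbProcNo(),
        reinterpret_cast<const char*>(sendBuf.begin()),
        sendBuf.size()*sizeof(scalar),
        procPatch.tag(),
        procPatch.comm()
    );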


@@ -249,7 +249,7 @@ public:
) const;
// Processor coupled interface functions
//- Return processor number
virtual int myProcNo() const
@@ -263,6 +263,16 @@ public:
return procPatch_.neighbProcNo();
}
// Communication support
//- Return communicator used for parallel communication
virtual int comm() const
{
return procPatch_.comm();
}
//- Does the patch field perform the transformation
virtual bool doTransform() const
{


@@ -698,7 +698,10 @@ void Foam::fvMatrix<Type>::relax()
{
if (psi_.mesh().solutionDict().relaxEquation(psi_.name()))
{
relax
(
psi_.mesh().solutionDict().equationRelaxationFactor(psi_.name())
);
}
else
{


@@ -216,9 +216,8 @@ public:
);
//- Destructor
virtual ~fvMesh();
// Member Functions
@@ -285,6 +284,15 @@ public:
}
// Communication support
//- Return communicator used for parallel communication
label comm() const
{
return polyMesh::comm();
}
//- Return cell volumes
const DimensionedField<scalar, volMesh>& V() const;


@@ -90,10 +90,9 @@ public:
{}
//- Destructor
virtual ~cyclicGgiFvPatch()
{}
// Member functions


@@ -92,13 +92,27 @@ public:
{}
//- Destructor
virtual ~ggiFvPatch();
// Member functions
// Communication support
//- Return communicator used for parallel communication
virtual int comm() const
{
return ggiPolyPatch_.comm();
}
//- Return message tag used for sending
virtual int tag() const
{
return ggiPolyPatch_.tag();
}
// Access
//- Return shadow patch


@@ -95,13 +95,27 @@ public:
{}
//- Destructor
virtual ~mixingPlaneFvPatch();
// Member functions
// Communication support
//- Return communicator used for parallel communication
virtual int comm() const
{
return mixingPlanePolyPatch_.comm();
}
//- Return message tag used for sending
virtual int tag() const
{
return mixingPlanePolyPatch_.tag();
}
// Access
//- Return shadow patch


@@ -86,10 +86,9 @@ public:
{}
//- Destructor
virtual ~processorFvPatch()
{}
// Member functions
@@ -119,6 +118,21 @@ public:
}
}
// Communication support
//- Return communicator used for parallel communication
virtual int comm() const
{
return procPolyPatch_.comm();
}
//- Return message tag used for sending
virtual int tag() const
{
return procPolyPatch_.tag();
}
//- Return face transformation tensor
virtual const tensorField& forwardT() const
{


@@ -95,9 +95,8 @@ public:
{}
//- Destructor
virtual ~regionCoupleFvPatch();
// Member functions
@@ -123,6 +122,21 @@ public:
virtual tmp<vectorField> delta() const;
// Communication support
//- Return communicator used for parallel communication
virtual int comm() const
{
return rcPolyPatch_.comm();
}
//- Return message tag used for sending
virtual int tag() const
{
return rcPolyPatch_.tag();
}
// Interpolation
//- Interpolate face field


@@ -147,7 +147,6 @@ $(Pstreams)/IPstream.C
$(Pstreams)/OPstream.C
$(Pstreams)/IPread.C
$(Pstreams)/OPwrite.C
$(Pstreams)/PstreamsPrint.C
dictionary = db/dictionary
$(dictionary)/dictionary.C


@@ -38,6 +38,8 @@ Foam::IPstream::IPstream
const commsTypes commsType,
const int fromProcNo,
const label bufSize,
const int tag,
const label comm,
streamFormat format,
versionNumber version
)
@@ -45,6 +47,8 @@ Foam::IPstream::IPstream
Pstream(commsType, bufSize),
Istream(format, version),
fromProcNo_(fromProcNo),
tag_(tag),
comm_(comm),
messageSize_(0)
{
setOpened();
@@ -52,17 +56,31 @@ Foam::IPstream::IPstream
MPI_Status status;
// If the buffer size is not specified, probe the incoming message
// and set it
if (!bufSize)
{
MPI_Probe
(
fromProcNo_,
tag_,
PstreamGlobals::MPICommunicators_[comm_],
&status
);
MPI_Get_count(&status, MPI_BYTE, &messageSize_);
buf_.setSize(messageSize_);
}
messageSize_ = IPstream::read
(
commsType,
fromProcNo_,
buf_.begin(),
buf_.size(),
tag_,
comm_
);
if (!messageSize_)
{
@@ -83,9 +101,31 @@ Foam::label Foam::IPstream::read
const commsTypes commsType,
const int fromProcNo,
char* buf,
const std::streamsize bufSize,
const int tag,
const label comm
)
{
if (debug)
{
Pout<< "UIPstream::read : starting read from:" << fromProcNo
<< " tag:" << tag << " comm:" << comm
<< " wanted size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< Foam::endl;
}
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "UIPstream::read : starting read from:" << fromProcNo
<< " tag:" << tag << " comm:" << comm
<< " wanted size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< " warnComm:" << Pstream::warnComm
<< Foam::endl;
error::printStack(Pout);
}
if (commsType == blocking || commsType == scheduled)
{
MPI_Status status;
@@ -96,10 +136,10 @@ Foam::label Foam::IPstream::read
(
buf,
bufSize,
MPI_BYTE,
fromProcNo,
tag,
PstreamGlobals::MPICommunicators_[comm],
&status
)
)
@@ -144,10 +184,10 @@ Foam::label Foam::IPstream::read
(
buf,
bufSize,
MPI_BYTE,
fromProcNo,
tag,
PstreamGlobals::MPICommunicators_[comm],
&request
)
)
@@ -162,8 +202,18 @@ Foam::label Foam::IPstream::read
return 0;
}
if (debug)
{
Pout<< "UIPstream::read : started read from:" << fromProcNo
<< " tag:" << tag << " read size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< " request:" << PstreamGlobals::outstandingRequests_.size()
<< Foam::endl;
}
PstreamGlobals::outstandingRequests_.append(request);
// Assume the message is completely received.
return 1;
}
else
@@ -180,56 +230,4 @@ Foam::label Foam::IPstream::read
}
void Foam::IPstream::waitRequests()
{
if (PstreamGlobals::IPstream_outstandingRequests_.size())
{
if
(
MPI_Waitall
(
PstreamGlobals::IPstream_outstandingRequests_.size(),
PstreamGlobals::IPstream_outstandingRequests_.begin(),
MPI_STATUSES_IGNORE
)
)
{
FatalErrorIn
(
"IPstream::waitRequests()"
) << "MPI_Waitall returned with error" << endl;
}
PstreamGlobals::IPstream_outstandingRequests_.clear();
}
}
bool Foam::IPstream::finishedRequest(const label i)
{
if (i >= PstreamGlobals::IPstream_outstandingRequests_.size())
{
FatalErrorIn
(
"IPstream::finishedRequest(const label)"
) << "There are "
<< PstreamGlobals::IPstream_outstandingRequests_.size()
<< " outstanding send requests and you are asking for i=" << i
<< nl
<< "Maybe you are mixing blocking/non-blocking comms?"
<< Foam::abort(FatalError);
}
int flag;
MPI_Test
(
&PstreamGlobals::IPstream_outstandingRequests_[i],
&flag,
MPI_STATUS_IGNORE
);
return flag != 0;
}
// ************************************************************************* //
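The constructor's probe-then-read sequence is the classic MPI idiom for receiving a message whose size is unknown in advance. The same pattern in bare MPI, as a self-contained sketch (fromProcNo, tag and an MPI_Comm handle named comm are assumed to be in scope):

    // Probe the pending message, size the buffer from the probe result,
    // then receive exactly that many bytes.
    MPI_Status status;
    MPI_Probe(fromProcNo, tag, comm, &status);

    int messageSize = 0;
    MPI_Get_count(&status, MPI_BYTE, &messageSize);

    List<char> buf(messageSize);
    MPI_Recv(buf.begin(), messageSize, MPI_BYTE, fromProcNo, tag, comm, &status);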


@@ -1,187 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | foam-extend: Open Source CFD
\\ / O peration | Version: 4.0
\\ / A nd | Web: http://www.foam-extend.org
\\/ M anipulation | For copyright notice see file Copyright
-------------------------------------------------------------------------------
License
This file is part of foam-extend.
foam-extend is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
foam-extend is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "IPstream.H"
#include "token.H"
#include <cctype>
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Istream& IPstream::read(token& t)
{
// Return the put back token if it exists
if (Istream::getBack(t))
{
return *this;
}
char c;
// return on error
if (!read(c))
{
t.setBad();
return *this;
}
// Set the line number of this token to the current stream line number
t.lineNumber() = lineNumber();
// Analyse input starting with this character.
switch (c)
{
// Punctuation
case token::END_STATEMENT :
case token::BEGIN_LIST :
case token::END_LIST :
case token::BEGIN_SQR :
case token::END_SQR :
case token::BEGIN_BLOCK :
case token::END_BLOCK :
case token::COLON :
case token::COMMA :
case token::ASSIGN :
case token::ADD :
case token::SUBTRACT :
case token::MULTIPLY :
case token::DIVIDE :
{
t = token::punctuationToken(c);
return *this;
}
// Word
case token::WORD :
{
word* wPtr = new word;
if (read(*wPtr))
{
if (token::compound::isCompound(*wPtr))
{
t = token::compound::New(*wPtr, *this).ptr();
delete wPtr;
}
else
{
t = wPtr;
}
}
else
{
delete wPtr;
t.setBad();
}
return *this;
}
// String
case token::STRING :
{
string* sPtr = new string;
if (read(*sPtr))
{
t = sPtr;
}
else
{
delete sPtr;
t.setBad();
}
return *this;
}
// Label
case token::LABEL :
{
label l;
if (read(l))
{
t = l;
}
else
{
t.setBad();
}
return *this;
}
// floatScalar
case token::FLOAT_SCALAR :
{
floatScalar s;
if (read(s))
{
t = s;
}
else
{
t.setBad();
}
return *this;
}
// doubleScalar
case token::DOUBLE_SCALAR :
{
doubleScalar s;
if (read(s))
{
t = s;
}
else
{
t.setBad();
}
return *this;
}
// Character (returned as a single character word) or error
default:
{
if (isalpha(c))
{
t = word(c);
return *this;
}
setBad();
t.setBad();
return *this;
}
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //


@@ -324,4 +324,13 @@ Foam::Istream& Foam::IPstream::rewind()
}
void Foam::IPstream::print(Ostream& os) const
{
os << "Reading from processor " << fromProcNo_
<< " using communicator " << comm_
<< " and tag " << tag_
<< Foam::endl;
}
// ************************************************************************* //


@@ -58,6 +58,12 @@ class IPstream
//- ID of sending processor
int fromProcNo_;
//- Message tag
const int tag_;
//- Communicator
const label comm_;
//- Message size
label messageSize_;
@@ -86,12 +92,14 @@ public:
const commsTypes commsType,
const int fromProcNo,
const label bufSize = 0,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm,
streamFormat format = BINARY,
versionNumber version = currentVersion
);
//- Destructor
~IPstream();
@@ -115,15 +123,11 @@ public:
const commsTypes commsType,
const int fromProcNo,
char* buf,
const std::streamsize bufSize,
const int tag = Pstream::msgType(),
const label communicator = 0
);
//- Non-blocking receives: wait until all have finished.
static void waitRequests();
//- Non-blocking receives: has request i finished?
static bool finishedRequest(const label i);
//- Return next token from stream
Istream& read(token&);


@@ -92,13 +92,17 @@ Foam::OPstream::OPstream
const commsTypes commsType,
const int toProcNo,
const label bufSize,
const int tag,
const label comm,
streamFormat format,
versionNumber version
)
:
Pstream(commsType, bufSize),
Ostream(format, version),
toProcNo_(toProcNo),
tag_(tag),
comm_(comm)
{
setOpened();
setGood();
@@ -233,4 +237,12 @@ Foam::Ostream& Foam::OPstream::write(const char* data, std::streamsize count)
}
void Foam::OPstream::print(Ostream& os) const
{
os << "Writing from processor " << toProcNo_
<< " to processor " << myProcNo() << " in communicator " << comm_
<< " and tag " << tag_ << Foam::endl;
}
// ************************************************************************* //


@@ -58,6 +58,12 @@ class OPstream
// ID of receiving processor
int toProcNo_;
//- Message tag
const int tag_;
//- Communicator
const label comm_;
// Private member functions
@@ -88,14 +94,15 @@ public:
const commsTypes commsType,
const int toProcNo,
const label bufSize = 0,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm,
streamFormat format = BINARY,
versionNumber version = currentVersion
);
//- Destructor
~OPstream();
// Member functions
@@ -117,15 +124,11 @@ public:
const commsTypes commsType,
const int toProcNo,
const char* buf,
const std::streamsize bufSize,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
//- Non-blocking writes: wait until all have finished.
static void waitRequests();
//- Non-blocking writes: has request i finished?
static bool finishedRequest(const label i);
//- Write next token to stream
Ostream& write(const token&);


@@ -60,9 +60,33 @@ bool Foam::OPstream::write
const commsTypes commsType,
const int toProcNo,
const char* buf,
const std::streamsize bufSize,
const int tag,
const label comm
)
{
if (debug)
{
Pout<< "OPstream::write : starting write to:" << toProcNo
<< " tag:" << tag
<< " comm:" << comm << " size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< Foam::endl;
}
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "OPstream::write : starting write to:" << toProcNo
<< " tag:" << tag
<< " comm:" << comm << " size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< " warnComm:" << Pstream::warnComm
<< Foam::endl;
error::printStack(Pout);
}
PstreamGlobals::checkCommunicator(comm, toProcNo);
bool transferFailed = true;
if (commsType == blocking)
@@ -71,11 +95,19 @@ bool Foam::OPstream::write
(
const_cast<char*>(buf),
bufSize,
MPI_BYTE,
toProcNo, //procID(toProcNo),
tag,
PstreamGlobals::MPICommunicators_[comm] // MPI_COMM_WORLD
);
if (debug)
{
Pout<< "OPstream::write : finished write to:" << toProcNo
<< " tag:" << tag << " size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< Foam::endl;
}
}
else if (commsType == scheduled)
{
@@ -83,11 +115,19 @@ bool Foam::OPstream::write
(
const_cast<char*>(buf),
bufSize,
MPI_BYTE,
toProcNo, //procID(toProcNo),
tag,
PstreamGlobals::MPICommunicators_[comm] // MPI_COMM_WORLD
);
if (debug)
{
Pout<< "OPstream::write : finished write to:" << toProcNo
<< " tag:" << tag << " size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< Foam::endl;
}
}
else if (commsType == nonBlocking)
{
@@ -97,14 +137,23 @@ bool Foam::OPstream::write
(
const_cast<char*>(buf),
bufSize,
MPI_BYTE,
toProcNo, //procID(toProcNo),
tag,
PstreamGlobals::MPICommunicators_[comm],// MPI_COMM_WORLD,
&request
);
if (debug)
{
Pout<< "OPstream::write : started write to:" << toProcNo
<< " tag:" << tag << " size:" << label(bufSize)
<< " commsType:" << Pstream::commsTypeNames[commsType]
<< " request:" << PstreamGlobals::outstandingRequests_.size()
<< Foam::endl;
}
PstreamGlobals::outstandingRequests_.append(request);
}
else
{
@@ -120,56 +169,4 @@ bool Foam::OPstream::write
}
void Foam::OPstream::waitRequests()
{
if (PstreamGlobals::OPstream_outstandingRequests_.size())
{
if
(
MPI_Waitall
(
PstreamGlobals::OPstream_outstandingRequests_.size(),
PstreamGlobals::OPstream_outstandingRequests_.begin(),
MPI_STATUSES_IGNORE
)
)
{
FatalErrorIn
(
"OPstream::waitRequests()"
) << "MPI_Waitall returned with error" << Foam::endl;
}
PstreamGlobals::OPstream_outstandingRequests_.clear();
}
}
bool Foam::OPstream::finishedRequest(const label i)
{
if (i >= PstreamGlobals::OPstream_outstandingRequests_.size())
{
FatalErrorIn
(
"OPstream::finishedRequest(const label)"
) << "There are "
<< PstreamGlobals::OPstream_outstandingRequests_.size()
<< " outstanding send requests and you are asking for i=" << i
<< nl
<< "Maybe you are mixing blocking/non-blocking comms?"
<< Foam::abort(FatalError);
}
int flag;
MPI_Test
(
&PstreamGlobals::OPstream_outstandingRequests_[i],
&flag,
MPI_STATUS_IGNORE
);
return flag != 0;
}
// ************************************************************************* //
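With the per-class request lists merged into PstreamGlobals::outstandingRequests_, callers bracket a group of non-blocking transfers with Pstream::nRequests() and Pstream::waitRequests(). A hedged sketch of the calling pattern; the buffers, peer rank, tag and communicator are illustrative names, not part of this commit:

    // Remember where this scope's requests begin.
    label startOfRequests = Pstream::nRequests();

    // Post a non-blocking receive and a non-blocking send.
    IPstream::read
    (
        Pstream::nonBlocking,
        neighbProcNo,
        recvBuf.begin(),
        recvBuf.size(),
        tag,
        comm
    );

    OPstream::write
    (
        Pstream::nonBlocking,
        neighbProcNo,
        sendBuf.begin(),
        sendBuf.size(),
        tag,
        comm
    );

    // Block until only the requests issued above have completed.
    Pstream::waitRequests(startOfRequests);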

(File diff suppressed because it is too large)


@@ -29,11 +29,12 @@ Description
SourceFiles
Pstream.C
PstreamReduceOps.C
PstreamCommsStruct.C
gatherScatter.C
combineGatherScatter.C
gatherScatterList.C
PstreamExchange.C
\*---------------------------------------------------------------------------*/
@@ -47,6 +48,8 @@ SourceFiles
#include "NamedEnum.H"
#include "dynamicLabelList.H"
#include "optimisationSwitch.H"
#include "ListOps.H"
#include "LIFOStack.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -59,7 +62,6 @@ namespace Foam
class Pstream
{
public:
//- Types of communications
@@ -72,6 +74,7 @@ public:
static const NamedEnum<commsTypes, 3> commsTypeNames;
// Public classes
//- Structure for communicating between processors
@@ -85,7 +88,7 @@ public:
//- procIDs of processors directly below me
labelList below_;
//- procIDs of all processors below (not just directly below)
labelList allBelow_;
//- procIDs of all processors not below. (inverse set of
@@ -157,7 +160,7 @@ public:
friend Ostream& operator<<(Ostream&, const commsStruct&);
};
//- combineReduce operator for lists. Used for counting.
class listEq
{
public:
@@ -165,7 +168,7 @@ public:
template<class T>
void operator()(T& x, const T& y) const
{
forAll (y, i)
{
if (y[i].size())
{
@@ -183,32 +186,38 @@ private:
//- Is this a parallel run?
static bool parRun_;
//- Default message type info
static int myProcNo_;
//- Process IDs
static List<int> procIDs_;
//- Default message type
static const int msgType_;
//- Stack of free comms
static LIFOStack<label> freeComms_;
//- My processor index
static DynamicList<int> myProcNo_;
//- Process IDs
static DynamicList<List<int> > procIDs_;
//- List of parent communicators
static DynamicList<label> parentCommunicator_;
//- Structure for linear communications
static DynamicList<List<commsStruct> > linearCommunication_;
//- Structure for tree communications
static DynamicList<List<commsStruct> > treeCommunication_;
// Private member functions
//- Set data for parallel running
static void setParRun(const label nProcs);
//- Calculate linear communication schedule
static List<commsStruct> calcLinearComm(const label nProcs);
//- Calculate tree communication schedule
static List<commsStruct> calcTreeComm(const label nProcs);
//- Helper function for tree communication schedule determination
// Collects all processorIDs below a processor
@@ -223,6 +232,19 @@ private:
// Pstream::init()
static void initCommunicationSchedule();
//- Allocate a communicator with index
static void allocatePstreamCommunicator
(
const label parentIndex,
const label index
);
//- Free a communicator
static void freePstreamCommunicator
(
const label index
);
protected:
@@ -268,6 +290,15 @@ public:
);
}
//- Number of polling cycles in processor updates
static const debug::optimisationSwitch nPollProcInterfaces;
//- Default communicator (all processors)
static label worldComm;
//- Debugging: warn for use of any communicator differing from warnComm
static label warnComm;
// Constructors
@@ -290,6 +321,79 @@ public:
// Member functions
//- Allocate a new communicator
static label allocateCommunicator
(
const label parent,
const labelList& subRanks,
const bool doPstream = true
);
//- Free a previously allocated communicator
static void freeCommunicator
(
const label communicator,
const bool doPstream = true
);
//- Free all communicators
static void freeCommunicators(const bool doPstream);
//- Helper class for allocating/freeing communicators
class communicator
{
//- Communicator identifier
label comm_;
//- Disallow copy and assignment
communicator(const communicator&);
void operator=(const communicator&);
public:
//- Construct from components
communicator
(
const label parent,
const labelList& subRanks,
const bool doPstream
)
:
comm_(allocateCommunicator(parent, subRanks, doPstream))
{}
//- Destructor
~communicator()
{
freeCommunicator(comm_);
}
//- Cast to label
operator label() const
{
return comm_;
}
};
//- Return physical processor number (i.e. processor number in
// worldComm) given communicator and processor
static int baseProcNo(const label myComm, const int procID);
//- Return processor number in communicator (given physical processor
// number) (= reverse of baseProcNo)
static label procNo(const label comm, const int baseProcID);
//- Return processor number in communicator (given processor number
// and communicator)
static label procNo
(
const label myComm,
const label currentComm,
const int currentProcID
);
//- Add the valid option this type of communications library
// adds/requires on the command line
static void addValidParOptions(HashTable<string>& validParOptions);
@@ -298,44 +402,78 @@ public:
// Spawns slave processes and initialises inter-communication
static bool init(int& argc, char**& argv);
// Non-blocking comms
//- Get number of outstanding requests
static label nRequests();
//- Truncate number of outstanding requests
static void resetRequests(const label sz);
//- Wait until all requests (from start onwards) have finished.
static void waitRequests(const label start = 0);
//- Wait until request i has finished.
static void waitRequest(const label i);
//- Non-blocking comms: has request i finished?
static bool finishedRequest(const label i);
static int allocateTag(const char*);
static int allocateTag(const word&);
static void freeTag(const char*, const int tag);
static void freeTag(const word&, const int tag);
//- Is this a parallel run?
static bool& parRun()
{
return parRun_;
}
//- Number of processes in parallel run for a given communicator
static label nProcs(const label communicator = 0)
{
return procIDs_[communicator].size();
}
//- Process index of the master for the global communicator
static bool master()
{
return myProcNo_ == masterNo();
}
//- Process index of the master
static int masterNo()
{
return 0;
}
//- Am I the master process
static bool master(const label communicator = 0)
{
return myProcNo_[communicator] == masterNo();
}
//- Number of this process (starting from masterNo() = 0)
static int myProcNo(const label communicator = 0)
{
return myProcNo_[communicator];
}
//- Return parent communicator
static label parent(const label communicator)
{
return parentCommunicator_(communicator);
} }
//- Process IDs
static const List<int>& procIDs(const int communicator)
{
return procIDs_[communicator];
}
//- Process ID of given process index
static List<int>& procID(const int procNo)
{
return procIDs_[procNo];
}
@@ -347,21 +485,27 @@ public:
}
//- Process index of last slave
static int lastSlave(const label communicator = 0)
{
return nProcs(communicator) - 1;
}
//- Communication schedule for linear all-to-master (proc 0)
static const List<commsStruct>& linearCommunication
(
const label communicator = 0
)
{
return linearCommunication_[communicator];
}
//- Communication schedule for tree all-to-master (proc 0)
static const List<commsStruct>& treeCommunication
(
const label communicator = 0
)
{
return treeCommunication_[communicator];
}
//- Message tag of standard messages
@@ -400,21 +544,40 @@ public:
(
const List<commsStruct>& comms,
T& Value,
const BinaryOp& bop,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T, class BinaryOp>
static void gather
(
T& Value,
const BinaryOp& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
//- Scatter data. Distribute without modification.
// Reverse of gather
template <class T>
static void scatter
(
const List<commsStruct>& comms,
T& Value,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T>
static void scatter
(
T& Value,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// Combine variants. Inplace combine values from processors.
@@ -425,24 +588,39 @@ public:
(
const List<commsStruct>& comms,
T& Value,
const CombineOp& cop,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T, class CombineOp>
static void combineGather
(
T& Value,
const CombineOp& cop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
//- Scatter data. Reverse of combineGather
template <class T>
static void combineScatter
(
const List<commsStruct>& comms,
T& Value,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T>
static void combineScatter
(
T& Value,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// Combine variants working on whole List at a time.
@@ -451,7 +629,9 @@ public:
(
const List<commsStruct>& comms,
List<T>& Value,
const CombineOp& cop,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
@@ -459,7 +639,9 @@ public:
static void listCombineGather
(
List<T>& Value,
const CombineOp& cop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
//- Scatter data. Reverse of combineGather
@@ -467,12 +649,19 @@ public:
static void listCombineScatter
(
const List<commsStruct>& comms,
List<T>& Value,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T>
static void listCombineScatter
(
List<T>& Value,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// Combine variants working on whole map at a time. Container needs to
// have iterators and find() defined.
@@ -482,7 +671,9 @@ public:
(
const List<commsStruct>& comms,
Container& Values,
const CombineOp& cop,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
@@ -490,7 +681,9 @@ public:
static void mapCombineGather
(
Container& Values,
const CombineOp& cop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
//- Scatter data. Reverse of combineGather
@@ -498,13 +691,19 @@ public:
static void mapCombineScatter
(
const List<commsStruct>& comms,
Container& Values,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class Container>
static void mapCombineScatter
(
Container& Values,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// Gather/scatter keeping the individual processor data separate.
@@ -516,24 +715,56 @@ public:
static void gatherList
(
const List<commsStruct>& comms,
List<T>& Values,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T>
static void gatherList
(
List<T>& Values,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
//- Scatter data. Reverse of gatherList
template <class T>
static void scatterList
(
const List<commsStruct>& comms,
List<T>& Values,
const int tag,
const label comm
);
//- Like above but switches between linear/tree communication
template <class T>
static void scatterList
(
List<T>& Values,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// Exchange
//- Exchange data. Sends sendData, receives into recvData, sets
// sizes (not bytes). sizes[p0][p1] is what processor p0 has
// sent to p1. Contiguous data only.
// If block=true will wait for all transfers to finish.
template<class Container, class T>
static void exchange
(
const List<Container>&,
List<Container>&,
labelListList& sizes,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm,
const bool block = true
);
};
@@ -556,6 +787,7 @@ Ostream& operator<<(Ostream&, const Pstream::commsStruct&);
# include "gatherScatter.C"
# include "combineGatherScatter.C"
# include "gatherScatterList.C"
# include "PstreamExchange.C"
#endif
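Taken together, the new statics make it possible to confine a collective operation to a subset of ranks. A minimal sketch, assuming a run with at least four processors; subRanks holds world-communicator process indices, and the check against -1 assumes ranks outside the subset report myProcNo() == -1:

    // Carve a sub-communicator from the first half of the world ranks.
    labelList subRanks(Pstream::nProcs()/2);
    forAll (subRanks, i)
    {
        subRanks[i] = i;
    }

    // RAII helper: the communicator is freed when subComm leaves scope.
    Pstream::communicator subComm(Pstream::worldComm, subRanks, true);

    if (Pstream::myProcNo(subComm) != -1)
    {
        // Only members of the sub-communicator take part in this reduction.
        scalar localValue = 1.0;
        reduce(localValue, sumOp<scalar>(), Pstream::msgType(), subComm);
    }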


@@ -50,26 +50,62 @@ void combineReduce
(
const List<Pstream::commsStruct>& comms,
T& Value,
const CombineOp& cop,
const int tag,
const label comm
)
{
Pstream::combineGather(comms, Value, cop, tag, comm);
Pstream::combineScatter(comms, Value, tag, comm);
}
template <class T, class CombineOp>
void combineReduce
(
T& Value,
const CombineOp& cop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
)
{
if (Pstream::nProcs() < Pstream::nProcsSimpleSum())
{
Pstream::combineGather
(
Pstream::linearCommunication(comm),
Value,
cop,
tag,
comm
);
Pstream::combineScatter
(
Pstream::linearCommunication(comm),
Value,
tag,
comm
);
}
else
{
Pstream::combineGather
(
Pstream::treeCommunication(comm),
Value,
cop,
tag,
comm
);
Pstream::combineScatter
(
Pstream::treeCommunication(comm),
Value,
tag,
comm
);
}
}
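A short usage sketch of the extended combineReduce, gathering a per-processor count with the listEq combiner; myCount is an illustrative local value, not a name from this commit:

    // Each rank fills only its own slot; after the call every rank holds
    // the fully merged list.
    List<labelList> counts(Pstream::nProcs());
    counts[Pstream::myProcNo()].setSize(1, myCount);

    combineReduce(counts, Pstream::listEq(), Pstream::msgType(), Pstream::worldComm);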


@@ -68,13 +68,13 @@ Foam::Pstream::commsStruct::commsStruct
{
boolList inBelow(nProcs, false);
forAll (allBelow, belowI)
{
inBelow[allBelow[belowI]] = true;
}
label notI = 0;
forAll (inBelow, procI)
{
if ((procI != myProcID) && !inBelow[procI])
{


@@ -0,0 +1,159 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | foam-extend: Open Source CFD
\\ / O peration | Version: 4.0
\\ / A nd | Web: http://www.foam-extend.org
\\/ M anipulation | For copyright notice see file Copyright
-------------------------------------------------------------------------------
License
This file is part of foam-extend.
foam-extend is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
foam-extend is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
Description
Pstream exchange data.
\*---------------------------------------------------------------------------*/
#include "Pstream.H"
#include "contiguous.H"
#include "PstreamCombineReduceOps.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
//template<template<class> class ListType, class T>
template<class Container, class T>
void Pstream::exchange
(
const List<Container>& sendBufs,
List<Container>& recvBufs,
labelListList& sizes,
const int tag,
const label comm,
const bool block
)
{
if (!contiguous<T>())
{
FatalErrorIn
(
"Pstream::exchange(..)"
) << "Continuous data only." << Foam::abort(FatalError);
}
if (sendBufs.size() != Pstream::nProcs(comm))
{
FatalErrorIn
(
"Pstream::exchange(..)"
) << "Size of list:" << sendBufs.size()
<< " does not equal the number of processors:"
<< Pstream::nProcs(comm)
<< Foam::abort(FatalError);
}
sizes.setSize(Pstream::nProcs(comm));
labelList& nsTransPs = sizes[Pstream::myProcNo(comm)];
nsTransPs.setSize(Pstream::nProcs(comm));
forAll (sendBufs, procI)
{
nsTransPs[procI] = sendBufs[procI].size();
}
// Send sizes across. Note: blocks.
combineReduce(sizes, Pstream::listEq(), tag, comm);
if (Pstream::nProcs(comm) > 1)
{
label startOfRequests = Pstream::nRequests();
// Set up receives
// ~~~~~~~~~~~~~~~
recvBufs.setSize(sendBufs.size());
forAll (sizes, procI)
{
label nRecv = sizes[procI][Pstream::myProcNo(comm)];
if (procI != Pstream::myProcNo(comm) && nRecv > 0)
{
recvBufs[procI].setSize(nRecv);
IPstream::read
(
Pstream::nonBlocking,
procI,
reinterpret_cast<char*>(recvBufs[procI].begin()),
nRecv*sizeof(T),
tag,
comm
);
}
}
// Set up sends
// ~~~~~~~~~~~~
forAll (sendBufs, procI)
{
if (procI != Pstream::myProcNo(comm) && sendBufs[procI].size() > 0)
{
if
(
!OPstream::write
(
Pstream::nonBlocking,
procI,
reinterpret_cast<const char*>(sendBufs[procI].begin()),
sendBufs[procI].size()*sizeof(T),
tag,
comm
)
)
{
FatalErrorIn("Pstream::exchange(..)")
<< "Cannot send outgoing message. "
<< "to:" << procI << " nBytes:"
<< label(sendBufs[procI].size()*sizeof(T))
<< Foam::abort(FatalError);
}
}
}
// Wait for all to finish
// ~~~~~~~~~~~~~~~~~~~~~~
if (block)
{
Pstream::waitRequests(startOfRequests);
}
}
// Do myself
recvBufs[Pstream::myProcNo(comm)] = sendBufs[Pstream::myProcNo(comm)];
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //
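A hedged usage sketch of the new exchange(): sendBufs[procI] is what this rank sends to procI, recvBufs[procI] is what procI sent back here, and sizes records the full transfer matrix. The payload below is illustrative:

    List<labelList> sendBufs(Pstream::nProcs());
    List<labelList> recvBufs;
    labelListList sizes;

    // Send one label (our rank) to every processor.
    forAll (sendBufs, procI)
    {
        sendBufs[procI].setSize(1, Pstream::myProcNo());
    }

    Pstream::exchange<labelList, label>
    (
        sendBufs,
        recvBufs,
        sizes,
        Pstream::msgType(),
        Pstream::worldComm,
        true
    );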


@@ -33,10 +33,50 @@ namespace Foam
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
// Outstanding non-blocking operations.
//! \cond fileScope
DynamicList<MPI_Request> PstreamGlobals::outstandingRequests_;
//! \endcond
// Max outstanding message tag operations.
//! \cond fileScope
int PstreamGlobals::nTags_ = 0;
//! \endcond
// Free'd message tags
//! \cond fileScope
DynamicList<int> PstreamGlobals::freedTags_;
//! \endcond
// Allocated communicators.
//! \cond fileScope
DynamicList<MPI_Comm> PstreamGlobals::MPICommunicators_;
DynamicList<MPI_Group> PstreamGlobals::MPIGroups_;
//! \endcond
void PstreamGlobals::checkCommunicator
(
const label comm,
const label otherProcNo
)
{
if
(
comm < 0
|| comm >= PstreamGlobals::MPICommunicators_.size()
)
{
FatalErrorIn
(
"PstreamGlobals::checkCommunicator(const label, const label)"
) << "otherProcNo:" << otherProcNo << " : illegal communicator "
<< comm << endl
<< "Communicator should be within range 0.."
<< PstreamGlobals::MPICommunicators_.size() - 1
<< abort(FatalError);
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //


@@ -46,14 +46,24 @@ namespace Foam
{
/*---------------------------------------------------------------------------*\
Class PstreamGlobals Declaration
\*---------------------------------------------------------------------------*/
namespace PstreamGlobals
{
extern DynamicList<MPI_Request> outstandingRequests_;
extern DynamicList<MPI_Request> OPstream_outstandingRequests_;
extern int nTags_;
extern DynamicList<int> freedTags_;
// Current communicators. First element will be MPI_COMM_WORLD
extern DynamicList<MPI_Comm> MPICommunicators_;
extern DynamicList<MPI_Group> MPIGroups_;
void checkCommunicator(const label, const label procNo);
};
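The label that Pstream hands around is nothing more than an index into these parallel lists; MPI-level code resolves it on demand. A one-line sketch of the indirection, with comm an illustrative communicator label:

    // Translate a Foam communicator label into the underlying MPI handle.
    MPI_Comm mpiComm = PstreamGlobals::MPICommunicators_[comm];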


@@ -21,28 +21,11 @@ License
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
Description
Prints out a description of the streams
\*---------------------------------------------------------------------------*/
#include "PstreamReduceOps.H"
#include "allReduce.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void Foam::IPstream::print(Ostream& os) const
{
os << "Reading from processor " << fromProcNo_
<< " to processor " << myProcNo() << Foam::endl;
}
void Foam::OPstream::print(Ostream& os) const
{
os << "Writing from processor " << toProcNo_
<< " to processor " << myProcNo() << Foam::endl;
}
// ************************************************************************* //


@@ -28,6 +28,7 @@ License
#include "Pstream.H"
#include "ops.H"
#include "vector2D.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -42,11 +43,20 @@ void reduce
(
const List<Pstream::commsStruct>& comms,
T& Value,
const BinaryOp& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< endl;
error::printStack(Pout);
}
Pstream::gather(comms, Value, bop, tag, comm);
Pstream::scatter(comms, Value, tag, comm);
}
@@ -55,16 +65,18 @@ template <class T, class BinaryOp>
void reduce
(
T& Value,
const BinaryOp& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
)
{
if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{
reduce(Pstream::linearCommunication(comm), Value, bop, tag, comm);
}
else
{
reduce(Pstream::treeCommunication(comm), Value, bop, tag, comm);
}
}
@@ -74,26 +86,100 @@ template <class T, class BinaryOp>
T returnReduce
(
const T& Value,
const BinaryOp& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
)
{
T WorkValue(Value);
if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{
reduce(Pstream::linearCommunication(comm), WorkValue, bop, tag, comm);
}
else
{
reduce(Pstream::treeCommunication(comm), WorkValue, bop, tag, comm);
}
return WorkValue;
}
// Reduce with sum of both value and count (for averaging)
template<class T>
void sumReduce
(
T& Value,
label& Count,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
)
{
reduce(Value, sumOp<T>(), tag, comm);
reduce(Count, sumOp<label>(), tag, comm);
}
// Non-blocking version of reduce. Sets request.
template<class T, class BinaryOp>
void reduce
(
T& Value,
const BinaryOp& bop,
const int tag,
const label comm,
label& request
)
{
notImplemented
(
"reduce(T&, const BinaryOp&, const int, const label, label&"
);
}
// Insist there are specialisations for the common reductions of scalar(s)
void reduce
(
scalar& Value,
const sumOp<scalar>& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
scalar& Value,
const minOp<scalar>& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
vector2D& Value,
const sumOp<vector2D>& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void sumReduce
(
scalar& Value,
label& Count,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
scalar& Value,
const sumOp<scalar>& bop,
const int tag,
const label comm,
label& request
);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
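Because tag and comm default to Pstream::msgType() and Pstream::worldComm, existing reduce() call sites compile unchanged, while communicator-aware code passes both explicitly. A brief sketch of the two forms; localResidual and mesh are illustrative names:

    scalar localResidual = 0.1;

    // Old form: implicit world communicator.
    reduce(localResidual, sumOp<scalar>());

    // New form: reduce only across the communicator held by the mesh.
    reduce(localResidual, sumOp<scalar>(), Pstream::msgType(), mesh.comm());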


@@ -0,0 +1,71 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | foam-extend: Open Source CFD
\\ / O peration | Version: 4.0
\\ / A nd | Web: http://www.foam-extend.org
\\/ M anipulation | For copyright notice see file Copyright
-------------------------------------------------------------------------------
License
This file is part of foam-extend.
foam-extend is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
foam-extend is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
Description
Various functions to wrap MPI_Allreduce
SourceFiles
allReduceTemplates.C
\*---------------------------------------------------------------------------*/
#ifndef allReduce_H
#define allReduce_H
#include "mpi.h"
#include "Pstream.H"
#include "PstreamGlobals.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
template<class Type, class BinaryOp>
void allReduce
(
Type& Value,
int count,
MPI_Datatype MPIType,
MPI_Op op,
const BinaryOp& bop,
const int tag,
const int communicator
);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
# include "allReduceTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
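A hedged sketch of what the scalar sum specialisation can forward to, assuming a double-precision build (a single-precision build would pass MPI_FLOAT instead):

    void Foam::reduce
    (
        scalar& Value,
        const sumOp<scalar>& bop,
        const int tag,
        const label comm
    )
    {
        // Delegate to the MPI_Allreduce wrapper with the matching MPI types.
        allReduce(Value, 1, MPI_DOUBLE, MPI_SUM, bop, tag, comm);
    }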


@@ -0,0 +1,211 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | foam-extend: Open Source CFD
\\ / O peration | Version: 4.0
\\ / A nd | Web: http://www.foam-extend.org
\\/ M anipulation | For copyright notice see file Copyright
-------------------------------------------------------------------------------
License
This file is part of foam-extend.
foam-extend is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
foam-extend is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "allReduce.H"
// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //
template<class Type, class BinaryOp>
void Foam::allReduce
(
Type& Value,
int MPICount,
MPI_Datatype MPIType,
MPI_Op MPIOp,
const BinaryOp& bop,
const int tag,
const label comm
)
{
if (!Pstream::parRun())
{
return;
}
if (Pstream::nProcs(comm) <= Pstream::nProcsSimpleSum())
{
if (Pstream::master(comm))
{
for
(
int slave = Pstream::firstSlave();
slave <= Pstream::lastSlave(comm);
slave++
)
{
Type value;
if
(
MPI_Recv
(
&value,
MPICount,
MPIType,
slave, //Pstream::procID(slave),
tag,
PstreamGlobals::MPICommunicators_[comm],
MPI_STATUS_IGNORE
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Recv failed"
<< Foam::abort(FatalError);
}
Value = bop(Value, value);
}
}
else
{
if
(
MPI_Send
(
&Value,
MPICount,
MPIType,
Pstream::masterNo(),//Pstream::procID(masterNo()),
tag,
PstreamGlobals::MPICommunicators_[comm]
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Send failed"
<< Foam::abort(FatalError);
}
}
if (Pstream::master(comm))
{
for
(
int slave = Pstream::firstSlave();
slave <= Pstream::lastSlave(comm);
slave++
)
{
if
(
MPI_Send
(
&Value,
MPICount,
MPIType,
slave, //Pstream::procID(slave),
tag,
PstreamGlobals::MPICommunicators_[comm]
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Send failed"
<< Foam::abort(FatalError);
}
}
}
else
{
if
(
MPI_Recv
(
&Value,
MPICount,
MPIType,
Pstream::masterNo(),//Pstream::procID(masterNo()),
tag,
PstreamGlobals::MPICommunicators_[comm],
MPI_STATUS_IGNORE
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Recv failed"
<< Foam::abort(FatalError);
}
}
}
else
{
Type sum;
MPI_Allreduce
(
&Value,
&sum,
MPICount,
MPIType,
MPIOp,
PstreamGlobals::MPICommunicators_[comm]
);
Value = sum;
}
}
// ************************************************************************* //
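A direct call, as a hedged illustration of how the arguments pair up (code assumed to live inside namespace Foam, double-precision scalars assumed):

    scalar v = 0;   // local contribution

    allReduce
    (
        v,                    // reduced in place
        1,                    // element count
        MPI_DOUBLE,           // MPI datatype matching scalar
        MPI_SUM,              // used on the MPI_Allreduce path
        sumOp<scalar>(),      // used on the explicit send/recv path
        Pstream::msgType(),
        Pstream::worldComm
    );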

View file

@ -50,16 +50,18 @@ void Pstream::combineGather
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
T& Value, T& Value,
const CombineOp& cop const CombineOp& cop,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from my downstairs neighbours // Receive from my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
@ -71,7 +73,9 @@ void Pstream::combineGather
Pstream::scheduled, Pstream::scheduled,
belowID, belowID,
reinterpret_cast<char*>(&value), reinterpret_cast<char*>(&value),
sizeof(T) sizeof(T),
tag,
comm
); );
if (debug > 1) if (debug > 1)
@ -84,7 +88,7 @@ void Pstream::combineGather
} }
else else
{ {
IPstream fromBelow(Pstream::scheduled, belowID); IPstream fromBelow(Pstream::scheduled, belowID, 0, tag, comm);
T value(fromBelow); T value(fromBelow);
if (debug > 1) if (debug > 1)
@ -113,12 +117,21 @@ void Pstream::combineGather
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<const char*>(&Value), reinterpret_cast<const char*>(&Value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
OPstream toAbove(Pstream::scheduled, myComm.above()); OPstream toAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
toAbove << Value; toAbove << Value;
} }
} }
@ -127,26 +140,52 @@ void Pstream::combineGather
template <class T, class CombineOp> template <class T, class CombineOp>
void Pstream::combineGather(T& Value, const CombineOp& cop) void Pstream::combineGather
(
T& Value,
const CombineOp& cop,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
combineGather(Pstream::linearCommunication(), Value, cop); combineGather
(
Pstream::linearCommunication(comm),
Value,
cop,
tag,
comm
);
} }
else else
{ {
combineGather(Pstream::treeCommunication(), Value, cop); combineGather
(
Pstream::treeCommunication(comm),
Value,
cop,
tag,
comm
);
} }
} }
template <class T> template <class T>
void Pstream::combineScatter(const List<Pstream::commsStruct>& comms, T& Value) void Pstream::combineScatter
(
const List<Pstream::commsStruct>& comms,
T& Value,
const int tag,
const label comm
)
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const Pstream::commsStruct& myComm = comms[Pstream::myProcNo()]; const Pstream::commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from up // Receive from up
if (myComm.above() != -1) if (myComm.above() != -1)
@ -158,12 +197,21 @@ void Pstream::combineScatter(const List<Pstream::commsStruct>& comms, T& Value)
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<char*>(&Value), reinterpret_cast<char*>(&Value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
IPstream fromAbove(Pstream::scheduled, myComm.above()); IPstream fromAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
Value = T(fromAbove); Value = T(fromAbove);
} }
@ -175,7 +223,7 @@ void Pstream::combineScatter(const List<Pstream::commsStruct>& comms, T& Value)
} }
// Send to my downstairs neighbours // Send to my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
@ -191,12 +239,14 @@ void Pstream::combineScatter(const List<Pstream::commsStruct>& comms, T& Value)
Pstream::scheduled, Pstream::scheduled,
belowID, belowID,
reinterpret_cast<const char*>(&Value), reinterpret_cast<const char*>(&Value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
OPstream toBelow(Pstream::scheduled, belowID); OPstream toBelow(Pstream::scheduled, belowID, 0, tag, comm);
toBelow << Value; toBelow << Value;
} }
} }
@ -205,15 +255,20 @@ void Pstream::combineScatter(const List<Pstream::commsStruct>& comms, T& Value)
template <class T> template <class T>
void Pstream::combineScatter(T& Value) void Pstream::combineScatter
(
T& Value,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
combineScatter(Pstream::linearCommunication(), Value); combineScatter(Pstream::linearCommunication(comm), Value, tag, comm);
} }
else else
{ {
combineScatter(Pstream::treeCommunication(), Value); combineScatter(Pstream::treeCommunication(comm), Value, tag, comm);
} }
} }
@ -227,16 +282,18 @@ void Pstream::listCombineGather
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
List<T>& Values, List<T>& Values,
const CombineOp& cop const CombineOp& cop,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from my downstairs neighbours // Receive from my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
@ -249,7 +306,9 @@ void Pstream::listCombineGather
Pstream::scheduled, Pstream::scheduled,
belowID, belowID,
reinterpret_cast<char*>(receivedValues.begin()), reinterpret_cast<char*>(receivedValues.begin()),
receivedValues.byteSize() receivedValues.byteSize(),
tag,
comm
); );
if (debug > 1) if (debug > 1)
@ -258,14 +317,14 @@ void Pstream::listCombineGather
<< belowID << " data:" << receivedValues << endl; << belowID << " data:" << receivedValues << endl;
} }
forAll(Values, i) forAll (Values, i)
{ {
cop(Values[i], receivedValues[i]); cop(Values[i], receivedValues[i]);
} }
} }
else else
{ {
IPstream fromBelow(Pstream::scheduled, belowID); IPstream fromBelow(Pstream::scheduled, belowID, 0, tag, comm);
List<T> receivedValues(fromBelow); List<T> receivedValues(fromBelow);
if (debug > 1) if (debug > 1)
@ -274,7 +333,7 @@ void Pstream::listCombineGather
<< belowID << " data:" << receivedValues << endl; << belowID << " data:" << receivedValues << endl;
} }
forAll(Values, i) forAll (Values, i)
{ {
cop(Values[i], receivedValues[i]); cop(Values[i], receivedValues[i]);
} }
@ -297,12 +356,21 @@ void Pstream::listCombineGather
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<const char*>(Values.begin()), reinterpret_cast<const char*>(Values.begin()),
Values.byteSize() Values.byteSize(),
tag,
comm
); );
} }
else else
{ {
OPstream toAbove(Pstream::scheduled, myComm.above()); OPstream toAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
toAbove << Values; toAbove << Values;
} }
} }
@ -311,15 +379,35 @@ void Pstream::listCombineGather
template <class T, class CombineOp> template <class T, class CombineOp>
void Pstream::listCombineGather(List<T>& Values, const CombineOp& cop) void Pstream::listCombineGather
(
List<T>& Values,
const CombineOp& cop,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
listCombineGather(Pstream::linearCommunication(), Values, cop); listCombineGather
(
Pstream::linearCommunication(comm),
Values,
cop,
tag,
comm
);
} }
else else
{ {
listCombineGather(Pstream::treeCommunication(), Values, cop); listCombineGather
(
Pstream::treeCommunication(comm),
Values,
cop,
tag,
comm
);
} }
} }
@ -328,13 +416,15 @@ template <class T>
void Pstream::listCombineScatter void Pstream::listCombineScatter
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
List<T>& Values List<T>& Values,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const Pstream::commsStruct& myComm = comms[Pstream::myProcNo()]; const Pstream::commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from up // Receive from up
if (myComm.above() != -1) if (myComm.above() != -1)
@ -346,12 +436,21 @@ void Pstream::listCombineScatter
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<char*>(Values.begin()), reinterpret_cast<char*>(Values.begin()),
Values.byteSize() Values.byteSize(),
tag,
comm
); );
} }
else else
{ {
IPstream fromAbove(Pstream::scheduled, myComm.above()); IPstream fromAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
fromAbove >> Values; fromAbove >> Values;
} }
@ -363,7 +462,7 @@ void Pstream::listCombineScatter
} }
// Send to my downstairs neighbours // Send to my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
@ -379,12 +478,14 @@ void Pstream::listCombineScatter
Pstream::scheduled, Pstream::scheduled,
belowID, belowID,
reinterpret_cast<const char*>(Values.begin()), reinterpret_cast<const char*>(Values.begin()),
Values.byteSize() Values.byteSize(),
tag,
comm
); );
} }
else else
{ {
OPstream toBelow(Pstream::scheduled, belowID); OPstream toBelow(Pstream::scheduled, belowID, 0, tag, comm);
toBelow << Values; toBelow << Values;
} }
} }
@ -393,44 +494,60 @@ void Pstream::listCombineScatter
template <class T> template <class T>
void Pstream::listCombineScatter(List<T>& Values) void Pstream::listCombineScatter
(
List<T>& Values,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
listCombineScatter(Pstream::linearCommunication(), Values); listCombineScatter
(
Pstream::linearCommunication(comm),
Values,
tag,
comm
);
} }
else else
{ {
listCombineScatter(Pstream::treeCommunication(), Values); listCombineScatter
(
Pstream::treeCommunication(comm),
Values,
tag,
comm
);
} }
} }
// Same thing but for sparse list (map) // Same thing but for sparse list (map)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <class Container, class CombineOp> template <class Container, class CombineOp>
void Pstream::mapCombineGather void Pstream::mapCombineGather
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
Container& Values, Container& Values,
const CombineOp& cop const CombineOp& cop,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from my downstairs neighbours // Receive from my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
IPstream fromBelow(Pstream::scheduled, belowID); IPstream fromBelow(Pstream::scheduled, belowID, 0, tag, comm);
Container receivedValues(fromBelow); Container receivedValues(fromBelow);
if (debug > 1) if (debug > 1)
@ -470,7 +587,7 @@ void Pstream::mapCombineGather
<< " data:" << Values << endl; << " data:" << Values << endl;
} }
OPstream toAbove(Pstream::scheduled, myComm.above()); OPstream toAbove(Pstream::scheduled, myComm.above(), 0, tag, comm);
toAbove << Values; toAbove << Values;
} }
} }
@ -478,15 +595,35 @@ void Pstream::mapCombineGather
template <class Container, class CombineOp> template <class Container, class CombineOp>
void Pstream::mapCombineGather(Container& Values, const CombineOp& cop) void Pstream::mapCombineGather
(
Container& Values,
const CombineOp& cop,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
mapCombineGather(Pstream::linearCommunication(), Values, cop); mapCombineGather
(
Pstream::linearCommunication(comm),
Values,
cop,
tag,
comm
);
} }
else else
{ {
mapCombineGather(Pstream::treeCommunication(), Values, cop); mapCombineGather
(
Pstream::treeCommunication(comm),
Values,
cop,
tag,
comm
);
} }
} }
@ -495,18 +632,27 @@ template <class Container>
void Pstream::mapCombineScatter void Pstream::mapCombineScatter
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
Container& Values Container& Values,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const Pstream::commsStruct& myComm = comms[Pstream::myProcNo()]; const Pstream::commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from up // Receive from up
if (myComm.above() != -1) if (myComm.above() != -1)
{ {
IPstream fromAbove(Pstream::scheduled, myComm.above()); IPstream fromAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
fromAbove >> Values; fromAbove >> Values;
if (debug > 1) if (debug > 1)
@ -517,7 +663,7 @@ void Pstream::mapCombineScatter
} }
// Send to my downstairs neighbours // Send to my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
@ -526,7 +672,7 @@ void Pstream::mapCombineScatter
Pout<< " sending to " << belowID << " data:" << Values << endl; Pout<< " sending to " << belowID << " data:" << Values << endl;
} }
OPstream toBelow(Pstream::scheduled, belowID); OPstream toBelow(Pstream::scheduled, belowID, 0, tag, comm);
toBelow << Values; toBelow << Values;
} }
} }
@ -534,15 +680,32 @@ void Pstream::mapCombineScatter
template <class Container> template <class Container>
void Pstream::mapCombineScatter(Container& Values) void Pstream::mapCombineScatter
(
Container& Values,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
mapCombineScatter(Pstream::linearCommunication(), Values); mapCombineScatter
(
Pstream::linearCommunication(comm),
Values,
tag,
comm
);
} }
else else
{ {
mapCombineScatter(Pstream::treeCommunication(), Values); mapCombineScatter
(
Pstream::treeCommunication(),
Values,
tag,
comm
);
} }
} }
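A usage sketch for the gather/scatter combine pair with the new tag and communicator arguments (comm is any communicator label; plusEqOp modifies its first argument in place, which is the form the combine operators expect):

    label comm = Pstream::worldComm;   // placeholder communicator
    vector sum = vector::zero;         // local contribution

    // Master ends up holding the combined value...
    Pstream::combineGather(sum, plusEqOp<vector>(), Pstream::msgType(), comm);

    // ...and redistributes it down the same communication tree
    Pstream::combineScatter(sum, Pstream::msgType(), comm);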

View file

@ -45,16 +45,18 @@ void Pstream::gather
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
T& Value, T& Value,
const BinaryOp& bop const BinaryOp& bop,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from my downstairs neighbours // Receive from my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
T value; T value;
@ -65,12 +67,21 @@ void Pstream::gather
Pstream::scheduled, Pstream::scheduled,
myComm.below()[belowI], myComm.below()[belowI],
reinterpret_cast<char*>(&value), reinterpret_cast<char*>(&value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
IPstream fromBelow(Pstream::scheduled, myComm.below()[belowI]); IPstream fromBelow
(
Pstream::scheduled,
myComm.below()[belowI],
0,
tag,
comm
);
fromBelow >> value; fromBelow >> value;
} }
@ -87,12 +98,21 @@ void Pstream::gather
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<const char*>(&Value), reinterpret_cast<const char*>(&Value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
OPstream toAbove(Pstream::scheduled, myComm.above()); OPstream toAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
toAbove << Value; toAbove << Value;
} }
} }
@ -101,26 +121,38 @@ void Pstream::gather
template <class T, class BinaryOp> template <class T, class BinaryOp>
void Pstream::gather(T& Value, const BinaryOp& bop) void Pstream::gather
(
T& Value,
const BinaryOp& bop,
const int tag,
const label comm
)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
gather(Pstream::linearCommunication(), Value, bop); gather(Pstream::linearCommunication(comm), Value, bop, tag, comm);
} }
else else
{ {
gather(Pstream::treeCommunication(), Value, bop); gather(Pstream::treeCommunication(comm), Value, bop, tag, comm);
} }
} }
template <class T> template <class T>
void Pstream::scatter(const List<Pstream::commsStruct>& comms, T& Value) void Pstream::scatter
(
const List<Pstream::commsStruct>& comms,
T& Value,
const int tag,
const label comm
)
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from up // Receive from up
if (myComm.above() != -1) if (myComm.above() != -1)
@ -132,18 +164,27 @@ void Pstream::scatter(const List<Pstream::commsStruct>& comms, T& Value)
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<char*>(&Value), reinterpret_cast<char*>(&Value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
IPstream fromAbove(Pstream::scheduled, myComm.above()); IPstream fromAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
fromAbove >> Value; fromAbove >> Value;
} }
} }
// Send to my downstairs neighbours // Send to my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
if (contiguous<T>()) if (contiguous<T>())
{ {
@ -152,12 +193,21 @@ void Pstream::scatter(const List<Pstream::commsStruct>& comms, T& Value)
Pstream::scheduled, Pstream::scheduled,
myComm.below()[belowI], myComm.below()[belowI],
reinterpret_cast<const char*>(&Value), reinterpret_cast<const char*>(&Value),
sizeof(T) sizeof(T),
tag,
comm
); );
} }
else else
{ {
OPstream toBelow(Pstream::scheduled,myComm.below()[belowI]); OPstream toBelow
(
Pstream::scheduled,
myComm.below()[belowI],
0,
tag,
comm
);
toBelow << Value; toBelow << Value;
} }
} }
@ -166,15 +216,15 @@ void Pstream::scatter(const List<Pstream::commsStruct>& comms, T& Value)
template <class T> template <class T>
void Pstream::scatter(T& Value) void Pstream::scatter(T& Value, const int tag, const label comm)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
scatter(Pstream::linearCommunication(), Value); scatter(Pstream::linearCommunication(comm), Value, tag, comm);
} }
else else
{ {
scatter(Pstream::treeCommunication(), Value); scatter(Pstream::treeCommunication(comm), Value, tag, comm);
} }
} }
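The binary-operator form differs from the combine variants in that bop returns the combined value instead of modifying in place; a sketch under the same assumptions:

    label comm = Pstream::worldComm;   // placeholder communicator
    scalar maxCo = 0;                  // local Courant number, say

    Pstream::gather(maxCo, maxOp<scalar>(), Pstream::msgType(), comm);
    Pstream::scatter(maxCo, Pstream::msgType(), comm);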

View file

@ -49,28 +49,35 @@ template <class T>
void Pstream::gatherList void Pstream::gatherList
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
List<T>& Values List<T>& Values,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
if (Values.size() != Pstream::nProcs()) if (Values.size() != Pstream::nProcs(comm))
{ {
FatalErrorIn FatalErrorIn
( (
"Pstream::gatherList(const List<Pstream::commsStruct>&" "void Pstream::gatherList\n"
", List<T>)" "(\n"
" const List<Pstream::commsStruct>& comms,\n"
" List<T>& Values,\n"
" const int tag,\n"
" const label comm\n"
")"
) << "Size of list:" << Values.size() ) << "Size of list:" << Values.size()
<< " does not equal the number of processors:" << " does not equal the number of processors:"
<< Pstream::nProcs() << Pstream::nProcs(comm)
<< Foam::abort(FatalError); << Foam::abort(FatalError);
} }
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from my downstairs neighbours // Receive from my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
const labelList& belowLeaves = comms[belowID].allBelow(); const labelList& belowLeaves = comms[belowID].allBelow();
@ -84,19 +91,21 @@ void Pstream::gatherList
Pstream::scheduled, Pstream::scheduled,
belowID, belowID,
reinterpret_cast<char*>(receivedValues.begin()), reinterpret_cast<char*>(receivedValues.begin()),
receivedValues.byteSize() receivedValues.byteSize(),
tag,
comm
); );
Values[belowID] = receivedValues[0]; Values[belowID] = receivedValues[0];
forAll(belowLeaves, leafI) forAll (belowLeaves, leafI)
{ {
Values[belowLeaves[leafI]] = receivedValues[leafI + 1]; Values[belowLeaves[leafI]] = receivedValues[leafI + 1];
} }
} }
else else
{ {
IPstream fromBelow(Pstream::scheduled, belowID); IPstream fromBelow(Pstream::scheduled, belowID, 0, tag, comm);
fromBelow >> Values[belowID]; fromBelow >> Values[belowID];
if (debug > 1) if (debug > 1)
@ -107,7 +116,7 @@ void Pstream::gatherList
} }
// Receive from all other processors below belowID // Receive from all other processors below belowID
forAll(belowLeaves, leafI) forAll (belowLeaves, leafI)
{ {
label leafID = belowLeaves[leafI]; label leafID = belowLeaves[leafI];
fromBelow >> Values[leafID]; fromBelow >> Values[leafID];
@ -132,16 +141,16 @@ void Pstream::gatherList
if (debug > 1) if (debug > 1)
{ {
Pout<< " sending to " << myComm.above() Pout<< " sending to " << myComm.above()
<< " data from: " << Pstream::myProcNo() << " data from: " << Pstream::myProcNo(comm)
<< " data: " << Values[Pstream::myProcNo()] << endl; << " data: " << Values[Pstream::myProcNo(comm)] << endl;
} }
if (contiguous<T>()) if (contiguous<T>())
{ {
List<T> sendingValues(belowLeaves.size() + 1); List<T> sendingValues(belowLeaves.size() + 1);
sendingValues[0] = Values[Pstream::myProcNo()]; sendingValues[0] = Values[Pstream::myProcNo(comm)];
forAll(belowLeaves, leafI) forAll (belowLeaves, leafI)
{ {
sendingValues[leafI + 1] = Values[belowLeaves[leafI]]; sendingValues[leafI + 1] = Values[belowLeaves[leafI]];
} }
@ -151,15 +160,24 @@ void Pstream::gatherList
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<const char*>(sendingValues.begin()), reinterpret_cast<const char*>(sendingValues.begin()),
sendingValues.byteSize() sendingValues.byteSize(),
tag,
comm
); );
} }
else else
{ {
OPstream toAbove(Pstream::scheduled, myComm.above()); OPstream toAbove
toAbove << Values[Pstream::myProcNo()]; (
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
toAbove << Values[Pstream::myProcNo(comm)];
forAll(belowLeaves, leafI) forAll (belowLeaves, leafI)
{ {
label leafID = belowLeaves[leafI]; label leafID = belowLeaves[leafI];
@ -178,15 +196,15 @@ void Pstream::gatherList
template <class T> template <class T>
void Pstream::gatherList(List<T>& Values) void Pstream::gatherList(List<T>& Values, const int tag, const label comm)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
gatherList(Pstream::linearCommunication(), Values); gatherList(Pstream::linearCommunication(comm), Values, tag, comm);
} }
else else
{ {
gatherList(Pstream::treeCommunication(), Values); gatherList(Pstream::treeCommunication(comm), Values, tag, comm);
} }
} }
@ -195,17 +213,24 @@ template <class T>
void Pstream::scatterList void Pstream::scatterList
( (
const List<Pstream::commsStruct>& comms, const List<Pstream::commsStruct>& comms,
List<T>& Values List<T>& Values,
const int tag,
const label comm
) )
{ {
if (Pstream::parRun()) if (Pstream::nProcs(comm) > 1)
{ {
if (Values.size() != Pstream::nProcs()) if (Values.size() != Pstream::nProcs(comm))
{ {
FatalErrorIn FatalErrorIn
( (
"Pstream::scatterList(const List<Pstream::commsStruct>&" "void Pstream::scatterList\n"
", List<T>)" "(\n"
" const List<Pstream::commsStruct>& comms,\n"
" List<T>& Values,\n"
" const int tag,\n"
" const label comm\n"
")"
) << "Size of list:" << Values.size() ) << "Size of list:" << Values.size()
<< " does not equal the number of processors:" << " does not equal the number of processors:"
<< Pstream::nProcs() << Pstream::nProcs(comm)
@ -213,7 +238,7 @@ void Pstream::scatterList
} }
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[Pstream::myProcNo()]; const commsStruct& myComm = comms[Pstream::myProcNo(comm)];
// Receive from up // Receive from up
if (myComm.above() != -1) if (myComm.above() != -1)
@ -229,19 +254,28 @@ void Pstream::scatterList
Pstream::scheduled, Pstream::scheduled,
myComm.above(), myComm.above(),
reinterpret_cast<char*>(receivedValues.begin()), reinterpret_cast<char*>(receivedValues.begin()),
receivedValues.byteSize() receivedValues.byteSize(),
tag,
comm
); );
forAll(notBelowLeaves, leafI) forAll (notBelowLeaves, leafI)
{ {
Values[notBelowLeaves[leafI]] = receivedValues[leafI]; Values[notBelowLeaves[leafI]] = receivedValues[leafI];
} }
} }
else else
{ {
IPstream fromAbove(Pstream::scheduled, myComm.above()); IPstream fromAbove
(
Pstream::scheduled,
myComm.above(),
0,
tag,
comm
);
forAll(notBelowLeaves, leafI) forAll (notBelowLeaves, leafI)
{ {
label leafID = notBelowLeaves[leafI]; label leafID = notBelowLeaves[leafI];
fromAbove >> Values[leafID]; fromAbove >> Values[leafID];
@ -257,7 +291,7 @@ void Pstream::scatterList
} }
// Send to my downstairs neighbours // Send to my downstairs neighbours
forAll(myComm.below(), belowI) forAll (myComm.below(), belowI)
{ {
label belowID = myComm.below()[belowI]; label belowID = myComm.below()[belowI];
const labelList& notBelowLeaves = comms[belowID].allNotBelow(); const labelList& notBelowLeaves = comms[belowID].allNotBelow();
@ -266,7 +300,7 @@ void Pstream::scatterList
{ {
List<T> sendingValues(notBelowLeaves.size()); List<T> sendingValues(notBelowLeaves.size());
forAll(notBelowLeaves, leafI) forAll (notBelowLeaves, leafI)
{ {
sendingValues[leafI] = Values[notBelowLeaves[leafI]]; sendingValues[leafI] = Values[notBelowLeaves[leafI]];
} }
@ -276,15 +310,17 @@ void Pstream::scatterList
Pstream::scheduled, Pstream::scheduled,
belowID, belowID,
reinterpret_cast<const char*>(sendingValues.begin()), reinterpret_cast<const char*>(sendingValues.begin()),
sendingValues.byteSize() sendingValues.byteSize(),
tag,
comm
); );
} }
else else
{ {
OPstream toBelow(Pstream::scheduled, belowID); OPstream toBelow(Pstream::scheduled, belowID, 0, tag, comm);
// Send data destined for all other processors below belowID // Send data destined for all other processors below belowID
forAll(notBelowLeaves, leafI) forAll (notBelowLeaves, leafI)
{ {
label leafID = notBelowLeaves[leafI]; label leafID = notBelowLeaves[leafI];
toBelow << Values[leafID]; toBelow << Values[leafID];
@ -303,15 +339,15 @@ void Pstream::scatterList
template <class T> template <class T>
void Pstream::scatterList(List<T>& Values) void Pstream::scatterList(List<T>& Values, const int tag, const label comm)
{ {
if (Pstream::nProcs() < Pstream::nProcsSimpleSum()) if (Pstream::nProcs(comm) < Pstream::nProcsSimpleSum())
{ {
scatterList(Pstream::linearCommunication(), Values); scatterList(Pstream::linearCommunication(comm), Values, tag, comm);
} }
else else
{ {
scatterList(Pstream::treeCommunication(), Values); scatterList(Pstream::treeCommunication(comm), Values, tag, comm);
} }
} }
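The list variants expect one slot per processor of the given communicator; a sketch with assumed names:

    label comm = Pstream::worldComm;       // placeholder communicator
    const label nLocalCells = 0;           // stands for e.g. mesh.nCells()

    labelList nCellsPerProc(Pstream::nProcs(comm), 0);
    nCellsPerProc[Pstream::myProcNo(comm)] = nLocalCells;

    // gatherList fills the master's copy; scatterList completes everyone's
    Pstream::gatherList(nCellsPerProc, Pstream::msgType(), comm);
    Pstream::scatterList(nCellsPerProc, Pstream::msgType(), comm);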

View file

@ -28,7 +28,6 @@ License
#include "objectRegistry.H" #include "objectRegistry.H"
#include "PstreamReduceOps.H" #include "PstreamReduceOps.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
Foam::Istream& Foam::regIOobject::readStream() Foam::Istream& Foam::regIOobject::readStream()

View file

@ -892,6 +892,7 @@ Foam::BlockMatrixAgglomeration<Type>::restrictMatrix() const
nCoarseEqns_, nCoarseEqns_,
coarseOwner, coarseOwner,
coarseNeighbour, coarseNeighbour,
Pstream::worldComm, //HJ, AMG Comm fineMesh.comm(),
true true
) )
); );
@ -953,7 +954,8 @@ Foam::BlockMatrixAgglomeration<Type>::restrictMatrix() const
coarseInterfaces, coarseInterfaces,
fineInterface, fineInterface,
fineInterface.interfaceInternalField(agglomIndex_), fineInterface.interfaceInternalField(agglomIndex_),
fineInterfaceAddr[intI] fineInterfaceAddr[intI],
Pstream::worldComm //HJ, AMG Comm fineMesh.comm(),
).ptr() ).ptr()
); );
} }

View file

@ -66,7 +66,7 @@ public:
{} {}
//-Destructor //- Destructor
virtual ~regionCoupleLduInterfaceField(); virtual ~regionCoupleLduInterfaceField();
}; };

View file

@ -132,6 +132,15 @@ public:
virtual void expandAddrToZone(labelField&) const = 0; virtual void expandAddrToZone(labelField&) const = 0;
// Communications support
//- Return communicator used for parallel communication
virtual int comm() const = 0;
//- Return message tag used for sending
virtual int tag() const = 0;
// Transfer buffer access // Transfer buffer access
//- Return contents of the label transfer buffer //- Return contents of the label transfer buffer

View file

@ -110,6 +110,15 @@ public:
virtual const tensorField& reverseT() const = 0; virtual const tensorField& reverseT() const = 0;
// Communications support
//- Return communicator used for parallel communication
virtual int comm() const = 0;
//- Return message tag used for sending
virtual int tag() const = 0;
// Transfer buffer access // Transfer buffer access
//- Return contents of the label transfer buffer //- Return contents of the label transfer buffer

View file

@ -76,9 +76,8 @@ public:
processorLduInterface(); processorLduInterface();
// Destructor //- Destructor
virtual ~processorLduInterface();
virtual ~processorLduInterface();
// Member Functions // Member Functions
@ -95,6 +94,15 @@ public:
virtual const tensorField& forwardT() const = 0; virtual const tensorField& forwardT() const = 0;
// Communications support
//- Return communicator used for parallel communication
virtual int comm() const = 0;
//- Return message tag used for sending
virtual int tag() const = 0;
// Transfer functions // Transfer functions
//- Raw send function //- Raw send function

View file

@ -36,6 +36,8 @@ void Foam::processorLduInterface::send
const UList<Type>& f const UList<Type>& f
) const ) const
{ {
label nBytes = f.byteSize();
if (commsType == Pstream::blocking || commsType == Pstream::scheduled) if (commsType == Pstream::blocking || commsType == Pstream::scheduled)
{ {
OPstream::write OPstream::write
@ -43,7 +45,9 @@ void Foam::processorLduInterface::send
commsType, commsType,
neighbProcNo(), neighbProcNo(),
reinterpret_cast<const char*>(f.begin()), reinterpret_cast<const char*>(f.begin()),
f.byteSize() nBytes,
tag(),
comm()
); );
} }
else if (commsType == Pstream::nonBlocking) else if (commsType == Pstream::nonBlocking)
@ -55,18 +59,22 @@ void Foam::processorLduInterface::send
commsType, commsType,
neighbProcNo(), neighbProcNo(),
receiveBuf_.begin(), receiveBuf_.begin(),
receiveBuf_.size() receiveBuf_.size(),
tag(),
comm()
); );
resizeBuf(sendBuf_, f.byteSize()); resizeBuf(sendBuf_, nBytes);
memcpy(sendBuf_.begin(), f.begin(), f.byteSize()); memcpy(sendBuf_.begin(), f.begin(), nBytes);
OPstream::write OPstream::write
( (
commsType, commsType,
neighbProcNo(), neighbProcNo(),
sendBuf_.begin(), sendBuf_.begin(),
f.byteSize() nBytes,
tag(),
comm()
); );
} }
else else
@ -92,7 +100,9 @@ void Foam::processorLduInterface::receive
commsType, commsType,
neighbProcNo(), neighbProcNo(),
reinterpret_cast<char*>(f.begin()), reinterpret_cast<char*>(f.begin()),
f.byteSize() f.byteSize(),
tag(),
comm()
); );
} }
else if (commsType == Pstream::nonBlocking) else if (commsType == Pstream::nonBlocking)
@ -141,7 +151,7 @@ void Foam::processorLduInterface::compressedSend
resizeBuf(sendBuf_, nBytes); resizeBuf(sendBuf_, nBytes);
float *fArray = reinterpret_cast<float*>(sendBuf_.begin()); float *fArray = reinterpret_cast<float*>(sendBuf_.begin());
for (register label i=0; i<nm1; i++) for (register label i = 0; i < nm1; i++)
{ {
fArray[i] = sArray[i] - slast[i%nCmpts]; fArray[i] = sArray[i] - slast[i%nCmpts];
} }
@ -155,7 +165,9 @@ void Foam::processorLduInterface::compressedSend
commsType, commsType,
neighbProcNo(), neighbProcNo(),
sendBuf_.begin(), sendBuf_.begin(),
nBytes nBytes,
tag(),
comm()
); );
} }
else if (commsType == Pstream::nonBlocking) else if (commsType == Pstream::nonBlocking)
@ -167,7 +179,9 @@ void Foam::processorLduInterface::compressedSend
commsType, commsType,
neighbProcNo(), neighbProcNo(),
receiveBuf_.begin(), receiveBuf_.begin(),
receiveBuf_.size() receiveBuf_.size(),
tag(),
comm()
); );
OPstream::write OPstream::write
@ -175,7 +189,9 @@ void Foam::processorLduInterface::compressedSend
commsType, commsType,
neighbProcNo(), neighbProcNo(),
sendBuf_.begin(), sendBuf_.begin(),
nBytes nBytes,
tag(),
comm()
); );
} }
else else
@ -215,7 +231,9 @@ void Foam::processorLduInterface::compressedReceive
commsType, commsType,
neighbProcNo(), neighbProcNo(),
receiveBuf_.begin(), receiveBuf_.begin(),
nBytes nBytes,
tag(),
comm()
); );
} }
else if (commsType != Pstream::nonBlocking) else if (commsType != Pstream::nonBlocking)
@ -231,7 +249,7 @@ void Foam::processorLduInterface::compressedReceive
scalar *sArray = reinterpret_cast<scalar*>(f.begin()); scalar *sArray = reinterpret_cast<scalar*>(f.begin());
const scalar *slast = &sArray[nm1]; const scalar *slast = &sArray[nm1];
for (register label i=0; i<nm1; i++) for (register label i = 0; i < nm1; i++)
{ {
sArray[i] = fArray[i] + slast[i%nCmpts]; sArray[i] = fArray[i] + slast[i%nCmpts];
} }
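With tag() and comm() now supplied by the concrete interface, a caller no longer passes them explicitly; a sketch of a blocking exchange across any processorLduInterface (interface and nFaces are assumed names):

    const label nFaces = 0;            // size of the coupled patch, assumed
    scalarField sendBuf(nFaces, 0.0);  // values shipped to the neighbour
    scalarField recvBuf(nFaces);

    interface.send(Pstream::blocking, sendBuf);
    interface.receive<scalar>(Pstream::blocking, recvBuf);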

View file

@ -61,7 +61,6 @@ public:
//- Destructor //- Destructor
virtual ~regionCoupleLduInterface(); virtual ~regionCoupleLduInterface();
}; };

View file

@ -264,6 +264,7 @@ void Foam::GAMGAgglomeration::agglomerateLduAddressing
nCoarseCells, nCoarseCells,
coarseOwner, coarseOwner,
coarseNeighbour, coarseNeighbour,
fineMesh.comm(),
true true
) )
); );
@ -298,7 +299,8 @@ void Foam::GAMGAgglomeration::agglomerateLduAddressing
( (
restrictMap restrictMap
), ),
fineInterfaceAddr[inti] fineInterfaceAddr[inti],
fineMesh.comm() // Set up comm per level?
).ptr() ).ptr()
); );

View file

@ -132,14 +132,16 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
), ),
( (
lduMesh, lduMesh,
coarseInterfaces, coarseInterfaces,
fineInterface, fineInterface,
localRestrictAddressing, localRestrictAddressing,
neighbourRestrictAddressing neighbourRestrictAddressing,
coarseComm
) )
); );
@ -154,7 +156,8 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );
@ -162,7 +165,10 @@ public:
//- Construct from fine-level interface, //- Construct from fine-level interface,
// local and neighbour restrict addressing // local and neighbour restrict addressing
AMGInterface(const lduPrimitiveMesh& lduMesh) AMGInterface
(
const lduPrimitiveMesh& lduMesh
)
: :
lduMesh_(lduMesh) lduMesh_(lduMesh)
{} {}

View file

@ -35,7 +35,8 @@ Foam::autoPtr<Foam::AMGInterface> Foam::AMGInterface::New
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
) )
{ {
word coupleType(fineInterface.type()); word coupleType(fineInterface.type());
@ -53,7 +54,8 @@ Foam::autoPtr<Foam::AMGInterface> Foam::AMGInterface::New
" const lduInterfacePtrsList& coarseInterfaces,\n" " const lduInterfacePtrsList& coarseInterfaces,\n"
" const lduInterface& fineInterface,\n" " const lduInterface& fineInterface,\n"
" const labelField& localRestrictAddressing,\n" " const labelField& localRestrictAddressing,\n"
" const labelField& neighbourRestrictAddressing\n" " const labelField& neighbourRestrictAddressing,\n"
" const label coarseComm\n"
")" ")"
) << "Unknown AMGInterface type " << coupleType << ".\n" ) << "Unknown AMGInterface type " << coupleType << ".\n"
<< "Valid AMGInterface types are :" << "Valid AMGInterface types are :"
@ -69,7 +71,8 @@ Foam::autoPtr<Foam::AMGInterface> Foam::AMGInterface::New
coarseInterfaces, coarseInterfaces,
fineInterface, fineInterface,
localRestrictAddressing, localRestrictAddressing,
neighbourRestrictAddressing neighbourRestrictAddressing,
coarseComm
) )
); );
} }
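A sketch of a coarse-level construction call with the new communicator argument, in the spirit of the agglomeration code elsewhere in this commit (all names assumed):

    autoPtr<AMGInterface> coarsePatch = AMGInterface::New
    (
        coarseMesh,                    // coarse-level lduMesh
        coarseInterfaces,
        fineInterface,
        localRestrictAddressing,
        neighbourRestrictAddressing,
        fineMesh.comm()                // coarse level reuses fine communicator
    );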

View file

@ -48,7 +48,8 @@ Foam::cyclicAMGInterface::cyclicAMGInterface
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm // Not needed
) )
: :
AMGInterface(lduMesh), AMGInterface(lduMesh),

View file

@ -84,13 +84,13 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );
// Destructor //- Destructor
virtual ~cyclicAMGInterface();
virtual ~cyclicAMGInterface();
// Member Functions // Member Functions

View file

@ -48,7 +48,8 @@ Foam::cyclicGGIAMGInterface::cyclicGGIAMGInterface
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
) )
: :
ggiAMGInterface ggiAMGInterface
@ -57,7 +58,8 @@ Foam::cyclicGGIAMGInterface::cyclicGGIAMGInterface
coarseInterfaces, coarseInterfaces,
fineInterface, fineInterface,
localRestrictAddressing, localRestrictAddressing,
neighbourRestrictAddressing neighbourRestrictAddressing,
coarseComm
) )
{} {}

View file

@ -70,7 +70,8 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );

View file

@ -195,7 +195,8 @@ Foam::ggiAMGInterface::ggiAMGInterface
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
) )
: :
AMGInterface(lduMesh), AMGInterface(lduMesh),
@ -203,6 +204,8 @@ Foam::ggiAMGInterface::ggiAMGInterface
zoneSize_(0), zoneSize_(0),
zoneAddressing_(), zoneAddressing_(),
procMasterFaces_(), procMasterFaces_(),
comm_(coarseComm),
tag_(refCast<const ggiLduInterface>(fineInterface).tag()),
mapPtr_(NULL) mapPtr_(NULL)
{ {
// New algorithm will assemble local clusters on the master side and // New algorithm will assemble local clusters on the master side and

View file

@ -74,8 +74,15 @@ class ggiAMGInterface
// to allow the slave to insert faces in the same order // to allow the slave to insert faces in the same order
labelListList procMasterFaces_; labelListList procMasterFaces_;
// Parallel communication // Parallel communication
//- Communicator to use for parallel communication
const label comm_;
//- Message tag used for sending
const int tag_;
//- Map-distribute comms tool //- Map-distribute comms tool
mutable mapDistribute* mapPtr_; mutable mapDistribute* mapPtr_;
@ -118,7 +125,8 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );
@ -137,6 +145,21 @@ public:
} }
// Communications support
//- Return communicator used for parallel communication
virtual int comm() const
{
return comm_;
}
//- Return message tag used for sending
virtual int tag() const
{
return tag_;
}
// Agglomeration // Agglomeration
//- Agglomerating the given fine-level coefficients and return //- Agglomerating the given fine-level coefficients and return

View file

@ -54,14 +54,17 @@ Foam::mixingPlaneAMGInterface::mixingPlaneAMGInterface
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
) )
: :
AMGInterface(lduMesh), AMGInterface(lduMesh),
fineMixingPlaneInterface_ fineMixingPlaneInterface_
( (
refCast<const mixingPlaneLduInterface>(fineInterface) refCast<const mixingPlaneLduInterface>(fineInterface)
) ),
comm_(coarseComm),
tag_(refCast<const mixingPlaneLduInterface>(fineInterface).tag())
{} {}

View file

@ -65,6 +65,15 @@ class mixingPlaneAMGInterface
const mixingPlaneLduInterface& fineMixingPlaneInterface_; const mixingPlaneLduInterface& fineMixingPlaneInterface_;
// Parallel communication
//- Communicator to use for parallel communication
const label comm_;
//- Message tag used for sending
const int tag_;
// Private Member Functions // Private Member Functions
//- Disallow default bitwise copy construct //- Disallow default bitwise copy construct
@ -96,13 +105,13 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );
// Destructor //- Destructor
virtual ~mixingPlaneAMGInterface();
virtual ~mixingPlaneAMGInterface();
// Member Functions // Member Functions
@ -116,6 +125,21 @@ public:
} }
// Communications support
//- Return communicator used for parallel communication
virtual int comm() const
{
return comm_;
}
//- Return message tag used for sending
virtual int tag() const
{
return tag_;
}
// Agglomeration // Agglomeration
//- Agglomerating the given fine-level coefficients and return //- Agglomerating the given fine-level coefficients and return

View file

@ -48,11 +48,14 @@ Foam::processorAMGInterface::processorAMGInterface
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
) )
: :
AMGInterface(lduMesh), AMGInterface(lduMesh),
fineProcInterface_(refCast<const processorLduInterface>(fineInterface)) fineProcInterface_(refCast<const processorLduInterface>(fineInterface)),
comm_(coarseComm),
tag_(refCast<const processorLduInterface>(fineInterface).tag())
{ {
// Make a lookup table of entries for owner/neighbour // Make a lookup table of entries for owner/neighbour
HashTable<SLList<label>, label, Hash<label> > neighboursTable HashTable<SLList<label>, label, Hash<label> > neighboursTable

View file

@ -59,6 +59,12 @@ class processorAMGInterface
// agglomerated // agglomerated
const processorLduInterface& fineProcInterface_; const processorLduInterface& fineProcInterface_;
//- Communicator to use for parallel communication
const label comm_;
//- Message tag used for sending
const int tag_;
// Private Member Functions // Private Member Functions
@ -84,13 +90,13 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );
// Destructor //- Destructor
virtual ~processorAMGInterface();
virtual ~processorAMGInterface();
// Member Functions // Member Functions
@ -104,6 +110,21 @@ public:
} }
// Communications support
//- Return communicator used for parallel communication
virtual int comm() const
{
return comm_;
}
//- Return message tag used for sending
virtual int tag() const
{
return tag_;
}
// Interface transfer functions // Interface transfer functions
//- Initialise interface data transfer //- Initialise interface data transfer

View file

@ -48,7 +48,8 @@ Foam::regionCoupleAMGInterface::regionCoupleAMGInterface
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
) )
: :
ggiAMGInterface ggiAMGInterface
@ -57,7 +58,8 @@ Foam::regionCoupleAMGInterface::regionCoupleAMGInterface
coarseInterfaces, coarseInterfaces,
fineInterface, fineInterface,
localRestrictAddressing, localRestrictAddressing,
neighbourRestrictAddressing neighbourRestrictAddressing,
coarseComm
), ),
coupled_(fineInterface.coupled()) coupled_(fineInterface.coupled())
{} {}

View file

@ -46,7 +46,7 @@ namespace Foam
{ {
/*---------------------------------------------------------------------------*\ /*---------------------------------------------------------------------------*\
Class regionCoupleAMGInterface Declaration Class regionCoupleAMGInterface Declaration
\*---------------------------------------------------------------------------*/ \*---------------------------------------------------------------------------*/
class regionCoupleAMGInterface class regionCoupleAMGInterface
@ -76,13 +76,13 @@ public:
const lduInterfacePtrsList& coarseInterfaces, const lduInterfacePtrsList& coarseInterfaces,
const lduInterface& fineInterface, const lduInterface& fineInterface,
const labelField& localRestrictAddressing, const labelField& localRestrictAddressing,
const labelField& neighbourRestrictAddressing const labelField& neighbourRestrictAddressing,
const label coarseComm
); );
// Destructor //- Destructor
virtual ~regionCoupleAMGInterface();
virtual ~regionCoupleAMGInterface();
// Member Functions // Member Functions

View file

@ -59,10 +59,9 @@ public:
// Constructors // Constructors
// Destructor //- Destructor
virtual ~lduMesh()
virtual ~lduMesh() {}
{}
// Member Functions // Member Functions
@ -78,6 +77,10 @@ public:
//- Return a list of pointers for each patch //- Return a list of pointers for each patch
// with only those pointing to interfaces being set // with only those pointing to interfaces being set
virtual lduInterfacePtrsList interfaces() const = 0; virtual lduInterfacePtrsList interfaces() const = 0;
//- Return communicator used for parallel communication
virtual int comm() const = 0;
}; };

View file

@ -69,6 +69,9 @@ class lduPrimitiveMesh
// Note this must be held as a copy. HJ, 20/Feb/2009 // Note this must be held as a copy. HJ, 20/Feb/2009
lduSchedule patchSchedule_; lduSchedule patchSchedule_;
//- Communicator to use for parallel communication
const label comm_;
// Private Member Functions // Private Member Functions
@ -89,12 +92,14 @@ public:
( (
const label nCells, const label nCells,
const unallocLabelList& l, const unallocLabelList& l,
const unallocLabelList& u const unallocLabelList& u,
const label comm
) )
: :
lduAddressing(nCells), lduAddressing(nCells),
lowerAddr_(l), lowerAddr_(l),
upperAddr_(u) upperAddr_(u),
comm_(comm)
{} {}
@ -104,12 +109,14 @@ public:
const label nCells, const label nCells,
labelList& l, labelList& l,
labelList& u, labelList& u,
const label comm,
bool reUse bool reUse
) )
: :
lduAddressing(nCells), lduAddressing(nCells),
lowerAddr_(l, reUse), lowerAddr_(l, reUse),
upperAddr_(u, reUse) upperAddr_(u, reUse),
comm_(comm)
{} {}
@ -194,6 +201,12 @@ public:
{ {
return patchSchedule_; return patchSchedule_;
} }
//- Return communicator used for parallel communication
virtual label comm() const
{
return comm_;
}
}; };
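Construction now carries the communicator alongside the addressing, mirroring the GAMG agglomeration call earlier in this commit (names illustrative; the reUse form hands over the labelList storage):

    lduPrimitiveMesh coarseAddressing
    (
        nCoarseCells,
        coarseOwner,       // labelList, storage reused
        coarseNeighbour,
        fineMesh.comm(),   // communicator inherited from the fine mesh
        true               // reUse
    );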

View file

@ -111,9 +111,8 @@ public:
); );
// Destructor //- Destructor
virtual ~ggiPointPatch();
virtual ~ggiPointPatch();
// Member Functions // Member Functions
@ -122,6 +121,21 @@ public:
//- Return true because this patch is coupled //- Return true because this patch is coupled
virtual bool coupled() const; virtual bool coupled() const;
// Communications support
//- Return communicator used for communication
virtual label comm() const
{
return ggiPolyPatch_.comm();
}
//- Return message tag to use for communication
virtual int tag() const
{
return ggiPolyPatch_.tag();
}
}; };

View file

@ -124,9 +124,8 @@ public:
); );
// Destructor //- Destructor
virtual ~processorPointPatch();
virtual ~processorPointPatch();
// Member functions // Member functions
@ -175,6 +174,21 @@ public:
} }
// Communications support
//- Return communicator used for communication
virtual label comm() const
{
return procPolyPatch_.comm();
}
//- Return message tag to use for communication
virtual int tag() const
{
return procPolyPatch_.tag();
}
// Access functions for demand driven data // Access functions for demand driven data
//- Return mesh points //- Return mesh points

View file

@ -33,7 +33,8 @@ License
 Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
 (
     const labelListList& subMap,
-    const labelListList& constructMap
+    const labelListList& constructMap,
+    const int tag
 )
 {
     // Communications: send and receive processor
@ -74,7 +75,7 @@ Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
             slave++
         )
         {
-            IPstream fromSlave(Pstream::scheduled, slave);
+            IPstream fromSlave(Pstream::scheduled, slave, 0, tag);
             List<labelPair> nbrData(fromSlave);

             forAll (nbrData, i)
@ -95,18 +96,24 @@ Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
             slave++
         )
         {
-            OPstream toSlave(Pstream::scheduled, slave);
+            OPstream toSlave(Pstream::scheduled, slave, 0, tag);
             toSlave << allComms;
         }
     }
     else
     {
         {
-            OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
+            OPstream toMaster(Pstream::scheduled, Pstream::masterNo(), 0, tag);
             toMaster << allComms;
         }
         {
-            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
+            IPstream fromMaster
+            (
+                Pstream::scheduled,
+                Pstream::masterNo(),
+                0,
+                tag
+            );
             fromMaster >> allComms;
         }
     }
@ -156,7 +163,7 @@ const Foam::List<Foam::labelPair>& Foam::mapDistribute::schedule() const
     (
         new List<labelPair>
         (
-            schedule(subMap_, constructMap_)
+            schedule(subMap_, constructMap_, Pstream::msgType())
         )
     );
 }
@ -311,7 +318,7 @@ Foam::mapDistribute::mapDistribute(const mapDistribute& map)

 // * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //

-void Foam::mapDistribute::compact(const boolList& elemIsUsed)
+void Foam::mapDistribute::compact(const boolList& elemIsUsed, const int tag)
 {
     // 1. send back to sender. Have him delete the corresponding element
     //    from the submap and do the same to the constructMap locally
@ -319,7 +326,12 @@ void Foam::mapDistribute::compact(const boolList& elemIsUsed)
     // Send elemIsUsed field to neighbour. Use nonblocking code from
     // mapDistribute but in reverse order.
+    if (Pstream::parRun())
     {
+        label startOfRequests = Pstream::nRequests();
+
+        // Set up receives from neighbours
+
         List<boolList> sendFields(Pstream::nProcs());

         for (label domain = 0; domain < Pstream::nProcs(); domain++)
@ -340,7 +352,8 @@ void Foam::mapDistribute::compact(const boolList& elemIsUsed)
                     Pstream::nonBlocking,
                     domain,
                     reinterpret_cast<const char*>(subField.begin()),
-                    subField.size()*sizeof(bool)
+                    subField.size()*sizeof(bool),
+                    tag
                 );
             }
         }
@ -361,7 +374,8 @@ void Foam::mapDistribute::compact(const boolList& elemIsUsed)
                     Pstream::nonBlocking,
                     domain,
                     reinterpret_cast<char*>(recvFields[domain].begin()),
-                    recvFields[domain].size()*sizeof(bool)
+                    recvFields[domain].size()*sizeof(bool),
+                    tag
                 );
             }
         }
@ -382,8 +396,7 @@ void Foam::mapDistribute::compact(const boolList& elemIsUsed)

     // Wait for all to finish

-    OPstream::waitRequests();
-    IPstream::waitRequests();
+    Pstream::waitRequests(startOfRequests);

     // Compact out all submap entries that are referring to unused elements
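
The changes above set the pattern for the whole commit: every Pstream exchange
carries an explicit message tag, and non-blocking transfers record the number
of outstanding requests before posting, so that only the requests issued here
are waited on. A minimal sketch of the checkpoint idiom (the buffers and the
peer rank are illustrative, not taken from the commit):

    void taggedExchange
    (
        const List<char>& sendBuf,
        List<char>& recvBuf,
        const label nbr,   // neighbour processor number (assumed valid)
        const int tag      // message tag separating this exchange from others
    )
    {
        // Checkpoint: requests already outstanding before this exchange
        label startOfRequests = Pstream::nRequests();

        // Post one non-blocking send and one non-blocking receive
        OPstream::write
        (
            Pstream::nonBlocking,
            nbr,
            sendBuf.begin(),
            sendBuf.size(),
            tag
        );

        IPstream::read
        (
            Pstream::nonBlocking,
            nbr,
            recvBuf.begin(),
            recvBuf.size(),
            tag
        );

        // Wait only for the two requests posted above; requests belonging
        // to other concurrently running exchanges are left untouched
        Pstream::waitRequests(startOfRequests);
    }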

View file

@ -166,7 +166,8 @@ public:
         static List<labelPair> schedule
         (
             const labelListList& subMap,
-            const labelListList& constructMap
+            const labelListList& constructMap,
+            const int tag
         );

         //- Return a schedule. Demand driven. See above.
@ -178,8 +179,11 @@ public:
         //- Compact maps. Gets per field a bool whether it is used locally
         //  and works out itself what this side and sender side can remove
         //  from maps.
-        void compact(const boolList& elemIsUsed);
+        void compact
+        (
+            const boolList& elemIsUsed,
+            const int tag = Pstream::msgType()
+        );

         //- Distribute data. Note:schedule only used for Pstream::scheduled
         //  for now, all others just use send-to-all, receive-from-all.
@ -191,7 +195,8 @@ public:
             const label constructSize,
             const labelListList& subMap,
             const labelListList& constructMap,
-            List<T>&
+            List<T>&,
+            const int tag = Pstream::msgType()
         );

         //- Distribute data. If multiple processors writing to same
@ -206,12 +211,17 @@ public:
             const labelListList& constructMap,
             List<T>&,
             const CombineOp& cop,
-            const T& nullValue
+            const T& nullValue,
+            const int tag = Pstream::msgType()
         );

         //- Distribute data using default commsType.
         template<class T>
-        void distribute(List<T>& fld) const
+        void distribute
+        (
+            List<T>& fld,
+            const int tag = Pstream::msgType()
+        ) const
         {
             if
             (
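
Every new tag parameter defaults to Pstream::msgType(), so existing call sites
compile unchanged; only callers that overlap several exchanges on the same map
need to pass distinct tags. A hypothetical caller (construction of the map and
of the two fields is not shown):

    // Push two fields through the same mapDistribute without cross-talk:
    // the second exchange is kept apart from the first by its message tag
    map.distribute(fldA);                          // default Pstream::msgType()
    map.distribute(fldB, Pstream::msgType() + 1);  // explicitly tagged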

View file

@ -36,9 +36,35 @@ void Foam::mapDistribute::distribute
     const label constructSize,
     const labelListList& subMap,
     const labelListList& constructMap,
-    List<T>& field
+    List<T>& field,
+    const int tag
 )
 {
+    if (!Pstream::parRun())
+    {
+        // Do only me to me.
+
+        const labelList& mySubMap = subMap[Pstream::myProcNo()];
+
+        List<T> subField(mySubMap.size());
+
+        forAll(mySubMap, i)
+        {
+            subField[i] = field[mySubMap[i]];
+        }
+
+        // Receive sub field from myself (subField)
+        const labelList& map = constructMap[Pstream::myProcNo()];
+
+        field.setSize(constructSize);
+
+        forAll(map, i)
+        {
+            field[map[i]] = subField[i];
+        }
+
+        return;
+    }
+
     if (commsType == Pstream::blocking)
     {
         // Since buffered sending can reuse the field to collect the
@ -51,7 +77,7 @@ void Foam::mapDistribute::distribute
             if (domain != Pstream::myProcNo() && map.size())
             {
-                OPstream toNbr(Pstream::blocking, domain);
+                OPstream toNbr(Pstream::blocking, domain, 0, tag);
                 toNbr << UIndirectList<T>(field, map);
             }
         }
@ -82,7 +108,7 @@ void Foam::mapDistribute::distribute
             if (domain != Pstream::myProcNo() && map.size())
             {
-                IPstream fromNbr(Pstream::blocking, domain);
+                IPstream fromNbr(Pstream::blocking, domain, 0, tag);
                 List<T> subField(fromNbr);

                 checkReceivedSize(domain, map.size(), subField.size());
@ -116,28 +142,52 @@ void Foam::mapDistribute::distribute
         forAll(schedule, i)
         {
             const labelPair& twoProcs = schedule[i];
+
+            // twoProcs is a swap pair of processors. The first one is the
+            // one that needs to send first and then receive.
+
             label sendProc = twoProcs[0];
             label recvProc = twoProcs[1];

             if (Pstream::myProcNo() == sendProc)
             {
-                // I am sender. Send to recvProc.
-                OPstream toNbr(Pstream::scheduled, recvProc);
-                toNbr << UIndirectList<T>(field, subMap[recvProc]);
+                // I am send first, receive next
+                {
+                    OPstream toNbr(Pstream::scheduled, recvProc, 0, tag);
+                    toNbr << UIndirectList<T>(field, subMap[recvProc]);
+                }
+                {
+                    IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag);
+                    List<T> subField(fromNbr);
+
+                    const labelList& map = constructMap[recvProc];
+
+                    checkReceivedSize(recvProc, map.size(), subField.size());
+
+                    forAll(map, i)
+                    {
+                        newField[map[i]] = subField[i];
+                    }
+                }
             }
             else
             {
-                // I am receiver. Receive from sendProc.
-                IPstream fromNbr(Pstream::scheduled, sendProc);
-                List<T> subField(fromNbr);
-                const labelList& map = constructMap[sendProc];
-
-                checkReceivedSize(recvProc, map.size(), subField.size());
-
-                forAll(map, i)
-                {
-                    newField[map[i]] = subField[i];
-                }
+                // I am receive first, send next
+                {
+                    IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag);
+                    List<T> subField(fromNbr);
+
+                    const labelList& map = constructMap[sendProc];
+
+                    checkReceivedSize(sendProc, map.size(), subField.size());
+
+                    forAll(map, i)
+                    {
+                        newField[map[i]] = subField[i];
+                    }
+                }
+                {
+                    OPstream toNbr(Pstream::scheduled, sendProc, 0, tag);
+                    toNbr << UIndirectList<T>(field, subMap[sendProc]);
+                }
             }
         }
@ -146,129 +196,173 @@ void Foam::mapDistribute::distribute
     }
     else if (commsType == Pstream::nonBlocking)
     {
+        label nOutstanding = Pstream::nRequests();
+
         if (!contiguous<T>())
         {
-            FatalErrorIn
-            (
-                "template<class T>\n"
-                "void mapDistribute::distribute\n"
-                "(\n"
-                "    const Pstream::commsTypes commsType,\n"
-                "    const List<labelPair>& schedule,\n"
-                "    const label constructSize,\n"
-                "    const labelListList& subMap,\n"
-                "    const labelListList& constructMap,\n"
-                "    List<T>& field\n"
-                ")\n"
-            )   << "Non-blocking only supported for contiguous data."
-                << exit(FatalError);
-        }
-
-        // Set up sends to neighbours
-
-        List<List<T> > sendFields(Pstream::nProcs());
-
-        for (label domain = 0; domain < Pstream::nProcs(); domain++)
-        {
-            const labelList& map = subMap[domain];
-
-            if (domain != Pstream::myProcNo() && map.size())
-            {
-                List<T>& subField = sendFields[domain];
-                subField.setSize(map.size());
-                forAll(map, i)
-                {
-                    subField[i] = field[map[i]];
-                }
-
-                OPstream::write
-                (
-                    Pstream::nonBlocking,
-                    domain,
-                    reinterpret_cast<const char*>(subField.begin()),
-                    subField.size()*sizeof(T)
-                );
-            }
-        }
-
-        // Set up receives from neighbours
-
-        List<List<T> > recvFields(Pstream::nProcs());
-
-        for (label domain = 0; domain < Pstream::nProcs(); domain++)
-        {
-            const labelList& map = constructMap[domain];
-
-            if (domain != Pstream::myProcNo() && map.size())
-            {
-                recvFields[domain].setSize(map.size());
-                IPstream::read
-                (
-                    Pstream::nonBlocking,
-                    domain,
-                    reinterpret_cast<char*>(recvFields[domain].begin()),
-                    recvFields[domain].size()*sizeof(T)
-                );
-            }
-        }
-
-        // Set up 'send' to myself
-
-        {
-            const labelList& map = subMap[Pstream::myProcNo()];
-            List<T>& subField = sendFields[Pstream::myProcNo()];
-            subField.setSize(map.size());
-            forAll(map, i)
-            {
-                subField[i] = field[map[i]];
-            }
-        }
-
-        // Combine bits. Note that can reuse field storage
-
-        field.setSize(constructSize);
-
-        // Receive sub field from myself (sendFields[Pstream::myProcNo()])
-        {
-            const labelList& map = constructMap[Pstream::myProcNo()];
-            const List<T>& subField = sendFields[Pstream::myProcNo()];
-
-            forAll(map, i)
-            {
-                field[map[i]] = subField[i];
-            }
-        }
-
-        // Wait for all to finish
-
-        OPstream::waitRequests();
-        IPstream::waitRequests();
-
-        // Collect neighbour fields
-
-        for (label domain = 0; domain < Pstream::nProcs(); domain++)
-        {
-            const labelList& map = constructMap[domain];
-
-            if (domain != Pstream::myProcNo() && map.size())
-            {
-                checkReceivedSize
-                (
-                    domain,
-                    map.size(),
-                    recvFields[domain].size()
-                );
-
-                forAll(map, i)
-                {
-                    field[map[i]] = recvFields[domain][i];
-                }
-            }
-        }
+            // Stream data into buffer
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = subMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    // Put data into send buffer
+                    OPstream toDomain(Pstream::nonBlocking, domain, 0, tag);
+                    toDomain << UIndirectList<T>(field, map);
+                }
+            }
+
+            // Start receiving. Do not block.
+            {
+                // Set up 'send' to myself
+                const labelList& mySubMap = subMap[Pstream::myProcNo()];
+                List<T> mySubField(mySubMap.size());
+                forAll(mySubMap, i)
+                {
+                    mySubField[i] = field[mySubMap[i]];
+                }
+
+                // Combine bits. Note that can reuse field storage
+                field.setSize(constructSize);
+
+                // Receive sub field from myself
+                {
+                    const labelList& map = constructMap[Pstream::myProcNo()];
+                    forAll(map, i)
+                    {
+                        field[map[i]] = mySubField[i];
+                    }
+                }
+            }
+
+            // Block ourselves, waiting only for the current comms
+            Pstream::waitRequests(nOutstanding);
+
+            // Consume
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = constructMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    IPstream str(Pstream::nonBlocking, domain, 0, tag);
+                    List<T> recvField(str);
+
+                    checkReceivedSize(domain, map.size(), recvField.size());
+
+                    forAll(map, i)
+                    {
+                        field[map[i]] = recvField[i];
+                    }
+                }
+            }
+        }
+        else
+        {
+            // Set up sends to neighbours
+            List<List<T> > sendFields(Pstream::nProcs());
+
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = subMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    List<T>& subField = sendFields[domain];
+                    subField.setSize(map.size());
+                    forAll(map, i)
+                    {
+                        subField[i] = field[map[i]];
+                    }
+
+                    OPstream::write
+                    (
+                        Pstream::nonBlocking,
+                        domain,
+                        reinterpret_cast<const char*>(subField.begin()),
+                        subField.byteSize(),
+                        tag
+                    );
+                }
+            }
+
+            // Set up receives from neighbours
+            List<List<T> > recvFields(Pstream::nProcs());
+
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = constructMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    recvFields[domain].setSize(map.size());
+                    IPstream::read
+                    (
+                        Pstream::nonBlocking,
+                        domain,
+                        reinterpret_cast<char*>(recvFields[domain].begin()),
+                        recvFields[domain].byteSize(),
+                        tag
+                    );
+                }
+            }
+
+            // Set up 'send' to myself
+            {
+                const labelList& map = subMap[Pstream::myProcNo()];
+
+                List<T>& subField = sendFields[Pstream::myProcNo()];
+                subField.setSize(map.size());
+                forAll(map, i)
+                {
+                    subField[i] = field[map[i]];
+                }
+            }
+
+            // Combine bits. Note that can reuse field storage
+            field.setSize(constructSize);
+
+            // Receive sub field from myself (sendFields[Pstream::myProcNo()])
+            {
+                const labelList& map = constructMap[Pstream::myProcNo()];
+                const List<T>& subField = sendFields[Pstream::myProcNo()];
+
+                forAll(map, i)
+                {
+                    field[map[i]] = subField[i];
+                }
+            }
+
+            // Wait for all to finish
+            Pstream::waitRequests(nOutstanding);
+
+            // Collect neighbour fields
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = constructMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    const List<T>& subField = recvFields[domain];
+
+                    checkReceivedSize(domain, map.size(), subField.size());
+
+                    forAll(map, i)
+                    {
+                        field[map[i]] = subField[i];
+                    }
+                }
+            }
+        }
     }
@ -294,9 +388,35 @@ void Foam::mapDistribute::distribute
     const labelListList& constructMap,
     List<T>& field,
     const CombineOp& cop,
-    const T& nullValue
+    const T& nullValue,
+    const int tag
 )
 {
+    if (!Pstream::parRun())
+    {
+        // Do only me to me.
+
+        const labelList& mySubMap = subMap[Pstream::myProcNo()];
+
+        List<T> subField(mySubMap.size());
+
+        forAll(mySubMap, i)
+        {
+            subField[i] = field[mySubMap[i]];
+        }
+
+        // Receive sub field from myself (subField)
+        const labelList& map = constructMap[Pstream::myProcNo()];
+
+        field.setSize(constructSize);
+        field = nullValue;
+
+        forAll(map, i)
+        {
+            cop(field[map[i]], subField[i]);
+        }
+
+        return;
+    }
+
     if (commsType == Pstream::blocking)
     {
         // Since buffered sending can reuse the field to collect the
@ -309,7 +429,7 @@ void Foam::mapDistribute::distribute
             if (domain != Pstream::myProcNo() && map.size())
             {
-                OPstream toNbr(Pstream::blocking, domain);
+                OPstream toNbr(Pstream::blocking, domain, 0, tag);
                 toNbr << UIndirectList<T>(field, map);
             }
         }
@ -341,28 +461,10 @@ void Foam::mapDistribute::distribute
             if (domain != Pstream::myProcNo() && map.size())
             {
-                IPstream fromNbr(Pstream::blocking, domain);
+                IPstream fromNbr(Pstream::blocking, domain, 0, tag);
                 List<T> subField(fromNbr);

-                if (subField.size() != map.size())
-                {
-                    FatalErrorIn
-                    (
-                        "template<class T>\n"
-                        "void mapDistribute::distribute\n"
-                        "(\n"
-                        "    const Pstream::commsTypes commsType,\n"
-                        "    const List<labelPair>& schedule,\n"
-                        "    const label constructSize,\n"
-                        "    const labelListList& subMap,\n"
-                        "    const labelListList& constructMap,\n"
-                        "    List<T>& field\n"
-                        ")\n"
-                    )   << "Expected from processor " << domain
-                        << " " << map.size() << " but received "
-                        << subField.size() << " elements."
-                        << abort(FatalError);
-                }
+                checkReceivedSize(domain, map.size(), subField.size());

                 forAll(map, i)
                 {
@ -393,195 +495,227 @@ void Foam::mapDistribute::distribute
         forAll(schedule, i)
         {
             const labelPair& twoProcs = schedule[i];
+
+            // twoProcs is a swap pair of processors. The first one is the
+            // one that needs to send first and then receive.
+
             label sendProc = twoProcs[0];
             label recvProc = twoProcs[1];

             if (Pstream::myProcNo() == sendProc)
             {
-                // I am sender. Send to recvProc.
-                OPstream toNbr(Pstream::scheduled, recvProc);
-                toNbr << UIndirectList<T>(field, subMap[recvProc]);
+                // I am send first, receive next
+                {
+                    OPstream toNbr(Pstream::scheduled, recvProc, 0, tag);
+                    toNbr << UIndirectList<T>(field, subMap[recvProc]);
+                }
+                {
+                    IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag);
+                    List<T> subField(fromNbr);
+
+                    const labelList& map = constructMap[recvProc];
+
+                    checkReceivedSize(recvProc, map.size(), subField.size());
+
+                    forAll(map, i)
+                    {
+                        cop(newField[map[i]], subField[i]);
+                    }
+                }
             }
             else
             {
-                // I am receiver. Receive from sendProc.
-                IPstream fromNbr(Pstream::scheduled, sendProc);
-                List<T> subField(fromNbr);
-                const labelList& map = constructMap[sendProc];
-
-                if (subField.size() != map.size())
-                {
-                    FatalErrorIn
-                    (
-                        "template<class T>\n"
-                        "void mapDistribute::distribute\n"
-                        "(\n"
-                        "    const Pstream::commsTypes commsType,\n"
-                        "    const List<labelPair>& schedule,\n"
-                        "    const label constructSize,\n"
-                        "    const labelListList& subMap,\n"
-                        "    const labelListList& constructMap,\n"
-                        "    List<T>& field\n"
-                        ")\n"
-                    )   << "Expected from processor " << sendProc
-                        << " " << map.size() << " but received "
-                        << subField.size() << " elements."
-                        << abort(FatalError);
-                }
-
-                forAll(map, i)
-                {
-                    cop(newField[map[i]], subField[i]);
-                }
+                // I am receive first, send next
+                {
+                    IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag);
+                    List<T> subField(fromNbr);
+
+                    const labelList& map = constructMap[sendProc];
+
+                    checkReceivedSize(sendProc, map.size(), subField.size());
+
+                    forAll(map, i)
+                    {
+                        cop(newField[map[i]], subField[i]);
+                    }
+                }
+                {
+                    OPstream toNbr(Pstream::scheduled, sendProc, 0, tag);
+                    toNbr << UIndirectList<T>(field, subMap[sendProc]);
+                }
             }
         }

         field.transfer(newField);
     }
     else if (commsType == Pstream::nonBlocking)
     {
+        label nOutstanding = Pstream::nRequests();
+
         if (!contiguous<T>())
         {
-            FatalErrorIn
-            (
-                "template<class T>\n"
-                "void mapDistribute::distribute\n"
-                "(\n"
-                "    const Pstream::commsTypes commsType,\n"
-                "    const List<labelPair>& schedule,\n"
-                "    const label constructSize,\n"
-                "    const labelListList& subMap,\n"
-                "    const labelListList& constructMap,\n"
-                "    List<T>& field\n"
-                ")\n"
-            )   << "Non-blocking only supported for contiguous data."
-                << exit(FatalError);
-        }
-
-        // Set up sends to neighbours
-
-        List<List<T> > sendFields(Pstream::nProcs());
-
-        for (label domain = 0; domain < Pstream::nProcs(); domain++)
-        {
-            const labelList& map = subMap[domain];
-
-            if (domain != Pstream::myProcNo() && map.size())
-            {
-                List<T>& subField = sendFields[domain];
-                subField.setSize(map.size());
-                forAll(map, i)
-                {
-                    subField[i] = field[map[i]];
-                }
-
-                OPstream::write
-                (
-                    Pstream::nonBlocking,
-                    domain,
-                    reinterpret_cast<const char*>(subField.begin()),
-                    subField.size()*sizeof(T)
-                );
-            }
-        }
-
-        // Set up receives from neighbours
-
-        List<List<T> > recvFields(Pstream::nProcs());
-
-        for (label domain = 0; domain < Pstream::nProcs(); domain++)
-        {
-            const labelList& map = constructMap[domain];
-
-            if (domain != Pstream::myProcNo() && map.size())
-            {
-                recvFields[domain].setSize(map.size());
-                IPstream::read
-                (
-                    Pstream::nonBlocking,
-                    domain,
-                    reinterpret_cast<char*>(recvFields[domain].begin()),
-                    recvFields[domain].size()*sizeof(T)
-                );
-            }
-        }
-
-        // Set up 'send' to myself
-
-        {
-            const labelList& map = subMap[Pstream::myProcNo()];
-            List<T>& subField = sendFields[Pstream::myProcNo()];
-            subField.setSize(map.size());
-            forAll(map, i)
-            {
-                subField[i] = field[map[i]];
-            }
-        }
-
-        // Combine bits. Note that can reuse field storage
-
-        field.setSize(constructSize);
-        field = nullValue;
-
-        // Receive sub field from myself (subField)
-        {
-            const labelList& map = constructMap[Pstream::myProcNo()];
-            const List<T>& subField = sendFields[Pstream::myProcNo()];
-
-            forAll(map, i)
-            {
-                cop(field[map[i]], subField[i]);
-            }
-        }
-
-        // Wait for all to finish
-
-        OPstream::waitRequests();
-        IPstream::waitRequests();
-
-        // Collect neighbour fields
-
-        for (label domain = 0; domain < Pstream::nProcs(); domain++)
-        {
-            const labelList& map = constructMap[domain];
-
-            if (domain != Pstream::myProcNo() && map.size())
-            {
-                if (recvFields[domain].size() != map.size())
-                {
-                    FatalErrorIn
-                    (
-                        "template<class T>\n"
-                        "void mapDistribute::distribute\n"
-                        "(\n"
-                        "    const Pstream::commsTypes commsType,\n"
-                        "    const List<labelPair>& schedule,\n"
-                        "    const label constructSize,\n"
-                        "    const labelListList& subMap,\n"
-                        "    const labelListList& constructMap,\n"
-                        "    List<T>& field\n"
-                        ")\n"
-                    )   << "Expected from processor " << domain
-                        << " " << map.size() << " but received "
-                        << recvFields[domain].size() << " elements."
-                        << abort(FatalError);
-                }
-
-                forAll(map, i)
-                {
-                    cop(field[map[i]], recvFields[domain][i]);
-                }
-            }
-        }
+            // Stream data into buffer
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = subMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    // Put data into send buffer
+                    OPstream toDomain(Pstream::nonBlocking, domain, 0, tag);
+                    toDomain << UIndirectList<T>(field, map);
+                }
+            }
+
+            // Start receiving. Do not block.
+            {
+                // Set up 'send' to myself
+                List<T> mySubField(field, subMap[Pstream::myProcNo()]);
+
+                // Combine bits. Note that can reuse field storage
+                field.setSize(constructSize);
+                field = nullValue;
+
+                // Receive sub field from myself
+                {
+                    const labelList& map = constructMap[Pstream::myProcNo()];
+                    forAll(map, i)
+                    {
+                        cop(field[map[i]], mySubField[i]);
+                    }
+                }
+            }
+
+            // Block ourselves, waiting only for the current comms
+            Pstream::waitRequests(nOutstanding);
+
+            // Consume
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = constructMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    IPstream str(Pstream::nonBlocking, domain, 0, tag);
+                    List<T> recvField(str);
+
+                    checkReceivedSize(domain, map.size(), recvField.size());
+
+                    forAll(map, i)
+                    {
+                        cop(field[map[i]], recvField[i]);
+                    }
+                }
+            }
+        }
+        else
+        {
+            // Set up sends to neighbours
+            List<List<T> > sendFields(Pstream::nProcs());
+
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = subMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    List<T>& subField = sendFields[domain];
+                    subField.setSize(map.size());
+                    forAll(map, i)
+                    {
+                        subField[i] = field[map[i]];
+                    }
+
+                    OPstream::write
+                    (
+                        Pstream::nonBlocking,
+                        domain,
+                        reinterpret_cast<const char*>(subField.begin()),
+                        subField.size()*sizeof(T),
+                        tag
+                    );
+                }
+            }
+
+            // Set up receives from neighbours
+            List<List<T> > recvFields(Pstream::nProcs());
+
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = constructMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    recvFields[domain].setSize(map.size());
+                    IPstream::read
+                    (
+                        Pstream::nonBlocking,
+                        domain,
+                        reinterpret_cast<char*>(recvFields[domain].begin()),
+                        recvFields[domain].size()*sizeof(T),
+                        tag
+                    );
+                }
+            }
+
+            // Set up 'send' to myself
+            {
+                const labelList& map = subMap[Pstream::myProcNo()];
+
+                List<T>& subField = sendFields[Pstream::myProcNo()];
+                subField.setSize(map.size());
+                forAll(map, i)
+                {
+                    subField[i] = field[map[i]];
+                }
+            }
+
+            // Combine bits. Note that can reuse field storage
+            field.setSize(constructSize);
+            field = nullValue;
+
+            // Receive sub field from myself (subField)
+            {
+                const labelList& map = constructMap[Pstream::myProcNo()];
+                const List<T>& subField = sendFields[Pstream::myProcNo()];
+
+                forAll(map, i)
+                {
+                    cop(field[map[i]], subField[i]);
+                }
+            }
+
+            // Wait for all to finish
+            Pstream::waitRequests(nOutstanding);
+
+            // Collect neighbour fields
+            for (label domain = 0; domain < Pstream::nProcs(); domain++)
+            {
+                const labelList& map = constructMap[domain];
+
+                if (domain != Pstream::myProcNo() && map.size())
+                {
+                    const List<T>& subField = recvFields[domain];
+
+                    checkReceivedSize(domain, map.size(), subField.size());
+
+                    forAll(map, i)
+                    {
+                        cop(field[map[i]], subField[i]);
+                    }
+                }
+            }
+        }
     }
     else
     {
-        // This needs to be cleaned up: temporary solution.  HJ, 15/Jun/2014
         FatalErrorIn("mapDistribute::distribute(..)")
-            << "Unknown communication schedule " << label(commsType)
+            << "Unknown communication schedule " << commsType
             << abort(FatalError);
     }
 }
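
Both distribute() overloads now share the same three ingredients: a serial
fast path that applies subMap and constructMap locally and returns; message
tags on every stream in the blocking and scheduled branches; and a
non-blocking branch that no longer rejects non-contiguous types but
serialises them through tagged OPstream/IPstream buffers. The control flow in
outline (a sketch of the structure above, not a verbatim extract):

    if (!Pstream::parRun())
    {
        // Serial: gather through subMap, scatter through constructMap, return
    }
    else if (commsType == Pstream::blocking)
    {
        // One tagged OPstream/IPstream per neighbour, self-copy in between
    }
    else if (commsType == Pstream::scheduled)
    {
        // Pairwise swaps: one side sends first and then receives, the other
        // receives first and then sends, all streams carrying the same tag
    }
    else if (commsType == Pstream::nonBlocking)
    {
        label nOutstanding = Pstream::nRequests();

        // contiguous<T>() : raw tagged reads/writes of the element bytes
        // otherwise       : tagged stream serialisation per neighbour

        Pstream::waitRequests(nOutstanding);   // wait for our requests only
    }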

View file

@ -244,6 +244,7 @@ Foam::polyMesh::polyMesh(const IOobject& io)
     bounds_(allPoints_),
     geometricD_(Vector<label>::zero),
     solutionD_(Vector<label>::zero),
+    comm_(Pstream::worldComm),
     pointZones_
     (
         IOobject
@ -441,6 +442,7 @@ Foam::polyMesh::polyMesh
     bounds_(allPoints_, syncPar),
     geometricD_(Vector<label>::zero),
     solutionD_(Vector<label>::zero),
+    comm_(Pstream::worldComm),
     pointZones_
     (
         IOobject
@ -601,6 +603,7 @@ Foam::polyMesh::polyMesh
     bounds_(allPoints_, syncPar),
     geometricD_(Vector<label>::zero),
     solutionD_(Vector<label>::zero),
+    comm_(Pstream::worldComm),
     pointZones_
     (
         IOobject
@ -925,6 +928,18 @@ Foam::label Foam::polyMesh::nSolutionD() const
 }


+Foam::label Foam::polyMesh::comm() const
+{
+    return comm_;
+}
+
+
+Foam::label& Foam::polyMesh::comm()
+{
+    return comm_;
+}
+
+
 // Add boundary patches. Constructor helper
 void Foam::polyMesh::addPatches
 (

View file

@ -134,6 +134,9 @@ private:
             //  Defined according to the presence of empty patches
             mutable Vector<label> solutionD_;

+            //- Communicator used for parallel communication
+            label comm_;
+

         // Zoning information
@ -293,9 +296,8 @@ public:
         );


-    // Destructor
-
-        virtual ~polyMesh();
+    //- Destructor
+    virtual ~polyMesh();


     // Member Functions
@ -386,23 +388,36 @@ public:
             //- Return the number of valid solved-for dimensions in the mesh
             label nSolutionD() const;

-            //- Return point zone mesh
-            const pointZoneMesh& pointZones() const
-            {
-                return pointZones_;
-            }
-
-            //- Return face zone mesh
-            const faceZoneMesh& faceZones() const
-            {
-                return faceZones_;
-            }
-
-            //- Return cell zone mesh
-            const cellZoneMesh& cellZones() const
-            {
-                return cellZones_;
-            }
+
+        // Communication support
+
+            //- Return communicator used for parallel communication
+            label comm() const;
+
+            //- Return communicator used for parallel communication
+            label& comm();
+
+
+        // Point, face and cell zones
+
+            //- Return point zone mesh
+            const pointZoneMesh& pointZones() const
+            {
+                return pointZones_;
+            }
+
+            //- Return face zone mesh
+            const faceZoneMesh& faceZones() const
+            {
+                return faceZones_;
+            }
+
+            //- Return cell zone mesh
+            const cellZoneMesh& cellZones() const
+            {
+                return cellZones_;
+            }

             //- Return parallel info
             const globalMeshData& globalData() const;
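
The new accessor pair reads the communicator through the const overload and
lets a solver re-point the mesh at a different communicator through the
non-const one. A hypothetical use, assuming a communicator handle allocated
outside this commit:

    // Redirect all mesh-level parallel communication onto subComm
    label subComm = allocatedSubComm;   // assumed obtained elsewhere
    mesh.comm() = subComm;

    Info<< "Mesh communicates on communicator " << mesh.comm() << endl;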

View file

@ -31,6 +31,7 @@ Contributor

 #include "ggiPolyPatch.H"
 #include "polyBoundaryMesh.H"
+#include "polyMesh.H"
 #include "addToRunTimeSelectionTable.H"
 #include "demandDrivenData.H"
 #include "polyPatchID.H"
@ -65,7 +66,7 @@ bool Foam::ggiPolyPatch::active() const
     // For decomposition and reconstruction
     // If not running in parallel and the patch is not local, this is a serial
     // operation on a piece of a parallel decomposition and is therefore
-    // inactive.  HJ, 5/Spe/2016
+    // inactive.  HJ, 5/Sep/2016
     if (!Pstream::parRun() && !localParallel())
     {
         return false;
@ -770,6 +771,18 @@ const Foam::faceZone& Foam::ggiPolyPatch::zone() const
 }


+Foam::label Foam::ggiPolyPatch::comm() const
+{
+    return boundaryMesh().mesh().comm();
+}
+
+
+int Foam::ggiPolyPatch::tag() const
+{
+    return Pstream::msgType();
+}
+
+
 const Foam::labelList& Foam::ggiPolyPatch::zoneAddressing() const
 {
     if (!zoneAddressingPtr_)

View file

@ -303,6 +303,15 @@ public:
             const faceZone& zone() const;


+        // Communications support
+
+            //- Return communicator used for communication
+            virtual label comm() const;
+
+            //- Return message tag to use for communication
+            virtual int tag() const;
+
+
         // Interpolation data

             //- Is this the master side?
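
The same two virtual functions are added to the mixingPlane, processor and
regionCouple patches below; every implementation delegates the communicator
to the owning mesh and keeps the default message tag. A sketch of the
recurring pattern for a hypothetical coupled patch class:

    Foam::label Foam::myCoupledPolyPatch::comm() const
    {
        // Communicator of the mesh this patch belongs to
        return boundaryMesh().mesh().comm();
    }


    int Foam::myCoupledPolyPatch::tag() const
    {
        // Default message tag; a per-patch tag could be returned here later
        return Pstream::msgType();
    }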

View file

@ -805,6 +805,18 @@ Foam::mixingPlanePolyPatch::patchToPatch() const
 }


+Foam::label Foam::mixingPlanePolyPatch::comm() const
+{
+    return boundaryMesh().mesh().comm();
+}
+
+
+int Foam::mixingPlanePolyPatch::tag() const
+{
+    return Pstream::msgType();
+}
+
+
 const Foam::vectorField&
 Foam::mixingPlanePolyPatch::reconFaceCellCentres() const
 {

View file

@ -278,9 +278,8 @@ public:
         }


-    // Destructor
-
-        virtual ~mixingPlanePolyPatch();
+    //- Destructor
+    virtual ~mixingPlanePolyPatch();


     // Member functions
@ -346,6 +345,15 @@ public:
             const mixingPlaneZoneInterpolation& patchToPatch() const;


+        // Communications support
+
+            //- Return communicator used for communication
+            virtual label comm() const;
+
+            //- Return message tag to use for communication
+            virtual int tag() const;
+
+
         // Interpolation functions

             //- Interpolate face field to profile: given field on a

View file

@ -27,12 +27,13 @@ License
 #include "addToRunTimeSelectionTable.H"
 #include "dictionary.H"
 #include "SubField.H"
+#include "demandDrivenData.H"
 #include "matchPoints.H"
 #include "OFstream.H"
+#include "polyBoundaryMesh.H"
 #include "polyMesh.H"
 #include "foamTime.H"
 #include "transformList.H"
-#include "demandDrivenData.H"

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -134,6 +135,18 @@ Foam::processorPolyPatch::~processorPolyPatch()

 // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

+Foam::label Foam::processorPolyPatch::comm() const
+{
+    return boundaryMesh().mesh().comm();
+}
+
+
+int Foam::processorPolyPatch::tag() const
+{
+    return Pstream::msgType();
+}
+
+
 void Foam::processorPolyPatch::initAddressing()
 {
     polyPatch::initAddressing();

View file

@ -43,6 +43,7 @@ SourceFiles
 #define processorPolyPatch_H

 #include "coupledPolyPatch.H"
+#include "polyBoundaryMesh.H"

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -189,9 +190,8 @@ public:
         }


-    // Destructor
-
-        virtual ~processorPolyPatch();
+    //- Destructor
+    virtual ~processorPolyPatch();


     // Member functions
@ -208,6 +208,15 @@ public:
                 return neighbProcNo_;
             }

+
+        // Communications support
+
+            //- Return communicator used for communication
+            virtual label comm() const;
+
+            //- Return message tag to use for communication
+            virtual int tag() const;
+
             //- Does the processor own the patch ?
             bool owner() const
             {

View file

@ -921,6 +921,18 @@ void Foam::regionCouplePolyPatch::detach() const
 }


+Foam::label Foam::regionCouplePolyPatch::comm() const
+{
+    return boundaryMesh().mesh().comm();
+}
+
+
+int Foam::regionCouplePolyPatch::tag() const
+{
+    return Pstream::msgType();
+}
+
+
 const Foam::labelList& Foam::regionCouplePolyPatch::zoneAddressing() const
 {
     if (!zoneAddressingPtr_)

View file

@ -291,7 +291,7 @@ public:
         }


-    //-Destructor
+    //- Destructor
     virtual ~regionCouplePolyPatch();

@ -351,6 +351,15 @@ public:
             void detach() const;


+        // Communications support
+
+            //- Return communicator used for communication
+            virtual label comm() const;
+
+            //- Return message tag to use for communication
+            virtual int tag() const;
+
+
         // Interpolation data

             //- Is this the master side?

View file

@ -651,7 +651,8 @@ Foam::autoPtr<Foam::amgMatrix> Foam::pamgPolicy::restrictMatrix
                     coarseInterfaces,
                     fineInterface,
                     fineInterface.interfaceInternalField(child_),
-                    fineInterfaceAddr[intI]
+                    fineInterfaceAddr[intI],
+                    Pstream::worldComm //HJ, AMG Comm fineMesh.comm()
                 ).ptr()
             );
         }

View file

@ -61,6 +61,7 @@ void tetPolyMesh::clearOut() const
     clearOutParPointData();
 }

+
 // * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

 // Construct from components
@ -71,6 +72,7 @@ tetPolyMesh::tetPolyMesh(const polyMesh& pMesh)
     boundary_(*this, pMesh.boundaryMesh()),
     faceOffset_(mesh_.nPoints()),
     cellOffset_(faceOffset_ + mesh_.nFaces()),
+    comm_(Pstream::worldComm),
     nPoints_(-1),
     nEdges_(-1),
     nTets_(-1),
@ -105,6 +107,18 @@ tetPolyMesh::~tetPolyMesh()

 // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

+Foam::label Foam::tetPolyMesh::comm() const
+{
+    return comm_;
+}
+
+
+Foam::label& Foam::tetPolyMesh::comm()
+{
+    return comm_;
+}
+
+
 // Return number of edges in decomposition for a face
 label tetPolyMesh::nEdgesForFace(const label faceID) const
 {
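
As with polyMesh above, the communicator member is initialised to
Pstream::worldComm, so behaviour is unchanged until a caller assigns another
communicator through the non-const accessor. A sketch (illustrative only):

    tetPolyMesh tetMesh(pMesh);    // comm_ starts out as Pstream::worldComm

    // Hypothetically inherit whatever communicator the volume mesh uses
    tetMesh.comm() = pMesh.comm();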

View file

@ -116,7 +116,7 @@ class tetPolyMeshMapper;
 class tetPolyMeshLduAddressing;

 /*---------------------------------------------------------------------------*\
-                        Class tetPolyMesh Declaration
+                          Class tetPolyMesh Declaration
 \*---------------------------------------------------------------------------*/

 class tetPolyMesh
@ -138,6 +138,9 @@ class tetPolyMesh
         //- Offset in numbering to first cell centre
         label cellOffset_;

+        //- Communicator used for parallel communication
+        label comm_;
+

     // Demand-driven data
@ -205,9 +208,9 @@ public:
        //- Construct from components
        explicit tetPolyMesh(const polyMesh& pMesh);

-    // Destructor
-
-        ~tetPolyMesh();
+
+    //- Destructor
+    virtual ~tetPolyMesh();


     // Member Functions
@ -370,6 +373,15 @@ public:
             const edgeList& parallelEdges() const;

+
+        // Communication support
+
+            //- Return communicator used for parallel communication
+            label comm() const;
+
+            //- Return communicator used for parallel communication
+            label& comm();
+
+
         // Edit

             //- Update mesh topology using the morph engine