Improved parallel comms by using MPI allreduce directly on primitive types

Hrvoje Jasak 2016-10-06 12:49:25 +01:00
parent 327be98074
commit b7b1edcf95
7 changed files with 591 additions and 397 deletions
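In outline, the commit bypasses the generic stream-based reduce for primitive types: bool (converted to int), label, scalar and contiguous lists of labels now go straight to MPI_Allreduce through allReduce(), with the specialisations collected in the new PstreamCommsStruct.C and declared in PstreamReduceOps.H. A minimal sketch of the pattern these specialisations delegate to, assuming a plain MPI program (the function and variable names below are illustrative, not taken from the commit):

#include <mpi.h>

// Hedged sketch: every rank contributes localValue and every rank receives
// the global sum.  This is the MPI_Allreduce pattern that the new
// specialisations reach via allReduce().
double globalSum(double localValue, MPI_Comm comm)
{
    double sum = 0;

    MPI_Allreduce(&localValue, &sum, 1, MPI_DOUBLE, MPI_SUM, comm);

    return sum;
}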

View file

@@ -141,6 +141,7 @@ $(StringStreams)/StringStreamsPrint.C
Pstreams = $(Streams)/Pstreams
$(Pstreams)/Pstream.C
$(Pstreams)/PstreamReduceOps.C
$(Pstreams)/PstreamCommsStruct.C
$(Pstreams)/PstreamGlobals.C
$(Pstreams)/IPstream.C

View file

@@ -32,21 +32,11 @@ License
#include "OSspecific.H"
#include "PstreamGlobals.H"
#include "SubList.H"
#include "allReduce.H"
#include <cstring>
#include <cstdlib>
#include <csignal>
#if defined(WM_SP)
# define MPI_SCALAR MPI_FLOAT
#elif defined(WM_DP)
# define MPI_SCALAR MPI_DOUBLE
#elif defined(WM_LDP)
# define MPI_SCALAR MPI_LONG_DOUBLE
#endif
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
defineTypeNameAndDebug(Foam::Pstream, 0);
@@ -548,8 +538,6 @@ Foam::label Foam::Pstream::procNo
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
@@ -680,230 +668,6 @@ void Foam::Pstream::abort()
}
void Foam::reduce
(
bool& Value,
const andOp<bool>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
// Note: C++ bool is a type separate from C and cannot be cast
// For safety and compatibility with compilers, convert bool to int
// to comply with MPI types. HJ, 23/Sep/2016
int intBool = 0;
if (Value)
{
intBool = 1;
}
allReduce(intBool, 1, MPI_INT, MPI_LAND, bop, tag, comm);
if (intBool > 0)
{
Value = true;
}
else
{
Value = false;
}
}
void Foam::reduce
(
bool& Value,
const orOp<bool>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
// Note: C++ bool is a type separate from C and cannot be cast
// For safety and compatibility with compilers, convert bool to int
// to comply with MPI types. HJ, 23/Sep/2016
int intBool = 0;
if (Value)
{
intBool = 1;
}
allReduce(intBool, 1, MPI_INT, MPI_LOR, bop, tag, comm);
if (intBool > 0)
{
Value = true;
}
else
{
Value = false;
}
}
void Foam::reduce
(
scalar& Value,
const minOp<scalar>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, comm);
}
void Foam::reduce
(
scalar& Value,
const maxOp<scalar>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_SCALAR, MPI_MAX, bop, tag, comm);
}
void Foam::reduce
(
scalar& Value,
const sumOp<scalar>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, comm);
}
void Foam::reduce
(
vector2D& Value,
const sumOp<vector2D>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, comm);
}
void Foam::sumReduce
(
scalar& Value,
label& Count,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
vector2D twoScalars(Value, scalar(Count));
reduce(twoScalars, sumOp<vector2D>(), tag, comm);
Value = twoScalars.x();
Count = twoScalars.y();
}
void Foam::reduce
(
scalar& Value,
const sumOp<scalar>& bop,
const int tag,
const label comm,
label& requestID
)
{
#ifdef MPIX_COMM_TYPE_SHARED
// Assume mpich2 with non-blocking collectives extensions. Once mpi3
// is available this will change.
MPI_Request request;
scalar v = Value;
MPIX_Ireduce
(
&v,
&Value,
1,
MPI_SCALAR,
MPI_SUM,
0, // root
PstreamGlobals::MPICommunicators_[comm],
&request
);
requestID = PstreamGlobals::outstandingRequests_.size();
PstreamGlobals::outstandingRequests_.append(request);
if (debug)
{
Pout<< "Pstream::allocateRequest for non-blocking reduce"
<< " : request:" << requestID
<< endl;
}
#else
// Non-blocking not yet implemented in mpi
reduce(Value, bop, tag, comm);
requestID = -1;
#endif
}
Foam::label Foam::Pstream::nRequests()
{
return PstreamGlobals::outstandingRequests_.size();

View file

@@ -0,0 +1,357 @@
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | foam-extend: Open Source CFD
   \\    /   O peration     | Version:     4.0
    \\  /    A nd           | Web:         http://www.foam-extend.org
     \\/     M anipulation  | For copyright notice see file Copyright
-------------------------------------------------------------------------------
License
This file is part of foam-extend.
foam-extend is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
foam-extend is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with foam-extend. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "mpi.h"
#include "label.H"
#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "allReduce.H"
// Check type of label for use in MPI calls
#if defined(WM_INT)
# define MPI_LABEL MPI_INT
#elif defined(WM_LONG)
# define MPI_LABEL MPI_LONG
#elif defined(WM_LLONG)
# define MPI_LABEL MPI_LONG_LONG
#endif
// Check type of scalar for use in MPI calls
#if defined(WM_SP)
# define MPI_SCALAR MPI_FLOAT
#elif defined(WM_DP)
# define MPI_SCALAR MPI_DOUBLE
#elif defined(WM_LDP)
# define MPI_SCALAR MPI_LONG_DOUBLE
#endif
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Optimizing template specialisations using MPI_REDUCE
void Foam::reduce
(
bool& Value,
const andOp<bool>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
// Note: C++ bool is a type separate from C and cannot be cast
// For safety and compatibility with compilers, convert bool to int
// to comply with MPI types. HJ, 23/Sep/2016
int intBool = 0;
if (Value)
{
intBool = 1;
}
allReduce(intBool, 1, MPI_INT, MPI_LAND, bop, tag, comm);
if (intBool > 0)
{
Value = true;
}
else
{
Value = false;
}
}
void Foam::reduce
(
bool& Value,
const orOp<bool>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
// Note: C++ bool is a type separate from C and cannot be cast
// For safety and compatibility with compilers, convert bool to int
// to comply with MPI types. HJ, 23/Sep/2016
int intBool = 0;
if (Value)
{
intBool = 1;
}
allReduce(intBool, 1, MPI_INT, MPI_LOR, bop, tag, comm);
if (intBool > 0)
{
Value = true;
}
else
{
Value = false;
}
}
void Foam::reduce
(
label& Value,
const minOp<label>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_LABEL, MPI_MIN, bop, tag, comm);
}
void Foam::reduce
(
label& Value,
const maxOp<label>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_LABEL, MPI_MAX, bop, tag, comm);
}
void Foam::reduce
(
label& Value,
const sumOp<label>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_LABEL, MPI_SUM, bop, tag, comm);
}
void Foam::reduce
(
scalar& Value,
const minOp<scalar>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, comm);
}
void Foam::reduce
(
scalar& Value,
const maxOp<scalar>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_SCALAR, MPI_MAX, bop, tag, comm);
}
void Foam::reduce
(
scalar& Value,
const sumOp<scalar>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, comm);
}
void Foam::reduce
(
UList<label>& Value,
const sumOp<UList<label> >& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(*Value.begin(), Value.size(), MPI_LABEL, MPI_SUM, bop, tag, comm);
}
void Foam::reduce
(
vector2D& Value,
const sumOp<vector2D>& bop,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, comm);
}
void Foam::sumReduce
(
scalar& Value,
label& Count,
const int tag,
const label comm
)
{
if (Pstream::warnComm != -1 && comm != Pstream::warnComm)
{
Pout<< "** reducing:" << Value << " with comm:" << comm
<< " warnComm:" << Pstream::warnComm
<< endl;
error::printStack(Pout);
}
vector2D twoScalars(Value, scalar(Count));
reduce(twoScalars, sumOp<vector2D>(), tag, comm);
Value = twoScalars.x();
Count = twoScalars.y();
}
void Foam::reduce
(
scalar& Value,
const sumOp<scalar>& bop,
const int tag,
const label comm,
label& requestID
)
{
#ifdef MPIX_COMM_TYPE_SHARED
// Assume mpich2 with non-blocking collectives extensions. Once mpi3
// is available this will change.
MPI_Request request;
scalar v = Value;
MPIX_Ireduce
(
&v,
&Value,
1,
MPI_SCALAR,
MPI_SUM,
0, // root
PstreamGlobals::MPICommunicators_[comm],
&request
);
requestID = PstreamGlobals::outstandingRequests_.size();
PstreamGlobals::outstandingRequests_.append(request);
if (debug)
{
Pout<< "Pstream::allocateRequest for non-blocking reduce"
<< " : request:" << requestID
<< endl;
}
#else
// Non-blocking not yet implemented in mpi
reduce(Value, bop, tag, comm);
requestID = -1;
#endif
}
// ************************************************************************* //
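For orientation, a hedged usage sketch of the specialisations defined above, as they would typically be invoked from library or solver code (the local variables and the mesh/runTime objects are illustrative, not part of the commit):

// Global cell count via the label sumOp specialisation; every processor
// ends up holding the total
label nLocalCells = mesh.nCells();               // illustrative local value
reduce(nLocalCells, sumOp<label>());

// Global minimum time step via the scalar minOp specialisation
scalar minDeltaT = runTime.deltaT().value();     // illustrative local value
reduce(minDeltaT, minOp<scalar>());

// Value and sample count reduced in one call via sumReduce, which packs
// them into a single vector2D reduction
scalar vSum = 0.0;                               // local sum, accumulated elsewhere
label nSamples = 0;                              // local count, accumulated elsewhere
sumReduce(vSum, nSamples);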

View file

@@ -159,6 +159,33 @@ void reduce
const label comm = Pstream::worldComm
);
// Insist there are specialisations for the common reductions of labels
void reduce
(
label& Value,
const sumOp<label>& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
label& Value,
const minOp<label>& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
label& Value,
const maxOp<label>& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// Insist there are specialisations for the common reductions of scalars
void reduce
@@ -211,6 +238,34 @@ void reduce
);
// Insist there are specialisations for the common reductions of
// lists of labels
void reduce
(
UList<label>& Value,
const sumOp<UList<label> >& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
UList<label>& Value,
const minOp<UList<label> >& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
void reduce
(
UList<label>& Value,
const maxOp<UList<label> >& bop,
const int tag = Pstream::msgType(),
const label comm = Pstream::worldComm
);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
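The list overloads declared above are exercised later in this commit (ggiAMGInterface.C); a hedged sketch of that calling pattern, with illustrative variable names:

// Each processor fills its own slot, then the UList<label> sumOp
// specialisation sums the list element-wise across all processors
labelList nFacesPerProc(Pstream::nProcs(), 0);
nFacesPerProc[Pstream::myProcNo()] = nLocalFaces;    // nLocalFaces is illustrative
reduce(nFacesPerProc, sumOp<UList<label> >());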

View file

@@ -44,155 +44,155 @@ void Foam::allReduce
return;
}
if (Pstream::nProcs(comm) <= Pstream::nProcsSimpleSum)
{
if (Pstream::master(comm))
{
for
(
int slave = Pstream::firstSlave();
slave <= Pstream::lastSlave(comm);
slave++
)
{
Type value;
// if (Pstream::nProcs(comm) <= Pstream::nProcsSimpleSum)
// {
// if (Pstream::master(comm))
// {
// for
// (
// int slave = Pstream::firstSlave();
// slave <= Pstream::lastSlave(comm);
// slave++
// )
// {
// Type value;
if
(
MPI_Recv
(
&value,
MPICount,
MPIType,
slave,
tag,
PstreamGlobals::MPICommunicators_[comm],
MPI_STATUS_IGNORE
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Recv failed"
<< Foam::abort(FatalError);
}
// if
// (
// MPI_Recv
// (
// &value,
// MPICount,
// MPIType,
// slave,
// tag,
// PstreamGlobals::MPICommunicators_[comm],
// MPI_STATUS_IGNORE
// )
// )
// {
// FatalErrorIn
// (
// "void Foam::allReduce\n"
// "(\n"
// " Type&,\n"
// " int,\n"
// " MPI_Datatype,\n"
// " MPI_Op,\n"
// " const BinaryOp&,\n"
// " const int\n"
// ")\n"
// ) << "MPI_Recv failed"
// << Foam::abort(FatalError);
// }
Value = bop(Value, value);
}
}
else
{
if
(
MPI_Send
(
&Value,
MPICount,
MPIType,
Pstream::masterNo(),
tag,
PstreamGlobals::MPICommunicators_[comm]
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Send failed"
<< Foam::abort(FatalError);
}
}
// Value = bop(Value, value);
// }
// }
// else
// {
// if
// (
// MPI_Send
// (
// &Value,
// MPICount,
// MPIType,
// Pstream::masterNo(),
// tag,
// PstreamGlobals::MPICommunicators_[comm]
// )
// )
// {
// FatalErrorIn
// (
// "void Foam::allReduce\n"
// "(\n"
// " Type&,\n"
// " int,\n"
// " MPI_Datatype,\n"
// " MPI_Op,\n"
// " const BinaryOp&,\n"
// " const int\n"
// ")\n"
// ) << "MPI_Send failed"
// << Foam::abort(FatalError);
// }
// }
if (Pstream::master(comm))
{
for
(
int slave = Pstream::firstSlave();
slave <= Pstream::lastSlave(comm);
slave++
)
{
if
(
MPI_Send
(
&Value,
MPICount,
MPIType,
slave,
tag,
PstreamGlobals::MPICommunicators_[comm]
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Send failed"
<< Foam::abort(FatalError);
}
}
}
else
{
if
(
MPI_Recv
(
&Value,
MPICount,
MPIType,
Pstream::masterNo(),
tag,
PstreamGlobals::MPICommunicators_[comm],
MPI_STATUS_IGNORE
)
)
{
FatalErrorIn
(
"void Foam::allReduce\n"
"(\n"
" Type&,\n"
" int,\n"
" MPI_Datatype,\n"
" MPI_Op,\n"
" const BinaryOp&,\n"
" const int\n"
")\n"
) << "MPI_Recv failed"
<< Foam::abort(FatalError);
}
}
}
else
{
// if (Pstream::master(comm))
// {
// for
// (
// int slave = Pstream::firstSlave();
// slave <= Pstream::lastSlave(comm);
// slave++
// )
// {
// if
// (
// MPI_Send
// (
// &Value,
// MPICount,
// MPIType,
// slave,
// tag,
// PstreamGlobals::MPICommunicators_[comm]
// )
// )
// {
// FatalErrorIn
// (
// "void Foam::allReduce\n"
// "(\n"
// " Type&,\n"
// " int,\n"
// " MPI_Datatype,\n"
// " MPI_Op,\n"
// " const BinaryOp&,\n"
// " const int\n"
// ")\n"
// ) << "MPI_Send failed"
// << Foam::abort(FatalError);
// }
// }
// }
// else
// {
// if
// (
// MPI_Recv
// (
// &Value,
// MPICount,
// MPIType,
// Pstream::masterNo(),
// tag,
// PstreamGlobals::MPICommunicators_[comm],
// MPI_STATUS_IGNORE
// )
// )
// {
// FatalErrorIn
// (
// "void Foam::allReduce\n"
// "(\n"
// " Type&,\n"
// " int,\n"
// " MPI_Datatype,\n"
// " MPI_Op,\n"
// " const BinaryOp&,\n"
// " const int\n"
// ")\n"
// ) << "MPI_Recv failed"
// << Foam::abort(FatalError);
// }
// }
// }
// else
// {
Type sum;
MPI_Allreduce
@@ -206,7 +206,7 @@ void Foam::allReduce
);
Value = sum;
}
// }
}
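With the linear master/slave exchange above commented out, every call now falls through to the MPI_Allreduce branch. A hedged sketch of that active path, written as a free-standing helper for clarity (the real code lives inside Foam::allReduce, uses that function's own parameters and adds error handling; part of the call sits outside the shown hunk):

#include <mpi.h>

template<class Type>
void allReduceSketch
(
    Type& Value,
    int MPICount,
    MPI_Datatype MPIType,
    MPI_Op MPIOp,
    MPI_Comm comm
)
{
    // Every rank contributes Value; every rank receives the reduced result
    Type sum;

    MPI_Allreduce(&Value, &sum, MPICount, MPIType, MPIOp, comm);

    Value = sum;
}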

View file

@@ -97,7 +97,7 @@ void Foam::ggiAMGInterface::initFastReduce() const
// Note: reduce with a comm will only be present on processors containing
// master or slave faces. Other processors created a dummy map above
// HJ, 20/Sep/2016
reduce(zoneProcID, maxOp<labelField>(), tag(), comm());
reduce(zoneProcID, maxOp<UList<label> >(), tag(), comm());
// Find out where my zone data is coming from
labelList nRecv(Pstream::nProcs(), 0);
@@ -340,7 +340,6 @@ Foam::ggiAMGInterface::ggiAMGInterface
// On the fine level, addressing is made in a labelListList
if (fineGgiInterface_.fineLevel())
{
Info<< "fineGgiInterface start: " << lTime_.elapsedCpuTime() << endl;
// This addressing defines how to interpolate for all zone faces
// across the interface
const labelListList& fineAddr = fineGgiInterface_.addressing();
@@ -348,6 +347,10 @@ Foam::ggiAMGInterface::ggiAMGInterface
// Perform analysis only for local faces
// HJ, 22/Jun/2016
label curMasterProc, curSlaveProc;
long curMaster, curSlave;
forAll (fineZa, fineZaI)
{
// Get the local face (from zone) to analyse
@@ -358,10 +361,10 @@ Foam::ggiAMGInterface::ggiAMGInterface
forAll (curFineNbrs, nbrI)
{
long curMaster = -1;
label curMasterProc = -1;
long curSlave = -1;
label curSlaveProc = -1;
curMaster = -1;
curMasterProc = -1;
curSlave = -1;
curSlaveProc = -1;
// Note. Signalling in global clustering requires
// me to recognise clustering from separate
@@ -527,24 +530,27 @@ Foam::ggiAMGInterface::ggiAMGInterface
}
} // end for all current neighbours
} // end for all fine faces
Info<< "fineGgiInterface end: " << lTime_.elapsedCpuTime() << endl;
}
else
{
// Coarse level, addressing is stored in faceCells
// This addressing defines which faces from zone are local
Info<< "coarseGgiInterface start: " << lTime_.elapsedCpuTime() << endl;
// Perform analysis only for local faces
// HJ, 22/Jun/2016
label curMasterProc, curSlaveProc;
long curMaster, curSlave;
forAll (fineZa, fineZaI)
{
// Get the local face (from zone) to analyse
const label ffI = fineZa[fineZaI];
long curMaster = -1;
label curMasterProc = -1;
long curSlave = -1;
label curSlaveProc = -1;
curMaster = -1;
curMasterProc = -1;
curSlave = -1;
curSlaveProc = -1;
// Note. Signalling in global clustering requires
// me to recognise clustering from separate
@@ -709,7 +715,6 @@ Foam::ggiAMGInterface::ggiAMGInterface
nAgglomPairs++;
}
} // end for all fine faces
Info<< "coarseGgiInterface end: " << lTime_.elapsedCpuTime() << endl;
} // end of else in fine level (coarse level)
// Since only local faces are analysed, lists can now be resized
@@ -721,7 +726,7 @@ Foam::ggiAMGInterface::ggiAMGInterface
// In order to assemble the coarse global face zone, find out
// how many faces have been created on each processor.
// Note that masters and slaves both count faces so we will
// only ask master sizes to count
// only ask master sizes to count
labelList nCoarseFacesPerProc(Pstream::nProcs(), 0);
nCoarseFacesPerProc[Pstream::myProcNo()] = nCoarseFaces;
@@ -731,7 +736,10 @@ Foam::ggiAMGInterface::ggiAMGInterface
// contact with the GGI interface will have zero zone size.
// This needs to be handled separately in the initFastReduce
// HJ, 20/Sep/2016
reduce(nCoarseFacesPerProc, sumOp<labelList>(), tag(), comm());
// Optimised comm: Wait for info from previous processor, add your
// number of coarse faces and pass to next processor
reduce(nCoarseFacesPerProc, sumOp<UList<label> >(), tag(), comm());
// Coarse global face zone is assembled by adding all faces from proc0,
// followed by all faces from proc1 etc.

View file

@@ -55,6 +55,9 @@ Description
# undef FOAM_LABEL_MAX
# define FOAM_LABEL_MAX INT_MAX
// Define identifier for label type for int
# define WM_INT
# include "int.H"
# include "long.H"
# include "longLong.H"
@@ -80,6 +83,9 @@ namespace Foam
# undef FOAM_LABEL_MAX
# define FOAM_LABEL_MAX LONG_MAX
// Define identifier for label type for long
# define WM_LONG
# include "int.H"
# include "long.H"
@@ -105,6 +111,9 @@ namespace Foam
# undef FOAM_LABEL_MAX
# define FOAM_LABEL_MAX LLONG_MAX
// Define identifier for label type for long long
# define WM_LLONG
# include "int.H"
# include "long.H"
# include "longLong.H"