ENH: globalIndex with direct gather/broadcast

- less communication than gatherList/scatterList

ENH: refine send granularity in Pstream::exchange

STYLE: ensure PstreamBuffers and defaultCommsType agree

- simpler loops for lduSchedule
Commit 0cf02eb384 (parent b8c3dc4e49)
Author: Mark Olesen
Date:   2022-03-07 09:40:13 +01:00
18 changed files with 275 additions and 196 deletions
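
A note on the headline change, with a minimal sketch (it uses only the Pstream/UPstream calls that appear in the diffs below; it is not part of the commit itself). The old globalIndex::reset() seeded a full per-rank list and exchanged it via gatherList/scatterList, i.e. two rounds of point-to-point messages; the new code gathers one value per rank and broadcasts the assembled list once:

    // Exchanging per-processor sizes: old vs new (sketch)
    #include "Pstream.H"
    #include "labelList.H"

    using namespace Foam;

    void demoSizeExchange(const label localSize, const label comm)
    {
        // Before: seed with localSize, zero elsewhere, then gather + scatter
        labelList localLens(UPstream::nProcs(comm), Zero);
        localLens[UPstream::myProcNo(comm)] = localSize;
        Pstream::gatherList(localLens, UPstream::msgType(), comm);
        Pstream::scatterList(localLens, UPstream::msgType(), comm);

        // After: gather one value per rank, broadcast the assembled list
        labelList lens = UPstream::listGatherValues(localSize, comm);
        Pstream::broadcast(lens, comm);
    }

The message tag becomes irrelevant in the new form, which is why the tag-carrying globalIndex constructor and reset() overloads are demoted to deprecated housekeeping further down.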


@@ -210,7 +210,7 @@ void broadcast_chunks
            << "Contiguous data only." << sizeof(T) << Foam::abort(FatalError);
    }

-    if (UPstream::maxCommsSize <= int(sizeof(T)))
+    if (UPstream::maxCommsSize <= 0)
    {
        // Do in one go
        Info<< "send " << sendData.size() << " elements in one go" << endl;
@@ -230,48 +230,61 @@ void broadcast_chunks
    // guaranteed that some processor's sending size is some other
    // processor's receive size. Also we can ignore any local comms.

-    // We need to send bytes so the number of iterations:
+    // We need to send chunks so the number of iterations:
    //  maxChunkSize                        iterations
    //  ------------                        ----------
    //  0                                   0
    //  1..maxChunkSize                     1
    //  maxChunkSize+1..2*maxChunkSize      2
-    //  etc.
+    //  ...

-    const label maxChunkSize(UPstream::maxCommsSize/sizeof(T));
+    const label maxChunkSize
+    (
+        max
+        (
+            static_cast<label>(1),
+            static_cast<label>(UPstream::maxCommsSize/sizeof(T))
+        )
+    );

-    label nIter(0);
+    label nChunks(0);
    {
-        label nSendMax = 0;
+        // Get max send count (elements)
        // forAll(sendBufs, proci)
        // {
        //     if (proci != Pstream::myProcNo(comm))
        //     {
-        //         nSendMax = max(nSendMax, sendBufs[proci].size());
+        //         nChunks = max(nChunks, sendBufs[proci].size());
        //     }
        // }
-        nSendMax = sendSize;
+        nChunks = sendSize;

-        if (nSendMax)
+        // Convert from send count (elements) to number of chunks.
+        // Can normally calculate with (count-1), but add some safety
+        if (nChunks)
        {
-            nIter = 1 + ((nSendMax-1)/maxChunkSize);
+            nChunks = 1 + (nChunks/maxChunkSize);
        }
-        reduce(nIter, maxOp<label>(), tag, comm);
+        reduce(nChunks, maxOp<label>(), tag, comm);

        Info
-            << "send " << nSendMax << " elements ("
-            << (nSendMax*sizeof(T)) << " bytes) in " << nIter
-            << " iterations of " << maxChunkSize << " chunks ("
+            << "send " << sendSize << " elements ("
+            << (sendSize*sizeof(T)) << " bytes) in " << nChunks
+            << " chunks of " << maxChunkSize << " elements ("
            << (maxChunkSize*sizeof(T)) << " bytes) for maxCommsSize:"
            << Pstream::maxCommsSize
            << endl;
    }

+    // stress-test with shortened sendSize
+    // will produce useless loops, but no calls
+    // sendSize /= 2;
+
    label nSend(0);
    label startSend(0);
    char* charPtrSend;

-    for (label iter = 0; iter < nIter; ++iter)
+    for (label iter = 0; iter < nChunks; ++iter)
    {
        nSend = min
        (

@@ -297,6 +310,8 @@ void broadcast_chunks
            startSend += nSend;
        }
    }
+
+    Info<< "final: " << startSend << endl;
 }
@@ -305,8 +320,9 @@

 int main(int argc, char *argv[])
 {
    argList::noCheckProcessorDirectories();
+    argList::addOption("comms-size", "int", "override Pstream::maxCommsSize");

    #include "setRootCase.H"
    #include "createTime.H"

    if (!Pstream::parRun())
    {

@@ -322,6 +338,9 @@ int main(int argc, char *argv[])
    broadcast_chunks<labelList, label>(input1);

    Pstream::maxCommsSize = 33;
+    args.readIfPresent("comms-size", Pstream::maxCommsSize);
+
    broadcast_chunks<labelList, label>(input1);

    // Mostly the same with PstreamBuffers
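
To make the chunk arithmetic concrete (a worked example, not part of the commit): with the test's maxCommsSize = 33 bytes and an 8-byte label, maxChunkSize = 33/8 = 4 elements. Sending 26 elements gives nChunks = 1 + 26/4 = 7, matching ceil(26/4). For an exact multiple such as 24 elements, the old (count-1) form would give ceil(24/4) = 6 while the new form gives 1 + 24/4 = 7, i.e. one deliberately empty trailing chunk — the "safety" the new comment refers to. A standalone check:

    // Plain C++ check of the chunk-count formula (no OpenFOAM required).
    // Assumes an 8-byte integral type standing in for Foam::label.
    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const long maxCommsSize = 33;  // bytes, as in the test above
        const long maxChunkSize =
            std::max<long>(1, maxCommsSize/static_cast<long>(sizeof(long)));

        for (long sendSize : {0L, 4L, 24L, 26L})
        {
            const long nChunks = (sendSize ? 1 + sendSize/maxChunkSize : 0);
            std::printf("%ld elements -> %ld chunks of <= %ld elements\n",
                        sendSize, nChunks, maxChunkSize);
        }
        return 0;
    }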


@@ -999,16 +999,16 @@ void correctCoupledBoundaryConditions(fvMesh& mesh)
    const lduSchedule& patchSchedule =
        fld.mesh().globalData().patchSchedule();

-    forAll(patchSchedule, patchEvali)
+    for (const auto& schedEval : patchSchedule)
    {
-        const label patchi = patchSchedule[patchEvali].patch;
+        const label patchi = schedEval.patch;
        const auto& fvp = mesh.boundary()[patchi];
        auto& pfld = bfld[patchi];

        const auto* ppPtr = isA<CoupledPatchType>(fvp);

        if (ppPtr && ppPtr->coupled())
        {
-            if (patchSchedule[patchEvali].init)
+            if (schedEval.init)
            {
                pfld.initEvaluate(Pstream::commsTypes::scheduled);
            }
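
This loop rewrite recurs in nearly every file below: the index patchEvali was only ever used to subscript patchSchedule, so a range-for removes the repeated lookup. For orientation, a sketch of the recurring shape (assuming the lduSchedule layout implied by the usage above, where each entry carries a patch index and an init flag, and each patch is visited once for its init phase and once for its evaluate phase):

    // The recurring transformation in this commit (sketch)
    for (const auto& schedEval : patchSchedule)
    {
        const label patchi = schedEval.patch;

        if (schedEval.init)
        {
            // start communication for this patch (initEvaluate ...)
        }
        else
        {
            // complete and apply the result (evaluate ...)
        }
    }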


@@ -95,9 +95,10 @@ void evaluateConstraintTypes(GeometricField<Type, fvPatchField, volMesh>& fld)
    const lduSchedule& patchSchedule =
        fld.mesh().globalData().patchSchedule();

-    forAll(patchSchedule, patchEvali)
+    for (const auto& schedEval : patchSchedule)
    {
-        label patchi = patchSchedule[patchEvali].patch;
+        const label patchi = schedEval.patch;
+
        fvPatchField<Type>& tgtField = fldBf[patchi];

        if

@@ -106,7 +107,7 @@ void evaluateConstraintTypes(GeometricField<Type, fvPatchField, volMesh>& fld)
            && polyPatch::constraintType(tgtField.patch().patch().type())
        )
        {
-            if (patchSchedule[patchEvali].init)
+            if (schedEval.init)
            {
                tgtField.initEvaluate(Pstream::commsTypes::scheduled);
            }


@@ -224,7 +224,7 @@ void Foam::Pstream::exchange
        }
    }

-    if (UPstream::maxCommsSize <= int(sizeof(T)))
+    if (UPstream::maxCommsSize <= 0)
    {
        // Do the exchanging in one go
        exchangeContainer<Container, T>
@@ -244,38 +244,41 @@ void Foam::Pstream::exchange
        // guaranteed that some processor's sending size is some other
        // processor's receive size. Also we can ignore any local comms.

-        // We need to send bytes so the number of iterations:
+        // We need to send chunks so the number of iterations:
        //  maxChunkSize                        iterations
        //  ------------                        ----------
        //  0                                   0
        //  1..maxChunkSize                     1
        //  maxChunkSize+1..2*maxChunkSize      2
-        //  etc.
+        //  ...

-        const label maxChunkSize(UPstream::maxCommsSize/sizeof(T));
+        const label maxChunkSize
+        (
+            max
+            (
+                static_cast<label>(1),
+                static_cast<label>(UPstream::maxCommsSize/sizeof(T))
+            )
+        );

-        label nIter(0);
+        label nChunks(0);
        {
-            label nSendMax = 0;
+            // Get max send count (elements)
            forAll(sendBufs, proci)
            {
                if (proci != Pstream::myProcNo(comm))
                {
-                    nSendMax = max(nSendMax, sendBufs[proci].size());
+                    nChunks = max(nChunks, sendBufs[proci].size());
                }
            }

-            if (nSendMax)
+            // Convert from send count (elements) to number of chunks.
+            // Can normally calculate with (count-1), but add some safety
+            if (nChunks)
            {
-                nIter = 1 + ((nSendMax-1)/maxChunkSize);
+                nChunks = 1 + (nChunks/maxChunkSize);
            }
-            reduce(nIter, maxOp<label>(), tag, comm);
+            reduce(nChunks, maxOp<label>(), tag, comm);
+
+            /// Info<< "send " << nSendMax << " elements ("
+            ///     << (nSendMax*sizeof(T)) << " bytes) in " << nIter
+            ///     << " iterations of " << maxChunkSize << " chunks ("
+            ///     << (maxChunkSize*sizeof(T)) << " bytes) maxCommsSize:"
+            ///     << Pstream::maxCommsSize << endl;
        }

        labelList nRecv(sendBufs.size());

@@ -286,7 +289,7 @@ void Foam::Pstream::exchange
        List<const char*> charPtrSend(sendBufs.size());
        List<char*> charPtrRecv(sendBufs.size());

-        for (label iter = 0; iter < nIter; ++iter)
+        for (label iter = 0; iter < nChunks; ++iter)
        {
            forAll(sendBufs, proci)
            {
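
The same chunking now drives Pstream::exchange, with the new convention that maxCommsSize <= 0 means "exchange in one go". A minimal driver in the spirit of the test above (a sketch; it assumes the Pstream::exchange template with defaulted tag/comm arguments, as declared in OpenFOAM and used in this file):

    // Force chunked transfers by lowering maxCommsSize (sketch)
    List<labelList> sendBufs(Pstream::nProcs());
    List<labelList> recvBufs;

    forAll(sendBufs, proci)
    {
        if (proci != Pstream::myProcNo())
        {
            sendBufs[proci] = identity(100);  // 100 labels per peer
        }
    }

    Pstream::maxCommsSize = 33;  // bytes; <= 0 would mean "in one go"
    Pstream::exchange<labelList, label>(sendBufs, recvBufs);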


@@ -440,16 +440,18 @@ void Foam::GeometricField<Type, PatchField, GeoMesh>::Boundary::evaluate()
        const lduSchedule& patchSchedule =
            bmesh_.mesh().globalData().patchSchedule();

-        forAll(patchSchedule, patchEvali)
+        for (const auto& schedEval : patchSchedule)
        {
-            if (patchSchedule[patchEvali].init)
+            const label patchi = schedEval.patch;
+
+            if (schedEval.init)
            {
-                this->operator[](patchSchedule[patchEvali].patch)
+                this->operator[](patchi)
                    .initEvaluate(Pstream::commsTypes::scheduled);
            }
            else
            {
-                this->operator[](patchSchedule[patchEvali].patch)
+                this->operator[](patchi)
                    .evaluate(Pstream::commsTypes::scheduled);
            }
        }


@@ -6,7 +6,7 @@
    \\/     M anipulation  |
 -------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
-    Copyright (C) 2019 OpenCFD Ltd.
+    Copyright (C) 2019-2022 OpenCFD Ltd.
 -------------------------------------------------------------------------------
 License
    This file is part of OpenFOAM.

@@ -148,13 +148,13 @@ void Foam::LduMatrix<Type, DType, LUType>::updateMatrixInterfaces
    const lduSchedule& patchSchedule = this->patchSchedule();

    // Loop over all the "normal" interfaces relating to standard patches
-    forAll(patchSchedule, i)
+    for (const auto& schedEval : patchSchedule)
    {
-        label interfacei = patchSchedule[i].patch;
+        const label interfacei = schedEval.patch;

        if (interfaces_.set(interfacei))
        {
-            if (patchSchedule[i].init)
+            if (schedEval.init)
            {
                interfaces_[interfacei].initInterfaceMatrixUpdate
                (


@@ -126,17 +126,16 @@ Foam::labelListList Foam::GAMGProcAgglomeration::globalCellCells
    const lduAddressing& addr = mesh.lduAddr();
    lduInterfacePtrsList interfaces = mesh.interfaces();

-    const label myProcID = Pstream::myProcNo(mesh.comm());
+    const label myProcID = UPstream::myProcNo(mesh.comm());

-    globalIndex globalNumbering
+    const globalIndex globalNumbering
    (
        addr.size(),
-        Pstream::msgType(),
        mesh.comm(),
-        Pstream::parRun()
+        UPstream::parRun()
    );

-    labelList globalIndices
+    const labelList globalIndices
    (
        identity
        (

@@ -163,9 +162,9 @@ Foam::labelListList Foam::GAMGProcAgglomeration::globalCellCells
        }
    }

-    if (Pstream::parRun())
+    if (UPstream::parRun())
    {
-        Pstream::waitRequests(nReq);
+        UPstream::waitRequests(nReq);
    }

    forAll(interfaces, inti)
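
The waitRequests change is part of a broader Pstream-to-UPstream cleanup: request bookkeeping lives in UPstream, so calling it there avoids the Pstream alias. The surrounding pattern, for orientation (a sketch; it assumes the UPstream request API used above, with nReq taken as a checkpoint before the non-blocking calls are posted):

    // Non-blocking receive/send bracket (sketch)
    const label nReq = UPstream::nRequests();  // checkpoint

    // ... post IPstream::read / OPstream::write with
    //     Pstream::commsTypes::nonBlocking ...

    if (UPstream::parRun())
    {
        // Wait for everything posted since the checkpoint
        UPstream::waitRequests(nReq);
    }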


@@ -93,8 +93,8 @@ void Foam::pointBoundaryMesh::calcGeometry()
    if
    (
-        Pstream::defaultCommsType == Pstream::commsTypes::blocking
-     || Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
+        pBufs.commsType() == Pstream::commsTypes::blocking
+     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        forAll(*this, patchi)

@@ -109,18 +109,18 @@ void Foam::pointBoundaryMesh::calcGeometry()
            operator[](patchi).calcGeometry(pBufs);
        }
    }
-    else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
+    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

        // Dummy.
        pBufs.finishedSends();

-        forAll(patchSchedule, patchEvali)
+        for (const auto& schedEval : patchSchedule)
        {
-            label patchi = patchSchedule[patchEvali].patch;
+            const label patchi = schedEval.patch;

-            if (patchSchedule[patchEvali].init)
+            if (schedEval.init)
            {
                operator[](patchi).initGeometry(pBufs);
            }

@@ -139,8 +139,8 @@ void Foam::pointBoundaryMesh::movePoints(const pointField& p)
    if
    (
-        Pstream::defaultCommsType == Pstream::commsTypes::blocking
-     || Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
+        pBufs.commsType() == Pstream::commsTypes::blocking
+     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        forAll(*this, patchi)

@@ -155,18 +155,18 @@ void Foam::pointBoundaryMesh::movePoints(const pointField& p)
            operator[](patchi).movePoints(pBufs, p);
        }
    }
-    else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
+    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

        // Dummy.
        pBufs.finishedSends();

-        forAll(patchSchedule, patchEvali)
+        for (const auto& schedEval : patchSchedule)
        {
-            label patchi = patchSchedule[patchEvali].patch;
+            const label patchi = schedEval.patch;

-            if (patchSchedule[patchEvali].init)
+            if (schedEval.init)
            {
                operator[](patchi).initMovePoints(pBufs, p);
            }

@@ -185,8 +185,8 @@ void Foam::pointBoundaryMesh::updateMesh()
    if
    (
-        Pstream::defaultCommsType == Pstream::commsTypes::blocking
-     || Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
+        pBufs.commsType() == Pstream::commsTypes::blocking
+     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        forAll(*this, patchi)

@@ -201,18 +201,18 @@ void Foam::pointBoundaryMesh::updateMesh()
            operator[](patchi).updateMesh(pBufs);
        }
    }
-    else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
+    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

        // Dummy.
        pBufs.finishedSends();

-        forAll(patchSchedule, patchEvali)
+        for (const auto& schedEval : patchSchedule)
        {
-            label patchi = patchSchedule[patchEvali].patch;
+            const label patchi = schedEval.patch;

-            if (patchSchedule[patchEvali].init)
+            if (schedEval.init)
            {
                operator[](patchi).initUpdateMesh(pBufs);
            }


@@ -267,8 +267,8 @@ void Foam::polyBoundaryMesh::calcGeometry()
    if
    (
-        Pstream::defaultCommsType == Pstream::commsTypes::blocking
-     || Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
+        pBufs.commsType() == Pstream::commsTypes::blocking
+     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        forAll(*this, patchi)

@@ -283,18 +283,18 @@ void Foam::polyBoundaryMesh::calcGeometry()
            operator[](patchi).calcGeometry(pBufs);
        }
    }
-    else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
+    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

        // Dummy.
        pBufs.finishedSends();

-        for (const auto& patchEval : patchSchedule)
+        for (const auto& schedEval : patchSchedule)
        {
-            const label patchi = patchEval.patch;
+            const label patchi = schedEval.patch;

-            if (patchEval.init)
+            if (schedEval.init)
            {
                operator[](patchi).initGeometry(pBufs);
            }

@@ -1120,8 +1120,8 @@ void Foam::polyBoundaryMesh::movePoints(const pointField& p)
    if
    (
-        Pstream::defaultCommsType == Pstream::commsTypes::blocking
-     || Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
+        pBufs.commsType() == Pstream::commsTypes::blocking
+     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        forAll(*this, patchi)

@@ -1136,18 +1136,18 @@ void Foam::polyBoundaryMesh::movePoints(const pointField& p)
            operator[](patchi).movePoints(pBufs, p);
        }
    }
-    else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
+    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

        // Dummy.
        pBufs.finishedSends();

-        for (const auto& patchEval : patchSchedule)
+        for (const auto& schedEval : patchSchedule)
        {
-            const label patchi = patchEval.patch;
+            const label patchi = schedEval.patch;

-            if (patchEval.init)
+            if (schedEval.init)
            {
                operator[](patchi).initMovePoints(pBufs, p);
            }

@@ -1170,8 +1170,8 @@ void Foam::polyBoundaryMesh::updateMesh()
    if
    (
-        Pstream::defaultCommsType == Pstream::commsTypes::blocking
-     || Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
+        pBufs.commsType() == Pstream::commsTypes::blocking
+     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        forAll(*this, patchi)

@@ -1186,18 +1186,18 @@ void Foam::polyBoundaryMesh::updateMesh()
            operator[](patchi).updateMesh(pBufs);
        }
    }
-    else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
+    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

        // Dummy.
        pBufs.finishedSends();

-        for (const auto& patchEval : patchSchedule)
+        for (const auto& schedEval : patchSchedule)
        {
-            const label patchi = patchEval.patch;
+            const label patchi = schedEval.patch;

-            if (patchEval.init)
+            if (schedEval.init)
            {
                operator[](patchi).initUpdateMesh(pBufs);
            }
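
The defaultCommsType edits in these two boundary-mesh files are behavioural guards, not renames: the dispatch now asks the PstreamBuffers instance how it was constructed, so a caller that builds its buffers with a non-default commsType can no longer be routed down the wrong branch by the global default. A sketch of the idiom (assuming the PstreamBuffers constructor and commsType() accessor used above):

    PstreamBuffers pBufs(Pstream::defaultCommsType);  // or any explicit type

    if
    (
        pBufs.commsType() == Pstream::commsTypes::blocking
     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        // two passes: init on all patches, finishedSends(), then evaluate
    }
    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        // walk the patchSchedule, as in the loops above
    }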


@@ -188,12 +188,6 @@ void Foam::globalIndex::bin
 }

-void Foam::globalIndex::reset(const label localSize)
-{
-    reset(localSize, Pstream::msgType(), UPstream::worldComm, true);
-}
-
 void Foam::globalIndex::reset
 (
    const label localSize,

@@ -209,25 +203,27 @@ void Foam::globalIndex::reset
 void Foam::globalIndex::reset
 (
    const label localSize,
-    const int tag,
    const label comm,
    const bool parallel
 )
 {
+    labelList localLens;
+
    const label len = Pstream::nProcs(comm);

    if (len)
    {
-        // Seed with localSize, zero elsewhere (for non-parallel branch)
-        // NB: can consider UPstream::listGatherValues
-        labelList localLens(len, Zero);
-        localLens[Pstream::myProcNo(comm)] = localSize;
-
-        if (parallel)
+        if (parallel && UPstream::parRun())
        {
-            Pstream::gatherList(localLens, tag, comm);
-            Pstream::scatterList(localLens, tag, comm);
+            localLens = UPstream::listGatherValues(localSize, comm);
+            Pstream::broadcast(localLens, comm);
+        }
+        else
+        {
+            // Non-parallel branch: use localSize on-proc, zero elsewhere
+            localLens.resize(len, Zero);
+            localLens[Pstream::myProcNo(comm)] = localSize;
        }

        reset(localLens, true); // checkOverflow = true


@@ -127,9 +127,14 @@ public:
            enum accessType accType
        );

-        //- Construct from local size.
-        //  Communication with default communicator and message tag.
-        inline explicit globalIndex(const label localSize);
+        //- Construct from local size, using gather/broadcast
+        //- with default/specified communicator if parallel.
+        inline explicit globalIndex
+        (
+            const label localSize,
+            const label comm = UPstream::worldComm,  //!< communicator
+            const bool parallel = UPstream::parRun() //!< use parallel comms
+        );

        //- Construct by gathering local sizes without rescattering.
        //- This 'one-sided' globalIndex will be empty on non-master processes.

@@ -148,18 +153,7 @@ public:
        (
            const label localSize,
            const globalIndex::gatherNone,
-            const label comm = -1  //!< dummy communicator
+            const label comm = -1  //!< no communicator needed
        );

-        //- Construct from local size.
-        //  Communication with given communicator and message tag,
-        //  unless parallel == false
-        inline globalIndex
-        (
-            const label localSize,
-            const int tag,        //!< message tag
-            const label comm,     //!< communicator
-            const bool parallel   //!< use parallel comms
-        );
-
        //- Construct from Istream.

@@ -211,9 +205,14 @@ public:
        //- Write-access to the offsets, for changing after construction
        inline labelList& offsets() noexcept;

-        //- Reset from local size.
-        //  Does communication with default communicator and message tag.
-        void reset(const label localSize);
+        //- Reset from local size, using gather/broadcast
+        //- with default/specified communicator if parallel.
+        void reset
+        (
+            const label localSize,
+            const label comm = UPstream::worldComm,  //!< communicator
+            const bool parallel = UPstream::parRun() //!< use parallel comms
+        );

        //- Reset by gathering local sizes without rescattering.
        //- This 'one-sided' globalIndex will be empty on non-master processes.

@@ -226,23 +225,12 @@ public:
            const label comm = UPstream::worldComm  //!< communicator
        );

-        //- Reset from local size.
-        //  Does communication with given communicator and message tag,
-        //  unless parallel == false
-        void reset
-        (
-            const label localSize,
-            const int tag,        //!< message tag
-            const label comm,     //!< communicator
-            const bool parallel   //!< use parallel comms
-        );
-
        //- Reset from list of local sizes,
        //- with optional check for label overflow.
        //- No communication required
        void reset
        (
-            const labelUList& sizes,
+            const labelUList& localLens,
            const bool checkOverflow = false
        );

@@ -328,15 +316,6 @@ public:
        inline label whichProcID(const label i) const;

-    // Housekeeping
-
-        //- Same as localStart
-        label offset(const label proci) const
-        {
-            return localStart(proci);
-        }
-
    // Iteration

        //- Forward input iterator with const access

@@ -818,6 +797,41 @@ public:
    friend Istream& operator>>(Istream& is, globalIndex& gi);
    friend Ostream& operator<<(Ostream& os, const globalIndex& gi);

+
+    // Housekeeping
+
+        //- Construct from local size, using gather/broadcast
+        //- with default/specified communicator if parallel.
+        FOAM_DEPRECATED_FOR(2022-03, "construct without message tag")
+        globalIndex
+        (
+            const label localSize,
+            const int tag,        // message tag (unused)
+            const label comm,     // communicator
+            const bool parallel   // use parallel comms
+        )
+        {
+            reset(localSize, comm, parallel);
+        }
+
+        //- Reset from local size, using gather/broadcast
+        //- with default/specified communicator if parallel.
+        FOAM_DEPRECATED_FOR(2022-03, "reset without message tag")
+        void reset
+        (
+            const label localSize,
+            const int tag,        // message tag (unused)
+            const label comm,     // communicator
+            const bool parallel   // use parallel comms
+        )
+        {
+            reset(localSize, comm, parallel);
+        }
+
+        //- Prefer localStart() to avoid confusing with offsets()
+        FOAM_DEPRECATED_FOR(2022-02, "use localStart()")
+        label offset(const label proci) const { return localStart(proci); }
 };
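
Migration at a glance (a sketch following the declarations above; the variable names are illustrative only):

    const label localSize = 100;

    // New tag-less forms
    globalIndex gi1(localSize);  // worldComm, parRun() defaults
    globalIndex gi2(localSize, UPstream::worldComm, UPstream::parRun());

    gi1.reset(localSize, UPstream::worldComm);

    // Deprecated shim: still compiles, the tag is ignored, and
    // FOAM_DEPRECATED_FOR emits a compiler warning
    globalIndex gi3(localSize, UPstream::msgType(), UPstream::worldComm, true);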


@@ -77,9 +77,14 @@ inline Foam::globalIndex::globalIndex
 }

-inline Foam::globalIndex::globalIndex(const label localSize)
+inline Foam::globalIndex::globalIndex
+(
+    const label localSize,
+    const label comm,
+    const bool parallel
+)
 {
-    reset(localSize);
+    reset(localSize, comm, parallel);
 }

@@ -109,18 +114,6 @@ inline Foam::globalIndex::globalIndex
 }

-inline Foam::globalIndex::globalIndex
-(
-    const label localSize,
-    const int tag,
-    const label comm,
-    const bool parallel
-)
-{
-    reset(localSize, tag, comm, parallel);
-}
-
 // * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

 inline bool Foam::globalIndex::empty() const


@@ -296,7 +296,7 @@ bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
            wordList worlds(numprocs);
            worlds[Pstream::myProcNo()] = world;
            Pstream::gatherList(worlds);
-            Pstream::scatterList(worlds);
+            Pstream::broadcast(worlds);

            // Compact
            if (Pstream::master())

@@ -315,8 +315,8 @@ bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
                    worldIDs_[proci] = allWorlds_.find(world);
                }
            }
-            Pstream::scatter(allWorlds_);
-            Pstream::scatter(worldIDs_);
+            Pstream::broadcast(allWorlds_);
+            Pstream::broadcast(worldIDs_);

            DynamicList<label> subRanks;
            forAll(worlds, proci)


@@ -54,8 +54,9 @@ namespace Foam
 namespace PstreamDetail
 {

+// MPI_Bcast, using root=0
 template<class Type>
-void allBroadcast
+void broadcast0
 (
    Type* values,
    int count,

@@ -63,6 +64,18 @@ void allBroadcast
    const label communicator
 );

+// MPI_Reduce, using root=0
+template<class Type>
+void reduce0
+(
+    Type* values,
+    int count,
+    MPI_Datatype datatype,
+    MPI_Op optype,
+    const label communicator
+);
+
+// MPI_Allreduce
 template<class Type>
 void allReduce
 (

@@ -73,6 +86,8 @@ void allReduce
    const label communicator
 );

+// MPI_Iallreduce
 template<class Type>
 void iallReduce
 (


@@ -33,7 +33,7 @@ License
 // * * * * * * * * * * * * * * * Global Functions  * * * * * * * * * * * * * //

 template<class Type>
-void Foam::PstreamDetail::allBroadcast
+void Foam::PstreamDetail::broadcast0
 (
    Type* values,
    int count,

@@ -62,6 +62,55 @@ void Foam::PstreamDetail::allBroadcast
 }

+template<class Type>
+void Foam::PstreamDetail::reduce0
+(
+    Type* values,
+    int count,
+    MPI_Datatype datatype,
+    MPI_Op optype,
+    const label communicator
+)
+{
+    if (!UPstream::parRun())
+    {
+        return;
+    }
+
+    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
+    {
+        Pout<< "** reducing:";
+        if (count == 1)
+        {
+            Pout<< (*values);
+        }
+        else
+        {
+            Pout<< UList<Type>(values, count);
+        }
+        Pout<< " with comm:" << communicator
+            << " warnComm:" << UPstream::warnComm << endl;
+        error::printStack(Pout);
+    }
+
+    profilingPstream::beginTiming();
+
+    // const int retval =
+    MPI_Reduce
+    (
+        MPI_IN_PLACE,
+        values,
+        count,
+        datatype,
+        optype,
+        0,  // (root process) is master == UPstream::masterNo()
+        PstreamGlobals::MPICommunicators_[communicator]
+    );
+
+    profilingPstream::addReduceTime();
+}
+
+
 template<class Type>
 void Foam::PstreamDetail::allReduce
 (
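
One caveat worth noting on the new reduce0 (an editorial observation, not part of the commit): the MPI standard only sanctions MPI_IN_PLACE as the sendbuf of MPI_Reduce on the root rank; non-root ranks are expected to pass their data as sendbuf, with recvbuf ignored. A standard-conforming sketch in plain MPI:

    // Root-only, in-place reduction semantics (plain MPI sketch)
    #include <mpi.h>

    void reduceToRoot(long* values, int count, MPI_Comm comm)
    {
        int rank = 0;
        MPI_Comm_rank(comm, &rank);

        if (rank == 0)
        {
            // Root: the result overwrites 'values' in place
            MPI_Reduce(MPI_IN_PLACE, values, count, MPI_LONG, MPI_SUM, 0, comm);
        }
        else
        {
            // Non-root: recvbuf is ignored and may be null
            MPI_Reduce(values, nullptr, count, MPI_LONG, MPI_SUM, 0, comm);
        }
    }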


@@ -6,7 +6,7 @@
    \\/     M anipulation  |
 -------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
-    Copyright (C) 2018 OpenCFD Ltd.
+    Copyright (C) 2018-2022 OpenCFD Ltd.
 -------------------------------------------------------------------------------
 License
    This file is part of OpenFOAM.

@@ -423,16 +423,16 @@ void Foam::motionSmootherAlgo::setDisplacementPatchFields
    // fixedValue bc's first.

    labelHashSet adaptPatchSet(patchIDs);

-    const lduSchedule& patchSchedule = displacement.mesh().globalData().
-        patchSchedule();
+    const lduSchedule& patchSchedule =
+        displacement.mesh().globalData().patchSchedule();

-    forAll(patchSchedule, patchEvalI)
+    for (const auto& schedEval : patchSchedule)
    {
-        const label patchi = patchSchedule[patchEvalI].patch;
+        const label patchi = schedEval.patch;

        if (!adaptPatchSet.found(patchi))
        {
-            if (patchSchedule[patchEvalI].init)
+            if (schedEval.init)
            {
                displacementBf[patchi]
                    .initEvaluate(Pstream::commsTypes::scheduled);

@@ -575,17 +575,16 @@ void Foam::motionSmootherAlgo::correctBoundaryConditions
    const lduSchedule& patchSchedule = mesh_.globalData().patchSchedule();

-    pointVectorField::Boundary& displacementBf =
-        displacement.boundaryFieldRef();
+    auto& displacementBf = displacement.boundaryFieldRef();

    // 1. evaluate on adaptPatches
-    forAll(patchSchedule, patchEvalI)
+    for (const auto& schedEval : patchSchedule)
    {
-        const label patchi = patchSchedule[patchEvalI].patch;
+        const label patchi = schedEval.patch;

        if (adaptPatchSet.found(patchi))
        {
-            if (patchSchedule[patchEvalI].init)
+            if (schedEval.init)
            {
                displacementBf[patchi]
                    .initEvaluate(Pstream::commsTypes::blocking);

@@ -600,13 +599,13 @@ void Foam::motionSmootherAlgo::correctBoundaryConditions
    // 2. evaluate on non-AdaptPatches

-    forAll(patchSchedule, patchEvalI)
+    for (const auto& schedEval : patchSchedule)
    {
-        const label patchi = patchSchedule[patchEvalI].patch;
+        const label patchi = schedEval.patch;

        if (!adaptPatchSet.found(patchi))
        {
-            if (patchSchedule[patchEvalI].init)
+            if (schedEval.init)
            {
                displacementBf[patchi]
                    .initEvaluate(Pstream::commsTypes::blocking);


@@ -5,7 +5,7 @@
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
 -------------------------------------------------------------------------------
-    Copyright (C) 2016-2020 OpenCFD Ltd.
+    Copyright (C) 2016-2022 OpenCFD Ltd.
 -------------------------------------------------------------------------------
 License
    This file is part of OpenFOAM.

@@ -91,9 +91,10 @@ void Foam::functionObjects::mapFields::evaluateConstraintTypes
    const lduSchedule& patchSchedule =
        fld.mesh().globalData().patchSchedule();

-    forAll(patchSchedule, patchEvali)
+    for (const auto& schedEval : patchSchedule)
    {
-        label patchi = patchSchedule[patchEvali].patch;
+        const label patchi = schedEval.patch;
+
        fvPatchField<Type>& tgtField = fldBf[patchi];

        if

@@ -102,7 +103,7 @@ void Foam::functionObjects::mapFields::evaluateConstraintTypes
            && polyPatch::constraintType(tgtField.patch().patch().type())
        )
        {
-            if (patchSchedule[patchEvali].init)
+            if (schedEval.init)
            {
                tgtField.initEvaluate(Pstream::commsTypes::scheduled);
            }


@@ -480,13 +480,7 @@ void Foam::decompositionMethod::calcCellCells
    // Create global cell numbers
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~

-    const globalIndex globalAgglom
-    (
-        nLocalCoarse,
-        Pstream::msgType(),
-        Pstream::worldComm,
-        parallel
-    );
+    const globalIndex globalAgglom(nLocalCoarse, Pstream::worldComm, parallel);

    // Get agglomerate owner on other side of coupled faces

@@ -679,13 +673,7 @@ void Foam::decompositionMethod::calcCellCells
    // Create global cell numbers
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~

-    const globalIndex globalAgglom
-    (
-        nLocalCoarse,
-        Pstream::msgType(),
-        Pstream::worldComm,
-        parallel
-    );
+    const globalIndex globalAgglom(nLocalCoarse, Pstream::worldComm, parallel);

    // Get agglomerate owner on other side of coupled faces