ENH: avoid prior communication when using mapDistribute

- in most cases we can simply construct the mapDistribute from the
  sendMap alone and have it take care of the communication and
  addressing for the corresponding constructMap.

  This removes duplicated code, some of which also used much less
  efficient mechanisms (eg, combineReduce on a list of lists, or an
  allGatherList of the send sizes), and it reduces the number of
  places where Pstream::exchange/exchangeSizes is called
  (see the sketch below).
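
  The resulting pattern, condensed here into a minimal sketch from the
  diffs below (toProc is the list used by the buildMap functions; other
  names are purely illustrative):

      // Count how many items go to each processor
      labelList nSend(Pstream::nProcs(), Zero);
      forAll(toProc, i)
      {
          nSend[toProc[i]]++;
      }

      // Size and fill the per-processor send map
      labelListList sendMap(Pstream::nProcs());
      forAll(sendMap, proci)
      {
          sendMap[proci].resize_nocopy(nSend[proci]);
          nSend[proci] = 0;
      }
      forAll(toProc, i)
      {
          const label proci = toProc[i];
          sendMap[proci][nSend[proci]++] = i;
      }

      // The constructor exchanges the sizes and builds the constructMap
      // (local transfers first) itself - no allGatherList or explicit
      // Pstream::exchangeSizes needed at the call site
      mapDistribute map(std::move(sendMap));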

ENH: reduce communication in turbulentDFSEMInlet

- was doing an allGatherList to populate a mapDistribute.
  Now simply use the PstreamBuffers mechanisms directly
  (condensed sketch below).
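
  A minimal sketch of the direct exchange, condensed from the
  turbulentDFSEMInlet patch below (eddies_, sendMap and
  overlappingEddies are the names used there):

      PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);

      // Send the overlapping eddies to the other processors
      forAll(sendMap, proci)
      {
          if (proci != Pstream::myProcNo() && !sendMap[proci].empty())
          {
              UOPstream os(proci, pBufs);
              os << UIndirectList<eddy>(eddies_, sendMap[proci]);
          }
      }

      pBufs.finishedSends();

      // Only consume from processors that actually sent something
      for (const int proci : pBufs.allProcs())
      {
          if (proci != Pstream::myProcNo() && pBufs.recvDataCount(proci))
          {
              UIPstream is(proci, pBufs);
              is >> overlappingEddies[proci];
          }
      }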
Author: Mark Olesen
Date:   2023-02-14 11:24:22 +01:00
parent  1ce7a62209
commit  fb69a54bc3

12 changed files with 105 additions and 534 deletions


@@ -67,54 +67,24 @@ void testMapDistribute()
     labelList nSend(Pstream::nProcs(), Zero);
     forAll(complexData, i)
     {
-        const label procI = complexData[i].first();
-        nSend[procI]++;
+        const label proci = complexData[i].first();
+        nSend[proci]++;
     }

     // Collect items to be sent
     labelListList sendMap(Pstream::nProcs());
-    forAll(sendMap, procI)
+    forAll(sendMap, proci)
     {
-        sendMap[procI].setSize(nSend[procI]);
+        sendMap[proci].resize_nocopy(nSend[proci]);
+        nSend[proci] = 0;
     }
-    nSend = 0;
     forAll(complexData, i)
     {
-        const label procI = complexData[i].first();
-        sendMap[procI][nSend[procI]++] = i;
+        const label proci = complexData[i].first();
+        sendMap[proci][nSend[proci]++] = i;
     }

-    // Sync how many to send
-    labelList nRecv;
-    Pstream::exchangeSizes(sendMap, nRecv);
-
-    // Collect items to be received
-    labelListList recvMap(Pstream::nProcs());
-    forAll(recvMap, procI)
-    {
-        recvMap[procI].setSize(nRecv[procI]);
-    }
-
-    label constructSize = 0;
-
-    // Construct with my own elements first
-    forAll(recvMap[Pstream::myProcNo()], i)
-    {
-        recvMap[Pstream::myProcNo()][i] = constructSize++;
-    }
-
-    // Construct from other processors
-    forAll(recvMap, procI)
-    {
-        if (procI != Pstream::myProcNo())
-        {
-            forAll(recvMap[procI], i)
-            {
-                recvMap[procI][i] = constructSize++;
-            }
-        }
-    }
-
-    // Construct distribute map (destructively)
-    mapDistribute map(constructSize, std::move(sendMap), std::move(recvMap));
+    mapDistribute map(std::move(sendMap));

     // Distribute complexData
     map.distribute(complexData);


@@ -54,7 +54,6 @@ Foam::DistributedDelaunayMesh<Triangulation>::buildMap
     forAll(toProc, i)
     {
         label proci = toProc[i];
         nSend[proci]++;
     }

@@ -64,8 +63,7 @@ Foam::DistributedDelaunayMesh<Triangulation>::buildMap
     forAll(nSend, proci)
     {
-        sendMap[proci].setSize(nSend[proci]);
+        sendMap[proci].resize_nocopy(nSend[proci]);
         nSend[proci] = 0;
     }

@@ -73,49 +71,10 @@ Foam::DistributedDelaunayMesh<Triangulation>::buildMap
     forAll(toProc, i)
     {
         label proci = toProc[i];
         sendMap[proci][nSend[proci]++] = i;
     }

-    // 4. Send over how many I need to receive
-    labelList recvSizes;
-    Pstream::exchangeSizes(sendMap, recvSizes);
-
-    // Determine receive map
-    // ~~~~~~~~~~~~~~~~~~~~~
-
-    labelListList constructMap(Pstream::nProcs());
-
-    // Local transfers first
-    constructMap[Pstream::myProcNo()] = identity
-    (
-        sendMap[Pstream::myProcNo()].size()
-    );
-
-    label constructSize = constructMap[Pstream::myProcNo()].size();
-
-    forAll(constructMap, proci)
-    {
-        if (proci != Pstream::myProcNo())
-        {
-            label nRecv = recvSizes[proci];
-            constructMap[proci].setSize(nRecv);
-
-            for (label i = 0; i < nRecv; i++)
-            {
-                constructMap[proci][i] = constructSize++;
-            }
-        }
-    }
-
-    return autoPtr<mapDistribute>::New
-    (
-        constructSize,
-        std::move(sendMap),
-        std::move(constructMap)
-    );
+    return autoPtr<mapDistribute>::New(std::move(sendMap));
 }


@@ -59,7 +59,6 @@ Foam::autoPtr<Foam::mapDistribute> Foam::backgroundMeshDecomposition::buildMap
     forAll(toProc, i)
     {
         label proci = toProc[i];
         nSend[proci]++;
     }

@@ -69,8 +68,7 @@ Foam::autoPtr<Foam::mapDistribute> Foam::backgroundMeshDecomposition::buildMap
     forAll(nSend, proci)
     {
-        sendMap[proci].setSize(nSend[proci]);
+        sendMap[proci].resize_nocopy(nSend[proci]);
         nSend[proci] = 0;
     }

@@ -78,49 +76,10 @@ Foam::autoPtr<Foam::mapDistribute> Foam::backgroundMeshDecomposition::buildMap
     forAll(toProc, i)
     {
         label proci = toProc[i];
         sendMap[proci][nSend[proci]++] = i;
     }

-    // 4. Send over how many I need to receive
-    labelList recvSizes;
-    Pstream::exchangeSizes(sendMap, recvSizes);
-
-    // Determine receive map
-    // ~~~~~~~~~~~~~~~~~~~~~
-
-    labelListList constructMap(Pstream::nProcs());
-
-    // Local transfers first
-    constructMap[Pstream::myProcNo()] = identity
-    (
-        sendMap[Pstream::myProcNo()].size()
-    );
-
-    label constructSize = constructMap[Pstream::myProcNo()].size();
-
-    forAll(constructMap, proci)
-    {
-        if (proci != Pstream::myProcNo())
-        {
-            label nRecv = recvSizes[proci];
-            constructMap[proci].setSize(nRecv);
-
-            for (label i = 0; i < nRecv; i++)
-            {
-                constructMap[proci][i] = constructSize++;
-            }
-        }
-    }
-
-    return autoPtr<mapDistribute>::New
-    (
-        constructSize,
-        std::move(sendMap),
-        std::move(constructMap)
-    );
+    return autoPtr<mapDistribute>::New(std::move(sendMap));
 }


@@ -146,7 +146,6 @@ Foam::parLagrangianDistributor::distributeLagrangianPositions
     labelListList subMap;

     // Allocate transfer buffers
     PstreamBuffers pBufs(Pstream::commsTypes::nonBlocking);

@@ -283,33 +282,24 @@ Foam::parLagrangianDistributor::distributeLagrangianPositions
         lpi.rename(cloudName);
     }

-    // Work the send indices (subMap) into a mapDistributeBase
-    labelListList sizes(Pstream::nProcs());
-    labelList& nsTransPs = sizes[Pstream::myProcNo()];
-    nsTransPs.setSize(Pstream::nProcs());
-    forAll(subMap, sendProcI)
-    {
-        nsTransPs[sendProcI] = subMap[sendProcI].size();
-    }
-
-    // Send sizes across. Note: blocks.
-    Pstream::combineReduce(sizes, Pstream::listEq());
-
-    labelListList constructMap(Pstream::nProcs());
+    // Until now (FEB-2023) we have always used processor ordering for the
+    // construct map (whereas mapDistribute has local transfers first),
+    // so we'll stick with that for now, but can likely just use the subMap
+    // directly with mapDistribute and have it determine the constructMap.
+
+    labelList recvSizes;
+    Pstream::exchangeSizes(subMap, recvSizes);
+
     label constructSize = 0;
-    forAll(constructMap, procI)
+    labelListList constructMap(Pstream::nProcs());
+    forAll(constructMap, proci)
     {
-        const label nRecv = sizes[procI][UPstream::myProcNo()];
-
-        labelList& map = constructMap[procI];
-        map.setSize(nRecv);
-        forAll(map, i)
-        {
-            map[i] = constructSize++;
-        }
+        const label len = recvSizes[proci];
+        constructMap[proci] = identity(len, constructSize);
+        constructSize += len;
     }

     return autoPtr<mapDistributeBase>::New
     (
         constructSize,


@@ -450,13 +450,16 @@ void Foam::turbulentDFSEMInletFvPatchVectorField::convectEddies
         }
     }

-    reduce(nRecycled, sumOp<label>());
-
-    if (debug && nRecycled > 0)
+    if (debug)
     {
-        Info<< "Patch: " << patch().patch().name()
-            << " recycled " << nRecycled << " eddies"
-            << endl;
+        reduce(nRecycled, sumOp<label>());
+
+        if (nRecycled)
+        {
+            Info<< "Patch: " << patch().patch().name()
+                << " recycled " << nRecycled << " eddies"
+                << endl;
+        }
     }
 }
@@ -492,11 +495,11 @@ void Foam::turbulentDFSEMInletFvPatchVectorField::calcOverlappingProcEddies
     Pstream::allGatherList(patchBBs);

     // Per processor indices into all segments to send
-    List<DynamicList<label>> dynSendMap(Pstream::nProcs());
+    List<DynamicList<label>> sendMap(UPstream::nProcs());

+    // Collect overlapping eddies
     forAll(eddies_, i)
     {
-        // Collect overlapping eddies
         const eddy& e = eddies_[i];

         // Eddy bounds
@@ -505,91 +508,41 @@ void Foam::turbulentDFSEMInletFvPatchVectorField::calcOverlappingProcEddies
         ebb.min() += x;
         ebb.max() += x;

-        forAll(patchBBs, procI)
+        forAll(patchBBs, proci)
         {
             // Not including intersection with local patch
-            if (procI != Pstream::myProcNo())
+            if (proci != Pstream::myProcNo())
             {
-                if (ebb.overlaps(patchBBs[procI]))
+                if (ebb.overlaps(patchBBs[proci]))
                 {
-                    dynSendMap[procI].append(i);
+                    sendMap[proci].push_back(i);
                 }
             }
         }
     }

-    labelListList sendMap(Pstream::nProcs());
-    forAll(sendMap, procI)
+    PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
+
+    forAll(sendMap, proci)
     {
-        sendMap[procI].transfer(dynSendMap[procI]);
-    }
-
-    // Send the number of eddies for local processors to receive
-    labelListList sendSizes(Pstream::nProcs());
-    sendSizes[Pstream::myProcNo()].setSize(Pstream::nProcs());
-    forAll(sendMap, procI)
-    {
-        sendSizes[Pstream::myProcNo()][procI] = sendMap[procI].size();
-    }
-    Pstream::allGatherList(sendSizes);
-
-    // Determine order of receiving
-    labelListList constructMap(Pstream::nProcs());
-
-    // Local segment first
-    constructMap[Pstream::myProcNo()] = identity
-    (
-        sendMap[Pstream::myProcNo()].size()
-    );
-
-    label segmentI = constructMap[Pstream::myProcNo()].size();
-    forAll(constructMap, procI)
-    {
-        if (procI != Pstream::myProcNo())
+        if (proci != Pstream::myProcNo() && !sendMap[proci].empty())
         {
-            // What I need to receive is what other processor is sending to me
-            const label nRecv = sendSizes[procI][Pstream::myProcNo()];
-            constructMap[procI].setSize(nRecv);
-
-            for (label i = 0; i < nRecv; ++i)
-            {
-                constructMap[procI][i] = segmentI++;
-            }
+            UOPstream os(proci, pBufs);
+
+            os << UIndirectList<eddy>(eddies_, sendMap[proci]);
         }
     }

-    mapDistribute map(segmentI, std::move(sendMap), std::move(constructMap));
-
-    PstreamBuffers pBufs(Pstream::commsTypes::nonBlocking);
-
-    for (const int domain : Pstream::allProcs())
-    {
-        const labelList& sendElems = map.subMap()[domain];
-
-        if (domain != Pstream::myProcNo() && sendElems.size())
-        {
-            List<eddy> subEddies(UIndirectList<eddy>(eddies_, sendElems));
-
-            UOPstream toDomain(domain, pBufs);
-            toDomain<< subEddies;
-        }
-    }
-
-    // Start receiving
     pBufs.finishedSends();

-    // Consume
-    for (const int domain : Pstream::allProcs())
+    for (const int proci : pBufs.allProcs())
     {
-        const labelList& recvElems = map.constructMap()[domain];
-
-        if (domain != Pstream::myProcNo() && recvElems.size())
+        if (proci != Pstream::myProcNo() && pBufs.recvDataCount(proci))
        {
-            UIPstream str(domain, pBufs);
-            {
-                str >> overlappingEddies[domain];
-            }
+            UIPstream is(proci, pBufs);
+
+            is >> overlappingEddies[proci];
         }
     }
@@ -1001,10 +954,8 @@ void Foam::turbulentDFSEMInletFvPatchVectorField::updateCoeffs()
         List<List<eddy>> overlappingEddies(Pstream::nProcs());
         calcOverlappingProcEddies(overlappingEddies);

-        forAll(overlappingEddies, procI)
+        for (const List<eddy>& eddies : overlappingEddies)
         {
-            const List<eddy>& eddies = overlappingEddies[procI];
-
             if (eddies.size())
             {
                 forAll(U, faceI)


@@ -822,8 +822,7 @@ void Foam::InteractionLists<ParticleType>::buildMap
     forAll(nSend, proci)
     {
-        sendMap[proci].setSize(nSend[proci]);
+        sendMap[proci].resize_nocopy(nSend[proci]);
         nSend[proci] = 0;
     }

@@ -831,52 +830,10 @@ void Foam::InteractionLists<ParticleType>::buildMap
     forAll(toProc, i)
     {
         label proci = toProc[i];
         sendMap[proci][nSend[proci]++] = i;
     }

-    // 4. Send over how many I need to receive
-    labelList recvSizes;
-    Pstream::exchangeSizes(sendMap, recvSizes);
-
-    // Determine receive map
-    // ~~~~~~~~~~~~~~~~~~~~~
-
-    labelListList constructMap(Pstream::nProcs());
-
-    // Local transfers first
-    constructMap[Pstream::myProcNo()] = identity
-    (
-        sendMap[Pstream::myProcNo()].size()
-    );
-
-    label constructSize = constructMap[Pstream::myProcNo()].size();
-
-    forAll(constructMap, proci)
-    {
-        if (proci != Pstream::myProcNo())
-        {
-            const label nRecv = recvSizes[proci];
-            constructMap[proci].setSize(nRecv);
-
-            for (label i = 0; i < nRecv; i++)
-            {
-                constructMap[proci][i] = constructSize++;
-            }
-        }
-    }
-
-    mapPtr.reset
-    (
-        new mapDistribute
-        (
-            constructSize,
-            std::move(sendMap),
-            std::move(constructMap)
-        )
-    );
+    mapPtr.reset(new mapDistribute(std::move(sendMap)));
 }


@@ -344,48 +344,7 @@ Foam::autoPtr<Foam::mapDistribute> Foam::advancingFrontAMI::calcProcMap
         }
     }

-    // Send over how many faces I need to receive
-    labelListList sendSizes(Pstream::nProcs());
-    sendSizes[Pstream::myProcNo()].setSize(Pstream::nProcs());
-    forAll(sendMap, proci)
-    {
-        sendSizes[Pstream::myProcNo()][proci] = sendMap[proci].size();
-    }
-    Pstream::allGatherList(sendSizes);
-
-    // Determine order of receiving
-    labelListList constructMap(Pstream::nProcs());
-
-    // My local segment first
-    constructMap[Pstream::myProcNo()] = identity
-    (
-        sendMap[Pstream::myProcNo()].size()
-    );
-
-    label segmentI = constructMap[Pstream::myProcNo()].size();
-    forAll(constructMap, proci)
-    {
-        if (proci != Pstream::myProcNo())
-        {
-            // What I need to receive is what other processor is sending to me
-            label nRecv = sendSizes[proci][Pstream::myProcNo()];
-            constructMap[proci].setSize(nRecv);
-
-            for (label i = 0; i < nRecv; ++i)
-            {
-                constructMap[proci][i] = segmentI++;
-            }
-        }
-    }
-
-    return autoPtr<mapDistribute>::New
-    (
-        segmentI,           // size after construction
-        std::move(sendMap),
-        std::move(constructMap)
-    );
+    return autoPtr<mapDistribute>::New(std::move(sendMap));
 }


@@ -80,7 +80,6 @@ Foam::autoPtr<Foam::mapDistribute> Foam::nearestFaceAMI::calcFaceMap
     labelListList sendMap(Pstream::nProcs());
     forAll(sendMap, proci)
     {
-        dynSendMap[proci].shrink();
         sendMap[proci].transfer(dynSendMap[proci]);
     }

     if (debug)


@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com \\ / A nd | www.openfoam.com
\\/ M anipulation | \\/ M anipulation |
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
Copyright (C) 2017-2022 OpenCFD Ltd. Copyright (C) 2017-2023 OpenCFD Ltd.
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
License License
This file is part of OpenFOAM. This file is part of OpenFOAM.
@@ -511,7 +511,7 @@ Foam::autoPtr<Foam::mapDistribute> Foam::processorLODs::box::createMap
                 allIDs.insert(elems);
             }

-            sendElems[proci] = allIDs.toc();
+            sendElems[proci] = allIDs.sortedToc();
         }
     }
@@ -530,59 +530,7 @@ Foam::autoPtr<Foam::mapDistribute> Foam::processorLODs::box::createMap
         }
     }

-    return createLODMap(sendElems);
-}
-
-
-Foam::autoPtr<Foam::mapDistribute> Foam::processorLODs::box::createLODMap
-(
-    List<labelList>& sendElems
-) const
-{
-    // Send over how many objects I need to receive
-    const label localProci = Pstream::myProcNo();
-
-    labelListList sendSizes(Pstream::nProcs());
-    sendSizes[localProci].setSize(Pstream::nProcs());
-    forAll(sendElems, proci)
-    {
-        sendSizes[localProci][proci] = sendElems[proci].size();
-    }
-    Pstream::allGatherList(sendSizes);
-
-    // Determine order of receiving
-    labelListList constructMap(Pstream::nProcs());
-
-    // My local segment first
-    constructMap[localProci] = identity(sendElems[localProci].size());
-
-    label segmenti = constructMap[localProci].size();
-    forAll(constructMap, proci)
-    {
-        if (proci != localProci)
-        {
-            // What I need to receive is what other processor is sending to me
-            label nRecv = sendSizes[proci][localProci];
-            constructMap[proci].setSize(nRecv);
-
-            for (label& addr : constructMap[proci])
-            {
-                addr = segmenti++;
-            }
-        }
-    }
-
-    autoPtr<mapDistribute> mapPtr
-    (
-        new mapDistribute
-        (
-            segmenti,           // size after construction
-            std::move(sendElems),
-            std::move(constructMap)
-        )
-    );
-
-    return mapPtr;
+    return autoPtr<mapDistribute>::New(std::move(sendElems));
 }
@@ -618,9 +566,8 @@ Foam::processorLODs::box::box
             treeBoundBox srcBb(srcPoints_);
             srcBb.inflate(0.01);

-            DynamicList<treeBoundBox> newProcBoxes(1);
-            newProcBoxes.append(srcBb);
-            procBoxes.transfer(newProcBoxes);
+            procBoxes.resize(1);
+            procBoxes.front() = srcBb;
         }
     }
 }


@@ -49,7 +49,6 @@ Description
 namespace Foam
 {
 namespace processorLODs
 {

@@ -165,12 +164,6 @@ protected:
             const label nTgtElems
         );

-        //- Use the current list of send elements to create the mapDistribute
-        autoPtr<mapDistribute> createLODMap
-        (
-            List<labelList>& sendElems
-        ) const;
-

 public:

@@ -195,9 +188,6 @@ public:
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

 } // End namespace processorLODs
-
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
-
 } // End namespace Foam

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //


@@ -6,7 +6,7 @@
      \\/     M anipulation  |
 -------------------------------------------------------------------------------
     Copyright (C) 2011-2016 OpenFOAM Foundation
-    Copyright (C) 2015-2022 OpenCFD Ltd.
+    Copyright (C) 2015-2023 OpenCFD Ltd.
 -------------------------------------------------------------------------------
 License
     This file is part of OpenFOAM.

@@ -608,10 +608,9 @@ Foam::distributedTriSurfaceMesh::distributeSegments
     }

     // Convert dynamicList to labelList
-    sendMap.setSize(Pstream::nProcs());
+    sendMap.resize_nocopy(Pstream::nProcs());
     forAll(sendMap, proci)
     {
-        dynSendMap[proci].shrink();
         sendMap[proci].transfer(dynSendMap[proci]);
     }
@@ -875,14 +874,9 @@ Foam::distributedTriSurfaceMesh::calcLocalQueries
     // Pack into distribution map
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    autoPtr<mapDistribute> mapPtr(new mapDistribute(std::move(sendMap)));
+    auto mapPtr = autoPtr<mapDistribute>::New(std::move(sendMap));

     // Send over queries
-    // ~~~~~~~~~~~~~~~~~
     mapPtr().distribute(triangleIndex);

     return mapPtr;

@@ -1119,10 +1113,9 @@ Foam::distributedTriSurfaceMesh::calcLocalQueries
     }

     // Convert dynamicList to labelList
-    sendMap.setSize(Pstream::nProcs());
+    sendMap.resize_nocopy(Pstream::nProcs());
     forAll(sendMap, proci)
     {
-        dynSendMap[proci].shrink();
         sendMap[proci].transfer(dynSendMap[proci]);
     }
@@ -4198,7 +4191,7 @@ void Foam::distributedTriSurfaceMesh::getVolumeType
     labelListList sendMap(Pstream::nProcs());
     {
         // 1. Count
-        labelList nSend(Pstream::nProcs(), 0);
+        labelList nSend(Pstream::nProcs(), Zero);
         forAll(samples, samplei)
         {
             // Find the processors this sample overlaps.

@@ -4224,9 +4217,9 @@ void Foam::distributedTriSurfaceMesh::getVolumeType
         forAll(nSend, proci)
         {
-            sendMap[proci].setSize(nSend[proci]);
+            sendMap[proci].resize_nocopy(nSend[proci]);
+            nSend[proci] = 0;
         }
-        nSend = 0;

         // 2. Fill
         forAll(samples, samplei)

@@ -4236,8 +4229,7 @@ void Foam::distributedTriSurfaceMesh::getVolumeType
             {
                 if (contains(procBb_[proci], samples[samplei]))
                 {
-                    labelList& procSend = sendMap[proci];
-                    procSend[nSend[proci]++] = samplei;
+                    sendMap[proci][nSend[proci]++] = samplei;
                 }
             }
         }
@@ -4525,7 +4517,7 @@ Foam::distributedTriSurfaceMesh::localQueries
     // cheap we do a multi-pass algorithm to save some memory temporarily.

     // 1. Count
-    labelList nSend(Pstream::nProcs(), 0);
+    labelList nSend(Pstream::nProcs(), Zero);
     forAll(info, i)
     {

@@ -4540,7 +4532,7 @@ Foam::distributedTriSurfaceMesh::localQueries
     labelListList sendMap(Pstream::nProcs());
     forAll(nSend, proci)
     {
-        sendMap[proci].setSize(nSend[proci]);
+        sendMap[proci].resize_nocopy(nSend[proci]);
         nSend[proci] = 0;
     }
@@ -4560,65 +4552,11 @@ Foam::distributedTriSurfaceMesh::localQueries
     }

-    // Send over how many i need to receive
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    labelListList sendSizes(Pstream::nProcs());
-    sendSizes[Pstream::myProcNo()].setSize(Pstream::nProcs());
-    forAll(sendMap, proci)
-    {
-        sendSizes[Pstream::myProcNo()][proci] = sendMap[proci].size();
-    }
-    Pstream::allGatherList(sendSizes);
-
-    // Determine receive map
-    // ~~~~~~~~~~~~~~~~~~~~~
-
-    labelListList constructMap(Pstream::nProcs());
-
-    // My local segments first
-    constructMap[Pstream::myProcNo()] = identity
-    (
-        sendMap[Pstream::myProcNo()].size()
-    );
-
-    label segmenti = constructMap[Pstream::myProcNo()].size();
-    forAll(constructMap, proci)
-    {
-        if (proci != Pstream::myProcNo())
-        {
-            // What i need to receive is what other processor is sending to me.
-            label nRecv = sendSizes[proci][Pstream::myProcNo()];
-            constructMap[proci].setSize(nRecv);
-
-            for (label i = 0; i < nRecv; i++)
-            {
-                constructMap[proci][i] = segmenti++;
-            }
-        }
-    }
-
     // Pack into distribution map
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    autoPtr<mapDistribute> mapPtr
-    (
-        new mapDistribute
-        (
-            segmenti,           // size after construction
-            std::move(sendMap),
-            std::move(constructMap)
-        )
-    );
-    const mapDistribute& map = mapPtr();
+    auto mapPtr = autoPtr<mapDistribute>::New(std::move(sendMap));

     // Send over queries
-    // ~~~~~~~~~~~~~~~~~
-    map.distribute(triangleIndex);
+    mapPtr().distribute(triangleIndex);

     return mapPtr;
 }
@@ -4770,13 +4708,6 @@ void Foam::distributedTriSurfaceMesh::distribute
     }

-    // Send over how many faces/points i need to receive
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    labelList faceRecvSizes;
-    Pstream::exchangeSizes(faceSendMap, faceRecvSizes);
-
     // Exchange surfaces
     // ~~~~~~~~~~~~~~~~~
@@ -4821,61 +4752,55 @@ void Foam::distributedTriSurfaceMesh::distribute
     // Send all
     // ~~~~~~~~

-    PstreamBuffers pBufs(Pstream::defaultCommsType);
+    PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);

     forAll(faceSendMap, proci)
     {
-        if (proci != Pstream::myProcNo())
+        if (proci != Pstream::myProcNo() && !faceSendMap[proci].empty())
         {
-            if (faceSendMap[proci].size() > 0)
-            {
-                UOPstream str(proci, pBufs);
-
-                labelList pointMap;
-                triSurface subSurface
-                (
-                    subsetMesh
-                    (
-                        *this,
-                        faceSendMap[proci],
-                        pointMap
-                    )
-                );
-
-                str << subSurface;
-            }
+            UOPstream os(proci, pBufs);
+
+            labelList pointMap;
+            triSurface subSurface
+            (
+                subsetMesh
+                (
+                    *this,
+                    faceSendMap[proci],
+                    pointMap
+                )
+            );
+
+            os << subSurface;
         }
     }

-    pBufs.finishedSends();   // no-op for blocking
+    pBufs.finishedSends();

     // Receive and merge all
     // ~~~~~~~~~~~~~~~~~~~~~

-    forAll(faceRecvSizes, proci)
+    for (const int proci : pBufs.allProcs())
     {
-        if (proci != Pstream::myProcNo())
+        if (proci != Pstream::myProcNo() && pBufs.recvDataCount(proci))
         {
-            if (faceRecvSizes[proci] > 0)
-            {
-                UIPstream str(proci, pBufs);
-
-                // Receive
-                triSurface subSurface(str);
-
-                // Merge into allSurf
-                merge
-                (
-                    mergeDist_,
-                    subSurface,
-                    subSurface.points(),
-                    allTris,
-                    allPoints,
-                    faceConstructMap[proci],
-                    pointConstructMap[proci]
-                );
-            }
+            UIPstream is(proci, pBufs);
+
+            // Receive
+            triSurface subSurface(is);
+
+            // Merge into allSurf
+            merge
+            (
+                mergeDist_,
+                subSurface,
+                subSurface.points(),
+                allTris,
+                allPoints,
+                faceConstructMap[proci],
+                pointConstructMap[proci]
+            );
         }
     }


@@ -6,7 +6,7 @@
      \\/     M anipulation  |
 -------------------------------------------------------------------------------
     Copyright (C) 2012-2017 OpenFOAM Foundation
-    Copyright (C) 2015-2022 OpenCFD Ltd.
+    Copyright (C) 2015-2023 OpenCFD Ltd.
 -------------------------------------------------------------------------------
 License
     This file is part of OpenFOAM.

@@ -232,42 +232,7 @@ Foam::autoPtr<Foam::mapDistribute> Foam::meshToMesh::calcProcMap
                 }
             }

-            // send over how many tgt cells I need to receive from each
-            // processor
-            labelListList sendSizes(Pstream::nProcs());
-            sendSizes[Pstream::myProcNo()].resize(Pstream::nProcs());
-            forAll(sendMap, proci)
-            {
-                sendSizes[Pstream::myProcNo()][proci] = sendMap[proci].size();
-            }
-            Pstream::allGatherList(sendSizes);
-
-            // determine order of receiving
-            labelListList constructMap(Pstream::nProcs());
-
-            label segmentI = 0;
-            forAll(constructMap, proci)
-            {
-                // what I need to receive is what other processor is sending
-                // to me
-                label nRecv = sendSizes[proci][Pstream::myProcNo()];
-                constructMap[proci].setSize(nRecv);
-
-                for (label i = 0; i < nRecv; i++)
-                {
-                    constructMap[proci][i] = segmentI++;
-                }
-            }
-
-            return autoPtr<mapDistribute>::New
-            (
-                segmentI,   // size after construction
-                std::move(sendMap),
-                std::move(constructMap)
-            );
+            return autoPtr<mapDistribute>::New(std::move(sendMap));

             break;
         }
     }