mirror of https://develop.openfoam.com/Development/openfoam.git
ENH: simplify gather code for decompositionMethods

- use globalIndex for gather/scatter. Reuse code patterns from
  globalIndex for handling of the xadj/adjncy arrays.
commit f4ebc90a02
parent a6d1f47943
committed by Andrew Heather
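In both files the change is the same master-gather / compute / scatter
round trip. A minimal sketch of that globalIndex pattern, using only
calls that appear in the hunks below; myData, computeOnMaster and
myResult are placeholder names, not from the commit:

    // Sketch: gather per-rank data to the master, compute there,
    // scatter each rank's slice of the result back out.
    const globalIndex globalNumbers(myData.size()); // per-rank sizing

    // Gather onto the master (empty result on the other ranks)
    List<label> allData(globalNumbers.gather(myData));

    labelList allResult;
    if (Pstream::master())
    {
        allResult = computeOnMaster(allData); // one entry per item
    }

    // Each rank receives exactly its own slice back
    labelList myResult(globalNumbers.scatter(allResult));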
metisLikeDecomp.C
@@ -55,51 +55,114 @@ Foam::label Foam::metisLikeDecomp::decomposeGeneral
         Info<< type() << "Decomp : running in parallel."
             << " Decomposing all of graph on master processor." << endl;
     }
-    const globalIndex globalCells(xadj.size()-1);
-    label nTotalConnections = returnReduce(adjncy.size(), sumOp<label>());
-
-    // Send all to master. Use scheduled to save some storage.
+
+    const globalIndex globalAdjncy(adjncy.size());
+    const globalIndex globalCells(xadj.size()-1);
+
+    List<label> allAdjncy(globalAdjncy.gather(adjncy));
+
+    // Gathering xadj to master is similar to globalIndex gather()
+    // except for the following:
+    //
+    // - gathered list is size+1
+    // - apply local to global renumbering
+
+    const UPstream::commsTypes commsType = UPstream::commsTypes::nonBlocking;
+    const label startOfRequests = UPstream::nRequests();
+
+    List<label> allXadj;
     if (Pstream::master())
     {
-        List<label> allAdjncy(nTotalConnections);
-        List<label> allXadj(globalCells.totalSize()+1);
-        List<scalar> allWeights(globalCells.totalSize());
-
-        // Insert my own
-        label nTotalCells = 0;
-        forAll(cWeights, celli)
-        {
-            allXadj[nTotalCells] = xadj[celli];
-            allWeights[nTotalCells++] = cWeights[celli];
-        }
-        nTotalConnections = 0;
-        forAll(adjncy, i)
-        {
-            allAdjncy[nTotalConnections++] = adjncy[i];
-        }
+        allXadj.resize(globalCells.totalSize()+1);
+        allXadj.last() = globalAdjncy.totalSize(); // Final end offset
 
-        for (const int slave : Pstream::subProcs())
+        // My values - no renumbering required
+        SubList<label>(allXadj, globalCells.localSize(0)) =
+            SubList<label>(xadj, globalCells.localSize(0));
+
+        for (const int proci : globalCells.subProcs())
         {
-            IPstream fromSlave(Pstream::commsTypes::scheduled, slave);
-            List<label> nbrAdjncy(fromSlave);
-            List<label> nbrXadj(fromSlave);
-            List<scalar> nbrWeights(fromSlave);
-
-            // Append.
-            forAll(nbrXadj, celli)
+            SubList<label> procSlot(allXadj, globalCells.range(proci));
+            if (procSlot.empty())
             {
-                allXadj[nTotalCells] = nTotalConnections+nbrXadj[celli];
-                allWeights[nTotalCells++] = nbrWeights[celli];
+                // Nothing to do
             }
-            // No need to renumber xadj since already global.
-            forAll(nbrAdjncy, i)
+            else
             {
-                allAdjncy[nTotalConnections++] = nbrAdjncy[i];
+                IPstream::read
+                (
+                    commsType,
+                    proci,
+                    procSlot.data_bytes(),
+                    procSlot.size_bytes(),
+                    UPstream::msgType(),
+                    UPstream::worldComm
+                );
             }
         }
-        allXadj[nTotalCells] = nTotalConnections;
-
-        labelList allDecomp;
+    }
+    else
+    {
+        // Send my part of the graph (local numbering)
+        if (xadj.size() <= 1)
+        {
+            // Nothing to do
+        }
+        else
+        {
+            SubList<label> procSlot(xadj, xadj.size()-1);
+
+            OPstream::write
+            (
+                commsType,
+                UPstream::masterNo(),
+                procSlot.cdata_bytes(),
+                procSlot.size_bytes(),
+                UPstream::msgType(),
+                UPstream::worldComm
+            );
+        }
+    }
+
+    if (commsType == UPstream::commsTypes::nonBlocking)
+    {
+        // Wait for all to finish
+        UPstream::waitRequests(startOfRequests);
+    }
+
+    // Local to global renumbering
+    if (Pstream::master())
+    {
+        for (const int proci : globalCells.subProcs())
+        {
+            SubList<label> procSlot(allXadj, globalCells.range(proci));
+
+            globalAdjncy.inplaceToGlobal(proci, procSlot);
+        }
+    }
+
+    // Ignore zero-sized weights ... and poorly sized ones too
+    List<scalar> allWeights;
+    if
+    (
+        returnReduce
+        (
+            (cWeights.size() == globalCells.localSize()), andOp<bool>()
+        )
+    )
+    {
+        allWeights = globalCells.gather(cWeights);
+    }
+
+
+    // Global decomposition
+    labelList allDecomp;
+
+    if (Pstream::master())
+    {
         decomposeSerial
         (
             allAdjncy,
@@ -108,45 +171,14 @@ Foam::label Foam::metisLikeDecomp::decomposeGeneral
             allDecomp
         );
 
-        // Send allFinalDecomp back
-        for (const int slave : Pstream::subProcs())
-        {
-            OPstream toSlave(Pstream::commsTypes::scheduled, slave);
-            toSlave << SubList<label>
-            (
-                allDecomp,
-                globalCells.localSize(slave),
-                globalCells.localStart(slave)
-            );
-        }
-
-        // Get my own part (always first)
-        decomp = SubList<label>(allDecomp, globalCells.localSize());
+        allAdjncy.clear(); // Not needed anymore
+        allXadj.clear(); // ...
+        allWeights.clear(); // ...
     }
-    else
-    {
-        // Send my part of the graph (already in global numbering)
-        {
-            OPstream toMaster
-            (
-                Pstream::commsTypes::scheduled,
-                Pstream::masterNo()
-            );
-            toMaster
-                << adjncy
-                << SubList<label>(xadj, xadj.size()-1)
-                << cWeights;
-        }
 
-        // Receive back decomposition
-        IPstream fromMaster
-        (
-            Pstream::commsTypes::scheduled,
-            Pstream::masterNo()
-        );
-        fromMaster >> decomp;
-    }
+    // The processor-local decomposition (output)
+    decomp.resize_nocopy(globalCells.localSize());
+    globalCells.scatter(allDecomp, decomp);
 
     return 0;
 }
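The xadj handling in the first hunk needs more than a plain gather
because of the CSR (compressed sparse row) layout used by METIS-like
libraries: each rank's xadj holds local offsets into its own adjncy,
plus one trailing end offset. A small worked example of the
renumbering, with hypothetical two-rank sizes rather than data from
the commit:

    // rank 0: xadj = (0 2 5), adjncy has 5 entries (2 cells)
    // rank 1: xadj = (0 3 4), adjncy has 4 entries (2 cells)
    //
    // Only the first localSize() offsets of each rank are gathered,
    // and a single final end offset is appended in their place
    // ("gathered list is size+1"). inplaceToGlobal() then shifts
    // rank 1 by globalAdjncy.localStart(1) == 5:
    //
    //   allXadj = (0 2 | 0+5 3+5 | 9) = (0 2 5 8 9)
    //   allXadj.last() == globalAdjncy.totalSize() == 9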
simpleGeomDecomp.C
@@ -337,73 +337,18 @@ Foam::labelList Foam::simpleGeomDecomp::decompose
     }
     else
     {
-        globalIndex globalNumbers(points.size());
+        const globalIndex globalNumbers(points.size());
 
         // Collect all points on master
+        pointField allPoints(globalNumbers.gather(points));
+
+        labelList allDecomp;
         if (Pstream::master())
         {
-            pointField allPoints(globalNumbers.totalSize());
-
-            label nTotalPoints = 0;
-            // Master first
-            SubField<point>(allPoints, points.size()) = points;
-            nTotalPoints += points.size();
-
-            // Add received
-            for (const int subproci : Pstream::subProcs())
-            {
-                IPstream fromProc(Pstream::commsTypes::scheduled, subproci);
-                pointField nbrPoints(fromProc);
-                SubField<point>
-                (
-                    allPoints,
-                    nbrPoints.size(),
-                    nTotalPoints
-                ) = nbrPoints;
-                nTotalPoints += nbrPoints.size();
-            }
-
-            // Decompose
-            labelList finalDecomp(decomposeOneProc(allPoints));
-
-            // Send back
-            for (const int subproci : Pstream::subProcs())
-            {
-                OPstream toProc(Pstream::commsTypes::scheduled, subproci);
-                toProc << SubField<label>
-                (
-                    finalDecomp,
-                    globalNumbers.localSize(subproci),
-                    globalNumbers.localStart(subproci)
-                );
-            }
-            // Get my own part
-            finalDecomp.setSize(points.size());
-
-            return finalDecomp;
+            allDecomp = decomposeOneProc(allPoints);
+            allPoints.clear(); // Not needed anymore
         }
-        else
-        {
-            // Send my points
-            {
-                OPstream toMaster
-                (
-                    Pstream::commsTypes::scheduled,
-                    Pstream::masterNo()
-                );
-                toMaster<< points;
-            }
 
-            // Receive back decomposition
-            IPstream fromMaster
-            (
-                Pstream::commsTypes::scheduled,
-                Pstream::masterNo()
-            );
-            labelList finalDecomp(fromMaster);
-
-            return finalDecomp;
-        }
+        return globalNumbers.scatter(allDecomp);
     }
 }
@@ -414,88 +359,31 @@ Foam::labelList Foam::simpleGeomDecomp::decompose
     const scalarField& weights
 ) const
 {
-    if (!Pstream::parRun())
+    if (returnReduce((points.size() != weights.size()), orOp<bool>()))
+    {
+        // Ignore zero-sized weights ... and poorly sized ones too
+        return decompose(points);
+    }
+    else if (!Pstream::parRun())
     {
         return decomposeOneProc(points, weights);
     }
     else
     {
-        globalIndex globalNumbers(points.size());
+        const globalIndex globalNumbers(points.size());
 
         // Collect all points on master
+        pointField allPoints(globalNumbers.gather(points));
+        scalarField allWeights(globalNumbers.gather(weights));
+
+        labelList allDecomp;
         if (Pstream::master())
         {
-            pointField allPoints(globalNumbers.totalSize());
-            scalarField allWeights(globalNumbers.totalSize());
-
-            label nTotalPoints = 0;
-            // Master first
-            SubField<point>(allPoints, points.size()) = points;
-            SubField<scalar>(allWeights, points.size()) = weights;
-            nTotalPoints += points.size();
-
-            // Add received
-            for (const int subproci : Pstream::subProcs())
-            {
-                IPstream fromProc(Pstream::commsTypes::scheduled, subproci);
-                pointField nbrPoints(fromProc);
-                scalarField nbrWeights(fromProc);
-                SubField<point>
-                (
-                    allPoints,
-                    nbrPoints.size(),
-                    nTotalPoints
-                ) = nbrPoints;
-                SubField<scalar>
-                (
-                    allWeights,
-                    nbrWeights.size(),
-                    nTotalPoints
-                ) = nbrWeights;
-                nTotalPoints += nbrPoints.size();
-            }
-
-            // Decompose
-            labelList finalDecomp(decomposeOneProc(allPoints, allWeights));
-
-            // Send back
-            for (const int subproci : Pstream::subProcs())
-            {
-                OPstream toProc(Pstream::commsTypes::scheduled, subproci);
-                toProc << SubField<label>
-                (
-                    finalDecomp,
-                    globalNumbers.localSize(subproci),
-                    globalNumbers.localStart(subproci)
-                );
-            }
-            // Get my own part
-            finalDecomp.setSize(points.size());
-
-            return finalDecomp;
+            allDecomp = decomposeOneProc(allPoints, allWeights);
+            allPoints.clear(); // Not needed anymore
+            allWeights.clear(); // ...
         }
-        else
-        {
-            // Send my points
-            {
-                OPstream toMaster
-                (
-                    Pstream::commsTypes::scheduled,
-                    Pstream::masterNo()
-                );
-                toMaster<< points << weights;
-            }
 
-            // Receive back decomposition
-            IPstream fromMaster
-            (
-                Pstream::commsTypes::scheduled,
-                Pstream::masterNo()
-            );
-            labelList finalDecomp(fromMaster);
-
-            return finalDecomp;
-        }
+        return globalNumbers.scatter(allDecomp);
     }
 }
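A behavioural note on the weighted overload above: the size-mismatch
test is reduced with orOp<bool> so that it is collective. If each rank
tested points.size() != weights.size() locally, ranks could take
different branches and the gathers that follow would deadlock; the
reduction makes every rank fall back to the unweighted
decompose(points) together. This is a reading of the guard, not extra
code from the commit:

    // Not collective: ranks may disagree, later gathers can deadlock
    //   if (points.size() != weights.size()) return decompose(points);
    //
    // Collective: every rank sees the same answer, takes the same branch
    if (returnReduce((points.size() != weights.size()), orOp<bool>()))
    {
        return decompose(points);
    }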