ENH: add separate tracking of MPI_Comm_free, MPI_Group_free pending

- permits distinguishing communicators/groups that were user-created
  (e.g., via MPI_Comm_create) from those merely queried from MPI.
  Previously this relied simply on non-null values, but that is too fragile
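
  As an illustration only (hypothetical names, not the actual PstreamGlobals
  internals): the idea is to store an explicit "pending free" flag next to
  each handle, so that only handles this code created are ever freed.

      #include <mpi.h>
      #include <vector>

      struct CommEntry
      {
          MPI_Comm comm = MPI_COMM_NULL;
          bool pendingFree = false;  // set only when we created the handle
      };

      std::vector<CommEntry> comms_;

      void freeCommunicator(std::size_t index)
      {
          CommEntry& e = comms_[index];

          // Use the explicit flag, not (comm != MPI_COMM_NULL), which cannot
          // distinguish handles we created from handles we only queried
          if (e.pendingFree && e.comm != MPI_COMM_NULL)
          {
              MPI_Comm_free(&e.comm);
          }
          e.comm = MPI_COMM_NULL;
          e.pendingFree = false;
      }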

ENH: support List<Request> version of UPstream::finishedRequests

- allows more independent algorithms
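
  A rough usage sketch (the exact signature and semantics are assumptions,
  not quoted from UPstream.H): each algorithm keeps its own list of request
  handles and polls only that list, independent of the global request stack.

      // Requests owned by this algorithm alone
      DynamicList<UPstream::Request> requests;

      // ... start non-blocking sends/receives, each one appending its
      // request handle to 'requests' ...

      // Presumed to behave like MPI_Testall on just this list,
      // returning true once every request in it has completed
      while (!UPstream::finishedRequests(requests))
      {
          // overlap: do other useful local work here
      }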

ENH: added UPstream::probeMessage(...). Blocking or non-blocking
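
A possible usage sketch (parameter meanings and the return value are
assumptions, not quoted from the header): probe for a pending message,
then size the receive buffer from the probed byte count.

    // Non-blocking probe: returns immediately; a blocking probe would
    // wait until a matching message is actually pending
    std::pair<int, int> probed =
        UPstream::probeMessage
        (
            UPstream::commsTypes::nonBlocking,
            -1,                  // assumed: any source rank
            UPstream::msgType()  // message tag
        );

    if (probed.first >= 0)
    {
        // probed.first  : sending rank
        // probed.second : assumed to be the message size in bytes
        List<char> buffer(probed.second);
        // ... receive from rank probed.first into 'buffer' ...
    }
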
Mark Olesen
2023-02-03 10:06:39 +01:00
parent ab4c5f25ac
commit 068ab8ccc7
12 changed files with 516 additions and 53 deletions


@@ -1,2 +1,4 @@
/* EXE_INC = */
/* EXE_LIBS = */
include $(GENERAL_RULES)/mpi-rules
EXE_INC = $(PFLAGS) $(PINC) $(c++LESSWARN)
EXE_LIBS = $(PLIBS)


@@ -33,6 +33,7 @@ Description
#include "polyMesh.H"
#include "globalMeshData.H"
#include "OFstream.H"
#include <mpi.h>
using namespace Foam;
@@ -42,11 +43,25 @@ using namespace Foam;
int main(int argc, char *argv[])
{
    argList::noFunctionObjects();
    argList::addBoolOption("verbose", "Set debug level");
    argList::addBoolOption("comm-graph", "Test simple graph communicator");

    argList::addNote
    (
        "Create graph of OpenFOAM mesh connections"
    );

    // Capture manually. We need values before proper startup
    int nVerbose = 0;
    for (int argi = 1; argi < argc; ++argi)
    {
        if (strcmp(argv[argi], "-verbose") == 0)
        {
            ++nVerbose;
        }
    }

    UPstream::debug = nVerbose;

    #include "setRootCase.H"

    if (!Pstream::parRun())
@@ -105,6 +120,127 @@ int main(int argc, char *argv[])
<< "Use neato, circo or fdp graphviz tools" << nl;
}
if (Pstream::parRun() && args.found("comm-graph"))
{
Info<< nl;
// Local neighbours
const labelList& neighbours =
mesh.globalData().topology().procNeighbours();
Pout<< "Neigbours: " << flatOutput(neighbours) << endl;
// As integers values
List<int> connected(neighbours.size());
List<int> weights(neighbours.size());
forAll(neighbours, i)
{
connected[i] = neighbours[i];
weights[i] = 1;
}
MPI_Comm topoComm;
int mpiErrorCode =
MPI_Dist_graph_create_adjacent
(
MPI_COMM_WORLD,
// Connections into this rank
connected.size(), connected.cdata(), MPI_UNWEIGHTED,
// Connections out of this rank
connected.size(), connected.cdata(), MPI_UNWEIGHTED,
MPI_INFO_NULL,
0, // no reordering (apparently broken anyhow)
&topoComm
);
if (mpiErrorCode)
{
FatalError
<< "Failed to create topo communicator. Error:"
<< mpiErrorCode << exit(FatalError);
}
int topo_rank = 0;
int topo_nprocs = 0;
int topo_inCount = 0;
int topo_outCount = 0;
int topo_isWeighted = 0;
MPI_Comm_rank(topoComm, &topo_rank);
MPI_Comm_size(topoComm, &topo_nprocs);
{
int topo_type = 0;
MPI_Topo_test(topoComm, &topo_type);
if (MPI_CART == topo_type)
{
Info<< "MPI topology : Cartesian" << endl;
}
else if (MPI_GRAPH == topo_type)
{
Info<< "MPI topology : Graph" << endl;
}
else if (MPI_DIST_GRAPH == topo_type)
{
Info<< "MPI topology : Distributed graph" << endl;
}
else
{
Info<< "MPI topology : None" << endl;
}
}
MPI_Dist_graph_neighbors_count
(
topoComm,
&topo_inCount,
&topo_outCount,
&topo_isWeighted
);
Pout<< "Topo comm with "
<< topo_rank << " / " << topo_nprocs
<< " from " << connected.size() << flatOutput(connected)
<< " numNbr:" << topo_inCount
<< nl;
List<int> myPatchIds(neighbours.size());
forAll(myPatchIds, i)
{
// Patches to neighbours
myPatchIds[i] =
mesh.globalData().topology().procPatchLookup(neighbours[i]);
}
List<int> nbrPatchIds(neighbours.size(), Zero);
mpiErrorCode = MPI_Neighbor_alltoall
(
myPatchIds.data(),
1, // one element per neighbour
MPI_INT,
nbrPatchIds.data(),
1, // one element per neighbour
MPI_INT,
topoComm
);
if (mpiErrorCode)
{
FatalError
<< "MPI Error: " << mpiErrorCode << exit(FatalError);
}
Pout<< "proc neighbours:" << flatOutput(neighbours)
<< " my patches:" << flatOutput(myPatchIds)
<< " their patches:" << flatOutput(nbrPatchIds)
<< endl;
MPI_Comm_free(&topoComm);
}
Info<< nl << "End\n" << endl;
return 0;