diff --git a/applications/test/parallel-comm2/Test-parallel-comm2.C b/applications/test/parallel-comm2/Test-parallel-comm2.C
index 17f8283d13..ce01dc7cf9 100644
--- a/applications/test/parallel-comm2/Test-parallel-comm2.C
+++ b/applications/test/parallel-comm2/Test-parallel-comm2.C
@@ -64,9 +64,12 @@ int main(int argc, char *argv[])
     argList::noBanner();
     argList::noCheckProcessorDirectories();
     argList::addBoolOption("verbose", "Set debug level");
+    argList::addBoolOption("info", "information");
     argList::addBoolOption("print-tree", "Report tree(s) as graph");
     argList::addBoolOption("comm-split", "Test simple comm split");
-    argList::addBoolOption("host-comm", "Test DIY host-comm split");
+    argList::addBoolOption("mpi-host-comm", "Test DIY host-comm split");
+    argList::addBoolOption("host-comm", "Test Pstream host-comm");
+    argList::addBoolOption("host-broadcast", "Test host-base broadcasts");
 
     // Capture manually. We need values before proper startup
     int nVerbose = 0;
@@ -90,76 +93,30 @@ int main(int argc, char *argv[])
         << " with " << UPstream::nComms() << " predefined comm(s)."
         << " proc:" << UPstream::myProcNo() << nl;
 
-    Info<< nl;
-
-    //- Process IDs within a given communicator
-    Info<< "procIDs: "
-        << flatOutput(UPstream::procID(UPstream::commWorld())) << endl;
-
-    rankInfo(UPstream::commWorld());
-    Pout<< endl;
-
-    const int myProci = UPstream::myProcNo(UPstream::commWorld());
-    int localRanki = myProci;
-
-    labelList subRanks;
-    UPstream::communicator newComm;
-
-
     if (UPstream::parRun() && optPrintTree)
     {
         Info<< "comms: " << UPstream::whichCommunication() << endl;
         UPstream::printCommTree(UPstream::commWorld());
     }
 
-    if (!args.found("comm-split") && !args.found("host-comm"))
+    if (args.found("info"))
     {
-        #if 1
-        // With first ranks
-        subRanks = identity(UPstream::nProcs(UPstream::commWorld()) / 2);
-
-        newComm.reset(UPstream::commWorld(), subRanks);
-        localRanki = UPstream::myProcNo(newComm);
-
-        Pout.prefix() =
-        (
-            '[' + Foam::name(myProci) + " a:" + Foam::name(localRanki) + "] "
-        );
+        Info<< nl;
 
+        //- Process IDs within a given communicator
         Info<< "procIDs: "
-            << flatOutput(UPstream::procID(newComm)) << endl;
+            << flatOutput(UPstream::procID(UPstream::commWorld())) << endl;
 
-        rankInfo(newComm);
+        rankInfo(UPstream::commWorld());
         Pout<< endl;
-        #endif
-
-        #if 1
-        // With every other rank
-        subRanks = identity(UPstream::nProcs(UPstream::commWorld()));
-
-        for (label& val : subRanks)
-        {
-            if (val % 2) val = -1;
-        }
-
-        newComm.reset(UPstream::commWorld(), subRanks);
-        localRanki = UPstream::myProcNo(newComm);
-
-        Pout.prefix() =
-        (
-            '[' + Foam::name(myProci) + " b:" + Foam::name(localRanki) + "] "
-        );
-
-        Info<< "procIDs: "
-            << flatOutput(UPstream::procID(newComm)) << endl;
-
-        rankInfo(newComm);
-        Pout<< endl;
-        #endif
     }
 
-    if (Pstream::parRun() && args.found("comm-split"))
+    bool generalTest = true;
+
+    if (UPstream::parRun() && args.found("comm-split"))
     {
+        generalTest = false;
+
         int world_nprocs = 0;
         int world_rank = -1;
         MPI_Comm_size(MPI_COMM_WORLD, &world_nprocs);
@@ -167,37 +124,37 @@ int main(int argc, char *argv[])
 
         int host_nprocs = 0;
         int host_rank = -1;
-        MPI_Comm hostComm;
+        MPI_Comm commIntraHost;
         MPI_Comm_split_type
         (
             MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED,  // OMPI_COMM_TYPE_NODE
-            0, MPI_INFO_NULL, &hostComm
+            0, MPI_INFO_NULL, &commIntraHost
         );
-        MPI_Comm_size(hostComm, &host_nprocs);
-        MPI_Comm_rank(hostComm, &host_rank);
+        MPI_Comm_size(commIntraHost, &host_nprocs);
+        MPI_Comm_rank(commIntraHost, &host_rank);
 
         int leader_nprocs = 0;
         int leader_rank = -1;
-        MPI_Comm hostMasterComm;
+        MPI_Comm commInterHost;
 
         if (false)
         {
            // Easy enough to use MPI_Comm_split, but slightly annoying
            // that it returns MPI_COMM_NULL for unused ranks...
-            MPI_Comm hostMasterComm;
+            MPI_Comm commInterHost;
             MPI_Comm_split
             (
                 MPI_COMM_WORLD,
                 (host_rank == 0) ? 0 : MPI_UNDEFINED,
-                0, &hostMasterComm
+                0, &commInterHost
             );
 
-            if (hostMasterComm != MPI_COMM_NULL)
+            if (commInterHost != MPI_COMM_NULL)
             {
-                MPI_Comm_size(hostMasterComm, &leader_nprocs);
-                MPI_Comm_rank(hostMasterComm, &leader_rank);
+                MPI_Comm_size(commInterHost, &leader_nprocs);
+                MPI_Comm_rank(commInterHost, &leader_rank);
             }
         }
        else
@@ -242,35 +199,39 @@ int main(int argc, char *argv[])
                 MPI_COMM_WORLD,
                 active_group,
                 UPstream::msgType(),
-                &hostMasterComm
+                &commInterHost
             );
 
             // Groups not needed after this...
             MPI_Group_free(&parent_group);
             MPI_Group_free(&active_group);
 
-            MPI_Comm_size(hostMasterComm, &leader_nprocs);
-            MPI_Comm_rank(hostMasterComm, &leader_rank);
+            MPI_Comm_size(commInterHost, &leader_nprocs);
+            MPI_Comm_rank(commInterHost, &leader_rank);
         }
 
         Pout<< nl << "[MPI_Comm_split_type]" << nl
-            << "Host comm with " << host_rank << " / " << host_nprocs
+            << "Host rank " << host_rank << " / " << host_nprocs
             << " on " << hostName()
-            << " master:" << (host_rank == 0)
-            << " leader rank: " << leader_rank
-            << " / " << leader_nprocs
+            << " inter-rank: " << leader_rank << " / " << leader_nprocs
             << " host leader:" << (leader_rank == 0)
             << " sub-rank:" << (leader_rank > 0)
             << nl;
 
-        if (hostMasterComm != MPI_COMM_NULL)
+        if (commInterHost != MPI_COMM_NULL)
         {
-            MPI_Comm_free(&hostMasterComm);
+            MPI_Comm_free(&commInterHost);
+        }
+        if (commIntraHost != MPI_COMM_NULL)
+        {
+            MPI_Comm_free(&commIntraHost);
         }
-        MPI_Comm_free(&hostComm);
     }
 
-    if (Pstream::parRun() && args.found("host-comm"))
+
+    if (UPstream::parRun() && args.found("mpi-host-comm"))
     {
+        generalTest = false;
+
         // Host communicator, based on the current world communicator
         // Use hostname
         // Lowest rank per hostname is the IO rank
@@ -290,40 +251,32 @@ int main(int argc, char *argv[])
        //   - pro: fixed digest length enables direct MPI calls
        //     can avoid Pstream::gatherList() during setup...
 
-        SHA1Digest myHostDigest(SHA1(hostName()).digest());
-
         List<SHA1Digest> digests;
         if (UPstream::master(UPstream::commGlobal()))
         {
             digests.resize(numprocs);
         }
 
-        UPstream::mpiGather
-        (
-            reinterpret_cast<const char*>(myHostDigest.cdata_bytes()),
-            SHA1Digest::max_size(),  // Num send per proc
-            digests.data_bytes(),    // Recv
-            SHA1Digest::max_size(),  // Num recv per proc
-            UPstream::commGlobal()
-        );
+        {
+            const SHA1Digest myHostDigest(SHA1(hostName()).digest());
 
-        // MPI_Gather
-        // (
-        //     myHostDigest.cdata_bytes(),  // Send
-        //     SHA1Digest::max_size(),      // Num send per proc
-        //     MPI_BYTE,
-        //     digests.data_bytes(),        // Recv
-        //     SHA1Digest::max_size(),      // Num recv per proc
-        //     MPI_BYTE,
-        //     0,  // root
-        //     MPI_COMM_WORLD
-        // );
+            UPstream::mpiGather
+            (
+                reinterpret_cast<const char*>(myHostDigest.cdata_bytes()),
+                SHA1Digest::max_size(),  // Num send per proc
+                digests.data_bytes(),    // Recv
+                SHA1Digest::max_size(),  // Num recv per proc
+                UPstream::commGlobal()
+            );
+        }
+
+
+        labelList hostIDs(numprocs);
+        DynamicList