Contributed by Mattijs Janssens.
1. Any non-blocking data exchange needs to know in advance the sizes to
receive so it can size the buffer. For "halo" exchanges this is not
a problem since the sizes are known in advance, but for all other data
exchanges these sizes need to be exchanged in advance.
This was previously done by having all processors send the sizes of data to
send to the master and send it back such that all processors
- had the same information
- all could work out who was sending what to where and hence what needed to
be received.
This is now changed such that we only send the size to the
destination processor (instead of to all as previously). This means
that
- the list of sizes to send is now of size nProcs v.s. nProcs*nProcs before
- we cut out the route to the master and back by using a native MPI
call
It causes a small change to the API of exchange and PstreamBuffers -
they now return the sizes of the local buffers only (a labelList) and
not the sizes of the buffers on all processors (labelListList)
2. Reversing the order of the way in which the sending is done when
scattering information from the master processor to the other
processors. This is done in a tree like fashion. Each processor has a
set of processors to receive from/ send to. When receiving it will
first receive from the processors with the least amount of
sub-processors (i.e. the ones which return first). When sending it
needs to do the opposite: start sending to the processor with the
most amount of sub-tree since this is the critical path.
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 2011-2016 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

Application
    parallelTest

Description
    Test for various parallel routines.

\*---------------------------------------------------------------------------*/
#include "List.H"
#include "mapDistribute.H"
#include "argList.H"
#include "Time.H"
#include "IPstream.H"
#include "OPstream.H"
#include "vector.H"
#include "IOstreams.H"
#include "Random.H"
#include "Tuple2.H"

using namespace Foam;

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
{
    #include "setRootCase.H"
    #include "createTime.H"


    // Test mapDistribute
    // ~~~~~~~~~~~~~~~~~~

    if (true)
    {
        // Seed differs per processor so each rank generates different data
        Random rndGen(43544*Pstream::myProcNo());

        // Generate random data: each element holds a destination processor
        // (first) and a small payload (second).
        List<Tuple2<label, List<scalar>>> complexData(100);
        forAll(complexData, i)
        {
            complexData[i].first() = rndGen.integer(0, Pstream::nProcs()-1);
            complexData[i].second().setSize(3);
            complexData[i].second()[0] = 1;
            complexData[i].second()[1] = 2;
            complexData[i].second()[2] = 3;
        }

        // Send all elements to the processor indicated by .first()

        // Count how many to send to each processor
        labelList nSend(Pstream::nProcs(), 0);
        forAll(complexData, i)
        {
            label procI = complexData[i].first();
            nSend[procI]++;
        }

        // Collect the indices of the items to be sent to each processor
        labelListList sendMap(Pstream::nProcs());
        forAll(sendMap, procI)
        {
            sendMap[procI].setSize(nSend[procI]);
        }
        // Reuse nSend as a per-processor insertion cursor for the fill pass
        nSend = 0;
        forAll(complexData, i)
        {
            label procI = complexData[i].first();
            sendMap[procI][nSend[procI]++] = i;
        }

        // Exchange the sizes only with the destination processors so the
        // receive side can size its buffers (returns local sizes only)
        labelList nRecv;
        Pstream::exchangeSizes(sendMap, nRecv);

        // Collect items to be received
        labelListList recvMap(Pstream::nProcs());
        forAll(recvMap, procI)
        {
            recvMap[procI].setSize(nRecv[procI]);
        }

        label constructSize = 0;
        // Construct with my own (local) elements first
        forAll(recvMap[Pstream::myProcNo()], i)
        {
            recvMap[Pstream::myProcNo()][i] = constructSize++;
        }
        // Then append slots for elements arriving from other processors
        forAll(recvMap, procI)
        {
            if (procI != Pstream::myProcNo())
            {
                forAll(recvMap[procI], i)
                {
                    recvMap[procI][i] = constructSize++;
                }
            }
        }

        // Construct distribute map (destructively: xfer() transfers
        // ownership of the send/receive maps into the mapDistribute)
        mapDistribute map(constructSize, sendMap.xfer(), recvMap.xfer());

        // Distribute complexData in place according to the map
        map.distribute(complexData);

        Pout<< "complexData:" << complexData << endl;
    }


    // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

    // Test blocking point-to-point transfers: slaves send to the master,
    // then the master echoes the data back to every slave.

    Perr<< "\nStarting transfers\n" << endl;

    vector data(0, 1, 2);

    if (Pstream::parRun())
    {
        if (Pstream::myProcNo() != Pstream::masterNo())
        {
            {
                Perr<< "slave sending to master "
                    << Pstream::masterNo() << endl;
                // Scoped so the stream is flushed/closed before receiving
                OPstream toMaster(Pstream::blocking, Pstream::masterNo());
                toMaster << data;
            }

            Perr<< "slave receiving from master "
                << Pstream::masterNo() << endl;
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
            fromMaster >> data;

            Perr<< data << endl;
        }
        else
        {
            // Master: receive from every slave in turn
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                Perr<< "master receiving from slave " << slave << endl;
                IPstream fromSlave(Pstream::blocking, slave);
                fromSlave >> data;

                Perr<< data << endl;
            }

            // Master: send (the last received) data back to every slave
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                Perr<< "master sending to slave " << slave << endl;
                OPstream toSlave(Pstream::blocking, slave);
                toSlave << data;
            }
        }
    }

    Info<< "End\n" << endl;

    return 0;
}
// ************************************************************************* //