/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2009 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation; either version 2 of the License, or (at your
    option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

\*---------------------------------------------------------------------------*/

#include "mapDistribute.H"
#include "commSchedule.H"
#include "HashSet.H"
#include "ListOps.H"

// * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //

Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
(
    const labelListList& subMap,
    const labelListList& constructMap
)
{
    // Communications: send and receive processor
    List<labelPair> allComms;

    {
        HashSet<labelPair, labelPair::Hash<> > commsSet(Pstream::nProcs());

        // Find what communication is required
        forAll(subMap, procI)
        {
            if (procI != Pstream::myProcNo())
            {
                if (subMap[procI].size())
                {
                    // I need to send to procI
                    commsSet.insert(labelPair(Pstream::myProcNo(), procI));
                }
                if (constructMap[procI].size())
                {
                    // I need to receive from procI
                    commsSet.insert(labelPair(procI, Pstream::myProcNo()));
                }
            }
        }
        allComms = commsSet.toc();
    }


    // Reduce
    if (Pstream::master())
    {
        // Receive and merge
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::blocking, slave);
            List<labelPair> nbrData(fromSlave);

            forAll(nbrData, i)
            {
                if (findIndex(allComms, nbrData[i]) == -1)
                {
                    label sz = allComms.size();
                    allComms.setSize(sz+1);
                    allComms[sz] = nbrData[i];
                }
            }
        }
        // Send back
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::blocking, slave);
            toSlave << allComms;
        }
    }
    else
    {
        {
            OPstream toMaster(Pstream::blocking, Pstream::masterNo());
            toMaster << allComms;
        }
        {
            IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
            fromMaster >> allComms;
        }
    }


    // Determine my schedule.
    labelList mySchedule
    (
        commSchedule
        (
            Pstream::nProcs(),
            allComms
        ).procSchedule()[Pstream::myProcNo()]
    );

    // Processors involved in my schedule
    return UIndirectList<labelPair>(allComms, mySchedule);

    //if (debug)
    //{
    //    Pout<< "I need to:" << endl;
    //    const List<labelPair>& comms = schedule();
    //    forAll(comms, i)
    //    {
    //        const labelPair& twoProcs = comms[i];
    //        label sendProc = twoProcs[0];
    //        label recvProc = twoProcs[1];
    //
    //        if (recvProc == Pstream::myProcNo())
    //        {
    //            Pout<< "    receive from " << sendProc << endl;
    //        }
    //        else
    //        {
    //            Pout<< "    send to " << recvProc << endl;
    //        }
    //    }
    //}
}
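
// Illustrative sketch, assuming a two-processor run in which each processor
// both sends to and receives from the other: the merged allComms list on the
// master would contain the pairs (0 1) and (1 0). commSchedule orders such
// pairwise exchanges into non-conflicting communication slots, and
// procSchedule()[Pstream::myProcNo()] picks out only the exchanges this
// processor takes part in, in the order it should execute them.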


const Foam::List<Foam::labelPair>& Foam::mapDistribute::schedule() const
{
    if (schedulePtr_.empty())
    {
        schedulePtr_.reset
        (
            new List<labelPair>
            (
                schedule(subMap_, constructMap_)
            )
        );
    }
    return schedulePtr_();
}
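
// Minimal usage sketch, assuming 'map' is an already constructed
// mapDistribute. Each labelPair holds (sendProc recvProc); a scheduled
// distribute would walk the list in order and either send or receive:
//
//     const List<labelPair>& sched = map.schedule();
//     forAll(sched, i)
//     {
//         if (sched[i][1] == Pstream::myProcNo())
//         {
//             // receive from processor sched[i][0]
//         }
//         else
//         {
//             // send to processor sched[i][1]
//         }
//     }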


// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

//- Construct from components
Foam::mapDistribute::mapDistribute
(
    const label constructSize,
    const labelListList& subMap,
    const labelListList& constructMap
)
:
    constructSize_(constructSize),
    subMap_(subMap),
    constructMap_(constructMap),
    schedulePtr_()
{}
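
// Construction sketch, assuming two processors that swap one element each:
// every processor keeps its local element 0 in slot 0 of the constructed
// data, sends that same element to the other processor, and receives the
// remote value into slot 1 (so constructSize = 2):
//
//     label otherProc = 1 - Pstream::myProcNo();     // assumed 2-proc run
//     labelListList sub(Pstream::nProcs());
//     labelListList cons(Pstream::nProcs());
//     sub[Pstream::myProcNo()] = labelList(1, 0);    // local slot 0 ...
//     cons[Pstream::myProcNo()] = labelList(1, 0);   // ... stays at slot 0
//     sub[otherProc] = labelList(1, 0);              // also send slot 0 away
//     cons[otherProc] = labelList(1, 1);             // remote value -> slot 1
//     mapDistribute map(2, sub, cons);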


//- (optionally destructively) construct from components
Foam::mapDistribute::mapDistribute
(
    const label constructSize,
    labelListList& subMap,
    labelListList& constructMap,
    const bool reUse                // clone or reuse
)
:
    constructSize_(constructSize),
    subMap_(subMap, reUse),
    constructMap_(constructMap, reUse),
    schedulePtr_()
{}
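
// Usage sketch for the reUse form, assuming 'sub' and 'cons' were built as
// in the sketch above and are not needed by the caller afterwards: passing
// reUse = true lets the map take over their storage rather than copying it:
//
//     mapDistribute map(2, sub, cons, true);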


Foam::mapDistribute::mapDistribute
(
    const labelList& sendProcs,
    const labelList& recvProcs
)
:
    constructSize_(sendProcs.size()),
    schedulePtr_()
{
    if (sendProcs.size() != recvProcs.size())
    {
        FatalErrorIn
        (
            "mapDistribute::mapDistribute(const labelList&, const labelList&)"
        )   << "The send and receive data is not the same length. sendProcs:"
            << sendProcs.size() << " recvProcs:" << recvProcs.size()
            << abort(FatalError);
    }

    // Per processor the number of samples we have to send/receive.
    labelList nSend(Pstream::nProcs(), 0);
    labelList nRecv(Pstream::nProcs(), 0);

    forAll(sendProcs, sampleI)
    {
        label sendProc = sendProcs[sampleI];
        label recvProc = recvProcs[sampleI];

        // Note that we also need to include local communication (both
        // sendProc and recvProc on the local processor).

        if (Pstream::myProcNo() == sendProc)
        {
            // I am the sender. Count destination processor.
            nSend[recvProc]++;
        }
        if (Pstream::myProcNo() == recvProc)
        {
            // I am the receiver.
            nRecv[sendProc]++;
        }
    }

    subMap_.setSize(Pstream::nProcs());
    constructMap_.setSize(Pstream::nProcs());
    forAll(nSend, procI)
    {
        subMap_[procI].setSize(nSend[procI]);
        constructMap_[procI].setSize(nRecv[procI]);
    }
    nSend = 0;
    nRecv = 0;

    forAll(sendProcs, sampleI)
    {
        label sendProc = sendProcs[sampleI];
        label recvProc = recvProcs[sampleI];

        if (Pstream::myProcNo() == sendProc)
        {
            // I am the sender. Store index I need to send.
            subMap_[recvProc][nSend[recvProc]++] = sampleI;
        }
        if (Pstream::myProcNo() == recvProc)
        {
            // I am the receiver.
            constructMap_[sendProc][nRecv[sendProc]++] = sampleI;
        }
    }
}
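
// Sketch of the (sendProcs, recvProcs) form, assuming three samples where
// sample 0 goes from processor 0 to 1, sample 1 from 1 to 0, and sample 2
// stays on processor 0. On processor 0 this constructor yields
//
//     subMap_[1]       = (0)    // send sample 0
//     constructMap_[1] = (1)    // receive sample 1
//     subMap_[0]       = (2)    // local sample 2 appears in both maps
//     constructMap_[0] = (2)
//
// i.e. each sample index ends up in the subMap of its sender and in the
// constructMap of its receiver, with purely local samples in both.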


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

void Foam::mapDistribute::compact(const boolList& elemIsUsed)
{
    // 1. Send the usage flags back to the sender, which deletes the
    //    corresponding elements from its subMap; do the same to the
    //    constructMap locally (and in the same order).

    // Send elemIsUsed field to the neighbours. Use the nonblocking code from
    // mapDistribute but in reverse order.
    {
        List<boolList> sendFields(Pstream::nProcs());

        for (label domain = 0; domain < Pstream::nProcs(); domain++)
        {
            const labelList& map = constructMap_[domain];

            if (domain != Pstream::myProcNo() && map.size())
            {
                boolList& subField = sendFields[domain];
                subField.setSize(map.size());
                forAll(map, i)
                {
                    subField[i] = elemIsUsed[map[i]];
                }

                OPstream::write
                (
                    Pstream::nonBlocking,
                    domain,
                    reinterpret_cast<const char*>(subField.begin()),
                    subField.size()*sizeof(bool)
                );
            }
        }

        // Set up receives from neighbours

        List<boolList> recvFields(Pstream::nProcs());

        for (label domain = 0; domain < Pstream::nProcs(); domain++)
        {
            const labelList& map = subMap_[domain];

            if (domain != Pstream::myProcNo() && map.size())
            {
                recvFields[domain].setSize(map.size());
                IPstream::read
                (
                    Pstream::nonBlocking,
                    domain,
                    reinterpret_cast<char*>(recvFields[domain].begin()),
                    recvFields[domain].size()*sizeof(bool)
                );
            }
        }


        // Set up 'send' to myself - write directly into recvFields

        {
            const labelList& map = constructMap_[Pstream::myProcNo()];

            recvFields[Pstream::myProcNo()].setSize(map.size());
            forAll(map, i)
            {
                recvFields[Pstream::myProcNo()][i] = elemIsUsed[map[i]];
            }
        }


        // Wait for all to finish

        OPstream::waitRequests();
        IPstream::waitRequests();


        // Compact out all subMap entries that refer to unused elements
        for (label domain = 0; domain < Pstream::nProcs(); domain++)
        {
            const labelList& map = subMap_[domain];

            labelList newMap(map.size());
            label newI = 0;

            forAll(map, i)
            {
                if (recvFields[domain][i])
                {
                    // Element is used on the destination side
                    newMap[newI++] = map[i];
                }
            }
            if (newI < map.size())
            {
                newMap.setSize(newI);
                subMap_[domain].transfer(newMap);
            }
        }
    }


    // 2. Remove entries from the constructMap whose end result (the element
    //    in elemIsUsed) is not used.

    label maxConstructIndex = -1;

    for (label domain = 0; domain < Pstream::nProcs(); domain++)
    {
        const labelList& map = constructMap_[domain];

        labelList newMap(map.size());
        label newI = 0;

        forAll(map, i)
        {
            label destinationI = map[i];

            // Is the element used on the destination side?
            if (elemIsUsed[destinationI])
            {
                maxConstructIndex = max(maxConstructIndex, destinationI);

                newMap[newI++] = destinationI;
            }
        }
        if (newI < map.size())
        {
            newMap.setSize(newI);
            constructMap_[domain].transfer(newMap);
        }
    }

    constructSize_ = maxConstructIndex+1;

    // Clear the schedule (note: not necessary if nothing changed)
    schedulePtr_.clear();
}
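
// Usage sketch, assuming 'map' is a mapDistribute and only the first
// constructed element is still needed on this processor. compact() uses
// parallel communication, so it should be called on all processors:
//
//     boolList elemIsUsed(map.constructSize(), false);
//     elemIsUsed[0] = true;
//     map.compact(elemIsUsed);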


// ************************************************************************* //