openfoam/src/OpenFOAM/meshes/polyMesh/mapPolyMesh/mapDistribute/mapDistribute.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2009 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/
#include "mapDistribute.H"
#include "commSchedule.H"
#include "HashSet.H"
#include "globalIndex.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //

Foam::List<Foam::labelPair> Foam::mapDistribute::schedule
(
    const labelListList& subMap,
    const labelListList& constructMap
)
{
    // Communications: send and receive processor
    List<labelPair> allComms;

    {
        HashSet<labelPair, labelPair::Hash<> > commsSet(Pstream::nProcs());

        // Find what communication is required
        forAll(subMap, procI)
        {
            if (procI != Pstream::myProcNo())
            {
                if (subMap[procI].size())
                {
                    // I need to send to procI
                    commsSet.insert(labelPair(Pstream::myProcNo(), procI));
                }
                if (constructMap[procI].size())
                {
                    // I need to receive from procI
                    commsSet.insert(labelPair(procI, Pstream::myProcNo()));
                }
            }
        }
        allComms = commsSet.toc();
    }

    // Reduce
    if (Pstream::master())
    {
        // Receive and merge
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            IPstream fromSlave(Pstream::scheduled, slave);
            List<labelPair> nbrData(fromSlave);

            forAll(nbrData, i)
            {
                if (findIndex(allComms, nbrData[i]) == -1)
                {
                    label sz = allComms.size();
                    allComms.setSize(sz+1);
                    allComms[sz] = nbrData[i];
                }
            }
        }
        // Send back
        for
        (
            int slave=Pstream::firstSlave();
            slave<=Pstream::lastSlave();
            slave++
        )
        {
            OPstream toSlave(Pstream::scheduled, slave);
            toSlave << allComms;
        }
    }
    else
    {
        {
            OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
            toMaster << allComms;
        }
        {
            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
            fromMaster >> allComms;
        }
    }

    // Determine my schedule.
    labelList mySchedule
    (
        commSchedule
        (
            Pstream::nProcs(),
            allComms
        ).procSchedule()[Pstream::myProcNo()]
    );

    // Processors involved in my schedule
    return List<labelPair>(UIndirectList<labelPair>(allComms, mySchedule));

    //if (debug)
    //{
    //    Pout<< "I need to:" << endl;
    //    const List<labelPair>& comms = schedule();
    //    forAll(comms, i)
    //    {
    //        const labelPair& twoProcs = comms[i];
    //        label sendProc = twoProcs[0];
    //        label recvProc = twoProcs[1];
    //
    //        if (recvProc == Pstream::myProcNo())
    //        {
    //            Pout<< "    receive from " << sendProc << endl;
    //        }
    //        else
    //        {
    //            Pout<< "    send to " << recvProc << endl;
    //        }
    //    }
    //}
}

const Foam::List<Foam::labelPair>& Foam::mapDistribute::schedule() const
{
    if (schedulePtr_.empty())
    {
        schedulePtr_.reset
        (
            new List<labelPair>
            (
                schedule(subMap_, constructMap_)
            )
        );
    }

    return schedulePtr_();
}
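
// Example (a rough sketch only; 'map', 'myData' and 'nbrData' are made-up
// names): executing the schedule to do one scheduled exchange per
// processor pair.
//
//    const List<labelPair>& comms = map.schedule();
//    forAll(comms, i)
//    {
//        label sendProc = comms[i][0];
//        label recvProc = comms[i][1];
//
//        if (sendProc == Pstream::myProcNo())
//        {
//            // My turn to send
//            OPstream toNbr(Pstream::scheduled, recvProc);
//            toNbr << myData;
//        }
//        else
//        {
//            // My turn to receive
//            IPstream fromNbr(Pstream::scheduled, sendProc);
//            scalar nbrData(readScalar(fromNbr));
//        }
//    }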

// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

//- Construct from components
Foam::mapDistribute::mapDistribute
(
    const label constructSize,
    const Xfer<labelListList>& subMap,
    const Xfer<labelListList>& constructMap
)
:
    constructSize_(constructSize),
    subMap_(subMap),
    constructMap_(constructMap),
    schedulePtr_()
{}


Foam::mapDistribute::mapDistribute
(
    const labelList& sendProcs,
    const labelList& recvProcs
)
:
    constructSize_(0),
    schedulePtr_()
{
    if (sendProcs.size() != recvProcs.size())
    {
        FatalErrorIn
        (
            "mapDistribute::mapDistribute(const labelList&, const labelList&)"
        )   << "The send and receive data are not the same length. sendProcs:"
            << sendProcs.size() << " recvProcs:" << recvProcs.size()
            << abort(FatalError);
    }

    // Per processor the number of samples we have to send/receive.
    labelList nSend(Pstream::nProcs(), 0);
    labelList nRecv(Pstream::nProcs(), 0);

    forAll(sendProcs, sampleI)
    {
        label sendProc = sendProcs[sampleI];
        label recvProc = recvProcs[sampleI];

        // Note that we also need to include local communication (both
        // recvProc and sendProc on the local processor)

        if (Pstream::myProcNo() == sendProc)
        {
            // I am the sender. Count destination processor.
            nSend[recvProc]++;
        }
        if (Pstream::myProcNo() == recvProc)
        {
            // I am the receiver.
            nRecv[sendProc]++;
        }
    }

    subMap_.setSize(Pstream::nProcs());
    constructMap_.setSize(Pstream::nProcs());
    forAll(nSend, procI)
    {
        subMap_[procI].setSize(nSend[procI]);
        constructMap_[procI].setSize(nRecv[procI]);
    }
    nSend = 0;
    nRecv = 0;

    forAll(sendProcs, sampleI)
    {
        label sendProc = sendProcs[sampleI];
        label recvProc = recvProcs[sampleI];

        if (Pstream::myProcNo() == sendProc)
        {
            // I am the sender. Store index I need to send.
            subMap_[recvProc][nSend[recvProc]++] = sampleI;
        }
        if (Pstream::myProcNo() == recvProc)
        {
            // I am the receiver.
            constructMap_[sendProc][nRecv[sendProc]++] = sampleI;
            // Largest entry inside constructMap
            constructSize_ = sampleI+1;
        }
    }
}
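
// Example (a rough sketch with made-up numbers): on two processors, with
// sample 0 sent from processor 0 to 1 and sample 1 sent from 1 to 0:
//
//    labelList sendProcs(2);
//    sendProcs[0] = 0;
//    sendProcs[1] = 1;
//    labelList recvProcs(2);
//    recvProcs[0] = 1;
//    recvProcs[1] = 0;
//
//    mapDistribute map(sendProcs, recvProcs);
//
// On processor 0 this yields subMap_[1] = (0) (send sample 0 to
// processor 1) and constructMap_[1] = (1) (receive sample 1 from
// processor 1).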


Foam::mapDistribute::mapDistribute
(
    const globalIndex& globalNumbering,
    labelList& elements,
    List<Map<label> >& compactMap
)
:
    constructSize_(0),
    schedulePtr_()
{
    // 1. Construct per processor compact addressing of the global elements
    //    needed. The ones from the local processor are not included since
    //    these are always all needed.
    compactMap.setSize(Pstream::nProcs());

    {
        // Count all (non-local) elements needed. Just for presizing map.
        labelList nNonLocal(Pstream::nProcs(), 0);

        forAll(elements, i)
        {
            label globalIndex = elements[i];

            if (!globalNumbering.isLocal(globalIndex))
            {
                label procI = globalNumbering.whichProcID(globalIndex);
                nNonLocal[procI]++;
            }
        }

        forAll(compactMap, procI)
        {
            compactMap[procI].clear();
            if (procI != Pstream::myProcNo())
            {
                compactMap[procI].resize(2*nNonLocal[procI]);
            }
        }

        // Collect all (non-local) elements needed.
        forAll(elements, i)
        {
            label globalIndex = elements[i];

            if (!globalNumbering.isLocal(globalIndex))
            {
                label procI = globalNumbering.whichProcID(globalIndex);
                label index = globalNumbering.toLocal(procI, globalIndex);
                label nCompact = compactMap[procI].size();
                compactMap[procI].insert(index, nCompact);
            }
        }

        //// Sort remote elements needed (not really necessary)
        //forAll(compactMap, procI)
        //{
        //    if (procI != Pstream::myProcNo())
        //    {
        //        Map<label>& globalMap = compactMap[procI];
        //
        //        SortableList<label> sorted(globalMap.toc().xfer());
        //
        //        forAll(sorted, i)
        //        {
        //            Map<label>::iterator iter = globalMap.find(sorted[i]);
        //            iter() = i;
        //        }
        //    }
        //}
    }

    // 2. The overall compact addressing is
    //    - myProcNo data first (uncompacted)
    //    - all other processors consecutively

    labelList compactStart(Pstream::nProcs());
    compactStart[Pstream::myProcNo()] = 0;
    constructSize_ = globalNumbering.localSize();
    forAll(compactStart, procI)
    {
        if (procI != Pstream::myProcNo())
        {
            compactStart[procI] = constructSize_;
            constructSize_ += compactMap[procI].size();
        }
    }

    // 3. Find out what to receive/send in compact addressing.

    // What I want to receive is what others have to send
    labelListList wantedRemoteElements(Pstream::nProcs());
    // Compact addressing for received data
    constructMap_.setSize(Pstream::nProcs());

    forAll(compactMap, procI)
    {
        if (procI == Pstream::myProcNo())
        {
            // All my own elements are used
            label nLocal = globalNumbering.localSize();
            wantedRemoteElements[procI] = identity(nLocal);
            constructMap_[procI] = identity(nLocal);
        }
        else
        {
            // Remote elements wanted from processor procI
            labelList& remoteElem = wantedRemoteElements[procI];
            labelList& localElem = constructMap_[procI];
            remoteElem.setSize(compactMap[procI].size());
            localElem.setSize(compactMap[procI].size());
            label i = 0;
            forAllIter(Map<label>, compactMap[procI], iter)
            {
                const label compactI = compactStart[procI] + iter();
                remoteElem[i] = iter.key();
                localElem[i] = compactI;
                iter() = compactI;
                i++;
            }
        }
    }

    subMap_.setSize(Pstream::nProcs());
    labelListList sendSizes;
    Pstream::exchange<labelList, label>
    (
        wantedRemoteElements,
        subMap_,
        sendSizes
    );

    // Renumber elements
    forAll(elements, i)
    {
        elements[i] = renumber(globalNumbering, compactMap, elements[i]);
    }
}
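
// Example (a rough sketch with made-up numbers, assuming two processors
// with 10 elements each, so globals 0..9 live on processor 0 and 10..19
// on processor 1). On processor 0:
//
//    globalIndex globalNumbering(10);
//    labelList elements(3);
//    elements[0] = 2;     // local
//    elements[1] = 13;    // local index 3 on processor 1
//    elements[2] = 15;    // local index 5 on processor 1
//    List<Map<label> > compactMap;
//    mapDistribute map(globalNumbering, elements, compactMap);
//
// constructSize() becomes 12 (10 local + 2 remote) and elements is
// renumbered in place to (2, 10, 11): local indices stay put, remote ones
// get the compact slots after the local data. The constructor below does
// the same per row of a labelListList.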


Foam::mapDistribute::mapDistribute
(
    const globalIndex& globalNumbering,
    labelListList& cellCells,
    List<Map<label> >& compactMap
)
:
    constructSize_(0),
    schedulePtr_()
{
    // 1. Construct per processor compact addressing of the global elements
    //    needed. The ones from the local processor are not included since
    //    these are always all needed.
    compactMap.setSize(Pstream::nProcs());

    {
        // Count all (non-local) elements needed. Just for presizing map.
        labelList nNonLocal(Pstream::nProcs(), 0);

        forAll(cellCells, cellI)
        {
            const labelList& cCells = cellCells[cellI];

            forAll(cCells, i)
            {
                label globalIndex = cCells[i];

                if (!globalNumbering.isLocal(globalIndex))
                {
                    label procI = globalNumbering.whichProcID(globalIndex);
                    nNonLocal[procI]++;
                }
            }
        }

        forAll(compactMap, procI)
        {
            compactMap[procI].clear();
            if (procI != Pstream::myProcNo())
            {
                compactMap[procI].resize(2*nNonLocal[procI]);
            }
        }

        // Collect all (non-local) elements needed.
        forAll(cellCells, cellI)
        {
            const labelList& cCells = cellCells[cellI];

            forAll(cCells, i)
            {
                label globalIndex = cCells[i];

                if (!globalNumbering.isLocal(globalIndex))
                {
                    label procI = globalNumbering.whichProcID(globalIndex);
                    label index = globalNumbering.toLocal(procI, globalIndex);
                    label nCompact = compactMap[procI].size();
                    compactMap[procI].insert(index, nCompact);
                }
            }
        }

        //// Sort remote elements needed (not really necessary)
        //forAll(compactMap, procI)
        //{
        //    if (procI != Pstream::myProcNo())
        //    {
        //        Map<label>& globalMap = compactMap[procI];
        //
        //        SortableList<label> sorted(globalMap.toc().xfer());
        //
        //        forAll(sorted, i)
        //        {
        //            Map<label>::iterator iter = globalMap.find(sorted[i]);
        //            iter() = i;
        //        }
        //    }
        //}
    }

    // 2. The overall compact addressing is
    //    - myProcNo data first (uncompacted)
    //    - all other processors consecutively

    labelList compactStart(Pstream::nProcs());
    compactStart[Pstream::myProcNo()] = 0;
    constructSize_ = globalNumbering.localSize();
    forAll(compactStart, procI)
    {
        if (procI != Pstream::myProcNo())
        {
            compactStart[procI] = constructSize_;
            constructSize_ += compactMap[procI].size();
        }
    }

    // 3. Find out what to receive/send in compact addressing.

    // What I want to receive is what others have to send
    labelListList wantedRemoteElements(Pstream::nProcs());
    // Compact addressing for received data
    constructMap_.setSize(Pstream::nProcs());

    forAll(compactMap, procI)
    {
        if (procI == Pstream::myProcNo())
        {
            // All my own elements are used
            label nLocal = globalNumbering.localSize();
            wantedRemoteElements[procI] = identity(nLocal);
            constructMap_[procI] = identity(nLocal);
        }
        else
        {
            // Remote elements wanted from processor procI
            labelList& remoteElem = wantedRemoteElements[procI];
            labelList& localElem = constructMap_[procI];
            remoteElem.setSize(compactMap[procI].size());
            localElem.setSize(compactMap[procI].size());
            label i = 0;
            forAllIter(Map<label>, compactMap[procI], iter)
            {
                const label compactI = compactStart[procI] + iter();
                remoteElem[i] = iter.key();
                localElem[i] = compactI;
                iter() = compactI;
                i++;
            }
        }
    }

    subMap_.setSize(Pstream::nProcs());
    labelListList sendSizes;
    Pstream::exchange<labelList, label>
    (
        wantedRemoteElements,
        subMap_,
        sendSizes
    );

    // Renumber elements
    forAll(cellCells, cellI)
    {
        labelList& cCells = cellCells[cellI];

        forAll(cCells, i)
        {
            cCells[i] = renumber(globalNumbering, compactMap, cCells[i]);
        }
    }
}
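
// Example (a rough sketch; 'fld' is a made-up scalarField and distribute()
// is the templated member defined in mapDistributeTemplates.C): making a
// per-cell field consistent with the compact numbering so that the
// renumbered cellCells entries index into it directly.
//
//    scalarField compactFld(fld);    // local values first
//    map.distribute(compactFld);     // grows it to constructSize()
//    // cellCells[cellI][i] now indexes straight into compactFld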


Foam::mapDistribute::mapDistribute(const mapDistribute& map)
:
    constructSize_(map.constructSize_),
    subMap_(map.subMap_),
    constructMap_(map.constructMap_),
    schedulePtr_()
{}


// * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * * //

Foam::label Foam::mapDistribute::renumber
(
    const globalIndex& globalNumbering,
    const List<Map<label> >& compactMap,
    const label globalI
)
{
    if (globalNumbering.isLocal(globalI))
    {
        return globalNumbering.toLocal(globalI);
    }
    else
    {
        label procI = globalNumbering.whichProcID(globalI);
        label index = globalNumbering.toLocal(procI, globalI);
        return compactMap[procI][index];
    }
}
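
// Example (a rough sketch, reusing the made-up numbers from the
// constructor example above; on processor 0 with 10 local elements):
//
//    renumber(globalNumbering, compactMap, 7);    // == 7  (local)
//    renumber(globalNumbering, compactMap, 13);   // == 10 (compact slot)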


void Foam::mapDistribute::compact(const boolList& elemIsUsed)
{
    // 1. Send back to sender. Have sender delete the corresponding element
    //    from the subMap and do the same to the constructMap locally
    //    (and in the same order).

    // Send elemIsUsed field to neighbour. Use nonblocking code from
    // mapDistribute but in reverse order.
    if (Pstream::parRun())
    {
        List<boolList> sendFields(Pstream::nProcs());

        for (label domain = 0; domain < Pstream::nProcs(); domain++)
        {
            const labelList& map = constructMap_[domain];

            if (domain != Pstream::myProcNo() && map.size())
            {
                boolList& subField = sendFields[domain];
                subField.setSize(map.size());
                forAll(map, i)
                {
                    subField[i] = elemIsUsed[map[i]];
                }

                OPstream::write
                (
                    Pstream::nonBlocking,
                    domain,
                    reinterpret_cast<const char*>(subField.begin()),
                    subField.size()*sizeof(bool)
                );
            }
        }

        // Set up receives from neighbours
        List<boolList> recvFields(Pstream::nProcs());

        for (label domain = 0; domain < Pstream::nProcs(); domain++)
        {
            const labelList& map = subMap_[domain];

            if (domain != Pstream::myProcNo() && map.size())
            {
                recvFields[domain].setSize(map.size());
                IPstream::read
                (
                    Pstream::nonBlocking,
                    domain,
                    reinterpret_cast<char*>(recvFields[domain].begin()),
                    recvFields[domain].size()*sizeof(bool)
                );
            }
        }

        // Set up 'send' to myself - write directly into recvFields
        {
            const labelList& map = constructMap_[Pstream::myProcNo()];
            recvFields[Pstream::myProcNo()].setSize(map.size());
            forAll(map, i)
            {
                recvFields[Pstream::myProcNo()][i] = elemIsUsed[map[i]];
            }
        }

        // Wait for all to finish
        Pstream::waitRequests();

        // Compact out all subMap entries that refer to unused elements
        for (label domain = 0; domain < Pstream::nProcs(); domain++)
        {
            const labelList& map = subMap_[domain];

            labelList newMap(map.size());
            label newI = 0;

            forAll(map, i)
            {
                if (recvFields[domain][i])
                {
                    // So element is used on destination side
                    newMap[newI++] = map[i];
                }
            }
            if (newI < map.size())
            {
                newMap.setSize(newI);
                subMap_[domain].transfer(newMap);
            }
        }
    }

    // 2. Remove from the constructMap, since the end result (the element
    //    in elemIsUsed) is not used.

    label maxConstructIndex = -1;

    for (label domain = 0; domain < Pstream::nProcs(); domain++)
    {
        const labelList& map = constructMap_[domain];

        labelList newMap(map.size());
        label newI = 0;

        forAll(map, i)
        {
            label destinationI = map[i];

            // Check if element is used on destination side
            if (elemIsUsed[destinationI])
            {
                maxConstructIndex = max(maxConstructIndex, destinationI);
                newMap[newI++] = destinationI;
            }
        }
        if (newI < map.size())
        {
            newMap.setSize(newI);
            constructMap_[domain].transfer(newMap);
        }
    }

    constructSize_ = maxConstructIndex+1;

    // Clear the schedule (note: not necessary if nothing changed)
    schedulePtr_.clear();
}
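
// Example (a rough sketch; 'usedCompact' is a made-up list of the compact
// indices still needed): shrinking the map once some received elements
// are no longer used.
//
//    boolList elemIsUsed(map.constructSize(), false);
//    forAll(usedCompact, i)
//    {
//        elemIsUsed[usedCompact[i]] = true;
//    }
//    map.compact(elemIsUsed);
//    // constructSize() shrinks; both maps now only transfer used data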


// * * * * * * * * * * * * * * * Member Operators  * * * * * * * * * * * * * //

void Foam::mapDistribute::operator=(const mapDistribute& rhs)
{
    // Check for assignment to self
    if (this == &rhs)
    {
        FatalErrorIn
        (
            "Foam::mapDistribute::operator=(const Foam::mapDistribute&)"
        )   << "Attempted assignment to self"
            << abort(FatalError);
    }
    constructSize_ = rhs.constructSize_;
    subMap_ = rhs.subMap_;
    constructMap_ = rhs.constructMap_;
    schedulePtr_.clear();
}


// ************************************************************************* //