ENH: improve processor topology handling in zoneDistribute (#2371)

- now largely encapsulated using PstreamBuffers methods,
  which makes it simpler to centralize and maintain

- avoid building intermediate structures when sending data,
  remove unused methods/data

TUT: parallel version of depthCharge2D

STYLE: minor update in ProcessorTopology
This commit is contained in:
Mark Olesen
2022-03-02 14:00:29 +01:00
committed by Andrew Heather
parent 14631984df
commit 2b54d86152
17 changed files with 264 additions and 286 deletions

View File

@ -592,8 +592,6 @@ polyBoundaryMesh = $(polyMesh)/polyBoundaryMesh
$(polyBoundaryMesh)/polyBoundaryMesh.C
$(polyBoundaryMesh)/polyBoundaryMeshEntries.C
meshes/ProcessorTopology/commSchedule.C
globalMeshData = $(polyMesh)/globalMeshData
$(globalMeshData)/globalMeshData.C
$(globalMeshData)/globalPoints.C
@ -812,6 +810,7 @@ algorithms/indexedOctree/volumeType.C
algorithms/dynamicIndexedOctree/dynamicIndexedOctreeName.C
algorithms/dynamicIndexedOctree/dynamicTreeDataPoint.C
parallel/commSchedule/commSchedule.C
parallel/globalIndex/globalIndex.C
meshes/data/data.C

View File

@ -27,6 +27,7 @@ License
\*---------------------------------------------------------------------------*/
#include "PstreamBuffers.H"
#include "bitSet.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
@ -306,4 +307,72 @@ void Foam::PstreamBuffers::finishedSends
}
//- Mark sends as done and establish (or reuse) the send/recv topology.
//  The caller-supplied structures act as a cache of the connectivity:
//  when nothing changed since the previous call, only the cached
//  subset of ranks is waited on instead of an all-to-all.
//  \return True if the send/recv connectivity changed
bool Foam::PstreamBuffers::finishedSends
(
bitSet& sendConnections,
DynamicList<label>& sendProcs,
DynamicList<label>& recvProcs,
const bool wait
)
{
// A size mismatch means the cache was never initialized
// (first call, or a different number of ranks)
bool changed = (sendConnections.size() != nProcs());
if (changed)
{
sendConnections.resize(nProcs());
}
// Update send connections
// - reasonable to assume there are no self-sends on UPstream::myProcNo
forAll(sendBuf_, proci)
{
// ie, hasSendData(proci)
// bitSet::set() returns true only when the bit value changed
if (sendConnections.set(proci, !sendBuf_[proci].empty()))
{
// The state changed
changed = true;
}
}
// Global consensus: if any rank saw a change, all ranks rebuild
reduce(changed, orOp<bool>());
if (changed)
{
// Create send/recv topology
// The send ranks
sendProcs.clear();
forAll(sendBuf_, proci)
{
// ie, hasSendData(proci)
if (!sendBuf_[proci].empty())
{
sendProcs.append(proci);
}
}
finishedSends(wait); // All-to-all
// The recv ranks
// - only known after the all-to-all has populated recvBuf_
recvProcs.clear();
forAll(recvBuf_, proci)
{
// ie, hasRecvData(proci)
if (!recvBuf_[proci].empty())
{
recvProcs.append(proci);
}
}
}
else
{
// Use existing send/recv ranks
// - reduced communication: only the cached subset of ranks
finishedSends(sendProcs, recvProcs, wait);
}
return changed;
}
// ************************************************************************* //

View File

@ -82,6 +82,9 @@ SourceFiles
namespace Foam
{
// Forward Declarations
class bitSet;
/*---------------------------------------------------------------------------*\
Class PstreamBuffers Declaration
\*---------------------------------------------------------------------------*/
@ -300,6 +303,25 @@ public:
const bool wait = true
);
//- A caching version that uses a limited send/recv connectivity.
//
// Non-blocking mode: populates receive buffers.
// \param sendConnections on/off for sending ranks
// \param sendProcs ranks used for sends
// \param recvProcs ranks used for recvs
// \param wait wait for requests to complete (in nonBlocking mode)
//
// \return True if the send/recv connectivity changed
//
// \warning currently only valid for nonBlocking comms.
bool finishedSends
(
bitSet& sendConnections,
DynamicList<label>& sendProcs,
DynamicList<label>& recvProcs,
const bool wait = true
);
//- Mark sends as done using subset of send/recv ranks
//- and recover the sizes (bytes) received.
//

View File

@ -29,7 +29,6 @@ License
#include "ListOps.H"
#include "Pstream.H"
#include "commSchedule.H"
#include "boolList.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
@ -50,22 +49,16 @@ Foam::labelList Foam::ProcessorTopology<Container, ProcPatch>::procNeighbours
forAll(patches, patchi)
{
const typename Container::const_reference patch = patches[patchi];
if (isA<ProcPatch>(patch))
const auto* cpp = isA<ProcPatch>(patches[patchi]);
if (cpp)
{
const ProcPatch& procPatch =
refCast<const ProcPatch>(patch);
const label nbrProci = cpp->neighbProcNo();
label pNeighbProcNo = procPatch.neighbProcNo();
if (!isNeighbourProc[pNeighbProcNo])
if (!isNeighbourProc[nbrProci])
{
nNeighbours++;
maxNb = max(maxNb, procPatch.neighbProcNo());
isNeighbourProc[pNeighbProcNo] = true;
isNeighbourProc[nbrProci] = true;
maxNb = max(maxNb, nbrProci);
++nNeighbours;
}
}
}
@ -87,15 +80,13 @@ Foam::labelList Foam::ProcessorTopology<Container, ProcPatch>::procNeighbours
forAll(patches, patchi)
{
const typename Container::const_reference patch = patches[patchi];
if (isA<ProcPatch>(patch))
const auto* cpp = isA<ProcPatch>(patches[patchi]);
if (cpp)
{
const ProcPatch& procPatch =
refCast<const ProcPatch>(patch);
const label nbrProci = cpp->neighbProcNo();
// Construct reverse map
procPatchMap_[procPatch.neighbProcNo()] = patchi;
// Reverse map
procPatchMap_[nbrProci] = patchi;
}
}
@ -113,7 +104,7 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
)
:
labelListList(Pstream::nProcs(comm)),
patchSchedule_(2*patches.size())
patchSchedule_()
{
if (Pstream::parRun())
{
@ -132,6 +123,8 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
&& Pstream::defaultCommsType == Pstream::commsTypes::scheduled
)
{
patchSchedule_.resize(2*patches.size());
label patchEvali = 0;
// 1. All non-processor patches
@ -155,21 +148,21 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
// to determine the schedule. Each processor pair stands for both
// send and receive.
label nComms = 0;
forAll(*this, proci)
for (const labelList& neighbours : *this)
{
nComms += operator[](proci).size();
nComms += neighbours.size();
}
DynamicList<labelPair> comms(nComms);
forAll(*this, proci)
{
const labelList& nbrs = operator[](proci);
const labelList& neighbours = operator[](proci);
forAll(nbrs, i)
for (const label nbrProci : neighbours)
{
if (proci < nbrs[i])
if (proci < nbrProci)
{
comms.append(labelPair(proci, nbrs[i]));
comms.append(labelPair(proci, nbrProci));
}
}
}
@ -185,10 +178,8 @@ Foam::ProcessorTopology<Container, ProcPatch>::ProcessorTopology
).procSchedule()[Pstream::myProcNo(comm)]
);
forAll(mySchedule, iter)
for (const label commI : mySchedule)
{
label commI = mySchedule[iter];
// Get the other processor
label nb = comms[commI][0];
if (nb == Pstream::myProcNo(comm))

View File

@ -40,8 +40,8 @@ SourceFiles
\*---------------------------------------------------------------------------*/
#ifndef ProcessorTopology_H
#define ProcessorTopology_H
#ifndef Foam_ProcessorTopology_H
#define Foam_ProcessorTopology_H
#include "labelList.H"
#include "lduSchedule.H"
@ -60,10 +60,7 @@ class ProcessorTopology
:
public labelListList
{
private:
// Private data
// Private Data
//- Local map from neighbour proc to patchi. Different per processor!
// -1 or patchi for connection to procID
@ -76,8 +73,8 @@ private:
// Private Member Functions
//- Return all neighbouring processors of this processor. Set
// procPatchMap_.
//- Return all neighbouring processors of this processor.
// Sets procPatchMap_.
labelList procNeighbours(const label nProcs, const Container&);
public:
@ -88,25 +85,27 @@ public:
ProcessorTopology(const Container& patches, const label comm);
// Static Functions
//- Calculate non-blocking (i.e. unscheduled) schedule
static lduSchedule nonBlockingSchedule(const Container& patches);
// Member Functions
//- From neighbour processor to index in boundaryMesh. Local information
// (so not same over all processors)
const labelList& procPatchMap() const
//- From neighbour processor to index in boundaryMesh.
// Local information (so not same over all processors)
const labelList& procPatchMap() const noexcept
{
return procPatchMap_;
}
//- Order in which the patches should be initialised/evaluated
// corresponding to the schedule
const lduSchedule& patchSchedule() const
//- corresponding to the schedule
const lduSchedule& patchSchedule() const noexcept
{
return patchSchedule_;
}
//- Calculate non-blocking (i.e. unscheduled) schedule
static lduSchedule nonBlockingSchedule(const Container& patches);
};

View File

@ -45,7 +45,6 @@ namespace Foam
namespace Foam
{
// Private Member Functions
// Count the number of outstanding communications for a single processor
static label outstandingComms
@ -99,7 +98,7 @@ Foam::commSchedule::commSchedule
}
// Note: no need to shrink procToComms. Are small.
if (debug && Pstream::master())
if (debug && UPstream::master())
{
Pout<< "commSchedule : Wanted communication:" << endl;
@ -198,7 +197,7 @@ Foam::commSchedule::commSchedule
busy[comms[maxCommI][1]] = true;
}
if (debug && Pstream::master())
if (debug && UPstream::master())
{
label nIterComms = nScheduled-oldNScheduled;
@ -240,7 +239,7 @@ Foam::commSchedule::commSchedule
iter++;
}
if (debug && Pstream::master())
if (debug && UPstream::master())
{
Pout<< endl;
}
@ -282,7 +281,7 @@ Foam::commSchedule::commSchedule
procSchedule_[proc1][nProcScheduled[proc1]++] = commI;
}
if (debug && Pstream::master())
if (debug && UPstream::master())
{
Pout<< "commSchedule::commSchedule : Per processor:" << endl;

View File

@ -27,13 +27,6 @@ License
\*---------------------------------------------------------------------------*/
#include "zoneDistribute.H"
#include "dummyTransform.H"
#include "emptyPolyPatch.H"
#include "processorPolyPatch.H"
#include "syncTools.H"
#include "wedgePolyPatch.H"
#include "globalPoints.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
@ -43,61 +36,14 @@ namespace Foam
}
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
Foam::autoPtr<Foam::indirectPrimitivePatch>
Foam::zoneDistribute::coupledFacesPatch() const
{
const polyBoundaryMesh& patches = mesh_.boundaryMesh();
label nCoupled = 0;
for (const polyPatch& pp : patches)
{
if (isA<processorPolyPatch>(pp))
{
nCoupled += pp.size();
}
}
labelList coupledFaces(nCoupled);
nCoupled = 0;
for (const polyPatch& pp : patches)
{
if (isA<processorPolyPatch>(pp))
{
label facei = pp.start();
forAll(pp, i)
{
coupledFaces[nCoupled++] = facei++;
}
}
}
return autoPtr<indirectPrimitivePatch>::New
(
IndirectList<face>
(
mesh_.faces(),
coupledFaces
),
mesh_.points()
);
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::zoneDistribute::zoneDistribute(const fvMesh& mesh)
:
MeshObject<fvMesh, Foam::TopologicalMeshObject, zoneDistribute>(mesh),
coupledBoundaryPoints_(coupledFacesPatch()().meshPoints()),
send_(UPstream::nProcs()),
stencil_(zoneCPCStencil::New(mesh)),
globalNumbering_(stencil_.globalNumbering()),
sendTo_(), // Initial zero-sized
recvFrom_() // Initial zero-sized
send_(UPstream::nProcs())
{}
@ -140,20 +86,10 @@ void Foam::zoneDistribute::setUpCommforZone
if (UPstream::parRun())
{
if (sendTo_.empty())
{
// First time
sendTo_.resize(UPstream::nProcs());
recvFrom_.resize(UPstream::nProcs());
sendTo_ = false;
recvFrom_ = false;
}
const labelHashSet& comms = stencil.needsComm();
List<labelHashSet> needed(UPstream::nProcs());
for (const label celli : comms)
// Bin according to originating (sending) processor
for (const label celli : stencil.needsComm())
{
if (zone[celli])
{
@ -169,73 +105,24 @@ void Foam::zoneDistribute::setUpCommforZone
}
}
// Stream the send data into PstreamBuffers,
// which we also use to track the current topology.
PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
// Stream data into buffer
for (const int proci : UPstream::allProcs())
{
if (proci != UPstream::myProcNo() && !needed[proci].empty())
{
// Put data into send buffer
// Serialize as List
UOPstream toProc(proci, pBufs);
toProc << needed[proci].sortedToc();
}
}
pBufs.finishedSends(sendConnections_, sendProcs_, recvProcs_);
// Need update, or use existing partial communication info?
bool fullUpdate = false;
for (const int proci : UPstream::allProcs())
{
if
(
proci != UPstream::myProcNo()
&& (!sendTo_[proci] && !needed[proci].empty())
)
{
// Changed state sendTo_ from false -> true
sendTo_[proci] = true;
fullUpdate = true;
}
}
if (returnReduce(fullUpdate, orOp<bool>()))
{
pBufs.finishedSends();
// Update which ones receive
for (const int proci : UPstream::allProcs())
{
recvFrom_[proci] = pBufs.hasRecvData(proci);
}
}
else
{
// No change in senders...
// - can communicate with a subset of processors
DynamicList<label> sendProcs;
DynamicList<label> recvProcs;
for (const int proci : UPstream::allProcs())
{
if (sendTo_[proci])
{
sendProcs.append(proci);
}
if (recvFrom_[proci])
{
recvProcs.append(proci);
}
}
// Wait until everything is written
pBufs.finishedSends(sendProcs, recvProcs);
}
for (const int proci : UPstream::allProcs())
for (const int proci : pBufs.allProcs())
{
send_[proci].clear();

View File

@ -31,12 +31,28 @@ Description
Class for parallel communication in a narrow band. It either provides a Map
with the neighbouring values of the selected region or returns a Map of the
required values in global addressing. Also holds a reference to the stencil
Before the data transfer the communation has to be set up:
Before the data transfer the communication has to be set up:
exchangeFields_.setUpCommforZone(interfaceCell_);
It is used in the plicRDF reconstruction scheme.
Original code supplied by Henning Scheufler, DLR (2019)
Additional optimization of processor communication
provided by Tetsuo AOYAGI, RIST (2022), to use a more compact
exchange of sizes with an updated version of PstreamBuffers.
This optimization uses additional sendTo/recvFrom member data
to track the topological connectivity, acting like an on-the-fly
sub-communicator, and respects corner connectivity.
-# Initially topological connections are empty (or all false).
-# Scan the stencil global cellIds (active zones only) and split
into sub-lists according to the originating processor (the sender).
-# If an originating processor appears/disappears, need to update
the connectivity information (requires an all-to-all).
-# When possible, the topological send/recv is used in PstreamBuffers
finishedSends (minimizes communication).
.
SourceFiles
zoneDistributeI.H
zoneDistribute.C
@ -51,7 +67,6 @@ SourceFiles
#include "volFields.H"
#include "zoneCPCStencil.H"
#include "IOobject.H"
#include "MeshObject.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -69,47 +84,41 @@ class zoneDistribute
{
// Private Data
//- labels of the points on coupled patches
labelList coupledBoundaryPoints_;
//- Addressing for processor-to-processor comms
List<labelList> send_;
//- Return patch of all coupled faces.
autoPtr<indirectPrimitivePatch> coupledFacesPatch() const;
//- Reference to the zone stencil
zoneCPCStencil& stencil_;
//- Global number into cells and faces
//- Global number into index of cells/faces
const globalIndex& globalNumbering_;
//- Parallel: send data to processors (true/false)
// updated in setUpCommforZone
boolList sendTo_;
//- Global cell/face index to send for processor-to-processor comms
List<labelList> send_;
//- Parallel: receive data from processors (true/false)
// updated in setUpCommforZone
boolList recvFrom_;
//- Parallel [cache]: send connectivity (true/false)
bitSet sendConnections_;
//- Parallel [cache]: send data to these ranks
DynamicList<label> sendProcs_;
//- Parallel [cache]: recv data from these ranks
DynamicList<label> recvProcs_;
// Private Member Functions
//- Gives patchNumber and patchFaceNumber for a given
//- Geometric volume field
//- Return local volField value at (cell or face) index
template<typename Type>
Type getLocalValue
(
const GeometricField<Type, fvPatchField, volMesh>& phi,
const VolumeField<Type>& phi,
const label localIdx
) const;
//- Gives patchNumber and patchFaceNumber for a given
//- Geometric volume field
template<typename Type>
Type faceValue
(
const GeometricField<Type, fvPatchField, volMesh>& phi,
const VolumeField<Type>& phi,
const label localIdx
) const;
@ -158,7 +167,7 @@ public:
template<typename Type>
Type getValue
(
const GeometricField<Type, fvPatchField, volMesh>& phi,
const VolumeField<Type>& phi,
const Map<Type>& valuesFromOtherProc,
const label gblIdx
) const;
@ -168,7 +177,7 @@ public:
Map<Field<Type>> getFields
(
const boolList& zone,
const GeometricField<Type, fvPatchField, volMesh>& phi
const VolumeField<Type>& phi
);
//- Returns stencil and provides a Map with globalNumbering
@ -176,7 +185,7 @@ public:
Map<Type> getDatafromOtherProc
(
const boolList& zone,
const GeometricField<Type, fvPatchField, volMesh>& phi
const VolumeField<Type>& phi
);
};

View File

@ -26,16 +26,14 @@ License
\*---------------------------------------------------------------------------*/
#include "zoneDistribute.H"
#include "DynamicField.H"
#include "syncTools.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
template<typename Type>
Type Foam::zoneDistribute::getLocalValue
(
const GeometricField<Type, fvPatchField, volMesh>& phi,
const VolumeField<Type>& phi,
const label localIdx
) const
{
@ -51,7 +49,7 @@ Type Foam::zoneDistribute::getLocalValue
template<typename Type>
Type Foam::zoneDistribute::faceValue
(
const GeometricField<Type, fvPatchField, volMesh>& phi,
const VolumeField<Type>& phi,
const label localIdx
) const
{
@ -80,7 +78,7 @@ Type Foam::zoneDistribute::faceValue
template<typename Type>
Type Foam::zoneDistribute::getValue
(
const GeometricField<Type, fvPatchField, volMesh>& phi,
const VolumeField<Type>& phi,
const Map<Type>& valuesFromOtherProc,
const label gblIdx
) const
@ -90,8 +88,9 @@ Type Foam::zoneDistribute::getValue
const label localIdx = globalNumbering_.toLocal(gblIdx);
return getLocalValue(phi,localIdx);
}
else // from other proc
else
{
// From other proc
return valuesFromOtherProc[gblIdx];
}
}
@ -101,7 +100,7 @@ template<typename Type>
Foam::Map<Foam::Field<Type>> Foam::zoneDistribute::getFields
(
const boolList& zone,
const GeometricField<Type, fvPatchField, volMesh>& phi
const VolumeField<Type>& phi
)
{
if (zone.size() != phi.size())
@ -144,7 +143,7 @@ template<typename Type>
Foam::Map<Type> Foam::zoneDistribute::getDatafromOtherProc
(
const boolList& zone,
const GeometricField<Type, fvPatchField, volMesh>& phi
const VolumeField<Type>& phi
)
{
if (zone.size() != phi.size())
@ -162,86 +161,51 @@ Foam::Map<Type> Foam::zoneDistribute::getDatafromOtherProc
if (UPstream::parRun())
{
const bool commsPrepared = (sendTo_.size() == UPstream::nProcs());
if (!commsPrepared)
if (sendConnections_.empty())
{
FatalErrorInFunction
<< "The send/recv information not initialized - "
WarningInFunction
<< "The send/recv connections not initialized - "
<< "likely that setUpCommforZone() was not called"
<< endl;
// But don't exit/abort for now
}
List<Map<Type>> sendValues(UPstream::nProcs());
forAll(send_, proci)
{
for (const label sendIdx : send_[proci])
{
sendValues[proci].insert
(
sendIdx,
getLocalValue(phi, globalNumbering_.toLocal(sendIdx))
);
}
}
// Stream the send data into PstreamBuffers,
// which we also use to track the current topology.
PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
// Stream data into buffer
for (const int proci : UPstream::allProcs())
{
if (proci != UPstream::myProcNo() && !sendValues[proci].empty())
if (proci != UPstream::myProcNo() && !send_[proci].empty())
{
// Put data into send buffer
// Serialize as Map
Map<Type> sendValues(2*send_[proci].size());
for (const label sendIdx : send_[proci])
{
sendValues.insert
(
sendIdx,
getLocalValue(phi, globalNumbering_.toLocal(sendIdx))
);
}
UOPstream toProc(proci, pBufs);
toProc << sendValues[proci];
toProc << sendValues;
}
}
pBufs.finishedSends(sendConnections_, sendProcs_, recvProcs_);
if (commsPrepared)
for (const int proci : pBufs.allProcs())
{
DynamicList<label> sendProcs;
DynamicList<label> recvProcs;
for (const int proci : UPstream::allProcs())
{
if (sendTo_[proci])
{
sendProcs.append(proci);
}
if (recvFrom_[proci])
{
recvProcs.append(proci);
}
}
pBufs.finishedSends(sendProcs, recvProcs);
}
else
{
// Fallback to all-to-all communication
pBufs.finishedSends();
}
for (const int proci : UPstream::allProcs())
{
// Do not use recvFrom_[proci] here
// - could be incorrect if comms are not prepared!
if (proci != UPstream::myProcNo() && pBufs.hasRecvData(proci))
{
UIPstream fromProc(proci, pBufs);
Map<Type> tmpValue(fromProc);
Map<Type> tmpValues(fromProc);
neiValues += tmpValue;
neiValues += tmpValues;
}
}
}

View File

@ -67,11 +67,7 @@ Foam::reconstructedDistanceFunction::coupledFacesPatch() const
return autoPtr<indirectPrimitivePatch>::New
(
IndirectList<face>
(
mesh_.faces(),
nCoupledFaces
),
IndirectList<face>(mesh_.faces(), std::move(nCoupledFaces)),
mesh_.points()
);
}

View File

@ -28,8 +28,10 @@ License
#include "gradAlpha.H"
#include "fvc.H"
#include "leastSquareGrad.H"
#include "zoneDistribute.H"
#include "addToRunTimeSelectionTable.H"
#include "profiling.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam
@ -50,8 +52,7 @@ void Foam::reconstruction::gradAlpha::gradSurf(const volScalarField& phi)
leastSquareGrad<scalar> lsGrad("polyDegree1",mesh_.geometricD());
zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);
exchangeFields.setUpCommforZone(interfaceCell_,true);
exchangeFields.setUpCommforZone(interfaceCell_, true);
Map<vector> mapCC
(
@ -135,10 +136,7 @@ void Foam::reconstruction::gradAlpha::reconstruct(bool forceUpdate)
if (mesh_.topoChanging())
{
// Introduced resizing to cope with changing meshes
if (interfaceCell_.size() != mesh_.nCells())
{
interfaceCell_.resize(mesh_.nCells());
}
interfaceCell_.resize_nocopy(mesh_.nCells());
}
interfaceCell_ = false;

View File

@ -45,10 +45,9 @@ SourceFiles
#include "typeInfo.H"
#include "reconstructionSchemes.H"
#include "volFields.H"
#include "DynamicField.H"
#include "dimensionedScalar.H"
#include "autoPtr.H"
#include "surfaceIteratorPLIC.H"
#include "zoneDistribute.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -86,6 +85,7 @@ class gradAlpha
//- SurfaceIterator finds the plane centre for specified VOF value
surfaceIteratorPLIC sIterPLIC_;
// Private Member Functions
//- Compute gradient at the surfaces

View File

@ -29,6 +29,7 @@ License
#include "interpolationCellPoint.H"
#include "fvc.H"
#include "leastSquareGrad.H"
#include "zoneDistribute.H"
#include "addToRunTimeSelectionTable.H"
#include "profiling.H"
@ -50,11 +51,11 @@ void Foam::reconstruction::plicRDF::interpolateNormal()
{
addProfilingInFunction(geometricVoF);
scalar dt = mesh_.time().deltaTValue();
zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);
leastSquareGrad<scalar> lsGrad("polyDegree1",mesh_.geometricD());
exchangeFields.setUpCommforZone(interfaceCell_,false);
zoneDistribute& exchangeFields = zoneDistribute::New(mesh_);
exchangeFields.setUpCommforZone(interfaceCell_, false);
Map<vector> mapCentre
(
@ -74,8 +75,8 @@ void Foam::reconstruction::plicRDF::interpolateNormal()
exchangeFields.getDatafromOtherProc(interfaceCell_, alpha1_)
);
DynamicField<vector > cellCentre(100);
DynamicField<scalar > alphaValues(100);
DynamicField<vector> cellCentre(100);
DynamicField<scalar> alphaValues(100);
DynamicList<vector> foundNormals(30);

View File

@ -59,7 +59,6 @@ SourceFiles
#include "autoPtr.H"
#include "surfaceIteratorPLIC.H"
#include "reconstructedDistanceFunction.H"
#include "zoneDistribute.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -0,0 +1,18 @@
#!/bin/sh
cd "${0%/*}" || exit                                # Run from this directory
. ${WM_PROJECT_DIR:?}/bin/tools/RunFunctions        # Tutorial run functions
#------------------------------------------------------------------------------

# Mesh generation and field initialisation
runApplication blockMesh

restore0Dir

runApplication setFields

runApplication setAlphaField

# Use runApplication (not a bare call) so the step is logged and
# skipped on re-run, consistent with the other steps above
runApplication decomposePar

runParallel $(getApplication)

#------------------------------------------------------------------------------

View File

@ -0,0 +1,27 @@
/*--------------------------------*- C++ -*----------------------------------*\
| =========                 |                                                 |
| \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox           |
|  \\    /   O peration     | Version:  v2112                                 |
|   \\  /    A nd           | Website:  www.openfoam.com                      |
|    \\/     M anipulation  |                                                 |
\*---------------------------------------------------------------------------*/
FoamFile
{
version     2.0;
format      ascii;
class       dictionary;
object      decomposeParDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// Total number of processor subdomains (must match the product of 'n' below)
numberOfSubdomains 8;

// Geometric decomposition with a user-specified split per direction
method          hierarchical;

coeffs
{
    // Subdivisions in (x y z): 4 x 2 x 1 = 8 subdomains
    n           (4 2 1);
}

// ************************************************************************* //