Decomposition/redistribution: Separated choice of mesh decomposition and redistribution methods
When snappyHexMesh is run in parallel it re-balances the mesh during refinement
and layer addition by redistribution, which requires a decomposition method
that operates in parallel, e.g. hierarchical or ptscotch. decomposePar uses a
decomposition method which operates in serial, e.g. hierarchical but NOT
ptscotch. Previously, to run decomposePar followed by snappyHexMesh in parallel
it was necessary to change the method specified in decomposeParDict; this is
now avoided by specifying the decomposition and distribution methods
separately, e.g. in the incompressible/simpleFoam/motorBike case:
numberOfSubdomains 6;
decomposer hierarchical;
distributor ptscotch;
hierarchicalCoeffs
{
    n (3 2 1);
    order xyz;
}
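
With this split the same decomposeParDict can be used unchanged for both steps.
As a minimal sketch of the workflow (assuming 6 processors to match
numberOfSubdomains above, and mpirun as the MPI launcher), the case is
decomposed with the serial decomposer and then meshed in parallel:

decomposePar
mpirun -np 6 snappyHexMesh -overwrite -parallel

decomposePar applies the hierarchical decomposer, while snappyHexMesh
re-balances the mesh during refinement and layer addition using the ptscotch
distributor.
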
The distributor entry is also used for run-time mesh redistribution, e.g. in the
multiphase/interFoam/RAS/floatingObject case, redistribution for load-balancing
is enabled in constant/dynamicMeshDict:
distributor
{
    type distributor;
    libs ("libfvMeshDistributors.so");
    redistributionInterval 10;
}
which uses the distributor specified in system/decomposeParDict:
distributor hierarchical;
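
No change to the solver invocation is needed for this; as a sketch (the
processor count is illustrative rather than taken from the tutorial), the case
is decomposed and run in parallel as usual:

decomposePar
mpirun -np 4 interFoam -parallel

The mesh distributor then redistributes the mesh for load-balancing every
redistributionInterval time steps (here every 10 steps) using the hierarchical
method specified above.
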
This rationalisation provides the structure for further development of mesh
redistribution and load-balancing.

@@ -803,18 +803,13 @@ Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
     bFTreePtr_(),
     allBackgroundMeshBounds_(Pstream::nProcs()),
     globalBackgroundBounds_(),
-    decomposeDict_
+    decomposerPtr_
     (
-        IOobject
+        decompositionMethod::NewDecomposer
         (
-            "decomposeParDict",
-            runTime_.system(),
-            runTime_,
-            IOobject::MUST_READ_IF_MODIFIED,
-            IOobject::NO_WRITE
+            decompositionMethod::decomposeParDict(runTime)
         )
     ),
-    decomposerPtr_(decompositionMethod::New(decomposeDict_)),
    spanScale_(coeffsDict.lookup<scalar>("spanScale")),
    minCellSizeLimit_
    (

@@ -831,15 +826,6 @@ Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
             << exit(FatalError);
     }
 
-    if (!decomposerPtr_().parallelAware())
-    {
-        FatalErrorInFunction
-            << "You have selected decomposition method "
-            << decomposerPtr_().typeName
-            << " which is not parallel aware." << endl
-            << exit(FatalError);
-    }
-
     Info<< nl << "Building initial background mesh decomposition" << endl;
 
     initialRefinement();

@@ -124,9 +124,6 @@ class backgroundMeshDecomposition
         //  a point that is not found on any processor is in the domain at all
         treeBoundBox globalBackgroundBounds_;
 
-        //- Decomposition dictionary
-        IOdictionary decomposeDict_;
-
         //- Decomposition method
         autoPtr<decompositionMethod> decomposerPtr_;
 

@@ -522,27 +522,18 @@ int main(int argc, char *argv[])
         printMeshData(mesh);
 
         // Allocate a decomposer
-        IOdictionary decompositionDict
-        (
-            IOobject
-            (
-                "decomposeParDict",
-                runTime.system(),
-                mesh,
-                IOobject::MUST_READ_IF_MODIFIED,
-                IOobject::NO_WRITE
-            )
-        );
-
         autoPtr<decompositionMethod> decomposer
         (
-            decompositionMethod::New
+            decompositionMethod::NewDecomposer
             (
-                decompositionDict
+                decompositionMethod::decomposeParDict(runTime)
             )
         );
 
-        labelList decomp = decomposer().decompose(mesh, mesh.cellCentres());
+        const labelList decomp
+        (
+            decomposer().decompose(mesh, mesh.cellCentres())
+        );
 
         // Global matching tolerance
         const scalar tolDim = getMergeDistance

@@ -753,23 +753,12 @@ int main(int argc, char *argv[])
     const Switch keepPatches(meshDict.lookupOrDefault("keepPatches", false));
 
 
     // Read decomposePar dictionary
     dictionary decomposeDict;
     {
         if (Pstream::parRun())
         {
-            decomposeDict = IOdictionary
-            (
-                IOobject
-                (
-                    "decomposeParDict",
-                    runTime.system(),
-                    mesh,
-                    IOobject::MUST_READ_IF_MODIFIED,
-                    IOobject::NO_WRITE
-                )
-            );
+            decomposeDict = decompositionMethod::decomposeParDict(runTime);
         }
         else
         {

@@ -1220,30 +1209,14 @@ int main(int argc, char *argv[])
     // Decomposition
     autoPtr<decompositionMethod> decomposerPtr
     (
-        decompositionMethod::New
-        (
-            decomposeDict
-        )
+        decompositionMethod::NewDistributor(decomposeDict)
     );
     decompositionMethod& decomposer = decomposerPtr();
 
-    if (Pstream::parRun() && !decomposer.parallelAware())
-    {
-        FatalErrorInFunction
-            << "You have selected decomposition method "
-            << decomposer.typeName
-            << " which is not parallel aware." << endl
-            << "Please select one that is (hierarchical, ptscotch)"
-            << exit(FatalError);
-    }
-
     // Mesh distribution engine (uses tolerance to reconstruct meshes)
     fvMeshDistribute distributor(mesh);
 
 
     // Now do the real work -refinement -snapping -layers
     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -844,9 +844,9 @@ int main(int argc, char *argv[])
         bool oldParRun = UPstream::parRun();
         UPstream::parRun() = false;
 
-        autoPtr<decompositionMethod> decomposePtr = decompositionMethod::New
+        autoPtr<decompositionMethod> decomposePtr
         (
-            decomposeDict
+            decompositionMethod::NewDecomposer(decomposeDict)
         );
 
         labelList cellToRegion