Mirror of https://develop.openfoam.com/Development/openfoam.git
ENH: additional decompose options
- decomposePar: -no-fields to suppress decomposition of fields
- makeFaMesh: -no-decompose to suppress creation of *ProcAddressing and fields,
  -no-fields to suppress decomposition of fields only
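Illustrative usage of the new options (a sketch only: the -no-fields and -no-decompose flags are the ones added by this commit; the process count and the standard mpirun/-parallel invocation are assumptions, not part of the change):

    # volume decomposition without decomposing any fields
    decomposePar -no-fields

    # parallel finite-area mesh creation, writing no procAddressing and no fields
    mpirun -np 4 makeFaMesh -no-decompose -parallel

    # parallel finite-area mesh creation, writing procAddressing but skipping fields
    mpirun -np 4 makeFaMesh -no-fields -parallel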
@@ -11,11 +11,11 @@ License
     This file is part of OpenFOAM, distributed under GPL-3.0-or-later.
 
 Description
-    Decompose area fields, when mesh was generated in parallel
+    Write proc addressing and decompose area fields (parallel only).
 
 \*---------------------------------------------------------------------------*/
 
-if (Pstream::parRun())
+if (doDecompose && Pstream::parRun())
 {
     faMeshReconstructor reconstructor(aMesh);
     reconstructor.writeAddressing();
@@ -43,17 +43,34 @@ if (Pstream::parRun())
 
     reconstructor.writeMesh();
 
+    if (doDecompFields)
+    {
         const bool oldDistributed = fileHandler().distributed();
         auto oldHandler = fileHandler(fileOperation::NewUncollated());
         fileHandler().distributed(true);
 
         IOobjectList objects(fullMesh.time(), runTime.timeName());
 
-        faFieldDecomposer::readFields(fullMesh, objects, areaScalarFields);
-        faFieldDecomposer::readFields(fullMesh, objects, areaVectorFields);
-        faFieldDecomposer::readFields(fullMesh, objects, areaSphTensorFields);
-        faFieldDecomposer::readFields(fullMesh, objects, areaSymmTensorFields);
-        faFieldDecomposer::readFields(fullMesh, objects, areaTensorFields);
+        faFieldDecomposer::readFields
+        (
+            fullMesh, objects, areaScalarFields
+        );
+        faFieldDecomposer::readFields
+        (
+            fullMesh, objects, areaVectorFields
+        );
+        faFieldDecomposer::readFields
+        (
+            fullMesh, objects, areaSphTensorFields
+        );
+        faFieldDecomposer::readFields
+        (
+            fullMesh, objects, areaSymmTensorFields
+        );
+        faFieldDecomposer::readFields
+        (
+            fullMesh, objects, areaTensorFields
+        );
 
         // Restore old settings
         if (oldHandler)
@@ -62,6 +79,7 @@ if (Pstream::parRun())
         }
         fileHandler().distributed(oldDistributed);
     }
+    }
 
     const label nAreaFields =
     (
@@ -124,4 +142,5 @@ if (Pstream::parRun())
 }
 }
 
+
 // ************************************************************************* //
@@ -71,10 +71,21 @@ int main(int argc, char *argv[])
     );
     argList::addOption("dict", "file", "Alternative faMeshDefinition");
 
+    argList::addDryRunOption
+    (
+        "Create but do not write"
+    );
     argList::addBoolOption
     (
-        "dry-run",
-        "Create but do not write"
+        "no-decompose",
+        "Suppress procAddressing creation and field decomposition"
+        " (parallel)"
+    );
+    argList::addBoolOption
+    (
+        "no-fields",
+        "Suppress field decomposition"
+        " (parallel)"
     );
     argList::addBoolOption
     (
@@ -93,7 +104,17 @@ int main(int argc, char *argv[])
     #include "createTime.H"
     #include "createNamedPolyMesh.H"
 
-    const bool dryrun = args.found("dry-run");
+    const bool doDecompose = !args.found("no-decompose");
+    const bool doDecompFields = !args.found("no-fields");
 
+    if (!doDecompose)
+    {
+        Info<< "Skip decompose of finiteArea mesh/fields" << nl;
+    }
+    else if (!doDecompFields)
+    {
+        Info<< "Skip decompose of finiteArea fields" << nl;
+    }
+
     // Reading faMeshDefinition dictionary
     #include "findMeshDefinitionDict.H"
@@ -122,7 +143,7 @@ int main(int argc, char *argv[])
         #include "faMeshWriteVTK.H"
     }
 
-    if (dryrun)
+    if (args.dryRun())
     {
         Info<< "\ndry-run: not writing mesh or decomposing fields\n" << nl;
     }
@@ -141,11 +141,15 @@ Usage
 
 \*---------------------------------------------------------------------------*/
 
+#include "argList.H"
+#include "timeSelector.H"
 #include "OSspecific.H"
-#include "fvCFD.H"
 #include "IOobjectList.H"
 
+#include "decompositionModel.H"
 #include "domainDecomposition.H"
 #include "domainDecompositionDryRun.H"
 
 #include "labelIOField.H"
 #include "labelFieldIOField.H"
 #include "scalarIOField.H"
@@ -165,9 +169,7 @@ Usage
 #include "fvFieldDecomposer.H"
 #include "pointFieldDecomposer.H"
 #include "lagrangianFieldDecomposer.H"
-#include "decompositionModel.H"
 
-#include "faCFD.H"
 #include "emptyFaPatch.H"
 #include "faMeshDecomposition.H"
 #include "faFieldDecomposer.H"
@@ -306,6 +308,8 @@ void decomposeUniform
 } // End namespace Foam
 
 
+using namespace Foam;
+
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
 
 int main(int argc, char *argv[])
@@ -384,6 +388,11 @@ int main(int argc, char *argv[])
         "fields",
         "Use existing geometry decomposition and convert fields only"
     );
+    argList::addBoolOption
+    (
+        "no-fields",
+        "Suppress conversion of fields (volume, finite-area, lagrangian)"
+    );
 
     argList::addBoolOption
     (
@@ -417,6 +426,7 @@ int main(int argc, char *argv[])
 
     const bool decomposeIfRequired = args.found("ifRequired");
 
+    const bool doDecompFields = !args.found("no-fields");
     const bool doFiniteArea = !args.found("no-finite-area");
     const bool doLagrangian = !args.found("no-lagrangian");
 
@@ -436,6 +446,18 @@ int main(int argc, char *argv[])
     }
     else
     {
+        if (decomposeFieldsOnly && !doDecompFields)
+        {
+            FatalErrorIn(args.executable())
+                << "Options -fields and -no-fields are mutually exclusive"
+                << " ... giving up" << nl
+                << exit(FatalError);
+        }
+
+        if (!doDecompFields)
+        {
+            Info<< "Skip decompose of all fields" << nl;
+        }
         if (!doFiniteArea)
         {
             Info<< "Skip decompose of finiteArea mesh/fields" << nl;
@@ -448,6 +470,7 @@ int main(int argc, char *argv[])
         times = timeSelector::selectIfPresent(runTime, args);
     }
 
+
     // Allow override of decomposeParDict location
     fileName decompDictFile(args.get<fileName>("decomposeParDict", ""));
     if (!decompDictFile.empty() && !decompDictFile.isAbsolute())
@@ -507,7 +530,8 @@ int main(int argc, char *argv[])
     Info<< nl << endl;
 
     // Determine the existing processor count directly
-    label nProcs = fileHandler().nProcs(runTime.path(), regionDir);
+    const label nProcsOld =
+        fileHandler().nProcs(runTime.path(), regionDir);
 
     // Get requested numberOfSubdomains directly from the dictionary.
     // Note: have no mesh yet so cannot use decompositionModel::New
@@ -538,22 +562,22 @@ int main(int argc, char *argv[])
     if (decomposeFieldsOnly)
     {
         // Sanity check on previously decomposed case
-        if (nProcs != nDomains)
+        if (nProcsOld != nDomains)
         {
-            FatalErrorInFunction
+            FatalErrorIn(args.executable())
                 << "Specified -fields, but the case was decomposed with "
-                << nProcs << " domains"
+                << nProcsOld << " domains"
                 << nl
                 << "instead of " << nDomains
                 << " domains as specified in decomposeParDict" << nl
                 << exit(FatalError);
         }
     }
-    else if (nProcs)
+    else if (nProcsOld)
    {
         bool procDirsProblem = true;
 
-        if (decomposeIfRequired && nProcs == nDomains)
+        if (decomposeIfRequired && nProcsOld == nDomains)
         {
             // We can reuse the decomposition
             decomposeFieldsOnly = true;
@@ -571,7 +595,7 @@ int main(int argc, char *argv[])
 
         if (forceOverwrite)
         {
-            Info<< "Removing " << nProcs
+            Info<< "Removing " << nProcsOld
                 << " existing processor directories" << endl;
 
             // Remove existing processors directory
@@ -614,8 +638,8 @@ int main(int argc, char *argv[])
 
         if (procDirsProblem)
         {
-            FatalErrorInFunction
-                << "Case is already decomposed with " << nProcs
+            FatalErrorIn(args.executable())
+                << "Case is already decomposed with " << nProcsOld
                 << " domains, use the -force option or manually" << nl
                 << "remove processor directories before decomposing. e.g.,"
                 << nl
@@ -644,7 +668,6 @@ int main(int argc, char *argv[])
     if (!decomposeFieldsOnly)
     {
         mesh.decomposeMesh();
-
         mesh.writeDecomposition(decomposeSets);
 
         if (writeCellDist)
@@ -747,7 +770,7 @@ int main(int argc, char *argv[])
     }
     else
     {
-        // Decompose the field files
+        // Decompose field files, lagrangian, finite-area
 
         // Cached processor meshes and maps. These are only preserved if
         // running with multiple times.
@@ -756,8 +779,9 @@ int main(int argc, char *argv[])
         PtrList<labelIOList> faceProcAddressingList(mesh.nProcs());
         PtrList<labelIOList> cellProcAddressingList(mesh.nProcs());
         PtrList<labelIOList> boundaryProcAddressingList(mesh.nProcs());
-        PtrList<fvFieldDecomposer> fieldDecomposerList(mesh.nProcs());
         PtrList<labelIOList> pointProcAddressingList(mesh.nProcs());
+
+        PtrList<fvFieldDecomposer> fieldDecomposerList(mesh.nProcs());
         PtrList<pointFieldDecomposer> pointFieldDecomposerList
         (
             mesh.nProcs()
@@ -765,83 +789,139 @@ int main(int argc, char *argv[])
 
 
 // Loop over all times
-forAll(times, timeI)
+forAll(times, timei)
 {
-    runTime.setTime(times[timeI], timeI);
+    runTime.setTime(times[timei], timei);
 
     Info<< "Time = " << runTime.timeName() << endl;
 
-    // Search for list of objects for this time
-    IOobjectList objects(mesh, runTime.timeName());
+    // Field objects at this time
+    IOobjectList objects;
 
+    if (doDecompFields)
+    {
+        objects = IOobjectList(mesh, runTime.timeName());
+
+        // Ignore generated fields: (cellDist)
+        objects.remove("cellDist");
+    }
+
+    // Finite area handling
+    autoPtr<faMeshDecomposition> faMeshDecompPtr;
+    if (doFiniteArea)
+    {
+        IOobject io
+        (
+            "faBoundary",
+            mesh.time().findInstance
+            (
+                mesh.dbDir()/polyMesh::meshSubDir,
+                "boundary"
+            ),
+            faMesh::meshSubDir,
+            mesh,
+            IOobject::READ_IF_PRESENT,
+            IOobject::NO_WRITE,
+            false // not registered
+        );
+
+        if (io.typeHeaderOk<faBoundaryMesh>(true))
+        {
+            // Always based on the volume decomposition!
+            faMeshDecompPtr.reset
+            (
+                new faMeshDecomposition
+                (
+                    mesh,
+                    mesh.nProcs(),
+                    mesh.model()
+                )
+            );
+        }
+    }
+
 
-    // Construct the vol fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~
+    // Vol fields
+    // ~~~~~~~~~~
     PtrList<volScalarField> volScalarFields;
-    readFields(mesh, objects, volScalarFields, false);
     PtrList<volVectorField> volVectorFields;
-    readFields(mesh, objects, volVectorFields, false);
-    PtrList<volSphericalTensorField> volSphericalTensorFields;
-    readFields(mesh, objects, volSphericalTensorFields, false);
+    PtrList<volSphericalTensorField> volSphTensorFields;
     PtrList<volSymmTensorField> volSymmTensorFields;
-    readFields(mesh, objects, volSymmTensorFields, false);
     PtrList<volTensorField> volTensorFields;
+
+    if (doDecompFields)
+    {
+        readFields(mesh, objects, volScalarFields, false);
+        readFields(mesh, objects, volVectorFields, false);
+        readFields(mesh, objects, volSphTensorFields, false);
+        readFields(mesh, objects, volSymmTensorFields, false);
         readFields(mesh, objects, volTensorFields, false);
+    }
 
-    // Construct the dimensioned fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // Internal fields
+    // ~~~~~~~~~~~~~~~
     PtrList<DimensionedField<scalar, volMesh>> dimScalarFields;
-    readFields(mesh, objects, dimScalarFields);
     PtrList<DimensionedField<vector, volMesh>> dimVectorFields;
-    readFields(mesh, objects, dimVectorFields);
     PtrList<DimensionedField<sphericalTensor, volMesh>>
-        dimSphericalTensorFields;
-    readFields(mesh, objects, dimSphericalTensorFields);
+        dimSphTensorFields;
     PtrList<DimensionedField<symmTensor, volMesh>>
         dimSymmTensorFields;
-    readFields(mesh, objects, dimSymmTensorFields);
     PtrList<DimensionedField<tensor, volMesh>> dimTensorFields;
+
+    if (doDecompFields)
+    {
+        readFields(mesh, objects, dimScalarFields);
+        readFields(mesh, objects, dimVectorFields);
+        readFields(mesh, objects, dimSphTensorFields);
+        readFields(mesh, objects, dimSymmTensorFields);
         readFields(mesh, objects, dimTensorFields);
+    }
 
-    // Construct the surface fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // Surface fields
+    // ~~~~~~~~~~~~~~
     PtrList<surfaceScalarField> surfaceScalarFields;
-    readFields(mesh, objects, surfaceScalarFields, false);
     PtrList<surfaceVectorField> surfaceVectorFields;
-    readFields(mesh, objects, surfaceVectorFields, false);
     PtrList<surfaceSphericalTensorField>
-        surfaceSphericalTensorFields;
-    readFields(mesh, objects, surfaceSphericalTensorFields, false);
+        surfaceSphTensorFields;
     PtrList<surfaceSymmTensorField> surfaceSymmTensorFields;
-    readFields(mesh, objects, surfaceSymmTensorFields, false);
     PtrList<surfaceTensorField> surfaceTensorFields;
+
+    if (doDecompFields)
+    {
+        readFields(mesh, objects, surfaceScalarFields, false);
+        readFields(mesh, objects, surfaceVectorFields, false);
+        readFields(mesh, objects, surfaceSphTensorFields, false);
+        readFields(mesh, objects, surfaceSymmTensorFields, false);
        readFields(mesh, objects, surfaceTensorFields, false);
+    }
 
 
-    // Construct the point fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // Point fields
+    // ~~~~~~~~~~~~
     const pointMesh& pMesh = pointMesh::New(mesh);
 
     PtrList<pointScalarField> pointScalarFields;
-    readFields(pMesh, objects, pointScalarFields, false);
     PtrList<pointVectorField> pointVectorFields;
-    readFields(pMesh, objects, pointVectorFields, false);
-    PtrList<pointSphericalTensorField> pointSphericalTensorFields;
-    readFields(pMesh, objects, pointSphericalTensorFields, false);
+    PtrList<pointSphericalTensorField> pointSphTensorFields;
     PtrList<pointSymmTensorField> pointSymmTensorFields;
-    readFields(pMesh, objects, pointSymmTensorFields, false);
     PtrList<pointTensorField> pointTensorFields;
+
+    if (doDecompFields)
+    {
+        readFields(pMesh, objects, pointScalarFields, false);
+        readFields(pMesh, objects, pointVectorFields, false);
+        readFields(pMesh, objects, pointSphTensorFields, false);
+        readFields(pMesh, objects, pointSymmTensorFields, false);
        readFields(pMesh, objects, pointTensorFields, false);
+    }
 
 
-    // Construct the Lagrangian fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    // Lagrangian fields
+    // ~~~~~~~~~~~~~~~~~
 
     fileNameList cloudDirs;
 
-    if (doLagrangian)
+    if (doDecompFields && doLagrangian)
     {
         cloudDirs = fileHandler().readDir
         (
@@ -889,13 +969,17 @@ int main(int argc, char *argv[])
     cloudDirs.size()
 );
 PtrList<PtrList<sphericalTensorIOField>>
-    lagrangianSphericalTensorFields
+    lagrangianSphTensorFields
 (
     cloudDirs.size()
 );
 PtrList<PtrList<sphericalTensorFieldCompactIOField>>
-    lagrangianSphericalTensorFieldFields(cloudDirs.size());
-PtrList<PtrList<symmTensorIOField>> lagrangianSymmTensorFields
+    lagrangianSphTensorFieldFields
+(
+    cloudDirs.size()
+);
+PtrList<PtrList<symmTensorIOField>>
+    lagrangianSymmTensorFields
 (
     cloudDirs.size()
 );
@@ -914,6 +998,7 @@ int main(int argc, char *argv[])
     cloudDirs.size()
 );
 
+
 label cloudI = 0;
 
 for (const fileName& cloudDir : cloudDirs)
@@ -977,7 +1062,7 @@ int main(int argc, char *argv[])
 // Check
 if (celli < 0 || celli >= mesh.nCells())
 {
-    FatalErrorInFunction
+    FatalErrorIn(args.executable())
         << "Illegal cell number " << celli
         << " for particle with index "
         << p.index()
@@ -1059,14 +1144,14 @@ int main(int argc, char *argv[])
 (
     cloudI,
     lagrangianObjects,
-    lagrangianSphericalTensorFields
+    lagrangianSphTensorFields
 );
 
 lagrangianFieldDecomposer::readFieldFields
 (
     cloudI,
     lagrangianObjects,
-    lagrangianSphericalTensorFieldFields
+    lagrangianSphTensorFieldFields
 );
 
 lagrangianFieldDecomposer::readFields
@@ -1097,29 +1182,34 @@ int main(int argc, char *argv[])
         lagrangianTensorFieldFields
     );
 
-    cloudI++;
+    ++cloudI;
 }
 }
 
-lagrangianPositions.setSize(cloudI);
-cellParticles.setSize(cloudI);
-lagrangianLabelFields.setSize(cloudI);
-lagrangianLabelFieldFields.setSize(cloudI);
-lagrangianScalarFields.setSize(cloudI);
-lagrangianScalarFieldFields.setSize(cloudI);
-lagrangianVectorFields.setSize(cloudI);
-lagrangianVectorFieldFields.setSize(cloudI);
-lagrangianSphericalTensorFields.setSize(cloudI);
-lagrangianSphericalTensorFieldFields.setSize(cloudI);
-lagrangianSymmTensorFields.setSize(cloudI);
-lagrangianSymmTensorFieldFields.setSize(cloudI);
-lagrangianTensorFields.setSize(cloudI);
-lagrangianTensorFieldFields.setSize(cloudI);
+lagrangianPositions.resize(cloudI);
+cellParticles.resize(cloudI);
+lagrangianLabelFields.resize(cloudI);
+lagrangianLabelFieldFields.resize(cloudI);
+lagrangianScalarFields.resize(cloudI);
+lagrangianScalarFieldFields.resize(cloudI);
+lagrangianVectorFields.resize(cloudI);
+lagrangianVectorFieldFields.resize(cloudI);
+lagrangianSphTensorFields.resize(cloudI);
+lagrangianSphTensorFieldFields.resize(cloudI);
+lagrangianSymmTensorFields.resize(cloudI);
+lagrangianSymmTensorFieldFields.resize(cloudI);
+lagrangianTensorFields.resize(cloudI);
+lagrangianTensorFieldFields.resize(cloudI);
 
 Info<< endl;
 
 // split the fields over processors
-for (label proci = 0; proci < mesh.nProcs(); ++proci)
+for
+(
+    label proci = 0;
+    doDecompFields && proci < mesh.nProcs();
+    ++proci
+)
 {
     Info<< "Processor " << proci << ": field transfer" << endl;
 
@@ -1207,22 +1297,19 @@ int main(int argc, char *argv[])
 const fvFieldDecomposer& fieldDecomposer =
     fieldDecomposerList[proci];
 
-// vol fields
+// Vol fields
 fieldDecomposer.decomposeFields(volScalarFields);
 fieldDecomposer.decomposeFields(volVectorFields);
-fieldDecomposer.decomposeFields
-(
-    volSphericalTensorFields
-);
+fieldDecomposer.decomposeFields(volSphTensorFields);
 fieldDecomposer.decomposeFields(volSymmTensorFields);
 fieldDecomposer.decomposeFields(volTensorFields);
 
-// surface fields
+// Surface fields
 fieldDecomposer.decomposeFields(surfaceScalarFields);
 fieldDecomposer.decomposeFields(surfaceVectorFields);
 fieldDecomposer.decomposeFields
 (
-    surfaceSphericalTensorFields
+    surfaceSphTensorFields
 );
 fieldDecomposer.decomposeFields
 (
@@ -1233,7 +1320,7 @@ int main(int argc, char *argv[])
 // internal fields
 fieldDecomposer.decomposeFields(dimScalarFields);
 fieldDecomposer.decomposeFields(dimVectorFields);
-fieldDecomposer.decomposeFields(dimSphericalTensorFields);
+fieldDecomposer.decomposeFields(dimSphTensorFields);
 fieldDecomposer.decomposeFields(dimSymmTensorFields);
 fieldDecomposer.decomposeFields(dimTensorFields);
 
@@ -1250,7 +1337,7 @@ int main(int argc, char *argv[])
 (
     pointScalarFields.size()
  || pointVectorFields.size()
- || pointSphericalTensorFields.size()
+ || pointSphTensorFields.size()
  || pointSymmTensorFields.size()
  || pointTensorFields.size()
 )
@@ -1284,10 +1371,7 @@ int main(int argc, char *argv[])
 
 pointDecomposer.decomposeFields(pointScalarFields);
 pointDecomposer.decomposeFields(pointVectorFields);
-pointDecomposer.decomposeFields
-(
-    pointSphericalTensorFields
-);
+pointDecomposer.decomposeFields(pointSphTensorFields);
 pointDecomposer.decomposeFields(pointSymmTensorFields);
 pointDecomposer.decomposeFields(pointTensorFields);
 
@@ -1351,12 +1435,12 @@ int main(int argc, char *argv[])
 fieldDecomposer.decomposeFields
 (
     cloudDirs[cloudI],
-    lagrangianSphericalTensorFields[cloudI]
+    lagrangianSphTensorFields[cloudI]
 );
 fieldDecomposer.decomposeFieldFields
 (
     cloudDirs[cloudI],
-    lagrangianSphericalTensorFieldFields[cloudI]
+    lagrangianSphTensorFieldFields[cloudI]
 );
 fieldDecomposer.decomposeFields
 (
@@ -1382,16 +1466,23 @@ int main(int argc, char *argv[])
 }
 }
 
-// Decompose the "uniform" directory in the time region
+if (doDecompFields)
+{
+    // Decompose "uniform" directory in the time region
     // directory
-decomposeUniform(copyUniform, mesh, processorDb, regionDir);
+    decomposeUniform
+    (
+        copyUniform, mesh, processorDb, regionDir
+    );
 
-// For a multi-region case, also decompose the "uniform"
+    // For a multi-region case, also decompose "uniform"
     // directory in the time directory
     if (regionNames.size() > 1 && regioni == 0)
     {
         decomposeUniform(copyUniform, mesh, processorDb);
     }
+}
 
 
 // We have cached all the constant mesh data for the current
 // processor. This is only important if running with
@@ -1406,72 +1497,46 @@ int main(int argc, char *argv[])
 }
 }
 
 
 // Finite area mesh and field decomposition
-IOobject faMeshBoundaryIOobj
-(
-    "faBoundary",
-    mesh.time().findInstance
-    (
-        mesh.dbDir()/polyMesh::meshSubDir,
-        "boundary"
-    ),
-    faMesh::meshSubDir,
-    mesh,
-    IOobject::READ_IF_PRESENT,
-    IOobject::NO_WRITE,
-    false // not registered
-);
-
-
-if
-(
-    doFiniteArea
- && faMeshBoundaryIOobj.typeHeaderOk<faBoundaryMesh>(true)
-)
+if (faMeshDecompPtr)
 {
     Info<< "\nFinite area mesh decomposition" << endl;
 
-    // Always based on the volume decomposition!
-    faMeshDecomposition aMesh
-    (
-        mesh,
-        mesh.nProcs(),
-        mesh.model()
-    );
+    faMeshDecomposition& aMesh = faMeshDecompPtr();
 
     aMesh.decomposeMesh();
     aMesh.writeDecomposition();
 
 
-    // Construct the area fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~
+    // Area fields
+    // ~~~~~~~~~~~
     PtrList<areaScalarField> areaScalarFields;
-    readFields(aMesh, objects, areaScalarFields);
 
     PtrList<areaVectorField> areaVectorFields;
-    readFields(aMesh, objects, areaVectorFields);
+    PtrList<areaSphericalTensorField> areaSphTensorFields;
 
-    PtrList<areaSphericalTensorField> areaSphericalTensorFields;
-    readFields(aMesh, objects, areaSphericalTensorFields);
-
     PtrList<areaSymmTensorField> areaSymmTensorFields;
-    readFields(aMesh, objects, areaSymmTensorFields);
 
     PtrList<areaTensorField> areaTensorFields;
 
+    // Edge fields (limited number of types)
+    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    PtrList<edgeScalarField> edgeScalarFields;
+
+    if (doDecompFields)
+    {
+        readFields(aMesh, objects, areaScalarFields);
+        readFields(aMesh, objects, areaVectorFields);
+        readFields(aMesh, objects, areaSphTensorFields);
+        readFields(aMesh, objects, areaSymmTensorFields);
        readFields(aMesh, objects, areaTensorFields);
 
-
-    // Construct the edge fields
-    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    PtrList<edgeScalarField> edgeScalarFields;
        readFields(aMesh, objects, edgeScalarFields);
+    }
 
     const label nAreaFields =
     (
        areaScalarFields.size()
      + areaVectorFields.size()
-      + areaSphericalTensorFields.size()
+      + areaSphTensorFields.size()
      + areaSymmTensorFields.size()
      + areaTensorFields.size()
      + edgeScalarFields.size()
@@ -1574,10 +1639,7 @@ int main(int argc, char *argv[])
 
     fieldDecomposer.decomposeFields(areaScalarFields);
     fieldDecomposer.decomposeFields(areaVectorFields);
-    fieldDecomposer.decomposeFields
-    (
-        areaSphericalTensorFields
-    );
+    fieldDecomposer.decomposeFields(areaSphTensorFields);
     fieldDecomposer.decomposeFields(areaSymmTensorFields);
     fieldDecomposer.decomposeFields(areaTensorFields);
 
@@ -52,7 +52,7 @@ void Foam::domainDecomposition::writeVolField
             false
         ),
         this->mesh(),
-        dimensionedScalar(dimless, Zero),
+        dimensionedScalar("cellDist", dimless, -1),
         zeroGradientFvPatchScalarField::typeName
     );
 
@@ -44,13 +44,6 @@ void Foam::readFields
     // Search list of objects for fields of type GeoField
     IOobjectList fieldObjects(objects.lookupClass<GeoField>());
 
-    // Remove the cellDist field
-    auto iter = fieldObjects.find("cellDist");
-    if (iter.found())
-    {
-        fieldObjects.erase(iter);
-    }
-
     // Use sorted set of names
     // (different processors might read objects in different order)
     const wordList masterNames(fieldObjects.sortedNames());
@@ -46,13 +46,6 @@ void Foam::faFieldDecomposer::readFields
     // Search list of objects for fields of type GeoField
     IOobjectList fieldObjects(objects.lookupClass<GeoField>());
 
-    /// // Remove the cellDist field
-    /// auto iter = fieldObjects.find("cellDist");
-    /// if (iter.found())
-    /// {
-    ///     fieldObjects.erase(iter);
-    /// }
-
     // Use sorted set of names
     // (different processors might read objects in different order)
     const wordList masterNames(fieldObjects.sortedNames());
@@ -155,19 +155,19 @@ public:
     // Settings
 
         //- Number of processor in decomposition
-        label nProcs() const
+        label nProcs() const noexcept
        {
             return nProcs_;
         }
 
         //- Is decomposition data to be distributed for each processor
-        bool distributed() const
+        bool distributed() const noexcept
        {
             return distributed_;
         }
 
         //- Change distributed flag
-        bool distributed(const bool on)
+        bool distributed(const bool on) noexcept
        {
             bool old(distributed_);
             distributed_ = on;
@@ -175,13 +175,13 @@ public:
         }
 
         //- Are global face zones used
-        bool useGlobalFaceZones() const
+        bool useGlobalFaceZones() const noexcept
        {
             return distributed_;
         }
 
         //- Change global face zones flag
-        bool useGlobalFaceZones(const bool on)
+        bool useGlobalFaceZones(const bool on) noexcept
        {
             bool old(hasGlobalFaceZones_);
             hasGlobalFaceZones_ = on;