ENH: code modernization for decompose/reconstruct

- simplify procAddressing read/write
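  The simplified write pattern (shown in the writeDecomposition hunk
  below) reuses one unregistered IOobject header and wraps the existing
  addressing lists with IOListRef, so no labelIOList copies are made:

      // Reusable file header for the addressing lists (not registered)
      IOobject ioAddr
      (
          "procAddressing",
          procMesh.facesInstance(),
          polyMesh::meshSubDir,
          procMesh.thisDb(),
          IOobject::NO_READ,
          IOobject::NO_WRITE,
          false  // not registered
      );

      // Rename per list and write by reference
      ioAddr.rename("cellProcAddressing");
      IOListRef<label>(ioAddr, procCellAddressing_[proci]).write();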

- avoid accessing points in faMeshReconstructor.
  Can rely on the patch meshPoints (labelList), which does not need
  access to a pointField
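  A minimal sketch of the idea (the accessor names are assumptions for
  illustration, not taken from this diff): only point labels, i.e. pure
  connectivity, are needed.

      // Assumed interface: the area mesh's underlying primitive patch
      // provides meshPoints(), the mesh point labels of its local points
      const labelList& meshPoints = aMesh.patch().meshPoints();

      // Point addressing is plain label bookkeeping - no geometry
      // (pointField) access is required
      labelList procPointAddressing(meshPoints);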

- report the number of points on the decomposed mesh.
  Can be useful additional information.
  Also adds corresponding statistics for the finite-area decomposition.
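  The per-processor report itself is a one-liner, alongside the existing
  cell count (visible in the writeDecomposition hunk below):

      Info<< "Number of cells = " << procMesh.nCells() << nl;

      if (procMesh.nCells())
      {
          Info<< "    Number of points = " << procMesh.nPoints() << nl;
      }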

- provide bundled reconstructAllFields for various reconstructors
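  Each reconstructor now exposes a single bundled call in place of the
  long per-type chains; the calls below are the forms visible in the
  reconstructPar hunks further down (the lagrangian reconstructor gets
  an equivalent per-cloud call):

      // volume, surface and internal fields
      reconstructor.reconstructAllFields(objects, selectedFields);

      // point fields
      reconstructor.reconstructAllFields(objects, selectedFields);

      // finite-area (area/edge) fields
      reconstructor.reconstructAllFields(objects);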

- remove reconstructPar checks for very old face addressing
  (from foam2.0, i.e., older than OpenFOAM itself)

- bundle all reading into fieldsDistributor tools,
  where it can be reused by various utilities as required.
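  In redistributePar, for example, the former file-local readFields
  helper is simply replaced by the class-scoped one. A sketch, assuming
  the argument list of the old helper (the middle arguments lie outside
  the visible hunks):

      fieldsDistributor::readFields
      (
          haveMesh,      // which ranks already have a mesh
          mesh,
          subsetterPtr,  // assumed: zero-sized subsetter for meshless ranks
          objects,       // assumed: the IOobjectList being distributed
          volScalarFields
      );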

- combine the decomposition fields into respective fieldsCache classes,
  which eliminates most of the clutter from decomposePar
  and simplifies reuse in the future.
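  The resulting pattern in decomposePar (visible in the hunks below) is
  the same for the volume/surface, point, finite-area and lagrangian
  caches:

      // Volume/surface/internal fields
      fvFieldDecomposer::fieldsCache volumeFieldCache;

      if (doDecompFields)
      {
          volumeFieldCache.readAllFields(mesh, objects);
      }

      // ... later, once per processor:
      volumeFieldCache.decomposeAllFields(fieldDecomposerList[proci]);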

STYLE: remove old wordHashSet selection (deprecated in 2018)

BUG: incorrect face flip handling for faMeshReconstructor

- a latent bug that is not yet triggered, since faMesh faces are
  currently only definable on boundary faces (which never flip)
Mark Olesen
2022-04-24 15:06:40 +02:00
committed by Andrew Heather
parent eccc998ed2
commit 3b6761afed
51 changed files with 3270 additions and 2443 deletions


@ -8,6 +8,4 @@ domainDecompositionWrite.C
domainDecompositionDryRun.C
domainDecompositionDryRunWrite.C
lagrangianFieldDecomposer.C
EXE = $(FOAM_APPBIN)/decomposePar


@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
Copyright (C) 2016-2021 OpenCFD Ltd.
Copyright (C) 2016-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -150,29 +150,19 @@ Usage
#include "domainDecomposition.H"
#include "domainDecompositionDryRun.H"
#include "labelIOField.H"
#include "labelFieldIOField.H"
#include "scalarIOField.H"
#include "scalarFieldIOField.H"
#include "vectorIOField.H"
#include "vectorFieldIOField.H"
#include "sphericalTensorIOField.H"
#include "sphericalTensorFieldIOField.H"
#include "symmTensorIOField.H"
#include "symmTensorFieldIOField.H"
#include "tensorIOField.H"
#include "tensorFieldIOField.H"
#include "pointFields.H"
#include "regionProperties.H"
#include "readFields.H"
#include "fieldsDistributor.H"
#include "fvFieldDecomposer.H"
#include "pointFields.H"
#include "pointFieldDecomposer.H"
#include "lagrangianFieldDecomposer.H"
#include "emptyFaPatch.H"
#include "faMeshDecomposition.H"
#include "faFieldDecomposer.H"
#include "faMeshDecomposition.H"
// * * * * * * * * * * * * * * * Local Functions * * * * * * * * * * * * * * //
@ -838,58 +828,14 @@ int main(int argc, char *argv[])
}
// Vol fields
// ~~~~~~~~~~
PtrList<volScalarField> volScalarFields;
PtrList<volVectorField> volVectorFields;
PtrList<volSphericalTensorField> volSphTensorFields;
PtrList<volSymmTensorField> volSymmTensorFields;
PtrList<volTensorField> volTensorFields;
// Volume/surface/internal fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fvFieldDecomposer::fieldsCache volumeFieldCache;
if (doDecompFields)
{
readFields(mesh, objects, volScalarFields, false);
readFields(mesh, objects, volVectorFields, false);
readFields(mesh, objects, volSphTensorFields, false);
readFields(mesh, objects, volSymmTensorFields, false);
readFields(mesh, objects, volTensorFields, false);
}
// Internal fields
// ~~~~~~~~~~~~~~~
PtrList<DimensionedField<scalar, volMesh>> dimScalarFields;
PtrList<DimensionedField<vector, volMesh>> dimVectorFields;
PtrList<DimensionedField<sphericalTensor, volMesh>>
dimSphTensorFields;
PtrList<DimensionedField<symmTensor, volMesh>>
dimSymmTensorFields;
PtrList<DimensionedField<tensor, volMesh>> dimTensorFields;
if (doDecompFields)
{
readFields(mesh, objects, dimScalarFields);
readFields(mesh, objects, dimVectorFields);
readFields(mesh, objects, dimSphTensorFields);
readFields(mesh, objects, dimSymmTensorFields);
readFields(mesh, objects, dimTensorFields);
}
// Surface fields
// ~~~~~~~~~~~~~~
PtrList<surfaceScalarField> surfaceScalarFields;
PtrList<surfaceVectorField> surfaceVectorFields;
PtrList<surfaceSphericalTensorField>
surfaceSphTensorFields;
PtrList<surfaceSymmTensorField> surfaceSymmTensorFields;
PtrList<surfaceTensorField> surfaceTensorFields;
if (doDecompFields)
{
readFields(mesh, objects, surfaceScalarFields, false);
readFields(mesh, objects, surfaceVectorFields, false);
readFields(mesh, objects, surfaceSphTensorFields, false);
readFields(mesh, objects, surfaceSymmTensorFields, false);
readFields(mesh, objects, surfaceTensorFields, false);
volumeFieldCache.readAllFields(mesh, objects);
}
@ -897,19 +843,11 @@ int main(int argc, char *argv[])
// ~~~~~~~~~~~~
const pointMesh& pMesh = pointMesh::New(mesh);
PtrList<pointScalarField> pointScalarFields;
PtrList<pointVectorField> pointVectorFields;
PtrList<pointSphericalTensorField> pointSphTensorFields;
PtrList<pointSymmTensorField> pointSymmTensorFields;
PtrList<pointTensorField> pointTensorFields;
pointFieldDecomposer::fieldsCache pointFieldCache;
if (doDecompFields)
{
readFields(pMesh, objects, pointScalarFields, false);
readFields(pMesh, objects, pointVectorFields, false);
readFields(pMesh, objects, pointSphTensorFields, false);
readFields(pMesh, objects, pointSymmTensorFields, false);
readFields(pMesh, objects, pointTensorFields, false);
pointFieldCache.readAllFields(pMesh, objects);
}
@ -938,63 +876,10 @@ int main(int argc, char *argv[])
cloudDirs.size()
);
PtrList<PtrList<labelIOField>> lagrangianLabelFields
lagrangianFieldDecomposer::fieldsCache lagrangianFieldCache
(
cloudDirs.size()
);
PtrList<PtrList<labelFieldCompactIOField>>
lagrangianLabelFieldFields
(
cloudDirs.size()
);
PtrList<PtrList<scalarIOField>> lagrangianScalarFields
(
cloudDirs.size()
);
PtrList<PtrList<scalarFieldCompactIOField>>
lagrangianScalarFieldFields
(
cloudDirs.size()
);
PtrList<PtrList<vectorIOField>> lagrangianVectorFields
(
cloudDirs.size()
);
PtrList<PtrList<vectorFieldCompactIOField>>
lagrangianVectorFieldFields
(
cloudDirs.size()
);
PtrList<PtrList<sphericalTensorIOField>>
lagrangianSphTensorFields
(
cloudDirs.size()
);
PtrList<PtrList<sphericalTensorFieldCompactIOField>>
lagrangianSphTensorFieldFields
(
cloudDirs.size()
);
PtrList<PtrList<symmTensorIOField>>
lagrangianSymmTensorFields
(
cloudDirs.size()
);
PtrList<PtrList<symmTensorFieldCompactIOField>>
lagrangianSymmTensorFieldFields
(
cloudDirs.size()
);
PtrList<PtrList<tensorIOField>> lagrangianTensorFields
(
cloudDirs.size()
);
PtrList<PtrList<tensorFieldCompactIOField>>
lagrangianTensorFieldFields
(
cloudDirs.size()
);
label cloudI = 0;
@ -1095,88 +980,10 @@ int main(int argc, char *argv[])
false
);
lagrangianFieldDecomposer::readFields
lagrangianFieldCache.readAllFields
(
cloudI,
lagrangianObjects,
lagrangianLabelFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianLabelFieldFields
);
lagrangianFieldDecomposer::readFields
(
cloudI,
lagrangianObjects,
lagrangianScalarFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianScalarFieldFields
);
lagrangianFieldDecomposer::readFields
(
cloudI,
lagrangianObjects,
lagrangianVectorFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianVectorFieldFields
);
lagrangianFieldDecomposer::readFields
(
cloudI,
lagrangianObjects,
lagrangianSphTensorFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianSphTensorFieldFields
);
lagrangianFieldDecomposer::readFields
(
cloudI,
lagrangianObjects,
lagrangianSymmTensorFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianSymmTensorFieldFields
);
lagrangianFieldDecomposer::readFields
(
cloudI,
lagrangianObjects,
lagrangianTensorFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianTensorFieldFields
lagrangianObjects
);
++cloudI;
@ -1185,18 +992,7 @@ int main(int argc, char *argv[])
lagrangianPositions.resize(cloudI);
cellParticles.resize(cloudI);
lagrangianLabelFields.resize(cloudI);
lagrangianLabelFieldFields.resize(cloudI);
lagrangianScalarFields.resize(cloudI);
lagrangianScalarFieldFields.resize(cloudI);
lagrangianVectorFields.resize(cloudI);
lagrangianVectorFieldFields.resize(cloudI);
lagrangianSphTensorFields.resize(cloudI);
lagrangianSphTensorFieldFields.resize(cloudI);
lagrangianSymmTensorFields.resize(cloudI);
lagrangianSymmTensorFieldFields.resize(cloudI);
lagrangianTensorFields.resize(cloudI);
lagrangianTensorFieldFields.resize(cloudI);
lagrangianFieldCache.resize(cloudI);
Info<< endl;
@ -1291,35 +1087,11 @@ int main(int argc, char *argv[])
)
);
}
const fvFieldDecomposer& fieldDecomposer =
fieldDecomposerList[proci];
// Vol fields
fieldDecomposer.decomposeFields(volScalarFields);
fieldDecomposer.decomposeFields(volVectorFields);
fieldDecomposer.decomposeFields(volSphTensorFields);
fieldDecomposer.decomposeFields(volSymmTensorFields);
fieldDecomposer.decomposeFields(volTensorFields);
// Surface fields
fieldDecomposer.decomposeFields(surfaceScalarFields);
fieldDecomposer.decomposeFields(surfaceVectorFields);
fieldDecomposer.decomposeFields
volumeFieldCache.decomposeAllFields
(
surfaceSphTensorFields
fieldDecomposerList[proci]
);
fieldDecomposer.decomposeFields
(
surfaceSymmTensorFields
);
fieldDecomposer.decomposeFields(surfaceTensorFields);
// internal fields
fieldDecomposer.decomposeFields(dimScalarFields);
fieldDecomposer.decomposeFields(dimVectorFields);
fieldDecomposer.decomposeFields(dimSphTensorFields);
fieldDecomposer.decomposeFields(dimSymmTensorFields);
fieldDecomposer.decomposeFields(dimTensorFields);
if (times.size() == 1)
{
@ -1330,14 +1102,7 @@ int main(int argc, char *argv[])
// Point fields
if
(
pointScalarFields.size()
|| pointVectorFields.size()
|| pointSphTensorFields.size()
|| pointSymmTensorFields.size()
|| pointTensorFields.size()
)
if (!pointFieldCache.empty())
{
const labelIOList& pointProcAddressing = procAddressing
(
@ -1363,15 +1128,11 @@ int main(int argc, char *argv[])
)
);
}
const pointFieldDecomposer& pointDecomposer =
pointFieldDecomposerList[proci];
pointDecomposer.decomposeFields(pointScalarFields);
pointDecomposer.decomposeFields(pointVectorFields);
pointDecomposer.decomposeFields(pointSphTensorFields);
pointDecomposer.decomposeFields(pointSymmTensorFields);
pointDecomposer.decomposeFields(pointTensorFields);
pointFieldCache.decomposeAllFields
(
pointFieldDecomposerList[proci]
);
if (times.size() == 1)
{
@ -1382,9 +1143,9 @@ int main(int argc, char *argv[])
// If there is lagrangian data write it out
forAll(lagrangianPositions, cloudI)
forAll(lagrangianPositions, cloudi)
{
if (lagrangianPositions[cloudI].size())
if (lagrangianPositions[cloudi].size())
{
lagrangianFieldDecomposer fieldDecomposer
(
@ -1392,74 +1153,18 @@ int main(int argc, char *argv[])
procMesh,
faceProcAddressing,
cellProcAddressing,
cloudDirs[cloudI],
lagrangianPositions[cloudI],
cellParticles[cloudI]
cloudDirs[cloudi],
lagrangianPositions[cloudi],
cellParticles[cloudi]
);
// Lagrangian fields
{
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianLabelFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianLabelFieldFields[cloudI]
);
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianScalarFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianScalarFieldFields[cloudI]
);
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianVectorFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianVectorFieldFields[cloudI]
);
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianSphTensorFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianSphTensorFieldFields[cloudI]
);
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianSymmTensorFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianSymmTensorFieldFields[cloudI]
);
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianTensorFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianTensorFieldFields[cloudI]
);
}
lagrangianFieldCache.decomposeAllFields
(
cloudi,
cloudDirs[cloudi],
fieldDecomposer
);
}
}
@ -1506,38 +1211,17 @@ int main(int argc, char *argv[])
aMesh.writeDecomposition();
// Area fields
// ~~~~~~~~~~~
PtrList<areaScalarField> areaScalarFields;
PtrList<areaVectorField> areaVectorFields;
PtrList<areaSphericalTensorField> areaSphTensorFields;
PtrList<areaSymmTensorField> areaSymmTensorFields;
PtrList<areaTensorField> areaTensorFields;
// Area/edge fields
// ~~~~~~~~~~~~~~~~
// Edge fields (limited number of types)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PtrList<edgeScalarField> edgeScalarFields;
faFieldDecomposer::fieldsCache areaFieldCache;
if (doDecompFields)
{
readFields(aMesh, objects, areaScalarFields);
readFields(aMesh, objects, areaVectorFields);
readFields(aMesh, objects, areaSphTensorFields);
readFields(aMesh, objects, areaSymmTensorFields);
readFields(aMesh, objects, areaTensorFields);
readFields(aMesh, objects, edgeScalarFields);
areaFieldCache.readAllFields(aMesh, objects);
}
const label nAreaFields =
(
areaScalarFields.size()
+ areaVectorFields.size()
+ areaSphTensorFields.size()
+ areaSymmTensorFields.size()
+ areaTensorFields.size()
+ edgeScalarFields.size()
);
const label nAreaFields = areaFieldCache.size();
Info<< endl;
Info<< "Finite area field transfer: "
@ -1634,13 +1318,7 @@ int main(int argc, char *argv[])
boundaryProcAddressing
);
fieldDecomposer.decomposeFields(areaScalarFields);
fieldDecomposer.decomposeFields(areaVectorFields);
fieldDecomposer.decomposeFields(areaSphTensorFields);
fieldDecomposer.decomposeFields(areaSymmTensorFields);
fieldDecomposer.decomposeFields(areaTensorFields);
fieldDecomposer.decomposeFields(edgeScalarFields);
areaFieldCache.decomposeAllFields(fieldDecomposer);
}
}
}


@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
Copyright (C) 2019-2021 OpenCFD Ltd.
Copyright (C) 2019-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -37,6 +37,7 @@ License
#include "DynamicList.H"
#include "fvFieldDecomposer.H"
#include "IOobjectList.H"
#include "PtrDynList.H"
#include "cellSet.H"
#include "faceSet.H"
#include "pointSet.H"
@ -178,32 +179,32 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
}
PtrList<const cellSet> cellSets;
PtrList<const faceSet> faceSets;
PtrList<const pointSet> pointSets;
PtrDynList<const cellSet> cellSets;
PtrDynList<const faceSet> faceSets;
PtrDynList<const pointSet> pointSets;
if (decomposeSets)
{
// Read sets
IOobjectList objects(*this, facesInstance(), "polyMesh/sets");
{
IOobjectList cSets(objects.lookupClass(cellSet::typeName));
forAllConstIters(cSets, iter)
IOobjectList sets(objects.lookupClass<cellSet>());
forAllConstIters(sets, iter)
{
cellSets.append(new cellSet(*iter()));
cellSets.append(new cellSet(*(iter.val())));
}
}
{
IOobjectList fSets(objects.lookupClass(faceSet::typeName));
forAllConstIters(fSets, iter)
IOobjectList sets(objects.lookupClass<faceSet>());
forAllConstIters(sets, iter)
{
faceSets.append(new faceSet(*iter()));
faceSets.append(new faceSet(*(iter.val())));
}
}
{
IOobjectList pSets(objects.lookupClass(pointSet::typeName));
forAllConstIters(pSets, iter)
IOobjectList sets(objects.lookupClass<pointSet>());
forAllConstIters(sets, iter)
{
pointSets.append(new pointSet(*iter()));
pointSets.append(new pointSet(*(iter.val())));
}
}
}
@ -225,13 +226,11 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
);
label maxProcCells = 0;
label maxProcFaces = 0;
label totProcFaces = 0;
label maxProcPatches = 0;
label totProcPatches = 0;
label maxProcFaces = 0;
// Write out the meshes
for (label proci = 0; proci < nProcs_; proci++)
@ -265,7 +264,6 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
{
// Mark the original face as used
// Remember to decrement the index by one (turning index)
//
label curF = mag(curFaceLabels[facei]) - 1;
faceLookup[curF] = facei;
@ -855,31 +853,35 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
Info<< "Number of cells = " << procMesh.nCells() << nl;
if (procMesh.nCells())
{
Info<< " Number of points = " << procMesh.nPoints() << nl;
}
maxProcCells = max(maxProcCells, procMesh.nCells());
label nBoundaryFaces = 0;
label nProcPatches = 0;
label nProcFaces = 0;
forAll(procMesh.boundaryMesh(), patchi)
for (const polyPatch& pp : procMesh.boundaryMesh())
{
if (isA<processorPolyPatch>(procMesh.boundaryMesh()[patchi]))
const auto* cpp = isA<processorPolyPatch>(pp);
if (cpp)
{
const processorPolyPatch& ppp =
refCast<const processorPolyPatch>
(
procMesh.boundaryMesh()[patchi]
);
const auto& procPatch = *cpp;
Info<< " Number of faces shared with processor "
<< ppp.neighbProcNo() << " = " << ppp.size() << endl;
<< procPatch.neighbProcNo() << " = "
<< procPatch.size() << nl;
nProcPatches++;
nProcFaces += ppp.size();
nProcFaces += procPatch.size();
++nProcPatches;
}
else
{
nBoundaryFaces += procMesh.boundaryMesh()[patchi].size();
nBoundaryFaces += pp.size();
}
}
@ -892,103 +894,79 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
totProcFaces += nProcFaces;
totProcPatches += nProcPatches;
maxProcPatches = max(maxProcPatches, nProcPatches);
maxProcFaces = max(maxProcFaces, nProcFaces);
maxProcPatches = max(maxProcPatches, nProcPatches);
// create and write the addressing information
labelIOList pointProcAddressing
(
IOobject
(
"pointProcAddressing",
procMesh.facesInstance(),
procMesh.meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procPointAddressing_[proci]
);
pointProcAddressing.write();
// Write the addressing information
labelIOList faceProcAddressing
IOobject ioAddr
(
IOobject
(
"faceProcAddressing",
procMesh.facesInstance(),
procMesh.meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procFaceAddressing_[proci]
"procAddressing",
procMesh.facesInstance(),
polyMesh::meshSubDir,
procMesh.thisDb(),
IOobject::NO_READ,
IOobject::NO_WRITE,
false // not registered
);
faceProcAddressing.write();
labelIOList cellProcAddressing
(
IOobject
(
"cellProcAddressing",
procMesh.facesInstance(),
procMesh.meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procCellAddressing_[proci]
);
cellProcAddressing.write();
// pointProcAddressing
ioAddr.rename("pointProcAddressing");
IOListRef<label>(ioAddr, procPointAddressing_[proci]).write();
// faceProcAddressing
ioAddr.rename("faceProcAddressing");
IOListRef<label>(ioAddr, procFaceAddressing_[proci]).write();
// cellProcAddressing
ioAddr.rename("cellProcAddressing");
IOListRef<label>(ioAddr, procCellAddressing_[proci]).write();
// Write patch map for backwards compatibility.
// (= identity map for original patches, -1 for processor patches)
label nMeshPatches = curPatchSizes.size();
labelList procBoundaryAddressing(identity(nMeshPatches));
procBoundaryAddressing.setSize(nMeshPatches+nProcPatches, -1);
labelList procBoundaryAddr(identity(nMeshPatches));
procBoundaryAddr.resize(nMeshPatches+nProcPatches, -1);
labelIOList boundaryProcAddressing
(
IOobject
(
"boundaryProcAddressing",
procMesh.facesInstance(),
procMesh.meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procBoundaryAddressing
);
boundaryProcAddressing.write();
// boundaryProcAddressing
ioAddr.rename("boundaryProcAddressing");
IOListRef<label>(ioAddr, procBoundaryAddr).write();
}
scalar avgProcCells = scalar(nCells())/nProcs_;
scalar avgProcPatches = scalar(totProcPatches)/nProcs_;
scalar avgProcFaces = scalar(totProcFaces)/nProcs_;
// In case of all faces on one processor. Just to avoid division by 0.
if (totProcPatches == 0)
{
avgProcPatches = 1;
}
if (totProcFaces == 0)
{
avgProcFaces = 1;
}
// Summary stats
Info<< nl
<< "Number of processor faces = " << totProcFaces/2 << nl
<< "Max number of cells = " << maxProcCells
<< " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
<< "% above average " << avgProcCells << ")" << nl
<< "Max number of processor patches = " << maxProcPatches
<< " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
<< "% above average " << avgProcPatches << ")" << nl
<< "Max number of faces between processors = " << maxProcFaces
<< " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
<< "% above average " << avgProcFaces << ")" << nl
<< endl;
<< "Number of processor faces = " << (totProcFaces/2) << nl
<< "Max number of cells = " << maxProcCells;
if (maxProcCells != nCells())
{
scalar avgValue = scalar(nCells())/nProcs_;
Info<< " (" << 100.0*(maxProcCells-avgValue)/avgValue
<< "% above average " << avgValue << ')';
}
Info<< nl;
Info<< "Max number of processor patches = " << maxProcPatches;
if (totProcPatches)
{
scalar avgValue = scalar(totProcPatches)/nProcs_;
Info<< " (" << 100.0*(maxProcPatches-avgValue)/avgValue
<< "% above average " << avgValue << ')';
}
Info<< nl;
Info<< "Max number of faces between processors = " << maxProcFaces;
if (totProcFaces)
{
scalar avgValue = scalar(totProcFaces)/nProcs_;
Info<< " (" << 100.0*(maxProcFaces-avgValue)/avgValue
<< "% above average " << avgValue << ')';
}
Info<< nl << endl;
return true;
}


@ -1,103 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Description
Lagrangian field decomposer.
\*---------------------------------------------------------------------------*/
#include "lagrangianFieldDecomposer.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::lagrangianFieldDecomposer::lagrangianFieldDecomposer
(
const polyMesh& mesh,
const polyMesh& procMesh,
const labelList& faceProcAddressing,
const labelList& cellProcAddressing,
const word& cloudName,
const Cloud<indexedParticle>& lagrangianPositions,
const List<SLList<indexedParticle*>*>& cellParticles
)
:
procMesh_(procMesh),
positions_(procMesh, cloudName, IDLList<passiveParticle>()),
particleIndices_(lagrangianPositions.size())
{
label pi = 0;
labelList decodedProcFaceAddressing(faceProcAddressing.size());
forAll(faceProcAddressing, i)
{
decodedProcFaceAddressing[i] = mag(faceProcAddressing[i]) - 1;
}
forAll(cellProcAddressing, procCelli)
{
label celli = cellProcAddressing[procCelli];
if (cellParticles[celli])
{
SLList<indexedParticle*>& particlePtrs = *cellParticles[celli];
forAllConstIters(particlePtrs, iter)
{
const indexedParticle& ppi = *iter();
particleIndices_[pi++] = ppi.index();
const label mappedTetFace =
decodedProcFaceAddressing.find(ppi.tetFace());
if (mappedTetFace == -1)
{
FatalErrorInFunction
<< "Face lookup failure." << nl
<< abort(FatalError);
}
positions_.append
(
new passiveParticle
(
procMesh,
ppi.coordinates(),
procCelli,
mappedTetFace,
ppi.procTetPt(procMesh, procCelli, mappedTetFace)
)
);
}
}
}
particleIndices_.setSize(pi);
IOPosition<Cloud<passiveParticle>>(positions_).write();
}
// ************************************************************************* //


@ -1,166 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::lagrangianFieldDecomposer
Description
Lagrangian field decomposer.
SourceFiles
lagrangianFieldDecomposer.C
lagrangianFieldDecomposerFields.C
\*---------------------------------------------------------------------------*/
#ifndef lagrangianFieldDecomposer_H
#define lagrangianFieldDecomposer_H
#include "Cloud.H"
#include "CompactIOField.H"
#include "indexedParticle.H"
#include "passiveParticle.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
class IOobjectList;
/*---------------------------------------------------------------------------*\
Class lagrangianFieldDecomposer Declaration
\*---------------------------------------------------------------------------*/
class lagrangianFieldDecomposer
{
// Private data
//- Reference to processor mesh
const polyMesh& procMesh_;
//- Lagrangian positions for this processor
Cloud<passiveParticle> positions_;
//- The indices of the particles on this processor
labelList particleIndices_;
// Private Member Functions
//- No copy construct
lagrangianFieldDecomposer(const lagrangianFieldDecomposer&) = delete;
//- No copy assignment
void operator=(const lagrangianFieldDecomposer&) = delete;
public:
// Constructors
//- Construct from components
lagrangianFieldDecomposer
(
const polyMesh& mesh, //<! unused
const polyMesh& procMesh,
const labelList& faceProcAddressing,
const labelList& cellProcAddressing,
const word& cloudName,
const Cloud<indexedParticle>& lagrangianPositions,
const List<SLList<indexedParticle*>*>& cellParticles
);
// Member Functions
// Read the fields and hold on the pointer list
template<class Type>
static void readFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList<PtrList<IOField<Type>>>& lagrangianFields
);
template<class Type>
static void readFieldFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList
<
PtrList<CompactIOField<Field<Type>, Type>>
>& lagrangianFields
);
//- Decompose volume field
template<class Type>
tmp<IOField<Type>> decomposeField
(
const word& cloudName,
const IOField<Type>& field
) const;
template<class Type>
tmp<CompactIOField<Field<Type>, Type>> decomposeFieldField
(
const word& cloudName,
const CompactIOField<Field<Type>, Type>& field
) const;
template<class GeoField>
void decomposeFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const;
template<class GeoField>
void decomposeFieldFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "lagrangianFieldDecomposerFields.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //


@ -1,216 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "lagrangianFieldDecomposer.H"
#include "IOobjectList.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
void Foam::lagrangianFieldDecomposer::readFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList<PtrList<IOField<Type>>>& lagrangianFields
)
{
// Search list of objects for lagrangian fields
IOobjectList lagrangianTypeObjects
(
lagrangianObjects.lookupClass(IOField<Type>::typeName)
);
lagrangianFields.set
(
cloudI,
new PtrList<IOField<Type>>
(
lagrangianTypeObjects.size()
)
);
label lagrangianFieldi = 0;
forAllConstIters(lagrangianTypeObjects, iter)
{
lagrangianFields[cloudI].set
(
lagrangianFieldi++,
new IOField<Type>(*iter())
);
}
}
template<class Type>
void Foam::lagrangianFieldDecomposer::readFieldFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList<PtrList<CompactIOField<Field<Type>, Type>>>& lagrangianFields
)
{
// Search list of objects for lagrangian fields
IOobjectList lagrangianTypeObjectsA
(
lagrangianObjects.lookupClass(IOField<Field<Type>>::typeName)
);
IOobjectList lagrangianTypeObjectsB
(
lagrangianObjects.lookupClass
(
CompactIOField<Field<Type>,
Type>::typeName
)
);
lagrangianFields.set
(
cloudI,
new PtrList<CompactIOField<Field<Type>, Type>>
(
lagrangianTypeObjectsA.size() + lagrangianTypeObjectsB.size()
)
);
label lagrangianFieldi = 0;
forAllConstIters(lagrangianTypeObjectsA, iter)
{
lagrangianFields[cloudI].set
(
lagrangianFieldi++,
new CompactIOField<Field<Type>, Type>(*iter())
);
}
forAllConstIters(lagrangianTypeObjectsB, iter)
{
lagrangianFields[cloudI].set
(
lagrangianFieldi++,
new CompactIOField<Field<Type>, Type>(*iter())
);
}
}
template<class Type>
Foam::tmp<Foam::IOField<Type>>
Foam::lagrangianFieldDecomposer::decomposeField
(
const word& cloudName,
const IOField<Type>& field
) const
{
// Create and map the internal field values
Field<Type> procField(field, particleIndices_);
// Create the field for the processor
return tmp<IOField<Type>>::New
(
IOobject
(
field.name(),
procMesh_.time().timeName(),
cloud::prefix/cloudName,
procMesh_,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
procField
);
}
template<class Type>
Foam::tmp<Foam::CompactIOField<Foam::Field<Type>, Type>>
Foam::lagrangianFieldDecomposer::decomposeFieldField
(
const word& cloudName,
const CompactIOField<Field<Type>, Type>& field
) const
{
// Create and map the internal field values
Field<Field<Type>> procField(field, particleIndices_);
// Create the field for the processor
return tmp<CompactIOField<Field<Type>, Type>>::New
(
IOobject
(
field.name(),
procMesh_.time().timeName(),
cloud::prefix/cloudName,
procMesh_,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
procField
);
}
template<class GeoField>
void Foam::lagrangianFieldDecomposer::decomposeFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const
{
//if (particleIndices_.size())
{
bool valid = particleIndices_.size() > 0;
forAll(fields, fieldi)
{
decomposeField(cloudName, fields[fieldi])().write(valid);
}
}
}
template<class GeoField>
void Foam::lagrangianFieldDecomposer::decomposeFieldFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const
{
//if (particleIndices_.size())
{
bool valid = particleIndices_.size() > 0;
forAll(fields, fieldi)
{
decomposeFieldField(cloudName, fields[fieldi])().write(valid);
}
}
}
// ************************************************************************* //


@ -1,90 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "GeometricField.H"
#include "readFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type, template<class> class PatchField, class GeoMesh>
void Foam::readFields
(
const typename GeoMesh::Mesh& mesh,
const IOobjectList& objects,
PtrList<GeometricField<Type, PatchField, GeoMesh>>& fields,
const bool readOldTime
)
{
typedef GeometricField<Type, PatchField, GeoMesh> GeoField;
// Search list of objects for fields of type GeoField
IOobjectList fieldObjects(objects.lookupClass<GeoField>());
// Use sorted set of names
// (different processors might read objects in different order)
const wordList masterNames(fieldObjects.sortedNames());
// Construct the fields
fields.resize(masterNames.size());
forAll(masterNames, i)
{
const IOobject& io = *fieldObjects[masterNames[i]];
fields.set(i, new GeoField(io, mesh, readOldTime));
}
}
template<class Mesh, class GeoField>
void Foam::readFields
(
const Mesh& mesh,
const IOobjectList& objects,
PtrList<GeoField>& fields
)
{
// Search list of objects for fields of type GeomField
IOobjectList fieldObjects(objects.lookupClass<GeoField>());
// Use sorted set of names
// (different processors might read objects in different order)
const wordList masterNames(fieldObjects.sortedNames());
// Construct the fields
fields.resize(masterNames.size());
forAll(masterNames, i)
{
const IOobject& io = *fieldObjects[masterNames[i]];
fields.set(i, new GeoField(io, mesh));
}
}
// ************************************************************************* //


@ -1,77 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Global
readFields
Description
SourceFiles
readFields.C
\*---------------------------------------------------------------------------*/
#ifndef readFields_H
#define readFields_H
#include "IOobjectList.H"
#include "PtrList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Read the fields and hold on the pointer list
template<class Type, template<class> class PatchField, class GeoMesh>
void readFields
(
const typename GeoMesh::Mesh& mesh,
const IOobjectList& objects,
PtrList<GeometricField<Type, PatchField, GeoMesh>>& fields,
const bool readOldTime
);
// Read the fields and hold on the pointer list
template<class Mesh, class GeoField>
void readFields
(
const Mesh& mesh,
const IOobjectList& objects,
PtrList<GeoField>& fields
);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "readFields.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //


@ -1,48 +0,0 @@
{
// Foam version 2.1 changes the addressing of faces in faceProcAddressing
// The following code checks and modifies the addressing for cases where
// the decomposition has been done with the foam2.0 and earlier tools, but
// the reconstruction is attempted with version 2.1 or later
label minFaceIndex = labelMax;
PtrList<labelIOList>& faceProcAddressing = procMeshes.faceProcAddressing();
forAll(faceProcAddressing, proci)
{
const labelList& curFaceAddr = faceProcAddressing[proci];
forAll(curFaceAddr, facei)
{
if (mag(curFaceAddr[facei]) < minFaceIndex)
{
minFaceIndex = mag(curFaceAddr[facei]);
}
}
}
if (minFaceIndex < 1)
{
WarningInFunction
<< "parallel decomposition addressing." << endl
<< "It looks like you are trying to reconstruct the case "
<< "decomposed with an earlier version of FOAM, which could\n"
<< "potentially cause compatibility problems. The code will "
<< "attempt to update the addressing automatically; in case of\n"
<< "failure, please repeat the decomposition of the case using "
<< "the current version fo decomposePar"
<< endl;
forAll(faceProcAddressing, proci)
{
labelList& curFaceAddr = faceProcAddressing[proci];
forAll(curFaceAddr, facei)
{
curFaceAddr[facei] += sign(curFaceAddr[facei]);
}
faceProcAddressing[proci].write();
}
}
}


@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
Copyright (C) 2015-2021 OpenCFD Ltd.
Copyright (C) 2015-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -334,11 +334,6 @@ int main(int argc, char *argv[])
// Read all meshes and addressing to reconstructed mesh
processorMeshes procMeshes(databases, regionName);
// Check face addressing for meshes that have been decomposed
// with a very old foam version
#include "checkFaceAddressingComp.H"
// Loop over all times
forAll(timeDirs, timei)
{
@ -362,11 +357,11 @@ int main(int argc, char *argv[])
}
// Check if any new meshes need to be read.
fvMesh::readUpdateState meshStat = mesh.readUpdate();
polyMesh::readUpdateState meshStat = mesh.readUpdate();
fvMesh::readUpdateState procStat = procMeshes.readUpdate();
polyMesh::readUpdateState procStat = procMeshes.readUpdate();
if (procStat == fvMesh::POINTS_MOVED)
if (procStat == polyMesh::POINTS_MOVED)
{
// Reconstruct the points for moving mesh cases and write
// them out
@ -407,83 +402,7 @@ int main(int argc, char *argv[])
procMeshes.boundaryProcAddressing()
);
reconstructor.reconstructFvVolumeInternalFields<scalar>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeInternalFields<vector>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeInternalFields<sphericalTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeInternalFields<symmTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeInternalFields<tensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeFields<scalar>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeFields<vector>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeFields<sphericalTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeFields<symmTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvVolumeFields<tensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvSurfaceFields<scalar>
(
objects,
selectedFields
);
reconstructor.reconstructFvSurfaceFields<vector>
(
objects,
selectedFields
);
reconstructor.reconstructFvSurfaceFields<sphericalTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvSurfaceFields<symmTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFvSurfaceFields<tensor>
(
objects,
selectedFields
);
reconstructor.reconstructAllFields(objects, selectedFields);
if (reconstructor.nReconstructed() == 0)
{
@ -515,31 +434,7 @@ int main(int argc, char *argv[])
procMeshes.boundaryProcAddressing()
);
reconstructor.reconstructFields<scalar>
(
objects,
selectedFields
);
reconstructor.reconstructFields<vector>
(
objects,
selectedFields
);
reconstructor.reconstructFields<sphericalTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFields<symmTensor>
(
objects,
selectedFields
);
reconstructor.reconstructFields<tensor>
(
objects,
selectedFields
);
reconstructor.reconstructAllFields(objects, selectedFields);
if (reconstructor.nReconstructed() == 0)
{
@ -631,78 +526,7 @@ int main(int argc, char *argv[])
reconstructor.reconstructPositions(cloudName);
reconstructor.reconstructFields<label>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFieldFields<label>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFields<scalar>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFieldFields<scalar>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFields<vector>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFieldFields<vector>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFields<sphericalTensor>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFieldFields<sphericalTensor>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFields<symmTensor>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFieldFields<symmTensor>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFields<tensor>
(
cloudName,
cloudObjs,
selectedLagrangianFields
);
reconstructor.reconstructFieldFields<tensor>
reconstructor.reconstructAllFields
(
cloudName,
cloudObjs,
@ -724,12 +548,12 @@ int main(int argc, char *argv[])
}
else if
(
objects.lookupClass(areaScalarField::typeName).size()
|| objects.lookupClass(areaVectorField::typeName).size()
|| objects.lookupClass(areaSphericalTensorField::typeName).size()
|| objects.lookupClass(areaSymmTensorField::typeName).size()
|| objects.lookupClass(areaTensorField::typeName).size()
|| objects.lookupClass(edgeScalarField::typeName).size()
objects.count<areaScalarField>()
|| objects.count<areaVectorField>()
|| objects.count<areaSphericalTensorField>()
|| objects.count<areaSymmTensorField>()
|| objects.count<areaTensorField>()
|| objects.count<edgeScalarField>()
)
{
Info << "Reconstructing FA fields" << nl << endl;
@ -747,13 +571,7 @@ int main(int argc, char *argv[])
procFaMeshes.boundaryProcAddressing()
);
reconstructor.reconstructFaAreaFields<scalar>(objects);
reconstructor.reconstructFaAreaFields<vector>(objects);
reconstructor.reconstructFaAreaFields<sphericalTensor>(objects);
reconstructor.reconstructFaAreaFields<symmTensor>(objects);
reconstructor.reconstructFaAreaFields<tensor>(objects);
reconstructor.reconstructFaEdgeFields<scalar>(objects);
reconstructor.reconstructAllFields(objects);
}
else
{
@ -782,21 +600,19 @@ int main(int argc, char *argv[])
polyMesh::meshSubDir/"sets"
);
IOobjectList cSets(objects.lookupClass(cellSet::typeName));
forAllConstIters(cSets, iter)
for (const word& setName : objects.sortedNames<cellSet>())
{
cSetNames.insert(iter.key(), cSetNames.size());
cSetNames.insert(setName, cSetNames.size());
}
IOobjectList fSets(objects.lookupClass(faceSet::typeName));
forAllConstIters(fSets, iter)
for (const word& setName : objects.sortedNames<faceSet>())
{
fSetNames.insert(iter.key(), fSetNames.size());
fSetNames.insert(setName, fSetNames.size());
}
IOobjectList pSets(objects.lookupClass(pointSet::typeName));
forAllConstIters(pSets, iter)
for (const word& setName : objects.sortedNames<pointSet>())
{
pSetNames.insert(iter.key(), pSetNames.size());
pSetNames.insert(setName, pSetNames.size());
}
}
@ -840,30 +656,25 @@ int main(int argc, char *argv[])
const labelList& cellMap =
procMeshes.cellProcAddressing()[proci];
IOobjectList cSets
(
objects.lookupClass(cellSet::typeName)
);
forAllConstIters(cSets, iter)
for (const IOobject& io : objects.sorted<cellSet>())
{
// Load cellSet
const cellSet procSet(*iter());
label setI = cSetNames[iter.key()];
if (!cellSets.set(setI))
const cellSet procSet(io);
const label seti = cSetNames[io.name()];
if (!cellSets.set(seti))
{
cellSets.set
(
setI,
seti,
new cellSet
(
mesh,
iter.key(),
io.name(),
procSet.size()
)
);
}
cellSet& cSet = cellSets[setI];
cellSet& cSet = cellSets[seti];
cSet.instance() = runTime.timeName();
for (const label celli : procSet)
@ -876,30 +687,25 @@ int main(int argc, char *argv[])
const labelList& faceMap =
procMeshes.faceProcAddressing()[proci];
IOobjectList fSets
(
objects.lookupClass(faceSet::typeName)
);
forAllConstIters(fSets, iter)
for (const IOobject& io : objects.sorted<faceSet>())
{
// Load faceSet
const faceSet procSet(*iter());
label setI = fSetNames[iter.key()];
if (!faceSets.set(setI))
const faceSet procSet(io);
const label seti = fSetNames[io.name()];
if (!faceSets.set(seti))
{
faceSets.set
(
setI,
seti,
new faceSet
(
mesh,
iter.key(),
io.name(),
procSet.size()
)
);
}
faceSet& fSet = faceSets[setI];
faceSet& fSet = faceSets[seti];
fSet.instance() = runTime.timeName();
for (const label facei : procSet)
@ -911,32 +717,28 @@ int main(int argc, char *argv[])
const labelList& pointMap =
procMeshes.pointProcAddressing()[proci];
IOobjectList pSets
(
objects.lookupClass(pointSet::typeName)
);
forAllConstIters(pSets, iter)
for (const IOobject& io : objects.sorted<pointSet>())
{
// Load pointSet
const pointSet propSet(*iter());
label setI = pSetNames[iter.key()];
if (!pointSets.set(setI))
const pointSet procSet(io);
const label seti = pSetNames[io.name()];
if (!pointSets.set(seti))
{
pointSets.set
(
setI,
seti,
new pointSet
(
mesh,
iter.key(),
propSet.size()
io.name(),
procSet.size()
)
);
}
pointSet& pSet = pointSets[setI];
pointSet& pSet = pointSets[seti];
pSet.instance() = runTime.timeName();
for (const label pointi : propSet)
for (const label pointi : procSet)
{
pSet.insert(pointMap[pointi]);
}


@ -522,14 +522,11 @@ void writeMaps
Info<< " pointProcAddressing" << endl;
ioAddr.rename("pointProcAddressing");
labelIOList(ioAddr, pointProcAddressing).write();
IOListRef<label>(ioAddr, pointProcAddressing).write();
// From processor face to reconstructed mesh face
Info<< " faceProcAddressing" << endl;
ioAddr.rename("faceProcAddressing");
labelIOList faceProcAddr(ioAddr, faceProcAddressing);
// Now add turning index to faceProcAddressing.
@ -572,17 +569,15 @@ void writeMaps
// From processor cell to reconstructed mesh cell
Info<< " cellProcAddressing" << endl;
ioAddr.rename("cellProcAddressing");
labelIOList(ioAddr, cellProcAddressing).write();
IOListRef<label>(ioAddr, cellProcAddressing).write();
// From processor patch to reconstructed mesh patch
Info<< " boundaryProcAddressing" << endl;
ioAddr.rename("boundaryProcAddressing");
labelIOList(ioAddr, boundProcAddressing).write();
IOListRef<label>(ioAddr, boundProcAddressing).write();
Info<< endl;
}


@ -78,6 +78,7 @@ Usage
#include "fvMesh.H"
#include "fvMeshTools.H"
#include "fvMeshDistribute.H"
#include "fieldsDistributor.H"
#include "decompositionMethod.H"
#include "decompositionModel.H"
#include "timeSelector.H"
@ -260,7 +261,6 @@ void printMeshData(const polyMesh& mesh)
{
// Collect all data on master
globalIndex globalCells(mesh.nCells());
labelListList patchNeiProcNo(Pstream::nProcs());
labelListList patchSize(Pstream::nProcs());
const labelList& pPatches = mesh.globalData().processorPatches();
@ -281,74 +281,94 @@ void printMeshData(const polyMesh& mesh)
// Print stats
globalIndex globalBoundaryFaces(mesh.nBoundaryFaces());
const globalIndex globalCells(mesh.nCells());
const globalIndex globalBoundaryFaces(mesh.nBoundaryFaces());
label maxProcCells = 0;
label maxProcFaces = 0;
label totProcFaces = 0;
label maxProcPatches = 0;
label totProcPatches = 0;
label maxProcFaces = 0;
for (const int procI : Pstream::allProcs())
for (const int proci : Pstream::allProcs())
{
const label nLocalCells = globalCells.localSize(proci);
const label nBndFaces = globalBoundaryFaces.localSize(proci);
Info<< nl
<< "Processor " << procI << nl
<< " Number of cells = " << globalCells.localSize(procI)
<< endl;
<< "Processor " << proci;
label nProcFaces = 0;
const labelList& nei = patchNeiProcNo[procI];
forAll(patchNeiProcNo[procI], i)
if (!nLocalCells)
{
Info<< " Number of faces shared with processor "
<< patchNeiProcNo[procI][i] << " = " << patchSize[procI][i]
<< endl;
nProcFaces += patchSize[procI][i];
Info<< " (empty)" << endl;
continue;
}
else
{
Info<< nl
<< " Number of cells = " << nLocalCells << endl;
}
Info<< " Number of processor patches = " << nei.size() << nl
<< " Number of processor faces = " << nProcFaces << nl
<< " Number of boundary faces = "
<< globalBoundaryFaces.localSize(procI)-nProcFaces << endl;
label nProcFaces = 0;
const labelList& nei = patchNeiProcNo[proci];
maxProcCells = max(maxProcCells, globalCells.localSize(procI));
forAll(patchNeiProcNo[proci], i)
{
Info<< " Number of faces shared with processor "
<< patchNeiProcNo[proci][i] << " = "
<< patchSize[proci][i] << nl;
nProcFaces += patchSize[proci][i];
}
{
Info<< " Number of processor patches = " << nei.size() << nl
<< " Number of processor faces = " << nProcFaces << nl
<< " Number of boundary faces = "
<< nBndFaces-nProcFaces << endl;
}
maxProcCells = max(maxProcCells, nLocalCells);
totProcFaces += nProcFaces;
totProcPatches += nei.size();
maxProcPatches = max(maxProcPatches, nei.size());
maxProcFaces = max(maxProcFaces, nProcFaces);
maxProcPatches = max(maxProcPatches, nei.size());
}
// Stats
scalar avgProcCells = scalar(globalCells.size())/Pstream::nProcs();
scalar avgProcPatches = scalar(totProcPatches)/Pstream::nProcs();
scalar avgProcFaces = scalar(totProcFaces)/Pstream::nProcs();
// In case of all faces on one processor. Just to avoid division by 0.
if (totProcPatches == 0)
{
avgProcPatches = 1;
}
if (totProcFaces == 0)
{
avgProcFaces = 1;
}
// Summary stats
Info<< nl
<< "Number of processor faces = " << totProcFaces/2 << nl
<< "Max number of cells = " << maxProcCells
<< " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
<< "% above average " << avgProcCells << ")" << nl
<< "Max number of processor patches = " << maxProcPatches
<< " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
<< "% above average " << avgProcPatches << ")" << nl
<< "Max number of faces between processors = " << maxProcFaces
<< " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
<< "% above average " << avgProcFaces << ")" << nl
<< endl;
<< "Number of processor faces = " << (totProcFaces/2) << nl
<< "Max number of cells = " << maxProcCells;
if (maxProcCells != globalCells.totalSize())
{
scalar avgValue = scalar(globalCells.totalSize())/Pstream::nProcs();
Info<< " (" << 100.0*(maxProcCells-avgValue)/avgValue
<< "% above average " << avgValue << ')';
}
Info<< nl;
Info<< "Max number of processor patches = " << maxProcPatches;
if (totProcPatches)
{
scalar avgValue = scalar(totProcPatches)/Pstream::nProcs();
Info<< " (" << 100.0*(maxProcPatches-avgValue)/avgValue
<< "% above average " << avgValue << ')';
}
Info<< nl;
Info<< "Max number of faces between processors = " << maxProcFaces;
if (totProcFaces)
{
scalar avgValue = scalar(totProcFaces)/Pstream::nProcs();
Info<< " (" << 100.0*(maxProcFaces-avgValue)/avgValue
<< "% above average " << avgValue << ')';
}
Info<< nl << endl;
}
@ -513,180 +533,6 @@ void determineDecomposition
}
// Generic mesh-based field reading
template<class GeoField>
void readField
(
const IOobject& io,
const fvMesh& mesh,
const label i,
PtrList<GeoField>& fields
)
{
fields.set(i, new GeoField(io, mesh));
}
// Definition of readField for GeometricFields only
template<class Type, template<class> class PatchField, class GeoMesh>
void readField
(
const IOobject& io,
const fvMesh& mesh,
const label i,
PtrList<GeometricField<Type, PatchField, GeoMesh>>& fields
)
{
fields.set
(
i,
new GeometricField<Type, PatchField, GeoMesh>(io, mesh, false)
);
}
// Read vol or surface fields
template<class GeoField>
void readFields
(
const boolList& haveMesh,
const fvMesh& mesh,
const autoPtr<fvMeshSubset>& subsetterPtr,
IOobjectList& allObjects,
PtrList<GeoField>& fields
)
{
// Get my objects of type
IOobjectList objects(allObjects.lookupClass(GeoField::typeName));
// Check that we all have all objects
wordList objectNames = objects.sortedNames();
// Get master names
wordList masterNames(objectNames);
Pstream::scatter(masterNames);
if (haveMesh[Pstream::myProcNo()] && objectNames != masterNames)
{
FatalErrorInFunction
<< "Objects not synchronised across processors." << nl
<< "Master has " << flatOutput(masterNames) << nl
<< "Processor " << Pstream::myProcNo()
<< " has " << flatOutput(objectNames)
<< exit(FatalError);
}
fields.setSize(masterNames.size());
// Have master send all fields to processors that don't have a mesh. The
// issue is if a patchField does any parallel operations inside its
// construct-from-dictionary. This will not work when going to more
// processors (e.g. decompose = 1 -> many) ! We could make a special
// exception for decomposePar but nicer would be to have read-communicator
// ... For now detect if decomposing & disable parRun
if (Pstream::master())
{
// Work out if we're decomposing - none of the subprocs has a mesh
bool decompose = true;
for (const int procI : Pstream::subProcs())
{
if (haveMesh[procI])
{
decompose = false;
}
}
forAll(masterNames, i)
{
const word& name = masterNames[i];
IOobject& io = *objects[name];
io.writeOpt(IOobject::AUTO_WRITE);
// Load field (but not oldTime)
const bool oldParRun = Pstream::parRun();
if (decompose)
{
Pstream::parRun(false);
}
readField(io, mesh, i, fields);
if (decompose)
{
Pstream::parRun(oldParRun);
}
// Create zero sized field and send
if (subsetterPtr)
{
const bool oldParRun = Pstream::parRun(false);
tmp<GeoField> tsubfld = subsetterPtr().interpolate(fields[i]);
Pstream::parRun(oldParRun);
// Send to all processors that don't have a mesh
for (const int procI : Pstream::subProcs())
{
if (!haveMesh[procI])
{
OPstream toProc(Pstream::commsTypes::blocking, procI);
toProc<< tsubfld();
}
}
}
}
}
else if (!haveMesh[Pstream::myProcNo()])
{
// Don't have mesh (nor fields). Receive empty field from master.
forAll(masterNames, i)
{
const word& name = masterNames[i];
// Receive field
IPstream fromMaster
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
);
dictionary fieldDict(fromMaster);
fields.set
(
i,
new GeoField
(
IOobject
(
name,
mesh.time().timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
mesh,
fieldDict
)
);
//// Write it for next time round (since mesh gets written as well)
//fields[i].write();
}
}
else
{
// Have mesh so just try to load
forAll(masterNames, i)
{
const word& name = masterNames[i];
IOobject& io = *objects[name];
io.writeOpt(IOobject::AUTO_WRITE);
// Load field (but not oldtime)
readField(io, mesh, i, fields);
}
}
}
// Variant of GeometricField::correctBoundaryConditions that only
// evaluates selected patch fields
template<class GeoField, class CoupledPatchType>
@ -817,7 +663,8 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
runTime.caseName() = baseRunTime.caseName();
runTime.processorCase(false);
}
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -826,7 +673,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
volScalarFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -835,7 +682,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
volVectorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -844,7 +691,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
volSphereTensorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -853,7 +700,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
volSymmTensorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -865,7 +712,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// surfaceFields
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -874,7 +721,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
surfScalarFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -883,7 +730,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
surfVectorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -892,7 +739,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
surfSphereTensorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -901,7 +748,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
surfSymmTensorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -912,7 +759,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// Dimensioned internal fields
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -921,7 +768,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
dimScalarFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -930,7 +777,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
dimVectorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -939,7 +786,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
dimSphereTensorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,
@ -948,7 +795,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
dimSymmTensorFields
);
readFields
fieldsDistributor::readFields
(
haveMesh,
mesh,