Patches contributed by Mattijs Janssens:

splitMeshRegions: handle flipping of faces for surface fields

subsetMesh: subset dimensionedFields

decomposePar: use run-time selection of decomposition constraints, used to
    keep cells on particular processors. See the example decomposeParDict in
    $FOAM_UTILITIES/parallel/decomposePar and the sketch after this list:
  - preserveBaffles: keep baffle faces on same processor
  - preserveFaceZones: keep faceZones owner and neighbour on same processor
  - preservePatches: keep owner and neighbour on same processor. Note: not
    suitable for cyclicAMI since these are not coupled on the patch level
  - singleProcessorFaceSets: keep complete faceSet on a single processor
  - refinementHistory: keep cells originating from a single cell on the
    same processor.
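
As a quick reference, a constraints block in the new run-time selectable
syntax might look as follows; this is only a sketch that mirrors the
commented example added to the decomposeParDict below (the faceSet name f1
and the ".*" patterns are placeholders):

    numberOfSubdomains 2;

    constraints
    {
        preserveBaffles
        {
            // Keep owner and neighbour of baffle faces together
            type    preserveBaffles;
        }
        preserveFaceZones
        {
            // Keep owner and neighbour together for faces in these zones
            type    preserveFaceZones;
            zones   (".*");
        }
        preservePatches
        {
            // Keep owner and neighbour together for (cyclic) patches
            type    preservePatches;
            patches (".*");
        }
        singleProcessorFaceSets
        {
            // Keep the whole faceSet f1 on one processor; -1 lets the
            // decomposition method choose which one
            type    singleProcessorFaceSets;
            singleProcessorFaceSets ((f1 -1));
        }
        refinementHistory
        {
            // Keep cells originating from a single cell together
            type    refinementHistory;
        }
    }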

decomposePar: clean up decomposition of refinement data from snappyHexMesh

reconstructPar: reconstruct refinement data (refineHexMesh, snappyHexMesh)

reconstructParMesh: reconstruct refinement data (refineHexMesh, snappyHexMesh)

redistributePar:
  - corrected mapping of surfaceFields
  - add processor patches in an order consistent with decomposePar

argList: check that slaves are running the same version as the master

fvMeshSubset: move to dynamicMesh library

fvMeshDistribute:
  - support for mapping dimensionedFields
  - corrected mapping of surfaceFields

parallel routines: allow parallel running on a single processor

Field: support for
  - distributed mapping
  - mapping with flipping

mapDistribute: support for flipping
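
The snippet below is a minimal, hypothetical sketch (not part of this commit)
of how the new flipping support might be used: it builds a mapDistribute whose
construct map carries a flipped entry and distributes a scalar list. The
+/-(index+1) encoding is inferred from the faceProcAddressing analogy in the
updated header comment, and flipping a scalar is assumed to negate it.

    #include "mapDistribute.H"
    #include "Pstream.H"
    #include "scalarList.H"

    using namespace Foam;

    void distributeWithFlip()
    {
        // Maps are per-processor; this sketch only fills the local slot
        labelListList subMap(Pstream::nProcs());
        labelList& send = subMap[Pstream::myProcNo()];
        send.setSize(2);
        send[0] = 1;    // local element 0, unflipped (= +(0+1))
        send[1] = 2;    // local element 1, unflipped (= +(1+1))

        labelListList constructMap(Pstream::nProcs());
        labelList& recv = constructMap[Pstream::myProcNo()];
        recv.setSize(2);
        recv[0] = 1;    // receive into slot 0, unflipped
        recv[1] = -2;   // receive into slot 1, flipped

        mapDistribute map
        (
            2,                      // constructSize
            subMap.xfer(),
            constructMap.xfer(),
            true,                   // subHasFlip
            true                    // constructHasFlip
        );

        List<scalar> fld(2);
        fld[0] = 10;
        fld[1] = 20;

        // After distribution fld[1] should come back flipped (negated)
        map.distribute(fld);
    }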

AMIInterpolation: avoid constructing localPoints
Henry Weller
2016-05-15 16:36:48 +01:00
parent 26658647fa
commit 1441f8cab0
93 changed files with 9230 additions and 2650 deletions

View File

@ -200,6 +200,7 @@ void subsetSurfaceFields
( (
const fvMesh& mesh, const fvMesh& mesh,
const fvMesh& subMesh, const fvMesh& subMesh,
const labelList& cellMap,
const labelList& faceMap, const labelList& faceMap,
const labelHashSet& addedPatches const labelHashSet& addedPatches
) )
@ -223,6 +224,7 @@ void subsetSurfaceFields
fld, fld,
subMesh, subMesh,
patchMap, patchMap,
cellMap,
faceMap faceMap
) )
); );
@ -828,6 +830,7 @@ void createAndWriteRegion
( (
mesh, mesh,
newMesh(), newMesh(),
map().cellMap(),
map().faceMap(), map().faceMap(),
addedPatches addedPatches
); );
@ -835,6 +838,7 @@ void createAndWriteRegion
( (
mesh, mesh,
newMesh(), newMesh(),
map().cellMap(),
map().faceMap(), map().faceMap(),
addedPatches addedPatches
); );
@ -842,6 +846,7 @@ void createAndWriteRegion
( (
mesh, mesh,
newMesh(), newMesh(),
map().cellMap(),
map().faceMap(), map().faceMap(),
addedPatches addedPatches
); );
@ -849,6 +854,7 @@ void createAndWriteRegion
( (
mesh, mesh,
newMesh(), newMesh(),
map().cellMap(),
map().faceMap(), map().faceMap(),
addedPatches addedPatches
); );
@ -856,6 +862,7 @@ void createAndWriteRegion
( (
mesh, mesh,
newMesh(), newMesh(),
map().cellMap(),
map().faceMap(), map().faceMap(),
addedPatches addedPatches
); );

View File

@ -1,8 +1,8 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude -I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \ EXE_LIBS = \
-lfiniteVolume \ -ldynamicMesh \
-lmeshTools \
-lgenericPatchFields -lgenericPatchFields

View File

@ -150,6 +150,40 @@ void subsetPointFields
} }
template<class Type>
void subsetDimensionedFields
(
const fvMeshSubset& subsetter,
const wordList& fieldNames,
PtrList<DimensionedField<Type, volMesh> >& subFields
)
{
const fvMesh& baseMesh = subsetter.baseMesh();
forAll(fieldNames, i)
{
const word& fieldName = fieldNames[i];
Info<< "Subsetting field " << fieldName << endl;
DimensionedField<Type, volMesh> fld
(
IOobject
(
fieldName,
baseMesh.time().timeName(),
baseMesh,
IOobject::MUST_READ,
IOobject::NO_WRITE
),
baseMesh
);
subFields.set(i, subsetter.interpolate(fld));
}
}
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
@ -361,6 +395,42 @@ int main(int argc, char *argv[])
subsetPointFields(subsetter, pMesh, pointTensorNames, pointTensorFlds); subsetPointFields(subsetter, pMesh, pointTensorNames, pointTensorFlds);
// Read dimensioned fields and subset
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
typedef volScalarField::Internal dimScalType;
wordList scalarDimNames(objects.names(dimScalType::typeName));
PtrList<dimScalType> scalarDimFlds(scalarDimNames.size());
subsetDimensionedFields(subsetter, scalarDimNames, scalarDimFlds);
typedef volVectorField::Internal dimVecType;
wordList vectorDimNames(objects.names(dimVecType::typeName));
PtrList<dimVecType> vectorDimFlds(vectorDimNames.size());
subsetDimensionedFields(subsetter, vectorDimNames, vectorDimFlds);
typedef volSphericalTensorField::Internal dimSphereType;
wordList sphericalTensorDimNames(objects.names(dimSphereType::typeName));
PtrList<dimSphereType> sphericalTensorDimFlds
(
sphericalTensorDimNames.size()
);
subsetDimensionedFields
(
subsetter,
sphericalTensorDimNames,
sphericalTensorDimFlds
);
typedef volSymmTensorField::Internal dimSymmTensorType;
wordList symmTensorDimNames(objects.names(dimSymmTensorType::typeName));
PtrList<dimSymmTensorType> symmTensorDimFlds(symmTensorDimNames.size());
subsetDimensionedFields(subsetter, symmTensorDimNames, symmTensorDimFlds);
typedef volTensorField::Internal dimTensorType;
wordList tensorDimNames(objects.names(dimTensorType::typeName));
PtrList<dimTensorType> tensorDimFlds(tensorDimNames.size());
subsetDimensionedFields(subsetter, tensorDimNames, tensorDimFlds);
// Write mesh and fields to new time // Write mesh and fields to new time
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -461,6 +531,33 @@ int main(int argc, char *argv[])
pointTensorFlds[i].write(); pointTensorFlds[i].write();
} }
// DimensionedFields
forAll(scalarDimFlds, i)
{
scalarDimFlds[i].rename(scalarDimNames[i]);
scalarDimFlds[i].write();
}
forAll(vectorDimFlds, i)
{
vectorDimFlds[i].rename(vectorDimNames[i]);
vectorDimFlds[i].write();
}
forAll(sphericalTensorDimFlds, i)
{
sphericalTensorDimFlds[i].rename(sphericalTensorDimNames[i]);
sphericalTensorDimFlds[i].write();
}
forAll(symmTensorDimFlds, i)
{
symmTensorDimFlds[i].rename(symmTensorDimNames[i]);
symmTensorDimFlds[i].write();
}
forAll(tensorDimFlds, i)
{
tensorDimFlds[i].rename(tensorDimNames[i]);
tensorDimFlds[i].write();
}
Info<< "End\n" << endl; Info<< "End\n" << endl;

View File

@ -2,15 +2,15 @@ EXE_INC = \
-I$(LIB_SRC)/parallel/decompose/decompose/lnInclude \ -I$(LIB_SRC)/parallel/decompose/decompose/lnInclude \
-I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \ -I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/regionModels/regionModel/lnInclude -I$(LIB_SRC)/regionModels/regionModel/lnInclude
EXE_LIBS = \ EXE_LIBS = \
-lfiniteVolume \ -ldynamicMesh \
-ldecompose \ -ldecompose \
-lgenericPatchFields \ -lgenericPatchFields \
-ldecompositionMethods -L$(FOAM_LIBBIN)/dummy -lmetisDecomp -lscotchDecomp \ -ldecompositionMethods -L$(FOAM_LIBBIN)/dummy -lmetisDecomp -lscotchDecomp \
-llagrangian \ -llagrangian \
-lmeshTools \
-lregionModels -lregionModels

View File

@ -96,6 +96,7 @@ Usage
#include "fvFieldDecomposer.H" #include "fvFieldDecomposer.H"
#include "pointFieldDecomposer.H" #include "pointFieldDecomposer.H"
#include "lagrangianFieldDecomposer.H" #include "lagrangianFieldDecomposer.H"
#include "decompositionModel.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -260,7 +261,8 @@ int main(int argc, char *argv[])
++nProcs; ++nProcs;
} }
// get requested numberOfSubdomains // get requested numberOfSubdomains. Note: have no mesh yet so
// cannot use decompositionModel::New
const label nDomains = readLabel const label nDomains = readLabel
( (
IOdictionary IOdictionary
@ -819,16 +821,6 @@ int main(int argc, char *argv[])
processorDb.setTime(runTime); processorDb.setTime(runTime);
// remove files remnants that can cause horrible problems
// - mut and nut are used to mark the new turbulence models,
// their existence prevents old models from being upgraded
{
fileName timeDir(processorDb.path()/processorDb.timeName());
rm(timeDir/"mut");
rm(timeDir/"nut");
}
// read the mesh // read the mesh
if (!procMeshList.set(proci)) if (!procMeshList.set(proci))
{ {

View File

@ -17,11 +17,61 @@ FoamFile
numberOfSubdomains 2; numberOfSubdomains 2;
// Optional decomposition constraints
//constraints
//{
// preserveBaffles
// {
// //- Keep owner and neighbour of baffles on same processor (i.e.
// // keep it detectable as a baffle). Baffles are two boundary face
// // sharing the same points
// type preserveBaffles;
// }
// preserveFaceZones
// {
// //- Keep owner and neighbour on same processor for faces in zones
// type preserveFaceZones;
// zones (".*");
// }
// preservePatches
// {
// //- Keep owner and neighbour on same processor for faces in patches
// // (only makes sense for cyclic patches. Not suitable for e.g.
// // cyclicAMI since these are not coupled on the patch level. Use
// // singleProcessorFaceSets for those)
// type preservePatches;
// patches (".*");
// }
// singleProcessorFaceSets
// {
// //- Keep all of faceSet on a single processor. This puts all cells
// // connected with a point, edge or face on the same processor.
// // (just having face connected cells might not guarantee a balanced
// // decomposition)
// // The processor can be -1 (the decompositionMethod chooses the
// // processor for a good load balance) or explicitly provided (upsets
// // balance)
// type singleProcessorFaceSets;
// singleProcessorFaceSets ((f1 -1));
// }
// refinementHistory
// {
// //- Decompose cells such that all cell originating from single cell
// // end up on same processor
// type refinementHistory;
// }
//}
// Deprecated form of specifying decomposition constraints:
//- Keep owner and neighbour on same processor for faces in zones: //- Keep owner and neighbour on same processor for faces in zones:
// preserveFaceZones (heater solid1 solid3); // preserveFaceZones (heater solid1 solid3);
//- Keep owner and neighbour on same processor for faces in patches: //- Keep owner and neighbour on same processor for faces in patches:
// (makes sense only for cyclic patches) // (makes sense only for cyclic patches. Not suitable for e.g. cyclicAMI
// since these are not coupled on the patch level. Use
// singleProcessorFaceSets for those)
//preservePatches (cyclic_half0 cyclic_half1); //preservePatches (cyclic_half0 cyclic_half1);
//- Keep all of faceSet on a single processor. This puts all cells //- Keep all of faceSet on a single processor. This puts all cells
@ -32,12 +82,13 @@ numberOfSubdomains 2;
// for a good load balance) or explicitly provided (upsets balance). // for a good load balance) or explicitly provided (upsets balance).
//singleProcessorFaceSets ((f0 -1)); //singleProcessorFaceSets ((f0 -1));
//- Keep owner and neighbour of baffles on same processor (i.e. keep it //- Keep owner and neighbour of baffles on same processor (i.e. keep it
// detectable as a baffle). Baffles are two boundary face sharing the // detectable as a baffle). Baffles are two boundary face sharing the
// same points. // same points.
//preserveBaffles true; //preserveBaffles true;
//- Use the volScalarField named here as a weight for each cell in the //- Use the volScalarField named here as a weight for each cell in the
// decomposition. For example, use a particle population field to decompose // decomposition. For example, use a particle population field to decompose
// for a balanced number of particles in a lagrangian simulation. // for a balanced number of particles in a lagrangian simulation.

View File

@ -31,14 +31,14 @@ License
#include "fvMesh.H" #include "fvMesh.H"
#include "OSspecific.H" #include "OSspecific.H"
#include "Map.H" #include "Map.H"
#include "globalMeshData.H"
#include "DynamicList.H" #include "DynamicList.H"
#include "fvFieldDecomposer.H" #include "fvFieldDecomposer.H"
#include "IOobjectList.H" #include "IOobjectList.H"
#include "cellSet.H" #include "cellSet.H"
#include "faceSet.H" #include "faceSet.H"
#include "pointSet.H" #include "pointSet.H"
#include "uniformDimensionedFields.H" #include "decompositionModel.H"
#include "hexRef8Data.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
@ -90,18 +90,16 @@ Foam::domainDecomposition::domainDecomposition(const IOobject& io)
) )
: NULL : NULL
), ),
decompositionDict_ nProcs_
( (
IOobject readInt
( (
"decomposeParDict", decompositionModel::New
time().system(), (
*this, *this
IOobject::MUST_READ_IF_MODIFIED, ).lookup("numberOfSubdomains")
IOobject::NO_WRITE
) )
), ),
nProcs_(readInt(decompositionDict_.lookup("numberOfSubdomains"))),
distributed_(false), distributed_(false),
cellToProc_(nCells()), cellToProc_(nCells()),
procPointAddressing_(nProcs_), procPointAddressing_(nProcs_),
@ -115,7 +113,10 @@ Foam::domainDecomposition::domainDecomposition(const IOobject& io)
procProcessorPatchSubPatchIDs_(nProcs_), procProcessorPatchSubPatchIDs_(nProcs_),
procProcessorPatchSubPatchStarts_(nProcs_) procProcessorPatchSubPatchStarts_(nProcs_)
{ {
decompositionDict_.readIfPresent("distributed", distributed_); decompositionModel::New
(
*this
).readIfPresent("distributed", distributed_);
} }
@ -195,57 +196,20 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
} }
autoPtr<labelIOList> cellLevelPtr; // Load refinement data (if any)
{ hexRef8Data baseMeshData
IOobject io
( (
"cellLevel", IOobject
(
"dummy",
facesInstance(), facesInstance(),
polyMesh::meshSubDir, polyMesh::meshSubDir,
*this, *this,
IOobject::MUST_READ, IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE IOobject::NO_WRITE,
false
)
); );
if (io.headerOk())
{
Info<< "Reading hexRef8 data : " << io.name() << endl;
cellLevelPtr.reset(new labelIOList(io));
}
}
autoPtr<labelIOList> pointLevelPtr;
{
IOobject io
(
"pointLevel",
facesInstance(),
polyMesh::meshSubDir,
*this,
IOobject::MUST_READ,
IOobject::NO_WRITE
);
if (io.headerOk())
{
Info<< "Reading hexRef8 data : " << io.name() << endl;
pointLevelPtr.reset(new labelIOList(io));
}
}
autoPtr<uniformDimensionedScalarField> level0EdgePtr;
{
IOobject io
(
"level0Edge",
facesInstance(),
polyMesh::meshSubDir,
*this,
IOobject::MUST_READ,
IOobject::NO_WRITE
);
if (io.headerOk())
{
Info<< "Reading hexRef8 data : " << io.name() << endl;
level0EdgePtr.reset(new uniformDimensionedScalarField(io));
}
}
@ -771,8 +735,8 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
} }
} }
// Set the precision of the points data to 10 // Set the precision of the points data to be min 10
IOstream::defaultPrecision(10); IOstream::defaultPrecision(max(10u, IOstream::defaultPrecision()));
procMesh.write(); procMesh.write();
@ -842,64 +806,23 @@ bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
} }
// hexRef8 data // Optional hexRef8 data
if (cellLevelPtr.valid()) hexRef8Data
{
labelIOList
( (
IOobject IOobject
( (
cellLevelPtr().name(), "dummy",
facesInstance(), facesInstance(),
polyMesh::meshSubDir, polyMesh::meshSubDir,
procMesh, procMesh,
IOobject::NO_READ, IOobject::NO_READ,
IOobject::AUTO_WRITE IOobject::NO_WRITE,
false
), ),
UIndirectList<label> baseMeshData,
( procCellAddressing_[proci],
cellLevelPtr(),
procCellAddressing_[proci]
)()
).write();
}
if (pointLevelPtr.valid())
{
labelIOList
(
IOobject
(
pointLevelPtr().name(),
facesInstance(),
polyMesh::meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
UIndirectList<label>
(
pointLevelPtr(),
procPointAddressing_[proci] procPointAddressing_[proci]
)()
).write(); ).write();
}
if (level0EdgePtr.valid())
{
uniformDimensionedScalarField
(
IOobject
(
level0EdgePtr().name(),
facesInstance(),
polyMesh::meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
level0EdgePtr()
).write();
}
// Statistics // Statistics

View File

@ -61,9 +61,6 @@ class domainDecomposition
//- Optional: points at the facesInstance //- Optional: points at the facesInstance
autoPtr<pointIOField> facesInstancePointsPtr_; autoPtr<pointIOField> facesInstancePointsPtr_;
//- Mesh decomposition control dictionary
IOdictionary decompositionDict_;
//- Number of processors in decomposition //- Number of processors in decomposition
label nProcs_; label nProcs_;

View File

@ -30,6 +30,7 @@ License
#include "regionSplit.H" #include "regionSplit.H"
#include "Tuple2.H" #include "Tuple2.H"
#include "faceSet.H" #include "faceSet.H"
#include "decompositionModel.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -39,15 +40,12 @@ void Foam::domainDecomposition::distributeCells()
cpuTime decompositionTime; cpuTime decompositionTime;
autoPtr<decompositionMethod> decomposePtr = decompositionMethod::New const decompositionModel& method = decompositionModel::New(*this);
(
decompositionDict_
);
scalarField cellWeights; scalarField cellWeights;
if (decompositionDict_.found("weightField")) if (method.found("weightField"))
{ {
word weightName = decompositionDict_.lookup("weightField"); word weightName = method.lookup("weightField");
volScalarField weights volScalarField weights
( (
@ -64,7 +62,7 @@ void Foam::domainDecomposition::distributeCells()
cellWeights = weights.primitiveField(); cellWeights = weights.primitiveField();
} }
cellToProc_ = decomposePtr().decompose(*this, cellWeights); cellToProc_ = method.decomposer().decompose(*this, cellWeights);
Info<< "\nFinished decomposition in " Info<< "\nFinished decomposition in "
<< decompositionTime.elapsedCpuTime() << decompositionTime.elapsedCpuTime()

View File

@ -23,10 +23,48 @@ License
\*---------------------------------------------------------------------------*/ \*---------------------------------------------------------------------------*/
#include "GeometricField.H"
#include "readFields.H" #include "readFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type, template<class> class PatchField, class GeoMesh>
void Foam::readFields
(
const typename GeoMesh::Mesh& mesh,
const IOobjectList& objects,
PtrList<GeometricField<Type, PatchField, GeoMesh> >& fields,
const bool readOldTime
)
{
typedef GeometricField<Type, PatchField, GeoMesh> GeoField;
// Search list of objects for fields of type GeomField
IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName));
// Remove the cellDist field
IOobjectList::iterator celDistIter = fieldObjects.find("cellDist");
if (celDistIter != fieldObjects.end())
{
fieldObjects.erase(celDistIter);
}
// Get sorted set of names (different processors might read objects in
// different order)
const wordList masterNames(fieldObjects.sortedNames());
// Construct the fields
fields.setSize(masterNames.size());
forAll(masterNames, i)
{
const IOobject& io = *fieldObjects[masterNames[i]];
fields.set(i, new GeoField(io, mesh, readOldTime));
}
}
template<class Mesh, class GeoField> template<class Mesh, class GeoField>
void Foam::readFields void Foam::readFields
( (
@ -38,24 +76,21 @@ void Foam::readFields
// Search list of objects for fields of type GeomField // Search list of objects for fields of type GeomField
IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName)); IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName));
// Remove the cellDist field
IOobjectList::iterator celDistIter = fieldObjects.find("cellDist");
if (celDistIter != fieldObjects.end())
{
fieldObjects.erase(celDistIter);
}
// Construct the fields // Construct the fields
fields.setSize(fieldObjects.size()); fields.setSize(fieldObjects.size());
label fieldi = 0; // Get sorted set of names (different processors might read objects in
forAllIter(IOobjectList, fieldObjects, iter) // different order)
const wordList masterNames(fieldObjects.sortedNames());
// Construct the fields
fields.setSize(masterNames.size());
forAll(masterNames, i)
{ {
fields.set const IOobject& io = *fieldObjects[masterNames[i]];
(
fieldi++, fields.set(i, new GeoField(io, mesh));
new GeoField(*iter(), mesh)
);
} }
} }

View File

@ -2,7 +2,7 @@
========= | ========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration | \\ / O peration |
\\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation | \\/ M anipulation |
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
License License
@ -41,6 +41,16 @@ SourceFiles
namespace Foam namespace Foam
{ {
// Read the fields and hold on the pointer list
template<class Type, template<class> class PatchField, class GeoMesh>
void readFields
(
const typename GeoMesh::Mesh& mesh,
const IOobjectList& objects,
PtrList<GeometricField<Type, PatchField, GeoMesh> >& fields,
const bool readOldTime
);
// Read the fields and hold on the pointer list // Read the fields and hold on the pointer list
template<class Mesh, class GeoField> template<class Mesh, class GeoField>
void readFields void readFields

View File

@ -1,6 +1,7 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/parallel/reconstruct/reconstruct/lnInclude \ -I$(LIB_SRC)/parallel/reconstruct/reconstruct/lnInclude \
-I$(LIB_SRC)/regionModels/regionModel/lnInclude -I$(LIB_SRC)/regionModels/regionModel/lnInclude
@ -9,6 +10,7 @@ EXE_LIBS = \
-lfiniteVolume \ -lfiniteVolume \
-lgenericPatchFields \ -lgenericPatchFields \
-llagrangian \ -llagrangian \
-ldynamicMesh \
-lmeshTools \ -lmeshTools \
-lreconstruct \ -lreconstruct \
-lregionModels -lregionModels

View File

@ -45,6 +45,8 @@ Description
#include "faceSet.H" #include "faceSet.H"
#include "pointSet.H" #include "pointSet.H"
#include "hexRef8Data.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
bool haveAllTimes bool haveAllTimes
@ -868,6 +870,78 @@ int main(int argc, char *argv[])
pointSets[i].write(); pointSets[i].write();
} }
} }
// Reconstruct refinement data
{
PtrList<hexRef8Data> procData(procMeshes.meshes().size());
forAll(procMeshes.meshes(), procI)
{
const fvMesh& procMesh = procMeshes.meshes()[procI];
procData.set
(
procI,
new hexRef8Data
(
IOobject
(
"dummy",
procMesh.time().timeName(),
polyMesh::meshSubDir,
procMesh,
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE,
false
)
)
);
}
// Combine individual parts
const PtrList<labelIOList>& cellAddr =
procMeshes.cellProcAddressing();
UPtrList<const labelList> cellMaps(cellAddr.size());
forAll(cellAddr, i)
{
cellMaps.set(i, &cellAddr[i]);
}
const PtrList<labelIOList>& pointAddr =
procMeshes.pointProcAddressing();
UPtrList<const labelList> pointMaps(pointAddr.size());
forAll(pointAddr, i)
{
pointMaps.set(i, &pointAddr[i]);
}
UPtrList<const hexRef8Data> procRefs(procData.size());
forAll(procData, i)
{
procRefs.set(i, &procData[i]);
}
hexRef8Data
(
IOobject
(
"dummy",
mesh.time().timeName(),
polyMesh::meshSubDir,
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
cellMaps,
pointMaps,
procRefs
).write();
}
} }
} }
@ -885,7 +959,7 @@ int main(int argc, char *argv[])
} }
} }
Info<< "End.\n" << endl; Info<< "\nEnd\n" << endl;
return 0; return 0;
} }

View File

@ -49,7 +49,7 @@ Description
#include "faceCoupleInfo.H" #include "faceCoupleInfo.H"
#include "fvMeshAdder.H" #include "fvMeshAdder.H"
#include "polyTopoChange.H" #include "polyTopoChange.H"
#include "zeroGradientFvPatchFields.H" #include "extrapolatedCalculatedFvPatchFields.H"
using namespace Foam; using namespace Foam;
@ -401,13 +401,14 @@ void writeCellDistance
), ),
masterMesh, masterMesh,
dimensionedScalar("cellDist", dimless, 0), dimensionedScalar("cellDist", dimless, 0),
zeroGradientFvPatchScalarField::typeName extrapolatedCalculatedFvPatchScalarField::typeName
); );
forAll(cellDecomposition, celli) forAll(cellDecomposition, celli)
{ {
cellDist[celli] = cellDecomposition[celli]; cellDist[celli] = cellDecomposition[celli];
} }
cellDist.correctBoundaryConditions();
cellDist.write(); cellDist.write();

View File

@ -236,13 +236,15 @@ void writeDecomposition
false // do not register false // do not register
), ),
mesh, mesh,
dimensionedScalar(name, dimless, -1) dimensionedScalar(name, dimless, -1),
extrapolatedCalculatedFvPatchScalarField::typeName
); );
forAll(procCells, cI) forAll(procCells, cI)
{ {
procCells[cI] = decomp[cI]; procCells[cI] = decomp[cI];
} }
procCells.correctBoundaryConditions();
procCells.write(); procCells.write();
} }

View File

@ -2,12 +2,13 @@ EXE_INC = \
/* -DFULLDEBUG -g -O0 */ \ /* -DFULLDEBUG -g -O0 */ \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/sampling/lnInclude \ -I$(LIB_SRC)/sampling/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude -I$(LIB_SRC)/lagrangian/basic/lnInclude
EXE_LIBS = \ EXE_LIBS = \
-lfiniteVolume \ -ldynamicMesh \
-lmeshTools \
-lsampling \ -lsampling \
-lgenericPatchFields \ -lgenericPatchFields \
-llagrangian -llagrangian

View File

@ -2,12 +2,12 @@ EXE_INC = \
-I$(WM_THIRD_PARTY_DIR)/tecio/tecsrc/lnInclude \ -I$(WM_THIRD_PARTY_DIR)/tecio/tecsrc/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude -I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \ EXE_LIBS = \
-llagrangian \ -llagrangian \
-lfiniteVolume \ -ldynamicMesh \
-lgenericPatchFields \ -lgenericPatchFields \
-lmeshTools \
-L$(FOAM_EXT_LIBBIN) -ltecio -L$(FOAM_EXT_LIBBIN) -ltecio

View File

@ -3,6 +3,7 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude \ -I$(LIB_SRC)/lagrangian/basic/lnInclude \
-I../../vtkPVReaders/lnInclude \ -I../../vtkPVReaders/lnInclude \
-I../PVFoamReader \ -I../PVFoamReader \
@ -14,8 +15,7 @@ EXE_INC = \
) )
LIB_LIBS = \ LIB_LIBS = \
-lmeshTools \ -ldynamicMesh \
-lfiniteVolume \
-lgenericPatchFields \ -lgenericPatchFields \
-llagrangian \ -llagrangian \
-L$(FOAM_LIBBIN) -lvtkPVReaders \ -L$(FOAM_LIBBIN) -lvtkPVReaders \

View File

@ -49,9 +49,9 @@ wmake $targetType lagrangian/distributionModels
wmake $targetType genericPatchFields wmake $targetType genericPatchFields
wmake $targetType conversion wmake $targetType conversion
wmake $targetType sampling
wmake $targetType mesh/extrudeModel wmake $targetType mesh/extrudeModel
wmake $targetType dynamicMesh wmake $targetType dynamicMesh
wmake $targetType sampling
wmake $targetType dynamicFvMesh wmake $targetType dynamicFvMesh
wmake $targetType topoChangerFvMesh wmake $targetType topoChangerFvMesh

View File

@ -94,6 +94,9 @@ $(strings)/wordRe/wordRe.C
$(strings)/lists/hashedWordList.C $(strings)/lists/hashedWordList.C
$(strings)/stringOps/stringOps.C $(strings)/stringOps/stringOps.C
ops = primitives/ops
$(ops)/flipOp.C
primitives/hashes/Hasher/Hasher.C primitives/hashes/Hasher/Hasher.C
sha1 = primitives/hashes/SHA1 sha1 = primitives/hashes/SHA1
@ -216,8 +219,8 @@ $(dll)/codedBase/codedBase.C
db/functionObjects/functionObject/functionObject.C db/functionObjects/functionObject/functionObject.C
db/functionObjects/functionObjectList/functionObjectList.C db/functionObjects/functionObjectList/functionObjectList.C
db/functionObjects/functionObjectFile/functionObjectFile.C db/functionObjects/writeFile/writeFile.C
db/functionObjects/functionObjectFiles/functionObjectFiles.C db/functionObjects/writeFiles/writeFiles.C
db/functionObjects/timeControl/timeControl.C db/functionObjects/timeControl/timeControl.C
db/functionObjects/timeControl/timeControlFunctionObject.C db/functionObjects/timeControl/timeControlFunctionObject.C
@ -513,6 +516,7 @@ $(mapPolyMesh)/mapPolyMesh.C
$(mapPolyMesh)/faceMapper/faceMapper.C $(mapPolyMesh)/faceMapper/faceMapper.C
$(mapPolyMesh)/cellMapper/cellMapper.C $(mapPolyMesh)/cellMapper/cellMapper.C
$(mapPolyMesh)/mapDistribute/mapDistribute.C $(mapPolyMesh)/mapDistribute/mapDistribute.C
$(mapPolyMesh)/mapDistribute/mapDistributeBase.C
$(mapPolyMesh)/mapDistribute/mapDistributePolyMesh.C $(mapPolyMesh)/mapDistribute/mapDistributePolyMesh.C
$(mapPolyMesh)/mapDistribute/IOmapDistribute.C $(mapPolyMesh)/mapDistribute/IOmapDistribute.C
$(mapPolyMesh)/mapAddedPolyMesh.C $(mapPolyMesh)/mapAddedPolyMesh.C

View File

@ -57,6 +57,24 @@ const Foam::NamedEnum<Foam::UPstream::commsTypes, 3>
void Foam::UPstream::setParRun(const label nProcs) void Foam::UPstream::setParRun(const label nProcs)
{ {
if (nProcs == 0)
{
parRun_ = false;
freeCommunicator(UPstream::worldComm);
label comm = allocateCommunicator(-1, labelList(1, label(0)), false);
if (comm != UPstream::worldComm)
{
FatalErrorIn("UPstream::setParRun(const label)")
<< "problem : comm:" << comm
<< " UPstream::worldComm:" << UPstream::worldComm
<< Foam::exit(FatalError);
}
Pout.prefix() = "";
Perr.prefix() = "";
}
else
{
parRun_ = true; parRun_ = true;
// Redo worldComm communicator (this has been created at static // Redo worldComm communicator (this has been created at static
@ -73,6 +91,7 @@ void Foam::UPstream::setParRun(const label nProcs)
Pout.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] "; Pout.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] ";
Perr.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] "; Perr.prefix() = '[' + name(myProcNo(Pstream::worldComm)) + "] ";
}
} }

View File

@ -50,7 +50,7 @@ void Foam::Pstream::combineGather
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const commsStruct& myComm = comms[UPstream::myProcNo(comm)];
@ -177,7 +177,7 @@ void Foam::Pstream::combineScatter
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)];
@ -278,7 +278,7 @@ void Foam::Pstream::listCombineGather
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const commsStruct& myComm = comms[UPstream::myProcNo(comm)];
@ -412,7 +412,7 @@ void Foam::Pstream::listCombineScatter
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)];
@ -525,7 +525,7 @@ void Foam::Pstream::mapCombineGather
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const commsStruct& myComm = comms[UPstream::myProcNo(comm)];
@ -625,7 +625,7 @@ void Foam::Pstream::mapCombineScatter
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const UPstream::commsStruct& myComm = comms[UPstream::myProcNo(comm)];

View File

@ -61,7 +61,9 @@ void Foam::Pstream::exchange
recvBufs.setSize(sendBufs.size()); recvBufs.setSize(sendBufs.size());
if (UPstream::nProcs(comm) > 1) recvBufs.setSize(sendBufs.size());
if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
label startOfRequests = Pstream::nRequests(); label startOfRequests = Pstream::nRequests();

View File

@ -52,7 +52,7 @@ void Pstream::gather
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const commsStruct& myComm = comms[UPstream::myProcNo(comm)];
@ -151,7 +151,7 @@ void Pstream::scatter
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
// Get my communication order // Get my communication order
const commsStruct& myComm = comms[UPstream::myProcNo(comm)]; const commsStruct& myComm = comms[UPstream::myProcNo(comm)];

View File

@ -53,7 +53,7 @@ void Pstream::gatherList
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
if (Values.size() != UPstream::nProcs(comm)) if (Values.size() != UPstream::nProcs(comm))
{ {
@ -209,7 +209,7 @@ void Pstream::scatterList
const label comm const label comm
) )
{ {
if (UPstream::nProcs(comm) > 1) if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
{ {
if (Values.size() != UPstream::nProcs(comm)) if (Values.size() != UPstream::nProcs(comm))
{ {

View File

@ -222,6 +222,7 @@ bool Foam::objectRegistry::checkIn(regIOobject& io) const
{ {
Pout<< "objectRegistry::checkIn(regIOobject&) : " Pout<< "objectRegistry::checkIn(regIOobject&) : "
<< name() << " : checking in " << io.name() << name() << " : checking in " << io.name()
<< " of type " << io.type()
<< endl; << endl;
} }
@ -354,6 +355,7 @@ bool Foam::objectRegistry::writeObject
Pout<< "objectRegistry::write() : " Pout<< "objectRegistry::write() : "
<< name() << " : Considering writing object " << name() << " : Considering writing object "
<< iter.key() << iter.key()
<< " of type " << iter()->type()
<< " with writeOpt " << iter()->writeOpt() << " with writeOpt " << iter()->writeOpt()
<< " to file " << iter()->objectPath() << " to file " << iter()->objectPath()
<< endl; << endl;

View File

@ -155,6 +155,15 @@ public:
const word& fieldDictEntry="value" const word& fieldDictEntry="value"
); );
//- Construct from dictionary
DimensionedField
(
const IOobject&,
const Mesh& mesh,
const dictionary& fieldDict,
const word& fieldDictEntry="value"
);
//- Construct as copy //- Construct as copy
DimensionedField DimensionedField
( (

View File

@ -80,6 +80,24 @@ Foam::DimensionedField<Type, GeoMesh>::DimensionedField
} }
template<class Type, class GeoMesh>
Foam::DimensionedField<Type, GeoMesh>::DimensionedField
(
const IOobject& io,
const Mesh& mesh,
const dictionary& fieldDict,
const word& fieldDictEntry
)
:
regIOobject(io),
Field<Type>(0),
mesh_(mesh),
dimensions_(dimless)
{
readField(fieldDict, fieldDictEntry);
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type, class GeoMesh> template<class Type, class GeoMesh>

View File

@ -27,6 +27,8 @@ License
#include "FieldM.H" #include "FieldM.H"
#include "dictionary.H" #include "dictionary.H"
#include "contiguous.H" #include "contiguous.H"
#include "mapDistributeBase.H"
#include "flipOp.H"
// * * * * * * * * * * * * * * * Static Members * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Static Members * * * * * * * * * * * * * * //
@ -122,12 +124,13 @@ template<class Type>
Foam::Field<Type>::Field Foam::Field<Type>::Field
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& mapper const FieldMapper& mapper,
const bool applyFlip
) )
: :
List<Type>(mapper.size()) List<Type>(mapper.size())
{ {
map(mapF, mapper); map(mapF, mapper, applyFlip);
} }
@ -136,12 +139,13 @@ Foam::Field<Type>::Field
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& mapper, const FieldMapper& mapper,
const Type& defaultValue const Type& defaultValue,
const bool applyFlip
) )
: :
List<Type>(mapper.size(), defaultValue) List<Type>(mapper.size(), defaultValue)
{ {
map(mapF, mapper); map(mapF, mapper, applyFlip);
} }
@ -150,12 +154,13 @@ Foam::Field<Type>::Field
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& mapper, const FieldMapper& mapper,
const UList<Type>& defaultValues const UList<Type>& defaultValues,
const bool applyFlip
) )
: :
List<Type>(defaultValues) List<Type>(defaultValues)
{ {
map(mapF, mapper); map(mapF, mapper, applyFlip);
} }
@ -163,12 +168,13 @@ template<class Type>
Foam::Field<Type>::Field Foam::Field<Type>::Field
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& mapper const FieldMapper& mapper,
const bool applyFlip
) )
: :
List<Type>(mapper.size()) List<Type>(mapper.size())
{ {
map(tmapF, mapper); map(tmapF, mapper, applyFlip);
} }
@ -177,12 +183,13 @@ Foam::Field<Type>::Field
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& mapper, const FieldMapper& mapper,
const Type& defaultValue const Type& defaultValue,
const bool applyFlip
) )
: :
List<Type>(mapper.size(), defaultValue) List<Type>(mapper.size(), defaultValue)
{ {
map(tmapF, mapper); map(tmapF, mapper, applyFlip);
} }
@ -191,12 +198,13 @@ Foam::Field<Type>::Field
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& mapper, const FieldMapper& mapper,
const UList<Type>& defaultValues const UList<Type>& defaultValues,
const bool applyFlip
) )
: :
List<Type>(defaultValues) List<Type>(defaultValues)
{ {
map(tmapF, mapper); map(tmapF, mapper, applyFlip);
} }
@ -433,9 +441,44 @@ template<class Type>
void Foam::Field<Type>::map void Foam::Field<Type>::map
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& mapper const FieldMapper& mapper,
const bool applyFlip
) )
{ {
if (mapper.distributed())
{
// Fetch remote parts of mapF
const mapDistributeBase& distMap = mapper.distributeMap();
Field<Type> newMapF(mapF);
if (applyFlip)
{
distMap.distribute(newMapF);
}
else
{
distMap.distribute(newMapF, noOp());
}
if (mapper.direct() && notNull(mapper.directAddressing()))
{
map(newMapF, mapper.directAddressing());
}
else if (!mapper.direct())
{
map(newMapF, mapper.addressing(), mapper.weights());
}
else if (mapper.direct() && isNull(mapper.directAddressing()))
{
// Special case, no local mapper. Assume ordering already correct
// from distribution. Note: this behaviour is different compared
// to local mapper.
this->transfer(newMapF);
this->setSize(mapper.size());
}
}
else
{
if if
( (
mapper.direct() mapper.direct()
@ -449,6 +492,7 @@ void Foam::Field<Type>::map
{ {
map(mapF, mapper.addressing(), mapper.weights()); map(mapF, mapper.addressing(), mapper.weights());
} }
}
} }
@ -456,10 +500,11 @@ template<class Type>
void Foam::Field<Type>::map void Foam::Field<Type>::map
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& mapper const FieldMapper& mapper,
const bool applyFlip
) )
{ {
map(tmapF(), mapper); map(tmapF(), mapper, applyFlip);
tmapF.clear(); tmapF.clear();
} }
@ -467,9 +512,45 @@ void Foam::Field<Type>::map
template<class Type> template<class Type>
void Foam::Field<Type>::autoMap void Foam::Field<Type>::autoMap
( (
const FieldMapper& mapper const FieldMapper& mapper,
const bool applyFlip
) )
{ {
if (mapper.distributed())
{
// Fetch remote parts of *this
const mapDistributeBase& distMap = mapper.distributeMap();
Field<Type> fCpy(*this);
if (applyFlip)
{
distMap.distribute(fCpy);
}
else
{
distMap.distribute(fCpy, noOp());
}
if
(
(mapper.direct()
&& notNull(mapper.directAddressing()))
|| !mapper.direct()
)
{
this->map(fCpy, mapper);
}
else if (mapper.direct() && isNull(mapper.directAddressing()))
{
// Special case, no local mapper. Assume ordering already correct
// from distribution. Note: this behaviour is different compared
// to local mapper.
this->transfer(fCpy);
this->setSize(mapper.size());
}
}
else
{
if if
( (
( (
@ -487,6 +568,7 @@ void Foam::Field<Type>::autoMap
{ {
this->setSize(mapper.size()); this->setSize(mapper.size());
} }
}
} }

View File

@ -160,7 +160,8 @@ public:
Field Field
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& map const FieldMapper& map,
const bool applyFlip = true
); );
//- Construct by mapping from the given field //- Construct by mapping from the given field
@ -168,7 +169,8 @@ public:
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& map, const FieldMapper& map,
const Type& defaultValue const Type& defaultValue,
const bool applyFlip = true
); );
//- Construct by mapping from the given field //- Construct by mapping from the given field
@ -176,14 +178,16 @@ public:
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& map, const FieldMapper& map,
const UList<Type>& defaultValues const UList<Type>& defaultValues,
const bool applyFlip = true
); );
//- Construct by mapping from the given tmp field //- Construct by mapping from the given tmp field
Field Field
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& map const FieldMapper& map,
const bool applyFlip = true
); );
//- Construct by mapping from the given tmp field. Supplied uniform //- Construct by mapping from the given tmp field. Supplied uniform
@ -192,7 +196,8 @@ public:
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& map, const FieldMapper& map,
const Type& defaultValue const Type& defaultValue,
const bool applyFlip = true
); );
//- Construct by mapping from the given tmp field. Supplied values //- Construct by mapping from the given tmp field. Supplied values
@ -201,7 +206,8 @@ public:
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& map, const FieldMapper& map,
const UList<Type>& defaultValues const UList<Type>& defaultValues,
const bool applyFlip = true
); );
//- Construct as copy //- Construct as copy
@ -272,20 +278,23 @@ public:
void map void map
( (
const UList<Type>& mapF, const UList<Type>& mapF,
const FieldMapper& map const FieldMapper& map,
const bool applyFlip = true
); );
//- Map from the given tmp field //- Map from the given tmp field
void map void map
( (
const tmp<Field<Type>>& tmapF, const tmp<Field<Type>>& tmapF,
const FieldMapper& map const FieldMapper& map,
const bool applyFlip = true
); );
//- Map from self //- Map from self
void autoMap void autoMap
( (
const FieldMapper& map const FieldMapper& map,
const bool applyFlip = true
); );
//- 1 to 1 reverse-map from the given field //- 1 to 1 reverse-map from the given field

View File

@ -37,6 +37,8 @@ Description
namespace Foam namespace Foam
{ {
class mapDistributeBase;
/*---------------------------------------------------------------------------*\ /*---------------------------------------------------------------------------*\
Class FieldMapper Declaration Class FieldMapper Declaration
\*---------------------------------------------------------------------------*/ \*---------------------------------------------------------------------------*/
@ -64,6 +66,19 @@ public:
virtual bool direct() const = 0; virtual bool direct() const = 0;
virtual bool distributed() const
{
return false;
}
virtual const mapDistributeBase& distributeMap() const
{
FatalErrorInFunction
<< "attempt to access null distributeMap"
<< abort(FatalError);
return *reinterpret_cast<mapDistributeBase*>(NULL);
}
//- Are there unmapped values? I.e. do all size() elements get //- Are there unmapped values? I.e. do all size() elements get
// get value // get value
virtual bool hasUnmapped() const = 0; virtual bool hasUnmapped() const = 0;

View File

@ -784,9 +784,6 @@ void Foam::argList::parse
if (Pstream::master()) if (Pstream::master())
{ {
slaveProcs.setSize(Pstream::nProcs() - 1); slaveProcs.setSize(Pstream::nProcs() - 1);
string slaveMachine;
label slavePid;
label proci = 0; label proci = 0;
for for
( (
@ -796,15 +793,30 @@ void Foam::argList::parse
) )
{ {
IPstream fromSlave(Pstream::scheduled, slave); IPstream fromSlave(Pstream::scheduled, slave);
fromSlave >> slaveMachine >> slavePid;
string slaveBuild;
string slaveMachine;
label slavePid;
fromSlave >> slaveBuild >> slaveMachine >> slavePid;
slaveProcs[proci++] = slaveMachine + "." + name(slavePid); slaveProcs[proci++] = slaveMachine + "." + name(slavePid);
// Check build string to make sure all processors are running
// the same build
if (slaveBuild != Foam::FOAMbuild)
{
FatalErrorIn(executable())
<< "Master is running version " << Foam::FOAMbuild
<< "; slave " << proci << " is running version "
<< slaveBuild
<< exit(FatalError);
}
} }
} }
else else
{ {
OPstream toMaster(Pstream::scheduled, Pstream::masterNo()); OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
toMaster << hostName() << pid(); toMaster << string(Foam::FOAMbuild) << hostName() << pid();
} }
} }

View File

@ -118,6 +118,12 @@ Note:
+------+ 0 +------+ 0
When constructing from components optionally a 'flip' on
the maps can be specified. This will interpret the map
values as index+flip, similar to e.g. faceProcAddressing. The flip
will only be applied to fieldTypes (scalar, vector, .. triad)
SourceFiles SourceFiles
mapDistribute.C mapDistribute.C
mapDistributeTemplates.C mapDistributeTemplates.C
@ -127,12 +133,8 @@ SourceFiles
#ifndef mapDistribute_H #ifndef mapDistribute_H
#define mapDistribute_H #define mapDistribute_H
#include "mapDistributeBase.H"
#include "transformList.H" #include "transformList.H"
#include "labelList.H"
#include "labelPair.H"
#include "Pstream.H"
#include "boolList.H"
#include "Map.H"
#include "vectorTensorTransform.H" #include "vectorTensorTransform.H"
#include "coupledPolyPatch.H" #include "coupledPolyPatch.H"
@ -141,9 +143,6 @@ SourceFiles
namespace Foam namespace Foam
{ {
class mapPolyMesh;
class globalIndex;
class PstreamBuffers;
class globalIndexAndTransform; class globalIndexAndTransform;
/*---------------------------------------------------------------------------*\ /*---------------------------------------------------------------------------*\
@ -151,20 +150,11 @@ class globalIndexAndTransform;
\*---------------------------------------------------------------------------*/ \*---------------------------------------------------------------------------*/
class mapDistribute class mapDistribute
:
public mapDistributeBase
{ {
// Private data // Private data
//- Size of reconstructed data
label constructSize_;
//- Maps from subsetted data back to original data
labelListList subMap_;
//- Maps from subsetted data to new reconstructed data
labelListList constructMap_;
// Optional transformation
//- For every globalIndexAndTransform::transformPermutations //- For every globalIndexAndTransform::transformPermutations
// gives the elements that need to be transformed // gives the elements that need to be transformed
labelListList transformElements_; labelListList transformElements_;
@ -172,55 +162,8 @@ class mapDistribute
//- Destination in constructMap for transformed elements //- Destination in constructMap for transformed elements
labelList transformStart_; labelList transformStart_;
//- Schedule
mutable autoPtr<List<labelPair>> schedulePtr_;
// Private Member Functions // Private Member Functions
static void checkReceivedSize
(
const label proci,
const label expectedSize,
const label receivedSize
);
//- Construct per processor compact addressing of the global elements
// needed. The ones from the local processor are not included since
// these are always all needed.
void calcCompactAddressing
(
const globalIndex& globalNumbering,
const labelList& elements,
List<Map<label>>& compactMap
) const;
void calcCompactAddressing
(
const globalIndex& globalNumbering,
const labelListList& elements,
List<Map<label>>& compactMap
) const;
void exchangeAddressing
(
const int tag,
const globalIndex& globalNumbering,
labelList& elements,
List<Map<label>>& compactMap,
labelList& compactStart
);
void exchangeAddressing
(
const int tag,
const globalIndex& globalNumbering,
labelListList& elements,
List<Map<label>>& compactMap,
labelList& compactStart
);
//- Helper function: copy transformElements without transformation //- Helper function: copy transformElements without transformation
template<class T> template<class T>
void applyDummyTransforms(List<T>& field) const; void applyDummyTransforms(List<T>& field) const;
@ -245,7 +188,6 @@ class mapDistribute
const TransformOp& top const TransformOp& top
) const; ) const;
public: public:
// Public classes // Public classes
@ -362,7 +304,6 @@ public:
}; };
// Declare name of the class and its debug switch // Declare name of the class and its debug switch
ClassName("mapDistribute"); ClassName("mapDistribute");
@ -377,7 +318,9 @@ public:
( (
const label constructSize, const label constructSize,
const Xfer<labelListList>& subMap, const Xfer<labelListList>& subMap,
const Xfer<labelListList>& constructMap const Xfer<labelListList>& constructMap,
const bool subHasFlip = false,
const bool constructHasFlip = false
); );
//- Construct from components //- Construct from components
@ -387,7 +330,9 @@ public:
const Xfer<labelListList>& subMap, const Xfer<labelListList>& subMap,
const Xfer<labelListList>& constructMap, const Xfer<labelListList>& constructMap,
const Xfer<labelListList>& transformElements, const Xfer<labelListList>& transformElements,
const Xfer<labelList>& transformStart const Xfer<labelList>& transformStart,
const bool subHasFlip = false,
const bool constructHasFlip = false
); );
//- Construct from reverse addressing: per data item the send //- Construct from reverse addressing: per data item the send
@ -457,47 +402,22 @@ public:
//- Construct copy //- Construct copy
mapDistribute(const mapDistribute&); mapDistribute(const mapDistribute&);
//- Construct from Istream
mapDistribute(Istream&);
//- Clone
autoPtr<mapDistribute> clone() const;
//- Destructor
virtual ~mapDistribute()
{}
// Member Functions // Member Functions
// Access // Access
//- Constructed data size
label constructSize() const
{
return constructSize_;
}
//- Constructed data size
label& constructSize()
{
return constructSize_;
}
//- From subsetted data back to original data
const labelListList& subMap() const
{
return subMap_;
}
//- From subsetted data back to original data
labelListList& subMap()
{
return subMap_;
}
//- From subsetted data to new reconstructed data
const labelListList& constructMap() const
{
return constructMap_;
}
//- From subsetted data to new reconstructed data
labelListList& constructMap()
{
return constructMap_;
}
//- For every globalIndexAndTransform::transformPermutations //- For every globalIndexAndTransform::transformPermutations
// gives the elements that need to be transformed // gives the elements that need to be transformed
const labelListList& transformElements() const const labelListList& transformElements() const
@ -514,17 +434,6 @@ public:
//- Find transform from transformElements //- Find transform from transformElements
label whichTransform(const label index) const; label whichTransform(const label index) const;
//- Calculate a schedule. See above.
static List<labelPair> schedule
(
const labelListList& subMap,
const labelListList& constructMap,
const int tag
);
//- Return a schedule. Demand driven. See above.
const List<labelPair>& schedule() const;
// Other // Other
@ -534,53 +443,25 @@ public:
//- Transfer contents to the Xfer container //- Transfer contents to the Xfer container
Xfer<mapDistribute> xfer(); Xfer<mapDistribute> xfer();
//- Helper for construct from globalIndex. Renumbers element
// (in globalIndex numbering) into compact indices.
static label renumber
(
const globalIndex&,
const List<Map<label>>& compactMap,
const label globalElement
);
//- Compact maps. Gets per field a bool whether it is used (locally) //- Distribute data using default commsType.
// and works out itself what this side and sender side can remove
// from maps.
void compact
(
const boolList& elemIsUsed,
const int tag = UPstream::msgType()
);
//- Distribute data. Note:schedule only used for Pstream::scheduled
// for now, all others just use send-to-all, receive-from-all.
template<class T> template<class T>
static void distribute void distribute
( (
-            const Pstream::commsTypes commsType,
-            const List<labelPair>& schedule,
-            const label constructSize,
-            const labelListList& subMap,
-            const labelListList& constructMap,
-            List<T>&,
+            List<T>& fld,
+            const bool dummyTransform = true,
             const int tag = UPstream::msgType()
-        );
+        ) const;

-        //- Distribute data. If multiple processors writing to same
-        //  position adds contributions using cop.
-        template<class T, class CombineOp>
-        static void distribute
+        //- Distribute data using default commsType.
+        template<class T, class negateOp>
+        void distribute
         (
-            const Pstream::commsTypes commsType,
-            const List<labelPair>& schedule,
-            const label constructSize,
-            const labelListList& subMap,
-            const labelListList& constructMap,
-            List<T>&,
-            const CombineOp& cop,
-            const T& nullValue,
+            List<T>& fld,
+            const negateOp& negOp,
+            const bool dummyTransform = true,
             const int tag = UPstream::msgType()
-        );
+        ) const;

         //- Distribute data using default commsType.
         template<class T>
@@ -591,25 +472,6 @@ public:
             const int tag = UPstream::msgType()
         ) const;

-        //- Distribute data using default commsType.
-        template<class T>
-        void distribute
-        (
-            List<T>& fld,
-            const bool dummyTransform = true,
-            const int tag = UPstream::msgType()
-        ) const;
-
-        //- Same but with transforms
-        template<class T, class TransformOp>
-        void distribute
-        (
-            const globalIndexAndTransform&,
-            List<T>& fld,
-            const TransformOp& top,
-            const int tag = UPstream::msgType()
-        ) const;
-
         //- Reverse distribute data using default commsType.
         template<class T>
         void reverseDistribute
@@ -620,17 +482,6 @@ public:
             const int tag = UPstream::msgType()
         ) const;

-        //- Same but with transforms
-        template<class T, class TransformOp>
-        void reverseDistribute
-        (
-            const globalIndexAndTransform&,
-            const label constructSize,
-            List<T>& fld,
-            const TransformOp& top,
-            const int tag = UPstream::msgType()
-        ) const;
-
         //- Reverse distribute data using default commsType.
         //  Since constructSize might be larger than supplied size supply
         //  a nullValue
@@ -644,7 +495,28 @@ public:
             const int tag = UPstream::msgType()
         ) const;

-        //- Same but with transforms
+        //- Distribute with transforms
+        template<class T, class TransformOp>
+        void distribute
+        (
+            const globalIndexAndTransform&,
+            List<T>& fld,
+            const TransformOp& top,
+            const int tag = UPstream::msgType()
+        ) const;
+
+        //- Reverse distribute with transforms
+        template<class T, class TransformOp>
+        void reverseDistribute
+        (
+            const globalIndexAndTransform&,
+            const label constructSize,
+            List<T>& fld,
+            const TransformOp& top,
+            const int tag = UPstream::msgType()
+        ) const;
+
+        //- Reverse distribute with transforms
         template<class T, class TransformOp>
         void reverseDistribute
         (
@@ -656,13 +528,6 @@ public:
             const int tag = UPstream::msgType()
         ) const;

-        //- Do all sends using PstreamBuffers
-        template<class T>
-        void send(PstreamBuffers&, const List<T>&) const;
-
-        //- Do all receives using PstreamBuffers
-        template<class T>
-        void receive(PstreamBuffers&, List<T>&) const;
-
         //- Debug: print layout. Can only be used on maps with sorted
         //  storage (local data first, then non-local data)
         void printLayout(Ostream& os) const;
@@ -670,14 +535,16 @@ public:

         //- Correct for topo change.
         void updateMesh(const mapPolyMesh&)
         {
-            NotImplemented;
+            notImplemented
+            (
+                "mapDistribute::updateMesh(const mapPolyMesh&)"
+            );
         }

     // Member Operators

         void operator=(const mapDistribute&);

     // IOstream operators

         //- Read dictionary from Istream
@@ -689,6 +556,7 @@ public:
};

+// Template specialisation for primitives that do not need transform
template<>
void mapDistribute::transform::operator()
(

File diff suppressed because it is too large.


@ -0,0 +1,486 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::mapDistributeBase
Description
Class containing processor-to-processor mapping information.
We store mapping from the bits-to-send to the complete starting list
(subXXXMap) and from the received bits to their location in the new
list (constructXXXMap).
Note:
Schedule is a list of processor pairs (one send, one receive. One of
them will be myself) which forms a scheduled (i.e. non-buffered) exchange.
See distribute on how to use it.
Note2: number of items sent on one processor have to equal the number
of items received on the other processor.
To aid constructing these maps there are the constructors from global
numbering, either with or without transforms.
Constructors using compact numbering: layout is
- all my own elements first (whether used or not)
- followed by used-only remote elements sorted by remote processor.
So e.g 4 procs and on proc 1 the compact
table will first have all globalIndex.localSize() elements from proc1
followed by used-only elements of proc0, proc2, proc3.
The constructed mapDistributeBase sends the local elements from and
receives the remote elements into their compact position.
compactMap[proci] is the position of elements from proci in the compact
map. compactMap[myProcNo()] is empty since trivial addressing.
It rewrites the input global indices into indices into the constructed
data.
When constructing from components optionally a 'flip' on
the maps can be specified. This will interpret the map
values as index+flip, similar to e.g. faceProcAddressing. The flip
will only be applied to fieldTypes (scalar, vector, .. triad)
SourceFiles
mapDistributeBase.C
mapDistributeBaseTemplates.C
\*---------------------------------------------------------------------------*/
#ifndef mapDistributeBase_H
#define mapDistributeBase_H
#include "labelList.H"
#include "labelPair.H"
#include "Pstream.H"
#include "boolList.H"
#include "Map.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
class mapPolyMesh;
class globalIndex;
class PstreamBuffers;
/*---------------------------------------------------------------------------*\
Class mapDistributeBase Declaration
\*---------------------------------------------------------------------------*/
class mapDistributeBase
{
protected:
// Protected data
//- Size of reconstructed data
label constructSize_;
//- Maps from subsetted data back to original data
labelListList subMap_;
//- Maps from subsetted data to new reconstructed data
labelListList constructMap_;
//- Whether subMap includes flip or not
bool subHasFlip_;
//- Whether constructMap includes flip or not
bool constructHasFlip_;
//- Schedule
mutable autoPtr<List<labelPair>> schedulePtr_;
// Private Member Functions
static void checkReceivedSize
(
const label proci,
const label expectedSize,
const label receivedSize
);
//- Construct per processor compact addressing of the global elements
// needed. The ones from the local processor are not included since
// these are always all needed.
void calcCompactAddressing
(
const globalIndex& globalNumbering,
const labelList& elements,
List<Map<label>>& compactMap
) const;
void calcCompactAddressing
(
const globalIndex& globalNumbering,
const labelListList& elements,
List<Map<label>>& compactMap
) const;
void exchangeAddressing
(
const int tag,
const globalIndex& globalNumbering,
labelList& elements,
List<Map<label>>& compactMap,
labelList& compactStart
);
void exchangeAddressing
(
const int tag,
const globalIndex& globalNumbering,
labelListList& elements,
List<Map<label>>& compactMap,
labelList& compactStart
);
template<class T, class CombineOp, class negateOp>
static void flipAndCombine
(
const UList<label>& map,
const bool hasFlip,
const UList<T>& rhs,
const CombineOp& cop,
const negateOp& negOp,
List<T>& lhs
);
template<class T, class negateOp>
static T accessAndFlip
(
const UList<T>& fld,
const label index,
const bool hasFlip,
const negateOp& negOp
);
public:
// Declare name of the class and its debug switch
ClassName("mapDistributeBase");
// Constructors
//- Construct null
mapDistributeBase();
//- Construct from components
mapDistributeBase
(
const label constructSize,
const Xfer<labelListList>& subMap,
const Xfer<labelListList>& constructMap,
const bool subHasFlip = false,
const bool constructHasFlip = false
);
//- Construct from reverse addressing: per data item the send
// processor and the receive processor. (note: data is not stored
// sorted per processor so cannot use printLayout).
mapDistributeBase
(
const labelList& sendProcs,
const labelList& recvProcs
);
//- Construct from list of (possibly) remote elements in globalIndex
// numbering (or -1). Determines compact numbering (see above) and
// distribute map to get data into this ordering and renumbers the
// elements to be in compact numbering.
mapDistributeBase
(
const globalIndex&,
labelList& elements,
List<Map<label>>& compactMap,
const int tag = Pstream::msgType()
);
//- Special variant that works with the info sorted into bins
// according to local indices. E.g. think cellCells where
// cellCells[localCellI] is a list of global cells
mapDistributeBase
(
const globalIndex&,
labelListList& cellCells,
List<Map<label>>& compactMap,
const int tag = Pstream::msgType()
);
//- Construct by transferring parameter content
mapDistributeBase(const Xfer<mapDistributeBase>&);
//- Construct copy
mapDistributeBase(const mapDistributeBase&);
//- Construct from Istream
mapDistributeBase(Istream&);
// Member Functions
// Access
//- Constructed data size
label constructSize() const
{
return constructSize_;
}
//- Constructed data size
label& constructSize()
{
return constructSize_;
}
//- From subsetted data back to original data
const labelListList& subMap() const
{
return subMap_;
}
//- From subsetted data back to original data
labelListList& subMap()
{
return subMap_;
}
//- From subsetted data to new reconstructed data
const labelListList& constructMap() const
{
return constructMap_;
}
//- From subsetted data to new reconstructed data
labelListList& constructMap()
{
return constructMap_;
}
//- Does subMap include a sign
bool subHasFlip() const
{
return subHasFlip_;
}
//- Does subMap include a sign
bool& subHasFlip()
{
return subHasFlip_;
}
//- Does constructMap include a sign
bool constructHasFlip() const
{
return constructHasFlip_;
}
//- Does constructMap include a sign
bool& constructHasFlip()
{
return constructHasFlip_;
}
//- Calculate a schedule. See above.
static List<labelPair> schedule
(
const labelListList& subMap,
const labelListList& constructMap,
const int tag
);
//- Return a schedule. Demand driven. See above.
const List<labelPair>& schedule() const;
// Other
//- Transfer the contents of the argument and annul the argument.
void transfer(mapDistributeBase&);
//- Transfer contents to the Xfer container
Xfer<mapDistributeBase> xfer();
//- Helper for construct from globalIndex. Renumbers element
// (in globalIndex numbering) into compact indices.
static label renumber
(
const globalIndex&,
const List<Map<label>>& compactMap,
const label globalElement
);
//- Compact maps. Gets per field a bool whether it is used (locally)
// and works out itself what this side and sender side can remove
// from maps. Only compacts non-local elements (i.e. the stuff
// that gets sent over), does not change the local layout
void compact
(
const boolList& elemIsUsed,
const int tag = UPstream::msgType()
);
//- Compact all maps and layout. Returns compaction maps for
// subMap and constructMap
void compact
(
const boolList& elemIsUsed,
const label localSize, // max index for subMap
labelList& oldToNewSub,
labelList& oldToNewConstruct,
const int tag = UPstream::msgType()
);
//- Distribute data. Note:schedule only used for Pstream::scheduled
// for now, all others just use send-to-all, receive-from-all.
template<class T, class negateOp>
static void distribute
(
const Pstream::commsTypes commsType,
const List<labelPair>& schedule,
const label constructSize,
const labelListList& subMap,
const bool subHasFlip,
const labelListList& constructMap,
const bool constructHasFlip,
List<T>&,
const negateOp& negOp,
const int tag = UPstream::msgType()
);
//- Distribute data. If multiple processors writing to same
// position adds contributions using cop.
template<class T, class CombineOp, class negateOp>
static void distribute
(
const Pstream::commsTypes commsType,
const List<labelPair>& schedule,
const label constructSize,
const labelListList& subMap,
const bool subHasFlip,
const labelListList& constructMap,
const bool constructHasFlip,
List<T>&,
const CombineOp& cop,
const negateOp& negOp,
const T& nullValue,
const int tag = UPstream::msgType()
);
//- Distribute data using default commsType.
template<class T>
void distribute
(
List<T>& fld,
const int tag = UPstream::msgType()
) const;
//- Distribute data using default commsType.
template<class T, class negateOp>
void distribute
(
List<T>& fld,
const negateOp& negOp,
const int tag = UPstream::msgType()
) const;
//- Distribute data using default commsType.
template<class T>
void distribute
(
DynamicList<T>& fld,
const int tag = UPstream::msgType()
) const;
//- Reverse distribute data using default commsType.
template<class T>
void reverseDistribute
(
const label constructSize,
List<T>&,
const int tag = UPstream::msgType()
) const;
//- Reverse distribute data using default commsType.
// Since constructSize might be larger than supplied size supply
// a nullValue
template<class T>
void reverseDistribute
(
const label constructSize,
const T& nullValue,
List<T>& fld,
const int tag = UPstream::msgType()
) const;
//- Do all sends using PstreamBuffers
template<class T>
void send(PstreamBuffers&, const List<T>&) const;
//- Do all receives using PstreamBuffers
template<class T>
void receive(PstreamBuffers&, List<T>&) const;
//- Debug: print layout. Can only be used on maps with sorted
// storage (local data first, then non-local data)
void printLayout(Ostream& os) const;
//- Correct for topo change.
void updateMesh(const mapPolyMesh&)
{
NotImplemented;
}
// Member Operators
void operator=(const mapDistributeBase&);
// IOstream operators
//- Read dictionary from Istream
friend Istream& operator>>(Istream&, mapDistributeBase&);
//- Write dictionary to Ostream
friend Ostream& operator<<(Ostream&, const mapDistributeBase&);
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "mapDistributeBaseTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
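The flip convention described in the header above (map values interpreted as index plus flip, in the manner of faceProcAddressing) is easiest to see on a toy example. The sketch below is illustrative only and is not part of the commit; it uses plain C++ containers instead of the OpenFOAM ones and assumes the sign*(index+1) encoding, with zero invalid, that the flip-aware code in this commit works with.

    // Illustrative sketch (not from the OpenFOAM sources): decode an
    // index+flip entry and negate the fetched value when flipped.
    #include <cassert>
    #include <cstdlib>
    #include <vector>

    // Decode sign*(index+1) into a zero-based index and a flip flag.
    inline int decodeFlipIndex(int encoded, bool& flipped)
    {
        assert(encoded != 0 && "zero is not a valid index+flip value");
        flipped = (encoded < 0);
        return std::abs(encoded) - 1;
    }

    int main()
    {
        // Source data, e.g. one signed (oriented) value per face
        const std::vector<double> fld = {1.5, -2.0, 3.25};

        // A subMap with flip bits: send fld[0] as-is and fld[2] flipped
        const std::vector<int> subMap = {1, -3};

        std::vector<double> sent;
        for (int code : subMap)
        {
            bool flipped = false;
            const int index = decodeFlipIndex(code, flipped);

            // flipOp-like behaviour: negate oriented quantities on flip
            sent.push_back(flipped ? -fld[index] : fld[index]);
        }

        // sent now holds {1.5, -3.25}
        return 0;
    }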


@@ -33,6 +33,8 @@ void Foam::mapDistributePolyMesh::calcPatchSizes()
{
    oldPatchSizes_.setSize(oldPatchStarts_.size());

+    if (oldPatchStarts_.size())
+    {
        // Calculate old patch sizes
        for (label patchi = 0; patchi < oldPatchStarts_.size() - 1; patchi++)
        {
@@ -51,11 +53,27 @@ void Foam::mapDistributePolyMesh::calcPatchSizes()
                << "Calculated negative old patch size:" << oldPatchSizes_ << nl
                << "Error in mapping data" << abort(FatalError);
        }
+    }
}


// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //
Foam::mapDistributePolyMesh::mapDistributePolyMesh()
:
nOldPoints_(0),
nOldFaces_(0),
nOldCells_(0),
oldPatchSizes_(0),
oldPatchStarts_(0),
oldPatchNMeshPoints_(0),
pointMap_(),
faceMap_(),
cellMap_(),
patchMap_()
{}
Foam::mapDistributePolyMesh::mapDistributePolyMesh
(
    const polyMesh& mesh,
@@ -77,10 +95,12 @@ Foam::mapDistributePolyMesh::mapDistributePolyMesh
    const Xfer<labelListList>& constructPointMap,
    const Xfer<labelListList>& constructFaceMap,
    const Xfer<labelListList>& constructCellMap,
-    const Xfer<labelListList>& constructPatchMap
+    const Xfer<labelListList>& constructPatchMap,
+    const bool subFaceHasFlip,
+    const bool constructFaceHasFlip
)
:
-    mesh_(mesh),
    nOldPoints_(nOldPoints),
    nOldFaces_(nOldFaces),
    nOldCells_(nOldCells),
@@ -88,7 +108,14 @@ Foam::mapDistributePolyMesh::mapDistributePolyMesh
    oldPatchStarts_(oldPatchStarts),
    oldPatchNMeshPoints_(oldPatchNMeshPoints),
    pointMap_(mesh.nPoints(), subPointMap, constructPointMap),
-    faceMap_(mesh.nFaces(), subFaceMap, constructFaceMap),
+    faceMap_
+    (
+        mesh.nFaces(),
+        subFaceMap,
+        constructFaceMap,
+        subFaceHasFlip,
+        constructFaceHasFlip
+    ),
    cellMap_(mesh.nCells(), subCellMap, constructCellMap),
    patchMap_(mesh.boundaryMesh().size(), subPatchMap, constructPatchMap)
{
@@ -96,8 +123,84 @@ Foam::mapDistributePolyMesh::mapDistributePolyMesh
}
Foam::mapDistributePolyMesh::mapDistributePolyMesh
(
// mesh before changes
const label nOldPoints,
const label nOldFaces,
const label nOldCells,
const Xfer<labelList>& oldPatchStarts,
const Xfer<labelList>& oldPatchNMeshPoints,
// how to transfer pieces of mesh
const Xfer<mapDistribute>& pointMap,
const Xfer<mapDistribute>& faceMap,
const Xfer<mapDistribute>& cellMap,
const Xfer<mapDistribute>& patchMap
)
:
nOldPoints_(nOldPoints),
nOldFaces_(nOldFaces),
nOldCells_(nOldCells),
oldPatchSizes_(oldPatchStarts().size()),
oldPatchStarts_(oldPatchStarts),
oldPatchNMeshPoints_(oldPatchNMeshPoints),
pointMap_(pointMap),
faceMap_(faceMap),
cellMap_(cellMap),
patchMap_(patchMap)
{
calcPatchSizes();
}
Foam::mapDistributePolyMesh::mapDistributePolyMesh
(
const Xfer<mapDistributePolyMesh>& map
)
:
nOldPoints_(map().nOldPoints_),
nOldFaces_(map().nOldFaces_),
nOldCells_(map().nOldCells_),
oldPatchSizes_(map().oldPatchSizes_.xfer()),
oldPatchStarts_(map().oldPatchStarts_.xfer()),
oldPatchNMeshPoints_(map().oldPatchNMeshPoints_.xfer()),
pointMap_(map().pointMap_.xfer()),
faceMap_(map().faceMap_.xfer()),
cellMap_(map().cellMap_.xfer()),
patchMap_(map().patchMap_.xfer())
{}
Foam::mapDistributePolyMesh::mapDistributePolyMesh(Istream& is)
{
is >> *this;
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void Foam::mapDistributePolyMesh::transfer(mapDistributePolyMesh& rhs)
{
nOldPoints_ = rhs.nOldPoints_;
nOldFaces_ = rhs.nOldFaces_;
nOldCells_ = rhs.nOldCells_;
oldPatchSizes_.transfer(rhs.oldPatchSizes_);
oldPatchStarts_.transfer(rhs.oldPatchStarts_);
oldPatchNMeshPoints_.transfer(rhs.oldPatchNMeshPoints_);
pointMap_.transfer(rhs.pointMap_);
faceMap_.transfer(rhs.faceMap_);
cellMap_.transfer(rhs.cellMap_);
patchMap_.transfer(rhs.patchMap_);
}
Foam::Xfer<Foam::mapDistributePolyMesh> Foam::mapDistributePolyMesh::xfer()
{
return xferMove(*this);
}
void Foam::mapDistributePolyMesh::distributePointIndices(labelList& lst) const void Foam::mapDistributePolyMesh::distributePointIndices(labelList& lst) const
{ {
// Construct boolList from selected elements // Construct boolList from selected elements
@ -186,10 +289,61 @@ void Foam::mapDistributePolyMesh::distributePatchIndices(labelList& lst) const
} }
-// * * * * * * * * * * * * * * * Friend Functions  * * * * * * * * * * * * * //
+// * * * * * * * * * * * * * * * Member Operators  * * * * * * * * * * * * * //
void Foam::mapDistributePolyMesh::operator=(const mapDistributePolyMesh& rhs)
{
nOldPoints_ = rhs.nOldPoints_;
nOldFaces_ = rhs.nOldFaces_;
nOldCells_ = rhs.nOldCells_;
oldPatchSizes_ = rhs.oldPatchSizes_;
oldPatchStarts_ = rhs.oldPatchStarts_;
oldPatchNMeshPoints_ = rhs.oldPatchNMeshPoints_;
pointMap_ = rhs.pointMap_;
faceMap_ = rhs.faceMap_;
cellMap_ = rhs.cellMap_;
patchMap_ = rhs.patchMap_;
}
-// * * * * * * * * * * * * * * * Friend Operators * * * * * * * * * * * * * //
+// * * * * * * * * * * * * * * Istream Operator  * * * * * * * * * * * * * * //
Foam::Istream& Foam::operator>>(Istream& is, mapDistributePolyMesh& map)
{
is.fatalCheck("operator>>(Istream&, mapDistributePolyMesh&)");
is >> map.nOldPoints_
>> map.nOldFaces_
>> map.nOldCells_
>> map.oldPatchSizes_
>> map.oldPatchStarts_
>> map.oldPatchNMeshPoints_
>> map.pointMap_
>> map.faceMap_
>> map.cellMap_
>> map.patchMap_;
return is;
}
// * * * * * * * * * * * * * * Ostream Operator * * * * * * * * * * * * * * //
Foam::Ostream& Foam::operator<<(Ostream& os, const mapDistributePolyMesh& map)
{
os << map.nOldPoints_
<< token::SPACE << map.nOldFaces_
<< token::SPACE << map.nOldCells_ << token::NL
<< map.oldPatchSizes_ << token::NL
<< map.oldPatchStarts_ << token::NL
<< map.oldPatchNMeshPoints_ << token::NL
<< map.pointMap_ << token::NL
<< map.faceMap_ << token::NL
<< map.cellMap_ << token::NL
<< map.patchMap_;
return os;
}
// ************************************************************************* // // ************************************************************************* //


@@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
-\\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation
+\\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@@ -59,39 +59,36 @@ class mapDistributePolyMesh
{
    // Private data

-        const polyMesh& mesh_;

        //- Number of old live points
-        const label nOldPoints_;
+        label nOldPoints_;

        //- Number of old live faces
-        const label nOldFaces_;
+        label nOldFaces_;

        //- Number of old live cells
-        const label nOldCells_;
+        label nOldCells_;

        //- List of the old patch sizes
        labelList oldPatchSizes_;

        //- List of the old patch start labels
-        const labelList oldPatchStarts_;
+        labelList oldPatchStarts_;

        //- List of numbers of mesh points per old patch
-        const labelList oldPatchNMeshPoints_;
+        labelList oldPatchNMeshPoints_;

        //- Point distribute map
-        const mapDistribute pointMap_;
+        mapDistribute pointMap_;

        //- Face distribute map
-        const mapDistribute faceMap_;
+        mapDistribute faceMap_;

        //- Cell distribute map
-        const mapDistribute cellMap_;
+        mapDistribute cellMap_;

        //- Patch distribute map
-        const mapDistribute patchMap_;
+        mapDistribute patchMap_;


    // Private Member Functions
@ -101,14 +98,14 @@ class mapDistributePolyMesh
//- Disallow default bitwise copy construct //- Disallow default bitwise copy construct
mapDistributePolyMesh(const mapDistributePolyMesh&); mapDistributePolyMesh(const mapDistributePolyMesh&);
//- Disallow default bitwise assignment
void operator=(const mapDistributePolyMesh&);
public: public:
// Constructors // Constructors
//- Construct null
mapDistributePolyMesh();
//- Construct from components. Note that mesh has to be changed already //- Construct from components. Note that mesh has to be changed already
// since uses mesh.nPoints etc as the new size. // since uses mesh.nPoints etc as the new size.
mapDistributePolyMesh mapDistributePolyMesh
@ -132,19 +129,40 @@ public:
const Xfer<labelListList>& constructPointMap, const Xfer<labelListList>& constructPointMap,
const Xfer<labelListList>& constructFaceMap, const Xfer<labelListList>& constructFaceMap,
const Xfer<labelListList>& constructCellMap, const Xfer<labelListList>& constructCellMap,
const Xfer<labelListList>& constructPatchMap const Xfer<labelListList>& constructPatchMap,
const bool subFaceHasFlip = false,
const bool constructFaceHasFlip = false
); );
//- Construct from components
mapDistributePolyMesh
(
// mesh before changes
const label nOldPoints,
const label nOldFaces,
const label nOldCells,
const Xfer<labelList>& oldPatchStarts,
const Xfer<labelList>& oldPatchNMeshPoints,
// how to subset pieces of mesh to send across
const Xfer<mapDistribute>& pointMap,
const Xfer<mapDistribute>& faceMap,
const Xfer<mapDistribute>& cellMap,
const Xfer<mapDistribute>& patchMap
);
//- Construct by transferring parameter content
mapDistributePolyMesh(const Xfer<mapDistributePolyMesh>&);
//- Construct from Istream
mapDistributePolyMesh(Istream&);
// Member Functions // Member Functions
// Access // Access
const polyMesh& mesh() const
{
return mesh_;
}
//- Number of points in mesh before distribution //- Number of points in mesh before distribution
label nOldPoints() const label nOldPoints() const
{ {
@ -206,7 +224,13 @@ public:
} }
// Edit // Other
//- Transfer the contents of the argument and annul the argument.
void transfer(mapDistributePolyMesh&);
//- Transfer contents to the Xfer container
Xfer<mapDistributePolyMesh> xfer();
//- Distribute list of point data //- Distribute list of point data
template<class T> template<class T>
@ -251,6 +275,19 @@ public:
{ {
NotImplemented; NotImplemented;
} }
// Member operators
void operator=(const mapDistributePolyMesh&);
// IOstream operators
//- Read dictionary from Istream
friend Istream& operator>>(Istream&, mapDistributePolyMesh&);
//- Write dictionary to Ostream
friend Ostream& operator<<(Ostream&, const mapDistributePolyMesh&);
}; };


@ -28,759 +28,10 @@ License
#include "PstreamCombineReduceOps.H" #include "PstreamCombineReduceOps.H"
#include "globalIndexAndTransform.H" #include "globalIndexAndTransform.H"
#include "transformField.H" #include "transformField.H"
#include "flipOp.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
template<class T>
void Foam::mapDistribute::distribute
(
const Pstream::commsTypes commsType,
const List<labelPair>& schedule,
const label constructSize,
const labelListList& subMap,
const labelListList& constructMap,
List<T>& field,
const int tag
)
{
if (!Pstream::parRun())
{
// Do only me to me.
const labelList& mySubMap = subMap[Pstream::myProcNo()];
List<T> subField(mySubMap.size());
forAll(mySubMap, i)
{
subField[i] = field[mySubMap[i]];
}
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
field.setSize(constructSize);
forAll(map, i)
{
field[map[i]] = subField[i];
}
return;
}
if (commsType == Pstream::blocking)
{
// Since buffered sending can reuse the field to collect the
// received data.
// Send sub field to neighbour
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
OPstream toNbr(Pstream::blocking, domain, 0, tag);
toNbr << UIndirectList<T>(field, map);
}
}
// Subset myself
const labelList& mySubMap = subMap[Pstream::myProcNo()];
List<T> subField(mySubMap.size());
forAll(mySubMap, i)
{
subField[i] = field[mySubMap[i]];
}
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
field.setSize(constructSize);
forAll(map, i)
{
field[map[i]] = subField[i];
}
// Receive sub field from neighbour
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
IPstream fromNbr(Pstream::blocking, domain, 0, tag);
List<T> subField(fromNbr);
checkReceivedSize(domain, map.size(), subField.size());
forAll(map, i)
{
field[map[i]] = subField[i];
}
}
}
}
else if (commsType == Pstream::scheduled)
{
// Need to make sure I don't overwrite field with received data
// since the data might need to be sent to another processor. So
// allocate a new field for the results.
List<T> newField(constructSize);
// Subset myself
UIndirectList<T> subField(field, subMap[Pstream::myProcNo()]);
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
forAll(map, i)
{
newField[map[i]] = subField[i];
}
// Schedule will already have pruned 0-sized comms
forAll(schedule, i)
{
const labelPair& twoProcs = schedule[i];
// twoProcs is a swap pair of processors. The first one is the
// one that needs to send first and then receive.
label sendProc = twoProcs[0];
label recvProc = twoProcs[1];
if (Pstream::myProcNo() == sendProc)
{
// I am send first, receive next
{
OPstream toNbr(Pstream::scheduled, recvProc, 0, tag);
toNbr << UIndirectList<T>(field, subMap[recvProc]);
}
{
IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag);
List<T> subField(fromNbr);
const labelList& map = constructMap[recvProc];
checkReceivedSize(recvProc, map.size(), subField.size());
forAll(map, i)
{
newField[map[i]] = subField[i];
}
}
}
else
{
// I am receive first, send next
{
IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag);
List<T> subField(fromNbr);
const labelList& map = constructMap[sendProc];
checkReceivedSize(sendProc, map.size(), subField.size());
forAll(map, i)
{
newField[map[i]] = subField[i];
}
}
{
OPstream toNbr(Pstream::scheduled, sendProc, 0, tag);
toNbr << UIndirectList<T>(field, subMap[sendProc]);
}
}
}
field.transfer(newField);
}
else if (commsType == Pstream::nonBlocking)
{
label nOutstanding = Pstream::nRequests();
if (!contiguous<T>())
{
PstreamBuffers pBufs(Pstream::nonBlocking, tag);
// Stream data into buffer
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
// Put data into send buffer
UOPstream toDomain(domain, pBufs);
toDomain << UIndirectList<T>(field, map);
}
}
// Start receiving. Do not block.
pBufs.finishedSends(false);
{
// Set up 'send' to myself
const labelList& mySubMap = subMap[Pstream::myProcNo()];
List<T> mySubField(mySubMap.size());
forAll(mySubMap, i)
{
mySubField[i] = field[mySubMap[i]];
}
// Combine bits. Note that can reuse field storage
field.setSize(constructSize);
// Receive sub field from myself
{
const labelList& map = constructMap[Pstream::myProcNo()];
forAll(map, i)
{
field[map[i]] = mySubField[i];
}
}
}
// Block ourselves, waiting only for the current comms
Pstream::waitRequests(nOutstanding);
// Consume
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
UIPstream str(domain, pBufs);
List<T> recvField(str);
checkReceivedSize(domain, map.size(), recvField.size());
forAll(map, i)
{
field[map[i]] = recvField[i];
}
}
}
}
else
{
// Set up sends to neighbours
List<List<T >> sendFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
List<T>& subField = sendFields[domain];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
OPstream::write
(
Pstream::nonBlocking,
domain,
reinterpret_cast<const char*>(subField.begin()),
subField.byteSize(),
tag
);
}
}
// Set up receives from neighbours
List<List<T >> recvFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
recvFields[domain].setSize(map.size());
IPstream::read
(
Pstream::nonBlocking,
domain,
reinterpret_cast<char*>(recvFields[domain].begin()),
recvFields[domain].byteSize(),
tag
);
}
}
// Set up 'send' to myself
{
const labelList& map = subMap[Pstream::myProcNo()];
List<T>& subField = sendFields[Pstream::myProcNo()];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
}
// Combine bits. Note that can reuse field storage
field.setSize(constructSize);
// Receive sub field from myself (sendFields[Pstream::myProcNo()])
{
const labelList& map = constructMap[Pstream::myProcNo()];
const List<T>& subField = sendFields[Pstream::myProcNo()];
forAll(map, i)
{
field[map[i]] = subField[i];
}
}
// Wait for all to finish
Pstream::waitRequests(nOutstanding);
// Collect neighbour fields
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
const List<T>& subField = recvFields[domain];
checkReceivedSize(domain, map.size(), subField.size());
forAll(map, i)
{
field[map[i]] = subField[i];
}
}
}
}
}
else
{
FatalErrorInFunction
<< "Unknown communication schedule " << commsType
<< abort(FatalError);
}
}
template<class T, class CombineOp>
void Foam::mapDistribute::distribute
(
const Pstream::commsTypes commsType,
const List<labelPair>& schedule,
const label constructSize,
const labelListList& subMap,
const labelListList& constructMap,
List<T>& field,
const CombineOp& cop,
const T& nullValue,
const int tag
)
{
if (!Pstream::parRun())
{
// Do only me to me.
const labelList& mySubMap = subMap[Pstream::myProcNo()];
List<T> subField(mySubMap.size());
forAll(mySubMap, i)
{
subField[i] = field[mySubMap[i]];
}
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
field.setSize(constructSize);
field = nullValue;
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
return;
}
if (commsType == Pstream::blocking)
{
// Since buffered sending can reuse the field to collect the
// received data.
// Send sub field to neighbour
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
OPstream toNbr(Pstream::blocking, domain, 0, tag);
toNbr << UIndirectList<T>(field, map);
}
}
// Subset myself
const labelList& mySubMap = subMap[Pstream::myProcNo()];
List<T> subField(mySubMap.size());
forAll(mySubMap, i)
{
subField[i] = field[mySubMap[i]];
}
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
field.setSize(constructSize);
field = nullValue;
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
// Receive sub field from neighbour
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
IPstream fromNbr(Pstream::blocking, domain, 0, tag);
List<T> subField(fromNbr);
checkReceivedSize(domain, map.size(), subField.size());
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
}
}
}
else if (commsType == Pstream::scheduled)
{
// Need to make sure I don't overwrite field with received data
// since the data might need to be sent to another processor. So
// allocate a new field for the results.
List<T> newField(constructSize, nullValue);
// Subset myself
UIndirectList<T> subField(field, subMap[Pstream::myProcNo()]);
// Receive sub field from myself (subField)
const labelList& map = constructMap[Pstream::myProcNo()];
forAll(map, i)
{
cop(newField[map[i]], subField[i]);
}
// Schedule will already have pruned 0-sized comms
forAll(schedule, i)
{
const labelPair& twoProcs = schedule[i];
// twoProcs is a swap pair of processors. The first one is the
// one that needs to send first and then receive.
label sendProc = twoProcs[0];
label recvProc = twoProcs[1];
if (Pstream::myProcNo() == sendProc)
{
// I am send first, receive next
{
OPstream toNbr(Pstream::scheduled, recvProc, 0, tag);
toNbr << UIndirectList<T>(field, subMap[recvProc]);
}
{
IPstream fromNbr(Pstream::scheduled, recvProc, 0, tag);
List<T> subField(fromNbr);
const labelList& map = constructMap[recvProc];
checkReceivedSize(recvProc, map.size(), subField.size());
forAll(map, i)
{
cop(newField[map[i]], subField[i]);
}
}
}
else
{
// I am receive first, send next
{
IPstream fromNbr(Pstream::scheduled, sendProc, 0, tag);
List<T> subField(fromNbr);
const labelList& map = constructMap[sendProc];
checkReceivedSize(sendProc, map.size(), subField.size());
forAll(map, i)
{
cop(newField[map[i]], subField[i]);
}
}
{
OPstream toNbr(Pstream::scheduled, sendProc, 0, tag);
toNbr << UIndirectList<T>(field, subMap[sendProc]);
}
}
}
field.transfer(newField);
}
else if (commsType == Pstream::nonBlocking)
{
label nOutstanding = Pstream::nRequests();
if (!contiguous<T>())
{
PstreamBuffers pBufs(Pstream::nonBlocking, tag);
// Stream data into buffer
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
// Put data into send buffer
UOPstream toDomain(domain, pBufs);
toDomain << UIndirectList<T>(field, map);
}
}
// Start receiving. Do not block.
pBufs.finishedSends(false);
{
// Set up 'send' to myself
List<T> mySubField(field, subMap[Pstream::myProcNo()]);
// Combine bits. Note that can reuse field storage
field.setSize(constructSize);
field = nullValue;
// Receive sub field from myself
{
const labelList& map = constructMap[Pstream::myProcNo()];
forAll(map, i)
{
cop(field[map[i]], mySubField[i]);
}
}
}
// Block ourselves, waiting only for the current comms
Pstream::waitRequests(nOutstanding);
// Consume
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
UIPstream str(domain, pBufs);
List<T> recvField(str);
checkReceivedSize(domain, map.size(), recvField.size());
forAll(map, i)
{
cop(field[map[i]], recvField[i]);
}
}
}
}
else
{
// Set up sends to neighbours
List<List<T >> sendFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
List<T>& subField = sendFields[domain];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
OPstream::write
(
Pstream::nonBlocking,
domain,
reinterpret_cast<const char*>(subField.begin()),
subField.size()*sizeof(T),
tag
);
}
}
// Set up receives from neighbours
List<List<T >> recvFields(Pstream::nProcs());
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
recvFields[domain].setSize(map.size());
UIPstream::read
(
Pstream::nonBlocking,
domain,
reinterpret_cast<char*>(recvFields[domain].begin()),
recvFields[domain].size()*sizeof(T),
tag
);
}
}
// Set up 'send' to myself
{
const labelList& map = subMap[Pstream::myProcNo()];
List<T>& subField = sendFields[Pstream::myProcNo()];
subField.setSize(map.size());
forAll(map, i)
{
subField[i] = field[map[i]];
}
}
// Combine bits. Note that can reuse field storage
field.setSize(constructSize);
field = nullValue;
// Receive sub field from myself (subField)
{
const labelList& map = constructMap[Pstream::myProcNo()];
const List<T>& subField = sendFields[Pstream::myProcNo()];
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
}
// Wait for all to finish
Pstream::waitRequests(nOutstanding);
// Collect neighbour fields
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap[domain];
if (domain != Pstream::myProcNo() && map.size())
{
const List<T>& subField = recvFields[domain];
checkReceivedSize(domain, map.size(), subField.size());
forAll(map, i)
{
cop(field[map[i]], subField[i]);
}
}
}
}
}
else
{
FatalErrorInFunction
<< "Unknown communication schedule " << commsType
<< abort(FatalError);
}
}
template<class T>
void Foam::mapDistribute::send(PstreamBuffers& pBufs, const List<T>& field)
const
{
// Stream data into buffer
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = subMap_[domain];
if (map.size())
{
// Put data into send buffer
UOPstream toDomain(domain, pBufs);
toDomain << UIndirectList<T>(field, map);
}
}
// Start sending and receiving but do not block.
pBufs.finishedSends(false);
}
template<class T>
void Foam::mapDistribute::receive(PstreamBuffers& pBufs, List<T>& field) const
{
// Consume
field.setSize(constructSize_);
for (label domain = 0; domain < Pstream::nProcs(); domain++)
{
const labelList& map = constructMap_[domain];
if (map.size())
{
UIPstream str(domain, pBufs);
List<T> recvField(str);
if (recvField.size() != map.size())
{
FatalErrorInFunction
<< "Expected from processor " << domain
<< " " << map.size() << " but received "
<< recvField.size() << " elements."
<< abort(FatalError);
}
forAll(map, i)
{
field[map[i]] = recvField[i];
}
}
}
}
template<class T> template<class T>
void Foam::mapDistribute::applyDummyTransforms(List<T>& field) const void Foam::mapDistribute::applyDummyTransforms(List<T>& field) const
{ {
@ -874,6 +125,37 @@ void Foam::mapDistribute::applyInverseTransforms
} }
template<class T, class negateOp>
void Foam::mapDistribute::distribute
(
List<T>& fld,
const negateOp& negOp,
const bool dummyTransform,
const int tag
) const
{
mapDistributeBase::distribute(fld, negOp, tag);
//- Fill in transformed slots with copies
if (dummyTransform)
{
applyDummyTransforms(fld);
}
}
template<class T>
void Foam::mapDistribute::distribute
(
List<T>& fld,
const bool dummyTransform,
const int tag
) const
{
distribute(fld, flipOp(), dummyTransform, tag);
}
template<class T> template<class T>
void Foam::mapDistribute::distribute void Foam::mapDistribute::distribute
( (
@ -892,62 +174,6 @@ void Foam::mapDistribute::distribute
} }
template<class T>
void Foam::mapDistribute::distribute
(
List<T>& fld,
const bool dummyTransform,
const int tag
) const
{
if (Pstream::defaultCommsType == Pstream::nonBlocking)
{
distribute
(
Pstream::nonBlocking,
List<labelPair>(),
constructSize_,
subMap_,
constructMap_,
fld,
tag
);
}
else if (Pstream::defaultCommsType == Pstream::scheduled)
{
distribute
(
Pstream::scheduled,
schedule(),
constructSize_,
subMap_,
constructMap_,
fld,
tag
);
}
else
{
distribute
(
Pstream::blocking,
List<labelPair>(),
constructSize_,
subMap_,
constructMap_,
fld,
tag
);
}
// Fill in transformed slots with copies
if (dummyTransform)
{
applyDummyTransforms(fld);
}
}
template<class T> template<class T>
void Foam::mapDistribute::reverseDistribute void Foam::mapDistribute::reverseDistribute
( (
@@ -962,45 +188,7 @@ void Foam::mapDistribute::reverseDistribute
        applyDummyInverseTransforms(fld);
    }

-    if (Pstream::defaultCommsType == Pstream::nonBlocking)
+    mapDistributeBase::reverseDistribute(constructSize, fld, tag);
{
distribute
(
Pstream::nonBlocking,
List<labelPair>(),
constructSize,
constructMap_,
subMap_,
fld,
tag
);
}
else if (Pstream::defaultCommsType == Pstream::scheduled)
{
distribute
(
Pstream::scheduled,
schedule(),
constructSize,
constructMap_,
subMap_,
fld,
tag
);
}
else
{
distribute
(
Pstream::blocking,
List<labelPair>(),
constructSize,
constructMap_,
subMap_,
fld,
tag
);
}
} }
@@ -1019,51 +207,7 @@ void Foam::mapDistribute::reverseDistribute
        applyDummyInverseTransforms(fld);
    }

-    if (Pstream::defaultCommsType == Pstream::nonBlocking)
+    mapDistributeBase::reverseDistribute(constructSize, nullValue, fld, tag);
{
distribute
(
Pstream::nonBlocking,
List<labelPair>(),
constructSize,
constructMap_,
subMap_,
fld,
eqOp<T>(),
nullValue,
tag
);
}
else if (Pstream::defaultCommsType == Pstream::scheduled)
{
distribute
(
Pstream::scheduled,
schedule(),
constructSize,
constructMap_,
subMap_,
fld,
eqOp<T>(),
nullValue,
tag
);
}
else
{
distribute
(
Pstream::blocking,
List<labelPair>(),
constructSize,
constructMap_,
subMap_,
fld,
eqOp<T>(),
nullValue,
tag
);
}
} }

View File

@ -29,11 +29,9 @@ License
#include "polyMesh.H" #include "polyMesh.H"
#include "demandDrivenData.H" #include "demandDrivenData.H"
#include "OFstream.H" #include "OFstream.H"
#include "patchZones.H"
#include "matchPoints.H" #include "matchPoints.H"
#include "EdgeMap.H" #include "EdgeMap.H"
#include "Time.H" #include "Time.H"
#include "diagTensor.H"
#include "transformField.H" #include "transformField.H"
#include "SubField.H" #include "SubField.H"
#include "unitConversion.H" #include "unitConversion.H"


@@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
-\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
+\\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License


@@ -164,7 +164,8 @@ public:
            const polyMesh&,
            UList<T>&,
            const CombineOp& cop,
-            const TransformOp& top
+            const TransformOp& top,
+            const bool parRun = Pstream::parRun()
        );
@@ -556,7 +557,8 @@ public:
        (
            const polyMesh& mesh,
            PackedList<nBits>& faceValues,
-            const CombineOp& cop
+            const CombineOp& cop,
+            const bool parRun = Pstream::parRun()
        );

        template<unsigned nBits>


@@ -1284,7 +1284,8 @@ void Foam::syncTools::syncBoundaryFaceList
    const polyMesh& mesh,
    UList<T>& faceValues,
    const CombineOp& cop,
-    const TransformOp& top
+    const TransformOp& top,
+    const bool parRun
)
{
    const label nBFaces = mesh.nFaces() - mesh.nInternalFaces();
@@ -1299,7 +1300,7 @@ void Foam::syncTools::syncBoundaryFaceList
    const polyBoundaryMesh& patches = mesh.boundaryMesh();

-    if (Pstream::parRun())
+    if (parRun)
    {
        PstreamBuffers pBufs(Pstream::nonBlocking);
@@ -1405,7 +1406,8 @@ void Foam::syncTools::syncFaceList
(
    const polyMesh& mesh,
    PackedList<nBits>& faceValues,
-    const CombineOp& cop
+    const CombineOp& cop,
+    const bool parRun
)
{
    if (faceValues.size() != mesh.nFaces())
@@ -1418,7 +1420,7 @@ void Foam::syncTools::syncFaceList
    const polyBoundaryMesh& patches = mesh.boundaryMesh();

-    if (Pstream::parRun())
+    if (parRun)
    {
        PstreamBuffers pBufs(Pstream::nonBlocking);


@@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
-\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
+\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@@ -23,53 +23,53 @@ License
\*---------------------------------------------------------------------------*/

-#include "ptscotchDecomp.H"
+#include "flipOp.H"

// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

-// Insert at front of list
-template<class Type>
-void Foam::ptscotchDecomp::prepend
-(
-    const UList<Type>& extraLst,
-    List<Type>& lst
-)
-{
-    label nExtra = extraLst.size();
-
-    // Make space for initial elements
-    lst.setSize(lst.size() + nExtra);
-    for (label i = lst.size()-1; i >= nExtra; i--)
-    {
-        lst[i] = lst[i-nExtra];
-    }
-
-    // Insert at front
-    forAll(extraLst, i)
-    {
-        lst[i] = extraLst[i];
-    }
-}
-
-// Insert at back of list
-template<class Type>
-void Foam::ptscotchDecomp::append
-(
-    const UList<Type>& extraLst,
-    List<Type>& lst
-)
-{
-    label sz = lst.size();
-
-    // Make space for initial elements
-    lst.setSize(sz + extraLst.size());
-
-    // Insert at back
-    forAll(extraLst, i)
-    {
-        lst[sz++] = extraLst[i];
-    }
-}
+template<>
+Foam::scalar Foam::flipOp::operator()(const scalar& v) const
+{
+    return -v;
+}
+
+template<> Foam::vector Foam::flipOp::operator()(const vector& v) const
+{
+    return -v;
+}
+
+template<>Foam::sphericalTensor Foam::flipOp::operator()
+(
+    const sphericalTensor& v
+) const
+{
+    return -v;
+}
+
+template<> Foam::symmTensor Foam::flipOp::operator()
+(
+    const symmTensor& v
+) const
+{
+    return -v;
+}
+
+template<> Foam::tensor Foam::flipOp::operator()(const tensor& v) const
+{
+    return -v;
+}
+
+template<> Foam::triad Foam::flipOp::operator()
+(
+    const triad& v
+) const
+{
+    return -v;
+}


@ -0,0 +1,103 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::flipOp
Description
Class containing functor to negate primitives. Dummy for all other types.
Used in mesh transformations where face can flip.
SourceFiles
flipOp.C
\*---------------------------------------------------------------------------*/
#ifndef flipOp_H
#define flipOp_H
#include "fieldTypes.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class flipOp Declaration
\*---------------------------------------------------------------------------*/
class flipOp
{
public:
template<class Type>
Type operator()(const Type& val) const
{
return val;
}
};
class noOp
{
public:
template<class Type>
Type operator()(const Type& val) const
{
return val;
}
};
class flipLabelOp
{
public:
label operator()(const label& val) const
{
return -val;
}
};
// Template specialisation for primitives that support negation
template<> scalar flipOp::operator()(const scalar&) const;
template<> vector flipOp::operator()(const vector&) const;
template<> sphericalTensor flipOp::operator()(const sphericalTensor&) const;
template<> symmTensor flipOp::operator()(const symmTensor&) const;
template<> tensor flipOp::operator()(const tensor&) const;
template<> triad flipOp::operator()(const triad&) const;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
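As a rough usage note (an illustrative stand-in, not taken from the OpenFOAM sources): the functor pattern above means flipOp negates oriented primitives through its specialisations, while the primary template, like noOp, leaves every other type untouched, and flipLabelOp does the same for labels; these are the negateOp arguments the distribute overloads expect. The stand-alone sketch below mirrors that pattern with a double standing in for scalar.

    // Plain C++ stand-in for the flipOp pattern (illustrative only).
    #include <iostream>

    struct FlipOpSketch
    {
        // Primary template: identity for types that are not negated
        template<class Type>
        Type operator()(const Type& val) const
        {
            return val;
        }
    };

    // Specialisation for an oriented quantity: return the negation
    template<>
    double FlipOpSketch::operator()(const double& val) const
    {
        return -val;
    }

    int main()
    {
        FlipOpSketch flip;
        std::cout << flip(3.5) << '\n';   // -3.5 : double is flipped
        std::cout << flip(7) << '\n';     // 7    : int uses the primary template
        return 0;
    }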


@ -28,17 +28,22 @@ polyTopoChange/polyTopoChange/addPatchCellLayer.C
polyTopoChange/polyTopoChange/pointEdgeCollapse/pointEdgeCollapse.C polyTopoChange/polyTopoChange/pointEdgeCollapse/pointEdgeCollapse.C
polyTopoChange/polyTopoChange/edgeCollapser.C polyTopoChange/polyTopoChange/edgeCollapser.C
polyTopoChange/polyTopoChange/faceCollapser.C polyTopoChange/polyTopoChange/faceCollapser.C
polyTopoChange/polyTopoChange/hexRef8.C
polyTopoChange/polyTopoChange/removeCells.C polyTopoChange/polyTopoChange/removeCells.C
polyTopoChange/polyTopoChange/removeFaces.C polyTopoChange/polyTopoChange/removeFaces.C
polyTopoChange/polyTopoChange/refinementData.C polyTopoChange/polyTopoChange/refinementData.C
polyTopoChange/polyTopoChange/refinementDistanceData.C polyTopoChange/polyTopoChange/refinementDistanceData.C
polyTopoChange/polyTopoChange/refinementHistory.C
polyTopoChange/polyTopoChange/removePoints.C polyTopoChange/polyTopoChange/removePoints.C
polyTopoChange/polyTopoChange/combineFaces.C polyTopoChange/polyTopoChange/combineFaces.C
polyTopoChange/polyTopoChange/duplicatePoints.C polyTopoChange/polyTopoChange/duplicatePoints.C
polyTopoChange/polyTopoChange/tetDecomposer.C polyTopoChange/polyTopoChange/tetDecomposer.C
hexRef8 = polyTopoChange/polyTopoChange/hexRef8
$(hexRef8)/hexRef8.C
$(hexRef8)/hexRef8Data.C
$(hexRef8)/refinementHistory.C
slidingInterface/slidingInterface.C slidingInterface/slidingInterface.C
slidingInterface/slidingInterfaceProjectPoints.C slidingInterface/slidingInterfaceProjectPoints.C
slidingInterface/coupleSlidingInterface.C slidingInterface/coupleSlidingInterface.C
@ -83,6 +88,8 @@ polyMeshAdder/polyMeshAdder.C
fvMeshTools/fvMeshTools.C fvMeshTools/fvMeshTools.C
fvMeshSubset/fvMeshSubset.C
motionSmoother/motionSmoother.C motionSmoother/motionSmoother.C
motionSmoother/motionSmootherAlgo.C motionSmoother/motionSmootherAlgo.C
motionSmoother/motionSmootherAlgoCheck.C motionSmoother/motionSmootherAlgoCheck.C


@ -28,6 +28,14 @@ License
#include "faceCoupleInfo.H" #include "faceCoupleInfo.H"
#include "fvMesh.H" #include "fvMesh.H"
/* * * * * * * * * * * * * * * Static Member Data * * * * * * * * * * * * * */
namespace Foam
{
defineTypeNameAndDebug(fvMeshAdder, 0);
}
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
Foam::labelList Foam::fvMeshAdder::calcPatchMap Foam::labelList Foam::fvMeshAdder::calcPatchMap
@ -105,6 +113,12 @@ Foam::autoPtr<Foam::mapAddedPolyMesh> Foam::fvMeshAdder::add
fvMeshAdder::MapSurfaceFields<symmTensor>(mapPtr, mesh0, mesh1); fvMeshAdder::MapSurfaceFields<symmTensor>(mapPtr, mesh0, mesh1);
fvMeshAdder::MapSurfaceFields<tensor>(mapPtr, mesh0, mesh1); fvMeshAdder::MapSurfaceFields<tensor>(mapPtr, mesh0, mesh1);
fvMeshAdder::MapDimFields<scalar>(mapPtr, mesh0, mesh1);
fvMeshAdder::MapDimFields<vector>(mapPtr, mesh0, mesh1);
fvMeshAdder::MapDimFields<sphericalTensor>(mapPtr, mesh0, mesh1);
fvMeshAdder::MapDimFields<symmTensor>(mapPtr, mesh0, mesh1);
fvMeshAdder::MapDimFields<tensor>(mapPtr, mesh0, mesh1);
return mapPtr; return mapPtr;
} }


@@ -26,7 +26,7 @@ Class
Description
    Adds two fvMeshes without using any polyMesh morphing.
-    Uses fvMeshAdder.
+    Uses polyMeshAdder.

SourceFiles
    fvMeshAdder.C
@ -42,6 +42,7 @@ SourceFiles
#include "fvPatchFieldsFwd.H" #include "fvPatchFieldsFwd.H"
#include "fvsPatchFieldsFwd.H" #include "fvsPatchFieldsFwd.H"
#include "fvPatchFieldMapper.H" #include "fvPatchFieldMapper.H"
#include "DimensionedField.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -101,8 +102,22 @@ private:
const GeometricField<Type, fvsPatchField, surfaceMesh>& fldToAdd const GeometricField<Type, fvsPatchField, surfaceMesh>& fldToAdd
); );
//- Update single dimensionedField.
template<class Type>
static void MapDimField
(
const mapAddedPolyMesh& meshMap,
DimensionedField<Type, volMesh>& fld,
const DimensionedField<Type, volMesh>& fldToAdd
);
public: public:
// Declare name of the class and its debug switch
ClassName("fvMeshAdder");
// Member Functions // Member Functions
//- Inplace add mesh to fvMesh. Maps all stored fields. Returns map. //- Inplace add mesh to fvMesh. Maps all stored fields. Returns map.
@ -131,6 +146,15 @@ public:
const fvMesh& mesh, const fvMesh& mesh,
const fvMesh& meshToAdd const fvMesh& meshToAdd
); );
//- Map all DimensionedFields of Type
template<class Type>
static void MapDimFields
(
const mapAddedPolyMesh&,
const fvMesh& mesh,
const fvMesh& meshToAdd
);
}; };


@ -280,6 +280,12 @@ void Foam::fvMeshAdder::MapVolFields
++fieldIter ++fieldIter
) )
{ {
if (debug)
{
Pout<< "MapVolFields : Storing old time for " << fieldIter()->name()
<< endl;
}
const_cast<GeometricField<Type, fvPatchField, volMesh>*>(fieldIter()) const_cast<GeometricField<Type, fvPatchField, volMesh>*>(fieldIter())
->storeOldTimes(); ->storeOldTimes();
} }
@ -304,6 +310,12 @@ void Foam::fvMeshAdder::MapVolFields
const GeometricField<Type, fvPatchField, volMesh>& fldToAdd = const GeometricField<Type, fvPatchField, volMesh>& fldToAdd =
*fieldsToAdd[fld.name()]; *fieldsToAdd[fld.name()];
if (debug)
{
Pout<< "MapVolFields : mapping " << fld.name()
<< " and " << fldToAdd.name() << endl;
}
MapVolField<Type>(meshMap, fld, fldToAdd); MapVolField<Type>(meshMap, fld, fldToAdd);
} }
else else
@@ -585,8 +597,13 @@ void Foam::fvMeshAdder::MapSurfaceFields
        ++fieldIter
    )
    {
-        const_cast<fldType*>(fieldIter())
-            ->storeOldTimes();
+        if (debug)
+        {
+            Pout<< "MapSurfaceFields : Storing old time for "
+                << fieldIter()->name() << endl;
+        }
+
+        const_cast<fldType*>(fieldIter())->storeOldTimes();
    }
@ -604,6 +621,12 @@ void Foam::fvMeshAdder::MapSurfaceFields
{ {
const fldType& fldToAdd = *fieldsToAdd[fld.name()]; const fldType& fldToAdd = *fieldsToAdd[fld.name()];
if (debug)
{
Pout<< "MapSurfaceFields : mapping " << fld.name()
<< " and " << fldToAdd.name() << endl;
}
MapSurfaceField<Type>(meshMap, fld, fldToAdd); MapSurfaceField<Type>(meshMap, fld, fldToAdd);
} }
else else
@ -617,4 +640,80 @@ void Foam::fvMeshAdder::MapSurfaceFields
} }
template<class Type>
void Foam::fvMeshAdder::MapDimField
(
const mapAddedPolyMesh& meshMap,
DimensionedField<Type, volMesh>& fld,
const DimensionedField<Type, volMesh>& fldToAdd
)
{
const fvMesh& mesh = fld.mesh();
// Store old field
Field<Type> oldField(fld);
fld.setSize(mesh.nCells());
fld.rmap(oldField, meshMap.oldCellMap());
fld.rmap(fldToAdd, meshMap.addedCellMap());
}
template<class Type>
void Foam::fvMeshAdder::MapDimFields
(
const mapAddedPolyMesh& meshMap,
const fvMesh& mesh,
const fvMesh& meshToAdd
)
{
typedef DimensionedField<Type, volMesh> fldType;
// Note: use strict flag on lookupClass to avoid picking up
// volFields
HashTable<const fldType*> fields
(
mesh.objectRegistry::lookupClass<fldType>(true)
);
HashTable<const fldType*> fieldsToAdd
(
meshToAdd.objectRegistry::lookupClass<fldType>(true)
);
for
(
typename HashTable<const fldType*>::
iterator fieldIter = fields.begin();
fieldIter != fields.end();
++fieldIter
)
{
fldType& fld = const_cast<fldType&>(*fieldIter());
if (fieldsToAdd.found(fld.name()))
{
const fldType& fldToAdd = *fieldsToAdd[fld.name()];
if (debug)
{
Pout<< "MapDimFields : mapping " << fld.name()
<< " and " << fldToAdd.name() << endl;
}
MapDimField<Type>(meshMap, fld, fldToAdd);
}
else
{
WarningIn("fvMeshAdder::MapDimFields(..)")
<< "Not mapping field " << fld.name()
<< " since not present on mesh to add"
<< endl;
}
}
}
// ************************************************************************* // // ************************************************************************* //

View File

@ -40,17 +40,125 @@ License
#include "syncTools.H" #include "syncTools.H"
#include "CompactListList.H" #include "CompactListList.H"
#include "fvMeshTools.H" #include "fvMeshTools.H"
#include "ListOps.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam namespace Foam
{ {
defineTypeNameAndDebug(fvMeshDistribute, 0); defineTypeNameAndDebug(fvMeshDistribute, 0);
//- Less function class that can be used for sorting processor patches
class lessProcPatches
{
const labelList& nbrProc_;
const labelList& referPatchID_;
public:
lessProcPatches(const labelList& nbrProc, const labelList& referPatchID)
:
nbrProc_(nbrProc),
referPatchID_(referPatchID)
{}
bool operator()(const label a, const label b)
{
if (nbrProc_[a] < nbrProc_[b])
{
return true;
}
else if (nbrProc_[a] > nbrProc_[b])
{
return false;
}
else
{
// Equal neighbour processor
return referPatchID_[a] < referPatchID_[b];
}
}
};
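// [Editor's note] Illustrative usage sketch only -- not part of this commit.
// The comparator above is handed to sortedOrder so that processor patches are
// visited (and therefore added) in increasing neighbour-processor order, with
// ties broken on the 'refer' patch for processor cyclics. Assuming nbrProc and
// referPatchID are sized to the number of boundary faces:
//
//     labelList order;
//     sortedOrder(nbrProc, order, lessProcPatches(nbrProc, referPatchID));
//     forAll(order, i)
//     {
//         const label bFacei = order[i];
//         // ... add or look up the processor patch for nbrProc[bFacei] ...
//     }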
} }
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * // // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void Foam::fvMeshDistribute::inplaceRenumberWithFlip
(
const labelUList& oldToNew,
const bool oldToNewHasFlip,
const bool lstHasFlip,
labelUList& lst
)
{
if (!lstHasFlip && !oldToNewHasFlip)
{
Foam::inplaceRenumber(oldToNew, lst);
}
else
{
// Either input data or map encodes sign so result encodes sign
forAll(lst, elemI)
{
// Extract old value and sign
label val = lst[elemI];
label sign = 1;
if (lstHasFlip)
{
if (val > 0)
{
val = val-1;
}
else if (val < 0)
{
val = -val-1;
sign = -1;
}
else
{
FatalErrorInFunction
<< "Problem : zero value " << val
<< " at index " << elemI << " out of " << lst.size()
<< " list with flip bit" << exit(FatalError);
}
}
// Lookup new value and possibly change sign
label newVal = oldToNew[val];
if (oldToNewHasFlip)
{
if (newVal > 0)
{
newVal = newVal-1;
}
else if (newVal < 0)
{
newVal = -newVal-1;
sign = -sign;
}
else
{
FatalErrorInFunction
<< "Problem : zero value " << newVal
<< " at index " << elemI << " out of "
<< oldToNew.size()
<< " list with flip bit" << exit(FatalError);
}
}
// Encode new value and sign
lst[elemI] = sign*(newVal+1);
}
}
}
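// [Editor's note] Illustrative worked example only -- not part of this commit.
// With the flip convention a face index f is stored as f+1 and negated when
// the face is flipped, so 0 is never a legal encoded value:
//     encoded = +(f+1)   face f, orientation kept
//     encoded = -(f+1)   face f, orientation flipped
// Decoding is sign = sign(encoded), f = mag(encoded)-1. For example:
//
//     labelList lst(3);
//     lst[0] = 1; lst[1] = -3; lst[2] = 2;        // faces 0, 2 (flipped), 1
//     labelList oldToNew(3);
//     oldToNew[0] = 5; oldToNew[1] = 6; oldToNew[2] = 7;
//     inplaceRenumberWithFlip(oldToNew, false, true, lst);
//     // lst is now (6 -8 7): face 0 -> 5, face 2 -> 7 still flipped, face 1 -> 6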
Foam::labelList Foam::fvMeshDistribute::select Foam::labelList Foam::fvMeshDistribute::select
( (
const bool selectEqual, const bool selectEqual,
@ -453,7 +561,13 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::fvMeshDistribute::repatch
forAll(constructFaceMap, proci) forAll(constructFaceMap, proci)
{ {
inplaceRenumber(map().reverseFaceMap(), constructFaceMap[proci]); inplaceRenumberWithFlip
(
map().reverseFaceMap(),
false,
true,
constructFaceMap[proci]
);
} }
@ -882,12 +996,45 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::fvMeshDistribute::doRemoveCells
meshMod meshMod
); );
//// Generate test field
//tmp<surfaceScalarField> sfld(generateTestField(mesh_));
// Save internal fields (note: not as DimensionedFields since would
// get mapped)
PtrList<Field<scalar>> sFlds;
saveInternalFields(sFlds);
PtrList<Field<vector>> vFlds;
saveInternalFields(vFlds);
PtrList<Field<sphericalTensor>> sptFlds;
saveInternalFields(sptFlds);
PtrList<Field<symmTensor>> sytFlds;
saveInternalFields(sytFlds);
PtrList<Field<tensor>> tFlds;
saveInternalFields(tFlds);
// Change the mesh. No inflation. Note: no parallel comms allowed. // Change the mesh. No inflation. Note: no parallel comms allowed.
autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh_, false, false); autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh_, false, false);
// Update fields // Update fields
mesh_.updateMesh(map); mesh_.updateMesh(map);
// Any exposed faces in a surfaceField will not be mapped. Map the value
// of these separately (until there is support in all PatchFields for
// mapping from internal faces ...)
mapExposedFaces(map(), sFlds);
mapExposedFaces(map(), vFlds);
mapExposedFaces(map(), sptFlds);
mapExposedFaces(map(), sytFlds);
mapExposedFaces(map(), tFlds);
//// Test test field
//testField(sfld);
// Move mesh (since morphing does not do this) // Move mesh (since morphing does not do this)
if (map().hasMotionPoints()) if (map().hasMotionPoints())
{ {
@ -911,10 +1058,18 @@ void Foam::fvMeshDistribute::addProcPatches
// contain for all current boundary faces the global patchID (for non-proc // contain for all current boundary faces the global patchID (for non-proc
// patch) or the processor. // patch) or the processor.
// Determine a visit order such that the processor patches get added
// in order of increasing neighbour processor (and for same neighbour
// processor (in case of processor cyclics) in order of increasing
// 'refer' patch)
labelList indices;
sortedOrder(nbrProc, indices, lessProcPatches(nbrProc, referPatchID));
procPatchID.setSize(Pstream::nProcs()); procPatchID.setSize(Pstream::nProcs());
forAll(nbrProc, bFacei) forAll(indices, i)
{ {
label bFacei = indices[i];
label proci = nbrProc[bFacei]; label proci = nbrProc[bFacei];
if (proci != -1 && proci != Pstream::myProcNo()) if (proci != -1 && proci != Pstream::myProcNo())
@ -927,6 +1082,7 @@ void Foam::fvMeshDistribute::addProcPatches
if (referPatchID[bFacei] == -1) if (referPatchID[bFacei] == -1)
{ {
// Ordinary processor boundary // Ordinary processor boundary
processorPolyPatch pp processorPolyPatch pp
( (
0, // size 0, // size
@ -934,7 +1090,7 @@ void Foam::fvMeshDistribute::addProcPatches
mesh_.boundaryMesh().size(), mesh_.boundaryMesh().size(),
mesh_.boundaryMesh(), mesh_.boundaryMesh(),
Pstream::myProcNo(), Pstream::myProcNo(),
nbrProc[bFacei] proci
); );
procPatchID[proci].insert procPatchID[proci].insert
@ -957,7 +1113,6 @@ void Foam::fvMeshDistribute::addProcPatches
( (
mesh_.boundaryMesh()[referPatchID[bFacei]] mesh_.boundaryMesh()[referPatchID[bFacei]]
); );
processorCyclicPolyPatch pp processorCyclicPolyPatch pp
( (
0, // size 0, // size
@ -965,7 +1120,7 @@ void Foam::fvMeshDistribute::addProcPatches
mesh_.boundaryMesh().size(), mesh_.boundaryMesh().size(),
mesh_.boundaryMesh(), mesh_.boundaryMesh(),
Pstream::myProcNo(), Pstream::myProcNo(),
nbrProc[bFacei], proci,
pcPatch.name(), pcPatch.name(),
pcPatch.transform() pcPatch.transform()
); );
@ -1500,6 +1655,33 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
const wordList surfTensors(mesh_.names(surfaceTensorField::typeName)); const wordList surfTensors(mesh_.names(surfaceTensorField::typeName));
checkEqualWordList("surfaceTensorFields", surfTensors); checkEqualWordList("surfaceTensorFields", surfTensors);
typedef volScalarField::Internal dimScalType;
const wordList dimScalars(mesh_.names(dimScalType::typeName));
checkEqualWordList("volScalarField::Internal", dimScalars);
typedef volVectorField::Internal dimVecType;
const wordList dimVectors(mesh_.names(dimVecType::typeName));
checkEqualWordList("volVectorField::Internal", dimVectors);
typedef volSphericalTensorField::Internal dimSphereType;
const wordList dimSphereTensors(mesh_.names(dimSphereType::typeName));
checkEqualWordList
(
"volSphericalTensorField::Internal",
dimSphereTensors
);
typedef volSymmTensorField::Internal dimSymmTensorType;
const wordList dimSymmTensors(mesh_.names(dimSymmTensorType::typeName));
checkEqualWordList
(
"volSymmTensorField::Internal",
dimSymmTensors
);
typedef volTensorField::Internal dimTensorType;
const wordList dimTensors(mesh_.names(dimTensorType::typeName));
checkEqualWordList("volTensorField::Internal", dimTensors);
@ -1626,10 +1808,13 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
); );
subCellMap[recvProc] = subsetter.cellMap(); subCellMap[recvProc] = subsetter.cellMap();
subFaceMap[recvProc] = renumber subFaceMap[recvProc] = subsetter.faceFlipMap();
inplaceRenumberWithFlip
( (
repatchFaceMap, repatchFaceMap,
subsetter.faceMap() false, // oldToNew has flip
true, // subFaceMap has flip
subFaceMap[recvProc]
); );
subPointMap[recvProc] = subsetter.pointMap(); subPointMap[recvProc] = subsetter.pointMap();
subPatchMap[recvProc] = subsetter.patchMap(); subPatchMap[recvProc] = subsetter.patchMap();
@ -1681,6 +1866,8 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
procSourceNewNbrProc, procSourceNewNbrProc,
str str
); );
// volFields
sendFields<volScalarField>(recvProc, volScalars, subsetter, str); sendFields<volScalarField>(recvProc, volScalars, subsetter, str);
sendFields<volVectorField>(recvProc, volVectors, subsetter, str); sendFields<volVectorField>(recvProc, volVectors, subsetter, str);
sendFields<volSphericalTensorField> sendFields<volSphericalTensorField>
@ -1699,6 +1886,7 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
); );
sendFields<volTensorField>(recvProc, volTensors, subsetter, str); sendFields<volTensorField>(recvProc, volTensors, subsetter, str);
// surfaceFields
sendFields<surfaceScalarField> sendFields<surfaceScalarField>
( (
recvProc, recvProc,
@ -1734,6 +1922,43 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
subsetter, subsetter,
str str
); );
// dimensionedFields
sendFields<volScalarField::Internal>
(
recvProc,
dimScalars,
subsetter,
str
);
sendFields<volVectorField::Internal>
(
recvProc,
dimVectors,
subsetter,
str
);
sendFields<volSphericalTensorField::Internal>
(
recvProc,
dimSphereTensors,
subsetter,
str
);
sendFields<volSymmTensorField::Internal>
(
recvProc,
dimSymmTensors,
subsetter,
str
);
sendFields<volTensorField::Internal>
(
recvProc,
dimTensors,
subsetter,
str
);
} }
} }
@ -1771,12 +1996,24 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
repatchFaceMap, repatchFaceMap,
subMap().faceMap() subMap().faceMap()
); );
// Insert the sign bit from face flipping
labelList& faceMap = subFaceMap[Pstream::myProcNo()];
forAll(faceMap, faceI)
{
faceMap[faceI] += 1;
}
const labelHashSet& flip = subMap().flipFaceFlux();
forAllConstIter(labelHashSet, flip, iter)
{
label faceI = iter.key();
faceMap[faceI] = -faceMap[faceI];
}
subPointMap[Pstream::myProcNo()] = subMap().pointMap(); subPointMap[Pstream::myProcNo()] = subMap().pointMap();
subPatchMap[Pstream::myProcNo()] = identity(patches.size()); subPatchMap[Pstream::myProcNo()] = identity(patches.size());
// Initialize all addressing into current mesh // Initialize all addressing into current mesh
constructCellMap[Pstream::myProcNo()] = identity(mesh_.nCells()); constructCellMap[Pstream::myProcNo()] = identity(mesh_.nCells());
constructFaceMap[Pstream::myProcNo()] = identity(mesh_.nFaces()); constructFaceMap[Pstream::myProcNo()] = identity(mesh_.nFaces()) + 1;
constructPointMap[Pstream::myProcNo()] = identity(mesh_.nPoints()); constructPointMap[Pstream::myProcNo()] = identity(mesh_.nPoints());
constructPatchMap[Pstream::myProcNo()] = identity(patches.size()); constructPatchMap[Pstream::myProcNo()] = identity(patches.size());
@ -1872,17 +2109,26 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
labelList domainSourceNewNbrProc; labelList domainSourceNewNbrProc;
autoPtr<fvMesh> domainMeshPtr; autoPtr<fvMesh> domainMeshPtr;
PtrList<volScalarField> vsf; PtrList<volScalarField> vsf;
PtrList<volVectorField> vvf; PtrList<volVectorField> vvf;
PtrList<volSphericalTensorField> vsptf; PtrList<volSphericalTensorField> vsptf;
PtrList<volSymmTensorField> vsytf; PtrList<volSymmTensorField> vsytf;
PtrList<volTensorField> vtf; PtrList<volTensorField> vtf;
PtrList<surfaceScalarField> ssf; PtrList<surfaceScalarField> ssf;
PtrList<surfaceVectorField> svf; PtrList<surfaceVectorField> svf;
PtrList<surfaceSphericalTensorField> ssptf; PtrList<surfaceSphericalTensorField> ssptf;
PtrList<surfaceSymmTensorField> ssytf; PtrList<surfaceSymmTensorField> ssytf;
PtrList<surfaceTensorField> stf; PtrList<surfaceTensorField> stf;
PtrList<volScalarField::Internal> dsf;
PtrList<volVectorField::Internal> dvf;
PtrList<volSphericalTensorField::Internal> dstf;
PtrList<volSymmTensorField::Internal> dsytf;
PtrList<volTensorField::Internal> dtf;
// Opposite of sendMesh // Opposite of sendMesh
{ {
domainMeshPtr = receiveMesh domainMeshPtr = receiveMesh
@ -1908,6 +2154,7 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
// of problems reading consecutive fields from single stream. // of problems reading consecutive fields from single stream.
dictionary fieldDicts(str); dictionary fieldDicts(str);
// Vol fields
receiveFields<volScalarField> receiveFields<volScalarField>
( (
sendProc, sendProc,
@ -1949,6 +2196,7 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
fieldDicts.subDict(volTensorField::typeName) fieldDicts.subDict(volTensorField::typeName)
); );
// Surface fields
receiveFields<surfaceScalarField> receiveFields<surfaceScalarField>
( (
sendProc, sendProc,
@ -1989,12 +2237,70 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
stf, stf,
fieldDicts.subDict(surfaceTensorField::typeName) fieldDicts.subDict(surfaceTensorField::typeName)
); );
// Dimensioned fields
receiveFields<volScalarField::Internal>
(
sendProc,
dimScalars,
domainMesh,
dsf,
fieldDicts.subDict
(
volScalarField::Internal::typeName
)
);
receiveFields<volVectorField::Internal>
(
sendProc,
dimVectors,
domainMesh,
dvf,
fieldDicts.subDict
(
volVectorField::Internal::typeName
)
);
receiveFields<volSphericalTensorField::Internal>
(
sendProc,
dimSphereTensors,
domainMesh,
dstf,
fieldDicts.subDict
(
volSphericalTensorField::Internal::
typeName
)
);
receiveFields<volSymmTensorField::Internal>
(
sendProc,
dimSymmTensors,
domainMesh,
dsytf,
fieldDicts.subDict
(
volSymmTensorField::Internal::typeName
)
);
receiveFields<volTensorField::Internal>
(
sendProc,
dimTensors,
domainMesh,
dtf,
fieldDicts.subDict
(
volTensorField::Internal::typeName
)
);
} }
const fvMesh& domainMesh = domainMeshPtr(); const fvMesh& domainMesh = domainMeshPtr();
constructCellMap[sendProc] = identity(domainMesh.nCells()); constructCellMap[sendProc] = identity(domainMesh.nCells());
constructFaceMap[sendProc] = identity(domainMesh.nFaces()); constructFaceMap[sendProc] = identity(domainMesh.nFaces()) + 1;
constructPointMap[sendProc] = identity(domainMesh.nPoints()); constructPointMap[sendProc] = identity(domainMesh.nPoints());
constructPatchMap[sendProc] = constructPatchMap[sendProc] =
identity(domainMesh.boundaryMesh().size()); identity(domainMesh.boundaryMesh().size());
@ -2105,28 +2411,76 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
domainSourceNewNbrProc domainSourceNewNbrProc
); );
// Update all addressing so xxProcAddressing points to correct item // Update all addressing so xxProcAddressing points to correct
// in masterMesh. // item in masterMesh.
const labelList& oldCellMap = map().oldCellMap(); const labelList& oldCellMap = map().oldCellMap();
const labelList& oldFaceMap = map().oldFaceMap(); const labelList& oldFaceMap = map().oldFaceMap();
const labelList& oldPointMap = map().oldPointMap(); const labelList& oldPointMap = map().oldPointMap();
const labelList& oldPatchMap = map().oldPatchMap(); const labelList& oldPatchMap = map().oldPatchMap();
//Note: old mesh faces never flipped!
forAll(constructPatchMap, proci) forAll(constructPatchMap, proci)
{ {
if (proci != sendProc && constructPatchMap[proci].size()) if (proci != sendProc && constructPatchMap[proci].size())
{ {
// Processor already in mesh (either myProcNo or received) // Processor already in mesh (either myProcNo or received)
inplaceRenumber(oldCellMap, constructCellMap[proci]); inplaceRenumber(oldCellMap, constructCellMap[proci]);
inplaceRenumber(oldFaceMap, constructFaceMap[proci]); inplaceRenumberWithFlip
(
oldFaceMap,
false,
true,
constructFaceMap[proci]
);
inplaceRenumber(oldPointMap, constructPointMap[proci]); inplaceRenumber(oldPointMap, constructPointMap[proci]);
inplaceRenumber(oldPatchMap, constructPatchMap[proci]); inplaceRenumber(oldPatchMap, constructPatchMap[proci]);
} }
} }
labelHashSet flippedAddedFaces;
{
// Find out if any faces of domain mesh were flipped (boundary
// faces becoming internal)
label nBnd = domainMesh.nFaces()-domainMesh.nInternalFaces();
flippedAddedFaces.resize(nBnd/4);
for
(
label domainFaceI = domainMesh.nInternalFaces();
domainFaceI < domainMesh.nFaces();
domainFaceI++
)
{
label newFaceI = map().addedFaceMap()[domainFaceI];
label newCellI = mesh_.faceOwner()[newFaceI];
label domainCellI = domainMesh.faceOwner()[domainFaceI];
if (newCellI != map().addedCellMap()[domainCellI])
{
flippedAddedFaces.insert(domainFaceI);
}
}
}
// Added processor // Added processor
inplaceRenumber(map().addedCellMap(), constructCellMap[sendProc]); inplaceRenumber(map().addedCellMap(), constructCellMap[sendProc]);
inplaceRenumber(map().addedFaceMap(), constructFaceMap[sendProc]); // Add flip
forAllConstIter(labelHashSet, flippedAddedFaces, iter)
{
label domainFaceI = iter.key();
label& val = constructFaceMap[sendProc][domainFaceI];
val = -val;
}
inplaceRenumberWithFlip
(
map().addedFaceMap(),
false,
true, // constructFaceMap has flip sign
constructFaceMap[sendProc]
);
inplaceRenumber(map().addedPointMap(), constructPointMap[sendProc]); inplaceRenumber(map().addedPointMap(), constructPointMap[sendProc]);
inplaceRenumber(map().addedPatchMap(), constructPatchMap[sendProc]); inplaceRenumber(map().addedPatchMap(), constructPatchMap[sendProc]);
@ -2236,35 +2590,6 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
Zero Zero
); );
initPatchFields<surfaceScalarField, processorFvsPatchField<scalar>>
(
Zero
);
initPatchFields<surfaceVectorField, processorFvsPatchField<vector>>
(
Zero
);
initPatchFields
<
surfaceSphericalTensorField,
processorFvsPatchField<sphericalTensor>
>
(
Zero
);
initPatchFields
<
surfaceSymmTensorField,
processorFvsPatchField<symmTensor>
>
(
Zero
);
initPatchFields<surfaceTensorField, processorFvsPatchField<tensor>>
(
Zero
);
mesh_.setInstance(mesh_.time().timeName()); mesh_.setInstance(mesh_.time().timeName());
@ -2308,7 +2633,10 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
constructPointMap.xfer(), constructPointMap.xfer(),
constructFaceMap.xfer(), constructFaceMap.xfer(),
constructCellMap.xfer(), constructCellMap.xfer(),
constructPatchMap.xfer() constructPatchMap.xfer(),
true, // subFaceMap has flip
true // constructFaceMap has flip
) )
); );
} }

View File

@ -82,6 +82,14 @@ class fvMeshDistribute
// Private Member Functions // Private Member Functions
static void inplaceRenumberWithFlip
(
const labelUList& oldToNew,
const bool oldToNewHasFlip,
const bool lstHasFlip,
labelUList& lst
);
//- Find indices with value //- Find indices with value
static labelList select static labelList select
( (
@ -117,6 +125,18 @@ class fvMeshDistribute
const PtrList<FieldField<fvsPatchField, T>>& oldBflds const PtrList<FieldField<fvsPatchField, T>>& oldBflds
); );
//- Save internal fields of surfaceFields
template<class T>
void saveInternalFields(PtrList<Field<T>>& iflds) const;
//- Set value of patch faces resulting from internal faces
template<class T>
void mapExposedFaces
(
const mapPolyMesh& map,
const PtrList<Field<T>>& oldFlds
);
//- Init patch fields of certain type //- Init patch fields of certain type
template<class GeoField, class PatchFieldType> template<class GeoField, class PatchFieldType>
void initPatchFields void initPatchFields
@ -151,6 +171,7 @@ class fvMeshDistribute
labelListList& constructPointMap labelListList& constructPointMap
); );
// Coupling information // Coupling information
//- Construct the local environment of all boundary faces. //- Construct the local environment of all boundary faces.
@ -240,7 +261,7 @@ class fvMeshDistribute
( (
const labelList& neighbourNewProc, // new processor per b. face const labelList& neighbourNewProc, // new processor per b. face
const labelList& referPatchID, // -1 or original patch const labelList& referPatchID, // -1 or original patch
const List<Map<label>>& procPatchID// patchID const List<Map<label>>& procPatchID // patchID
); );
//- Send mesh and coupling data. //- Send mesh and coupling data.

View File

@ -55,13 +55,14 @@ void Foam::fvMeshDistribute::printFieldInfo(const fvMesh& mesh)
} }
// Save whole boundary field
template<class T, class Mesh> template<class T, class Mesh>
void Foam::fvMeshDistribute::saveBoundaryFields void Foam::fvMeshDistribute::saveBoundaryFields
( (
PtrList<FieldField<fvsPatchField, T>>& bflds PtrList<FieldField<fvsPatchField, T>>& bflds
) const ) const
{ {
// Save whole boundary field
typedef GeometricField<T, fvsPatchField, Mesh> fldType; typedef GeometricField<T, fvsPatchField, Mesh> fldType;
HashTable<const fldType*> flds HashTable<const fldType*> flds
@ -84,7 +85,6 @@ void Foam::fvMeshDistribute::saveBoundaryFields
} }
// Map boundary field
template<class T, class Mesh> template<class T, class Mesh>
void Foam::fvMeshDistribute::mapBoundaryFields void Foam::fvMeshDistribute::mapBoundaryFields
( (
@ -92,6 +92,8 @@ void Foam::fvMeshDistribute::mapBoundaryFields
const PtrList<FieldField<fvsPatchField, T>>& oldBflds const PtrList<FieldField<fvsPatchField, T>>& oldBflds
) )
{ {
// Map boundary field
const labelList& oldPatchStarts = map.oldPatchStarts(); const labelList& oldPatchStarts = map.oldPatchStarts();
const labelList& faceMap = map.faceMap(); const labelList& faceMap = map.faceMap();
@ -145,13 +147,103 @@ void Foam::fvMeshDistribute::mapBoundaryFields
} }
// Init patch fields of certain type template<class T>
void Foam::fvMeshDistribute::saveInternalFields
(
PtrList<Field<T> >& iflds
) const
{
typedef GeometricField<T, fvsPatchField, surfaceMesh> fldType;
HashTable<const fldType*> flds
(
static_cast<const fvMesh&>(mesh_).objectRegistry::lookupClass<fldType>()
);
iflds.setSize(flds.size());
label i = 0;
forAllConstIter(typename HashTable<const fldType*>, flds, iter)
{
const fldType& fld = *iter();
iflds.set(i, fld.primitiveField().clone());
i++;
}
}
template<class T>
void Foam::fvMeshDistribute::mapExposedFaces
(
const mapPolyMesh& map,
const PtrList<Field<T> >& oldFlds
)
{
// Set boundary values of exposed internal faces
const labelList& faceMap = map.faceMap();
typedef GeometricField<T, fvsPatchField, surfaceMesh> fldType;
HashTable<fldType*> flds
(
mesh_.objectRegistry::lookupClass<fldType>()
);
if (flds.size() != oldFlds.size())
{
FatalErrorIn("fvMeshDistribute::mapExposedFaces(..)") << "problem"
<< abort(FatalError);
}
label fieldI = 0;
forAllIter(typename HashTable<fldType*>, flds, iter)
{
fldType& fld = *iter();
typename fldType::Boundary& bfld = fld.boundaryFieldRef();
const Field<T>& oldInternal = oldFlds[fieldI++];
// Pull from old internal field into bfld.
forAll(bfld, patchI)
{
fvsPatchField<T>& patchFld = bfld[patchI];
forAll(patchFld, i)
{
const label faceI = patchFld.patch().start()+i;
label oldFaceI = faceMap[faceI];
if (oldFaceI < oldInternal.size())
{
patchFld[i] = oldInternal[oldFaceI];
if (map.flipFaceFlux().found(faceI))
{
patchFld[i] = flipOp()(patchFld[i]);
}
}
}
}
}
}
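// [Editor's note] Illustrative sketch only -- not part of this commit.
// saveInternalFields and mapExposedFaces are used as a pair around a topology
// change: the internal values of every surfaceField are saved first, the mesh
// is changed, and any internal face that ended up on a boundary patch then
// gets its old internal value back (negated via flipOp when the change flipped
// the face). This mirrors the doRemoveCells changes earlier in this commit:
//
//     PtrList<Field<scalar>> sFlds;
//     saveInternalFields(sFlds);                 // before changeMesh
//     autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh_, false, false);
//     mesh_.updateMesh(map);
//     mapExposedFaces(map(), sFlds);             // after updateMesh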
template<class GeoField, class PatchFieldType> template<class GeoField, class PatchFieldType>
void Foam::fvMeshDistribute::initPatchFields void Foam::fvMeshDistribute::initPatchFields
( (
const typename GeoField::value_type& initVal const typename GeoField::value_type& initVal
) )
{ {
// Init patch fields of certain type
HashTable<GeoField*> flds HashTable<GeoField*> flds
( (
mesh_.objectRegistry::lookupClass<GeoField>() mesh_.objectRegistry::lookupClass<GeoField>()
@ -161,8 +253,7 @@ void Foam::fvMeshDistribute::initPatchFields
{ {
GeoField& fld = *iter(); GeoField& fld = *iter();
typename GeoField::Boundary& bfld = typename GeoField::Boundary& bfld = fld.boundaryFieldRef();
fld.boundaryFieldRef();
forAll(bfld, patchi) forAll(bfld, patchi)
{ {
@ -175,10 +266,11 @@ void Foam::fvMeshDistribute::initPatchFields
} }
// correctBoundaryConditions patch fields of certain type
template<class GeoField> template<class GeoField>
void Foam::fvMeshDistribute::correctBoundaryConditions() void Foam::fvMeshDistribute::correctBoundaryConditions()
{ {
// correctBoundaryConditions patch fields of certain type
HashTable<GeoField*> flds HashTable<GeoField*> flds
( (
mesh_.objectRegistry::lookupClass<GeoField>() mesh_.objectRegistry::lookupClass<GeoField>()
@ -192,24 +284,6 @@ void Foam::fvMeshDistribute::correctBoundaryConditions()
} }
// Send fields. Note order supplied so we can receive in exactly the same order.
// Note that field gets written as entry in dictionary so we
// can construct from subdictionary.
// (since otherwise the reading as-a-dictionary mixes up entries from
// consecutive fields)
// The dictionary constructed is:
// volScalarField
// {
// p {internalField ..; boundaryField ..;}
// k {internalField ..; boundaryField ..;}
// }
// volVectorField
// {
// U {internalField ... }
// }
// volVectorField {U {internalField ..; boundaryField ..;}}
//
template<class GeoField> template<class GeoField>
void Foam::fvMeshDistribute::sendFields void Foam::fvMeshDistribute::sendFields
( (
@ -219,6 +293,25 @@ void Foam::fvMeshDistribute::sendFields
Ostream& toNbr Ostream& toNbr
) )
{ {
// Send fields. Note order supplied so we can receive in exactly the same
// order.
// Note that field gets written as entry in dictionary so we
// can construct from subdictionary.
// (since otherwise the reading as-a-dictionary mixes up entries from
// consecutive fields)
// The dictionary constructed is:
// volScalarField
// {
// p {internalField ..; boundaryField ..;}
// k {internalField ..; boundaryField ..;}
// }
// volVectorField
// {
// U {internalField ... }
// }
// volVectorField {U {internalField ..; boundaryField ..;}}
toNbr << GeoField::typeName << token::NL << token::BEGIN_BLOCK << token::NL; toNbr << GeoField::typeName << token::NL << token::BEGIN_BLOCK << token::NL;
forAll(fieldNames, i) forAll(fieldNames, i)
{ {
@ -244,7 +337,6 @@ void Foam::fvMeshDistribute::sendFields
} }
// Opposite of sendFields
template<class GeoField> template<class GeoField>
void Foam::fvMeshDistribute::receiveFields void Foam::fvMeshDistribute::receiveFields
( (

View File

@ -34,6 +34,9 @@ Description
#include "emptyPolyPatch.H" #include "emptyPolyPatch.H"
#include "demandDrivenData.H" #include "demandDrivenData.H"
#include "cyclicPolyPatch.H" #include "cyclicPolyPatch.H"
#include "removeCells.H"
#include "polyTopoChange.H"
#include "mapPolyMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -354,6 +357,39 @@ void Foam::fvMeshSubset::subsetZones()
} }
Foam::labelList Foam::fvMeshSubset::getCellsToRemove
(
const labelList& region,
const label currentRegion
) const
{
// Count
label nKeep = 0;
forAll(region, cellI)
{
if (region[cellI] == currentRegion)
{
nKeep++;
}
}
// Collect cells to remove
label nRemove = baseMesh().nCells() - nKeep;
labelList cellsToRemove(nRemove);
nRemove = 0;
forAll(region, cellI)
{
if (region[cellI] != currentRegion)
{
cellsToRemove[nRemove++] = cellI;
}
}
return cellsToRemove;
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::fvMeshSubset::fvMeshSubset(const fvMesh& baseMesh) Foam::fvMeshSubset::fvMeshSubset(const fvMesh& baseMesh)
@ -363,7 +399,8 @@ Foam::fvMeshSubset::fvMeshSubset(const fvMesh& baseMesh)
pointMap_(0), pointMap_(0),
faceMap_(0), faceMap_(0),
cellMap_(0), cellMap_(0),
patchMap_(0) patchMap_(0),
faceFlipMapPtr_()
{} {}
@ -400,6 +437,10 @@ void Foam::fvMeshSubset::setCellSubset
} }
// Clear demand driven data
faceFlipMapPtr_.clear();
cellMap_ = globalCellMap.toc(); cellMap_ = globalCellMap.toc();
// Sort the cell map in the ascending order // Sort the cell map in the ascending order
@ -793,6 +834,8 @@ void Foam::fvMeshSubset::setLargeCellSubset
<< abort(FatalError); << abort(FatalError);
} }
// Clear demand driven data
faceFlipMapPtr_.clear();
// Get the cells for the current region. // Get the cells for the current region.
cellMap_.setSize(oldCells.size()); cellMap_.setSize(oldCells.size());
@ -1358,6 +1401,68 @@ void Foam::fvMeshSubset::setLargeCellSubset
} }
Foam::labelList Foam::fvMeshSubset::getExposedFaces
(
const labelList& region,
const label currentRegion,
const bool syncCouples
) const
{
// Collect cells to remove
labelList cellsToRemove(getCellsToRemove(region, currentRegion));
return removeCells(baseMesh(), syncCouples).getExposedFaces(cellsToRemove);
}
void Foam::fvMeshSubset::setLargeCellSubset
(
const labelList& region,
const label currentRegion,
const labelList& exposedFaces,
const labelList& patchIDs,
const bool syncCouples
)
{
// Collect cells to remove
labelList cellsToRemove(getCellsToRemove(region, currentRegion));
// Mesh changing engine.
polyTopoChange meshMod(baseMesh());
removeCells cellRemover(baseMesh(), syncCouples);
cellRemover.setRefinement
(
cellsToRemove,
exposedFaces,
patchIDs,
meshMod
);
// Create mesh, return map from old to new mesh.
autoPtr<mapPolyMesh> map = meshMod.makeMesh
(
fvMeshSubsetPtr_,
IOobject
(
baseMesh().name(),
baseMesh().time().timeName(),
baseMesh().time(),
IOobject::NO_READ,
IOobject::NO_WRITE
),
baseMesh(),
syncCouples
);
pointMap_ = map().pointMap();
faceMap_ = map().faceMap();
cellMap_ = map().cellMap();
patchMap_ = identity(baseMesh().boundaryMesh().size());
}
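// [Editor's note] Illustrative usage sketch only -- not part of this commit.
// The two-step form lets the caller decide which patch every exposed face goes
// into before the subset mesh is built (region, regionI, destPatchi and mesh
// are hypothetical names):
//
//     fvMeshSubset subsetter(mesh);
//     labelList exposed(subsetter.getExposedFaces(region, regionI));
//     // e.g. send all exposed faces to one existing patch
//     labelList patchIDs(exposed.size(), destPatchi);
//     subsetter.setLargeCellSubset(region, regionI, exposed, patchIDs);
//     const fvMesh& subMesh = subsetter.subMesh();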
bool Foam::fvMeshSubset::hasSubMesh() const bool Foam::fvMeshSubset::hasSubMesh() const
{ {
return fvMeshSubsetPtr_.valid(); return fvMeshSubsetPtr_.valid();
@ -1396,6 +1501,44 @@ const labelList& Foam::fvMeshSubset::faceMap() const
} }
const labelList& Foam::fvMeshSubset::faceFlipMap() const
{
if (!faceFlipMapPtr_.valid())
{
const labelList& subToBaseFace = faceMap();
const labelList& subToBaseCell = cellMap();
faceFlipMapPtr_.reset(new labelList(subToBaseFace.size()));
labelList& faceFlipMap = faceFlipMapPtr_();
// Only exposed internal faces might be flipped (since we don't do
// any cell renumbering, just compacting)
label subInt = subMesh().nInternalFaces();
const labelList& subOwn = subMesh().faceOwner();
const labelList& own = baseMesh_.faceOwner();
for (label subFaceI = 0; subFaceI < subInt; subFaceI++)
{
faceFlipMap[subFaceI] = subToBaseFace[subFaceI]+1;
}
for (label subFaceI = subInt; subFaceI < subOwn.size(); subFaceI++)
{
label faceI = subToBaseFace[subFaceI];
if (subToBaseCell[subOwn[subFaceI]] == own[faceI])
{
faceFlipMap[subFaceI] = faceI+1;
}
else
{
faceFlipMap[subFaceI] = -faceI-1;
}
}
}
return faceFlipMapPtr_();
}
const labelList& Foam::fvMeshSubset::cellMap() const const labelList& Foam::fvMeshSubset::cellMap() const
{ {
checkCellSubset(); checkCellSubset();

View File

@ -41,10 +41,12 @@ Description
a face on a coupled patch 'losing' its neighbour it will move the a face on a coupled patch 'losing' its neighbour it will move the
face into the oldInternalFaces patch. face into the oldInternalFaces patch.
- if a user supplied patch is used the mapping becomes a problem. - if a user supplied patch is used it is up to the destination
Do the new faces get the value of the internal face they came from? patchField to handle exposed internal faces (mapping from face -1).
What if e.g. the user supplied patch is a fixedValue 0? So for now If not provided the default is to assign the internalField. All the
they get the face of existing patch face 0. basic patch field types (e.g. fixedValue) will give a warning and
preferably derived patch field types should be used that know how to
handle exposed faces (e.g. use uniformFixedValue instead of fixedValue)
SourceFiles SourceFiles
fvMeshSubset.C fvMeshSubset.C
@ -94,6 +96,9 @@ private:
//- Patch mapping array //- Patch mapping array
labelList patchMap_; labelList patchMap_;
//- Optional face mapping array with flip encoded
mutable autoPtr<labelList> faceFlipMapPtr_;
// Private Member Functions // Private Member Functions
@ -124,6 +129,13 @@ private:
//- Create zones for submesh //- Create zones for submesh
void subsetZones(); void subsetZones();
//- Helper: extract cells-to-remove from cells-to-keep
labelList getCellsToRemove
(
const labelList& region,
const label currentRegion
) const;
//- Disallow default bitwise copy construct //- Disallow default bitwise copy construct
fvMeshSubset(const fvMeshSubset&); fvMeshSubset(const fvMeshSubset&);
@ -174,6 +186,32 @@ public:
); );
//- Two step subsetting
//- Get labels of exposed faces.
// These are
// - internal faces that become boundary faces
// - coupled faces that become uncoupled (since one of the
// sides gets deleted)
labelList getExposedFaces
(
const labelList& region,
const label currentRegion,
const bool syncCouples = true
) const;
//- For every exposed face (from above getExposedFaces)
// use the supplied (existing!) patch
void setLargeCellSubset
(
const labelList& region,
const label currentRegion,
const labelList& exposedFaces,
const labelList& patchIDs,
const bool syncCouples = true
);
// Access // Access
//- Original mesh //- Original mesh
@ -196,6 +234,9 @@ public:
//- Return face map //- Return face map
const labelList& faceMap() const; const labelList& faceMap() const;
//- Return face map with sign to encode flipped faces
const labelList& faceFlipMap() const;
//- Return cell map //- Return cell map
const labelList& cellMap() const; const labelList& cellMap() const;
@ -224,7 +265,8 @@ public:
const GeometricField<Type, fvPatchField, volMesh>& const GeometricField<Type, fvPatchField, volMesh>&
) const; ) const;
//- Map surface field //- Map surface field. Optionally negates value if flipping
// a face (from exposing an internal face)
template<class Type> template<class Type>
static tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> static tmp<GeometricField<Type, fvsPatchField, surfaceMesh>>
interpolate interpolate
@ -232,14 +274,17 @@ public:
const GeometricField<Type, fvsPatchField, surfaceMesh>&, const GeometricField<Type, fvsPatchField, surfaceMesh>&,
const fvMesh& sMesh, const fvMesh& sMesh,
const labelList& patchMap, const labelList& patchMap,
const labelList& faceMap const labelList& cellMap,
const labelList& faceMap,
const bool negateIfFlipped = true
); );
template<class Type> template<class Type>
tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> tmp<GeometricField<Type, fvsPatchField, surfaceMesh>>
interpolate interpolate
( (
const GeometricField<Type, fvsPatchField, surfaceMesh>& const GeometricField<Type, fvsPatchField, surfaceMesh>&,
const bool negateIfFlipped = true
) const; ) const;
//- Map point field //- Map point field
@ -259,6 +304,20 @@ public:
( (
const GeometricField<Type, pointPatchField, pointMesh>& const GeometricField<Type, pointPatchField, pointMesh>&
) const; ) const;
//- Map dimensioned field
template<class Type>
static tmp<DimensionedField<Type, volMesh>>
interpolate
(
const DimensionedField<Type, volMesh>&,
const fvMesh& sMesh,
const labelList& cellMap
);
template<class Type>
tmp<DimensionedField<Type, volMesh>>
interpolate(const DimensionedField<Type, volMesh>&) const;
}; };

View File

@ -29,6 +29,7 @@ License
#include "emptyFvPatchFields.H" #include "emptyFvPatchFields.H"
#include "directFvPatchFieldMapper.H" #include "directFvPatchFieldMapper.H"
#include "directPointPatchFieldMapper.H" #include "directPointPatchFieldMapper.H"
#include "flipOp.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -178,7 +179,9 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate
const GeometricField<Type, fvsPatchField, surfaceMesh>& vf, const GeometricField<Type, fvsPatchField, surfaceMesh>& vf,
const fvMesh& sMesh, const fvMesh& sMesh,
const labelList& patchMap, const labelList& patchMap,
const labelList& faceMap const labelList& cellMap,
const labelList& faceMap,
const bool negateIfFlipped
) )
{ {
// 1. Create the complete field with dummy patch fields // 1. Create the complete field with dummy patch fields
@ -297,14 +300,24 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate
// Postprocess patch field for exposed faces // Postprocess patch field for exposed faces
fvsPatchField<Type>& pfld = bf[patchi]; fvsPatchField<Type>& pfld = bf[patchi];
const labelUList& fc = bf[patchi].patch().faceCells();
const labelList& own = vf.mesh().faceOwner();
forAll(pfld, i) forAll(pfld, i)
{ {
label baseFacei = faceMap[subPatch.start()+i]; label baseFacei = faceMap[subPatch.start()+i];
if (baseFacei < vf.primitiveField().size()) if (baseFacei < vf.primitiveField().size())
{ {
// Exposed internal face Type val = vf.internalField()[baseFacei];
pfld[i] = vf.primitiveField()[baseFacei];
if (cellMap[fc[i]] == own[baseFacei] || !negateIfFlipped)
{
pfld[i] = val;
}
else
{
pfld[i] = flipOp()(val);
}
} }
else else
{ {
@ -329,7 +342,8 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate
template<class Type> template<class Type>
tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate
( (
const GeometricField<Type, fvsPatchField, surfaceMesh>& sf const GeometricField<Type, fvsPatchField, surfaceMesh>& sf,
const bool negateIfFlipped
) const ) const
{ {
return interpolate return interpolate
@ -337,7 +351,9 @@ tmp<GeometricField<Type, fvsPatchField, surfaceMesh>> fvMeshSubset::interpolate
sf, sf,
subMesh(), subMesh(),
patchMap(), patchMap(),
faceMap() cellMap(),
faceMap(),
negateIfFlipped
); );
} }
@ -489,6 +505,47 @@ tmp<GeometricField<Type, pointPatchField, pointMesh>> fvMeshSubset::interpolate
} }
template<class Type>
tmp<DimensionedField<Type, volMesh>> fvMeshSubset::interpolate
(
const DimensionedField<Type, volMesh>& df,
const fvMesh& sMesh,
const labelList& cellMap
)
{
// Create the complete field from the pieces
tmp<DimensionedField<Type, volMesh>> tresF
(
new DimensionedField<Type, volMesh>
(
IOobject
(
"subset"+df.name(),
sMesh.time().timeName(),
sMesh,
IOobject::NO_READ,
IOobject::NO_WRITE
),
sMesh,
df.dimensions(),
Field<Type>(df, cellMap)
)
);
return tresF;
}
template<class Type>
tmp<DimensionedField<Type, volMesh>> fvMeshSubset::interpolate
(
const DimensionedField<Type, volMesh>& df
) const
{
return interpolate(df, subMesh(), cellMap());
}
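// [Editor's note] Illustrative usage sketch only -- not part of this commit.
// Given a subsetter that has been set up (see setCellSubset or
// setLargeCellSubset above), a registered dimensioned field can be subset in
// one call; the field name below is hypothetical:
//
//     const volScalarField::Internal& rhoU =
//         mesh.lookupObject<volScalarField::Internal>("rhoU");
//     tmp<volScalarField::Internal> subRhoU = subsetter.interpolate(rhoU);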
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam } // End namespace Foam

View File

@ -256,7 +256,8 @@ void Foam::multiDirRefinement::refineHex8
false false
), ),
List<refinementHistory::splitCell8>(0), List<refinementHistory::splitCell8>(0),
labelList(0) labelList(0),
false
) // refinement history ) // refinement history
); );

View File

@ -793,7 +793,7 @@ Foam::label Foam::hexRef8::findLevel
// Gets cell level such that the face has four points <= level. // Gets cell level such that the face has four points <= level.
Foam::label Foam::hexRef8::getAnchorLevel(const label facei) const Foam::label Foam::hexRef8::faceLevel(const label facei) const
{ {
const face& f = mesh_.faces()[facei]; const face& f = mesh_.faces()[facei];
@ -2218,7 +2218,8 @@ Foam::hexRef8::hexRef8
IOobject::AUTO_WRITE IOobject::AUTO_WRITE
), ),
List<refinementHistory::splitCell8>(0), List<refinementHistory::splitCell8>(0),
labelList(0) labelList(0),
false
), ),
faceRemover_(mesh_, GREAT), // merge boundary faces wherever possible faceRemover_(mesh_, GREAT), // merge boundary faces wherever possible
savedPointLevel_(0), savedPointLevel_(0),
@ -3475,7 +3476,7 @@ Foam::labelListList Foam::hexRef8::setRefinement
for (label facei = 0; facei < mesh_.nFaces(); facei++) for (label facei = 0; facei < mesh_.nFaces(); facei++)
{ {
faceAnchorLevel[facei] = getAnchorLevel(facei); faceAnchorLevel[facei] = faceLevel(facei);
} }
// -1 : no need to split face // -1 : no need to split face

View File

@ -411,7 +411,7 @@ public:
// Refinement // Refinement
//- Gets level such that the face has four points <= level. //- Gets level such that the face has four points <= level.
label getAnchorLevel(const label facei) const; label faceLevel(const label facei) const;
//- Given valid mesh and current cell level and proposed //- Given valid mesh and current cell level and proposed
// cells to refine calculate any clashes (due to 2:1) and return // cells to refine calculate any clashes (due to 2:1) and return

View File

@ -0,0 +1,339 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "IOobject.H"
#include "UList.H"
#include "hexRef8Data.H"
#include "mapPolyMesh.H"
#include "mapDistributePolyMesh.H"
#include "polyMesh.H"
#include "syncTools.H"
#include "refinementHistory.H"
#include "fvMesh.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::hexRef8Data::hexRef8Data(const IOobject& io)
{
{
IOobject rio(io);
rio.rename("cellLevel");
bool haveFile = returnReduce(rio.headerOk(), orOp<bool>());
if (haveFile)
{
Info<< "Reading hexRef8 data : " << rio.name() << endl;
cellLevelPtr_.reset(new labelIOList(rio));
}
}
{
IOobject rio(io);
rio.rename("pointLevel");
bool haveFile = returnReduce(rio.headerOk(), orOp<bool>());
if (haveFile)
{
Info<< "Reading hexRef8 data : " << rio.name() << endl;
pointLevelPtr_.reset(new labelIOList(rio));
}
}
{
IOobject rio(io);
rio.rename("level0Edge");
bool haveFile = returnReduce(rio.headerOk(), orOp<bool>());
if (haveFile)
{
Info<< "Reading hexRef8 data : " << rio.name() << endl;
level0EdgePtr_.reset(new uniformDimensionedScalarField(rio));
}
}
{
IOobject rio(io);
rio.rename("refinementHistory");
bool haveFile = returnReduce(rio.headerOk(), orOp<bool>());
if (haveFile)
{
Info<< "Reading hexRef8 data : " << rio.name() << endl;
refHistoryPtr_.reset(new refinementHistory(rio));
}
}
}
Foam::hexRef8Data::hexRef8Data
(
const IOobject& io,
const hexRef8Data& data,
const labelList& cellMap,
const labelList& pointMap
)
{
if (data.cellLevelPtr_.valid())
{
IOobject rio(io);
rio.rename(data.cellLevelPtr_().name());
cellLevelPtr_.reset
(
new labelIOList
(
rio,
UIndirectList<label>(data.cellLevelPtr_(), cellMap)()
)
);
}
if (data.pointLevelPtr_.valid())
{
IOobject rio(io);
rio.rename(data.pointLevelPtr_().name());
pointLevelPtr_.reset
(
new labelIOList
(
rio,
UIndirectList<label>(data.pointLevelPtr_(), pointMap)()
)
);
}
if (data.level0EdgePtr_.valid())
{
IOobject rio(io);
rio.rename(data.level0EdgePtr_().name());
level0EdgePtr_.reset
(
new uniformDimensionedScalarField(rio, data.level0EdgePtr_())
);
}
if (data.refHistoryPtr_.valid())
{
IOobject rio(io);
rio.rename(data.refHistoryPtr_().name());
refHistoryPtr_ = data.refHistoryPtr_().clone(rio, cellMap);
}
}
Foam::hexRef8Data::hexRef8Data
(
const IOobject& io,
const UPtrList<const labelList>& cellMaps,
const UPtrList<const labelList>& pointMaps,
const UPtrList<const hexRef8Data>& procDatas
)
{
const polyMesh& mesh = dynamic_cast<const polyMesh&>(io.db());
// cellLevel
if (procDatas[0].cellLevelPtr_.valid())
{
IOobject rio(io);
rio.rename(procDatas[0].cellLevelPtr_().name());
cellLevelPtr_.reset(new labelIOList(rio, mesh.nCells()));
labelList& cellLevel = cellLevelPtr_();
forAll(procDatas, procI)
{
const labelList& procCellLevel = procDatas[procI].cellLevelPtr_();
UIndirectList<label>(cellLevel, cellMaps[procI]) = procCellLevel;
}
}
// pointLevel
if (procDatas[0].pointLevelPtr_.valid())
{
IOobject rio(io);
rio.rename(procDatas[0].pointLevelPtr_().name());
pointLevelPtr_.reset(new labelIOList(rio, mesh.nPoints()));
labelList& pointLevel = pointLevelPtr_();
forAll(procDatas, procI)
{
const labelList& procPointLevel = procDatas[procI].pointLevelPtr_();
UIndirectList<label>(pointLevel, pointMaps[procI]) = procPointLevel;
}
}
// level0Edge
if (procDatas[0].level0EdgePtr_.valid())
{
IOobject rio(io);
rio.rename(procDatas[0].level0EdgePtr_().name());
level0EdgePtr_.reset
(
new uniformDimensionedScalarField
(
rio,
procDatas[0].level0EdgePtr_()
)
);
}
// refinementHistory
if (procDatas[0].refHistoryPtr_.valid())
{
IOobject rio(io);
rio.rename(procDatas[0].refHistoryPtr_().name());
UPtrList<const refinementHistory> procRefs(procDatas.size());
forAll(procDatas, i)
{
procRefs.set(i, &procDatas[i].refHistoryPtr_());
}
refHistoryPtr_.reset
(
new refinementHistory
(
rio,
cellMaps,
procRefs
)
);
}
}
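// [Editor's note] Illustrative usage sketch only -- not part of this commit.
// Reconstructing refinement data from processor pieces follows the same
// cell/point addressing used for the mesh itself (all names hypothetical):
//
//     UPtrList<const labelList> cellMaps(nProcs);
//     UPtrList<const labelList> pointMaps(nProcs);
//     UPtrList<const hexRef8Data> procData(nProcs);
//     // ... cellMaps.set(proci, &cellProcAddressing[proci]); etc. ...
//     hexRef8Data merged(io, cellMaps, pointMaps, procData);
//     merged.write();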
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::hexRef8Data::~hexRef8Data()
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void Foam::hexRef8Data::sync(const IOobject& io)
{
const polyMesh& mesh = dynamic_cast<const polyMesh&>(io.db());
bool hasCellLevel = returnReduce(cellLevelPtr_.valid(), orOp<bool>());
if (hasCellLevel && !cellLevelPtr_.valid())
{
IOobject rio(io);
rio.rename("cellLevel");
rio.readOpt() = IOobject::NO_READ;
cellLevelPtr_.reset(new labelIOList(rio, labelList(mesh.nCells(), 0)));
}
bool hasPointLevel = returnReduce(pointLevelPtr_.valid(), orOp<bool>());
if (hasPointLevel && !pointLevelPtr_.valid())
{
IOobject rio(io);
rio.rename("pointLevel");
rio.readOpt() = IOobject::NO_READ;
pointLevelPtr_.reset
(
new labelIOList(rio, labelList(mesh.nPoints(), 0))
);
}
bool hasLevel0Edge = returnReduce(level0EdgePtr_.valid(), orOp<bool>());
if (hasLevel0Edge)
{
// Get master length
scalar masterLen = level0EdgePtr_().value();
Pstream::scatter(masterLen);
if (!level0EdgePtr_.valid())
{
IOobject rio(io);
rio.rename("level0Edge");
rio.readOpt() = IOobject::NO_READ;
level0EdgePtr_.reset
(
new uniformDimensionedScalarField
(
rio,
dimensionedScalar("zero", dimLength, masterLen)
)
);
}
}
bool hasHistory = returnReduce(refHistoryPtr_.valid(), orOp<bool>());
if (hasHistory && !refHistoryPtr_.valid())
{
IOobject rio(io);
rio.rename("refinementHistory");
rio.readOpt() = IOobject::NO_READ;
refHistoryPtr_.reset(new refinementHistory(rio, mesh.nCells(), true));
}
}
void Foam::hexRef8Data::distribute(const mapDistributePolyMesh& map)
{
if (cellLevelPtr_.valid())
{
map.cellMap().distribute(cellLevelPtr_());
}
if (pointLevelPtr_.valid())
{
map.pointMap().distribute(pointLevelPtr_());
}
// No need to distribute the level0Edge
if (refHistoryPtr_.valid() && refHistoryPtr_().active())
{
refHistoryPtr_().distribute(map);
}
}
bool Foam::hexRef8Data::write() const
{
bool ok = true;
if (cellLevelPtr_.valid())
{
ok = ok && cellLevelPtr_().write();
}
if (pointLevelPtr_.valid())
{
ok = ok && pointLevelPtr_().write();
}
if (level0EdgePtr_.valid())
{
ok = ok && level0EdgePtr_().write();
}
if (refHistoryPtr_.valid())
{
ok = ok && refHistoryPtr_().write();
}
return ok;
}
// ************************************************************************* //

View File

@ -0,0 +1,136 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::hexRef8Data
Description
Various routines for reading/decomposing/reconstructing/distributing refinement
data.
SourceFiles
hexRef8Data.C
\*---------------------------------------------------------------------------*/
#ifndef hexRef8Data_H
#define hexRef8Data_H
#include "labelIOList.H"
#include "uniformDimensionedFields.H"
#include "UPtrList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Forward declaration of classes
class mapPolyMesh;
class mapDistributePolyMesh;
class refinementHistory;
class fvMesh;
/*---------------------------------------------------------------------------*\
Class hexRef8Data Declaration
\*---------------------------------------------------------------------------*/
class hexRef8Data
{
private:
// Private data
autoPtr<labelIOList> cellLevelPtr_;
autoPtr<labelIOList> pointLevelPtr_;
autoPtr<uniformDimensionedScalarField> level0EdgePtr_;
autoPtr<refinementHistory> refHistoryPtr_;
// Private Member Functions
//- Disallow default bitwise copy construct
hexRef8Data(const hexRef8Data&);
//- Disallow default bitwise assignment
void operator=(const hexRef8Data&);
public:
// Constructors
//- Construct read. Has special provision for only some processors
// having the files so can be used in redistribution.
hexRef8Data(const IOobject& io);
//- Construct as subset
hexRef8Data
(
const IOobject& io,
const hexRef8Data&,
const labelList& cellMap,
const labelList& pointMap
);
//- Construct from multiple hexRef8Data
hexRef8Data
(
const IOobject& io,
const UPtrList<const labelList>& cellMaps,
const UPtrList<const labelList>& pointMaps,
const UPtrList<const hexRef8Data>&
);
//- Destructor
~hexRef8Data();
// Member Functions
//- Parallel synchronise. This enforces valid objects on all processors
// (even if they don't have a mesh). Used by redistributePar.
void sync(const IOobject& io);
//- In-place distribute
void distribute(const mapDistributePolyMesh&);
//- Write
bool write() const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
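// [Editor's note] Illustrative usage sketch only -- not part of this commit.
// A redistributePar-style lifecycle for the class declared above; the IOobject
// location, mesh and distMap names are hypothetical:
//
//     IOobject io
//     (
//         "dummy",
//         mesh.facesInstance(),
//         polyMesh::meshSubDir,
//         mesh,
//         IOobject::READ_IF_PRESENT,
//         IOobject::NO_WRITE
//     );
//     hexRef8Data refData(io);        // reads whatever files are present
//     refData.sync(io);               // ensure valid objects on all processors
//     refData.distribute(distMap());  // follow the mesh redistribution
//     refData.write();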

View File

@ -23,12 +23,11 @@ License
\*---------------------------------------------------------------------------*/ \*---------------------------------------------------------------------------*/
#include "DynamicList.H"
#include "refinementHistory.H" #include "refinementHistory.H"
#include "ListOps.H"
#include "mapPolyMesh.H" #include "mapPolyMesh.H"
#include "mapDistributePolyMesh.H" #include "mapDistributePolyMesh.H"
#include "polyMesh.H" #include "polyMesh.H"
#include "syncTools.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
@ -138,7 +137,59 @@ Foam::refinementHistory::splitCell8::splitCell8(const splitCell8& sc)
{} {}
// * * * * * * * * * * * * * * * Friend Operators * * * * * * * * * * * * * // // * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * * //
void Foam::refinementHistory::splitCell8::operator=(const splitCell8& s)
{
//- Assignment operator since autoPtr otherwise 'steals' storage.
// Check for assignment to self
if (this == &s)
{
FatalErrorIn("splitCell8::operator=(const Foam::splitCell8&)")
<< "Attempted assignment to self"
<< abort(FatalError);
}
parent_ = s.parent_;
addedCellsPtr_.reset
(
s.addedCellsPtr_.valid()
? new FixedList<label, 8>(s.addedCellsPtr_())
: NULL
);
}
bool Foam::refinementHistory::splitCell8::operator==(const splitCell8& s) const
{
if (addedCellsPtr_.valid() != s.addedCellsPtr_.valid())
{
return false;
}
else if (parent_ != s.parent_)
{
return false;
}
else if (addedCellsPtr_.valid())
{
return addedCellsPtr_() == s.addedCellsPtr_();
}
else
{
return true;
}
}
bool Foam::refinementHistory::splitCell8::operator!=(const splitCell8& s) const
{
return !operator==(s);
}
// * * * * * * * * * * * * * * Friend Operators * * * * * * * * * * * * * * //
Foam::Istream& Foam::operator>>(Istream& is, refinementHistory::splitCell8& sc) Foam::Istream& Foam::operator>>(Istream& is, refinementHistory::splitCell8& sc)
{ {
@ -183,6 +234,8 @@ Foam::Ostream& Foam::operator<<
} }
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void Foam::refinementHistory::checkIndices() const void Foam::refinementHistory::checkIndices() const
{ {
// Check indices. // Check indices.
@ -319,11 +372,192 @@ void Foam::refinementHistory::markSplit
} }
// Mark index and all its descendants
void Foam::refinementHistory::mark
(
const label val,
const label index,
labelList& splitToVal
) const
{
splitToVal[index] = val;
const splitCell8& split = splitCells_[index];
if (split.addedCellsPtr_.valid())
{
const FixedList<label, 8>& splits = split.addedCellsPtr_();
forAll(splits, i)
{
if (splits[i] >= 0)
{
mark(val, splits[i], splitToVal);
}
}
}
}
Foam::label Foam::refinementHistory::markCommonCells
(
labelList& cellToCluster
) const
{
label clusterI = 0;
labelList splitToCluster(splitCells_.size(), -1);
// Pass1: find top of all clusters
forAll(visibleCells_, cellI)
{
label index = visibleCells_[cellI];
if (index >= 0)
{
// Find highest ancestor
while (splitCells_[index].parent_ != -1)
{
index = splitCells_[index].parent_;
}
// Mark tree with clusterI
if (splitToCluster[index] == -1)
{
mark(clusterI, index, splitToCluster);
clusterI++;
}
}
}
// Pass2: mark all cells with cluster
cellToCluster.setSize(visibleCells_.size(), -1);
forAll(visibleCells_, cellI)
{
label index = visibleCells_[cellI];
if (index >= 0)
{
cellToCluster[cellI] = splitToCluster[index];
}
}
return clusterI;
}
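// [Editor's note] Illustrative worked example only -- not part of this commit.
// markCommonCells gives every visible cell the index of the top-level ancestor
// tree ("cluster") it belongs to, walking parent_ pointers up to the root and
// marking the whole tree in one pass. Assuming, for illustration, that cells
// 0..7 were produced by splitting one original cell and cell 8 carries no
// history entry (-1):
//
//     visibleCells_ : (s0 s1 s2 s3 s4 s5 s6 s7 -1)  // indices into splitCells_
//     cellToCluster : (0  0  0  0  0  0  0  0  -1)
//     return value  : 1                             // one cluster found
//
// All eight siblings therefore share cluster 0, while the cell without history
// stays unconstrained.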
void Foam::refinementHistory::add
(
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const
{
const polyMesh& mesh = dynamic_cast<const polyMesh&>(db());
blockedFace.setSize(mesh.nFaces(), true);
// Find common parent for all cells
labelList cellToCluster;
markCommonCells(cellToCluster);
// Unblock all faces between cells of the same cluster
label nUnblocked = 0;
forAll(mesh.faceNeighbour(), faceI)
{
label ownCluster = cellToCluster[mesh.faceOwner()[faceI]];
label neiCluster = cellToCluster[mesh.faceNeighbour()[faceI]];
if (ownCluster != -1 && ownCluster == neiCluster)
{
if (blockedFace[faceI])
{
blockedFace[faceI] = false;
nUnblocked++;
}
}
}
if (refinementHistory::debug)
{
reduce(nUnblocked, sumOp<label>());
Info<< type() << " : unblocked " << nUnblocked << " faces" << endl;
}
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>());
}
void Foam::refinementHistory::apply
(
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{
const polyMesh& mesh = dynamic_cast<const polyMesh&>(db());
// Find common parent for all cells
labelList cellToCluster;
label nClusters = markCommonCells(cellToCluster);
// Force cells of the same cluster onto the same processor
labelList clusterToProc(nClusters, -1);
label nChanged = 0;
forAll(mesh.faceNeighbour(), faceI)
{
label own = mesh.faceOwner()[faceI];
label nei = mesh.faceNeighbour()[faceI];
label ownCluster = cellToCluster[own];
label neiCluster = cellToCluster[nei];
if (ownCluster != -1 && ownCluster == neiCluster)
{
if (clusterToProc[ownCluster] == -1)
{
clusterToProc[ownCluster] = decomposition[own];
}
if (decomposition[own] != clusterToProc[ownCluster])
{
decomposition[own] = clusterToProc[ownCluster];
nChanged++;
}
if (decomposition[nei] != clusterToProc[ownCluster])
{
decomposition[nei] = clusterToProc[ownCluster];
nChanged++;
}
}
}
if (refinementHistory::debug)
{
reduce(nChanged, sumOp<label>());
Info<< type() << " : changed decomposition on " << nChanged
<< " cells" << endl;
}
}
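// [Editor's note] Illustrative sketch only -- not part of this commit.
// add() and apply() bracket the decomposition: add() unblocks the faces between
// cells of the same refinement cluster so the decomposer can keep them
// together, and apply() afterwards reassigns cells so each cluster ends up on a
// single processor. A hypothetical caller:
//
//     boolList blockedFace;
//     PtrList<labelList> specifiedProcessorFaces;
//     labelList specifiedProcessor;
//     List<labelPair> explicitConnections;
//
//     history.add
//     (
//         blockedFace,
//         specifiedProcessorFaces,
//         specifiedProcessor,
//         explicitConnections
//     );
//     // ... run the decomposition method to obtain labelList decomposition ...
//     history.apply
//     (
//         blockedFace,
//         specifiedProcessorFaces,
//         specifiedProcessor,
//         explicitConnections,
//         decomposition
//     );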
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::refinementHistory::refinementHistory(const IOobject& io) Foam::refinementHistory::refinementHistory(const IOobject& io)
: :
regIOobject(io) regIOobject(io),
active_(false)
{ {
// Temporary warning // Temporary warning
if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED) if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED)
@ -345,12 +579,18 @@ Foam::refinementHistory::refinementHistory(const IOobject& io)
close(); close();
} }
// When running in redistributePar + READ_IF_PRESENT it can happen
// that some processors do have refinementHistory and some don't so
// test for active has to be outside of above condition.
active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0);
if (debug) if (debug)
{ {
Pout<< "refinementHistory::refinementHistory :" Pout<< "refinementHistory::refinementHistory :"
<< " constructed history from IOobject :" << " constructed history from IOobject :"
<< " splitCells:" << splitCells_.size() << " splitCells:" << splitCells_.size()
<< " visibleCells:" << visibleCells_.size() << " visibleCells:" << visibleCells_.size()
<< " active:" << active_
<< endl; << endl;
} }
} }
@ -360,10 +600,12 @@ Foam::refinementHistory::refinementHistory
( (
const IOobject& io, const IOobject& io,
const List<splitCell8>& splitCells, const List<splitCell8>& splitCells,
const labelList& visibleCells const labelList& visibleCells,
const bool active
) )
: :
regIOobject(io), regIOobject(io),
active_(active),
splitCells_(splitCells), splitCells_(splitCells),
freeSplitCells_(0), freeSplitCells_(0),
visibleCells_(visibleCells) visibleCells_(visibleCells)
@ -397,6 +639,66 @@ Foam::refinementHistory::refinementHistory
<< " constructed history from IOobject or components :" << " constructed history from IOobject or components :"
<< " splitCells:" << splitCells_.size() << " splitCells:" << splitCells_.size()
<< " visibleCells:" << visibleCells_.size() << " visibleCells:" << visibleCells_.size()
<< " active:" << active_
<< endl;
}
}
Foam::refinementHistory::refinementHistory
(
const IOobject& io,
const label nCells
)
:
regIOobject(io),
active_(false),
freeSplitCells_(0)
{
// Temporary warning
if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED)
{
WarningInFunction
<< "Specified IOobject::MUST_READ_IF_MODIFIED but class"
<< " does not support automatic rereading."
<< endl;
}
if
(
io.readOpt() == IOobject::MUST_READ
|| io.readOpt() == IOobject::MUST_READ_IF_MODIFIED
|| (io.readOpt() == IOobject::READ_IF_PRESENT && headerOk())
)
{
readStream(typeName) >> *this;
close();
}
else
{
visibleCells_.setSize(nCells);
splitCells_.setCapacity(nCells);
for (label cellI = 0; cellI < nCells; cellI++)
{
visibleCells_[cellI] = cellI;
splitCells_.append(splitCell8());
}
}
active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0);
// Check indices.
checkIndices();
if (debug)
{
Pout<< "refinementHistory::refinementHistory :"
<< " constructed history from IOobject or initial size :"
<< " splitCells:" << splitCells_.size()
<< " visibleCells:" << visibleCells_.size()
<< " active:" << active_
<< endl; << endl;
} }
} }
@ -406,13 +708,15 @@ Foam::refinementHistory::refinementHistory
Foam::refinementHistory::refinementHistory Foam::refinementHistory::refinementHistory
( (
const IOobject& io, const IOobject& io,
const label nCells const label nCells,
const bool active
) )
: :
regIOobject(io), regIOobject(io),
active_(active),
freeSplitCells_(0) freeSplitCells_(0)
{ {
// Temporary warning // Warn for MUST_READ_IF_MODIFIED
if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED) if (io.readOpt() == IOobject::MUST_READ_IF_MODIFIED)
{ {
WarningInFunction WarningInFunction
@ -452,6 +756,7 @@ Foam::refinementHistory::refinementHistory
<< " constructed history from IOobject or initial size :" << " constructed history from IOobject or initial size :"
<< " splitCells:" << splitCells_.size() << " splitCells:" << splitCells_.size()
<< " visibleCells:" << visibleCells_.size() << " visibleCells:" << visibleCells_.size()
<< " active:" << active_
<< endl; << endl;
} }
} }
@ -465,6 +770,7 @@ Foam::refinementHistory::refinementHistory
) )
: :
regIOobject(io), regIOobject(io),
active_(rh.active_),
splitCells_(rh.splitCells()), splitCells_(rh.splitCells()),
freeSplitCells_(rh.freeSplitCells()), freeSplitCells_(rh.freeSplitCells()),
visibleCells_(rh.visibleCells()) visibleCells_(rh.visibleCells())
@ -477,6 +783,126 @@ Foam::refinementHistory::refinementHistory
} }
// Construct from multiple
Foam::refinementHistory::refinementHistory
(
const IOobject& io,
const UPtrList<const labelList>& cellMaps,
const UPtrList<const refinementHistory>& refs
)
:
regIOobject(io),
active_(false)
{
if
(
io.readOpt() == IOobject::MUST_READ
|| io.readOpt() == IOobject::MUST_READ_IF_MODIFIED
|| (io.readOpt() == IOobject::READ_IF_PRESENT && headerOk())
)
{
WarningIn
(
"refinementHistory::refinementHistory(const IOobject&"
", const labelListList&, const PtrList<refinementHistory>&)"
) << "read option IOobject::MUST_READ, READ_IF_PRESENT or "
<< "MUST_READ_IF_MODIFIED"
<< " suggests that a read constructor would be more appropriate."
<< endl;
}
const polyMesh& mesh = dynamic_cast<const polyMesh&>(db());
// Determine offsets into splitCells
labelList offsets(refs.size()+1);
offsets[0] = 0;
forAll(refs, refI)
{
const DynamicList<splitCell8>& subSplits = refs[refI].splitCells();
offsets[refI+1] = offsets[refI]+subSplits.size();
}
// Construct merged splitCells
splitCells_.setSize(offsets.last());
forAll(refs, refI)
{
const DynamicList<splitCell8>& subSplits = refs[refI].splitCells();
forAll(subSplits, i)
{
splitCell8& newSplit = splitCells_[offsets[refI]+i];
// Copy
newSplit = subSplits[i];
// Offset indices
if (newSplit.parent_ >= 0)
{
newSplit.parent_ += offsets[refI];
}
if (newSplit.addedCellsPtr_.valid())
{
FixedList<label, 8>& splits = newSplit.addedCellsPtr_();
forAll(splits, i)
{
if (splits[i] >= 0)
{
splits[i] += offsets[refI];
}
}
}
}
}
// Construct merged visibleCells
visibleCells_.setSize(mesh.nCells(), -1);
forAll(refs, refI)
{
const labelList& cellMap = cellMaps[refI];
const labelList& subVis = refs[refI].visibleCells();
forAll(subVis, i)
{
label& newVis = visibleCells_[cellMap[i]];
newVis = subVis[i];
if (newVis >= 0)
{
newVis += offsets[refI];
}
}
}
// Is active if any of the refinementHistories is active (assumes active
// flag parallel synchronised)
active_ = false;
forAll(refs, refI)
{
if (refs[refI].active())
{
active_ = true;
break;
}
}
// Check indices.
checkIndices();
if (debug)
{
Pout<< "refinementHistory::refinementHistory :"
<< " constructed history from multiple refinementHistories :"
<< " splitCells:" << splitCells_.size()
<< " visibleCells:" << visibleCells_.size()
<< endl;
}
}
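A rough usage sketch of this merge constructor (illustrative, not part of the commit): reconstructParMesh-style merging of per-processor histories onto the reconstructed mesh, assuming the Foam namespace and that cellProcAddressing[proci] maps processor cells to reconstructed-mesh cells.

#include "refinementHistory.H"

// Illustrative sketch: merge per-processor refinement histories using the
// constructor above.
autoPtr<refinementHistory> mergeHistories
(
    const polyMesh& reconstructedMesh,
    const PtrList<labelList>& cellProcAddressing,
    const PtrList<refinementHistory>& procHistories
)
{
    UPtrList<const labelList> cellMaps(procHistories.size());
    UPtrList<const refinementHistory> refs(procHistories.size());

    forAll(procHistories, proci)
    {
        cellMaps.set(proci, &cellProcAddressing[proci]);
        refs.set(proci, &procHistories[proci]);
    }

    return autoPtr<refinementHistory>
    (
        new refinementHistory
        (
            IOobject
            (
                "refinementHistory",
                reconstructedMesh.facesInstance(),
                polyMesh::meshSubDir,
                reconstructedMesh,
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            cellMaps,
            refs
        )
    );
}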
// Construct from Istream // Construct from Istream
Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is) Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is)
: :
@ -485,6 +911,8 @@ Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is)
freeSplitCells_(0), freeSplitCells_(0),
visibleCells_(is) visibleCells_(is)
{ {
active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0);
// Check indices. // Check indices.
checkIndices(); checkIndices();
@ -501,6 +929,192 @@ Foam::refinementHistory::refinementHistory(const IOobject& io, Istream& is)
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
Foam::autoPtr<Foam::refinementHistory> Foam::refinementHistory::clone
(
const IOobject& io,
// Per visible cell the processor it is going to
const labelList& decomposition,
// Per splitCell entry the processor it moves to
const labelList& splitCellProc,
// Per splitCell entry the number of live cells that move to that processor
const labelList& splitCellNum,
const label procI,
// From old to new splitCells
labelList& oldToNewSplit
) const
{
oldToNewSplit.setSize(splitCells_.size());
oldToNewSplit = -1;
// Compacted splitCells
DynamicList<splitCell8> newSplitCells(splitCells_.size());
// Loop over all entries. Note: could recurse like countProc so only
// visit used entries but is probably not worth it.
forAll(splitCells_, index)
{
if (splitCellProc[index] == procI && splitCellNum[index] == 8)
{
// Entry moves in its whole to procI
oldToNewSplit[index] = newSplitCells.size();
newSplitCells.append(splitCells_[index]);
}
}
// Add live cells that are subsetted.
forAll(visibleCells_, cellI)
{
label index = visibleCells_[cellI];
if (index >= 0 && decomposition[cellI] == procI)
{
label parent = splitCells_[index].parent_;
// Create new splitCell with parent
oldToNewSplit[index] = newSplitCells.size();
newSplitCells.append(splitCell8(parent));
}
}
//forAll(oldToNewSplit, index)
//{
// Pout<< "old:" << index << " new:" << oldToNewSplit[index]
// << endl;
//}
newSplitCells.shrink();
// Renumber contents of newSplitCells
forAll(newSplitCells, index)
{
splitCell8& split = newSplitCells[index];
if (split.parent_ >= 0)
{
split.parent_ = oldToNewSplit[split.parent_];
}
if (split.addedCellsPtr_.valid())
{
FixedList<label, 8>& splits = split.addedCellsPtr_();
forAll(splits, i)
{
if (splits[i] >= 0)
{
splits[i] = oldToNewSplit[splits[i]];
}
}
}
}
// Count number of cells
label nSub = 0;
forAll(decomposition, cellI)
{
if (decomposition[cellI] == procI)
{
nSub++;
}
}
labelList newVisibleCells(nSub);
nSub = 0;
forAll(visibleCells_, cellI)
{
if (decomposition[cellI] == procI)
{
label index = visibleCells_[cellI];
if (index >= 0)
{
index = oldToNewSplit[index];
}
newVisibleCells[nSub++] = index;
}
}
return autoPtr<refinementHistory>
(
new refinementHistory
(
io,
newSplitCells,
newVisibleCells,
active_
)
);
}
Foam::autoPtr<Foam::refinementHistory> Foam::refinementHistory::clone
(
const IOobject& io,
const labelList& cellMap
) const
{
if (active_)
{
// Mark selected cells with '1'
labelList decomposition(visibleCells_.size(), 0);
forAll(cellMap, i)
{
decomposition[cellMap[i]] = 1;
}
// Per splitCell entry the processor it moves to
labelList splitCellProc(splitCells_.size(), -1);
// Per splitCell entry the number of live cells that move to that
// processor
labelList splitCellNum(splitCells_.size(), 0);
forAll(visibleCells_, cellI)
{
label index = visibleCells_[cellI];
if (index >= 0)
{
countProc
(
splitCells_[index].parent_,
decomposition[cellI],
splitCellProc,
splitCellNum
);
}
}
labelList oldToNewSplit;
return clone
(
io,
decomposition,
splitCellProc,
splitCellNum,
1, //procI,
oldToNewSplit
);
}
else
{
return autoPtr<refinementHistory>
(
new refinementHistory
(
io,
DynamicList<splitCell8>(0),
labelList(0),
false
)
);
}
}
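A minimal sketch of the new subsetting interface (illustrative, assuming the Foam namespace): cellMap is the sub-mesh-to-base-mesh cell addressing, e.g. fvMeshSubset::cellMap(), and the IOobject registers the clone on the sub-mesh.

#include "refinementHistory.H"

// Illustrative sketch: carry the refinement history over to a subset mesh
autoPtr<refinementHistory> subsetHistory
(
    const refinementHistory& baseHistory,
    const polyMesh& subMesh,
    const labelList& cellMap    // subMesh cell -> base mesh cell
)
{
    return baseHistory.clone
    (
        IOobject
        (
            "refinementHistory",
            subMesh.facesInstance(),
            polyMesh::meshSubDir,
            subMesh,
            IOobject::NO_READ,
            IOobject::NO_WRITE
        ),
        cellMap
    );
}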
void Foam::refinementHistory::resize(const label size) void Foam::refinementHistory::resize(const label size)
{ {
label oldSize = visibleCells_.size(); label oldSize = visibleCells_.size();
@ -698,9 +1312,6 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map)
} }
} }
//Pout<< "refinementHistory::distribute :"
// << " destination:" << destination << endl;
// Per splitCell entry the processor it moves to // Per splitCell entry the processor it moves to
labelList splitCellProc(splitCells_.size(), -1); labelList splitCellProc(splitCells_.size(), -1);
// Per splitCell entry the number of live cells that move to that processor // Per splitCell entry the number of live cells that move to that processor
@ -746,21 +1357,11 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map)
forAll(splitCells_, index) forAll(splitCells_, index)
{ {
// Pout<< "oldCell:" << index
// << " proc:" << splitCellProc[index]
// << " nCells:" << splitCellNum[index]
// << endl;
if (splitCellProc[index] == proci && splitCellNum[index] == 8) if (splitCellProc[index] == proci && splitCellNum[index] == 8)
{ {
// Entry moves in its whole to proci // Entry moves in its whole to proci
oldToNew[index] = newSplitCells.size(); oldToNew[index] = newSplitCells.size();
newSplitCells.append(splitCells_[index]); newSplitCells.append(splitCells_[index]);
//Pout<< "Added oldCell " << index
// << " info " << newSplitCells.last()
// << " at position " << newSplitCells.size()-1
// << endl;
} }
} }
@ -773,10 +1374,6 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map)
{ {
label parent = splitCells_[index].parent_; label parent = splitCells_[index].parent_;
//Pout<< "Adding refined cell " << celli
// << " since moves to "
// << proci << " old parent:" << parent << endl;
// Create new splitCell with parent // Create new splitCell with parent
oldToNew[index] = newSplitCells.size(); oldToNew[index] = newSplitCells.size();
newSplitCells.append(splitCell8(parent)); newSplitCells.append(splitCell8(parent));
@ -849,7 +1446,9 @@ void Foam::refinementHistory::distribute(const mapDistributePolyMesh& map)
// Remove all entries. Leave storage intact. // Remove all entries. Leave storage intact.
splitCells_.clear(); splitCells_.clear();
visibleCells_.setSize(map.mesh().nCells()); const polyMesh& mesh = dynamic_cast<const polyMesh&>(db());
visibleCells_.setSize(mesh.nCells());
visibleCells_ = -1; visibleCells_ = -1;
for (label proci = 0; proci < Pstream::nProcs(); proci++) for (label proci = 0; proci < Pstream::nProcs(); proci++)
@ -1138,6 +1737,17 @@ void Foam::refinementHistory::combineCells
} }
bool Foam::refinementHistory::read()
{
bool ok = readData(readStream(typeName));
close();
active_ = (returnReduce(visibleCells_.size(), sumOp<label>()) > 0);
return ok;
}
bool Foam::refinementHistory::readData(Istream& is) bool Foam::refinementHistory::readData(Istream& is)
{ {
is >> *this; is >> *this;
View File
@ -74,9 +74,10 @@ SourceFiles
#include "DynamicList.H" #include "DynamicList.H"
#include "labelList.H" #include "labelList.H"
#include "FixedList.H" #include "FixedList.H"
#include "SLList.H"
#include "autoPtr.H" #include "autoPtr.H"
#include "regIOobject.H" #include "regIOobject.H"
#include "boolList.H"
#include "labelPair.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -122,50 +123,11 @@ public:
splitCell8(const splitCell8&); splitCell8(const splitCell8&);
//- Copy operator since autoPtr otherwise 'steals' storage. //- Copy operator since autoPtr otherwise 'steals' storage.
void operator=(const splitCell8& s) void operator=(const splitCell8& s);
{
// Check for assignment to self
if (this == &s)
{
FatalErrorInFunction
<< "Attempted assignment to self"
<< abort(FatalError);
}
parent_ = s.parent_; bool operator==(const splitCell8& s) const;
addedCellsPtr_.reset bool operator!=(const splitCell8& s) const;
(
s.addedCellsPtr_.valid()
? new FixedList<label, 8>(s.addedCellsPtr_())
: NULL
);
}
bool operator==(const splitCell8& s) const
{
if (addedCellsPtr_.valid() != s.addedCellsPtr_.valid())
{
return false;
}
else if (parent_ != s.parent_)
{
return false;
}
else if (addedCellsPtr_.valid())
{
return addedCellsPtr_() == s.addedCellsPtr_();
}
else
{
return true;
}
}
bool operator!=(const splitCell8& s) const
{
return !operator==(s);
}
friend Istream& operator>>(Istream&, splitCell8&); friend Istream& operator>>(Istream&, splitCell8&);
friend Ostream& operator<<(Ostream&, const splitCell8&); friend Ostream& operator<<(Ostream&, const splitCell8&);
@ -176,6 +138,9 @@ private:
// Private data // Private data
//- Is active?
bool active_;
//- Storage for splitCells //- Storage for splitCells
DynamicList<splitCell8> splitCells_; DynamicList<splitCell8> splitCells_;
@ -226,6 +191,15 @@ private:
labelList& splitCellNum labelList& splitCellNum
) const; ) const;
// For distribution:
//- Mark index and all its descendants
void mark(const label, const label, labelList&) const;
//- Mark cells according to top parent. Return number of clusters
// (set of cells originating from same parent)
label markCommonCells(labelList& cellToCluster) const;
public: public:
// Declare name of the class and its debug switch // Declare name of the class and its debug switch
@ -234,25 +208,47 @@ public:
// Constructors // Constructors
//- Construct (read) given an IOobject //- Construct (read) given an IOobject. If global number of visible
// cells > 0 becomes active
refinementHistory(const IOobject&); refinementHistory(const IOobject&);
//- Construct (read) or construct null //- Construct (read) or construct from components
refinementHistory refinementHistory
( (
const IOobject&, const IOobject&,
const List<splitCell8>& splitCells, const List<splitCell8>& splitCells,
const labelList& visibleCells const labelList& visibleCells,
const bool active
); );
//- Construct (read) or construct from initial number of cells //- Construct (read) or construct from initial number of cells
// (all visible) // (all visible). If global number of visible
// cells > 0 becomes active
refinementHistory(const IOobject&, const label nCells); refinementHistory(const IOobject&, const label nCells);
//- Construct (read) or construct from initial number of cells
// (all visible) and active flag
refinementHistory
(
const IOobject&,
const label nCells,
const bool active
);
//- Construct as copy //- Construct as copy
refinementHistory(const IOobject&, const refinementHistory&); refinementHistory(const IOobject&, const refinementHistory&);
//- Construct from Istream //- Construct from multiple refinement histories. If global number of
// visible cells > 0 becomes active
refinementHistory
(
const IOobject&,
const UPtrList<const labelList>& cellMaps,
const UPtrList<const refinementHistory>&
);
//- Construct from Istream. If global number of
// visible cells > 0 becomes active
refinementHistory(const IOobject&, Istream&); refinementHistory(const IOobject&, Istream&);
@ -278,12 +274,16 @@ public:
return freeSplitCells_; return freeSplitCells_;
} }
//- Is there unrefinement history. Note that this will fall over if //- Is there unrefinement history?
// there are 0 cells in the mesh. But this gives problems with
// lots of other programs anyway.
bool active() const bool active() const
{ {
return visibleCells_.size() > 0; return active_;
}
//- Is there unrefinement history?
bool& active()
{
return active_;
} }
//- Get parent of cell //- Get parent of cell
@ -314,6 +314,23 @@ public:
const labelList& combinedCells const labelList& combinedCells
); );
//- Low level clone
autoPtr<refinementHistory> clone
(
const IOobject& io,
const labelList& decomposition,
const labelList& splitCellProc,
const labelList& splitCellNum,
const label procI,
labelList& oldToNewSplit
) const;
//- Create clone from subset
autoPtr<refinementHistory> clone
(
const IOobject& io,
const labelList& cellMap
) const;
//- Update numbering for mesh changes //- Update numbering for mesh changes
void updateMesh(const mapPolyMesh&); void updateMesh(const mapPolyMesh&);
@ -343,20 +360,44 @@ public:
void writeDebug() const; void writeDebug() const;
//- ReadData function required for regIOobject read operation //- Read object. If global number of visible cells > 0 becomes active
virtual bool read();
//- ReadData function required for regIOobject read operation. Note:
// does not do a reduction - does not set active_ flag
virtual bool readData(Istream&); virtual bool readData(Istream&);
//- WriteData function required for regIOobject write operation //- WriteData function required for regIOobject write operation
virtual bool writeData(Ostream&) const; virtual bool writeData(Ostream&) const;
// Helpers for decompositionConstraint
// Friend Functions //- Add my decomposition constraints
void add
(
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const;
//- Apply any additional post-decomposition constraints
void apply
(
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const;
// Friend Operators
// IOstream Operators // IOstream Operators
//- Istream operator. Note: does not do a reduction - does not set
// active_ flag
friend Istream& operator>>(Istream&, refinementHistory&); friend Istream& operator>>(Istream&, refinementHistory&);
friend Ostream& operator<<(Ostream&, const refinementHistory&); friend Ostream& operator<<(Ostream&, const refinementHistory&);
}; };
View File
@ -2,7 +2,6 @@ fvMesh/fvMeshGeometry.C
fvMesh/fvMesh.C fvMesh/fvMesh.C
fvMesh/singleCellFvMesh/singleCellFvMesh.C fvMesh/singleCellFvMesh/singleCellFvMesh.C
fvMesh/fvMeshSubset/fvMeshSubset.C
fvBoundaryMesh = fvMesh/fvBoundaryMesh fvBoundaryMesh = fvMesh/fvBoundaryMesh
$(fvBoundaryMesh)/fvBoundaryMesh.C $(fvBoundaryMesh)/fvBoundaryMesh.C
View File
@ -2,7 +2,7 @@
========= | ========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration | \\ / O peration |
\\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation | \\/ M anipulation |
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
License License
@ -96,6 +96,12 @@ Foam::fixedFluxPressureFvPatchScalarField::fixedFluxPressureFvPatchScalarField
patchInternalField() + gradient()*(patch().nf() & patch().delta()) patchInternalField() + gradient()*(patch().nf() & patch().delta())
); );
} }
else
{
// Enforce mapping of values so we have a valid starting value. This
// constructor is used when reconstructing fields
this->map(ptf, mapper);
}
} }
View File
@ -175,7 +175,7 @@ void Foam::meshRefinement::calcNeighbourData
label own = faceCells[i]; label own = faceCells[i];
label ownLevel = cellLevel[own]; label ownLevel = cellLevel[own];
label faceLevel = meshCutter_.getAnchorLevel(pp.start()+i); label faceLevel = meshCutter_.faceLevel(pp.start()+i);
// Normal distance from face centre to cell centre // Normal distance from face centre to cell centre
scalar d = ((faceCentres[i] - cellCentres[own]) & fn); scalar d = ((faceCentres[i] - cellCentres[own]) & fn);
View File
@ -27,6 +27,7 @@ License
#include "AMIMethod.H" #include "AMIMethod.H"
#include "meshTools.H" #include "meshTools.H"
#include "mapDistribute.H" #include "mapDistribute.H"
#include "flipOp.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
@ -933,7 +934,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update
tgtMagSf_, tgtMagSf_,
triMode_, triMode_,
reverseTarget_, reverseTarget_,
requireMatch_ requireMatch_ && (lowWeightCorrection_ < 0)
) )
); );
@ -978,27 +979,33 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update
// send data back to originating procs. Note that contributions // send data back to originating procs. Note that contributions
// from different processors get added (ListAppendEqOp) // from different processors get added (ListAppendEqOp)
mapDistribute::distribute mapDistributeBase::distribute
( (
Pstream::nonBlocking, Pstream::nonBlocking,
List<labelPair>(), List<labelPair>(),
tgtPatch.size(), tgtPatch.size(),
map.constructMap(), map.constructMap(),
false, // has flip
map.subMap(), map.subMap(),
false, // has flip
tgtAddress_, tgtAddress_,
ListAppendEqOp<label>(), ListAppendEqOp<label>(),
flipOp(), // flip operation
labelList() labelList()
); );
mapDistribute::distribute mapDistributeBase::distribute
( (
Pstream::nonBlocking, Pstream::nonBlocking,
List<labelPair>(), List<labelPair>(),
tgtPatch.size(), tgtPatch.size(),
map.constructMap(), map.constructMap(),
false,
map.subMap(), map.subMap(),
false,
tgtWeights_, tgtWeights_,
ListAppendEqOp<scalar>(), ListAppendEqOp<scalar>(),
flipOp(),
scalarList() scalarList()
); );
@ -1050,7 +1057,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update
tgtMagSf_, tgtMagSf_,
triMode_, triMode_,
reverseTarget_, reverseTarget_,
requireMatch_ requireMatch_ && (lowWeightCorrection_ < 0)
) )
); );
View File
@ -97,7 +97,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcOverlappingProcs
forAll(procBb, proci) forAll(procBb, proci)
{ {
const List<treeBoundBox>& bbs = procBb[proci]; const treeBoundBoxList& bbs = procBb[proci];
forAll(bbs, bbI) forAll(bbs, bbI)
{ {
View File
@ -200,7 +200,7 @@ void Foam::AMIMethod<SourcePatch, TargetPatch>::resetTree()
// Clear the old octree // Clear the old octree
treePtr_.clear(); treePtr_.clear();
treeBoundBox bb(tgtPatch_.points()); treeBoundBox bb(tgtPatch_.points(), tgtPatch_.meshPoints());
bb.inflate(0.01); bb.inflate(0.01);
if (!treePtr_.valid()) if (!treePtr_.valid())
View File
@ -310,16 +310,14 @@ void Foam::directAMI<SourcePatch, TargetPatch>::calculate
forAll(srcAddr, i) forAll(srcAddr, i)
{ {
scalar magSf = this->srcMagSf_[i]; scalar magSf = this->srcMagSf_[i];
// srcWeights[i] = scalarList(srcAddr[i].size(), magSf);
srcWeights[i] = scalarList(1, magSf);
srcAddress[i].transfer(srcAddr[i]); srcAddress[i].transfer(srcAddr[i]);
srcWeights[i] = scalarList(1, magSf);
} }
forAll(tgtAddr, i) forAll(tgtAddr, i)
{ {
scalar magSf = this->tgtMagSf_[i]; scalar magSf = this->tgtMagSf_[i];
// tgtWeights[i] = scalarList(tgtAddr[i].size(), magSf);
tgtWeights[i] = scalarList(1, magSf);
tgtAddress[i].transfer(tgtAddr[i]); tgtAddress[i].transfer(tgtAddr[i]);
tgtWeights[i] = scalarList(1, magSf);
} }
} }
View File
@ -61,15 +61,18 @@ void Foam::mappedPatchBase::distribute
} }
default: default:
{ {
map().distribute mapDistributeBase::distribute
( (
Pstream::defaultCommsType, Pstream::defaultCommsType,
map().schedule(), map().schedule(),
map().constructSize(), map().constructSize(),
map().subMap(), map().subMap(),
false,
map().constructMap(), map().constructMap(),
false,
lst, lst,
cop, cop,
flipOp(),
Type(Zero) Type(Zero)
); );
} }
@ -117,15 +120,18 @@ void Foam::mappedPatchBase::reverseDistribute
default: default:
{ {
label cSize = sampleSize(); label cSize = sampleSize();
map().distribute mapDistributeBase::distribute
( (
Pstream::defaultCommsType, Pstream::defaultCommsType,
map().schedule(), map().schedule(),
cSize, cSize,
map().constructMap(), map().constructMap(),
false,
map().subMap(), map().subMap(),
false,
lst, lst,
cop, cop,
flipOp(),
Type(Zero) Type(Zero)
); );
break; break;
View File
@ -1,3 +1,4 @@
decompositionModel.C
fvFieldDecomposer.C fvFieldDecomposer.C
LIB = $(FOAM_LIBBIN)/libdecompose LIB = $(FOAM_LIBBIN)/libdecompose
View File
@ -1,9 +1,11 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude -I$(LIB_SRC)/lagrangian/basic/lnInclude
LIB_LIBS = \ LIB_LIBS = \
-lfiniteVolume \ -lfiniteVolume \
-lmeshTools \ -lmeshTools \
-ldecompositionMethods \
-llagrangian -llagrangian
View File
@ -0,0 +1,164 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2014-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "decompositionModel.H"
#include "polyMesh.H"
#include "Time.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam
{
defineTypeNameAndDebug(decompositionModel, 0);
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionModel::decompositionModel
(
const polyMesh& mesh,
const fileName& decompDictFile
)
:
MeshObject
<
polyMesh,
Foam::UpdateableMeshObject,
decompositionModel
>(mesh),
IOdictionary
(
selectIO
(
IOobject
(
"decomposeParDict",
mesh.time().system(),
mesh.local(),
mesh.db(),
IOobject::MUST_READ,
IOobject::NO_WRITE,
false //io.registerObject()
),
decompDictFile
)
)
{}
Foam::decompositionModel::decompositionModel
(
const polyMesh& mesh,
const dictionary& dict,
const fileName& decompDictFile
)
:
MeshObject
<
polyMesh,
Foam::UpdateableMeshObject,
decompositionModel
>(mesh),
IOdictionary
(
selectIO
(
IOobject
(
"decomposeParDict",
mesh.time().system(),
mesh.local(),
mesh.db(),
(dict.size() ? IOobject::NO_READ : IOobject::MUST_READ),
IOobject::NO_WRITE,
false //io.registerObject()
),
decompDictFile
),
dict
)
{}
// * * * * * * * * * * * * * * * * * Selectors * * * * * * * * * * * * * * * //
const Foam::decompositionModel& Foam::decompositionModel::New
(
const polyMesh& mesh,
const fileName& decompDictFile
)
{
return
MeshObject
<
polyMesh,
Foam::UpdateableMeshObject,
decompositionModel
>::New(mesh, decompDictFile);
}
const Foam::decompositionModel& Foam::decompositionModel::New
(
const polyMesh& mesh,
const dictionary& dict,
const fileName& decompDictFile
)
{
return
MeshObject
<
polyMesh,
Foam::UpdateableMeshObject,
decompositionModel
>::New(mesh, dict, decompDictFile);
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
Foam::IOobject Foam::decompositionModel::selectIO
(
const IOobject& io,
const fileName& f
)
{
return
(
f.size()
? IOobject // construct from filePath instead
(
f,
io.db(),
io.readOpt(),
io.writeOpt(),
io.registerObject()
)
: io
);
}
// ************************************************************************* //
View File
@ -0,0 +1,145 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2014-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::decompositionModel
Description
MeshObject wrapper of decompositionMethod
SourceFiles
decompositionModel.C
\*---------------------------------------------------------------------------*/
#ifndef decompositionModel_H
#define decompositionModel_H
#include "IOdictionary.H"
#include "MeshObject.H"
#include "decompositionMethod.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Forward declaration of classes
class mapPolyMesh;
class polyMesh;
/*---------------------------------------------------------------------------*\
Class decompositionModel Declaration
\*---------------------------------------------------------------------------*/
class decompositionModel
:
public MeshObject
<
polyMesh,
UpdateableMeshObject,
decompositionModel
>,
public IOdictionary
{
// Private data
mutable autoPtr<decompositionMethod> decomposerPtr_;
public:
// Declare name of the class and its debug switch
ClassName("decompositionModel");
// Selectors
//- Read (optionally from absolute path) & register on mesh
static const decompositionModel& New
(
const polyMesh& mesh,
const fileName& decompDictFile = ""
);
//- Read (optionally from supplied dictionary) & register on mesh
static const decompositionModel& New
(
const polyMesh& mesh,
const dictionary& dict,
const fileName& decompDictFile = ""
);
// Constructors
//- Construct from mesh; optionally supply the path to an alternative decomposeParDict
decompositionModel(const polyMesh&, const fileName& = "");
//- Construct from mesh and dictionary; optionally supply the path to an alternative decomposeParDict
decompositionModel
(
const polyMesh&,
const dictionary& dict,
const fileName& = ""
);
// Member functions
decompositionMethod& decomposer() const
{
if (!decomposerPtr_.valid())
{
decomposerPtr_ = decompositionMethod::New(*this);
}
return decomposerPtr_();
}
//- Helper: return the IOobject, or one constructed from the given absolute path if it is non-empty
static IOobject selectIO(const IOobject&, const fileName&);
// UpdateableMeshObject
virtual bool movePoints()
{
return false;
}
virtual void updateMesh(const mapPolyMesh&)
{}
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
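A minimal usage sketch (illustrative, assuming the Foam namespace and standard includes): the MeshObject caches the dictionary and the lazily constructed decompositionMethod on the mesh, so repeated callers share one instance. The two-argument decompose overload (uniform point weights) is assumed here.

#include "decompositionModel.H"

// Illustrative sketch: look up (or read) the decomposeParDict wrapper on the
// mesh and run the decomposition method it selects on the cell centres.
labelList decomposeCells(const polyMesh& mesh)
{
    const decompositionModel& model = decompositionModel::New(mesh);

    // Constructed on first use from the cached dictionary
    decompositionMethod& method = model.decomposer();

    return method.decompose(mesh, mesh.cellCentres());
}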
View File
@ -7,4 +7,15 @@ multiLevelDecomp/multiLevelDecomp.C
structuredDecomp/structuredDecomp.C structuredDecomp/structuredDecomp.C
noDecomp/noDecomp.C noDecomp/noDecomp.C
decompositionConstraints = decompositionConstraints
$(decompositionConstraints)/decompositionConstraint/decompositionConstraint.C
$(decompositionConstraints)/preserveBaffles/preserveBafflesConstraint.C
$(decompositionConstraints)/preserveFaceZones/preserveFaceZonesConstraint.C
$(decompositionConstraints)/preservePatches/preservePatchesConstraint.C
$(decompositionConstraints)/singleProcessorFaceSets/singleProcessorFaceSetsConstraint.C
$(decompositionConstraints)/refinementHistory/refinementHistoryConstraint.C
LIB = $(FOAM_LIBBIN)/libdecompositionMethods LIB = $(FOAM_LIBBIN)/libdecompositionMethods
View File
@ -1,7 +1,9 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/meshTools/lnInclude \ -I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude -I$(LIB_SRC)/finiteVolume/lnInclude
LIB_LIBS = \ LIB_LIBS = \
-lmeshTools \ -lmeshTools \
-ldynamicMesh \
-lfiniteVolume -lfiniteVolume
View File
@ -0,0 +1,86 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "decompositionConstraint.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam
{
defineTypeNameAndDebug(decompositionConstraint, 1);
defineRunTimeSelectionTable(decompositionConstraint, dictionary);
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionConstraint::decompositionConstraint
(
const dictionary& constraintsDict,
const word& type
)
:
//coeffDict_(constraintsDict.subOrEmptyDict(type + "Coeffs"))
coeffDict_(constraintsDict)
{}
// * * * * * * * * * * * * * * * * Selectors * * * * * * * * * * * * * * * * //
Foam::autoPtr<Foam::decompositionConstraint>
Foam::decompositionConstraint::New
(
const dictionary& dict,
const word& modelType
)
{
Info<< "Selecting decompositionConstraint " << modelType << endl;
dictionaryConstructorTable::iterator cstrIter =
dictionaryConstructorTablePtr_->find(modelType);
if (cstrIter == dictionaryConstructorTablePtr_->end())
{
FatalIOErrorInFunction(dict)
<< "Unknown decompositionConstraint type "
<< modelType << nl << nl
<< "Valid decompositionConstraint types:" << endl
<< dictionaryConstructorTablePtr_->sortedToc()
<< exit(FatalIOError);
}
return autoPtr<decompositionConstraint>
(
cstrIter()(dict, modelType)
);
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::decompositionConstraint::~decompositionConstraint()
{}
// ************************************************************************* //
View File
@ -0,0 +1,155 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::decompositionConstraint
Description
Abstract base class for handling decomposition constraints.
SourceFiles
decompositionConstraint.C
\*---------------------------------------------------------------------------*/
#ifndef decompositionConstraint_H
#define decompositionConstraint_H
#include "dictionary.H"
#include "runTimeSelectionTables.H"
#include "boolList.H"
#include "labelList.H"
#include "labelPair.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Forward declaration of classes
class polyMesh;
/*---------------------------------------------------------------------------*\
Class decompositionConstraint Declaration
\*---------------------------------------------------------------------------*/
class decompositionConstraint
{
protected:
// Protected data
//- Model coefficients dictionary
dictionary coeffDict_;
private:
// Private Member Functions
//- Disallow default bitwise copy construct
decompositionConstraint(const decompositionConstraint&);
//- Disallow default bitwise assignment
void operator=(const decompositionConstraint&);
public:
//- Runtime type information
TypeName("decompositionConstraint");
// Declare run-time constructor selection table
declareRunTimeSelectionTable
(
autoPtr,
decompositionConstraint,
dictionary,
(
const dictionary& constraintsDict,
const word& type
),
(constraintsDict, type)
);
// Constructors
//- Construct with generic dictionary with optional entry for type
decompositionConstraint
(
const dictionary& constraintsDict,
const word& type
);
// Selectors
//- Return a reference to the selected decompositionConstraint
static autoPtr<decompositionConstraint> New
(
const dictionary& constraintsDict,
const word& type
);
//- Destructor
virtual ~decompositionConstraint();
// Member Functions
//- Add my constraints to list of constraints
virtual void add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const = 0;
//- Apply any additional post-decomposition constraints. Usually no
// need to do anything since decomposition method should have already
// obeyed the constraints
virtual void apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{}
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
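For orientation, a minimal sketch of the intended call sequence (illustrative; decomposePar's actual driver loops over all constraints listed in decomposeParDict, and applyOneConstraint is an invented name). It assumes the Foam namespace and standard includes.

#include "decompositionConstraint.H"

// Illustrative sketch: apply a single named constraint around a decomposition
void applyOneConstraint
(
    const polyMesh& mesh,
    const dictionary& constraintDict,
    labelList& decomposition
)
{
    autoPtr<decompositionConstraint> constraint
    (
        decompositionConstraint::New(constraintDict, "preserveBaffles")
    );

    boolList blockedFace;
    PtrList<labelList> specifiedProcessorFaces;
    labelList specifiedProcessor;
    List<labelPair> explicitConnections;

    // Before decomposing: mark faces that must not become processor faces
    constraint->add
    (
        mesh,
        blockedFace,
        specifiedProcessorFaces,
        specifiedProcessor,
        explicitConnections
    );

    // ... run the decomposition method, honouring blockedFace ...

    // After decomposing: enforce anything the method did not honour
    constraint->apply
    (
        mesh,
        blockedFace,
        specifiedProcessorFaces,
        specifiedProcessor,
        explicitConnections,
        decomposition
    );
}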
View File
@ -0,0 +1,243 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "preserveBafflesConstraint.H"
#include "addToRunTimeSelectionTable.H"
#include "syncTools.H"
#include "localPointRegion.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
defineTypeName(preserveBafflesConstraint);
addToRunTimeSelectionTable
(
decompositionConstraint,
preserveBafflesConstraint,
dictionary
);
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionConstraints::preserveBafflesConstraint::
preserveBafflesConstraint
(
const dictionary& constraintsDict,
const word& modelType
)
:
decompositionConstraint(constraintsDict, typeName)
{
if (decompositionConstraint::debug)
{
Info<< type() << " : setting constraints to preserve baffles"
//<< returnReduce(bafflePairs.size(), sumOp<label>())
<< endl;
}
}
Foam::decompositionConstraints::preserveBafflesConstraint::
preserveBafflesConstraint()
:
decompositionConstraint(dictionary(), typeName)
{
if (decompositionConstraint::debug)
{
Info<< type() << " : setting constraints to preserve baffles"
//<< returnReduce(bafflePairs.size(), sumOp<label>())
<< endl;
}
}
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
void Foam::decompositionConstraints::preserveBafflesConstraint::add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const
{
const labelPairList bafflePairs
(
localPointRegion::findDuplicateFacePairs(mesh)
);
if (decompositionConstraint::debug & 2)
{
Info<< type() << " : setting constraints to preserve "
<< returnReduce(bafflePairs.size(), sumOp<label>())
<< " baffles" << endl;
}
// Merge into explicitConnections
{
// Convert into face-to-face addressing
labelList faceToFace(mesh.nFaces(), -1);
forAll(explicitConnections, i)
{
const labelPair& p = explicitConnections[i];
faceToFace[p[0]] = p[1];
faceToFace[p[1]] = p[0];
}
// Merge in bafflePairs
forAll(bafflePairs, i)
{
const labelPair& p = bafflePairs[i];
if (faceToFace[p[0]] == -1 && faceToFace[p[1]] == -1)
{
faceToFace[p[0]] = p[1];
faceToFace[p[1]] = p[0];
}
else if (labelPair::compare(p, labelPair(p[0], faceToFace[p[0]])))
{
// Connection already present
}
else
{
label p0Slave = faceToFace[p[0]];
label p1Slave = faceToFace[p[1]];
IOWarningInFunction(coeffDict_)
<< "When adding baffle between faces "
<< p[0] << " at " << mesh.faceCentres()[p[0]]
<< " and "
<< p[1] << " at " << mesh.faceCentres()[p[1]]
<< " : face " << p[0] << " already is connected to face "
<< p0Slave << " at " << mesh.faceCentres()[p0Slave]
<< " and face " << p[1] << " already is connected to face "
<< p1Slave << " at " << mesh.faceCentres()[p1Slave]
<< endl;
}
}
// Convert back into labelPairList
label n = 0;
forAll(faceToFace, faceI)
{
label otherFaceI = faceToFace[faceI];
if (otherFaceI != -1 && faceI < otherFaceI)
{
// I am master of slave
n++;
}
}
explicitConnections.setSize(n);
n = 0;
forAll(faceToFace, faceI)
{
label otherFaceI = faceToFace[faceI];
if (otherFaceI != -1 && faceI < otherFaceI)
{
explicitConnections[n++] = labelPair(faceI, otherFaceI);
}
}
}
// Make sure blockedFace is up to date
blockedFace.setSize(mesh.nFaces(), true);
forAll(explicitConnections, i)
{
blockedFace[explicitConnections[i].first()] = false;
blockedFace[explicitConnections[i].second()] = false;
}
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>());
}
void Foam::decompositionConstraints::preserveBafflesConstraint::apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{
const labelPairList bafflePairs
(
localPointRegion::findDuplicateFacePairs(mesh)
);
label nChanged = 0;
forAll(bafflePairs, i)
{
const labelPair& baffle = bafflePairs[i];
label f0 = baffle.first();
label f1 = baffle.second();
const label procI = decomposition[mesh.faceOwner()[f0]];
if (mesh.isInternalFace(f0))
{
label nei0 = mesh.faceNeighbour()[f0];
if (decomposition[nei0] != procI)
{
decomposition[nei0] = procI;
nChanged++;
}
}
label own1 = mesh.faceOwner()[f1];
if (decomposition[own1] != procI)
{
decomposition[own1] = procI;
nChanged++;
}
if (mesh.isInternalFace(f1))
{
label nei1 = mesh.faceNeighbour()[f1];
if (decomposition[nei1] != procI)
{
decomposition[nei1] = procI;
nChanged++;
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nChanged, sumOp<label>());
Info<< type() << " : changed decomposition on " << nChanged
<< " cells" << endl;
}
}
// ************************************************************************* //
View File
@ -0,0 +1,116 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::preserveBafflesConstraint
Description
Detects baffles and keeps owner and neighbour on same processor.
SourceFiles
preserveBafflesConstraint.C
\*---------------------------------------------------------------------------*/
#ifndef preserveBafflesConstraint_H
#define preserveBafflesConstraint_H
#include "decompositionConstraint.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
/*---------------------------------------------------------------------------*\
Class preserveBafflesConstraint Declaration
\*---------------------------------------------------------------------------*/
class preserveBafflesConstraint
:
public decompositionConstraint
{
// Private data
public:
//- Runtime type information
TypeName("preserveBaffles");
// Constructors
//- Construct with generic dictionary with optional entry for type
preserveBafflesConstraint
(
const dictionary& constraintsDict,
const word& type
);
//- Construct from components
preserveBafflesConstraint();
//- Destructor
virtual ~preserveBafflesConstraint()
{}
// Member Functions
//- Add my constraints to list of constraints
virtual void add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const;
//- Apply any additional post-decomposition constraints
virtual void apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace decompositionConstraints
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
View File
@ -0,0 +1,216 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "preserveFaceZonesConstraint.H"
#include "addToRunTimeSelectionTable.H"
#include "syncTools.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
defineTypeName(preserveFaceZonesConstraint);
addToRunTimeSelectionTable
(
decompositionConstraint,
preserveFaceZonesConstraint,
dictionary
);
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionConstraints::preserveFaceZonesConstraint::
preserveFaceZonesConstraint
(
const dictionary& constraintsDict,
const word& modelType
)
:
decompositionConstraint(constraintsDict, typeName),
zones_(coeffDict_.lookup("zones"))
{
if (decompositionConstraint::debug)
{
Info<< type() << " : adding constraints to keep owner and neighbour"
<< " of faces in zones " << zones_
<< " on same processor" << endl;
}
}
Foam::decompositionConstraints::preserveFaceZonesConstraint::
preserveFaceZonesConstraint
(
const wordReList& zones
)
:
decompositionConstraint(dictionary(), typeName),
zones_(zones)
{
if (decompositionConstraint::debug)
{
Info<< type() << " : adding constraints to keep owner and neighbour"
<< " of faces in zones " << zones_
<< " on same processor" << endl;
}
}
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
void Foam::decompositionConstraints::preserveFaceZonesConstraint::add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const
{
blockedFace.setSize(mesh.nFaces(), true);
const faceZoneMesh& fZones = mesh.faceZones();
const labelList zoneIDs = findStrings(zones_, fZones.names());
label nUnblocked = 0;
forAll(zoneIDs, i)
{
const faceZone& fz = fZones[zoneIDs[i]];
forAll(fz, i)
{
if (blockedFace[fz[i]])
{
blockedFace[fz[i]] = false;
nUnblocked++;
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nUnblocked, sumOp<label>());
Info<< type() << " : unblocked " << nUnblocked << " faces" << endl;
}
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>());
}
void Foam::decompositionConstraints::preserveFaceZonesConstraint::apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{
// If the decomposition has not enforced the constraint do it over
// here.
// Synchronise decomposition on boundary
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
const polyBoundaryMesh& pbm = mesh.boundaryMesh();
labelList destProc(mesh.nFaces()-mesh.nInternalFaces(), labelMax);
forAll(pbm, patchI)
{
const polyPatch& pp = pbm[patchI];
const labelUList& faceCells = pp.faceCells();
forAll(faceCells, i)
{
label bFaceI = pp.start()+i-mesh.nInternalFaces();
destProc[bFaceI] = decomposition[faceCells[i]];
}
}
syncTools::syncBoundaryFaceList(mesh, destProc, minEqOp<label>());
// Override if differing
// ~~~~~~~~~~~~~~~~~~~~~
const faceZoneMesh& fZones = mesh.faceZones();
const labelList zoneIDs = findStrings(zones_, fZones.names());
label nChanged = 0;
forAll(zoneIDs, i)
{
const faceZone& fz = fZones[zoneIDs[i]];
forAll(fz, i)
{
label faceI = fz[i];
label own = mesh.faceOwner()[faceI];
if (mesh.isInternalFace(faceI))
{
label nei = mesh.faceNeighbour()[faceI];
if (decomposition[own] != decomposition[nei])
{
decomposition[nei] = decomposition[own];
nChanged++;
}
}
else
{
label bFaceI = faceI-mesh.nInternalFaces();
if (decomposition[own] != destProc[bFaceI])
{
decomposition[own] = destProc[bFaceI];
nChanged++;
}
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nChanged, sumOp<label>());
Info<< type() << " : changed decomposition on " << nChanged
<< " cells" << endl;
}
}
// ************************************************************************* //
View File
@ -0,0 +1,122 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::preserveFaceZonesConstraint
Description
Constraint to keep/move owner and neighbour of faceZone onto same
processor.
SourceFiles
preserveFaceZonesConstraint.C
\*---------------------------------------------------------------------------*/
#ifndef preserveFaceZonesConstraint_H
#define preserveFaceZonesConstraint_H
#include "decompositionConstraint.H"
#include "wordReList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
/*---------------------------------------------------------------------------*\
Class preserveFaceZonesConstraint Declaration
\*---------------------------------------------------------------------------*/
class preserveFaceZonesConstraint
:
public decompositionConstraint
{
// Private data
//- List of zones to keep together
wordReList zones_;
public:
//- Runtime type information
TypeName("preserveFaceZones");
// Constructors
//- Construct with generic dictionary with optional entry for type
preserveFaceZonesConstraint
(
const dictionary& constraintsDict,
const word& type
);
//- Construct from components
preserveFaceZonesConstraint(const wordReList& zones);
//- Destructor
virtual ~preserveFaceZonesConstraint()
{}
// Member Functions
//- Add my constraints to list of constraints
virtual void add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const;
//- Apply any additional post-decomposition constraints
virtual void apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace decompositionConstraints
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
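For illustration, a minimal decomposeParDict entry selecting this constraint might look as follows; the coefficient keyword holding the zone names ("zones") is an assumption, since the corresponding constructor is not shown in this hunk, and the zone names are placeholders only:

    constraints
    {
        keepZones
        {
            type    preserveFaceZones;
            zones   (".*Zone" rotorZone);   // assumed keyword; example zone names
        }
    }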

View File

@ -0,0 +1,200 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "preservePatchesConstraint.H"
#include "addToRunTimeSelectionTable.H"
#include "syncTools.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
defineTypeName(preservePatchesConstraint);
addToRunTimeSelectionTable
(
decompositionConstraint,
preservePatchesConstraint,
dictionary
);
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionConstraints::preservePatchesConstraint::
preservePatchesConstraint
(
const dictionary& constraintsDict,
const word& modelType
)
:
decompositionConstraint(constraintsDict, typeName),
patches_(coeffDict_.lookup("patches"))
{
if (decompositionConstraint::debug)
{
Info<< type() << " : adding constraints to keep owner of faces"
<< " in patches " << patches_
<< " on same processor. This only makes sense for cyclics." << endl;
}
}
Foam::decompositionConstraints::preservePatchesConstraint::
preservePatchesConstraint
(
const wordReList& patches
)
:
decompositionConstraint(dictionary(), typeName),
patches_(patches)
{
if (decompositionConstraint::debug)
{
Info<< type() << " : adding constraints to keep owner of faces"
<< " in patches " << patches_
<< " on same processor. This only makes sense for cyclics." << endl;
}
}
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
void Foam::decompositionConstraints::preservePatchesConstraint::add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const
{
const polyBoundaryMesh& pbm = mesh.boundaryMesh();
blockedFace.setSize(mesh.nFaces(), true);
const labelList patchIDs(pbm.patchSet(patches_).sortedToc());
label nUnblocked = 0;
forAll(patchIDs, i)
{
const polyPatch& pp = pbm[patchIDs[i]];
forAll(pp, i)
{
if (blockedFace[pp.start() + i])
{
blockedFace[pp.start() + i] = false;
nUnblocked++;
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nUnblocked, sumOp<label>());
Info<< type() << " : unblocked " << nUnblocked << " faces" << endl;
}
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>());
}
void Foam::decompositionConstraints::preservePatchesConstraint::apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{
// If the decomposition has not enforced the constraint do it over
// here.
// Synchronise decomposition on patchIDs
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
const polyBoundaryMesh& pbm = mesh.boundaryMesh();
labelList destProc(mesh.nFaces()-mesh.nInternalFaces(), labelMax);
forAll(pbm, patchI)
{
const polyPatch& pp = pbm[patchI];
const labelUList& faceCells = pp.faceCells();
forAll(faceCells, i)
{
label bFaceI = pp.start()+i-mesh.nInternalFaces();
destProc[bFaceI] = decomposition[faceCells[i]];
}
}
syncTools::syncBoundaryFaceList(mesh, destProc, minEqOp<label>());
// Override if differing
// ~~~~~~~~~~~~~~~~~~~~~
const labelList patchIDs(pbm.patchSet(patches_).sortedToc());
label nChanged = 0;
forAll(patchIDs, i)
{
const polyPatch& pp = pbm[patchIDs[i]];
const labelUList& faceCells = pp.faceCells();
forAll(faceCells, i)
{
label bFaceI = pp.start()+i-mesh.nInternalFaces();
if (decomposition[faceCells[i]] != destProc[bFaceI])
{
decomposition[faceCells[i]] = destProc[bFaceI];
nChanged++;
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nChanged, sumOp<label>());
Info<< type() << " : changed decomposition on " << nChanged
<< " cells" << endl;
}
}
// ************************************************************************* //
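For illustration, a minimal decomposeParDict entry selecting this constraint, using the "patches" keyword read by the constructor above; the patch names are placeholders only:

    constraints
    {
        keepCyclics
        {
            type        preservePatches;
            patches     (cyclicLeft cyclicRight);   // example patch names
        }
    }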

View File

@ -0,0 +1,123 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::preservePatchesConstraint
Description
Constraint to keep owner and neighbour of (cyclic) patch on same
processor.
SourceFiles
preservePatchesConstraint.C
\*---------------------------------------------------------------------------*/
#ifndef preservePatchesConstraint_H
#define preservePatchesConstraint_H
#include "decompositionConstraint.H"
#include "wordReList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
/*---------------------------------------------------------------------------*\
Class preservePatchesConstraint Declaration
\*---------------------------------------------------------------------------*/
class preservePatchesConstraint
:
public decompositionConstraint
{
// Private data
//- List of patches to keep together
wordReList patches_;
public:
//- Runtime type information
TypeName("preservePatches");
// Constructors
//- Construct with generic dictionary with optional entry for type
preservePatchesConstraint
(
const dictionary& constraintsDict,
const word& type
);
//- Construct from components
preservePatchesConstraint(const wordReList& patches);
//- Destructor
virtual ~preservePatchesConstraint()
{}
// Member Functions
//- Add my constraints to list of constraints
virtual void add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const;
//- Apply any additional post-decomposition constraints
virtual void apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace decompositionConstraints
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -0,0 +1,212 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "refinementHistoryConstraint.H"
#include "addToRunTimeSelectionTable.H"
#include "syncTools.H"
#include "refinementHistory.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
defineTypeName(refinementHistoryConstraint);
addToRunTimeSelectionTable
(
decompositionConstraint,
refinementHistoryConstraint,
dictionary
);
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::refinementHistoryConstraint::refinementHistoryConstraint
(
const dictionary& constraintsDict,
const word& modelType
)
:
decompositionConstraint(constraintsDict, typeName)
{
if (decompositionConstraint::debug)
{
Info<< type() << " : setting constraints to preserve refinement history"
<< endl;
}
}
Foam::refinementHistoryConstraint::refinementHistoryConstraint()
:
decompositionConstraint(dictionary(), typeName)
{
if (decompositionConstraint::debug)
{
Info<< type() << " : setting constraints to refinement history"
<< endl;
}
}
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
void Foam::refinementHistoryConstraint::add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const
{
autoPtr<const refinementHistory> storagePtr;
refinementHistory const* refPtr = NULL;
if (mesh.foundObject<refinementHistory>("refinementHistory"))
{
if (decompositionConstraint::debug)
{
Info<< type() << " : found refinementHistory" << endl;
}
refPtr = &mesh.lookupObject<refinementHistory>("refinementHistory");
}
else
{
if (decompositionConstraint::debug)
{
Info<< type() << " : reading refinementHistory from time "
<< mesh.facesInstance() << endl;
}
storagePtr.reset
(
new refinementHistory
(
IOobject
(
"refinementHistory",
mesh.facesInstance(),
polyMesh::meshSubDir,
mesh,
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
mesh.nCells()
)
);
}
const refinementHistory& history =
(
storagePtr.valid()
? storagePtr()
: *refPtr
);
if (history.active())
{
// refinementHistory itself implements decompositionConstraint
history.add
(
blockedFace,
specifiedProcessorFaces,
specifiedProcessor,
explicitConnections
);
}
}
void Foam::refinementHistoryConstraint::apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{
autoPtr<const refinementHistory> storagePtr;
refinementHistory const* refPtr = NULL;
if (mesh.foundObject<refinementHistory>("refinementHistory"))
{
//if (decompositionConstraint::debug)
//{
// Info<< type() << " : found refinementHistory" << endl;
//}
refPtr = &mesh.lookupObject<refinementHistory>("refinementHistory");
}
else
{
//if (decompositionConstraint::debug)
//{
// Info<< type() << " : reading refinementHistory from time "
// << mesh.facesInstance() << endl;
//}
storagePtr.reset
(
new refinementHistory
(
IOobject
(
"refinementHistory",
mesh.facesInstance(),
polyMesh::meshSubDir,
mesh,
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
mesh.nCells()
)
);
}
const refinementHistory& history =
(
storagePtr.valid()
? storagePtr()
: *refPtr
);
if (history.active())
{
// refinementHistory itself implements decompositionConstraint
history.apply
(
blockedFace,
specifiedProcessorFaces,
specifiedProcessor,
explicitConnections,
decomposition
);
}
}
// ************************************************************************* //
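For illustration, a minimal decomposeParDict entry selecting this constraint; it needs no coefficients beyond the type, since the refinement history is either looked up from the registry or read from polyMesh/refinementHistory:

    constraints
    {
        refinement
        {
            type    refinementHistory;
        }
    }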

View File

@ -0,0 +1,114 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::refinementHistoryConstraint
Description
Constraint to keep all cells originating from refining the same cell
on the same processor. Reads polyMesh/refinementHistory.
SourceFiles
refinementHistoryConstraint.C
\*---------------------------------------------------------------------------*/
#ifndef refinementHistoryConstraint_H
#define refinementHistoryConstraint_H
#include "decompositionConstraint.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class refinementHistoryConstraint Declaration
\*---------------------------------------------------------------------------*/
class refinementHistoryConstraint
:
public decompositionConstraint
{
// Private data
public:
//- Runtime type information
TypeName("refinementHistory");
// Constructors
//- Construct with generic dictionary with optional entry for type
refinementHistoryConstraint
(
const dictionary& constraintsDict,
const word& type
);
//- Construct from components
refinementHistoryConstraint();
//- Destructor
virtual ~refinementHistoryConstraint()
{}
// Member Functions
//- Add my constraints to list of constraints
virtual void add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const;
//- Apply any additional post-decomposition constraints
virtual void apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -0,0 +1,319 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "singleProcessorFaceSetsConstraint.H"
#include "addToRunTimeSelectionTable.H"
#include "syncTools.H"
#include "faceSet.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
defineTypeName(singleProcessorFaceSetsConstraint);
addToRunTimeSelectionTable
(
decompositionConstraint,
singleProcessorFaceSetsConstraint,
dictionary
);
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionConstraints::singleProcessorFaceSetsConstraint::
singleProcessorFaceSetsConstraint
(
const dictionary& constraintsDict,
const word& modelType
)
:
decompositionConstraint(constraintsDict, typeName),
setNameAndProcs_(coeffDict_.lookup("singleProcessorFaceSets"))
{
if (decompositionConstraint::debug)
{
Info<< type()
<< " : adding constraints to keep" << endl;
forAll(setNameAndProcs_, setI)
{
Info<< " all cells connected to faceSet "
<< setNameAndProcs_[setI].first()
<< " on processor " << setNameAndProcs_[setI].second() << endl;
}
}
}
Foam::decompositionConstraints::singleProcessorFaceSetsConstraint::
singleProcessorFaceSetsConstraint
(
const List<Tuple2<word, label> >& setNameAndProcs
)
:
decompositionConstraint(dictionary(), typeName),
setNameAndProcs_(setNameAndProcs)
{
if (decompositionConstraint::debug)
{
Info<< type()
<< " : adding constraints to keep" << endl;
forAll(setNameAndProcs_, setI)
{
Info<< " all cells connected to faceSet "
<< setNameAndProcs_[setI].first()
<< " on processor " << setNameAndProcs_[setI].second() << endl;
}
}
}
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
void Foam::decompositionConstraints::singleProcessorFaceSetsConstraint::add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const
{
blockedFace.setSize(mesh.nFaces(), true);
// Mark faces already in set
labelList faceToSet(mesh.nFaces(), -1);
forAll(specifiedProcessorFaces, setI)
{
const labelList& faceLabels = specifiedProcessorFaces[setI];
forAll(faceLabels, i)
{
faceToSet[faceLabels[i]] = setI;
}
}
forAll(setNameAndProcs_, setI)
{
//Info<< "Keeping all cells connected to faceSet "
// << setNameAndProcs_[setI].first()
// << " on processor " << setNameAndProcs_[setI].second() << endl;
const label destProcI = setNameAndProcs_[setI].second();
// Read faceSet
const faceSet fz(mesh, setNameAndProcs_[setI].first());
// Check that it does not overlap with existing specifiedProcessorFaces
labelList nMatch(specifiedProcessorFaces.size(), 0);
forAllConstIter(faceSet, fz, iter)
{
label setI = faceToSet[iter.key()];
if (setI != -1)
{
nMatch[setI]++;
}
}
// Only store if the faceSet does not overlap any existing
// specifiedProcessorFaces (checked on all processors)
bool store = true;
forAll(nMatch, setI)
{
if (nMatch[setI] == fz.size())
{
// full match
store = false;
break;
}
else if (nMatch[setI] > 0)
{
// partial match
store = false;
break;
}
}
reduce(store, andOp<bool>());
if (store)
{
specifiedProcessorFaces.append(new labelList(fz.sortedToc()));
specifiedProcessor.append(destProcI);
}
}
// Unblock all point connected faces
// 1. Mark all points on specifiedProcessorFaces
boolList procFacePoint(mesh.nPoints(), false);
forAll(specifiedProcessorFaces, setI)
{
const labelList& set = specifiedProcessorFaces[setI];
forAll(set, fI)
{
const face& f = mesh.faces()[set[fI]];
forAll(f, fp)
{
procFacePoint[f[fp]] = true;
}
}
}
syncTools::syncPointList(mesh, procFacePoint, orEqOp<bool>(), false);
// 2. Unblock all faces on procFacePoint
label nUnblocked = 0;
forAll(procFacePoint, pointI)
{
if (procFacePoint[pointI])
{
const labelList& pFaces = mesh.pointFaces()[pointI];
forAll(pFaces, i)
{
if (blockedFace[pFaces[i]])
{
blockedFace[pFaces[i]] = false;
nUnblocked++;
}
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nUnblocked, sumOp<label>());
Info<< type() << " : unblocked " << nUnblocked << " faces" << endl;
}
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>());
}
void Foam::decompositionConstraints::singleProcessorFaceSetsConstraint::apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const
{
// For specifiedProcessorFaces rework the cellToProc to enforce
// all on one processor since we can't guarantee that the input
// to regionSplit was a single region.
// E.g. faceSet 'a' with the cells split into two regions
// by a notch formed by two walls
//
// \ /
// \ /
// ---a----+-----a-----
//
//
// Note that reworking the cellToProc might make the decomposition
// unbalanced.
label nChanged = 0;
forAll(specifiedProcessorFaces, setI)
{
const labelList& set = specifiedProcessorFaces[setI];
// Get the processor to use for the set
label procI = specifiedProcessor[setI];
if (procI == -1)
{
// If no processor specified use the one from the
// 0th element
if (set.size())
{
procI = decomposition[mesh.faceOwner()[set[0]]];
}
reduce(procI, maxOp<label>());
}
// Get all points on the sets
boolList procFacePoint(mesh.nPoints(), false);
forAll(set, fI)
{
const face& f = mesh.faces()[set[fI]];
forAll(f, fp)
{
procFacePoint[f[fp]] = true;
}
}
syncTools::syncPointList(mesh, procFacePoint, orEqOp<bool>(), false);
// 2. Ensure all cells connected to these points are on processor procI
forAll(procFacePoint, pointI)
{
if (procFacePoint[pointI])
{
const labelList& pFaces = mesh.pointFaces()[pointI];
forAll(pFaces, i)
{
label faceI = pFaces[i];
label own = mesh.faceOwner()[faceI];
if (decomposition[own] != procI)
{
decomposition[own] = procI;
nChanged++;
}
if (mesh.isInternalFace(faceI))
{
label nei = mesh.faceNeighbour()[faceI];
if (decomposition[nei] != procI)
{
decomposition[nei] = procI;
nChanged++;
}
}
}
}
}
}
if (decompositionConstraint::debug & 2)
{
reduce(nChanged, sumOp<label>());
Info<< type() << " : changed decomposition on " << nChanged
<< " cells" << endl;
}
}
// ************************************************************************* //
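For illustration, a minimal decomposeParDict entry selecting this constraint, using the "singleProcessorFaceSets" keyword read by the constructor above; the faceSet names are placeholders, and a processor of -1 means the processor of the first owner cell is used, as handled in apply():

    constraints
    {
        procSets
        {
            type                     singleProcessorFaceSets;
            singleProcessorFaceSets  ((insidePump 0) (outletFaces -1));   // example sets
        }
    }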

View File

@ -0,0 +1,124 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2015-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::singleProcessorFaceSetsConstraint
Description
Constraint to keep all cells connected to face or point of faceSet on
a single processor.
SourceFiles
singleProcessorFaceSetsConstraint.C
\*---------------------------------------------------------------------------*/
#ifndef singleProcessorFaceSetsConstraint_H
#define singleProcessorFaceSetsConstraint_H
#include "decompositionConstraint.H"
#include "Tuple2.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
namespace decompositionConstraints
{
/*---------------------------------------------------------------------------*\
Class singleProcessorFaceSetsConstraint Declaration
\*---------------------------------------------------------------------------*/
class singleProcessorFaceSetsConstraint
:
public decompositionConstraint
{
// Private data
//- List of faceSet+processor
List<Tuple2<word, label> > setNameAndProcs_;
public:
//- Runtime type information
TypeName("singleProcessorFaceSets");
// Constructors
//- Construct with generic dictionary with optional entry for type
singleProcessorFaceSetsConstraint
(
const dictionary& constraintsDict,
const word& type
);
//- Construct from components
singleProcessorFaceSetsConstraint
(
const List<Tuple2<word, label> >& setNameAndProcs
);
//- Destructor
virtual ~singleProcessorFaceSetsConstraint()
{}
// Member Functions
//- Add my constraints to list of constraints
virtual void add
(
const polyMesh& mesh,
boolList& blockedFace,
PtrList<labelList>& specifiedProcessorFaces,
labelList& specifiedProcessor,
List<labelPair>& explicitConnections
) const;
//- Apply any additional post-decomposition constraints
virtual void apply
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace decompositionConstraints
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -36,6 +36,11 @@ InClass
#include "minData.H" #include "minData.H"
#include "FaceCellWave.H" #include "FaceCellWave.H"
#include "preserveBafflesConstraint.H"
#include "preservePatchesConstraint.H"
#include "preserveFaceZonesConstraint.H"
#include "singleProcessorFaceSetsConstraint.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * // // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam namespace Foam
@ -44,6 +49,129 @@ namespace Foam
defineRunTimeSelectionTable(decompositionMethod, dictionary); defineRunTimeSelectionTable(decompositionMethod, dictionary);
} }
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::decompositionMethod::decompositionMethod
(
const dictionary& decompositionDict
)
:
decompositionDict_(decompositionDict),
nProcessors_
(
readLabel(decompositionDict.lookup("numberOfSubdomains"))
)
{
// Read any constraints
wordList constraintTypes_;
if (decompositionDict_.found("constraints"))
{
//PtrList<dictionary> constraintsList
//(
// decompositionDict_.lookup("constraints")
//);
//forAll(constraintsList, i)
//{
// const dictionary& dict = constraintsList[i];
const dictionary& constraintsList = decompositionDict_.subDict
(
"constraints"
);
forAllConstIter(dictionary, constraintsList, iter)
{
const dictionary& dict = iter().dict();
constraintTypes_.append(dict.lookup("type"));
constraints_.append
(
decompositionConstraint::New
(
dict,
constraintTypes_.last()
)
);
}
}
// Backwards compatibility
if
(
decompositionDict_.found("preserveBaffles")
&& findIndex
(
constraintTypes_,
decompositionConstraints::preserveBafflesConstraint::typeName
) == -1
)
{
constraints_.append
(
new decompositionConstraints::preserveBafflesConstraint()
);
}
if
(
decompositionDict_.found("preservePatches")
&& findIndex
(
constraintTypes_,
decompositionConstraints::preservePatchesConstraint::typeName
) == -1
)
{
const wordReList pNames(decompositionDict_.lookup("preservePatches"));
constraints_.append
(
new decompositionConstraints::preservePatchesConstraint(pNames)
);
}
if
(
decompositionDict_.found("preserveFaceZones")
&& findIndex
(
constraintTypes_,
decompositionConstraints::preserveFaceZonesConstraint::typeName
) == -1
)
{
const wordReList zNames(decompositionDict_.lookup("preserveFaceZones"));
constraints_.append
(
new decompositionConstraints::preserveFaceZonesConstraint(zNames)
);
}
if
(
decompositionDict_.found("singleProcessorFaceSets")
&& findIndex
(
constraintTypes_,
decompositionConstraints::singleProcessorFaceSetsConstraint::typeName
) == -1
)
{
const List<Tuple2<word, label>> zNameAndProcs
(
decompositionDict_.lookup("singleProcessorFaceSets")
);
constraints_.append
(
new decompositionConstraints::singleProcessorFaceSetsConstraint
(
zNameAndProcs
)
);
}
}
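// Illustration (not part of the committed sources): the legacy top-level
// keywords handled above, e.g.
//
//     preservePatches         (cyclic0);
//     preserveFaceZones       (".*Zone");
//     singleProcessorFaceSets ((f0 -1));
//     preserveBaffles         true;
//
// (patch/zone/set names are examples only) are converted into the
// equivalent run-time selectable constraints, so existing
// decomposeParDict files work without a "constraints" sub-dictionary.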
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
Foam::autoPtr<Foam::decompositionMethod> Foam::decompositionMethod::New Foam::autoPtr<Foam::decompositionMethod> Foam::decompositionMethod::New
@ -53,12 +181,6 @@ Foam::autoPtr<Foam::decompositionMethod> Foam::decompositionMethod::New
{ {
word methodType(decompositionDict.lookup("method")); word methodType(decompositionDict.lookup("method"));
if (methodType == "scotch" && Pstream::parRun())
{
methodType = "ptscotch";
}
Info<< "Selecting decompositionMethod " << methodType << endl; Info<< "Selecting decompositionMethod " << methodType << endl;
dictionaryConstructorTable::iterator cstrIter = dictionaryConstructorTable::iterator cstrIter =
@ -367,6 +489,210 @@ void Foam::decompositionMethod::calcCellCells
} }
void Foam::decompositionMethod::calcCellCells
(
const polyMesh& mesh,
const labelList& agglom,
const label nLocalCoarse,
const bool parallel,
CompactListList<label>& cellCells,
CompactListList<scalar>& cellCellWeights
)
{
const labelList& faceOwner = mesh.faceOwner();
const labelList& faceNeighbour = mesh.faceNeighbour();
const polyBoundaryMesh& patches = mesh.boundaryMesh();
// Create global cell numbers
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
globalIndex globalAgglom
(
nLocalCoarse,
Pstream::msgType(),
Pstream::worldComm,
parallel
);
// Get agglomerate owner on other side of coupled faces
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
labelList globalNeighbour(mesh.nFaces()-mesh.nInternalFaces());
forAll(patches, patchI)
{
const polyPatch& pp = patches[patchI];
if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
{
label faceI = pp.start();
label bFaceI = pp.start() - mesh.nInternalFaces();
forAll(pp, i)
{
globalNeighbour[bFaceI] = globalAgglom.toGlobal
(
agglom[faceOwner[faceI]]
);
bFaceI++;
faceI++;
}
}
}
// Get the cell on the other side of coupled patches
syncTools::swapBoundaryFaceList(mesh, globalNeighbour);
// Count number of faces (internal + coupled)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Number of faces per coarse cell
labelList nFacesPerCell(nLocalCoarse, 0);
for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
{
label own = agglom[faceOwner[faceI]];
label nei = agglom[faceNeighbour[faceI]];
nFacesPerCell[own]++;
nFacesPerCell[nei]++;
}
forAll(patches, patchI)
{
const polyPatch& pp = patches[patchI];
if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
{
label faceI = pp.start();
label bFaceI = pp.start()-mesh.nInternalFaces();
forAll(pp, i)
{
label own = agglom[faceOwner[faceI]];
label globalNei = globalNeighbour[bFaceI];
if
(
!globalAgglom.isLocal(globalNei)
|| globalAgglom.toLocal(globalNei) != own
)
{
nFacesPerCell[own]++;
}
faceI++;
bFaceI++;
}
}
}
// Fill in offset and data
// ~~~~~~~~~~~~~~~~~~~~~~~
cellCells.setSize(nFacesPerCell);
cellCellWeights.setSize(nFacesPerCell);
nFacesPerCell = 0;
labelList& m = cellCells.m();
scalarList& w = cellCellWeights.m();
const labelList& offsets = cellCells.offsets();
// For internal faces this is just the offsetted owner and neighbour
for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
{
label own = agglom[faceOwner[faceI]];
label nei = agglom[faceNeighbour[faceI]];
label ownIndex = offsets[own] + nFacesPerCell[own]++;
label neiIndex = offsets[nei] + nFacesPerCell[nei]++;
m[ownIndex] = globalAgglom.toGlobal(nei);
w[ownIndex] = mag(mesh.faceAreas()[faceI]);
m[neiIndex] = globalAgglom.toGlobal(own);
w[neiIndex] = mag(mesh.faceAreas()[faceI]);
}
// For boundary faces this is the offsetted coupled neighbour
forAll(patches, patchI)
{
const polyPatch& pp = patches[patchI];
if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
{
label faceI = pp.start();
label bFaceI = pp.start()-mesh.nInternalFaces();
forAll(pp, i)
{
label own = agglom[faceOwner[faceI]];
label globalNei = globalNeighbour[bFaceI];
if
(
!globalAgglom.isLocal(globalNei)
|| globalAgglom.toLocal(globalNei) != own
)
{
label ownIndex = offsets[own] + nFacesPerCell[own]++;
m[ownIndex] = globalNei;
w[ownIndex] = mag(mesh.faceAreas()[faceI]);
}
faceI++;
bFaceI++;
}
}
}
// Check for duplicate connections between cells
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Done as postprocessing step since we now have cellCells.
label newIndex = 0;
labelHashSet nbrCells;
if (cellCells.size() == 0)
{
return;
}
label startIndex = cellCells.offsets()[0];
forAll(cellCells, cellI)
{
nbrCells.clear();
nbrCells.insert(globalAgglom.toGlobal(cellI));
label endIndex = cellCells.offsets()[cellI+1];
for (label i = startIndex; i < endIndex; i++)
{
if (nbrCells.insert(cellCells.m()[i]))
{
cellCells.m()[newIndex] = cellCells.m()[i];
cellCellWeights.m()[newIndex] = cellCellWeights.m()[i];
newIndex++;
}
}
startIndex = endIndex;
cellCells.offsets()[cellI+1] = newIndex;
cellCellWeights.offsets()[cellI+1] = newIndex;
}
cellCells.m().setSize(newIndex);
cellCellWeights.m().setSize(newIndex);
}
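// Usage sketch (not part of the committed sources): building the
// connectivity for a trivial agglomeration of one coarse cell per
// fine cell, ignoring processor coupling:
//
//     CompactListList<label> cellCells;
//     CompactListList<scalar> cellWeights;
//     decompositionMethod::calcCellCells
//     (
//         mesh,
//         identity(mesh.nCells()),   // fine-cell to coarse-cell map
//         mesh.nCells(),             // number of local coarse cells
//         false,                     // treat processor patches as uncoupled
//         cellCells,
//         cellWeights
//     );
//
// Row cellI of cellCells then holds the global indices of the coarse
// cells sharing a face with cellI, and cellWeights the matching
// face-area magnitudes, with duplicate connections merged.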
//void Foam::decompositionMethod::calcCellCells //void Foam::decompositionMethod::calcCellCells
//( //(
// const polyMesh& mesh, // const polyMesh& mesh,
@ -1101,172 +1427,45 @@ void Foam::decompositionMethod::setConstraints
{ {
blockedFace.setSize(mesh.nFaces()); blockedFace.setSize(mesh.nFaces());
blockedFace = true; blockedFace = true;
//label nUnblocked = 0;
specifiedProcessorFaces.clear(); specifiedProcessorFaces.clear();
explicitConnections.clear(); explicitConnections.clear();
forAll(constraints_, constraintI)
if (decompositionDict_.found("preservePatches"))
{ {
wordList pNames(decompositionDict_.lookup("preservePatches")); constraints_[constraintI].add
Info<< nl
<< "Keeping owner of faces in patches " << pNames
<< " on same processor. This only makes sense for cyclics." << endl;
const polyBoundaryMesh& patches = mesh.boundaryMesh();
forAll(pNames, i)
{
const label patchi = patches.findPatchID(pNames[i]);
if (patchi == -1)
{
FatalErrorInFunction
<< "Unknown preservePatch " << pNames[i]
<< endl << "Valid patches are " << patches.names()
<< exit(FatalError);
}
const polyPatch& pp = patches[patchi];
forAll(pp, i)
{
if (blockedFace[pp.start() + i])
{
blockedFace[pp.start() + i] = false;
//nUnblocked++;
}
}
}
}
if (decompositionDict_.found("preserveFaceZones"))
{
wordList zNames(decompositionDict_.lookup("preserveFaceZones"));
Info<< nl
<< "Keeping owner and neighbour of faces in zones " << zNames
<< " on same processor" << endl;
const faceZoneMesh& fZones = mesh.faceZones();
forAll(zNames, i)
{
label zoneI = fZones.findZoneID(zNames[i]);
if (zoneI == -1)
{
FatalErrorInFunction
<< "Unknown preserveFaceZone " << zNames[i]
<< endl << "Valid faceZones are " << fZones.names()
<< exit(FatalError);
}
const faceZone& fz = fZones[zoneI];
forAll(fz, i)
{
if (blockedFace[fz[i]])
{
blockedFace[fz[i]] = false;
//nUnblocked++;
}
}
}
}
bool preserveBaffles = decompositionDict_.lookupOrDefault
( (
"preserveBaffles", mesh,
false blockedFace,
specifiedProcessorFaces,
specifiedProcessor,
explicitConnections
); );
if (preserveBaffles)
{
Info<< nl
<< "Keeping owner of faces in baffles "
<< " on same processor." << endl;
explicitConnections = localPointRegion::findDuplicateFacePairs(mesh);
forAll(explicitConnections, i)
{
blockedFace[explicitConnections[i].first()] = false;
blockedFace[explicitConnections[i].second()] = false;
}
} }
}
if
void Foam::decompositionMethod::applyConstraints
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& decomposition
)
{
forAll(constraints_, constraintI)
{
constraints_[constraintI].apply
( (
decompositionDict_.found("preservePatches") mesh,
|| decompositionDict_.found("preserveFaceZones") blockedFace,
|| preserveBaffles specifiedProcessorFaces,
) specifiedProcessor,
{ explicitConnections,
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>()); decomposition
//reduce(nUnblocked, sumOp<label>());
}
// Specified processor for group of cells connected to faces
label nProcSets = 0;
if (decompositionDict_.found("singleProcessorFaceSets"))
{
List<Tuple2<word, label>> zNameAndProcs
(
decompositionDict_.lookup("singleProcessorFaceSets")
); );
specifiedProcessorFaces.setSize(zNameAndProcs.size());
specifiedProcessor.setSize(zNameAndProcs.size());
forAll(zNameAndProcs, setI)
{
Info<< "Keeping all cells connected to faceSet "
<< zNameAndProcs[setI].first()
<< " on processor " << zNameAndProcs[setI].second() << endl;
// Read faceSet
faceSet fz(mesh, zNameAndProcs[setI].first());
specifiedProcessorFaces.set(setI, new labelList(fz.sortedToc()));
specifiedProcessor[setI] = zNameAndProcs[setI].second();
nProcSets += fz.size();
}
reduce(nProcSets, sumOp<label>());
// Unblock all point connected faces
// 1. Mark all points on specifiedProcessorFaces
boolList procFacePoint(mesh.nPoints(), false);
forAll(specifiedProcessorFaces, setI)
{
const labelList& set = specifiedProcessorFaces[setI];
forAll(set, fI)
{
const face& f = mesh.faces()[set[fI]];
forAll(f, fp)
{
procFacePoint[f[fp]] = true;
}
}
}
syncTools::syncPointList(mesh, procFacePoint, orEqOp<bool>(), false);
// 2. Unblock all faces on procFacePoint
forAll(procFacePoint, pointI)
{
if (procFacePoint[pointI])
{
const labelList& pFaces = mesh.pointFaces()[pointI];
forAll(pFaces, i)
{
blockedFace[pFaces[i]] = false;
}
}
}
syncTools::syncFaceList(mesh, blockedFace, andEqOp<bool>());
} }
} }
@ -1277,6 +1476,8 @@ Foam::labelList Foam::decompositionMethod::decompose
const scalarField& cellWeights const scalarField& cellWeights
) )
{ {
// Collect all constraints
boolList blockedFace; boolList blockedFace;
PtrList<labelList> specifiedProcessorFaces; PtrList<labelList> specifiedProcessorFaces;
labelList specifiedProcessor; labelList specifiedProcessor;
@ -1304,6 +1505,19 @@ Foam::labelList Foam::decompositionMethod::decompose
explicitConnections // baffles explicitConnections // baffles
); );
// Give any constraint the option of modifying the decomposition
applyConstraints
(
mesh,
blockedFace,
specifiedProcessorFaces,
specifiedProcessor,
explicitConnections,
finalDecomp
);
return finalDecomp; return finalDecomp;
} }

View File

@ -2,7 +2,7 @@
========= | ========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration | \\ / O peration |
\\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation | \\/ M anipulation |
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
License License
@ -36,8 +36,8 @@ SourceFiles
#define decompositionMethod_H #define decompositionMethod_H
#include "polyMesh.H" #include "polyMesh.H"
#include "pointField.H"
#include "CompactListList.H" #include "CompactListList.H"
#include "decompositionConstraint.H"
namespace Foam namespace Foam
{ {
@ -56,6 +56,8 @@ protected:
const dictionary& decompositionDict_; const dictionary& decompositionDict_;
label nProcessors_; label nProcessors_;
//- Optional constraints
PtrList<decompositionConstraint> constraints_;
private: private:
@ -98,14 +100,7 @@ public:
// Constructors // Constructors
//- Construct given the decomposition dictionary //- Construct given the decomposition dictionary
decompositionMethod(const dictionary& decompositionDict) decompositionMethod(const dictionary& decompositionDict);
:
decompositionDict_(decompositionDict),
nProcessors_
(
readLabel(decompositionDict.lookup("numberOfSubdomains"))
)
{}
//- Destructor //- Destructor
@ -226,6 +221,19 @@ public:
CompactListList<label>& cellCells CompactListList<label>& cellCells
); );
//- Helper: determine (local or global) cellCells and face weights
// from mesh agglomeration.
// Uses mag of faceArea as weights
static void calcCellCells
(
const polyMesh& mesh,
const labelList& agglom,
const label nLocalCoarse,
const bool parallel,
CompactListList<label>& cellCells,
CompactListList<scalar>& cellCellWeights
);
//- Helper: extract constraints: //- Helper: extract constraints:
// blockedface: existing faces where owner and neighbour on same // blockedface: existing faces where owner and neighbour on same
// proc // proc
@ -241,6 +249,19 @@ public:
List<labelPair>& explicitConnections List<labelPair>& explicitConnections
); );
//- Helper: apply constraints to a decomposition. This gives
// constraints opportunity to modify decomposition in case
// the native decomposition method has not obeyed all constraints
void applyConstraints
(
const polyMesh& mesh,
const boolList& blockedFace,
const PtrList<labelList>& specifiedProcessorFaces,
const labelList& specifiedProcessor,
const List<labelPair>& explicitConnections,
labelList& finalDecomp
);
// Decompose a mesh with constraints: // Decompose a mesh with constraints:
// - blockedFace : whether owner and neighbour should be on same // - blockedFace : whether owner and neighbour should be on same
// processor // processor

View File

@ -67,13 +67,6 @@ class ptscotchDecomp
{ {
// Private Member Functions // Private Member Functions
//- Insert list in front of list.
template<class Type>
static void prepend(const UList<Type>&, List<Type>&);
//- Insert list at end of list.
template<class Type>
static void append(const UList<Type>&, List<Type>&);
//- Check and print error message //- Check and print error message
static void check(const int, const char*); static void check(const int, const char*);
@ -177,12 +170,6 @@ public:
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "ptscotchDecompTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif #endif
// ************************************************************************* // // ************************************************************************* //

View File

@ -1,9 +1,9 @@
EXE_INC = \ EXE_INC = \
-I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \ -I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \ -I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude -I$(LIB_SRC)/meshTools/lnInclude
LIB_LIBS = \ LIB_LIBS = \
-ldecompositionMethods \ -ldecompositionMethods \
-lfiniteVolume \ -ldynamicMesh
-lmeshTools