Add the OpenFOAM source tree

Henry
2014-12-10 22:40:10 +00:00
parent ee487c860d
commit 446e5777f0
13379 changed files with 3983377 additions and 0 deletions

@@ -0,0 +1,9 @@
decomposePar.C
domainDecomposition.C
domainDecompositionMesh.C
domainDecompositionDistribute.C
dimFieldDecomposer.C
pointFieldDecomposer.C
lagrangianFieldDecomposer.C
EXE = $(FOAM_APPBIN)/decomposePar

@@ -0,0 +1,16 @@
EXE_INC = \
-I$(LIB_SRC)/parallel/decompose/decompose/lnInclude \
-I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/regionModels/regionModel/lnInclude
EXE_LIBS = \
-lfiniteVolume \
-ldecompose \
-lgenericPatchFields \
-ldecompositionMethods -L$(FOAM_LIBBIN)/dummy -lmetisDecomp -lscotchDecomp \
-llagrangian \
-lmeshTools \
-lregionModels

File diff suppressed because it is too large

@@ -0,0 +1,143 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: dev |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
note "mesh decomposition control dictionary";
object decomposeParDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
numberOfSubdomains 2;
//- Keep owner and neighbour on same processor for faces in zones:
// preserveFaceZones (heater solid1 solid3);
//- Keep owner and neighbour on same processor for faces in patches:
// (makes sense only for cyclic patches)
//preservePatches (cyclic_half0 cyclic_half1);
//- Keep all of faceSet on a single processor. This puts all cells
// connected with a point, edge or face on the same processor.
// (just having face connected cells might not guarantee a balanced
// decomposition)
// The processor can be -1 (the decompositionMethod chooses the processor
// for a good load balance) or explicitly provided (upsets balance).
//singleProcessorFaceSets ((f0 -1));
//- Keep owner and neighbour of baffles on same processor (i.e. keep it
// detectable as a baffle). Baffles are two boundary faces sharing the
// same points.
//preserveBaffles true;
//- Use the volScalarField named here as a weight for each cell in the
// decomposition. For example, use a particle population field to decompose
// for a balanced number of particles in a lagrangian simulation.
// weightField dsmcRhoNMean;
method scotch;
//method hierarchical;
// method simple;
// method metis;
// method manual;
// method multiLevel;
// method structured; // does 2D decomposition of structured mesh
multiLevelCoeffs
{
// Decomposition methods to apply in turn. This is like hierarchical but
// fully general - every method can be used at every level.
level0
{
numberOfSubdomains 64;
//method simple;
//simpleCoeffs
//{
// n (2 1 1);
// delta 0.001;
//}
method scotch;
}
level1
{
numberOfSubdomains 4;
method scotch;
}
}
// Desired output
simpleCoeffs
{
n (2 1 1);
delta 0.001;
}
hierarchicalCoeffs
{
n (1 2 1);
delta 0.001;
order xyz;
}
metisCoeffs
{
/*
processorWeights
(
1
1
1
1
);
*/
}
scotchCoeffs
{
//processorWeights
//(
// 1
// 1
// 1
// 1
//);
//writeGraph true;
//strategy "b";
}
manualCoeffs
{
dataFile "decompositionData";
}
structuredCoeffs
{
// Patches to do 2D decomposition on. Structured mesh only; cells have
// to be in 'columns' on top of patches.
patches (movingWall);
// Method to use on the 2D subset
method scotch;
}
//// Is the case distributed? Note: command-line argument -roots takes
//// precedence
//distributed yes;
//// Per slave (so nProcs-1 entries) the directory above the case.
//roots
//(
// "/tmp"
// "/tmp"
//);
// ************************************************************************* //
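
A minimal C++ sketch of how this dictionary is consumed, assuming the usual application scaffolding (runTime and mesh as set up by createTime.H/createMesh.H); the decompose() call mirrors domainDecomposition::distributeCells() shown further down:

// Read system/decomposeParDict
IOdictionary decompositionDict
(
    IOobject
    (
        "decomposeParDict",
        runTime.system(),
        mesh,
        IOobject::MUST_READ_IF_MODIFIED,
        IOobject::NO_WRITE
    )
);
// Instantiate the method selected by the "method" keyword (scotch above)
autoPtr<decompositionMethod> decomposePtr =
    decompositionMethod::New(decompositionDict);
// Optional per-cell weights (empty means uniform), then cell -> processor map
scalarField cellWeights;
labelList cellToProc = decomposePtr().decompose(mesh, cellWeights);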

@@ -0,0 +1,52 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "dimFieldDecomposer.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::dimFieldDecomposer::dimFieldDecomposer
(
const fvMesh& completeMesh,
const fvMesh& procMesh,
const labelList& faceAddressing,
const labelList& cellAddressing
)
:
completeMesh_(completeMesh),
procMesh_(procMesh),
faceAddressing_(faceAddressing),
cellAddressing_(cellAddressing)
{}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::dimFieldDecomposer::~dimFieldDecomposer()
{}
// ************************************************************************* //

@@ -0,0 +1,129 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2013 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::dimFieldDecomposer
Description
Dimensioned field decomposer.
SourceFiles
dimFieldDecomposer.C
dimFieldDecomposerDecomposeFields.C
\*---------------------------------------------------------------------------*/
#ifndef dimFieldDecomposer_H
#define dimFieldDecomposer_H
#include "fvMesh.H"
#include "surfaceFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
class IOobjectList;
/*---------------------------------------------------------------------------*\
Class dimFieldDecomposer Declaration
\*---------------------------------------------------------------------------*/
class dimFieldDecomposer
{
private:
// Private data
//- Reference to complete mesh
const fvMesh& completeMesh_;
//- Reference to processor mesh
const fvMesh& procMesh_;
//- Reference to face addressing
const labelList& faceAddressing_;
//- Reference to cell addressing
const labelList& cellAddressing_;
// Private Member Functions
//- Disallow default bitwise copy construct
dimFieldDecomposer(const dimFieldDecomposer&);
//- Disallow default bitwise assignment
void operator=(const dimFieldDecomposer&);
public:
// Constructors
//- Construct from components
dimFieldDecomposer
(
const fvMesh& completeMesh,
const fvMesh& procMesh,
const labelList& faceAddressing,
const labelList& cellAddressing
);
//- Destructor
~dimFieldDecomposer();
// Member Functions
//- Decompose field
template<class Type>
tmp<DimensionedField<Type, volMesh> > decomposeField
(
const DimensionedField<Type, volMesh>& field
) const;
//- Decompose list of fields
template<class GeoField>
void decomposeFields(const PtrList<GeoField>& fields) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
# include "dimFieldDecomposerDecomposeFields.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

@@ -0,0 +1,74 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "dimFieldDecomposer.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
Foam::tmp<Foam::DimensionedField<Type, Foam::volMesh> >
Foam::dimFieldDecomposer::decomposeField
(
const DimensionedField<Type, volMesh>& field
) const
{
// Create and map the internal field values
Field<Type> mappedField(field, cellAddressing_);
// Create the field for the processor
return tmp<DimensionedField<Type, volMesh> >
(
new DimensionedField<Type, volMesh>
(
IOobject
(
field.name(),
procMesh_.time().timeName(),
procMesh_,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procMesh_,
field.dimensions(),
mappedField
)
);
}
template<class GeoField>
void Foam::dimFieldDecomposer::decomposeFields
(
const PtrList<GeoField>& fields
) const
{
forAll(fields, fieldI)
{
decomposeField(fields[fieldI])().write();
}
}
// ************************************************************************* //
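
A usage sketch for the decomposer above, with hypothetical names: mesh (complete fvMesh), procMesh (one processor fvMesh), faceProcAddressing/cellProcAddressing (addressing labelLists of that processor) and objects (IOobjectList of the current time directory), as the decomposePar driver is expected to provide per processor:

// Collect all dimensioned scalar fields, then decompose and write them
PtrList<DimensionedField<scalar, volMesh> > dimScalarFields;
readFields(mesh, objects, dimScalarFields);
dimFieldDecomposer fieldDecomposer
(
    mesh,
    procMesh,
    faceProcAddressing,
    cellProcAddressing
);
fieldDecomposer.decomposeFields(dimScalarFields);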

File diff suppressed because it is too large

@@ -0,0 +1,213 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::domainDecomposition
Description
Automatic domain decomposition class for finite-volume meshes
SourceFiles
domainDecomposition.C
decomposeMesh.C
\*---------------------------------------------------------------------------*/
#ifndef domainDecomposition_H
#define domainDecomposition_H
#include "fvMesh.H"
#include "labelList.H"
#include "SLList.H"
#include "PtrList.H"
#include "point.H"
#include "Time.H"
#include "volFields.H"
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class domainDecomposition Declaration
\*---------------------------------------------------------------------------*/
class domainDecomposition
:
public fvMesh
{
// Private data
//- Optional: points at the facesInstance
autoPtr<pointIOField> facesInstancePointsPtr_;
//- Mesh decomposition control dictionary
IOdictionary decompositionDict_;
//- Number of processors in decomposition
label nProcs_;
//- Is the decomposition data to be distributed for each processor
bool distributed_;
//- Processor label for each cell
labelList cellToProc_;
//- Labels of points for each processor
labelListList procPointAddressing_;
//- Labels of faces for each processor
// Note: Face turning index is stored as the sign on addressing
// Only the processor boundary faces are affected: if the sign of the
// index is negative, the processor face is the reverse of the
// original face. In order to do this properly, all face
// indices will be incremented by 1 and then decremented as
// necessary to avoid the problem of face number zero having no
// sign.
List<DynamicList<label> > procFaceAddressing_;
//- Labels of cells for each processor
labelListList procCellAddressing_;
//- Sizes for processor mesh patches
// Excludes inter-processor boundaries
labelListList procPatchSize_;
//- Start indices for processor patches
// Excludes inter-processor boundaries
labelListList procPatchStartIndex_;
// Per inter-processor patch information
//- Neighbour processor ID for inter-processor boundaries
labelListList procNeighbourProcessors_;
//- Sizes for inter-processor patches
labelListList procProcessorPatchSize_;
//- Start indices (in procFaceAddressing_) for inter-processor patches
labelListList procProcessorPatchStartIndex_;
//- Sub patch IDs for inter-processor patches
List<labelListList> procProcessorPatchSubPatchIDs_;
//- Sub patch sizes for inter-processor patches
List<labelListList> procProcessorPatchSubPatchStarts_;
// Private Member Functions
void distributeCells();
//- Mark all elements with the given zone index, or with -2 if an
// element occurs twice
static void mark
(
const labelList& zoneElems,
const label zoneI,
labelList& elementToZone
);
//- Append single element to list
static void append(labelList&, const label);
//- Add face to inter-processor patch
void addInterProcFace
(
const label facei,
const label ownerProc,
const label nbrProc,
List<Map<label> >&,
List<DynamicList<DynamicList<label> > >&
) const;
//- Generate sub patch info for processor cyclics
template <class BinaryOp>
void processInterCyclics
(
const polyBoundaryMesh& patches,
List<DynamicList<DynamicList<label> > >& interPatchFaces,
List<Map<label> >& procNbrToInterPatch,
List<labelListList>& subPatchIDs,
List<labelListList>& subPatchStarts,
bool owner,
BinaryOp bop
) const;
public:
// Constructors
//- Construct from IOobject
domainDecomposition(const IOobject& io);
//- Destructor
~domainDecomposition();
// Member Functions
//- Number of processors in the decomposition
label nProcs() const
{
return nProcs_;
}
//- Is the decomposition data to be distributed for each processor
bool distributed() const
{
return distributed_;
}
//- Decompose mesh.
void decomposeMesh();
//- Write decomposition
bool writeDecomposition(const bool decomposeSets);
//- Cell-processor decomposition labels
const labelList& cellToProc() const
{
return cellToProc_;
}
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "domainDecompositionTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //
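
The sign-and-offset convention documented for procFaceAddressing_ above decodes as follows (a sketch with hypothetical variable names):

// One entry of the per-processor face addressing: (facei + 1), negative if
// the processor face is reversed with respect to the complete-mesh face
const label addr = procFaceAddressing[procI][i];
const label facei = mag(addr) - 1;   // face label in the undecomposed mesh
const bool flipped = (addr < 0);     // owner/neighbour swapped on this processor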

@@ -0,0 +1,75 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2013 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "domainDecomposition.H"
#include "decompositionMethod.H"
#include "cpuTime.H"
#include "cellSet.H"
#include "regionSplit.H"
#include "Tuple2.H"
#include "faceSet.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
void Foam::domainDecomposition::distributeCells()
{
Info<< "\nCalculating distribution of cells" << endl;
cpuTime decompositionTime;
autoPtr<decompositionMethod> decomposePtr = decompositionMethod::New
(
decompositionDict_
);
scalarField cellWeights;
if (decompositionDict_.found("weightField"))
{
word weightName = decompositionDict_.lookup("weightField");
volScalarField weights
(
IOobject
(
weightName,
time().timeName(),
*this,
IOobject::MUST_READ,
IOobject::NO_WRITE
),
*this
);
cellWeights = weights.internalField();
}
cellToProc_ = decomposePtr().decompose(*this, cellWeights);
Info<< "\nFinished decomposition in "
<< decompositionTime.elapsedCpuTime()
<< " s" << endl;
}
// ************************************************************************* //

@@ -0,0 +1,485 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
InClass
domainDecomposition
Description
Private member of domainDecomposition.
Decomposes the mesh into bits
\*---------------------------------------------------------------------------*/
#include "domainDecomposition.H"
#include "IOstreams.H"
#include "boolList.H"
#include "cyclicPolyPatch.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void Foam::domainDecomposition::append(labelList& lst, const label elem)
{
label sz = lst.size();
lst.setSize(sz+1);
lst[sz] = elem;
}
void Foam::domainDecomposition::addInterProcFace
(
const label facei,
const label ownerProc,
const label nbrProc,
List<Map<label> >& nbrToInterPatch,
List<DynamicList<DynamicList<label> > >& interPatchFaces
) const
{
Map<label>::iterator patchIter = nbrToInterPatch[ownerProc].find(nbrProc);
// Introduce turning index only for internal faces (which are duplicated).
label ownerIndex = facei+1;
label nbrIndex = -(facei+1);
if (patchIter != nbrToInterPatch[ownerProc].end())
{
// Existing interproc patch. Add to both sides.
label toNbrProcPatchI = patchIter();
interPatchFaces[ownerProc][toNbrProcPatchI].append(ownerIndex);
if (isInternalFace(facei))
{
label toOwnerProcPatchI = nbrToInterPatch[nbrProc][ownerProc];
interPatchFaces[nbrProc][toOwnerProcPatchI].append(nbrIndex);
}
}
else
{
// Create new interproc patches.
label toNbrProcPatchI = nbrToInterPatch[ownerProc].size();
nbrToInterPatch[ownerProc].insert(nbrProc, toNbrProcPatchI);
DynamicList<label> oneFace;
oneFace.append(ownerIndex);
interPatchFaces[ownerProc].append(oneFace);
if (isInternalFace(facei))
{
label toOwnerProcPatchI = nbrToInterPatch[nbrProc].size();
nbrToInterPatch[nbrProc].insert(ownerProc, toOwnerProcPatchI);
oneFace.clear();
oneFace.append(nbrIndex);
interPatchFaces[nbrProc].append(oneFace);
}
}
}
void Foam::domainDecomposition::decomposeMesh()
{
// Decide which cell goes to which processor
distributeCells();
// Distribute the cells according to the given processor label
// calculate the addressing information for the original mesh
Info<< "\nCalculating original mesh data" << endl;
// set references to the original mesh
const polyBoundaryMesh& patches = boundaryMesh();
const faceList& fcs = faces();
const labelList& owner = faceOwner();
const labelList& neighbour = faceNeighbour();
// loop through the list of processor labels for the cell and add the
// cell shape to the list of cells for the appropriate processor
Info<< "\nDistributing cells to processors" << endl;
// Cells per processor
procCellAddressing_ = invertOneToMany(nProcs_, cellToProc_);
Info<< "\nDistributing faces to processors" << endl;
// Loop through all internal faces and decide which processor they belong to
// First visit all internal faces. If cells at both sides belong to the
// same processor, the face is an internal face. If they are different,
// it belongs to both processors.
procFaceAddressing_.setSize(nProcs_);
// Internal faces
forAll(neighbour, facei)
{
if (cellToProc_[owner[facei]] == cellToProc_[neighbour[facei]])
{
// Face internal to processor. Notice no turning index.
procFaceAddressing_[cellToProc_[owner[facei]]].append(facei+1);
}
}
// for all processors, set the size of start index and patch size
// lists to the number of patches in the mesh
forAll(procPatchSize_, procI)
{
procPatchSize_[procI].setSize(patches.size());
procPatchStartIndex_[procI].setSize(patches.size());
}
forAll(patches, patchi)
{
// Reset size and start index for all processors
forAll(procPatchSize_, procI)
{
procPatchSize_[procI][patchi] = 0;
procPatchStartIndex_[procI][patchi] =
procFaceAddressing_[procI].size();
}
const label patchStart = patches[patchi].start();
if (!isA<cyclicPolyPatch>(patches[patchi]))
{
// Normal patch. Add faces to processor where the cell
// next to the face lives
const labelUList& patchFaceCells =
patches[patchi].faceCells();
forAll(patchFaceCells, facei)
{
const label curProc = cellToProc_[patchFaceCells[facei]];
// add the face without turning index
procFaceAddressing_[curProc].append(patchStart+facei+1);
// increment the number of faces for this patch
procPatchSize_[curProc][patchi]++;
}
}
else
{
const cyclicPolyPatch& pp = refCast<const cyclicPolyPatch>
(
patches[patchi]
);
// cyclic: check opposite side on this processor
const labelUList& patchFaceCells = pp.faceCells();
const labelUList& nbrPatchFaceCells =
pp.neighbPatch().faceCells();
forAll(patchFaceCells, facei)
{
const label curProc = cellToProc_[patchFaceCells[facei]];
const label nbrProc = cellToProc_[nbrPatchFaceCells[facei]];
if (curProc == nbrProc)
{
// add the face without turning index
procFaceAddressing_[curProc].append(patchStart+facei+1);
// increment the number of faces for this patch
procPatchSize_[curProc][patchi]++;
}
}
}
}
// Done internal bits of the new mesh and the ordinary patches.
// Per processor, from neighbour processor to the inter-processor patch
// that communicates with that neighbour
List<Map<label> > procNbrToInterPatch(nProcs_);
// Per processor the faces per inter-processor patch
List<DynamicList<DynamicList<label> > > interPatchFaces(nProcs_);
// Processor boundaries from internal faces
forAll(neighbour, facei)
{
label ownerProc = cellToProc_[owner[facei]];
label nbrProc = cellToProc_[neighbour[facei]];
if (ownerProc != nbrProc)
{
// inter - processor patch face found.
addInterProcFace
(
facei,
ownerProc,
nbrProc,
procNbrToInterPatch,
interPatchFaces
);
}
}
// Add the proper processor faces to the sub-patch information. For faces
// originating from internal faces the sub-patch ID is always -1.
List<labelListList> subPatchIDs(nProcs_);
List<labelListList> subPatchStarts(nProcs_);
forAll(interPatchFaces, procI)
{
label nInterfaces = interPatchFaces[procI].size();
subPatchIDs[procI].setSize(nInterfaces, labelList(1, label(-1)));
subPatchStarts[procI].setSize(nInterfaces, labelList(1, label(0)));
}
// Special handling needed for the case that multiple processor cyclic
// patches are created on each local processor domain, e.g. if a 3x3 case
// is decomposed using the decomposition:
//
// | 1 | 0 | 2 |
// cyclic left | 2 | 0 | 1 | cyclic right
// | 2 | 0 | 1 |
//
// - processors 1 and 2 will both have pieces of both cyclic left- and
// right sub-patches present
// - the interface patch faces are stored in a single list, where each
// sub-patch is referenced into the list using a patch start index and
// size
// - if the patches are in order (in the boundary file) of left, right
// - processor 1 will send: left, right
// - processor 1 will need to receive in reverse order: right, left
// - similarly for processor 2
// - the sub-patches are therefore generated in 4 passes of the patch lists
// 1. add faces from owner patch where local proc i < nbr proc i
// 2. add faces from nbr patch where local proc i < nbr proc i
// 3. add faces from owner patch where local proc i > nbr proc i
// 4. add faces from nbr patch where local proc i > nbr proc i
processInterCyclics
(
patches,
interPatchFaces,
procNbrToInterPatch,
subPatchIDs,
subPatchStarts,
true,
lessOp<label>()
);
processInterCyclics
(
patches,
interPatchFaces,
procNbrToInterPatch,
subPatchIDs,
subPatchStarts,
false,
lessOp<label>()
);
processInterCyclics
(
patches,
interPatchFaces,
procNbrToInterPatch,
subPatchIDs,
subPatchStarts,
false,
greaterOp<label>()
);
processInterCyclics
(
patches,
interPatchFaces,
procNbrToInterPatch,
subPatchIDs,
subPatchStarts,
true,
greaterOp<label>()
);
// Sort inter-proc patch by neighbour
labelList order;
forAll(procNbrToInterPatch, procI)
{
label nInterfaces = procNbrToInterPatch[procI].size();
procNeighbourProcessors_[procI].setSize(nInterfaces);
procProcessorPatchSize_[procI].setSize(nInterfaces);
procProcessorPatchStartIndex_[procI].setSize(nInterfaces);
procProcessorPatchSubPatchIDs_[procI].setSize(nInterfaces);
procProcessorPatchSubPatchStarts_[procI].setSize(nInterfaces);
//Info<< "Processor " << procI << endl;
// Get sorted neighbour processors
const Map<label>& curNbrToInterPatch = procNbrToInterPatch[procI];
labelList nbrs = curNbrToInterPatch.toc();
sortedOrder(nbrs, order);
DynamicList<DynamicList<label> >& curInterPatchFaces =
interPatchFaces[procI];
forAll(nbrs, i)
{
const label nbrProc = nbrs[i];
const label interPatch = curNbrToInterPatch[nbrProc];
procNeighbourProcessors_[procI][i] = nbrProc;
procProcessorPatchSize_[procI][i] =
curInterPatchFaces[interPatch].size();
procProcessorPatchStartIndex_[procI][i] =
procFaceAddressing_[procI].size();
// Add size as last element to substarts and transfer
append
(
subPatchStarts[procI][interPatch],
curInterPatchFaces[interPatch].size()
);
procProcessorPatchSubPatchIDs_[procI][i].transfer
(
subPatchIDs[procI][interPatch]
);
procProcessorPatchSubPatchStarts_[procI][i].transfer
(
subPatchStarts[procI][interPatch]
);
//Info<< " nbr:" << nbrProc << endl;
//Info<< " interpatch:" << interPatch << endl;
//Info<< " size:" << procProcessorPatchSize_[procI][i] << endl;
//Info<< " start:" << procProcessorPatchStartIndex_[procI][i]
// << endl;
//Info<< " subPatches:"
// << procProcessorPatchSubPatchIDs_[procI][i]
// << endl;
//Info<< " subStarts:"
// << procProcessorPatchSubPatchStarts_[procI][i] << endl;
// And add all the face labels for interPatch
DynamicList<label>& interPatchFaces =
curInterPatchFaces[interPatch];
forAll(interPatchFaces, j)
{
procFaceAddressing_[procI].append(interPatchFaces[j]);
}
interPatchFaces.clearStorage();
}
curInterPatchFaces.clearStorage();
procFaceAddressing_[procI].shrink();
}
////XXXXXXX
//// Print a bit
// forAll(procPatchStartIndex_, procI)
// {
// Info<< "Processor:" << procI << endl;
//
// Info<< " total faces:" << procFaceAddressing_[procI].size()
// << endl;
//
// const labelList& curProcPatchStartIndex = procPatchStartIndex_[procI];
//
// forAll(curProcPatchStartIndex, patchI)
// {
// Info<< " patch:" << patchI
// << "\tstart:" << curProcPatchStartIndex[patchI]
// << "\tsize:" << procPatchSize_[procI][patchI]
// << endl;
// }
// }
// Info<< endl;
//
// forAll(procNeighbourProcessors_, procI)
// {
// Info<< "Processor " << procI << endl;
//
// forAll(procNeighbourProcessors_[procI], i)
// {
// Info<< " nbr:" << procNeighbourProcessors_[procI][i] << endl;
// Info<< " size:" << procProcessorPatchSize_[procI][i] << endl;
// Info<< " start:" << procProcessorPatchStartIndex_[procI][i]
// << endl;
// }
// }
// Info<< endl;
//
// forAll(procFaceAddressing_, procI)
// {
// Info<< "Processor:" << procI << endl;
//
// Info<< " faces:" << procFaceAddressing_[procI] << endl;
// }
Info<< "\nDistributing points to processors" << endl;
// For every processor, loop through the list of faces for the processor.
// For every face, loop through the list of points and mark the point as
// used for the processor. Collect the list of used points for the
// processor.
forAll(procPointAddressing_, procI)
{
boolList pointLabels(nPoints(), false);
// Get reference to list of used faces
const labelList& procFaceLabels = procFaceAddressing_[procI];
forAll(procFaceLabels, facei)
{
// Because of the turning index, some labels may be negative
const labelList& facePoints = fcs[mag(procFaceLabels[facei]) - 1];
forAll(facePoints, pointi)
{
// Mark the point as used
pointLabels[facePoints[pointi]] = true;
}
}
// Collect the used points
labelList& procPointLabels = procPointAddressing_[procI];
procPointLabels.setSize(pointLabels.size());
label nUsedPoints = 0;
forAll(pointLabels, pointi)
{
if (pointLabels[pointi])
{
procPointLabels[nUsedPoints] = pointi;
nUsedPoints++;
}
}
// Reset the size of used points
procPointLabels.setSize(nUsedPoints);
}
}
// ************************************************************************* //
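
A small worked illustration (made-up numbers) of the invertOneToMany() call that builds procCellAddressing_ above:

// cellToProc = (0 1 0 1 1) with nProcs = 2 gives
//   procCellAddressing[0] = (0 2)    cells assigned to processor 0
//   procCellAddressing[1] = (1 3 4)  cells assigned to processor 1
labelList cellToProc(5);
cellToProc[0] = 0;  cellToProc[1] = 1;  cellToProc[2] = 0;
cellToProc[3] = 1;  cellToProc[4] = 1;
labelListList procCellAddressing = invertOneToMany(2, cellToProc);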

@@ -0,0 +1,125 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "cyclicPolyPatch.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
template <class BinaryOp>
void Foam::domainDecomposition::processInterCyclics
(
const polyBoundaryMesh& patches,
List<DynamicList<DynamicList<label> > >& interPatchFaces,
List<Map<label> >& procNbrToInterPatch,
List<labelListList>& subPatchIDs,
List<labelListList>& subPatchStarts,
bool owner,
BinaryOp bop
) const
{
// Processor boundaries from split cyclics
forAll(patches, patchi)
{
if (isA<cyclicPolyPatch>(patches[patchi]))
{
const cyclicPolyPatch& pp = refCast<const cyclicPolyPatch>
(
patches[patchi]
);
if (pp.owner() != owner)
{
continue;
}
// cyclic: check opposite side on this processor
const labelUList& patchFaceCells = pp.faceCells();
const labelUList& nbrPatchFaceCells =
pp.neighbPatch().faceCells();
// Store old sizes. Used to detect which inter-proc patches
// have been added to.
labelListList oldInterfaceSizes(nProcs_);
forAll(oldInterfaceSizes, procI)
{
labelList& curOldSizes = oldInterfaceSizes[procI];
curOldSizes.setSize(interPatchFaces[procI].size());
forAll(curOldSizes, interI)
{
curOldSizes[interI] =
interPatchFaces[procI][interI].size();
}
}
// Add faces with different owner and neighbour processors
forAll(patchFaceCells, facei)
{
const label ownerProc = cellToProc_[patchFaceCells[facei]];
const label nbrProc = cellToProc_[nbrPatchFaceCells[facei]];
if (bop(ownerProc, nbrProc))
{
// inter - processor patch face found.
addInterProcFace
(
pp.start()+facei,
ownerProc,
nbrProc,
procNbrToInterPatch,
interPatchFaces
);
}
}
// 1. Check if any faces added to existing interfaces
forAll(oldInterfaceSizes, procI)
{
const labelList& curOldSizes = oldInterfaceSizes[procI];
forAll(curOldSizes, interI)
{
label oldSz = curOldSizes[interI];
if (interPatchFaces[procI][interI].size() > oldSz)
{
// Added faces to this interface. Add an entry
append(subPatchIDs[procI][interI], patchi);
append(subPatchStarts[procI][interI], oldSz);
}
}
}
// 2. Any new interfaces
forAll(subPatchIDs, procI)
{
label nIntfcs = interPatchFaces[procI].size();
subPatchIDs[procI].setSize(nIntfcs, labelList(1, patchi));
subPatchStarts[procI].setSize(nIntfcs, labelList(1, label(0)));
}
}
}
}
// ************************************************************************* //

@@ -0,0 +1,119 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Description
Lagrangian field decomposer.
\*---------------------------------------------------------------------------*/
#include "lagrangianFieldDecomposer.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from components
Foam::lagrangianFieldDecomposer::lagrangianFieldDecomposer
(
const polyMesh& mesh,
const polyMesh& procMesh,
const labelList& faceProcAddressing,
const labelList& cellProcAddressing,
const word& cloudName,
const Cloud<indexedParticle>& lagrangianPositions,
const List<SLList<indexedParticle*>*>& cellParticles
)
:
procMesh_(procMesh),
positions_(procMesh, cloudName, false),
particleIndices_(lagrangianPositions.size())
{
label pi = 0;
// faceProcAddressing not required currently
// labelList decodedProcFaceAddressing(faceProcAddressing.size());
// forAll(faceProcAddressing, i)
// {
// decodedProcFaceAddressing[i] = mag(faceProcAddressing[i]) - 1;
// }
forAll(cellProcAddressing, procCelli)
{
label celli = cellProcAddressing[procCelli];
if (cellParticles[celli])
{
SLList<indexedParticle*>& particlePtrs = *cellParticles[celli];
forAllConstIter(SLList<indexedParticle*>, particlePtrs, iter)
{
const indexedParticle& ppi = *iter();
particleIndices_[pi++] = ppi.index();
// label mappedTetFace = findIndex
// (
// decodedProcFaceAddressing,
// ppi.tetFace()
// );
// if (mappedTetFace == -1)
// {
// FatalErrorIn
// (
// "Foam::lagrangianFieldDecomposer"
// "::lagrangianFieldDecomposer"
// "("
// "const polyMesh& mesh, "
// "const polyMesh& procMesh, "
// "const labelList& faceProcAddressing, "
// "const labelList& cellProcAddressing, "
// "const word& cloudName, "
// "const Cloud<indexedParticle>& "
// "lagrangianPositions, "
// "const List<SLList<indexedParticle*>*>& "
// "cellParticles"
// ")"
// ) << "Face lookup failure." << nl
// << abort(FatalError);
// }
positions_.append
(
new passiveParticle
(
procMesh,
ppi.position(),
procCelli,
false
)
);
}
}
}
particleIndices_.setSize(pi);
IOPosition<Cloud<passiveParticle> >(positions_).write();
}
// ************************************************************************* //
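
The constructor above expects the particles already binned per cell; a sketch of that binning for one cloud, assuming a complete mesh and a Cloud<indexedParticle> named lagrangianPositions (the decomposePar driver has to build this before calling the constructor):

// One SLList of particle pointers per cell of the complete mesh
List<SLList<indexedParticle*>*> cellParticles
(
    mesh.nCells(),
    static_cast<SLList<indexedParticle*>*>(NULL)
);
label i = 0;
forAllIter(Cloud<indexedParticle>, lagrangianPositions, iter)
{
    iter().index() = i++;              // running index used by particleIndices_
    const label celli = iter().cell();
    if (!cellParticles[celli])
    {
        cellParticles[celli] = new SLList<indexedParticle*>();
    }
    cellParticles[celli]->append(&iter());
}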

@@ -0,0 +1,166 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::lagrangianFieldDecomposer
Description
Lagrangian field decomposer.
SourceFiles
lagrangianFieldDecomposer.C
lagrangianFieldDecomposerDecomposeFields.C
\*---------------------------------------------------------------------------*/
#ifndef lagrangianFieldDecomposer_H
#define lagrangianFieldDecomposer_H
#include "Cloud.H"
#include "CompactIOField.H"
#include "indexedParticle.H"
#include "passiveParticle.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
class IOobjectList;
/*---------------------------------------------------------------------------*\
Class lagrangianFieldDecomposer Declaration
\*---------------------------------------------------------------------------*/
class lagrangianFieldDecomposer
{
// Private data
//- Reference to processor mesh
const polyMesh& procMesh_;
//- Lagrangian positions for this processor
Cloud<passiveParticle> positions_;
//- The indices of the particles on this processor
labelList particleIndices_;
// Private Member Functions
//- Disallow default bitwise copy construct
lagrangianFieldDecomposer(const lagrangianFieldDecomposer&);
//- Disallow default bitwise assignment
void operator=(const lagrangianFieldDecomposer&);
public:
// Constructors
//- Construct from components
lagrangianFieldDecomposer
(
const polyMesh& mesh,
const polyMesh& procMesh,
const labelList& faceProcAddressing,
const labelList& cellProcAddressing,
const word& cloudName,
const Cloud<indexedParticle>& lagrangianPositions,
const List<SLList<indexedParticle*>*>& cellParticles
);
// Member Functions
// Read the fields and hold them on the pointer list
template<class Type>
static void readFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList<PtrList<IOField<Type> > >& lagrangianFields
// PtrList<IOField<Type> >& lagrangianFields
);
template<class Type>
static void readFieldFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList
<
PtrList<CompactIOField<Field<Type>, Type> >
>& lagrangianFields
// PtrList<CompactIOField<Field<Type>, Type > >& lagrangianFields
);
//- Decompose volume field
template<class Type>
tmp<IOField<Type> > decomposeField
(
const word& cloudName,
const IOField<Type>& field
) const;
template<class Type>
tmp<CompactIOField<Field<Type>, Type> > decomposeFieldField
(
const word& cloudName,
const CompactIOField<Field<Type>, Type>& field
) const;
template<class GeoField>
void decomposeFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const;
template<class GeoField>
void decomposeFieldFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
# include "lagrangianFieldDecomposerDecomposeFields.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

@@ -0,0 +1,216 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "lagrangianFieldDecomposer.H"
#include "IOobjectList.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
void Foam::lagrangianFieldDecomposer::readFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList<PtrList<IOField<Type> > >& lagrangianFields
)
{
// Search list of objects for lagrangian fields
IOobjectList lagrangianTypeObjects
(
lagrangianObjects.lookupClass(IOField<Type>::typeName)
);
lagrangianFields.set
(
cloudI,
new PtrList<IOField<Type> >
(
lagrangianTypeObjects.size()
)
);
label lagrangianFieldi = 0;
forAllIter(IOobjectList, lagrangianTypeObjects, iter)
{
lagrangianFields[cloudI].set
(
lagrangianFieldi++,
new IOField<Type>(*iter())
);
}
}
template<class Type>
void Foam::lagrangianFieldDecomposer::readFieldFields
(
const label cloudI,
const IOobjectList& lagrangianObjects,
PtrList<PtrList<CompactIOField<Field<Type>, Type> > >& lagrangianFields
)
{
// Search list of objects for lagrangian fields
IOobjectList lagrangianTypeObjectsA
(
lagrangianObjects.lookupClass(IOField<Field<Type> >::typeName)
);
IOobjectList lagrangianTypeObjectsB
(
lagrangianObjects.lookupClass
(
CompactIOField<Field<Type>,
Type>::typeName
)
);
lagrangianFields.set
(
cloudI,
new PtrList<CompactIOField<Field<Type>, Type> >
(
lagrangianTypeObjectsA.size() + lagrangianTypeObjectsB.size()
)
);
label lagrangianFieldi = 0;
forAllIter(IOobjectList, lagrangianTypeObjectsA, iter)
{
lagrangianFields[cloudI].set
(
lagrangianFieldi++,
new CompactIOField<Field<Type>, Type>(*iter())
);
}
forAllIter(IOobjectList, lagrangianTypeObjectsB, iter)
{
lagrangianFields[cloudI].set
(
lagrangianFieldi++,
new CompactIOField<Field<Type>, Type>(*iter())
);
}
}
template<class Type>
Foam::tmp<Foam::IOField<Type> >
Foam::lagrangianFieldDecomposer::decomposeField
(
const word& cloudName,
const IOField<Type>& field
) const
{
// Create and map the internal field values
Field<Type> procField(field, particleIndices_);
// Create the field for the processor
return tmp<IOField<Type> >
(
new IOField<Type>
(
IOobject
(
field.name(),
procMesh_.time().timeName(),
cloud::prefix/cloudName,
procMesh_,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procField
)
);
}
template<class Type>
Foam::tmp<Foam::CompactIOField<Foam::Field<Type>, Type> >
Foam::lagrangianFieldDecomposer::decomposeFieldField
(
const word& cloudName,
const CompactIOField<Field<Type>, Type>& field
) const
{
// Create and map the internal field values
Field<Field<Type> > procField(field, particleIndices_);
// Create the field for the processor
return tmp<CompactIOField<Field<Type>, Type> >
(
new CompactIOField<Field<Type>, Type>
(
IOobject
(
field.name(),
procMesh_.time().timeName(),
cloud::prefix/cloudName,
procMesh_,
IOobject::NO_READ,
IOobject::NO_WRITE
),
procField
)
);
}
template<class GeoField>
void Foam::lagrangianFieldDecomposer::decomposeFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const
{
if (particleIndices_.size())
{
forAll(fields, fieldI)
{
decomposeField(cloudName, fields[fieldI])().write();
}
}
}
template<class GeoField>
void Foam::lagrangianFieldDecomposer::decomposeFieldFields
(
const word& cloudName,
const PtrList<GeoField>& fields
) const
{
if (particleIndices_.size())
{
forAll(fields, fieldI)
{
decomposeFieldField(cloudName, fields[fieldI])().write();
}
}
}
// ************************************************************************* //
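
A driver sketch for one cloud and one field type, assuming an already constructed lagrangianFieldDecomposer (fieldDecomposer), a cloud index cloudI with name cloudName, the IOobjectList of that cloud's directory (lagrangianObjects) and the number of clouds nClouds (all hypothetical names):

// Read every IOField<scalar> of this cloud, then write the processor subset
// selected by particleIndices_
PtrList<PtrList<IOField<scalar> > > lagrangianScalarFields(nClouds);
lagrangianFieldDecomposer::readFields
(
    cloudI,
    lagrangianObjects,
    lagrangianScalarFields
);
fieldDecomposer.decomposeFields(cloudName, lagrangianScalarFields[cloudI]);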

@@ -0,0 +1,128 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2013 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "pointFieldDecomposer.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::pointFieldDecomposer::patchFieldDecomposer::patchFieldDecomposer
(
const pointPatch& completeMeshPatch,
const pointPatch& procMeshPatch,
const labelList& directAddr
)
:
pointPatchFieldMapperPatchRef
(
completeMeshPatch,
procMeshPatch
),
directAddressing_(procMeshPatch.size(), -1),
hasUnmapped_(false)
{
// Create the inverse-addressing of the patch point labels.
labelList pointMap(completeMeshPatch.boundaryMesh().mesh().size(), -1);
const labelList& completeMeshPatchPoints = completeMeshPatch.meshPoints();
forAll(completeMeshPatchPoints, pointi)
{
pointMap[completeMeshPatchPoints[pointi]] = pointi;
}
// Use the inverse point addressing to create the addressing table for this
// patch
const labelList& procMeshPatchPoints = procMeshPatch.meshPoints();
forAll(procMeshPatchPoints, pointi)
{
directAddressing_[pointi] =
pointMap[directAddr[procMeshPatchPoints[pointi]]];
}
// Check that all the patch point addresses are set
if (directAddressing_.size() && min(directAddressing_) < 0)
{
hasUnmapped_ = true;
FatalErrorIn
(
"pointFieldDecomposer::patchFieldDecomposer()"
) << "Incomplete patch point addressing"
<< abort(FatalError);
}
}
Foam::pointFieldDecomposer::pointFieldDecomposer
(
const pointMesh& completeMesh,
const pointMesh& procMesh,
const labelList& pointAddressing,
const labelList& boundaryAddressing
)
:
completeMesh_(completeMesh),
procMesh_(procMesh),
pointAddressing_(pointAddressing),
boundaryAddressing_(boundaryAddressing),
patchFieldDecomposerPtrs_
(
procMesh_.boundary().size(),
static_cast<patchFieldDecomposer*>(NULL)
)
{
forAll(boundaryAddressing_, patchi)
{
if (boundaryAddressing_[patchi] >= 0)
{
patchFieldDecomposerPtrs_[patchi] = new patchFieldDecomposer
(
completeMesh_.boundary()[boundaryAddressing_[patchi]],
procMesh_.boundary()[patchi],
pointAddressing_
);
}
}
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::pointFieldDecomposer::~pointFieldDecomposer()
{
forAll(patchFieldDecomposerPtrs_, patchi)
{
if (patchFieldDecomposerPtrs_[patchi])
{
delete patchFieldDecomposerPtrs_[patchi];
}
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// ************************************************************************* //
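
A worked illustration (made-up point labels) of the two-stage mapping the patchFieldDecomposer constructor above builds:

// Complete-mesh patch meshPoints() = (10 11 12)
//   => pointMap[10] = 0, pointMap[11] = 1, pointMap[12] = 2
// Processor patch meshPoints() = (3 4); the volume point addressing
// (directAddr) maps processor points 3 and 4 to complete-mesh points 12 and 10
//   => directAddressing_ = (2 0)
// i.e. processor patch point 0 takes its value from complete patch point 2,
// processor patch point 1 from complete patch point 0.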

@@ -0,0 +1,182 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2013 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::pointFieldDecomposer
Description
Point field decomposer.
SourceFiles
pointFieldDecomposer.C
pointFieldDecomposerDecomposeFields.C
\*---------------------------------------------------------------------------*/
#ifndef pointFieldDecomposer_H
#define pointFieldDecomposer_H
#include "pointMesh.H"
#include "pointPatchFieldMapperPatchRef.H"
#include "pointFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class pointFieldDecomposer Declaration
\*---------------------------------------------------------------------------*/
class pointFieldDecomposer
{
public:
//- Point patch field decomposer class
class patchFieldDecomposer
:
public pointPatchFieldMapperPatchRef
{
// Private data
labelList directAddressing_;
//- Does map contain any unmapped values
bool hasUnmapped_;
public:
// Constructors
//- Construct given addressing
patchFieldDecomposer
(
const pointPatch& completeMeshPatch,
const pointPatch& procMeshPatch,
const labelList& directAddr
);
// Member functions
label size() const
{
return directAddressing_.size();
}
bool direct() const
{
return true;
}
bool hasUnmapped() const
{
return hasUnmapped_;
}
const labelUList& directAddressing() const
{
return directAddressing_;
}
};
private:
// Private data
//- Reference to complete mesh
const pointMesh& completeMesh_;
//- Reference to processor mesh
const pointMesh& procMesh_;
//- Reference to point addressing
const labelList& pointAddressing_;
//- Reference to boundary addressing
const labelList& boundaryAddressing_;
//- List of patch field decomposers
List<patchFieldDecomposer*> patchFieldDecomposerPtrs_;
// Private Member Functions
//- Disallow default bitwise copy construct
pointFieldDecomposer(const pointFieldDecomposer&);
//- Disallow default bitwise assignment
void operator=(const pointFieldDecomposer&);
public:
// Constructors
//- Construct from components
pointFieldDecomposer
(
const pointMesh& completeMesh,
const pointMesh& procMesh,
const labelList& pointAddressing,
const labelList& boundaryAddressing
);
//- Destructor
~pointFieldDecomposer();
// Member Functions
//- Decompose point field
template<class Type>
tmp<GeometricField<Type, pointPatchField, pointMesh> >
decomposeField
(
const GeometricField<Type, pointPatchField, pointMesh>&
) const;
template<class GeoField>
void decomposeFields(const PtrList<GeoField>& fields) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
# include "pointFieldDecomposerDecomposeFields.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

@@ -0,0 +1,112 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "pointFieldDecomposer.H"
#include "processorPointPatchFields.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
Foam::tmp<Foam::GeometricField<Type, Foam::pointPatchField, Foam::pointMesh> >
Foam::pointFieldDecomposer::decomposeField
(
const GeometricField<Type, pointPatchField, pointMesh>& field
) const
{
// Create and map the internal field values
Field<Type> internalField(field.internalField(), pointAddressing_);
// Create a list of pointers for the patchFields
PtrList<pointPatchField<Type> > patchFields(boundaryAddressing_.size());
// Create and map the patch field values
forAll(boundaryAddressing_, patchi)
{
if (patchFieldDecomposerPtrs_[patchi])
{
patchFields.set
(
patchi,
pointPatchField<Type>::New
(
field.boundaryField()[boundaryAddressing_[patchi]],
procMesh_.boundary()[patchi],
DimensionedField<Type, pointMesh>::null(),
*patchFieldDecomposerPtrs_[patchi]
)
);
}
else
{
patchFields.set
(
patchi,
new processorPointPatchField<Type>
(
procMesh_.boundary()[patchi],
DimensionedField<Type, pointMesh>::null()
)
);
}
}
// Create the field for the processor
return tmp<GeometricField<Type, pointPatchField, pointMesh> >
(
new GeometricField<Type, pointPatchField, pointMesh>
(
IOobject
(
field.name(),
procMesh_().time().timeName(),
procMesh_(),
IOobject::NO_READ,
IOobject::NO_WRITE
),
procMesh_,
field.dimensions(),
internalField,
patchFields
)
);
}
template<class GeoField>
void Foam::pointFieldDecomposer::decomposeFields
(
const PtrList<GeoField>& fields
) const
{
forAll(fields, fieldI)
{
decomposeField(fields[fieldI])().write();
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// ************************************************************************* //

@@ -0,0 +1,63 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "readFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Mesh, class GeoField>
void Foam::readFields
(
const Mesh& mesh,
const IOobjectList& objects,
PtrList<GeoField>& fields
)
{
// Search list of objects for fields of type GeoField
IOobjectList fieldObjects(objects.lookupClass(GeoField::typeName));
// Remove the cellDist field
IOobjectList::iterator cellDistIter = fieldObjects.find("cellDist");
if (cellDistIter != fieldObjects.end())
{
fieldObjects.erase(cellDistIter);
}
// Construct the fields
fields.setSize(fieldObjects.size());
label fieldI = 0;
forAllIter(IOobjectList, fieldObjects, iter)
{
fields.set
(
fieldI++,
new GeoField(*iter(), mesh)
);
}
}
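// Example usage (a sketch; the instance and field type are illustrative):
//
//     IOobjectList objects(mesh, runTime.timeName());
//     PtrList<volScalarField> volScalarFields;
//     readFields(mesh, objects, volScalarFields);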
// ************************************************************************* //

View File

@ -0,0 +1,65 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Global
readFields
Description
    Read all fields of a given type from an IOobjectList into a PtrList,
    excluding the "cellDist" decomposition field.
SourceFiles
readFields.C
\*---------------------------------------------------------------------------*/
#ifndef readFields_H
#define readFields_H
#include "IOobjectList.H"
#include "PtrList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Read the fields and hold them on the pointer list
template<class Mesh, class GeoField>
void readFields
(
const Mesh& mesh,
const IOobjectList& objects,
PtrList<GeoField>& fields
);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
# include "readFields.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -0,0 +1,3 @@
reconstructPar.C
EXE = $(FOAM_APPBIN)/reconstructPar

View File

@ -0,0 +1,14 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/parallel/reconstruct/reconstruct/lnInclude \
-I$(LIB_SRC)/regionModels/regionModel/lnInclude
EXE_LIBS = \
-lfiniteVolume \
-lgenericPatchFields \
-llagrangian \
-lmeshTools \
-lreconstruct \
-lregionModels

View File

@ -0,0 +1,48 @@
{
// Foam version 2.1 changed the addressing of faces in faceProcAddressing.
// The following code checks and modifies the addressing for cases where
// the decomposition was done with foam 2.0 or earlier tools but the
// reconstruction is attempted with version 2.1 or later.
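// For example, an entry of 5 (an owner-aligned copy of master face 5 in the
// old scheme) becomes 6 below and a flipped entry of -5 becomes -6, matching
// the current convention of storing +/-(masterFace + 1). The old scheme is
// detected by the presence of a zero entry (master face 0), which the
// current convention never produces.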
label minFaceIndex = labelMax;
PtrList<labelIOList>& faceProcAddressing = procMeshes.faceProcAddressing();
forAll(faceProcAddressing, procI)
{
const labelList& curFaceAddr = faceProcAddressing[procI];
forAll(curFaceAddr, faceI)
{
if (mag(curFaceAddr[faceI]) < minFaceIndex)
{
minFaceIndex = mag(curFaceAddr[faceI]);
}
}
}
if (minFaceIndex < 1)
{
WarningIn(args.executable())
<< "parallel decomposition addressing." << endl
<< "It looks like you are trying to reconstruct the case "
<< "decomposed with an earlier version of FOAM, which could\n"
<< "potentially cause compatibility problems. The code will "
<< "attempt to update the addressing automatically; in case of\n"
<< "failure, please repeat the decomposition of the case using "
<< "the current version fo decomposePar"
<< endl;
forAll(faceProcAddressing, procI)
{
labelList& curFaceAddr = faceProcAddressing[procI];
forAll(curFaceAddr, faceI)
{
curFaceAddr[faceI] += sign(curFaceAddr[faceI]);
}
faceProcAddressing[procI].write();
}
}
}

View File

@ -0,0 +1,891 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Application
reconstructPar
Description
Reconstructs fields of a case that is decomposed for parallel
execution of OpenFOAM.
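Usage
A minimal sketch of typical invocations (the times and field names are
illustrative):
\verbatim
# Reconstruct all selected times
reconstructPar
# Reconstruct only the latest time, restricted to U and p
reconstructPar -latestTime -fields '(U p)'
# Reconstruct only times not already present, skipping lagrangian data
reconstructPar -newTimes -noLagrangian
\endverbatim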
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "timeSelector.H"
#include "fvCFD.H"
#include "IOobjectList.H"
#include "processorMeshes.H"
#include "regionProperties.H"
#include "fvFieldReconstructor.H"
#include "pointFieldReconstructor.H"
#include "reconstructLagrangian.H"
#include "cellSet.H"
#include "faceSet.H"
#include "pointSet.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
bool haveAllTimes
(
const HashSet<word>& masterTimeDirSet,
const instantList& timeDirs
)
{
// Loop over all times
forAll(timeDirs, timeI)
{
if (!masterTimeDirSet.found(timeDirs[timeI].name()))
{
return false;
}
}
return true;
}
int main(int argc, char *argv[])
{
argList::addNote
(
"Reconstruct fields of a parallel case"
);
// enable -constant ... if someone really wants it
// enable -zeroTime to prevent accidentally trashing the initial fields
timeSelector::addOptions(true, true);
argList::noParallel();
#include "addRegionOption.H"
argList::addBoolOption
(
"allRegions",
"operate on all regions in regionProperties"
);
argList::addOption
(
"fields",
"list",
"specify a list of fields to be reconstructed. Eg, '(U T p)' - "
"regular expressions not currently supported"
);
argList::addOption
(
"lagrangianFields",
"list",
"specify a list of lagrangian fields to be reconstructed. Eg, '(U d)' -"
"regular expressions not currently supported, "
"positions always included."
);
argList::addBoolOption
(
"noLagrangian",
"skip reconstructing lagrangian positions and fields"
);
argList::addBoolOption
(
"noSets",
"skip reconstructing cellSets, faceSets, pointSets"
);
argList::addBoolOption
(
"newTimes",
"only reconstruct new times (i.e. that do not exist already)"
);
#include "setRootCase.H"
#include "createTime.H"
HashSet<word> selectedFields;
if (args.optionFound("fields"))
{
args.optionLookup("fields")() >> selectedFields;
}
const bool noLagrangian = args.optionFound("noLagrangian");
if (noLagrangian)
{
Info<< "Skipping reconstructing lagrangian positions and fields"
<< nl << endl;
}
const bool noReconstructSets = args.optionFound("noSets");
if (noReconstructSets)
{
Info<< "Skipping reconstructing cellSets, faceSets and pointSets"
<< nl << endl;
}
HashSet<word> selectedLagrangianFields;
if (args.optionFound("lagrangianFields"))
{
if (noLagrangian)
{
FatalErrorIn(args.executable())
<< "Cannot specify noLagrangian and lagrangianFields "
<< "options together."
<< exit(FatalError);
}
args.optionLookup("lagrangianFields")() >> selectedLagrangianFields;
}
const bool newTimes = args.optionFound("newTimes");
const bool allRegions = args.optionFound("allRegions");
// determine the processor count directly
label nProcs = 0;
while (isDir(args.path()/(word("processor") + name(nProcs))))
{
++nProcs;
}
if (!nProcs)
{
FatalErrorIn(args.executable())
<< "No processor* directories found"
<< exit(FatalError);
}
// Create the processor databases
PtrList<Time> databases(nProcs);
forAll(databases, procI)
{
databases.set
(
procI,
new Time
(
Time::controlDictName,
args.rootPath(),
args.caseName()/fileName(word("processor") + name(procI))
)
);
}
// use the times list from the master processor
// and select a subset based on the command-line options
instantList timeDirs = timeSelector::select
(
databases[0].times(),
args
);
// Note that we do not set the runTime time so it is still the
// one set through the controlDict. The -time option
// only affects the selected set of times from processor0.
// - can be illogical
// + any point motion handled through mesh.readUpdate
if (timeDirs.empty())
{
FatalErrorIn(args.executable())
<< "No times selected"
<< exit(FatalError);
}
// Get current times if -newTimes
instantList masterTimeDirs;
if (newTimes)
{
masterTimeDirs = runTime.times();
}
HashSet<word> masterTimeDirSet(2*masterTimeDirs.size());
forAll(masterTimeDirs, i)
{
masterTimeDirSet.insert(masterTimeDirs[i].name());
}
// Set all times on processor meshes equal to reconstructed mesh
forAll(databases, procI)
{
databases[procI].setTime(runTime);
}
wordList regionNames;
wordList regionDirs;
if (allRegions)
{
Info<< "Reconstructing for all regions in regionProperties" << nl
<< endl;
regionProperties rp(runTime);
forAllConstIter(HashTable<wordList>, rp, iter)
{
const wordList& regions = iter();
forAll(regions, i)
{
if (findIndex(regionNames, regions[i]) == -1)
{
regionNames.append(regions[i]);
}
}
}
regionDirs = regionNames;
}
else
{
word regionName;
if (args.optionReadIfPresent("region", regionName))
{
regionNames = wordList(1, regionName);
regionDirs = regionNames;
}
else
{
regionNames = wordList(1, fvMesh::defaultRegion);
regionDirs = wordList(1, word::null);
}
}
forAll(regionNames, regionI)
{
const word& regionName = regionNames[regionI];
const word& regionDir = regionDirs[regionI];
Info<< "\n\nReconstructing fields for mesh " << regionName << nl
<< endl;
if
(
newTimes
&& regionNames.size() == 1
&& regionDirs[0].empty()
&& haveAllTimes(masterTimeDirSet, timeDirs)
)
{
Info<< "Skipping region " << regionName
<< " since already have all times"
<< endl << endl;
continue;
}
fvMesh mesh
(
IOobject
(
regionName,
runTime.timeName(),
runTime,
Foam::IOobject::MUST_READ
)
);
// Read all meshes and addressing to reconstructed mesh
processorMeshes procMeshes(databases, regionName);
// check face addressing for meshes that have been decomposed
// with a very old foam version
#include "checkFaceAddressingComp.H"
// Loop over all times
forAll(timeDirs, timeI)
{
if (newTimes && masterTimeDirSet.found(timeDirs[timeI].name()))
{
Info<< "Skipping time " << timeDirs[timeI].name()
<< endl << endl;
continue;
}
// Set time for global database
runTime.setTime(timeDirs[timeI], timeI);
Info<< "Time = " << runTime.timeName() << endl << endl;
// Set time for all databases
forAll(databases, procI)
{
databases[procI].setTime(timeDirs[timeI], timeI);
}
// Check if any new meshes need to be read.
fvMesh::readUpdateState meshStat = mesh.readUpdate();
fvMesh::readUpdateState procStat = procMeshes.readUpdate();
if (procStat == fvMesh::POINTS_MOVED)
{
// Reconstruct the points for moving mesh cases and write
// them out
procMeshes.reconstructPoints(mesh);
}
else if (meshStat != procStat)
{
WarningIn(args.executable())
<< "readUpdate for the reconstructed mesh:"
<< meshStat << nl
<< "readUpdate for the processor meshes :"
<< procStat << nl
<< "These should be equal or your addressing"
<< " might be incorrect."
<< " Please check your time directories for any "
<< "mesh directories." << endl;
}
// Get list of objects from processor0 database
IOobjectList objects
(
procMeshes.meshes()[0],
databases[0].timeName()
);
{
// If there are any FV fields, reconstruct them
Info<< "Reconstructing FV fields" << nl << endl;
fvFieldReconstructor fvReconstructor
(
mesh,
procMeshes.meshes(),
procMeshes.faceProcAddressing(),
procMeshes.cellProcAddressing(),
procMeshes.boundaryProcAddressing()
);
fvReconstructor.reconstructFvVolumeInternalFields<scalar>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeInternalFields<vector>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeInternalFields
<sphericalTensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeInternalFields<symmTensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeInternalFields<tensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeFields<scalar>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeFields<vector>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeFields<sphericalTensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeFields<symmTensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvVolumeFields<tensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvSurfaceFields<scalar>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvSurfaceFields<vector>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvSurfaceFields<sphericalTensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvSurfaceFields<symmTensor>
(
objects,
selectedFields
);
fvReconstructor.reconstructFvSurfaceFields<tensor>
(
objects,
selectedFields
);
if (fvReconstructor.nReconstructed() == 0)
{
Info<< "No FV fields" << nl << endl;
}
}
{
Info<< "Reconstructing point fields" << nl << endl;
const pointMesh& pMesh = pointMesh::New(mesh);
PtrList<pointMesh> pMeshes(procMeshes.meshes().size());
forAll(pMeshes, procI)
{
pMeshes.set
(
procI,
new pointMesh(procMeshes.meshes()[procI])
);
}
pointFieldReconstructor pointReconstructor
(
pMesh,
pMeshes,
procMeshes.pointProcAddressing(),
procMeshes.boundaryProcAddressing()
);
pointReconstructor.reconstructFields<scalar>
(
objects,
selectedFields
);
pointReconstructor.reconstructFields<vector>
(
objects,
selectedFields
);
pointReconstructor.reconstructFields<sphericalTensor>
(
objects,
selectedFields
);
pointReconstructor.reconstructFields<symmTensor>
(
objects,
selectedFields
);
pointReconstructor.reconstructFields<tensor>
(
objects,
selectedFields
);
if (pointReconstructor.nReconstructed() == 0)
{
Info<< "No point fields" << nl << endl;
}
}
// If there are any clouds, reconstruct them.
// A cloud of size zero does not get written, so in pass 1 we determine
// the cloud names and, per cloud name, the fields. Note that the fields
// are stored as the IOobjectList from the first processor that has them;
// in pass 2 they are only used for name and type (scalar, vector, etc.).
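// For example, after pass 1 cloudObjects["kinematicCloud"] (an illustrative
// name) holds the IOobjects found on the first processor that wrote that
// cloud (positions, U, d, ...); pass 2 then reconstructs each of those
// fields by name and type.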
if (!noLagrangian)
{
HashTable<IOobjectList> cloudObjects;
forAll(databases, procI)
{
fileNameList cloudDirs
(
readDir
(
databases[procI].timePath()
/ regionDir
/ cloud::prefix,
fileName::DIRECTORY
)
);
forAll(cloudDirs, i)
{
// Check if we already have cloud objects for this
// cloudname
HashTable<IOobjectList>::const_iterator iter =
cloudObjects.find(cloudDirs[i]);
if (iter == cloudObjects.end())
{
// Do local scan for valid cloud objects
IOobjectList sprayObjs
(
procMeshes.meshes()[procI],
databases[procI].timeName(),
cloud::prefix/cloudDirs[i]
);
IOobject* positionsPtr =
sprayObjs.lookup(word("positions"));
if (positionsPtr)
{
cloudObjects.insert(cloudDirs[i], sprayObjs);
}
}
}
}
if (cloudObjects.size())
{
// Pass2: reconstruct the cloud
forAllConstIter(HashTable<IOobjectList>, cloudObjects, iter)
{
const word cloudName = string::validate<word>
(
iter.key()
);
// Objects (on arbitrary processor)
const IOobjectList& sprayObjs = iter();
Info<< "Reconstructing lagrangian fields for cloud "
<< cloudName << nl << endl;
reconstructLagrangianPositions
(
mesh,
cloudName,
procMeshes.meshes(),
procMeshes.faceProcAddressing(),
procMeshes.cellProcAddressing()
);
reconstructLagrangianFields<label>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFieldFields<label>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFields<scalar>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFieldFields<scalar>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFields<vector>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFieldFields<vector>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFields<sphericalTensor>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFieldFields<sphericalTensor>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFields<symmTensor>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFieldFields<symmTensor>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFields<tensor>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
reconstructLagrangianFieldFields<tensor>
(
cloudName,
mesh,
procMeshes.meshes(),
sprayObjs,
selectedLagrangianFields
);
}
}
else
{
Info<< "No lagrangian fields" << nl << endl;
}
}
if (!noReconstructSets)
{
// Scan to find all sets
HashTable<label> cSetNames;
HashTable<label> fSetNames;
HashTable<label> pSetNames;
forAll(procMeshes.meshes(), procI)
{
const fvMesh& procMesh = procMeshes.meshes()[procI];
// Note: should we look at sets in the current time only, or at all times
// between the mesh instance and the current time? For now use the current
// time only; this misses sets in intermediate times that have not been
// reconstructed.
IOobjectList objects
(
procMesh,
databases[0].timeName(), //procMesh.facesInstance()
polyMesh::meshSubDir/"sets"
);
IOobjectList cSets(objects.lookupClass(cellSet::typeName));
forAllConstIter(IOobjectList, cSets, iter)
{
cSetNames.insert(iter.key(), cSetNames.size());
}
IOobjectList fSets(objects.lookupClass(faceSet::typeName));
forAllConstIter(IOobjectList, fSets, iter)
{
fSetNames.insert(iter.key(), fSetNames.size());
}
IOobjectList pSets(objects.lookupClass(pointSet::typeName));
forAllConstIter(IOobjectList, pSets, iter)
{
pSetNames.insert(iter.key(), pSetNames.size());
}
}
// Construct all sets
PtrList<cellSet> cellSets(cSetNames.size());
PtrList<faceSet> faceSets(fSetNames.size());
PtrList<pointSet> pointSets(pSetNames.size());
Info<< "Reconstructing sets:" << endl;
if (cSetNames.size())
{
Info<< " cellSets " << cSetNames.sortedToc() << endl;
}
if (fSetNames.size())
{
Info<< " faceSets " << fSetNames.sortedToc() << endl;
}
if (pSetNames.size())
{
Info<< " pointSets " << pSetNames.sortedToc() << endl;
}
// Load sets
forAll(procMeshes.meshes(), procI)
{
const fvMesh& procMesh = procMeshes.meshes()[procI];
IOobjectList objects
(
procMesh,
databases[0].timeName(), //procMesh.facesInstance(),
polyMesh::meshSubDir/"sets"
);
// cellSets
const labelList& cellMap =
procMeshes.cellProcAddressing()[procI];
IOobjectList cSets(objects.lookupClass(cellSet::typeName));
forAllConstIter(IOobjectList, cSets, iter)
{
// Load cellSet
const cellSet procSet(*iter());
label setI = cSetNames[iter.key()];
if (!cellSets.set(setI))
{
cellSets.set
(
setI,
new cellSet(mesh, iter.key(), procSet.size())
);
}
cellSet& cSet = cellSets[setI];
forAllConstIter(cellSet, procSet, iter)
{
cSet.insert(cellMap[iter.key()]);
}
}
// faceSets
const labelList& faceMap =
procMeshes.faceProcAddressing()[procI];
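// faceProcAddressing entries are +/-(masterFace + 1), the sign encoding the
// face orientation, so mag(...) - 1 below recovers the master face label.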
IOobjectList fSets(objects.lookupClass(faceSet::typeName));
forAllConstIter(IOobjectList, fSets, iter)
{
// Load faceSet
const faceSet procSet(*iter());
label setI = fSetNames[iter.key()];
if (!faceSets.set(setI))
{
faceSets.set
(
setI,
new faceSet(mesh, iter.key(), procSet.size())
);
}
faceSet& fSet = faceSets[setI];
forAllConstIter(faceSet, procSet, iter)
{
fSet.insert(mag(faceMap[iter.key()])-1);
}
}
// pointSets
const labelList& pointMap =
procMeshes.pointProcAddressing()[procI];
IOobjectList pSets(objects.lookupClass(pointSet::typeName));
forAllConstIter(IOobjectList, pSets, iter)
{
// Load pointSet
const pointSet propSet(*iter());
label setI = pSetNames[iter.key()];
if (!pointSets.set(setI))
{
pointSets.set
(
setI,
new pointSet(mesh, iter.key(), propSet.size())
);
}
pointSet& pSet = pointSets[setI];
forAllConstIter(pointSet, propSet, iter)
{
pSet.insert(pointMap[iter.key()]);
}
}
}
// Write sets
forAll(cellSets, i)
{
cellSets[i].write();
}
forAll(faceSets, i)
{
faceSets[i].write();
}
forAll(pointSets, i)
{
pointSets[i].write();
}
}
}
}
// If there are any "uniform" directories copy them from
// the master processor
forAll(timeDirs, timeI)
{
runTime.setTime(timeDirs[timeI], timeI);
databases[0].setTime(timeDirs[timeI], timeI);
fileName uniformDir0 = databases[0].timePath()/"uniform";
if (isDir(uniformDir0))
{
cp(uniformDir0, runTime.timePath());
}
}
Info<< "End.\n" << endl;
return 0;
}
// ************************************************************************* //

View File

@ -0,0 +1,3 @@
reconstructParMesh.C
EXE = $(FOAM_APPBIN)/reconstructParMesh

View File

@ -0,0 +1,8 @@
EXE_INC = \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \
-ldynamicMesh \
-lmeshTools

View File

@ -0,0 +1,917 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Application
reconstructParMesh
Description
Reconstructs a mesh using geometric information only.
Writes point/face/cell procAddressing so afterwards reconstructPar can be
used to reconstruct fields.
Note:
- uses geometric matching tolerance (set with the -mergeTol option).
If the parallel case does not have correct procBoundaries use the
-fullMatch option which will check all boundary faces (a bit slower).
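Usage
A minimal sketch of typical invocations (the time selection and tolerance
are illustrative):
\verbatim
# Merge the processor meshes found in constant/
reconstructParMesh -constant
# Match all boundary faces and also write the cell distribution
reconstructParMesh -latestTime -fullMatch -cellDist
\endverbatim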
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "timeSelector.H"
#include "IOobjectList.H"
#include "labelIOList.H"
#include "processorPolyPatch.H"
#include "mapAddedPolyMesh.H"
#include "polyMeshAdder.H"
#include "faceCoupleInfo.H"
#include "fvMeshAdder.H"
#include "polyTopoChange.H"
#include "zeroGradientFvPatchFields.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Tolerance (as fraction of the bounding box). Needs to be fairly lax since
// usually meshes get written with limited precision (6 digits)
static const scalar defaultMergeTol = 1e-7;
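// For example, with the default ASCII write precision of 6 digits the write
// tolerance computed below is 1e-6, so this default merge tolerance of 1e-7
// is finer than what was written and the run aborts unless binary output, a
// higher writePrecision or a larger -mergeTol is used.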
static void renumber
(
const labelList& map,
labelList& elems
)
{
forAll(elems, i)
{
if (elems[i] >= 0)
{
elems[i] = map[elems[i]];
}
}
}
// Determine which faces are coupled. Uses geometric merge distance.
// Looks either at all boundaryFaces (fullMatch) or only at the
// procBoundaries for procI. Assumes that masterMesh already contains the
// merged meshes of all processors < procI.
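// For example, when adding processor 2 to a master that already contains
// processors 0 and 1, the master-side candidates are the patches whose names
// end in "to2" (e.g. "procBoundary0to2", "procBoundary1to2") and the
// added-mesh candidates are "procBoundary2to0" and "procBoundary2to1".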
autoPtr<faceCoupleInfo> determineCoupledFaces
(
const bool fullMatch,
const label procI,
const polyMesh& masterMesh,
const polyMesh& meshToAdd,
const scalar mergeDist
)
{
if (fullMatch || masterMesh.nCells() == 0)
{
return autoPtr<faceCoupleInfo>
(
new faceCoupleInfo
(
masterMesh,
meshToAdd,
mergeDist, // absolute merging distance
true // matching faces identical
)
);
}
else
{
// Pick up all patches on masterMesh ending in "toDDD" where DDD is
// the processor number procI.
const polyBoundaryMesh& masterPatches = masterMesh.boundaryMesh();
const string toProcString("to" + name(procI));
DynamicList<label> masterFaces
(
masterMesh.nFaces()
- masterMesh.nInternalFaces()
);
forAll(masterPatches, patchI)
{
const polyPatch& pp = masterPatches[patchI];
if
(
isA<processorPolyPatch>(pp)
&& (
pp.name().rfind(toProcString)
== (pp.name().size()-toProcString.size())
)
)
{
label meshFaceI = pp.start();
forAll(pp, i)
{
masterFaces.append(meshFaceI++);
}
}
}
masterFaces.shrink();
// Pick up all patches on meshToAdd ending in "procBoundaryDDDtoYYY"
// where DDD is the processor number procI and YYY is < procI.
const polyBoundaryMesh& addPatches = meshToAdd.boundaryMesh();
DynamicList<label> addFaces
(
meshToAdd.nFaces()
- meshToAdd.nInternalFaces()
);
forAll(addPatches, patchI)
{
const polyPatch& pp = addPatches[patchI];
if (isA<processorPolyPatch>(pp))
{
bool isConnected = false;
for (label mergedProcI = 0; mergedProcI < procI; mergedProcI++)
{
const string fromProcString
(
"procBoundary"
+ name(procI)
+ "to"
+ name(mergedProcI)
);
if (pp.name() == fromProcString)
{
isConnected = true;
break;
}
}
if (isConnected)
{
label meshFaceI = pp.start();
forAll(pp, i)
{
addFaces.append(meshFaceI++);
}
}
}
}
addFaces.shrink();
return autoPtr<faceCoupleInfo>
(
new faceCoupleInfo
(
masterMesh,
masterFaces,
meshToAdd,
addFaces,
mergeDist, // absolute merging distance
true, // matching faces identical?
false, // if perfect match, are faces already ordered
// (e.g. processor patches)?
false // are faces each on separate patch?
)
);
}
}
autoPtr<mapPolyMesh> mergeSharedPoints
(
const scalar mergeDist,
polyMesh& mesh,
labelListList& pointProcAddressing
)
{
// Find out which sets of points get merged and create a map from
// mesh point to unique point.
Map<label> pointToMaster
(
fvMeshAdder::findSharedPoints
(
mesh,
mergeDist
)
);
Info<< "mergeSharedPoints : detected " << pointToMaster.size()
<< " points that are to be merged." << endl;
if (returnReduce(pointToMaster.size(), sumOp<label>()) == 0)
{
return autoPtr<mapPolyMesh>(NULL);
}
polyTopoChange meshMod(mesh);
fvMeshAdder::mergePoints(mesh, pointToMaster, meshMod);
// Change the mesh (no inflation). Note: parallel comms allowed.
autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh, false, true);
// Update fields. No inflation, parallel sync.
mesh.updateMesh(map);
// pointProcAddressing gives indices into the master mesh, so adapt the
// constructMaps for the changed (merged) point numbering.
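// For example, a reversePointMap entry of -7 means the point was merged into
// (master) point 5 (= -(-7) - 2); an entry >= 0 is simply the point's new
// label.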
forAll(pointProcAddressing, procI)
{
labelList& constructMap = pointProcAddressing[procI];
forAll(constructMap, i)
{
label oldPointI = constructMap[i];
// New label of point after changeMesh.
label newPointI = map().reversePointMap()[oldPointI];
if (newPointI < -1)
{
constructMap[i] = -newPointI-2;
}
else if (newPointI >= 0)
{
constructMap[i] = newPointI;
}
else
{
FatalErrorIn("fvMeshDistribute::mergeSharedPoints()")
<< "Problem. oldPointI:" << oldPointI
<< " newPointI:" << newPointI << abort(FatalError);
}
}
}
return map;
}
boundBox procBounds
(
const argList& args,
const PtrList<Time>& databases,
const word& regionDir
)
{
boundBox bb = boundBox::invertedBox;
forAll(databases, procI)
{
fileName pointsInstance
(
databases[procI].findInstance
(
regionDir/polyMesh::meshSubDir,
"points"
)
);
if (pointsInstance != databases[procI].timeName())
{
FatalErrorIn(args.executable())
<< "Your time was specified as " << databases[procI].timeName()
<< " but there is no polyMesh/points in that time." << endl
<< "(there is a points file in " << pointsInstance
<< ")" << endl
<< "Please rerun with the correct time specified"
<< " (through the -constant, -time or -latestTime "
<< "(at your option)."
<< endl << exit(FatalError);
}
Info<< "Reading points from "
<< databases[procI].caseName()
<< " for time = " << databases[procI].timeName()
<< nl << endl;
pointIOField points
(
IOobject
(
"points",
databases[procI].findInstance
(
regionDir/polyMesh::meshSubDir,
"points"
),
regionDir/polyMesh::meshSubDir,
databases[procI],
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
)
);
boundBox domainBb(points, false);
bb.min() = min(bb.min(), domainBb.min());
bb.max() = max(bb.max(), domainBb.max());
}
return bb;
}
void writeCellDistance
(
Time& runTime,
const fvMesh& masterMesh,
const labelListList& cellProcAddressing
)
{
// Write the decomposition as labelList for use with 'manual'
// decomposition method.
labelIOList cellDecomposition
(
IOobject
(
"cellDecomposition",
masterMesh.facesInstance(),
masterMesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
masterMesh.nCells()
);
forAll(cellProcAddressing, procI)
{
const labelList& pCells = cellProcAddressing[procI];
UIndirectList<label>(cellDecomposition, pCells) = procI;
}
cellDecomposition.write();
Info<< nl << "Wrote decomposition to "
<< cellDecomposition.objectPath()
<< " for use in manual decomposition." << endl;
// Write as volScalarField for postprocessing. Change time to 0
// if was 'constant'
{
const scalar oldTime = runTime.value();
const label oldIndex = runTime.timeIndex();
if (runTime.timeName() == runTime.constant() && oldIndex == 0)
{
runTime.setTime(0, oldIndex+1);
}
volScalarField cellDist
(
IOobject
(
"cellDist",
runTime.timeName(),
masterMesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
masterMesh,
dimensionedScalar("cellDist", dimless, 0),
zeroGradientFvPatchScalarField::typeName
);
forAll(cellDecomposition, cellI)
{
cellDist[cellI] = cellDecomposition[cellI];
}
cellDist.write();
Info<< nl << "Wrote decomposition as volScalarField to "
<< cellDist.name() << " for use in postprocessing."
<< endl;
// Restore time
runTime.setTime(oldTime, oldIndex);
}
}
int main(int argc, char *argv[])
{
argList::addNote
(
"reconstruct a mesh using geometric information only"
);
argList::noParallel();
argList::addOption
(
"mergeTol",
"scalar",
"specify the merge distance relative to the bounding box size "
"(default 1e-7)"
);
argList::addBoolOption
(
"fullMatch",
"do (slower) geometric matching on all boundary faces"
);
argList::addBoolOption
(
"cellDist",
"write cell distribution as a labelList - for use with 'manual' "
"decomposition method or as a volScalarField for post-processing."
);
#include "addTimeOptions.H"
#include "addRegionOption.H"
#include "setRootCase.H"
#include "createTime.H"
Info<< "This is an experimental tool which tries to merge"
<< " individual processor" << nl
<< "meshes back into one master mesh. Use it if the original"
<< " master mesh has" << nl
<< "been deleted or if the processor meshes have been modified"
<< " (topology change)." << nl
<< "This tool will write the resulting mesh to a new time step"
<< " and construct" << nl
<< "xxxxProcAddressing files in the processor meshes so"
<< " reconstructPar can be" << nl
<< "used to regenerate the fields on the master mesh." << nl
<< nl
<< "Not well tested & use at your own risk!" << nl
<< endl;
word regionName = polyMesh::defaultRegion;
word regionDir = word::null;
if
(
args.optionReadIfPresent("region", regionName)
&& regionName != polyMesh::defaultRegion
)
{
regionDir = regionName;
Info<< "Operating on region " << regionName << nl << endl;
}
scalar mergeTol = defaultMergeTol;
args.optionReadIfPresent("mergeTol", mergeTol);
scalar writeTol = Foam::pow(10.0, -scalar(IOstream::defaultPrecision()));
Info<< "Merge tolerance : " << mergeTol << nl
<< "Write tolerance : " << writeTol << endl;
if (runTime.writeFormat() == IOstream::ASCII && mergeTol < writeTol)
{
FatalErrorIn(args.executable())
<< "Your current settings specify ASCII writing with "
<< IOstream::defaultPrecision() << " digits precision." << endl
<< "Your merging tolerance (" << mergeTol << ") is finer than this."
<< endl
<< "Please change your writeFormat to binary"
<< " or increase the writePrecision" << endl
<< "or adjust the merge tolerance (-mergeTol)."
<< exit(FatalError);
}
const bool fullMatch = args.optionFound("fullMatch");
if (fullMatch)
{
Info<< "Doing geometric matching on all boundary faces." << nl << endl;
}
else
{
Info<< "Doing geometric matching on correct procBoundaries only."
<< nl << "This assumes a correct decomposition." << endl;
}
bool writeCellDist = args.optionFound("cellDist");
int nProcs = 0;
while
(
isDir
(
args.rootPath()
/ args.caseName()
/ fileName(word("processor") + name(nProcs))
)
)
{
nProcs++;
}
Info<< "Found " << nProcs << " processor directories" << nl << endl;
// Read all time databases
PtrList<Time> databases(nProcs);
forAll(databases, procI)
{
Info<< "Reading database "
<< args.caseName()/fileName(word("processor") + name(procI))
<< endl;
databases.set
(
procI,
new Time
(
Time::controlDictName,
args.rootPath(),
args.caseName()/fileName(word("processor") + name(procI))
)
);
}
// use the times list from the master processor
// and select a subset based on the command-line options
instantList Times = timeSelector::select
(
databases[0].times(),
args
);
// set startTime and endTime depending on -time and -latestTime options
#include "checkTimeOptions.H"
if (Times.empty())
{
FatalErrorIn(args.executable())
<< "No times selected"
<< exit(FatalError);
}
// Loop over all times
for (label timeI = startTime; timeI < endTime; timeI++)
{
// Set time for global database
runTime.setTime(Times[timeI], timeI);
Info<< "Time = " << runTime.timeName() << endl << endl;
// Set time for all databases
forAll(databases, procI)
{
databases[procI].setTime(Times[timeI], timeI);
}
const fileName meshPath =
databases[0].path()
/databases[0].timeName()
/regionDir
/polyMesh::meshSubDir;
if (!isFile(meshPath/"faces"))
{
Info<< "No mesh." << nl << endl;
continue;
}
// Read points on individual processors to determine the merge tolerance
// (otherwise single cell domains might give problems)
const boundBox bb = procBounds(args, databases, regionDir);
const scalar mergeDist = mergeTol*bb.mag();
Info<< "Overall mesh bounding box : " << bb << nl
<< "Relative tolerance : " << mergeTol << nl
<< "Absolute matching distance : " << mergeDist << nl
<< endl;
// Addressing from processor to reconstructed case
labelListList cellProcAddressing(nProcs);
labelListList faceProcAddressing(nProcs);
labelListList pointProcAddressing(nProcs);
labelListList boundaryProcAddressing(nProcs);
// Internal faces on the final reconstructed mesh
label masterInternalFaces;
// Owner addressing on the final reconstructed mesh
labelList masterOwner;
{
// Construct empty mesh.
Info<< "Constructing empty mesh to add to." << nl << endl;
fvMesh masterMesh
(
IOobject
(
regionName,
runTime.timeName(),
runTime,
IOobject::NO_READ
),
xferCopy(pointField()),
xferCopy(faceList()),
xferCopy(cellList())
);
for (label procI = 0; procI < nProcs; procI++)
{
Info<< "Reading mesh to add from "
<< databases[procI].caseName()
<< " for time = " << databases[procI].timeName()
<< nl << endl;
fvMesh meshToAdd
(
IOobject
(
regionName,
databases[procI].timeName(),
databases[procI]
)
);
// Initialize its addressing
cellProcAddressing[procI] = identity(meshToAdd.nCells());
faceProcAddressing[procI] = identity(meshToAdd.nFaces());
pointProcAddressing[procI] = identity(meshToAdd.nPoints());
boundaryProcAddressing[procI] =
identity(meshToAdd.boundaryMesh().size());
// Find geometrically shared points/faces.
autoPtr<faceCoupleInfo> couples = determineCoupledFaces
(
fullMatch,
procI,
masterMesh,
meshToAdd,
mergeDist
);
// Add elements to mesh
Info<< "Adding to master mesh" << nl << endl;
autoPtr<mapAddedPolyMesh> map = fvMeshAdder::add
(
masterMesh,
meshToAdd,
couples
);
// Update all addressing so xxProcAddressing points to correct
// item in masterMesh.
// Processors that were already in masterMesh
for (label mergedI = 0; mergedI < procI; mergedI++)
{
renumber(map().oldCellMap(), cellProcAddressing[mergedI]);
renumber(map().oldFaceMap(), faceProcAddressing[mergedI]);
renumber(map().oldPointMap(), pointProcAddressing[mergedI]);
// Note: boundary is special since it can contain -1.
renumber
(
map().oldPatchMap(),
boundaryProcAddressing[mergedI]
);
}
// Added processor
renumber(map().addedCellMap(), cellProcAddressing[procI]);
renumber(map().addedFaceMap(), faceProcAddressing[procI]);
renumber(map().addedPointMap(), pointProcAddressing[procI]);
renumber(map().addedPatchMap(), boundaryProcAddressing[procI]);
Info<< endl;
}
// See if any points on the mastermesh have become connected
// because of connections through processor meshes.
mergeSharedPoints(mergeDist, masterMesh, pointProcAddressing);
// Save some properties on the reconstructed mesh
masterInternalFaces = masterMesh.nInternalFaces();
masterOwner = masterMesh.faceOwner();
Info<< "\nWriting merged mesh to "
<< runTime.path()/runTime.timeName()
<< nl << endl;
if (!masterMesh.write())
{
FatalErrorIn(args.executable())
<< "Failed writing polyMesh."
<< exit(FatalError);
}
if (writeCellDist)
{
writeCellDistance(runTime, masterMesh, cellProcAddressing);
}
}
// Write the addressing
Info<< "Reconstructing the addressing from the processor meshes"
<< " to the newly reconstructed mesh" << nl << endl;
forAll(databases, procI)
{
Info<< "Reading processor " << procI << " mesh from "
<< databases[procI].caseName() << endl;
polyMesh procMesh
(
IOobject
(
regionName,
databases[procI].timeName(),
databases[procI]
)
);
// From processor point to reconstructed mesh point
Info<< "Writing pointProcAddressing to "
<< databases[procI].caseName()
/procMesh.facesInstance()
/polyMesh::meshSubDir
<< endl;
labelIOList
(
IOobject
(
"pointProcAddressing",
procMesh.facesInstance(),
polyMesh::meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false // do not register
),
pointProcAddressing[procI]
).write();
// From processor face to reconstructed mesh face
Info<< "Writing faceProcAddressing to "
<< databases[procI].caseName()
/procMesh.facesInstance()
/polyMesh::meshSubDir
<< endl;
labelIOList faceProcAddr
(
IOobject
(
"faceProcAddressing",
procMesh.facesInstance(),
polyMesh::meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false // do not register
),
faceProcAddressing[procI]
);
// Now add turning index to faceProcAddressing.
// See reconstructPar for meaning of turning index.
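// For example, a processor face that maps to master face 8 with the same
// owner orientation is stored as 9 (= 8 + 1); if the orientation is reversed
// it is stored as -9, so reconstructPar can recover both the master face
// (mag - 1) and the flip from one signed entry.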
forAll(faceProcAddr, procFaceI)
{
label masterFaceI = faceProcAddr[procFaceI];
if
(
!procMesh.isInternalFace(procFaceI)
&& masterFaceI < masterInternalFaces
)
{
// proc face is now external but used to be internal face.
// Check if we have owner or neighbour.
label procOwn = procMesh.faceOwner()[procFaceI];
label masterOwn = masterOwner[masterFaceI];
if (cellProcAddressing[procI][procOwn] == masterOwn)
{
// No turning. Offset by 1.
faceProcAddr[procFaceI]++;
}
else
{
// Turned face.
faceProcAddr[procFaceI] =
-1 - faceProcAddr[procFaceI];
}
}
else
{
// No turning. Offset by 1.
faceProcAddr[procFaceI]++;
}
}
faceProcAddr.write();
// From processor cell to reconstructed mesh cell
Info<< "Writing cellProcAddressing to "
<< databases[procI].caseName()
/procMesh.facesInstance()
/polyMesh::meshSubDir
<< endl;
labelIOList
(
IOobject
(
"cellProcAddressing",
procMesh.facesInstance(),
polyMesh::meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false // do not register
),
cellProcAddressing[procI]
).write();
// From processor patch to reconstructed mesh patch
Info<< "Writing boundaryProcAddressing to "
<< databases[procI].caseName()
/procMesh.facesInstance()
/polyMesh::meshSubDir
<< endl;
labelIOList
(
IOobject
(
"boundaryProcAddressing",
procMesh.facesInstance(),
polyMesh::meshSubDir,
procMesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false // do not register
),
boundaryProcAddressing[procI]
).write();
Info<< endl;
}
}
Info<< "End.\n" << endl;
return 0;
}
// ************************************************************************* //

View File

@ -0,0 +1,4 @@
loadOrCreateMesh.C
redistributePar.C
EXE = $(FOAM_APPBIN)/redistributePar

View File

@ -0,0 +1,13 @@
EXE_INC = \
-I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude
EXE_LIBS = \
-lfiniteVolume \
-lgenericPatchFields \
-ldecompositionMethods \
-L$(FOAM_LIBBIN)/dummy -lptscotchDecomp \
-lmeshTools \
-ldynamicMesh

View File

@ -0,0 +1,362 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "loadOrCreateMesh.H"
#include "processorPolyPatch.H"
#include "processorCyclicPolyPatch.H"
#include "Time.H"
#include "IOPtrList.H"
// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //
namespace Foam
{
defineTemplateTypeNameAndDebug(IOPtrList<entry>, 0);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Read mesh if available. Otherwise create empty mesh with same non-proc
// patches as proc0 mesh. Requires all processors to have all patches
// (and in same order).
Foam::autoPtr<Foam::fvMesh> Foam::loadOrCreateMesh
(
const IOobject& io
)
{
fileName meshSubDir;
if (io.name() == polyMesh::defaultRegion)
{
meshSubDir = polyMesh::meshSubDir;
}
else
{
meshSubDir = io.name()/polyMesh::meshSubDir;
}
// Scatter master patches
PtrList<entry> patchEntries;
if (Pstream::master())
{
// Read PtrList of dictionary as dictionary.
const word oldTypeName = IOPtrList<entry>::typeName;
const_cast<word&>(IOPtrList<entry>::typeName) = word::null;
IOPtrList<entry> dictList
(
IOobject
(
"boundary",
io.time().findInstance
(
meshSubDir,
"boundary",
IOobject::MUST_READ
),
meshSubDir,
io.db(),
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
)
);
const_cast<word&>(IOPtrList<entry>::typeName) = oldTypeName;
// Fake type back to what was in field
const_cast<word&>(dictList.type()) = dictList.headerClassName();
patchEntries.transfer(dictList);
// Send patches
for
(
int slave=Pstream::firstSlave();
slave<=Pstream::lastSlave();
slave++
)
{
OPstream toSlave(Pstream::scheduled, slave);
toSlave << patchEntries;
}
}
else
{
// Receive patches
IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
fromMaster >> patchEntries;
}
// Check who has a mesh
const bool haveMesh = isDir(io.time().path()/io.instance()/meshSubDir);
if (!haveMesh)
{
bool oldParRun = Pstream::parRun();
Pstream::parRun() = false;
// Create dummy mesh. Only used on procs that don't have mesh.
IOobject noReadIO(io);
noReadIO.readOpt() = IOobject::NO_READ;
fvMesh dummyMesh
(
noReadIO,
xferCopy(pointField()),
xferCopy(faceList()),
xferCopy(labelList()),
xferCopy(labelList()),
false
);
// Add patches
List<polyPatch*> patches(patchEntries.size());
label nPatches = 0;
forAll(patchEntries, patchI)
{
const entry& e = patchEntries[patchI];
const word type(e.dict().lookup("type"));
const word& name = e.keyword();
if
(
type != processorPolyPatch::typeName
&& type != processorCyclicPolyPatch::typeName
)
{
dictionary patchDict(e.dict());
patchDict.set("nFaces", 0);
patchDict.set("startFace", 0);
patches[patchI] = polyPatch::New
(
name,
patchDict,
nPatches++,
dummyMesh.boundaryMesh()
).ptr();
}
}
patches.setSize(nPatches);
dummyMesh.addFvPatches(patches, false); // no parallel comms
// Add some dummy zones so upon reading it does not read them
// from the undecomposed case. Should be done as extra argument to
// regIOobject::readStream?
List<pointZone*> pz
(
1,
new pointZone
(
"dummyPointZone",
labelList(0),
0,
dummyMesh.pointZones()
)
);
List<faceZone*> fz
(
1,
new faceZone
(
"dummyFaceZone",
labelList(0),
boolList(0),
0,
dummyMesh.faceZones()
)
);
List<cellZone*> cz
(
1,
new cellZone
(
"dummyCellZone",
labelList(0),
0,
dummyMesh.cellZones()
)
);
dummyMesh.addZones(pz, fz, cz);
//Pout<< "Writing dummy mesh to " << dummyMesh.polyMesh::objectPath()
// << endl;
dummyMesh.write();
Pstream::parRun() = oldParRun;
}
//Pout<< "Reading mesh from " << io.objectPath() << endl;
autoPtr<fvMesh> meshPtr(new fvMesh(io));
fvMesh& mesh = meshPtr();
// Sync patches
// ~~~~~~~~~~~~
if (!Pstream::master() && haveMesh)
{
// Check master names against mine
const polyBoundaryMesh& patches = mesh.boundaryMesh();
forAll(patchEntries, patchI)
{
const entry& e = patchEntries[patchI];
const word type(e.dict().lookup("type"));
const word& name = e.keyword();
if (type == processorPolyPatch::typeName)
{
break;
}
if (patchI >= patches.size())
{
FatalErrorIn
(
"createMesh(const Time&, const fileName&, const bool)"
) << "Non-processor patches not synchronised."
<< endl
<< "Processor " << Pstream::myProcNo()
<< " has only " << patches.size()
<< " patches, master has "
<< patchI
<< exit(FatalError);
}
if
(
type != patches[patchI].type()
|| name != patches[patchI].name()
)
{
FatalErrorIn
(
"createMesh(const Time&, const fileName&, const bool)"
) << "Non-processor patches not synchronised."
<< endl
<< "Master patch " << patchI
<< " name:" << type
<< " type:" << type << endl
<< "Processor " << Pstream::myProcNo()
<< " patch " << patchI
<< " has name:" << patches[patchI].name()
<< " type:" << patches[patchI].type()
<< exit(FatalError);
}
}
}
// Determine zones
// ~~~~~~~~~~~~~~~
wordList pointZoneNames(mesh.pointZones().names());
Pstream::scatter(pointZoneNames);
wordList faceZoneNames(mesh.faceZones().names());
Pstream::scatter(faceZoneNames);
wordList cellZoneNames(mesh.cellZones().names());
Pstream::scatter(cellZoneNames);
if (!haveMesh)
{
// Add the zones. Make sure to remove the old dummy ones first
mesh.pointZones().clear();
mesh.faceZones().clear();
mesh.cellZones().clear();
List<pointZone*> pz(pointZoneNames.size());
forAll(pointZoneNames, i)
{
pz[i] = new pointZone
(
pointZoneNames[i],
labelList(0),
i,
mesh.pointZones()
);
}
List<faceZone*> fz(faceZoneNames.size());
forAll(faceZoneNames, i)
{
fz[i] = new faceZone
(
faceZoneNames[i],
labelList(0),
boolList(0),
i,
mesh.faceZones()
);
}
List<cellZone*> cz(cellZoneNames.size());
forAll(cellZoneNames, i)
{
cz[i] = new cellZone
(
cellZoneNames[i],
labelList(0),
i,
mesh.cellZones()
);
}
mesh.addZones(pz, fz, cz);
}
if (!haveMesh)
{
// We created a dummy mesh file above. Delete it.
const fileName meshFiles = io.time().path()/io.instance()/meshSubDir;
//Pout<< "Removing dummy mesh " << meshFiles << endl;
mesh.removeFiles();
rmDir(meshFiles);
}
// Force recreation of globalMeshData.
mesh.clearOut();
mesh.globalData();
// Do some checks.
// Check if the boundary definition is unique
mesh.boundaryMesh().checkDefinition(true);
// Check if the boundary processor patches are correct
mesh.boundaryMesh().checkParallelSync(true);
// Check names of zones are equal
mesh.cellZones().checkDefinition(true);
mesh.cellZones().checkParallelSync(true);
mesh.faceZones().checkDefinition(true);
mesh.faceZones().checkParallelSync(true);
mesh.pointZones().checkDefinition(true);
mesh.pointZones().checkParallelSync(true);
return meshPtr;
}
// ************************************************************************* //

View File

@ -0,0 +1,58 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
InNamespace
Foam
Description
Load or create (zero size) a mesh. Used when distributing meshes to a
larger number of processors.
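Usage
A minimal sketch (the IOobject arguments are illustrative):
\verbatim
autoPtr<fvMesh> meshPtr = loadOrCreateMesh
(
    IOobject
    (
        regionName,
        facesInstance,  // exact instance; no findInstance is done
        runTime,
        IOobject::MUST_READ
    )
);
\endverbatim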
SourceFiles
loadOrCreateMesh.C
\*---------------------------------------------------------------------------*/
#ifndef loadOrCreateMesh_H
#define loadOrCreateMesh_H
#include "fvMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
//- Load (if it exists) or create zero cell mesh given an IOobject:
// name : regionName
// instance : exact directory where to find the mesh (i.e. does not
// do a findInstance)
autoPtr<fvMesh> loadOrCreateMesh(const IOobject& io);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -0,0 +1,820 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Application
redistributePar
Description
Redistributes existing decomposed mesh and fields according to the current
settings in the decomposeParDict file.
Must be run on the maximum of the number of source and destination processors.
Balances mesh and writes new mesh to new time directory.
Can also work like decomposePar:
\verbatim
# Create empty processor directories (have to exist for argList)
mkdir processor0
..
mkdir processorN
# Copy undecomposed polyMesh
cp -r constant processor0
# Distribute
mpirun -np ddd redistributePar -parallel
\endverbatim
\*---------------------------------------------------------------------------*/
#include "fvMesh.H"
#include "decompositionMethod.H"
#include "PstreamReduceOps.H"
#include "fvCFD.H"
#include "fvMeshDistribute.H"
#include "mapDistributePolyMesh.H"
#include "IOobjectList.H"
#include "globalIndex.H"
#include "loadOrCreateMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Tolerance (as fraction of the bounding box). Needs to be fairly lax since
// usually meshes get written with limited precision (6 digits)
static const scalar defaultMergeTol = 1e-6;
// Get merging distance when matching face centres
scalar getMergeDistance
(
const argList& args,
const Time& runTime,
const boundBox& bb
)
{
scalar mergeTol = defaultMergeTol;
args.optionReadIfPresent("mergeTol", mergeTol);
scalar writeTol =
Foam::pow(scalar(10.0), -scalar(IOstream::defaultPrecision()));
Info<< "Merge tolerance : " << mergeTol << nl
<< "Write tolerance : " << writeTol << endl;
if (runTime.writeFormat() == IOstream::ASCII && mergeTol < writeTol)
{
FatalErrorIn("getMergeDistance")
<< "Your current settings specify ASCII writing with "
<< IOstream::defaultPrecision() << " digits precision." << endl
<< "Your merging tolerance (" << mergeTol << ") is finer than this."
<< endl
<< "Please change your writeFormat to binary"
<< " or increase the writePrecision" << endl
<< "or adjust the merge tolerance (-mergeTol)."
<< exit(FatalError);
}
scalar mergeDist = mergeTol * bb.mag();
Info<< "Overall meshes bounding box : " << bb << nl
<< "Relative tolerance : " << mergeTol << nl
<< "Absolute matching distance : " << mergeDist << nl
<< endl;
return mergeDist;
}
//void printMeshData(Ostream& os, const polyMesh& mesh)
//{
// os << "Number of points: " << mesh.points().size() << nl
// << " faces: " << mesh.faces().size() << nl
// << " internal faces: " << mesh.faceNeighbour().size() << nl
// << " cells: " << mesh.cells().size() << nl
// << " boundary patches: " << mesh.boundaryMesh().size() << nl
// << " point zones: " << mesh.pointZones().size() << nl
// << " face zones: " << mesh.faceZones().size() << nl
// << " cell zones: " << mesh.cellZones().size() << nl;
//}
void printMeshData(const polyMesh& mesh)
{
// Collect all data on master
globalIndex globalCells(mesh.nCells());
labelListList patchNeiProcNo(Pstream::nProcs());
labelListList patchSize(Pstream::nProcs());
const labelList& pPatches = mesh.globalData().processorPatches();
patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size());
patchSize[Pstream::myProcNo()].setSize(pPatches.size());
forAll(pPatches, i)
{
const processorPolyPatch& ppp = refCast<const processorPolyPatch>
(
mesh.boundaryMesh()[pPatches[i]]
);
patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo();
patchSize[Pstream::myProcNo()][i] = ppp.size();
}
Pstream::gatherList(patchNeiProcNo);
Pstream::gatherList(patchSize);
// Print stats
globalIndex globalBoundaryFaces(mesh.nFaces()-mesh.nInternalFaces());
label maxProcCells = 0;
label totProcFaces = 0;
label maxProcPatches = 0;
label totProcPatches = 0;
label maxProcFaces = 0;
for (label procI = 0; procI < Pstream::nProcs(); procI++)
{
Info<< endl
<< "Processor " << procI << nl
<< " Number of cells = " << globalCells.localSize(procI)
<< endl;
label nProcFaces = 0;
const labelList& nei = patchNeiProcNo[procI];
forAll(patchNeiProcNo[procI], i)
{
Info<< " Number of faces shared with processor "
<< patchNeiProcNo[procI][i] << " = " << patchSize[procI][i]
<< endl;
nProcFaces += patchSize[procI][i];
}
Info<< " Number of processor patches = " << nei.size() << nl
<< " Number of processor faces = " << nProcFaces << nl
<< " Number of boundary faces = "
<< globalBoundaryFaces.localSize(procI) << endl;
maxProcCells = max(maxProcCells, globalCells.localSize(procI));
totProcFaces += nProcFaces;
totProcPatches += nei.size();
maxProcPatches = max(maxProcPatches, nei.size());
maxProcFaces = max(maxProcFaces, nProcFaces);
}
// Stats
scalar avgProcCells = scalar(globalCells.size())/Pstream::nProcs();
scalar avgProcPatches = scalar(totProcPatches)/Pstream::nProcs();
scalar avgProcFaces = scalar(totProcFaces)/Pstream::nProcs();
// In case of all faces on one processor. Just to avoid division by 0.
if (totProcPatches == 0)
{
avgProcPatches = 1;
}
if (totProcFaces == 0)
{
avgProcFaces = 1;
}
Info<< nl
<< "Number of processor faces = " << totProcFaces/2 << nl
<< "Max number of cells = " << maxProcCells
<< " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
<< "% above average " << avgProcCells << ")" << nl
<< "Max number of processor patches = " << maxProcPatches
<< " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
<< "% above average " << avgProcPatches << ")" << nl
<< "Max number of faces between processors = " << maxProcFaces
<< " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
<< "% above average " << avgProcFaces << ")" << nl
<< endl;
}
// Debugging: write volScalarField with decomposition for post processing.
void writeDecomposition
(
const word& name,
const fvMesh& mesh,
const labelList& decomp
)
{
Info<< "Writing wanted cell distribution to volScalarField " << name
<< " for postprocessing purposes." << nl << endl;
volScalarField procCells
(
IOobject
(
name,
mesh.time().timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE,
false // do not register
),
mesh,
dimensionedScalar(name, dimless, -1),
zeroGradientFvPatchScalarField::typeName
);
forAll(procCells, cI)
{
procCells[cI] = decomp[cI];
}
procCells.write();
}
// Read vol or surface fields
//template<class T, class Mesh>
template<class GeoField>
void readFields
(
const boolList& haveMesh,
const fvMesh& mesh,
const autoPtr<fvMeshSubset>& subsetterPtr,
IOobjectList& allObjects,
PtrList<GeoField>& fields
)
{
//typedef GeometricField<T, fvPatchField, Mesh> fldType;
// Get my objects of this field type
IOobjectList objects(allObjects.lookupClass(GeoField::typeName));
// Check that all processors see the same set of objects
wordList objectNames = objects.toc();
// Get master names
wordList masterNames(objectNames);
Pstream::scatter(masterNames);
if (haveMesh[Pstream::myProcNo()] && objectNames != masterNames)
{
FatalErrorIn("readFields()")
<< "differing fields of type " << GeoField::typeName
<< " on processors." << endl
<< "Master has:" << masterNames << endl
<< Pstream::myProcNo() << " has:" << objectNames
<< abort(FatalError);
}
fields.setSize(masterNames.size());
// Have master send all fields to processors that don't have a mesh
if (Pstream::master())
{
forAll(masterNames, i)
{
const word& name = masterNames[i];
IOobject& io = *objects[name];
io.writeOpt() = IOobject::AUTO_WRITE;
// Load field
fields.set(i, new GeoField(io, mesh));
// Create zero sized field and send
if (subsetterPtr.valid())
{
tmp<GeoField> tsubfld = subsetterPtr().interpolate(fields[i]);
// Send to all processors that don't have a mesh
for (label procI = 1; procI < Pstream::nProcs(); procI++)
{
if (!haveMesh[procI])
{
OPstream toProc(Pstream::blocking, procI);
toProc<< tsubfld();
}
}
}
}
}
else if (!haveMesh[Pstream::myProcNo()])
{
// No mesh here (and hence no fields). Receive zero-sized fields from the master.
forAll(masterNames, i)
{
const word& name = masterNames[i];
// Receive field
IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
dictionary fieldDict(fromMaster);
fields.set
(
i,
new GeoField
(
IOobject
(
name,
mesh.time().timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
mesh,
fieldDict
)
);
//// Write it for next time round (since mesh gets written as well)
//fields[i].write();
}
}
else
{
// Have mesh so just try to load
forAll(masterNames, i)
{
const word& name = masterNames[i];
IOobject& io = *objects[name];
io.writeOpt() = IOobject::AUTO_WRITE;
// Load field
fields.set(i, new GeoField(io, mesh));
}
}
}
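// readFields is instantiated once per standard vol and surface field type
// from main() below.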
// Debugging: compare two fields.
void compareFields
(
const scalar tolDim,
const volVectorField& a,
const volVectorField& b
)
{
forAll(a, cellI)
{
if (mag(b[cellI] - a[cellI]) > tolDim)
{
FatalErrorIn
(
"compareFields"
"(const scalar, const volVectorField&, const volVectorField&)"
) << "Did not map volVectorField correctly:" << nl
<< "cell:" << cellI
<< " transfer b:" << b[cellI]
<< " real cc:" << a[cellI]
<< abort(FatalError);
}
}
forAll(a.boundaryField(), patchI)
{
// Compare the real mesh cell centre with the
// mapped original cell centre.
const fvPatchVectorField& aBoundary =
a.boundaryField()[patchI];
const fvPatchVectorField& bBoundary =
b.boundaryField()[patchI];
if (!bBoundary.coupled())
{
forAll(aBoundary, i)
{
if (mag(aBoundary[i] - bBoundary[i]) > tolDim)
{
WarningIn
(
"compareFields"
"(const scalar, const volVectorField&"
", const volVectorField&)"
) << "Did not map volVectorField correctly:"
<< endl
<< "patch:" << patchI << " patchFace:" << i
<< " cc:" << endl
<< " real :" << aBoundary[i] << endl
<< " mapped :" << bBoundary[i] << endl
<< "This might be just a precision entry"
<< " on writing the mesh." << endl;
//<< abort(FatalError);
}
}
}
}
}
int main(int argc, char *argv[])
{
# include "addRegionOption.H"
# include "addOverwriteOption.H"
argList::addOption
(
"mergeTol",
"scalar",
"specify the merge distance relative to the bounding box size "
"(default 1e-6)"
);
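// Typical parallel invocation (a sketch only; the actual binary name and
// processor count depend on the build and the case):
//     mpirun -np <nProcs> <thisUtility> -parallel -mergeTol 1e-6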
// Add the standard time-selection options
timeSelector::addOptions();
# include "setRootCase.H"
if (env("FOAM_SIGFPE"))
{
WarningIn(args.executable())
<< "Detected floating point exception trapping (FOAM_SIGFPE)."
<< " This might give" << nl
<< " problems when mapping fields. Switch it off in case"
<< " of problems." << endl;
}
// Create processor directory if non-existing
if (!Pstream::master() && !isDir(args.path()))
{
Pout<< "Creating case directory " << args.path() << endl;
mkDir(args.path());
}
// Make sure we do not use master-only reading.
regIOobject::fileModificationChecking = regIOobject::timeStamp;
# include "createTime.H"
// Allow override of time
instantList times = timeSelector::selectIfPresent(runTime, args);
runTime.setTime(times[0], 0);
runTime.functionObjects().off();
word regionName = polyMesh::defaultRegion;
fileName meshSubDir;
if (args.optionReadIfPresent("region", regionName))
{
meshSubDir = regionName/polyMesh::meshSubDir;
}
else
{
meshSubDir = polyMesh::meshSubDir;
}
Info<< "Using mesh subdirectory " << meshSubDir << nl << endl;
const bool overwrite = args.optionFound("overwrite");
// Get the time instance directory. Since not all processors have meshes,
// just use the master one everywhere.
fileName masterInstDir;
if (Pstream::master())
{
masterInstDir = runTime.findInstance(meshSubDir, "points");
}
Pstream::scatter(masterInstDir);
// Check who has a mesh
const fileName meshPath = runTime.path()/masterInstDir/meshSubDir;
Info<< "Found points in " << meshPath << nl << endl;
boolList haveMesh(Pstream::nProcs(), false);
haveMesh[Pstream::myProcNo()] = isDir(meshPath);
Pstream::gatherList(haveMesh);
Pstream::scatterList(haveMesh);
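// gatherList followed by scatterList leaves every processor with the
// complete haveMesh table, so all ranks agree on which processors
// currently own a mesh.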
Info<< "Per processor mesh availability : " << haveMesh << endl;
const bool allHaveMesh = (findIndex(haveMesh, false) == -1);
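// loadOrCreateMesh (a helper defined in a separate source file of this
// utility) reads the mesh where it exists and otherwise constructs a
// dummy zero-cell mesh, so every rank has a valid fvMesh to distribute
// into.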
autoPtr<fvMesh> meshPtr = loadOrCreateMesh
(
IOobject
(
regionName,
masterInstDir,
runTime,
Foam::IOobject::MUST_READ
)
);
fvMesh& mesh = meshPtr();
// Print some statistics
Info<< "Before distribution:" << endl;
printMeshData(mesh);
IOdictionary decompositionDict
(
IOobject
(
"decomposeParDict",
runTime.system(),
mesh,
IOobject::MUST_READ_IF_MODIFIED,
IOobject::NO_WRITE
)
);
labelList finalDecomp;
// Create decompositionMethod and new decomposition
{
autoPtr<decompositionMethod> decomposer
(
decompositionMethod::New
(
decompositionDict
)
);
if (!decomposer().parallelAware())
{
WarningIn(args.executable())
<< "You have selected decomposition method "
<< decomposer().typeName
<< " which does" << endl
<< "not synchronise the decomposition across"
<< " processor patches." << endl
<< " You might want to select a decomposition method which"
<< " is aware of this. Continuing."
<< endl;
}
finalDecomp = decomposer().decompose(mesh, mesh.cellCentres());
}
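// finalDecomp now holds, for every cell of the current local mesh, the
// processor number that the cell should end up on.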
// Dump decomposition to volScalarField
if (!overwrite)
{
writeDecomposition("decomposition", mesh, finalDecomp);
}
// Create a 0 sized mesh to do all the generation of zero sized
// fields on processors that have zero sized meshes. Note that this is
// only necessary on the master but, since polyMesh construction with
// Pstream::parRun does parallel comms, we have to do it on all
// processors.
autoPtr<fvMeshSubset> subsetterPtr;
if (!allHaveMesh)
{
// Find last non-processor patch.
const polyBoundaryMesh& patches = mesh.boundaryMesh();
label nonProcI = -1;
forAll(patches, patchI)
{
if (isA<processorPolyPatch>(patches[patchI]))
{
break;
}
nonProcI++;
}
if (nonProcI == -1)
{
FatalErrorIn(args.executable())
<< "Cannot find non-processor patch on processor "
<< Pstream::myProcNo() << endl
<< " Current patches:" << patches.names() << abort(FatalError);
}
// Subset 0 cells, no parallel comms. This is used to create zero-sized
// fields.
subsetterPtr.reset(new fvMeshSubset(mesh));
subsetterPtr().setLargeCellSubset(labelHashSet(0), nonProcI, false);
}
// Get original objects (before incrementing time!)
IOobjectList objects(mesh, runTime.timeName());
// We don't want to map the decomposition (mapping already tested when
// mapping the cell centre field)
IOobjectList::iterator iter = objects.find("decomposition");
if (iter != objects.end())
{
objects.erase(iter);
}
// volFields
PtrList<volScalarField> volScalarFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
volScalarFields
);
PtrList<volVectorField> volVectorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
volVectorFields
);
PtrList<volSphericalTensorField> volSphereTensorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
volSphereTensorFields
);
PtrList<volSymmTensorField> volSymmTensorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
volSymmTensorFields
);
PtrList<volTensorField> volTensorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
volTensorFields
);
// surfaceFields
PtrList<surfaceScalarField> surfScalarFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
surfScalarFields
);
PtrList<surfaceVectorField> surfVectorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
surfVectorFields
);
PtrList<surfaceSphericalTensorField> surfSphereTensorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
surfSphereTensorFields
);
PtrList<surfaceSymmTensorField> surfSymmTensorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
surfSymmTensorFields
);
PtrList<surfaceTensorField> surfTensorFields;
readFields
(
haveMesh,
mesh,
subsetterPtr,
objects,
surfTensorFields
);
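// All vol and surface fields of the standard types are now in memory
// (either loaded or received as zero-sized fields) so the distribution
// below can map them together with the mesh.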
// Debugging: Create additional volField that will be mapped.
// Used to test correctness of mapping
//volVectorField mapCc("mapCc", 1*mesh.C());
// Global matching tolerance
const scalar tolDim = getMergeDistance
(
args,
runTime,
mesh.bounds()
);
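// getMergeDistance turns the relative -mergeTol option into an absolute
// length using the mesh bounding box (see the option help text above).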
// Mesh distribution engine
fvMeshDistribute distributor(mesh, tolDim);
//Pout<< "Wanted distribution:"
// << distributor.countCells(finalDecomp) << nl << endl;
// Do actual sending/receiving of mesh
autoPtr<mapDistributePolyMesh> map = distributor.distribute(finalDecomp);
//// Distribute any non-registered data accordingly
//map().distributeFaceData(faceCc);
// Print some statistics
Info<< "After distribution:" << endl;
printMeshData(mesh);
if (!overwrite)
{
runTime++;
}
else
{
mesh.setInstance(masterInstDir);
}
Info<< "Writing redistributed mesh to " << runTime.timeName() << nl << endl;
mesh.write();
// Debugging: test mapped cellcentre field.
//compareFields(tolDim, mesh.C(), mapCc);
// Print nice message
// ~~~~~~~~~~~~~~~~~~
// Determine which processors remain so we can print a nice final message.
labelList nFaces(Pstream::nProcs());
nFaces[Pstream::myProcNo()] = mesh.nFaces();
Pstream::gatherList(nFaces);
Pstream::scatterList(nFaces);
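// Processors that received no cells end up with zero faces; the loop
// below suggests removing their (now empty) processor directories.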
Info<< nl
<< "You can pick up the redecomposed mesh from the polyMesh directory"
<< " in " << runTime.timeName() << "." << nl
<< "If you redecomposed the mesh to less processors you can delete"
<< nl
<< "the processor directories with 0 sized meshes in them." << nl
<< "Below is a sample set of commands to do this."
<< " Take care when issuing these" << nl
<< "commands." << nl << endl;
forAll(nFaces, procI)
{
fileName procDir = "processor" + name(procI);
if (nFaces[procI] == 0)
{
Info<< " rm -r " << procDir.c_str() << nl;
}
else
{
fileName timeDir = procDir/runTime.timeName()/meshSubDir;
fileName constDir = procDir/runTime.constant()/meshSubDir;
Info<< " rm -r " << constDir.c_str() << nl
<< " mv " << timeDir.c_str()
<< ' ' << constDir.c_str() << nl;
}
}
Info<< endl;
Info<< "End\n" << endl;
return 0;
}
// ************************************************************************* //