Merge branch 'master' of ssh://dm/home/dm4/OpenFOAM/OpenFOAM-dev

Henry
2012-04-02 12:06:42 +01:00
26 changed files with 1531 additions and 1094 deletions

View File

@ -4,5 +4,7 @@ set -x
wclean libso conformalVoronoiMesh
wclean
wclean cvMeshSurfaceSimplify
wclean cvMeshBackgroundMesh
# ----------------------------------------------------------------- end-of-file

View File

@ -5,5 +5,6 @@ set -x
wmake libso conformalVoronoiMesh
wmake
wmake cvMeshSurfaceSimplify
wmake cvMeshBackgroundMesh
# ----------------------------------------------------------------- end-of-file

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -142,7 +142,7 @@ void Foam::backgroundMeshDecomposition::initialRefinement()
zeroGradientFvPatchScalarField::typeName
);
const conformationSurfaces& geometry = cvMesh_.geometryToConformTo();
const conformationSurfaces& geometry = geometryToConformTo_;
decompositionMethod& decomposer = decomposerPtr_();
@ -512,7 +512,7 @@ bool Foam::backgroundMeshDecomposition::refineCell
// Sample the box to find an estimate of the min size, and a volume
// estimate when overlapping == true.
const conformationSurfaces& geometry = cvMesh_.geometryToConformTo();
const conformationSurfaces& geometry = geometryToConformTo_;
treeBoundBox cellBb
(
@ -578,7 +578,7 @@ bool Foam::backgroundMeshDecomposition::refineCell
forAll(samplePoints, i)
{
scalar s = cvMesh_.cellSizeControl().cellSize
scalar s = cellSizeControl_.cellSize
(
hitInfo[i].hitPoint()
);
@ -693,7 +693,7 @@ void Foam::backgroundMeshDecomposition::buildPatchAndTree()
// Overall bb
treeBoundBox overallBb(boundaryFacesPtr_().localPoints());
Random& rnd = cvMesh_.rndGen();
Random& rnd = rndGen_;
bFTreePtr_.reset
(
@ -726,11 +726,11 @@ void Foam::backgroundMeshDecomposition::buildPatchAndTree()
octreeNearestDistances_ = bFTreePtr_().calcNearestDistance();
if (cvMesh_.cvMeshControls().objOutput())
if (cvMeshControls_.objOutput())
{
OFstream fStr
(
cvMesh_.time().path()
mesh_.time().path()
/"backgroundMeshDecomposition_proc_"
+ name(Pstream::myProcNo())
+ "_boundaryFaces.obj"
@ -777,15 +777,18 @@ Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
const conformalVoronoiMesh& cvMesh
)
:
coeffsDict_(coeffsDict),
cvMesh_(cvMesh),
runTime_(cvMesh.time()),
geometryToConformTo_(cvMesh.geometryToConformTo()),
cellSizeControl_(cvMesh.cellSizeControl()),
rndGen_(cvMesh.rndGen()),
cvMeshControls_(cvMesh.cvMeshControls()),
mesh_
(
IOobject
(
fvMesh::defaultRegion,
cvMesh_.time().timeName(),
cvMesh_.time(),
runTime_.timeName(),
runTime_,
IOobject::MUST_READ
)
),
@ -805,22 +808,22 @@ Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
IOobject
(
"decomposeParDict",
cvMesh_.time().system(),
cvMesh_.time(),
runTime_.system(),
runTime_,
IOobject::MUST_READ_IF_MODIFIED,
IOobject::NO_WRITE
)
),
decomposerPtr_(decompositionMethod::New(decomposeDict_)),
mergeDist_(1e-6*mesh_.bounds().mag()),
spanScale_(readScalar(coeffsDict_.lookup("spanScale"))),
spanScale_(readScalar(coeffsDict.lookup("spanScale"))),
minCellSizeLimit_
(
coeffsDict_.lookupOrDefault<scalar>("minCellSizeLimit", 0.0)
coeffsDict.lookupOrDefault<scalar>("minCellSizeLimit", 0.0)
),
minLevels_(readLabel(coeffsDict_.lookup("minLevels"))),
volRes_(readLabel(coeffsDict_.lookup("sampleResolution"))),
maxCellWeightCoeff_(readScalar(coeffsDict_.lookup("maxCellWeightCoeff")))
minLevels_(readLabel(coeffsDict.lookup("minLevels"))),
volRes_(readLabel(coeffsDict.lookup("sampleResolution"))),
maxCellWeightCoeff_(readScalar(coeffsDict.lookup("maxCellWeightCoeff")))
{
if (!Pstream::parRun())
{
@ -854,6 +857,74 @@ Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
}
Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
(
const scalar spanScale,
const scalar minCellSizeLimit,
const label minLevels,
const label volRes,
const scalar maxCellWeightCoeff,
const Time& runTime,
const conformationSurfaces& geometryToConformTo,
const cellSizeControlSurfaces& cellSizeControl,
Random& rndGen,
const cvControls& cvMeshControls
)
:
runTime_(runTime),
geometryToConformTo_(geometryToConformTo),
cellSizeControl_(cellSizeControl),
rndGen_(rndGen),
cvMeshControls_(cvMeshControls),
mesh_
(
IOobject
(
fvMesh::defaultRegion,
runTime_.timeName(),
runTime_,
IOobject::MUST_READ
)
),
meshCutter_
(
mesh_,
labelList(mesh_.nCells(), 0),
labelList(mesh_.nPoints(), 0)
),
boundaryFacesPtr_(),
bFTreePtr_(),
octreeNearestDistances_(),
allBackgroundMeshBounds_(Pstream::nProcs()),
globalBackgroundBounds_(),
decomposeDict_
(
IOobject
(
"decomposeParDict",
runTime_.system(),
runTime_,
IOobject::MUST_READ_IF_MODIFIED,
IOobject::NO_WRITE
)
),
decomposerPtr_(decompositionMethod::New(decomposeDict_)),
mergeDist_(1e-6*mesh_.bounds().mag()),
spanScale_(spanScale),
minCellSizeLimit_(minCellSizeLimit),
minLevels_(minLevels),
volRes_(volRes),
maxCellWeightCoeff_(maxCellWeightCoeff)
{
// Stand-alone operation
Info<< nl << "Building initial background mesh decomposition" << endl;
initialRefinement();
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::backgroundMeshDecomposition::~backgroundMeshDecomposition()

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -92,10 +92,22 @@ class backgroundMeshDecomposition
// Private data
//- Method details dictionary
dictionary coeffsDict_;
//dictionary coeffsDict_;
//- Reference to the conformalVoronoiMesh holding this object
const conformalVoronoiMesh& cvMesh_;
//- Reference to runtime
const Time& runTime_;
//- Reference to surface
const conformationSurfaces& geometryToConformTo_;
//- The cell size control object
const cellSizeControlSurfaces& cellSizeControl_;
//- Random number generator
Random& rndGen_;
//- Controls
const cvControls& cvMeshControls_;
//- Mesh stored for this processor, specifying the domain that it
// is responsible for.
@ -191,13 +203,29 @@ public:
// Constructors
//- Construct from components
//- Construct from components in cvMesh operation
backgroundMeshDecomposition
(
const dictionary& coeffsDict,
const conformalVoronoiMesh& cvMesh
);
//- Construct from components for standalone operation
backgroundMeshDecomposition
(
const scalar spanScale,
const scalar minCellSizeLimit,
const label minLevels,
const label volRes,
const scalar maxCellWeightCoeff,
const Time& runTime,
const conformationSurfaces& geometryToConformTo,
const cellSizeControlSurfaces& cellSizeControl,
Random& rndGen,
const cvControls& cvMeshControls
);
//- Destructor
~backgroundMeshDecomposition();
@ -299,6 +327,15 @@ public:
//- Return the boundBox of this processor
inline const treeBoundBox& procBounds() const;
//- Return the cell level of the underlying mesh
inline const labelList& cellLevel() const;
//- Return the point level of the underlying mesh
inline const labelList& pointLevel() const;
//- Return the current decomposition method
inline const decompositionMethod& decomposer() const;
};

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -43,9 +43,30 @@ Foam::backgroundMeshDecomposition::octreeNearestDistances() const
}
const Foam::treeBoundBox& Foam::backgroundMeshDecomposition::procBounds() const
const Foam::treeBoundBox&
Foam::backgroundMeshDecomposition::procBounds() const
{
return allBackgroundMeshBounds_[Pstream::myProcNo()];
}
const Foam::labelList& Foam::backgroundMeshDecomposition::cellLevel() const
{
return meshCutter_.cellLevel();
}
const Foam::labelList& Foam::backgroundMeshDecomposition::pointLevel() const
{
return meshCutter_.pointLevel();
}
const Foam::decompositionMethod&
Foam::backgroundMeshDecomposition::decomposer() const
{
return decomposerPtr_();
}
// ************************************************************************* //

View File

@ -0,0 +1,3 @@
cvMeshBackgroundMesh.C
EXE = $(FOAM_APPBIN)/cvMeshBackgroundMesh

View File

@ -0,0 +1,29 @@
EXE_DEBUG = -DFULLDEBUG -g -O0
EXE_FROUNDING_MATH = -frounding-math
EXE_NDEBUG = -DNDEBUG
include $(GENERAL_RULES)/CGAL
EXE_INC = \
${EXE_FROUNDING_MATH} \
${EXE_NDEBUG} \
${CGAL_INC} \
-I../conformalVoronoiMesh/lnInclude \
-I$(LIB_SRC)/parallel/decompose/decompositionMethods/lnInclude \
-I$(LIB_SRC)/edgeMesh/lnInclude \
-I$(LIB_SRC)/sampling/lnInclude \
-I$(LIB_SRC)/triSurface/lnInclude \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude
EXE_LIBS = \
$(CGAL_LIBS) \
-lconformalVoronoiMesh \
-ldecompositionMethods /* -L$(FOAM_LIBBIN)/dummy -lscotchDecomp */ \
-ledgeMesh \
-lsampling \
-ltriSurface \
-lmeshTools \
-ldynamicMesh \
-lfiniteVolume

View File

@ -0,0 +1,770 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Application
cvMeshBackgroundMesh
Description
Writes out background mesh as constructed by cvMesh and constructs
distanceSurface.
\*---------------------------------------------------------------------------*/
#include "PatchTools.H"
#include "argList.H"
#include "Time.H"
#include "triSurface.H"
#include "searchableSurfaces.H"
#include "conformationSurfaces.H"
#include "cellSizeControlSurfaces.H"
#include "backgroundMeshDecomposition.H"
#include "cellShape.H"
#include "cellModeller.H"
#include "DynamicField.H"
#include "isoSurfaceCell.H"
#include "vtkSurfaceWriter.H"
#include "syncTools.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Tolerance (as fraction of the bounding box). Needs to be fairly lax since
// usually meshes get written with limited precision (6 digits)
static const scalar defaultMergeTol = 1E-6;
// Get merging distance when matching face centres
scalar getMergeDistance
(
const argList& args,
const Time& runTime,
const boundBox& bb
)
{
scalar mergeTol = defaultMergeTol;
args.optionReadIfPresent("mergeTol", mergeTol);
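// writeTol is the coordinate resolution obtainable when writing in ASCII at
// the current precision; a merge tolerance finer than this cannot be honoured.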
scalar writeTol =
Foam::pow(scalar(10.0), -scalar(IOstream::defaultPrecision()));
Info<< "Merge tolerance : " << mergeTol << nl
<< "Write tolerance : " << writeTol << endl;
if (runTime.writeFormat() == IOstream::ASCII && mergeTol < writeTol)
{
FatalErrorIn("getMergeDistance")
<< "Your current settings specify ASCII writing with "
<< IOstream::defaultPrecision() << " digits precision." << endl
<< "Your merging tolerance (" << mergeTol << ") is finer than this."
<< endl
<< "Please change your writeFormat to binary"
<< " or increase the writePrecision" << endl
<< "or adjust the merge tolerance (-mergeTol)."
<< exit(FatalError);
}
scalar mergeDist = mergeTol * bb.mag();
Info<< "Overall meshes bounding box : " << bb << nl
<< "Relative tolerance : " << mergeTol << nl
<< "Absolute matching distance : " << mergeDist << nl
<< endl;
return mergeDist;
}
void printMeshData(const polyMesh& mesh)
{
// Collect all data on master
globalIndex globalCells(mesh.nCells());
labelListList patchNeiProcNo(Pstream::nProcs());
labelListList patchSize(Pstream::nProcs());
const labelList& pPatches = mesh.globalData().processorPatches();
patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size());
patchSize[Pstream::myProcNo()].setSize(pPatches.size());
forAll(pPatches, i)
{
const processorPolyPatch& ppp = refCast<const processorPolyPatch>
(
mesh.boundaryMesh()[pPatches[i]]
);
patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo();
patchSize[Pstream::myProcNo()][i] = ppp.size();
}
Pstream::gatherList(patchNeiProcNo);
Pstream::gatherList(patchSize);
// Print stats
globalIndex globalBoundaryFaces(mesh.nFaces()-mesh.nInternalFaces());
label maxProcCells = 0;
label totProcFaces = 0;
label maxProcPatches = 0;
label totProcPatches = 0;
label maxProcFaces = 0;
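// The per-processor patch data was gathered onto the master above, so the
// statistics below are only meaningful (and only printed) on the master.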
for (label procI = 0; procI < Pstream::nProcs(); procI++)
{
Info<< endl
<< "Processor " << procI << nl
<< " Number of cells = " << globalCells.localSize(procI)
<< endl;
label nProcFaces = 0;
const labelList& nei = patchNeiProcNo[procI];
forAll(patchNeiProcNo[procI], i)
{
Info<< " Number of faces shared with processor "
<< patchNeiProcNo[procI][i] << " = " << patchSize[procI][i]
<< endl;
nProcFaces += patchSize[procI][i];
}
Info<< " Number of processor patches = " << nei.size() << nl
<< " Number of processor faces = " << nProcFaces << nl
<< " Number of boundary faces = "
<< globalBoundaryFaces.localSize(procI) << endl;
maxProcCells = max(maxProcCells, globalCells.localSize(procI));
totProcFaces += nProcFaces;
totProcPatches += nei.size();
maxProcPatches = max(maxProcPatches, nei.size());
maxProcFaces = max(maxProcFaces, nProcFaces);
}
// Stats
scalar avgProcCells = scalar(globalCells.size())/Pstream::nProcs();
scalar avgProcPatches = scalar(totProcPatches)/Pstream::nProcs();
scalar avgProcFaces = scalar(totProcFaces)/Pstream::nProcs();
// In case of all faces on one processor. Just to avoid division by 0.
if (totProcPatches == 0)
{
avgProcPatches = 1;
}
if (totProcFaces == 0)
{
avgProcFaces = 1;
}
Info<< nl
<< "Number of processor faces = " << totProcFaces/2 << nl
<< "Max number of cells = " << maxProcCells
<< " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
<< "% above average " << avgProcCells << ")" << nl
<< "Max number of processor patches = " << maxProcPatches
<< " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
<< "% above average " << avgProcPatches << ")" << nl
<< "Max number of faces between processors = " << maxProcFaces
<< " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
<< "% above average " << avgProcFaces << ")" << nl
<< endl;
}
// Return cellID
label cellLabel
(
const Vector<label>& nCells,
const label i,
const label j,
const label k
)
{
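// Row-major flattening of (i, j, k): k varies fastest, then j, then i.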
return i*nCells[1]*nCells[2]+j*nCells[2]+k;
}
label vtxLabel
(
const Vector<label>& nCells,
const label i,
const label j,
const label k
)
{
Vector<label> nPoints
(
nCells[0]+1,
nCells[1]+1,
nCells[2]+1
);
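// Same row-major flattening as cellLabel, but over the point grid, which is
// one larger than the cell grid in each direction.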
return i*nPoints[1]*nPoints[2]+j*nPoints[2]+k;
}
autoPtr<polyMesh> generateHexMesh
(
const IOobject& io,
const point& origin,
const vector& cellSize,
const Vector<label>& nCells
)
{
pointField points;
if (nCells[0]+nCells[1]+nCells[2] > 0)
{
points.setSize((nCells[0]+1)*(nCells[1]+1)*(nCells[2]+1));
// Generate points
for (label i = 0; i <= nCells[0]; i++)
{
for (label j = 0; j <= nCells[1]; j++)
{
for (label k = 0; k <= nCells[2]; k++)
{
point pt = origin;
pt.x() += i*cellSize[0];
pt.y() += j*cellSize[1];
pt.z() += k*cellSize[2];
points[vtxLabel(nCells, i, j, k)] = pt;
}
}
}
}
const cellModel& hex = *(cellModeller::lookup("hex"));
cellShapeList cellShapes(nCells[0]*nCells[1]*nCells[2]);
labelList hexPoints(8);
label cellI = 0;
for (label i = 0; i < nCells[0]; i++)
{
for (label j = 0; j < nCells[1]; j++)
{
for (label k = 0; k < nCells[2]; k++)
{
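// Collect the eight corner points of this cell in the vertex order of the
// 'hex' cellModel: the four points of the k plane followed by the
// corresponding four points of the k+1 plane.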
hexPoints[0] = vtxLabel(nCells, i, j, k);
hexPoints[1] = vtxLabel(nCells, i+1, j, k);
hexPoints[2] = vtxLabel(nCells, i+1, j+1, k);
hexPoints[3] = vtxLabel(nCells, i, j+1, k);
hexPoints[4] = vtxLabel(nCells, i, j, k+1);
hexPoints[5] = vtxLabel(nCells, i+1, j, k+1);
hexPoints[6] = vtxLabel(nCells, i+1, j+1, k+1);
hexPoints[7] = vtxLabel(nCells, i, j+1, k+1);
cellShapes[cellI++] = cellShape(hex, hexPoints);
}
}
}
faceListList boundary(0);
wordList patchNames(0);
wordList patchTypes(0);
word defaultFacesName = "defaultFaces";
word defaultFacesType = polyPatch::typeName;
wordList patchPhysicalTypes(0);
return autoPtr<polyMesh>
(
new polyMesh
(
io,
xferMoveTo<pointField>(points),
cellShapes,
boundary,
patchNames,
patchTypes,
defaultFacesName,
defaultFacesType,
patchPhysicalTypes
)
);
}
// Determine for every point a signed distance to the nearest surface
// (outside is positive)
tmp<scalarField> signedDistance
(
const scalarField& distSqr,
const pointField& points,
const searchableSurfaces& geometry,
const labelList& surfaces
)
{
tmp<scalarField> tfld(new scalarField(points.size(), Foam::sqr(GREAT)));
scalarField& fld = tfld();
// Find nearest
List<pointIndexHit> nearest;
labelList nearestSurfaces;
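// Note: the supplied distSqr is not used to bound the search; an effectively
// unbounded radius (sqr(GREAT)) is passed for every point instead.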
searchableSurfacesQueries::findNearest
(
geometry,
surfaces,
points,
scalarField(points.size(), Foam::sqr(GREAT)),//distSqr
nearestSurfaces,
nearest
);
// Determine sign of nearest. Sort by surface to do this.
DynamicField<point> surfPoints(points.size());
DynamicList<label> surfIndices(points.size());
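// Batch the inside/outside tests per surface: getVolumeType takes a list of
// points for a single surface, so group the points by the surface that was
// hit before querying.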
forAll(surfaces, surfI)
{
// Extract points on this surface
surfPoints.clear();
surfIndices.clear();
forAll(nearestSurfaces, i)
{
if (nearestSurfaces[i] == surfI)
{
surfPoints.append(points[i]);
surfIndices.append(i);
}
}
// Calculate sideness of these surface points
label geomI = surfaces[surfI];
List<searchableSurface::volumeType> volType;
geometry[geomI].getVolumeType(surfPoints, volType);
// Push back to original
forAll(volType, i)
{
label pointI = surfIndices[i];
scalar dist = mag(points[pointI] - nearest[pointI].hitPoint());
searchableSurface::volumeType vT = volType[i];
if (vT == searchableSurface::OUTSIDE)
{
fld[pointI] = dist;
}
else if (vT == searchableSurface::INSIDE)
{
fld[pointI] = -dist;
}
else
{
FatalErrorIn("signedDistance()")
<< "getVolumeType failure, neither INSIDE or OUTSIDE"
<< exit(FatalError);
}
}
}
return tfld;
}
// Main program:
int main(int argc, char *argv[])
{
argList::addNote
(
"Generate cvMesh-consistent representation of surfaces"
);
argList::addBoolOption
(
"writeMesh",
"write the resulting mesh and distance fields"
);
argList::addOption
(
"mergeTol",
"scalar",
"specify the merge distance relative to the bounding box size "
"(default 1E-6)"
);
#include "setRootCase.H"
#include "createTime.H"
runTime.functionObjects().off();
const bool writeMesh = args.optionFound("writeMesh");
if (writeMesh)
{
Info<< "Writing resulting mesh and cellDistance, pointDistance fields."
<< nl << endl;
}
IOdictionary cvMeshDict
(
IOobject
(
"cvMeshDict",
runTime.system(),
runTime,
IOobject::MUST_READ_IF_MODIFIED,
IOobject::NO_WRITE
)
);
// Define/load all geometry
searchableSurfaces allGeometry
(
IOobject
(
"cvSearchableSurfaces",
runTime.constant(),
"triSurface",
runTime,
IOobject::MUST_READ,
IOobject::NO_WRITE
),
cvMeshDict.subDict("geometry")
);
Random rndGen(64293*Pstream::myProcNo());
conformationSurfaces geometryToConformTo
(
runTime,
rndGen,
allGeometry,
cvMeshDict.subDict("surfaceConformation")
);
cellSizeControlSurfaces cellSizeControl
(
allGeometry,
cvMeshDict.subDict("motionControl")
);
// Generate starting block mesh
vector cellSize;
{
const treeBoundBox& bb = geometryToConformTo.globalBounds();
// Determine the number of cells in each direction.
const vector span = bb.span();
vector nScalarCells = span/cellSizeControl.defaultCellSize();
// Calculate initial cell size to be a little bit smaller than the
// defaultCellSize to avoid initial refinement triggering.
Vector<label> nCells = Vector<label>
(
label(nScalarCells.x())+2,
label(nScalarCells.y())+2,
label(nScalarCells.z())+2
);
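// Recompute the actual cell size from the rounded-up cell counts so that the
// initial cells come out slightly smaller than defaultCellSize.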
cellSize = vector
(
span[0]/nCells[0],
span[1]/nCells[1],
span[2]/nCells[2]
);
Info<< "Generating initial hex mesh with" << nl
<< " bounding box : " << bb << nl
<< " nCells : " << nCells << nl
<< " cellSize : " << cellSize << nl
<< endl;
autoPtr<polyMesh> meshPtr
(
generateHexMesh
(
IOobject
(
polyMesh::defaultRegion,
runTime.constant(),
runTime
),
bb.min(),
cellSize,
(
Pstream::master()
? nCells
: Vector<label>(0, 0, 0)
)
)
);
Info<< "Writing initial hex mesh to " << meshPtr().instance() << nl
<< endl;
meshPtr().write();
}
// Distribute the initial mesh
if (Pstream::parRun())
{
# include "createMesh.H"
Info<< "Loaded mesh:" << endl;
printMeshData(mesh);
// Allocate a decomposer
IOdictionary decompositionDict
(
IOobject
(
"decomposeParDict",
runTime.system(),
mesh,
IOobject::MUST_READ_IF_MODIFIED,
IOobject::NO_WRITE
)
);
autoPtr<decompositionMethod> decomposer
(
decompositionMethod::New
(
decompositionDict
)
);
labelList decomp = decomposer().decompose(mesh, mesh.cellCentres());
// Global matching tolerance
const scalar tolDim = getMergeDistance
(
args,
runTime,
mesh.bounds()
);
// Mesh distribution engine
fvMeshDistribute distributor(mesh, tolDim);
Info<< "Wanted distribution:"
<< distributor.countCells(decomp) << nl << endl;
// Do actual sending/receiving of mesh
autoPtr<mapDistributePolyMesh> map = distributor.distribute(decomp);
// Print some statistics
//Info<< "After distribution:" << endl;
//printMeshData(mesh);
mesh.setInstance(runTime.constant());
Info<< "Writing redistributed mesh" << nl << endl;
mesh.write();
}
Info<< "Refining backgroud mesh according to cell size specification" << nl
<< endl;
backgroundMeshDecomposition backgroundMesh
(
1.0, // spanScale, ratio of poly cell size vs. hex cell size
0.0, //minCellSizeLimit
0, //minLevels
4, //volRes, check multiple points per cell
20.0, //maxCellWeightCoeff
runTime,
geometryToConformTo,
cellSizeControl,
rndGen,
cvMeshDict
);
if (writeMesh)
{
runTime++;
Info<< "Writing mesh to " << runTime.timeName() << endl;
backgroundMesh.mesh().write();
}
const scalar tolDim = getMergeDistance
(
args,
runTime,
backgroundMesh.mesh().bounds()
);
faceList isoFaces;
pointField isoPoints;
{
// Apply a distanceSurface to it.
const fvMesh& fvm = backgroundMesh.mesh();
volScalarField cellDistance
(
IOobject
(
"cellDistance",
fvm.time().timeName(),
fvm.time(),
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
fvm,
dimensionedScalar("zero", dimLength, 0)
);
const searchableSurfaces& geometry = geometryToConformTo.geometry();
const labelList& surfaces = geometryToConformTo.surfaces();
// Get maximum search size per cell
scalarField distSqr(cellDistance.size());
const labelList& cellLevel = backgroundMesh.cellLevel();
forAll(cellLevel, cellI)
{
// The largest edge of the cell will always be less than the
// span of the bounding box of the cell.
distSqr[cellI] = magSqr(cellSize)/pow(2, cellLevel[cellI]);
}
{
// Internal field
cellDistance.internalField() = signedDistance
(
distSqr,
fvm.C(),
geometry,
surfaces
);
// Patch fields
forAll(fvm.C().boundaryField(), patchI)
{
const pointField& cc = fvm.C().boundaryField()[patchI];
fvPatchScalarField& fld = cellDistance.boundaryField()[patchI];
scalarField patchDistSqr
(
fld.patch().patchInternalField(distSqr)
);
fld = signedDistance(patchDistSqr, cc, geometry, surfaces);
}
// On processor patches the fvm.C() will already be the cell centre
// on the opposite side so no need to swap cellDistance.
if (writeMesh)
{
cellDistance.write();
}
}
// Distance to points
pointScalarField pointDistance
(
IOobject
(
"pointDistance",
fvm.time().timeName(),
fvm.time(),
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
pointMesh::New(fvm),
dimensionedScalar("zero", dimLength, 0)
);
{
scalarField pointDistSqr(fvm.nPoints(), -sqr(GREAT));
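// Give every point the largest search radius of the internal-face owner
// cells that use it, then synchronise the maxima across processor boundaries.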
for (label faceI = 0; faceI < fvm.nInternalFaces(); faceI++)
{
label own = fvm.faceOwner()[faceI];
scalar ownDistSqr = distSqr[own];
const face& f = fvm.faces()[faceI];
forAll(f, fp)
{
pointDistSqr[f[fp]] = max(pointDistSqr[f[fp]], ownDistSqr);
}
}
syncTools::syncPointList
(
fvm,
pointDistSqr,
maxEqOp<scalar>(),
-sqr(GREAT) // null value
);
pointDistance.internalField() = signedDistance
(
pointDistSqr,
fvm.points(),
geometry,
surfaces
);
if (writeMesh)
{
pointDistance.write();
}
}
isoSurfaceCell iso
(
fvm,
cellDistance,
pointDistance,
0, //distance,
false //regularise
);
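// The iso-surface comes back triangulated; convert each triangle to a
// general face so the pieces from all processors can be merged and written.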
isoFaces.setSize(iso.size());
forAll(isoFaces, i)
{
isoFaces[i] = iso[i].triFaceFace();
}
isoPoints = iso.points();
}
pointField mergedPoints;
faceList mergedFaces;
labelList pointMergeMap;
PatchTools::gatherAndMerge
(
tolDim,
primitivePatch
(
SubList<face>(isoFaces, isoFaces.size()),
isoPoints
),
mergedPoints,
mergedFaces,
pointMergeMap
);
vtkSurfaceWriter writer;
writer.write
(
runTime.path(),
"iso",
mergedPoints,
mergedFaces
);
Info<< "End\n" << endl;
return 0;
}
// ************************************************************************* //

View File

@ -1,3 +1,4 @@
loadOrCreateMesh.C
redistributePar.C
EXE = $(FOAM_APPBIN)/redistributePar

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -54,6 +54,7 @@ Description
#include "mapDistributePolyMesh.H"
#include "IOobjectList.H"
#include "globalIndex.H"
#include "loadOrCreateMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -62,292 +63,292 @@ Description
static const scalar defaultMergeTol = 1E-6;
// Read mesh if available. Otherwise create empty mesh with same non-proc
// patches as proc0 mesh. Requires all processors to have all patches
// (and in same order).
autoPtr<fvMesh> createMesh
(
const Time& runTime,
const word& regionName,
const fileName& instDir,
const bool haveMesh
)
{
//Pout<< "Create mesh for time = "
// << runTime.timeName() << nl << endl;
IOobject io
(
regionName,
instDir,
runTime,
IOobject::MUST_READ
);
if (!haveMesh)
{
// Create dummy mesh. Only used on procs that don't have mesh.
IOobject noReadIO(io);
noReadIO.readOpt() = IOobject::NO_READ;
fvMesh dummyMesh
(
noReadIO,
xferCopy(pointField()),
xferCopy(faceList()),
xferCopy(labelList()),
xferCopy(labelList()),
false
);
// Add some dummy zones so upon reading it does not read them
// from the undecomposed case. Should be done as extra argument to
// regIOobject::readStream?
List<pointZone*> pz
(
1,
new pointZone
(
"dummyPointZone",
labelList(0),
0,
dummyMesh.pointZones()
)
);
List<faceZone*> fz
(
1,
new faceZone
(
"dummyFaceZone",
labelList(0),
boolList(0),
0,
dummyMesh.faceZones()
)
);
List<cellZone*> cz
(
1,
new cellZone
(
"dummyCellZone",
labelList(0),
0,
dummyMesh.cellZones()
)
);
dummyMesh.addZones(pz, fz, cz);
//Pout<< "Writing dummy mesh to " << dummyMesh.polyMesh::objectPath()
// << endl;
dummyMesh.write();
}
//Pout<< "Reading mesh from " << io.objectPath() << endl;
autoPtr<fvMesh> meshPtr(new fvMesh(io));
fvMesh& mesh = meshPtr();
// Sync patches
// ~~~~~~~~~~~~
if (Pstream::master())
{
// Send patches
for
(
int slave=Pstream::firstSlave();
slave<=Pstream::lastSlave();
slave++
)
{
OPstream toSlave(Pstream::scheduled, slave);
toSlave << mesh.boundaryMesh();
}
}
else
{
// Receive patches
IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
PtrList<entry> patchEntries(fromMaster);
if (haveMesh)
{
// Check master names against mine
const polyBoundaryMesh& patches = mesh.boundaryMesh();
forAll(patchEntries, patchI)
{
const entry& e = patchEntries[patchI];
const word type(e.dict().lookup("type"));
const word& name = e.keyword();
if (type == processorPolyPatch::typeName)
{
break;
}
if (patchI >= patches.size())
{
FatalErrorIn
(
"createMesh(const Time&, const fileName&, const bool)"
) << "Non-processor patches not synchronised."
<< endl
<< "Processor " << Pstream::myProcNo()
<< " has only " << patches.size()
<< " patches, master has "
<< patchI
<< exit(FatalError);
}
if
(
type != patches[patchI].type()
|| name != patches[patchI].name()
)
{
FatalErrorIn
(
"createMesh(const Time&, const fileName&, const bool)"
) << "Non-processor patches not synchronised."
<< endl
<< "Master patch " << patchI
<< " name:" << type
<< " type:" << type << endl
<< "Processor " << Pstream::myProcNo()
<< " patch " << patchI
<< " has name:" << patches[patchI].name()
<< " type:" << patches[patchI].type()
<< exit(FatalError);
}
}
}
else
{
// Add patch
List<polyPatch*> patches(patchEntries.size());
label nPatches = 0;
forAll(patchEntries, patchI)
{
const entry& e = patchEntries[patchI];
const word type(e.dict().lookup("type"));
const word& name = e.keyword();
if (type == processorPolyPatch::typeName)
{
break;
}
//Pout<< "Adding patch:" << nPatches
// << " name:" << name << " type:" << type << endl;
dictionary patchDict(e.dict());
patchDict.remove("nFaces");
patchDict.add("nFaces", 0);
patchDict.remove("startFace");
patchDict.add("startFace", 0);
patches[patchI] = polyPatch::New
(
name,
patchDict,
nPatches++,
mesh.boundaryMesh()
).ptr();
}
patches.setSize(nPatches);
mesh.addFvPatches(patches, false); // no parallel comms
//// Write empty mesh now we have correct patches
//meshPtr().write();
}
}
// Determine zones
// ~~~~~~~~~~~~~~~
wordList pointZoneNames(mesh.pointZones().names());
Pstream::scatter(pointZoneNames);
wordList faceZoneNames(mesh.faceZones().names());
Pstream::scatter(faceZoneNames);
wordList cellZoneNames(mesh.cellZones().names());
Pstream::scatter(cellZoneNames);
if (!haveMesh)
{
// Add the zones. Make sure to remove the old dummy ones first
mesh.pointZones().clear();
mesh.faceZones().clear();
mesh.cellZones().clear();
List<pointZone*> pz(pointZoneNames.size());
forAll(pointZoneNames, i)
{
pz[i] = new pointZone
(
pointZoneNames[i],
labelList(0),
i,
mesh.pointZones()
);
}
List<faceZone*> fz(faceZoneNames.size());
forAll(faceZoneNames, i)
{
fz[i] = new faceZone
(
faceZoneNames[i],
labelList(0),
boolList(0),
i,
mesh.faceZones()
);
}
List<cellZone*> cz(cellZoneNames.size());
forAll(cellZoneNames, i)
{
cz[i] = new cellZone
(
cellZoneNames[i],
labelList(0),
i,
mesh.cellZones()
);
}
mesh.addZones(pz, fz, cz);
}
if (!haveMesh)
{
// We created a dummy mesh file above. Delete it.
//Pout<< "Removing dummy mesh " << io.objectPath() << endl;
rmDir(io.objectPath());
}
// Force recreation of globalMeshData.
mesh.clearOut();
mesh.globalData();
// Do some checks.
// Check if the boundary definition is unique
mesh.boundaryMesh().checkDefinition(true);
// Check if the boundary processor patches are correct
mesh.boundaryMesh().checkParallelSync(true);
// Check names of zones are equal
mesh.cellZones().checkDefinition(true);
mesh.cellZones().checkParallelSync(true);
mesh.faceZones().checkDefinition(true);
mesh.faceZones().checkParallelSync(true);
mesh.pointZones().checkDefinition(true);
mesh.pointZones().checkParallelSync(true);
return meshPtr;
}
//// Read mesh if available. Otherwise create empty mesh with same non-proc
//// patches as proc0 mesh. Requires all processors to have all patches
//// (and in same order).
//autoPtr<fvMesh> createMesh
//(
// const Time& runTime,
// const word& regionName,
// const fileName& instDir,
// const bool haveMesh
//)
//{
// //Pout<< "Create mesh for time = "
// // << runTime.timeName() << nl << endl;
//
// IOobject io
// (
// regionName,
// instDir,
// runTime,
// IOobject::MUST_READ
// );
//
// if (!haveMesh)
// {
// // Create dummy mesh. Only used on procs that don't have mesh.
// IOobject noReadIO(io);
// noReadIO.readOpt() = IOobject::NO_READ;
// fvMesh dummyMesh
// (
// noReadIO,
// xferCopy(pointField()),
// xferCopy(faceList()),
// xferCopy(labelList()),
// xferCopy(labelList()),
// false
// );
// // Add some dummy zones so upon reading it does not read them
// // from the undecomposed case. Should be done as extra argument to
// // regIOobject::readStream?
// List<pointZone*> pz
// (
// 1,
// new pointZone
// (
// "dummyPointZone",
// labelList(0),
// 0,
// dummyMesh.pointZones()
// )
// );
// List<faceZone*> fz
// (
// 1,
// new faceZone
// (
// "dummyFaceZone",
// labelList(0),
// boolList(0),
// 0,
// dummyMesh.faceZones()
// )
// );
// List<cellZone*> cz
// (
// 1,
// new cellZone
// (
// "dummyCellZone",
// labelList(0),
// 0,
// dummyMesh.cellZones()
// )
// );
// dummyMesh.addZones(pz, fz, cz);
// //Pout<< "Writing dummy mesh to " << dummyMesh.polyMesh::objectPath()
// // << endl;
// dummyMesh.write();
// }
//
// //Pout<< "Reading mesh from " << io.objectPath() << endl;
// autoPtr<fvMesh> meshPtr(new fvMesh(io));
// fvMesh& mesh = meshPtr();
//
//
// // Sync patches
// // ~~~~~~~~~~~~
//
// if (Pstream::master())
// {
// // Send patches
// for
// (
// int slave=Pstream::firstSlave();
// slave<=Pstream::lastSlave();
// slave++
// )
// {
// OPstream toSlave(Pstream::scheduled, slave);
// toSlave << mesh.boundaryMesh();
// }
// }
// else
// {
// // Receive patches
// IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
// PtrList<entry> patchEntries(fromMaster);
//
// if (haveMesh)
// {
// // Check master names against mine
//
// const polyBoundaryMesh& patches = mesh.boundaryMesh();
//
// forAll(patchEntries, patchI)
// {
// const entry& e = patchEntries[patchI];
// const word type(e.dict().lookup("type"));
// const word& name = e.keyword();
//
// if (type == processorPolyPatch::typeName)
// {
// break;
// }
//
// if (patchI >= patches.size())
// {
// FatalErrorIn
// (
// "createMesh(const Time&, const fileName&, const bool)"
// ) << "Non-processor patches not synchronised."
// << endl
// << "Processor " << Pstream::myProcNo()
// << " has only " << patches.size()
// << " patches, master has "
// << patchI
// << exit(FatalError);
// }
//
// if
// (
// type != patches[patchI].type()
// || name != patches[patchI].name()
// )
// {
// FatalErrorIn
// (
// "createMesh(const Time&, const fileName&, const bool)"
// ) << "Non-processor patches not synchronised."
// << endl
// << "Master patch " << patchI
// << " name:" << type
// << " type:" << type << endl
// << "Processor " << Pstream::myProcNo()
// << " patch " << patchI
// << " has name:" << patches[patchI].name()
// << " type:" << patches[patchI].type()
// << exit(FatalError);
// }
// }
// }
// else
// {
// // Add patch
// List<polyPatch*> patches(patchEntries.size());
// label nPatches = 0;
//
// forAll(patchEntries, patchI)
// {
// const entry& e = patchEntries[patchI];
// const word type(e.dict().lookup("type"));
// const word& name = e.keyword();
//
// if (type == processorPolyPatch::typeName)
// {
// break;
// }
//
// //Pout<< "Adding patch:" << nPatches
// // << " name:" << name << " type:" << type << endl;
//
// dictionary patchDict(e.dict());
// patchDict.remove("nFaces");
// patchDict.add("nFaces", 0);
// patchDict.remove("startFace");
// patchDict.add("startFace", 0);
//
// patches[patchI] = polyPatch::New
// (
// name,
// patchDict,
// nPatches++,
// mesh.boundaryMesh()
// ).ptr();
// }
// patches.setSize(nPatches);
// mesh.addFvPatches(patches, false); // no parallel comms
//
// //// Write empty mesh now we have correct patches
// //meshPtr().write();
// }
// }
//
//
// // Determine zones
// // ~~~~~~~~~~~~~~~
//
// wordList pointZoneNames(mesh.pointZones().names());
// Pstream::scatter(pointZoneNames);
// wordList faceZoneNames(mesh.faceZones().names());
// Pstream::scatter(faceZoneNames);
// wordList cellZoneNames(mesh.cellZones().names());
// Pstream::scatter(cellZoneNames);
//
// if (!haveMesh)
// {
// // Add the zones. Make sure to remove the old dummy ones first
// mesh.pointZones().clear();
// mesh.faceZones().clear();
// mesh.cellZones().clear();
//
// List<pointZone*> pz(pointZoneNames.size());
// forAll(pointZoneNames, i)
// {
// pz[i] = new pointZone
// (
// pointZoneNames[i],
// labelList(0),
// i,
// mesh.pointZones()
// );
// }
// List<faceZone*> fz(faceZoneNames.size());
// forAll(faceZoneNames, i)
// {
// fz[i] = new faceZone
// (
// faceZoneNames[i],
// labelList(0),
// boolList(0),
// i,
// mesh.faceZones()
// );
// }
// List<cellZone*> cz(cellZoneNames.size());
// forAll(cellZoneNames, i)
// {
// cz[i] = new cellZone
// (
// cellZoneNames[i],
// labelList(0),
// i,
// mesh.cellZones()
// );
// }
// mesh.addZones(pz, fz, cz);
// }
//
//
// if (!haveMesh)
// {
// // We created a dummy mesh file above. Delete it.
// //Pout<< "Removing dummy mesh " << io.objectPath() << endl;
// rmDir(io.objectPath());
// }
//
// // Force recreation of globalMeshData.
// mesh.clearOut();
// mesh.globalData();
//
//
// // Do some checks.
//
// // Check if the boundary definition is unique
// mesh.boundaryMesh().checkDefinition(true);
// // Check if the boundary processor patches are correct
// mesh.boundaryMesh().checkParallelSync(true);
// // Check names of zones are equal
// mesh.cellZones().checkDefinition(true);
// mesh.cellZones().checkParallelSync(true);
// mesh.faceZones().checkDefinition(true);
// mesh.faceZones().checkParallelSync(true);
// mesh.pointZones().checkDefinition(true);
// mesh.pointZones().checkParallelSync(true);
//
// return meshPtr;
//}
// Get merging distance when matching face centres
@ -786,13 +787,24 @@ int main(int argc, char *argv[])
const bool allHaveMesh = (findIndex(haveMesh, false) == -1);
// Create mesh
autoPtr<fvMesh> meshPtr = createMesh
//autoPtr<fvMesh> meshPtr = createMesh
//(
// runTime,
// regionName,
// masterInstDir,
// haveMesh[Pstream::myProcNo()]
//);
autoPtr<fvMesh> meshPtr = loadOrCreateMesh
(
runTime,
regionName,
masterInstDir,
haveMesh[Pstream::myProcNo()]
IOobject
(
regionName,
masterInstDir,
runTime,
Foam::IOobject::MUST_READ
)
);
fvMesh& mesh = meshPtr();
// Print some statistics

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -216,7 +216,6 @@ bool intersectSurfaces
int main(int argc, char *argv[])
{
argList::validArgs.clear();
argList::noParallel();
argList::validArgs.append("action");
argList::validArgs.append("surface file");