merging master into local branch

This commit is contained in:
andy
2009-06-11 17:29:19 +01:00
705 changed files with 29639 additions and 6777 deletions

View File

@ -816,6 +816,8 @@ int main(int argc, char *argv[])
// Pre-filtering: flip "owner" boundary or wrongly oriented internal
// faces and move to neighbour
boolList fm(faces.size(), false);
forAll (faces, facei)
{
if
@ -824,6 +826,7 @@ int main(int argc, char *argv[])
|| (neighbour[facei] != -1 && owner[facei] > neighbour[facei])
)
{
fm[facei] = true;
faces[facei] = faces[facei].reverseFace();
Swap(owner[facei], neighbour[facei]);
}
@ -1175,7 +1178,7 @@ int main(int argc, char *argv[])
false, // flipFaceFlux
-1, // patchID
faceZonei, // zoneID
false // zoneFlip
fm[facei] // zoneFlip
);
}
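
The hunk above records which faces the pre-filtering pass reverses, so that when a face is later re-added to its faceZone the zoneFlip flag can follow the flip (fm[facei] instead of a hard-coded false). A minimal stand-alone C++ sketch of that bookkeeping, using std::vector in place of OpenFOAM's boolList/faceList; names and values are illustrative, and the boundary-face clause elided by the hunk is omitted:

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

using Face = std::vector<int>;   // stand-in for Foam::face (a list of point labels)

int main()
{
    std::vector<Face> faces     = {{0, 1, 2, 3}, {4, 5, 6}};
    std::vector<int>  owner     = {9, 2};
    std::vector<int>  neighbour = {3, -1};        // -1: boundary face, no neighbour

    // fm[facei] remembers whether face facei was flipped.
    std::vector<bool> fm(faces.size(), false);

    for (std::size_t facei = 0; facei < faces.size(); ++facei)
    {
        // Wrongly oriented internal face: owner label larger than neighbour label.
        if (neighbour[facei] != -1 && owner[facei] > neighbour[facei])
        {
            fm[facei] = true;
            // Mimic face::reverseFace(): keep the first vertex, reverse the rest.
            std::reverse(faces[facei].begin() + 1, faces[facei].end());
            std::swap(owner[facei], neighbour[facei]);
        }
    }

    // Later, when re-adding facei to its zone, the orientation flag follows it:
    //     zoneFlip = fm[facei];
    std::cout << std::boolalpha << "face 0 flipped: " << fm[0] << '\n';   // true
    return 0;
}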

View File

@ -96,7 +96,7 @@ void writeMesh
const fvMesh& mesh = meshRefiner.mesh();
meshRefiner.printMeshInfo(debug, msg);
Info<< "Writing mesh to time " << mesh.time().timeName() << endl;
Info<< "Writing mesh to time " << meshRefiner.timeName() << endl;
meshRefiner.write(meshRefinement::MESH|meshRefinement::SCALARLEVELS, "");
if (debug & meshRefinement::OBJINTERSECTIONS)
@ -104,7 +104,7 @@ void writeMesh
meshRefiner.write
(
meshRefinement::OBJINTERSECTIONS,
mesh.time().path()/mesh.time().timeName()
mesh.time().path()/meshRefiner.timeName()
);
}
Info<< "Written mesh in = "
@ -115,6 +115,7 @@ void writeMesh
int main(int argc, char *argv[])
{
argList::validOptions.insert("overwrite", "");
# include "setRootCase.H"
# include "createTime.H"
runTime.functionObjects().off();
@ -123,6 +124,9 @@ int main(int argc, char *argv[])
Info<< "Read mesh in = "
<< runTime.cpuTimeIncrement() << " s" << endl;
const bool overwrite = args.optionFound("overwrite");
// Check patches and faceZones are synchronised
mesh.boundaryMesh().checkParallelSync(true);
meshRefinement::checkCoupledFaceZones(mesh);
@ -170,6 +174,13 @@ int main(int argc, char *argv[])
const dictionary& layerDict = meshDict.subDict("addLayersControls");
const scalar mergeDist = getMergeDistance
(
mesh,
readScalar(meshDict.lookup("mergeTolerance"))
);
// Debug
// ~~~~~
@ -192,8 +203,9 @@ int main(int argc, char *argv[])
IOobject
(
"abc", // dummy name
mesh.time().constant(), // directory
"triSurface", // instance
//mesh.time().constant(), // instance
mesh.time().findInstance("triSurface", word::null),// instance
"triSurface", // local
mesh.time(), // registry
IOobject::MUST_READ,
IOobject::NO_WRITE
@ -235,6 +247,34 @@ int main(int argc, char *argv[])
<< mesh.time().cpuTimeIncrement() << " s" << nl << endl;
// Refinement engine
// ~~~~~~~~~~~~~~~~~
Info<< nl
<< "Determining initial surface intersections" << nl
<< "-----------------------------------------" << nl
<< endl;
// Main refinement engine
meshRefinement meshRefiner
(
mesh,
mergeDist, // tolerance used in sorting coordinates
overwrite, // overwrite mesh files?
surfaces, // for surface intersection refinement
shells // for volume (inside/outside) refinement
);
Info<< "Calculated surface intersections in = "
<< mesh.time().cpuTimeIncrement() << " s" << nl << endl;
// Some stats
meshRefiner.printMeshInfo(debug, "Initial mesh");
meshRefiner.write
(
debug&meshRefinement::OBJINTERSECTIONS,
mesh.time().path()/meshRefiner.timeName()
);
// Add all the surface regions as patches
@ -265,9 +305,8 @@ int main(int argc, char *argv[])
forAll(regNames, i)
{
label patchI = meshRefinement::addPatch
label patchI = meshRefiner.addMeshedPatch
(
mesh,
regNames[i],
wallPolyPatch::typeName
);
@ -308,45 +347,10 @@ int main(int argc, char *argv[])
<< exit(FatalError);
}
const scalar mergeDist = getMergeDistance
(
mesh,
readScalar(meshDict.lookup("mergeTolerance"))
);
// Mesh distribution engine (uses tolerance to reconstruct meshes)
fvMeshDistribute distributor(mesh, mergeDist);
// Refinement engine
// ~~~~~~~~~~~~~~~~~
Info<< nl
<< "Determining initial surface intersections" << nl
<< "-----------------------------------------" << nl
<< endl;
// Main refinement engine
meshRefinement meshRefiner
(
mesh,
mergeDist, // tolerance used in sorting coordinates
surfaces, // for surface intersection refinement
shells // for volume (inside/outside) refinement
);
Info<< "Calculated surface intersections in = "
<< mesh.time().cpuTimeIncrement() << " s" << nl << endl;
// Some stats
meshRefiner.printMeshInfo(debug, "Initial mesh");
meshRefiner.write
(
debug&meshRefinement::OBJINTERSECTIONS,
mesh.time().path()/mesh.time().timeName()
);
@ -370,6 +374,11 @@ int main(int argc, char *argv[])
// Refinement parameters
refinementParameters refineParams(refineDict);
if (!overwrite)
{
const_cast<Time&>(mesh.time())++;
}
refineDriver.doRefine(refineDict, refineParams, wantSnap, motionDict);
writeMesh
@ -391,6 +400,11 @@ int main(int argc, char *argv[])
// Snap parameters
snapParameters snapParams(snapDict);
if (!overwrite)
{
const_cast<Time&>(mesh.time())++;
}
snapDriver.doSnap(snapDict, motionDict, snapParams);
writeMesh
@ -403,15 +417,16 @@ int main(int argc, char *argv[])
if (wantLayers)
{
autoLayerDriver layerDriver
(
meshRefiner,
globalToPatch
);
autoLayerDriver layerDriver(meshRefiner);
// Layer addition parameters
layerParameters layerParams(layerDict, mesh.boundaryMesh());
if (!overwrite)
{
const_cast<Time&>(mesh.time())++;
}
layerDriver.doLayers
(
layerDict,
@ -435,7 +450,7 @@ int main(int argc, char *argv[])
Info<< "End\n" << endl;
return 0;
return(0);
}
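
The -overwrite handling added in this file follows one pattern for all three stages (refine, snap, addLayers): unless -overwrite was given, the Time object is advanced before the stage runs, so each stage writes its mesh into a fresh time directory (hence writeMesh now asks meshRefiner.timeName() rather than mesh.time().timeName()). A minimal sketch of that guard, with a hypothetical TimeLike stand-in for Foam::Time:

#include <iostream>
#include <string>

// Illustrative stand-in for OpenFOAM's Time; only what the guard needs.
struct TimeLike
{
    int index = 0;
    TimeLike& operator++() { ++index; return *this; }     // advance to next time
    std::string timeName() const { return std::to_string(index); }
};

// Advance the time before a meshing stage unless its result should overwrite
// the current instance (mirrors the const_cast<Time&>(mesh.time())++ guard).
void runStage(TimeLike& runTime, const bool overwrite, const std::string& stage)
{
    if (!overwrite)
    {
        ++runTime;
    }
    std::cout << stage << " writes to time " << runTime.timeName() << '\n';
}

int main()
{
    TimeLike runTime;
    const bool overwrite = false;                 // e.g. from the -overwrite option
    runStage(runTime, overwrite, "refine");       // time 1
    runStage(runTime, overwrite, "snap");         // time 2
    runStage(runTime, overwrite, "addLayers");    // time 3
    return 0;
}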

View File

@ -56,13 +56,12 @@ processorVolPatchFieldDecomposer
const unallocLabelList& addressingSlice
)
:
addressing_(addressingSlice.size()),
weights_(addressingSlice.size())
directAddressing_(addressingSlice.size())
{
const labelList& own = mesh.faceOwner();
const labelList& neighb = mesh.faceNeighbour();
forAll (addressing_, i)
forAll (directAddressing_, i)
{
// Subtract one to align addressing.
label ai = mag(addressingSlice[i]) - 1;
@ -74,18 +73,14 @@ processorVolPatchFieldDecomposer
// on the parallel boundary.
// Give face the value of the neighbour.
addressing_[i].setSize(1);
weights_[i].setSize(1);
weights_[i][0] = 1.0;
if (addressingSlice[i] >= 0)
{
// I have the owner so use the neighbour value
addressing_[i][0] = neighb[ai];
directAddressing_[i] = neighb[ai];
}
else
{
addressing_[i][0] = own[ai];
directAddressing_[i] = own[ai];
}
}
else
@ -96,12 +91,7 @@ processorVolPatchFieldDecomposer
// up the different (face) list of data), so I will
// just grab the value from the owner cell
addressing_[i].setSize(1);
weights_[i].setSize(1);
addressing_[i][0] = own[ai];
weights_[i][0] = 1.0;
directAddressing_[i] = own[ai];
}
}
}

View File

@ -96,15 +96,16 @@ public:
};
//- Processor patch field decomposer class
//- Processor patch field decomposer class. Maps either owner or
// neighbour data (no interpolation anymore - processorFvPatchField
// holds neighbour data)
class processorVolPatchFieldDecomposer
:
public fvPatchFieldMapper
{
// Private data
labelListList addressing_;
scalarListList weights_;
labelList directAddressing_;
public:
@ -120,27 +121,23 @@ public:
label size() const
{
return addressing_.size();
return directAddressing_.size();
}
bool direct() const
{
return false;
return true;
}
const labelListList& addressing() const
const unallocLabelList& directAddressing() const
{
return addressing_;
}
const scalarListList& weights() const
{
return weights_;
return directAddressing_;
}
};
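
With this change the processor patch mapper advertises direct() == true and hands back a single cell label per face, instead of one-element addressing/weights lists that always carried a weight of 1.0. A stand-alone sketch of the mapping rule, with std::vector standing in for the label lists; the internal-face versus boundary-face distinction made in the constructor is left out:

#include <cstdlib>
#include <iostream>
#include <vector>

int main()
{
    // Signed, one-based slice addressing as used by the decomposer:
    //   value >= 0 : this side holds the owner, so map from the neighbour cell
    //   value <  0 : map from the owner cell
    std::vector<int> addressingSlice = {3, -5};

    // Owner/neighbour cell labels per (undecomposed) face; values illustrative.
    std::vector<int> own    = {0, 1, 2, 3, 4, 10};
    std::vector<int> neighb = {5, 6, 7, 8, 9, 11};

    // New scheme: a single label per face, no weights needed.
    std::vector<int> directAddressing(addressingSlice.size());

    for (std::size_t i = 0; i < addressingSlice.size(); ++i)
    {
        const int ai = std::abs(addressingSlice[i]) - 1;   // back to zero-based
        directAddressing[i] = (addressingSlice[i] >= 0) ? neighb[ai] : own[ai];
    }

    for (int celli : directAddressing)
    {
        std::cout << celli << '\n';   // prints 7, then 4
    }
    return 0;
}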
//- Processor patch field decomposer class
//- Processor patch field decomposer class. Surface field is assumed
// to have direction (so manipulates sign when mapping)
class processorSurfacePatchFieldDecomposer
:
public fvPatchFieldMapper

View File

@ -178,7 +178,6 @@ void writeAllDataBinary
}
template<class Type>
void writeAllFaceData
(
@ -275,7 +274,7 @@ template<class Type>
bool writePatchField
(
const Foam::Field<Type>& pf,
const Foam::label patchI,
const Foam::label patchi,
const Foam::label ensightPatchI,
const Foam::faceSets& boundaryFaceSet,
const Foam::ensightMesh::nFacePrimitives& nfp,
@ -335,7 +334,7 @@ template<class Type>
bool writePatchFieldBinary
(
const Foam::Field<Type>& pf,
const Foam::label patchI,
const Foam::label patchi,
const Foam::label ensightPatchI,
const Foam::faceSets& boundaryFaceSet,
const Foam::ensightMesh::nFacePrimitives& nfp,
@ -406,34 +405,27 @@ void writePatchField
const Time& runTime = eMesh.mesh().time();
const List<faceSets>& boundaryFaceSets = eMesh.boundaryFaceSets();
const HashTable<labelList>& allPatchNames = eMesh.allPatchNames();
const HashTable<label>& patchIndices = eMesh.patchIndices();
const wordList& allPatchNames = eMesh.allPatchNames();
const List<labelList>& allPatchProcs = eMesh.allPatchProcs();
const HashTable<ensightMesh::nFacePrimitives>&
nPatchPrims = eMesh.nPatchPrims();
label patchI = -1;
if (patchIndices.found(patchName))
{
patchI = patchIndices.find(patchName)();
}
label ensightPatchI = eMesh.patchPartOffset();
for
(
HashTable<labelList>::const_iterator iter =
allPatchNames.begin();
iter != allPatchNames.end();
++iter
)
label patchi = -1;
forAll(allPatchNames, i)
{
if (iter.key() == patchName) break;
if (allPatchNames[i] == patchName)
{
patchi = i;
break;
}
ensightPatchI++;
}
const labelList& patchProcessors = allPatchNames.find(patchName)();
const labelList& patchProcessors = allPatchProcs[patchi];
word pfName = patchName + '.' + fieldName;
@ -472,14 +464,14 @@ void writePatchField
ensightFile << pTraits<Type>::typeName << nl;
}
if (patchI >= 0)
if (patchi >= 0)
{
writePatchField
(
pf,
patchI,
patchi,
ensightPatchI,
boundaryFaceSets[patchI],
boundaryFaceSets[patchi],
nPatchPrims.find(patchName)(),
patchProcessors,
ensightFile
@ -507,6 +499,7 @@ void writePatchField
}
}
template<class Type>
void ensightFieldAscii
(
@ -527,8 +520,8 @@ void ensightFieldAscii
const cellSets& meshCellSets = eMesh.meshCellSets();
const List<faceSets>& boundaryFaceSets = eMesh.boundaryFaceSets();
const HashTable<labelList>& allPatchNames = eMesh.allPatchNames();
const HashTable<label>& patchIndices = eMesh.patchIndices();
const wordList& allPatchNames = eMesh.allPatchNames();
const List<labelList>& allPatchProcs = eMesh.allPatchProcs();
const wordHashSet& patchNames = eMesh.patchNames();
const HashTable<ensightMesh::nFacePrimitives>&
nPatchPrims = eMesh.nPatchPrims();
@ -623,30 +616,23 @@ void ensightFieldAscii
label ensightPatchI = eMesh.patchPartOffset();
for
(
HashTable<labelList>::const_iterator iter = allPatchNames.begin();
iter != allPatchNames.end();
++iter
)
forAll(allPatchNames, patchi)
{
const word& patchName = iter.key();
const labelList& patchProcessors = iter();
const word& patchName = allPatchNames[patchi];
const labelList& patchProcessors = allPatchProcs[patchi];
if (patchNames.empty() || patchNames.found(patchName))
{
if (patchIndices.found(patchName))
if (mesh.boundary()[patchi].size())
{
label patchI = patchIndices.find(patchName)();
if
(
writePatchField
(
vf.boundaryField()[patchI],
patchI,
vf.boundaryField()[patchi],
patchi,
ensightPatchI,
boundaryFaceSets[patchI],
boundaryFaceSets[patchi],
nPatchPrims.find(patchName)(),
patchProcessors,
ensightFile
@ -708,8 +694,8 @@ void ensightFieldBinary
const cellSets& meshCellSets = eMesh.meshCellSets();
const List<faceSets>& boundaryFaceSets = eMesh.boundaryFaceSets();
const HashTable<labelList>& allPatchNames = eMesh.allPatchNames();
const HashTable<label>& patchIndices = eMesh.patchIndices();
const wordList& allPatchNames = eMesh.allPatchNames();
const List<labelList>& allPatchProcs = eMesh.allPatchProcs();
const wordHashSet& patchNames = eMesh.patchNames();
const HashTable<ensightMesh::nFacePrimitives>&
nPatchPrims = eMesh.nPatchPrims();
@ -726,7 +712,11 @@ void ensightFieldBinary
{
// set the filename of the ensight file
fileName ensightFileName(timeFile + "." + fieldObject.name());
ensightFilePtr = new std::ofstream((postProcPath/ensightFileName).c_str(), ios_base::out | ios_base::binary | ios_base::trunc);
ensightFilePtr = new std::ofstream
(
(postProcPath/ensightFileName).c_str(),
ios_base::out | ios_base::binary | ios_base::trunc
);
// Check on file opened?
}
@ -787,38 +777,62 @@ void ensightFieldBinary
}
}
writeAllDataBinary("penta6", vf, prisms, meshCellSets.nPrisms, ensightFile);
writeAllDataBinary("pyramid5", vf, pyrs, meshCellSets.nPyrs, ensightFile);
writeAllDataBinary("tetra4", vf, tets, meshCellSets.nTets, ensightFile);
writeAllDataBinary("nfaced", vf, polys, meshCellSets.nPolys, ensightFile);
writeAllDataBinary
(
"penta6",
vf,
prisms,
meshCellSets.nPrisms,
ensightFile
);
writeAllDataBinary
(
"pyramid5",
vf,
pyrs,
meshCellSets.nPyrs,
ensightFile
);
writeAllDataBinary
(
"tetra4",
vf,
tets,
meshCellSets.nTets,
ensightFile
);
writeAllDataBinary
(
"nfaced",
vf,
polys,
meshCellSets.nPolys,
ensightFile
);
}
label ensightPatchI = eMesh.patchPartOffset();
for
(
HashTable<labelList>::const_iterator iter = allPatchNames.begin();
iter != allPatchNames.end();
++iter
)
forAll(allPatchNames, patchi)
{
const word& patchName = iter.key();
const labelList& patchProcessors = iter();
const word& patchName = allPatchNames[patchi];
const labelList& patchProcessors = allPatchProcs[patchi];
if (patchNames.empty() || patchNames.found(patchName))
{
if (patchIndices.found(patchName))
if (mesh.boundary()[patchi].size())
{
label patchI = patchIndices.find(patchName)();
if
(
writePatchFieldBinary
(
vf.boundaryField()[patchI],
patchI,
vf.boundaryField()[patchi],
patchi,
ensightPatchI,
boundaryFaceSets[patchI],
boundaryFaceSets[patchi],
nPatchPrims.find(patchName)(),
patchProcessors,
ensightFile
@ -859,6 +873,7 @@ void ensightFieldBinary
}
}
template<class Type>
void ensightField
(

View File

@ -28,6 +28,7 @@ License
#include "Time.H"
#include "ensightMesh.H"
#include "fvMesh.H"
#include "globalMeshData.H"
#include "PstreamCombineReduceOps.H"
#include "processorPolyPatch.H"
#include "cellModeller.H"
@ -40,43 +41,37 @@ License
namespace Foam
{
class concatPatchNames
{
public:
void operator()
(
HashTable<labelList>& x,
const HashTable<labelList>& y
) const
//- Proxy-class to hold the patch processor list combination operator
class concatPatchProcs
{
forAllConstIter(HashTable<labelList>, y, iter)
public:
void operator()
(
List<labelList>& x,
const List<labelList>& y
) const
{
HashTable<labelList>::iterator xiter = x.find(iter.key());
if (xiter == x.end())
forAll(y, i)
{
x.insert(iter.key(), iter());
}
else
{
labelList& xPatches = xiter();
const labelList& yPatches = iter();
const labelList& yPatches = y[i];
label offset = xPatches.size();
xPatches.setSize(offset + yPatches.size());
forAll(yPatches, i)
if (yPatches.size())
{
xPatches[i + offset] = yPatches[i];
labelList& xPatches = x[i];
label offset = xPatches.size();
xPatches.setSize(offset + yPatches.size());
forAll(yPatches, i)
{
xPatches[i + offset] = yPatches[i];
}
}
}
}
}
};
};
} // End namespace Foam
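
allPatchNames_ is now a plain per-patch wordList and allPatchProcs_ gathers, for every patch, the processors on which that patch actually has faces; concatPatchProcs is the operator that combineReduce applies to concatenate the per-processor contributions. A stand-alone sketch of that combination step, with std::vector standing in for List<labelList> and two hypothetical processor contributions:

#include <cstddef>
#include <iostream>
#include <vector>

using labelList     = std::vector<int>;
using labelListList = std::vector<labelList>;

// Combination operator: append y's processor ids onto x's, patch by patch
// (what concatPatchProcs does when handed to combineReduce).
struct ConcatPatchProcs
{
    void operator()(labelListList& x, const labelListList& y) const
    {
        for (std::size_t i = 0; i < y.size(); ++i)
        {
            if (!y[i].empty())
            {
                x[i].insert(x[i].end(), y[i].begin(), y[i].end());
            }
        }
    }
};

int main()
{
    // Per-patch processor lists from two processors (illustrative):
    labelListList procs0 = {{0}, {},  {0}};   // proc 0 has faces on patches 0 and 2
    labelListList procs1 = {{},  {1}, {1}};   // proc 1 has faces on patches 1 and 2

    ConcatPatchProcs()(procs0, procs1);       // the reduction step

    for (const labelList& p : procs0)
    {
        for (int proci : p) std::cout << proci << ' ';
        std::cout << '\n';                    // prints "0", "1", "0 1"
    }
    return 0;
}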
@ -95,7 +90,7 @@ Foam::ensightMesh::ensightMesh
meshCellSets_(mesh_.nCells()),
boundaryFaceSets_(mesh_.boundary().size()),
allPatchNames_(0),
patchIndices_(0),
allPatchProcs_(0),
patchNames_(0),
nPatchPrims_(0)
{
@ -109,32 +104,24 @@ Foam::ensightMesh::ensightMesh
if (!args.optionFound("noPatches"))
{
forAll (mesh_.boundaryMesh(), patchI)
{
if
(
typeid(mesh_.boundaryMesh()[patchI])
!= typeid(processorPolyPatch)
)
{
if (!allPatchNames_.found(mesh_.boundaryMesh()[patchI].name()))
{
allPatchNames_.insert
(
mesh_.boundaryMesh()[patchI].name(),
labelList(1, Pstream::myProcNo())
);
allPatchNames_ = wordList::subList
(
mesh_.boundaryMesh().names(), mesh_.boundary().size()
- mesh_.globalData().processorPatches().size()
);
patchIndices_.insert
(
mesh_.boundaryMesh()[patchI].name(),
patchI
);
}
allPatchProcs_.setSize(allPatchNames_.size());
forAll (allPatchProcs_, patchi)
{
if (mesh_.boundary()[patchi].size())
{
allPatchProcs_[patchi].setSize(1);
allPatchProcs_[patchi][0] = Pstream::myProcNo();
}
}
combineReduce(allPatchNames_, concatPatchNames());
combineReduce(allPatchProcs_, concatPatchProcs());
if (args.optionFound("patches"))
{
@ -142,7 +129,7 @@ Foam::ensightMesh::ensightMesh
if (patchNameList.empty())
{
patchNameList = allPatchNames_.toc();
patchNameList = allPatchNames_;
}
forAll (patchNameList, i)
@ -230,15 +217,15 @@ Foam::ensightMesh::ensightMesh
if (!args.optionFound("noPatches"))
{
forAll (mesh.boundary(), patchI)
forAll (mesh.boundary(), patchi)
{
if (mesh.boundary()[patchI].size())
if (mesh.boundary()[patchi].size())
{
const polyPatch& p = mesh.boundaryMesh()[patchI];
const polyPatch& p = mesh.boundaryMesh()[patchi];
labelList& tris = boundaryFaceSets_[patchI].tris;
labelList& quads = boundaryFaceSets_[patchI].quads;
labelList& polys = boundaryFaceSets_[patchI].polys;
labelList& tris = boundaryFaceSets_[patchi].tris;
labelList& quads = boundaryFaceSets_[patchi].quads;
labelList& polys = boundaryFaceSets_[patchi].polys;
tris.setSize(p.size());
quads.setSize(p.size());
@ -274,21 +261,19 @@ Foam::ensightMesh::ensightMesh
}
forAllConstIter(HashTable<labelList>, allPatchNames_, iter)
forAll(allPatchNames_, patchi)
{
const word& patchName = iter.key();
const word& patchName = allPatchNames_[patchi];
nFacePrimitives nfp;
if (patchNames_.empty() || patchNames_.found(patchName))
{
if (patchIndices_.found(patchName))
if (mesh.boundary()[patchi].size())
{
label patchI = patchIndices_.find(patchName)();
nfp.nPoints = mesh.boundaryMesh()[patchI].localPoints().size();
nfp.nTris = boundaryFaceSets_[patchI].tris.size();
nfp.nQuads = boundaryFaceSets_[patchI].quads.size();
nfp.nPolys = boundaryFaceSets_[patchI].polys.size();
nfp.nPoints = mesh.boundaryMesh()[patchi].localPoints().size();
nfp.nTris = boundaryFaceSets_[patchi].tris.size();
nfp.nQuads = boundaryFaceSets_[patchi].quads.size();
nfp.nPolys = boundaryFaceSets_[patchi].polys.size();
}
}
@ -1052,13 +1037,13 @@ void Foam::ensightMesh::writeAscii
label ensightPatchI = patchPartOffset_;
forAllConstIter(HashTable<labelList>, allPatchNames_, iter)
forAll(allPatchNames_, patchi)
{
const labelList& patchProcessors = iter();
const word& patchName = allPatchNames_[patchi];
const labelList& patchProcessors = allPatchProcs_[patchi];
if (patchNames_.empty() || patchNames_.found(iter.key()))
if (patchNames_.empty() || patchNames_.found(patchName))
{
const word& patchName = iter.key();
const nFacePrimitives& nfp = nPatchPrims_.find(patchName)();
const labelList *trisPtr = NULL;
@ -1068,14 +1053,13 @@ void Foam::ensightMesh::writeAscii
const pointField *patchPointsPtr = NULL;
const faceList *patchFacesPtr = NULL;
if (patchIndices_.found(iter.key()))
if (mesh_.boundary()[patchi].size())
{
label patchI = patchIndices_.find(iter.key())();
const polyPatch& p = mesh_.boundaryMesh()[patchI];
const polyPatch& p = mesh_.boundaryMesh()[patchi];
trisPtr = &boundaryFaceSets_[patchI].tris;
quadsPtr = &boundaryFaceSets_[patchI].quads;
polysPtr = &boundaryFaceSets_[patchI].polys;
trisPtr = &boundaryFaceSets_[patchi].tris;
quadsPtr = &boundaryFaceSets_[patchi].quads;
polysPtr = &boundaryFaceSets_[patchi].polys;
patchPointsPtr = &(p.localPoints());
patchFacesPtr = &(p.localFaces());
@ -1265,7 +1249,7 @@ void Foam::ensightMesh::writeBinary
{
writeEnsDataBinary("part",ensightGeometryFile);
writeEnsDataBinary(1,ensightGeometryFile);
writeEnsDataBinary("FOAM cells",ensightGeometryFile);
writeEnsDataBinary("internalMesh",ensightGeometryFile);
writeEnsDataBinary("coordinates",ensightGeometryFile);
writeEnsDataBinary(nPoints,ensightGeometryFile);
@ -1379,14 +1363,14 @@ void Foam::ensightMesh::writeBinary
label ensightPatchI = patchPartOffset_;
label iCount = 0;
forAllConstIter(HashTable<labelList>, allPatchNames_, iter)
forAll(allPatchNames_, patchi)
{
iCount ++;
const labelList& patchProcessors = iter();
const word& patchName = allPatchNames_[patchi];
const labelList& patchProcessors = allPatchProcs_[patchi];
if (patchNames_.empty() || patchNames_.found(iter.key()))
if (patchNames_.empty() || patchNames_.found(patchName))
{
const word& patchName = iter.key();
const nFacePrimitives& nfp = nPatchPrims_.find(patchName)();
const labelList *trisPtr = NULL;
@ -1396,14 +1380,13 @@ void Foam::ensightMesh::writeBinary
const pointField *patchPointsPtr = NULL;
const faceList *patchFacesPtr = NULL;
if (patchIndices_.found(iter.key()))
if (mesh_.boundary()[patchi].size())
{
label patchI = patchIndices_.find(iter.key())();
const polyPatch& p = mesh_.boundaryMesh()[patchI];
const polyPatch& p = mesh_.boundaryMesh()[patchi];
trisPtr = &boundaryFaceSets_[patchI].tris;
quadsPtr = &boundaryFaceSets_[patchI].quads;
polysPtr = &boundaryFaceSets_[patchI].polys;
trisPtr = &boundaryFaceSets_[patchi].tris;
quadsPtr = &boundaryFaceSets_[patchi].quads;
polysPtr = &boundaryFaceSets_[patchi].polys;
patchPointsPtr = &(p.localPoints());
patchFacesPtr = &(p.localFaces());
@ -1424,7 +1407,7 @@ void Foam::ensightMesh::writeBinary
writeEnsDataBinary("part",ensightGeometryFile);
writeEnsDataBinary(ensightPatchI++,ensightGeometryFile);
//writeEnsDataBinary(patchName.c_str(),ensightGeometryFile);
writeEnsDataBinary(iter.key().c_str(),ensightGeometryFile);
writeEnsDataBinary(patchName.c_str(),ensightGeometryFile);
writeEnsDataBinary("coordinates",ensightGeometryFile);
writeEnsDataBinary(nfp.nPoints,ensightGeometryFile);

View File

@ -91,9 +91,9 @@ class ensightMesh
List<faceSets> boundaryFaceSets_;
HashTable<labelList> allPatchNames_;
wordList allPatchNames_;
HashTable<label> patchIndices_;
List<labelList> allPatchProcs_;
wordHashSet patchNames_;
@ -269,14 +269,14 @@ public:
return boundaryFaceSets_;
}
const HashTable<labelList>& allPatchNames() const
const wordList& allPatchNames() const
{
return allPatchNames_;
}
const HashTable<label>& patchIndices() const
const List<labelList>& allPatchProcs() const
{
return patchIndices_;
return allPatchProcs_;
}
const wordHashSet& patchNames() const

View File

@ -0,0 +1,3 @@
particleTracks.C
EXE = $(FOAM_APPBIN)/particleTracks

View File

@ -0,0 +1,7 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude
EXE_LIBS = \
-lfiniteVolume \
-llagrangian

View File

@ -0,0 +1,22 @@
IOdictionary propsDict
(
IOobject
(
"particleTrackProperties",
runTime.constant(),
mesh,
IOobject::MUST_READ
)
);
word cloudName(propsDict.lookup("cloudName"));
label sampleFrequency(readLabel(propsDict.lookup("sampleFrequency")));
// outputMode: compositeFile, filePerTrack
//word outputmode(propsDict.lookup("outputMode"))
label maxPositions(readLabel(propsDict.lookup("maxPositions")));
// outputFormat: raw, vtk
//word outputFormat(propsDict.lookup("outputFormat"));
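
This createFields.H expects a constant/particleTrackProperties dictionary with the three entries read above (outputMode and outputFormat are still commented out). A minimal example, FoamFile header omitted and values purely illustrative:

cloudName        kinematicCloud;   // cloud directory under <time>/lagrangian/
sampleFrequency  10;               // keep every 10th particle as a track
maxPositions     1000;             // cap on positions stored per track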

View File

@ -0,0 +1,270 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2008-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Application
particleTracks
Description
Generates a VTK file of particle tracks for cases that were computed using
a tracked-parcel-type cloud
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "Cloud.H"
#include "IOdictionary.H"
#include "fvMesh.H"
#include "Time.H"
#include "timeSelector.H"
#include "OFstream.H"
#include "passiveParticle.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
{
# include "setRootCase.H"
# include "createTime.H"
instantList timeDirs = timeSelector::select0(runTime, args);
# include "createMesh.H"
# include "createFields.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "Scanning times to determine track data" << nl << endl;
labelList maxIds(Pstream::nProcs(), -1);
forAll(timeDirs, timeI)
{
runTime.setTime(timeDirs[timeI], timeI);
Info<< "Time = " << runTime.timeName() << endl;
IOobject origProcHeader
(
"origProc",
runTime.timeName(),
cloud::prefix/cloudName,
mesh,
IOobject::MUST_READ
);
IOobject idHeader
(
"id",
runTime.timeName(),
cloud::prefix/cloudName,
mesh,
IOobject::MUST_READ
);
if (idHeader.headerOk() && origProcHeader.headerOk())
{
IOField<label> origProc(origProcHeader);
IOField<label> id(idHeader);
forAll(id, i)
{
maxIds[origProc[i]] = max(maxIds[origProc[i]], id[i]);
}
}
}
Pstream::listCombineGather(maxIds, maxOp<label>());
Pstream::listCombineScatter(maxIds);
labelList numIds = maxIds + 1;
// calc starting ids for particles on each processor
List<label> startIds(numIds.size(), 0);
for (label i = 0; i < numIds.size()-1; i++)
{
startIds[i+1] += startIds[i] + numIds[i];
}
label nParticle = startIds[startIds.size()-1] + numIds[startIds.size()-1];
// number of tracks to generate
label nTracks = nParticle/sampleFrequency;
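// Illustrative worked example (values made up): with two processors and
// maxIds = (4 2), numIds = (5 3) and startIds = (0 5), so nParticle = 8;
// with sampleFrequency = 4 this gives nTracks = 2, built from the particles
// whose global id (startIds[origProc[i]] + id[i]) is a multiple of 4,
// i.e. global ids 0 and 4.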
// storage for all particle tracks
List<DynamicList<vector> > allTracks(nTracks);
Info<< "\nGenerating " << nTracks << " particle tracks" << nl << endl;
forAll(timeDirs, timeI)
{
runTime.setTime(timeDirs[timeI], timeI);
Info<< "Time = " << runTime.timeName() << endl;
IOobject positionsHeader
(
"positions",
runTime.timeName(),
cloud::prefix/cloudName,
mesh,
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
);
IOobject origProcHeader
(
"origProc",
runTime.timeName(),
cloud::prefix/cloudName,
mesh,
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
);
IOobject idHeader
(
"id",
runTime.timeName(),
cloud::prefix/cloudName,
mesh,
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
);
if
(
positionsHeader.headerOk()
&& origProcHeader.headerOk()
&& idHeader.headerOk()
)
{
Info<< " Reading particle positions" << endl;
Cloud<passiveParticle> myCloud(mesh, cloudName, false);
Info<< " Reading particle id" << endl;
IOField<label> id(idHeader);
Info<< " Reading particle origProc" << endl;
IOField<label> origProc(origProcHeader);
// collect the track data on the master processor
label i = 0;
List<pointField> allPositions(Pstream::nProcs());
allPositions[Pstream::myProcNo()].setSize(myCloud.size());
forAllConstIter(Cloud<passiveParticle>, myCloud, iter)
{
allPositions[Pstream::myProcNo()][i++] = iter().position();
}
Pstream::gatherList(allPositions);
List<labelList> allIds(Pstream::nProcs());
allIds[Pstream::myProcNo()] = id;
Pstream::gatherList(allIds);
List<labelList> allOrigProcs(Pstream::nProcs());
allOrigProcs[Pstream::myProcNo()] = origProc;
Pstream::gatherList(allOrigProcs);
Info<< " Constructing tracks" << nl << endl;
if (Pstream::master())
{
forAll(allPositions, procI)
{
forAll(allPositions[procI], i)
{
label globalId =
startIds[allOrigProcs[procI][i]]
+ allIds[procI][i];
if (globalId % sampleFrequency == 0)
{
label trackId = globalId/sampleFrequency;
if (allTracks[trackId].size() < maxPositions)
{
allTracks[trackId].append
(
allPositions[procI][i]
);
}
}
}
}
}
}
else
{
Info<< " No particles read" << nl << endl;
}
}
if (Pstream::master())
{
Info<< "\nWriting particle tracks" << nl << endl;
OFstream vtkTracks("particleTracks.vtk");
// Total number of points in tracks + 1 per track
label nPoints = 0;
forAll(allTracks, trackI)
{
nPoints += allTracks[trackI].size();
}
vtkTracks
<< "# vtk DataFile Version 2.0" << nl
<< "particleTracks" << nl
<< "ASCII" << nl
<< "DATASET POLYDATA" << nl
<< "POINTS " << nPoints << " float" << nl;
// Write track points to file
forAll(allTracks, trackI)
{
forAll(allTracks[trackI], i)
{
const vector& pt = allTracks[trackI][i];
vtkTracks << pt.x() << ' ' << pt.y() << ' ' << pt.z() << nl;
}
}
// write track (line) connectivity to file
vtkTracks << "LINES " << nTracks << ' ' << nPoints+nTracks << nl;
// Write ids of track points to file
label globalPtI = 0;
forAll(allTracks, trackI)
{
vtkTracks << allTracks[trackI].size();
forAll(allTracks[trackI], i)
{
vtkTracks << ' ' << globalPtI;
globalPtI++;
}
vtkTracks << nl;
}
Info<< "end" << endl;
}
return 0;
}
// ************************************************************************* //
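
For a small case like the one in the worked example above (two tracks, here with three positions each), the particleTracks.vtk written by this loop would look as follows; the coordinates are made up, but the header, POINTS and LINES layout match what the code writes:

# vtk DataFile Version 2.0
particleTracks
ASCII
DATASET POLYDATA
POINTS 6 float
0.1 0 0
0.2 0 0
0.3 0 0
0.1 0.1 0
0.2 0.1 0
0.3 0.1 0
LINES 2 8
3 0 1 2
3 3 4 5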

View File

@ -0,0 +1,3 @@
surfaceRedistributePar.C
EXE = $(FOAM_APPBIN)/surfaceRedistributePar

View File

@ -0,0 +1,7 @@
EXE_INC = \
-I$(LIB_SRC)/triSurface/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \
-lmeshTools \
-ltriSurface

View File

@ -0,0 +1,295 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2007 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Application
surfaceRedistributePar
Description
(Re)distribution of triSurface. Either takes an undecomposed surface
or an already decomposed surface and redistributes it so each processor
has all triangles that overlap its mesh.
Note
- best decomposition option is hierarchGeomDecomp since
it guarantees square decompositions.
- triangles might be present on multiple processors.
- merging uses geometric tolerance so take care with writing precision.
\*---------------------------------------------------------------------------*/
#include "treeBoundBox.H"
#include "FixedList.H"
#include "argList.H"
#include "Time.H"
#include "polyMesh.H"
#include "distributedTriSurfaceMesh.H"
#include "mapDistribute.H"
#include "triSurfaceFields.H"
#include "Pair.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Print on master all the per-processor surface stats.
void writeProcStats
(
const triSurface& s,
const List<List<treeBoundBox> >& meshBb
)
{
// Determine surface bounding boxes, faces, points
List<treeBoundBox> surfBb(Pstream::nProcs());
{
surfBb[Pstream::myProcNo()] = boundBox(s.points(), false);
Pstream::gatherList(surfBb);
Pstream::scatterList(surfBb);
}
labelList nPoints(Pstream::nProcs());
nPoints[Pstream::myProcNo()] = s.points().size();
Pstream::gatherList(nPoints);
Pstream::scatterList(nPoints);
labelList nFaces(Pstream::nProcs());
nFaces[Pstream::myProcNo()] = s.size();
Pstream::gatherList(nFaces);
Pstream::scatterList(nFaces);
forAll(surfBb, procI)
{
const List<treeBoundBox>& bbs = meshBb[procI];
Info<< "processor" << procI << endl
<< "\tMesh bounds : " << bbs[0] << nl;
for (label i = 1; i < bbs.size(); i++)
{
Info<< "\t " << bbs[i]<< nl;
}
Info<< "\tSurface bounding box : " << surfBb[procI] << nl
<< "\tTriangles : " << nFaces[procI] << nl
<< "\tVertices : " << nPoints[procI]
<< endl;
}
Info<< endl;
}
// Main program:
int main(int argc, char *argv[])
{
argList::validArgs.append("triSurfaceMesh");
argList::validArgs.append("distributionType");
argList::validOptions.insert("keepNonMapped", "");
# include "setRootCase.H"
# include "createTime.H"
runTime.functionObjects().off();
fileName surfFileName(args.additionalArgs()[0]);
Info<< "Reading surface from " << surfFileName << nl << endl;
const word distType(args.additionalArgs()[1]);
Info<< "Using distribution method "
<< distributedTriSurfaceMesh::distributionTypeNames_[distType]
<< " " << distType << nl << endl;
bool keepNonMapped = args.options().found("keepNonMapped");
if (keepNonMapped)
{
Info<< "Preserving surface outside of mesh bounds." << nl << endl;
}
else
{
Info<< "Removing surface outside of mesh bounds." << nl << endl;
}
if (!Pstream::parRun())
{
FatalErrorIn(args.executable())
<< "Please run this program on the decomposed case."
<< " It will read surface " << surfFileName
<< " and decompose it such that it overlaps the mesh bounding box."
<< exit(FatalError);
}
# include "createPolyMesh.H"
Random rndGen(653213);
// Determine mesh bounding boxes:
List<List<treeBoundBox> > meshBb(Pstream::nProcs());
{
meshBb[Pstream::myProcNo()] = List<treeBoundBox>
(
1,
treeBoundBox
(
boundBox(mesh.points(), false)
).extend(rndGen, 1E-3)
);
Pstream::gatherList(meshBb);
Pstream::scatterList(meshBb);
}
IOobject io
(
surfFileName, // name
//runTime.findInstance("triSurface", surfFileName), // instance
runTime.constant(), // instance
"triSurface", // local
runTime, // registry
IOobject::MUST_READ,
IOobject::NO_WRITE
);
const fileName actualPath(io.filePath());
fileName localPath(actualPath);
localPath.replace(runTime.rootPath() + '/', "");
if (actualPath == io.objectPath())
{
Info<< "Loading local (decomposed) surface " << localPath << nl <<endl;
}
else
{
Info<< "Loading undecomposed surface " << localPath << nl << endl;
}
// Create dummy dictionary for bounding boxes if it does not exist.
if (!isFile(actualPath / "Dict"))
{
dictionary dict;
dict.add("bounds", meshBb[Pstream::myProcNo()]);
dict.add("distributionType", distType);
dict.add("mergeDistance", SMALL);
IOdictionary ioDict
(
IOobject
(
io.name() + "Dict",
io.instance(),
io.local(),
io.db(),
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
dict
);
Info<< "Writing dummy bounds dictionary to " << ioDict.name()
<< nl << endl;
ioDict.regIOobject::writeObject
(
IOstream::ASCII,
IOstream::currentVersion,
ioDict.time().writeCompression()
);
}
// Load surface
distributedTriSurfaceMesh surfMesh(io);
Info<< "Loaded surface" << nl << endl;
// Generate a test field
{
const triSurface& s = static_cast<const triSurface&>(surfMesh);
autoPtr<triSurfaceVectorField> fcPtr
(
new triSurfaceVectorField
(
IOobject
(
surfMesh.searchableSurface::name(), // name
surfMesh.searchableSurface::instance(), // instance
surfMesh.searchableSurface::local(), // local
surfMesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
surfMesh,
dimLength
)
);
triSurfaceVectorField& fc = fcPtr();
forAll(fc, triI)
{
fc[triI] = s[triI].centre(s.points());
}
// Steal pointer and store object on surfMesh
fcPtr.ptr()->store();
}
// Write per-processor stats
Info<< "Before redistribution:" << endl;
writeProcStats(surfMesh, meshBb);
// Do redistribution
Info<< "Redistributing surface" << nl << endl;
autoPtr<mapDistribute> faceMap;
autoPtr<mapDistribute> pointMap;
surfMesh.distribute
(
meshBb[Pstream::myProcNo()],
keepNonMapped,
faceMap,
pointMap
);
faceMap.clear();
pointMap.clear();
Info<< endl;
// Write per-processor stats
Info<< "After redistribution:" << endl;
writeProcStats(surfMesh, meshBb);
Info<< "Writing surface." << nl << endl;
surfMesh.searchableSurface::write();
Info<< "End\n" << endl;
return 0;
}
// ************************************************************************* //
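
As a usage sketch (illustrative surface name and processor count, assuming a case already decomposed with decomposePar): mpirun -np 4 surfaceRedistributePar mySurface.stl independent -parallel, where the second argument must be one of the names in distributedTriSurfaceMesh::distributionTypeNames_, and adding -keepNonMapped preserves triangles falling outside the local mesh bounds.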