Merge remote branch 'OpenCFD/master' into olesenm

Mark Olesen
2011-01-03 14:01:16 +01:00
14 changed files with 376 additions and 139 deletions

View File

@ -53,6 +53,7 @@ Description
#include "fvMeshDistribute.H"
#include "mapDistributePolyMesh.H"
#include "IOobjectList.H"
#include "globalIndex.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -107,7 +108,9 @@ autoPtr<fvMesh> createMesh
fvMesh& mesh = meshPtr();
// Determine patches.
// Sync patches
// ~~~~~~~~~~~~
if (Pstream::master())
{
// Send patches
@ -118,14 +121,14 @@ autoPtr<fvMesh> createMesh
slave++
)
{
OPstream toSlave(Pstream::blocking, slave);
OPstream toSlave(Pstream::scheduled, slave);
toSlave << mesh.boundaryMesh();
}
}
else
{
// Receive patches
IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
PtrList<entry> patchEntries(fromMaster);
if (haveMesh)
@ -224,6 +227,58 @@ autoPtr<fvMesh> createMesh
}
}
// Determine zones
// ~~~~~~~~~~~~~~~
wordList pointZoneNames(mesh.pointZones().names());
Pstream::scatter(pointZoneNames);
wordList faceZoneNames(mesh.faceZones().names());
Pstream::scatter(faceZoneNames);
wordList cellZoneNames(mesh.cellZones().names());
Pstream::scatter(cellZoneNames);
if (!haveMesh)
{
// Add the zones
List<pointZone*> pz(pointZoneNames.size());
forAll(pointZoneNames, i)
{
pz[i] = new pointZone
(
pointZoneNames[i],
labelList(0),
i,
mesh.pointZones()
);
}
List<faceZone*> fz(faceZoneNames.size());
forAll(faceZoneNames, i)
{
fz[i] = new faceZone
(
faceZoneNames[i],
labelList(0),
boolList(0),
i,
mesh.faceZones()
);
}
List<cellZone*> cz(cellZoneNames.size());
forAll(cellZoneNames, i)
{
cz[i] = new cellZone
(
cellZoneNames[i],
labelList(0),
i,
mesh.cellZones()
);
}
mesh.addZones(pz, fz, cz);
}
if (!haveMesh)
{
// We created a dummy mesh file above. Delete it.
@ -236,6 +291,21 @@ autoPtr<fvMesh> createMesh
mesh.clearOut();
mesh.globalData();
// Do some checks.
// Check if the boundary definition is unique
mesh.boundaryMesh().checkDefinition(true);
// Check if the boundary processor patches are correct
mesh.boundaryMesh().checkParallelSync(true);
// Check names of zones are equal
mesh.cellZones().checkDefinition(true);
mesh.cellZones().checkParallelSync(true);
mesh.faceZones().checkDefinition(true);
mesh.faceZones().checkParallelSync(true);
mesh.pointZones().checkDefinition(true);
mesh.pointZones().checkParallelSync(true);
return meshPtr;
}
@ -292,6 +362,59 @@ void printMeshData(Ostream& os, const polyMesh& mesh)
<< " face zones: " << mesh.faceZones().size() << nl
<< " cell zones: " << mesh.cellZones().size() << nl;
}
void printMeshData(const polyMesh& mesh)
{
// Collect all data on master
globalIndex globalCells(mesh.nCells());
labelListList patchNeiProcNo(Pstream::nProcs());
labelListList patchSize(Pstream::nProcs());
const labelList& pPatches = mesh.globalData().processorPatches();
patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size());
patchSize[Pstream::myProcNo()].setSize(pPatches.size());
forAll(pPatches, i)
{
const processorPolyPatch& ppp = refCast<const processorPolyPatch>
(
mesh.boundaryMesh()[pPatches[i]]
);
patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo();
patchSize[Pstream::myProcNo()][i] = ppp.size();
}
Pstream::gatherList(patchNeiProcNo);
Pstream::gatherList(patchSize);
// Print stats
globalIndex globalBoundaryFaces(mesh.nFaces()-mesh.nInternalFaces());
for (label procI = 0; procI < Pstream::nProcs(); procI++)
{
Info<< endl
<< "Processor " << procI << nl
<< " Number of cells = " << globalCells.localSize(procI)
<< endl;
label nProcFaces = 0;
const labelList& nei = patchNeiProcNo[procI];
forAll(patchNeiProcNo[procI], i)
{
Info<< " Number of faces shared with processor "
<< patchNeiProcNo[procI][i] << " = " << patchSize[procI][i]
<< endl;
nProcFaces += patchSize[procI][i];
}
Info<< " Number of processor patches = " << nei.size() << nl
<< " Number of processor faces = " << nProcFaces << nl
<< " Number of boundary faces = "
<< globalBoundaryFaces.localSize(procI) << endl;
}
}
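Annotation: the new printMeshData gathers the per-processor statistics with Pstream::gatherList before printing from the master. A minimal, hypothetical standalone illustration of the fill-my-own-slot-then-gather pattern used above (the data sent is illustrative only; all Pstream calls are ones already appearing in this commit):

    #include "argList.H"
    #include "Pstream.H"
    #include "labelList.H"
    #include "IOstreams.H"

    using namespace Foam;

    int main(int argc, char *argv[])
    {
        #include "setRootCase.H"

        // One slot per processor; each processor fills only its own slot
        labelListList procData(Pstream::nProcs());
        procData[Pstream::myProcNo()] = labelList(1, Pstream::myProcNo());

        // After the gather the master holds every processor's contribution
        Pstream::gatherList(procData);

        if (Pstream::master())
        {
            forAll(procData, procI)
            {
                Info<< "Processor " << procI << " sent " << procData[procI]
                    << endl;
            }
        }

        return 0;
    }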
// Debugging: write volScalarField with decomposition for post processing.
@ -507,6 +630,7 @@ void compareFields
int main(int argc, char *argv[])
{
# include "addRegionOption.H"
# include "addOverwriteOption.H"
argList::addOption
(
"mergeTol",
@ -539,6 +663,8 @@ int main(int argc, char *argv[])
}
Info<< "Using mesh subdirectory " << meshSubDir << nl << endl;
const bool overwrite = args.optionFound("overwrite");
// Get time instance directory. Since not all processors have meshes
// just use the master one everywhere.
@ -573,9 +699,11 @@ int main(int argc, char *argv[])
);
fvMesh& mesh = meshPtr();
Pout<< "Read mesh:" << endl;
printMeshData(Pout, mesh);
Pout<< endl;
//Pout<< "Read mesh:" << endl;
//printMeshData(Pout, mesh);
//Pout<< endl;
IOdictionary decompositionDict
(
@ -618,7 +746,10 @@ int main(int argc, char *argv[])
}
// Dump decomposition to volScalarField
writeDecomposition("decomposition", mesh, finalDecomp);
if (!overwrite)
{
writeDecomposition("decomposition", mesh, finalDecomp);
}
// Create 0 sized mesh to do all the generation of zero sized
@ -796,12 +927,20 @@ int main(int argc, char *argv[])
// Print a bit
Pout<< "After distribution mesh:" << endl;
printMeshData(Pout, mesh);
Pout<< endl;
// Print some statistics
Info<< "After distribution:" << endl;
printMeshData(mesh);
runTime++;
Pout<< "Writing redistributed mesh to " << runTime.timeName() << nl << endl;
if (!overwrite)
{
runTime++;
}
else
{
mesh.setInstance(masterInstDir);
}
Info<< "Writing redistributed mesh to " << runTime.timeName() << nl << endl;
mesh.write();
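Annotation: the patch-sync streams in createMesh above switch from Pstream::blocking to Pstream::scheduled. A minimal, hypothetical parallel test utility showing the same master-to-slave exchange pattern in isolation (the program and payload names are illustrative; only Pstream/OPstream/IPstream calls already used in these hunks appear):

    #include "argList.H"
    #include "Pstream.H"
    #include "OPstream.H"
    #include "IPstream.H"
    #include "IOstreams.H"

    using namespace Foam;

    int main(int argc, char *argv[])
    {
        #include "setRootCase.H"

        if (Pstream::master())
        {
            // Master opens one scheduled stream per slave and sends a token
            for
            (
                int slave = Pstream::firstSlave();
                slave <= Pstream::lastSlave();
                slave++
            )
            {
                OPstream toSlave(Pstream::scheduled, slave);
                toSlave << word("syncToken");
            }
        }
        else
        {
            // Every slave receives its token from the master
            IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
            word token(fromMaster);
            Pout<< "Received " << token << " from master" << endl;
        }

        return 0;
    }

Run under mpirun with the -parallel option; in serial the slave loop is simply empty.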

View File

@ -392,13 +392,17 @@ int main(int argc, char *argv[])
problemFaces.append(faceI);
}
}
OFstream str("badFaces");
Info<< "Dumping bad quality faces to " << str.name() << endl
<< "Paste this into the input for surfaceSubset" << nl
<< nl << endl;
if (!problemFaces.empty())
{
OFstream str("badFaces");
str << problemFaces;
Info<< "Dumping bad quality faces to " << str.name() << endl
<< "Paste this into the input for surfaceSubset" << nl
<< nl << endl;
str << problemFaces;
}
}
}

View File

@ -34,7 +34,7 @@ Foam::globalIndex::globalIndex(const label localSize)
labelList localSizes(Pstream::nProcs());
localSizes[Pstream::myProcNo()] = localSize;
Pstream::gatherList(localSizes);
Pstream::scatterList(localSizes); // just to balance out comms
Pstream::scatterList(localSizes);
label offset = 0;
offsets_[0] = 0;
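Annotation: globalIndex, newly #included in two of the files above, packages exactly this gather/scatter of local sizes into an offset table. A minimal, hypothetical test program exercising only the accessors that appear elsewhere in these hunks (localSize, offset, isLocal, whichProcID):

    #include "argList.H"
    #include "globalIndex.H"
    #include "IOstreams.H"

    using namespace Foam;

    int main(int argc, char *argv[])
    {
        #include "setRootCase.H"

        // Pretend every processor owns a different number of cells
        const label nLocalCells = 3 + Pstream::myProcNo();

        // Gathers all local sizes and builds the global offset table
        globalIndex globalCells(nLocalCells);

        Pout<< "local cells       : " << globalCells.localSize() << nl
            << "my offset         : " << globalCells.offset(Pstream::myProcNo()) << nl
            << "global 0 is local : " << globalCells.isLocal(0) << nl
            << "global 0 owned by : " << globalCells.whichProcID(0) << endl;

        return 0;
    }

With three processors the local sizes 3, 4, 5 give offsets 0, 3, 7 (total 12), so global element 0 is local only on processor 0.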

View File

@ -307,7 +307,7 @@ Foam::mapDistribute::mapDistribute
{
label globalIndex = elements[i];
if (!globalNumbering.isLocal(globalIndex))
if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex))
{
label procI = globalNumbering.whichProcID(globalIndex);
nNonLocal[procI]++;
@ -329,7 +329,7 @@ Foam::mapDistribute::mapDistribute
{
label globalIndex = elements[i];
if (!globalNumbering.isLocal(globalIndex))
if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex))
{
label procI = globalNumbering.whichProcID(globalIndex);
label index = globalNumbering.toLocal(procI, globalIndex);
@ -452,7 +452,7 @@ Foam::mapDistribute::mapDistribute
{
label globalIndex = cCells[i];
if (!globalNumbering.isLocal(globalIndex))
if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex))
{
label procI = globalNumbering.whichProcID(globalIndex);
nNonLocal[procI]++;
@ -482,7 +482,7 @@ Foam::mapDistribute::mapDistribute
{
label globalIndex = cCells[i];
if (!globalNumbering.isLocal(globalIndex))
if (globalIndex != -1 && !globalNumbering.isLocal(globalIndex))
{
label procI = globalNumbering.whichProcID(globalIndex);
label index = globalNumbering.toLocal(procI, globalIndex);
@ -603,6 +603,10 @@ Foam::label Foam::mapDistribute::renumber
const label globalI
)
{
if (globalI == -1)
{
return globalI;
}
if (globalNumbering.isLocal(globalI))
{
return globalNumbering.toLocal(globalI);

View File

@ -123,7 +123,7 @@ public:
);
//- Construct from list of (possibly) remote elements in globalIndex
// numbering. Determines compact numbering (see above) and
// numbering (or -1). Determines compact numbering (see above) and
// distribute map to get data into this ordering and renumbers the
// elements to be in compact numbering.
mapDistribute
@ -133,7 +133,7 @@ public:
List<Map<label> >& compactMap
);
//- Special variant that works with the into sorted into bins
//- Special variant that works with the info sorted into bins
// according to local indices. E.g. think cellCells where
// cellCells[localCellI] is a list of global cells
mapDistribute

View File

@ -156,10 +156,7 @@ inline Point Foam::tetrahedron<Point, PointRef>::circumCentre() const
if (Foam::mag(denom) < ROOTVSMALL)
{
WarningIn("Point tetrahedron<Point, PointRef>::circumCentre() const")
<< "Degenerate tetrahedron:" << nl << *this << nl
<<"Returning centre instead of circumCentre."
<< endl;
// Degenerate tetrahedron, returning centre instead of circumCentre.
return centre();
}
@ -186,11 +183,7 @@ inline Foam::scalar Foam::tetrahedron<Point, PointRef>::circumRadius() const
if (Foam::mag(denom) < ROOTVSMALL)
{
WarningIn("Point tetrahedron<Point, PointRef>::circumCentre() const")
<< "Degenerate tetrahedron:" << nl << *this << nl
<< "Returning GREAT for circumRadius."
<< endl;
// Degenerate tetrahedron, returning GREAT for circumRadius.
return GREAT;
}
@ -272,16 +265,7 @@ Foam::scalar Foam::tetrahedron<Point, PointRef>::barycentric
if (Foam::mag(detT) < SMALL)
{
WarningIn
(
"List<scalar> tetrahedron<Point, PointRef>::barycentric"
"("
"const point& pt"
") const"
)
<< "Degenerate tetrahedron:" << nl << *this << nl
<< "Returning 1/4 barycentric coordinates."
<< endl;
// Degenerate tetrahedron, returning 1/4 barycentric coordinates.
bary = List<scalar>(4, 0.25);

View File

@ -121,10 +121,7 @@ inline Point Foam::triangle<Point, PointRef>::circumCentre() const
if (Foam::mag(c) < ROOTVSMALL)
{
WarningIn("Point triangle<Point, PointRef>::circumCentre() const")
<< "Degenerate triangle:" << nl << *this << nl
<< "Returning centre instead of circumCentre."
<< endl;
// Degenerate triangle, returning centre instead of circumCentre.
return centre();
}
@ -147,10 +144,7 @@ inline Foam::scalar Foam::triangle<Point, PointRef>::circumRadius() const
if (Foam::mag(denom) < VSMALL)
{
WarningIn("scalar triangle<Point, PointRef>::circumRadius() const")
<< "Degenerate triangle:" << nl << *this << nl
<< "Returning GREAT for circumRadius."
<< endl;
// Degenerate triangle, returning GREAT for circumRadius.
return GREAT;
}
@ -266,16 +260,7 @@ Foam::scalar Foam::triangle<Point, PointRef>::barycentric
if (Foam::mag(denom) < SMALL)
{
WarningIn
(
"List<scalar> triangle<Point, PointRef>::barycentric"
"("
"const point& pt"
") const"
)
<< "Degenerate triangle:" << nl << *this << nl
<< "Returning 1/3 barycentric coordinates."
<< endl;
// Degenerate triangle, returning 1/3 barycentric coordinates.
bary = List<scalar>(3, 1.0/3.0);
@ -540,20 +525,7 @@ Foam::pointHit Foam::triangle<Point, PointRef>::nearestPointClassify
{
if ((d1 - d3) < ROOTVSMALL)
{
WarningIn
(
"pointHit triangle<Point, PointRef>::nearestPointClassify"
"("
"const point& p,"
"label& nearType,"
"label& nearLabel"
") const"
)
<< "Degenerate triangle:" << nl << *this << nl
<< "d1, d3: " << d1 << ", " << d3 << endl;
// For d1 = d3, a_ and b_ are likely coincident.
// Degenerate triangle, for d1 = d3, a_ and b_ are likely coincident
nearType = POINT;
nearLabel = 0;
return pointHit(false, a_, Foam::mag(a_ - p), true);
@ -589,20 +561,7 @@ Foam::pointHit Foam::triangle<Point, PointRef>::nearestPointClassify
{
if ((d2 - d6) < ROOTVSMALL)
{
WarningIn
(
"pointHit triangle<Point, PointRef>::nearestPointClassify"
"("
"const point& p,"
"label& nearType,"
"label& nearLabel"
") const"
)
<< "Degenerate triangle:" << nl << *this << nl
<< "d2, d6: " << d2 << ", " << d6 << endl;
// For d2 = d6, a_ and c_ are likely coincident.
// Degenerate triangle, for d2 = d6, a_ and c_ are likely coincident
nearType = POINT;
nearLabel = 0;
return pointHit(false, a_, Foam::mag(a_ - p), true);
@ -624,21 +583,8 @@ Foam::pointHit Foam::triangle<Point, PointRef>::nearestPointClassify
{
if (((d4 - d3) + (d5 - d6)) < ROOTVSMALL)
{
WarningIn
(
"pointHit triangle<Point, PointRef>::nearestPointClassify"
"("
"const point& p,"
"label& nearType,"
"label& nearLabel"
") const"
)
<< "Degenerate triangle:" << nl << *this << nl
<< "(d4 - d3), (d6 - d5): " << (d4 - d3) << ", " << (d6 - d5)
<< endl;
// For (d4 - d3) = (d6 - d5), b_ and c_ are likely coincident.
// Degenerate triangle, for (d4 - d3) = (d6 - d5), b_ and c_ are
// likely coincident
nearType = POINT;
nearLabel = 1;
return pointHit(false, b_, Foam::mag(b_ - p), true);
@ -658,19 +604,8 @@ Foam::pointHit Foam::triangle<Point, PointRef>::nearestPointClassify
if ((va + vb + vc) < ROOTVSMALL)
{
WarningIn
(
"pointHit triangle<Point, PointRef>::nearestPointClassify"
"("
"const point& p,"
"label& nearType,"
"label& nearLabel"
") const"
)
<< "Degenerate triangle:" << nl << *this << nl
<< "va, vb, vc: " << va << ", " << vb << ", " << vc
<< endl;
// Degenerate triangle, return the centre because no edge or points are
// closest
point nearPt = centre();
nearType = NONE,
nearLabel = -1;

View File

@ -104,7 +104,7 @@ inline Foam::scalar Foam::Particle<ParticleType>::tetLambda
if (mag(lambdaNumerator) < SMALL)
{
// Track starts on the face, and is potentially
// parallel to it. +-SMALL)/+-SMALL is not a good
// parallel to it. +-SMALL/+-SMALL is not a good
// comparison, return 0.0, in anticipation of tet
// centre correction.

View File

@ -108,8 +108,8 @@ License
#include "addToRunTimeSelectionTable.H"
#include "floatScalar.H"
#include "Time.H"
#include "cyclicPolyPatch.H"
#include "OFstream.H"
#include "globalIndex.H"
extern "C"
{
@ -162,7 +162,6 @@ void Foam::scotchDecomp::check(const int retVal, const char* str)
}
// Call scotch with options from dictionary.
Foam::label Foam::scotchDecomp::decompose
(
const fileName& meshPath,
@ -172,6 +171,128 @@ Foam::label Foam::scotchDecomp::decompose
List<int>& finalDecomp
)
{
if (!Pstream::parRun())
{
decomposeOneProc
(
meshPath,
adjncy,
xadj,
cWeights,
finalDecomp
);
}
else
{
if (debug)
{
Info<< "scotchDecomp : running in parallel."
<< " Decomposing all of graph on master processor." << endl;
}
globalIndex globalCells(xadj.size()-1);
label nTotalConnections = returnReduce(adjncy.size(), sumOp<label>());
// Send all to master. Use scheduled to save some storage.
if (Pstream::master())
{
Field<int> allAdjncy(nTotalConnections);
Field<int> allXadj(globalCells.size()+1);
scalarField allWeights(globalCells.size());
// Insert my own
label nTotalCells = 0;
forAll(cWeights, cellI)
{
allXadj[nTotalCells] = xadj[cellI];
allWeights[nTotalCells++] = cWeights[cellI];
}
nTotalConnections = 0;
forAll(adjncy, i)
{
allAdjncy[nTotalConnections++] = adjncy[i];
}
for (int slave=1; slave<Pstream::nProcs(); slave++)
{
IPstream fromSlave(Pstream::scheduled, slave);
Field<int> nbrAdjncy(fromSlave);
Field<int> nbrXadj(fromSlave);
scalarField nbrWeights(fromSlave);
// Append.
//label procStart = nTotalCells;
forAll(nbrXadj, cellI)
{
allXadj[nTotalCells] = nTotalConnections+nbrXadj[cellI];
allWeights[nTotalCells++] = nbrWeights[cellI];
}
// No need to renumber xadj since already global.
forAll(nbrAdjncy, i)
{
allAdjncy[nTotalConnections++] = nbrAdjncy[i];
}
}
allXadj[nTotalCells] = nTotalConnections;
Field<int> allFinalDecomp;
decomposeOneProc
(
meshPath,
allAdjncy,
allXadj,
allWeights,
allFinalDecomp
);
// Send allFinalDecomp back
for (int slave=1; slave<Pstream::nProcs(); slave++)
{
OPstream toSlave(Pstream::scheduled, slave);
toSlave << SubField<int>
(
allFinalDecomp,
globalCells.localSize(slave),
globalCells.offset(slave)
);
}
// Get my own part (always first)
finalDecomp = SubField<int>
(
allFinalDecomp,
globalCells.localSize()
);
}
else
{
// Send my part of the graph (already in global numbering)
{
OPstream toMaster(Pstream::scheduled, Pstream::masterNo());
toMaster<< adjncy << SubField<int>(xadj, xadj.size()-1)
<< cWeights;
}
// Receive back decomposition
IPstream fromMaster(Pstream::scheduled, Pstream::masterNo());
fromMaster >> finalDecomp;
}
}
return 0;
}
// Call scotch with options from dictionary.
Foam::label Foam::scotchDecomp::decomposeOneProc
(
const fileName& meshPath,
const List<int>& adjncy,
const List<int>& xadj,
const scalarField& cWeights,
List<int>& finalDecomp
)
{
// Dump graph
if (decompositionDict_.found("scotchCoeffs"))
@ -247,7 +368,8 @@ Foam::label Foam::scotchDecomp::decompose
// Check for externally provided cellweights and if so initialise weights
scalar minWeights = gMin(cWeights);
// Note: min, not gMin since routine runs on master only.
scalar minWeights = min(cWeights);
if (cWeights.size() > 0)
{
if (minWeights <= 0)
@ -432,7 +554,7 @@ Foam::labelList Foam::scotchDecomp::decompose
<< exit(FatalError);
}
// Calculate local or global (if Pstream::parRun()) connectivity
CompactListList<label> cellCells;
calcCellCells(mesh, identity(mesh.nCells()), mesh.nCells(), cellCells);
@ -475,6 +597,7 @@ Foam::labelList Foam::scotchDecomp::decompose
<< exit(FatalError);
}
// Calculate local or global (if Pstream::parRun()) connectivity
CompactListList<label> cellCells;
calcCellCells(mesh, agglom, agglomPoints.size(), cellCells);
@ -528,7 +651,14 @@ Foam::labelList Foam::scotchDecomp::decompose
// Decompose using weights
List<int> finalDecomp;
decompose(".", cellCells.m(), cellCells.offsets(), cWeights, finalDecomp);
decompose
(
"scotch",
cellCells.m(),
cellCells.offsets(),
cWeights,
finalDecomp
);
// Copy back to labelList
labelList decomp(finalDecomp.size());
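Annotation: the adjncy/xadj pair threaded through decompose and decomposeOneProc above is the usual compressed (CSR) adjacency layout expected by Scotch: xadj holds nCells+1 entries and cell i's neighbours occupy adjncy[xadj[i]] .. adjncy[xadj[i+1]-1], which is why the master writes the closing entry allXadj[nTotalCells] = nTotalConnections. A hypothetical worked example for a one-dimensional chain of three cells:

    // Cells 0 - 1 - 2 connected in a chain
    List<int> xadj(4);
    xadj[0] = 0;    // neighbours of cell 0 start at adjncy[0]
    xadj[1] = 1;    // neighbours of cell 1 start at adjncy[1]
    xadj[2] = 3;    // neighbours of cell 2 start at adjncy[3]
    xadj[3] = 4;    // one past the last neighbour

    List<int> adjncy(4);
    adjncy[0] = 1;                  // cell 0 : neighbour 1
    adjncy[1] = 0;  adjncy[2] = 2;  // cell 1 : neighbours 0 and 2
    adjncy[3] = 1;                  // cell 2 : neighbour 1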

View File

@ -25,7 +25,10 @@ Class
Foam::scotchDecomp
Description
Scotch domain decomposition
Scotch domain decomposition.
When run in parallel will collect the whole graph on to the master,
decompose and send back. Run ptscotchDecomp for proper distributed
decomposition.
SourceFiles
scotchDecomp.C
@ -62,6 +65,16 @@ class scotchDecomp
List<int>& finalDecomp
);
//- Decompose non-parallel
label decomposeOneProc
(
const fileName& meshPath,
const List<int>& adjncy,
const List<int>& xadj,
const scalarField& cWeights,
List<int>& finalDecomp
);
//- Disallow default bitwise copy construct and assignment
void operator=(const scotchDecomp&);
scotchDecomp(const scotchDecomp&);
@ -88,8 +101,8 @@ public:
virtual bool parallelAware() const
{
// Metis does not know about proc boundaries
return false;
// Knows about coupled boundaries
return true;
}
//- Return for every coordinate the wanted processor number. Use the

View File

@ -51,7 +51,7 @@ Foam::scalar Foam::streamLineParticle::calcSubCycleDeltaT
scalar fraction = testParticle.trackToFace(position()+dt*U, td);
td.keepParticle = oldKeepParticle;
td.switchProcessor = oldSwitchProcessor;
// Adapt the dt to subdivide the trajectory into 4 substeps.
// Adapt the dt to subdivide the trajectory into substeps.
return dt*fraction/td.nSubCycle_;
}

View File

@ -28,10 +28,13 @@ Description
Mixed boundary condition for temperature, to be used for heat-transfer
on back-to-back baffles.
If my temperature is T1, neighbour is T2:
T1 > T2: my side becomes fixedValue T2 bc, other side becomes fixedGradient.
Specifies gradient and temperature such that the equations are the same
on both sides:
- refGradient = zero gradient
- refValue = neighbour value
- mixFraction = nbrKDelta / (nbrKDelta + myKDelta())
where KDelta is heat-transfer coefficient K * deltaCoeffs
Example usage:
myInterfacePatchName
@ -54,12 +57,6 @@ Description
Note: runs in parallel with arbitrary decomposition. Uses directMapped
functionality to calculate exchange.
Note: lags interface data so both sides use same data.
- problem: schedule to calculate average would interfere
with standard processor swaps.
- so: updateCoeffs sets both to same Twall. Only need to do
this for last outer iteration but don't have access to this.
SourceFiles
turbulentTemperatureCoupledBaffleMixedFvPatchScalarField.C
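Annotation: a short consistency check of the coefficients listed in the Description above, assuming the standard mixed-BC face evaluation Tf = f*refValue + (1 - f)*(Tc + refGrad/deltaCoeffs), with f the mixFraction, Tc the local (owner-side) cell value and Tnbr the neighbour value:

    With refGrad = 0, refValue = Tnbr and f = Knbr/(Knbr + Kown),
    where Kown = myKDelta and Knbr = nbrKDelta (kappa*deltaCoeffs on
    each side), the face value becomes

        Tf = (Kown*Tc + Knbr*Tnbr)/(Kown + Knbr)

    so the conductive flux is identical seen from either side:

        Kown*(Tf - Tc) = Knbr*(Tnbr - Tf)
                       = Kown*Knbr*(Tnbr - Tc)/(Kown + Knbr)

which is what makes "the equations the same on both sides".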

View File

@ -18,13 +18,22 @@ runApplication decomposePar
cp system/decomposeParDict-par system/decomposeParDict
runParallel snappyHexMesh 2 -overwrite
# Add wildcard entries for meshes patches since not preserved
# Add wildcard entries for meshed patches since not preserved
# by decomposePar. Notice -literalRE option to add wildcard itself
# without evaluation.
runParallel changeDictionary 2 -literalRE
runParallel setSet 2 -batch makeZones
runParallel windSimpleFoam 2
cp system/decomposeParDict-4proc system/decomposeParDict
runParallel redistributeMeshPar 4 -overwrite
runParallel renumberMesh 4 -overwrite
# Add wildcard entries for meshed patches since not preserved
# by decomposePar. Notice -literalRE option to add wildcard itself
# without evaluation.
#runParallel changeDictionary 4 -literalRE
runParallel setSet 4 -batch makeZones
runParallel windSimpleFoam 4
runApplication reconstructParMesh -constant
runApplication reconstructPar

View File

@ -0,0 +1,22 @@
/*--------------------------------*- C++ -*----------------------------------*\
| =========                 |                                                 |
| \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox           |
|  \\    /   O peration     | Version:  dev                                   |
|   \\  /    A nd           | Web:      www.OpenFOAM.com                      |
|    \\/     M anipulation  |                                                 |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object decomposeParDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
numberOfSubdomains 4;
method ptscotch;
// ************************************************************************* //