BUG: finite volume did not work before.

mattijs
2010-03-03 10:12:35 +00:00
parent 46ad2e55d6
commit 5ece14ec6f
108 changed files with 3119 additions and 2099 deletions

View File

@@ -296,12 +296,7 @@ label mergePatchFaces
const faceZone& fZone = mesh.faceZones()[zoneID];
zoneFlip = fZone.flipMap()[fZone.whichFace(newMasterI)];
}
labelPair patchIDs = polyTopoChange::whichPatch
(
mesh.boundaryMesh(),
newMasterI
);
label patchID = mesh.boundaryMesh().whichPatch(newMasterI);
Pout<< "Restoring new master face " << newMasterI
<< " to vertices " << setFaceVerts[0] << endl;
@@ -316,11 +311,10 @@ label mergePatchFaces
own, // owner
-1, // neighbour
false, // face flip
patchIDs[0], // patch for face
patchID, // patch for face
false, // remove from zone
zoneID, // zone for face
zoneFlip, // face flip in zone
patchIDs[1] // subPatch
zoneFlip // face flip in zone
)
);
@@ -342,10 +336,9 @@ label mergePatchFaces
-1, // masterEdgeID,
newMasterI, // masterFaceID,
false, // flipFaceFlux,
patchIDs[0], // patchID,
patchID, // patchID,
zoneID, // zoneID,
zoneFlip, // zoneFlip
patchIDs[1] // subPatch
zoneFlip // zoneFlip
)
);
}
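These hunks replace the two-part lookup (a labelPair of patch and sub-patch from polyTopoChange::whichPatch) with the plain boundary-mesh query. A minimal sketch of the restored pattern, assuming a polyMesh named mesh and a face label faceI (hypothetical names; the query is only meaningful for boundary faces):

    // whichPatch returns the boundary patch index; guard against
    // internal faces, for which no patch exists.
    label patchID = -1;
    if (!mesh.isInternalFace(faceI))
    {
        patchID = mesh.boundaryMesh().whichPatch(faceI);
    }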

View File

@@ -45,12 +45,17 @@ defineTypeNameAndDebug(Foam::cellSplitter, 0);
void Foam::cellSplitter::getFaceInfo
(
const label faceI,
labelPair& patchIDs,
label& patchID,
label& zoneID,
label& zoneFlip
) const
{
patchIDs = polyTopoChange::whichPatch(mesh_.boundaryMesh(), faceI);
patchID = -1;
if (!mesh_.isInternalFace(faceI))
{
patchID = mesh_.boundaryMesh().whichPatch(faceI);
}
zoneID = mesh_.faceZones().whichZone(faceI);
@@ -161,16 +166,17 @@ void Foam::cellSplitter::setRefinement
label anchorPoint = mesh_.cellPoints()[cellI][0];
label addedPointI = meshMod.setAction
(
polyAddPoint
label addedPointI =
meshMod.setAction
(
iter(), // point
anchorPoint, // master point
-1, // zone for point
true // supports a cell
)
);
polyAddPoint
(
iter(), // point
anchorPoint, // master point
-1, // zone for point
true // supports a cell
)
);
addedPoints_.insert(cellI, addedPointI);
//Pout<< "Added point " << addedPointI
@@ -199,17 +205,19 @@ void Foam::cellSplitter::setRefinement
// Add other pyramids
for (label i = 1; i < cFaces.size(); i++)
label addedCellI = meshMod.setAction
(
polyAddCell
{
label addedCellI =
meshMod.setAction
(
-1, // master point
-1, // master edge
-1, // master face
cellI, // master cell
-1 // zone
)
);
polyAddCell
(
-1, // master point
-1, // master edge
-1, // master face
cellI, // master cell
-1 // zone
)
);
newCells[i] = addedCellI;
}
@@ -296,8 +304,7 @@ void Foam::cellSplitter::setRefinement
false, // flux flip
-1, // patch for face
-1, // zone for face
false, // face zone flip
-1 // subPatch
false // face zone flip
)
);
}
@@ -343,8 +350,7 @@ void Foam::cellSplitter::setRefinement
false, // flux flip
-1, // patch for face
-1, // zone for face
false, // face zone flip
-1 // subPatch
false // face zone flip
)
);
}
@@ -399,8 +405,7 @@ void Foam::cellSplitter::setRefinement
-1, // patch for face
false, // remove from zone
-1, // zone for face
false, // face zone flip
-1 // subPatch
false // face zone flip
)
);
}
@@ -418,8 +423,7 @@ void Foam::cellSplitter::setRefinement
-1, // patch for face
false, // remove from zone
-1, // zone for face
false, // face zone flip
-1 // subPatch
false // face zone flip
)
);
}
@@ -429,9 +433,9 @@ void Foam::cellSplitter::setRefinement
{
label newOwn = newOwner(faceI, cellToCells);
labelPair patchIDs;
label zoneID, zoneFlip;
getFaceInfo(faceI, patchIDs, zoneID, zoneFlip);
label patchID, zoneID, zoneFlip;
getFaceInfo(faceI, patchID, zoneID, zoneFlip);
meshMod.setAction
(
polyModifyFace
@@ -441,11 +445,10 @@ void Foam::cellSplitter::setRefinement
newOwn, // owner
-1, // neighbour
false, // flux flip
patchIDs[0], // patch for face
patchID, // patch for face
false, // remove from zone
zoneID, // zone for face
zoneFlip, // face zone flip
patchIDs[1]
zoneFlip // face zone flip
)
);
}
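With the new signature, callers of getFaceInfo receive a single patch label instead of a labelPair; a short usage sketch with the names used above:

    label patchID, zoneID, zoneFlip;
    getFaceInfo(faceI, patchID, zoneID, zoneFlip);
    // patchID is -1 for internal faces, the boundary patch index otherwise.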

View File

@@ -39,7 +39,6 @@ SourceFiles
#include "Map.H"
#include "edge.H"
#include "labelPair.H"
#include "typeInfo.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -74,7 +73,7 @@ class cellSplitter
void getFaceInfo
(
const label faceI,
labelPair& patchIDs,
label& patchID,
label& zoneID,
label& zoneFlip
) const;

View File

@@ -1200,8 +1200,7 @@ int main(int argc, char *argv[])
false, // flipFaceFlux
-1, // patchID
faceZonei, // zoneID
fm[facei], // zoneFlip
-1 // subPatchID
fm[facei] // zoneFlip
);
}
@@ -1252,8 +1251,7 @@ int main(int argc, char *argv[])
false, // flipFaceFlux
patchi, // patchID
-1, // zoneID
false, // zoneFlip
-1 // subPatchID
false // zoneFlip
);
// For baffles create the opposite face
@@ -1270,8 +1268,7 @@ int main(int argc, char *argv[])
false, // flipFaceFlux
patchi, // patchID
-1, // zoneID
false, // zoneFlip
-1
false // zoneFlip
);
}
}
@@ -1320,8 +1317,7 @@ int main(int argc, char *argv[])
false, //flipFaceFlux
-1, //patchID
-1, //zoneID
false, //zoneFlip
-1 //subPatchID
false //zoneFlip
);
}
}

View File

@@ -136,8 +136,7 @@ void Foam::extrude2DMesh::setRefinement
false, // flipFaceFlux
-1, // patchID
zoneID, // zoneID
zoneFlip, // zoneFlip
-1 // subPatchID
zoneFlip // zoneFlip
);
}
@@ -174,8 +173,7 @@ void Foam::extrude2DMesh::setRefinement
false, // flipFaceFlux
patchI, // patchID
zoneID, // zoneID
zoneFlip, // zoneFlip
-1 //?TBD subPatchID
zoneFlip // zoneFlip
);
}
}
@@ -238,8 +236,7 @@ void Foam::extrude2DMesh::setRefinement
false, // flipFaceFlux
frontPatchI, // patchID
-1, // zoneID
false, // zoneFlip
-1 //?TDB subPatchID
false // zoneFlip
);
// Offset to create front face.
@@ -258,8 +255,7 @@ void Foam::extrude2DMesh::setRefinement
false, // flipFaceFlux
frontPatchI, // patchID
-1, // zoneID
false, // zoneFlip
-1 //?TDB subPatchID
false // zoneFlip
);
}
}
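All of these hunks drop the trailing subPatchID argument, returning polyAddFace to its ten-argument form. For reference, a hedged sketch of that argument order, reconstructed from the inline comments above (variable names assumed):

    meshMod.setAction
    (
        polyAddFace
        (
            f,          // face
            own,        // owner
            -1,         // neighbour
            -1,         // masterPointID
            -1,         // masterEdgeID
            faceI,      // masterFaceID
            false,      // flipFaceFlux
            patchI,     // patchID
            zoneID,     // zoneID
            zoneFlip    // zoneFlip
        )
    );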

View File

@@ -198,69 +198,55 @@ void dumpCyclicMatch(const fileName& prefix, const polyMesh& mesh)
forAll(patches, patchI)
{
if (isA<cyclicPolyPatch>(patches[patchI]))
if
(
isA<cyclicPolyPatch>(patches[patchI])
&& refCast<const cyclicPolyPatch>(patches[patchI]).owner()
)
{
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(patches[patchI]);
label halfSize = cycPatch.size()/2;
// Dump halves
// Dump patches
{
OFstream str(prefix+cycPatch.name()+"_half0.obj");
OFstream str(prefix+cycPatch.name()+".obj");
Pout<< "Dumping " << cycPatch.name()
<< " half0 faces to " << str.name() << endl;
<< " faces to " << str.name() << endl;
meshTools::writeOBJ
(
str,
static_cast<faceList>
(
SubList<face>
(
cycPatch,
halfSize
)
),
cycPatch,
cycPatch.points()
);
}
const cyclicPolyPatch& nbrPatch = cycPatch.neighbPatch();
{
OFstream str(prefix+cycPatch.name()+"_half1.obj");
Pout<< "Dumping " << cycPatch.name()
<< " half1 faces to " << str.name() << endl;
OFstream str(prefix+nbrPatch.name()+".obj");
Pout<< "Dumping " << nbrPatch.name()
<< " faces to " << str.name() << endl;
meshTools::writeOBJ
(
str,
static_cast<faceList>
(
SubList<face>
(
cycPatch,
halfSize,
halfSize
)
),
cycPatch.points()
nbrPatch,
nbrPatch.points()
);
}
// Lines between corresponding face centres
OFstream str(prefix+cycPatch.name()+"_match.obj");
OFstream str(prefix+cycPatch.name()+nbrPatch.name()+"_match.obj");
label vertI = 0;
Pout<< "Dumping cyclic match as lines between face centres to "
<< str.name() << endl;
for (label faceI = 0; faceI < halfSize; faceI++)
forAll(cycPatch, faceI)
{
const point& fc0 = mesh.faceCentres()[cycPatch.start()+faceI];
meshTools::writeOBJ(str, fc0);
vertI++;
label nbrFaceI = halfSize + faceI;
const point& fc1 =
mesh.faceCentres()[cycPatch.start()+nbrFaceI];
const point& fc1 = mesh.faceCentres()[nbrPatch.start()+faceI];
meshTools::writeOBJ(str, fc1);
vertI++;
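The rewritten dump reflects the new cyclic model: instead of one patch holding both halves back to back, each half is now a patch of its own and only the owner side drives the output. Face i of the owner patch pairs with face i of neighbPatch(), so the match line for one face reduces to (sketch, names as in the hunk):

    const point& fc0 = mesh.faceCentres()[cycPatch.start() + faceI];
    const point& fc1 = mesh.faceCentres()[nbrPatch.start() + faceI];
    // fc0 and fc1 are the centres of a coupled face pair.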
@@ -273,34 +259,13 @@ void dumpCyclicMatch(const fileName& prefix, const polyMesh& mesh)
void separateList
(
const vectorField& separation,
const vector& separation,
UList<vector>& field
)
{
if (separation.size() == 1)
forAll(field, i)
{
// Single value for all.
forAll(field, i)
{
field[i] += separation[0];
}
}
else if (separation.size() == field.size())
{
forAll(field, i)
{
field[i] += separation[i];
}
}
else
{
FatalErrorIn
(
"separateList(const vectorField&, UList<vector>&)"
) << "Sizes of field and transformation not equal. field:"
<< field.size() << " transformation:" << separation.size()
<< abort(FatalError);
field[i] += separation;
}
}
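Restated contiguously (the hunk above interleaves old and new lines), the new separateList applies one uniform translation, since a cyclic patch now carries a single separation vector rather than a per-face vectorField:

    void separateList
    (
        const vector& separation,
        UList<vector>& field
    )
    {
        forAll(field, i)
        {
            field[i] += separation;    // same offset for every element
        }
    }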
@@ -427,13 +392,19 @@ void syncPoints
{
const polyPatch& pp = patches[patchI];
if (isA<cyclicPolyPatch>(pp))
if
(
isA<cyclicPolyPatch>(pp)
&& refCast<const cyclicPolyPatch>(pp).owner()
)
{
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(pp);
const edgeList& coupledPoints = cycPatch.coupledPoints();
const labelList& meshPts = cycPatch.meshPoints();
const cyclicPolyPatch& nbrPatch = cycPatch.neighbPatch();
const labelList& nbrMeshPts = nbrPatch.meshPoints();
pointField half0Values(coupledPoints.size());
@@ -452,14 +423,14 @@ void syncPoints
else if (cycPatch.separated())
{
hasTransformation = true;
const vectorField& v = cycPatch.coupledPolyPatch::separation();
const vector& v = cycPatch.separation();
separateList(v, half0Values);
}
forAll(coupledPoints, i)
{
const edge& e = coupledPoints[i];
label point1 = meshPts[e[1]];
label point1 = nbrMeshPts[e[1]];
points[point1] = half0Values[i];
}
}
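With owner/neighbour cyclic pairs, the coupledPoints edges now bridge two patches: e[0] presumably indexes this patch's meshPoints while e[1] indexes the neighbour's. A sketch of the pairing under that assumption:

    const edge& e = coupledPoints[i];
    label thisPointI = meshPts[e[0]];      // point on the owner half
    label nbrPointI = nbrMeshPts[e[1]];    // matching point on the neighbour half
    points[nbrPointI] = half0Values[i];    // push the transformed value across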
@@ -783,13 +754,8 @@ int main(int argc, char *argv[])
// current separation also includes the normal
// ( separation_ = (nf&(Cr - Cf))*nf ).
// For processor patches:
// - disallow multiple separation/transformation. This basically
// excludes decomposed cyclics. Use the (probably 0) separation
// to align the points.
// For cyclic patches:
// - for separated ones use our own recalculated offset vector
// - for rotational ones use current one.
// - for separated ones use user specified offset vector
forAll(mesh.boundaryMesh(), patchI)
{
@@ -813,51 +779,18 @@ int main(int argc, char *argv[])
if (cycpp.transform() == cyclicPolyPatch::TRANSLATIONAL)
{
// Force to wanted separation
Info<< "On cyclic translation patch " << pp.name()
<< " forcing uniform separation of "
<< cycpp.separationVector() << endl;
const_cast<vectorField&>(cpp.separation()) =
pointField(1, cycpp.separationVector());
const_cast<vector&>(cpp.separation()) =
cycpp.separationVector();
}
else
{
const_cast<vectorField&>(cpp.separation()) =
pointField
(
1,
pp[pp.size()/2].centre(mesh.points())
- pp[0].centre(mesh.points())
);
}
}
else
{
const_cast<vectorField&>(cpp.separation())
.setSize(1);
}
Info<< "On coupled patch " << pp.name()
<< " forcing uniform separation of "
<< cpp.separation() << endl;
}
else if (!cpp.parallel())
{
Info<< "On coupled patch " << pp.name()
<< " forcing uniform rotation of "
<< cpp.forwardT()[0] << endl;
const_cast<tensorField&>
(
cpp.forwardT()
).setSize(1);
const_cast<tensorField&>
(
cpp.reverseT()
).setSize(1);
Info<< "On coupled patch " << pp.name()
<< " forcing uniform rotation of "
<< cpp.forwardT() << endl;
}
}
}

View File

@@ -306,8 +306,6 @@ autoPtr<mapPolyMesh> reorderMesh
labelList patchStarts(patches.size());
labelList oldPatchNMeshPoints(patches.size());
labelListList patchPointMap(patches.size());
labelListList subPatches(patches.size());
labelListList subPatchStarts(patches.size());
forAll(patches, patchI)
{
@@ -315,16 +313,6 @@ autoPtr<mapPolyMesh> reorderMesh
patchStarts[patchI] = patches[patchI].start();
oldPatchNMeshPoints[patchI] = patches[patchI].nPoints();
patchPointMap[patchI] = identity(patches[patchI].nPoints());
if (isA<processorPolyPatch>(patches[patchI]))
{
const processorPolyPatch& ppp = refCast<const processorPolyPatch>
(
patches[patchI]
);
subPatches[patchI] = ppp.patchIDs();
subPatchStarts[patchI] = ppp.starts();
}
}
mesh.resetPrimitives
@@ -335,8 +323,7 @@ autoPtr<mapPolyMesh> reorderMesh
xferMove(newNeighbour),
patchSizes,
patchStarts,
subPatches,
subPatchStarts
true
);
return autoPtr<mapPolyMesh>
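The two sub-patch lists disappear from resetPrimitives; the trailing true is presumably the validBoundary flag of the standard polyMesh::resetPrimitives signature. With the arguments the hunk no longer shows filled in (the points and faces arguments are assumed, a sketch):

    mesh.resetPrimitives
    (
        xferMove(newPoints),     // assumed, not visible in the hunk
        xferMove(newFaces),      // assumed, not visible in the hunk
        xferMove(newOwner),
        xferMove(newNeighbour),
        patchSizes,
        patchStarts,
        true                     // validBoundary
    );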

View File

@@ -556,7 +556,7 @@ void getInterfaceSizes
label cellI = mesh.faceOwner()[i+mesh.nInternalFaces()];
coupledRegion[i] = cellRegion[cellI];
}
syncTools::swapBoundaryFaceList(mesh, coupledRegion, false);
syncTools::swapBoundaryFaceList(mesh, coupledRegion);
forAll(coupledRegion, i)
{
@@ -726,7 +726,7 @@ autoPtr<mapPolyMesh> createRegionMesh
label cellI = mesh.faceOwner()[i+mesh.nInternalFaces()];
coupledRegion[i] = cellRegion[cellI];
}
syncTools::swapBoundaryFaceList(mesh, coupledRegion, false);
syncTools::swapBoundaryFaceList(mesh, coupledRegion);
// Topology change container. Start off from existing mesh.
@@ -1375,7 +1375,7 @@ int main(int argc, char *argv[])
{
neiZoneID[i] = zoneID[mesh.faceOwner()[i+mesh.nInternalFaces()]];
}
syncTools::swapBoundaryFaceList(mesh, neiZoneID, false);
syncTools::swapBoundaryFaceList(mesh, neiZoneID);
// Determine connected regions
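All three call sites lose the trailing bool of swapBoundaryFaceList, presumably the old applySeparation flag; any separation handling now lives with the coupled patches themselves, so the call is simply:

    syncTools::swapBoundaryFaceList(mesh, coupledRegion);   // no transform flag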

View File

@@ -337,22 +337,22 @@ int main(int argc, char *argv[])
// Construct the point fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
pointMesh pMesh(mesh);
// pointMesh pMesh(mesh);
PtrList<pointScalarField> pointScalarFields;
readFields(pMesh, objects, pointScalarFields);
// readFields(pMesh, objects, pointScalarFields);
PtrList<pointVectorField> pointVectorFields;
readFields(pMesh, objects, pointVectorFields);
// readFields(pMesh, objects, pointVectorFields);
PtrList<pointSphericalTensorField> pointSphericalTensorFields;
readFields(pMesh, objects, pointSphericalTensorFields);
// readFields(pMesh, objects, pointSphericalTensorFields);
PtrList<pointSymmTensorField> pointSymmTensorFields;
readFields(pMesh, objects, pointSymmTensorFields);
// readFields(pMesh, objects, pointSymmTensorFields);
PtrList<pointTensorField> pointTensorFields;
readFields(pMesh, objects, pointTensorFields);
// readFields(pMesh, objects, pointTensorFields);
// Construct the Lagrangian fields
@@ -652,45 +652,45 @@ int main(int argc, char *argv[])
}
// Point fields
if
(
pointScalarFields.size()
|| pointVectorFields.size()
|| pointSphericalTensorFields.size()
|| pointSymmTensorFields.size()
|| pointTensorFields.size()
)
{
labelIOList pointProcAddressing
(
IOobject
(
"pointProcAddressing",
procMesh.facesInstance(),
procMesh.meshSubDir,
procMesh,
IOobject::MUST_READ,
IOobject::NO_WRITE
)
);
pointMesh procPMesh(procMesh, true);
pointFieldDecomposer fieldDecomposer
(
pMesh,
procPMesh,
pointProcAddressing,
boundaryProcAddressing
);
fieldDecomposer.decomposeFields(pointScalarFields);
fieldDecomposer.decomposeFields(pointVectorFields);
fieldDecomposer.decomposeFields(pointSphericalTensorFields);
fieldDecomposer.decomposeFields(pointSymmTensorFields);
fieldDecomposer.decomposeFields(pointTensorFields);
}
// // Point fields
// if
// (
// pointScalarFields.size()
// || pointVectorFields.size()
// || pointSphericalTensorFields.size()
// || pointSymmTensorFields.size()
// || pointTensorFields.size()
// )
// {
// labelIOList pointProcAddressing
// (
// IOobject
// (
// "pointProcAddressing",
// procMesh.facesInstance(),
// procMesh.meshSubDir,
// procMesh,
// IOobject::MUST_READ,
// IOobject::NO_WRITE
// )
// );
//
// pointMesh procPMesh(procMesh, true);
//
// pointFieldDecomposer fieldDecomposer
// (
// pMesh,
// procPMesh,
// pointProcAddressing,
// boundaryProcAddressing
// );
//
// fieldDecomposer.decomposeFields(pointScalarFields);
// fieldDecomposer.decomposeFields(pointVectorFields);
// fieldDecomposer.decomposeFields(pointSphericalTensorFields);
// fieldDecomposer.decomposeFields(pointSymmTensorFields);
// fieldDecomposer.decomposeFields(pointTensorFields);
// }
// If there is lagrangian data write it out

View File

@@ -29,6 +29,7 @@ License
#include "dictionary.H"
#include "labelIOList.H"
#include "processorPolyPatch.H"
#include "processorCyclicPolyPatch.H"
#include "fvMesh.H"
#include "OSspecific.H"
#include "Map.H"
@@ -293,10 +294,34 @@ bool Foam::domainDecomposition::writeDecomposition()
const polyPatchList& meshPatches = boundaryMesh();
// Count the number of inter-proc patches
label nInterProcPatches = 0;
forAll(curSubPatchIDs, procPatchI)
{
Info<< "For processor " << procI
<< " have to destination processor "
<< curNeighbourProcessors[procPatchI] << endl;
forAll(curSubPatchIDs[procPatchI], i)
{
Info<< " from patch:" << curSubPatchIDs[procPatchI][i]
<< " starting at:" << curSubStarts[procPatchI][i]
<< endl;
}
nInterProcPatches += curSubPatchIDs[procPatchI].size();
}
Info<< "For processor " << procI
<< " have " << nInterProcPatches << " to neighbouring processors"
<< endl;
List<polyPatch*> procPatches
(
curPatchSizes.size()
+ curProcessorPatchSizes.size(),
+ nInterProcPatches, //curProcessorPatchSizes.size(),
reinterpret_cast<polyPatch*>(0)
);
@@ -335,23 +360,72 @@ bool Foam::domainDecomposition::writeDecomposition()
forAll(curProcessorPatchSizes, procPatchI)
{
procPatches[nPatches] =
new processorPolyPatch
(
word("procBoundary") + Foam::name(procI)
+ word("to")
+ Foam::name(curNeighbourProcessors[procPatchI]),
curProcessorPatchSizes[procPatchI],
curProcessorPatchStarts[procPatchI],
nPatches,
procMesh.boundaryMesh(),
procI,
curNeighbourProcessors[procPatchI],
curSubPatchIDs[procPatchI],
curSubStarts[procPatchI]
);
const labelList& subPatchID = curSubPatchIDs[procPatchI];
const labelList& subStarts = curSubStarts[procPatchI];
nPatches++;
label curStart = curProcessorPatchStarts[procPatchI];
forAll(subPatchID, i)
{
label size =
(
i < subPatchID.size()-1
? subStarts[i+1] - subStarts[i]
: curProcessorPatchSizes[procPatchI] - subStarts[i]
);
Info<< "From processor:" << procI << endl
<< " to processor:" << curNeighbourProcessors[procPatchI]
<< endl
<< " via patch:" << subPatchID[i] << endl
<< " start :" << curStart << endl
<< " size :" << size << endl;
if (subPatchID[i] == -1)
{
// From internal faces
procPatches[nPatches] =
new processorPolyPatch
(
word("procBoundary") + Foam::name(procI)
+ "to"
+ Foam::name(curNeighbourProcessors[procPatchI]),
size,
curStart,
nPatches,
procMesh.boundaryMesh(),
procI,
curNeighbourProcessors[procPatchI]
);
}
else
{
// From cyclic
const word& referPatch =
boundaryMesh()[subPatchID[i]].name();
procPatches[nPatches] =
new processorCyclicPolyPatch
(
word("procBoundary") + Foam::name(procI)
+ "to"
+ Foam::name(curNeighbourProcessors[procPatchI])
+ "through"
+ referPatch,
size,
curStart,
nPatches,
procMesh.boundaryMesh(),
procI,
curNeighbourProcessors[procPatchI],
referPatch
);
}
curStart += size;
nPatches++;
}
}
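A hypothetical illustration of the patch names this loop produces, for processor 0 coupled to processor 1 both through plain internal faces and through a decomposed cyclic named sides (all names invented):

    // subPatchID == -1 (faces that were internal):
    //     procBoundary0to1
    // subPatchID pointing at the cyclic patch "sides":
    //     procBoundary0to1throughsides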

View File

@@ -40,7 +40,7 @@ Description
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void domainDecomposition::append(labelList& lst, const label elem)
void Foam::domainDecomposition::append(labelList& lst, const label elem)
{
label sz = lst.size();
lst.setSize(sz+1);
@@ -48,7 +48,7 @@ void domainDecomposition::append(labelList& lst, const label elem)
}
void domainDecomposition::addInterProcFace
void Foam::domainDecomposition::addInterProcFace
(
const label facei,
const label ownerProc,
@@ -97,7 +97,7 @@ void domainDecomposition::addInterProcFace
}
void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
void Foam::domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
{
// Decide which cell goes to which processor
distributeCells();
@@ -230,14 +230,15 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
facei,
ownerProc,
nbrProc,
procNbrToInterPatch,
interPatchFaces
);
}
}
// Add the proper processor faces to the sub information. Since faces
// originate from internal faces this is always -1.
// Add the proper processor faces to the sub information. For faces
// originating from internal faces this is always -1.
List<labelListList> subPatchIDs(nProcs_);
List<labelListList> subPatchStarts(nProcs_);
forAll(interPatchFaces, procI)
@@ -415,7 +416,16 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
forAll(procBoundaryAddressing_, procI)
{
label nNormal = procPatchSize_[procI].size();
label nInterProc = procProcessorPatchSize_[procI].size();
const labelListList& curSubPatchIDs =
procProcessorPatchSubPatchIDs_[procI];
// label nInterProc = procProcessorPatchSize_[procI].size();
label nInterProc = 0;
forAll(curSubPatchIDs, procPatchI)
{
nInterProc += curSubPatchIDs[procPatchI].size();
}
procBoundaryAddressing_[procI].setSize(nNormal + nInterProc);
@@ -423,9 +433,14 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
{
procBoundaryAddressing_[procI][patchI] = patchI;
}
for (label patchI = nNormal; patchI < nNormal+nInterProc; patchI++)
label patchI = nNormal;
forAll(curSubPatchIDs, procPatchI)
{
procBoundaryAddressing_[procI][patchI] = -1;
forAll(curSubPatchIDs[procPatchI], i)
{
procBoundaryAddressing_[procI][patchI++] =
curSubPatchIDs[procPatchI][i];
}
}
}

View File

@@ -145,7 +145,11 @@ Foam::fvFieldDecomposer::fvFieldDecomposer
{
forAll(boundaryAddressing_, patchi)
{
if (boundaryAddressing_[patchi] >= 0)
if
(
boundaryAddressing_[patchi] >= 0
&& !isA<processorLduInterface>(procMesh.boundary()[patchi])
)
{
patchFieldDecomposerPtrs_[patchi] = new patchFieldDecomposer
(

View File

@@ -27,6 +27,8 @@ License
#include "fvFieldDecomposer.H"
#include "processorFvPatchField.H"
#include "processorFvsPatchField.H"
#include "processorCyclicFvPatchField.H"
#include "processorCyclicFvsPatchField.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
@@ -45,7 +47,7 @@ Foam::fvFieldDecomposer::decomposeField
forAll(boundaryAddressing_, patchi)
{
if (boundaryAddressing_[patchi] >= 0)
if (patchFieldDecomposerPtrs_[patchi])
{
patchFields.set
(
@@ -59,7 +61,24 @@ Foam::fvFieldDecomposer::decomposeField
)
);
}
else
else if (isA<processorCyclicFvPatch>(procMesh_.boundary()[patchi]))
{
patchFields.set
(
patchi,
new processorCyclicFvPatchField<Type>
(
procMesh_.boundary()[patchi],
DimensionedField<Type, volMesh>::null(),
Field<Type>
(
field.internalField(),
*processorVolPatchFieldDecomposerPtrs_[patchi]
)
)
);
}
else if (isA<processorFvPatch>(procMesh_.boundary()[patchi]))
{
patchFields.set
(
@@ -76,6 +95,11 @@ Foam::fvFieldDecomposer::decomposeField
)
);
}
else
{
FatalErrorIn("fvFieldDecomposer::decomposeField()")
<< "Unknown type." << abort(FatalError);
}
}
// Create the field for the processor
@@ -156,7 +180,7 @@ Foam::fvFieldDecomposer::decomposeField
forAll(boundaryAddressing_, patchi)
{
if (boundaryAddressing_[patchi] >= 0)
if (patchFieldDecomposerPtrs_[patchi])
{
patchFields.set
(
@@ -170,7 +194,24 @@ Foam::fvFieldDecomposer::decomposeField
)
);
}
else
else if (isA<processorCyclicFvPatch>(procMesh_.boundary()[patchi]))
{
patchFields.set
(
patchi,
new processorCyclicFvsPatchField<Type>
(
procMesh_.boundary()[patchi],
DimensionedField<Type, surfaceMesh>::null(),
Field<Type>
(
allFaceField,
*processorSurfacePatchFieldDecomposerPtrs_[patchi]
)
)
);
}
else if (isA<processorFvPatch>(procMesh_.boundary()[patchi]))
{
patchFields.set
(
@@ -187,6 +228,11 @@ Foam::fvFieldDecomposer::decomposeField
)
);
}
else
{
FatalErrorIn("fvFieldDecomposer::decomposeField()")
<< "Unknown type." << abort(FatalError);
}
}
// Create the field for the processor
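Note the dispatch order in both hunks: processorCyclicFvPatch specialises the plain processor patch type, so the isA<processorCyclicFvPatch> test has to come before isA<processorFvPatch>; with the tests reversed, every processor-cyclic patch would also satisfy the generic check and the cyclic field type would never be constructed.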

View File

@@ -80,7 +80,7 @@ void Foam::channelIndex::walkOppositeFaces
isFrontBndFace[faceI-mesh.nInternalFaces()] = true;
}
}
syncTools::swapBoundaryFaceList(mesh, isFrontBndFace, false);
syncTools::swapBoundaryFaceList(mesh, isFrontBndFace);
// Add
forAll(isFrontBndFace, i)

View File

@@ -31,7 +31,6 @@ Description
\*---------------------------------------------------------------------------*/
#include "fvCFD.H"
#include "cyclicPolyPatch.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Main program:
@@ -77,35 +76,12 @@ int main(int argc, char *argv[])
}
// Give patch area
if (isA<cyclicPolyPatch>(mesh.boundaryMesh()[patchi]))
{
Info<< " Cyclic patch vector area: " << nl;
label nFaces = mesh.boundaryMesh()[patchi].size();
vector sum1 = vector::zero;
vector sum2 = vector::zero;
for (label i=0; i<nFaces/2; i++)
{
sum1 += mesh.Sf().boundaryField()[patchi][i];
sum2 += mesh.Sf().boundaryField()[patchi][i+nFaces/2];
}
reduce(sum1, sumOp<vector>());
reduce(sum2, sumOp<vector>());
Info<< " - half 1 = " << sum1 << ", " << mag(sum1) << nl
<< " - half 2 = " << sum2 << ", " << mag(sum2) << nl
<< " - total = " << (sum1 + sum2) << ", "
<< mag(sum1 + sum2) << endl;
Info<< " Cyclic patch area magnitude = "
<< gSum(mesh.magSf().boundaryField()[patchi])/2.0 << endl;
}
else
{
Info<< " Area vector of patch "
<< patchName << '[' << patchi << ']' << " = "
<< gSum(mesh.Sf().boundaryField()[patchi]) << endl;
Info<< " Area magnitude of patch "
<< patchName << '[' << patchi << ']' << " = "
<< gSum(mesh.magSf().boundaryField()[patchi]) << endl;
}
Info<< " Area vector of patch "
<< patchName << '[' << patchi << ']' << " = "
<< gSum(mesh.Sf().boundaryField()[patchi]) << endl;
Info<< " Area magnitude of patch "
<< patchName << '[' << patchi << ']' << " = "
<< gSum(mesh.magSf().boundaryField()[patchi]) << endl;
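The removed special case summed the first and second half of a combined cyclic patch separately; with split cyclics each half is an ordinary patch in its own right, so the generic gSum report above covers cyclic patches without any halving.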
// Read field and calc integral
if (fieldHeader.headerClassName() == volScalarField::typeName)

View File

@ -0,0 +1,3 @@
foamUpgradeCyclics.C
EXE = $(FOAM_APPBIN)/foamUpgradeCyclics

View File

@@ -0,0 +1,281 @@
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2009 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Application
foamUpgradeCyclics
Description
Simple tool to upgrade mesh and fields for split cyclics
Usage
- foamUpgradeCyclics [OPTION]
@param -test \n
Suppress writing the updated files with split cyclics
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "Time.H"
#include "IOdictionary.H"
#include "polyMesh.H"
#include "entry.H"
#include "IOPtrList.H"
#include "cyclicPolyPatch.H"
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
defineTemplateTypeNameAndDebug(IOPtrList<entry>, 0);
}
// Main program:
int main(int argc, char *argv[])
{
argList::addBoolOption("test");
#include "addRegionOption.H"
# include "setRootCase.H"
# include "createTime.H"
Foam::word regionName = polyMesh::defaultRegion;
args.optionReadIfPresent("region", regionName);
fileName regionPrefix = "";
if (regionName != polyMesh::defaultRegion)
{
regionPrefix = regionName;
}
// Per cyclic patch the new name for this side and the other side
HashTable<word> thisNames;
HashTable<word> nbrNames;
// Read boundary file without reading mesh
{
IOobject io
(
"boundary",
runTime.findInstance
(
regionPrefix/polyMesh::meshSubDir,
"boundary"
),
polyMesh::meshSubDir,
runTime,
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
);
Info<< "Reading boundary from " << io.filePath() << endl;
// Read PtrList of dictionary.
const word oldTypeName = IOPtrList<entry>::typeName;
const_cast<word&>(IOPtrList<entry>::typeName) = word::null;
IOPtrList<entry> patches(io);
const_cast<word&>(IOPtrList<entry>::typeName) = oldTypeName;
// Fake type back to what was in field
const_cast<word&>(patches.type()) = patches.headerClassName();
// Temporarily convert to a dictionary
dictionary patchDict;
forAll(patches, i)
{
patchDict.add(patches[i].keyword(), patches[i].dict());
}
// Replace any 'cyclic'
label nOldCyclics = 0;
forAll(patches, patchI)
{
const dictionary& patchDict = patches[patchI].dict();
if (word(patchDict["type"]) == cyclicPolyPatch::typeName)
{
if (patchDict.found("neighbourPatch"))
{
Info<< "Patch " << patches[patchI].keyword()
<< " already has 'neighbourPatch' entry; assuming it"
<< " is already converted." << endl;
}
else
{
Info<< "Patch " << patches[patchI].keyword()
<< " does not have 'neighbourPatch' entry; assuming it"
<< " is of the old type." << endl;
nOldCyclics++;
}
}
}
Info<< "Detected " << nOldCyclics << " old cyclics." << nl << endl;
// Take a copy of the old patch entries before extending the list
PtrList<entry> oldPatches(patches);
Pout<< "oldPatches:" << oldPatches << endl;
// Extend
label nOldPatches = patches.size();
patches.setSize(nOldPatches+nOldCyclics);
// Add new entries
label newPatchI = 0;
forAll(oldPatches, patchI)
{
const dictionary& patchDict = oldPatches[patchI].dict();
if
(
word(patchDict["type"]) == cyclicPolyPatch::typeName
&& !patchDict.found("neighbourPatch")
)
{
const word& name = oldPatches[patchI].keyword();
label nFaces = readLabel(patchDict["nFaces"]);
label startFace = readLabel(patchDict["startFace"]);
word thisName = name + "_half0";
word nbrName = name + "_half1";
thisNames.insert(name, thisName);
nbrNames.insert(name, nbrName);
// Change entry on this side
patches.set(newPatchI, oldPatches(patchI));
dictionary& thisPatchDict = patches[newPatchI].dict();
thisPatchDict.add("neighbourPatch", nbrName);
thisPatchDict.set("nFaces", nFaces/2);
patches[newPatchI].keyword() = thisName;
newPatchI++;
// Add entry on other side
patches.set(newPatchI, oldPatches(patchI));
dictionary& nbrPatchDict = patches[newPatchI].dict();
nbrPatchDict.add("neighbourPatch", thisName);
nbrPatchDict.set("nFaces", nFaces/2);
nbrPatchDict.set("startFace", startFace+nFaces/2);
patches[newPatchI].keyword() = nbrName;
newPatchI++;
}
else
{
patches.set(newPatchI++, oldPatches(patchI));
}
}
Info<< "boundary:" << patches << endl;
if (returnReduce(nOldCyclics, sumOp<label>()) > 0)
{
if (args.optionFound("test"))
{
Info<< "-test option: no changes made" << nl << endl;
}
else
{
if (mvBak(patches.objectPath(), "old"))
{
Info<< "Backup to "
<< (patches.objectPath() + ".old") << nl;
}
Info<< "Write to "
<< patches.objectPath() << nl << endl;
patches.write();
}
}
}
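For illustration, a hypothetical old-style boundary entry and the pair of entries the conversion loop above produces from it (patch name and face counts invented):

    // Before:
    sides
    {
        type        cyclic;
        nFaces      100;
        startFace   3000;
    }

    // After:
    sides_half0
    {
        type            cyclic;
        neighbourPatch  sides_half1;
        nFaces          50;
        startFace       3000;
    }
    sides_half1
    {
        type            cyclic;
        neighbourPatch  sides_half0;
        nFaces          50;
        startFace       3050;
    }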
// {
// // Read dictionary. (disable class type checking so we can load
// // field)
// Info<< "Loading dictionary " << fieldName << endl;
// const word oldTypeName = IOdictionary::typeName;
// const_cast<word&>(IOdictionary::typeName) = word::null;
//
// IOdictionary fieldDict
// (
// IOobject
// (
// "p",
// instance,
// mesh,
// IOobject::MUST_READ,
// IOobject::NO_WRITE,
// false
// )
// );
// const_cast<word&>(IOdictionary::typeName) = oldTypeName;
// // Fake type back to what was in field
// const_cast<word&>(fieldDict.type()) = fieldDict.headerClassName();
//
// Info<< "Loaded dictionary " << fieldName
// << " with entries " << fieldDict.toc() << endl;
//
// dictionary& boundaryField = fieldDict.subDict("boundaryField");
//
// forAllConstIter(HashTable<word>, thisNames, iter)
// {
// const word& patchName = iter.key();
// const word& newName = iter();
//
// Info<< "Looking for entry for patch " << patchName << endl;
//
// if (boundaryField.found(patchName) && !boundaryField.found(iter()))
// {
// const dictionary& patchDict = boundaryField[patchName];
//
// Field<scalar> fld(patchDict.lookup("value"));
//
//
// }
//
//
// forAllIter(IDLList<entry>, boundaryField, patchIter)
// {
//
// }
return 0;
}
// ************************************************************************* //