Initial set of changes.

mattijs
2009-01-15 18:29:08 +00:00
parent 73426723b0
commit 2dbf42085d
84 changed files with 4747 additions and 2764 deletions

View File

@ -5,4 +5,4 @@ EXE_INC = \
EXE_LIBS = \
-lmeshTools \
-ldynamicMesh
/* -ldynamicMesh */

View File

@ -81,45 +81,21 @@ const curvedEdgeList& blockMesh::edges() const
}
wordList blockMesh::patchNames() const
PtrList<dictionary> blockMesh::patchDicts() const
{
const polyPatchList& patchTopologies = topology().boundaryMesh();
wordList names(patchTopologies.size());
forAll (names, patchI)
PtrList<dictionary> patchDicts(patchTopologies.size());
forAll(patchTopologies, patchI)
{
names[patchI] = patchTopologies[patchI].name();
OStringStream os;
patchTopologies[patchI].write(os);
IStringStream is(os.str());
patchDicts.set(patchI, new dictionary(is));
patchDicts[patchI].set("name", patchTopologies[patchI].name());
}
return names;
}
wordList blockMesh::patchTypes() const
{
const polyPatchList& patchTopologies = topology().boundaryMesh();
wordList types(patchTopologies.size());
forAll (types, patchI)
{
types[patchI] = patchTopologies[patchI].type();
}
return types;
}
wordList blockMesh::patchPhysicalTypes() const
{
const polyPatchList& patchTopologies = topology().boundaryMesh();
wordList physicalTypes(patchTopologies.size());
forAll (physicalTypes, patchI)
{
physicalTypes[patchI] = patchTopologies[patchI].physicalType();
}
return physicalTypes;
return patchDicts;
}
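The new patchDicts() packs each patch of the topology mesh into a dictionary (written to an OStringStream and re-read through an IStringStream) and adds a "name" entry, replacing the separate patchNames()/patchTypes()/patchPhysicalTypes() accessors. A minimal sketch of how a caller might read the information back out of such a dictionary; this usage is assumed, not part of the commit:

    // Hypothetical consumer of blockMesh::patchDicts(); "blocks" is assumed
    // to be an existing blockMesh.
    PtrList<dictionary> patchDicts(blocks.patchDicts());

    forAll(patchDicts, patchI)
    {
        const dictionary& dict = patchDicts[patchI];

        // "name" is set explicitly above; "type" comes from polyPatch::write()
        const word name(dict.lookup("name"));
        const word type(dict.lookup("type"));

        Info<< "patch " << patchI << " name:" << name
            << " type:" << type << endl;
    }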

View File

@ -88,6 +88,30 @@ class blockMesh
const faceList& patchShapes
);
bool readPatches
(
const dictionary& meshDescription,
const pointField& tmpBlockPoints,
faceListList& tmpBlocksPatches,
wordList& patchNames,
wordList& patchTypes,
wordList& nbrPatchNames
);
bool readBoundary
(
const dictionary& meshDescription,
const pointField& tmpBlockPoints,
faceListList& tmpBlocksPatches,
PtrList<dictionary>& patchDicts
);
void createCellShapes
(
const pointField& tmpBlockPoints,
PtrList<cellShape>& tmpBlockCells
);
polyMesh* createTopology(IOdictionary&);
void checkBlockMesh(const polyMesh&);
@ -140,11 +164,15 @@ public:
return patches_;
}
wordList patchNames() const;
wordList patchTypes() const;
//- Get patch information from the topology mesh
PtrList<dictionary> patchDicts() const;
wordList patchPhysicalTypes() const;
// wordList patchNames() const;
//
// wordList patchTypes() const;
//
// wordList patchPhysicalTypes() const;
//- Number of blocks with specified zones
label numZonedBlocks() const;

View File

@ -190,23 +190,8 @@ int main(int argc, char *argv[])
Info<< nl << "Creating mesh from block mesh" << endl;
wordList patchNames = blocks.patchNames();
wordList patchTypes = blocks.patchTypes();
word defaultFacesName = "defaultFaces";
word defaultFacesType = emptyPolyPatch::typeName;
wordList patchPhysicalTypes = blocks.patchPhysicalTypes();
preservePatchTypes
(
runTime,
runTime.constant(),
polyMeshDir,
patchNames,
patchTypes,
defaultFacesName,
defaultFacesType,
patchPhysicalTypes
);
polyMesh mesh
(
@ -219,11 +204,9 @@ int main(int argc, char *argv[])
blocks.points(),
blocks.cells(),
blocks.patches(),
patchNames,
patchTypes,
blocks.patchDicts(),
defaultFacesName,
defaultFacesType,
patchPhysicalTypes
defaultFacesType
);

View File

@ -28,6 +28,7 @@ License
#include "Time.H"
#include "preservePatchTypes.H"
#include "emptyPolyPatch.H"
#include "cyclicPolyPatch.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -126,6 +127,248 @@ bool Foam::blockMesh::patchLabelsOK
}
bool Foam::blockMesh::readPatches
(
const dictionary& meshDescription,
const pointField& tmpBlockPoints,
faceListList& tmpBlocksPatches,
wordList& patchNames,
wordList& patchTypes,
wordList& nbrPatchNames
)
{
bool topologyOK = true;
ITstream& patchStream(meshDescription.lookup("patches"));
// read number of patches in mesh
label nPatches = 0;
token firstToken(patchStream);
if (firstToken.isLabel())
{
nPatches = firstToken.labelToken();
tmpBlocksPatches.setSize(nPatches);
patchNames.setSize(nPatches);
patchTypes.setSize(nPatches);
nbrPatchNames.setSize(nPatches);
}
else
{
patchStream.putBack(firstToken);
}
// Read beginning of blocks
patchStream.readBegin("patches");
nPatches = 0;
token lastToken(patchStream);
while
(
!(
lastToken.isPunctuation()
&& lastToken.pToken() == token::END_LIST
)
)
{
if (tmpBlocksPatches.size() <= nPatches)
{
tmpBlocksPatches.setSize(nPatches + 1);
patchNames.setSize(nPatches + 1);
patchTypes.setSize(nPatches + 1);
nbrPatchNames.setSize(nPatches + 1);
}
patchStream.putBack(lastToken);
patchStream
>> patchTypes[nPatches]
>> patchNames[nPatches];
// Read optional neighbour patch name
if (patchTypes[nPatches] == cyclicPolyPatch::typeName)
{
patchStream >> lastToken;
if (lastToken.isWord())
{
nbrPatchNames[nPatches] = lastToken.wordToken();
}
else
{
patchStream.putBack(lastToken);
}
}
// Read patch faces
patchStream >> tmpBlocksPatches[nPatches];
// Catch multiple patches asap.
for (label i = 0; i < nPatches; i++)
{
if (patchNames[nPatches] == patchNames[i])
{
FatalErrorIn
(
"blockMesh::createTopology(IOdictionary&)"
) << "Duplicate patch " << patchNames[nPatches]
<< " at line " << patchStream.lineNumber()
<< ". Exiting !" << nl
<< exit(FatalError);
}
}
topologyOK = topologyOK && patchLabelsOK
(
nPatches,
tmpBlockPoints,
tmpBlocksPatches[nPatches]
);
nPatches++;
// Split old style cyclics
if (patchTypes[nPatches-1] == cyclicPolyPatch::typeName)
{
if (nbrPatchNames[nPatches-1] == word::null)
{
word halfA = patchNames[nPatches-1] + "_half0";
word halfB = patchNames[nPatches-1] + "_half1";
WarningIn("blockMesh::createTopology(IOdictionary&)")
<< "Old-style cyclic definition."
<< " Splitting patch "
<< patchNames[nPatches-1] << " into two halves "
<< halfA << " and " << halfB << endl
<< " Alternatively use new syntax "
<< " cyclic <name> <neighbourname> <faces>" << endl;
// Add extra patch
if (tmpBlocksPatches.size() <= nPatches)
{
tmpBlocksPatches.setSize(nPatches + 1);
patchNames.setSize(nPatches + 1);
patchTypes.setSize(nPatches + 1);
nbrPatchNames.setSize(nPatches + 1);
}
patchNames[nPatches-1] = halfA;
nbrPatchNames[nPatches-1] = halfB;
patchTypes[nPatches] = patchTypes[nPatches-1];
patchNames[nPatches] = halfB;
nbrPatchNames[nPatches] = halfA;
// Split faces
if ((tmpBlocksPatches[nPatches-1].size() % 2) != 0)
{
FatalErrorIn
(
"blockMesh::createTopology(IOdictionary&)"
) << "Size of cyclic faces is not a multiple of 2 :"
<< tmpBlocksPatches[nPatches-1]
<< exit(FatalError);
}
label sz = tmpBlocksPatches[nPatches-1].size()/2;
faceList unsplitFaces(tmpBlocksPatches[nPatches-1], true);
tmpBlocksPatches[nPatches-1] = faceList
(
SubList<face>(unsplitFaces, sz)
);
tmpBlocksPatches[nPatches] = faceList
(
SubList<face>(unsplitFaces, sz, sz)
);
nPatches++;
}
}
patchStream >> lastToken;
}
patchStream.putBack(lastToken);
// Read end of blocks
patchStream.readEnd("patches");
return topologyOK;
}
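The warning above documents the two input forms this parser now accepts in the blockMeshDict patches section; a rough sketch (patch names and face vertex labels are illustrative only):

    patches
    (
        // Old style: one cyclic patch containing both halves; the code above
        // splits it into sides_half0 / sides_half1
        cyclic sides
        (
            (0 4 7 3)
            (1 5 6 2)
        )

        // New style: optional neighbour patch name after the patch name
        cyclic side0 side1
        (
            (0 4 7 3)
        )
        cyclic side1 side0
        (
            (1 5 6 2)
        )
    );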
bool Foam::blockMesh::readBoundary
(
const dictionary& meshDescription,
const pointField& tmpBlockPoints,
faceListList& tmpBlocksPatches,
PtrList<dictionary>& patchDicts
)
{
bool topologyOK = true;
// Read like boundary file
const PtrList<entry> patchesInfo
(
meshDescription.lookup("boundary")
);
tmpBlocksPatches.setSize(patchesInfo.size());
patchDicts.setSize(patchesInfo.size());
forAll(tmpBlocksPatches, patchI)
{
const entry& patchInfo = patchesInfo[patchI];
// Construct dictionary and add name
patchDicts.set(patchI, new dictionary(patchInfo.dict()));
patchDicts[patchI].set("name", patchInfo.keyword());
// Read block faces
patchDicts[patchI].lookup("faces") >> tmpBlocksPatches[patchI];
topologyOK = topologyOK && patchLabelsOK
(
patchI,
tmpBlockPoints,
tmpBlocksPatches[patchI]
);
}
return topologyOK;
}
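readBoundary() instead parses a boundary-file style section: a list of named entries, each a dictionary from which only "faces" is extracted here, while the remaining keywords (e.g. type) are carried along verbatim in patchDicts. A sketch of such a blockMeshDict section, with made-up names and face labels:

    boundary
    (
        inlet
        {
            type patch;
            faces
            (
                (0 4 7 3)
            );
        }
        walls
        {
            type wall;
            faces
            (
                (0 1 5 4)
                (2 3 7 6)
            );
        }
    );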
void Foam::blockMesh::createCellShapes
(
const pointField& tmpBlockPoints,
PtrList<cellShape>& tmpBlockCells
)
{
const blockMesh& blocks = *this;
tmpBlockCells.setSize(blocks.size());
forAll(blocks, blockLabel)
{
tmpBlockCells.set
(
blockLabel,
new cellShape(blocks[blockLabel].blockDef().blockShape())
);
if (tmpBlockCells[blockLabel].mag(tmpBlockPoints) < 0.0)
{
WarningIn
(
"blockMesh::createTopology(IOdictionary&)"
) << "negative volume block : " << blockLabel
<< ", probably defined inside-out" << endl;
}
}
}
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
Foam::polyMesh* Foam::blockMesh::createTopology(IOdictionary& meshDescription)
@ -296,123 +539,44 @@ Foam::polyMesh* Foam::blockMesh::createTopology(IOdictionary& meshDescription)
}
polyMesh* blockMeshPtr = NULL;
Info<< nl << "Creating patches" << endl;
if (meshDescription.found("patches"))
{
Info<< nl << "Reading patches section" << endl;
faceListList tmpBlocksPatches;
wordList patchNames;
wordList patchTypes;
wordList nbrPatchNames;
{
ITstream& patchStream(meshDescription.lookup("patches"));
// read number of patches in mesh
label nPatches = 0;
token firstToken(patchStream);
if (firstToken.isLabel())
{
nPatches = firstToken.labelToken();
tmpBlocksPatches.setSize(nPatches);
patchNames.setSize(nPatches);
patchTypes.setSize(nPatches);
}
else
{
patchStream.putBack(firstToken);
}
// Read beginning of blocks
patchStream.readBegin("patches");
nPatches = 0;
token lastToken(patchStream);
while
topologyOK = topologyOK && readPatches
(
!(
lastToken.isPunctuation()
&& lastToken.pToken() == token::END_LIST
)
)
{
if (tmpBlocksPatches.size() <= nPatches)
{
tmpBlocksPatches.setSize(nPatches + 1);
patchNames.setSize(nPatches + 1);
patchTypes.setSize(nPatches + 1);
}
patchStream.putBack(lastToken);
patchStream
>> patchTypes[nPatches]
>> patchNames[nPatches]
>> tmpBlocksPatches[nPatches];
// Catch multiple patches asap.
for (label i = 0; i < nPatches; i++)
{
if (patchNames[nPatches] == patchNames[i])
{
FatalErrorIn
(
"blockMesh::createTopology(IOdictionary&)"
) << "Duplicate patch " << patchNames[nPatches]
<< " at line " << patchStream.lineNumber()
<< ". Exiting !" << nl
<< exit(FatalError);
}
}
topologyOK = topologyOK && patchLabelsOK
(
nPatches,
meshDescription,
tmpBlockPoints,
tmpBlocksPatches[nPatches]
tmpBlocksPatches,
patchNames,
patchTypes,
nbrPatchNames
);
nPatches++;
patchStream >> lastToken;
}
patchStream.putBack(lastToken);
// Read end of blocks
patchStream.readEnd("patches");
}
if (!topologyOK)
{
FatalErrorIn("blockMesh::createTopology(IOdictionary&)")
<< "Cannot create mesh due to errors in topology, exiting !" << nl
<< exit(FatalError);
<< "Cannot create mesh due to errors in topology, exiting !"
<< nl << exit(FatalError);
}
Info<< nl << "Creating block mesh topology" << endl;
PtrList<cellShape> tmpBlockCells(blocks.size());
forAll(blocks, blockLabel)
{
tmpBlockCells.set
(
blockLabel,
new cellShape(blocks[blockLabel].blockDef().blockShape())
);
createCellShapes(tmpBlockPoints, tmpBlockCells);
if (tmpBlockCells[blockLabel].mag(tmpBlockPoints) < 0.0)
{
WarningIn
(
"blockMesh::createTopology(IOdictionary&)"
) << "negative volume block : " << blockLabel
<< ", probably defined inside-out" << endl;
}
}
Info<< nl << "Reading physicalType from existing boundary file" << endl;
wordList patchPhysicalTypes(tmpBlocksPatches.size());
@ -428,7 +592,7 @@ Foam::polyMesh* Foam::blockMesh::createTopology(IOdictionary& meshDescription)
patchPhysicalTypes
);
polyMesh* blockMeshPtr = new polyMesh
blockMeshPtr = new polyMesh
(
IOobject
(
@ -448,6 +612,54 @@ Foam::polyMesh* Foam::blockMesh::createTopology(IOdictionary& meshDescription)
defaultPatchType,
patchPhysicalTypes
);
}
else if (meshDescription.found("boundary"))
{
faceListList tmpBlocksPatches;
PtrList<dictionary> patchDicts;
topologyOK = topologyOK && readBoundary
(
meshDescription,
tmpBlockPoints,
tmpBlocksPatches,
patchDicts
);
if (!topologyOK)
{
FatalErrorIn("blockMesh::createTopology(IOdictionary&)")
<< "Cannot create mesh due to errors in topology, exiting !"
<< nl << exit(FatalError);
}
Info<< nl << "Creating block mesh topology" << endl;
PtrList<cellShape> tmpBlockCells(blocks.size());
createCellShapes(tmpBlockPoints, tmpBlockCells);
blockMeshPtr = new polyMesh
(
IOobject
(
"blockMesh",
meshDescription.time().constant(),
meshDescription.time(),
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
tmpBlockPoints,
tmpBlockCells,
tmpBlocksPatches,
patchDicts,
defaultPatchName,
defaultPatchType
);
}
checkBlockMesh(*blockMeshPtr);

View File

@ -40,6 +40,63 @@ Description
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void domainDecomposition::append(labelList& lst, const label elem)
{
label sz = lst.size();
lst.setSize(sz+1);
lst[sz] = elem;
}
void domainDecomposition::addInterProcFace
(
const label facei,
const label ownerProc,
const label nbrProc,
List<Map<label> >& nbrToInterPatch,
List<DynamicList<DynamicList<label> > >& interPatchFaces
) const
{
Map<label>::iterator patchIter = nbrToInterPatch[ownerProc].find(nbrProc);
// Introduce turning index only for internal faces (are duplicated).
label ownerIndex = facei+1;
label nbrIndex = -(facei+1);
if (patchIter != nbrToInterPatch[ownerProc].end())
{
// Existing interproc patch. Add to both sides.
label toNbrProcPatchI = patchIter();
interPatchFaces[ownerProc][toNbrProcPatchI].append(ownerIndex);
if (isInternalFace(facei))
{
label toOwnerProcPatchI = nbrToInterPatch[nbrProc][ownerProc];
interPatchFaces[nbrProc][toOwnerProcPatchI].append(nbrIndex);
}
}
else
{
// Create new interproc patches.
label toNbrProcPatchI = nbrToInterPatch[ownerProc].size();
nbrToInterPatch[ownerProc].insert(nbrProc, toNbrProcPatchI);
DynamicList<label> oneFace;
oneFace.append(ownerIndex);
interPatchFaces[ownerProc].append(oneFace);
if (isInternalFace(facei))
{
label toOwnerProcPatchI = nbrToInterPatch[nbrProc].size();
nbrToInterPatch[nbrProc].insert(ownerProc, toOwnerProcPatchI);
oneFace.clear();
oneFace.append(nbrIndex);
interPatchFaces[nbrProc].append(oneFace);
}
}
}
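The +/-(facei+1) values stored here are the turning indices described in domainDecomposition.H: the magnitude minus one recovers the mesh face label, and the sign records whether the processor face keeps or reverses the original orientation. A small hypothetical helper (not part of this commit) showing the decoding, which mirrors the mag(...) - 1 pattern used further down when gathering globally shared points:

    // Hypothetical helper: decode one procFaceAddressing_ entry.
    label decodeProcFace(const label procFaceLabel, bool& flipped)
    {
        // Sign: orientation relative to the original mesh face
        flipped = (procFaceLabel < 0);

        // Magnitude: face label shifted by one so that face 0 can carry a sign
        return mag(procFaceLabel) - 1;
    }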
void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
{
// Decide which cell goes to which processor
@ -61,31 +118,8 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
Info<< "\nDistributing cells to processors" << endl;
// Memory management
{
List<SLList<label> > procCellList(nProcs_);
forAll (cellToProc_, celli)
{
if (cellToProc_[celli] >= nProcs_)
{
FatalErrorIn("domainDecomposition::decomposeMesh()")
<< "Impossible processor label " << cellToProc_[celli]
<< "for cell " << celli
<< abort(FatalError);
}
else
{
procCellList[cellToProc_[celli]].append(celli);
}
}
// Convert linked lists into normal lists
forAll (procCellList, procI)
{
procCellAddressing_[procI] = procCellList[procI];
}
}
// Cells per processor
procCellAddressing_ = invertOneToMany(nProcs_, cellToProc_);
Info << "\nDistributing faces to processors" << endl;
@ -94,135 +128,18 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
// same processor, the face is an internal face. If they are different,
// it belongs to both processors.
// Memory management
{
List<SLList<label> > procFaceList(nProcs_);
procFaceAddressing_.setSize(nProcs_);
// Internal faces
forAll (neighbour, facei)
{
if (cellToProc_[owner[facei]] == cellToProc_[neighbour[facei]])
{
// Face internal to processor
procFaceList[cellToProc_[owner[facei]]].append(facei);
// Face internal to processor. Notice no turning index.
procFaceAddressing_[cellToProc_[owner[facei]]].append(facei+1);
}
}
// Detect inter-processor boundaries
// Neighbour processor for each subdomain
List<SLList<label> > interProcBoundaries(nProcs_);
// Face labels belonging to each inter-processor boundary
List<SLList<SLList<label> > > interProcBFaces(nProcs_);
List<SLList<label> > procPatchIndex(nProcs_);
forAll (neighbour, facei)
{
if (cellToProc_[owner[facei]] != cellToProc_[neighbour[facei]])
{
// inter - processor patch face found. Go through the list of
// inside boundaries for the owner processor and try to find
// this inter-processor patch.
label ownerProc = cellToProc_[owner[facei]];
label neighbourProc = cellToProc_[neighbour[facei]];
SLList<label>::iterator curInterProcBdrsOwnIter =
interProcBoundaries[ownerProc].begin();
SLList<SLList<label> >::iterator curInterProcBFacesOwnIter =
interProcBFaces[ownerProc].begin();
bool interProcBouFound = false;
// WARNING: Synchronous SLList iterators
for
(
;
curInterProcBdrsOwnIter
!= interProcBoundaries[ownerProc].end()
&& curInterProcBFacesOwnIter
!= interProcBFaces[ownerProc].end();
++curInterProcBdrsOwnIter, ++curInterProcBFacesOwnIter
)
{
if (curInterProcBdrsOwnIter() == neighbourProc)
{
// the inter - processor boundary exists. Add the face
interProcBouFound = true;
curInterProcBFacesOwnIter().append(facei);
SLList<label>::iterator curInterProcBdrsNeiIter =
interProcBoundaries[neighbourProc].begin();
SLList<SLList<label> >::iterator
curInterProcBFacesNeiIter =
interProcBFaces[neighbourProc].begin();
bool neighbourFound = false;
// WARNING: Synchronous SLList iterators
for
(
;
curInterProcBdrsNeiIter !=
interProcBoundaries[neighbourProc].end()
&& curInterProcBFacesNeiIter !=
interProcBFaces[neighbourProc].end();
++curInterProcBdrsNeiIter,
++curInterProcBFacesNeiIter
)
{
if (curInterProcBdrsNeiIter() == ownerProc)
{
// boundary found. Add the face
neighbourFound = true;
curInterProcBFacesNeiIter().append(facei);
}
if (neighbourFound) break;
}
if (interProcBouFound && !neighbourFound)
{
FatalErrorIn("domainDecomposition::decomposeMesh()")
<< "Inconsistency in inter - "
<< "processor boundary lists for processors "
<< ownerProc << " and " << neighbourProc
<< abort(FatalError);
}
}
if (interProcBouFound) break;
}
if (!interProcBouFound)
{
// inter - processor boundaries do not exist and need to
// be created
// set the new addressing information
// owner
interProcBoundaries[ownerProc].append(neighbourProc);
interProcBFaces[ownerProc].append(SLList<label>(facei));
// neighbour
interProcBoundaries[neighbourProc].append(ownerProc);
interProcBFaces[neighbourProc].append(SLList<label>(facei));
}
}
}
// Loop through patches. For cyclic boundaries detect inter-processor
// faces; for all other, add faces to the face list and remember start
// and size of all patches.
// for all processors, set the size of start index and patch size
// lists to the number of patches in the mesh
forAll (procPatchSize_, procI)
@ -238,12 +155,12 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
{
procPatchSize_[procI][patchi] = 0;
procPatchStartIndex_[procI][patchi] =
procFaceList[procI].size();
procFaceAddressing_[procI].size();
}
const label patchStart = patches[patchi].start();
if (typeid(patches[patchi]) != typeid(cyclicPolyPatch))
if (!isA<cyclicPolyPatch>(patches[patchi]))
{
// Normal patch. Add faces to processor where the cell
// next to the face lives
@ -255,8 +172,8 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
{
const label curProc = cellToProc_[patchFaceCells[facei]];
// add the face
procFaceList[curProc].append(patchStart + facei);
// add the face without turning index
procFaceAddressing_[curProc].append(patchStart+facei+1);
// increment the number of faces for this patch
procPatchSize_[curProc][patchi]++;
@ -264,397 +181,304 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
}
else
{
// Cyclic patch special treatment
const polyPatch& cPatch = patches[patchi];
const label cycOffset = cPatch.size()/2;
// Set reference to faceCells for both patches
const labelList::subList firstFaceCells
const cyclicPolyPatch& pp = refCast<const cyclicPolyPatch>
(
cPatch.faceCells(),
cycOffset
patches[patchi]
);
// cyclic: check opposite side on this processor
const unallocLabelList& patchFaceCells = pp.faceCells();
const labelList::subList secondFaceCells
(
cPatch.faceCells(),
cycOffset,
cycOffset
);
const unallocLabelList& nbrPatchFaceCells =
pp.neighbPatch().faceCells();
forAll (firstFaceCells, facei)
forAll (patchFaceCells, facei)
{
if
(
cellToProc_[firstFaceCells[facei]]
!= cellToProc_[secondFaceCells[facei]]
)
const label curProc = cellToProc_[patchFaceCells[facei]];
const label nbrProc = cellToProc_[nbrPatchFaceCells[facei]];
if (curProc == nbrProc)
{
// This face becomes an inter-processor boundary face
// inter - processor patch face found. Go through
// the list of inside boundaries for the owner
// processor and try to find this inter-processor
// patch.
cyclicParallel_ = true;
label ownerProc = cellToProc_[firstFaceCells[facei]];
label neighbourProc =
cellToProc_[secondFaceCells[facei]];
SLList<label>::iterator curInterProcBdrsOwnIter =
interProcBoundaries[ownerProc].begin();
SLList<SLList<label> >::iterator
curInterProcBFacesOwnIter =
interProcBFaces[ownerProc].begin();
bool interProcBouFound = false;
// WARNING: Synchronous SLList iterators
for
(
;
curInterProcBdrsOwnIter !=
interProcBoundaries[ownerProc].end()
&& curInterProcBFacesOwnIter !=
interProcBFaces[ownerProc].end();
++curInterProcBdrsOwnIter,
++curInterProcBFacesOwnIter
)
{
if (curInterProcBdrsOwnIter() == neighbourProc)
{
// the inter - processor boundary exists.
// Add the face
interProcBouFound = true;
curInterProcBFacesOwnIter().append
(patchStart + facei);
SLList<label>::iterator curInterProcBdrsNeiIter
= interProcBoundaries[neighbourProc].begin();
SLList<SLList<label> >::iterator
curInterProcBFacesNeiIter =
interProcBFaces[neighbourProc].begin();
bool neighbourFound = false;
// WARNING: Synchronous SLList iterators
for
(
;
curInterProcBdrsNeiIter
!= interProcBoundaries[neighbourProc].end()
&& curInterProcBFacesNeiIter
!= interProcBFaces[neighbourProc].end();
++curInterProcBdrsNeiIter,
++curInterProcBFacesNeiIter
)
{
if (curInterProcBdrsNeiIter() == ownerProc)
{
// boundary found. Add the face
neighbourFound = true;
curInterProcBFacesNeiIter()
.append
(
patchStart
+ cycOffset
+ facei
);
}
if (neighbourFound) break;
}
if (interProcBouFound && !neighbourFound)
{
FatalErrorIn
(
"domainDecomposition::decomposeMesh()"
) << "Inconsistency in inter-processor "
<< "boundary lists for processors "
<< ownerProc << " and " << neighbourProc
<< " in cyclic boundary matching"
<< abort(FatalError);
}
}
if (interProcBouFound) break;
}
if (!interProcBouFound)
{
// inter - processor boundaries do not exist
// and need to be created
// set the new addressing information
// owner
interProcBoundaries[ownerProc]
.append(neighbourProc);
interProcBFaces[ownerProc]
.append(SLList<label>(patchStart + facei));
// neighbour
interProcBoundaries[neighbourProc]
.append(ownerProc);
interProcBFaces[neighbourProc]
.append
(
SLList<label>
(
patchStart
+ cycOffset
+ facei
)
);
}
}
else
{
// This cyclic face remains on the processor
label ownerProc = cellToProc_[firstFaceCells[facei]];
// add the face
procFaceList[ownerProc].append(patchStart + facei);
// add the face without turning index
procFaceAddressing_[curProc].append(patchStart+facei+1);
// increment the number of faces for this patch
procPatchSize_[ownerProc][patchi]++;
// Note: I cannot add the other side of the cyclic
// boundary here because this would violate the order.
// They will be added in a separate loop below
//
procPatchSize_[curProc][patchi]++;
}
}
}
}
// Ordering in cyclic boundaries is important.
// Add the other half of cyclic faces for cyclic boundaries
// that remain on the processor
forAll (secondFaceCells, facei)
// Done internal bits of the new mesh and the ordinary patches.
// Per processor, a map from neighbour processor to the inter-processor patch
// that communicates with that neighbour.
List<Map<label> > procNbrToInterPatch(nProcs_);
// Per processor, the faces per inter-processor patch.
List<DynamicList<DynamicList<label> > > interPatchFaces(nProcs_);
// Processor boundaries from internal faces
forAll (neighbour, facei)
{
if
label ownerProc = cellToProc_[owner[facei]];
label nbrProc = cellToProc_[neighbour[facei]];
if (ownerProc != nbrProc)
{
// inter - processor patch face found.
addInterProcFace
(
cellToProc_[firstFaceCells[facei]]
== cellToProc_[secondFaceCells[facei]]
)
facei,
ownerProc,
nbrProc,
procNbrToInterPatch,
interPatchFaces
);
}
}
// Initialise the sub-patch information for the inter-processor patches built
// so far. Since their faces originate from internal faces, the originating
// patch ID is always -1.
List<labelListList> subPatchIDs(nProcs_);
List<labelListList> subPatchStarts(nProcs_);
forAll(interPatchFaces, procI)
{
// This cyclic face remains on the processor
label ownerProc = cellToProc_[firstFaceCells[facei]];
label nInterfaces = interPatchFaces[procI].size();
// add the second face
procFaceList[ownerProc].append
(patchStart + cycOffset + facei);
// increment the number of faces for this patch
procPatchSize_[ownerProc][patchi]++;
}
}
}
subPatchIDs[procI].setSize(nInterfaces, labelList(1, -1));
subPatchStarts[procI].setSize(nInterfaces, labelList(1, 0));
}
// Convert linked lists into normal lists
// Add inter-processor boundaries and remember start indices
forAll (procFaceList, procI)
// Processor boundaries from split cyclics
forAll (patches, patchi)
{
// Get internal and regular boundary processor faces
SLList<label>& curProcFaces = procFaceList[procI];
// Get reference to processor face addressing
labelList& curProcFaceAddressing = procFaceAddressing_[procI];
labelList& curProcNeighbourProcessors =
procNeighbourProcessors_[procI];
labelList& curProcProcessorPatchSize =
procProcessorPatchSize_[procI];
labelList& curProcProcessorPatchStartIndex =
procProcessorPatchStartIndex_[procI];
// calculate the size
label nFacesOnProcessor = curProcFaces.size();
for
if (isA<cyclicPolyPatch>(patches[patchi]))
{
const cyclicPolyPatch& pp = refCast<const cyclicPolyPatch>
(
SLList<SLList<label> >::iterator curInterProcBFacesIter =
interProcBFaces[procI].begin();
curInterProcBFacesIter != interProcBFaces[procI].end();
++curInterProcBFacesIter
)
{
nFacesOnProcessor += curInterProcBFacesIter().size();
}
curProcFaceAddressing.setSize(nFacesOnProcessor);
// Fill in the list. Calculate turning index.
// Turning index will be -1 only for some faces on processor
// boundaries, i.e. the ones where the current processor ID
// is in the cell which is a face neighbour.
// Turning index is stored as the sign of the face addressing list
label nFaces = 0;
// Add internal and boundary faces
// Remember to increment the index by one such that the
// turning index works properly.
for
(
SLList<label>::iterator curProcFacesIter = curProcFaces.begin();
curProcFacesIter != curProcFaces.end();
++curProcFacesIter
)
{
curProcFaceAddressing[nFaces] = curProcFacesIter() + 1;
nFaces++;
}
// Add inter-processor boundary faces. At the beginning of each
// patch, grab the patch start index and size
curProcNeighbourProcessors.setSize
(
interProcBoundaries[procI].size()
patches[patchi]
);
curProcProcessorPatchSize.setSize
// cyclic: check opposite side on this processor
const unallocLabelList& patchFaceCells = pp.faceCells();
const unallocLabelList& nbrPatchFaceCells =
pp.neighbPatch().faceCells();
// Store old sizes. Used to detect which inter-proc patches
// have been added to.
labelListList oldInterfaceSizes(nProcs_);
forAll(oldInterfaceSizes, procI)
{
labelList& curOldSizes = oldInterfaceSizes[procI];
curOldSizes.setSize(interPatchFaces[procI].size());
forAll(curOldSizes, interI)
{
curOldSizes[interI] =
interPatchFaces[procI][interI].size();
}
}
// Add faces with different owner and neighbour processors
forAll (patchFaceCells, facei)
{
const label ownerProc = cellToProc_[patchFaceCells[facei]];
const label nbrProc = cellToProc_[nbrPatchFaceCells[facei]];
if (ownerProc != nbrProc)
{
// inter - processor patch face found.
addInterProcFace
(
interProcBoundaries[procI].size()
pp.start()+facei,
ownerProc,
nbrProc,
procNbrToInterPatch,
interPatchFaces
);
}
}
// 1. Check if any faces added to existing interfaces
forAll(oldInterfaceSizes, procI)
{
const labelList& curOldSizes = oldInterfaceSizes[procI];
forAll(curOldSizes, interI)
{
label oldSz = curOldSizes[interI];
if (interPatchFaces[procI][interI].size() > oldSz)
{
// Added faces to this interface. Add an entry
append(subPatchIDs[procI][interI], patchi);
append(subPatchStarts[procI][interI], oldSz);
}
}
}
// 2. Any new interfaces
forAll(subPatchIDs, procI)
{
label nIntfcs = interPatchFaces[procI].size();
subPatchIDs[procI].setSize(nIntfcs, labelList(1, patchi));
subPatchStarts[procI].setSize(nIntfcs, labelList(1, 0));
}
}
}
// Shrink processor patch face addressing
forAll(interPatchFaces, procI)
{
DynamicList<DynamicList<label> >& curInterPatchFaces =
interPatchFaces[procI];
forAll(curInterPatchFaces, i)
{
curInterPatchFaces[i].shrink();
}
curInterPatchFaces.shrink();
}
// Sort inter-proc patch by neighbour
labelList order;
forAll(procNbrToInterPatch, procI)
{
label nInterfaces = procNbrToInterPatch[procI].size();
procNeighbourProcessors_[procI].setSize(nInterfaces);
procProcessorPatchSize_[procI].setSize(nInterfaces);
procProcessorPatchStartIndex_[procI].setSize(nInterfaces);
procProcessorPatchSubPatchIDs_[procI].setSize(nInterfaces);
procProcessorPatchSubPatchStarts_[procI].setSize(nInterfaces);
Info<< "Processor " << procI << endl;
// Get sorted neighbour processors
const Map<label>& curNbrToInterPatch = procNbrToInterPatch[procI];
labelList nbrs = curNbrToInterPatch.toc();
sortedOrder(nbrs, order);
DynamicList<DynamicList<label> >& curInterPatchFaces =
interPatchFaces[procI];
forAll(order, i)
{
const label nbrProc = nbrs[i];
const label interPatch = curNbrToInterPatch[nbrProc];
procNeighbourProcessors_[procI][i] = nbrProc;
procProcessorPatchSize_[procI][i] = curInterPatchFaces[i].size();
procProcessorPatchStartIndex_[procI][i] =
procFaceAddressing_[procI].size();
// Add size as last element to substarts and transfer
append
(
subPatchStarts[procI][interPatch],
curInterPatchFaces[interPatch].size()
);
procProcessorPatchSubPatchIDs_[procI][i].transfer
(
subPatchIDs[procI][interPatch]
);
procProcessorPatchSubPatchStarts_[procI][i].transfer
(
subPatchStarts[procI][interPatch]
);
curProcProcessorPatchStartIndex.setSize
(
interProcBoundaries[procI].size()
);
Info<< " nbr:" << nbrProc << endl;
Info<< " interpatch:" << interPatch << endl;
Info<< " size:" << procProcessorPatchSize_[procI][i] << endl;
Info<< " start:" << procProcessorPatchStartIndex_[procI][i]
<< endl;
Info<< " subPatches:" << procProcessorPatchSubPatchIDs_[procI][i]
<< endl;
Info<< " subStarts:"
<< procProcessorPatchSubPatchStarts_[procI][i] << endl;
label nProcPatches = 0;
// And add all the face labels for interPatch
DynamicList<label>& interPatchFaces =
curInterPatchFaces[interPatch];
SLList<label>::iterator curInterProcBdrsIter =
interProcBoundaries[procI].begin();
SLList<SLList<label> >::iterator curInterProcBFacesIter =
interProcBFaces[procI].begin();
for
(
;
curInterProcBdrsIter != interProcBoundaries[procI].end()
&& curInterProcBFacesIter != interProcBFaces[procI].end();
++curInterProcBdrsIter, ++curInterProcBFacesIter
)
forAll(interPatchFaces, j)
{
curProcNeighbourProcessors[nProcPatches] =
curInterProcBdrsIter();
procFaceAddressing_[procI].append(interPatchFaces[j]);
}
interPatchFaces.clearStorage();
}
curInterPatchFaces.clearStorage();
procFaceAddressing_[procI].shrink();
}
// Get start index for processor patch
curProcProcessorPatchStartIndex[nProcPatches] = nFaces;
label& curSize =
curProcProcessorPatchSize[nProcPatches];
curSize = 0;
// add faces for this processor boundary
for
(
SLList<label>::iterator curFacesIter =
curInterProcBFacesIter().begin();
curFacesIter != curInterProcBFacesIter().end();
++curFacesIter
)
// Set the patch map. No filterPatches allowed.
forAll(procBoundaryAddressing_, procI)
{
// add the face
label nNormal = procPatchSize_[procI].size();
label nInterProc = procProcessorPatchSize_[procI].size();
// Remember to increment the index by one such that the
// turning index works properly.
if (cellToProc_[owner[curFacesIter()]] == procI)
procBoundaryAddressing_[procI].setSize(nNormal + nInterProc);
for (label patchI = 0; patchI < nNormal; patchI++)
{
curProcFaceAddressing[nFaces] = curFacesIter() + 1;
procBoundaryAddressing_[procI][patchI] = patchI;
}
else
for (label patchI = nNormal; patchI < nNormal+nInterProc; patchI++)
{
// turning face
curProcFaceAddressing[nFaces] = -(curFacesIter() + 1);
}
// increment the size
curSize++;
nFaces++;
}
nProcPatches++;
}
procBoundaryAddressing_[procI][patchI] = -1;
}
}
Info << "\nCalculating processor boundary addressing" << endl;
// For every patch of processor boundary, find the index of the original
// patch. Mis-alignment is caused by the fact that patches with zero size
// are omitted. For processor patches, set index to -1.
// At the same time, filter the procPatchSize_ and procPatchStartIndex_
// lists to exclude zero-size patches
forAll (procPatchSize_, procI)
//XXXXXXX
// Print a bit
forAll(procPatchStartIndex_, procI)
{
// Make a local copy of old lists
const labelList oldPatchSizes = procPatchSize_[procI];
Info<< "Processor:" << procI << endl;
const labelList oldPatchStarts = procPatchStartIndex_[procI];
Info<< " total faces:" << procFaceAddressing_[procI].size() << endl;
labelList& curPatchSizes = procPatchSize_[procI];
const labelList& curProcPatchStartIndex = procPatchStartIndex_[procI];
labelList& curPatchStarts = procPatchStartIndex_[procI];
const labelList& curProcessorPatchSizes =
procProcessorPatchSize_[procI];
labelList& curBoundaryAddressing = procBoundaryAddressing_[procI];
curBoundaryAddressing.setSize
(
oldPatchSizes.size()
+ curProcessorPatchSizes.size()
);
label nPatches = 0;
forAll (oldPatchSizes, patchi)
forAll(curProcPatchStartIndex, patchI)
{
if (!filterEmptyPatches || oldPatchSizes[patchi] > 0)
Info<< " patch:" << patchI
<< "\tstart:" << curProcPatchStartIndex[patchI]
<< "\tsize:" << procPatchSize_[procI][patchI]
<< endl;
}
}
Info<< endl;
forAll(procBoundaryAddressing_, procI)
{
curBoundaryAddressing[nPatches] = patchi;
curPatchSizes[nPatches] = oldPatchSizes[patchi];
curPatchStarts[nPatches] = oldPatchStarts[patchi];
nPatches++;
}
Info<< "Processor:" << procI << endl;
Info<< " patchMap:" << procBoundaryAddressing_[procI] << endl;
}
Info<< endl;
// reset to the size of live patches
curPatchSizes.setSize(nPatches);
curPatchStarts.setSize(nPatches);
forAll (curProcessorPatchSizes, procPatchI)
forAll(procNeighbourProcessors_, procI)
{
curBoundaryAddressing[nPatches] = -1;
Info<< "Processor " << procI << endl;
nPatches++;
forAll(procNeighbourProcessors_[procI], i)
{
Info<< " nbr:" << procNeighbourProcessors_[procI][i] << endl;
Info<< " size:" << procProcessorPatchSize_[procI][i] << endl;
Info<< " start:" << procProcessorPatchStartIndex_[procI][i]
<< endl;
}
}
Info<< endl;
// forAll(procFaceAddressing_, procI)
// {
// Info<< "Processor:" << procI << endl;
//
// Info<< " faces:" << procFaceAddressing_[procI] << endl;
// }
curBoundaryAddressing.setSize(nPatches);
}
Info << "\nDistributing points to processors" << endl;
// For every processor, loop through the list of faces for the processor.
@ -701,77 +525,4 @@ void domainDecomposition::decomposeMesh(const bool filterEmptyPatches)
// Reset the size of used points
procPointLabels.setSize(nUsedPoints);
}
// Gather data about globally shared points
// Memory management
{
labelList pointsUsage(nPoints(), 0);
// Globally shared points are the ones used by more than 2 processors
// Size the list approximately and gather the points
labelHashSet gSharedPoints
(
min(100, nPoints()/1000)
);
// Loop through all the processors and mark up points used by
// processor boundaries. When a point is used twice, it is a
// globally shared point
for (label procI = 0; procI < nProcs_; procI++)
{
// Get list of face labels
const labelList& curFaceLabels = procFaceAddressing_[procI];
// Get start of processor faces
const labelList& curProcessorPatchStarts =
procProcessorPatchStartIndex_[procI];
const labelList& curProcessorPatchSizes =
procProcessorPatchSize_[procI];
// Reset the lookup list
pointsUsage = 0;
forAll (curProcessorPatchStarts, patchi)
{
const label curStart = curProcessorPatchStarts[patchi];
const label curEnd = curStart + curProcessorPatchSizes[patchi];
for
(
label facei = curStart;
facei < curEnd;
facei++
)
{
// Mark the original face as used
// Remember to decrement the index by one (turning index)
//
const label curF = mag(curFaceLabels[facei]) - 1;
const face& f = fcs[curF];
forAll (f, pointi)
{
if (pointsUsage[f[pointi]] == 0)
{
// Point not previously used
pointsUsage[f[pointi]] = patchi + 1;
}
else if (pointsUsage[f[pointi]] != patchi + 1)
{
// Point used by some other patch = global point!
gSharedPoints.insert(f[pointi]);
}
}
}
}
}
// Grab the result from the hash list
globallySharedPoints_ = gSharedPoints.toc();
sort(globallySharedPoints_);
}
}

View File

@ -63,7 +63,7 @@ Usage
#include "OSspecific.H"
#include "fvCFD.H"
#include "IOobjectList.H"
#include "processorFvPatchFields.H"
//#include "processorFvPatchFields.H"
#include "domainDecomposition.H"
#include "labelIOField.H"
#include "scalarIOField.H"

View File

@ -27,7 +27,6 @@ License
#include "domainDecomposition.H"
#include "decompositionMethod.H"
#include "cpuTime.H"
#include "cyclicPolyPatch.H"
#include "cellSet.H"
#include "regionSplit.H"

View File

@ -91,8 +91,8 @@ domainDecomposition::domainDecomposition(const IOobject& io)
procNeighbourProcessors_(nProcs_),
procProcessorPatchSize_(nProcs_),
procProcessorPatchStartIndex_(nProcs_),
globallySharedPoints_(0),
cyclicParallel_(false)
procProcessorPatchSubPatchIDs_(nProcs_),
procProcessorPatchSubPatchStarts_(nProcs_)
{
if (decompositionDict_.found("distributed"))
{
@ -114,15 +114,6 @@ bool domainDecomposition::writeDecomposition()
{
Info<< "\nConstructing processor meshes" << endl;
// Make a lookup map for globally shared points
Map<label> sharedPointLookup(2*globallySharedPoints_.size());
forAll (globallySharedPoints_, pointi)
{
sharedPointLookup.insert(globallySharedPoints_[pointi], pointi);
}
// Mark point/faces/cells that are in zones.
// -1 : not in zone
// -2 : in multiple zones
@ -293,6 +284,12 @@ bool domainDecomposition::writeDecomposition()
const labelList& curProcessorPatchStarts =
procProcessorPatchStartIndex_[procI];
const labelListList& curSubPatchIDs =
procProcessorPatchSubPatchIDs_[procI];
const labelListList& curSubStarts =
procProcessorPatchSubPatchStarts_[procI];
const polyPatchList& meshPatches = boundaryMesh();
List<polyPatch*> procPatches
@ -331,12 +328,24 @@ bool domainDecomposition::writeDecomposition()
nPatches,
procMesh.boundaryMesh(),
procI,
curNeighbourProcessors[procPatchI]
curNeighbourProcessors[procPatchI],
curSubPatchIDs[procPatchI],
curSubStarts[procPatchI]
);
nPatches++;
}
forAll(procPatches, patchI)
{
Pout<< " " << patchI
<< '\t' << "name:" << procPatches[patchI]->name()
<< '\t' << "type:" << procPatches[patchI]->type()
<< '\t' << "size:" << procPatches[patchI]->size()
<< endl;
}
// Add boundary patches
procMesh.addPatches(procPatches);

View File

@ -30,6 +30,7 @@ Description
SourceFiles
domainDecomposition.C
decomposeMesh.C
\*---------------------------------------------------------------------------*/
@ -78,9 +79,9 @@ class domainDecomposition
// index is negative, the processor face is the reverse of the
// original face. In order to do this properly, all face
// indices will be incremented by 1 and then decremented as
// necessary t avoid the problem of face number zero having no
// necessary to avoid the problem of face number zero having no
// sign.
labelListList procFaceAddressing_;
List<DynamicList<label> > procFaceAddressing_;
//- Labels of cells for each processor
labelListList procCellAddressing_;
@ -96,21 +97,23 @@ class domainDecomposition
// Excludes inter-processor boundaries
labelListList procPatchStartIndex_;
// Per inter-processor patch information
//- Neighbour processor ID for inter-processor boundaries
labelListList procNeighbourProcessors_;
//- Sizes for inter-processor patches
labelListList procProcessorPatchSize_;
//- Start indices for inter-processor patches
//- Start indices (in procFaceAddressing_) for inter-processor patches
labelListList procProcessorPatchStartIndex_;
//- List of globally shared point labels
labelList globallySharedPoints_;
//- Are there cyclic-parallel faces
bool cyclicParallel_;
//- Sub patch IDs for inter-processor patches
List<labelListList> procProcessorPatchSubPatchIDs_;
//- Sub patch sizes for inter-processor patches
List<labelListList> procProcessorPatchSubPatchStarts_;
// Private Member Functions
@ -124,6 +127,21 @@ class domainDecomposition
labelList& elementToZone
);
//- Append single element to list
static void append(labelList&, const label);
//- Add face to interProcessor patch.
void addInterProcFace
(
const label facei,
const label ownerProc,
const label nbrProc,
List<Map<label> >&,
List<DynamicList<DynamicList<label> > >&
) const;
public:
// Constructors

View File

@ -18,16 +18,16 @@ wmake libso finiteVolume
( cd decompositionAgglomeration && ./Allwmake )
wmake libso sampling
#wmake libso sampling
wmake libso dynamicMesh
wmake libso dynamicFvMesh
wmake libso topoChangerFvMesh
wmake libso fvMotionSolver
wmake libso engine
wmake libso ODE
wmake libso randomProcesses
#wmake libso dynamicFvMesh
#wmake libso topoChangerFvMesh
#wmake libso fvMotionSolver
#wmake libso engine
#
#wmake libso ODE
#wmake libso randomProcesses
( cd thermophysicalModels && ./Allwmake )
( cd transportModels && ./Allwmake )
@ -36,7 +36,7 @@ wmake libso randomProcesses
( cd postProcessing && ./Allwmake )
( cd conversion && ./Allwmake )
wmake libso autoMesh
wmake libso errorEstimation
#wmake libso autoMesh
#wmake libso errorEstimation
# ----------------------------------------------------------------- end-of-file

View File

@ -215,7 +215,9 @@ $(lduMatrix)/preconditioners/diagonalPreconditioner/diagonalPreconditioner.C
$(lduMatrix)/preconditioners/DICPreconditioner/DICPreconditioner.C
$(lduMatrix)/preconditioners/FDICPreconditioner/FDICPreconditioner.C
$(lduMatrix)/preconditioners/DILUPreconditioner/DILUPreconditioner.C
/*
$(lduMatrix)/preconditioners/GAMGPreconditioner/GAMGPreconditioner.C
*/
lduAddressing = $(lduMatrix)/lduAddressing
$(lduAddressing)/lduAddressing.C
@ -228,6 +230,7 @@ $(lduInterfaceFields)/lduInterfaceField/lduInterfaceField.C
$(lduInterfaceFields)/processorLduInterfaceField/processorLduInterfaceField.C
$(lduInterfaceFields)/cyclicLduInterfaceField/cyclicLduInterfaceField.C
/*
GAMG = $(lduMatrix)/solvers/GAMG
$(GAMG)/GAMGSolver.C
$(GAMG)/GAMGSolverAgglomerateMatrix.C
@ -259,6 +262,7 @@ $(pairGAMGAgglomeration)/pairGAMGAgglomerationCombineLevels.C
algebraicPairGAMGAgglomeration = $(GAMGAgglomerations)/algebraicPairGAMGAgglomeration
$(algebraicPairGAMGAgglomeration)/algebraicPairGAMGAgglomeration.C
*/
meshes/lduMesh/lduMesh.C
@ -463,6 +467,7 @@ $(Fields)/diagTensorField/diagTensorIOField.C
$(Fields)/symmTensorField/symmTensorIOField.C
$(Fields)/tensorField/tensorIOField.C
$(Fields)/transformField/transformField.C
pointPatchFields = fields/pointPatchFields
$(pointPatchFields)/pointPatchField/pointPatchFields.C

View File

@ -442,26 +442,14 @@ void Foam::FaceCellWave<Type>::enterDomain
template <class Type>
void Foam::FaceCellWave<Type>::transform
(
const tensorField& rotTensor,
const tensor& rotTensor,
const label nFaces,
List<Type>& faceInfo
)
{
if (rotTensor.size() == 1)
{
const tensor& T = rotTensor[0];
for(label faceI = 0; faceI < nFaces; faceI++)
{
faceInfo[faceI].transform(mesh_, T);
}
}
else
{
for(label faceI = 0; faceI < nFaces; faceI++)
{
faceInfo[faceI].transform(mesh_, rotTensor[faceI]);
}
faceInfo[faceI].transform(mesh_, rotTensor);
}
}

View File

@ -254,7 +254,7 @@ class FaceCellWave
//- Apply transformation to Type
void transform
(
const tensorField& rotTensor,
const tensor& rotTensor,
const label nFaces,
List<Type>& faceInfo
);

View File

@ -28,7 +28,11 @@ License
// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //
Foam::labelList Foam::invert(const label len, const labelList& map)
Foam::labelList Foam::invert
(
const label len,
const UList<label>& map
)
{
labelList inverse(len, -1);
@ -40,8 +44,8 @@ Foam::labelList Foam::invert(const label len, const labelList& map)
{
if (inverse[newPos] >= 0)
{
FatalErrorIn("invert(const label, const labelList&)")
<< "Map is not one to one. At index " << i
FatalErrorIn("invert(const label, const UList<label>&)")
<< "Map is not one-to-one. At index " << i
<< " element " << newPos << " has already occurred before"
<< nl << "Please use invertOneToMany instead"
<< abort(FatalError);
@ -54,7 +58,11 @@ Foam::labelList Foam::invert(const label len, const labelList& map)
}
Foam::labelListList Foam::invertOneToMany(const label len, const labelList& map)
Foam::labelListList Foam::invertOneToMany
(
const label len,
const UList<label>& map
)
{
labelList nElems(len, 0);

View File

@ -44,56 +44,69 @@ SourceFiles
namespace Foam
{
//- Renumber the values (not the indices) of a list. List elements <= 0 are
// left as is.
template<class List>
List renumber(const labelList& oldToNew, const List&);
//- Renumber the values (not the indices) of a list.
// Negative ListType elements are left as is.
template<class ListType>
ListType renumber(const UList<label>& oldToNew, const ListType&);
//- Inplace renumber the values of a list. List elements <= 0 are
// left as is.
template<class List>
void inplaceRenumber(const labelList& oldToNew, List&);
//- Inplace renumber the values of a list.
// Negative ListType elements are left as is.
template<class ListType>
void inplaceRenumber(const UList<label>& oldToNew, ListType&);
//- Reorder the elements (indices, not values) of a list.
// List elements <= 0 are left as is.
template<class List>
List reorder(const labelList& oldToNew, const List&);
// Negative ListType elements are left as is.
template<class ListType>
ListType reorder(const UList<label>& oldToNew, const ListType&);
//- Inplace reorder the elements of a list.
// List elements <= 0 are left as is.
template<class List>
void inplaceReorder(const labelList& oldToNew, List&);
// Negative ListType elements are left as is.
template<class ListType>
void inplaceReorder(const UList<label>& oldToNew, ListType&);
// Variants to work with iterators and sparse tables. Need to have iterators
// and insert()
// Variants to work with iterators and sparse tables.
// Need to have iterators and insert()
//- Map values. Do not map negative values.
template<class Container>
void inplaceMapValue(const labelList& oldToNew, Container&);
void inplaceMapValue(const UList<label>& oldToNew, Container&);
//- Recreate with mapped keys. Remove elements with negative key.
template<class Container>
void inplaceMapKey(const labelList& oldToNew, Container&);
void inplaceMapKey(const UList<label>& oldToNew, Container&);
//- Extract elements of List whose region is certain value. Use e.g.
// to extract all selected elements:
//- Generate the (stable) sort order for the list
template<class T>
void sortedOrder(const UList<T>&, labelList& order);
//- Generate (sorted) indices corresponding to duplicate list values
template<class T>
void duplicateOrder(const UList<T>&, labelList& order);
//- Generate (sorted) indices corresponding to unique list values
template<class T>
void uniqueOrder(const UList<T>&, labelList& order);
//- Extract elements of List whose region is certain value.
// Use e.g. to extract all selected elements:
// subset<boolList, labelList>(selectedElems, true, lst);
template<class T, class List>
List subset(const UList<T>& regions, const T& region, const List&);
template<class T, class ListType>
ListType subset(const UList<T>& regions, const T& region, const ListType&);
//- Inplace extract elements of List whose region is certain value. Use e.g.
// to extract all selected elements:
// inplaceSubset<boolList, labelList>(selectedElems, true, lst);
template<class T, class List>
void inplaceSubset(const UList<T>& regions, const T& region, List&);
template<class T, class ListType>
void inplaceSubset(const UList<T>& regions, const T& region, ListType&);
//- Invert one-to-one map. Unmapped elements will be -1.
labelList invert(const label len, const labelList& oldToNew);
labelList invert(const label len, const UList<label>&);
//- Invert one-to-many map. Unmapped elements will be size 0.
labelListList invertOneToMany(const label len, const labelList&);
labelListList invertOneToMany(const label len, const UList<label>&);
//- Invert many-to-many. Input and output types need to be inherited
// from List. E.g. faces to pointFaces.
@ -113,72 +126,72 @@ labelList identity(const label len);
//- Find first occurrence of given element and return index,
// return -1 if not found. Linear search.
template<class List>
template<class ListType>
label findIndex
(
const List&,
typename List::const_reference,
const ListType&,
typename ListType::const_reference,
const label start=0
);
//- Find all occurrences of given element. Linear search.
template<class List>
template<class ListType>
labelList findIndices
(
const List&,
typename List::const_reference,
const ListType&,
typename ListType::const_reference,
const label start=0
);
//- Opposite of findIndices: set values at indices to given value
template<class List>
template<class ListType>
void setValues
(
List&,
const labelList& indices,
typename List::const_reference
ListType&,
const UList<label>& indices,
typename ListType::const_reference
);
//- Create a list of given size filled with initValue, with setValue
// at the given indices
template<class List>
List createWithValues
template<class ListType>
ListType createWithValues
(
const label sz,
const typename List::const_reference initValue,
const labelList& indices,
typename List::const_reference setValue
const typename ListType::const_reference initValue,
const UList<label>& indices,
typename ListType::const_reference setValue
);
//- Find index of max element (and larger than given element).
// return -1 if not found. Linear search.
template<class List>
label findMax(const List&, const label start=0);
template<class ListType>
label findMax(const ListType&, const label start=0);
//- Find index of min element (and less than given element).
// return -1 if not found. Linear search.
template<class List>
label findMin(const List&, const label start=0);
template<class ListType>
label findMin(const ListType&, const label start=0);
//- Find first occurrence of given element in sorted list and return index,
// return -1 if not found. Binary search.
template<class List>
template<class ListType>
label findSortedIndex
(
const List&,
typename List::const_reference,
const ListType&,
typename ListType::const_reference,
const label start=0
);
//- Find last element < given value in sorted list and return index,
// return -1 if not found. Binary search.
template<class List>
template<class ListType>
label findLower
(
const List&,
typename List::const_reference,
const ListType&,
typename ListType::const_reference,
const label start=0
);

View File

@ -28,15 +28,15 @@ License
// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //
template<class List>
List Foam::renumber
template<class ListType>
ListType Foam::renumber
(
const labelList& oldToNew,
const List& lst
const UList<label>& oldToNew,
const ListType& lst
)
{
// Create copy
List newLst(lst.size());
ListType newLst(lst.size());
forAll(lst, elemI)
{
@ -50,11 +50,11 @@ List Foam::renumber
}
template<class List>
template<class ListType>
void Foam::inplaceRenumber
(
const labelList& oldToNew,
List& lst
const UList<label>& oldToNew,
ListType& lst
)
{
forAll(lst, elemI)
@ -67,15 +67,15 @@ void Foam::inplaceRenumber
}
template<class List>
List Foam::reorder
template<class ListType>
ListType Foam::reorder
(
const labelList& oldToNew,
const List& lst
const UList<label>& oldToNew,
const ListType& lst
)
{
// Create copy
List newLst(lst.size());
ListType newLst(lst.size());
forAll(lst, elemI)
{
@ -92,15 +92,15 @@ List Foam::reorder
}
template<class List>
template<class ListType>
void Foam::inplaceReorder
(
const labelList& oldToNew,
List& lst
const UList<label>& oldToNew,
ListType& lst
)
{
// Create copy
List newLst(lst.size());
ListType newLst(lst.size());
forAll(lst, elemI)
{
@ -121,7 +121,7 @@ void Foam::inplaceReorder
template<class Container>
void Foam::inplaceMapValue
(
const labelList& oldToNew,
const UList<label>& oldToNew,
Container& lst
)
{
@ -143,7 +143,7 @@ void Foam::inplaceMapValue
template<class Container>
void Foam::inplaceMapKey
(
const labelList& oldToNew,
const UList<label>& oldToNew,
Container& lst
)
{
@ -166,18 +166,90 @@ void Foam::inplaceMapKey
}
template<class T, class List>
List Foam::subset(const UList<T>& regions, const T& region, const List& lst)
template<class T>
void Foam::sortedOrder
(
const UList<T>& lst,
labelList& order
)
{
order.setSize(lst.size());
forAll(order, elemI)
{
order[elemI] = elemI;
}
Foam::stableSort(order, typename UList<T>::less(lst));
}
template<class T>
void Foam::duplicateOrder
(
const UList<T>& lst,
labelList& order
)
{
if (lst.size() < 2)
{
order.clear();
return;
}
sortedOrder(lst, order);
label n = 0;
for (label i = 0; i < order.size() - 1; ++i)
{
if (lst[order[i]] == lst[order[i+1]])
{
order[n++] = order[i];
}
}
order.setSize(n);
}
template<class T>
void Foam::uniqueOrder
(
const UList<T>& lst,
labelList& order
)
{
sortedOrder(lst, order);
if (order.size() > 1)
{
label n = 0;
for (label i = 0; i < order.size() - 1; ++i)
{
if (lst[order[i]] != lst[order[i+1]])
{
order[n++] = order[i];
}
}
// Keep the last element of the sorted order as well; it is either unique
// or the last of a run of duplicates
order[n++] = order[order.size()-1];
order.setSize(n);
}
}
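For illustration, an assumed usage of the new ordering helpers (values made up); sortedOrder returns the indices that visit the list in stable ascending order, duplicateOrder the sorted indices pointing at duplicated values:

    labelList vals(4);
    vals[0] = 3; vals[1] = 1; vals[2] = 3; vals[3] = 0;

    labelList order;
    sortedOrder(vals, order);     // order = (3 1 0 2)
    duplicateOrder(vals, order);  // order = (0), first index of the repeated 3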
template<class T, class ListType>
ListType Foam::subset
(
const UList<T>& regions,
const T& region,
const ListType& lst
)
{
if (regions.size() < lst.size())
{
FatalErrorIn("subset(const UList<T>&, const T&, const List&)")
FatalErrorIn("subset(const UList<T>&, const T&, const ListType&)")
<< "Regions is of size " << regions.size()
<< "; list it is supposed to index is of size " << lst.size()
<< abort(FatalError);
}
List newLst(lst.size());
ListType newLst(lst.size());
label nElem = 0;
forAll(lst, elemI)
@ -193,12 +265,17 @@ List Foam::subset(const UList<T>& regions, const T& region, const List& lst)
}
template<class T, class List>
void Foam::inplaceSubset(const UList<T>& regions, const T& region, List& lst)
template<class T, class ListType>
void Foam::inplaceSubset
(
const UList<T>& regions,
const T& region,
ListType& lst
)
{
if (regions.size() < lst.size())
{
FatalErrorIn("inplaceSubset(const UList<T>&, const T&, List&)")
FatalErrorIn("inplaceSubset(const UList<T>&, const T&, ListType&)")
<< "Regions is of size " << regions.size()
<< "; list it is supposed to index is of size " << lst.size()
<< abort(FatalError);
@ -221,8 +298,8 @@ void Foam::inplaceSubset(const UList<T>& regions, const T& region, List& lst)
}
// As clarification coded as inversion from pointEdges to edges but completely
// general.
// As clarification:
// coded as inversion from pointEdges to edges but completely general.
template<class InList, class OutList>
void Foam::invertManyToMany
(
@ -268,11 +345,11 @@ void Foam::invertManyToMany
}
template<class List>
template<class ListType>
Foam::label Foam::findIndex
(
const List& l,
typename List::const_reference t,
const ListType& l,
typename ListType::const_reference t,
const label start
)
{
@ -291,11 +368,11 @@ Foam::label Foam::findIndex
}
template<class List>
template<class ListType>
Foam::labelList Foam::findIndices
(
const List& l,
typename List::const_reference t,
const ListType& l,
typename ListType::const_reference t,
const label start
)
{
@ -326,12 +403,12 @@ Foam::labelList Foam::findIndices
}
template<class List>
template<class ListType>
void Foam::setValues
(
List& l,
const labelList& indices,
typename List::const_reference t
ListType& l,
const UList<label>& indices,
typename ListType::const_reference t
)
{
forAll(indices, i)
@ -341,23 +418,23 @@ void Foam::setValues
}
template<class List>
List Foam::createWithValues
template<class ListType>
ListType Foam::createWithValues
(
const label sz,
const typename List::const_reference initValue,
const labelList& indices,
typename List::const_reference setValue
const typename ListType::const_reference initValue,
const UList<label>& indices,
typename ListType::const_reference setValue
)
{
List l(sz, initValue);
ListType l(sz, initValue);
setValues(l, indices, setValue);
return l;
}
template<class List>
Foam::label Foam::findMax(const List& l, const label start)
template<class ListType>
Foam::label Foam::findMax(const ListType& l, const label start)
{
if (start >= l.size())
{
@ -378,8 +455,8 @@ Foam::label Foam::findMax(const List& l, const label start)
}
template<class List>
Foam::label Foam::findMin(const List& l, const label start)
template<class ListType>
Foam::label Foam::findMin(const ListType& l, const label start)
{
if (start >= l.size())
{
@ -400,11 +477,11 @@ Foam::label Foam::findMin(const List& l, const label start)
}
template<class List>
template<class ListType>
Foam::label Foam::findSortedIndex
(
const List& l,
typename List::const_reference t,
const ListType& l,
typename ListType::const_reference t,
const label start
)
{
@ -438,11 +515,11 @@ Foam::label Foam::findSortedIndex
}
template<class List>
template<class ListType>
Foam::label Foam::findLower
(
const List& l,
typename List::const_reference t,
const ListType& l,
typename ListType::const_reference t,
const label start
)
{
@ -489,31 +566,31 @@ Foam::label Foam::findLower
template<class Container, class T, int nRows>
Foam::List<Container> Foam::initList(const T elems[nRows])
{
List<Container> faces(nRows);
List<Container> lst(nRows);
forAll(faces, faceI)
forAll(lst, rowI)
{
faces[faceI] = Container(elems[faceI]);
lst[rowI] = Container(elems[rowI]);
}
return faces;
return lst;
}
template<class Container, class T, int nRows, int nColumns>
Foam::List<Container> Foam::initListList(const T elems[nRows][nColumns])
{
List<Container> faces(nRows);
List<Container> lst(nRows);
Container f(nColumns);
forAll(faces, faceI)
Container cols(nColumns);
forAll(lst, rowI)
{
forAll(f, i)
forAll(cols, colI)
{
f[i] = elems[faceI][i];
cols[colI] = elems[rowI][colI];
}
faces[faceI] = f;
lst[rowI] = cols;
}
return faces;
return lst;
}
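A minimal usage sketch of these list helpers after the rename to ListType (assumes ListOps.H; the lists and values are hypothetical):

    #include "ListOps.H"
    #include "labelList.H"
    #include "scalarList.H"

    using namespace Foam;

    // Region id per element and the values to be filtered
    labelList regions(4);
    regions[0] = 0; regions[1] = 1; regions[2] = 1; regions[3] = 0;

    scalarList vals(4);
    vals[0] = 0.1; vals[1] = 2.5; vals[2] = 7.0; vals[3] = 0.3;

    // Keep only the entries whose region id equals 1: (2.5 7.0)
    scalarList region1(subset(regions, label(1), vals));

    // Linear search from index 0: returns 2, or -1 if not present
    label pos = findIndex(vals, 7.0, 0);

    // labelList of size 6, initialised to 0, with indices 1 and 4 set to 1
    labelList where(2);
    where[0] = 1; where[1] = 4;
    labelList marked(createWithValues<labelList>(6, 0, where, 1));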

View File

@ -45,7 +45,7 @@ void Foam::UList<T>::assign(const UList<T>& a)
{
if (a.size_ != this->size_)
{
FatalErrorIn("UList<T>::operator=(const UList<T>&)")
FatalErrorIn("UList<T>::assign(const UList<T>&)")
<< "ULists have different sizes: "
<< this->size_ << " " << a.size_
<< abort(FatalError);

View File

@ -92,6 +92,26 @@ public:
//- Declare friendship with the SubList class
friend class SubList<T>;
// Public classes
//- Less function class that can be used for sorting
class less
{
const UList<T>& values_;
public:
less(const UList<T>& values)
:
values_(values)
{}
bool operator()(const label a, const label b)
{
return values_[a] < values_[b];
}
};
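A sketch of how this comparison functor can be used to sort a set of labels by the values they index, e.g. ordering face indices by face area (hypothetical values; assumes <algorithm> for std::sort):

    #include <algorithm>

    scalarList faceAreas(3);
    faceAreas[0] = 2.0; faceAreas[1] = 0.5; faceAreas[2] = 1.0;

    labelList order(faceAreas.size());
    forAll(order, i)
    {
        order[i] = i;
    }

    // order becomes (1 2 0): face indices in ascending order of area
    std::sort(order.begin(), order.end(), UList<scalar>::less(faceAreas));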
// Constructors

View File

@ -60,7 +60,7 @@ void Foam::UList<T>::writeEntry(const word& keyword, Ostream& os) const
template<class T>
Foam::Ostream& Foam:: operator<<(Foam::Ostream& os, const Foam::UList<T>& L)
Foam::Ostream& Foam::operator<<(Foam::Ostream& os, const Foam::UList<T>& L)
{
// Write list contents depending on data format
if (os.format() == IOstream::ASCII || !contiguous<T>())

View File

@ -49,32 +49,13 @@ Foam::List<T> Foam::transform
template <class T>
void Foam::transformList
(
const tensorField& rotTensor,
const tensor& rotTensor,
UList<T>& field
)
{
if (rotTensor.size() == 1)
{
forAll(field, i)
{
field[i] = transform(rotTensor[0], field[i]);
}
}
else if (rotTensor.size() == field.size())
{
forAll(field, i)
{
field[i] = transform(rotTensor[i], field[i]);
}
}
else
{
FatalErrorIn
(
"transformList(const tensorField&, UList<T>&)"
) << "Sizes of field and transformation not equal. field:"
<< field.size() << " transformation:" << rotTensor.size()
<< abort(FatalError);
field[i] = transform(rotTensor, field[i]);
}
}
@ -82,25 +63,13 @@ void Foam::transformList
template <class T>
void Foam::transformList
(
const tensorField& rotTensor,
const tensor& rotTensor,
Map<T>& field
)
{
if (rotTensor.size() == 1)
{
forAllIter(typename Map<T>, field, iter)
{
iter() = transform(rotTensor[0], iter());
}
}
else
{
FatalErrorIn
(
"transformList(const tensorField&, Map<T>&)"
) << "Multiple transformation tensors not supported. field:"
<< field.size() << " transformation:" << rotTensor.size()
<< abort(FatalError);
iter() = transform(rotTensor, iter());
}
}
@ -108,25 +77,13 @@ void Foam::transformList
template <class T>
void Foam::transformList
(
const tensorField& rotTensor,
const tensor& rotTensor,
EdgeMap<T>& field
)
{
if (rotTensor.size() == 1)
{
forAllIter(typename EdgeMap<T>, field, iter)
{
iter() = transform(rotTensor[0], iter());
}
}
else
{
FatalErrorIn
(
"transformList(const tensorField&, EdgeMap<T>&)"
) << "Multiple transformation tensors not supported. field:"
<< field.size() << " transformation:" << rotTensor.size()
<< abort(FatalError);
iter() = transform(rotTensor, iter());
}
}

View File

@ -40,7 +40,6 @@ SourceFiles
#include "List.H"
#include "Map.H"
#include "EdgeMap.H"
#include "tensorField.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -58,37 +57,47 @@ List<T> transform
const UList<T>& field
);
//- Apply transformation to list. Either single transformation tensor
// or one tensor per element.
//- Apply transformation to list.
template<class T>
void transformList(const tensorField&, UList<T>&);
void transformList(const tensor&, UList<T>&);
template<class T>
void transformList(const tensorField&, Map<T>&);
void transformList(const tensor&, Map<T>&);
template<class T>
void transformList(const tensorField&, EdgeMap<T>&);
void transformList(const tensor&, EdgeMap<T>&);
template<>
inline void transformList(const tensorField&, UList<label>&)
inline void transformList(const tensor&, UList<bool>&)
{}
template<>
inline void transformList(const tensorField&, Map<label>&)
inline void transformList(const tensor&, Map<bool>&)
{}
template<>
inline void transformList(const tensorField&, EdgeMap<label>&)
inline void transformList(const tensor&, EdgeMap<bool>&)
{}
template<>
inline void transformList(const tensorField&, UList<scalar>&)
inline void transformList(const tensor&, UList<label>&)
{}
template<>
inline void transformList(const tensorField&, Map<scalar>&)
inline void transformList(const tensor&, Map<label>&)
{}
template<>
inline void transformList(const tensorField&, EdgeMap<scalar>&)
inline void transformList(const tensor&, EdgeMap<label>&)
{}
template<>
inline void transformList(const tensor&, UList<scalar>&)
{}
template<>
inline void transformList(const tensor&, Map<scalar>&)
{}
template<>
inline void transformList(const tensor&, EdgeMap<scalar>&)
{}
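A minimal sketch of the single-tensor interface declared above: one transformation tensor is applied to every element of the list (hypothetical values; assumes transformList.H, tensor.H and vector.H):

    #include "transformList.H"
    #include "tensor.H"
    #include "vector.H"

    // 90 degree rotation about the z axis
    tensor T
    (
        0, -1, 0,
        1,  0, 0,
        0,  0, 1
    );

    List<vector> pts(2);
    pts[0] = vector(1, 0, 0);
    pts[1] = vector(0, 1, 0);

    transformList(T, pts);
    // pts is now ((0 1 0) (-1 0 0))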

View File

@ -133,8 +133,8 @@ void cyclicPointPatchField<Type>::swapAdd(Field<Type>& pField) const
forAll(pairs, pairi)
{
Type tmp = pf[pairs[pairi][0]];
pf[pairs[pairi][0]] = transform(forwardT()[0], pf[pairs[pairi][1]]);
pf[pairs[pairi][1]] = transform(reverseT()[0], tmp);
pf[pairs[pairi][0]] = transform(forwardT(), pf[pairs[pairi][1]]);
pf[pairs[pairi][1]] = transform(reverseT(), tmp);
}
}
else

View File

@ -137,13 +137,13 @@ public:
}
//- Return face transformation tensor
virtual const tensorField& forwardT() const
virtual const tensor& forwardT() const
{
return cyclicPatch_.forwardT();
}
//- Return neighbour-cell transformation tensor
virtual const tensorField& reverseT() const
virtual const tensor& reverseT() const
{
return cyclicPatch_.reverseT();
}

View File

@ -134,22 +134,120 @@ void processorPointPatchField<Type>::swapAdd(Field<Type>& pField) const
procPatch_.nonGlobalPatchPoints();
const processorPolyPatch& ppp = procPatch_.procPolyPatch();
const labelListList& pointFaces = ppp.pointFaces();
const tensorField& forwardT = ppp.forwardT();
if (forwardT.size() == 1)
// Mark patch that transformed point:
// -3 : global patch point so handled in different patch
// -2 : nonGlobalPatchPoints, initial value
// -1 : originating from internal face, no transform necessary
// >=0 : originating from coupled patch
labelList hasTransformed(ppp.nPoints(), -3);
forAll(nonGlobalPatchPoints, i)
{
transform(pnf, forwardT[0], pnf);
hasTransformed[nonGlobalPatchPoints[i]] = -2;
}
forAll(ppp.patchIDs(), subI)
{
label patchI = ppp.patchIDs()[subI];
if (patchI == -1)
{
for
(
label faceI = ppp.starts()[subI];
faceI < ppp.starts()[subI+1];
faceI++
)
{
const face& f = ppp.localFaces()[faceI];
forAll(f, fp)
{
label pointI = f[fp];
if (hasTransformed[pointI] == -3)
{
// special point, handled elsewhere
}
else if (hasTransformed[pointI] == -2)
{
// first visit. Just mark.
hasTransformed[pointI] = patchI;
}
else if (hasTransformed[pointI] == patchI)
{
// already done
}
else
{
forAll(nonGlobalPatchPoints, pfi)
{
pnf[pfi] = transform
FatalErrorIn
(
forwardT[pointFaces[nonGlobalPatchPoints[pfi]][0]],
pnf[pfi]
);
"processorPointPatchField<Type>::"
"swapAdd(Field<Type>& pField) const"
) << "Point " << pointI
<< " on patch " << ppp.name()
<< " already transformed by patch "
<< hasTransformed[pointI]
<< abort(FatalError);
}
}
}
}
else if
(
!refCast<const coupledPolyPatch>
(
ppp.boundaryMesh()[patchI]
).parallel()
)
{
const tensor& T = refCast<const coupledPolyPatch>
(
ppp.boundaryMesh()[patchI]
).forwardT();
for
(
label faceI = ppp.starts()[subI];
faceI < ppp.starts()[subI+1];
faceI++
)
{
const face& f = ppp.localFaces()[faceI];
forAll(f, fp)
{
label pointI = f[fp];
if (hasTransformed[pointI] == -3)
{
// special point, handled elsewhere
}
else if (hasTransformed[pointI] == -2)
{
pnf[pointI] = transform(T, pnf[pointI]);
hasTransformed[pointI] = patchI;
}
else if (hasTransformed[pointI] == patchI)
{
// already done
}
else
{
FatalErrorIn
(
"processorPointPatchField<Type>::"
"swapAdd(Field<Type>& pField) const"
) << "Point " << pointI
<< " on patch " << ppp.name()
<< " subPatch " << patchI
<< " already transformed by patch "
<< hasTransformed[pointI]
<< abort(FatalError);
}
}
}
}
}
}
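Stripped of the per-sub-patch bookkeeping, the marking idea above reduces to applying a transformation to every point of a set of faces exactly once, even though a point is shared by several faces. A rough standalone sketch (patchFaces, pnf, T and nPoints are hypothetical):

    labelList visited(nPoints, -1);

    forAll(patchFaces, facei)
    {
        const face& f = patchFaces[facei];

        forAll(f, fp)
        {
            const label pointi = f[fp];

            if (visited[pointi] == -1)
            {
                pnf[pointi] = transform(T, pnf[pointi]);
                visited[pointi] = facei;    // later faces skip this point
            }
        }
    }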

View File

@ -154,8 +154,8 @@ public:
{
return
!(
procPatch_.procPolyPatch().parallel()
|| pTraits<Type>::rank == 0
pTraits<Type>::rank == 0
|| procPatch_.procPolyPatch().parallel()
);
}

View File

@ -34,6 +34,15 @@ namespace Foam
defineTypeNameAndDebug(cyclicLduInterface, 0);
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::cyclicLduInterface::cyclicLduInterface(const label neighbPatchID)
:
neighbPatchID_(neighbPatchID)
{}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::cyclicLduInterface::~cyclicLduInterface()

View File

@ -50,6 +50,12 @@ namespace Foam
class cyclicLduInterface
{
// Private data
//- Coupled patch
const label neighbPatchID_;
// const cyclicLduInterface* neighbPatch_;
public:
@ -58,6 +64,9 @@ public:
// Constructors
//- Construct from components
cyclicLduInterface(const label neighbPatchID);
// Destructor
virtual ~cyclicLduInterface();
@ -67,11 +76,28 @@ public:
// Access
//- Return neighbour patch ID
label neighbPatchID() const
{
return neighbPatchID_;
}
// //- Return processor number
// const cyclicLduInterface& neighbPatch() const
// {
// if (!neighbPatch_)
// {
// FatalErrorIn("cyclicLduInterface::neighbPatch() const")
// << "Not owner." << abort(FatalError);
// }
// return neighbPatchID_;
// }
//- Return face transformation tensor
virtual const tensorField& forwardT() const = 0;
virtual const tensor& forwardT() const = 0;
//- Return face reverse transformation tensor
virtual const tensorField& reverseT() const = 0;
virtual const tensor& reverseT() const = 0;
};

View File

@ -36,18 +36,6 @@ namespace Foam
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void Foam::processorLduInterface::resizeBuf
(
List<char>& buf,
const label size
) const
{
if (buf.size() < size)
{
buf.setSize(size);
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //

View File

@ -61,9 +61,6 @@ class processorLduInterface
// Only sized and used when compressed or non-blocking comms used.
mutable List<char> receiveBuf_;
//- Resize the buffer if required
void resizeBuf(List<char>& buf, const label size) const;
public:
@ -92,8 +89,17 @@ public:
//- Return neighbour processor number
virtual int neighbProcNo() const = 0;
//- Return face transformation tensor
virtual const tensorField& forwardT() const = 0;
//- Set send buffer to sufficient size to hold List<Type>(nElems).
// Returns reference to buffer (note: buffer.size() is number
// of characters, not nElems)
template<class Type>
List<Type>& setSendBuf(const label nElems) const;
//- Set receive buffer to sufficient size to hold List<Type>(nElems)
// Returns reference to buffer (note: buffer.size() is number
// of characters, not nElems)
template<class Type>
List<Type>& setReceiveBuf(const label nElems) const;
// Transfer functions
@ -146,6 +152,26 @@ public:
const Pstream::commsTypes commsType,
const label size
) const;
//- Raw field send function of internal send buffer with data
// compression.
template<class Type>
void compressedBufferSend
(
const Pstream::commsTypes commsType
) const;
//- Raw field receive function of internal receive buffer with data
// compression.
// Returns reference to buffer (note: buffer.size() is number
// of characters, not size)
template<class Type>
const List<Type>& compressedBufferReceive
(
const Pstream::commsTypes commsType,
const label size
) const;
};
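Note that the List<Type>& returned by setSendBuf/setReceiveBuf is a reinterpreted view of the underlying char buffer, so, as the comments above say, its size() is a byte count. A hypothetical caller inside a derived class therefore copies through begin() rather than indexing the view:

    // Reserve room for f.size() elements and copy the contiguous data in
    List<Type>& buf = setSendBuf<Type>(f.size());
    memcpy(buf.begin(), f.begin(), f.byteSize());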

View File

@ -30,6 +30,49 @@ License
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
Foam::List<Type>& Foam::processorLduInterface::setSendBuf(const label nElems)
const
{
if (!contiguous<Type>())
{
FatalErrorIn("processorLduInterface::setSendBuf(const label) const")
<< "Cannot return the binary size of a list of "
"non-primitive elements"
<< abort(FatalError);
}
label nBytes = nElems*sizeof(Type);
sendBuf_.setSize(nBytes);
return reinterpret_cast<List<Type>&>(sendBuf_);
}
template<class Type>
Foam::List<Type>& Foam::processorLduInterface::setReceiveBuf
(
const label nElems
) const
{
if (!contiguous<Type>())
{
FatalErrorIn("processorLduInterface::setReceiveBuf(const label) const")
<< "Cannot return the binary size of a list of "
"non-primitive elements"
<< abort(FatalError);
}
label nBytes = nElems*sizeof(Type);
//receiveBuf_.setSize(nBytes, '\0'); // necessary because of expanding
// compression?
receiveBuf_.setSize(nBytes);
return reinterpret_cast<List<Type>&>(receiveBuf_);
}
template<class Type>
void Foam::processorLduInterface::send
(
@ -49,17 +92,17 @@ void Foam::processorLduInterface::send
}
else if (commsType == Pstream::nonBlocking)
{
resizeBuf(receiveBuf_, f.size()*sizeof(Type));
setReceiveBuf<Type>(f.size());
IPstream::read
(
commsType,
neighbProcNo(),
receiveBuf_.begin(),
receiveBuf_.size()
f.byteSize()
);
resizeBuf(sendBuf_, f.byteSize());
setSendBuf<Type>(f.size());
memcpy(sendBuf_.begin(), f.begin(), f.byteSize());
OPstream::write
@ -139,7 +182,7 @@ void Foam::processorLduInterface::compressedSend
const scalar *sArray = reinterpret_cast<const scalar*>(f.begin());
const scalar *slast = &sArray[nm1];
resizeBuf(sendBuf_, nBytes);
setSendBuf<float>(nFloats);
float *fArray = reinterpret_cast<float*>(sendBuf_.begin());
for (register label i=0; i<nm1; i++)
@ -161,7 +204,7 @@ void Foam::processorLduInterface::compressedSend
}
else if (commsType == Pstream::nonBlocking)
{
resizeBuf(receiveBuf_, nBytes);
setReceiveBuf<float>(nFloats);
IPstream::read
(
@ -192,6 +235,7 @@ void Foam::processorLduInterface::compressedSend
}
}
template<class Type>
void Foam::processorLduInterface::compressedReceive
(
@ -205,18 +249,17 @@ void Foam::processorLduInterface::compressedReceive
label nm1 = (f.size() - 1)*nCmpts;
label nlast = sizeof(Type)/sizeof(float);
label nFloats = nm1 + nlast;
label nBytes = nFloats*sizeof(float);
if (commsType == Pstream::blocking || commsType == Pstream::scheduled)
{
resizeBuf(receiveBuf_, nBytes);
setReceiveBuf<float>(nFloats);
IPstream::read
(
commsType,
neighbProcNo(),
receiveBuf_.begin(),
nBytes
receiveBuf_.size()
);
}
else if (commsType != Pstream::nonBlocking)
@ -243,6 +286,7 @@ void Foam::processorLduInterface::compressedReceive
}
}
template<class Type>
Foam::tmp<Foam::Field<Type> > Foam::processorLduInterface::compressedReceive
(
@ -256,4 +300,155 @@ Foam::tmp<Foam::Field<Type> > Foam::processorLduInterface::compressedReceive
}
template<class Type>
void Foam::processorLduInterface::compressedBufferSend
(
const Pstream::commsTypes commsType
) const
{
// Optionally inline compress sendBuf
if
(
sizeof(scalar) > sizeof(float)
&& sendBuf_.size()
&& Pstream::floatTransfer
)
{
const List<Type>& f = reinterpret_cast<const List<Type>&>(sendBuf_);
label fSize = f.size()/sizeof(Type);
// Inplace compress
static const label nCmpts = sizeof(Type)/sizeof(scalar);
label nm1 = (fSize - 1)*nCmpts;
label nlast = sizeof(Type)/sizeof(float);
label nFloats = nm1 + nlast;
const scalar *sArray = reinterpret_cast<const scalar*>(f.begin());
const scalar *slast = &sArray[nm1];
float *fArray = reinterpret_cast<float*>(sendBuf_.begin());
for (register label i=0; i<nm1; i++)
{
fArray[i] = sArray[i] - slast[i%nCmpts];
}
reinterpret_cast<Type&>(fArray[nm1]) = f[fSize - 1];
// Trim
setSendBuf<float>(nFloats);
}
// Send sendBuf
if (commsType == Pstream::blocking || commsType == Pstream::scheduled)
{
OPstream::write
(
commsType,
neighbProcNo(),
sendBuf_.begin(),
sendBuf_.size()
);
}
else if (commsType == Pstream::nonBlocking)
{
setReceiveBuf<char>(sendBuf_.size());
IPstream::read
(
commsType,
neighbProcNo(),
receiveBuf_.begin(),
receiveBuf_.size()
);
OPstream::write
(
commsType,
neighbProcNo(),
sendBuf_.begin(),
sendBuf_.size()
);
}
else
{
FatalErrorIn("processorLduInterface::compressedBufferSend")
<< "Unsupported communications type " << commsType
<< exit(FatalError);
}
}
template<class Type>
const Foam::List<Type>& Foam::processorLduInterface::compressedBufferReceive
(
const Pstream::commsTypes commsType,
const label size
) const
{
if (sizeof(scalar) > sizeof(float) && size && Pstream::floatTransfer)
{
static const label nCmpts = sizeof(Type)/sizeof(scalar);
label nm1 = (size - 1)*nCmpts;
label nlast = sizeof(Type)/sizeof(float);
label nFloats = nm1 + nlast;
if (commsType == Pstream::blocking || commsType == Pstream::scheduled)
{
setReceiveBuf<float>(nFloats);
IPstream::read
(
commsType,
neighbProcNo(),
receiveBuf_.begin(),
receiveBuf_.size()
);
}
else if (commsType != Pstream::nonBlocking)
{
FatalErrorIn("processorLduInterface::compressedBufferReceive")
<< "Unsupported communications type " << commsType
<< exit(FatalError);
}
// Inline expand
List<Type>& f = setReceiveBuf<Type>(size);
label fSize = f.size()/sizeof(Type);
const float *fArray =
reinterpret_cast<const float*>(receiveBuf_.begin());
f[fSize - 1] = reinterpret_cast<const Type&>(fArray[nm1]);
scalar *sArray = reinterpret_cast<scalar*>(f.begin());
const scalar *slast = &sArray[nm1];
for (register label i=0; i<nm1; i++)
{
sArray[i] = fArray[i] + slast[i%nCmpts];
}
}
else
{
if (commsType == Pstream::blocking || commsType == Pstream::scheduled)
{
setReceiveBuf<Type>(size);
IPstream::read
(
commsType,
neighbProcNo(),
receiveBuf_.begin(),
receiveBuf_.size()
);
}
else if (commsType != Pstream::nonBlocking)
{
FatalErrorIn("processorLduInterface::compressedBufferReceive")
<< "Unsupported communications type " << commsType
<< exit(FatalError);
}
}
return reinterpret_cast<List<Type>&>(receiveBuf_);
}
// ************************************************************************* //
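The float-transfer packing used in the routines above stores every element except the last as float offsets from the last element's components, keeping only the last element at full precision; unpacking reverses this exactly, up to float rounding. A standalone sketch for a small list of vectors (hypothetical data; assumes double-precision scalar):

    List<vector> f(3);
    f[0] = vector(1, 2, 3);
    f[1] = vector(4, 5, 6);
    f[2] = vector(7, 8, 9);

    const label nCmpts = sizeof(vector)/sizeof(scalar);        // 3
    const label nm1 = (f.size() - 1)*nCmpts;                   // 6
    const label nFloats = nm1 + sizeof(vector)/sizeof(float);  // 6 + 6

    // Pack: offsets from the last element, then the last element verbatim
    List<float> packed(nFloats);
    const scalar* s = reinterpret_cast<const scalar*>(f.begin());
    const scalar* slast = &s[nm1];
    for (label i = 0; i < nm1; i++)
    {
        packed[i] = s[i] - slast[i%nCmpts];
    }
    reinterpret_cast<vector&>(packed[nm1]) = f[f.size() - 1];

    // Unpack: recover the last element first, then add the offsets back
    List<vector> g(f.size());
    g[g.size() - 1] = reinterpret_cast<const vector&>(packed[nm1]);
    scalar* t = reinterpret_cast<scalar*>(g.begin());
    const scalar* tlast = &t[nm1];
    for (label i = 0; i < nm1; i++)
    {
        t[i] = packed[i] + tlast[i%nCmpts];
    }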

View File

@ -51,19 +51,10 @@ void Foam::cyclicLduInterfaceField::transformCoupleField
{
if (doTransform())
{
label sizeby2 = pnf.size()/2;
scalar forwardScale =
pow(diag(forwardT()[0]).component(cmpt), rank());
pow(diag(forwardT()).component(cmpt), rank());
scalar reverseScale =
pow(diag(reverseT()[0]).component(cmpt), rank());
for (label facei=0; facei<sizeby2; facei++)
{
pnf[facei] *= forwardScale;
pnf[facei + sizeby2] *= reverseScale;
}
pnf *= forwardScale;
}
}

View File

@ -77,10 +77,10 @@ public:
virtual bool doTransform() const = 0;
//- Return face transformation tensor
virtual const tensorField& forwardT() const = 0;
virtual const tensor& forwardT() const = 0;
//- Return neighbour-cell transformation tensor
virtual const tensorField& reverseT() const = 0;
virtual const tensor& reverseT() const = 0;
//- Return rank of component for transform
virtual int rank() const = 0;

View File

@ -43,24 +43,24 @@ Foam::processorLduInterfaceField::~processorLduInterfaceField()
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void Foam::processorLduInterfaceField::transformCoupleField
(
scalarField& f,
const direction cmpt
) const
{
if (doTransform())
{
if (forwardT().size() == 1)
{
f *= pow(diag(forwardT()[0]).component(cmpt), rank());
}
else
{
f *= pow(diag(forwardT())().component(cmpt), rank());
}
}
}
//void Foam::processorLduInterfaceField::transformCoupleField
//(
// scalarField& f,
// const direction cmpt
//) const
//{
// if (doTransform())
// {
// if (forwardT().size() == 1)
// {
// f *= pow(diag(forwardT()[0]).component(cmpt), rank());
// }
// else
// {
// f *= pow(diag(forwardT())().component(cmpt), rank());
// }
// }
//}
// ************************************************************************* //

View File

@ -79,22 +79,22 @@ public:
//- Return neighbour processor number
virtual int neighbProcNo() const = 0;
//- Is the transform required
virtual bool doTransform() const = 0;
//- Return face transformation tensor
virtual const tensorField& forwardT() const = 0;
// //- Is the transform required
// virtual bool doTransform() const = 0;
//
// //- Return face transformation tensor
// virtual const tensorField& forwardT() const = 0;
//- Return rank of component for transform
virtual int rank() const = 0;
//- Transform given patch component field
void transformCoupleField
(
scalarField& f,
const direction cmpt
) const;
// //- Transform given patch component field
// virtual void transformCoupleField
// (
// scalarField& f,
// const direction cmpt
// ) const = 0;
};

View File

@ -26,6 +26,7 @@ License
#include "cyclicGAMGInterface.H"
#include "addToRunTimeSelectionTable.H"
#include "Map.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
@ -50,6 +51,10 @@ Foam::cyclicGAMGInterface::cyclicGAMGInterface
const labelField& neighbourRestrictAddressing
)
:
cyclicLduInterface
(
refCast<const cyclicLduInterface>(fineInterface).neighbPatchID()
),
GAMGInterface
(
fineInterface,
@ -59,13 +64,13 @@ Foam::cyclicGAMGInterface::cyclicGAMGInterface
fineCyclicInterface_(refCast<const cyclicLduInterface>(fineInterface))
{
// Make a lookup table of entries for owner/neighbour
HashTable<SLList<label>, label, Hash<label> > neighboursTable
Map<SLList<label> > neighboursTable
(
localRestrictAddressing.size()
);
// Table of face-sets to be agglomerated
HashTable<SLList<SLList<label> >, label, Hash<label> > faceFaceTable
Map<SLList<SLList<label> > > faceFaceTable
(
localRestrictAddressing.size()
);
@ -228,6 +233,9 @@ Foam::tmp<Foam::labelField> Foam::cyclicGAMGInterface::transfer
const unallocLabelList& interfaceData
) const
{
notImplemented("cyclicGAMGInterface::transfer(..)");
//XXXXX to be done
tmp<labelField> tpnf(new labelField(size()));
labelField& pnf = tpnf();
@ -249,12 +257,13 @@ Foam::tmp<Foam::labelField> Foam::cyclicGAMGInterface::internalFieldTransfer
const unallocLabelList& iF
) const
{
notImplemented("cyclicGAMGInterface::internalFieldTransfer(..)");
//XXXXX to be done
tmp<labelField> tpnf(new labelField(size()));
labelField& pnf = tpnf();
label sizeby2 = size()/2;
for (label facei=0; facei<sizeby2; facei++)
forAll(pnf, facei)
{
pnf[facei] = iF[faceCells_[facei + sizeby2]];
pnf[facei + sizeby2] = iF[faceCells_[facei]];

View File

@ -50,8 +50,8 @@ namespace Foam
class cyclicGAMGInterface
:
public GAMGInterface,
virtual public cyclicLduInterface
virtual public cyclicLduInterface,
public GAMGInterface
{
// Private data
@ -114,13 +114,13 @@ public:
//- Cyclic interface functions
//- Return face transformation tensor
virtual const tensorField& forwardT() const
virtual const tensor& forwardT() const
{
return fineCyclicInterface_.forwardT();
}
//- Return neighbour-cell transformation tensor
virtual const tensorField& reverseT() const
virtual const tensor& reverseT() const
{
return fineCyclicInterface_.reverseT();
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2008 OpenCFD Ltd.
\\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2008 OpenCFD Ltd.
\\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -130,13 +130,13 @@ public:
}
//- Return face transformation tensor
const tensorField& forwardT() const
const tensor& forwardT() const
{
return cyclicPolyPatch_.forwardT();
}
//- Return neighbour-cell transformation tensor
const tensorField& reverseT() const
const tensor& reverseT() const
{
return cyclicPolyPatch_.reverseT();
}

View File

@ -414,30 +414,6 @@ void Foam::globalMeshData::calcSharedEdges() const
}
// Helper function to count coincident faces. This part used to be
// in updateMesh but I've had to move it to a separate function
// because of aliasing optimisation errors in icc9.1 on the
// Itanium.
Foam::label Foam::globalMeshData::countCoincidentFaces
(
const scalar tolDim,
const vectorField& separationDist
)
{
label nCoincident = 0;
forAll(separationDist, faceI)
{
if (mag(separationDist[faceI]) < tolDim)
{
// Faces coincide
nCoincident++;
}
}
return nCoincident;
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from polyMesh
@ -781,18 +757,14 @@ void Foam::globalMeshData::updateMesh()
if (Pstream::myProcNo() > procPatch.neighbProcNo())
{
// Uncount my faces. Handle cyclics separately.
if (procPatch.separated())
forAll(procPatch.patchIDs(), i)
{
const vectorField& separationDist = procPatch.separation();
nTotalFaces_ -= countCoincidentFaces(tolDim, separationDist);
}
else
if (procPatch.patchIDs()[i] == -1)
{
// Normal, unseparated processor patch. Remove duplicates.
nTotalFaces_ -= procPatch.size();
label sz = procPatch.starts()[i+1]-procPatch.starts()[i];
nTotalFaces_ -= sz;
}
}
}
}

View File

@ -210,13 +210,6 @@ class globalMeshData
//- Calculate shared edge addressing
void calcSharedEdges() const;
//- Count coincident faces.
static label countCoincidentFaces
(
const scalar tolDim,
const vectorField& separationDist
);
//- Disallow default bitwise copy construct
globalMeshData(const globalMeshData&);

View File

@ -611,6 +611,8 @@ void Foam::polyMesh::resetPrimitives
const labelList& neighbour,
const labelList& patchSizes,
const labelList& patchStarts,
const labelListList& subPatches,
const labelListList& subPatchStarts,
const bool validBoundary
)
{
@ -648,6 +650,17 @@ void Foam::polyMesh::resetPrimitives
patchI,
boundary_
);
if (isA<processorPolyPatch>(boundary_[patchI]))
{
// Set the sub-patch information
processorPolyPatch& ppp = refCast<processorPolyPatch>
(
boundary_[patchI]
);
const_cast<labelList&>(ppp.patchIDs()) = subPatches[patchI];
const_cast<labelList&>(ppp.starts()) = subPatchStarts[patchI];
}
}
@ -672,6 +685,9 @@ void Foam::polyMesh::resetPrimitives
" const labelList& neighbour,\n"
" const labelList& patchSizes,\n"
" const labelList& patchStarts\n"
" const labelListList& subPatches,\n"
" const labelListList& subPatchStarts,\n"
" const bool validBoundary\n"
")\n"
) << "Face " << faceI << " contains vertex labels out of range: "
<< curFace << " Max point index = " << points_.size()
@ -711,9 +727,11 @@ void Foam::polyMesh::resetPrimitives
" const labelList& neighbour,\n"
" const labelList& patchSizes,\n"
" const labelList& patchStarts\n"
" const labelListList& subPatches,\n"
" const labelListList& subPatchStarts,\n"
" const bool validBoundary\n"
")\n"
)
<< "no points or no cells in mesh" << endl;
) << "no points or no cells in mesh" << endl;
}
}
}
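The new subPatches/subPatchStarts arguments carry, for each processor patch, the ranges of faces associated with different coupled patches (-1 marking faces that were plain internal faces), in the form consumed by processorPolyPatch::patchIDs() and starts(). A hypothetical description of one processor patch with 100 faces:

    // Faces 0..79 came from internal faces, faces 80..99 from coupled patch 3
    labelList patchIDs(2);
    patchIDs[0] = -1;
    patchIDs[1] = 3;

    labelList starts(3);
    starts[0] = 0;
    starts[1] = 80;
    starts[2] = 100;    // one past the end, so starts[subI+1] bounds sub-patch subI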

View File

@ -190,6 +190,19 @@ private:
const label patchID
) const;
void setTopology
(
const cellShapeList& cellsAsShapes,
const faceListList& boundaryFaces,
const wordList& boundaryPatchNames,
labelList& patchSizes,
labelList& patchStarts,
label& defaultPatchStart,
label& nFaces,
cellList& cells
);
public:
@ -253,6 +266,20 @@ public:
const bool syncPar = true
);
//- Construct from cell shapes with patch information in dictionary
// format.
polyMesh
(
const IOobject& io,
const pointField& points,
const cellShapeList& shapes,
const faceListList& boundaryFaces,
const PtrList<dictionary>& boundaryDicts,
const word& defaultBoundaryPatchName,
const word& defaultBoundaryPatchType,
const bool syncPar = true
);
// Destructor
@ -443,6 +470,8 @@ public:
const labelList& neighbour,
const labelList& patchSizes,
const labelList& patchStarts,
const labelListList& subPatches,
const labelListList& subPatchStarts,
const bool validBoundary = true
);

View File

@ -133,6 +133,291 @@ Foam::labelList Foam::polyMesh::facePatchFaceCells
}
//- Set faces_, calculate cells and patchStarts.
void Foam::polyMesh::setTopology
(
const cellShapeList& cellsAsShapes,
const faceListList& boundaryFaces,
const wordList& boundaryPatchNames,
labelList& patchSizes,
labelList& patchStarts,
label& defaultPatchStart,
label& nFaces,
cellList& cells
)
{
// Calculate the faces of all cells
// Initialise maximum possible number of mesh faces to 0
label maxFaces = 0;
// Set up a list of face shapes for each cell
faceListList cellsFaceShapes(cellsAsShapes.size());
cells.setSize(cellsAsShapes.size());
forAll(cellsFaceShapes, cellI)
{
cellsFaceShapes[cellI] = cellsAsShapes[cellI].faces();
cells[cellI].setSize(cellsFaceShapes[cellI].size());
// Initialise cells to -1 to flag undefined faces
static_cast<labelList&>(cells[cellI]) = -1;
// Count maximum possible number of mesh faces
maxFaces += cellsFaceShapes[cellI].size();
}
// Set size of faces array to maximum possible number of mesh faces
faces_.setSize(maxFaces);
// Initialise number of faces to 0
nFaces = 0;
// set reference to point-cell addressing
labelListList PointCells = cellShapePointCells(cellsAsShapes);
bool found = false;
forAll(cells, cellI)
{
// Note:
// Insertion cannot be done in one go as the faces need to be
// added into the list in the increasing order of neighbour
// cells. Therefore, all neighbours will be detected first
// and then added in the correct order.
const faceList& curFaces = cellsFaceShapes[cellI];
// Record the neighbour cell
labelList neiCells(curFaces.size(), -1);
// Record the face of neighbour cell
labelList faceOfNeiCell(curFaces.size(), -1);
label nNeighbours = 0;
// For all faces ...
forAll(curFaces, faceI)
{
// Skip faces that have already been matched
if (cells[cellI][faceI] >= 0) continue;
found = false;
const face& curFace = curFaces[faceI];
// Get the list of labels
const labelList& curPoints = curFace;
// For all points
forAll(curPoints, pointI)
{
// Get the list of cells sharing this point
const labelList& curNeighbours =
PointCells[curPoints[pointI]];
// For all neighbours
forAll(curNeighbours, neiI)
{
label curNei = curNeighbours[neiI];
// Reject neighbours with the lower label
if (curNei > cellI)
{
// Get the list of search faces
const faceList& searchFaces = cellsFaceShapes[curNei];
forAll(searchFaces, neiFaceI)
{
if (searchFaces[neiFaceI] == curFace)
{
// Match!!
found = true;
// Record the neighbour cell and face
neiCells[faceI] = curNei;
faceOfNeiCell[faceI] = neiFaceI;
nNeighbours++;
break;
}
}
if (found) break;
}
if (found) break;
}
if (found) break;
} // End of current points
} // End of current faces
// Add the faces in the increasing order of neighbours
for (label neiSearch = 0; neiSearch < nNeighbours; neiSearch++)
{
// Find the lowest neighbour which is still valid
label nextNei = -1;
label minNei = cells.size();
forAll (neiCells, ncI)
{
if (neiCells[ncI] > -1 && neiCells[ncI] < minNei)
{
nextNei = ncI;
minNei = neiCells[ncI];
}
}
if (nextNei > -1)
{
// Add the face to the list of faces
faces_[nFaces] = curFaces[nextNei];
// Set cell-face and cell-neighbour-face to current face label
cells[cellI][nextNei] = nFaces;
cells[neiCells[nextNei]][faceOfNeiCell[nextNei]] = nFaces;
// Stop the neighbour from being used again
neiCells[nextNei] = -1;
// Increment number of faces counter
nFaces++;
}
else
{
FatalErrorIn
(
"polyMesh::setTopology\n"
"(\n"
" const cellShapeList& cellsAsShapes,\n"
" const faceListList& boundaryFaces,\n"
" const wordList& boundaryPatchNames,\n"
" labelList& patchSizes,\n"
" labelList& patchStarts,\n"
" label& defaultPatchStart,\n"
" label& nFaces,\n"
" cellList& cells\n"
")"
) << "Error in internal face insertion"
<< abort(FatalError);
}
}
}
// Do boundary faces
patchSizes.setSize(boundaryFaces.size(), -1);
patchStarts.setSize(boundaryFaces.size(), -1);
forAll (boundaryFaces, patchI)
{
const faceList& patchFaces = boundaryFaces[patchI];
labelList curPatchFaceCells =
facePatchFaceCells
(
patchFaces,
PointCells,
cellsFaceShapes,
patchI
);
// Grab the start label
label curPatchStart = nFaces;
forAll (patchFaces, faceI)
{
const face& curFace = patchFaces[faceI];
const label cellInside = curPatchFaceCells[faceI];
faces_[nFaces] = curFace;
// get faces of the cell inside
const faceList& facesOfCellInside = cellsFaceShapes[cellInside];
bool found = false;
forAll (facesOfCellInside, cellFaceI)
{
if (facesOfCellInside[cellFaceI] == curFace)
{
if (cells[cellInside][cellFaceI] >= 0)
{
FatalErrorIn
(
"polyMesh::setTopology\n"
"(\n"
" const cellShapeList& cellsAsShapes,\n"
" const faceListList& boundaryFaces,\n"
" const wordList& boundaryPatchNames,\n"
" labelList& patchSizes,\n"
" labelList& patchStarts,\n"
" label& defaultPatchStart,\n"
" label& nFaces,\n"
" cellList& cells\n"
")"
) << "Trying to specify a boundary face " << curFace
<< " on the face on cell " << cellInside
<< " which is either an internal face or already "
<< "belongs to some other patch. This is face "
<< faceI << " of patch "
<< patchI << " named "
<< boundaryPatchNames[patchI] << "."
<< abort(FatalError);
}
found = true;
cells[cellInside][cellFaceI] = nFaces;
break;
}
}
if (!found)
{
FatalErrorIn("polyMesh::polyMesh(... construct from shapes...)")
<< "face " << faceI << " of patch " << patchI
<< " does not seem to belong to cell " << cellInside
<< " which, according to the addressing, "
<< "should be next to it."
<< abort(FatalError);
}
// increment the counter of faces
nFaces++;
}
patchSizes[patchI] = nFaces - curPatchStart;
patchStarts[patchI] = curPatchStart;
}
// Grab "non-existing" faces and put them into a default patch
defaultPatchStart = nFaces;
forAll(cells, cellI)
{
labelList& curCellFaces = cells[cellI];
forAll(curCellFaces, faceI)
{
if (curCellFaces[faceI] == -1) // "non-existent" face
{
curCellFaces[faceI] = nFaces;
faces_[nFaces] = cellsFaceShapes[cellI][faceI];
nFaces++;
}
}
}
// Reset the size of the face list
faces_.setSize(nFaces);
return;
}
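In terms of its outputs, setTopology fills faces_ with all internal faces first, then the faces of each supplied boundary patch, then any faces that were not matched to a patch; a hypothetical caller interprets the results as follows (sketch):

    labelList patchSizes;
    labelList patchStarts;
    label defaultPatchStart = -1;
    label nFaces = -1;
    cellList cells;

    setTopology
    (
        cellsAsShapes,
        boundaryFaces,
        boundaryPatchNames,
        patchSizes,
        patchStarts,
        defaultPatchStart,
        nFaces,
        cells
    );

    // faces_[patchStarts[patchI]] .. faces_[patchStarts[patchI] + patchSizes[patchI] - 1]
    //     are the faces of boundary patch patchI;
    // faces_[defaultPatchStart] .. faces_[nFaces - 1]
    //     are the leftover faces that end up in the default patch.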
Foam::polyMesh::polyMesh
(
const IOobject& io,
@ -273,272 +558,24 @@ Foam::polyMesh::polyMesh
// Remove all of the old mesh files if they exist
removeFiles(instance());
// Calculate the faces of all cells
// Initialise maximum possible numer of mesh faces to 0
label maxFaces = 0;
// Set up a list of face shapes for each cell
faceListList cellsFaceShapes(cellsAsShapes.size());
cellList cells(cellsAsShapes.size());
forAll(cellsFaceShapes, cellI)
{
cellsFaceShapes[cellI] = cellsAsShapes[cellI].faces();
cells[cellI].setSize(cellsFaceShapes[cellI].size());
// Initialise cells to -1 to flag undefined faces
static_cast<labelList&>(cells[cellI]) = -1;
// Count maximum possible numer of mesh faces
maxFaces += cellsFaceShapes[cellI].size();
}
// Set size of faces array to maximum possible number of mesh faces
faces_.setSize(maxFaces);
// Initialise number of faces to 0
label nFaces = 0;
// set reference to point-cell addressing
labelListList PointCells = cellShapePointCells(cellsAsShapes);
bool found = false;
forAll(cells, cellI)
{
// Note:
// Insertion cannot be done in one go as the faces need to be
// added into the list in the increasing order of neighbour
// cells. Therefore, all neighbours will be detected first
// and then added in the correct order.
const faceList& curFaces = cellsFaceShapes[cellI];
// Record the neighbour cell
labelList neiCells(curFaces.size(), -1);
// Record the face of neighbour cell
labelList faceOfNeiCell(curFaces.size(), -1);
label nNeighbours = 0;
// For all faces ...
forAll(curFaces, faceI)
{
// Skip faces that have already been matched
if (cells[cellI][faceI] >= 0) continue;
found = false;
const face& curFace = curFaces[faceI];
// Get the list of labels
const labelList& curPoints = curFace;
// For all points
forAll(curPoints, pointI)
{
// dGget the list of cells sharing this point
const labelList& curNeighbours =
PointCells[curPoints[pointI]];
// For all neighbours
forAll(curNeighbours, neiI)
{
label curNei = curNeighbours[neiI];
// Reject neighbours with the lower label
if (curNei > cellI)
{
// Get the list of search faces
const faceList& searchFaces = cellsFaceShapes[curNei];
forAll(searchFaces, neiFaceI)
{
if (searchFaces[neiFaceI] == curFace)
{
// Match!!
found = true;
// Record the neighbour cell and face
neiCells[faceI] = curNei;
faceOfNeiCell[faceI] = neiFaceI;
nNeighbours++;
break;
}
}
if (found) break;
}
if (found) break;
}
if (found) break;
} // End of current points
} // End of current faces
// Add the faces in the increasing order of neighbours
for (label neiSearch = 0; neiSearch < nNeighbours; neiSearch++)
{
// Find the lowest neighbour which is still valid
label nextNei = -1;
label minNei = cells.size();
forAll (neiCells, ncI)
{
if (neiCells[ncI] > -1 && neiCells[ncI] < minNei)
{
nextNei = ncI;
minNei = neiCells[ncI];
}
}
if (nextNei > -1)
{
// Add the face to the list of faces
faces_[nFaces] = curFaces[nextNei];
// Set cell-face and cell-neighbour-face to current face label
cells[cellI][nextNei] = nFaces;
cells[neiCells[nextNei]][faceOfNeiCell[nextNei]] = nFaces;
// Stop the neighbour from being used again
neiCells[nextNei] = -1;
// Increment number of faces counter
nFaces++;
}
else
{
FatalErrorIn
// Calculate faces and cells
labelList patchSizes;
labelList patchStarts;
label defaultPatchStart;
label nFaces;
cellList cells;
setTopology
(
"polyMesh::polyMesh\n"
"(\n"
" const IOobject& io,\n"
" const pointField& points,\n"
" const cellShapeList& cellsAsShapes,\n"
" const faceListList& boundaryFaces,\n"
" const wordList& boundaryPatchTypes,\n"
" const wordList& boundaryPatchNames,\n"
" const word& defaultBoundaryPatchType\n"
")"
) << "Error in internal face insertion"
<< abort(FatalError);
}
}
}
// Do boundary faces
labelList patchSizes(boundaryFaces.size(), -1);
labelList patchStarts(boundaryFaces.size(), -1);
forAll (boundaryFaces, patchI)
{
const faceList& patchFaces = boundaryFaces[patchI];
labelList curPatchFaceCells =
facePatchFaceCells
(
patchFaces,
PointCells,
cellsFaceShapes,
patchI
cellsAsShapes,
boundaryFaces,
boundaryPatchNames,
patchSizes,
patchStarts,
defaultPatchStart,
nFaces,
cells
);
// Grab the start label
label curPatchStart = nFaces;
forAll (patchFaces, faceI)
{
const face& curFace = patchFaces[faceI];
const label cellInside = curPatchFaceCells[faceI];
faces_[nFaces] = curFace;
// get faces of the cell inside
const faceList& facesOfCellInside = cellsFaceShapes[cellInside];
bool found = false;
forAll (facesOfCellInside, cellFaceI)
{
if (facesOfCellInside[cellFaceI] == curFace)
{
if (cells[cellInside][cellFaceI] >= 0)
{
FatalErrorIn
(
"polyMesh::polyMesh\n"
"(\n"
" const IOobject& io,\n"
" const pointField& points,\n"
" const cellShapeList& cellsAsShapes,\n"
" const faceListList& boundaryFaces,\n"
" const wordList& boundaryPatchTypes,\n"
" const wordList& boundaryPatchNames,\n"
" const word& defaultBoundaryPatchType\n"
")"
) << "Trying to specify a boundary face " << curFace
<< " on the face on cell " << cellInside
<< " which is either an internal face or already "
<< "belongs to some other patch. This is face "
<< faceI << " of patch "
<< patchI << " named "
<< boundaryPatchNames[patchI] << "."
<< abort(FatalError);
}
found = true;
cells[cellInside][cellFaceI] = nFaces;
break;
}
}
if (!found)
{
FatalErrorIn("polyMesh::polyMesh(... construct from shapes...)")
<< "face " << faceI << " of patch " << patchI
<< " does not seem to belong to cell " << cellInside
<< " which, according to the addressing, "
<< "should be next to it."
<< abort(FatalError);
}
// increment the counter of faces
nFaces++;
}
patchSizes[patchI] = nFaces - curPatchStart;
patchStarts[patchI] = curPatchStart;
}
// Grab "non-existing" faces and put them into a default patch
label defaultPatchStart = nFaces;
forAll(cells, cellI)
{
labelList& curCellFaces = cells[cellI];
forAll(curCellFaces, faceI)
{
if (curCellFaces[faceI] == -1) // "non-existent" face
{
curCellFaces[faceI] = nFaces;
faces_[nFaces] = cellsFaceShapes[cellI][faceI];
nFaces++;
}
}
}
// Reset the size of the face list
faces_.setSize(nFaces);
// Warning: Patches can only be added once the face list is
// completed, as they hold a subList of the face list
forAll (boundaryFaces, patchI)
@ -619,4 +656,241 @@ Foam::polyMesh::polyMesh
}
//XXXXXXXXXX
Foam::polyMesh::polyMesh
(
const IOobject& io,
const pointField& points,
const cellShapeList& cellsAsShapes,
const faceListList& boundaryFaces,
const PtrList<dictionary>& boundaryDicts,
const word& defaultBoundaryPatchName,
const word& defaultBoundaryPatchType,
const bool syncPar
)
:
objectRegistry(io),
primitiveMesh(),
points_
(
IOobject
(
"points",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
points
),
faces_
(
IOobject
(
"faces",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
0
),
owner_
(
IOobject
(
"owner",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
0
),
neighbour_
(
IOobject
(
"neighbour",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
0
),
clearedPrimitives_(false),
boundary_
(
IOobject
(
"boundary",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
*this,
boundaryFaces.size() + 1 // add room for a default patch
),
bounds_(points_, syncPar),
directions_(Vector<label>::zero),
pointZones_
(
IOobject
(
"pointZones",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::NO_WRITE
),
*this,
0
),
faceZones_
(
IOobject
(
"faceZones",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::NO_WRITE
),
*this,
0
),
cellZones_
(
IOobject
(
"cellZones",
instance(),
meshSubDir,
*this,
IOobject::NO_READ,
IOobject::NO_WRITE
),
*this,
0
),
globalMeshDataPtr_(NULL),
moving_(false),
curMotionTimeIndex_(time().timeIndex()),
oldPointsPtr_(NULL)
{
if (debug)
{
Info<<"Constructing polyMesh from cell and boundary shapes." << endl;
}
// Remove all of the old mesh files if they exist
removeFiles(instance());
wordList boundaryPatchNames(boundaryDicts.size());
forAll(boundaryDicts, patchI)
{
boundaryDicts[patchI].lookup("name") >> boundaryPatchNames[patchI];
}
// Calculate faces and cells
labelList patchSizes;
labelList patchStarts;
label defaultPatchStart;
label nFaces;
cellList cells;
setTopology
(
cellsAsShapes,
boundaryFaces,
boundaryPatchNames,
patchSizes,
patchStarts,
defaultPatchStart,
nFaces,
cells
);
// Warning: Patches can only be added once the face list is
// completed, as they hold a subList of the face list
forAll (boundaryFaces, patchI)
{
dictionary patchDict(boundaryDicts[patchI]);
patchDict.set("nFaces", patchSizes[patchI]);
patchDict.set("startFace", patchStarts[patchI]);
// add the patch to the list
boundary_.set
(
patchI,
polyPatch::New
(
boundaryPatchNames[patchI],
patchDict,
patchI,
boundary_
)
);
}
label nAllPatches = boundaryFaces.size();
if (nFaces > defaultPatchStart)
{
WarningIn("polyMesh::polyMesh(... construct from shapes...)")
<< "Found " << nFaces - defaultPatchStart
<< " undefined faces in mesh; adding to default patch." << endl;
boundary_.set
(
nAllPatches,
polyPatch::New
(
defaultBoundaryPatchType,
defaultBoundaryPatchName,
nFaces - defaultPatchStart,
defaultPatchStart,
boundary_.size() - 1,
boundary_
)
);
nAllPatches++;
}
// Reset the size of the boundary
boundary_.setSize(nAllPatches);
// Set the primitive mesh
initMesh(cells);
if (syncPar)
{
// Calculate topology for the patches (processor-processor comms etc.)
boundary_.updateMesh();
// Calculate the geometry for the patches (transformation tensors etc.)
boundary_.calcGeometry();
}
if (debug)
{
if (checkMesh())
{
Info << "Mesh OK" << endl;
}
}
}
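A hypothetical way for a caller to assemble the boundaryDicts argument by hand; only "name" and "type" need to be supplied, since "nFaces" and "startFace" are filled in by the constructor itself as shown above. A coupled patch such as a cyclic would additionally carry the entries its polyPatch constructor reads, e.g. the new neighbourPatch keyword.

    PtrList<dictionary> boundaryDicts(2);

    dictionary inletDict;
    inletDict.set("name", word("inlet"));
    inletDict.set("type", word("patch"));
    boundaryDicts.set(0, new dictionary(inletDict));

    dictionary wallDict;
    wallDict.set("name", word("walls"));
    wallDict.set("type", word("wall"));
    boundaryDicts.set(1, new dictionary(wallDict));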
//XXXXX
// ************************************************************************* //

View File

@ -250,6 +250,11 @@ Foam::label Foam::coupledPolyPatch::getRotation
void Foam::coupledPolyPatch::calcTransformTensors
(
bool& separated,
vector& separation,
bool& parallel,
tensor& forwardT,
tensor& reverseT,
const vectorField& Cf,
const vectorField& Cr,
const vectorField& nf,
@ -263,6 +268,7 @@ void Foam::coupledPolyPatch::calcTransformTensors
Pout<< "coupledPolyPatch::calcTransformTensors : " << name() << endl
<< " (half)size:" << Cf.size() << nl
<< " absTol:" << absTol << nl
//<< " smallDist:" << smallDist << nl
<< " sum(mag(nf & nr)):" << sum(mag(nf & nr)) << endl;
}
@ -277,9 +283,11 @@ void Foam::coupledPolyPatch::calcTransformTensors
if (size() == 0)
{
// Dummy geometry.
separation_.setSize(0);
forwardT_ = I;
reverseT_ = I;
separated = false;
separation = vector::zero;
parallel = true;
forwardT = I;
reverseT = I;
}
else
{
@ -294,89 +302,70 @@ void Foam::coupledPolyPatch::calcTransformTensors
{
// Rotation, no separation
separation_.setSize(0);
separated = false;
separation = vector::zero;
forwardT_.setSize(Cf.size());
reverseT_.setSize(Cf.size());
parallel = false;
forwardT = rotationTensor(-nr[0], nf[0]);
reverseT = rotationTensor(nf[0], -nr[0]);
forAll (forwardT_, facei)
// Check
forAll (forwardT, facei)
{
forwardT_[facei] = rotationTensor(-nr[facei], nf[facei]);
reverseT_[facei] = rotationTensor(nf[facei], -nr[facei]);
tensor T = rotationTensor(-nr[facei], nf[facei]);
if (mag(T - forwardT) > smallDist[facei])
{
FatalErrorIn
(
"coupledPolyPatch::calcTransformTensors(..)"
) << "Non-uniform rotation. Difference between face 0"
<< " (at " << Cf[0] << ") and face " << facei
<< " (at " << Cf[facei] << ") is " << mag(T - forwardT)
<< " which is larger than geometric tolerance "
<< smallDist[facei] << endl
<< exit(FatalError);
}
if (debug)
{
Pout<< " sum(mag(forwardT_ - forwardT_[0])):"
<< sum(mag(forwardT_ - forwardT_[0]))
<< endl;
}
if (sum(mag(forwardT_ - forwardT_[0])) < error)
{
forwardT_.setSize(1);
reverseT_.setSize(1);
}
}
else
{
forwardT_.setSize(0);
reverseT_.setSize(0);
// No rotation, possibly separation
parallel = true;
forwardT = I;
reverseT = I;
separation_ = (nf&(Cr - Cf))*nf;
vectorField separationField = (nf&(Cr - Cf))*nf;
// Three situations:
// - separation is zero. No separation.
// - separation is the same for all faces. Single separation vector.
// - separation differs per face. Not supported; fatal error below.
if (mag(separationField[0]) < smallDist[0])
{
separated = false;
separation = vector::zero;
}
else
{
separated = true;
separation = separationField[0];
}
// Check for different separation per face
bool sameSeparation = true;
forAll(separation_, facei)
forAll(separationField, facei)
{
scalar smallSqr = sqr(smallDist[facei]);
if (magSqr(separation_[facei] - separation_[0]) > smallSqr)
if (magSqr(separationField[facei] - separation) > smallSqr)
{
if (debug)
{
Pout<< " separation " << separation_[facei]
<< " at " << facei
<< " differs from separation[0] " << separation_[0]
<< " by more than local tolerance "
<< smallDist[facei]
<< ". Assuming non-uniform separation." << endl;
}
sameSeparation = false;
break;
}
}
if (sameSeparation)
{
// Check for zero separation (at 0 so everywhere)
if (magSqr(separation_[0]) < sqr(smallDist[0]))
{
if (debug)
{
Pout<< " separation " << mag(separation_[0])
<< " less than local tolerance " << smallDist[0]
<< ". Assuming zero separation." << endl;
}
separation_.setSize(0);
}
else
{
if (debug)
{
Pout<< " separation " << mag(separation_[0])
<< " more than local tolerance " << smallDist[0]
<< ". Assuming uniform separation." << endl;
}
separation_.setSize(1);
FatalErrorIn
(
"coupledPolyPatch::calcTransformTensors(..)"
) << "Non-uniform separation. Difference between face 0"
<< " (at " << Cf[0] << ") and face " << facei
<< " (at " << Cf[facei] << ") is "
<< mag(separationField[facei] - separation)
<< " which is larger than geometric tolerance "
<< smallDist[facei] << endl
<< exit(FatalError);
}
}
}
@ -384,8 +373,10 @@ void Foam::coupledPolyPatch::calcTransformTensors
if (debug)
{
Pout<< " separation_:" << separation_ << nl
<< " forwardT size:" << forwardT_.size() << endl;
Pout<< " separated:" << separated << nl
<< " separation :" << separation << nl
<< " parallel :" << parallel << nl
<< " forwardT :" << forwardT << endl;
}
}
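A sketch of the rotation branch above for a single pair of coupled faces whose normals differ by 90 degrees (hypothetical values; rotationTensor as used above):

    vector nf(1, 0, 0);     // face normal on this side
    vector nr(0, -1, 0);    // face normal on the coupled side

    tensor forwardT = rotationTensor(-nr, nf);   // rotates -nr onto nf
    tensor reverseT = rotationTensor(nf, -nr);   // rotates nf onto -nr

    // A value transformed across the couple and back is unchanged:
    vector v(0.3, 0.7, 0.1);
    // transform(forwardT, transform(reverseT, v)) == v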

View File

@ -32,7 +32,6 @@ Description
SourceFiles
coupledPolyPatch.C
coupledPolyPatchMorph.C
\*---------------------------------------------------------------------------*/
@ -54,17 +53,6 @@ class coupledPolyPatch
:
public polyPatch
{
// Private data
//- offset (distance) vector from one side of the couple to the other
mutable vectorField separation_;
//- Face transformation tensor
mutable tensorField forwardT_;
//- Neighbour-cell transformation tensor
mutable tensorField reverseT_;
public:
// Static data members
@ -82,6 +70,11 @@ protected:
// absTol : absolute error in normal
void calcTransformTensors
(
bool& separated,
vector& separation,
bool& parallel,
tensor& forwardT,
tensor& reverseT,
const vectorField& Cf,
const vectorField& Cr,
const vectorField& nf,
@ -238,39 +231,42 @@ public:
return true;
}
//- Are the planes separated.
virtual bool separated() const = 0;
//- Are the coupled planes separated
bool separated() const
{
return separation_.size();
}
//- If the planes are separated the separation vector.
virtual const vector& separation() const = 0;
//- Return the offset (distance) vector from one side of the couple
// to the other
const vectorField& separation() const
{
return separation_;
}
//- Are the cyclic planes parallel.
virtual bool parallel() const = 0;
//- Return face transformation tensor.
virtual const tensor& forwardT() const = 0;
//- Return neighbour-cell transformation tensor.
virtual const tensor& reverseT() const = 0;
//- Are the cyclic planes parallel
bool parallel() const
{
return forwardT_.size() == 0;
}
//- Return face transformation tensor
const tensorField& forwardT() const
{
return forwardT_;
}
//- Return neighbour-cell transformation tensor
const tensorField& reverseT() const
{
return reverseT_;
}
//- Initialise the calculation of the patch geometry
virtual void initGeometry
(
const primitivePatch& referPatch,
UList<point>& nbrCtrs,
UList<point>& nbrAreas,
UList<point>& nbrCc
) = 0;
//- Calculate the patch geometry
virtual void calcGeometry
(
const primitivePatch& referPatch,
const UList<point>& thisCtrs,
const UList<point>& thisAreas,
const UList<point>& thisCc,
const UList<point>& nbrCtrs,
const UList<point>& nbrAreas,
const UList<point>& nbrCc
) = 0;
//- Initialize ordering for primitivePatch. Does not
// refer to *this (except for name() and type() etc.)

View File

@ -34,6 +34,7 @@ License
#include "matchPoints.H"
#include "EdgeMap.H"
#include "Time.H"
#include "diagTensor.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
@ -87,33 +88,12 @@ void Foam::cyclicPolyPatch::calcTransforms()
{
if (size() > 0)
{
const pointField& points = this->points();
// Half0
const cyclicPolyPatch& half0 = *this;
primitivePatch half0
(
SubList<face>
(
*this,
size()/2
),
points
);
pointField half0Ctrs(calcFaceCentres(half0, half0.points()));
scalarField half0Tols(calcFaceTol(half0, half0.points(), half0Ctrs));
primitivePatch half1
(
SubList<face>
(
*this,
size()/2,
size()/2
),
points
);
pointField half1Ctrs(calcFaceCentres(half1, half1.points()));
// Dump halves
if (debug)
{
fileName casePath(boundaryMesh().mesh().time().path());
@ -122,6 +102,27 @@ void Foam::cyclicPolyPatch::calcTransforms()
Pout<< "cyclicPolyPatch::calcTransforms : Writing half0"
<< " faces to OBJ file " << nm0 << endl;
writeOBJ(nm0, half0, half0.points());
}
vectorField half0Areas(half0.size());
forAll(half0, facei)
{
half0Areas[facei] = half0[facei].normal(half0.points());
}
// Half1
const cyclicPolyPatch& half1 = neighbPatch();
pointField half1Ctrs(calcFaceCentres(half1, half1.points()));
// Dump halves
if (debug)
{
fileName casePath(boundaryMesh().mesh().time().path());
fileName nm1(casePath/name()+"_half1_faces.obj");
Pout<< "cyclicPolyPatch::calcTransforms : Writing half1"
@ -129,17 +130,60 @@ void Foam::cyclicPolyPatch::calcTransforms()
writeOBJ(nm1, half1, half1.points());
}
vectorField half0Normals(half0.size());
vectorField half1Normals(half1.size());
vectorField half1Areas(half1.size());
for (label facei = 0; facei < size()/2; facei++)
forAll(half1, facei)
{
half0Normals[facei] = operator[](facei).normal(points);
label nbrFacei = facei+size()/2;
half1Normals[facei] = operator[](nbrFacei).normal(points);
half1Areas[facei] = half1[facei].normal(half1.points());
}
scalar magSf = mag(half0Normals[facei]);
scalar nbrMagSf = mag(half1Normals[facei]);
calcTransforms
(
half0,
half0Ctrs,
half0Areas,
half1Ctrs,
half1Areas
);
}
}
void Foam::cyclicPolyPatch::calcTransforms
(
const primitivePatch& half0,
const UList<point>& half0Ctrs,
const UList<point>& half0Areas,
const UList<point>& half1Ctrs,
const UList<point>& half1Areas
)
{
Pout<< "cyclicPolyPatch::calcTransforms : name:" << name() << endl
<< " half0Ctrs:"
<< " min:" << min(half0Ctrs) << " max:" << max(half0Ctrs)<< endl
<< " half1Ctrs:"
<< " min:" << min(half1Ctrs) << " max:" << max(half1Ctrs)<< endl
<< endl;
if (half0Ctrs.size() > 0)
{
scalarField half0Tols
(
calcFaceTol
(
half0,
half0.points(),
static_cast<const pointField&>(half0Ctrs)
)
);
vectorField half0Normals(half0Areas.size());
vectorField half1Normals(half1Areas.size());
forAll(half0, facei)
{
scalar magSf = mag(half0Areas[facei]);
scalar nbrMagSf = mag(half1Areas[facei]);
scalar avSf = (magSf + nbrMagSf)/2.0;
if (magSf < ROOTVSMALL && nbrMagSf < ROOTVSMALL)
@ -148,15 +192,14 @@ void Foam::cyclicPolyPatch::calcTransforms()
// check. (note use of sqrt(VSMALL) since that is how mag
// scales)
half0Normals[facei] = point(1, 0, 0);
half1Normals[facei] = half0Normals[facei];
half1Normals[facei] = -half0Normals[facei];
}
else if (mag(magSf - nbrMagSf)/avSf > coupledPolyPatch::matchTol)
{
FatalErrorIn
(
"cyclicPolyPatch::calcTransforms()"
) << "face " << facei << " area does not match neighbour "
<< nbrFacei << " by "
) << "face " << facei << " area does not match neighbour by "
<< 100*mag(magSf - nbrMagSf)/avSf
<< "% -- possible face ordering problem." << endl
<< "patch:" << name()
@ -165,33 +208,44 @@ void Foam::cyclicPolyPatch::calcTransforms()
<< " matching tolerance:" << coupledPolyPatch::matchTol
<< endl
<< "Mesh face:" << start()+facei
<< " vertices:"
<< IndirectList<point>(points, operator[](facei))()
<< " fc:" << half0Ctrs[facei]
<< endl
<< "Neighbour face:" << start()+nbrFacei
<< " vertices:"
<< IndirectList<point>(points, operator[](nbrFacei))()
<< "Neighbour fc:" << half1Ctrs[facei]
<< endl
<< "Rerun with cyclic debug flag set"
<< " for more information." << exit(FatalError);
}
else
{
half0Normals[facei] /= magSf;
half1Normals[facei] /= nbrMagSf;
half0Normals[facei] = half0Areas[facei] / magSf;
half1Normals[facei] = half1Areas[facei] / nbrMagSf;
}
}
// Calculate transformation tensors
calcTransformTensors
(
half0Ctrs,
half1Ctrs,
separated_,
separation_,
parallel_,
forwardT_,
reverseT_,
static_cast<const pointField&>(half0Ctrs),
static_cast<const pointField&>(half1Ctrs),
half0Normals,
half1Normals,
half0Tols
);
Pout<< "cyclicPolyPatch::calcTransforms : calculated transforms for:"
<< name() << endl
<< " separated_:" << separated_ << endl
<< " separation_:" << separation_ << endl
<< " parallel_:" << parallel_ << endl
<< " forwardT_:" << forwardT_ << endl
<< " reverseT_:" << reverseT_ << endl
<< endl;
}
}
@ -610,14 +664,22 @@ Foam::cyclicPolyPatch::cyclicPolyPatch
)
:
coupledPolyPatch(name, size, start, index, bm),
neighbPatchName_(""),
neighbPatchID_(-1),
coupledPointsPtr_(NULL),
coupledEdgesPtr_(NULL),
featureCos_(0.9),
transform_(UNKNOWN),
rotationAxis_(vector::zero),
rotationCentre_(point::zero)
rotationCentre_(point::zero),
separated_(false),
separation_(vector::zero),
parallel_(true),
forwardT_(I),
reverseT_(I)
{
calcTransforms();
// Neighbour patch might not be valid yet so no transformation
// calculation possible.
}
@ -630,12 +692,19 @@ Foam::cyclicPolyPatch::cyclicPolyPatch
)
:
coupledPolyPatch(name, dict, index, bm),
neighbPatchName_(dict.lookup("neighbourPatch")),
neighbPatchID_(-1),
coupledPointsPtr_(NULL),
coupledEdgesPtr_(NULL),
featureCos_(0.9),
transform_(UNKNOWN),
rotationAxis_(vector::zero),
rotationCentre_(point::zero)
rotationCentre_(point::zero),
separated_(false),
separation_(vector::zero),
parallel_(true),
forwardT_(I),
reverseT_(I)
{
dict.readIfPresent("featureCos", featureCos_);
@ -652,12 +721,13 @@ Foam::cyclicPolyPatch::cyclicPolyPatch
}
default:
{
// no additioanl info required
// no additional info required
}
}
}
calcTransforms();
// Neighbour patch might not be valid yet so no transformation
// calculation possible.
}
@ -668,14 +738,22 @@ Foam::cyclicPolyPatch::cyclicPolyPatch
)
:
coupledPolyPatch(pp, bm),
neighbPatchName_(pp.neighbPatchName()),
neighbPatchID_(-1),
coupledPointsPtr_(NULL),
coupledEdgesPtr_(NULL),
featureCos_(pp.featureCos_),
transform_(pp.transform_),
rotationAxis_(pp.rotationAxis_),
rotationCentre_(pp.rotationCentre_)
rotationCentre_(pp.rotationCentre_),
separated_(false),
separation_(vector::zero),
parallel_(true),
forwardT_(I),
reverseT_(I)
{
calcTransforms();
// Neighbour patch might not be valid yet so no transformation
// calculation possible.
}
@ -685,10 +763,13 @@ Foam::cyclicPolyPatch::cyclicPolyPatch
const polyBoundaryMesh& bm,
const label index,
const label newSize,
const label newStart
const label newStart,
const word& neighbPatchName
)
:
coupledPolyPatch(pp, bm, index, newSize, newStart),
neighbPatchName_(neighbPatchName),
neighbPatchID_(-1),
coupledPointsPtr_(NULL),
coupledEdgesPtr_(NULL),
featureCos_(pp.featureCos_),
@ -696,7 +777,8 @@ Foam::cyclicPolyPatch::cyclicPolyPatch
rotationAxis_(pp.rotationAxis_),
rotationCentre_(pp.rotationCentre_)
{
calcTransforms();
// Neighbour patch might not be valid yet so no transformation
// calculation possible.
}
@ -720,8 +802,48 @@ void Foam::cyclicPolyPatch::initGeometry()
void Foam::cyclicPolyPatch::calcGeometry()
{
polyPatch::calcGeometry();
calcTransforms();
}
void Foam::cyclicPolyPatch::initGeometry
(
const primitivePatch& referPatch,
UList<point>& nbrCtrs,
UList<point>& nbrAreas,
UList<point>& nbrCc
)
{}
void Foam::cyclicPolyPatch::calcGeometry
(
const primitivePatch& referPatch,
const UList<point>& thisCtrs,
const UList<point>& thisAreas,
const UList<point>& thisCc,
const UList<point>& nbrCtrs,
const UList<point>& nbrAreas,
const UList<point>& nbrCc
)
{
polyPatch::calcGeometry();
Pout<< "cyclicPolyPatch::calcGeometry : name:" << name()
<< " referred from:" << referPatch.size() << endl;
calcTransforms
(
referPatch,
thisCtrs,
thisAreas,
nbrCtrs,
nbrAreas
);
}
void Foam::cyclicPolyPatch::initMovePoints(const pointField& p)
{
polyPatch::initMovePoints(p);
@ -750,82 +872,89 @@ const Foam::edgeList& Foam::cyclicPolyPatch::coupledPoints() const
{
if (!coupledPointsPtr_)
{
// Look at cyclic patch as two halves, A and B.
// Now all we know is that relative face index in halfA is same
// as coupled face in halfB and also that the 0th vertex
// corresponds.
WarningIn("cyclicPolyPatch::coupledPoints() const")
<< "to be implemented." << endl;
// From halfA point to halfB or -1.
labelList coupledPoint(nPoints(), -1);
for (label patchFaceA = 0; patchFaceA < size()/2; patchFaceA++)
{
const face& fA = localFaces()[patchFaceA];
forAll(fA, indexA)
{
label patchPointA = fA[indexA];
if (coupledPoint[patchPointA] == -1)
{
const face& fB = localFaces()[patchFaceA + size()/2];
label indexB = (fB.size() - indexA) % fB.size();
// Filter out points on wedge axis
if (patchPointA != fB[indexB])
{
coupledPoint[patchPointA] = fB[indexB];
}
}
}
coupledPointsPtr_ = new edgeList();
}
coupledPointsPtr_ = new edgeList(nPoints());
edgeList& connected = *coupledPointsPtr_;
// Extract coupled points.
label connectedI = 0;
forAll(coupledPoint, i)
{
if (coupledPoint[i] != -1)
{
connected[connectedI++] = edge(i, coupledPoint[i]);
}
}
connected.setSize(connectedI);
if (debug)
{
OFstream str
(
boundaryMesh().mesh().time().path()
/"coupledPoints.obj"
);
label vertI = 0;
Pout<< "Writing file " << str.name() << " with coordinates of "
<< "coupled points" << endl;
forAll(connected, i)
{
const point& a = points()[meshPoints()[connected[i][0]]];
const point& b = points()[meshPoints()[connected[i][1]]];
str<< "v " << a.x() << ' ' << a.y() << ' ' << a.z() << nl;
str<< "v " << b.x() << ' ' << b.y() << ' ' << b.z() << nl;
vertI += 2;
str<< "l " << vertI-1 << ' ' << vertI << nl;
}
}
// Remove any addressing calculated for the coupled edges calculation
const_cast<primitivePatch&>(static_cast<const primitivePatch&>(*this))
.clearOut();
}
// // Look at cyclic patch as two halves, A and B.
// // Now all we know is that relative face index in halfA is same
// // as coupled face in halfB and also that the 0th vertex
// // corresponds.
//
// // From halfA point to halfB or -1.
// labelList coupledPoint(nPoints(), -1);
//
// for (label patchFaceA = 0; patchFaceA < size()/2; patchFaceA++)
// {
// const face& fA = localFaces()[patchFaceA];
//
// forAll(fA, indexA)
// {
// label patchPointA = fA[indexA];
//
// if (coupledPoint[patchPointA] == -1)
// {
// const face& fB = localFaces()[patchFaceA + size()/2];
//
// label indexB = (fB.size() - indexA) % fB.size();
//
// // Filter out points on wedge axis
// if (patchPointA != fB[indexB])
// {
// coupledPoint[patchPointA] = fB[indexB];
// }
// }
// }
// }
//
// coupledPointsPtr_ = new edgeList(nPoints());
// edgeList& connected = *coupledPointsPtr_;
//
// // Extract coupled points.
// label connectedI = 0;
//
// forAll(coupledPoint, i)
// {
// if (coupledPoint[i] != -1)
// {
// connected[connectedI++] = edge(i, coupledPoint[i]);
// }
// }
//
// connected.setSize(connectedI);
//
// if (debug)
// {
// OFstream str
// (
// boundaryMesh().mesh().time().path()
// /"coupledPoints.obj"
// );
// label vertI = 0;
//
// Pout<< "Writing file " << str.name() << " with coordinates of "
// << "coupled points" << endl;
//
// forAll(connected, i)
// {
// const point& a = points()[meshPoints()[connected[i][0]]];
// const point& b = points()[meshPoints()[connected[i][1]]];
//
// str<< "v " << a.x() << ' ' << a.y() << ' ' << a.z() << nl;
// str<< "v " << b.x() << ' ' << b.y() << ' ' << b.z() << nl;
// vertI += 2;
//
// str<< "l " << vertI-1 << ' ' << vertI << nl;
// }
// }
//
// // Remove any addressing calculated for the coupled edges calculation
// const_cast<primitivePatch&>(static_cast<const primitivePatch&>(*this))
// .clearOut();
// }
return *coupledPointsPtr_;
}
@ -834,126 +963,133 @@ const Foam::edgeList& Foam::cyclicPolyPatch::coupledEdges() const
{
if (!coupledEdgesPtr_)
{
// Build map from points on halfA to points on halfB.
const edgeList& pointCouples = coupledPoints();
WarningIn("cyclicPolyPatch::coupledEdges() const")
<< "to be implemented." << endl;
Map<label> aToB(2*pointCouples.size());
forAll(pointCouples, i)
{
const edge& e = pointCouples[i];
aToB.insert(e[0], e[1]);
coupledEdgesPtr_ = new edgeList();
}
// Map from edge on half A to points (in halfB indices)
EdgeMap<label> edgeMap(nEdges());
for (label patchFaceA = 0; patchFaceA < size()/2; patchFaceA++)
{
const labelList& fEdges = faceEdges()[patchFaceA];
forAll(fEdges, i)
{
label edgeI = fEdges[i];
const edge& e = edges()[edgeI];
// Convert edge end points to corresponding points on halfB
// side.
Map<label>::const_iterator fnd0 = aToB.find(e[0]);
if (fnd0 != aToB.end())
{
Map<label>::const_iterator fnd1 = aToB.find(e[1]);
if (fnd1 != aToB.end())
{
edgeMap.insert(edge(fnd0(), fnd1()), edgeI);
}
}
}
}
coupledEdgesPtr_ = new edgeList(nEdges()/2);
edgeList& coupledEdges = *coupledEdgesPtr_;
label coupleI = 0;
for (label patchFaceB = size()/2; patchFaceB < size(); patchFaceB++)
{
const labelList& fEdges = faceEdges()[patchFaceB];
forAll(fEdges, i)
{
label edgeI = fEdges[i];
const edge& e = edges()[edgeI];
// Look up halfA edge from HashTable.
EdgeMap<label>::iterator iter = edgeMap.find(e);
if (iter != edgeMap.end())
{
label halfAEdgeI = iter();
// Store correspondence. Filter out edges on wedge axis.
if (halfAEdgeI != edgeI)
{
coupledEdges[coupleI++] = edge(halfAEdgeI, edgeI);
}
// Remove so we build unique list
edgeMap.erase(iter);
}
}
}
coupledEdges.setSize(coupleI);
// Some checks
forAll(coupledEdges, i)
{
const edge& e = coupledEdges[i];
if (e[0] == e[1] || e[0] < 0 || e[1] < 0)
{
FatalErrorIn("cyclicPolyPatch::coupledEdges() const")
<< "Problem : at position " << i
<< " illegal couple:" << e
<< abort(FatalError);
}
}
if (debug)
{
OFstream str
(
boundaryMesh().mesh().time().path()
/"coupledEdges.obj"
);
label vertI = 0;
Pout<< "Writing file " << str.name() << " with centres of "
<< "coupled edges" << endl;
forAll(coupledEdges, i)
{
const edge& e = coupledEdges[i];
const point& a = edges()[e[0]].centre(localPoints());
const point& b = edges()[e[1]].centre(localPoints());
str<< "v " << a.x() << ' ' << a.y() << ' ' << a.z() << nl;
str<< "v " << b.x() << ' ' << b.y() << ' ' << b.z() << nl;
vertI += 2;
str<< "l " << vertI-1 << ' ' << vertI << nl;
}
}
// Remove any addressing calculated for the coupled edges calculation
const_cast<primitivePatch&>(static_cast<const primitivePatch&>(*this))
.clearOut();
}
// // Build map from points on halfA to points on halfB.
// const edgeList& pointCouples = coupledPoints();
//
// Map<label> aToB(2*pointCouples.size());
//
// forAll(pointCouples, i)
// {
// const edge& e = pointCouples[i];
//
// aToB.insert(e[0], e[1]);
// }
//
// // Map from edge on half A to points (in halfB indices)
// EdgeMap<label> edgeMap(nEdges());
//
// for (label patchFaceA = 0; patchFaceA < size()/2; patchFaceA++)
// {
// const labelList& fEdges = faceEdges()[patchFaceA];
//
// forAll(fEdges, i)
// {
// label edgeI = fEdges[i];
//
// const edge& e = edges()[edgeI];
//
// // Convert edge end points to corresponding points on halfB
// // side.
// Map<label>::const_iterator fnd0 = aToB.find(e[0]);
// if (fnd0 != aToB.end())
// {
// Map<label>::const_iterator fnd1 = aToB.find(e[1]);
// if (fnd1 != aToB.end())
// {
// edgeMap.insert(edge(fnd0(), fnd1()), edgeI);
// }
// }
// }
// }
//
// coupledEdgesPtr_ = new edgeList(nEdges()/2);
// edgeList& coupledEdges = *coupledEdgesPtr_;
// label coupleI = 0;
//
// for (label patchFaceB = size()/2; patchFaceB < size(); patchFaceB++)
// {
// const labelList& fEdges = faceEdges()[patchFaceB];
//
// forAll(fEdges, i)
// {
// label edgeI = fEdges[i];
//
// const edge& e = edges()[edgeI];
//
// // Look up halfA edge from HashTable.
// EdgeMap<label>::iterator iter = edgeMap.find(e);
//
// if (iter != edgeMap.end())
// {
// label halfAEdgeI = iter();
//
// // Store correspondence. Filter out edges on wedge axis.
// if (halfAEdgeI != edgeI)
// {
// coupledEdges[coupleI++] = edge(halfAEdgeI, edgeI);
// }
//
// // Remove so we build unique list
// edgeMap.erase(iter);
// }
// }
// }
// coupledEdges.setSize(coupleI);
//
//
// // Some checks
//
// forAll(coupledEdges, i)
// {
// const edge& e = coupledEdges[i];
//
// if (e[0] == e[1] || e[0] < 0 || e[1] < 0)
// {
// FatalErrorIn("cyclicPolyPatch::coupledEdges() const")
// << "Problem : at position " << i
// << " illegal couple:" << e
// << abort(FatalError);
// }
// }
//
// if (debug)
// {
// OFstream str
// (
// boundaryMesh().mesh().time().path()
// /"coupledEdges.obj"
// );
// label vertI = 0;
//
// Pout<< "Writing file " << str.name() << " with centres of "
// << "coupled edges" << endl;
//
// forAll(coupledEdges, i)
// {
// const edge& e = coupledEdges[i];
//
// const point& a = edges()[e[0]].centre(localPoints());
// const point& b = edges()[e[1]].centre(localPoints());
//
// str<< "v " << a.x() << ' ' << a.y() << ' ' << a.z() << nl;
// str<< "v " << b.x() << ' ' << b.y() << ' ' << b.z() << nl;
// vertI += 2;
//
// str<< "l " << vertI-1 << ' ' << vertI << nl;
// }
// }
//
// // Remove any addressing calculated for the coupled edges calculation
// const_cast<primitivePatch&>(static_cast<const primitivePatch&>(*this))
// .clearOut();
// }
return *coupledEdgesPtr_;
}
@ -1305,6 +1441,8 @@ bool Foam::cyclicPolyPatch::order
void Foam::cyclicPolyPatch::write(Ostream& os) const
{
polyPatch::write(os);
os.writeKeyword("neighbourPatch") << neighbPatchName_
<< token::END_STATEMENT << nl;
os.writeKeyword("featureCos") << featureCos_ << token::END_STATEMENT << nl;
switch (transform_)
{

View File

@ -42,7 +42,6 @@ Description
SourceFiles
cyclicPolyPatch.C
cyclicPolyPatchMorph.C
\*---------------------------------------------------------------------------*/
@ -54,6 +53,7 @@ SourceFiles
#include "FixedList.H"
#include "edgeList.H"
#include "transform.H"
#include "polyBoundaryMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -85,6 +85,13 @@ private:
// Private data
//- Name of other half
const word neighbPatchName_;
//- Index of other half
mutable label neighbPatchID_;
//- List of edges formed from connected points. e[0] is the point on
// the first half of the patch, e[1] the corresponding point on the
// second half.
@ -108,6 +115,24 @@ private:
point rotationCentre_;
// Transformation
//- Has non-zero separation
mutable bool separated_;
//- Offset (distance) vector from one side of the couple to the other
mutable vector separation_;
//- No rotation
mutable bool parallel_;
//- Face transformation tensor
mutable tensor forwardT_;
//- Neighbour-cell transformation tensor
mutable tensor reverseT_;
// Private member functions
//- Find amongst selected faces the one with the largest area
@ -115,6 +140,15 @@ private:
void calcTransforms();
void calcTransforms
(
const primitivePatch& half0,
const UList<point>& half0Ctrs,
const UList<point>& half0Areas,
const UList<point>& half1Ctrs,
const UList<point>& half1Areas
);
// Face ordering
@ -172,9 +206,30 @@ protected:
//- Initialise the calculation of the patch geometry
virtual void initGeometry();
//- Initialise the calculation of the patch geometry
virtual void initGeometry
(
const primitivePatch& referPatch,
UList<point>& nbrCtrs,
UList<point>& nbrAreas,
UList<point>& nbrCc
);
//- Calculate the patch geometry
virtual void calcGeometry();
//- Calculate the patch geometry
virtual void calcGeometry
(
const primitivePatch& referPatch,
const UList<point>& thisCtrs,
const UList<point>& thisAreas,
const UList<point>& thisCc,
const UList<point>& nbrCtrs,
const UList<point>& nbrAreas,
const UList<point>& nbrCc
);
//- Initialise the patches for moving points
virtual void initMovePoints(const pointField&);
@ -225,7 +280,8 @@ public:
const polyBoundaryMesh& bm,
const label index,
const label newSize,
const label newStart
const label newStart,
const word& neighbPatchName
);
//- Construct and return a clone, resetting the boundary mesh
@ -246,7 +302,15 @@ public:
{
return autoPtr<polyPatch>
(
new cyclicPolyPatch(*this, bm, index, newSize, newStart)
new cyclicPolyPatch
(
*this,
bm,
index,
newSize,
newStart,
neighbPatchName_
)
);
}
@ -258,6 +322,48 @@ public:
// Member Functions
const word& neighbPatchName() const
{
return neighbPatchName_;
}
//- Neighbour patchID.
label neighbPatchID() const
{
if (neighbPatchID_ == -1)
{
neighbPatchID_ = this->boundaryMesh().findPatchID
(
neighbPatchName_
);
if (neighbPatchID_ == -1)
{
FatalErrorIn("cyclicPolyPatch::neighbPatchID() const")
<< "Illegal neighbourPatch name " << neighbPatchName_
<< endl << "Valid patch names are "
<< this->boundaryMesh().names()
<< exit(FatalError);
}
}
return neighbPatchID_;
}
bool owner() const
{
return index() < neighbPatchID();
}
bool neighbour() const
{
return !owner();
}
const cyclicPolyPatch& neighbPatch() const
{
const polyPatch& pp = this->boundaryMesh()[neighbPatchID()];
return refCast<const cyclicPolyPatch>(pp);
}
//- Return connected points (in patch local point indexing). Demand
// driven calculation. Does primitivePatch::clearOut after calculation!
const edgeList& coupledPoints() const;
@ -267,67 +373,64 @@ public:
const edgeList& coupledEdges() const;
vector separation(const label facei) const
//- Are the coupled planes separated
virtual bool separated() const
{
if (facei < size()/2)
{
return coupledPolyPatch::separation()[0];
}
else
{
return -coupledPolyPatch::separation()[0];
}
return separated_;
}
const tensor& transformT(const label facei) const
virtual const vector& separation() const
{
if (facei < size()/2)
{
return reverseT()[0];
}
else
{
return forwardT()[0];
}
return separation_;
}
template<class T>
T transform(const T& t, const label facei) const
//- Are the cyclic planes parallel
virtual bool parallel() const
{
if (parallel())
{
return t;
}
else
{
return Foam::transform(transformT(facei), t);
}
return parallel_;
}
label transformLocalFace(const label facei) const
//- Return face transformation tensor
virtual const tensor& forwardT() const
{
if (facei < size()/2)
{
return facei + size()/2;
return forwardT_;
}
else
//- Return neighbour-cell transformation tensor
virtual const tensor& reverseT() const
{
return facei - size()/2;
}
return reverseT_;
}
label transformGlobalFace(const label facei) const
{
if (facei - start() < size()/2)
label offset = facei-start();
label neighbStart = neighbPatch().start();
//label neighbSize = neighbPatch().size();
//label neighbOffset = facei-neighbStart;
if (offset >= 0 && offset < size())
{
return facei + size()/2;
return neighbStart+offset;
}
//else if (neighbOffset >= 0 && neighbOffset < neighbSize)
//{
// return start()+neighbOffset;
//}
else
{
return facei - size()/2;
FatalErrorIn
(
"cyclicPolyPatch::transformGlobalFace(const label) const"
) << "Face " << facei << " not in patch " << name()
//<< " nor in patch " << neighbPatch().name()
<< exit(FatalError);
return -1;
}
}
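As an aside on the new transformGlobalFace above: it maps a mesh face on this patch onto the matching face on the neighbour patch by preserving the offset into the patch. A minimal standalone sketch of that arithmetic in plain C++ (not OpenFOAM types; the start/size values are invented for illustration):

#include <iostream>
#include <stdexcept>

int transformGlobalFace(int facei, int start, int size, int neighbStart)
{
    const int offset = facei - start;   // position of the face within this patch
    if (offset >= 0 && offset < size)
    {
        return neighbStart + offset;    // same position on the neighbour patch
    }
    throw std::out_of_range("face not in patch");   // real code raises FatalError
}

int main()
{
    // Hypothetical patch: starts at mesh face 100 (50 faces), neighbour at 250.
    std::cout << transformGlobalFace(103, 100, 50, 250) << '\n';   // prints 253
    return 0;
}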
////- Transform a position on a (local) face
//virtual void transformPosition(point&, const label facei) const;
//- Initialize ordering for primitivePatch. Does not
// refer to *this (except for name() and type() etc.)

View File

@ -33,7 +33,6 @@ License
#include "OFstream.H"
#include "polyMesh.H"
#include "Time.H"
#include "transformList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -44,6 +43,34 @@ namespace Foam
}
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
void Foam::processorPolyPatch::checkSubPatches() const
{
if (starts_.size() != patchIDs_.size()+1)
{
FatalErrorIn("processorPolyPatch::checkPatches() const")
<< "starts should have one more element than patchIDs." << endl
<< "starts:" << starts_ << " patchIDs:" << patchIDs_
<< exit(FatalError);
}
if (starts_[0] != 0)
{
FatalErrorIn("processorPolyPatch::checkPatches() const")
<< "starts[0] should be 0." << endl
<< "starts:" << starts_ << " patchIDs:" << patchIDs_
<< exit(FatalError);
}
if (starts_[starts_.size()-1] != size())
{
FatalErrorIn("processorPolyPatch::checkPatches() const")
<< "Last element in starts should be the size." << endl
<< "starts:" << starts_ << " size:" << size()
<< exit(FatalError);
}
}
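The checkSubPatches checks above encode the invariant behind the new per-sub-patch bookkeeping: starts_ has one more entry than patchIDs_, begins at 0, and ends at the patch size, so entry i of patchIDs_ owns the half-open face range [starts_[i], starts_[i+1]). A simplified standalone sketch of that invariant in plain C++, with made-up example values:

#include <cassert>
#include <vector>

int main()
{
    // Hypothetical processor patch of 10 faces built from two originating
    // patches: faces [0,4) from patch 3, faces [4,10) from cyclic patch 7.
    std::vector<int> patchIDs = {3, 7};
    std::vector<int> starts   = {0, 4, 10};   // one more entry than patchIDs
    const int patchSize = 10;

    assert(starts.size() == patchIDs.size() + 1);   // one extra entry
    assert(starts.front() == 0);                    // first slice begins at 0
    assert(starts.back() == patchSize);             // last entry is the size

    return 0;
}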
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Foam::processorPolyPatch::processorPolyPatch
@ -54,18 +81,24 @@ Foam::processorPolyPatch::processorPolyPatch
const label index,
const polyBoundaryMesh& bm,
const int myProcNo,
const int neighbProcNo
const int neighbProcNo,
const labelList& patchIDs,
const labelList& starts
)
:
coupledPolyPatch(name, size, start, index, bm),
myProcNo_(myProcNo),
neighbProcNo_(neighbProcNo),
patchIDs_(patchIDs),
starts_(starts),
neighbFaceCentres_(),
neighbFaceAreas_(),
neighbFaceCellCentres_(),
neighbPointsPtr_(NULL),
neighbEdgesPtr_(NULL)
{}
{
checkSubPatches();
}
Foam::processorPolyPatch::processorPolyPatch
@ -79,12 +112,16 @@ Foam::processorPolyPatch::processorPolyPatch
coupledPolyPatch(name, dict, index, bm),
myProcNo_(readLabel(dict.lookup("myProcNo"))),
neighbProcNo_(readLabel(dict.lookup("neighbProcNo"))),
patchIDs_(dict.lookup("patchIDs")),
starts_(dict.lookup("starts")),
neighbFaceCentres_(),
neighbFaceAreas_(),
neighbFaceCellCentres_(),
neighbPointsPtr_(NULL),
neighbEdgesPtr_(NULL)
{}
{
checkSubPatches();
}
Foam::processorPolyPatch::processorPolyPatch
@ -96,6 +133,8 @@ Foam::processorPolyPatch::processorPolyPatch
coupledPolyPatch(pp, bm),
myProcNo_(pp.myProcNo_),
neighbProcNo_(pp.neighbProcNo_),
patchIDs_(pp.patchIDs_),
starts_(pp.starts_),
neighbFaceCentres_(),
neighbFaceAreas_(),
neighbFaceCellCentres_(),
@ -110,18 +149,24 @@ Foam::processorPolyPatch::processorPolyPatch
const polyBoundaryMesh& bm,
const label index,
const label newSize,
const label newStart
const label newStart,
const labelList& patchIDs,
const labelList& starts
)
:
coupledPolyPatch(pp, bm, index, newSize, newStart),
myProcNo_(pp.myProcNo_),
neighbProcNo_(pp.neighbProcNo_),
patchIDs_(patchIDs),
starts_(starts),
neighbFaceCentres_(),
neighbFaceAreas_(),
neighbFaceCellCentres_(),
neighbPointsPtr_(NULL),
neighbEdgesPtr_(NULL)
{}
{
checkSubPatches();
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
@ -137,25 +182,67 @@ Foam::processorPolyPatch::~processorPolyPatch()
void Foam::processorPolyPatch::initGeometry()
{
Pout<< "**processorPolyPatch::initGeometry()" << endl;
if (Pstream::parRun())
{
pointField fc(size());
vectorField fa(size());
pointField cc(size());
forAll(patchIDs_, i)
{
label patchI = patchIDs_[i];
autoPtr<primitivePatch> subPp = subPatch(i);
SubField<point> subFc(subSlice(fc, i));
SubField<vector> subFa(subSlice(fa, i));
SubField<point> subCc(subSlice(cc, i));
subFc.assign(subSlice(faceCentres(), i));
subFa.assign(subSlice(faceAreas(), i));
subCc.assign(subSlice(faceCellCentres()(), i));
if (patchI != -1)
{
coupledPolyPatch& pp = const_cast<coupledPolyPatch&>
(
refCast<const coupledPolyPatch>
(
boundaryMesh()[patchI]
)
);
Pout<< name() << " calling initGeometry on " << pp.name()
<< endl;
pp.initGeometry(subPp, subFc, subFa, subCc);
Pout<< name() << " from patchI:" << patchI
<< " calculated fc:" << subFc
<< " fa:" << subFa << " subCC:" << subCc << endl;
Pout<< name() << " fc:" << fc << endl;
}
}
Pout<< name() << " fc:" << fc << " fa:" << fa << " cc:" << cc << endl;
OPstream toNeighbProc
(
Pstream::blocking,
neighbProcNo(),
3*(sizeof(label) + size()*sizeof(vector))
3*(sizeof(label) + size()*sizeof(vector) + sizeof(scalar))
);
toNeighbProc
<< faceCentres()
<< faceAreas()
<< faceCellCentres();
toNeighbProc << fc << fa << cc;
}
}
void Foam::processorPolyPatch::calcGeometry()
{
Pout<< "processorPolyPatch::calcGeometry() for " << name() << endl;
if (Pstream::parRun())
{
{
@ -163,7 +250,7 @@ void Foam::processorPolyPatch::calcGeometry()
(
Pstream::blocking,
neighbProcNo(),
3*(sizeof(label) + size()*sizeof(vector))
3*(sizeof(label) + size()*sizeof(vector) + sizeof(scalar))
);
fromNeighbProc
>> neighbFaceCentres_
@ -171,69 +258,57 @@ void Foam::processorPolyPatch::calcGeometry()
>> neighbFaceCellCentres_;
}
// My normals
vectorField faceNormals(size());
Pout<< "processorPolyPatch::calcGeometry() : received data for "
<< neighbFaceCentres_.size() << " faces." << endl;
// Neighbour normals
vectorField nbrFaceNormals(neighbFaceAreas_.size());
forAll(patchIDs_, i)
{
label patchI = patchIDs_[i];
// Calculate normals from areas and check
forAll(faceNormals, facei)
if (patchI == -1)
{
scalar magSf = mag(faceAreas()[facei]);
scalar nbrMagSf = mag(neighbFaceAreas_[facei]);
scalar avSf = (magSf + nbrMagSf)/2.0;
if (magSf < ROOTVSMALL && nbrMagSf < ROOTVSMALL)
{
// Undetermined normal. Use dummy normal to force separation
// check. (note use of sqrt(VSMALL) since that is how mag
// scales)
faceNormals[facei] = point(1, 0, 0);
nbrFaceNormals[facei] = faceNormals[facei];
}
else if (mag(magSf - nbrMagSf)/avSf > coupledPolyPatch::matchTol)
{
FatalErrorIn
(
"processorPolyPatch::calcGeometry()"
) << "face " << facei << " area does not match neighbour by "
<< 100*mag(magSf - nbrMagSf)/avSf
<< "% -- possible face ordering problem." << endl
<< "patch:" << name()
<< " my area:" << magSf
<< " neighbour area:" << nbrMagSf
<< " matching tolerance:" << coupledPolyPatch::matchTol
<< endl
<< "Mesh face:" << start()+facei
<< " vertices:"
<< IndirectList<point>(points(), operator[](facei))()
<< endl
<< "Rerun with processor debug flag set for"
<< " more information." << exit(FatalError);
// Anything needs doing for ex-internal faces?
}
else
{
faceNormals[facei] = faceAreas()[facei]/magSf;
nbrFaceNormals[facei] = neighbFaceAreas_[facei]/nbrMagSf;
}
}
calcTransformTensors
coupledPolyPatch& pp = const_cast<coupledPolyPatch&>
(
faceCentres(),
neighbFaceCentres_,
faceNormals,
nbrFaceNormals,
calcFaceTol(*this, points(), faceCentres())
refCast<const coupledPolyPatch>
(
boundaryMesh()[patchI]
)
);
Pout<< "processorPolyPatch::calcGeometry() : referring to " << pp.name()
<< " for faces size:" << starts_[i+1]-starts_[i]
<< " start:" << starts_[i]
<< endl;
pp.calcGeometry
(
subPatch(i),
subSlice(faceCentres(), i),
subSlice(faceAreas(), i),
subSlice(faceCellCentres()(), i),
subSlice(neighbFaceCentres_, i),
subSlice(neighbFaceAreas_, i),
subSlice(neighbFaceCellCentres_, i)
);
}
}
}
Pout<< "**neighbFaceCentres_:" << neighbFaceCentres_ << endl;
Pout<< "**neighbFaceAreas_:" << neighbFaceAreas_ << endl;
Pout<< "**neighbFaceCellCentres_:" << neighbFaceCellCentres_ << endl;
}
void Foam::processorPolyPatch::initMovePoints(const pointField& p)
{
// Update local polyPatch quantities
polyPatch::movePoints(p);
// Recalculate geometry
processorPolyPatch::initGeometry();
}
@ -426,54 +501,24 @@ const Foam::labelList& Foam::processorPolyPatch::neighbEdges() const
}
Foam::label Foam::processorPolyPatch::whichSubPatch(const label facei) const
{
label index = findSortedIndex(starts_, facei);
if (index < 0 || index >= starts_.size())
{
FatalErrorIn("processorPolyPatch::whichSubPatch(const label) const")
<< "Illegal local face index " << facei << " for patch " << name()
<< endl << "Face index should be between 0 and "
<< starts_[starts_.size()-1]-1 << abort(FatalError);
}
return patchIDs_[index];
}
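whichSubPatch above locates the originating patch of a local face by searching the sorted starts_ list. A standalone sketch of the same lookup in plain C++, using std::upper_bound in place of the findSortedIndex helper and invented example values:

#include <algorithm>
#include <iostream>
#include <vector>

int whichSubPatch(int facei, const std::vector<int>& starts,
                  const std::vector<int>& patchIDs)
{
    // starts = {0, s1, ..., size}; face facei lies in [starts[i], starts[i+1])
    // and therefore belongs to originating patch patchIDs[i].
    auto it = std::upper_bound(starts.begin(), starts.end(), facei);
    int index = int(it - starts.begin()) - 1;
    if (index < 0 || index + 1 >= int(starts.size()))
    {
        return -2;   // out of range (the real code raises a fatal error)
    }
    return patchIDs[index];
}

int main()
{
    std::vector<int> patchIDs = {-1, 7};    // -1: faces that were internal
    std::vector<int> starts   = {0, 4, 10};
    std::cout << whichSubPatch(2, starts, patchIDs) << ' '    // -1
              << whichSubPatch(6, starts, patchIDs) << '\n';  // 7
    return 0;
}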
void Foam::processorPolyPatch::initOrder(const primitivePatch& pp) const
{
if (!Pstream::parRun())
{
return;
}
if (debug)
{
fileName nm
(
boundaryMesh().mesh().time().path()
/name()+"_faces.obj"
);
Pout<< "processorPolyPatch::order : Writing my " << pp.size()
<< " faces to OBJ file " << nm << endl;
writeOBJ(nm, pp, pp.points());
// Calculate my face centres
pointField ctrs(calcFaceCentres(pp, pp.points()));
OFstream localStr
(
boundaryMesh().mesh().time().path()
/name() + "_localFaceCentres.obj"
);
Pout<< "processorPolyPatch::order : "
<< "Dumping " << ctrs.size()
<< " local faceCentres to " << localStr.name() << endl;
forAll(ctrs, faceI)
{
writeOBJ(localStr, ctrs[faceI]);
}
}
const bool isMaster = Pstream::myProcNo() < neighbProcNo();
if (isMaster)
{
pointField ctrs(calcFaceCentres(pp, pp.points()));
pointField anchors(getAnchorPoints(pp, pp.points()));
// Now send all info over to the neighbour
OPstream toNeighbour(Pstream::blocking, neighbProcNo());
toNeighbour << ctrs << anchors;
}
notImplemented("processorPolyPatch::order(..)");
}
@ -488,269 +533,8 @@ bool Foam::processorPolyPatch::order
labelList& rotation
) const
{
if (!Pstream::parRun())
{
notImplemented("processorPolyPatch::order(..)");
return false;
}
faceMap.setSize(pp.size());
faceMap = -1;
rotation.setSize(pp.size());
rotation = 0;
const bool isMaster = Pstream::myProcNo() < neighbProcNo();
if (isMaster)
{
// Do nothing (i.e. identical mapping, zero rotation).
// See comment at top.
forAll(faceMap, patchFaceI)
{
faceMap[patchFaceI] = patchFaceI;
}
return false;
}
else
{
vectorField masterCtrs;
vectorField masterAnchors;
// Receive data from neighbour
{
IPstream fromNeighbour(Pstream::blocking, neighbProcNo());
fromNeighbour >> masterCtrs >> masterAnchors;
}
// Calculate my face centres
pointField ctrs(calcFaceCentres(pp, pp.points()));
// Calculate typical distance from face centre
scalarField tols(calcFaceTol(pp, pp.points(), ctrs));
if (debug || masterCtrs.size() != pp.size())
{
{
OFstream nbrStr
(
boundaryMesh().mesh().time().path()
/name() + "_nbrFaceCentres.obj"
);
Pout<< "processorPolyPatch::order : "
<< "Dumping neighbour faceCentres to " << nbrStr.name()
<< endl;
forAll(masterCtrs, faceI)
{
writeOBJ(nbrStr, masterCtrs[faceI]);
}
}
if (masterCtrs.size() != pp.size())
{
FatalErrorIn
(
"processorPolyPatch::order(const primitivePatch&"
", labelList&, labelList&) const"
) << "in patch:" << name() << " : "
<< "Local size of patch is " << pp.size() << " (faces)."
<< endl
<< "Received from neighbour " << masterCtrs.size()
<< " faceCentres!"
<< abort(FatalError);
}
}
// Geometric match of face centre vectors
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 1. Try existing ordering and transformation
bool matchedAll = false;
if
(
separated()
&& (separation().size() == 1 || separation().size() == pp.size())
)
{
vectorField transformedCtrs;
const vectorField& v = separation();
if (v.size() == 1)
{
transformedCtrs = masterCtrs-v[0];
}
else
{
transformedCtrs = masterCtrs-v;
}
matchedAll = matchPoints
(
ctrs,
transformedCtrs,
tols,
true,
faceMap
);
if (matchedAll)
{
// Use transformed centers from now on
masterCtrs = transformedCtrs;
// Transform anchors
if (v.size() == 1)
{
masterAnchors -= v[0];
}
else
{
masterAnchors -= v;
}
}
}
else if
(
!parallel()
&& (forwardT().size() == 1 || forwardT().size() == pp.size())
)
{
vectorField transformedCtrs = masterCtrs;
transformList(forwardT(), transformedCtrs);
matchedAll = matchPoints
(
ctrs,
transformedCtrs,
tols,
true,
faceMap
);
if (matchedAll)
{
// Use transformed centers from now on
masterCtrs = transformedCtrs;
// Transform anchors
transformList(forwardT(), masterAnchors);
}
}
// 2. Try zero separation automatic matching
if (!matchedAll)
{
matchedAll = matchPoints(ctrs, masterCtrs, tols, true, faceMap);
}
if (!matchedAll || debug)
{
// Dump faces
fileName str
(
boundaryMesh().mesh().time().path()
/name()/name()+"_faces.obj"
);
Pout<< "processorPolyPatch::order :"
<< " Writing faces to OBJ file " << str.name() << endl;
writeOBJ(str, pp, pp.points());
OFstream ccStr
(
boundaryMesh().mesh().time().path()
/name() + "_faceCentresConnections.obj"
);
Pout<< "processorPolyPatch::order :"
<< " Dumping newly found match as lines between"
<< " corresponding face centres to OBJ file " << ccStr.name()
<< endl;
label vertI = 0;
forAll(ctrs, faceI)
{
label masterFaceI = faceMap[faceI];
if (masterFaceI != -1)
{
const point& c0 = masterCtrs[masterFaceI];
const point& c1 = ctrs[faceI];
writeOBJ(ccStr, c0, c1, vertI);
}
}
}
if (!matchedAll)
{
SeriousErrorIn
(
"processorPolyPatch::order(const primitivePatch&"
", labelList&, labelList&) const"
) << "in patch:" << name() << " : "
<< "Cannot match vectors to faces on both sides of patch"
<< endl
<< " masterCtrs[0]:" << masterCtrs[0] << endl
<< " ctrs[0]:" << ctrs[0] << endl
<< " Please check your topology changes or maybe you have"
<< " multiple separated (from cyclics) processor patches"
<< endl
<< " Continuing with incorrect face ordering from now on!"
<< endl;
return false;
}
// Set rotation.
forAll(faceMap, oldFaceI)
{
// The face f will be at newFaceI (after morphing) and we want its
// anchorPoint (= f[0]) to align with the anchorpoint for the
// corresponding face on the other side.
label newFaceI = faceMap[oldFaceI];
const point& wantedAnchor = masterAnchors[newFaceI];
rotation[newFaceI] = getRotation
(
pp.points(),
pp[oldFaceI],
wantedAnchor,
tols[oldFaceI]
);
if (rotation[newFaceI] == -1)
{
SeriousErrorIn
(
"processorPolyPatch::order(const primitivePatch&"
", labelList&, labelList&) const"
) << "in patch " << name()
<< " : "
<< "Cannot find point on face " << pp[oldFaceI]
<< " with vertices "
<< IndirectList<point>(pp.points(), pp[oldFaceI])()
<< " that matches point " << wantedAnchor
<< " when matching the halves of processor patch " << name()
<< "Continuing with incorrect face ordering from now on!"
<< endl;
return false;
}
}
forAll(faceMap, faceI)
{
if (faceMap[faceI] != faceI || rotation[faceI] != 0)
{
return true;
}
}
return false;
}
}
@ -761,6 +545,10 @@ void Foam::processorPolyPatch::write(Ostream& os) const
<< token::END_STATEMENT << nl;
os.writeKeyword("neighbProcNo") << neighbProcNo_
<< token::END_STATEMENT << nl;
os.writeKeyword("patchIDs") << patchIDs_
<< token::END_STATEMENT << nl;
os.writeKeyword("starts") << starts_
<< token::END_STATEMENT << nl;
}

View File

@ -44,6 +44,8 @@ SourceFiles
#define processorPolyPatch_H
#include "coupledPolyPatch.H"
#include "polyBoundaryMesh.H"
#include "faceListFwd.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -63,6 +65,14 @@ class processorPolyPatch
int myProcNo_;
int neighbProcNo_;
// Per patch information
//- Originating patches
labelList patchIDs_;
//- Slice starts
labelList starts_;
//- Processor-neighbour patch face centres
vectorField neighbFaceCentres_;
@ -81,16 +91,9 @@ class processorPolyPatch
mutable labelList* neighbEdgesPtr_;
// Private member functions
// Private static data
//- Whether to use geometric or topological matching
static bool geometricMatch_;
//- Relative tolerance (for geometric matching only). Is factor of
// maximum edge length per face.
static scalar matchTol_;
void checkSubPatches() const;
protected:
@ -99,9 +102,38 @@ protected:
//- Initialise the calculation of the patch geometry
void initGeometry();
//- Initialise the calculation of the patch geometry with externally
// provided geometry
virtual void initGeometry
(
const primitivePatch& referPatch,
UList<point>&,
UList<point>&,
UList<point>&
)
{
notImplemented("processorPolyPatch::initGeometry(..)");
}
//- Calculate the patch geometry
void calcGeometry();
//- Calculate the patch geometry with externally
// provided geometry
virtual void calcGeometry
(
const primitivePatch& referPatch,
const UList<point>& thisCtrs,
const UList<point>& thisAreas,
const UList<point>& thisCc,
const UList<point>& nbrCtrs,
const UList<point>& nbrAreas,
const UList<point>& nbrCc
)
{
notImplemented("processorPolyPatch::calcGeometry(..)");
}
//- Initialise the patches for moving points
void initMovePoints(const pointField&);
@ -132,7 +164,9 @@ public:
const label index,
const polyBoundaryMesh& bm,
const int myProcNo,
const int neighbProcNo
const int neighbProcNo,
const labelList& patchIDs,
const labelList& starts
);
//- Construct from dictionary
@ -145,7 +179,11 @@ public:
);
//- Construct as copy, resetting the boundary mesh
processorPolyPatch(const processorPolyPatch&, const polyBoundaryMesh&);
processorPolyPatch
(
const processorPolyPatch&,
const polyBoundaryMesh&
);
//- Construct as given the original patch and resetting the
// face list and boundary mesh information
@ -155,7 +193,9 @@ public:
const polyBoundaryMesh& bm,
const label index,
const label newSize,
const label newStart
const label newStart,
const labelList& patchIDs,
const labelList& starts
);
//- Construct and return a clone, resetting the boundary mesh
@ -171,7 +211,9 @@ public:
const polyBoundaryMesh& bm,
const label index,
const label newSize,
const label newStart
const label newStart,
const labelList& patchIDs,
const labelList& starts
) const
{
return autoPtr<polyPatch>
@ -182,7 +224,9 @@ public:
bm,
index,
newSize,
newStart
newStart,
patchIDs,
starts
)
);
}
@ -219,6 +263,16 @@ public:
return !owner();
}
const labelList& patchIDs() const
{
return patchIDs_;
}
const labelList& starts() const
{
return starts_;
}
//- Return processor-neighbour patch face centres
const vectorField& neighbFaceCentres() const
{
@ -237,6 +291,146 @@ public:
return neighbFaceCellCentres_;
}
//- Are the planes separated.
virtual bool separated() const
{
notImplemented("processorPolyPatch::separated(..)");
return false;
}
//- If the planes are separated the separation vector.
virtual const vector& separation() const
{
notImplemented("processorPolyPatch::separation(..)");
return vector::zero;
}
//- Are the cyclic planes parallel.
virtual bool parallel() const
{
forAll(patchIDs_, i)
{
label patchI = patchIDs_[i];
if (patchI != -1)
{
if
(
!refCast<const coupledPolyPatch>
(
boundaryMesh()[patchI]
).parallel()
)
{
return false;
}
}
}
return true;
}
//- Return face transformation tensor.
virtual const tensor& forwardT() const
{
notImplemented("processorPolyPatch::forwardT(..)");
return tensor::zero;
}
//- Return neighbour-cell transformation tensor.
virtual const tensor& reverseT() const
{
notImplemented("processorPolyPatch::reverseT(..)");
return tensor::zero;
}
//- Find sub patch for local face. -1 for internal faces or label of
// cyclic patch.
label whichSubPatch(const label facei) const;
//- Slice patch
autoPtr<primitivePatch> subPatch(const label subI) const
{
const faceList& thisFaces = static_cast<const faceList&>(*this);
return autoPtr<primitivePatch>
(
new primitivePatch
(
faceSubList
(
thisFaces,
this->starts_[subI+1]-this->starts_[subI],
this->starts_[subI]
),
this->points()
)
);
}
//- Slice patchlist to subpatch
template<class T>
const typename List<T>::subList subSlice
(
const UList<T>& l,
const label subI
) const
{
return typename List<T>::subList
(
l,
this->starts_[subI+1]-this->starts_[subI],
this->starts_[subI]
);
}
//- Slice patchlist to subpatch
template<class T>
typename List<T>::subList subSlice
(
UList<T>& l,
const label subI
)
{
return typename List<T>::subList
(
l,
this->starts_[subI+1]-this->starts_[subI],
this->starts_[subI]
);
}
//- Slice patchField to subpatch
template<class T>
const typename Field<T>::subField subSlice
(
const Field<T>& l,
const label subI
) const
{
return typename Field<T>::subField
(
l,
this->starts_[subI+1]-this->starts_[subI],
this->starts_[subI]
);
}
//- Slice patchField to subpatch
template<class T>
typename Field<T>::subField subSlice
(
Field<T>& l,
const label subI
)
{
return typename Field<T>::subField
(
l,
this->starts_[subI+1]-this->starts_[subI],
this->starts_[subI]
);
}
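The subPatch/subSlice helpers above all apply the same arithmetic: sub-patch subI owns the contiguous half-open range [starts_[subI], starts_[subI+1]) of the full patch list. A simplified standalone sketch in plain C++ (the real helpers return non-copying SubList/SubField views; a copy is taken here only to keep the sketch self-contained):

#include <iostream>
#include <vector>

template<class T>
std::vector<T> subSlice(const std::vector<T>& l,
                        const std::vector<int>& starts, int subI)
{
    // Length starts[subI+1]-starts[subI], offset starts[subI], as above.
    return std::vector<T>(l.begin() + starts[subI],
                          l.begin() + starts[subI + 1]);
}

int main()
{
    std::vector<double> faceData = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6};
    std::vector<int> starts = {0, 2, 6};
    for (double x : subSlice(faceData, starts, 1))   // entries 2..5
    {
        std::cout << x << ' ';
    }
    std::cout << '\n';
    return 0;
}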
//- Return neighbour point labels. This is for my local point (-1 or)
// the corresponding local point on the other side. It is -1 if
// there are multiple corresponding points on this or the other side
@ -247,6 +441,8 @@ public:
// corresponding local edge on the other side. See above for -1 cause.
const labelList& neighbEdges() const;
////- Transform a position
//virtual void transformPosition(point&, const label facei) const;
//- Initialize ordering for primitivePatch. Does not
// refer to *this (except for name() and type() etc.)

View File

@ -32,15 +32,7 @@ Description
SourceFiles
polyPatch.C
calcPolyPatchAddressing.C
calcPolyPatchFaceCells.C
calcPolyPatchIntBdryEdges.C
calcPolyPatchMeshEdges.C
calcPolyPatchMeshData.C
calcPolyPatchPointAddressing.C
clearPolyPatch.C
newPolyPatch.C
polyPatchTools.C
\*---------------------------------------------------------------------------*/

View File

@ -49,26 +49,26 @@ bool Foam::syncTools::hasCouples(const polyBoundaryMesh& patches)
void Foam::syncTools::checkTransform
(
const coupledPolyPatch& pp,
const processorPolyPatch& pp,
const bool applySeparation
)
{
if (!pp.parallel() && pp.forwardT().size() > 1)
{
FatalErrorIn("syncTools::checkTransform(const coupledPolyPatch&)")
<< "Non-uniform transformation not supported for point or edge"
<< " fields." << endl
<< "Patch:" << pp.name()
<< abort(FatalError);
}
if (applySeparation && pp.separated() && pp.separation().size() > 1)
{
FatalErrorIn("syncTools::checkTransform(const coupledPolyPatch&)")
<< "Non-uniform separation vector not supported for point or edge"
<< " fields." << endl
<< "Patch:" << pp.name()
<< abort(FatalError);
}
// if (!pp.parallel() && pp.forwardT().size() > 1)
// {
// FatalErrorIn("syncTools::checkTransform(const coupledPolyPatch&)")
// << "Non-uniform transformation not supported for point or edge"
// << " fields." << endl
// << "Patch:" << pp.name()
// << abort(FatalError);
// }
// if (applySeparation && pp.separated() && pp.separation().size() > 1)
// {
// FatalErrorIn("syncTools::checkTransform(const coupledPolyPatch&)")
// << "Non-uniform separation vector not supported for point or edge"
// << " fields." << endl
// << "Patch:" << pp.name()
// << abort(FatalError);
// }
}
@ -365,34 +365,13 @@ Foam::PackedList<1> Foam::syncTools::getMasterFaces(const polyMesh& mesh)
template <>
void Foam::syncTools::separateList
(
const vectorField& separation,
const vector& separation,
UList<vector>& field
)
{
if (separation.size() == 1)
{
// Single value for all.
forAll(field, i)
{
field[i] += separation[0];
}
}
else if (separation.size() == field.size())
{
forAll(field, i)
{
field[i] += separation[i];
}
}
else
{
FatalErrorIn
(
"syncTools::separateList(const vectorField&, UList<vector>&)"
) << "Sizes of field and transformation not equal. field:"
<< field.size() << " transformation:" << separation.size()
<< abort(FatalError);
field[i] += separation;
}
}
@ -400,33 +379,13 @@ void Foam::syncTools::separateList
template <>
void Foam::syncTools::separateList
(
const vectorField& separation,
const vector& separation,
Map<vector>& field
)
{
if (separation.size() == 1)
{
// Single value for all.
forAllIter(Map<vector>, field, iter)
{
iter() += separation[0];
}
}
else if (separation.size() == field.size())
{
forAllIter(Map<vector>, field, iter)
{
iter() += separation[iter.key()];
}
}
else
{
FatalErrorIn
(
"syncTools::separateList(const vectorField&, Map<vector>&)"
) << "Sizes of field and transformation not equal. field:"
<< field.size() << " transformation:" << separation.size()
<< abort(FatalError);
iter() += separation;
}
}
@ -434,26 +393,13 @@ void Foam::syncTools::separateList
template <>
void Foam::syncTools::separateList
(
const vectorField& separation,
const vector& separation,
EdgeMap<vector>& field
)
{
if (separation.size() == 1)
{
// Single value for all.
forAllIter(EdgeMap<vector>, field, iter)
{
iter() += separation[0];
}
}
else
{
FatalErrorIn
(
"syncTools::separateList(const vectorField&, EdgeMap<vector>&)"
) << "Multiple separation vectors not supported. field:"
<< field.size() << " transformation:" << separation.size()
<< abort(FatalError);
iter() += separation;
}
}

View File

@ -48,7 +48,6 @@ SourceFiles
#include "UList.H"
#include "Pstream.H"
#include "transformList.H"
#include "Map.H"
#include "EdgeMap.H"
#include "PackedList.H"
@ -60,7 +59,7 @@ namespace Foam
class polyBoundaryMesh;
class polyMesh;
class coupledPolyPatch;
class processorPolyPatch;
/*---------------------------------------------------------------------------*\
Class syncTools Declaration
@ -74,18 +73,17 @@ class syncTools
static bool hasCouples(const polyBoundaryMesh&);
//- Check for single transformation tensor only.
static void checkTransform(const coupledPolyPatch&, const bool);
static void checkTransform(const processorPolyPatch&, const bool);
//- Apply separation to list. Either single vector or one vector
// per element.
//- Apply separation to list.
template <class T>
static void separateList(const vectorField&, UList<T>&);
static void separateList(const vector&, UList<T>&);
template <class T>
static void separateList(const vectorField&, Map<T>&);
static void separateList(const vector&, Map<T>&);
template <class T>
static void separateList(const vectorField&, EdgeMap<T>&);
static void separateList(const vector&, EdgeMap<T>&);
//- Combine value with existing value in map.
template <class T, class CombineOp>
@ -271,13 +269,13 @@ public:
template<>
void syncTools::separateList(const vectorField&, UList<vector>&);
void syncTools::separateList(const vector&, UList<vector>&);
template<>
void syncTools::separateList(const vectorField&, Map<vector>&);
void syncTools::separateList(const vector&, Map<vector>&);
template<>
void syncTools::separateList(const vectorField&, EdgeMap<vector>&);
void syncTools::separateList(const vector&, EdgeMap<vector>&);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -31,13 +31,14 @@ License
#include "globalMeshData.H"
#include "contiguous.H"
#include "transform.H"
#include "transformList.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template <class T>
void Foam::syncTools::separateList
(
const vectorField& separation,
const vector& separation,
UList<T>& field
)
{}
@ -46,7 +47,7 @@ void Foam::syncTools::separateList
template <class T>
void Foam::syncTools::separateList
(
const vectorField& separation,
const vector& separation,
Map<T>& field
)
{}
@ -55,7 +56,7 @@ void Foam::syncTools::separateList
template <class T>
void Foam::syncTools::separateList
(
const vectorField& separation,
const vector& separation,
EdgeMap<T>& field
)
{}
@ -182,7 +183,6 @@ void Foam::syncTools::syncPointMap
{
const processorPolyPatch& procPatch =
refCast<const processorPolyPatch>(patches[patchI]);
checkTransform(procPatch, applySeparation);
IPstream fromNb(Pstream::blocking, procPatch.neighbProcNo());
Map<T> nbrPatchInfo(fromNb);
@ -228,7 +228,6 @@ void Foam::syncTools::syncPointMap
{
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(patches[patchI]);
checkTransform(cycPatch, applySeparation);
const edgeList& coupledPoints = cycPatch.coupledPoints();
const labelList& meshPts = cycPatch.meshPoints();
@ -268,7 +267,7 @@ void Foam::syncTools::syncPointMap
{
hasTransformation = true;
const vectorField& v = cycPatch.coupledPolyPatch::separation();
const vector& v = cycPatch.separation();
separateList(v, half0Values);
separateList(-v, half1Values);
}
@ -537,7 +536,6 @@ void Foam::syncTools::syncEdgeMap
{
const processorPolyPatch& procPatch =
refCast<const processorPolyPatch>(patches[patchI]);
checkTransform(procPatch, applySeparation);
const labelList& meshPts = procPatch.meshPoints();
@ -587,7 +585,6 @@ void Foam::syncTools::syncEdgeMap
{
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(patches[patchI]);
checkTransform(cycPatch, applySeparation);
const edgeList& coupledEdges = cycPatch.coupledEdges();
const labelList& meshPts = cycPatch.meshPoints();
@ -637,7 +634,7 @@ void Foam::syncTools::syncEdgeMap
}
else if (applySeparation && cycPatch.separated())
{
const vectorField& v = cycPatch.coupledPolyPatch::separation();
const vector& v = cycPatch.separation();
separateList(v, half0Values);
separateList(-v, half1Values);
}
@ -972,8 +969,6 @@ void Foam::syncTools::syncPointList
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(patches[patchI]);
checkTransform(cycPatch, applySeparation);
const edgeList& coupledPoints = cycPatch.coupledPoints();
const labelList& meshPts = cycPatch.meshPoints();
@ -1000,7 +995,7 @@ void Foam::syncTools::syncPointList
else if (applySeparation && cycPatch.separated())
{
hasTransformation = true;
const vectorField& v = cycPatch.coupledPolyPatch::separation();
const vector& v = cycPatch.separation();
separateList(v, half0Values);
separateList(-v, half1Values);
}
@ -1246,8 +1241,6 @@ void Foam::syncTools::syncEdgeList
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(patches[patchI]);
checkTransform(cycPatch, applySeparation);
const edgeList& coupledEdges = cycPatch.coupledEdges();
const labelList& meshEdges = cycPatch.meshEdges();
@ -1275,7 +1268,7 @@ void Foam::syncTools::syncEdgeList
{
hasTransformation = true;
const vectorField& v = cycPatch.coupledPolyPatch::separation();
const vector& v = cycPatch.separation();
separateList(v, half0Values);
separateList(-v, half1Values);
}
@ -1419,17 +1412,17 @@ void Foam::syncTools::syncBoundaryFaceList
&& patches[patchI].size() > 0
)
{
const processorPolyPatch& procPatch =
const processorPolyPatch& ppp =
refCast<const processorPolyPatch>(patches[patchI]);
List<T> nbrPatchInfo(procPatch.size());
List<T> nbrPatchInfo(ppp.size());
if (contiguous<T>())
{
IPstream::read
(
Pstream::blocking,
procPatch.neighbProcNo(),
ppp.neighbProcNo(),
reinterpret_cast<char*>(nbrPatchInfo.begin()),
nbrPatchInfo.byteSize()
);
@ -1439,22 +1432,43 @@ void Foam::syncTools::syncBoundaryFaceList
IPstream fromNeighb
(
Pstream::blocking,
procPatch.neighbProcNo()
ppp.neighbProcNo()
);
fromNeighb >> nbrPatchInfo;
}
if (!procPatch.parallel())
// Apply any transforms
forAll(ppp.patchIDs(), i)
{
transformList(procPatch.forwardT(), nbrPatchInfo);
label subPatchI = ppp.patchIDs()[i];
if (subPatchI != -1)
{
const coupledPolyPatch& cpp =
refCast<const coupledPolyPatch>(patches[subPatchI]);
SubList<T> slice(ppp.subSlice(nbrPatchInfo, i));
if (!cpp.parallel())
{
transformList
(
cpp.forwardT(),
slice
);
}
else if (applySeparation && procPatch.separated())
else if (applySeparation && cpp.separated())
{
separateList(-procPatch.separation(), nbrPatchInfo);
separateList
(
-cpp.separation(),
slice
);
}
}
}
label bFaceI = procPatch.start()-mesh.nInternalFaces();
label bFaceI = ppp.start()-mesh.nInternalFaces();
forAll(nbrPatchInfo, i)
{
@ -1472,36 +1486,42 @@ void Foam::syncTools::syncBoundaryFaceList
const cyclicPolyPatch& cycPatch =
refCast<const cyclicPolyPatch>(patches[patchI]);
label patchStart = cycPatch.start()-mesh.nInternalFaces();
// Have only owner do anything
if (cycPatch.owner())
{
label ownStart =
cycPatch.start()-mesh.nInternalFaces();
label nbrStart =
cycPatch.neighbPatch().start()-mesh.nInternalFaces();
label half = cycPatch.size()/2;
label half1Start = patchStart+half;
label sz = cycPatch.size();
List<T> half0Values(SubList<T>(faceValues, half, patchStart));
List<T> half1Values(SubList<T>(faceValues, half, half1Start));
List<T> ownVals(SubList<T>(faceValues, sz, ownStart));
List<T> nbrVals(SubList<T>(faceValues, sz, nbrStart));
if (!cycPatch.parallel())
{
transformList(cycPatch.reverseT(), half0Values);
transformList(cycPatch.forwardT(), half1Values);
transformList(cycPatch.reverseT(), ownVals);
transformList(cycPatch.forwardT(), nbrVals);
}
else if (applySeparation && cycPatch.separated())
{
const vectorField& v = cycPatch.coupledPolyPatch::separation();
separateList(v, half0Values);
separateList(-v, half1Values);
const vector& v = cycPatch.separation();
separateList(v, ownVals);
separateList(-v, nbrVals);
}
label i0 = patchStart;
forAll(half1Values, i)
label i0 = ownStart;
forAll(nbrVals, i)
{
cop(faceValues[i0++], half1Values[i]);
cop(faceValues[i0++], nbrVals[i]);
}
label i1 = half1Start;
forAll(half0Values, i)
label i1 = nbrStart;
forAll(ownVals, i)
{
cop(faceValues[i1++], half0Values[i]);
cop(faceValues[i1++], ownVals[i]);
}
}
}
}

View File

@ -19,6 +19,8 @@ meshReader/starcd/STARCDMeshReader.C
meshWriter/meshWriter.C
meshWriter/starcd/STARCDMeshWriter.C
/*
polyDualMesh/polyDualMesh.C
*/
LIB = $(FOAM_LIBBIN)/libconversion

View File

@ -4,10 +4,10 @@ set -x
wmake libso decompositionMethods
if [ -d "$FOAM_MPI_LIBBIN" ]
then
wmake libso parMetisDecomp
fi
#if [ -d "$FOAM_MPI_LIBBIN" ]
#then
# wmake libso parMetisDecomp
#fi
wmake libso MGridGenGamgAgglomeration

View File

@ -2,7 +2,9 @@ decompositionMethod/decompositionMethod.C
geomDecomp/geomDecomp.C
simpleGeomDecomp/simpleGeomDecomp.C
hierarchGeomDecomp/hierarchGeomDecomp.C
metisDecomp/metisDecomp.C
/*metisDecomp/metisDecomp.C*/
manualDecomp/manualDecomp.C
LIB = $(FOAM_LIBBIN)/libdecompositionMethods

View File

@ -1,6 +1,4 @@
EXE_INC = \
-I$(WM_THIRD_PARTY_DIR)/metis-5.0pre2/include
LIB_LIBS = \
-lmetis \
-lGKlib
LIB_LIBS =

View File

@ -79,11 +79,12 @@ meshCut/wallLayerCells/wallNormalInfo/wallNormalInfo.C
polyTopoChange/attachPolyTopoChanger/attachPolyTopoChanger.C
polyTopoChange/repatchPolyTopoChanger/repatchPolyTopoChanger.C
/*
fvMeshAdder/fvMeshAdder.C
fvMeshDistribute/fvMeshDistribute.C
polyMeshAdder/faceCoupleInfo.C
polyMeshAdder/polyMeshAdder.C
*/
motionSmoother/motionSmoother.C
motionSmoother/motionSmootherCheck.C
motionSmoother/polyMeshGeometry/polyMeshGeometry.C

View File

@ -62,7 +62,7 @@ public:
// Dummy transform for faces. Used in synchronisation
void transformList
(
const tensorField& rotTensor,
const tensor& rotTensor,
UList<face>& field
)
{};

View File

@ -595,7 +595,11 @@ void Foam::polyTopoChange::getFaceOrder
labelList& oldToNew,
labelList& patchSizes,
labelList& patchStarts
labelList& patchStarts,
labelListList& subPatches,
labelListList& subPatchSizes,
labelListList& subPatchStarts
) const
{
oldToNew.setSize(faceOwner_.size());
@ -668,6 +672,10 @@ void Foam::polyTopoChange::getFaceOrder
patchSizes.setSize(nPatches_);
patchSizes = 0;
// Per patch (that contains sub regions) the number of faces per subregion.
// Does not include the face originating from subRegion -1.
List<Map<label> > subPatchSizeMap(nPatches_);
patchStarts[0] = newFaceI;
for (label faceI = 0; faceI < nActiveFaces; faceI++)
@ -675,6 +683,21 @@ void Foam::polyTopoChange::getFaceOrder
if (region_[faceI] >= 0)
{
patchSizes[region_[faceI]]++;
if (subRegion_[faceI] >= 0)
{
Map<label>& subSizes = subPatchSizeMap[region_[faceI]];
Map<label>::iterator iter = subSizes.find(subRegion_[faceI]);
if (iter != subSizes.end())
{
iter()++;
}
else
{
subSizes.insert(subRegion_[faceI], 1);
}
}
}
}
@ -686,21 +709,105 @@ void Foam::polyTopoChange::getFaceOrder
faceI += patchSizes[patchI];
}
//if (debug)
//{
// Pout<< "patchSizes:" << patchSizes << nl
// << "patchStarts:" << patchStarts << endl;
//}
// From subpatch to sub index
List<Map<label> > subRegionToIndex(nPatches_);
subPatches.setSize(nPatches_);
subPatchSizes.setSize(nPatches_);
subPatchStarts.setSize(nPatches_);
forAll(subPatchSizeMap, patchI)
{
if (subPatchSizeMap[patchI].size() > 0)
{
// Check if there are any internal faces. Note: no need to parallel
// sync sizes since processor patches are unique.
label nSubFaces = 0;
forAllConstIter(Map<label>, subPatchSizeMap[patchI], iter)
{
nSubFaces += iter();
}
label nFromInternal = patchSizes[patchI] - nSubFaces;
if (nFromInternal > 0)
{
subPatchSizeMap[patchI].insert(-1, nFromInternal);
}
// Sort according to patch. patch=-1 (internal faces) come first.
labelList usedSubs = subPatchSizeMap[patchI].toc();
// From index to subpatch
subPatches[patchI] = SortableList<label>(usedSubs);
// Reverse: from subpatch to index
forAll(subPatches, i)
{
subRegionToIndex[patchI].insert
(
usedSubs[i],
subPatches[patchI][i]
);
}
subPatchSizes[patchI].setSize(subPatches[patchI].size());
subPatchStarts[patchI].setSize(subPatches[patchI].size()+1);
label subFaceI = 0;
forAll(subPatches[patchI], i)
{
label subPatchI = subPatches[patchI][i];
subPatchSizes[patchI][i] = subPatchSizeMap[patchI][subPatchI];
subPatchStarts[patchI][i] = subFaceI;
subFaceI += subPatchSizes[patchI][i];
}
subPatchStarts[patchI][subPatchStarts[patchI].size()-1] = subFaceI;
}
}
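The loops above first count faces per sub-region of each patch and then convert those counts into contiguous start offsets with a running sum. A simplified standalone sketch of that bookkeeping in plain C++, with invented example data:

#include <iostream>
#include <map>
#include <vector>

int main()
{
    // Hypothetical sub-region id per boundary face of one patch
    // (-1 marks faces that originate from internal faces).
    std::vector<int> subRegion = {7, -1, 3, 7, 3, 3, -1};

    std::map<int, int> sizePerSub;              // std::map keeps keys sorted
    for (int s : subRegion) ++sizePerSub[s];

    std::vector<int> subPatches, subSizes, subStarts;
    int start = 0;
    for (const auto& p : sizePerSub)
    {
        subPatches.push_back(p.first);
        subSizes.push_back(p.second);
        subStarts.push_back(start);             // running sum gives the offset
        start += p.second;
    }
    subStarts.push_back(start);                 // one-past-the-end entry

    for (std::size_t i = 0; i < subPatches.size(); ++i)
    {
        std::cout << "subPatch " << subPatches[i]
                  << " size "    << subSizes[i]
                  << " start "   << subStarts[i] << '\n';
    }
    return 0;
}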
if (debug)
{
forAll(patchSizes, patchI)
{
Pout<< "Patch:" << patchI
<< " \tsize:" << patchSizes[patchI]
<< " \tstart:" << patchStarts[patchI]
<< endl;
forAll(subPatches[patchI], subI)
{
Pout<< "\tsubPatch:" << subPatches[patchI][subI]
<< " \tsize:" << subPatchSizes[patchI][subI]
<< " \tstart:" << subPatchStarts[patchI][subI]
<< endl;
}
}
Pout<< endl;
}
labelList workPatchStarts(patchStarts);
labelListList workSubPatchStarts(subPatchStarts);
for (label faceI = 0; faceI < nActiveFaces; faceI++)
{
if (region_[faceI] >= 0)
{
label patchI = region_[faceI];
if (subPatchStarts[patchI].size() > 0)
{
label subPatchI = subRegion_[faceI];
label index = subRegionToIndex[patchI][subPatchI];
oldToNew[faceI] = workSubPatchStarts[patchI][index]++;
}
else
{
oldToNew[faceI] = workPatchStarts[region_[faceI]]++;
}
}
}
// Retired faces.
for (label faceI = nActiveFaces; faceI < oldToNew.size(); faceI++)
@ -708,6 +815,8 @@ void Foam::polyTopoChange::getFaceOrder
oldToNew[faceI] = faceI;
}
if (debug)
{
// Check done all faces.
forAll(oldToNew, faceI)
{
@ -723,6 +832,7 @@ void Foam::polyTopoChange::getFaceOrder
<< abort(FatalError);
}
}
}
}
@ -741,6 +851,10 @@ void Foam::polyTopoChange::reorderCompactFaces
region_.setSize(newSize);
region_.shrink();
reorder(oldToNew, subRegion_);
subRegion_.setSize(newSize);
subRegion_.shrink();
reorder(oldToNew, faceOwner_);
faceOwner_.setSize(newSize);
faceOwner_.shrink();
@ -773,7 +887,11 @@ void Foam::polyTopoChange::compact
const bool orderPoints,
label& nInternalPoints,
labelList& patchSizes,
labelList& patchStarts
labelList& patchStarts,
labelListList& subPatches,
labelListList& subPatchSizes,
labelListList& subPatchStarts
)
{
points_.shrink();
@ -782,6 +900,7 @@ void Foam::polyTopoChange::compact
faces_.shrink();
region_.shrink();
subRegion_.shrink();
faceOwner_.shrink();
faceNeighbour_.shrink();
faceMap_.shrink();
@ -1105,7 +1224,11 @@ void Foam::polyTopoChange::compact
localFaceMap,
patchSizes,
patchStarts
patchStarts,
subPatches,
subPatchSizes,
subPatchStarts
);
// Reorder faces.
@ -1777,6 +1900,11 @@ void Foam::polyTopoChange::reorderCoupledFaces
const polyBoundaryMesh& boundary,
const labelList& patchStarts,
const labelList& patchSizes,
const labelListList& subPatches,
const labelListList& subPatchSizes,
const labelListList& subPatchStarts,
const pointField& points
)
{
@ -1890,6 +2018,11 @@ void Foam::polyTopoChange::compactAndReorder
pointField& newPoints,
labelList& patchSizes,
labelList& patchStarts,
labelListList& subPatches,
labelListList& subPatchSizes,
labelListList& subPatchStarts,
List<objectMap>& pointsFromPoints,
List<objectMap>& facesFromPoints,
List<objectMap>& facesFromEdges,
@ -1917,7 +2050,17 @@ void Foam::polyTopoChange::compactAndReorder
// Remove any holes from points/faces/cells and sort faces.
// Sets nActiveFaces_.
compact(orderCells, orderPoints, nInternalPoints, patchSizes, patchStarts);
compact
(
orderCells,
orderPoints,
nInternalPoints,
patchSizes,
patchStarts,
subPatches,
subPatchSizes,
subPatchStarts
);
// Transfer points to pointField. points_ are now cleared!
// Only done since e.g. reorderCoupledFaces requires pointField.
@ -1930,6 +2073,9 @@ void Foam::polyTopoChange::compactAndReorder
mesh.boundaryMesh(),
patchStarts,
patchSizes,
subPatches,
subPatchSizes,
subPatchStarts,
newPoints
);
@ -2030,6 +2176,7 @@ Foam::polyTopoChange::polyTopoChange(const label nPatches, const bool strict)
retiredPoints_(0),
faces_(0),
region_(0),
subRegion_(0),
faceOwner_(0),
faceNeighbour_(0),
faceMap_(0),
@ -2065,6 +2212,7 @@ Foam::polyTopoChange::polyTopoChange
retiredPoints_(0),
faces_(0),
region_(0),
subRegion_(0),
faceOwner_(0),
faceNeighbour_(0),
faceMap_(0),
@ -2112,6 +2260,8 @@ void Foam::polyTopoChange::clear()
faces_.setSize(0);
region_.clear();
region_.setSize(0);
subRegion_.clear();
subRegion_.setSize(0);
faceOwner_.clear();
faceOwner_.setSize(0);
faceNeighbour_.clear();
@ -2272,6 +2422,7 @@ void Foam::polyTopoChange::addMesh
faces_.setSize(faces_.size() + nAllFaces);
region_.setSize(region_.size() + nAllFaces);
subRegion_.setSize(subRegion_.size() + nAllFaces);
faceOwner_.setSize(faceOwner_.size() + nAllFaces);
faceNeighbour_.setSize(faceNeighbour_.size() + nAllFaces);
faceMap_.setSize(faceMap_.size() + nAllFaces);
@ -2643,7 +2794,8 @@ Foam::label Foam::polyTopoChange::addFace
const bool flipFaceFlux,
const label patchID,
const label zoneID,
const bool zoneFlip
const bool zoneFlip,
const label subPatchID
)
{
// Check validity
@ -2656,6 +2808,7 @@ Foam::label Foam::polyTopoChange::addFace
faces_.append(f);
region_.append(patchID);
subRegion_.append(subPatchID);
faceOwner_.append(own);
faceNeighbour_.append(nei);
@ -2708,7 +2861,8 @@ void Foam::polyTopoChange::modifyFace
const bool flipFaceFlux,
const label patchID,
const label zoneID,
const bool zoneFlip
const bool zoneFlip,
const label subPatchID
)
{
// Check validity
@ -2721,6 +2875,7 @@ void Foam::polyTopoChange::modifyFace
faceOwner_[faceI] = own;
faceNeighbour_[faceI] = nei;
region_[faceI] = patchID;
subRegion_[faceI] = subPatchID;
if (flipFaceFlux)
{
@ -2778,6 +2933,7 @@ void Foam::polyTopoChange::removeFace(const label faceI, const label mergeFaceI)
faces_[faceI].setSize(0);
region_[faceI] = -1;
subRegion_[faceI] = -1;
faceOwner_[faceI] = -1;
faceNeighbour_[faceI] = -1;
faceMap_[faceI] = -1;
@ -2907,6 +3063,10 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::changeMesh
// patch slicing
labelList patchSizes;
labelList patchStarts;
// subpatch slicing
labelListList subPatches;
labelListList subPatchSizes;
labelListList subPatchStarts;
// inflate maps
List<objectMap> pointsFromPoints;
List<objectMap> facesFromPoints;
@ -2934,6 +3094,9 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::changeMesh
newPoints,
patchSizes,
patchStarts,
subPatches,
subPatchSizes,
subPatchStarts,
pointsFromPoints,
facesFromPoints,
facesFromEdges,
@ -2987,6 +3150,8 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::changeMesh
faceNeighbour_,
patchSizes,
patchStarts,
subPatches,
subPatchStarts,
syncParallel
);
@ -3004,6 +3169,8 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::changeMesh
faceNeighbour_,
patchSizes,
patchStarts,
subPatches,
subPatchStarts,
syncParallel
);
// Invalidate new points to go into map.
@ -3012,6 +3179,7 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::changeMesh
mesh.changing(true);
}
if (debug)
{
// Some stats on changes
@ -3057,6 +3225,8 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::changeMesh
faces_.setSize(0);
region_.clear();
region_.setSize(0);
subRegion_.clear();
subRegion_.setSize(0);
faceOwner_.clear();
faceOwner_.setSize(0);
faceNeighbour_.clear();
@ -3188,6 +3358,10 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::makeMesh
// patch slicing
labelList patchSizes;
labelList patchStarts;
// subpatch slicing
labelListList subPatches;
labelListList subPatchSizes;
labelListList subPatchStarts;
// inflate maps
List<objectMap> pointsFromPoints;
List<objectMap> facesFromPoints;
@ -3216,6 +3390,9 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::makeMesh
newPoints,
patchSizes,
patchStarts,
subPatches,
subPatchSizes,
subPatchStarts,
pointsFromPoints,
facesFromPoints,
facesFromEdges,
@ -3291,6 +3468,8 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::makeMesh
faces_.setSize(0);
region_.clear();
region_.setSize(0);
subRegion_.clear();
subRegion_.setSize(0);
faceOwner_.clear();
faceOwner_.setSize(0);
faceNeighbour_.clear();
@ -3312,6 +3491,17 @@ Foam::autoPtr<Foam::mapPolyMesh> Foam::polyTopoChange::makeMesh
patchSizes[patchI],
patchStarts[patchI]
).ptr();
// If it was a processor patch reset the sub addressing
if (isA<processorPolyPatch>(oldPatches[patchI]))
{
processorPolyPatch& ppp = refCast<processorPolyPatch>
(
*newBoundary[patchI]
);
const_cast<labelList&>(ppp.patchIDs()) = subPatches[patchI];
const_cast<labelList&>(ppp.starts()) = subPatchStarts[patchI];
}
}
newMesh.addFvPatches(newBoundary);
}

View File

@ -50,9 +50,8 @@ Description
To see if a point is equal to the above value we don't use == (which might give
problems with roundoff error) but instead compare the individual components
with >.
- coupled patches: the reorderCoupledFaces routine (borrowed from
the couplePatches utility) reorders coupled patch faces and
uses the cyclicPolyPatch,processorPolyPatch functionality.
- coupled patches: the reorderCoupledFaces routine reorders coupled patch
faces and uses the cyclicPolyPatch,processorPolyPatch functionality.
SourceFiles
polyTopoChange.C
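A minimal sketch of the component-wise comparison described above (the function name, the arguments and the 0.5 factor are illustrative assumptions, not the library code):

    // Illustrative only: decide whether pt carries the "flag" value greatPoint
    // without using ==, which is fragile under roundoff.
    inline bool isFlagged(const point& pt, const point& greatPoint)
    {
        return
            pt.x() > 0.5*greatPoint.x()
         || pt.y() > 0.5*greatPoint.y()
         || pt.z() > 0.5*greatPoint.z();
    }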
@ -148,6 +147,10 @@ class polyTopoChange
//- Patch for every external face (-1 for internal faces)
DynamicList<label> region_;
//- Sub-patch (for processor faces). -1 for internal and non-
// processor patches.
DynamicList<label> subRegion_;
//- Owner for all faces
DynamicList<label> faceOwner_;
@ -286,7 +289,11 @@ class polyTopoChange
labelList& oldToNew,
labelList& patchSizes,
labelList& patchStarts
labelList& patchStarts,
labelListList& subPatches,
labelListList& subPatchSizes,
labelListList& subPatchStarts
) const;
//- Compact and reorder faces according to map
@ -307,7 +314,10 @@ class polyTopoChange
const bool orderPoints,
label& nInternalPoints,
labelList& patchSizes,
labelList& patchStarts
labelList& patchStarts,
labelListList& subPatches,
labelListList& subPatchSizes,
labelListList& subPatchStarts
);
//- Select either internal or external faces out of faceLabels
@ -372,6 +382,9 @@ class polyTopoChange
const polyBoundaryMesh&,
const labelList& patchStarts,
const labelList& patchSizes,
const labelListList& subPatches,
const labelListList& subPatchSizes,
const labelListList& subPatchStarts,
const pointField& points
);
@ -385,6 +398,9 @@ class polyTopoChange
pointField& newPoints,
labelList& patchSizes,
labelList& patchStarts,
labelListList& subPatches,
labelListList& subPatchSizes,
labelListList& subPatchStarts,
List<objectMap>& pointsFromPoints,
List<objectMap>& facesFromPoints,
List<objectMap>& facesFromEdges,
@ -515,7 +531,8 @@ public:
const bool flipFaceFlux,
const label patchID,
const label zoneID,
const bool zoneFlip
const bool zoneFlip,
const label subPatchID = -1
);
//- Modify vertices or cell of face.
@ -528,7 +545,8 @@ public:
const bool flipFaceFlux,
const label patchID,
const label zoneID,
const bool zoneFlip
const bool zoneFlip,
const label subPatchID = -1
);
//- Remove/merge face.
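A hedged usage sketch of the extended addFace above (meshMod, f and own are assumed names; the argument order is inferred from the parameters visible in this diff, with the usual master point/edge/face labels in between — only the trailing subPatchID is new):

    // Hypothetical call: add a boundary face into patch 2 and record that it
    // belongs to sub-patch 5 of that (processor) patch. Omitting the last
    // argument keeps the old behaviour (subPatchID = -1).
    label newFaceI = meshMod.addFace
    (
        f,              // face vertices
        own,            // owner cell
        -1,             // no neighbour cell (boundary face)
        -1, -1, -1,     // master point/edge/face (none)
        false,          // flipFaceFlux
        2,              // patchID
        -1,             // zoneID
        false,          // zoneFlip
        5               // subPatchID (new argument)
    );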

View File

@ -1,7 +1,9 @@
fvMesh/fvMeshGeometry.C
fvMesh/fvMesh.C
/*
fvMesh/fvMeshSubset/fvMeshSubset.C
*/
fvBoundaryMesh = fvMesh/fvBoundaryMesh
$(fvBoundaryMesh)/fvBoundaryMesh.C
@ -23,7 +25,9 @@ $(constraintFvPatches)/processor/processorFvPatch.C
derivedFvPatches = $(fvPatches)/derived
$(derivedFvPatches)/wall/wallFvPatch.C
/*
$(derivedFvPatches)/directMapped/directMappedFvPatch.C
*/
wallDist = fvMesh/wallDist
$(wallDist)/wallPointYPlus/wallPointYPlus.C
@ -38,8 +42,10 @@ fvMeshMapper = fvMesh/fvMeshMapper
$(fvMeshMapper)/fvPatchMapper.C
$(fvMeshMapper)/fvSurfaceMapper.C
/*
extendedStencil = fvMesh/extendedStencil
$(extendedStencil)/extendedStencil.C
*/
fvPatchFields = fields/fvPatchFields
$(fvPatchFields)/fvPatchField/fvPatchFields.C
@ -71,12 +77,14 @@ $(constraintFvPatchFields)/wedge/wedgeFvPatchScalarField.C
derivedFvPatchFields = $(fvPatchFields)/derived
$(derivedFvPatchFields)/advective/advectiveFvPatchFields.C
$(derivedFvPatchFields)/fixedInternalValueFvPatchField/fixedInternalValueFvPatchFields.C
/*
$(derivedFvPatchFields)/directMappedFixedValue/directMappedFixedValueFvPatchFields.C
$(derivedFvPatchFields)/directMappedVelocityFluxFixedValue/directMappedVelocityFluxFixedValueFvPatchField.C
$(derivedFvPatchFields)/fan/fanFvPatchFields.C
$(derivedFvPatchFields)/fixedFluxBuoyantPressure/fixedFluxBuoyantPressureFvPatchScalarField.C
$(derivedFvPatchFields)/fixedFluxPressure/fixedFluxPressureFvPatchScalarField.C
$(derivedFvPatchFields)/fixedInternalValueFvPatchField/fixedInternalValueFvPatchFields.C
$(derivedFvPatchFields)/fixedNormalSlip/fixedNormalSlipFvPatchFields.C
$(derivedFvPatchFields)/fluxCorrectedVelocity/fluxCorrectedVelocityFvPatchVectorField.C
$(derivedFvPatchFields)/freestream/freestreamFvPatchFields.C
@ -111,6 +119,7 @@ $(derivedFvPatchFields)/turbulentInlet/turbulentInletFvPatchFields.C
$(derivedFvPatchFields)/turbulentIntensityKineticEnergyInlet/turbulentIntensityKineticEnergyInletFvPatchScalarField.C
$(derivedFvPatchFields)/uniformFixedValue/uniformFixedValueFvPatchFields.C
$(derivedFvPatchFields)/waveTransmissive/waveTransmissiveFvPatchFields.C
*/
fvsPatchFields = fields/fvsPatchFields
$(fvsPatchFields)/fvsPatchField/fvsPatchFields.C
@ -134,8 +143,10 @@ fields/surfaceFields/surfaceFields.C
fvMatrices/fvMatrices.C
fvMatrices/fvScalarMatrix/fvScalarMatrix.C
/*
fvMatrices/solvers/MULES/MULES.C
fvMatrices/solvers/GAMGSymSolver/GAMGAgglomerations/faceAreaPairGAMGAgglomeration/faceAreaPairGAMGAgglomeration.C
*/
interpolation = interpolation/interpolation
$(interpolation)/interpolation/interpolations.C
@ -156,6 +167,7 @@ $(surfaceInterpolation)/surfaceInterpolationScheme/surfaceInterpolationSchemes.C
schemes = $(surfaceInterpolation)/schemes
$(schemes)/linear/linear.C
$(schemes)/midPoint/midPoint.C
/*
$(schemes)/downwind/downwind.C
$(schemes)/weighted/weighted.C
$(schemes)/cubic/cubic.C
@ -170,10 +182,12 @@ $(schemes)/localMax/localMax.C
$(schemes)/localMin/localMin.C
$(schemes)/quadraticFit/quadraticFit.C
$(schemes)/quadraticFit/quadraticFitData.C
*/
limitedSchemes = $(surfaceInterpolation)/limitedSchemes
$(limitedSchemes)/limitedSurfaceInterpolationScheme/limitedSurfaceInterpolationSchemes.C
$(limitedSchemes)/upwind/upwind.C
/*
$(limitedSchemes)/blended/blended.C
$(limitedSchemes)/linearUpwind/linearUpwind.C
$(limitedSchemes)/linearUpwindV/linearUpwindV.C
@ -195,11 +209,13 @@ $(limitedSchemes)/filteredLinear/filteredLinear.C
$(limitedSchemes)/filteredLinear2/filteredLinear2.C
$(limitedSchemes)/filteredLinear3/filteredLinear3.C
$(limitedSchemes)/limitWith/limitWith.C
*/
multivariateSchemes = $(surfaceInterpolation)/multivariateSchemes
$(multivariateSchemes)/multivariateSurfaceInterpolationScheme/multivariateSurfaceInterpolationSchemes.C
$(multivariateSchemes)/multivariateSelectionScheme/multivariateSelectionSchemes.C
$(multivariateSchemes)/upwind/multivariateUpwind.C
/*
$(multivariateSchemes)/Gamma/multivariateGamma.C
$(multivariateSchemes)/vanLeer/multivariateVanLeer.C
$(multivariateSchemes)/Minmod/multivariateMinmod.C
@ -207,6 +223,7 @@ $(multivariateSchemes)/SuperBee/multivariateSuperBee.C
$(multivariateSchemes)/MUSCL/multivariateMUSCL.C
$(multivariateSchemes)/limitedLinear/multivariateLimitedLinear.C
$(multivariateSchemes)/limitedCubic/multivariateLimitedCubic.C
*/
finiteVolume/fv/fv.C
finiteVolume/fvSchemes/fvSchemes.C
@ -234,17 +251,21 @@ $(divSchemes)/gaussDivScheme/gaussDivSchemes.C
gradSchemes = finiteVolume/gradSchemes
$(gradSchemes)/gradScheme/gradSchemes.C
$(gradSchemes)/gaussGrad/gaussGrads.C
/*
$(gradSchemes)/leastSquaresGrad/leastSquaresVectors.C
$(gradSchemes)/leastSquaresGrad/leastSquaresGrads.C
$(gradSchemes)/extendedLeastSquaresGrad/extendedLeastSquaresVectors.C
$(gradSchemes)/extendedLeastSquaresGrad/extendedLeastSquaresGrads.C
$(gradSchemes)/fourthGrad/fourthGrads.C
*/
limitedGradSchemes = $(gradSchemes)/limitedGradSchemes
/*
$(limitedGradSchemes)/faceLimitedGrad/faceLimitedGrads.C
$(limitedGradSchemes)/cellLimitedGrad/cellLimitedGrads.C
$(limitedGradSchemes)/faceMDLimitedGrad/faceMDLimitedGrads.C
$(limitedGradSchemes)/cellMDLimitedGrad/cellMDLimitedGrads.C
*/
snGradSchemes = finiteVolume/snGradSchemes
$(snGradSchemes)/snGradScheme/snGradSchemes.C
@ -266,6 +287,7 @@ finiteVolume/fvc/fvcMeshPhi.C
cfdTools/general/findRefCell/findRefCell.C
cfdTools/general/adjustPhi/adjustPhi.C
cfdTools/general/bound/bound.C
/*
cfdTools/general/porousMedia/porousZone.C
cfdTools/general/porousMedia/porousZones.C
cfdTools/general/MRF/MRFZone.C
@ -276,16 +298,20 @@ cfdTools/general/SRF/SRFModel/SRFModel/SRFModel.C
cfdTools/general/SRF/SRFModel/SRFModel/newSRFModel.C
cfdTools/general/SRF/SRFModel/rpm/rpm.C
cfdTools/general/SRF/derivedFvPatchFields/SRFVelocityFvPatchVectorField/SRFVelocityFvPatchVectorField.C
*/
fvMeshCutSurface = fvMesh/fvMeshCutSurface
/*
meshCut = $(fvMeshCutSurface)/meshCut
$(meshCut)/meshCutSurface.C
$(meshCut)/cellAddressing.C
*/
/*
edgeCuts = $(fvMeshCutSurface)/edgeCuts
$(edgeCuts)/meshEdgeCuts.C
$(edgeCuts)/faceDecompIsoSurfaceCuts.C
$(edgeCuts)/cellDecompIsoSurfaceCuts.C
*/
LIB = $(FOAM_LIBBIN)/libfiniteVolume

View File

@ -110,11 +110,118 @@ coupledFvPatchField<Type>::coupledFvPatchField
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Referred patch functionality
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<class Type>
void coupledFvPatchField<Type>::patchInternalField
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
if (start+size > referringPatch.size())
{
FatalErrorIn("coupledFvPatchField<Type>::patchInternalField(..)")
<< "patch size:" << referringPatch.size()
<< " start:" << start << " size:" << size
<< abort(FatalError);
}
const unallocLabelList& faceCells = referringPatch.faceCells();
exchangeBuf.setSize(this->size());
label facei = start;
forAll(exchangeBuf, i)
{
exchangeBuf[i] = this->internalField()[faceCells[facei++]];
}
}
template<class Type>
tmp<Field<Type> > coupledFvPatchField<Type>::valueInternalCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start,
const tmp<scalarField>& w
) const
{
// ? check what is passed in!
if (w().size() != size)
{
FatalErrorIn("coupledFvPatchField<Type>::valueInternalCoeffs(..)")
<< "Call with correct slice size." << abort(FatalError);
}
return Type(pTraits<Type>::one)*w;
}
template<class Type>
tmp<Field<Type> > coupledFvPatchField<Type>::valueBoundaryCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start,
const tmp<scalarField>& w
) const
{
// ? check what is passed in!
if (w().size() != size)
{
FatalErrorIn("coupledFvPatchField<Type>::valueBoundaryCoeffs(..)")
<< "Call with correct slice size." << abort(FatalError);
}
return Type(pTraits<Type>::one)*(1.0 - w);
}
template<class Type>
tmp<Field<Type> > coupledFvPatchField<Type>::gradientInternalCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
SubField<scalar> subDc(referringPatch.deltaCoeffs(), size, start);
return -Type(pTraits<Type>::one)*subDc;
}
template<class Type>
tmp<Field<Type> > coupledFvPatchField<Type>::gradientBoundaryCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
return -this->gradientInternalCoeffs
(
referringPatch,
size,
start
);
}
// Local patch functionality
// ~~~~~~~~~~~~~~~~~~~~~~~~~
template<class Type>
tmp<Field<Type> > coupledFvPatchField<Type>::snGrad() const
{
return
(this->patchNeighbourField() - this->patchInternalField())
(
this->patchNeighbourField()
- this->fvPatchField<Type>::patchInternalField()
)
*this->patch().deltaCoeffs();
}
@ -139,8 +246,14 @@ void coupledFvPatchField<Type>::evaluate(const Pstream::commsTypes)
Field<Type>::operator=
(
this->patch().weights()*this->patchInternalField()
+ (1.0 - this->patch().weights())*this->patchNeighbourField()
(
this->patch().weights()
* this->fvPatchField<Type>::patchInternalField()
)
+ (
(1.0 - this->patch().weights())
* this->patchNeighbourField()
)
);
fvPatchField<Type>::evaluate();
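Per face, the evaluate() and snGrad() above reduce to the usual two-point formulas; a small sketch (scalar is OpenFOAM's floating-point typedef, the function names are illustrative):

    // faceValue = w*ownerCellValue + (1 - w)*neighbourCellValue
    inline scalar coupledFaceValue(const scalar w, const scalar ownVal, const scalar neiVal)
    {
        return w*ownVal + (1.0 - w)*neiVal;
    }

    // snGrad = (neighbourCellValue - ownerCellValue)*deltaCoeff
    inline scalar coupledSnGrad(const scalar dc, const scalar ownVal, const scalar neiVal)
    {
        return dc*(neiVal - ownVal);
    }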

View File

@ -121,6 +121,124 @@ public:
// Member functions
// Referred-patch functionality. Get called with a slice (size, start)
// of a patch that supplies fields and geometry/topology.
// Evaluation functions
//- Return internal field next to patch as patch field
virtual void patchInternalField
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const;
//- Get patch-normal gradient
virtual void snGrad
(
Field<Type>& exchangeBuf,
const Field<Type>& subFld,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const = 0;
//- Initialise the evaluation of the patch field.
virtual void initEvaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const = 0;
//- Evaluate the patch field.
virtual void evaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const = 0;
//- Return the matrix diagonal coefficients corresponding to the
// evaluation of the value of this patchField with given weights
virtual tmp<Field<Type> > valueInternalCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start,
const tmp<scalarField>&
) const;
//- Return the matrix source coefficients corresponding to the
// evaluation of the value of this patchField with given weights
virtual tmp<Field<Type> > valueBoundaryCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start,
const tmp<scalarField>&
) const;
//- Return the matrix diagonal coefficients corresponding to the
// evaluation of the gradient of this patchField
virtual tmp<Field<Type> > gradientInternalCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start
) const;
//- Return the matrix source coefficients corresponding to the
// evaluation of the gradient of this patchField
virtual tmp<Field<Type> > gradientBoundaryCoeffs
(
const coupledFvPatch& referringPatch,
const label size,
const label start
) const;
// Coupled interface functionality
//- Initialise neighbour matrix update
virtual void initInterfaceMatrixUpdate
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix& m,
const scalarField& coeffs,
const direction cmpt,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const = 0;
//- Update result field based on interface functionality
virtual void updateInterfaceMatrix
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix&,
const scalarField& coeffs,
const direction,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const = 0;
//- Write
//?virtual void write(Ostream&) const;
//XXXX
// Access
//- Return true if this patch field is derived from

View File

@ -36,6 +36,8 @@ namespace Foam
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
makePatchFieldsTypeName(coupled);
//makePatchTypeFieldTypeName(coupledFvPatchScalarField);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -25,6 +25,7 @@ License
\*---------------------------------------------------------------------------*/
#include "cyclicFvPatchField.H"
#include "transformField.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -108,7 +109,11 @@ cyclicFvPatchField<Type>::cyclicFvPatchField
<< exit(FatalIOError);
}
this->evaluate(Pstream::blocking);
Pout<< "Construct from dictionary for " << p.name() << endl
<< "Underlying cyclic:" << cyclicPatch_.name()
<< " with parallel:" << cyclicPatch_.parallel() << endl;
this->coupledFvPatchField<Type>::evaluate(Pstream::blocking);
}
@ -138,38 +143,231 @@ cyclicFvPatchField<Type>::cyclicFvPatchField
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Referred patch functionality
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<class Type>
void cyclicFvPatchField<Type>::snGrad
(
Field<Type>& exchangeBuf,
const Field<Type>& subFld,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
if (subFld.size() != size)
{
FatalErrorIn("cyclicFvPatchField<Type>::snGrad(..)")
<< "Call with correct slice size." << abort(FatalError);
}
// Slice delta coeffs
SubField<scalar> subDc(referringPatch.deltaCoeffs(), size, start);
// Get internal field
tmp<Field<Type> > patchFld(new Field<Type>(size));
this->patchInternalField(patchFld(), referringPatch, size, start);
exchangeBuf = subDc * (subFld - patchFld);
}
template<class Type>
void cyclicFvPatchField<Type>::initEvaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
//? What about updateCoeffs? What if patch holds face-wise additional
// information? Where is it stored? Who updates it?
// if (!this->updated())
// {
// this->updateCoeffs();
// }
if (exchangeBuf.size() != size)
{
FatalErrorIn("cyclicFvPatchField<Type>::initEvaluate(..)")
<< "Call with correct slice size." << abort(FatalError);
}
// Get sender side. Equivalent to (1-w)*patchNeighbourField() in non-remote
// version (so includes the transformation!).
//Pout<< "initEvaluate name:" << cyclicPatch_.name()
// << " size:" << size << " start:" << start
// << " referringPatch.weights():" << referringPatch.weights() << endl;
const SubField<scalar> subWeights
(
referringPatch.weights(),
size,
start
);
tmp<Field<Type> > patchFld(new Field<Type>(size));
this->patchInternalField(patchFld(), referringPatch, size, start);
//Pout<< "initEvaluate name:" << cyclicPatch_.name()
// << " patchFld:" << patchFld()
// << " subWeights:" << subWeights << endl;
if (doTransform())
{
//Pout<< "initEvaluate name:" << cyclicPatch_.name()
// << " reverseT:" << reverseT() << endl;
tmp<Field<Type> > tfld =
(1.0-subWeights)
* transform(reverseT(), patchFld);
forAll(tfld(), i)
{
exchangeBuf[i] = tfld()[i];
}
//Pout<< "initEvaluate name:" << cyclicPatch_.name()
// << " exchangeBuf:" << exchangeBuf << endl;
}
else
{
//Pout<< "initEvaluate name:" << cyclicPatch_.name()
// << " no transform" << endl;
tmp<Field<Type> > tfld = (1.0-subWeights)*patchFld;
forAll(tfld(), i)
{
exchangeBuf[i] = tfld()[i];
}
//Pout<< "initEvaluate name:" << cyclicPatch_.name()
// << " exchangeBuf:" << exchangeBuf << endl;
}
}
template<class Type>
void cyclicFvPatchField<Type>::evaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
// if (!this->updated())
// {
// this->updateCoeffs();
// }
if (exchangeBuf.size() != size)
{
FatalErrorIn("cyclicFvPatchField<Type>::evaluate(..)")
<< "Call with correct slice size." << abort(FatalError);
}
const SubField<scalar> subWeights
(
referringPatch.weights(),
size,
start
);
tmp<Field<Type> > patchFld(new Field<Type>(size));
this->patchInternalField(patchFld(), referringPatch, size, start);
exchangeBuf += subWeights * patchFld;
//?? fvPatchField<Type>::evaluate();
}
template<class Type>
void cyclicFvPatchField<Type>::initInterfaceMatrixUpdate
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix&,
const scalarField& coeffs,
const direction,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const
{
const unallocLabelList& faceCells = referringPatch.faceCells();
label facei = start;
forAll(exchangeBuf, elemI)
{
exchangeBuf[elemI] = psiInternal[faceCells[facei++]];
}
}
template<class Type>
void cyclicFvPatchField<Type>::updateInterfaceMatrix
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix&,
const scalarField& coeffs,
const direction cmpt,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const
{
// Transform according to the transformation tensor
transformCoupleField(exchangeBuf, cmpt);
// Multiply the field by coefficients and add into the result
const unallocLabelList& faceCells = referringPatch.faceCells();
label facei = start;
forAll(exchangeBuf, elemI)
{
result[faceCells[facei]] -= coeffs[facei]*exchangeBuf[elemI];
facei++;
}
}
// Local patch functionality
// ~~~~~~~~~~~~~~~~~~~~~~~~~
template<class Type>
tmp<Field<Type> > cyclicFvPatchField<Type>::patchNeighbourField() const
{
const Field<Type>& iField = this->internalField();
const unallocLabelList& faceCells = cyclicPatch_.faceCells();
const unallocLabelList& nbrFaceCells =
cyclicPatch().cyclicPatch().neighbPatch().faceCells();
tmp<Field<Type> > tpnf(new Field<Type>(this->size()));
Field<Type>& pnf = tpnf();
label sizeby2 = this->size()/2;
if (doTransform())
{
for (label facei=0; facei<sizeby2; facei++)
forAll(pnf, facei)
{
pnf[facei] = transform
(
forwardT()[0], iField[faceCells[facei + sizeby2]]
);
pnf[facei + sizeby2] = transform
(
reverseT()[0], iField[faceCells[facei]]
forwardT(), iField[nbrFaceCells[facei]]
);
}
}
else
{
for (label facei=0; facei<sizeby2; facei++)
forAll(pnf, facei)
{
pnf[facei] = iField[faceCells[facei + sizeby2]];
pnf[facei + sizeby2] = iField[faceCells[facei]];
pnf[facei] = iField[nbrFaceCells[facei]];
}
}
@ -190,19 +388,20 @@ void cyclicFvPatchField<Type>::updateInterfaceMatrix
{
scalarField pnf(this->size());
label sizeby2 = this->size()/2;
const unallocLabelList& faceCells = cyclicPatch_.faceCells();
const unallocLabelList& nbrFaceCells =
cyclicPatch().cyclicPatch().neighbPatch().faceCells();
for (label facei=0; facei<sizeby2; facei++)
forAll(pnf, facei)
{
pnf[facei] = psiInternal[faceCells[facei + sizeby2]];
pnf[facei + sizeby2] = psiInternal[faceCells[facei]];
pnf[facei] = psiInternal[nbrFaceCells[facei]];
}
// Transform according to the transformation tensors
transformCoupleField(pnf, cmpt);
// Multiply the field by coefficients and add into the result
const unallocLabelList& faceCells = cyclicPatch_.faceCells();
forAll(faceCells, elemI)
{
result[faceCells[elemI]] -= coeffs[elemI]*pnf[elemI];

View File

@ -142,7 +142,7 @@ public:
// Access
//- Retirn local reference cast into the cyclic patch
//- Return local reference cast into the cyclic patch
const cyclicFvPatch& cyclicPatch() const
{
return cyclicPatch_;
@ -151,6 +151,66 @@ public:
// Evaluation functions
// Referred-patch functionality. Get called with a slice (size, start)
// of a patch that supplies fields and geometry/topology.
//- Get patch-normal gradient
virtual void snGrad
(
Field<Type>& exchangeBuf,
const Field<Type>& subFld,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const;
//- Initialise the evaluation of the patch field.
virtual void initEvaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const;
//- Evaluate the patch field.
virtual void evaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const;
//- Initialise neighbour matrix update
virtual void initInterfaceMatrixUpdate
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix& m,
const scalarField& coeffs,
const direction cmpt,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const;
//- Update result field based on interface functionality
virtual void updateInterfaceMatrix
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix&,
const scalarField& coeffs,
const direction,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const;
//- Return neighbour coupled given internal cell data
tmp<Field<Type> > patchNeighbourField() const;
@ -175,13 +235,13 @@ public:
}
//- Return face transformation tensor
virtual const tensorField& forwardT() const
virtual const tensor& forwardT() const
{
return cyclicPatch_.forwardT();
}
//- Return neighbour-cell transformation tensor
virtual const tensorField& reverseT() const
virtual const tensor& reverseT() const
{
return cyclicPatch_.reverseT();
}

View File

@ -36,6 +36,7 @@ namespace Foam
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
makePatchFields(cyclic);
//makePatchTypeField(fvPatchScalarField, cyclicFvPatchScalarField);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -30,12 +30,41 @@ License
#include "OPstream.H"
#include "demandDrivenData.H"
#include "transformField.H"
#include "diagTensorField.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
template<class Type>
const coupledFvPatchField<Type>& processorFvPatchField<Type>::patchField
(
const label patchID
) const
{
//const GeometricField<Type, fvPatchField, volMesh>& field =
//this->db().objectRegistry::
//lookupObject<GeometricField<Type, fvPatchField, volMesh> >
//(
// this->dimensionedInternalField().name()
//);
const GeometricField<Type, fvPatchField, volMesh>& field =
static_cast
<
const GeometricField<Type, fvPatchField, volMesh>&
>(this->dimensionedInternalField());
return refCast<const coupledFvPatchField<Type> >
(
field.boundaryField()[patchID]
);
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
template<class Type>
@ -177,7 +206,52 @@ void processorFvPatchField<Type>::initEvaluate
{
if (Pstream::parRun())
{
procPatch_.compressedSend(commsType, this->patchInternalField()());
const processorPolyPatch& pp = procPatch_.procPolyPatch();
// Get reference to proc patch built-in buffer
List<Type>& sendBuf = procPatch_.setSendBuf<Type>(this->size());
forAll(pp.patchIDs(), i)
{
SubField<Type> subSendFld(pp.subSlice(sendBuf, i));
Field<Type>& subFld = static_cast<Field<Type>&>
(
static_cast<UList<Type>&>(subSendFld)
);
label patchI = pp.patchIDs()[i];
//Pout<< "initEvaluate on "
// << this->dimensionedInternalField().name()
// << " patch:" << pp.name()
// << " subSize:" << (pp.starts()[i+1]-pp.starts()[i])
// << " subStart:" << pp.starts()[i]
// << " subPatch:" << patchI << endl;
if (patchI == -1)
{
// Assign internal field
this->patchInternalField
(
subFld,
procPatch_,
subSendFld.size(),
pp.starts()[i]
);
}
else
{
// Assign evaluation of referred patch
patchField(patchI).initEvaluate
(
subFld,
procPatch_,
subSendFld.size(),
pp.starts()[i]
);
}
}
procPatch_.compressedBufferSend<Type>(commsType);
}
}
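The loop above fills the processor send buffer slice by slice, using pp.starts() to delimit each sub-patch. A stand-alone illustration of that slicing in plain C++ (invented sizes and values, not the OpenFOAM API):

    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<double> sendBuf(10, 0.0);
        std::vector<int> patchIDs = {-1, 3};     // -1: plain faces, 3: referred patch
        std::vector<int> starts   = {0, 6, 10};  // slice i is [starts[i], starts[i+1])

        for (std::size_t i = 0; i < patchIDs.size(); ++i)
        {
            for (int f = starts[i]; f < starts[i+1]; ++f)
            {
                // plain faces would copy owner-cell values into the buffer;
                // referred faces are filled by the referred patch field instead
                sendBuf[f] = (patchIDs[i] == -1 ? 1.0 : 2.0);
            }
        }

        for (double v : sendBuf) std::cout << v << ' ';
        std::cout << '\n';   // prints: 1 1 1 1 1 1 2 2 2 2
    }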
@ -192,9 +266,39 @@ void processorFvPatchField<Type>::evaluate
{
procPatch_.compressedReceive<Type>(commsType, *this);
if (doTransform())
const processorPolyPatch& pp = procPatch_.procPolyPatch();
forAll(pp.patchIDs(), i)
{
transform(*this, procPatch_.forwardT(), *this);
label patchI = pp.patchIDs()[i];
//Pout<< "evaluate on " << this->dimensionedInternalField().name()
// << " patch:" << pp.name()
// << " subSize:" << (pp.starts()[i+1]-pp.starts()[i])
// << " subStart:" << pp.starts()[i]
// << " subPatch:" << patchI << endl;
if (patchI == -1)
{
// No evaluation needed.
}
else
{
SubField<Type> subRecvFld(pp.subSlice(*this, i));
Field<Type>& subFld = static_cast<Field<Type>&>
(
static_cast<UList<Type>&>(subRecvFld)
);
patchField(patchI).evaluate
(
subFld,
procPatch_,
subRecvFld.size(),
pp.starts()[i]
);
}
}
}
}
@ -203,7 +307,58 @@ void processorFvPatchField<Type>::evaluate
template<class Type>
tmp<Field<Type> > processorFvPatchField<Type>::snGrad() const
{
return this->patch().deltaCoeffs()*(*this - this->patchInternalField());
tmp<Field<Type> > tpnf(new Field<Type>(this->size()));
Field<Type>& pnf = tpnf();
const processorPolyPatch& pp = procPatch_.procPolyPatch();
forAll(pp.patchIDs(), i)
{
label patchI = pp.patchIDs()[i];
label subStart = pp.starts()[i];
label subSize = pp.starts()[i+1] - pp.starts()[i];
const SubField<Type> subThis(pp.subSlice(*this, i));
SubField<Type> subPnf(pp.subSlice(pnf, i));
Field<Type>& subFld = static_cast<Field<Type>&>
(
static_cast<UList<Type>&>(subPnf)
);
//Pout<< "snGrad on " << this->dimensionedInternalField().name()
// << " patch:" << pp.name()
// << " subSize:" << (pp.starts()[i+1]-pp.starts()[i])
// << " subStart:" << pp.starts()[i]
// << " subPatch:" << patchI << endl;
if (patchI == -1)
{
// Slice delta coeffs
const SubField<scalar> subDc
(
pp.subSlice(procPatch_.deltaCoeffs(), i)
);
tmp<Field<Type> > subInt(new Field<Type>(subSize));
this->patchInternalField(subInt(), procPatch_, subSize, subStart);
subFld = (subDc*(subThis-subInt))();
}
else
{
patchField(patchI).snGrad
(
subFld,
subThis,
procPatch_,
subSize,
subStart
);
}
}
return tpnf;
}
@ -211,51 +366,181 @@ template<class Type>
void processorFvPatchField<Type>::initInterfaceMatrixUpdate
(
const scalarField& psiInternal,
scalarField&,
const lduMatrix&,
const scalarField&,
const direction,
scalarField& result,
const lduMatrix& m,
const scalarField& coeffs,
const direction cmpt,
const Pstream::commsTypes commsType
) const
{
procPatch_.compressedSend
// Get reference to proc patch built-in buffer
List<scalar>& sendFld = procPatch_.setSendBuf<scalar>(this->size());
const processorPolyPatch& pp = procPatch_.procPolyPatch();
forAll(pp.patchIDs(), i)
{
label subStart = pp.starts()[i];
label subSize = pp.starts()[i+1] - pp.starts()[i];
SubField<scalar> subSendFld(sendFld, subSize, subStart);
Field<scalar>& subFld = static_cast<Field<scalar>&>
(
commsType,
this->patch().patchInternalField(psiInternal)()
static_cast<UList<scalar>&>(subSendFld)
);
label patchI = pp.patchIDs()[i];
//Pout<< "initInterfaceMatrixUpdate on "
// << this->dimensionedInternalField().name()
// << " patch:" << pp.name()
// << " subSize:" << (pp.starts()[i+1]-pp.starts()[i])
// << " subStart:" << pp.starts()[i]
// << " subPatch:" << patchI << endl;
if (patchI == -1)
{
const unallocLabelList& faceCells = pp.faceCells();
label facei = subStart;
forAll(subFld, i)
{
subFld[i] = psiInternal[faceCells[facei++]];
}
}
else
{
patchField(patchI).initInterfaceMatrixUpdate
(
psiInternal,
result,
m,
coeffs,
cmpt,
procPatch_,
subSize,
subStart,
subFld
);
}
}
procPatch_.compressedBufferSend<scalar>(commsType);
}
template<class Type>
void processorFvPatchField<Type>::updateInterfaceMatrix
(
const scalarField&,
const scalarField& psiInternal,
scalarField& result,
const lduMatrix&,
const lduMatrix& m,
const scalarField& coeffs,
const direction cmpt,
const Pstream::commsTypes commsType
) const
{
scalarField pnf
const List<scalar>& recvFld = procPatch_.compressedBufferReceive<scalar>
(
procPatch_.compressedReceive<scalar>(commsType, this->size())()
commsType,
this->size()
);
// Transform according to the transformation tensor
transformCoupleField(pnf, cmpt);
const processorPolyPatch& pp = procPatch_.procPolyPatch();
// Multiply the field by coefficients and add into the result
const unallocLabelList& faceCells = this->patch().faceCells();
forAll(faceCells, elemI)
forAll(pp.patchIDs(), i)
{
result[faceCells[elemI]] -= coeffs[elemI]*pnf[elemI];
label subStart = pp.starts()[i];
label subSize = pp.starts()[i+1] - pp.starts()[i];
SubField<scalar> subRecvFld(recvFld, subSize, subStart);
Field<scalar>& subFld = static_cast<Field<scalar>&>
(
static_cast<UList<scalar>&>(subRecvFld)
);
label patchI = pp.patchIDs()[i];
//Pout<< "updateInterfaceMatrix on "
// << this->dimensionedInternalField().name()
// << " patch:" << pp.name()
// << " subSize:" << (pp.starts()[i+1]-pp.starts()[i])
// << " subStart:" << pp.starts()[i]
// << " subPatch:" << patchI << endl;
if (patchI == -1)
{
const unallocLabelList& faceCells = pp.faceCells();
label facei = subStart;
forAll(subFld, elemI)
{
result[faceCells[facei]] -= coeffs[facei]*subFld[elemI];
facei++;
}
}
else
{
patchField(patchI).updateInterfaceMatrix
(
psiInternal,
result,
m,
coeffs,
cmpt,
procPatch_,
subSize,
subStart,
subFld
);
}
}
}
//template<class Type>
//void Foam::processorFvPatchField<Type>::transformCoupleField
//(
// scalarField& f,
// const direction cmpt
//) const
//{
// if (pTraits<Type>::rank > 0)
// {
// const processorPolyPatch& pp = procPatch_.procPolyPatch();
//
// forAll(pp.patchIDs(), i)
// {
// label patchI = pp.patchIDs()[i];
//
// if (patchI == -1)
// {
// // ? anything needs to be transformed?
// }
// else
// {
// const coupledPolyPatch& cpp =
// refCast<const coupledPolyPatch>(pp.boundaryMesh()[patchI]);
//
// if (!cpp.parallel())
// {
// const tensor& T = cpp.forwardT();
//
// SubField<scalar> subFld(pp.subSlice(f, i));
// const scalarField& fld =
// static_cast<const scalarField&>(subFld);
//
// const_cast<scalarField&>(fld) *=
// pow(diag(T).component(cmpt), rank());
// }
// }
// }
// }
//}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam

View File

@ -60,7 +60,10 @@ class processorFvPatchField
//- Local reference cast into the processor patch
const processorFvPatch& procPatch_;
// Private Member Functions
//- Get other patchfield
const coupledFvPatchField<Type>& patchField(const label patchID) const;
public:
//- Runtime type information
@ -206,23 +209,100 @@ public:
return procPatch_.neighbProcNo();
}
//- Does the patch field perform the transformation
virtual bool doTransform() const
{
return !(procPatch_.parallel() || pTraits<Type>::rank == 0);
}
//- Return face transformation tensor
virtual const tensorField& forwardT() const
{
return procPatch_.forwardT();
}
//- Return rank of component for transform
virtual int rank() const
{
return pTraits<Type>::rank;
}
// //- Transform given patch component field
// void transformCoupleField
// (
// scalarField& f,
// const direction cmpt
// ) const;
// Referred-patch functionality. Get called with a slice (size, start)
// of a patch that supplies fields and geometry/topology.
//- Get patch-normal gradient
virtual void snGrad
(
Field<Type>& exchangeBuf,
const Field<Type>& subFld,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
notImplemented("processorFvPatchField::snGrad(..)");
}
//- Initialise the evaluation of the patch field.
virtual void initEvaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
notImplemented("processorFvPatchField::initEvaluate(..)");
}
//- Evaluate the patch field.
virtual void evaluate
(
Field<Type>& exchangeBuf,
const coupledFvPatch& referringPatch,
const label size,
const label start
) const
{
notImplemented("processorFvPatchField::evaluate(..)");
}
//- Initialise neighbour matrix update
virtual void initInterfaceMatrixUpdate
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix& m,
const scalarField& coeffs,
const direction cmpt,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const
{
notImplemented
(
"processorFvPatchField::initInterfaceMatrixUpdate(..)"
);
}
//- Update result field based on interface functionality
virtual void updateInterfaceMatrix
(
const scalarField& psiInternal,
scalarField& result,
const lduMatrix&,
const scalarField& coeffs,
const direction,
const coupledFvPatch& referringPatch,
const label size,
const label start,
scalarField& exchangeBuf
) const
{
notImplemented
(
"processorFvPatchField::updateInterfaceMatrix(..)"
);
}
};

View File

@ -36,6 +36,7 @@ namespace Foam
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
makePatchFields(processor);
//makePatchTypeField(fvPatchScalarField, processorFvPatchScalarField);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -105,23 +105,20 @@ public:
return coupledPolyPatch_.coupled();
}
//- Return face transformation tensor
const tensorField& forwardT() const
{
return coupledPolyPatch_.forwardT();
}
//- Return neighbour-cell transformation tensor
const tensorField& reverseT() const
{
return coupledPolyPatch_.reverseT();
}
//- Are the cyclic planes parallel
bool parallel() const
{
return coupledPolyPatch_.parallel();
}
// //- Are the planes separated.
// virtual bool separated() const = 0;
//
// //- If the planes are separated the separation vector.
// virtual const vector& separation() const = 0;
//
// //- Are the cyclic planes parallel.
// virtual bool parallel() const = 0;
//
// //- Return face transformation tensor.
// virtual const tensor& forwardT() const = 0;
//
// //- Return neighbour-cell transformation tensor.
// const tensor& reverseT() const = 0;
//- Return faceCell addressing
virtual const unallocLabelList& faceCells() const
@ -142,20 +139,26 @@ public:
const unallocLabelList& internalData
) const = 0;
//- Initialise interface data transfer
virtual void initTransfer
(
const Pstream::commsTypes commsType,
const unallocLabelList& interfaceData
) const
{}
//- Transfer and return neighbour field
virtual tmp<labelField> transfer
(
const Pstream::commsTypes commsType,
const unallocLabelList& interfaceData
) const = 0;
// //- Initialise interface data transfer
// virtual void initTransfer
// (
// const Pstream::commsTypes commsType,
// const unallocLabelList& interfaceData
// ) const
// {
// notImplemented("coupledFvPatch::initTransfer(..)");
// }
//
// //- Transfer and return neighbour field
// virtual tmp<labelField> transfer
// (
// const Pstream::commsTypes commsType,
// const unallocLabelList& interfaceData
// ) const
// {
// notImplemented("coupledFvPatch::initTransfer(..)");
// return tmp<labelField>();
// }
//- Initialise neighbour field transfer
virtual void initInternalFieldTransfer

View File

@ -44,30 +44,31 @@ addToRunTimeSelectionTable(fvPatch, cyclicFvPatch, polyPatch);
// Make patch weighting factors
void cyclicFvPatch::makeWeights(scalarField& w) const
{
const cyclicFvPatch& nbrPatch = neighbFvPatch();
const scalarField& magFa = magSf();
const scalarField& nbrMagFa = nbrPatch.magSf();
scalarField deltas = nf() & fvPatch::delta();
label sizeby2 = deltas.size()/2;
scalarField nbrDeltas = nbrPatch.nf() & nbrPatch.fvPatch::delta();
for (label facei = 0; facei < sizeby2; facei++)
forAll(magFa, facei)
{
scalar avFa = (magFa[facei] + magFa[facei + sizeby2])/2.0;
scalar avFa = (magFa[facei] + nbrMagFa[facei])/2.0;
if (mag(magFa[facei] - magFa[facei + sizeby2])/avFa > 1e-4)
if (mag(magFa[facei] - nbrMagFa[facei])/avFa > 1e-4)
{
FatalErrorIn("cyclicFvPatch::makeWeights(scalarField& w) const")
<< "face " << facei << " and " << facei + sizeby2
<< " areas do not match by "
<< 100*mag(magFa[facei] - magFa[facei + sizeby2])/avFa
<< "face " << facei << " areas do not match by "
<< 100*mag(magFa[facei] - nbrMagFa[facei])/avFa
<< "% -- possible face ordering problem"
<< abort(FatalError);
}
scalar di = deltas[facei];
scalar dni = deltas[facei + sizeby2];
scalar dni = nbrDeltas[facei];
w[facei] = dni/(di + dni);
w[facei + sizeby2] = 1 - w[facei];
}
}
@ -75,16 +76,18 @@ void cyclicFvPatch::makeWeights(scalarField& w) const
// Make patch face - neighbour cell distances
void cyclicFvPatch::makeDeltaCoeffs(scalarField& dc) const
{
scalarField deltas = nf() & fvPatch::delta();
label sizeby2 = deltas.size()/2;
//const cyclicPolyPatch& nbrPatch = cyclicPolyPatch_.neighbPatch();
const cyclicFvPatch& nbrPatch = neighbFvPatch();
for (label facei = 0; facei < sizeby2; facei++)
scalarField deltas = nf() & fvPatch::delta();
scalarField nbrDeltas = nbrPatch.nf() & nbrPatch.fvPatch::delta();
forAll(deltas, facei)
{
scalar di = deltas[facei];
scalar dni = deltas[facei + sizeby2];
scalar dni = nbrDeltas[facei];
dc[facei] = 1.0/(di + dni);
dc[facei + sizeby2] = dc[facei];
}
}
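The geometry behind the weights and delta coefficients computed above, as a short sketch (scalar is OpenFOAM's floating-point typedef; di and dni are the owner-cell-to-face distances on the two halves of the cyclic, numbers invented):

    inline scalar cyclicWeight(const scalar di, const scalar dni)
    {
        return dni/(di + dni);    // e.g. di = 0.2, dni = 0.3  ->  w  = 0.6
    }

    inline scalar cyclicDeltaCoeff(const scalar di, const scalar dni)
    {
        return 1.0/(di + dni);    // e.g. di = 0.2, dni = 0.3  ->  dc = 2.0
    }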
@ -93,7 +96,7 @@ void cyclicFvPatch::makeDeltaCoeffs(scalarField& dc) const
tmp<vectorField> cyclicFvPatch::delta() const
{
vectorField patchD = fvPatch::delta();
label sizeby2 = patchD.size()/2;
vectorField nbrPatchD = neighbFvPatch().fvPatch::delta();
tmp<vectorField> tpdv(new vectorField(patchD.size()));
vectorField& pdv = tpdv();
@ -101,24 +104,22 @@ tmp<vectorField> cyclicFvPatch::delta() const
// Do the transformation if necessary
if (parallel())
{
for (label facei = 0; facei < sizeby2; facei++)
forAll(patchD, facei)
{
vector ddi = patchD[facei];
vector dni = patchD[facei + sizeby2];
vector dni = nbrPatchD[facei];
pdv[facei] = ddi - dni;
pdv[facei + sizeby2] = -pdv[facei];
}
}
else
{
for (label facei = 0; facei < sizeby2; facei++)
forAll(patchD, facei)
{
vector ddi = patchD[facei];
vector dni = patchD[facei + sizeby2];
vector dni = nbrPatchD[facei];
pdv[facei] = ddi - transform(forwardT()[0], dni);
pdv[facei + sizeby2] = -transform(reverseT()[0], pdv[facei]);
pdv[facei] = ddi - transform(forwardT(), dni);
}
}
@ -141,6 +142,8 @@ tmp<labelField> cyclicFvPatch::transfer
const unallocLabelList& interfaceData
) const
{
notImplemented("cyclicFvPatch::transfer(..)");
tmp<labelField> tpnf(new labelField(this->size()));
labelField& pnf = tpnf();
@ -162,6 +165,9 @@ tmp<labelField> cyclicFvPatch::internalFieldTransfer
const unallocLabelList& iF
) const
{
notImplemented("cyclicFvPatch::internalFieldTransfer(..)");
// ** TO BE DONE - needs neighbour patch field!
const unallocLabelList& faceCells = this->patch().faceCells();
tmp<labelField> tpnf(new labelField(this->size()));

View File

@ -39,6 +39,7 @@ SourceFiles
#include "coupledFvPatch.H"
#include "cyclicLduInterface.H"
#include "cyclicPolyPatch.H"
#include "fvBoundaryMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -82,6 +83,10 @@ public:
cyclicFvPatch(const polyPatch& patch, const fvBoundaryMesh& bm)
:
coupledFvPatch(patch, bm),
cyclicLduInterface
(
refCast<const cyclicPolyPatch>(patch).neighbPatchID()
),
cyclicPolyPatch_(refCast<const cyclicPolyPatch>(patch))
{}
@ -90,21 +95,41 @@ public:
// Access
//- Return face transformation tensor
const tensorField& forwardT() const
//- Return local reference cast into the cyclic patch
const cyclicPolyPatch& cyclicPatch() const
{
return coupledFvPatch::forwardT();
return cyclicPolyPatch_;
}
//- Are the cyclic planes parallel
virtual bool parallel() const
{
return cyclicPolyPatch_.parallel();
}
//- Return face transformation tensor
virtual const tensor& forwardT() const
{
return cyclicPolyPatch_.forwardT();
}
//- Return neighbour-cell transformation tensor
const tensorField& reverseT() const
virtual const tensor& reverseT() const
{
return coupledFvPatch::reverseT();
return cyclicPolyPatch_.reverseT();
}
const cyclicFvPatch& neighbFvPatch() const
{
return refCast<const cyclicFvPatch>
(
this->boundaryMesh()[cyclicPolyPatch_.neighbPatchID()]
);
}
//- Return delta (P to N) vectors across coupled patch
tmp<vectorField> delta() const;
virtual tmp<vectorField> delta() const;
// Interface transfer functions

View File

@ -27,6 +27,7 @@ License
#include "processorFvPatch.H"
#include "addToRunTimeSelectionTable.H"
#include "transformField.H"
#include "polyBoundaryMesh.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -45,16 +46,29 @@ void processorFvPatch::makeWeights(scalarField& w) const
{
if (Pstream::parRun())
{
const processorPolyPatch& pp = procPolyPatch();
Pout<< name() << " pp.neighbFaceAreas():" << pp.neighbFaceAreas()
<< endl;
Pout<< name() << " pp.neighbFaceCentres():" << pp.neighbFaceCentres()
<< endl;
Pout<< name() << " pp.neighbFaceCellCentres():" << pp.neighbFaceCellCentres()
<< endl;
// The face normals point in the opposite direction on the other side
scalarField neighbFaceCentresCn
(
(
procPolyPatch_.neighbFaceAreas()
/(mag(procPolyPatch_.neighbFaceAreas()) + VSMALL)
pp.neighbFaceAreas()
/(mag(pp.neighbFaceAreas()) + VSMALL)
)
& (
procPolyPatch_.neighbFaceCentres()
- procPolyPatch_.neighbFaceCellCentres())
pp.neighbFaceCentres()
- pp.neighbFaceCellCentres()
)
);
w = neighbFaceCentresCn/((nf()&fvPatch::delta()) + neighbFaceCentresCn);
@ -63,11 +77,22 @@ void processorFvPatch::makeWeights(scalarField& w) const
{
w = 1.0;
}
Pout<< name() << " w:" << w
<< endl;
}
void processorFvPatch::makeDeltaCoeffs(scalarField& dc) const
{
Pout<< name() << " fvPatch::delta():" << fvPatch::delta()
<< endl;
Pout<< name() << " nf():" << nf()
<< endl;
Pout<< name() << " weights():" << weights()
<< endl;
if (Pstream::parRun())
{
dc = (1.0 - weights())/(nf() & fvPatch::delta());
@ -76,41 +101,71 @@ void processorFvPatch::makeDeltaCoeffs(scalarField& dc) const
{
dc = 1.0/(nf() & fvPatch::delta());
}
Pout<< name() << " dc:" << dc << endl;
}
tmp<vectorField> processorFvPatch::delta() const
{
tmp<vectorField> deltaFld(fvPatch::delta());
if (Pstream::parRun())
{
// To the transformation if necessary
if (parallel())
// Do the transformation if necessary.
const processorPolyPatch& pp = procPolyPatch();
const pointField& nfc = pp.neighbFaceCentres();
const pointField& ncc = pp.neighbFaceCellCentres();
forAll(pp.patchIDs(), i)
{
return
fvPatch::delta()
- (
procPolyPatch_.neighbFaceCentres()
- procPolyPatch_.neighbFaceCellCentres()
const SubField<point> subFc(pp.subSlice(nfc, i));
const SubField<point> subCc(pp.subSlice(ncc, i));
SubField<vector> subDelta(pp.subSlice(deltaFld(), i));
const vectorField& subFld = static_cast<const vectorField&>
(
subDelta
);
label patchI = pp.patchIDs()[i];
Pout<< name() << " delta:" << " subFc:" << subFc
<< " subCc:" << subCc << " subDelta:" << subDelta
<< endl;
if (patchI == -1)
{
const_cast<vectorField&>(subFld) -= (subFc - subCc);
}
else
{
return
fvPatch::delta()
- transform
const coupledPolyPatch& subPatch =
refCast<const coupledPolyPatch>
(
forwardT(),
(
procPolyPatch_.neighbFaceCentres()
- procPolyPatch_.neighbFaceCellCentres()
)
pp.boundaryMesh()[patchI]
);
}
if (subPatch.parallel())
{
const_cast<vectorField&>(subFld) -= (subFc - subCc);
}
else
{
return fvPatch::delta();
const_cast<vectorField&>(subFld) -= transform
(
subPatch.forwardT(),
subFc - subCc
);
}
}
Pout<< name() << " subDelta:" << subDelta
<< endl;
}
}
return deltaFld;
}
@ -123,24 +178,24 @@ tmp<labelField> processorFvPatch::interfaceInternalField
}
void processorFvPatch::initTransfer
(
const Pstream::commsTypes commsType,
const unallocLabelList& interfaceData
) const
{
send(commsType, interfaceData);
}
tmp<labelField> processorFvPatch::transfer
(
const Pstream::commsTypes commsType,
const unallocLabelList&
) const
{
return receive<label>(commsType, this->size());
}
//void processorFvPatch::initTransfer
//(
// const Pstream::commsTypes commsType,
// const unallocLabelList& interfaceData
//) const
//{
// send(commsType, interfaceData);
//}
//
//
//tmp<labelField> processorFvPatch::transfer
//(
// const Pstream::commsTypes commsType,
// const unallocLabelList&
//) const
//{
// return receive<label>(commsType, this->size());
//}
void processorFvPatch::initInternalFieldTransfer

View File

@ -113,14 +113,49 @@ public:
}
}
//- Return face transformation tensor
virtual const tensorField& forwardT() const
const processorPolyPatch& procPolyPatch() const
{
return procPolyPatch_.forwardT();
return procPolyPatch_;
}
//- Are the planes separated.
virtual bool separated() const
{
notImplemented("processorFvPatch::separated() const");
return false;
}
//- If the planes are separated the separation vector.
virtual const vector& separation() const
{
notImplemented("processorFvPatch::separation() const");
return vector::zero;
}
//- Are the cyclic planes parallel
virtual bool parallel() const
{
notImplemented("processorFvPatch::separated() const");
return false;
}
//- Return face transformation tensor
virtual const tensor& forwardT() const
{
notImplemented("processorFvPatch::forwardT() const");
return tensor::zero;
}
//- Return neighbour-cell transformation tensor
virtual const tensor& reverseT() const
{
notImplemented("processorFvPatch::reverseT() const");
return tensor::zero;
}
//- Return delta (P to N) vectors across coupled patch
tmp<vectorField> delta() const;
virtual tmp<vectorField> delta() const;
// Interface transfer functions
@ -137,14 +172,21 @@ public:
(
const Pstream::commsTypes commsType,
const unallocLabelList& interfaceData
) const;
) const
{
notImplemented("processorFvPatch::initTransfer() const");
}
//- Transfer and return neighbour field
virtual tmp<labelField> transfer
(
const Pstream::commsTypes commsType,
const unallocLabelList& interfaceData
) const;
) const
{
notImplemented("processorFvPatch::initTransfer() const");
return tmp<labelField>(new labelField());
}
//- Initialise neighbour field transfer
virtual void initInternalFieldTransfer

View File

@ -120,39 +120,24 @@ void Foam::Particle<ParticleType>::correctAfterParallelTransfer
celli_ = ppp.faceCells()[facei_];
if (!ppp.parallel())
label subPatchI = ppp.whichSubPatch(facei_);
const coupledPolyPatch& cpp =
refCast<const coupledPolyPatch>
(cloud_.pMesh().boundaryMesh()[subPatchI]);
// We are on receiving end.
if (!cpp.parallel())
{
if (ppp.forwardT().size() == 1)
{
const tensor& T = ppp.forwardT()[0];
const tensor& T = cpp.forwardT();
transformPosition(T);
static_cast<ParticleType&>(*this).transformProperties(T);
}
else
else if (cpp.separated())
{
const tensor& T = ppp.forwardT()[facei_];
transformPosition(T);
static_cast<ParticleType&>(*this).transformProperties(T);
}
}
else if (ppp.separated())
{
if (ppp.separation().size() == 1)
{
position_ -= ppp.separation()[0];
static_cast<ParticleType&>(*this).transformProperties
(
-ppp.separation()[0]
);
}
else
{
position_ -= ppp.separation()[facei_];
static_cast<ParticleType&>(*this).transformProperties
(
-ppp.separation()[facei_]
);
}
const vector d = -cpp.separation();
position_ += d;
static_cast<ParticleType&>(*this).transformProperties(d);
}
// Reset the face index for the next tracking operation
@ -208,7 +193,6 @@ Foam::label Foam::Particle<ParticleType>::track
}
template<class ParticleType>
Foam::label Foam::Particle<ParticleType>::track(const vector& endPosition)
{
@ -216,6 +200,7 @@ Foam::label Foam::Particle<ParticleType>::track(const vector& endPosition)
return track(endPosition, dummyTd);
}
template<class ParticleType>
template<class TrackData>
Foam::scalar Foam::Particle<ParticleType>::trackToFace
@ -461,7 +446,7 @@ void Foam::Particle<ParticleType>::hitCyclicPatch
TrackData&
)
{
label patchFacei_ = cpp.whichFace(facei_);
// Transform (still on sending side)
facei_ = cpp.transformGlobalFace(facei_);
@ -469,18 +454,17 @@ void Foam::Particle<ParticleType>::hitCyclicPatch
if (!cpp.parallel())
{
const tensor& T = cpp.transformT(patchFacei_);
const tensor& T = cpp.reverseT();
transformPosition(T);
static_cast<ParticleType&>(*this).transformProperties(T);
}
else if (cpp.separated())
{
position_ += cpp.separation(patchFacei_);
static_cast<ParticleType&>(*this).transformProperties
(
cpp.separation(patchFacei_)
);
const vector& d = cpp.separation();
position_ += d;
static_cast<ParticleType&>(*this).transformProperties(d);
}
}
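What the two branches above do to a crossing particle, sketched with invented numbers (T is the patch transform tensor, separation the translation between the cyclic halves):

    // Rotational coupling:    p' = transform(T, p), i.e. the position is rotated.
    // Translational coupling: the position is shifted by the separation vector:
    inline point translateAcrossCyclic(const point& p, const vector& separation)
    {
        return p + separation;    // e.g. (0, 0, 1) + (0, 0, -2) = (0, 0, -1)
    }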

View File

@ -1,12 +1,15 @@
/*
cellClassification/cellClassification.C
cellClassification/cellInfo.C
cellQuality/cellQuality.C
*/
cellDist/cellDistFuncs.C
cellDist/patchWave/patchWave.C
cellDist/wallPoint/wallPoint.C
cellFeatures/cellFeatures.C
coordinateSystems/coordinateSystem.C
@ -21,18 +24,17 @@ coordinateSystems/coordinateRotation/coordinateRotation.C
coordinateSystems/coordinateRotation/EulerCoordinateRotation.C
coordinateSystems/coordinateRotation/STARCDCoordinateRotation.C
/*
edgeFaceCirculator/edgeFaceCirculator.C
*/
polyMeshZipUpCells/polyMeshZipUpCells.C
primitiveMeshGeometry/primitiveMeshGeometry.C
meshSearch/meshSearch.C
meshTools/meshTools.C
PointEdgeWave/PointEdgeWaveName.C
PointEdgeWave/pointEdgePoint.C
regionSplit/regionSplit.C
octree/octreeName.C
@ -70,6 +72,8 @@ $(topoSets)/topoSet.C
$(topoSets)/faceSet.C
$(topoSets)/pointSet.C
/*
sets/topoSetSource/topoSetSource.C
cellSources = sets/cellSources
@ -127,6 +131,8 @@ intersectedSurface = $(booleanOps)/intersectedSurface
$(intersectedSurface)/intersectedSurface.C
$(intersectedSurface)/edgeSurface.C
*/
triSurface/triSurfaceSearch/triSurfaceSearch.C
triSurface/octreeData/octreeDataTriSurface.C
triSurface/octreeData/octreeDataTriSurfaceTreeLeaf.C

View File

@ -27,6 +27,7 @@ License
#include "polyMeshZipUpCells.H"
#include "polyMesh.H"
#include "Time.H"
#include "globalMeshData.H"
// #define DEBUG_ZIPUP 1
// #define DEBUG_CHAIN 1
@ -745,13 +746,27 @@ bool Foam::polyMeshZipUpCells(polyMesh& mesh)
// Collect the patch sizes
labelList patchSizes(bMesh.size(), 0);
labelList patchStarts(bMesh.size(), 0);
forAll (bMesh, patchI)
{
patchSizes[patchI] = bMesh[patchI].size();
patchStarts[patchI] = bMesh[patchI].start();
}
labelListList subPatches(bMesh.size());
labelListList subPatchStarts(bMesh.size());
forAll(mesh.globalData().processorPatches(), i)
{
label patchI = mesh.globalData().processorPatches()[i];
const processorPolyPatch& ppp = refCast<const processorPolyPatch>
(
bMesh[patchI]
);
subPatches[patchI] = ppp.patchIDs();
subPatchStarts[patchI] = ppp.starts();
}
// Reset the mesh. Number of active faces is one beyond the last patch
// (patches guaranteed to be in increasing order)
mesh.resetPrimitives
@ -763,6 +778,8 @@ bool Foam::polyMeshZipUpCells(polyMesh& mesh)
mesh.faceNeighbour(),
patchSizes,
patchStarts,
subPatches,
subPatchStarts,
true // boundary forms valid boundary mesh.
);

View File

@ -1,61 +0,0 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 1.5 |
| \\ / A nd | Web: http://www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class polyBoundaryMesh;
object boundary;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
6
(
bottomWall
{
type wall;
nFaces 1200;
startFace 175300;
}
topWall
{
type wall;
nFaces 1200;
startFace 176500;
}
sides1
{
type cyclic;
nFaces 2000;
startFace 177700;
featureCos 0.9;
}
sides2
{
type cyclic;
nFaces 2000;
startFace 179700;
featureCos 0.9;
}
inout1
{
type cyclic;
nFaces 1500;
startFace 181700;
featureCos 0.9;
}
inout2
{
type cyclic;
nFaces 1500;
startFace 183200;
featureCos 0.9;
}
)
// ************************************************************************* //

View File

@ -1,39 +0,0 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 1.5 |
| \\ / A nd | Web: http://www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class polyBoundaryMesh;
object boundary;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
3
(
movingWall
{
type wall;
nFaces 20;
startFace 760;
}
fixedWalls
{
type wall;
nFaces 60;
startFace 780;
}
frontAndBack
{
type empty;
nFaces 800;
startFace 840;
}
)
// ************************************************************************* //

View File

@ -1,4 +1,4 @@
c++DBUG =
c++DBUG = -O0 -DFULLDEBUG -g
c++OPT = -march=opteron -O3
#c++OPT = -march=nocona -O3
# -ftree-vectorize -ftree-vectorizer-verbose=3