Merge branch 'master' of ssh://dm/home/dm4/OpenFOAM/OpenFOAM-dev

Conflicts:
	src/thermophysicalModels/basic/Make/files
	src/thermophysicalModels/basic/basicThermo/basicThermo.C
	src/thermophysicalModels/basic/psiThermo/hPsiThermo/hPsiThermo.C
	src/thermophysicalModels/basic/psiThermo/hsPsiThermo/hsPsiThermo.C
	src/thermophysicalModels/basic/rhoThermo/hRhoThermo/hRhoThermo.C
	src/thermophysicalModels/basic/rhoThermo/hsRhoThermo/hsRhoThermo.C
	src/thermophysicalModels/thermalPorousZone/thermalModel/fixedTemperature/fixedTemperature.C
Author: Henry
Date:   2012-05-30 15:32:20 +01:00
111 changed files with 3526 additions and 2049 deletions

View File

@ -15,6 +15,9 @@
const volScalarField& kappa = tkappa();
//const volSymmTensorField& K = tK();
tmp<volScalarField> trhoCp = cp*rho;
const volScalarField& rhoCp = trhoCp();
volScalarField& T = thermo.T();
IObasicSourceList& sources = solidHeatSources[i];

View File

@ -4,7 +4,7 @@
tmp<fvScalarMatrix> TEqn
(
- fvm::laplacian(betav*kappa, T, "laplacian(K,T)")
+ sources(rho, T)
+ sources(rhoCp, T)
);
TEqn().relax();

View File

@ -15,6 +15,9 @@
const volScalarField& kappa = tkappa();
//const volSymmTensorField& K = tK();
tmp<volScalarField> trhoCp = cp*rho;
const volScalarField& rhoCp = trhoCp();
volScalarField& T = thermo.T();
IObasicSourceList& sources = solidHeatSources[i];

View File

@ -10,7 +10,7 @@ if (finalIter)
(
fvm::ddt(betav*rho*cp, T)
- fvm::laplacian(betav*kappa, T, "laplacian(K,T)")
+ sources(rho, T)
+ sources(rhoCp, T)
);
TEqn().relax();
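
The two hunks above switch the explicit heat-source call from sources(rho, T) to sources(rhoCp, T), using the rhoCp = cp*rho field introduced in the preceding hunks. A sketch of the reasoning, inferred from the ddt coefficient visible in this hunk: the equation is assembled for T with its transient term weighted by betav*rho*cp, so a volumetric source must carry the same rho*cp weighting to remain consistent,

\[ \beta_v \, \rho c_p \, \frac{\partial T}{\partial t} \;-\; \nabla \cdot \left( \beta_v \, \kappa \, \nabla T \right) \;=\; S(\rho c_p,\, T). \]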

View File

@ -0,0 +1,75 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: dev |
| \\ / A nd | Web: http://www.openfoam.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
root "";
case "";
instance "";
local "";
class dictionary;
object meshQualityDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
//- Maximum non-orthogonality allowed. Set to 180 to disable.
maxNonOrtho 65;
//- Max boundary skewness allowed. Set to <0 to disable.
maxBoundarySkewness 50;
//- Max internal skewness allowed. Set to <0 to disable.
maxInternalSkewness 10;
//- Max concaveness allowed. Is angle (in degrees) below which concavity
// is allowed. 0 is straight face, <0 would be convex face.
// Set to 180 to disable.
maxConcave 80;
//- Minimum pyramid volume. Is absolute volume of cell pyramid.
// Set to a sensible fraction of the smallest cell volume expected.
// Set to very negative number (e.g. -1E30) to disable.
minVol 1e-20;
//- Minimum quality of the tet formed by the face-centre
// and variable base point minimum decomposition triangles and
// the cell centre. This has to be a positive number for tracking
// to work. Set to very negative number (e.g. -1E30) to
// disable.
// <0 = inside out tet,
// 0 = flat tet
// 1 = regular tet
minTetQuality 1e-30;
//- Minimum face area. Set to <0 to disable.
minArea -1;
//- Minimum face twist. Set to <-1 to disable. dot product of face normal
//- and face centre triangles normal
minTwist 0.0;
//- minimum normalised cell determinant
//- 1 = hex, <= 0 = folded or flattened illegal cell
minDeterminant 0.001;
//- minFaceWeight (0 -> 0.5)
minFaceWeight 0.02;
//- minVolRatio (0 -> 1)
minVolRatio 0.01;
//must be >0 for Fluent compatibility
minTriangleTwist -1;
// ************************************************************************* //
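
For orientation: this dictionary feeds the -meshQuality check added to checkMesh later in this commit. A minimal sketch of how it is consumed, mirroring the new checkMeshQuality.C further down (mesh is a polyMesh; this is only the pattern, not a self-contained program):

    IOdictionary qualDict
    (
        IOobject
        (
            "meshQualityDict",
            mesh.time().system(),
            mesh,
            IOobject::MUST_READ,
            IOobject::NO_WRITE
        )
    );

    // Mark faces violating the thresholds above and write them to a faceSet
    faceSet faces(mesh, "meshQualityFaces", mesh.nFaces()/100 + 1);
    motionSmoother::checkMesh(false, mesh, qualDict, faces);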

View File

@ -43,7 +43,7 @@ inline bool Foam::pointEdgeCollapse::update
<< "problem." << abort(FatalError);
}
if (w2.collapseIndex_ == -1)
if (w2.collapseIndex_ == -1 || collapseIndex_ == -1)
{
// Not marked for collapse; only happens on edges.
return false;
@ -56,11 +56,11 @@ inline bool Foam::pointEdgeCollapse::update
}
else
{
// Same coordinate. Same string?
// Take over w2 if it is 'better'
if (w2.collapseIndex_ < collapseIndex_)
{
// Take over string index from w2 (and also coordinate but this
// was same)
// Take over string index and coordinate from w2
operator=(w2);
return true;
}
@ -85,35 +85,6 @@ inline bool Foam::pointEdgeCollapse::update
{
return false;
}
// if (samePoint(w2.collapsePoint_))
// {
// // Same coordinate. Same string?
// if (w2.collapseIndex_ < collapseIndex_)
// {
// // Take over string index from w2 (and also coordinate but
// // this was same)
// operator=(w2);
// return true;
// }
// else
// {
// return false;
// }
// }
// else
// {
// // Find nearest coordinate
// if (magSqr(w2.collapsePoint_) < magSqr(collapsePoint_))
// {
// operator=(w2);
// return true;
// }
// else
// {
// return false;
// }
// }
}
}

View File

@ -14,7 +14,7 @@ FoamFile
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Move points:
// Move boundary points:
// Every entry is two coordinates. First one is location of the point to move,
// the second is the position to move to.
pointsToMove
@ -22,15 +22,15 @@ pointsToMove
(( -0.17861 -0.45073 0.75276)( -0.18 -0.45073 0.75276))
);
// Split edge in two:
// First coord is a point on the edge to cut, second is the position of the
// newly introduced point
// Split boundary edge in two:
// First coord is a point on the (boundary) edge to cut, second is the
// position of the newly introduced point
edgesToSplit
(
(( -0.17692 -0.45312 0.74516)( -0.18 -0.45 0.742))
);
// Triangulate a face:
// Triangulate a boundary face:
// First coord is a point on the face to triangulate. It will introduce a
// point on the face, triangulate and move the point to the second coordinate.
facesToTriangulate
@ -38,8 +38,8 @@ facesToTriangulate
(( -0.039123 -0.45045 0.74083) (-0.03844 -0.45049 0.73572))
);
// Edges to collapse. First coord is point on the edge, second is coordinate
// to collapse to.
// Boundary edges to collapse. First coord is point on the edge, second
// is coordinate to collapse to.
edgesToCollapse
(
((0.054975 0.099987 0.0044074)(0.054975 0.099987 0.0044074))
@ -55,7 +55,8 @@ cellsToSplit
);
// Change patch:
// Changes patchID of faces. Coord selects the face, label is the patch index.
// Changes patchID of boundary faces. Coord selects the face, label is the
// patch index.
facesToRepatch
(
(( -0.039123 -0.45045 0.74083) 1)

View File

@ -1,3 +0,0 @@
checkCvMesh.C
EXE = $(FOAM_APPBIN)/checkCvMesh

View File

@ -1,14 +0,0 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/triSurface/lnInclude \
-I$(LIB_SRC)/mesh/autoMesh/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude
EXE_LIBS = \
-lfiniteVolume \
-ldynamicMesh \
-ltriSurface \
-lautoMesh \
-lmeshTools

View File

@ -1,122 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Application
checkCvMesh
Description
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "Time.H"
#include "fvMesh.H"
#include "autoSnapDriver.H"
#include "faceSet.H"
#include "motionSmoother.H"
#include "timeSelector.H"
using namespace Foam;
int main(int argc, char *argv[])
{
timeSelector::addOptions();
# include "addOverwriteOption.H"
# include "setRootCase.H"
# include "createTime.H"
instantList timeDirs = timeSelector::select0(runTime, args);
# include "createNamedPolyMesh.H"
runTime.functionObjects().off();
forAll(timeDirs, timeI)
{
runTime.setTime(timeDirs[timeI], timeI);
Info<< "Time = " << runTime.timeName()
<< nl << endl;
mesh.readUpdate();
// Check patches and faceZones are synchronised
mesh.boundaryMesh().checkParallelSync(true);
meshRefinement::checkCoupledFaceZones(mesh);
// Read meshing dictionary
IOdictionary cvMeshDict
(
IOobject
(
"cvMeshDict",
runTime.system(),
mesh,
IOobject::MUST_READ_IF_MODIFIED,
IOobject::NO_WRITE
)
);
// mesh motion and mesh quality parameters
const dictionary& meshQualityDict
= cvMeshDict.subDict("meshQualityControls");
Info<< "Checking mesh ..." << endl;
faceSet wrongFaces(mesh, "wrongFaces", mesh.nFaces()/100);
motionSmoother::checkMesh(false, mesh, meshQualityDict, wrongFaces);
const label nInitErrors = returnReduce
(
wrongFaces.size(),
sumOp<label>()
);
Info<< "Detected " << nInitErrors << " illegal faces"
<< " (concave, zero area or negative cell pyramid volume)"
<< endl;
if (nInitErrors > 0)
{
Info<< "Writing " << nInitErrors
<< " faces in error to set "
<< wrongFaces.name() << endl;
wrongFaces.instance() = mesh.pointsInstance();
wrongFaces.write();
}
Info<< nl << "End of time " << runTime.timeName() << nl << endl;
}
Info<< "End\n" << endl;
return 0;
}

View File

@ -96,8 +96,9 @@ bool Foam::cellSizeControlSurfaces::evalCellSizeFunctions
if (cellSizeFunctions_.size())
{
// Initialise to the last (lowest) priority
label previousPriority = cellSizeFunctions_.last().priority();
// Maintain priority of current hit. Initialise so it always goes
// through at least once.
label previousPriority = -1;
forAll(cellSizeFunctions_, i)
{

View File

@ -196,7 +196,7 @@ void Foam::conformalVoronoiMesh::buildSurfaceConformation
{
if (vit->internalPoint() && !vit->nearBoundary())
{
const Foam::point& pt = topoint(vit->point());
pointFromPoint pt = topoint(vit->point());
const scalar range = sqr(2.0*targetCellSize(pt));
pointIndexHit pHit;

View File

@ -238,6 +238,25 @@ int main(int argc, char *argv[])
dict.lookup("constructFrom")
);
// Any merging of small edges
const scalar mergeTol(dict.lookupOrDefault<scalar>("mergeTol", 1e-4));
Info<< "Extruding from " << ExtrudeModeNames[mode]
<< " using model " << model().type() << endl;
if (flipNormals)
{
Info<< "Flipping normals before extruding" << endl;
}
if (mergeTol > 0)
{
Info<< "Collapsing edges < " << mergeTol << " of bounding box" << endl;
}
else
{
Info<< "Not collapsing any edges after extrusion" << endl;
}
Info<< endl;
// Generated mesh (one of either)
autoPtr<fvMesh> meshFromMesh;
@ -251,10 +270,10 @@ int main(int argc, char *argv[])
if (mode == PATCH || mode == MESH)
{
if (flipNormals)
if (flipNormals && mode == MESH)
{
FatalErrorIn(args.executable())
<< "Flipping normals not supported for extrusions from patch."
<< "Flipping normals not supported for extrusions from mesh."
<< exit(FatalError);
}
@ -297,6 +316,60 @@ int main(int argc, char *argv[])
addPatchCellLayer layerExtrude(mesh, (mode == MESH));
const labelList meshFaces(patchFaces(patches, sourcePatches));
if (mode == PATCH && flipNormals)
{
// Cheat. Flip patch faces in mesh. This invalidates the
// mesh (open cells) but does produce the correct extrusion.
polyTopoChange meshMod(mesh);
forAll(meshFaces, i)
{
label meshFaceI = meshFaces[i];
label patchI = patches.whichPatch(meshFaceI);
label own = mesh.faceOwner()[meshFaceI];
label nei = -1;
if (patchI == -1)
{
nei = mesh.faceNeighbour()[meshFaceI];
}
label zoneI = mesh.faceZones().whichZone(meshFaceI);
bool zoneFlip = false;
if (zoneI != -1)
{
label index = mesh.faceZones()[zoneI].whichFace(meshFaceI);
zoneFlip = mesh.faceZones()[zoneI].flipMap()[index];
}
meshMod.modifyFace
(
mesh.faces()[meshFaceI].reverseFace(), // modified face
meshFaceI, // label of face
own, // owner
nei, // neighbour
true, // face flip
patchI, // patch for face
zoneI, // zone for face
zoneFlip // face flip in zone
);
}
// Change the mesh. No inflation.
autoPtr<mapPolyMesh> map = meshMod.changeMesh(mesh, false);
// Update fields
mesh.updateMesh(map);
// Move mesh (since morphing does not do this)
if (map().hasMotionPoints())
{
mesh.movePoints(map().preMotionPoints());
}
}
indirectPrimitivePatch extrudePatch
(
IndirectList<face>
@ -471,11 +544,6 @@ int main(int argc, char *argv[])
displacement[pointI] = extrudePt - layer0Points[pointI];
}
if (flipNormals)
{
Info<< "Flipping faces." << nl << endl;
displacement = -displacement;
}
// Check if wedge (has layer0 different from original patch points)
// If so move the mesh to starting position.
@ -666,7 +734,7 @@ int main(int argc, char *argv[])
const boundBox& bb = mesh.bounds();
const vector span = bb.span();
const scalar mergeDim = 1e-4 * bb.minDim();
const scalar mergeDim = mergeTol * bb.minDim();
Info<< "Mesh bounding box : " << bb << nl
<< " with span : " << span << nl
@ -677,6 +745,7 @@ int main(int argc, char *argv[])
// Collapse edges
// ~~~~~~~~~~~~~~
if (mergeDim > 0)
{
Info<< "Collapsing edges < " << mergeDim << " ..." << nl << endl;

View File

@ -31,7 +31,8 @@ exposedPatchName movingWall;
// If construct from surface:
surface "movingWall.stl";
// Flip surface normals before usage.
// Flip surface normals before usage. Valid only for extrude from surface or
// patch.
flipNormals false;
//- Linear extrusion in point-normal direction
@ -87,4 +88,8 @@ sigmaRadialCoeffs
// degree wedges.
mergeFaces false; //true;
// Merge small edges. Fraction of bounding box.
mergeTol 0;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
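
The new mergeTol entry connects to the edge-collapse step in extrudeMesh.C above; a sketch of how the fraction becomes an absolute length (names as in the diff, 1e-4 being the default when the entry is absent):

    const scalar mergeTol(dict.lookupOrDefault<scalar>("mergeTol", 1e-4));
    const boundBox& bb = mesh.bounds();
    const scalar mergeDim = mergeTol * bb.minDim();   // 0 disables collapsing

    if (mergeDim > 0)
    {
        // collapse all edges shorter than mergeDim
    }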

View File

@ -123,14 +123,6 @@ void writeMesh
int main(int argc, char *argv[])
{
# include "addOverwriteOption.H"
Foam::argList::addBoolOption
(
"checkOnly",
"check existing mesh against snappyHexMeshDict settings"
);
# include "setRootCase.H"
# include "createTime.H"
runTime.functionObjects().off();
@ -141,8 +133,6 @@ int main(int argc, char *argv[])
const bool overwrite = args.optionFound("overwrite");
const bool checkOnly = args.optionFound("checkOnly");
// Check patches and faceZones are synchronised
mesh.boundaryMesh().checkParallelSync(true);
meshRefinement::checkCoupledFaceZones(mesh);
@ -184,36 +174,6 @@ int main(int argc, char *argv[])
);
if (checkOnly)
{
Info<< "Checking initial mesh ..." << endl;
faceSet wrongFaces(mesh, "wrongFaces", mesh.nFaces()/100);
motionSmoother::checkMesh(false, mesh, motionDict, wrongFaces);
const label nInitErrors = returnReduce
(
wrongFaces.size(),
sumOp<label>()
);
Info<< "Detected " << nInitErrors << " illegal faces"
<< " (concave, zero area or negative cell pyramid volume)"
<< endl;
if (nInitErrors > 0)
{
Info<< "Writing " << nInitErrors
<< " faces in error to set "
<< wrongFaces.name() << endl;
wrongFaces.instance() = mesh.pointsInstance();
wrongFaces.write();
}
Info<< "End\n" << endl;
return 0;
}
// Read decomposePar dictionary
IOdictionary decomposeDict

View File

@ -1,6 +1,7 @@
printMeshStats.C
checkTopology.C
checkGeometry.C
checkMeshQuality.C
checkMesh.C
EXE = $(FOAM_APPBIN)/checkMesh

View File

@ -1,5 +1,7 @@
EXE_INC = \
-I$(LIB_SRC)/meshTools/lnInclude
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude
EXE_LIBS = \
-lmeshTools
-lmeshTools \
-ldynamicMesh

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -27,6 +27,21 @@ Application
Description
Checks validity of a mesh
Usage
- checkMesh [OPTION]
\param -allGeometry \n
Checks all (including non finite-volume specific) geometry
\param -allTopology \n
Checks all (including non finite-volume specific) addressing
\param -meshQuality \n
Checks against user defined (in \a system/meshQualityDict) quality settings
\param -region \<name\> \n
Specify an alternative mesh region.
\*---------------------------------------------------------------------------*/
#include "argList.H"
@ -39,6 +54,7 @@ Description
#include "printMeshStats.H"
#include "checkTopology.H"
#include "checkGeometry.H"
#include "checkMeshQuality.H"
using namespace Foam;
@ -63,6 +79,11 @@ int main(int argc, char *argv[])
"allTopology",
"include extra topology checks"
);
argList::addBoolOption
(
"meshQuality",
"read user-defined mesh quality criterions from system/meshQualityDict"
);
# include "setRootCase.H"
# include "createTime.H"
@ -72,6 +93,46 @@ int main(int argc, char *argv[])
const bool noTopology = args.optionFound("noTopology");
const bool allGeometry = args.optionFound("allGeometry");
const bool allTopology = args.optionFound("allTopology");
const bool meshQuality = args.optionFound("meshQuality");
if (noTopology)
{
Info<< "Disabling all topology checks." << nl << endl;
}
if (allTopology)
{
Info<< "Enabling all (cell, face, edge, point) topology checks."
<< nl << endl;
}
if (allGeometry)
{
Info<< "Enabling all geometry checks." << nl << endl;
}
if (meshQuality)
{
Info<< "Enabling user-defined geometry checks." << nl << endl;
}
autoPtr<IOdictionary> qualDict;
if (meshQuality)
{
qualDict.reset
(
new IOdictionary
(
IOobject
(
"meshQualityDict",
mesh.time().system(),
mesh,
IOobject::MUST_READ,
IOobject::NO_WRITE
)
)
);
}
forAll(timeDirs, timeI)
{
@ -96,25 +157,31 @@ int main(int argc, char *argv[])
printMeshStats(mesh, allTopology);
label noFailedChecks = 0;
label nFailedChecks = 0;
if (!noTopology)
{
noFailedChecks += checkTopology(mesh, allTopology, allGeometry);
nFailedChecks += checkTopology(mesh, allTopology, allGeometry);
}
noFailedChecks += checkGeometry(mesh, allGeometry);
nFailedChecks += checkGeometry(mesh, allGeometry);
// Note: no reduction in noFailedChecks necessary since is
if (meshQuality)
{
nFailedChecks += checkMeshQuality(mesh, qualDict());
}
// Note: no reduction in nFailedChecks necessary since it is a
// counter of checks, not a counter of failed cells, faces etc.
if (noFailedChecks == 0)
if (nFailedChecks == 0)
{
Info<< "\nMesh OK.\n" << endl;
}
else
{
Info<< "\nFailed " << noFailedChecks << " mesh checks.\n"
Info<< "\nFailed " << nFailedChecks << " mesh checks.\n"
<< endl;
}
}
@ -124,6 +191,12 @@ int main(int argc, char *argv[])
label nFailedChecks = checkGeometry(mesh, allGeometry);
if (meshQuality)
{
nFailedChecks += checkMeshQuality(mesh, qualDict());
}
if (nFailedChecks)
{
Info<< "\nFailed " << nFailedChecks << " mesh checks.\n"

View File

@ -0,0 +1,34 @@
#include "checkMeshQuality.H"
#include "polyMesh.H"
#include "cellSet.H"
#include "faceSet.H"
#include "motionSmoother.H"
Foam::label Foam::checkMeshQuality
(
const polyMesh& mesh,
const dictionary& dict
)
{
label noFailedChecks = 0;
{
faceSet faces(mesh, "meshQualityFaces", mesh.nFaces()/100+1);
motionSmoother::checkMesh(false, mesh, dict, faces);
label nFaces = returnReduce(faces.size(), sumOp<label>());
if (nFaces > 0)
{
noFailedChecks++;
Info<< " <<Writing " << nFaces
<< " faces in error to set " << faces.name() << endl;
faces.instance() = mesh.pointsInstance();
faces.write();
}
}
return noFailedChecks;
}

View File

@ -0,0 +1,6 @@
#include "polyMesh.H"
namespace Foam
{
label checkMeshQuality(const polyMesh& mesh, const dictionary&);
}

View File

@ -152,13 +152,17 @@ void Foam::printMeshStats(const polyMesh& mesh, const bool allTopology)
{
Pstream::mapCombineGather(polyhedralFaces, plusEqOp<label>());
Info<< " Breakdown of polyhedra by number of faces:" << endl;
Info<< " faces" << " number of cells" << endl;
Info<< " Breakdown of polyhedra by number of faces:" << nl
<< " faces" << " number of cells" << endl;
forAllConstIter(Map<label>, polyhedralFaces, iter)
labelList sortedKeys = polyhedralFaces.sortedToc();
forAll(sortedKeys, keyI)
{
label nFaces = sortedKeys[keyI];
Info<< setf(std::ios::right) << setw(13)
<< iter.key() << " " << iter() << nl;
<< nFaces << " " << polyhedralFaces[nFaces] << nl;
}
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -206,6 +206,14 @@ int main(int argc, char *argv[])
runTime
);
// cvMesh vertices
writeMeshObject<pointIOField>
(
"internalDelaunayVertices",
regionPrefix,
runTime
);
if (runTime.writeFormat() == IOstream::ASCII)
{
// Only do zones when converting from binary to ascii

View File

@ -51,9 +51,6 @@ Foam::autoPtr<Foam::fvMesh> Foam::loadOrCreateMesh
// Check who has a mesh
const bool haveMesh = isDir(io.time().path()/io.instance()/meshSubDir);
Pout<< "meshpath:" << io.time().path()/io.instance()/meshSubDir << endl;
Pout<< "haveMesh:" << haveMesh << endl;
if (!haveMesh)
{
// Create dummy mesh. Only used on procs that don't have mesh.
@ -293,8 +290,10 @@ Pout<< "haveMesh:" << haveMesh << endl;
if (!haveMesh)
{
// We created a dummy mesh file above. Delete it.
//Pout<< "Removing dummy mesh " << io.objectPath() << endl;
rmDir(io.objectPath());
const fileName meshFiles = io.time().path()/io.instance()/meshSubDir;
//Pout<< "Removing dummy mesh " << meshFiles << endl;
mesh.removeFiles();
rmDir(meshFiles);
}
// Force recreation of globalMeshData.

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -52,6 +52,7 @@ Foam::vtkTopo::vtkTopo(const polyMesh& mesh)
const cellModel& tet = *(cellModeller::lookup("tet"));
const cellModel& pyr = *(cellModeller::lookup("pyr"));
const cellModel& prism = *(cellModeller::lookup("prism"));
const cellModel& wedge = *(cellModeller::lookup("wedge"));
const cellModel& tetWedge = *(cellModeller::lookup("tetWedge"));
const cellModel& hex = *(cellModeller::lookup("hex"));
@ -77,7 +78,7 @@ Foam::vtkTopo::vtkTopo(const polyMesh& mesh)
if
(
model != hex
// && model != wedge // See above.
&& model != wedge // See above.
&& model != prism
&& model != pyr
&& model != tet
@ -164,21 +165,21 @@ Foam::vtkTopo::vtkTopo(const polyMesh& mesh)
cellTypes_[cellI] = VTK_WEDGE;
}
// else if (cellModel == wedge)
// {
// // Treat as squeezed hex
// vtkVerts.setSize(8);
// vtkVerts[0] = cellShape[0];
// vtkVerts[1] = cellShape[1];
// vtkVerts[2] = cellShape[2];
// vtkVerts[3] = cellShape[0];
// vtkVerts[4] = cellShape[3];
// vtkVerts[5] = cellShape[4];
// vtkVerts[6] = cellShape[5];
// vtkVerts[7] = cellShape[6];
//
// cellTypes_[cellI] = VTK_HEXAHEDRON;
// }
else if (cellModel == wedge)
{
// Treat as squeezed hex
vtkVerts.setSize(8);
vtkVerts[0] = cellShape[0];
vtkVerts[1] = cellShape[1];
vtkVerts[2] = cellShape[2];
vtkVerts[3] = cellShape[2];
vtkVerts[4] = cellShape[3];
vtkVerts[5] = cellShape[4];
vtkVerts[6] = cellShape[5];
vtkVerts[7] = cellShape[6];
cellTypes_[cellI] = VTK_HEXAHEDRON;
}
else if (cellModel == hex)
{
vtkVerts = cellShape;

View File

@ -472,6 +472,33 @@ void unmarkBaffles
}
void writeStats(const extendedFeatureEdgeMesh& fem, Ostream& os)
{
os << " points : " << fem.points().size() << nl
<< " of which" << nl
<< " convex : "
<< fem.concaveStart() << nl
<< " concave : "
<< (fem.mixedStart()-fem.concaveStart()) << nl
<< " mixed : "
<< (fem.nonFeatureStart()-fem.mixedStart()) << nl
<< " non-feature : "
<< (fem.points().size()-fem.nonFeatureStart()) << nl
<< " edges : " << fem.edges().size() << nl
<< " of which" << nl
<< " external edges : "
<< fem.internalStart() << nl
<< " internal edges : "
<< (fem.flatStart()- fem.internalStart()) << nl
<< " flat edges : "
<< (fem.openStart()- fem.flatStart()) << nl
<< " open edges : "
<< (fem.multipleStart()- fem.openStart()) << nl
<< " multiply connected : "
<< (fem.edges().size()- fem.multipleStart()) << nl;
}
// Main program:
int main(int argc, char *argv[])
@ -514,7 +541,7 @@ int main(int argc, char *argv[])
forAllConstIter(dictionary, dict, iter)
{
dictionary surfaceDict = dict.subDict(iter().keyword());
const dictionary& surfaceDict = iter().dict();
const fileName surfFileName = iter().keyword();
const fileName sFeatFileName = surfFileName.lessExt().name();
@ -522,16 +549,16 @@ int main(int argc, char *argv[])
Info<< "Surface : " << surfFileName << nl << endl;
const Switch writeVTK =
surfaceDict.lookupOrAddDefault<Switch>("writeVTK", "off");
surfaceDict.lookupOrDefault<Switch>("writeVTK", "off");
const Switch writeObj =
surfaceDict.lookupOrAddDefault<Switch>("writeObj", "off");
surfaceDict.lookupOrDefault<Switch>("writeObj", "off");
const Switch curvature =
surfaceDict.lookupOrAddDefault<Switch>("curvature", "off");
surfaceDict.lookupOrDefault<Switch>("curvature", "off");
const Switch featureProximity =
surfaceDict.lookupOrAddDefault<Switch>("featureProximity", "off");
surfaceDict.lookupOrDefault<Switch>("featureProximity", "off");
const Switch closeness =
surfaceDict.lookupOrAddDefault<Switch>("closeness", "off");
surfaceDict.lookupOrDefault<Switch>("closeness", "off");
const word extractionMethod = surfaceDict.lookup("extractionMethod");
@ -579,7 +606,7 @@ int main(int argc, char *argv[])
// Either construct features from surface & featureAngle or read set.
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
surfaceFeatures set(surf);
@ -670,7 +697,10 @@ int main(int argc, char *argv[])
if (surfaceDict.isDict("subsetFeatures"))
{
dictionary subsetDict = surfaceDict.subDict("subsetFeatures");
const dictionary& subsetDict = surfaceDict.subDict
(
"subsetFeatures"
);
if (subsetDict.found("insideBox"))
{
@ -704,7 +734,7 @@ int main(int argc, char *argv[])
}
const Switch manifoldEdges =
subsetDict.lookupOrAddDefault<Switch>("manifoldEdges", "no");
subsetDict.lookupOrDefault<Switch>("manifoldEdges", "no");
if (manifoldEdges)
{
@ -746,16 +776,6 @@ int main(int argc, char *argv[])
// newSet.writeObj("final");
//}
Info<< nl
<< "Final feature set after trimming and subsetting:" << nl
<< " feature points : " << newSet.featurePoints().size() << nl
<< " feature edges : " << newSet.featureEdges().size() << nl
<< " of which" << nl
<< " region edges : " << newSet.nRegionEdges() << nl
<< " external edges : " << newSet.nExternalEdges() << nl
<< " internal edges : " << newSet.nInternalEdges() << nl
<< endl;
// Extracting and writing a extendedFeatureEdgeMesh
extendedFeatureEdgeMesh feMesh
(
@ -764,6 +784,51 @@ int main(int argc, char *argv[])
sFeatFileName + ".extendedFeatureEdgeMesh"
);
if (surfaceDict.isDict("addFeatures"))
{
const dictionary& subsetDict = surfaceDict.subDict
(
"addFeatures"
);
const word addFeName = subsetDict["name"];
Info<< "Adding (without merging) features from " << addFeName
<< nl << endl;
const Switch flip = subsetDict["flip"];
extendedFeatureEdgeMesh addFeMesh
(
IOobject
(
addFeName,
runTime.time().constant(),
"extendedFeatureEdgeMesh",
runTime.time(),
IOobject::MUST_READ,
IOobject::NO_WRITE
)
);
Info<< "Read " << addFeMesh.name() << nl;
writeStats(addFeMesh, Info);
if (flip)
{
Info<< "Flipping " << addFeMesh.name() << endl;
addFeMesh.flipNormals();
Info<< "After flipping " << addFeMesh.name() << nl;
writeStats(addFeMesh, Info);
}
feMesh.add(addFeMesh);
}
Info<< nl
<< "Final feature set:" << nl;
writeStats(feMesh, Info);
Info<< nl << "Writing extendedFeatureEdgeMesh to "
<< feMesh.objectPath() << endl;

View File

@ -70,6 +70,12 @@ surface2.nas
manifoldEdges no;
}
addFeatures
{
// Add (without merging) another extendedFeatureEdgeMesh
name axZ.extendedFeatureEdgeMesh;
}
// Output the curvature of the surface
curvature no;

View File

@ -1,8 +1,8 @@
EXE_INC = \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/triSurface/lnInclude
-I$(LIB_SRC)/surfMesh/lnInclude
EXE_LIBS = \
-lmeshTools \
-ltriSurface
-lsurfMesh

View File

@ -22,23 +22,29 @@ License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Description
Extracts triSurface from a polyMesh. Triangulates all boundary faces.
Region numbers on triangles are the patch numbers of the polyMesh.
Extracts surface from a polyMesh. Depending on output surface format
triangulates faces.
Region numbers on faces cannot be guaranteed to be the same as the patch
indices.
Optionally only triangulates named patches.
If run in parallel the processor patches get filtered out by default and
the mesh gets merged. (based on vertex position, not topology, so might go
wrong!).
the mesh gets merged (based on topology).
\*---------------------------------------------------------------------------*/
#include "MeshedSurface.H"
#include "UnsortedMeshedSurface.H"
#include "argList.H"
#include "Time.H"
#include "polyMesh.H"
#include "triSurface.H"
#include "triSurfaceTools.H"
#include "processorPolyPatch.H"
#include "ListListOps.H"
#include "uindirectPrimitivePatch.H"
#include "globalMeshData.H"
#include "globalIndex.H"
using namespace Foam;
@ -50,7 +56,7 @@ int main(int argc, char *argv[])
{
argList::addNote
(
"extract surface from a polyMesh and triangulate boundary faces"
"extract surface from a polyMesh"
);
argList::validArgs.append("output file");
#include "addRegionOption.H"
@ -63,27 +69,37 @@ int main(int argc, char *argv[])
(
"patches",
"(patch0 .. patchN)",
"only triangulate named patches"
"only triangulate selected patches (wildcards supported)"
);
#include "setRootCase.H"
#include "createTime.H"
const fileName outFileName(runTime.path()/args[1]);
const word outFileName(args[1]);
Info<< "Extracting triSurface from boundaryMesh ..."
Info<< "Extracting surface from boundaryMesh ..."
<< endl << endl;
Pout<< "Reading mesh from time " << runTime.value() << endl;
#include "createNamedPolyMesh.H"
const bool includeProcPatches =
!(
args.optionFound("excludeProcPatches")
|| Pstream::parRun()
);
if (includeProcPatches)
{
Info<< "Including all processor patches." << nl << endl;
}
else if (Pstream::parRun())
{
Info<< "Excluding all processor patches." << nl << endl;
}
Info<< "Reading mesh from time " << runTime.value() << endl;
#include "createNamedPolyMesh.H"
// Create local surface from:
// - explicitly named patches only (-patches option)
// - all patches (default in sequential mode)
@ -98,24 +114,10 @@ int main(int argc, char *argv[])
if (args.optionFound("patches"))
{
const wordList patchNames
includePatches = bMesh.patchSet
(
args.optionLookup("patches")()
wordReList(args.optionLookup("patches")())
);
forAll(patchNames, patchNameI)
{
const word& patchName = patchNames[patchNameI];
const label patchI = bMesh.findPatchID(patchName);
if (patchI == -1)
{
FatalErrorIn(args.executable()) << "No such patch "
<< patchName << endl << "Patches are " << bMesh.names()
<< exit(FatalError);
}
includePatches.insert(patchI);
}
}
else
{
@ -127,212 +129,155 @@ int main(int argc, char *argv[])
{
includePatches.insert(patchI);
}
else
{
Pout<< patch.name() << " : skipped since processorPatch"
<< endl;
}
}
}
triSurface localSurface
// Collect sizes. Hash on names to handle local-only patches (e.g.
// processor patches)
HashTable<label> patchSize(1000);
label nFaces = 0;
forAllConstIter(labelHashSet, includePatches, iter)
{
const polyPatch& pp = bMesh[iter.key()];
patchSize.insert(pp.name(), pp.size());
nFaces += pp.size();
}
Pstream::mapCombineGather(patchSize, plusEqOp<label>());
// Allocate zone/patch for all patches
HashTable<label> compactZoneID(1000);
forAllConstIter(HashTable<label>, patchSize, iter)
{
label sz = compactZoneID.size();
compactZoneID.insert(iter.key(), sz);
}
Pstream::mapCombineScatter(compactZoneID);
// Rework HashTable into labelList just for speed of conversion
labelList patchToCompactZone(bMesh.size(), -1);
forAllConstIter(HashTable<label>, compactZoneID, iter)
{
label patchI = bMesh.findPatchID(iter.key());
if (patchI != -1)
{
patchToCompactZone[patchI] = iter();
}
}
// Collect faces on zones
DynamicList<label> faceLabels(nFaces);
DynamicList<label> compactZones(nFaces);
forAllConstIter(labelHashSet, includePatches, iter)
{
const polyPatch& pp = bMesh[iter.key()];
forAll(pp, i)
{
faceLabels.append(pp.start()+i);
compactZones.append(patchToCompactZone[pp.index()]);
}
}
// Addressing engine for all faces
uindirectPrimitivePatch allBoundary
(
triSurfaceTools::triangulate
(
mesh.boundaryMesh(),
includePatches
)
UIndirectList<face>(mesh.faces(), faceLabels),
mesh.points()
);
// Find correspondence to master points
labelList pointToGlobal;
labelList uniqueMeshPoints;
autoPtr<globalIndex> globalNumbers = mesh.globalData().mergePoints
(
allBoundary.meshPoints(),
allBoundary.meshPointMap(),
pointToGlobal,
uniqueMeshPoints
);
if (!Pstream::parRun())
// Gather all unique points on master
List<pointField> gatheredPoints(Pstream::nProcs());
gatheredPoints[Pstream::myProcNo()] = pointField
(
mesh.points(),
uniqueMeshPoints
);
Pstream::gatherList(gatheredPoints);
// Gather all faces
List<faceList> gatheredFaces(Pstream::nProcs());
gatheredFaces[Pstream::myProcNo()] = allBoundary.localFaces();
forAll(gatheredFaces[Pstream::myProcNo()], i)
{
Info<< "Writing surface to " << outFileName << endl;
localSurface.write(outFileName);
inplaceRenumber(pointToGlobal, gatheredFaces[Pstream::myProcNo()][i]);
}
else
Pstream::gatherList(gatheredFaces);
// Gather all ZoneIDs
List<labelList> gatheredZones(Pstream::nProcs());
gatheredZones[Pstream::myProcNo()] = compactZones.xfer();
Pstream::gatherList(gatheredZones);
// On master combine all points, faces, zones
if (Pstream::master())
{
// Write local surface
fileName localPath = runTime.path()/runTime.caseName() + ".obj";
pointField allPoints = ListListOps::combine<pointField>
(
gatheredPoints,
accessOp<pointField>()
);
gatheredPoints.clear();
Pout<< "Writing local surface to " << localPath << endl;
faceList allFaces = ListListOps::combine<faceList>
(
gatheredFaces,
accessOp<faceList>()
);
gatheredFaces.clear();
localSurface.write(localPath);
Info<< endl;
labelList allZones = ListListOps::combine<labelList>
(
gatheredZones,
accessOp<labelList>()
);
gatheredZones.clear();
// Gather all points on master
List<pointField> gatheredPoints(Pstream::nProcs());
gatheredPoints[Pstream::myProcNo()] = localSurface.points();
Pstream::gatherList(gatheredPoints);
// Gather all localSurface patches
List<geometricSurfacePatchList> gatheredPatches(Pstream::nProcs());
gatheredPatches[Pstream::myProcNo()] = localSurface.patches();
Pstream::gatherList(gatheredPatches);
// Gather all faces
List<List<labelledTri> > gatheredFaces(Pstream::nProcs());
gatheredFaces[Pstream::myProcNo()] = localSurface;
Pstream::gatherList(gatheredFaces);
if (Pstream::master())
// Zones
surfZoneIdentifierList surfZones(compactZoneID.size());
forAllConstIter(HashTable<label>, compactZoneID, iter)
{
// On master combine all points
pointField allPoints =
ListListOps::combine<pointField>
(
gatheredPoints,
accessOp<pointField>()
);
// Count number of patches.
label nPatches = 0;
forAll(gatheredPatches, procI)
{
nPatches += gatheredPatches[procI].size();
}
// Count number of faces.
label nFaces = 0;
forAll(gatheredFaces, procI)
{
nFaces += gatheredFaces[procI].size();
}
// Loop over all processors and
// - construct mapping from local to global patches
// - relabel faces (both points and regions)
label newPatchI = 0;
// Name to new patchI
HashTable<label> nameToIndex(2*nPatches);
// Storage (oversized) for all patches
geometricSurfacePatchList allPatches(nPatches);
label newFaceI = 0;
// Storage for all faces
List<labelledTri> allFaces(nFaces);
// Offset into allPoints for current processor
label pointOffset = 0;
for (label procI = 0; procI < Pstream::nProcs(); procI++)
{
Info<< "Processor " << procI << endl
<< "-----------" << endl;
const geometricSurfacePatchList& patches =
gatheredPatches[procI];
// From local patch numbering to global
labelList localToGlobal(patches.size());
forAll(patches, patchI)
{
const geometricSurfacePatch& sp = patches[patchI];
if (!nameToIndex.found(sp.name()))
{
nameToIndex.insert(sp.name(), newPatchI);
localToGlobal[patchI] = newPatchI;
allPatches[newPatchI] = sp;
newPatchI++;
}
else
{
localToGlobal[patchI] = nameToIndex[sp.name()];
}
}
Info<< "Local patch to global patch mapping:"
<< endl;
forAll(patches, patchI)
{
Info<< " name : " << patches[patchI].name() << endl
<< " local : " << patchI << endl
<< " global : " << localToGlobal[patchI]
<< endl;
}
Info<< "Local points added in global points starting at "
<< pointOffset
<< endl;
// Collect and relabel faces
const List<labelledTri>& localFaces = gatheredFaces[procI];
forAll(localFaces, faceI)
{
const labelledTri& f = localFaces[faceI];
allFaces[newFaceI++] =
labelledTri
(
f[0] + pointOffset,
f[1] + pointOffset,
f[2] + pointOffset,
localToGlobal[f.region()]
);
}
pointOffset += gatheredPoints[procI].size();
Info<< endl;
}
allPatches.setSize(newPatchI);
// We now have allPoints, allFaces and allPatches.
// Construct overall (yet unmerged) surface from these.
triSurface allSurf(allFaces, allPatches, allPoints);
// Cleanup (which does point merge as well)
allSurf.cleanup(false);
// Write surface mesh
label slashIndex = runTime.caseName().find_last_of('/');
fileName globalCasePath(runTime.caseName().substr(0, slashIndex));
Info<< "Writing merged surface to " << globalCasePath << endl;
// create database for the sequential run
fileName globalPath
(
runTime.rootPath()
/ globalCasePath
/ args[1]
);
allSurf.write(globalPath);
surfZones[iter()] = surfZoneIdentifier(iter.key(), iter());
Info<< "surfZone " << iter() << " : " << surfZones[iter()].name()
<< endl;
}
UnsortedMeshedSurface<face> unsortedFace
(
xferMove(allPoints),
xferMove(allFaces),
xferMove(allZones),
xferMove(surfZones)
);
MeshedSurface<face> sortedFace(unsortedFace);
fileName globalCasePath
(
runTime.processorCase()
? runTime.path()/".."/outFileName
: runTime.path()/outFileName
);
Info<< "Writing merged surface to " << globalCasePath << endl;
sortedFace.write(globalCasePath);
}
Info<< "End\n" << endl;

View File

@ -3,7 +3,7 @@
# ========= |
# \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
# \\ / O peration |
# \\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
# \\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
# \\/ M anipulation |
#------------------------------------------------------------------------------
# License
@ -107,15 +107,23 @@ then
echo "pack manually" 1>&2
foamPackSource $packDir $packFile
else
echo "pack with git-archive" 1>&2
( cd $packDir && git archive --format=tar --prefix=$packDir/ HEAD) > $packBase.tar
if [ ! -f $packDir/.build ]
then
echo "Error: $packDir/.build does not exists" 1>&2
echo " Please update this by running e.g. 'wmakePrintBuild -update' in $packDir" 1>&2
exit 2
fi
echo "add in time-stamp and lnInclude directories" 1>&2
tar cf $packBase.tar2 $packDir/.timeStamp $packDir/.build `find -H $packDir -type d -name lnInclude`
tar Af $packBase.tar $packBase.tar2
echo "gzip tar file" 1>&2
gzip -c9 $packBase.tar > $packFile
echo "pack with git-archive" 1>&2 &&
( cd $packDir && git archive --format=tar --prefix=$packDir/ HEAD) > $packBase.tar &&
echo "add in time-stamp and lnInclude directories" 1>&2 &&
tar cf $packBase.tar2 $packDir/.timeStamp $packDir/.build `find -H $packDir -type d -name lnInclude` &&
tar Af $packBase.tar $packBase.tar2 &&
echo "gzip tar file" 1>&2 &&
gzip -c9 $packBase.tar > $packFile &&
rm -f $packBase.tar $packBase.tar2 2>/dev/null
fi

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -165,8 +165,14 @@ Foam::Istream& Foam::UIPstream::read(token& t)
}
// String
case token::STRING :
case token::VERBATIMSTRING :
{
// Recurse to read actual string
read(t);
t.type() = token::VERBATIMSTRING;
return *this;
}
case token::STRING :
{
string* pval = new string;
if (read(*pval))

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -151,10 +151,19 @@ Foam::UOPstream::~UOPstream()
// * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
Foam::Ostream& Foam::UOPstream::write(const token&)
Foam::Ostream& Foam::UOPstream::write(const token& t)
{
notImplemented("Ostream& UOPstream::write(const token&)");
setBad();
// Raw token output only supported for verbatim strings for now
if (t.type() == token::VERBATIMSTRING)
{
write(char(token::VERBATIMSTRING));
write(t.stringToken());
}
else
{
notImplemented("Ostream& UOPstream::write(const token&)");
setBad();
}
return *this;
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -29,8 +29,16 @@ License
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Foam::Ostream& Foam::OSstream::write(const token&)
Foam::Ostream& Foam::OSstream::write(const token& t)
{
if (t.type() == token::VERBATIMSTRING)
{
write(char(token::HASH));
write(char(token::BEGIN_BLOCK));
writeQuoted(t.stringToken(), false);
write(char(token::HASH));
write(char(token::END_BLOCK));
}
return *this;
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -65,8 +65,16 @@ void Foam::prefixOSstream::print(Ostream& os) const
}
Foam::Ostream& Foam::prefixOSstream::write(const token&)
Foam::Ostream& Foam::prefixOSstream::write(const token& t)
{
if (t.type() == token::VERBATIMSTRING)
{
write(char(token::HASH));
write(char(token::BEGIN_BLOCK));
writeQuoted(t.stringToken(), false);
write(char(token::HASH));
write(char(token::END_BLOCK));
}
return *this;
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -225,9 +225,9 @@ void Foam::primitiveEntry::write(Ostream& os, const bool contentsOnly) const
const token& t = operator[](i);
if (t.type() == token::VERBATIMSTRING)
{
os << token::HASH << token::BEGIN_BLOCK;
os.writeQuoted(t.stringToken(), false);
os << token::HASH << token::END_BLOCK;
// Bypass token output operator to avoid losing verbatimness.
// Handle in Ostreams themselves
os.write(t);
}
else
{
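
Taken together, the UIPstream, UOPstream, OSstream and prefixOSstream changes above let a VERBATIMSTRING token round-trip without losing its type: file-backed streams emit it as #{ ... #}, while parallel streams transmit the raw token. A minimal sketch (the token value is made up; os is assumed to be an OSstream such as an OFstream):

    token t(string("Info<< \"hello\" << endl;"));
    t.type() = token::VERBATIMSTRING;   // mark as verbatim, as in the UIPstream hunk
    os.write(t);                        // emits: #{Info<< "hello" << endl;#}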

View File

@ -59,8 +59,6 @@ void Foam::globalMeshData::syncData
if (slavePoints.size()+nTransformSlavePoints > 0)
{
const labelList& transformSlavePoints = transformedSlaves[i];
// Combine master with untransformed slave data
forAll(slavePoints, j)
{
@ -68,20 +66,27 @@ void Foam::globalMeshData::syncData
}
// Combine master with transformed slave data
forAll(transformSlavePoints, j)
if (nTransformSlavePoints)
{
cop(elem, elems[transformSlavePoints[j]]);
const labelList& transformSlavePoints = transformedSlaves[i];
forAll(transformSlavePoints, j)
{
cop(elem, elems[transformSlavePoints[j]]);
}
}
// Copy result back to slave slots
forAll(slavePoints, j)
{
elems[slavePoints[j]] = elem;
}
forAll(transformSlavePoints, j)
if (nTransformSlavePoints)
{
elems[transformSlavePoints[j]] = elem;
const labelList& transformSlavePoints = transformedSlaves[i];
forAll(transformSlavePoints, j)
{
elems[transformSlavePoints[j]] = elem;
}
}
}
}
@ -125,8 +130,6 @@ void Foam::globalMeshData::syncData
if (slavePoints.size()+nTransformSlavePoints > 0)
{
const labelList& transformSlavePoints = transformedSlaves[i];
// Combine master with untransformed slave data
forAll(slavePoints, j)
{
@ -134,20 +137,27 @@ void Foam::globalMeshData::syncData
}
// Combine master with transformed slave data
forAll(transformSlavePoints, j)
if (nTransformSlavePoints)
{
cop(elem, elems[transformSlavePoints[j]]);
const labelList& transformSlavePoints = transformedSlaves[i];
forAll(transformSlavePoints, j)
{
cop(elem, elems[transformSlavePoints[j]]);
}
}
// Copy result back to slave slots
forAll(slavePoints, j)
{
elems[slavePoints[j]] = elem;
}
forAll(transformSlavePoints, j)
if (nTransformSlavePoints)
{
elems[transformSlavePoints[j]] = elem;
const labelList& transformSlavePoints = transformedSlaves[i];
forAll(transformSlavePoints, j)
{
elems[transformSlavePoints[j]] = elem;
}
}
}
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -554,6 +554,15 @@ public:
const int tag = UPstream::msgType()
);
//- Distribute data using default commsType.
template<class T>
void distribute
(
DynamicList<T>& fld,
const bool dummyTransform = true,
const int tag = UPstream::msgType()
) const;
//- Distribute data using default commsType.
template<class T>
void distribute

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -887,6 +887,25 @@ void Foam::mapDistribute::applyInverseTransforms
}
//- Distribute data using default commsType.
template<class T>
void Foam::mapDistribute::distribute
(
DynamicList<T>& fld,
const bool dummyTransform,
const int tag
) const
{
fld.shrink();
List<T>& fldList = static_cast<List<T>& >(fld);
distribute(fldList, dummyTransform, tag);
fld.setCapacity(fldList.size());
}
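
A short usage sketch of the new DynamicList overload (map and data are hypothetical names; the overload shrinks the list, distributes it as a plain List, then restores the capacity):

    DynamicList<label> data;
    // ... append per-processor values ...
    map.distribute(data);   // shrink(), List<T> distribute, setCapacity()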
//- Distribute data using default commsType.
template<class T>
void Foam::mapDistribute::distribute

View File

@ -1217,7 +1217,7 @@ const Foam::globalMeshData& Foam::polyMesh::globalData() const
// Remove all files and some subdirs (eg, sets)
void Foam::polyMesh::removeFiles(const fileName& instanceDir) const
{
fileName meshFilesPath = thisDb().path()/instanceDir/meshDir();
fileName meshFilesPath = thisDb().time().path()/instanceDir/meshDir();
rm(meshFilesPath/"points");
rm(meshFilesPath/"faces");

View File

@ -634,7 +634,7 @@ bool Foam::processorPolyPatch::order
fileName str
(
boundaryMesh().mesh().time().path()
/name()/name()+"_faces.obj"
/name() + "_faces.obj"
);
Pout<< "processorPolyPatch::order :"
<< " Writing faces to OBJ file " << str.name() << endl;

View File

@ -25,7 +25,6 @@ License
#include "primitiveMesh.H"
#include "pyramidPointFaceRef.H"
#include "tetrahedron.H"
#include "ListOps.H"
#include "unitConversion.H"
#include "SortableList.H"

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -58,7 +58,7 @@ void Foam::ptscotchDecomp::check(const int retVal, const char* str)
{}
Foam::label Foam::ptscotchDecomp::decomposeZeroDomains
Foam::label Foam::ptscotchDecomp::decompose
(
const fileName& meshPath,
const List<int>& initxadj,
@ -72,6 +72,7 @@ Foam::label Foam::ptscotchDecomp::decomposeZeroDomains
(
"label ptscotchDecomp::decompose"
"("
"onst fileName&,"
"const List<int>&, "
"const List<int>&, "
"const scalarField&, "
@ -84,8 +85,10 @@ Foam::label Foam::ptscotchDecomp::decomposeZeroDomains
Foam::label Foam::ptscotchDecomp::decompose
(
const fileName& meshPath,
const List<int>& adjncy,
const List<int>& xadj,
const int adjncySize,
const int adjncy[],
const int xadjSize,
const int xadj[],
const scalarField& cWeights,
List<int>& finalDecomp
) const
@ -94,9 +97,12 @@ Foam::label Foam::ptscotchDecomp::decompose
(
"label ptscotchDecomp::decompose"
"("
"const List<int>&, "
"const List<int>&, "
"const scalarField&, "
"const fileName&,"
"const int,"
"const int,"
"const int,"
"const int,"
"const scalarField&,"
"List<int>&"
")"
) << notImplementedMessage << exit(FatalError);

View File

@ -1,3 +0,0 @@
createShellMesh.C
LIB = $(FOAM_LIBBIN)/libcreateShellMesh

View File

@ -1,9 +0,0 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude -DFULLDEBUG -g -O0
LIB_LIBS = \
-lfiniteVolume \
-lmeshTools \
-ldynamicMesh

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -127,9 +127,7 @@ public:
// A point can be deleted if
// - it is not used by any edge.
// or
// - is not used by an internal edge
// - is used by only two boundary edges. (note that these two
// edges will always be boundary ones!)
// - is used by only two edges
// - these two edges are sufficiently in line (cos > minCos)
// - all processors agree that point can be deleted.
label countPointUsage

View File

@ -33,6 +33,7 @@ License
#include "OFstream.H"
#include "IFstream.H"
#include "unitConversion.H"
#include "DynamicField.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
@ -1059,12 +1060,368 @@ Foam::extendedFeatureEdgeMesh::edgeTreesByType() const
}
void Foam::extendedFeatureEdgeMesh::add(const extendedFeatureEdgeMesh& fem)
{
// Points
// ~~~~~~
// From current points into combined points
labelList reversePointMap(points().size());
labelList reverseFemPointMap(fem.points().size());
label newPointI = 0;
for (label i = 0; i < concaveStart(); i++)
{
reversePointMap[i] = newPointI++;
}
for (label i = 0; i < fem.concaveStart(); i++)
{
reverseFemPointMap[i] = newPointI++;
}
// Concave
label newConcaveStart = newPointI;
for (label i = concaveStart(); i < mixedStart(); i++)
{
reversePointMap[i] = newPointI++;
}
for (label i = fem.concaveStart(); i < fem.mixedStart(); i++)
{
reverseFemPointMap[i] = newPointI++;
}
// Mixed
label newMixedStart = newPointI;
for (label i = mixedStart(); i < nonFeatureStart(); i++)
{
reversePointMap[i] = newPointI++;
}
for (label i = fem.mixedStart(); i < fem.nonFeatureStart(); i++)
{
reverseFemPointMap[i] = newPointI++;
}
// Non-feature
label newNonFeatureStart = newPointI;
for (label i = nonFeatureStart(); i < points().size(); i++)
{
reversePointMap[i] = newPointI++;
}
for (label i = fem.nonFeatureStart(); i < fem.points().size(); i++)
{
reverseFemPointMap[i] = newPointI++;
}
pointField newPoints(newPointI);
newPoints.rmap(points(), reversePointMap);
newPoints.rmap(fem.points(), reverseFemPointMap);
// Edges
// ~~~~~
// From current edges into combined edges
labelList reverseEdgeMap(edges().size());
labelList reverseFemEdgeMap(fem.edges().size());
// External
label newEdgeI = 0;
for (label i = 0; i < internalStart(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
for (label i = 0; i < fem.internalStart(); i++)
{
reverseFemEdgeMap[i] = newEdgeI++;
}
// Internal
label newInternalStart = newEdgeI;
for (label i = internalStart(); i < flatStart(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
for (label i = fem.internalStart(); i < fem.flatStart(); i++)
{
reverseFemEdgeMap[i] = newEdgeI++;
}
// Flat
label newFlatStart = newEdgeI;
for (label i = flatStart(); i < openStart(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
for (label i = fem.flatStart(); i < fem.openStart(); i++)
{
reverseFemEdgeMap[i] = newEdgeI++;
}
// Open
label newOpenStart = newEdgeI;
for (label i = openStart(); i < multipleStart(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
for (label i = fem.openStart(); i < fem.multipleStart(); i++)
{
reverseFemEdgeMap[i] = newEdgeI++;
}
// Multiple
label newMultipleStart = newEdgeI;
for (label i = multipleStart(); i < edges().size(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
for (label i = fem.multipleStart(); i < fem.edges().size(); i++)
{
reverseFemEdgeMap[i] = newEdgeI++;
}
edgeList newEdges(newEdgeI);
forAll(edges(), i)
{
const edge& e = edges()[i];
newEdges[reverseEdgeMap[i]] = edge
(
reversePointMap[e[0]],
reversePointMap[e[1]]
);
}
forAll(fem.edges(), i)
{
const edge& e = fem.edges()[i];
newEdges[reverseFemEdgeMap[i]] = edge
(
reverseFemPointMap[e[0]],
reverseFemPointMap[e[1]]
);
}
pointField newEdgeDirections(newEdgeI);
newEdgeDirections.rmap(edgeDirections(), reverseEdgeMap);
newEdgeDirections.rmap(fem.edgeDirections(), reverseFemEdgeMap);
// Normals
// ~~~~~~~
// Combine normals
DynamicField<point> newNormals(normals().size()+fem.normals().size());
newNormals.append(normals());
newNormals.append(fem.normals());
// Combine and re-index into newNormals
labelListList newEdgeNormals(edgeNormals().size()+fem.edgeNormals().size());
UIndirectList<labelList>(newEdgeNormals, reverseEdgeMap) =
edgeNormals();
UIndirectList<labelList>(newEdgeNormals, reverseFemEdgeMap) =
fem.edgeNormals();
forAll(reverseFemEdgeMap, i)
{
label mapI = reverseFemEdgeMap[i];
labelList& en = newEdgeNormals[mapI];
forAll(en, j)
{
en[j] += edgeNormals().size();
}
}
// Combine and re-index into newFeaturePointNormals
labelListList newFeaturePointNormals
(
featurePointNormals().size()
+ fem.featurePointNormals().size()
);
// Note: featurePointNormals only go up to nonFeatureStart
UIndirectList<labelList>
(
newFeaturePointNormals,
SubList<label>(reversePointMap, featurePointNormals().size())
) = featurePointNormals();
UIndirectList<labelList>
(
newFeaturePointNormals,
SubList<label>(reverseFemPointMap, fem.featurePointNormals().size())
) = fem.featurePointNormals();
forAll(fem.featurePointNormals(), i)
{
label mapI = reverseFemPointMap[i];
labelList& fn = newFeaturePointNormals[mapI];
forAll(fn, j)
{
fn[j] += featurePointNormals().size();
}
}
// Combine regionEdges
DynamicList<label> newRegionEdges
(
regionEdges().size()
+ fem.regionEdges().size()
);
forAll(regionEdges(), i)
{
newRegionEdges.append(reverseEdgeMap[regionEdges()[i]]);
}
forAll(fem.regionEdges(), i)
{
newRegionEdges.append(reverseFemEdgeMap[fem.regionEdges()[i]]);
}
// Assign
// ~~~~~~
// Transfer
concaveStart_ = newConcaveStart;
mixedStart_ = newMixedStart;
nonFeatureStart_ = newNonFeatureStart;
// Reset points and edges
reset(xferMove(newPoints), newEdges.xfer());
// Transfer
internalStart_ = newInternalStart;
flatStart_ = newFlatStart;
openStart_ = newOpenStart;
multipleStart_ = newMultipleStart;
edgeDirections_.transfer(newEdgeDirections);
normals_.transfer(newNormals);
edgeNormals_.transfer(newEdgeNormals);
featurePointNormals_.transfer(newFeaturePointNormals);
regionEdges_.transfer(newRegionEdges);
pointTree_.clear();
edgeTree_.clear();
edgeTreesByType_.clear();
}
void Foam::extendedFeatureEdgeMesh::flipNormals()
{
// Points
// ~~~~~~
// From current points into new points
labelList reversePointMap(identity(points().size()));
// Flip convex and concave points
label newPointI = 0;
// Concave points become convex
for (label i = concaveStart(); i < mixedStart(); i++)
{
reversePointMap[i] = newPointI++;
}
// Convex points become concave
label newConcaveStart = newPointI;
for (label i = 0; i < concaveStart(); i++)
{
reversePointMap[i] = newPointI++;
}
// Edges
// ~~~~~~
// From current edges into new edges
labelList reverseEdgeMap(identity(edges().size()));
// Flip external and internal edges
label newEdgeI = 0;
// Internal become external
for (label i = internalStart(); i < flatStart(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
// External become internal
label newInternalStart = newEdgeI;
for (label i = 0; i < internalStart(); i++)
{
reverseEdgeMap[i] = newEdgeI++;
}
pointField newPoints(points().size());
newPoints.rmap(points(), reversePointMap);
edgeList newEdges(edges().size());
forAll(edges(), i)
{
const edge& e = edges()[i];
newEdges[reverseEdgeMap[i]] = edge
(
reversePointMap[e[0]],
reversePointMap[e[1]]
);
}
// Normals are flipped
// ~~~~~~~~~~~~~~~~~~~
pointField newEdgeDirections(edges().size());
newEdgeDirections.rmap(-1.0*edgeDirections(), reverseEdgeMap);
pointField newNormals(-1.0*normals());
labelListList newEdgeNormals(edgeNormals().size());
UIndirectList<labelList>(newEdgeNormals, reverseEdgeMap) = edgeNormals();
labelListList newFeaturePointNormals(featurePointNormals().size());
// Note: featurePointNormals only go up to nonFeatureStart
UIndirectList<labelList>
(
newFeaturePointNormals,
SubList<label>(reversePointMap, featurePointNormals().size())
) = featurePointNormals();
labelList newRegionEdges(regionEdges().size());
forAll(regionEdges(), i)
{
newRegionEdges[i] = reverseEdgeMap[regionEdges()[i]];
}
// Transfer
concaveStart_ = newConcaveStart;
// Reset points and edges
reset(xferMove(newPoints), newEdges.xfer());
// Transfer
internalStart_ = newInternalStart;
edgeDirections_.transfer(newEdgeDirections);
normals_.transfer(newNormals);
edgeNormals_.transfer(newEdgeNormals);
featurePointNormals_.transfer(newFeaturePointNormals);
regionEdges_.transfer(newRegionEdges);
pointTree_.clear();
edgeTree_.clear();
edgeTreesByType_.clear();
}
void Foam::extendedFeatureEdgeMesh::writeObj
(
const fileName& prefix
) const
{
Pout<< nl << "Writing extendedFeatureEdgeMesh components to " << prefix
Info<< nl << "Writing extendedFeatureEdgeMesh components to " << prefix
<< endl;
label verti = 0;
@ -1072,7 +1429,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
edgeMesh::write(prefix + "_edgeMesh.obj");
OFstream convexFtPtStr(prefix + "_convexFeaturePts.obj");
Pout<< "Writing convex feature points to " << convexFtPtStr.name() << endl;
Info<< "Writing convex feature points to " << convexFtPtStr.name() << endl;
for(label i = 0; i < concaveStart_; i++)
{
@ -1080,7 +1437,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream concaveFtPtStr(prefix + "_concaveFeaturePts.obj");
Pout<< "Writing concave feature points to "
Info<< "Writing concave feature points to "
<< concaveFtPtStr.name() << endl;
for(label i = concaveStart_; i < mixedStart_; i++)
@ -1089,7 +1446,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream mixedFtPtStr(prefix + "_mixedFeaturePts.obj");
Pout<< "Writing mixed feature points to " << mixedFtPtStr.name() << endl;
Info<< "Writing mixed feature points to " << mixedFtPtStr.name() << endl;
for(label i = mixedStart_; i < nonFeatureStart_; i++)
{
@ -1097,7 +1454,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream mixedFtPtStructureStr(prefix + "_mixedFeaturePtsStructure.obj");
Pout<< "Writing mixed feature point structure to "
Info<< "Writing mixed feature point structure to "
<< mixedFtPtStructureStr.name() << endl;
verti = 0;
@ -1116,7 +1473,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream externalStr(prefix + "_externalEdges.obj");
Pout<< "Writing external edges to " << externalStr.name() << endl;
Info<< "Writing external edges to " << externalStr.name() << endl;
verti = 0;
for (label i = externalStart_; i < internalStart_; i++)
@ -1129,7 +1486,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream internalStr(prefix + "_internalEdges.obj");
Pout<< "Writing internal edges to " << internalStr.name() << endl;
Info<< "Writing internal edges to " << internalStr.name() << endl;
verti = 0;
for (label i = internalStart_; i < flatStart_; i++)
@ -1142,7 +1499,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream flatStr(prefix + "_flatEdges.obj");
Pout<< "Writing flat edges to " << flatStr.name() << endl;
Info<< "Writing flat edges to " << flatStr.name() << endl;
verti = 0;
for (label i = flatStart_; i < openStart_; i++)
@ -1155,7 +1512,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream openStr(prefix + "_openEdges.obj");
Pout<< "Writing open edges to " << openStr.name() << endl;
Info<< "Writing open edges to " << openStr.name() << endl;
verti = 0;
for (label i = openStart_; i < multipleStart_; i++)
@ -1168,7 +1525,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream multipleStr(prefix + "_multipleEdges.obj");
Pout<< "Writing multiple edges to " << multipleStr.name() << endl;
Info<< "Writing multiple edges to " << multipleStr.name() << endl;
verti = 0;
for (label i = multipleStart_; i < edges().size(); i++)
@ -1181,7 +1538,7 @@ void Foam::extendedFeatureEdgeMesh::writeObj
}
OFstream regionStr(prefix + "_regionEdges.obj");
Pout<< "Writing region edges to " << regionStr.name() << endl;
Info<< "Writing region edges to " << regionStr.name() << endl;
verti = 0;
forAll(regionEdges_, i)
@ -1197,23 +1554,26 @@ void Foam::extendedFeatureEdgeMesh::writeObj
bool Foam::extendedFeatureEdgeMesh::writeData(Ostream& os) const
{
os << "// points, edges, concaveStart, mixedStart, nonFeatureStart, " << nl
<< "// internalStart, flatStart, openStart, multipleStart, " << nl
<< "// normals, edgeNormals, featurePointNormals, regionEdges" << nl
<< endl;
os << points() << nl
os << "// points" << nl
<< points() << nl
<< "// edges" << nl
<< edges() << nl
<< "// concaveStart mixedStart nonFeatureStart" << nl
<< concaveStart_ << token::SPACE
<< mixedStart_ << token::SPACE
<< nonFeatureStart_ << token::SPACE
<< nonFeatureStart_ << nl
<< "// internalStart flatStart openStart multipleStart" << nl
<< internalStart_ << token::SPACE
<< flatStart_ << token::SPACE
<< openStart_ << token::SPACE
<< multipleStart_ << nl
<< "// normals" << nl
<< normals_ << nl
<< "// edgeNormals" << nl
<< edgeNormals_ << nl
<< "// featurePointNormals" << nl
<< featurePointNormals_ << nl
<< "// regionEdges" << nl
<< regionEdges_
<< endl;

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -30,8 +30,8 @@ Description
Feature points are a sorted subset at the start of the overall points list:
0 .. concaveStart_-1 : convex points (w.r.t normals)
concaveStart_-1 .. mixedStart_-1 : concave points
mixedStart_ .. nonFeatureStart_ : mixed internal/external points
concaveStart_ .. mixedStart_-1 : concave points
mixedStart_ .. nonFeatureStart_-1 : mixed internal/external points
nonFeatureStart_ .. size-1 : non-feature points
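A minimal traversal sketch (illustrative, not part of this change; feMesh is an assumed extendedFeatureEdgeMesh instance) showing how the sorted point ranges above are visited using the start indices:
    // Convex feature points
    for (label i = 0; i < feMesh.concaveStart(); i++)
    {
        const point& p = feMesh.points()[i];
        // ... use convex point p
    }
    // Concave feature points
    for (label i = feMesh.concaveStart(); i < feMesh.mixedStart(); i++)
    {
        // ...
    }
    // Mixed internal/external feature points
    for (label i = feMesh.mixedStart(); i < feMesh.nonFeatureStart(); i++)
    {
        // ...
    }
    // Non-feature points
    for (label i = feMesh.nonFeatureStart(); i < feMesh.points().size(); i++)
    {
        // ...
    }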
Feature edges are the edgeList of the edgeMesh and are sorted:
@ -151,6 +151,7 @@ private:
labelListList edgeNormals_;
//- Indices of the normals that are adjacent to the feature points
// (only valid for 0..nonFeatureStart_-1)
labelListList featurePointNormals_;
//- Feature edges which are on the boundary between regions
@ -212,9 +213,10 @@ public:
const Xfer<edgeList>&
);
//- Construct (read) given surfaceFeatures, an objectRegistry and a
// fileName to write to. Extracts, classifies and reorders the data
// from surfaceFeatures.
//- Construct given a surface with selected edges and points
// (surfaceFeatures), an objectRegistry and a
// fileName to write to.
// Extracts, classifies and reorders the data from surfaceFeatures.
extendedFeatureEdgeMesh
(
const surfaceFeatures& sFeat,
@ -381,6 +383,16 @@ public:
edgeTreesByType() const;
// Edit
//- Add extendedFeatureEdgeMesh. No filtering of duplicates.
void add(const extendedFeatureEdgeMesh&);
//- Flip normals. All concave become convex, all internal external
// etc.
void flipNormals();
// Write
//- Write all components of the extendedFeatureEdgeMesh as obj files

View File

@ -83,7 +83,8 @@ Foam::pressureGradientExplicitSource::pressureGradientExplicitSource
Ubar_(coeffs_.lookup("Ubar")),
gradPini_(coeffs_.lookup("gradPini")),
gradP_(gradPini_),
flowDir_(Ubar_/mag(Ubar_))
flowDir_(Ubar_/mag(Ubar_)),
invAPtr_(NULL)
{
coeffs_.lookup("fieldNames") >> fieldNames_;
@ -124,36 +125,9 @@ Foam::pressureGradientExplicitSource::pressureGradientExplicitSource
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void Foam::pressureGradientExplicitSource::addSup
(
fvMatrix<vector>& eqn,
const label fieldI
)
{
DimensionedField<vector, volMesh> Su
(
IOobject
(
name_ + fieldNames_[fieldI] + "Sup",
mesh_.time().timeName(),
mesh_,
IOobject::NO_READ,
IOobject::NO_WRITE
),
mesh_,
dimensionedVector("zero", gradP_.dimensions(), vector::zero)
);
UIndirectList<vector>(Su, cells_) = flowDir_*gradP_.value();
eqn += Su;
}
void Foam::pressureGradientExplicitSource::correct(volVectorField& U)
{
const volScalarField& rAU =
mesh_.lookupObject<volScalarField>("(1|A(" + U.name() + "))");
const scalarField& rAU = invAPtr_().internalField();
// Integrate flow variables over cell set
scalar magUbarAve = 0.0;
@ -196,4 +170,61 @@ void Foam::pressureGradientExplicitSource::correct(volVectorField& U)
}
void Foam::pressureGradientExplicitSource::addSup
(
fvMatrix<vector>& eqn,
const label fieldI
)
{
DimensionedField<vector, volMesh> Su
(
IOobject
(
name_ + fieldNames_[fieldI] + "Sup",
mesh_.time().timeName(),
mesh_,
IOobject::NO_READ,
IOobject::NO_WRITE
),
mesh_,
dimensionedVector("zero", gradP_.dimensions(), vector::zero)
);
UIndirectList<vector>(Su, cells_) = flowDir_*gradP_.value();
eqn += Su;
}
void Foam::pressureGradientExplicitSource::setValue
(
fvMatrix<vector>& eqn,
const label
)
{
if (invAPtr_.empty())
{
invAPtr_.reset
(
new volScalarField
(
IOobject
(
name_ + "::invA",
mesh_.time().timeName(),
mesh_,
IOobject::NO_READ,
IOobject::NO_WRITE
),
1.0/eqn.A()
)
);
}
else
{
invAPtr_() = 1.0/eqn.A();
}
}
// ************************************************************************* //

View File

@ -33,7 +33,7 @@ Description
pressureGradientExplicitSourceCoeffs
{
UName U; // name of velocity field
fieldNames (U); // names of fields to which the source is applied
Ubar (10.0 0 0); // desired average velocity
gradPini gradPini [0 2 -2 0 0] 0; // initial pressure gradient
flowDir (1 0 0); // flow direction
@ -82,6 +82,9 @@ class pressureGradientExplicitSource
//- Flow direction
vector flowDir_;
//- Matrix 1/A coefficients field pointer
autoPtr<volScalarField> invAPtr_;
// Private Member Functions
@ -126,6 +129,13 @@ public:
//- Add explicit contribution to equation
virtual void addSup(fvMatrix<vector>& eqn, const label fieldI);
//- Set 1/A coefficient
virtual void setValue
(
fvMatrix<vector>& eqn,
const label fieldI
);
// I-O

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -47,33 +47,34 @@ void Foam::bladeModel::interpolateWeights
scalar& ddx
) const
{
scalar x = -GREAT;
i2 = 0;
label nElem = values.size();
i2 = 0;
while ((x < xIn) && (i2 < nElem))
if (nElem == 1)
{
x = values[i2];
i2++;
}
if (i2 == 0)
{
i1 = i2;
ddx = 0.0;
return;
}
else if (i2 == values.size())
{
i2 = values.size() - 1;
i1 = i2;
ddx = 0.0;
return;
}
else
{
i1 = i2 - 1;
ddx = (xIn - values[i1])/(values[i2] - values[i1]);
while ((values[i2] < xIn) && (i2 < nElem))
{
i2++;
}
if (i2 == nElem)
{
i2 = nElem - 1;
i1 = i2;
ddx = 0.0;
return;
}
else
{
i1 = i2 - 1;
ddx = (xIn - values[i1])/(values[i2] - values[i1]);
}
}
}
@ -120,14 +121,8 @@ Foam::bladeModel::bladeModel(const dictionary& dict)
}
else
{
FatalErrorIn
(
"Foam::bladeModel::bladeModel"
"("
"const dictionary&, "
"const word&"
")"
) << "No blade data specified" << exit(FatalError);
FatalErrorIn("Foam::bladeModel::bladeModel(const dictionary&)")
<< "No blade data specified" << exit(FatalError);
}
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -49,33 +49,34 @@ void Foam::lookupProfile::interpolateWeights
scalar& ddx
) const
{
scalar x = -GREAT;
i2 = 0;
label nElem = values.size();
i2 = 0;
while ((x < xIn) && (i2 < nElem))
if (nElem == 1)
{
x = values[i2];
i2++;
}
if (i2 == 0)
{
i1 = i2;
ddx = 0.0;
return;
}
else if (i2 == values.size())
{
i2 = values.size() - 1;
i1 = i2;
ddx = 0.0;
return;
}
else
{
i1 = i2 - 1;
ddx = (xIn - values[i1])/(values[i2] - values[i1]);
while ((values[i2] < xIn) && (i2 < nElem))
{
i2++;
}
if (i2 == nElem)
{
i2 = nElem - 1;
i1 = i2;
ddx = 0.0;
return;
}
else
{
i1 = i2 - 1;
ddx = (xIn - values[i1])/(values[i2] - values[i1]);
}
}
}

View File

@ -38,7 +38,7 @@ namespace Foam
// * * * * * * * * * * * * Protected Member Functions * * * * * * * * * * * //
Foam::scalar Foam::seriesProfile::evaluate
Foam::scalar Foam::seriesProfile::evaluateDrag
(
const scalar& xIn,
const List<scalar>& values
@ -48,7 +48,26 @@ Foam::scalar Foam::seriesProfile::evaluate
forAll(values, i)
{
result += values[i]*sin((i + 1)*xIn);
result += values[i]*cos(i*xIn);
}
return result;
}
Foam::scalar Foam::seriesProfile::evaluateLift
(
const scalar& xIn,
const List<scalar>& values
) const
{
scalar result = 0.0;
forAll(values, i)
{
// note: the first contribution is always zero since sin(0) = 0, but
// keep the zero base to be consistent with the drag coeffs
result += values[i]*sin(i*xIn);
}
return result;
@ -108,8 +127,8 @@ Foam::seriesProfile::seriesProfile
void Foam::seriesProfile::Cdl(const scalar alpha, scalar& Cd, scalar& Cl) const
{
Cd = evaluate(alpha, CdCoeffs_);
Cl = evaluate(alpha, ClCoeffs_);
Cd = evaluateDrag(alpha, CdCoeffs_);
Cl = evaluateLift(alpha, ClCoeffs_);
}

View File

@ -28,7 +28,7 @@ Description
Series-based profile data - drag and lift coefficients computed as
sums of a cosine series (drag) and a sine series (lift)
Cd = sum_i(CdCoeff)*sin(i*AOA)
Cd = sum_i(CdCoeff)*cos(i*AOA)
Cl = sum_i(ClCoeff)*sin(i*AOA)
where:
@ -79,12 +79,21 @@ protected:
// Protected Member Functions
//- Evaluate
scalar evaluate
(
const scalar& xIn,
const List<scalar>& values
) const;
// Evaluate
//- Drag
scalar evaluateDrag
(
const scalar& xIn,
const List<scalar>& values
) const;
//- Lift
scalar evaluateLift
(
const scalar& xIn,
const List<scalar>& values
) const;
public:

View File

@ -67,6 +67,7 @@ namespace Foam
void Foam::rotorDiskSource::checkData()
{
// set inflow type
switch (selectionMode())
{
case smCellSet:
@ -129,11 +130,10 @@ void Foam::rotorDiskSource::setFaceArea(vector& axis, const bool correct)
const label nInternalFaces = mesh_.nInternalFaces();
const polyBoundaryMesh& pbm = mesh_.boundaryMesh();
const vectorField& Sf = mesh_.Sf().internalField();
const scalarField& magSf = mesh_.magSf().internalField();
const vectorField& Sf = mesh_.Sf();
const scalarField& magSf = mesh_.magSf();
vector n = vector::zero;
label nFace = 0;
// calculate cell addressing for selected cells
labelList cellAddr(mesh_.nCells(), -1);
@ -158,7 +158,6 @@ void Foam::rotorDiskSource::setFaceArea(vector& axis, const bool correct)
// correct for parallel running
syncTools::swapBoundaryFaceList(mesh_, nbrFaceCellAddr);
// add internal field contributions
for (label faceI = 0; faceI < nInternalFaces; faceI++)
{
@ -173,7 +172,6 @@ void Foam::rotorDiskSource::setFaceArea(vector& axis, const bool correct)
{
area_[own] += magSf[faceI];
n += Sf[faceI];
nFace++;
}
}
else if ((own == -1) && (nbr != -1))
@ -184,7 +182,6 @@ void Foam::rotorDiskSource::setFaceArea(vector& axis, const bool correct)
{
area_[nbr] += magSf[faceI];
n -= Sf[faceI];
nFace++;
}
}
}
@ -210,7 +207,6 @@ void Foam::rotorDiskSource::setFaceArea(vector& axis, const bool correct)
{
area_[own] += magSfp[j];
n += Sfp[j];
nFace++;
}
}
}
@ -222,11 +218,10 @@ void Foam::rotorDiskSource::setFaceArea(vector& axis, const bool correct)
const label own = cellAddr[mesh_.faceOwner()[faceI]];
const vector nf = Sfp[j]/magSfp[j];
if ((own != -1) && ((nf & axis) > 0.8))
if ((own != -1) && ((nf & axis) > tol))
{
area_[own] += magSfp[j];
n += Sfp[j];
nFace++;
}
}
}
@ -359,32 +354,30 @@ void Foam::rotorDiskSource::constructGeometry()
forAll(cells_, i)
{
const label cellI = cells_[i];
// position in (planar) rotor co-ordinate system
x_[i] = coordSys_.localPosition(C[cellI]);
// cache max radius
rMax_ = max(rMax_, x_[i].x());
// swept angle relative to rDir axis [radians] in range 0 -> 2*pi
scalar psi = x_[i].y();
if (psi < 0)
if (area_[i] > ROOTVSMALL)
{
psi += mathematical::twoPi;
const label cellI = cells_[i];
// position in (planar) rotor co-ordinate system
x_[i] = coordSys_.localPosition(C[cellI]);
// cache max radius
rMax_ = max(rMax_, x_[i].x());
// swept angle relative to rDir axis [radians] in range 0 -> 2*pi
scalar psi = x_[i].y();
// blade flap angle [radians]
scalar beta =
flap_.beta0 - flap_.beta1*cos(psi) - flap_.beta2*sin(psi);
// determine rotation tensor to convert from planar system into the
// rotor cone system
scalar c = cos(beta);
scalar s = sin(beta);
R_[i] = tensor(c, 0, -s, 0, 1, 0, s, 0, c);
invR_[i] = R_[i].T();
}
// blade flap angle [radians]
scalar beta = flap_.beta0 - flap_.beta1*cos(psi) - flap_.beta2*sin(psi);
// determine rotation tensor to convert from planar system into the
// rotor cone system
scalar cNeg = cos(-beta);
scalar sNeg = sin(-beta);
R_[i] = tensor(cNeg, 0.0, -sNeg, 0.0, 1.0, 0.0, sNeg, 0.0, cNeg);
scalar cPos = cos(beta);
scalar sPos = sin(beta);
invR_[i] = tensor(cPos, 0.0, -sPos, 0.0, 1.0, 0.0, sPos, 0.0, cPos);
}
}
@ -440,21 +433,22 @@ Foam::rotorDiskSource::rotorDiskSource
:
basicSource(name, modelType, dict, mesh),
rhoName_("none"),
rhoRef_(1.0),
omega_(0.0),
nBlades_(0),
inletFlow_(ifLocal),
inletVelocity_(vector::zero),
tipEffect_(1.0),
flap_(),
trim_(trimModel::New(*this, coeffs_)),
blade_(coeffs_.subDict("blade")),
profiles_(coeffs_.subDict("profiles")),
x_(cells_.size(), vector::zero),
R_(cells_.size(), I),
invR_(cells_.size(), I),
area_(cells_.size(), 0.0),
coordSys_(false),
rMax_(0.0)
rMax_(0.0),
trim_(trimModel::New(*this, coeffs_)),
blade_(coeffs_.subDict("blade")),
profiles_(coeffs_.subDict("profiles"))
{
read(dict);
}
@ -477,13 +471,15 @@ void Foam::rotorDiskSource::calculate
const bool output
) const
{
tmp<volScalarField> trho;
if (rhoName_ != "none")
{
trho = mesh_.lookupObject<volScalarField>(rhoName_);
}
const scalarField& V = mesh_.V();
const bool compressible = rhoName_ != "none";
tmp<volScalarField> trho
(
compressible
? mesh_.lookupObject<volScalarField>(rhoName_)
: volScalarField::null()
);
// logging info
@ -503,17 +499,17 @@ void Foam::rotorDiskSource::calculate
// velocity in local cylindrical reference frame
vector Uc = coordSys_.localVector(U[cellI]);
// apply correction in local system due to coning
// transform from rotor cylindrical into local coning system
Uc = R_[i] & Uc;
// set radial component of velocity to zero
Uc.x() = 0.0;
// remove blade linear velocity from blade normal component
Uc.y() -= radius*omega_;
// set blade normal component of velocity
Uc.y() = radius*omega_ - Uc.y();
// determine blade data for this radius
// i1 = index of upper bound data point in blade list
// i2 = index of upper radius bound data point in blade list
scalar twist = 0.0;
scalar chord = 0.0;
label i1 = -1;
@ -521,18 +517,15 @@ void Foam::rotorDiskSource::calculate
scalar invDr = 0.0;
blade_.interpolate(radius, twist, chord, i1, i2, invDr);
// effective angle of attack
scalar alphaEff =
mathematical::pi + atan2(Uc.z(), Uc.y()) - (alphag[i] + twist);
// flip geometric angle if blade is spinning in reverse (clockwise)
scalar alphaGeom = alphag[i] + twist;
if (omega_ < 0)
{
alphaGeom = mathematical::pi - alphaGeom;
}
if (alphaEff > mathematical::pi)
{
alphaEff -= mathematical::twoPi;
}
if (alphaEff < -mathematical::pi)
{
alphaEff += mathematical::twoPi;
}
// effective angle of attack
scalar alphaEff = alphaGeom - atan2(-Uc.z(), Uc.y());
AOAmin = min(AOAmin, alphaEff);
AOAmax = max(AOAmax, alphaEff);
@ -557,21 +550,21 @@ void Foam::rotorDiskSource::calculate
// calculate forces perpendicular to blade
scalar pDyn = 0.5*magSqr(Uc);
if (trho.valid())
if (compressible)
{
pDyn *= trho()[cellI];
}
scalar f = pDyn*chord*nBlades_*area_[i]/mathematical::twoPi;
vector localForce = vector(0.0, f*Cd, tipFactor*f*Cl);
scalar f = pDyn*chord*nBlades_*area_[i]/radius/mathematical::twoPi;
vector localForce = vector(0.0, -f*Cd, tipFactor*f*Cl);
// accumulate forces
dragEff += rhoRef_*localForce.y();
liftEff += rhoRef_*localForce.z();
// convert force from local coning system into rotor cylindrical
localForce = invR_[i] & localForce;
// accumulate forces
dragEff += localForce.y();
liftEff += localForce.z();
// convert force to global cartesian co-ordinate system
force[cellI] = coordSys_.globalVector(localForce);
@ -609,6 +602,7 @@ void Foam::rotorDiskSource::addSup(fvMatrix<vector>& eqn, const label fieldI)
}
else
{
coeffs_.lookup("rhoRef") >> rhoRef_;
dims.reset(dimForce/dimVolume/dimDensity);
}
@ -659,6 +653,8 @@ bool Foam::rotorDiskSource::read(const dictionary& dict)
coeffs_.lookup("fieldNames") >> fieldNames_;
applied_.setSize(fieldNames_.size(), false);
// read co-ordinate system/geometry invariant properties
scalar rpm(readScalar(coeffs_.lookup("rpm")));
omega_ = rpm/60.0*mathematical::twoPi;
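// (illustrative check, not part of this change: a dictionary entry of
//  "rpm 2100;" gives omega_ = 2100/60*2*pi, i.e. approximately 219.9 rad/s)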
@ -676,14 +672,17 @@ bool Foam::rotorDiskSource::read(const dictionary& dict)
flap_.beta1 = degToRad(flap_.beta1);
flap_.beta2 = degToRad(flap_.beta2);
trim_->read(coeffs_);
checkData();
// create co-ordinate system
createCoordinateSystem();
// read co-ordinate system dependent properties
checkData();
constructGeometry();
trim_->read(coeffs_);
if (debug)
{
writeField("alphag", trim_->thetag()(), true);

View File

@ -148,7 +148,11 @@ protected:
//- Name of density field
word rhoName_;
//- Reference density if rhoName = 'none'
scalar rhoRef_;
//- Rotational speed [rad/s]
// Positive anti-clockwise when looking along -ve lift direction
scalar omega_;
//- Number of blades
@ -167,15 +171,6 @@ protected:
//- Blade flap coefficients [rad/s]
flapData flap_;
//- Trim model
autoPtr<trimModel> trim_;
//- Blade data
bladeModel blade_;
//- Profile data
profileModelList profiles_;
//- Cell centre positions in local rotor frame
// (Cylindrical r, theta, z)
List<point> x_;
@ -195,6 +190,15 @@ protected:
//- Maximum radius
scalar rMax_;
//- Trim model
autoPtr<trimModel> trim_;
//- Blade data
bladeModel blade_;
//- Profile data
profileModelList profiles_;
// Protected Member Functions

View File

@ -67,15 +67,10 @@ void Foam::fixedTrim::read(const dictionary& dict)
scalar theta1c = degToRad(readScalar(coeffs_.lookup("theta1c")));
scalar theta1s = degToRad(readScalar(coeffs_.lookup("theta1s")));
const List<vector>& x = rotor_.x();
const List<point>& x = rotor_.x();
forAll(thetag_, i)
{
scalar psi = x[i].y();
if (psi < 0)
{
psi += mathematical::twoPi;
}
thetag_[i] = theta0 + theta1c*cos(psi) + theta1s*sin(psi);
}
}

View File

@ -142,11 +142,6 @@ Foam::tmp<Foam::scalarField> Foam::targetForceTrim::thetag() const
forAll(t, i)
{
scalar psi = x[i].y();
if (psi < 0)
{
psi += mathematical::twoPi;
}
t[i] = theta_[0] + theta_[1]*cos(psi) + theta_[2]*sin(psi);
}

View File

@ -174,6 +174,7 @@ $(derivedFvPatchFields)/waveSurfacePressure/waveSurfacePressureFvPatchScalarFiel
$(derivedFvPatchFields)/phaseHydrostaticPressure/phaseHydrostaticPressureFvPatchScalarField.C
$(derivedFvPatchFields)/variableHeightFlowRate/variableHeightFlowRateFvPatchField.C
$(derivedFvPatchFields)/variableHeightFlowRateInletVelocity/variableHeightFlowRateInletVelocityFvPatchVectorField.C
$(derivedFvPatchFields)/temperatureJump/temperatureJumpFvPatchScalarField.C
fvsPatchFields = fields/fvsPatchFields
$(fvsPatchFields)/fvsPatchField/fvsPatchFields.C

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -48,7 +48,7 @@ void wedgeFvPatchField<scalar>::evaluate(const Pstream::commsTypes)
updateCoeffs();
}
operator==(patchInternalField());
this->operator==(patchInternalField());
}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -132,7 +132,7 @@ void Foam::cylindricalInletVelocityFvPatchVectorField::updateCoeffs()
vector hatAxis = axis_/mag(axis_);
const vectorField r(patch().Cf() - centre_);
tmp<vectorField> d = r - (hatAxis & r)*hatAxis;
const vectorField d = r - (hatAxis & r)*hatAxis;
tmp<vectorField> tangVel
(

View File

@ -129,11 +129,19 @@ void Foam::flowRateInletVelocityFvPatchVectorField::updateCoeffs()
}
else if (phi.dimensions() == dimDensity*dimVelocity*dimArea)
{
const fvPatchField<scalar>& rhop =
patch().lookupPatchField<volScalarField, scalar>(rhoName_);
if (rhoName_ == "none")
{
// volumetric flow-rate if density not given
operator==(n*avgU);
}
else
{
// mass flow-rate
const fvPatchField<scalar>& rhop =
patch().lookupPatchField<volScalarField, scalar>(rhoName_);
// mass flow-rate
operator==(n*avgU/rhop);
operator==(n*avgU/rhop);
}
}
else
{

View File

@ -30,8 +30,10 @@ Description
The basis of the patch (volumetric or mass) is determined by the
dimensions of the flux, phi.
The current density is used to correct the velocity when applying the
mass basis.
If the flux is mass-based
- the current density is used to correct the velocity
- volumetric flow rate can be applied by setting the 'rho' entry to 'none'
Example of the boundary condition specification:
\verbatim
@ -39,6 +41,7 @@ Description
{
type flowRateInletVelocity;
flowRate 0.2; // Volumetric/mass flow rate [m3/s or kg/s]
rho rho; // none | rho [m3/s or kg/s]
value uniform (0 0 0); // placeholder
}
\endverbatim
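A hedged sketch of the volumetric variant enabled by this change (the patch name and values are illustrative only):
\verbatim
inlet
{
    type            flowRateInletVelocity;
    flowRate        0.2;                // interpreted as a volumetric rate
    rho             none;               // skip the density correction
    value           uniform (0 0 0);    // placeholder
}
\endverbatim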

View File

@ -0,0 +1,129 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "addToRunTimeSelectionTable.H"
#include "temperatureJumpFvPatchScalarField.H"
#include "volFields.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::temperatureJumpFvPatchScalarField::temperatureJumpFvPatchScalarField
(
const fvPatch& p,
const DimensionedField<scalar, volMesh>& iF
)
:
fixedJumpFvPatchField<scalar>(p, iF),
jumpTable_(0)
{}
Foam::temperatureJumpFvPatchScalarField::temperatureJumpFvPatchScalarField
(
const temperatureJumpFvPatchScalarField& ptf,
const fvPatch& p,
const DimensionedField<scalar, volMesh>& iF,
const fvPatchFieldMapper& mapper
)
:
fixedJumpFvPatchField<scalar>(ptf, p, iF, mapper),
jumpTable_(ptf.jumpTable_().clone().ptr())
{}
Foam::temperatureJumpFvPatchScalarField::temperatureJumpFvPatchScalarField
(
const fvPatch& p,
const DimensionedField<scalar, volMesh>& iF,
const dictionary& dict
)
:
fixedJumpFvPatchField<scalar>(p, iF),
jumpTable_(new DataEntry<scalar>("jumpTable"))
{
if (this->cyclicPatch().owner())
{
jumpTable_ = DataEntry<scalar>::New("jumpTable", dict);
}
if (dict.found("value"))
{
fvPatchScalarField::operator=
(
scalarField("value", dict, p.size())
);
}
}
Foam::temperatureJumpFvPatchScalarField::temperatureJumpFvPatchScalarField
(
const temperatureJumpFvPatchScalarField& ptf
)
:
cyclicLduInterfaceField(),
fixedJumpFvPatchField<scalar>(ptf),
jumpTable_(ptf.jumpTable_().clone().ptr())
{}
Foam::temperatureJumpFvPatchScalarField::temperatureJumpFvPatchScalarField
(
const temperatureJumpFvPatchScalarField& ptf,
const DimensionedField<scalar, volMesh>& iF
)
:
fixedJumpFvPatchField<scalar>(ptf, iF),
jumpTable_(ptf.jumpTable_().clone().ptr())
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void Foam::temperatureJumpFvPatchScalarField::write(Ostream& os) const
{
fixedJumpFvPatchField<scalar>::write(os);
if (this->cyclicPatch().owner())
{
jumpTable_->writeData(os);
}
this->writeEntry("value", os);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
makePatchTypeField
(
fvPatchScalarField,
temperatureJumpFvPatchScalarField
);
}
// ************************************************************************* //

View File

@ -0,0 +1,159 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::temperatureJumpFvPatchScalarField
Description
Introduces a jump in temperature across a cyclic patch, for example:
front
{
type temperatureJump;
patchType cyclic;
jumpTable constant 100;
value uniform 300;
}
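Since jumpTable is a DataEntry, a time-varying jump can also be prescribed; a hedged sketch (table values are illustrative, assuming the standard DataEntry 'table' format):
front
{
    type            temperatureJump;
    patchType       cyclic;
    jumpTable       table ((0 0) (0.5 50) (1 100));
    value           uniform 300;
}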
SourceFiles
temperatureJumpFvPatchScalarField.C
\*---------------------------------------------------------------------------*/
#ifndef temperatureJumpFvPatchScalarField_H
#define temperatureJumpFvPatchScalarField_H
#include "fixedJumpFvPatchField.H"
#include "DataEntry.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class temperatureJumpFvPatchScalarField Declaration
\*---------------------------------------------------------------------------*/
class temperatureJumpFvPatchScalarField
:
public fixedJumpFvPatchField<scalar>
{
// Private data
//- Interpolation table
autoPtr<DataEntry<scalar> > jumpTable_;
public:
//- Runtime type information
TypeName("temperatureJump");
// Constructors
//- Construct from patch and internal field
temperatureJumpFvPatchScalarField
(
const fvPatch&,
const DimensionedField<scalar, volMesh>&
);
//- Construct from patch, internal field and dictionary
temperatureJumpFvPatchScalarField
(
const fvPatch&,
const DimensionedField<scalar, volMesh>&,
const dictionary&
);
//- Construct by mapping given temperatureJumpFvPatchScalarField onto a
// new patch
temperatureJumpFvPatchScalarField
(
const temperatureJumpFvPatchScalarField&,
const fvPatch&,
const DimensionedField<scalar, volMesh>&,
const fvPatchFieldMapper&
);
//- Construct as copy
temperatureJumpFvPatchScalarField
(
const temperatureJumpFvPatchScalarField&
);
//- Construct and return a clone
virtual tmp<fvPatchField<scalar> > clone() const
{
return tmp<fvPatchField<scalar> >
(
new temperatureJumpFvPatchScalarField(*this)
);
}
//- Construct as copy setting internal field reference
temperatureJumpFvPatchScalarField
(
const temperatureJumpFvPatchScalarField&,
const DimensionedField<scalar, volMesh>&
);
//- Construct and return a clone setting internal field reference
virtual tmp<fvPatchField<scalar> > clone
(
const DimensionedField<scalar, volMesh>& iF
) const
{
return tmp<fvPatchField<scalar> >
(
new temperatureJumpFvPatchScalarField(*this, iF)
);
}
// Member functions
// Access functions
//- Return jumpTable
const DataEntry<scalar>& jumpTable() const
{
return jumpTable_();
}
//- Write
virtual void write(Ostream&) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -85,6 +85,7 @@ class CollisionRecordList
//- List of active wall collisions
DynamicList<WallCollisionRecord<WallType> > wallRecords_;
public:
// Constructors
@ -115,11 +116,11 @@ public:
//- Return the active pair collisions
inline const DynamicList<PairCollisionRecord<PairType> >&
pairRecords() const;
pairRecords() const;
//- Return the active wall collisions
inline const DynamicList<WallCollisionRecord<WallType> >&
wallRecords() const;
wallRecords() const;
// Fields representing the data from each record, i.e. if the
// records 0-N containing each data members {a, b, c, d...}

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -80,12 +80,15 @@ void Foam::ReactingParcel<ParcelType>::cellValueSourceCorrection
)
{
scalar addedMass = 0.0;
scalar maxMassI = 0.0;
forAll(td.cloud().rhoTrans(), i)
{
addedMass += td.cloud().rhoTrans(i)[cellI];
scalar dm = td.cloud().rhoTrans(i)[cellI];
maxMassI = max(maxMassI, mag(dm));
addedMass += dm;
}
if (addedMass < ROOTVSMALL)
if (maxMassI < ROOTVSMALL)
{
return;
}
@ -95,16 +98,13 @@ void Foam::ReactingParcel<ParcelType>::cellValueSourceCorrection
this->rhoc_ += addedMass/td.cloud().pMesh().cellVolumes()[cellI];
const scalar massCellNew = massCell + addedMass;
this->Uc_ += td.cloud().UTrans()[cellI]/massCellNew;
this->Uc_ = (this->Uc_*massCell + td.cloud().UTrans()[cellI])/massCellNew;
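// (note, not part of this change: the update above is a mass-weighted
//  average -- the old cell momentum Uc_*massCell plus the momentum
//  UTrans()[cellI] transferred from the parcels, divided by the new
//  cell mass)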
scalar CpEff = 0.0;
if (addedMass > ROOTVSMALL)
forAll(td.cloud().rhoTrans(), i)
{
forAll(td.cloud().rhoTrans(), i)
{
scalar Y = td.cloud().rhoTrans(i)[cellI]/addedMass;
CpEff += Y*td.cloud().composition().carrier().Cp(i, this->Tc_);
}
scalar Y = td.cloud().rhoTrans(i)[cellI]/addedMass;
CpEff += Y*td.cloud().composition().carrier().Cp(i, this->Tc_);
}
const scalar Cpc = td.CpInterp().psi()[cellI];
@ -152,7 +152,7 @@ void Foam::ReactingParcel<ParcelType>::correctSurfaceValues
)
{
// No correction if total concentration of emitted species is small
if (sum(Cs) < SMALL)
if (!td.cloud().heatTransfer().BirdCorrection() || (sum(Cs) < SMALL))
{
return;
}

View File

@ -661,10 +661,10 @@ void Foam::PairCollision<CloudType>::collide()
{
preInteraction();
parcelInteraction();
wallInteraction();
parcelInteraction();
postInteraction();
}

View File

@ -238,7 +238,7 @@ void Foam::SurfaceFilmModel<CloudType>::cacheFilmFields
diameterParcelPatch_ =
filmModel.cloudDiameterTrans().boundaryField()[filmPatchI];
filmModel.toPrimary(filmPatchI, diameterParcelPatch_, maxOp<scalar>());
filmModel.toPrimary(filmPatchI, diameterParcelPatch_, maxEqOp<scalar>());
UFilmPatch_ = filmModel.Us().boundaryField()[filmPatchI];
filmModel.toPrimary(filmPatchI, UFilmPatch_);

View File

@ -411,10 +411,10 @@ void Foam::ThermoSurfaceFilm<CloudType>::splashInteraction
const scalar EKIn = 0.5*m*magSqr(Urel);
// incident surface energy [J]
const scalar ESigmaIn = sigma*p.areaS(d);
const scalar ESigmaIn = np*sigma*p.areaS(d);
// dissipative energy
const scalar Ed = max(0.8*EKIn, Wec/12*pi*sigma*sqr(d));
const scalar Ed = max(0.8*EKIn, np*Wec/12*pi*sigma*sqr(d));
// total energy [J]
const scalar EKs = EKIn + ESigmaIn - ESigmaSec - Ed;

View File

@ -439,6 +439,86 @@ void Foam::autoLayerDriver::handleFeatureAngle
}
//Foam::tmp<Foam::scalarField> Foam::autoLayerDriver::undistortedEdgeLength
//(
// const indirectPrimitivePatch& pp,
// const bool relativeSizes,
// const bool faceSize
//)
//{
// const fvMesh& mesh = meshRefiner_.mesh();
//
// tmp<scalarField> tfld(new scalarField());
// scalarField& fld = tfld();
//
// if (faceSize)
// {
// fld.setSize(pp.size());
// }
// else
// {
// fld.setSize(pp.nPoints());
// }
//
//
// if (relativeSizes)
// {
// const scalar edge0Len = meshRefiner_.meshCutter().level0EdgeLength();
// const labelList& cellLevel = meshRefiner_.meshCutter().cellLevel();
//
// if (faceSize)
// {
// forAll(pp, i)
// {
// label faceI = pp.addressing()[i];
// label ownLevel = cellLevel[mesh.faceOwner()[faceI]];
// fld[i] = edge0Len/(1<<ownLevel);
// }
// }
// else
// {
// // Determine per point the max cell level of connected cells
// // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// labelList maxPointLevel(pp.nPoints(), labelMin);
//
// forAll(pp, i)
// {
// label faceI = pp.addressing()[i];
// label ownLevel = cellLevel[mesh.faceOwner()[faceI]];
//
// const face& f = pp.localFaces()[i];
// forAll(f, fp)
// {
// maxPointLevel[f[fp]] =
// max(maxPointLevel[f[fp]], ownLevel);
// }
// }
//
// syncTools::syncPointList
// (
// mesh,
// pp.meshPoints(),
// maxPointLevel,
// maxEqOp<label>(),
// labelMin // null value
// );
//
//
// forAll(maxPointLevel, pointI)
// {
// // Find undistorted edge size for this level.
// fld[pointI] = edge0Len/(1<<maxPointLevel[pointI]);
// }
// }
// }
// else
// {
// // Use actual cell size
// }
//}
// No extrusion on cells with warped faces. Calculates the thickness of the
// layer and compares it to the space the warped face takes up. Disables
// extrusion if layer thickness is more than faceRatio of the thickness of
@ -2383,22 +2463,27 @@ void Foam::autoLayerDriver::addLayers
// Disable extrusion on warped faces
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// It is hard to calculate a length scale when not in relative
// mode, so disable this check.
if (layerParams.relativeSizes())
{
// Undistorted edge length
const scalar edge0Len =
meshRefiner_.meshCutter().level0EdgeLength();
const labelList& cellLevel = meshRefiner_.meshCutter().cellLevel();
// Undistorted edge length
const scalar edge0Len = meshRefiner_.meshCutter().level0EdgeLength();
const labelList& cellLevel = meshRefiner_.meshCutter().cellLevel();
handleWarpedFaces
(
pp,
layerParams.maxFaceThicknessRatio(),
edge0Len,
cellLevel,
handleWarpedFaces
(
pp,
layerParams.maxFaceThicknessRatio(),
edge0Len,
cellLevel,
patchDisp,
patchNLayers,
extrudeStatus
);
patchDisp,
patchNLayers,
extrudeStatus
);
}
//// Disable extrusion on cells with multiple patch faces
//// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -2585,6 +2670,21 @@ void Foam::autoLayerDriver::addLayers
dimensionedScalar("medialRatio", dimless, 0.0)
);
pointVectorField medialVec
(
IOobject
(
"medialVec",
meshRefiner_.timeName(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
meshMover().pMesh(),
dimensionedVector("medialVec", dimLength, vector::zero)
);
// Setup information for medial axis smoothing. Calculates medial axis
// and a smoothed displacement direction.
// - pointMedialDist : distance to medial axis
@ -2600,7 +2700,8 @@ void Foam::autoLayerDriver::addLayers
dispVec,
medialRatio,
pointMedialDist
pointMedialDist,
medialVec
);
@ -2692,6 +2793,7 @@ void Foam::autoLayerDriver::addLayers
dispVec,
medialRatio,
pointMedialDist,
medialVec,
extrudeStatus,
patchDisp,
@ -2830,7 +2932,7 @@ void Foam::autoLayerDriver::addLayers
mesh.name(),
static_cast<polyMesh&>(mesh).instance(),
mesh.time(), // register with runTime
static_cast<polyMesh&>(mesh).readOpt(),
IOobject::NO_READ,
static_cast<polyMesh&>(mesh).writeOpt()
), // io params from original mesh but new name
mesh, // original mesh

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -460,7 +460,8 @@ class autoLayerDriver
pointVectorField& dispVec,
pointScalarField& medialRatio,
pointScalarField& medialDist
pointScalarField& medialDist,
pointVectorField& medialVec
) const;
//- Main routine to shrink mesh
@ -481,6 +482,7 @@ class autoLayerDriver
const pointVectorField& dispVec,
const pointScalarField& medialRatio,
const pointScalarField& medialDist,
const pointVectorField& medialVec,
List<extrudeMode>& extrudeStatus,
pointField& patchDisp,

View File

@ -689,7 +689,8 @@ void Foam::autoLayerDriver::medialAxisSmoothingInfo
pointVectorField& dispVec,
pointScalarField& medialRatio,
pointScalarField& medialDist
pointScalarField& medialDist,
pointVectorField& medialVec
) const
{
@ -929,7 +930,7 @@ void Foam::autoLayerDriver::medialAxisSmoothingInfo
forAll(pointMedialDist, pointI)
{
medialDist[pointI] = Foam::sqrt(pointMedialDist[pointI].distSqr());
//medialVec[pointI] = pointMedialDist[pointI].origin();
medialVec[pointI] = pointMedialDist[pointI].origin();
}
}
@ -966,14 +967,14 @@ void Foam::autoLayerDriver::medialAxisSmoothingInfo
<< " : normalised direction of nearest displacement" << nl
<< " " << medialDist.name()
<< " : distance to medial axis" << nl
//<< " " << medialVec.name()
//<< " : nearest point on medial axis" << nl
<< " " << medialVec.name()
<< " : nearest point on medial axis" << nl
<< " " << medialRatio.name()
<< " : ratio of medial distance to wall distance" << nl
<< endl;
dispVec.write();
medialDist.write();
//medialVec.write();
medialVec.write();
medialRatio.write();
}
}
@ -996,7 +997,7 @@ void Foam::autoLayerDriver::shrinkMeshMedialDistance
const pointVectorField& dispVec,
const pointScalarField& medialRatio,
const pointScalarField& medialDist,
//const pointVectorField& medialVec,
const pointVectorField& medialVec,
List<extrudeMode>& extrudeStatus,
pointField& patchDisp,
@ -1066,6 +1067,24 @@ void Foam::autoLayerDriver::shrinkMeshMedialDistance
<< str().name() << endl;
}
autoPtr<OFstream> medialVecStr;
label medialVertI = 0;
if (debug)
{
medialVecStr.reset
(
new OFstream
(
mesh.time().path()
/ "thicknessRatioExcludeMedialVec_"
+ meshRefiner_.timeName()
+ ".obj"
)
);
Info<< "Writing points with too large a extrusion distance to "
<< medialVecStr().name() << endl;
}
forAll(meshPoints, patchPointI)
{
if (extrudeStatus[patchPointI] != NOEXTRUDE)
@ -1082,12 +1101,9 @@ void Foam::autoLayerDriver::shrinkMeshMedialDistance
vector n =
patchDisp[patchPointI]
/ (mag(patchDisp[patchPointI]) + VSMALL);
//vector mVec = mesh.points()[pointI]-medialVec[pointI];
//scalar mDist = mag(mVec);
//scalar thicknessRatio =
// (n&mVec)
// *thickness[patchPointI]
// /(mDist+VSMALL);
vector mVec = mesh.points()[pointI]-medialVec[pointI];
mVec /= mag(mVec)+VSMALL;
thicknessRatio *= (n&mVec);
if (thicknessRatio > maxThicknessToMedialRatio)
{
@ -1103,8 +1119,9 @@ void Foam::autoLayerDriver::shrinkMeshMedialDistance
minThickness[patchPointI]
+thickness[patchPointI]
)
//<< " since near medial at:" << medialVec[pointI]
//<< " with thicknessRatio:" << thicknessRatio
<< " medial direction:" << mVec
<< " extrusion direction:" << n
<< " with thicknessRatio:" << thicknessRatio
<< endl;
}
@ -1124,6 +1141,16 @@ void Foam::autoLayerDriver::shrinkMeshMedialDistance
vertI++;
str()<< "l " << vertI-1 << ' ' << vertI << nl;
}
if (medialVecStr.valid())
{
const point& pt = mesh.points()[pointI];
meshTools::writeOBJ(medialVecStr(), pt);
medialVertI++;
meshTools::writeOBJ(medialVecStr(), medialVec[pointI]);
medialVertI++;
medialVecStr()<< "l " << medialVertI-1
<< ' ' << medialVertI << nl;
}
}
}
}
@ -1228,6 +1255,15 @@ void Foam::autoLayerDriver::shrinkMeshMedialDistance
)
)()
);
// The move above will have changed the instance only on the points (which
// is correct).
// However, the mesh written previously is the mesh with layers
// (see autoLayerDriver.C), so we now have to force writing of all files
// so that we can easily step through time steps. Note that this is not
// necessary if the mesh with layers is not written.
meshRefiner_.mesh().setInstance(meshRefiner_.timeName());
meshRefiner_.write
(
debug,

View File

@ -59,16 +59,16 @@ namespace Foam
};
//- Combine operator for interpolateToSource/Target
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
class combineBinaryOp
{
const BinaryOp& bop_;
const CombineOp& cop_;
public:
combineBinaryOp(const BinaryOp& bop)
combineBinaryOp(const CombineOp& cop)
:
bop_(bop)
cop_(cop)
{}
void operator()
@ -79,7 +79,7 @@ namespace Foam
const scalar weight
) const
{
x = bop_(x, weight*y);
cop_(x, weight*y);
}
};
}
@ -142,8 +142,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::writeIntersectionOBJ
template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::checkPatches
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const
{
const scalar maxBoundsError = 0.05;
@ -166,8 +166,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::checkPatches
(
"AMIInterpolation<SourcePatch, TargetPatch>::checkPatches"
"("
"const primitivePatch&, "
"const primitivePatch&"
"const SourcePatch&, "
"const TargetPatch&"
")"
) << "Source and target patch bounding boxes are not similar" << nl
<< " source box span : " << bbSrc.span() << nl
@ -182,7 +182,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::checkPatches
template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::resetTree
(
const primitivePatch& tgtPatch
const TargetPatch& tgtPatch
)
{
// Clear the old octree
@ -212,8 +212,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::resetTree
template<class SourcePatch, class TargetPatch>
Foam::label Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcDistribution
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const
{
label procI = 0;
@ -297,7 +297,7 @@ template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::distributePatches
(
const mapDistribute& map,
const primitivePatch& pp,
const TargetPatch& pp,
const globalIndex& gi,
List<faceList>& faces,
List<pointField>& points,
@ -394,7 +394,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::
distributeAndMergePatches
(
const mapDistribute& map,
const primitivePatch& tgtPatch,
const TargetPatch& tgtPatch,
const globalIndex& gi,
faceList& tgtFaces,
pointField& tgtPoints,
@ -509,8 +509,8 @@ template<class SourcePatch, class TargetPatch>
Foam::autoPtr<Foam::mapDistribute>
Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcProcMap
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const
{
// Get decomposition of patch
@ -711,7 +711,7 @@ template<class SourcePatch, class TargetPatch>
Foam::label Foam::AMIInterpolation<SourcePatch, TargetPatch>::findTargetFace
(
const label srcFaceI,
const primitivePatch& srcPatch
const SourcePatch& srcPatch
) const
{
label targetFaceI = -1;
@ -745,7 +745,7 @@ template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::appendNbrFaces
(
const label faceI,
const primitivePatch& patch,
const TargetPatch& patch,
const DynamicList<label>& visitedFaces,
DynamicList<label>& faceIDs
) const
@ -793,8 +793,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::setNextFaces
label& startSeedI,
label& srcFaceI,
label& tgtFaceI,
const primitivePatch& srcPatch0,
const primitivePatch& tgtPatch0,
const SourcePatch& srcPatch0,
const TargetPatch& tgtPatch0,
const boolList& mapFlag,
labelList& seedFaces,
const DynamicList<label>& visitedFaces
@ -897,8 +897,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::setNextFaces
"label&, "
"label&, "
"label&, "
"const primitivePatch&, "
"const primitivePatch&, "
"const SourcePatch&, "
"const TargetPatch&, "
"const boolList&, "
"labelList&, "
"const DynamicList<label>&"
@ -913,26 +913,27 @@ Foam::scalar Foam::AMIInterpolation<SourcePatch, TargetPatch>::interArea
(
const label srcFaceI,
const label tgtFaceI,
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const
{
// quick reject if either face has zero area
if (srcMagSf_[srcFaceI] < ROOTVSMALL || tgtMagSf_[tgtFaceI] < ROOTVSMALL)
{
return 0.0;
}
const pointField& srcPoints = srcPatch.points();
const pointField& tgtPoints = tgtPatch.points();
// create intersection object
faceAreaIntersect inter(srcPoints, tgtPoints, reverseTarget_);
// references to candidate faces
const face& src = srcPatch[srcFaceI];
const face& tgt = tgtPatch[tgtFaceI];
// quick reject if either face has zero area
// Note: do not use stored face areas for the target patch
if ((srcMagSf_[srcFaceI] < ROOTVSMALL) || (tgt.mag(tgtPoints) < ROOTVSMALL))
{
return 0.0;
}
// create intersection object
faceAreaIntersect inter(srcPoints, tgtPoints, reverseTarget_);
// crude resultant norm
vector n(-src.normal(srcPoints));
if (reverseTarget_)
@ -963,20 +964,17 @@ Foam::scalar Foam::AMIInterpolation<SourcePatch, TargetPatch>::interArea
template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcAddressing
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch,
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch,
label srcFaceI,
label tgtFaceI
)
{
if (!srcPatch.size() || !tgtPatch.size())
if (debug && (!srcPatch.size() || !tgtPatch.size()))
{
if (debug)
{
Pout<< "AMI: Patches not on processor: Source faces = "
<< srcPatch.size() << ", target faces = " << tgtPatch.size()
<< endl;
}
Pout<< "AMI: Patches not on processor: Source faces = "
<< srcPatch.size() << ", target faces = " << tgtPatch.size()
<< endl;
}
if (!srcPatch.size())
@ -990,8 +988,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcAddressing
"void Foam::AMIInterpolation<SourcePatch, TargetPatch>::"
"calcAddressing"
"("
"const primitivePatch&, "
"const primitivePatch&, "
"const SourcePatch&, "
"const TargetPatch&, "
"label, "
"label"
")"
@ -1035,8 +1033,8 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::calcAddressing
"void Foam::AMIInterpolation<SourcePatch, TargetPatch>::"
"calcAddressing"
"("
"const primitivePatch&, "
"const primitivePatch&, "
"const SourcePatch&, "
"const TargetPatch&, "
"label, "
"label"
")"
@ -1542,7 +1540,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::AMIInterpolation
{
// create new patches for source and target
pointField srcPoints = srcPatch.points();
primitivePatch srcPatch0
SourcePatch srcPatch0
(
SubList<face>
(
@ -1563,7 +1561,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::AMIInterpolation
}
pointField tgtPoints = tgtPatch.points();
primitivePatch tgtPatch0
TargetPatch tgtPatch0
(
SubList<face>
(
@ -1734,8 +1732,8 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::~AMIInterpolation()
template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
)
{
// Calculate face areas
@ -1784,7 +1782,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::update
tgtFaceIDs
);
primitivePatch
TargetPatch
newTgtPatch
(
SubList<face>
@ -1900,7 +1898,7 @@ template<class Type, class CombineOp>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
(
const UList<Type>& fld,
const CombineOp& bop,
const CombineOp& cop,
List<Type>& result
) const
{
@ -1937,7 +1935,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
forAll(faces, i)
{
bop(result[faceI], faceI, work[faces[i]], weights[i]);
cop(result[faceI], faceI, work[faces[i]], weights[i]);
}
}
}
@ -1950,7 +1948,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
forAll(faces, i)
{
bop(result[faceI], faceI, fld[faces[i]], weights[i]);
cop(result[faceI], faceI, fld[faces[i]], weights[i]);
}
}
}
@ -1962,7 +1960,7 @@ template<class Type, class CombineOp>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
(
const UList<Type>& fld,
const CombineOp& bop,
const CombineOp& cop,
List<Type>& result
) const
{
@ -1999,7 +1997,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
forAll(faces, i)
{
bop(result[faceI], faceI, work[faces[i]], weights[i]);
cop(result[faceI], faceI, work[faces[i]], weights[i]);
}
}
}
@ -2012,7 +2010,7 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
forAll(faces, i)
{
bop(result[faceI], faceI, fld[faces[i]], weights[i]);
cop(result[faceI], faceI, fld[faces[i]], weights[i]);
}
}
}
@ -2020,12 +2018,12 @@ void Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
template<class SourcePatch, class TargetPatch>
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
Foam::tmp<Foam::Field<Type> >
Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
(
const Field<Type>& fld,
const BinaryOp& bop
const CombineOp& cop
) const
{
tmp<Field<Type> > tresult
@ -2037,32 +2035,32 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
)
);
interpolateToSource(fld, combineBinaryOp<Type, BinaryOp>(bop), tresult());
interpolateToSource(fld, combineBinaryOp<Type, CombineOp>(cop), tresult());
return tresult;
}
template<class SourcePatch, class TargetPatch>
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
Foam::tmp<Foam::Field<Type> >
Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
(
const tmp<Field<Type> >& tFld,
const BinaryOp& bop
const CombineOp& cop
) const
{
return interpolateToSource(tFld(), bop);
return interpolateToSource(tFld(), cop);
}
template<class SourcePatch, class TargetPatch>
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
Foam::tmp<Foam::Field<Type> >
Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
(
const Field<Type>& fld,
const BinaryOp& bop
const CombineOp& cop
) const
{
tmp<Field<Type> > tresult
@ -2074,22 +2072,22 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
)
);
interpolateToTarget(fld, combineBinaryOp<Type, BinaryOp>(bop), tresult());
interpolateToTarget(fld, combineBinaryOp<Type, CombineOp>(cop), tresult());
return tresult;
}
template<class SourcePatch, class TargetPatch>
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
Foam::tmp<Foam::Field<Type> >
Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
(
const tmp<Field<Type> >& tFld,
const BinaryOp& bop
const CombineOp& cop
) const
{
return interpolateToTarget(tFld(), bop);
return interpolateToTarget(tFld(), cop);
}
@ -2101,7 +2099,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
const Field<Type>& fld
) const
{
return interpolateToSource(fld, sumOp<Type>());
return interpolateToSource(fld, plusEqOp<Type>());
}
@ -2113,7 +2111,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToSource
const tmp<Field<Type> >& tFld
) const
{
return interpolateToSource(tFld(), sumOp<Type>());
return interpolateToSource(tFld(), plusEqOp<Type>());
}
@ -2125,7 +2123,7 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
const Field<Type>& fld
) const
{
return interpolateToTarget(fld, sumOp<Type>());
return interpolateToTarget(fld, plusEqOp<Type>());
}
@ -2137,15 +2135,15 @@ Foam::AMIInterpolation<SourcePatch, TargetPatch>::interpolateToTarget
const tmp<Field<Type> >& tFld
) const
{
return interpolateToTarget(tFld(), sumOp<Type>());
return interpolateToTarget(tFld(), plusEqOp<Type>());
}
template<class SourcePatch, class TargetPatch>
void Foam::AMIInterpolation<SourcePatch, TargetPatch>::writeFaceConnectivity
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch,
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch,
const labelListList& srcAddress
)
const
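The interpolateToSource/interpolateToTarget overloads above now take an in-place combine operation of the plusEqOp form (op(x, y) meaning x op= y), wrapped internally by combineBinaryOp, with plusEqOp<Type>() as the weighted-sum default. A minimal usage sketch, assuming an existing AMI object ami and a target-side field tgtFld (both names illustrative, not part of this commit):

    // weighted sum of target contributions (default combine op)
    tmp<scalarField> srcFld = ami.interpolateToSource(tgtFld);

    // the same interpolation with the combine operation given explicitly
    tmp<scalarField> srcFld2 =
        ami.interpolateToSource(tgtFld, plusEqOp<scalar>());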

View File

@ -160,12 +160,12 @@ class AMIInterpolation
//- Check that patches are valid
void checkPatches
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const;
//- Reset the octree for the target patch face search
void resetTree(const primitivePatch& tgtPatch);
void resetTree(const TargetPatch& tgtPatch);
@ -174,8 +174,8 @@ class AMIInterpolation
//- Calculate if patches are on multiple processors
label calcDistribution
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const;
label calcOverlappingProcs
@ -188,7 +188,7 @@ class AMIInterpolation
void distributePatches
(
const mapDistribute& map,
const primitivePatch& pp,
const TargetPatch& pp,
const globalIndex& gi,
List<faceList>& faces,
List<pointField>& points,
@ -198,7 +198,7 @@ class AMIInterpolation
void distributeAndMergePatches
(
const mapDistribute& map,
const primitivePatch& tgtPatch,
const TargetPatch& tgtPatch,
const globalIndex& gi,
faceList& tgtFaces,
pointField& tgtPoints,
@ -207,8 +207,8 @@ class AMIInterpolation
autoPtr<mapDistribute> calcProcMap
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const;
@ -228,14 +228,14 @@ class AMIInterpolation
label findTargetFace
(
const label srcFaceI,
const primitivePatch& srcPatch
const SourcePatch& srcPatch
) const;
//- Add faces neighbouring faceI to the ID list
void appendNbrFaces
(
const label faceI,
const primitivePatch& patch,
const TargetPatch& patch,
const DynamicList<label>& visitedFaces,
DynamicList<label>& faceIDs
) const;
@ -246,8 +246,8 @@ class AMIInterpolation
label& startSeedI,
label& srcFaceI,
label& tgtFaceI,
const primitivePatch& srcPatch0,
const primitivePatch& tgtPatch0,
const SourcePatch& srcPatch0,
const TargetPatch& tgtPatch0,
const boolList& mapFlag,
labelList& seedFaces,
const DynamicList<label>& visitedFaces
@ -261,15 +261,15 @@ class AMIInterpolation
(
const label srcFaceI,
const label tgtFaceI,
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
) const;
//- Calculate addressing
void calcAddressing
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch,
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch,
label srcFaceI = -1,
label tgtFaceI = -1
);
@ -398,8 +398,8 @@ public:
//- Update addressing and weights
void update
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch
);
@ -429,36 +429,36 @@ public:
//- Interpolate from target to source with supplied binary op
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
tmp<Field<Type> > interpolateToSource
(
const Field<Type>& fld,
const BinaryOp& bop
const CombineOp& cop
) const;
//- Interpolate from target tmp field to source with supplied
// binary op
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
tmp<Field<Type> > interpolateToSource
(
const tmp<Field<Type> >& tFld,
const BinaryOp& bop
const CombineOp& cop
) const;
//- Interpolate from source to target with supplied op
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
tmp<Field<Type> > interpolateToTarget
(
const Field<Type>& fld,
const BinaryOp& bop
const CombineOp& cop
) const;
//- Interpolate from source tmp field to target with supplied op
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
tmp<Field<Type> > interpolateToTarget
(
const tmp<Field<Type> >& tFld,
const BinaryOp& bop
const CombineOp& cop
) const;
//- Interpolate from target to source
@ -495,8 +495,8 @@ public:
//- Write face connectivity as OBJ file
void writeFaceConnectivity
(
const primitivePatch& srcPatch,
const primitivePatch& tgtPatch,
const SourcePatch& srcPatch,
const TargetPatch& tgtPatch,
const labelListList& srcAddress
) const;
};
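With the patch arguments templated on SourcePatch/TargetPatch instead of being fixed to primitivePatch, the class can be instantiated for any PrimitivePatch-like type. A sketch of the obvious concrete instantiation (the typedef name is an assumption, not taken from this commit):

    // primitivePatch is PrimitivePatch<face, SubList, const pointField&>
    typedef AMIInterpolation<primitivePatch, primitivePatch>
        AMIPatchToPatchInterpolation;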

View File

@ -376,16 +376,16 @@ public:
void distribute(List<Type>& lst) const;
//- Wrapper around map/interpolate data distribution with operation
template<class Type, class BinaryOp>
void distribute(List<Type>& lst, const BinaryOp& bop) const;
template<class Type, class CombineOp>
void distribute(List<Type>& lst, const CombineOp& cop) const;
//- Wrapper around map/interpolate data distribution
template<class Type>
void reverseDistribute(List<Type>& lst) const;
//- Wrapper around map/interpolate data distribution with operation
template<class Type, class BinaryOp>
void reverseDistribute(List<Type>& lst, const BinaryOp& bop) const;
template<class Type, class CombineOp>
void reverseDistribute(List<Type>& lst, const CombineOp& cop) const;
// I/O

View File

@ -41,11 +41,11 @@ void Foam::mappedPatchBase::distribute(List<Type>& lst) const
}
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
void Foam::mappedPatchBase::distribute
(
List<Type>& lst,
const BinaryOp& bop
const CombineOp& cop
) const
{
switch (mode_)
@ -55,7 +55,7 @@ void Foam::mappedPatchBase::distribute
lst = AMI().interpolateToSource
(
Field<Type>(lst.xfer()),
bop
cop
);
break;
}
@ -69,7 +69,7 @@ void Foam::mappedPatchBase::distribute
map().subMap(),
map().constructMap(),
lst,
bop,
cop,
pTraits<Type>::zero
);
}
@ -96,11 +96,11 @@ void Foam::mappedPatchBase::reverseDistribute(List<Type>& lst) const
}
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
void Foam::mappedPatchBase::reverseDistribute
(
List<Type>& lst,
const BinaryOp& bop
const CombineOp& cop
) const
{
switch (mode_)
@ -110,7 +110,7 @@ void Foam::mappedPatchBase::reverseDistribute
lst = AMI().interpolateToTarget
(
Field<Type>(lst.xfer()),
bop
cop
);
break;
}
@ -125,7 +125,7 @@ void Foam::mappedPatchBase::reverseDistribute
map().constructMap(),
map().subMap(),
lst,
bop,
cop,
pTraits<Type>::zero
);
break;

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -135,7 +135,7 @@ Foam::fileName Foam::topoSet::topoSet::localPath
const word& name
)
{
return mesh.facesInstance()/polyMesh::meshSubDir/"sets"/name;
return mesh.facesInstance()/mesh.dbDir()/polyMesh::meshSubDir/"sets"/name;
}
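Adding mesh.dbDir() to the path means sets belonging to a secondary mesh region are resolved under that region's polyMesh directory; for the default region dbDir() is empty and the path is unchanged. An illustrative call (region name "solid" and set name "c0" are hypothetical):

    // old: <case>/constant/polyMesh/sets/c0
    // new: <case>/constant/solid/polyMesh/sets/c0
    const fileName setPath(topoSet::localPath(mesh, "c0"));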

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -217,6 +217,9 @@ void Foam::surfaceFeatures::classifyFeatureAngles
const vectorField& faceNormals = surf_.faceNormals();
const pointField& points = surf_.points();
// Special case: minCos=1
bool selectAll = (mag(minCos-1.0) < SMALL);
forAll(edgeFaces, edgeI)
{
const labelList& eFaces = edgeFaces[edgeI];
@ -235,25 +238,25 @@ void Foam::surfaceFeatures::classifyFeatureAngles
{
edgeStat[edgeI] = REGION;
}
else
else if
(
selectAll
|| ((faceNormals[face0] & faceNormals[face1]) < minCos)
)
{
if ((faceNormals[face0] & faceNormals[face1]) < minCos)
// Check if convex or concave by looking at angle
// between face centres and normal
vector f0Tof1 =
surf_[face1].centre(points)
- surf_[face0].centre(points);
if ((f0Tof1 & faceNormals[face0]) >= 0.0)
{
// Check if convex or concave by looking at angle
// between face centres and normal
vector f0Tof1 =
surf_[face1].centre(points)
- surf_[face0].centre(points);
if ((f0Tof1 & faceNormals[face0]) > 0.0)
{
edgeStat[edgeI] = INTERNAL;
}
else
{
edgeStat[edgeI] = EXTERNAL;
}
edgeStat[edgeI] = INTERNAL;
}
else
{
edgeStat[edgeI] = EXTERNAL;
}
}
}
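Two changes here: a selectAll short-circuit so that minCos == 1 (an included angle of 180 degrees) marks every remaining two-face edge as a feature, and the convex/concave test now counts the exactly-flat case (zero dot product) as internal. The classification, pulled out as a stand-alone sketch (free-function form and names are illustrative):

    // An edge shared by face0 and face1 is classified "internal" (concave)
    // when the centre of face1 lies on the positive-normal side of face0,
    // and "external" (convex) otherwise.
    bool internalEdge
    (
        const vector& n0,   // unit normal of face0
        const point& c0,    // centre of face0
        const point& c1     // centre of face1
    )
    {
        return ((c1 - c0) & n0) >= 0.0;
    }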

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -103,9 +103,9 @@ License
Note: writes out .dgr files for debugging. Run with e.g.
Note: writeGraph=true : writes out .dgr files for debugging. Run with e.g.
mpirun -np 4 dgpart 2 'processor%r.grf'
mpirun -np 4 dgpart 2 'region0_%r.dgr'
- %r gets replaced by current processor rank
- decompose into 2 domains
@ -167,192 +167,192 @@ void Foam::ptscotchDecomp::check(const int retVal, const char* str)
}
//- Does prevention of 0 cell domains and calls ptscotch.
Foam::label Foam::ptscotchDecomp::decomposeZeroDomains
(
const fileName& meshPath,
const List<int>& initadjncy,
const List<int>& initxadj,
const scalarField& initcWeights,
List<int>& finalDecomp
) const
{
globalIndex globalCells(initxadj.size()-1);
bool hasZeroDomain = false;
for (label procI = 0; procI < Pstream::nProcs(); procI++)
{
if (globalCells.localSize(procI) == 0)
{
hasZeroDomain = true;
break;
}
}
if (!hasZeroDomain)
{
return decompose
(
meshPath,
initadjncy,
initxadj,
initcWeights,
finalDecomp
);
}
if (debug)
{
Info<< "ptscotchDecomp : have graphs with locally 0 cells."
<< " trickling down." << endl;
}
// Make sure every domain has at least one cell
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// (scotch does not like zero sized domains)
// Trickle cells from processors that have them up to those that
// don't.
// Number of cells to send to the next processor
// (is same as number of cells next processor has to receive)
List<int> nSendCells(Pstream::nProcs(), 0);
for (label procI = nSendCells.size()-1; procI >=1; procI--)
{
label nLocalCells = globalCells.localSize(procI);
if (nLocalCells-nSendCells[procI] < 1)
{
nSendCells[procI-1] = nSendCells[procI]-nLocalCells+1;
}
}
// First receive (so increasing the sizes of all arrays)
Field<int> xadj(initxadj);
Field<int> adjncy(initadjncy);
scalarField cWeights(initcWeights);
if (Pstream::myProcNo() >= 1 && nSendCells[Pstream::myProcNo()-1] > 0)
{
// Receive cells from previous processor
IPstream fromPrevProc(Pstream::blocking, Pstream::myProcNo()-1);
Field<int> prevXadj(fromPrevProc);
Field<int> prevAdjncy(fromPrevProc);
scalarField prevCellWeights(fromPrevProc);
if (prevXadj.size() != nSendCells[Pstream::myProcNo()-1])
{
FatalErrorIn("ptscotchDecomp::decompose(..)")
<< "Expected from processor " << Pstream::myProcNo()-1
<< " connectivity for " << nSendCells[Pstream::myProcNo()-1]
<< " nCells but only received " << prevXadj.size()
<< abort(FatalError);
}
// Insert adjncy
prepend(prevAdjncy, adjncy);
// Adapt offsets and prepend xadj
xadj += prevAdjncy.size();
prepend(prevXadj, xadj);
// Weights
prepend(prevCellWeights, cWeights);
}
// Send to my next processor
if (nSendCells[Pstream::myProcNo()] > 0)
{
// Send cells to next processor
OPstream toNextProc(Pstream::blocking, Pstream::myProcNo()+1);
int nCells = nSendCells[Pstream::myProcNo()];
int startCell = xadj.size()-1 - nCells;
int startFace = xadj[startCell];
int nFaces = adjncy.size()-startFace;
// Send for all cell data: last nCells elements
// Send for all face data: last nFaces elements
toNextProc
<< Field<int>::subField(xadj, nCells, startCell)-startFace
<< Field<int>::subField(adjncy, nFaces, startFace)
<<
(
cWeights.size()
? static_cast<const scalarField&>
(
scalarField::subField(cWeights, nCells, startCell)
)
: scalarField(0)
);
// Remove data that has been sent
if (cWeights.size())
{
cWeights.setSize(cWeights.size()-nCells);
}
adjncy.setSize(adjncy.size()-nFaces);
xadj.setSize(xadj.size() - nCells);
}
// Do decomposition as normal. Sets finalDecomp.
label result = decompose(meshPath, adjncy, xadj, cWeights, finalDecomp);
if (debug)
{
Info<< "ptscotchDecomp : have graphs with locally 0 cells."
<< " trickling up." << endl;
}
// If we sent cells across make sure we undo it
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Receive back from next processor if I sent something
if (nSendCells[Pstream::myProcNo()] > 0)
{
IPstream fromNextProc(Pstream::blocking, Pstream::myProcNo()+1);
List<int> nextFinalDecomp(fromNextProc);
if (nextFinalDecomp.size() != nSendCells[Pstream::myProcNo()])
{
FatalErrorIn("parMetisDecomp::decompose(..)")
<< "Expected from processor " << Pstream::myProcNo()+1
<< " decomposition for " << nSendCells[Pstream::myProcNo()]
<< " nCells but only received " << nextFinalDecomp.size()
<< abort(FatalError);
}
append(nextFinalDecomp, finalDecomp);
}
// Send back to previous processor.
if (Pstream::myProcNo() >= 1 && nSendCells[Pstream::myProcNo()-1] > 0)
{
OPstream toPrevProc(Pstream::blocking, Pstream::myProcNo()-1);
int nToPrevious = nSendCells[Pstream::myProcNo()-1];
toPrevProc <<
SubList<int>
(
finalDecomp,
nToPrevious,
finalDecomp.size()-nToPrevious
);
// Remove locally what has been sent
finalDecomp.setSize(finalDecomp.size()-nToPrevious);
}
return result;
}
////- Does prevention of 0 cell domains and calls ptscotch.
//Foam::label Foam::ptscotchDecomp::decomposeZeroDomains
//(
// const fileName& meshPath,
// const List<int>& initadjncy,
// const List<int>& initxadj,
// const scalarField& initcWeights,
//
// List<int>& finalDecomp
//) const
//{
// globalIndex globalCells(initxadj.size()-1);
//
// bool hasZeroDomain = false;
// for (label procI = 0; procI < Pstream::nProcs(); procI++)
// {
// if (globalCells.localSize(procI) == 0)
// {
// hasZeroDomain = true;
// break;
// }
// }
//
// if (!hasZeroDomain)
// {
// return decompose
// (
// meshPath,
// initadjncy,
// initxadj,
// initcWeights,
// finalDecomp
// );
// }
//
//
// if (debug)
// {
// Info<< "ptscotchDecomp : have graphs with locally 0 cells."
// << " trickling down." << endl;
// }
//
// // Make sure every domain has at least one cell
// // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// // (scotch does not like zero sized domains)
// // Trickle cells from processors that have them up to those that
// // don't.
//
//
// // Number of cells to send to the next processor
// // (is same as number of cells next processor has to receive)
// List<int> nSendCells(Pstream::nProcs(), 0);
//
// for (label procI = nSendCells.size()-1; procI >=1; procI--)
// {
// label nLocalCells = globalCells.localSize(procI);
// if (nLocalCells-nSendCells[procI] < 1)
// {
// nSendCells[procI-1] = nSendCells[procI]-nLocalCells+1;
// }
// }
//
// // First receive (so increasing the sizes of all arrays)
//
// Field<int> xadj(initxadj);
// Field<int> adjncy(initadjncy);
// scalarField cWeights(initcWeights);
//
// if (Pstream::myProcNo() >= 1 && nSendCells[Pstream::myProcNo()-1] > 0)
// {
// // Receive cells from previous processor
// IPstream fromPrevProc(Pstream::blocking, Pstream::myProcNo()-1);
//
// Field<int> prevXadj(fromPrevProc);
// Field<int> prevAdjncy(fromPrevProc);
// scalarField prevCellWeights(fromPrevProc);
//
// if (prevXadj.size() != nSendCells[Pstream::myProcNo()-1])
// {
// FatalErrorIn("ptscotchDecomp::decompose(..)")
// << "Expected from processor " << Pstream::myProcNo()-1
// << " connectivity for " << nSendCells[Pstream::myProcNo()-1]
// << " nCells but only received " << prevXadj.size()
// << abort(FatalError);
// }
//
// // Insert adjncy
// prepend(prevAdjncy, adjncy);
// // Adapt offsets and prepend xadj
// xadj += prevAdjncy.size();
// prepend(prevXadj, xadj);
// // Weights
// prepend(prevCellWeights, cWeights);
// }
//
//
// // Send to my next processor
//
// if (nSendCells[Pstream::myProcNo()] > 0)
// {
// // Send cells to next processor
// OPstream toNextProc(Pstream::blocking, Pstream::myProcNo()+1);
//
// int nCells = nSendCells[Pstream::myProcNo()];
// int startCell = xadj.size()-1 - nCells;
// int startFace = xadj[startCell];
// int nFaces = adjncy.size()-startFace;
//
// // Send for all cell data: last nCells elements
// // Send for all face data: last nFaces elements
// toNextProc
// << Field<int>::subField(xadj, nCells, startCell)-startFace
// << Field<int>::subField(adjncy, nFaces, startFace)
// <<
// (
// cWeights.size()
// ? static_cast<const scalarField&>
// (
// scalarField::subField(cWeights, nCells, startCell)
// )
// : scalarField(0)
// );
//
// // Remove data that has been sent
// if (cWeights.size())
// {
// cWeights.setSize(cWeights.size()-nCells);
// }
// adjncy.setSize(adjncy.size()-nFaces);
// xadj.setSize(xadj.size() - nCells);
// }
//
//
// // Do decomposition as normal. Sets finalDecomp.
// label result = decompose(meshPath, adjncy, xadj, cWeights, finalDecomp);
//
//
// if (debug)
// {
// Info<< "ptscotchDecomp : have graphs with locally 0 cells."
// << " trickling up." << endl;
// }
//
//
// // If we sent cells across make sure we undo it
// // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// // Receive back from next processor if I sent something
// if (nSendCells[Pstream::myProcNo()] > 0)
// {
// IPstream fromNextProc(Pstream::blocking, Pstream::myProcNo()+1);
//
// List<int> nextFinalDecomp(fromNextProc);
//
// if (nextFinalDecomp.size() != nSendCells[Pstream::myProcNo()])
// {
// FatalErrorIn("parMetisDecomp::decompose(..)")
// << "Expected from processor " << Pstream::myProcNo()+1
// << " decomposition for " << nSendCells[Pstream::myProcNo()]
// << " nCells but only received " << nextFinalDecomp.size()
// << abort(FatalError);
// }
//
// append(nextFinalDecomp, finalDecomp);
// }
//
// // Send back to previous processor.
// if (Pstream::myProcNo() >= 1 && nSendCells[Pstream::myProcNo()-1] > 0)
// {
// OPstream toPrevProc(Pstream::blocking, Pstream::myProcNo()-1);
//
// int nToPrevious = nSendCells[Pstream::myProcNo()-1];
//
// toPrevProc <<
// SubList<int>
// (
// finalDecomp,
// nToPrevious,
// finalDecomp.size()-nToPrevious
// );
//
// // Remove locally what has been sent
// finalDecomp.setSize(finalDecomp.size()-nToPrevious);
// }
// return result;
//}
// Call scotch with options from dictionary.
@ -362,13 +362,42 @@ Foam::label Foam::ptscotchDecomp::decompose
const List<int>& adjncy,
const List<int>& xadj,
const scalarField& cWeights,
List<int>& finalDecomp
) const
{
List<int> dummyAdjncy(1);
List<int> dummyXadj(1);
dummyXadj[0] = 0;
return decompose
(
meshPath,
adjncy.size(),
(adjncy.size() ? adjncy.begin() : dummyAdjncy.begin()),
xadj.size(),
(xadj.size() ? xadj.begin() : dummyXadj.begin()),
cWeights,
finalDecomp
);
}
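This wrapper exists so that the low-level overload below always receives usable array pointers: begin() on an empty List presumably does not give SCOTCH a valid address, so one-element dummies are substituted whenever the local graph is empty. The guard in isolation (variable names illustrative):

    List<int> adjncy;                 // may be empty on this processor
    List<int> dummyAdjncy(1, 0);      // always-valid backing storage

    const int* adjncyPtr =
        adjncy.size() ? adjncy.begin() : dummyAdjncy.begin();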
// Call scotch with options from dictionary.
Foam::label Foam::ptscotchDecomp::decompose
(
const fileName& meshPath,
const int adjncySize,
const int adjncy[],
const int xadjSize,
const int xadj[],
const scalarField& cWeights,
List<int>& finalDecomp
) const
{
if (debug)
{
Pout<< "ptscotchDecomp : entering with xadj:" << xadj.size() << endl;
Pout<< "ptscotchDecomp : entering with xadj:" << xadjSize << endl;
}
// Dump graph
@ -387,7 +416,7 @@ Foam::label Foam::ptscotchDecomp::decompose
Pout<< "Dumping Scotch graph file to " << str.name() << endl
<< "Use this in combination with dgpart." << endl;
globalIndex globalCells(xadj.size()-1);
globalIndex globalCells(xadjSize-1);
// Distributed graph file (.grf)
label version = 2;
@ -400,17 +429,17 @@ Foam::label Foam::ptscotchDecomp::decompose
// Total number of vertices (vertglbnbr)
str << globalCells.size();
// Total number of connections (edgeglbnbr)
str << ' ' << returnReduce(xadj[xadj.size()-1], sumOp<label>())
str << ' ' << returnReduce(xadj[xadjSize-1], sumOp<label>())
<< nl;
// Local number of vertices (vertlocnbr)
str << xadj.size()-1;
str << xadjSize-1;
// Local number of connections (edgelocnbr)
str << ' ' << xadj[xadj.size()-1] << nl;
str << ' ' << xadj[xadjSize-1] << nl;
// Numbering starts from 0
label baseval = 0;
// 100*hasVertLabels+10*hasEdgeWeights+1*hasVertWeights
str << baseval << ' ' << "000" << nl;
for (label cellI = 0; cellI < xadj.size()-1; cellI++)
for (label cellI = 0; cellI < xadjSize-1; cellI++)
{
label start = xadj[cellI];
label end = xadj[cellI+1];
@ -462,8 +491,9 @@ Foam::label Foam::ptscotchDecomp::decompose
// Check for externally provided cellweights and if so initialise weights
scalar minWeights = gMin(cWeights);
scalar maxWeights = gMax(cWeights);
if (!cWeights.empty())
if (maxWeights > minWeights)
{
if (minWeights <= 0)
{
@ -474,13 +504,13 @@ Foam::label Foam::ptscotchDecomp::decompose
<< endl;
}
if (cWeights.size() != xadj.size()-1)
if (cWeights.size() != xadjSize-1)
{
FatalErrorIn
(
"ptscotchDecomp::decompose(..)"
) << "Number of cell weights " << cWeights.size()
<< " does not equal number of cells " << xadj.size()-1
<< " does not equal number of cells " << xadjSize-1
<< exit(FatalError);
}
}
@ -508,7 +538,7 @@ Foam::label Foam::ptscotchDecomp::decompose
Pstream::scatter(rangeScale);
if (!cWeights.empty())
if (maxWeights > minWeights)
{
// Convert to integers.
velotab.setSize(cWeights.size());
@ -532,11 +562,11 @@ Foam::label Foam::ptscotchDecomp::decompose
if (debug)
{
Pout<< "SCOTCH_dgraphBuild with:" << nl
<< "xadj.size()-1 : " << xadj.size()-1 << nl
<< "xadj : " << long(xadj.begin()) << nl
<< "xadjSize-1 : " << xadjSize-1 << nl
<< "xadj : " << long(xadj) << nl
<< "velotab : " << long(velotab.begin()) << nl
<< "adjncy.size() : " << adjncy.size() << nl
<< "adjncy : " << long(adjncy.begin()) << nl
<< "adjncySize : " << adjncySize << nl
<< "adjncy : " << long(adjncy) << nl
<< endl;
}
@ -546,19 +576,19 @@ Foam::label Foam::ptscotchDecomp::decompose
(
&grafdat, // grafdat
0, // baseval, c-style numbering
xadj.size()-1, // vertlocnbr, nCells
xadj.size()-1, // vertlocmax
const_cast<SCOTCH_Num*>(xadj.begin()),
xadjSize-1, // vertlocnbr, nCells
xadjSize-1, // vertlocmax
const_cast<SCOTCH_Num*>(xadj),
// vertloctab, start index per cell into
// adjncy
const_cast<SCOTCH_Num*>(&xadj[1]),// vendloctab, end index ,,
const_cast<SCOTCH_Num*>(xadj+1),// vendloctab, end index ,,
const_cast<SCOTCH_Num*>(velotab.begin()),// veloloctab, vtx weights
NULL, // vlblloctab
adjncy.size(), // edgelocnbr, number of arcs
adjncy.size(), // edgelocsiz
const_cast<SCOTCH_Num*>(adjncy.begin()), // edgeloctab
adjncySize, // edgelocnbr, number of arcs
adjncySize, // edgelocsiz
const_cast<SCOTCH_Num*>(adjncy), // edgeloctab
NULL, // edgegsttab
NULL // edlotab, edge weights
),
@ -620,12 +650,6 @@ Foam::label Foam::ptscotchDecomp::decompose
}
//SCOTCH_Mapping mapdat;
//SCOTCH_dgraphMapInit(&grafdat, &mapdat, &archdat, NULL);
//SCOTCH_dgraphMapCompute(&grafdat, &mapdat, &stradat); /*Perform mapping*/
//SCOTCHdgraphMapExit(&grafdat, &mapdat);
// Hack:switch off fpu error trapping
# ifdef LINUX_GNUC
int oldExcepts = fedisableexcept
@ -636,12 +660,15 @@ Foam::label Foam::ptscotchDecomp::decompose
);
# endif
// Note: always provide allocated storage even if local size 0
finalDecomp.setSize(max(1, xadjSize-1));
finalDecomp = 0;
if (debug)
{
Pout<< "SCOTCH_dgraphMap" << endl;
}
finalDecomp.setSize(xadj.size()-1);
finalDecomp = 0;
check
(
SCOTCH_dgraphMap
@ -660,7 +687,7 @@ Foam::label Foam::ptscotchDecomp::decompose
//finalDecomp.setSize(xadj.size()-1);
//finalDecomp.setSize(xadjSize-1);
//check
//(
// SCOTCH_dgraphPart
@ -719,12 +746,6 @@ Foam::labelList Foam::ptscotchDecomp::decompose
<< exit(FatalError);
}
// // For running sequential ...
// if (Pstream::nProcs() <= 1)
// {
// return scotchDecomp(decompositionDict_, mesh_)
// .decompose(points, pointWeights);
// }
// Make Metis CSR (Compressed Storage Format) storage
// adjncy : contains neighbours (= edges in graph)
@ -743,7 +764,7 @@ Foam::labelList Foam::ptscotchDecomp::decompose
// Decompose using default weights
List<int> finalDecomp;
decomposeZeroDomains
decompose
(
mesh.time().path()/mesh.name(),
cellCells.m(),
@ -753,7 +774,7 @@ Foam::labelList Foam::ptscotchDecomp::decompose
);
// Copy back to labelList
labelList decomp(finalDecomp.size());
labelList decomp(points.size());
forAll(decomp, i)
{
decomp[i] = finalDecomp[i];
@ -780,12 +801,6 @@ Foam::labelList Foam::ptscotchDecomp::decompose
<< exit(FatalError);
}
// // For running sequential ...
// if (Pstream::nProcs() <= 1)
// {
// return scotchDecomp(decompositionDict_, mesh)
// .decompose(agglom, agglomPoints, pointWeights);
// }
// Make Metis CSR (Compressed Storage Format) storage
// adjncy : contains neighbours (= edges in graph)
@ -802,7 +817,7 @@ Foam::labelList Foam::ptscotchDecomp::decompose
// Decompose using weights
List<int> finalDecomp;
decomposeZeroDomains
decompose
(
mesh.time().path()/mesh.name(),
cellCells.m(),
@ -840,13 +855,6 @@ Foam::labelList Foam::ptscotchDecomp::decompose
<< ")." << exit(FatalError);
}
// // For running sequential ...
// if (Pstream::nProcs() <= 1)
// {
// return scotchDecomp(decompositionDict_, mesh)
// .decompose(globalCellCells, cellCentres, cWeights);
// }
// Make Metis CSR (Compressed Storage Format) storage
// adjncy : contains neighbours (= edges in graph)
@ -856,7 +864,7 @@ Foam::labelList Foam::ptscotchDecomp::decompose
// Decompose using weights
List<int> finalDecomp;
decomposeZeroDomains
decompose
(
"ptscotch",
cellCells.m(),
@ -866,7 +874,7 @@ Foam::labelList Foam::ptscotchDecomp::decompose
);
// Copy back to labelList
labelList decomp(finalDecomp.size());
labelList decomp(cellCentres.size());
forAll(decomp, i)
{
decomp[i] = finalDecomp[i];

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -60,17 +60,7 @@ class ptscotchDecomp
//- Check and print error message
static void check(const int, const char*);
//- Decompose with optionally zero sized domains
label decomposeZeroDomains
(
const fileName& meshPath,
const List<int>& initadjncy,
const List<int>& initxadj,
const scalarField& initcWeights,
List<int>& finalDecomp
) const;
//- Decompose
//- Decompose. Handles size 0 arrays
label decompose
(
const fileName& meshPath,
@ -80,6 +70,18 @@ class ptscotchDecomp
List<int>& finalDecomp
) const;
//- Low level decompose
label decompose
(
const fileName& meshPath,
const int adjncySize,
const int adjncy[],
const int xadjSize,
const int xadj[],
const scalarField& cWeights,
List<int>& finalDecomp
) const;
//- Disallow default bitwise copy construct and assignment
void operator=(const ptscotchDecomp&);
ptscotchDecomp(const ptscotchDecomp&);

View File

@ -157,9 +157,22 @@ void Foam::fieldMinMax::writeFileHeader()
{
fieldMinMaxFilePtr_()
<< "# Time" << token::TAB << "field" << token::TAB
<< "min" << token::TAB << "position(min)" << token::TAB
<< "max" << token::TAB << "position(max)" << token::TAB
<< endl;
<< "min" << token::TAB << "position(min)";
if (Pstream::parRun())
{
fieldMinMaxFilePtr_() << token::TAB << "proc";
}
fieldMinMaxFilePtr_()
<< token::TAB << "max" << token::TAB << "position(max)";
if (Pstream::parRun())
{
fieldMinMaxFilePtr_() << token::TAB << "proc";
}
fieldMinMaxFilePtr_() << endl;
}
}
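In parallel the file header now carries a processor column after each position; a representative header line and data row (tab-separated in the file, values purely illustrative):

    # Time  field  min    position(min)  proc  max   position(max)  proc
    0.05    p      -12.3  (0.1 0.2 0)    1     45.6  (0.9 0.2 0)    3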

View File

@ -88,16 +88,43 @@ void Foam::fieldMinMax::calcMinMaxFields
fieldMinMaxFilePtr_()
<< obr_.time().value() << token::TAB
<< fieldName << token::TAB
<< minValue << token::TAB << minC << token::TAB
<< maxValue << token::TAB << maxC << endl;
<< minValue << token::TAB << minC;
if (Pstream::parRun())
{
fieldMinMaxFilePtr_() << token::TAB << minI;
}
fieldMinMaxFilePtr_()
<< token::TAB << maxValue << token::TAB << maxC;
if (Pstream::parRun())
{
fieldMinMaxFilePtr_() << token::TAB << maxI;
}
fieldMinMaxFilePtr_() << endl;
}
if (log_)
{
Info<< " min(mag(" << fieldName << ")) = "
<< minValue << " at position " << minC << nl
<< " max(mag(" << fieldName << ")) = "
<< maxValue << " at position " << maxC << nl;
<< minValue << " at position " << minC;
if (Pstream::parRun())
{
Info<< " on processor " << minI;
}
Info<< nl << " max(mag(" << fieldName << ")) = "
<< maxValue << " at position " << maxC;
if (Pstream::parRun())
{
Info<< " on processor " << maxI;
}
Info<< endl;
}
}
break;
@ -142,16 +169,43 @@ void Foam::fieldMinMax::calcMinMaxFields
fieldMinMaxFilePtr_()
<< obr_.time().value() << token::TAB
<< fieldName << token::TAB
<< minValue << token::TAB << minC << token::TAB
<< maxValue << token::TAB << maxC << endl;
<< minValue << token::TAB << minC;
if (Pstream::parRun())
{
fieldMinMaxFilePtr_() << token::TAB << minI;
}
fieldMinMaxFilePtr_()
<< token::TAB << maxValue << token::TAB << maxC;
if (Pstream::parRun())
{
fieldMinMaxFilePtr_() << token::TAB << maxI;
}
fieldMinMaxFilePtr_() << endl;
}
if (log_)
{
Info<< " min(" << fieldName << ") = "
<< minValue << " at position " << minC << nl
<< " max(" << fieldName << ") = "
<< maxValue << " at position " << maxC << nl;
<< minValue << " at position " << minC;
if (Pstream::parRun())
{
Info<< " on processor " << minI;
}
Info<< nl << " max(" << fieldName << ") = "
<< maxValue << " at position " << maxC;
if (Pstream::parRun())
{
Info<< " on processor " << maxI;
}
Info<< endl;
}
}
break;

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -117,6 +117,7 @@ protected:
//- List of patch IDs internally coupled with the primary region
labelList intCoupledPatchIDs_;
//- Region name
word regionName_;
@ -235,21 +236,21 @@ public:
) const;
//- Convert a local region field to the primary region with op
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
void toPrimary
(
const label regionPatchI,
List<Type>& regionField,
const BinaryOp& bop
const CombineOp& cop
) const;
//- Convert a primary region field to the local region with op
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
void toRegion
(
const label regionPatchI,
List<Type>& primaryFieldField,
const BinaryOp& bop
const CombineOp& cop
) const;

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -77,12 +77,12 @@ void Foam::regionModels::regionModel::toRegion
}
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
void Foam::regionModels::regionModel::toPrimary
(
const label regionPatchI,
List<Type>& regionField,
const BinaryOp& bop
const CombineOp& cop
) const
{
forAll(intCoupledPatchIDs_, i)
@ -94,7 +94,7 @@ void Foam::regionModels::regionModel::toPrimary
(
regionMesh().boundaryMesh()[regionPatchI]
);
mpb.reverseDistribute(regionField, bop);
mpb.reverseDistribute(regionField, cop);
return;
}
}
@ -105,19 +105,19 @@ void Foam::regionModels::regionModel::toPrimary
"("
"const label, "
"List<Type>&, "
"const BinaryOp&"
"const CombineOp&"
") const"
) << "Region patch ID " << regionPatchI << " not found in region mesh"
<< abort(FatalError);
}
template<class Type, class BinaryOp>
template<class Type, class CombineOp>
void Foam::regionModels::regionModel::toRegion
(
const label regionPatchI,
List<Type>& primaryField,
const BinaryOp& bop
const CombineOp& cop
) const
{
forAll(intCoupledPatchIDs_, i)
@ -129,14 +129,14 @@ void Foam::regionModels::regionModel::toRegion
(
regionMesh().boundaryMesh()[regionPatchI]
);
mpb.distribute(primaryField, bop);
mpb.distribute(primaryField, cop);
return;
}
}
FatalErrorIn
(
"const void toRegion(const label, List<Type>&, const BinaryOp&) const"
"const void toRegion(const label, List<Type>&, const CombineOp&) const"
) << "Region patch ID " << regionPatchI << " not found in region mesh"
<< abort(FatalError);
}
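toPrimary and toRegion now forward an arbitrary in-place combine operation to mappedPatchBase::reverseDistribute/distribute. A usage sketch from inside a regionModel-derived class (the field name and the choice of plusEqOp are assumptions):

    // one value per coupled film-patch face; after the call the list holds
    // the values accumulated onto the primary-region patch faces
    scalarField regionField
    (
        regionMesh().boundaryMesh()[regionPatchI].size(),
        0.0
    );
    toPrimary(regionPatchI, regionField, plusEqOp<scalar>());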

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -879,22 +879,19 @@ scalar kinematicSingleLayer::CourantNumber() const
if (regionMesh().nInternalFaces() > 0)
{
const scalar deltaT = time_.deltaTValue();
const scalarField sumPhi(fvc::surfaceSum(mag(phi_)));
const surfaceScalarField SfUfbyDelta
(
regionMesh().surfaceInterpolation::deltaCoeffs()*mag(phi_)
);
const surfaceScalarField rhoDelta(fvc::interpolate(rho_*delta_));
const surfaceScalarField& magSf = regionMesh().magSf();
const scalarField& V = regionMesh().V();
forAll(rhoDelta, i)
forAll(deltaRho_, i)
{
if (rhoDelta[i] > ROOTVSMALL)
if (deltaRho_[i] > SMALL)
{
CoNum = max(CoNum, SfUfbyDelta[i]/rhoDelta[i]/magSf[i]*deltaT);
CoNum = max(CoNum, sumPhi[i]/deltaRho_[i]/V[i]);
}
}
CoNum *= 0.5*time_.deltaTValue();
}
reduce(CoNum, maxOp<scalar>());
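Written out, the film Courant number evaluated above is

    Co = \frac{\Delta t}{2} \max_i \frac{\sum_f |\phi_f|_i}{(\rho\,\delta)_i\,V_i}

with phi_ the film mass flux, so that (rho*delta)*V rescales the summed face fluxes to a per-cell velocity measure; the old form built the same estimate from interpolated rho*delta and the surface delta coefficients.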

View File

@ -229,15 +229,14 @@ Foam::MeshedSurface<Face>::MeshedSurface
ParentType(List<Face>(), surf.points())
{
labelList faceMap;
this->storedZones().transfer(surf.sortedZones(faceMap));
this->storedZones() = surf.sortedZones(faceMap);
const List<Face>& origFaces = surf.faces();
List<Face> newFaces(origFaces.size());
// this is somewhat like ListOps reorder and/or IndirectList
forAll(newFaces, faceI)
{
newFaces[faceI] = origFaces[faceMap[faceI]];
newFaces[faceMap[faceI]] = origFaces[faceI];
}
this->storedFaces().transfer(newFaces);
@ -1084,7 +1083,7 @@ void Foam::MeshedSurface<Face>::transfer
forAll(faceMap, faceI)
{
newFaces[faceI].transfer(oldFaces[faceMap[faceI]]);
newFaces[faceMap[faceI]].transfer(oldFaces[faceI]);
}
reset

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\ / A nd | Copyright (C) 2011-2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
@ -280,6 +280,7 @@ public:
//- Sort faces according to zoneIds
// Returns a surfZoneList and sets faceMap to index within faces()
// (i.e. map from original,unsorted to sorted)
surfZoneList sortedZones(labelList& faceMap) const;
//- Set zones to 0 and set a single zone

View File

@ -1,6 +1,8 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/specie/lnInclude
-I$(LIB_SRC)/thermophysicalModels/specie/lnInclude \
-I/home/tom3/sergio/development/chtMultiRegionCoupledFoam/lnInclude
LIB_LIBS = \
-lfiniteVolume
-lfiniteVolume \
-L$(FOAM_USER_LIBBIN)/FvPatchScalarFieldEnthalpyJump

View File

@ -61,10 +61,14 @@ Foam::wordList Foam::basicThermo::heBoundaryTypes()
{
hbt[patchi] = gradientEnergyFvPatchScalarField::typeName;
}
else if (isA<mixedFvPatchScalarField>(tbf[patchi]))
else if(isA<mixedFvPatchScalarField>(tbf[patchi]))
{
hbt[patchi] = mixedEnergyFvPatchScalarField::typeName;
}
else if (isA<temperatureJumpFvPatchScalarField>(tbf[patchi]))
{
hbt[patchi] = enthalpyJumpFvPatchScalarField::typeName;
}
}
return hbt;

View File

@ -0,0 +1,165 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "addToRunTimeSelectionTable.H"
#include "enthalpyJumpFvPatchScalarField.H"
#include "temperatureJumpFvPatchScalarField.H"
#include "basicThermo.H"
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
Foam::enthalpyJumpFvPatchScalarField::enthalpyJumpFvPatchScalarField
(
const fvPatch& p,
const DimensionedField<scalar, volMesh>& iF
)
:
fixedJumpFvPatchField<scalar>(p, iF)
{}
Foam::enthalpyJumpFvPatchScalarField::enthalpyJumpFvPatchScalarField
(
const enthalpyJumpFvPatchScalarField& ptf,
const fvPatch& p,
const DimensionedField<scalar, volMesh>& iF,
const fvPatchFieldMapper& mapper
)
:
fixedJumpFvPatchField<scalar>(ptf, p, iF, mapper)
{}
Foam::enthalpyJumpFvPatchScalarField::enthalpyJumpFvPatchScalarField
(
const fvPatch& p,
const DimensionedField<scalar, volMesh>& iF,
const dictionary& dict
)
:
fixedJumpFvPatchField<scalar>(p, iF)
{
if (dict.found("value"))
{
fvPatchScalarField::operator=
(
scalarField("value", dict, p.size())
);
}
}
Foam::enthalpyJumpFvPatchScalarField::enthalpyJumpFvPatchScalarField
(
const enthalpyJumpFvPatchScalarField& ptf
)
:
cyclicLduInterfaceField(),
fixedJumpFvPatchField<scalar>(ptf)
{}
Foam::enthalpyJumpFvPatchScalarField::enthalpyJumpFvPatchScalarField
(
const enthalpyJumpFvPatchScalarField& ptf,
const DimensionedField<scalar, volMesh>& iF
)
:
fixedJumpFvPatchField<scalar>(ptf, iF)
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void Foam::enthalpyJumpFvPatchScalarField::updateCoeffs()
{
if (this->updated())
{
return;
}
if (this->cyclicPatch().owner())
{
const basicThermo& thermo = db().lookupObject<basicThermo>
(
"thermophysicalProperties"
);
label patchID = patch().index();
const temperatureJumpFvPatchScalarField& TbPatch =
refCast<const temperatureJumpFvPatchScalarField>
(
thermo.T().boundaryField()[patchID]
);
const scalar time = this->db().time().value();
const scalarField jumpTb
(
patch().size(), TbPatch.jumpTable().value(time)
);
const labelUList& faceCells = this->patch().faceCells();
if (db().foundObject<volScalarField>("h"))
{
jump_ = thermo.h(jumpTb, faceCells);
}
else if (db().foundObject<volScalarField>("hs"))
{
jump_ = thermo.hs(jumpTb, faceCells);
}
else
{
FatalErrorIn("enthalpyJumpFvPatchScalarField::updateCoeffs()")
<< " hs or h are not found in db()"
<< exit(FatalError);
}
}
fixedJumpFvPatchField<scalar>::updateCoeffs();
}
void Foam::enthalpyJumpFvPatchScalarField::write(Ostream& os) const
{
fixedJumpFvPatchField<scalar>::write(os);
this->writeEntry("value", os);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
makePatchTypeField
(
fvPatchScalarField,
enthalpyJumpFvPatchScalarField
);
}
// ************************************************************************* //
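A hypothetical case-file entry using the new condition pair (the patch name, jump value and the patchType/jumpTable keywords are inferred or illustrative; the matching enthalpyJump condition on h or hs is selected automatically by basicThermo::heBoundaryTypes once temperatureJump is applied to T):

    // 0/T -- one half of a coupled (cyclic) patch pair
    interface_half0
    {
        type            temperatureJump;
        patchType       cyclic;
        jumpTable       constant 20;    // temperature jump [K]
        value           uniform 300;
    }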

View File

@ -0,0 +1,142 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2012 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::enthalpyJumpFvPatchScalarField
Description
SourceFiles
enthalpyJumpFvPatchScalarField.C
\*---------------------------------------------------------------------------*/
#ifndef enthalpyJumpFvPatchScalarField_H
#define enthalpyJumpFvPatchScalarField_H
#include "fixedJumpFvPatchField.H"
#include "DataEntry.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class enthalpyJumpFvPatchScalarField Declaration
\*---------------------------------------------------------------------------*/
class enthalpyJumpFvPatchScalarField
:
public fixedJumpFvPatchField<scalar>
{
public:
//- Runtime type information
TypeName("enthalpyJump");
// Constructors
//- Construct from patch and internal field
enthalpyJumpFvPatchScalarField
(
const fvPatch&,
const DimensionedField<scalar, volMesh>&
);
//- Construct from patch, internal field and dictionary
enthalpyJumpFvPatchScalarField
(
const fvPatch&,
const DimensionedField<scalar, volMesh>&,
const dictionary&
);
//- Construct by mapping given enthalpyJumpFvPatchScalarField onto a
// new patch
enthalpyJumpFvPatchScalarField
(
const enthalpyJumpFvPatchScalarField&,
const fvPatch&,
const DimensionedField<scalar, volMesh>&,
const fvPatchFieldMapper&
);
//- Construct as copy
enthalpyJumpFvPatchScalarField
(
const enthalpyJumpFvPatchScalarField&
);
//- Construct and return a clone
virtual tmp<fvPatchField<scalar> > clone() const
{
return tmp<fvPatchField<scalar> >
(
new enthalpyJumpFvPatchScalarField(*this)
);
}
//- Construct as copy setting internal field reference
enthalpyJumpFvPatchScalarField
(
const enthalpyJumpFvPatchScalarField&,
const DimensionedField<scalar, volMesh>&
);
//- Construct and return a clone setting internal field reference
virtual tmp<fvPatchField<scalar> > clone
(
const DimensionedField<scalar, volMesh>& iF
) const
{
return tmp<fvPatchField<scalar> >
(
new enthalpyJumpFvPatchScalarField(*this, iF)
);
}
// Member functions
// Evaluation functions
//- Update the coefficients
virtual void updateCoeffs();
//- Write
virtual void write(Ostream&) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

Some files were not shown because too many files have changed in this diff.