Merge branch 'master' into cvm

graham committed 2009-02-09 10:01:14 +00:00
286 changed files with 10233 additions and 4140 deletions

View File

@ -0,0 +1,3 @@
buoyantBoussinesqSimpleFoam.C
EXE = $(FOAM_APPBIN)/buoyantBoussinesqSimpleFoam

View File

@ -0,0 +1,12 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/turbulenceModels \
-I$(LIB_SRC)/turbulenceModels/incompressible/RAS/lnInclude \
-I$(LIB_SRC)/transportModels \
-I$(LIB_SRC)/transportModels/incompressible/singlePhaseTransportModel
EXE_LIBS = \
-lfiniteVolume \
-lmeshTools \
-lincompressibleRASModels \
-lincompressibleTransportModels

View File

@ -0,0 +1,19 @@
{
volScalarField kappaEff
(
"kappaEff",
turbulence->nu() + turbulence->nut()/Prt
);
fvScalarMatrix TEqn
(
fvm::div(phi, T)
- fvm::Sp(fvc::div(phi), T)
- fvm::laplacian(kappaEff, T)
);
TEqn.relax();
eqnResidual = TEqn.solve().initialResidual();
maxResidual = max(eqnResidual, maxResidual);
}
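The temperature equation above uses an effective diffusivity kappaEff = nu + nut/Prt, i.e. the molecular contribution plus the turbulent viscosity scaled by the turbulent Prandtl number (the molecular Prandtl number is taken as 1 here). A minimal plain-C++ sketch of that relation; the names and values are illustrative, not the OpenFOAM API:

#include <iostream>

// Effective kinematic thermal diffusivity as used in TEqn.H:
// molecular viscosity nu plus turbulent viscosity nut scaled by
// the turbulent Prandtl number Prt (molecular Pr taken as 1).
double kappaEff(double nu, double nut, double Prt)
{
    return nu + nut/Prt;
}

int main()
{
    // illustrative values [m^2/s]
    std::cout << kappaEff(1e-5, 1e-3, 0.85) << "\n";
    return 0;
}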

View File

@ -0,0 +1,25 @@
// Solve the momentum equation
tmp<fvVectorMatrix> UEqn
(
fvm::div(phi, U)
- fvm::Sp(fvc::div(phi), U)
+ turbulence->divDevReff(U)
);
UEqn().relax();
eqnResidual = solve
(
UEqn()
==
-fvc::reconstruct
(
(
fvc::snGrad(pd)
- betaghf*fvc::snGrad(T)
) * mesh.magSf()
)
).initialResidual();
maxResidual = max(eqnResidual, maxResidual);

View File

@ -0,0 +1,105 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Application
buoyantBoussinesqSimpleFoam
Description
Steady-state solver for buoyant, turbulent flow of incompressible fluids
Uses the Boussinesq approximation:
\f[
rho_{eff} = 1 - beta(T - T_{ref})
\f]
where:
\f$ rho_{eff} \f$ = the effective (driving) density
beta = thermal expansion coefficient [1/K]
T = temperature [K]
\f$ T_{ref} \f$ = reference temperature [K]
Valid when:
\f[
rho_{eff} << 1
\f]
\*---------------------------------------------------------------------------*/
#include "fvCFD.H"
#include "singlePhaseTransportModel.H"
#include "RASModel.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
{
# include "setRootCase.H"
# include "createTime.H"
# include "createMesh.H"
# include "readEnvironmentalProperties.H"
# include "createFields.H"
# include "initContinuityErrs.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info<< "\nStarting time loop\n" << endl;
for (runTime++; !runTime.end(); runTime++)
{
Info<< "Time = " << runTime.timeName() << nl << endl;
# include "readSIMPLEControls.H"
# include "initConvergenceCheck.H"
pd.storePrevIter();
// Pressure-velocity SIMPLE corrector
{
# include "UEqn.H"
# include "TEqn.H"
# include "pdEqn.H"
}
turbulence->correct();
if (runTime.write())
{
# include "writeAdditionalFields.H"
}
Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"
<< nl << endl;
# include "convergenceCheck.H"
}
Info<< "End\n" << endl;
return 0;
}
// ************************************************************************* //
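The Description block above writes the Boussinesq effective density as rho_eff = 1 - beta(T - T_ref); the approximation is reasonable while beta(T - T_ref) remains small compared with 1, so density variations matter only in the buoyancy term. A small plain-C++ sketch of that relation (illustrative values, not the solver's API):

#include <iostream>

// Boussinesq effective (driving) density, normalised by the reference
// density: rhoEff = 1 - beta*(T - TRef). Reasonable while beta*(T - TRef) << 1.
double rhoEff(double beta, double T, double TRef)
{
    return 1.0 - beta*(T - TRef);
}

int main()
{
    const double beta = 3.0e-3; // thermal expansion coefficient [1/K]
    const double TRef = 300.0;  // reference temperature [K]

    const double temps[] = {290.0, 300.0, 310.0};
    for (double T : temps)
    {
        std::cout << "T = " << T << " K, rhoEff = "
                  << rhoEff(beta, T, TRef) << "\n";
    }
    return 0;
}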

View File

@ -0,0 +1,9 @@
// check convergence
if (maxResidual < convergenceCriterion)
{
Info<< "reached convergence criterion: " << convergenceCriterion << endl;
runTime.writeAndEnd();
Info<< "latestTime = " << runTime.timeName() << endl;
}

View File

@ -0,0 +1,67 @@
Info<< "Reading thermophysical properties\n" << endl;
Info<< "Reading field T\n" << endl;
volScalarField T
(
IOobject
(
"T",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
// kinematic pd
Info<< "Reading field pd\n" << endl;
volScalarField pd
(
IOobject
(
"pd",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
Info<< "Reading field U\n" << endl;
volVectorField U
(
IOobject
(
"U",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
# include "createPhi.H"
# include "readTransportProperties.H"
Info<< "Creating turbulence model\n" << endl;
autoPtr<incompressible::RASModel> turbulence
(
incompressible::RASModel::New(U, phi, laminarTransport)
);
Info<< "Calculating field beta*(g.h)\n" << endl;
surfaceScalarField betaghf("betagh", beta*(g & mesh.Cf()));
label pdRefCell = 0;
scalar pdRefValue = 0.0;
setRefCell
(
pd,
mesh.solutionDict().subDict("SIMPLE"),
pdRefCell,
pdRefValue
);

View File

@ -0,0 +1,7 @@
// initialize values for convergence checks
scalar eqnResidual = 1, maxResidual = 0;
scalar convergenceCriterion = 0;
simple.readIfPresent("convergence", convergenceCriterion);

View File

@ -0,0 +1,49 @@
{
volScalarField rUA("rUA", 1.0/UEqn().A());
surfaceScalarField rUAf("(1|A(U))", fvc::interpolate(rUA));
U = rUA*UEqn().H();
UEqn.clear();
phi = fvc::interpolate(U) & mesh.Sf();
adjustPhi(phi, U, pd);
surfaceScalarField buoyancyPhi = -betaghf*fvc::snGrad(T)*rUAf*mesh.magSf();
phi -= buoyancyPhi;
for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
{
fvScalarMatrix pdEqn
(
fvm::laplacian(rUAf, pd) == fvc::div(phi)
);
pdEqn.setReference(pdRefCell, pdRefValue);
// retain the residual from the first iteration
if (nonOrth == 0)
{
eqnResidual = pdEqn.solve().initialResidual();
maxResidual = max(eqnResidual, maxResidual);
}
else
{
pdEqn.solve();
}
if (nonOrth == nNonOrthCorr)
{
// Calculate the conservative fluxes
phi -= pdEqn.flux();
// Explicitly relax pressure for momentum corrector
pd.relax();
// Correct the momentum source with the pressure gradient flux
// calculated from the relaxed pressure
U -= rUA*fvc::reconstruct((buoyancyPhi + pdEqn.flux())/rUAf);
U.correctBoundaryConditions();
}
}
#include "continuityErrs.H"
}
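The loop above is the SIMPLE pressure correction: a Laplacian of pd is balanced against div(phi), the residual of the first non-orthogonal sweep is retained, the corrective flux is subtracted so phi becomes conservative, pd is relaxed explicitly, and U is corrected from the relaxed pressure. A toy 1-D plain-C++ sketch of the same bookkeeping on a uniform grid with a Jacobi solve; the grid, solver and names are illustrative stand-ins, not the OpenFOAM calls:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// Toy 1-D pressure-correction step in the spirit of pdEqn.H: given face
// fluxes phi with non-zero divergence, solve a Poisson equation for a
// correction pd (Jacobi sweeps), then subtract the corrective flux so the
// corrected flux field is (discretely) divergence free.
int main()
{
    const int n = 10;          // cells
    const double rUAf = 1.0;   // stands in for the 1/A(U) face coefficient
    std::vector<double> phi(n + 1, 1.0);   // face fluxes
    phi[n/2] = 1.5;                        // introduce a divergence error

    std::vector<double> pd(n, 0.0), pdNew(n, 0.0);
    for (int it = 0; it < 500; ++it)       // Jacobi sweeps
    {
        for (int i = 0; i < n; ++i)
        {
            const double pdW = (i > 0     ? pd[i - 1] : pd[i]);  // zero-gradient ends
            const double pdE = (i < n - 1 ? pd[i + 1] : pd[i]);
            const double divPhi = phi[i + 1] - phi[i];
            pdNew[i] = 0.5*(pdW + pdE - divPhi/rUAf);
        }
        pd.swap(pdNew);
    }

    // analogue of "phi -= pdEqn.flux()": corrective face flux from the pd gradient
    for (int f = 1; f < n; ++f)
    {
        phi[f] -= rUAf*(pd[f] - pd[f - 1]);
    }

    double maxDiv = 0.0;
    for (int i = 0; i < n; ++i)
    {
        maxDiv = std::max(maxDiv, std::fabs(phi[i + 1] - phi[i]));
    }
    std::cout << "max |div(phi)| after correction: " << maxDiv << "\n";
    return 0;
}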

View File

@ -0,0 +1,13 @@
singlePhaseTransportModel laminarTransport(U, phi);
// thermal expansion coefficient [1/K]
dimensionedScalar beta(laminarTransport.lookup("beta"));
// reference temperature [K]
dimensionedScalar TRef(laminarTransport.lookup("TRef"));
// reference kinematic pressure [m2/s2]
dimensionedScalar pRef(laminarTransport.lookup("pRef"));
// turbulent Prandtl number
dimensionedScalar Prt(laminarTransport.lookup("Prt"));

View File

@ -0,0 +1,29 @@
{
volScalarField rhoEff
(
IOobject
(
"rhoEff",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
1.0 - beta*(T - TRef)
);
rhoEff.write();
volScalarField p
(
IOobject
(
"p",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
pd + rhoEff*(g & mesh.C()) + pRef
);
p.write();
}
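This file rebuilds the fields that are not solved for directly: rhoEff from the Boussinesq relation and the static pressure p = pd + rhoEff*(g . x) + pRef. A plain-C++ sketch of that reconstruction at a single point (scalar stand-ins, illustrative values only):

#include <iostream>

// Reconstruct the static pressure from the solved-for pd field,
// as in writeAdditionalFields.H: p = pd + rhoEff*(g . x) + pRef.
int main()
{
    const double beta = 3.0e-3;  // [1/K]
    const double TRef = 300.0;   // [K]
    const double T    = 310.0;   // [K]
    const double gz   = -9.81;   // gravity component [m/s^2]
    const double z    = 2.0;     // cell-centre height [m]
    const double pd   = 5.0;     // solved kinematic pressure
    const double pRef = 0.0;     // reference kinematic pressure

    const double rhoEff = 1.0 - beta*(T - TRef);
    const double p = pd + rhoEff*(gz*z) + pRef;

    std::cout << "rhoEff = " << rhoEff << ", p = " << p << "\n";
    return 0;
}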

View File

@ -9,4 +9,18 @@
    UEqn.relax();

-   solve(UEqn == -fvc::grad(pd) - fvc::grad(rho)*gh);
+   if (momentumPredictor)
+   {
+       solve
+       (
+           UEqn
+        ==
+          -fvc::reconstruct
+           (
+               (
+                   fvc::snGrad(pd)
+                 + ghf*fvc::snGrad(rho)
+               ) * mesh.magSf()
+           )
+       );
+   }

View File

@ -41,37 +41,41 @@ Description
int main(int argc, char *argv[])
{
-   # include "setRootCase.H"
-   # include "createTime.H"
-   # include "createMesh.H"
-   # include "readEnvironmentalProperties.H"
-   # include "createFields.H"
-   # include "initContinuityErrs.H"
+   #include "setRootCase.H"
+   #include "createTime.H"
+   #include "createMesh.H"
+   #include "readEnvironmentalProperties.H"
+   #include "createFields.H"
+   #include "initContinuityErrs.H"
+   #include "readTimeControls.H"
+   #include "compressibleCourantNo.H"
+   #include "setInitialDeltaT.H"

    // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

    Info<< "\nStarting time loop\n" << endl;

    while (runTime.run())
    {
-       # include "readPISOControls.H"
-       # include "compressibleCourantNo.H"
-       //# include "setDeltaT.H"
+       #include "readTimeControls.H"
+       #include "readPISOControls.H"
+       #include "compressibleCourantNo.H"
+       #include "setDeltaT.H"

        runTime++;

        Info<< "Time = " << runTime.timeName() << nl << endl;

-       # include "rhoEqn.H"
-       # include "UEqn.H"
+       #include "rhoEqn.H"
+       #include "UEqn.H"

        // --- PISO loop
        for (int corr=0; corr<nCorr; corr++)
        {
-           # include "hEqn.H"
-           # include "pEqn.H"
+           #include "hEqn.H"
+           #include "pEqn.H"
        }

        turbulence->correct();

View File

@ -52,11 +52,13 @@
        )
    );

-   Info<< "Creating field dpdt\n" << endl;
-   volScalarField dpdt = fvc::ddt(p);
+   Info<< "Creating field DpDt\n" << endl;
+   volScalarField DpDt =
+       fvc::DDt(surfaceScalarField("phiU", phi/fvc::interpolate(rho)), p);

    Info<< "Calculating field g.h\n" << endl;
    volScalarField gh("gh", g & mesh.C());
+   surfaceScalarField ghf("gh", g & mesh.Cf());

    dimensionedScalar pRef("pRef", p.dimensions(), thermo->lookup("pRef"));

View File

@ -5,9 +5,7 @@
      + fvm::div(phi, h)
      - fvm::laplacian(turbulence->alphaEff(), h)
     ==
-       dpdt
-     + fvc::div(phi/fvc::interpolate(rho)*fvc::interpolate(p))
-     - p*fvc::div(phi/fvc::interpolate(rho))
+       DpDt
    );

    hEqn.relax();

View File

@ -1,60 +1,67 @@
+ {
    bool closedVolume = pd.needReference();
    rho = thermo->rho();

    volScalarField rUA = 1.0/UEqn.A();
+   surfaceScalarField rhorUAf("(rho*(1|A(U)))", fvc::interpolate(rho*rUA));
    U = rUA*UEqn.H();

-   phi =
-       fvc::interpolate(rho)
-      *(
-           (fvc::interpolate(U) & mesh.Sf())
-         + fvc::ddtPhiCorr(rUA, rho, U, phi)
-       )
-     - fvc::interpolate(rho*rUA*gh)*fvc::snGrad(rho)*mesh.magSf();
+   surfaceScalarField phiU
+   (
+       fvc::interpolate(rho)
+      *(
+           (fvc::interpolate(U) & mesh.Sf())
+         + fvc::ddtPhiCorr(rUA, rho, U, phi)
+       )
+   );
+
+   phi = phiU - ghf*fvc::snGrad(rho)*rhorUAf*mesh.magSf();

    for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
    {
        fvScalarMatrix pdEqn
        (
            fvm::ddt(psi, pd)
          + fvc::ddt(psi)*pRef
          + fvc::ddt(psi, rho)*gh
          + fvc::div(phi)
-         - fvm::laplacian(rho*rUA, pd)
+         - fvm::laplacian(rhorUAf, pd)
        );

        if (corr == nCorr-1 && nonOrth == nNonOrthCorr)
        {
            pdEqn.solve(mesh.solver(pd.name() + "Final"));
        }
        else
        {
            pdEqn.solve(mesh.solver(pd.name()));
        }

        if (nonOrth == nNonOrthCorr)
        {
            phi += pdEqn.flux();
        }
    }

-   p == pd + rho*gh + pRef;
-   dpdt = fvc::ddt(p);
-
-   #include "rhoEqn.H"
-   #include "compressibleContinuityErrs.H"
-
-   U -= rUA*(fvc::grad(pd) + fvc::grad(rho)*gh);
+   U += rUA*fvc::reconstruct((phi - phiU)/rhorUAf);
    U.correctBoundaryConditions();

+   p == pd + rho*gh + pRef;
+
+   DpDt = fvc::DDt(surfaceScalarField("phiU", phi/fvc::interpolate(rho)), p);
+
+   #include "rhoEqn.H"
+   #include "compressibleContinuityErrs.H"
+
    // For closed-volume cases adjust the pressure and density levels
    // to obey overall mass continuity
    if (closedVolume)
    {
        p += (initialMass - fvc::domainIntegrate(thermo->psi()*p))
            /fvc::domainIntegrate(thermo->psi());
-       pd == p - (rho*gh + pRef);
        rho = thermo->rho();
    }

+   pd == p - (rho*gh + pRef);
+ }

View File

@ -11,7 +11,15 @@
    eqnResidual = solve
    (
-       UEqn() == -fvc::grad(pd) - fvc::grad(rho)*gh
+       UEqn()
+    ==
+       -fvc::reconstruct
+       (
+           (
+               fvc::snGrad(pd)
+             + ghf*fvc::snGrad(rho)
+           ) * mesh.magSf()
+       )
    ).initialResidual();

    maxResidual = max(eqnResidual, maxResidual);

View File

@ -53,6 +53,7 @@
Info<< "Calculating field g.h\n" << endl; Info<< "Calculating field g.h\n" << endl;
volScalarField gh("gh", g & mesh.C()); volScalarField gh("gh", g & mesh.C());
surfaceScalarField ghf("gh", g & mesh.Cf());
dimensionedScalar pRef("pRef", p.dimensions(), thermo->lookup("pRef")); dimensionedScalar pRef("pRef", p.dimensions(), thermo->lookup("pRef"));

View File

@ -1,55 +1,65 @@
+ {
    volScalarField rUA = 1.0/UEqn().A();
+   surfaceScalarField rhorUAf("(rho*(1|A(U)))", fvc::interpolate(rho*rUA));

    U = rUA*UEqn().H();
    UEqn.clear();

    phi = fvc::interpolate(rho)*(fvc::interpolate(U) & mesh.Sf());
    bool closedVolume = adjustPhi(phi, U, p);
-   phi -= fvc::interpolate(rho*gh*rUA)*fvc::snGrad(rho)*mesh.magSf();
+   surfaceScalarField buoyancyPhi = ghf*fvc::snGrad(rho)*rhorUAf*mesh.magSf();
+   phi -= buoyancyPhi;

    for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
    {
        fvScalarMatrix pdEqn
        (
-           fvm::laplacian(rho*rUA, pd) == fvc::div(phi)
+           fvm::laplacian(rhorUAf, pd) == fvc::div(phi)
        );

        pdEqn.setReference(pdRefCell, pdRefValue);

        // retain the residual from the first iteration
        if (nonOrth == 0)
        {
            eqnResidual = pdEqn.solve().initialResidual();
            maxResidual = max(eqnResidual, maxResidual);
        }
        else
        {
            pdEqn.solve();
        }

        if (nonOrth == nNonOrthCorr)
        {
+           // Calculate the conservative fluxes
            phi -= pdEqn.flux();
+
+           // Explicitly relax pressure for momentum corrector
+           pd.relax();
+
+           // Correct the momentum source with the pressure gradient flux
+           // calculated from the relaxed pressure
+           U -= rUA*fvc::reconstruct((buoyancyPhi + pdEqn.flux())/rhorUAf);
+           U.correctBoundaryConditions();
        }
    }

    #include "continuityErrs.H"

-   // Explicitly relax pressure for momentum corrector
-   pd.relax();
-
-   p = pd + rho*gh + pRef;
-
-   U -= rUA*(fvc::grad(pd) + fvc::grad(rho)*gh);
-   U.correctBoundaryConditions();
+   p == pd + rho*gh + pRef;

    // For closed-volume cases adjust the pressure and density levels
    // to obey overall mass continuity
    if (closedVolume)
    {
        p += (initialMass - fvc::domainIntegrate(thermo->psi()*p))
            /fvc::domainIntegrate(thermo->psi());
-       pd == p - (rho*gh + pRef);
    }

    rho = thermo->rho();
    rho.relax();
    Info<< "rho max/min : " << max(rho).value() << " " << min(rho).value() << endl;

+   pd == p - (rho*gh + pRef);
+ }

View File

@ -1,4 +1,5 @@
EXE_INC = \
+   -I../buoyantSimpleFoam \
    -I$(LIB_SRC)/thermophysicalModels/basic/lnInclude \
    -I$(LIB_SRC)/thermophysicalModels/radiation/lnInclude \
    -I$(LIB_SRC)/turbulenceModels \

View File

@ -1,18 +0,0 @@
// Solve the Momentum equation
tmp<fvVectorMatrix> UEqn
(
fvm::div(phi, U)
- fvm::Sp(fvc::div(phi), U)
+ turbulence->divDevRhoReff(U)
);
UEqn().relax();
eqnResidual = solve
(
UEqn() == -fvc::grad(pd) - fvc::grad(rho)*gh
).initialResidual();
maxResidual = max(eqnResidual, maxResidual);

View File

@ -54,6 +54,7 @@
Info<< "Calculating field g.h\n" << endl; Info<< "Calculating field g.h\n" << endl;
volScalarField gh("gh", g & mesh.C()); volScalarField gh("gh", g & mesh.C());
surfaceScalarField ghf("gh", g & mesh.Cf());
dimensionedScalar pRef("pRef", p.dimensions(), thermo->lookup("pRef")); dimensionedScalar pRef("pRef", p.dimensions(), thermo->lookup("pRef"));

View File

@ -1,54 +0,0 @@
volScalarField rUA = 1.0/UEqn().A();
U = rUA*UEqn().H();
UEqn.clear();
phi = fvc::interpolate(rho)*(fvc::interpolate(U) & mesh.Sf());
bool closedVolume = adjustPhi(phi, U, p);
phi -= fvc::interpolate(rho*gh*rUA)*fvc::snGrad(rho)*mesh.magSf();
for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
{
fvScalarMatrix pdEqn
(
fvm::laplacian(rho*rUA, pd) == fvc::div(phi)
);
pdEqn.setReference(pdRefCell, pdRefValue);
// retain the residual from the first iteration
if (nonOrth == 0)
{
eqnResidual = pdEqn.solve().initialResidual();
maxResidual = max(eqnResidual, maxResidual);
}
else
{
pdEqn.solve();
}
if (nonOrth == nNonOrthCorr)
{
phi -= pdEqn.flux();
}
}
#include "continuityErrs.H"
// Explicitly relax pressure for momentum corrector
pd.relax();
p = pd + rho*gh + pRef;
U -= rUA*(fvc::grad(pd) + fvc::grad(rho)*gh);
U.correctBoundaryConditions();
// For closed-volume cases adjust the pressure and density levels
// to obey overall mass continuity
if (closedVolume)
{
p += (initialMass - fvc::domainIntegrate(thermo->psi()*p))
/fvc::domainIntegrate(thermo->psi());
pd == p - (rho*gh + pRef);
}
rho = thermo->rho();
rho.relax();
Info<< "rho max/min : " << max(rho).value() << " " << min(rho).value() << endl;

View File

@ -7,6 +7,8 @@ derivedFvPatchFields/solidWallHeatFluxTemperatureCoupled/solidWallHeatFluxTemper
derivedFvPatchFields/solidWallTemperatureCoupled/solidWallTemperatureCoupledFvPatchScalarField.C
derivedFvPatchFields/solidWallMixedTemperatureCoupled/solidWallMixedTemperatureCoupledFvPatchScalarField.C
+ fluid/compressibleCourantNo.C
chtMultiRegionFoam.C

EXE = $(FOAM_APPBIN)/chtMultiRegionFoam

View File

@ -36,16 +36,10 @@ Description
#include "turbulenceModel.H" #include "turbulenceModel.H"
#include "fixedGradientFvPatchFields.H" #include "fixedGradientFvPatchFields.H"
#include "regionProperties.H" #include "regionProperties.H"
#include "compressibleCourantNo.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#include "solveContinuityEquation.C"
#include "solveMomentumEquation.C"
#include "compressibleContinuityErrors.C"
#include "solvePressureDifferenceEquation.C"
#include "solveEnthalpyEquation.C"
#include "compressibleCourantNo.C"
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
@ -58,7 +52,6 @@ int main(int argc, char *argv[])
# include "createSolidMeshes.H" # include "createSolidMeshes.H"
# include "createFluidFields.H" # include "createFluidFields.H"
# include "createSolidFields.H" # include "createSolidFields.H"
# include "initContinuityErrs.H" # include "initContinuityErrs.H"
@ -89,6 +82,7 @@ int main(int argc, char *argv[])
{ {
Info<< "\nSolving for fluid region " Info<< "\nSolving for fluid region "
<< fluidRegions[i].name() << endl; << fluidRegions[i].name() << endl;
# include "setRegionFluidFields.H"
# include "readFluidMultiRegionPISOControls.H" # include "readFluidMultiRegionPISOControls.H"
# include "solveFluid.H" # include "solveFluid.H"
} }
@ -97,6 +91,7 @@ int main(int argc, char *argv[])
{ {
Info<< "\nSolving for solid region " Info<< "\nSolving for solid region "
<< solidRegions[i].name() << endl; << solidRegions[i].name() << endl;
# include "setRegionSolidFields.H"
# include "readSolidMultiRegionPISOControls.H" # include "readSolidMultiRegionPISOControls.H"
# include "solveSolid.H" # include "solveSolid.H"
} }

View File

@ -1,10 +1,25 @@
-   tmp<fvVectorMatrix> UEqn = solveMomentumEquation
-   (
-       momentumPredictor,
-       Uf[i],
-       rhof[i],
-       phif[i],
-       pdf[i],
-       ghf[i],
-       turb[i]
-   );
+   // Solve the Momentum equation
+   tmp<fvVectorMatrix> UEqn
+   (
+       fvm::ddt(rho, U)
+     + fvm::div(phi, U)
+     + turb.divDevRhoReff(U)
+   );
+
+   UEqn().relax();
+
+   if (momentumPredictor)
+   {
+       solve
+       (
+           UEqn()
+        ==
+           -fvc::reconstruct
+           (
+               (
+                   fvc::snGrad(pd)
+                 + ghf*fvc::snGrad(rho)
+               ) * mesh.magSf()
+           )
+       );
+   }

View File

@ -1,58 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Description
Continuity errors for fluid meshes
\*---------------------------------------------------------------------------*/
void compressibleContinuityErrors
(
scalar& cumulativeContErr,
const volScalarField& rho,
const basicThermo& thermo
)
{
dimensionedScalar totalMass = fvc::domainIntegrate(rho);
scalar sumLocalContErr =
(
fvc::domainIntegrate(mag(rho - thermo.rho()))/totalMass
).value();
scalar globalContErr =
(
fvc::domainIntegrate(rho - thermo.rho())/totalMass
).value();
cumulativeContErr += globalContErr;
const word& regionName = rho.mesh().name();
Info<< "time step continuity errors (" << regionName << ")"
<< ": sum local = " << sumLocalContErr
<< ", global = " << globalContErr
<< ", cumulative = " << cumulativeContErr
<< endl;
}

View File

@ -0,0 +1,21 @@
{
dimensionedScalar totalMass = fvc::domainIntegrate(rho);
scalar sumLocalContErr =
(
fvc::domainIntegrate(mag(rho - thermo.rho()))/totalMass
).value();
scalar globalContErr =
(
fvc::domainIntegrate(rho - thermo.rho())/totalMass
).value();
cumulativeContErr[i] += globalContErr;
Info<< "time step continuity errors (" << mesh.name() << ")"
<< ": sum local = " << sumLocalContErr
<< ", global = " << globalContErr
<< ", cumulative = " << cumulativeContErr[i]
<< endl;
}
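The two error measures are mass-weighted differences between the transported density and the density returned by the thermodynamics: sum local = integral of |rho - rho_thermo| dV / integral of rho dV, and global = integral of (rho - rho_thermo) dV / integral of rho dV, with the global value accumulated per region. A plain-C++ sketch over per-cell arrays (the arrays and values are illustrative):

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Continuity-error measures in the spirit of compressibleContinuityErrors.H:
// compare the transported density rho with the thermodynamic density,
// weighting each cell by its volume.
int main()
{
    std::vector<double> V     {1.0, 1.0, 2.0};     // cell volumes
    std::vector<double> rho   {1.20, 1.18, 1.21};  // transported density
    std::vector<double> rhoTh {1.19, 1.19, 1.20};  // thermo.rho()

    double totalMass = 0.0, sumLocal = 0.0, global = 0.0;
    for (std::size_t i = 0; i < V.size(); ++i)
    {
        totalMass += rho[i]*V[i];
        sumLocal  += std::fabs(rho[i] - rhoTh[i])*V[i];
        global    += (rho[i] - rhoTh[i])*V[i];
    }

    std::cout << "sum local = " << sumLocal/totalMass
              << ", global = " << global/totalMass << "\n";
    return 0;
}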

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
- \\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
+ \\ / A nd | Copyright (C) 1991-2008 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License

@ -22,13 +22,12 @@ License

    along with OpenFOAM; if not, write to the Free Software Foundation,
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

- Description
-     Calculates and outputs the mean and maximum Courant Numbers for the fluid
-     regions

\*---------------------------------------------------------------------------*/

- scalar compressibleCourantNo
+ #include "compressibleCourantNo.H"
+ #include "fvc.H"
+
+ Foam::scalar Foam::compressibleCourantNo
(
    const fvMesh& mesh,
    const Time& runTime,
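The Courant number compares the time step with the convective transit time through each cell; a common per-cell estimate for a compressible flux is Co = 0.5 * deltaT * sum_f |phi_f| / (rho*V), with the mean and maximum reported. A plain-C++ sketch on a toy cell list; the formula presentation and all data here are illustrative stand-ins, not the OpenFOAM implementation:

#include <algorithm>
#include <iostream>
#include <vector>

// Courant-number estimate per cell: Co = 0.5*deltaT*sum|phi_f|/(rho*V),
// reporting mean and maximum over a toy two-cell mesh.
int main()
{
    const double deltaT = 1.0e-3;

    struct Cell { double V, rho, sumMagPhi; };   // volume, density, sum of |face flux|
    std::vector<Cell> cells {{1e-3, 1.2, 0.06}, {2e-3, 1.2, 0.10}};

    double maxCo = 0.0, meanCo = 0.0;
    for (const Cell& c : cells)
    {
        const double Co = 0.5*deltaT*c.sumMagPhi/(c.rho*c.V);
        maxCo = std::max(maxCo, Co);
        meanCo += Co;
    }
    meanCo /= cells.size();

    std::cout << "Courant Number mean: " << meanCo << " max: " << maxCo << "\n";
    return 0;
}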

View File

@ -2,7 +2,7 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
- \\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
+ \\ / A nd | Copyright (C) 1991-2008 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License

@ -23,34 +23,27 @@ License

    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

Description
-   Solve enthalpy equation
+   Calculates and outputs the mean and maximum Courant Numbers for the fluid
+   regions

\*---------------------------------------------------------------------------*/

- void solveEnthalpyEquation
- (
-     const volScalarField& rho,
-     const volScalarField& DpDt,
-     const surfaceScalarField& phi,
-     const compressible::turbulenceModel& turb,
-     basicThermo& thermo
- )
- {
-     volScalarField& h = thermo.h();
-
-     tmp<fvScalarMatrix> hEqn
-     (
-         fvm::ddt(rho, h)
-       + fvm::div(phi, h)
-       - fvm::laplacian(turb.alphaEff(), h)
-      ==
-         DpDt
-     );
-
-     hEqn().relax();
-     hEqn().solve();
-
-     thermo.correct();
-
-     Info<< "Min/max T:" << min(thermo.T()) << ' ' << max(thermo.T())
-         << endl;
- }
+ #ifndef compressibleCourantNo_H
+ #define compressibleCourantNo_H
+
+ #include "fvMesh.H"
+
+ namespace Foam
+ {
+     scalar compressibleCourantNo
+     (
+         const fvMesh& mesh,
+         const Time& runTime,
+         const volScalarField& rho,
+         const surfaceScalarField& phi
+     );
+ }
+
+ #endif
+
+ // ************************************************************************* //

View File

@ -7,8 +7,8 @@
        (
            fluidRegions[regionI],
            runTime,
-           rhof[regionI],
-           phif[regionI]
+           rhoFluid[regionI],
+           phiFluid[regionI]
        ),
        CoNum
    );

View File

@ -1,15 +1,16 @@
// Initialise fluid field pointer lists
- PtrList<basicThermo> thermof(fluidRegions.size());
- PtrList<volScalarField> rhof(fluidRegions.size());
- PtrList<volScalarField> Kf(fluidRegions.size());
- PtrList<volVectorField> Uf(fluidRegions.size());
- PtrList<surfaceScalarField> phif(fluidRegions.size());
- PtrList<compressible::turbulenceModel> turb(fluidRegions.size());
- PtrList<volScalarField> DpDtf(fluidRegions.size());
- PtrList<volScalarField> ghf(fluidRegions.size());
- PtrList<volScalarField> pdf(fluidRegions.size());
- List<scalar> initialMassf(fluidRegions.size());
+ PtrList<basicThermo> thermoFluid(fluidRegions.size());
+ PtrList<volScalarField> rhoFluid(fluidRegions.size());
+ PtrList<volScalarField> KFluid(fluidRegions.size());
+ PtrList<volVectorField> UFluid(fluidRegions.size());
+ PtrList<surfaceScalarField> phiFluid(fluidRegions.size());
+ PtrList<compressible::turbulenceModel> turbulence(fluidRegions.size());
+ PtrList<volScalarField> DpDtFluid(fluidRegions.size());
+ PtrList<volScalarField> ghFluid(fluidRegions.size());
+ PtrList<surfaceScalarField> ghfFluid(fluidRegions.size());
+ PtrList<volScalarField> pdFluid(fluidRegions.size());
+ List<scalar> initialMassFluid(fluidRegions.size());

dimensionedScalar pRef
(

@ -24,8 +25,8 @@

    Info<< "*** Reading fluid mesh thermophysical properties for region "
        << fluidRegions[i].name() << nl << endl;

-   Info<< "    Adding to pdf\n" << endl;
-   pdf.set
+   Info<< "    Adding to pdFluid\n" << endl;
+   pdFluid.set
    (
        i,
        new volScalarField

@ -42,16 +43,15 @@

        )
    );

-   Info<< "    Adding to thermof\n" << endl;
-   thermof.set
+   Info<< "    Adding to thermoFluid\n" << endl;
+   thermoFluid.set
    (
        i,
        basicThermo::New(fluidRegions[i]).ptr()
    );

-   Info<< "    Adding to rhof\n" << endl;
-   rhof.set
+   Info<< "    Adding to rhoFluid\n" << endl;
+   rhoFluid.set
    (
        i,
        new volScalarField

@ -64,12 +64,12 @@

                IOobject::NO_READ,
                IOobject::AUTO_WRITE
            ),
-           thermof[i].rho()
+           thermoFluid[i].rho()
        )
    );

-   Info<< "    Adding to Kf\n" << endl;
-   Kf.set
+   Info<< "    Adding to KFluid\n" << endl;
+   KFluid.set
    (
        i,
        new volScalarField

@ -82,12 +82,12 @@

                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
-           thermof[i].Cp()*thermof[i].alpha()
+           thermoFluid[i].Cp()*thermoFluid[i].alpha()
        )
    );

-   Info<< "    Adding to Uf\n" << endl;
-   Uf.set
+   Info<< "    Adding to UFluid\n" << endl;
+   UFluid.set
    (
        i,
        new volVectorField

@ -104,8 +104,8 @@

        )
    );

-   Info<< "    Adding to phif\n" << endl;
-   phif.set
+   Info<< "    Adding to phiFluid\n" << endl;
+   phiFluid.set
    (
        i,
        new surfaceScalarField

@ -118,29 +118,29 @@

                IOobject::READ_IF_PRESENT,
                IOobject::AUTO_WRITE
            ),
-           linearInterpolate(rhof[i]*Uf[i])
+           linearInterpolate(rhoFluid[i]*UFluid[i])
          & fluidRegions[i].Sf()
        )
    );

-   Info<< "    Adding to turb\n" << endl;
-   turb.set
+   Info<< "    Adding to turbulence\n" << endl;
+   turbulence.set
    (
        i,
        autoPtr<compressible::turbulenceModel>
        (
            compressible::turbulenceModel::New
            (
-               rhof[i],
-               Uf[i],
-               phif[i],
-               thermof[i]
+               rhoFluid[i],
+               UFluid[i],
+               phiFluid[i],
+               thermoFluid[i]
            )
        ).ptr()
    );

-   Info<< "    Adding to DpDtf\n" << endl;
-   DpDtf.set
+   Info<< "    Adding to DpDtFluid\n" << endl;
+   DpDtFluid.set
    (
        i,
        new volScalarField

@ -150,9 +150,9 @@

            surfaceScalarField
            (
                "phiU",
-               phif[i]/fvc::interpolate(rhof[i])
+               phiFluid[i]/fvc::interpolate(rhoFluid[i])
            ),
-           thermof[i].p()
+           thermoFluid[i].p()
        )
    )
);

@ -162,8 +162,8 @@

        ("environmentalProperties");
    dimensionedVector g(environmentalProperties.lookup("g"));

-   Info<< "    Adding to ghf\n" << endl;
-   ghf.set
+   Info<< "    Adding to ghFluid\n" << endl;
+   ghFluid.set
    (
        i,
        new volScalarField

@ -172,12 +172,21 @@

            g & fluidRegions[i].C()
        )
    );

+   ghfFluid.set
+   (
+       i,
+       new surfaceScalarField
+       (
+           "ghf",
+           g & fluidRegions[i].Cf()
+       )
+   );

    Info<< "    Updating p from pd\n" << endl;
-   thermof[i].p() == pdf[i] + rhof[i]*ghf[i] + pRef;
-   thermof[i].correct();
+   thermoFluid[i].p() == pdFluid[i] + rhoFluid[i]*ghFluid[i] + pRef;
+   thermoFluid[i].correct();

-   initialMassf[i] = fvc::domainIntegrate(rhof[i]).value();
+   initialMassFluid[i] = fvc::domainIntegrate(rhoFluid[i]).value();
}

View File

@ -1,9 +1,17 @@
-   solveEnthalpyEquation
-   (
-       rhof[i],
-       DpDtf[i],
-       phif[i],
-       turb[i],
-       thermof[i]
-   );
+   {
+       tmp<fvScalarMatrix> hEqn
+       (
+           fvm::ddt(rho, h)
+         + fvm::div(phi, h)
+         - fvm::laplacian(turb.alphaEff(), h)
+        ==
+           DpDt
+       );
+
+       hEqn().relax();
+       hEqn().solve();
+
+       thermo.correct();
+
+       Info<< "Min/max T:" << min(thermo.T()) << ' ' << max(thermo.T())
+           << endl;
+   }

View File

@ -0,0 +1 @@
List<scalar> cumulativeContErr(fluidRegions.size(), 0.0);

View File

@ -1,60 +1,75 @@
{
-   bool closedVolume = false;
-
-   rhof[i] = thermof[i].rho();
-
-   volScalarField rUA = 1.0/UEqn().A();
-   Uf[i] = rUA*UEqn().H();
-
-   phif[i] =
-       fvc::interpolate(rhof[i])
-      *(
-           (fvc::interpolate(Uf[i]) & fluidRegions[i].Sf())
-         + fvc::ddtPhiCorr(rUA, rhof[i], Uf[i], phif[i])
-       )
-     - fvc::interpolate(rhof[i]*rUA*ghf[i])
-      *fvc::snGrad(rhof[i])
-      *fluidRegions[i].magSf();
-
-   // Solve pressure difference
-   # include "pdEqn.H"
-
-   // Solve continuity
-   # include "rhoEqn.H"
-
-   // Update pressure field (including bc)
-   thermof[i].p() == pdf[i] + rhof[i]*ghf[i] + pRef;
-
-   DpDtf[i] = fvc::DDt
-   (
-       surfaceScalarField("phiU", phif[i]/fvc::interpolate(rhof[i])),
-       thermof[i].p()
-   );
-
-   // Update continuity errors
-   compressibleContinuityErrors(cumulativeContErr, rhof[i], thermof[i]);
-
-   // Correct velocity field
-   Uf[i] -= rUA*(fvc::grad(pdf[i]) + fvc::grad(rhof[i])*ghf[i]);
-   Uf[i].correctBoundaryConditions();
-
-   // For closed-volume cases adjust the pressure and density levels
-   // to obey overall mass continuity
-   if (closedVolume)
-   {
-       thermof[i].p() +=
-       (
-           dimensionedScalar
-           (
-               "massIni",
-               dimMass,
-               initialMassf[i]
-           )
-         - fvc::domainIntegrate(thermof[i].psi()*thermof[i].p())
-       )/fvc::domainIntegrate(thermof[i].psi());
-
-       pdf[i] == thermof[i].p() - (rhof[i]*ghf[i] + pRef);
-       rhof[i] = thermof[i].rho();
-   }
-
-   // Update thermal conductivity
-   Kf[i] = thermof[i].Cp()*turb[i].alphaEff();
+   bool closedVolume = pd.needReference();
+
+   rho = thermo.rho();
+
+   volScalarField rUA = 1.0/UEqn().A();
+   surfaceScalarField rhorUAf("(rho*(1|A(U)))", fvc::interpolate(rho*rUA));
+
+   U = rUA*UEqn().H();
+
+   surfaceScalarField phiU
+   (
+       fvc::interpolate(rho)
+      *(
+           (fvc::interpolate(U) & mesh.Sf())
+         + fvc::ddtPhiCorr(rUA, rho, U, phi)
+       )
+   );
+
+   phi = phiU - ghf*fvc::snGrad(rho)*rhorUAf*mesh.magSf();
+
+   for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
+   {
+       fvScalarMatrix pdEqn
+       (
+           fvm::ddt(psi, pd)
+         + fvc::ddt(psi)*pRef
+         + fvc::ddt(psi, rho)*gh
+         + fvc::div(phi)
+         - fvm::laplacian(rhorUAf, pd)
+       );
+
+       if (corr == nCorr-1 && nonOrth == nNonOrthCorr)
+       {
+           pdEqn.solve(mesh.solver(pd.name() + "Final"));
+       }
+       else
+       {
+           pdEqn.solve(mesh.solver(pd.name()));
+       }
+
+       if (nonOrth == nNonOrthCorr)
+       {
+           phi += pdEqn.flux();
+       }
+   }
+
+   // Correct velocity field
+   U += rUA*fvc::reconstruct((phi - phiU)/rhorUAf);
+   U.correctBoundaryConditions();
+
+   // Update pressure field (including bc)
+   p == pd + rho*gh + pRef;
+   DpDt = fvc::DDt(surfaceScalarField("phiU", phi/fvc::interpolate(rho)), p);
+
+   // Solve continuity
+   # include "rhoEqn.H"
+
+   // Update continuity errors
+   # include "compressibleContinuityErrors.H"
+
+   // For closed-volume cases adjust the pressure and density levels
+   // to obey overall mass continuity
+   if (closedVolume)
+   {
+       p += (massIni - fvc::domainIntegrate(psi*p))/fvc::domainIntegrate(psi);
+       rho = thermo.rho();
+   }
+
+   // Update thermal conductivity
+   K = thermoFluid[i].Cp()*turb.alphaEff();
+
+   // Update pd (including bc)
+   pd == p - (rho*gh + pRef);
}

View File

@ -1,14 +0,0 @@
solvePressureDifferenceEquation
(
corr,
nCorr,
nNonOrthCorr,
closedVolume,
pdf[i],
pRef,
rhof[i],
thermof[i].psi(),
rUA,
ghf[i],
phif[i]
);

View File

@ -1 +1 @@
- solveContinuityEquation(rhof[i], phif[i]);
+ solve(fvm::ddt(rho) + fvc::div(phi));

View File

@ -1,15 +0,0 @@
if (adjustTimeStep)
{
if (CoNum > SMALL)
{
runTime.setDeltaT
(
min
(
maxCo*runTime.deltaT().value()/CoNum,
maxDeltaT
)
);
}
}
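The removed file above implements the adaptive time-step rule: scale the current step so the target Courant number maxCo is not exceeded, capped by maxDeltaT. A plain-C++ sketch of that rule (the values in main are illustrative):

#include <algorithm>
#include <iostream>

// Adaptive time-step rule as in the removed setDeltaT.H:
// deltaT_new = min(maxCo*deltaT/CoNum, maxDeltaT), guarded against CoNum ~ 0.
double newDeltaT(double deltaT, double CoNum, double maxCo, double maxDeltaT)
{
    const double SMALL = 1.0e-15;
    if (CoNum > SMALL)
    {
        return std::min(maxCo*deltaT/CoNum, maxDeltaT);
    }
    return deltaT;
}

int main()
{
    std::cout << newDeltaT(1e-3, 0.8, 0.5, 1e-2) << "\n";  // step shrinks
    std::cout << newDeltaT(1e-3, 0.1, 0.5, 1e-2) << "\n";  // step grows
    return 0;
}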

View File

@ -0,0 +1,18 @@
const fvMesh& mesh = fluidRegions[i];
basicThermo& thermo = thermoFluid[i];
volScalarField& rho = rhoFluid[i];
volScalarField& K = KFluid[i];
volVectorField& U = UFluid[i];
surfaceScalarField phi = phiFluid[i];
compressible::turbulenceModel& turb = turbulence[i];
volScalarField& DpDt = DpDtFluid[i];
const volScalarField& gh = ghFluid[i];
const surfaceScalarField& ghf = ghfFluid[i];
volScalarField& pd = pdFluid[i];
volScalarField& p = thermo.p();
const volScalarField& psi = thermo.psi();
volScalarField& h = thermo.h();
const dimensionedScalar massIni("massIni", dimMass, initialMassFluid[i]);

View File

@ -12,4 +12,4 @@
# include "pEqn.H" # include "pEqn.H"
} }
} }
turb[i].correct(); turb.correct();

View File

@ -1,73 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Description
Solve pressure difference equation
\*---------------------------------------------------------------------------*/
void solvePressureDifferenceEquation
(
const label corr,
const label nCorr,
const label nNonOrthCorr,
bool& closedVolume,
volScalarField& pd,
const dimensionedScalar& pRef,
const volScalarField& rho,
const volScalarField& psi,
const volScalarField& rUA,
const volScalarField& gh,
surfaceScalarField& phi
)
{
closedVolume = pd.needReference();
for (int nonOrth=0; nonOrth<=nNonOrthCorr; nonOrth++)
{
fvScalarMatrix pdEqn
(
fvm::ddt(psi, pd)
+ fvc::ddt(psi)*pRef
+ fvc::ddt(psi, rho)*gh
+ fvc::div(phi)
- fvm::laplacian(rho*rUA, pd)
);
//pdEqn.solve();
if (corr == nCorr-1 && nonOrth == nNonOrthCorr)
{
pdEqn.solve(pd.mesh().solver(pd.name() + "Final"));
}
else
{
pdEqn.solve(pd.mesh().solver(pd.name()));
}
if (nonOrth == nNonOrthCorr)
{
phi += pdEqn.flux();
}
}
}

View File

@ -0,0 +1,6 @@
// fvMesh& mesh = solidRegions[i];
volScalarField& rho = rhos[i];
volScalarField& cp = cps[i];
volScalarField& K = Ks[i];
volScalarField& T = Ts[i];

View File

@ -3,10 +3,9 @@
    {
        solve
        (
-           fvm::ddt(rhosCps[i], Ts[i]) - fvm::laplacian(Ks[i], Ts[i])
+           fvm::ddt(rho*cp, T) - fvm::laplacian(K, T)
        );
    }

-   Info<< "Min/max T:" << min(Ts[i]) << ' ' << max(Ts[i])
-       << endl;
+   Info<< "Min/max T:" << min(T) << ' ' << max(T) << endl;
}
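The solid regions solve the transient conduction equation d(rho*cp*T)/dt = div(K grad T). A minimal explicit 1-D finite-volume sketch of that equation in plain C++ (uniform grid, insulated ends; grid and material values are illustrative):

#include <algorithm>
#include <iostream>
#include <vector>

// Explicit 1-D step of rho*cp*dT/dt = d/dx(K dT/dx), the conduction
// equation solved per solid region in solveSolid.H.
int main()
{
    const int n = 20;
    const double dx = 0.01, dt = 0.05;
    const double rho = 8000.0, cp = 500.0, K = 16.0;   // steel-like properties

    std::vector<double> T(n, 300.0);
    T[n/2] = 400.0;                                    // initial hot spot

    for (int step = 0; step < 100; ++step)
    {
        std::vector<double> Tnew(T);
        for (int i = 1; i < n - 1; ++i)
        {
            const double lapl = (T[i + 1] - 2.0*T[i] + T[i - 1])/(dx*dx);
            Tnew[i] = T[i] + dt*K*lapl/(rho*cp);
        }
        T.swap(Tnew);
    }

    std::cout << "Min/max T: " << *std::min_element(T.begin(), T.end())
              << ' ' << *std::max_element(T.begin(), T.end()) << "\n";
    return 0;
}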

View File

@ -6,7 +6,7 @@
    phic = min(interface.cAlpha()*phic, max(phic));
    surfaceScalarField phir = phic*interface.nHatf();

-   for (int gCorr=0; gCorr<nAlphaCorr; gCorr++)
+   for (int aCorr=0; aCorr<nAlphaCorr; aCorr++)
    {
        surfaceScalarField phiAlpha =
            fvc::flux

View File

@ -81,17 +81,20 @@ Foam::phaseModel::phaseModel
    {
        Info<< "Reading face flux field " << phiName << endl;

-       phiPtr_ = new surfaceScalarField
-       (
-           IOobject
-           (
-               phiName,
-               mesh.time().timeName(),
-               mesh,
-               IOobject::MUST_READ,
-               IOobject::AUTO_WRITE
-           ),
-           mesh
-       );
+       phiPtr_.reset
+       (
+           new surfaceScalarField
+           (
+               IOobject
+               (
+                   phiName,
+                   mesh.time().timeName(),
+                   mesh,
+                   IOobject::MUST_READ,
+                   IOobject::AUTO_WRITE
+               ),
+               mesh
+           )
+       );
    }
    else

@ -112,18 +115,21 @@ Foam::phaseModel::phaseModel

            }
        }

-       phiPtr_ = new surfaceScalarField
-       (
-           IOobject
-           (
-               phiName,
-               mesh.time().timeName(),
-               mesh,
-               IOobject::NO_READ,
-               IOobject::AUTO_WRITE
-           ),
-           fvc::interpolate(U_) & mesh.Sf(),
-           phiTypes
-       );
+       phiPtr_.reset
+       (
+           new surfaceScalarField
+           (
+               IOobject
+               (
+                   phiName,
+                   mesh.time().timeName(),
+                   mesh,
+                   IOobject::NO_READ,
+                   IOobject::AUTO_WRITE
+               ),
+               fvc::interpolate(U_) & mesh.Sf(),
+               phiTypes
+           )
+       );
    }
}
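The change above replaces a raw surfaceScalarField pointer with an autoPtr and fills it via reset(), so the flux field is owned and released automatically. The same pattern in standard C++ with std::unique_ptr; FluxField and the method names are illustrative stand-ins, not OpenFOAM classes:

#include <iostream>
#include <memory>
#include <string>

// Owning-pointer pattern analogous to replacing surfaceScalarField* with
// autoPtr<surfaceScalarField> and assigning through reset(new ...).
struct FluxField
{
    std::string name;
    explicit FluxField(const std::string& n) : name(n) {}
};

class Phase
{
    std::unique_ptr<FluxField> phiPtr_;   // was: FluxField* phiPtr_;

public:
    void readOrCreateFlux(bool present)
    {
        // reset() releases any previous field and takes ownership of the new one
        phiPtr_.reset(new FluxField(present ? "phi (read)" : "phi (created)"));
    }

    // callers still see a reference, as with phiPtr_() in the diff
    const FluxField& phi() const { return *phiPtr_; }
};

int main()
{
    Phase a;
    a.readOrCreateFlux(true);
    std::cout << a.phi().name << "\n";
    return 0;
}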

View File

@ -69,7 +69,7 @@ class phaseModel
        volVectorField U_;

        //- Fluxes
-       surfaceScalarField* phiPtr_;
+       autoPtr<surfaceScalarField> phiPtr_;

public:

@ -133,12 +133,12 @@

        const surfaceScalarField& phi() const
        {
-           return *phiPtr_;
+           return phiPtr_();
        }

        surfaceScalarField& phi()
        {
-           return *phiPtr_;
+           return phiPtr_();
        }
};

View File

@ -111,6 +111,33 @@ int main(int argc, char *argv[])
Info<< "setD : " << setD << endl; Info<< "setD : " << setD << endl;
Info<< "setB ^ setC ^ setD : " << (setB ^ setC ^ setD) << endl; Info<< "setB ^ setC ^ setD : " << (setB ^ setC ^ setD) << endl;
// test operator[]
Info<< "setD : " << setD << endl;
if (setD[0])
{
Info<< "setD has 0" << endl;
}
else
{
Info<< "setD has no 0" << endl;
}
if (setD[11])
{
Info<< "setD has 11" << endl;
}
else
{
Info<< "setD has no 0" << endl;
}
Info<< "setD : " << setD << endl;
// this doesn't work (yet?)
// setD[12] = true;
return 0; return 0;
} }

View File

@ -38,7 +38,6 @@ using namespace Foam;
int main(int argc, char *argv[])
{
-   bool changed;

    Info<< "PackedList max_bits() = " << PackedList<0>::max_bits() << nl;

    Info<< "\ntest allocation with value\n";

@ -46,11 +45,50 @@

    list1.print(Info);

    Info<< "\ntest assign uniform value\n";
-   list1 = 2;
+   list1 = 3;
    list1.print(Info);

+   Info<< "\ntest assign uniform value (with overflow)\n";
+   list1 = -1;
+   list1.print(Info);
+
+   Info<< "\ntest assign between references\n";
+   list1[2] = 3;
+   list1[4] = list1[2];
+   list1.print(Info);
+
+   Info<< "\ntest assign between references, with chaining\n";
+   list1[4] = list1[2] = 1;
+   list1.print(Info);
+
+   {
+       const PackedList<3>& constLst = list1;
+       Info<< "\ntest operator[] const with out-of-range index\n";
+       constLst.print(Info);
+       if (!constLst[20])
+       {
+           Info<< "[20] is false (expected) list size should be unchanged (const)\n";
+       }
+       constLst.print(Info);
+
+       Info<< "\ntest operator[] non-const with out-of-range index\n";
+       if (!list1[20])
+       {
+           Info<< "[20] is false (expected) but list was resized?? (non-const)\n";
+       }
+       list1.print(Info);
+   }
+
+   Info<< "\ntest operator[] with out-of-range index\n";
+   if (!list1[20])
+   {
+       Info<< "[20] is false, as expected\n";
+   }
    list1.print(Info);

    Info<< "\ntest resize with value (without reallocation)\n";
-   list1.resize(6, 3);
+   list1.resize(8, list1.max_value());
    list1.print(Info);

    Info<< "\ntest set() function\n";

@ -96,7 +134,7 @@

    list1.print(Info);

    Info<< "\ntest setCapacity() operation\n";
-   list1.setCapacity(30);
+   list1.setCapacity(100);
    list1.print(Info);

    Info<< "\ntest operator[] assignment\n";

@ -108,7 +146,15 @@

    list1.print(Info);

    Info<< "\ntest setCapacity smaller\n";
-   list1.setCapacity(32);
+   list1.setCapacity(24);
+   list1.print(Info);
+
+   Info<< "\ntest resize much smaller\n";
+   list1.resize(150);
+   list1.print(Info);
+
+   Info<< "\ntest trim\n";
+   list1.trim();
    list1.print(Info);

    // add in some misc values

@ -118,37 +164,54 @@

    Info<< "\ntest iterator\n";
    PackedList<3>::iterator iter = list1.begin();
-   Info<< "iterator:" << iter() << "\n";
-   iter.print(Info) << "\n";
-
-   Info<< "\ntest iterator operator=\n";
-   changed = (iter = 5);
-   Info<< "iterator:" << iter() << "\n";
-   Info<< "changed:" << changed << "\n";
-   changed = (iter = 5);
-   Info<< "changed:" << changed << "\n";
-   list1.print(Info);
+   Info<< "begin():";
+   iter.print(Info) << "\n";
+
+   Info<< "iterator:" << iter() << "\n";
+   iter() = 5;
+   iter.print(Info);
+   list1.print(Info);
+
+   iter = list1[31];
+   Info<< "iterator:" << iter() << "\n";
+   iter.print(Info);

    Info<< "\ntest get() method\n";
-   Info<< "get(10):" << list1.get(10)
-       << " and list[10]:" << unsigned(list1[10]) << "\n";
+   Info<< "get(10):" << list1.get(10) << " and list[10]:" << list1[10] << "\n";
    list1.print(Info);

    Info<< "\ntest iterator indexing\n";
-   Info<< "end() ";
-   list1.end().print(Info) << "\n";
-
-   for (iter = list1[31]; iter != list1.end(); ++iter)
-   {
-       iter.print(Info);
-   }
-
-   Info<< "\ntest operator[] auto-vivify\n";
-   const unsigned int val = list1[45];
-   Info<< "list[45]:" << val << "\n";
-   list1.print(Info);
+   Info<< "cend() ";
+   list1.cend().print(Info) << "\n";
+
+   {
+       Info<< "\ntest assignment of iterator\n";
+       list1.print(Info);
+       PackedList<3>::iterator cit = list1[25];
+       cit.print(Info);
+       list1.end().print(Info);
+   }
+
+   for
+   (
+       PackedList<3>::iterator cit = list1[5];
+       cit != list1.end();
+       ++cit
+   )
+   {
+       cit.print(Info);
+   }
+
+   // Info<< "\ntest operator[] auto-vivify\n";
+   // const unsigned int val = list1[45];
+   //
+   // Info<< "list[45]:" << val << "\n";
+   // list1[45] = list1.max_value();
+   // Info<< "list[45]:" << list1[45] << "\n";
+   // list1[49] = list1.max_value();
+   // list1.print(Info);

    Info<< "\ntest copy constructor + append\n";

@ -161,8 +224,15 @@

    Info<< "\ntest pattern that fills all bits\n";
    PackedList<4> list3(8, 8);
-   list3[list3.size()-2] = 0;
-   list3[list3.size()-1] = list3.max_value();
+   label pos = list3.size() - 1;
+   list3[pos--] = list3.max_value();
+   list3[pos--] = 0;
+   list3[pos--] = list3.max_value();
+   list3.print(Info);
+
+   Info<< "removed final value: " << list3.remove() << endl;
    list3.print(Info);

    Info<< "\n\nDone.\n";

View File

@ -0,0 +1,3 @@
PackedListTest2.C
EXE = $(FOAM_USER_APPBIN)/PackedListTest2

View File

@ -0,0 +1,348 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2009 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Application
Description
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "boolList.H"
#include "PackedBoolList.H"
#include "HashSet.H"
#include "cpuTime.H"
#include <vector>
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// Main program:
int main(int argc, char *argv[])
{
const label n = 1000000;
const label nIters = 1000;
unsigned int sum = 0;
PackedBoolList packed(n, 1);
boolList unpacked(n, true);
std::vector<bool> stlVector(n, true);
labelHashSet emptyHash;
labelHashSet fullHash(1000);
for(label i = 0; i < n; i++)
{
fullHash.insert(i);
}
cpuTime timer;
for (label iter = 0; iter < nIters; ++iter)
{
packed.resize(40);
packed.shrink();
packed.resize(n, 1);
}
Info<< "resize/shrink/resize:" << timer.cpuTimeIncrement() << " s\n\n";
// set every other bit on:
Info<< "set every other bit on and count\n";
packed.storage() = 0xAAAAAAAAu;
// Count packed
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(packed, i)
{
sum += packed[i];
}
}
Info<< "Counting brute-force:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Count packed
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
sum += packed.count();
}
Info<< "Counting via count():" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Dummy addition
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(unpacked, i)
{
sum += i + 1;
}
}
Info<< "Dummy loop:" << timer.cpuTimeIncrement() << " s" << endl;
Info<< " sum " << sum << endl;
//
// Read
//
// Read stl
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
for(unsigned int i = 0; i < stlVector.size(); i++)
{
sum += stlVector[i];
}
}
Info<< "Reading stl:" << timer.cpuTimeIncrement() << " s" << endl;
Info<< " sum " << sum << endl;
// Read unpacked
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(unpacked, i)
{
sum += unpacked[i];
}
}
Info<< "Reading unpacked:" << timer.cpuTimeIncrement() << " s" << endl;
Info<< " sum " << sum << endl;
// Read packed
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(packed, i)
{
sum += packed.get(i);
}
}
Info<< "Reading packed using get:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Read packed
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(packed, i)
{
sum += packed[i];
}
}
Info<< "Reading packed using reference:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Read via iterator
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
for
(
PackedBoolList::iterator it = packed.begin();
it != packed.end();
++it
)
{
sum += it;
}
}
Info<< "Reading packed using iterator:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Read via iterator
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
for
(
PackedBoolList::const_iterator cit = packed.cbegin();
cit != packed.cend();
++cit
)
{
sum += cit();
}
}
Info<< "Reading packed using const_iterator():" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Read empty hash
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(unpacked, i)
{
sum += emptyHash.found(i);
}
}
Info<< "Reading empty labelHashSet:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
// Read full hash
sum = 0;
for (label iter = 0; iter < nIters; ++iter)
{
forAll(unpacked, i)
{
sum += fullHash.found(i);
}
}
Info<< "Reading full labelHashSet:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info<< " sum " << sum << endl;
//
// Write
//
// Write stl
for (label iter = 0; iter < nIters; ++iter)
{
for (unsigned int i = 0; i < stlVector.size(); i++)
{
stlVector[i] = true;
}
}
Info<< "Writing stl:" << timer.cpuTimeIncrement() << " s" << endl;
// Write unpacked
for (label iter = 0; iter < nIters; ++iter)
{
forAll(unpacked, i)
{
unpacked[i] = true;
}
}
Info<< "Writing unpacked:" << timer.cpuTimeIncrement() << " s" << endl;
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
forAll(packed, i)
{
packed[i] = 1;
}
}
Info<< "Writing packed using reference:" << timer.cpuTimeIncrement()
<< " s" << endl;
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
forAll(packed, i)
{
packed.set(i, 1);
}
}
Info<< "Writing packed using set:" << timer.cpuTimeIncrement()
<< " s" << endl;
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
for
(
PackedBoolList::iterator it = packed.begin();
it != packed.end();
++it
)
{
it() = 1;
}
}
Info<< "Writing packed using iterator:" << timer.cpuTimeIncrement()
<< " s" << endl;
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
packed = 0;
}
Info<< "Writing packed uniform 0:" << timer.cpuTimeIncrement()
<< " s" << endl;
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
packed = 1;
}
Info<< "Writing packed uniform 1:" << timer.cpuTimeIncrement()
<< " s" << endl;
PackedList<3> oddPacked(n, 3);
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
packed = 0;
}
Info<< "Writing packed<3> uniform 0:" << timer.cpuTimeIncrement()
<< " s" << endl;
// Write packed
for (label iter = 0; iter < nIters; ++iter)
{
packed = 1;
}
Info<< "Writing packed<3> uniform 1:" << timer.cpuTimeIncrement()
<< " s" << endl;
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //
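The benchmark above compares PackedBoolList against boolList, std::vector<bool> and labelHashSet; the point of the packed container is that many small values share one machine word. A plain-C++ sketch of that underlying idea, packing 1-bit flags 32 to a word (illustrative only, not the OpenFOAM container):

#include <cstdint>
#include <iostream>
#include <vector>

// Minimal bit-packing in the spirit of PackedBoolList: n 1-bit flags stored
// 32 per uint32_t word, with get/set/count operating on individual bits.
class PackedBits
{
    std::vector<std::uint32_t> words_;
    std::size_t size_;

public:
    explicit PackedBits(std::size_t n, bool val = false)
    :
        words_((n + 31)/32, val ? 0xFFFFFFFFu : 0u),
        size_(n)
    {}

    bool get(std::size_t i) const
    {
        return (words_[i/32] >> (i%32)) & 1u;
    }

    void set(std::size_t i, bool val)
    {
        if (val) words_[i/32] |=  (1u << (i%32));
        else     words_[i/32] &= ~(1u << (i%32));
    }

    std::size_t count() const
    {
        std::size_t c = 0;
        for (std::size_t i = 0; i < size_; ++i) c += get(i);
        return c;
    }
};

int main()
{
    PackedBits bits(100, false);
    for (std::size_t i = 0; i < 100; i += 2) bits.set(i, true);  // every other bit on
    std::cout << "count = " << bits.count() << "\n";             // prints 50
    return 0;
}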

View File

@ -31,6 +31,7 @@ Description
\*---------------------------------------------------------------------------*/

#include "fileName.H"
+ #include "SubList.H"
#include "IOstreams.H"
#include "OSspecific.H"
@ -50,19 +51,55 @@ int main()
    fileName pathName(wrdList);

-   Info<< "pathName = " << pathName << endl;
-   Info<< "pathName.name() = " << pathName.name() << endl;
-   Info<< "pathName.path() = " << pathName.path() << endl;
-   Info<< "pathName.ext() = " << pathName.ext() << endl;
-   Info<< "pathName.components() = " << pathName.components() << endl;
-   Info<< "pathName.component(2) = " << pathName.component(2) << endl;
+   Info<< "pathName = " << pathName << nl
+       << "pathName.name() = " << pathName.name() << nl
+       << "pathName.path() = " << pathName.path() << nl
+       << "pathName.ext() = " << pathName.ext() << endl;
+
+   Info<< "pathName.components() = " << pathName.components() << nl
+       << "pathName.component(2) = " << pathName.component(2) << nl
+       << endl;
+
+   // try with different combination
+   for (label start = 0; start < wrdList.size(); ++start)
+   {
+       fileName instance, local;
+       word name;
+
+       fileName path(SubList<word>(wrdList, wrdList.size()-start, start));
+       fileName path2 = "." / path;
+
+       path.IOobjectComponents
+       (
+           instance,
+           local,
+           name
+       );
+
+       Info<< "IOobjectComponents for " << path << nl
+           << "  instance = " << instance << nl
+           << "  local    = " << local << nl
+           << "  name     = " << name << endl;
+
+       path2.IOobjectComponents
+       (
+           instance,
+           local,
+           name
+       );
+
+       Info<< "IOobjectComponents for " << path2 << nl
+           << "  instance = " << instance << nl
+           << "  local    = " << local << nl
+           << "  name     = " << name << endl;
+   }

    // test findEtcFile
    Info<< "\n\nfindEtcFile tests:" << nl
        << "    controlDict => " << findEtcFile("controlDict") << nl
        << "    badName => " << findEtcFile("badName") << endl;

    Info<< "This should emit a fatal error:" << endl;
    Info<< "    badName(die) => " << findEtcFile("badName", true) << nl
        << endl;

View File

@ -464,6 +464,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
scalar minLen(readScalar(IStringStream(args.additionalArgs()[0])())); scalar minLen(readScalar(IStringStream(args.additionalArgs()[0])()));
scalar angle(readScalar(IStringStream(args.additionalArgs()[1])())); scalar angle(readScalar(IStringStream(args.additionalArgs()[1])()));
@ -587,8 +588,12 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
Info << "Writing collapsed mesh to time " << runTime.value() << endl; Info<< "Writing collapsed mesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();
} }

View File

@ -441,6 +441,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
scalar featureAngle(readScalar(IStringStream(args.additionalArgs()[0])())); scalar featureAngle(readScalar(IStringStream(args.additionalArgs()[0])()));
@ -502,6 +503,11 @@ int main(int argc, char *argv[])
if (nChanged > 0) if (nChanged > 0)
{ {
if (overwrite)
{
mesh.setInstance(oldInstance);
}
Info<< "Writing morphed mesh to time " << runTime.timeName() << endl; Info<< "Writing morphed mesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();

View File

@ -334,6 +334,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
@ -553,9 +554,13 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
// Write resulting mesh // Write resulting mesh
Info << "Writing modified mesh to time " << runTime.value() << endl; Info << "Writing modified mesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();
} }
else if (edgeToPos.size()) else if (edgeToPos.size())
@ -602,9 +607,13 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
// Write resulting mesh // Write resulting mesh
Info << "Writing modified mesh to time " << runTime.value() << endl; Info << "Writing modified mesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();
} }
else else
@ -641,9 +650,13 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
// Write resulting mesh // Write resulting mesh
Info << "Writing modified mesh to time " << runTime.value() << endl; Info << "Writing modified mesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();
} }

View File

@ -58,6 +58,8 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
pointMesh pMesh(mesh); pointMesh pMesh(mesh);
word cellSetName(args.args()[1]); word cellSetName(args.args()[1]);
@ -177,6 +179,10 @@ int main(int argc, char *argv[])
Pout<< "Refined from " << returnReduce(map().nOldCells(), sumOp<label>()) Pout<< "Refined from " << returnReduce(map().nOldCells(), sumOp<label>())
<< " to " << mesh.globalData().nTotalCells() << " cells." << nl << endl; << " to " << mesh.globalData().nTotalCells() << " cells." << nl << endl;
if (overwrite)
{
mesh.setInstance(oldInstance);
}
Info<< "Writing mesh to " << runTime.timeName() << endl; Info<< "Writing mesh to " << runTime.timeName() << endl;
mesh.write(); mesh.write();

View File

@ -56,6 +56,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
word patchName(args.additionalArgs()[0]); word patchName(args.additionalArgs()[0]);
@ -226,8 +227,13 @@ int main(int argc, char *argv[])
// Update stored labels on meshCutter. // Update stored labels on meshCutter.
cutter.updateMesh(morphMap()); cutter.updateMesh(morphMap());
if (overwrite)
{
mesh.setInstance(oldInstance);
}
// Write resulting mesh // Write resulting mesh
Info << "Writing refined morphMesh to time " << runTime.value() << endl; Info << "Writing refined morphMesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();

View File

@ -55,6 +55,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
@ -167,6 +168,10 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
// Take over refinement levels and write to new time directory. // Take over refinement levels and write to new time directory.
Pout<< "Writing mesh to time " << runTime.timeName() << endl; Pout<< "Writing mesh to time " << runTime.timeName() << endl;

View File

@ -534,6 +534,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
scalar featureAngle(readScalar(IStringStream(args.additionalArgs()[0])())); scalar featureAngle(readScalar(IStringStream(args.additionalArgs()[0])()));
@ -693,7 +694,13 @@ int main(int argc, char *argv[])
Info<< "Remaining:" << cellsToCut.size() << endl; Info<< "Remaining:" << cellsToCut.size() << endl;
        // Write resulting mesh
-       Info << "Writing refined morphMesh to time " << runTime.value() << endl;
        if (overwrite)
        {
            mesh.setInstance(oldInstance);
        }
        Info<< "Writing refined morphMesh to time " << runTime.timeName()
            << endl;
        mesh.write();
} }

View File

@ -354,6 +354,7 @@ int main(int argc, char *argv[])
runTime.setTime(Times[startTime], startTime); runTime.setTime(Times[startTime], startTime);
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
// Mark boundary edges and points. // Mark boundary edges and points.
// (Note: in 1.4.2 we can use the built-in mesh point ordering // (Note: in 1.4.2 we can use the built-in mesh point ordering
@ -499,7 +500,10 @@ int main(int argc, char *argv[])
    if (!overwrite)
    {
        runTime++;
-       mesh.setInstance(runTime.timeName());
    }
    else
    {
        mesh.setInstance(oldInstance);
    }

    Info<< "Writing dual mesh to " << runTime.timeName() << endl;

View File

@ -78,10 +78,10 @@ int main(int argc, char *argv[])
#   include "setRootCase.H"
#   include "createTime.H"

    const word dictName("blockMeshDict");
    word regionName;
    fileName polyMeshDir;
-   word dictName("blockMeshDict");
-   fileName dictPath(runTime.constant());

    if (args.options().found("region"))
    {
@ -98,55 +98,58 @@ int main(int argc, char *argv[])
        polyMeshDir = polyMesh::meshSubDir;
    }

-   fileName dictLocal = polyMeshDir;
    autoPtr<IOobject> meshDictIoPtr;

    if (args.options().found("dict"))
    {
-       wordList elems(fileName(args.options()["dict"]).components());
-       dictName = elems[elems.size()-1];
-       dictPath = elems[0];
-       dictLocal = "";
-       if (elems.size() == 1)
-       {
-           dictPath = ".";
-       }
-       else if (elems.size() > 2)
-       {
-           dictLocal = fileName(SubList<word>(elems, elems.size()-2, 1));
-       }
-   }
        fileName dictPath(args.options()["dict"]);

        meshDictIoPtr.set
        (
            new IOobject
            (
                ( dictPath.isDir() ? dictPath/dictName : dictPath ),
                runTime,
                IOobject::MUST_READ,
                IOobject::NO_WRITE,
                false
            )
        );
    }
    else
    {
        meshDictIoPtr.set
        (
            new IOobject
            (
                dictName,
                runTime.constant(),
                polyMeshDir,
                runTime,
                IOobject::MUST_READ,
                IOobject::NO_WRITE,
                false
            )
        );
    }

-   bool writeTopo = args.options().found("blockTopology");
-
-   IOobject meshDictIo
-   (
-       dictName,
-       dictPath,
-       dictLocal,
-       runTime,
-       IOobject::MUST_READ,
-       IOobject::NO_WRITE,
-       false
-   );
-
-   if (!meshDictIo.headerOk())
    if (!meshDictIoPtr->headerOk())
    {
        FatalErrorIn(args.executable())
            << "Cannot open mesh description file\n "
-           << meshDictIo.objectPath()
            << meshDictIoPtr->objectPath()
            << nl
            << exit(FatalError);
    }

    Info<< nl << "Creating block mesh from\n "
-       << meshDictIo.objectPath() << nl << endl;
        << meshDictIoPtr->objectPath() << nl << endl;

-   IOdictionary meshDict(meshDictIo);
    IOdictionary meshDict(meshDictIoPtr());

    blockMesh blocks(meshDict);

-   if (writeTopo)
    if (args.options().found("blockTopology"))
    {
        // Write mesh as edges.
        {
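The reworked -dict handling above accepts either a directory or an explicit file: if the argument is a directory, the default dictionary name is appended. A hedged sketch of just that resolution step, reusing the dictPath and dictName identifiers from the hunk above:

    // Sketch: resolve the -dict argument to the dictionary to read.
    fileName dictPath(args.options()["dict"]);

    const fileName resolved =
        dictPath.isDir()
      ? dictPath/dictName   // directory given: append "blockMeshDict"
      : dictPath;           // explicit file given: use it as-is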

View File

@ -63,6 +63,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
scalar thickness(readScalar(IStringStream(args.additionalArgs()[0])())); scalar thickness(readScalar(IStringStream(args.additionalArgs()[0])()));
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
@ -182,6 +183,10 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
// Take over refinement levels and write to new time directory. // Take over refinement levels and write to new time directory.
Pout<< "Writing extruded mesh to time " << runTime.timeName() << nl Pout<< "Writing extruded mesh to time " << runTime.timeName() << nl

View File

@ -10,4 +10,8 @@ EXE_INC = \
EXE_LIBS = \ EXE_LIBS = \
-L$(FOAM_MPI_LIBBIN) -lparMetisDecompositionMethod \ -L$(FOAM_MPI_LIBBIN) -lparMetisDecompositionMethod \
-lfiniteVolume \
-ldecompositionMethods \
-lmeshTools \
-ldynamicMesh \
-lautoMesh -lautoMesh

View File

@ -48,6 +48,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
@ -61,6 +62,10 @@ int main(int argc, char *argv[])
attachPolyTopoChanger(mesh).attach(); attachPolyTopoChanger(mesh).attach();
if (overwrite)
{
mesh.setInstance(oldInstance);
}
mesh.write(); mesh.write();
Info<< "End\n" << endl; Info<< "End\n" << endl;

View File

@ -77,6 +77,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
Info<< "Mesh read in = " Info<< "Mesh read in = "
<< runTime.cpuTimeIncrement() << runTime.cpuTimeIncrement()
@ -243,6 +244,10 @@ int main(int argc, char *argv[])
polyMeshRepatcher.repatch(); polyMeshRepatcher.repatch();
// Write resulting mesh // Write resulting mesh
if (overwrite)
{
mesh.setInstance(oldInstance);
}
mesh.write(); mesh.write();

View File

@ -74,19 +74,7 @@ int main(int argc, char *argv[])
{ {
# include "setRootCase.H" # include "setRootCase.H"
# include "createTime.H" # include "createTime.H"
# include "createPolyMesh.H"
Info<< "Reading mesh for time = " << runTime.value() << endl;
Info<< "Create mesh\n" << endl;
polyMesh mesh
(
IOobject
(
polyMesh::defaultRegion,
runTime.timeName(),
runTime
)
);
Info<< "Reading cellSetDict\n" << endl; Info<< "Reading cellSetDict\n" << endl;

View File

@ -60,6 +60,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
const polyBoundaryMesh& patches = mesh.boundaryMesh(); const polyBoundaryMesh& patches = mesh.boundaryMesh();
const faceZoneMesh& faceZones = mesh.faceZones(); const faceZoneMesh& faceZones = mesh.faceZones();
@ -247,6 +248,10 @@ int main(int argc, char *argv[])
mesh.movePoints(map().preMotionPoints()); mesh.movePoints(map().preMotionPoints());
} }
if (overwrite)
{
mesh.setInstance(oldInstance);
}
Pout<< "Writing mesh to " << runTime.timeName() << endl; Pout<< "Writing mesh to " << runTime.timeName() << endl;
mesh.write(); mesh.write();

View File

@ -569,6 +569,7 @@ int main(int argc, char *argv[])
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
const polyBoundaryMesh& patches = mesh.boundaryMesh(); const polyBoundaryMesh& patches = mesh.boundaryMesh();
@ -909,6 +910,10 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
// Write resulting mesh // Write resulting mesh
Info<< "Writing repatched mesh to " << runTime.timeName() << nl << endl; Info<< "Writing repatched mesh to " << runTime.timeName() << nl << endl;

View File

@ -74,19 +74,7 @@ int main(int argc, char *argv[])
{ {
# include "setRootCase.H" # include "setRootCase.H"
# include "createTime.H" # include "createTime.H"
# include "createPolyMesh.H"
Info<< "Reading mesh for time = " << runTime.value() << endl;
Info<< "Create mesh\n" << endl;
polyMesh mesh
(
IOobject
(
polyMesh::defaultRegion,
runTime.timeName(),
runTime
)
);
Info<< "Reading faceSetDict\n" << endl; Info<< "Reading faceSetDict\n" << endl;

View File

@ -42,7 +42,7 @@ int main(int argc, char *argv[])
# include "setRoots.H" # include "setRoots.H"
# include "createTimes.H" # include "createTimes.H"
Info<< "Reading master mesh for time = " << runTimeMaster.value() << endl; Info<< "Reading master mesh for time = " << runTimeMaster.timeName() << endl;
Info<< "Create mesh\n" << endl; Info<< "Create mesh\n" << endl;
mergePolyMesh masterMesh mergePolyMesh masterMesh
@ -56,7 +56,7 @@ int main(int argc, char *argv[])
); );
Info<< "Reading mesh to add for time = " << runTimeToAdd.value() << endl; Info<< "Reading mesh to add for time = " << runTimeToAdd.timeName() << endl;
Info<< "Create mesh\n" << endl; Info<< "Create mesh\n" << endl;
polyMesh meshToAdd polyMesh meshToAdd
@ -71,7 +71,7 @@ int main(int argc, char *argv[])
runTimeMaster++; runTimeMaster++;
Info<< "Writing combined mesh to " << runTimeMaster.value() << endl; Info<< "Writing combined mesh to " << runTimeMaster.timeName() << endl;
masterMesh.addMesh(meshToAdd); masterMesh.addMesh(meshToAdd);
masterMesh.merge(); masterMesh.merge();

View File

@ -229,6 +229,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
bool split = args.options().found("split"); bool split = args.options().found("split");
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
@ -338,7 +339,10 @@ int main(int argc, char *argv[])
mesh.movePoints(map().preMotionPoints()); mesh.movePoints(map().preMotionPoints());
} }
if (overwrite)
{
mesh.setInstance(oldInstance);
}
Pout<< "Writing mesh to time " << runTime.timeName() << endl; Pout<< "Writing mesh to time " << runTime.timeName() << endl;
mesh.write(); mesh.write();

View File

@ -74,19 +74,7 @@ int main(int argc, char *argv[])
{ {
# include "setRootCase.H" # include "setRootCase.H"
# include "createTime.H" # include "createTime.H"
# include "createPolyMesh.H"
Info<< "Reading mesh for time = " << runTime.value() << endl;
Info<< "Create mesh\n" << endl;
polyMesh mesh
(
IOobject
(
polyMesh::defaultRegion,
runTime.timeName(),
runTime
)
);
Info<< "Reading pointSetDict\n" << endl; Info<< "Reading pointSetDict\n" << endl;

View File

@ -300,6 +300,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
printEdgeStats(mesh); printEdgeStats(mesh);
@ -427,6 +428,10 @@ int main(int argc, char *argv[])
// Write resulting mesh // Write resulting mesh
if (overwrite)
{
mesh.setInstance(oldInstance);
}
mesh.write(); mesh.write();

View File

@ -383,9 +383,8 @@ int main(int argc, char *argv[])
runTime.setTime(Times[startTime], startTime); runTime.setTime(Times[startTime], startTime);
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
const bool blockOrder = args.options().found("blockOrder"); const bool blockOrder = args.options().found("blockOrder");
@ -631,6 +630,11 @@ int main(int argc, char *argv[])
<< endl; << endl;
} }
if (overwrite)
{
mesh.setInstance(oldInstance);
}
Info<< "Writing mesh to " << runTime.timeName() << endl; Info<< "Writing mesh to " << runTime.timeName() << endl;
mesh.write(); mesh.write();

View File

@ -124,6 +124,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createPolyMesh.H" # include "createPolyMesh.H"
const word oldInstance = mesh.pointsInstance();
word setName(args.additionalArgs()[0]); word setName(args.additionalArgs()[0]);
word masterPatch(args.additionalArgs()[1]); word masterPatch(args.additionalArgs()[1]);
@ -262,7 +263,12 @@ int main(int argc, char *argv[])
splitter.attach(); splitter.attach();
Info << nl << "Writing polyMesh" << endl; if (overwrite)
{
mesh.setInstance(oldInstance);
}
Info<< "Writing mesh to " << runTime.timeName() << endl;
if (!mesh.write()) if (!mesh.write())
{ {
FatalErrorIn(args.executable()) FatalErrorIn(args.executable())

View File

@ -30,12 +30,17 @@ Description
    - any face inbetween differing cellZones (-cellZones)

    Output is:
-   - mesh with multiple regions
    - mesh with multiple regions or
    - mesh with cells put into cellZones (-makeCellZones)

    Note:
-   Should work in parallel but cellZone interfaces cannot align with
    - Should work in parallel but cellZone interfaces cannot align with
      processor boundaries so use the correct option in decomposition to
      preserve those interfaces.
    - If a cell zone gets split into more than one region it can detect
      the largest matching region (-sloppyCellZones). This will accept any
      region that covers more than 50% of the zone. It has to be a subset
      so cannot have any cells in any other zone.

\*---------------------------------------------------------------------------*/
@ -758,7 +763,8 @@ void createAndWriteRegion
const regionSplit& cellRegion, const regionSplit& cellRegion,
const wordList& regionNames, const wordList& regionNames,
const EdgeMap<label>& interfaceToPatch, const EdgeMap<label>& interfaceToPatch,
-   const label regionI
    const label regionI,
const word& newMeshInstance
) )
{ {
Info<< "Creating mesh for region " << regionI Info<< "Creating mesh for region " << regionI
@ -902,6 +908,7 @@ void createAndWriteRegion
Info<< "Writing new mesh" << endl; Info<< "Writing new mesh" << endl;
newMesh().setInstance(newMeshInstance);
newMesh().write(); newMesh().write();
// Write addressing files like decomposePar // Write addressing files like decomposePar
@ -1036,78 +1043,181 @@ EdgeMap<label> addRegionPatches
} }
-// Checks if regionI in cellRegion corresponds to existing zone.
//// Checks if regionI in cellRegion is subset of existing cellZone. Returns -1
//// if no zone found, zone otherwise
//label findCorrespondingSubZone
//(
// const cellZoneMesh& cellZones,
// const labelList& existingZoneID,
// const labelList& cellRegion,
// const label regionI
//)
//{
// // Zone corresponding to region. No corresponding zone.
// label zoneI = labelMax;
//
// labelList regionCells = findIndices(cellRegion, regionI);
//
// if (regionCells.empty())
// {
// // My local portion is empty. Maps to any empty cellZone. Mark with
// // special value which can get overwritten by other processors.
// zoneI = -1;
// }
// else
// {
// // Get zone for first element.
// zoneI = existingZoneID[regionCells[0]];
//
// if (zoneI == -1)
// {
// zoneI = labelMax;
// }
// else
// {
// // 1. All regionCells in zoneI?
// forAll(regionCells, i)
// {
// if (existingZoneID[regionCells[i]] != zoneI)
// {
// zoneI = labelMax;
// break;
// }
// }
// }
// }
//
// // Determine same zone over all processors.
// reduce(zoneI, maxOp<label>());
//
// if (zoneI == labelMax)
// {
// // Cells in region that are not in zoneI
// zoneI = -1;
// }
//
// return zoneI;
//}
//XXXXXXXXX
// Find region that covers most of cell zone
label findCorrespondingRegion
(
    const labelList& existingZoneID,    // per cell the (unique) zoneID
    const regionSplit& cellRegion,
    const label zoneI,
    const label minOverlapSize
)
{
    // Per region the number of cells in zoneI
    labelList cellsInZone(cellRegion.nRegions(), 0);

    forAll(cellRegion, cellI)
    {
        if (existingZoneID[cellI] == zoneI)
        {
            cellsInZone[cellRegion[cellI]]++;
        }
    }

    Pstream::listCombineGather(cellsInZone, plusEqOp<label>());
    Pstream::listCombineScatter(cellsInZone);

    // Pick region with largest overlap of zoneI
    label regionI = findMax(cellsInZone);

    if (cellsInZone[regionI] < minOverlapSize)
    {
        // Region covers too little of zone. Not good enough.
        regionI = -1;
    }
    else
    {
        // Check that region contains no cells that aren't in cellZone.
        forAll(cellRegion, cellI)
        {
            if (cellRegion[cellI] == regionI && existingZoneID[cellI] != zoneI)
            {
                // cellI in regionI but not in zoneI
                regionI = -1;
                break;
            }
        }

        // If one in error, all should be in error. Note that branch gets taken
        // on all procs.
        reduce(regionI, minOp<label>());
    }

    return regionI;
}

-label findCorrespondingZone
-(
-    const cellZoneMesh& cellZones,
-    const labelList& existingZoneID,
-    const labelList& cellRegion,
-    const label regionI
-)
-{
-    // Zone corresponding to region. No corresponding zone.
-    label zoneI = labelMax;
-
-    labelList regionCells = findIndices(cellRegion, regionI);
-
-    if (regionCells.empty())
-    {
-        // My local portion is empty. Maps to any empty cellZone. Mark with
-        // special value which can get overwritten by other processors.
-        zoneI = -1;
-    }
-    else
-    {
-        // Get zone for first element.
-        zoneI = existingZoneID[regionCells[0]];
-
-        if (zoneI == -1)
-        {
-            zoneI = labelMax;
-        }
-        else
-        {
-            // 1. All regionCells in zoneI?
-            forAll(regionCells, i)
-            {
-                if (existingZoneID[regionCells[i]] != zoneI)
-                {
-                    zoneI = labelMax;
-                    break;
-                }
-            }
-        }
-    }
-
-    // Determine same zone over all processors.
-    reduce(zoneI, maxOp<label>());
-
-    // 2. All of cellZone present?
-    if (zoneI == labelMax)
-    {
-        zoneI = -1;
-    }
-    else if (zoneI != -1)
-    {
-        const cellZone& cz = cellZones[zoneI];
-
-        forAll(cz, i)
-        {
-            if (cellRegion[cz[i]] != regionI)
-            {
-                zoneI = -1;
-                break;
-            }
-        }
-
-        // If one in error, all should be in error. Note that branch gets taken
-        // on all procs.
-        reduce(zoneI, minOp<label>());
-    }
-
-    return zoneI;
-}
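Stripped of the OpenFOAM types and the parallel reductions, the matching rule introduced here is: count, per region, how many of its cells lie in the zone; pick the region with the largest overlap; reject it if the overlap is below the threshold; and also reject it if the region contains any cell outside the zone. A self-contained sketch of that rule in plain C++, with hypothetical names and assuming at least one region exists:

    #include <vector>
    #include <algorithm>

    // Sketch of the matching rule used by findCorrespondingRegion above.
    // cellZone[i]  : zone id of cell i (-1 if none)
    // cellRegion[i]: region id of cell i
    int matchRegionToZone
    (
        const std::vector<int>& cellZone,
        const std::vector<int>& cellRegion,
        int nRegions,
        int zoneI,
        int minOverlap   // half the zone size for -sloppyCellZones, 1 otherwise
    )
    {
        std::vector<int> overlap(nRegions, 0);
        for (std::size_t i = 0; i < cellZone.size(); ++i)
        {
            if (cellZone[i] == zoneI)
            {
                ++overlap[cellRegion[i]];
            }
        }

        // region with the largest overlap of the zone
        int regionI =
            std::max_element(overlap.begin(), overlap.end()) - overlap.begin();

        if (overlap[regionI] < minOverlap)
        {
            return -1;   // covers too little of the zone
        }

        // the region must be a subset of the zone: no cell of it may lie elsewhere
        for (std::size_t i = 0; i < cellRegion.size(); ++i)
        {
            if (cellRegion[i] == regionI && cellZone[i] != zoneI)
            {
                return -1;
            }
        }
        return regionI;
    }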
//XXXXXXXXX
//// Checks if cellZone has corresponding cellRegion.
//label findCorrespondingRegion
//(
// const cellZoneMesh& cellZones,
// const labelList& existingZoneID, // per cell the (unique) zoneID
// const regionSplit& cellRegion,
// const label zoneI
//)
//{
// // Region corresponding to zone. Start off with special value: no
// // corresponding region.
// label regionI = labelMax;
//
// const cellZone& cz = cellZones[zoneI];
//
// if (cz.empty())
// {
// // My local portion is empty. Maps to any empty cellZone. Mark with
// // special value which can get overwritten by other processors.
// regionI = -1;
// }
// else
// {
// regionI = cellRegion[cz[0]];
//
// forAll(cz, i)
// {
// if (cellRegion[cz[i]] != regionI)
// {
// regionI = labelMax;
// break;
// }
// }
// }
//
// // Determine same zone over all processors.
// reduce(regionI, maxOp<label>());
//
//
// // 2. All of region present?
//
// if (regionI == labelMax)
// {
// regionI = -1;
// }
// else if (regionI != -1)
// {
// forAll(cellRegion, cellI)
// {
// if (cellRegion[cellI] == regionI && existingZoneID[cellI] != zoneI)
// {
// // cellI in regionI but not in zoneI
// regionI = -1;
// break;
// }
// }
// // If one in error, all should be in error. Note that branch gets taken
// // on all procs.
// reduce(regionI, minOp<label>());
// }
//
// return regionI;
//}
// Main program: // Main program:
@ -1120,11 +1230,14 @@ int main(int argc, char *argv[])
argList::validOptions.insert("largestOnly", ""); argList::validOptions.insert("largestOnly", "");
argList::validOptions.insert("insidePoint", "point"); argList::validOptions.insert("insidePoint", "point");
argList::validOptions.insert("overwrite", ""); argList::validOptions.insert("overwrite", "");
argList::validOptions.insert("detectOnly", "");
argList::validOptions.insert("sloppyCellZones", "");
# include "setRootCase.H" # include "setRootCase.H"
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
word blockedFacesName; word blockedFacesName;
if (args.options().found("blockedFaces")) if (args.options().found("blockedFaces"))
@ -1134,10 +1247,13 @@ int main(int argc, char *argv[])
<< blockedFacesName << nl << endl; << blockedFacesName << nl << endl;
} }
bool makeCellZones = args.options().found("makeCellZones");
bool largestOnly = args.options().found("largestOnly"); bool largestOnly = args.options().found("largestOnly");
bool insidePoint = args.options().found("insidePoint"); bool insidePoint = args.options().found("insidePoint");
bool useCellZones = args.options().found("cellZones"); bool useCellZones = args.options().found("cellZones");
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
bool detectOnly = args.options().found("detectOnly");
bool sloppyCellZones = args.options().found("sloppyCellZones");
if (insidePoint && largestOnly) if (insidePoint && largestOnly)
{ {
@ -1281,6 +1397,31 @@ int main(int argc, char *argv[])
Info<< "Writing region per cell file (for manual decomposition) to " Info<< "Writing region per cell file (for manual decomposition) to "
<< cellToRegion.objectPath() << nl << endl; << cellToRegion.objectPath() << nl << endl;
} }
// Write for postprocessing
{
volScalarField cellToRegion
(
IOobject
(
"cellToRegion",
mesh.facesInstance(),
mesh,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
),
mesh,
dimensionedScalar("zero", dimless, 0)
);
forAll(cellRegion, cellI)
{
cellToRegion[cellI] = cellRegion[cellI];
}
cellToRegion.write();
Info<< "Writing region per cell as volScalarField to "
<< cellToRegion.objectPath() << nl << endl;
}
// Sizes per region // Sizes per region
@ -1307,40 +1448,131 @@ int main(int argc, char *argv[])
Info<< endl; Info<< endl;
// Sizes per cellzone
// ~~~~~~~~~~~~~~~~~~
labelList zoneSizes(cellZones.size(), 0);
if (useCellZones || makeCellZones || sloppyCellZones)
{
List<wordList> zoneNames(Pstream::nProcs());
zoneNames[Pstream::myProcNo()] = cellZones.names();
Pstream::gatherList(zoneNames);
Pstream::scatterList(zoneNames);
forAll(zoneNames, procI)
{
if (zoneNames[procI] != zoneNames[0])
{
FatalErrorIn(args.executable())
<< "cellZones not synchronised across processors." << endl
<< "Master has cellZones " << zoneNames[0] << endl
<< "Processor " << procI
<< " has cellZones " << zoneNames[procI]
<< exit(FatalError);
}
}
forAll(cellZones, zoneI)
{
zoneSizes[zoneI] = returnReduce
(
cellZones[zoneI].size(),
sumOp<label>()
);
}
}
    // Whether region corresponds to a cellzone
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-   Info<< "Region\tZone\tName" << nl
-       << "------\t----\t----" << endl;

    // Region per zone
-   labelList regionToZone(cellRegion.nRegions());
    labelList regionToZone(cellRegion.nRegions(), -1);
    // Name of region
    wordList regionNames(cellRegion.nRegions());
// Zone to region
labelList zoneToRegion(cellZones.size(), -1);
if (sloppyCellZones)
{
Info<< "Trying to match regions to existing cell zones;"
<< " region can be subset of cell zone." << nl << endl;
forAll(cellZones, zoneI)
{
label regionI = findCorrespondingRegion
(
zoneID,
cellRegion,
zoneI,
label(0.5*zoneSizes[zoneI]) // minimum overlap
);
if (regionI != -1)
{
Info<< "Sloppily matched region " << regionI
<< " size " << regionSizes[regionI]
<< " to zone " << zoneI << " size " << zoneSizes[zoneI]
<< endl;
zoneToRegion[zoneI] = regionI;
regionToZone[regionI] = zoneI;
regionNames[regionI] = cellZones[zoneI].name();
}
}
}
else
{
Info<< "Trying to match regions to existing cell zones." << nl << endl;
forAll(cellZones, zoneI)
{
label regionI = findCorrespondingRegion
(
zoneID,
cellRegion,
zoneI,
1 // minimum overlap
);
if (regionI != -1)
{
zoneToRegion[zoneI] = regionI;
regionToZone[regionI] = zoneI;
regionNames[regionI] = cellZones[zoneI].name();
}
}
}
    // Allocate region names for unmatched regions.
    forAll(regionToZone, regionI)
    {
-       regionToZone[regionI] = findCorrespondingZone
-       (
-           cellZones,
-           zoneID,
-           cellRegion,
-           regionI
-       );
-
-       if (regionToZone[regionI] != -1)
-       {
-           regionNames[regionI] = cellZones[regionToZone[regionI]].name();
-       }
-       else
        if (regionToZone[regionI] == -1)
        {
            regionNames[regionI] = "domain" + Foam::name(regionI);
        }
    }

    // Print region to zone
    Info<< "Region\tZone\tName" << nl
        << "------\t----\t----" << endl;

    forAll(regionToZone, regionI)
    {
        Info<< regionI << '\t' << regionToZone[regionI] << '\t'
            << regionNames[regionI] << nl;
    }
Info<< endl; Info<< endl;
//// Print zone to region
//Info<< "Zone\tName\tRegion" << nl
// << "----\t----\t------" << endl;
//forAll(zoneToRegion, zoneI)
//{
// Info<< zoneI << '\t' << cellZones[zoneI].name() << '\t'
// << zoneToRegion[zoneI] << nl;
//}
//Info<< endl;
// Since we're going to mess with patches make sure all non-processor ones // Since we're going to mess with patches make sure all non-processor ones
// are on all processors. // are on all processors.
@ -1361,7 +1593,8 @@ int main(int argc, char *argv[])
interfaceSizes interfaceSizes
); );
Info<< "Region\tRegion\tFaces" << nl Info<< "Sizes inbetween regions:" << nl << nl
<< "Region\tRegion\tFaces" << nl
<< "------\t------\t-----" << endl; << "------\t------\t-----" << endl;
forAll(interfaces, interI) forAll(interfaces, interI)
@ -1373,7 +1606,10 @@ int main(int argc, char *argv[])
Info<< endl; Info<< endl;
if (detectOnly)
{
return 0;
}
// Read objects in time directory // Read objects in time directory
@ -1423,7 +1659,7 @@ int main(int argc, char *argv[])
{ {
Info<< "Only one region. Doing nothing." << endl; Info<< "Only one region. Doing nothing." << endl;
} }
-   else if (args.options().found("makeCellZones"))
    else if (makeCellZones)
{ {
Info<< "Putting cells into cellZones instead of splitting mesh." Info<< "Putting cells into cellZones instead of splitting mesh."
<< endl; << endl;
@ -1479,12 +1715,16 @@ int main(int argc, char *argv[])
        if (!overwrite)
        {
            runTime++;
            mesh.setInstance(runTime.timeName());
        }
        else
        {
            mesh.setInstance(oldInstance);
        }

        Info<< "Writing cellZones as new mesh to time " << runTime.timeName()
            << nl << endl;
-       mesh.setInstance(runTime.timeName());
        mesh.write();
@ -1567,7 +1807,8 @@ int main(int argc, char *argv[])
cellRegion, cellRegion,
regionNames, regionNames,
interfaceToPatch, interfaceToPatch,
-               regionI
                regionI,
(overwrite ? oldInstance : runTime.timeName())
); );
} }
else if (largestOnly) else if (largestOnly)
@ -1584,7 +1825,8 @@ int main(int argc, char *argv[])
cellRegion, cellRegion,
regionNames, regionNames,
interfaceToPatch, interfaceToPatch,
-               regionI
                regionI,
(overwrite ? oldInstance : runTime.timeName())
); );
} }
else else
@ -1602,7 +1844,8 @@ int main(int argc, char *argv[])
cellRegion, cellRegion,
regionNames, regionNames,
interfaceToPatch, interfaceToPatch,
-               regionI
                regionI,
(overwrite ? oldInstance : runTime.timeName())
); );
} }
} }

View File

@ -137,6 +137,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
word masterPatchName(args.additionalArgs()[0]); word masterPatchName(args.additionalArgs()[0]);
@ -391,6 +392,10 @@ int main(int argc, char *argv[])
mesh.movePoints(morphMap->preMotionPoints()); mesh.movePoints(morphMap->preMotionPoints());
// Write mesh // Write mesh
if (overwrite)
{
mesh.setInstance(oldInstance);
}
Info << nl << "Writing polyMesh to time " << runTime.timeName() << endl; Info << nl << "Writing polyMesh to time " << runTime.timeName() << endl;
IOstream::defaultPrecision(10); IOstream::defaultPrecision(10);

View File

@ -159,6 +159,7 @@ int main(int argc, char *argv[])
# include "createTime.H" # include "createTime.H"
runTime.functionObjects().off(); runTime.functionObjects().off();
# include "createMesh.H" # include "createMesh.H"
const word oldInstance = mesh.pointsInstance();
word setName(args.additionalArgs()[0]); word setName(args.additionalArgs()[0]);
bool overwrite = args.options().found("overwrite"); bool overwrite = args.options().found("overwrite");
@ -276,7 +277,7 @@ int main(int argc, char *argv[])
// Read point fields and subset // Read point fields and subset
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   pointMesh pMesh(mesh);
    const pointMesh& pMesh = pointMesh::New(mesh);
wordList pointScalarNames(objects.names(pointScalarField::typeName)); wordList pointScalarNames(objects.names(pointScalarField::typeName));
PtrList<pointScalarField> pointScalarFlds(pointScalarNames.size()); PtrList<pointScalarField> pointScalarFlds(pointScalarNames.size());
@ -331,8 +332,12 @@ int main(int argc, char *argv[])
{ {
runTime++; runTime++;
} }
else
{
mesh.setInstance(oldInstance);
}
Info<< "Writing subsetted mesh and fields to time " << runTime.value() Info<< "Writing subsetted mesh and fields to time " << runTime.timeName()
<< endl; << endl;
subsetter.subMesh().write(); subsetter.subMesh().write();

View File

@ -190,16 +190,15 @@ void Foam::vtkPV3Foam::convertPointField
} }
vtkFloatArray *pointData = vtkFloatArray::New(); vtkFloatArray *pointData = vtkFloatArray::New();
-   pointData->SetNumberOfTuples( nPoints + addPointCellLabels.size() );
-   pointData->SetNumberOfComponents( nComp );
-   pointData->Allocate( nComp*(nPoints + addPointCellLabels.size()) );
-   pointData->SetName( tf.name().c_str() );
    pointData->SetNumberOfTuples(nPoints + addPointCellLabels.size());
    pointData->SetNumberOfComponents(nComp);
    pointData->Allocate(nComp*(nPoints + addPointCellLabels.size()));
    pointData->SetName(ptf.name().c_str());
if (debug) if (debug)
{ {
Info<< "convert convertPointField: " Info<< "convert convertPointField: "
-           << tf.name()
            << ptf.name()
<< " size = " << nPoints << " size = " << nPoints
<< " nComp=" << nComp << " nComp=" << nComp
<< " nTuples = " << (nPoints + addPointCellLabels.size()) << " nTuples = " << (nPoints + addPointCellLabels.size())

View File

@ -161,6 +161,10 @@ surfaces
isoField rho; isoField rho;
isoValue 0.5; isoValue 0.5;
interpolate true; interpolate true;
//zone ABC; // Optional: zone only
//exposedPatchName fixedWalls; // Optional: zone only
// regularise false; // Optional: do not simplify // regularise false; // Optional: do not simplify
} }
constantIso constantIso
@ -171,8 +175,27 @@ surfaces
isoField rho; isoField rho;
isoValue 0.5; isoValue 0.5;
interpolate false; interpolate false;
regularise false; // do not simplify
}
triangleCut
{
        // Cuttingplane using iso surface
type cuttingPlane;
planeType pointAndNormal;
pointAndNormalDict
{
basePoint (0.4 0 0.4);
normalVector (1 0.2 0.2);
}
interpolate true;
//zone ABC; // Optional: zone only
//exposedPatchName fixedWalls; // Optional: zone only
// regularise false; // Optional: do not simplify // regularise false; // Optional: do not simplify
} }
); );

View File

@ -78,56 +78,61 @@ int main(int argc, char *argv[])
Time runTime(args.rootPath(), args.caseName()); Time runTime(args.rootPath(), args.caseName());
const stringList& params = args.additionalArgs(); const stringList& params = args.additionalArgs();
word dictName("coordinateSystems"); const word dictName("coordinateSystems");
fileName dictPath(runTime.constant());
fileName dictLocal;
if (args.options().found("dict"))
{
wordList elems(fileName(args.options()["dict"]).components());
dictName = elems[elems.size()-1];
dictPath = elems[0];
dictLocal = "";
if (elems.size() == 1)
{
dictPath = ".";
}
else if (elems.size() > 2)
{
dictLocal = fileName(SubList<word>(elems, elems.size()-2, 1));
}
}
autoPtr<coordinateSystem> fromCsys; autoPtr<coordinateSystem> fromCsys;
autoPtr<coordinateSystem> toCsys; autoPtr<coordinateSystem> toCsys;
if (args.options().found("from") || args.options().found("to")) if (args.options().found("from") || args.options().found("to"))
{ {
-       IOobject csDictIo
-       (
-           dictName,
-           dictPath,
-           dictLocal,
-           runTime,
-           IOobject::MUST_READ,
-           IOobject::NO_WRITE,
-           false
-       );
-
-       if (!csDictIo.headerOk())
        autoPtr<IOobject> csDictIoPtr;

        if (args.options().found("dict"))
{
fileName dictPath(args.options()["dict"]);
csDictIoPtr.set
(
new IOobject
(
( dictPath.isDir() ? dictPath/dictName : dictPath ),
runTime,
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
)
);
}
else
{
csDictIoPtr.set
(
new IOobject
(
dictName,
runTime.constant(),
runTime,
IOobject::MUST_READ,
IOobject::NO_WRITE,
false
)
);
}
if (!csDictIoPtr->headerOk())
{ {
FatalErrorIn(args.executable()) FatalErrorIn(args.executable())
<< "Cannot open coordinateSystems file\n " << "Cannot open coordinateSystems file\n "
-               << csDictIo.objectPath() << nl
                << csDictIoPtr->objectPath() << nl
<< exit(FatalError); << exit(FatalError);
} }
-       coordinateSystems csLst(csDictIo);
        coordinateSystems csLst(csDictIoPtr());
if (args.options().found("from")) if (args.options().found("from"))
{ {
-           word csName(args.options()["from"]);
            const word csName(args.options()["from"]);
label csId = csLst.find(csName); label csId = csLst.find(csName);
if (csId < 0) if (csId < 0)
@ -143,7 +148,7 @@ int main(int argc, char *argv[])
if (args.options().found("to")) if (args.options().found("to"))
{ {
-           word csName(args.options()["to"]);
            const word csName(args.options()["to"]);
label csId = csLst.find(csName); label csId = csLst.find(csName);
if (csId < 0) if (csId < 0)

View File

@ -56,6 +56,7 @@ Note
#include "Time.H" #include "Time.H"
#include "polyMesh.H" #include "polyMesh.H"
#include "triSurface.H" #include "triSurface.H"
#include "PackedBoolList.H"
#include "MeshedSurfaces.H" #include "MeshedSurfaces.H"
#include "UnsortedMeshedSurfaces.H" #include "UnsortedMeshedSurfaces.H"
@ -115,7 +116,7 @@ int main(int argc, char *argv[])
if (args.options().found("orient")) if (args.options().found("orient"))
{ {
Info<< "Checking surface orientation" << endl; Info<< "Checking surface orientation" << endl;
-       surf.checkOrientation(true);
        PatchTools::checkOrientation(surf, true);
Info<< endl; Info<< endl;
} }
@ -154,7 +155,7 @@ int main(int argc, char *argv[])
if (args.options().found("orient")) if (args.options().found("orient"))
{ {
Info<< "Checking surface orientation" << endl; Info<< "Checking surface orientation" << endl;
-       surf.checkOrientation(true);
        PatchTools::checkOrientation(surf, true);
Info<< endl; Info<< endl;
} }
@ -192,7 +193,7 @@ int main(int argc, char *argv[])
if (args.options().found("orient")) if (args.options().found("orient"))
{ {
Info<< "Checking surface orientation" << endl; Info<< "Checking surface orientation" << endl;
-       surf.checkOrientation(true);
        PatchTools::checkOrientation(surf, true);
Info<< endl; Info<< endl;
} }
@ -230,7 +231,7 @@ int main(int argc, char *argv[])
if (args.options().found("orient")) if (args.options().found("orient"))
{ {
Info<< "Checking surface orientation" << endl; Info<< "Checking surface orientation" << endl;
-       surf.checkOrientation(true);
        PatchTools::checkOrientation(surf, true);
Info<< endl; Info<< endl;
} }

View File

@ -172,6 +172,17 @@ Linux)
esac esac
;; ;;
SunOS)
WM_ARCH=SunOS64
export WM_COMPILER_LIB_ARCH=64
export WM_CC='gcc'
export WM_CXX='g++'
export WM_CFLAGS='-mabi=64 -fPIC'
export WM_CXXFLAGS='-mabi=64 -fPIC'
export WM_LDFLAGS='-mabi=64 -G0'
export WM_MPLIB=FJMPI
;;
*) # an unsupported operating system *) # an unsupported operating system
cat <<USAGE cat <<USAGE

View File

@ -149,7 +149,7 @@ case Linux:
setenv WM_ARCH linuxIA64 setenv WM_ARCH linuxIA64
setenv WM_COMPILER I64 setenv WM_COMPILER I64
breaksw breaksw
-   mips64)
    case mips64:
setenv WM_ARCH SiCortex64 setenv WM_ARCH SiCortex64
setenv WM_COMPILER_LIB_ARCH 64 setenv WM_COMPILER_LIB_ARCH 64
setenv WM_CC 'gcc' setenv WM_CC 'gcc'
@ -158,13 +158,24 @@ case Linux:
setenv WM_CXXFLAGS '-mabi=64 -fPIC' setenv WM_CXXFLAGS '-mabi=64 -fPIC'
setenv WM_LDFLAGS '-mabi=64 -G0' setenv WM_LDFLAGS '-mabi=64 -G0'
setenv WM_MPLIB MPI setenv WM_MPLIB MPI
-       ;;
        breaksw
default: default:
echo Unknown processor type `uname -m` for Linux echo Unknown processor type `uname -m` for Linux
breaksw breaksw
endsw endsw
breaksw breaksw
case SunOS:
setenv WM_ARCH SunOS64
setenv WM_COMPILER_LIB_ARCH 64
setenv WM_CC 'gcc'
setenv WM_CXX 'g++'
setenv WM_CFLAGS '-mabi=64 -fPIC'
setenv WM_CXXFLAGS '-mabi=64 -fPIC'
setenv WM_LDFLAGS '-mabi=64 -G0'
setenv WM_MPLIB FJMPI
breaksw
default: default:
echo echo
echo "Your '$WM_ARCH' operating system is not supported by this release" echo "Your '$WM_ARCH' operating system is not supported by this release"

View File

@ -222,6 +222,15 @@ case MPI:
setenv FOAM_MPI_LIBBIN $FOAM_LIBBIN/mpi setenv FOAM_MPI_LIBBIN $FOAM_LIBBIN/mpi
breaksw breaksw
case FJMPI:
setenv MPI_ARCH_PATH /opt/FJSVmpi2
setenv FOAM_MPI_LIBBIN $FOAM_LIBBIN/mpi
_foamAddPath $MPI_ARCH_PATH/bin
_foamAddLib $MPI_ARCH_PATH/lib/sparcv9
_foamAddLib /opt/FSUNf90/lib/sparcv9
_foamAddLib /opt/FJSVpnidt/lib
breaksw
default: default:
setenv FOAM_MPI_LIBBIN $FOAM_LIBBIN/dummy setenv FOAM_MPI_LIBBIN $FOAM_LIBBIN/dummy
breaksw breaksw

View File

@ -249,6 +249,16 @@ MPI)
export FOAM_MPI_LIBBIN=$FOAM_LIBBIN/mpi export FOAM_MPI_LIBBIN=$FOAM_LIBBIN/mpi
;; ;;
FJMPI)
export MPI_ARCH_PATH=/opt/FJSVmpi2
export FOAM_MPI_LIBBIN=$FOAM_LIBBIN/mpi
_foamAddPath $MPI_ARCH_PATH/bin
_foamAddLib $MPI_ARCH_PATH/lib/sparcv9
_foamAddLib /opt/FSUNf90/lib/sparcv9
_foamAddLib /opt/FJSVpnidt/lib
;;
*) *)
export FOAM_MPI_LIBBIN=$FOAM_LIBBIN/dummy export FOAM_MPI_LIBBIN=$FOAM_LIBBIN/dummy
;; ;;

View File

@ -8,7 +8,11 @@ fileStat.C
Unix.C Unix.C
cpuTime/cpuTime.C cpuTime/cpuTime.C
clockTime/clockTime.C clockTime/clockTime.C
#ifndef SunOS64
printStack.C printStack.C
-/*dummyPrintStack.C*/
#else
dummyPrintStack.C
#endif
LIB = $(FOAM_LIBBIN)/libOSspecific LIB = $(FOAM_LIBBIN)/libOSspecific

View File

@ -50,7 +50,7 @@ primitives/random/Random.C
containers/HashTables/HashTable/HashTableName.C containers/HashTables/HashTable/HashTableName.C
containers/HashTables/StaticHashTable/StaticHashTableName.C containers/HashTables/StaticHashTable/StaticHashTableName.C
containers/Lists/SortableList/ParSortableListName.C containers/Lists/SortableList/ParSortableListName.C
-containers/Lists/PackedList/PackedListName.C
containers/Lists/PackedList/PackedListCore.C
containers/Lists/ListOps/ListOps.C containers/Lists/ListOps/ListOps.C
containers/LinkedLists/linkTypes/SLListBase/SLListBase.C containers/LinkedLists/linkTypes/SLListBase/SLListBase.C
containers/LinkedLists/linkTypes/DLListBase/DLListBase.C containers/LinkedLists/linkTypes/DLListBase/DLListBase.C

View File

@ -51,6 +51,13 @@ Foam::HashSet<Key, Hash>::HashSet(const HashTable<AnyType, Key, Hash>& ht)
// * * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * //
template<class Key, class Hash>
inline bool Foam::HashSet<Key, Hash>::operator[](const Key& key) const
{
return found(key);
}
template<class Key, class Hash> template<class Key, class Hash>
bool Foam::HashSet<Key, Hash>::operator==(const HashSet<Key, Hash>& rhs) const bool Foam::HashSet<Key, Hash>::operator==(const HashSet<Key, Hash>& rhs) const
{ {
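With the added operator[], a HashSet can be queried like a boolean map; it simply forwards to found(). A small usage sketch, assuming the usual Foam headers and types are available:

    // Sketch: the added operator[] is a synonym for found().
    HashSet<word> patchesToKeep;
    patchesToKeep.insert("inlet");

    if (patchesToKeep["inlet"])     // same as patchesToKeep.found("inlet")
    {
        Info<< "keeping inlet" << endl;
    }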

View File

@ -133,9 +133,11 @@ public:
return HashTable<nil, Key, Hash>::insert(key, nil()); return HashTable<nil, Key, Hash>::insert(key, nil());
} }
// Member Operators // Member Operators
//- Return true if the entry exists, same as found()
inline bool operator[](const Key&) const;
//- Equality. Two hashtables are equal when their contents are equal. //- Equality. Two hashtables are equal when their contents are equal.
// Independent of table size or order. // Independent of table size or order.
bool operator==(const HashSet<Key, Hash>&) const; bool operator==(const HashSet<Key, Hash>&) const;

View File

@ -28,20 +28,20 @@ License
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
-template<int nBits>
template<unsigned nBits>
Foam::PackedList<nBits>::PackedList(const label size, const unsigned int val)
:
-   List<PackedStorage>(packedLength(size), 0u),
    StorageList(packedLength(size), 0u),
size_(size) size_(size)
{ {
operator=(val); operator=(val);
} }
-template<int nBits>
template<unsigned nBits>
Foam::PackedList<nBits>::PackedList(const UList<label>& lst)
:
-   List<PackedStorage>(packedLength(lst.size()), 0u),
    StorageList(packedLength(lst.size()), 0u),
size_(lst.size()) size_(lst.size())
{ {
forAll(lst, i) forAll(lst, i)
@ -53,7 +53,116 @@ Foam::PackedList<nBits>::PackedList(const UList<label>& lst)
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<int nBits>
#if (UINT_MAX == 0xFFFFFFFF)
// 32-bit counting, Hamming weight method
# define COUNT_PACKEDBITS(sum, x) \
{ \
x -= (x >> 1) & 0x55555555; \
x = (x & 0x33333333) + ((x >> 2) & 0x33333333); \
sum += (((x + (x >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \
}
#elif (UINT_MAX == 0xFFFFFFFFFFFFFFFF)
// 64-bit counting, Hamming weight method
# define COUNT_PACKEDBITS(sum, x) \
{ \
x -= (x >> 1) & 0x5555555555555555; \
x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333); \
sum += (((x + (x >> 4)) & 0x0F0F0F0F0F0F0F0F) * 0x0101010101010101) >> 56;\
}
#else
// Arbitrary number of bits, Brian Kernighan's method
# define COUNT_PACKEDBITS(sum, x) for (; x; ++sum) { x &= x - 1; }
#endif
template<unsigned nBits>
unsigned int Foam::PackedList<nBits>::count() const
{
register unsigned int c = 0;
if (size_)
{
// mask value for complete chunks
unsigned int mask = maskLower(packing());
unsigned int endIdx = size_ / packing();
unsigned int endOff = size_ % packing();
// count bits in complete elements
for (unsigned i = 0; i < endIdx; ++i)
{
register unsigned int bits = StorageList::operator[](i) & mask;
COUNT_PACKEDBITS(c, bits);
}
// count bits in partial chunk
if (endOff)
{
mask = maskLower(endOff);
register unsigned int bits = StorageList::operator[](endIdx) & mask;
COUNT_PACKEDBITS(c, bits);
}
}
return c;
}
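The count() method above relies on the COUNT_PACKEDBITS macros, whose 32- and 64-bit branches are the classic SWAR/Hamming-weight reduction. A standalone illustration of the 32-bit variant in plain C++ (not OpenFOAM code):

    #include <cassert>
    #include <cstdint>

    // 32-bit SWAR population count, as in the 32-bit COUNT_PACKEDBITS branch.
    inline unsigned popcount32(std::uint32_t x)
    {
        x -= (x >> 1) & 0x55555555u;                        // pairwise bit sums
        x  = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit sums
        return (((x + (x >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24; // fold bytes
    }

    int main()
    {
        assert(popcount32(0u) == 0);
        assert(popcount32(0xFFFFFFFFu) == 32);
        assert(popcount32(0xF0F0u) == 8);
        return 0;
    }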
template<unsigned nBits>
bool Foam::PackedList<nBits>::trim()
{
if (!size_)
{
return false;
}
// mask value for complete chunks
unsigned int mask = maskLower(packing());
label currElem = packedLength(size_) - 1;
unsigned int endOff = size_ % packing();
// clear trailing bits on final segment
if (endOff)
{
StorageList::operator[](currElem) &= maskLower(endOff);
}
// test entire chunk
while (currElem > 0 && !(StorageList::operator[](currElem) &= mask))
{
currElem--;
}
// test segment
label newsize = (currElem + 1) * packing();
// mask for the final segment
mask = max_value() << (nBits * (packing() - 1));
for (endOff = packing(); endOff >= 1; --endOff, --newsize)
{
if (StorageList::operator[](currElem) & mask)
{
break;
}
mask >>= nBits;
}
if (size_ == newsize)
{
return false;
}
size_ = newsize;
return false;
}
template<unsigned nBits>
Foam::labelList Foam::PackedList<nBits>::values() const Foam::labelList Foam::PackedList<nBits>::values() const
{ {
labelList elems(size()); labelList elems(size());
@ -66,11 +175,12 @@ Foam::labelList Foam::PackedList<nBits>::values() const
} }
-template<int nBits>
-Foam::Ostream& Foam::PackedList<nBits>::iterator::print(Ostream& os) const
template<unsigned nBits>
Foam::Ostream& Foam::PackedList<nBits>::iteratorBase::print(Ostream& os) const
{
-   os << "iterator<" << nBits << "> [" << position() << "]"
-      << " elem:" << elem_ << " offset:" << offset_
    os  << "iterator<" << label(nBits) << "> ["
        << (index_ * packing() + offset_) << "]"
        << " index:" << index_ << " offset:" << offset_
        << " value:" << unsigned(*this)
        << nl;
@ -78,10 +188,10 @@ Foam::Ostream& Foam::PackedList<nBits>::iterator::print(Ostream& os) const
} }
-template<int nBits>
template<unsigned nBits>
Foam::Ostream& Foam::PackedList<nBits>::print(Ostream& os) const
{
-   os << "PackedList<" << nBits << ">"
    os  << "PackedList<" << label(nBits) << ">"
<< " max_value:" << max_value() << " max_value:" << max_value()
<< " packing:" << packing() << nl << " packing:" << packing() << nl
<< "values: " << size() << "/" << capacity() << "( "; << "values: " << size() << "/" << capacity() << "( ";
@ -90,27 +200,36 @@ Foam::Ostream& Foam::PackedList<nBits>::print(Ostream& os) const
os << get(i) << ' '; os << get(i) << ' ';
} }
    label packLen = packedLength(size_);

    os  << ")\n"
-       << "storage: " << storage().size() << "( ";
        << "storage: " << packLen << "/" << StorageList::size() << "( ";

-   label count = size();
    // mask value for complete chunks
    unsigned int mask = maskLower(packing());

-   forAll(storage(), i)
    for (label i=0; i < packLen; i++)
    {
-       const PackedStorage& rawBits = storage()[i];
        const StorageType& rawBits = StorageList::operator[](i);

-       // create mask for unaddressed bits
-       unsigned int addressed = 0;
-       for (unsigned packI = 0; count && packI < packing(); packI++, count--)
-       {
-           addressed <<= nBits;
-           addressed |= max_value();
-       }
        // the final storage may not be full, modify mask accordingly
        if (i+1 == packLen)
        {
            unsigned endOff = size_ % packing();

            if (endOff)
            {
                mask = maskLower(endOff);
            }
            else
            {
                continue;
            }
        }

-       for (unsigned int testBit = 0x1 << max_bits(); testBit; testBit >>= 1)
        for (unsigned int testBit = (1 << max_bits()); testBit; testBit >>= 1)
        {
-           if (testBit & addressed)
            if (testBit & mask)
            {
                if (rawBits & testBit)
                {
@ -123,7 +242,7 @@ Foam::Ostream& Foam::PackedList<nBits>::print(Ostream& os) const
                }
                else
                {
-                   os << '_';
                    os << '.';
                }
            }
            cout << ' ';
@ -136,15 +255,15 @@ Foam::Ostream& Foam::PackedList<nBits>::print(Ostream& os) const
// * * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Member Operators * * * * * * * * * * * * * //
-template<int nBits>
template<unsigned nBits>
void Foam::PackedList<nBits>::operator=(const PackedList<nBits>& lst)
{
    setCapacity(lst.size());
-   List<PackedStorage>::operator=(lst);
    StorageList::operator=(lst);
} }
template<int nBits> template<unsigned nBits>
void Foam::PackedList<nBits>::operator=(const UList<label>& lst) void Foam::PackedList<nBits>::operator=(const UList<label>& lst)
{ {
setCapacity(lst.size()); setCapacity(lst.size());
@ -156,10 +275,9 @@ void Foam::PackedList<nBits>::operator=(const UList<label>& lst)
} }
// * * * * * * * * * * * * * * * Ostream Operator * * * * * * * * * * * * * // // * * * * * * * * * * * * * * * Ostream Operator * * * * * * * * * * * * * //
//template<int nBits> //template<unsigned nBits>
//Foam::Ostream& ::Foam::operator<<(Ostream& os, const PackedList<nBits>& lst) //Foam::Ostream& ::Foam::operator<<(Ostream& os, const PackedList<nBits>& lst)
//{ //{
// os << lst(); // os << lst();
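Several of the routines above use maskLower(offset), declared in the header as "masking for all bits below the offset". The actual implementation is not shown in this diff; a hedged reconstruction of what such a mask computation could look like for an nBits-wide packed list:

    // Sketch only: mask covering the lowest 'offset' entries of nBits each.
    // For nBits=1 and packing()=32, offset=3 gives 0x7, offset=32 gives 0xFFFFFFFF.
    template<unsigned nBits>
    inline unsigned int maskLowerSketch(unsigned offset)
    {
        // avoid undefined behaviour when the mask spans the whole word
        if (offset * nBits >= sizeof(unsigned int) * 8)
        {
            return ~0u;
        }
        return (1u << (nBits * offset)) - 1;
    }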

View File

@ -28,19 +28,46 @@ Class
Description
    A Dynamically allocatable list of packed unsigned ints.

-   Gets given the number of bits per item.
-
-Note
    The list resizing is similar to DynamicList, thus the methods clear()
    and setSize() behave like their DynamicList counterparts and the methods
    reserve() and setCapacity() can be used to influence the allocation.

    The number of bits per item is specified by the template parameter nBits.

Note
    In a const context, the '[]' operator simply returns the stored value,
    with out-of-range elements returned as zero.

    In a non-const context, the '[]' operator returns an iteratorBase, which
    may not have a valid reference for out-of-range elements.
    The iteratorBase class handles the assignment of new values.

    Using the iteratorBase as a proxy allows assignment of values
    between list elements. Thus the following bit of code works as expected:
    @code
        blist[1] = blist[5];             // value assignment, not iterator position
        blist[2] = blist[5] = 4;         // propagates value
        blist[1] = blist[5] = blist[6];  // propagates value
    @endcode

    Using get() or the '[]' operator are similarly fast. Looping and reading
    with an iterator is approx. 15% slower, but can be more flexible.

    Using the set() operator (and the '[]' operator) are marginally slower
    (approx. 5%) than using an iterator, but the set() method has an
    advantage that it also returns a bool if the value changed. This can be
    useful for branching on changed values.
    @code
        blist[5] = 4;
        changed = blist.set(5, 8);
        if (changed) ...
    @endcode

SeeAlso
    Foam::DynamicList

ToDo
-   Add checks for bad template parameters (ie, nBits=0, nBits too large).
    Checks for bad template parameters (ie, nBits=0, nBits too large)?
    The missing const_iterator might eventually still be needed.

SourceFiles
    PackedListI.H
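The Note above describes the proxy behaviour of the non-const operator[]: it returns an iteratorBase whose assignment writes the value back into the packed storage, so chained assignments propagate values rather than iterator positions. A short usage sketch based on the documented calls (OpenFOAM context assumed):

    // Sketch of the documented proxy semantics.
    PackedList<3> blist(10, 0);       // ten 3-bit entries, initially zero

    blist[5] = 4;                     // proxy assignment writes the value 4
    blist[2] = blist[5];              // copies the value, not the position

    bool changed = blist.set(5, 4);   // false: element 5 already holds 4
    changed      = blist.set(5, 7);   // true: the value changed

    Info<< "blist[2] = " << blist.get(2) << endl;   // prints 4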
@ -59,9 +86,9 @@ namespace Foam
{ {
// Forward declaration of friend functions and operators // Forward declaration of friend functions and operators
-template<int nBits> class PackedList;
template<unsigned nBits> class PackedList;

-// template<int nBits>
// template<unsigned nBits>
// Ostream& operator<<(Ostream&, const PackedList<nBits>&); // Ostream& operator<<(Ostream&, const PackedList<nBits>&);
@ -75,12 +102,13 @@ TemplateName(PackedList);
Class PackedList Declaration Class PackedList Declaration
\*---------------------------------------------------------------------------*/ \*---------------------------------------------------------------------------*/
-template <int nBits>
template<unsigned nBits=1>
class PackedList class PackedList
: :
private List<unsigned int> private List<unsigned int>
{ {
-   typedef unsigned int PackedStorage;
    typedef unsigned int StorageType;
typedef List<StorageType> StorageList;
// Private data // Private data
@ -110,10 +138,12 @@ public:
//- The number of entries per packed storage element //- The number of entries per packed storage element
inline static unsigned int packing(); inline static unsigned int packing();
-       // Forward declaration of iterator
-       class iterator;
-       friend class iterator;
        //- Masking for all bits below the offset
        inline static unsigned int maskLower(unsigned offset);
// Forward declaration of iteratorBase
class iteratorBase;
// Constructors // Constructors
@ -152,16 +182,27 @@ public:
inline bool empty() const; inline bool empty() const;
//- Get value at index I. //- Get value at index I.
// Does not auto-vivifies entries. // Does not auto-vivify entries.
inline unsigned int get(const label) const; inline unsigned int get(const label) const;
//- Set value at index I. Return true if value changed. //- Set value at index I. Return true if value changed.
// Does not auto-vivifies entries. // Does not auto-vivify entries.
inline bool set(const label, const unsigned int val); inline bool set(const label, const unsigned int val);
//- Return the underlying packed storage
inline List<unsigned int>& storage();
//- Return the underlying packed storage //- Return the underlying packed storage
inline const List<unsigned int>& storage() const; inline const List<unsigned int>& storage() const;
//- Count number of bits set, O(log(n))
// Uses the Hamming weight (population count) method
// http://en.wikipedia.org/wiki/Hamming_weight
unsigned int count() const;
//- Trim any trailing zero elements
bool trim();
//- Return the values as a labelList //- Return the values as a labelList
labelList values() const; labelList values() const;
@ -185,7 +226,7 @@ public:
//- Reserve allocation space for at least this size. //- Reserve allocation space for at least this size.
// Never shrinks the allocated size. // Never shrinks the allocated size.
// Optionally provide an initialization value for new elements. // Optionally provide an initialization value for new elements.
-       inline void reserve(const label, const unsigned int& val = 0);
        inline void reserve(const label);
//- Clear the list, i.e. set addressable size to zero. //- Clear the list, i.e. set addressable size to zero.
// Does not adjust the underlying storage // Does not adjust the underlying storage
@ -204,22 +245,25 @@ public:
//- Transfer contents to the Xfer container //- Transfer contents to the Xfer container
inline Xfer<PackedList<nBits> > xfer(); inline Xfer<PackedList<nBits> > xfer();
// Member operators // Member operators
//- Append a value at the end of the list //- Append a value at the end of the list
inline void append(const unsigned int val); inline void append(const unsigned int val);
//- Remove and return the last element
inline unsigned int remove();
//- Get value at index I //- Get value at index I
-       // Auto-vivifies any new values to zero.
        // Does not auto-vivify entries.
        inline unsigned int operator[](const label) const;

        //- Set value at index I.
-       // Returns proxy to perform the actual operation.
-       // Auto-vivifies any new values to zero.
-       inline iterator operator[](const label);
        // Returns iterator to perform the actual operation.
        // Does not auto-vivify entries.
        inline iteratorBase operator[](const label);

-       //- Assignment of all entries to the given value.
-       // Does set on all elements.
        //- Assignment of all entries to the given value. Takes linear time.
        inline void operator=(const unsigned int val);
//- Assignment operator. Takes linear time. //- Assignment operator. Takes linear time.
@ -230,83 +274,211 @@ public:

    // Ostream operator

        //  // Write PackedList to Ostream.
        // friend Ostream& operator<< <nBits> (Ostream&, const PackedList<nBits>&);

-       //- The iterator-like class used for PackedList
-       class iterator
-       {
-           friend class PackedList;
-
-           // Private Data
-
-               //- Reference to original list
-               PackedList& list_;
-
-               //- Element index within storage
-               unsigned elem_;
-
-               //- Offset within storage element
-               unsigned offset_;
-
-               //- Return the raw storage element
-               inline PackedStorage& chunk() const;
-
-               //- Return the position in the PackedList
-               inline label position() const;
-
-       public:
-
-           // Constructors
-
-               //- Construct from base list and position index
-               inline iterator(const PackedList&, const label i);
-
-           // Members
-
-               //- Assign value, returns true if the value changed
-               inline bool operator=(const unsigned int val);
+   // Iterators and helpers
+
+       //- The iterator base for PackedList
+       //  Note: data and functions are protected, to allow reuse by iterator
+       //  and prevent most external usage.
+       class iteratorBase
+       {
+           friend class PackedList;
+
+       protected:
+
+           // Protected Data
+
+               //- Pointer to original list
+               //  This also lets us use the default bitwise copy/assignment
+               PackedList* list_;
+
+               //- Element index within storage
+               unsigned index_;
+
+               //- Offset within storage element
+               unsigned offset_;
+
+           // Protected Member Functions
+
+               //- Get value as unsigned
+               inline unsigned int get() const;
+
+               //- Set value, returning true if changed
+               inline bool set(unsigned int);
+
+               //- Increment to new position
+               inline void incr();
+
+               //- Decrement to new position
+               inline void decr();
+
+               //- Move to new position, but not beyond end()
+               inline void seek(const iteratorBase&);
+
+           // Constructors
+
+               //- Construct null
+               inline iteratorBase();
+
+               //- Copy construct
+               inline iteratorBase(const iteratorBase&);
+
+               //- Construct from base list and position index
+               inline iteratorBase(const PackedList*, const label);
+
+       public:
+
+           // Member Functions
+
+               //- Return true if the element is within addressable range
+               inline bool valid() const;
+
+               //- Move iterator to end() if it would otherwise be out-of-range
+               //  Returns true if the element was already ok
+               inline bool validate();
+
+           // Member Operators
+
+               //- Compare positions
+               inline bool operator==(const iteratorBase&) const;
+               inline bool operator!=(const iteratorBase&) const;
+
+               //- Assign value, not position.
+               //  This allows packed[0] = packed[3] for assigning values
+               inline unsigned int operator=(const iteratorBase&);
+
+               //- Assign value
+               inline unsigned int operator=(const unsigned int val);

                //- Conversion operator
                inline operator unsigned int () const;

-               //- Conversion operator
-               inline operator bool() const;
-
-           // Member operators
-
-               inline void operator=(const iterator&);
-               inline bool operator==(const iterator&) const;
-               inline bool operator!=(const iterator&) const;
-
-               //- Return referenced value
-               inline unsigned int operator()() const;
-
-               inline iterator& operator++();
-               inline iterator operator++(int);
-
-               //- Print value and information
-               Ostream& print(Ostream&) const;
-       };
-
-       //- iterator set to the begining of the PackedList
-       inline iterator begin();
-
-       //- iterator set to beyond the end of the HashTable
-       inline iterator end();
+               //- Print value and information
+               Ostream& print(Ostream&) const;
+       };
+
+
+       //- The iterator class used for PackedList
+       class iterator
+       :
+           public iteratorBase
+       {
+           //- Should never violate const-ness!
+           void operator=(const const_iterator&);
+
+       public:
+
+           // Constructors
+
+               //- Construct null
+               inline iterator();
+
+               //- Construct from iterator base, eg iter(packedlist[i])
+               inline iterator(const iteratorBase&);
+
+               //- Construct from base list and position index
+               inline iterator(const PackedList*, const label);
+
+           // Member Operators
+
+               //- Assign from iteratorBase, eg iter = packedlist[i]
+               inline iterator& operator=(const iteratorBase&);
+
+               //- Return value
+               inline unsigned int operator*() const;
+
+               //- Return value
+               inline unsigned int operator()() const;
+
+               //- Return iteratorBase for assigning values
+               inline iteratorBase& operator*();
+
+               //- Return iteratorBase for assigning values
+               inline iteratorBase& operator()();
+
+               inline iterator& operator++();
+               inline iterator operator++(int);
+
+               inline iterator& operator--();
+               inline iterator operator--(int);
+       };
+
+       //- iterator set to the beginning of the PackedList
+       inline iterator begin();
+
+       //- iterator set to beyond the end of the HashTable
+       inline iterator end();
+
+
+       //- The const_iterator for PackedList
+       class const_iterator
+       :
+           public iteratorBase
+       {
+       public:
+
+           // Constructors
+
+               //- Construct null
+               inline const_iterator();
+
+               //- Construct from iterator base
+               inline const_iterator(const iteratorBase&);
+
+               //- Construct from base list and position index
+               inline const_iterator(const PackedList*, const label);
+
+               //- Construct from non-const iterator
+               inline const_iterator(const iterator&);
+
+           // Member operators
+
+               //- Assign from iteratorBase or derived
+               //  eg, iter = packedlist[i] or iter = [non-const]list.begin()
+               inline const_iterator& operator=(const iteratorBase&);
+
+               //- Return referenced value directly
+               inline unsigned int operator*() const;
+
+               //- Return referenced value directly
+               inline unsigned int operator()() const;
+
+               inline const_iterator& operator++();
+               inline const_iterator operator++(int);
+
+               inline const_iterator& operator--();
+               inline const_iterator operator--(int);
+       };
+
+       //- const_iterator set to the beginning of the PackedList
+       inline const_iterator cbegin() const;
+
+       //- const_iterator set to beyond the end of the PackedList
+       inline const_iterator cend() const;
+
+       //- const_iterator set to the beginning of the PackedList
+       inline const_iterator begin() const;
+
+       //- const_iterator set to beyond the end of the PackedList
+       inline const_iterator end() const;
};


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

} // End namespace Foam

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

-# include "PackedListI.H"
+#include "PackedListI.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
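Taken together, the changes above replace the single proxy iterator with an iteratorBase/iterator/const_iterator family and add count(), trim(), remove() and a non-const storage() accessor. The following is only a usage sketch against the interface declared above, not code from the commit; it assumes the pre-existing PackedList<nBits>(label) size constructor and the standard "IOstreams.H" Info stream.

// Illustration only -- exercises the revised PackedList interface.
#include "PackedList.H"
#include "IOstreams.H"

using namespace Foam;

int main()
{
    PackedList<3> packed(10);              // ten 3-bit entries, initially zero

    packed.set(2, 5);                      // set() does not auto-vivify entries
    packed[3] = packed[2];                 // operator[] returns an iteratorBase proxy,
                                           // so this copies the value, not the position

    packed.append(7);                      // grow by one entry
    unsigned int last = packed.remove();   // remove and return the last entry

    // const_iterator dereferences directly to the packed value
    for
    (
        PackedList<3>::const_iterator iter = packed.cbegin();
        iter != packed.cend();
        ++iter
    )
    {
        Info<< *iter << endl;
    }

    packed.trim();                         // drop trailing zero elements
    Info<< "bits set: " << packed.count()
        << " last removed: " << last << endl;

    return 0;
}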

File diff suppressed because it is too large

View File

@ -98,6 +98,37 @@ Foam::IOobject::IOobject
}


+Foam::IOobject::IOobject
+(
+    const fileName& path,
+    const objectRegistry& registry,
+    readOption ro,
+    writeOption wo,
+    bool registerObject
+)
+:
+    name_(),
+    headerClassName_(typeName),
+    note_(),
+    instance_(),
+    local_(),
+    db_(registry),
+    rOpt_(ro),
+    wOpt_(wo),
+    registerObject_(registerObject),
+    objState_(GOOD)
+{
+    path.IOobjectComponents(instance_, local_, name_);
+
+    if (objectRegistry::debug)
+    {
+        Info<< "Constructing IOobject called " << name_
+            << " of type " << headerClassName_
+            << endl;
+    }
+}
+
+
// * * * * * * * * * * * * * * * Destructor  * * * * * * * * * * * * * * * * //

Foam::IOobject::~IOobject()

View File

@ -193,6 +193,16 @@ public:
            bool registerObject=true
        );

+       //- Construct from path, registry, io options
+       IOobject
+       (
+           const fileName& path,
+           const objectRegistry& registry,
+           readOption r=NO_READ,
+           writeOption w=NO_WRITE,
+           bool registerObject=true
+       );
+
        //- Clone
        Foam::autoPtr<IOobject> clone() const
        {
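A sketch of how the new path-based constructor might be called; this is not taken from the commit. It assumes a Time database named runTime, a case that actually contains constant/polyMesh/points, and the existing instance()/local()/name() accessors.

// Illustration only: construct an IOobject directly from a path.
// The constructor splits the path into instance, local and name parts.
IOobject io
(
    "constant/polyMesh/points",     // path
    runTime,                        // objectRegistry (the Time database)
    IOobject::MUST_READ,
    IOobject::NO_WRITE
);

Info<< "instance = " << io.instance()
    << ", local = " << io.local()
    << ", name = " << io.name() << endl;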

View File

@ -283,10 +283,11 @@ public:

        //- Return the location of "dir" containing the file "name".
        //  (Used in reading mesh data)
+       //  If name is null search for a directory "dir"
        word findInstance
        (
            const fileName& dir,
-           const word& name,
+           const word& name = word::null,
            const IOobject::readOption rOpt = IOobject::MUST_READ
        ) const;
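With the word::null default, findInstance() can locate a directory as well as a file. The following is only a sketch of the two calling modes, assuming a Time object named runTime; it is not part of the commit.

// Illustration only: the two ways Time::findInstance() can now be called.

// 1. As before: find the instance whose polyMesh/ contains the file "points"
word pointsInstance = runTime.findInstance("polyMesh", "points");

// 2. New: name defaults to word::null, so this finds the instance that
//    simply contains a polyMesh/ directory (no particular file required)
word meshInstance = runTime.findInstance("polyMesh");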

View File

@ -23,7 +23,9 @@ License
    Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

Description
-   Return the location of "directory" containing the file "name".
+   If "name" is empty: return the location of "directory"
+   If "name" is not empty: return the location of "directory" containing the
+   file "name".
    Used in reading mesh data.

\*---------------------------------------------------------------------------*/

@ -43,15 +45,20 @@ Foam::word Foam::Time::findInstance
    // Is the mesh data in the current time directory ?
    if
    (
-       file(path()/timeName()/dir/name)
-    && IOobject(name, timeName(), dir, *this).headerOk()
+       (name.empty() && Foam::dir(path()/timeName()/dir))
+    ||
+       (
+           !name.empty()
+        && file(path()/timeName()/dir/name)
+        && IOobject(name, timeName(), dir, *this).headerOk()
+       )
    )
    {
        if (debug)
        {
-           Info<< "Time::findInstance(const word& dir, const word& name) : "
-               << "reading " << name
-               << " from " << timeName()/dir
+           Info<< "Time::findInstance(const word&, const word&) : "
+               << "found " << name
+               << " in " << timeName()/dir
                << endl;
        }

@ -81,16 +88,21 @@ Foam::word Foam::Time::findInstance
    {
        if
        (
-           file(path()/ts[j].name()/dir/name)
-        && IOobject(name, ts[j].name(), dir, *this).headerOk()
+           (name.empty() && Foam::dir(path()/ts[j].name()/dir))
+        ||
+           (
+               !name.empty()
+            && file(path()/ts[j].name()/dir/name)
+            && IOobject(name, ts[j].name(), dir, *this).headerOk()
+           )
        )
        {
            if (debug)
            {
-               Info<< "Time::findInstance(const word& dir, "
-                   << "const word& name) : "
-                   << "reading " << name
-                   << " from " << ts[j].name()/dir
+               Info<< "Time::findInstance(const word&, "
+                   << "const word&) : "
+                   << "found " << name
+                   << " in " << ts[j].name()/dir
                    << endl;
            }

@ -109,16 +121,20 @@ Foam::word Foam::Time::findInstance

    if
    (
-       file(path()/constant()/dir/name)
-    && IOobject(name, constant(), dir, *this).headerOk()
+       (name.empty() && Foam::dir(path()/constant()/dir))
+    || (
+           !name.empty()
+        && file(path()/constant()/dir/name)
+        && IOobject(name, constant(), dir, *this).headerOk()
+       )
    )
    {
        if (debug)
        {
-           Info<< "Time::findInstance(const word& dir, "
-               << "const word& name) : "
-               << "reading " << name
-               << " from " << constant()/dir
+           Info<< "Time::findInstance(const word&, "
+               << "const word&) : "
+               << "found " << name
+               << " in " << constant()/dir
                << endl;
        }

@ -127,7 +143,7 @@ Foam::word Foam::Time::findInstance

    if (rOpt == IOobject::MUST_READ)
    {
-       FatalErrorIn("Time::findInstance(const word& dir, const word& name)")
+       FatalErrorIn("Time::findInstance(const word&, const word&)")
            << "Cannot find file \"" << name << "\" in directory "
            << constant()/dir
            << exit(FatalError);

@ -136,4 +152,5 @@ Foam::word Foam::Time::findInstance

    return constant();
}

// ************************************************************************* //

View File

@ -538,7 +538,7 @@ bool Foam::polyBoundaryMesh::checkDefinition(const bool report) const

    forAll (bm, patchI)
    {
-       if (bm[patchI].start() != nextPatchStart)
+       if (bm[patchI].start() != nextPatchStart && !boundaryError)
        {
            boundaryError = true;

@ -547,7 +547,9 @@ bool Foam::polyBoundaryMesh::checkDefinition(const bool report) const
                << " of type " << bm[patchI].type()
                << ". The patch should start on face no " << nextPatchStart
                << " and the patch specifies " << bm[patchI].start()
-               << "." << endl;
+               << "." << endl
+               << "Possibly consecutive patches have this same problem."
+               << " Suppressing future warnings." << endl;
        }

        nextPatchStart += bm[patchI].size();

View File

@ -60,8 +60,6 @@ SourceFiles
namespace Foam
{

-class patchZones;
-
/*---------------------------------------------------------------------------*\
                      Class cyclicPolyPatch Declaration
\*---------------------------------------------------------------------------*/

Some files were not shown because too many files have changed in this diff