Merge pull request #110 from ParticulateFlow/develop

merging develop in recurrenceLib
This commit is contained in:
asanaz
2020-09-17 11:37:31 +02:00
committed by GitHub
393 changed files with 1230776 additions and 572582 deletions

View File

@ -21,18 +21,18 @@ jobs:
- run:
name: Make project and user dir
command: mkdir -p /root/CFDEM/CFDEMcoupling && mkdir -p /root/CFDEM/-4.1
command: mkdir -p /root/CFDEM/CFDEMcoupling && mkdir -p /root/CFDEM/-6
- checkout:
path: /root/CFDEM/CFDEMcoupling
- run:
name: Add OpenFOAM package repository
command: sudo apt-get install -y software-properties-common wget apt-transport-https && sudo add-apt-repository http://dl.openfoam.org/ubuntu && sudo sh -c "wget -O - http://dl.openfoam.org/gpg.key | apt-key add -"
command: sudo apt-get install -y software-properties-common wget apt-transport-https && sudo sh -c "wget -O - http://dl.openfoam.org/gpg.key | apt-key add -" && sudo add-apt-repository http://dl.openfoam.org/ubuntu
- run:
name: Install OpenFOAM 4.1
command: sudo apt-get update && sudo apt-get -y install openfoam4
name: Install OpenFOAM 6
command: sudo apt-get update && sudo apt-get -y install openfoam6
- run:
name: Clone LIGGGHTS repository
@ -42,7 +42,7 @@ jobs:
name: Build LIGGGHTS
command: >
shopt -s expand_aliases &&
source /opt/openfoam4/etc/bashrc &&
source /opt/openfoam6/etc/bashrc &&
source /root/CFDEM/CFDEMcoupling/etc/bashrc &&
bash /root/CFDEM/CFDEMcoupling/etc/compileLIGGGHTS.sh
no_output_timeout: 30m
@ -51,7 +51,7 @@ jobs:
name: Build CFDEMcoupling library
command: >
shopt -s expand_aliases &&
source /opt/openfoam4/etc/bashrc &&
source /opt/openfoam6/etc/bashrc &&
source /root/CFDEM/CFDEMcoupling/etc/bashrc &&
bash /root/CFDEM/CFDEMcoupling/etc/compileCFDEMcoupling_src.sh
@ -59,7 +59,7 @@ jobs:
name: Build CFDEMcoupling solvers
command: >
shopt -s expand_aliases &&
source /opt/openfoam4/etc/bashrc &&
source /opt/openfoam6/etc/bashrc &&
source /root/CFDEM/CFDEMcoupling/etc/bashrc &&
bash /root/CFDEM/CFDEMcoupling/etc/compileCFDEMcoupling_sol.sh
@ -67,6 +67,6 @@ jobs:
name: Build CFDEMcoupling utilities
command: >
shopt -s expand_aliases &&
source /opt/openfoam4/etc/bashrc &&
source /opt/openfoam6/etc/bashrc &&
source /root/CFDEM/CFDEMcoupling/etc/bashrc &&
bash /root/CFDEM/CFDEMcoupling/etc/compileCFDEMcoupling_uti.sh

View File

@ -1,6 +1,10 @@
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs
EXE_INC = \
$(PFLAGS) \
-I$(CFDEM_OFVERSION_DIR) \
-ImultiphaseMixture/lnInclude \
-I$(LIB_SRC)/transportModels \

View File

@ -46,6 +46,11 @@ Description
int main(int argc, char *argv[])
{
#if OPENFOAM_VERSION_MAJOR >= 6
FatalError << "cfdemSolverMultiphase requires OpenFOAM 4.x or 5.x to work properly" << exit(FatalError);
#endif
#include "postProcess.H"
#include "setRootCase.H"
#include "createTime.H"

View File

@ -1,4 +1,8 @@
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
EXE_INC = \
$(PFLAGS) \
-IalphaContactAngle \
-I$(LIB_SRC)/transportModels \
-I$(LIB_SRC)/transportModels/incompressible/lnInclude \

View File

@ -679,8 +679,13 @@ void Foam::multiphaseMixture::solveAlphas
alphaPhiCorr,
zeroField(),
zeroField(),
#if OPENFOAM_VERSION_MAJOR < 6
1,
0,
#else
oneField(),
zeroField(),
#endif
true
);

View File

@ -1,5 +1,7 @@
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
PFLAGS+= -Dcompre
EXE_INC = \

View File

@ -92,7 +92,7 @@ int main(int argc, char *argv[])
particleCloud.clockM().start(2,"Coupling");
bool hasEvolved = particleCloud.evolve(voidfraction,Us,U);
if(hasEvolved)
if(hasEvolved && smoothenForces)
{
particleCloud.smoothingM().smoothen(particleCloud.forceM(0).impParticleForces());
}
@ -113,7 +113,11 @@ int main(int argc, char *argv[])
particleCloud.clockM().start(26,"Flow");
#if OPENFOAM_VERSION_MAJOR < 6
if (pimple.nCorrPIMPLE() <= 1)
#else
if (pimple.nCorrPimple() <= 1)
#endif
{
#include "rhoEqn.H"
}

View File

@ -177,6 +177,17 @@ Info<< "Reading thermophysical properties\n" << endl;
)
);
bool smoothenForces
(
pimple.dict().lookupOrDefault<bool>
(
"smoothenForces",
false
)
);
if (smoothenForces) Info << "Smoothening implicit particle forces.\n" << endl;
else Info << "Not smoothening implicit particle forces.\n" << endl;
Info<< "Creating turbulence model\n" << endl;
autoPtr<compressible::turbulenceModel> turbulence
(

View File

@ -1,5 +1,7 @@
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
PFLAGS+= -Dcompre
EXE_INC = \
@ -19,12 +21,8 @@ EXE_INC = \
-I$(LIB_SRC)/thermophysicalModels/specie/lnInclude \
-I$(LIB_SRC)/transportModels/compressible/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/basic/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/properties/liquidProperties/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/properties/liquidMixtureProperties/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/thermophysicalFunctions/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/reactionThermo/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/chemistryModel/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/radiationModels/lnInclude \
-I$(LIB_SRC)/regionModels/regionModel/lnInclude \
-I$(LIB_SRC)/regionModels/surfaceFilmModels/lnInclude \
-I$(LIB_SRC)/ODE/lnInclude \
@ -48,9 +46,6 @@ EXE_LIBS = \
-l$(CFDEM_LIB_COMP_NAME) \
$(CFDEM_ADD_LIB_PATHS) \
$(CFDEM_ADD_LIBS) \
-lliquidProperties \
-lliquidMixtureProperties \
-lthermophysicalFunctions \
-lreactionThermophysicalModels \
-lchemistryModel \
-lradiationModels \

View File

@ -13,7 +13,11 @@ tmp<fv::convectionScheme<scalar> > mvConvection
{
combustion->correct();
#if OPENFOAM_VERSION_MAJOR < 5
dQ = combustion->dQ();
#else
Qdot = combustion->Qdot();
#endif
label inertIndex = -1;
volScalarField Yt(0.0*Y[0]);
@ -72,4 +76,5 @@ tmp<fv::convectionScheme<scalar> > mvConvection
Y[inertIndex].max(0.0);
}
}
particleCloud.clockM().stop("Y");

View File

@ -30,7 +30,12 @@ Description
#include "fvCFD.H"
#include "turbulentFluidThermoModel.H"
#if OPENFOAM_VERSION_MAJOR < 6
#include "rhoCombustionModel.H"
#else
#include "rhoReactionThermo.H"
#include "CombustionModel.H"
#endif
#include "bound.H"
#include "pimpleControl.H"
#include "fvOptions.H"
@ -115,7 +120,11 @@ int main(int argc, char *argv[])
particleCloud.clockM().start(26,"Flow");
#if OPENFOAM_VERSION_MAJOR < 6
if (pimple.nCorrPIMPLE() <= 1)
#else
if (pimple.nCorrPimple() <= 1)
#endif
{
#include "rhoEqn.H"
}

View File

@ -1,20 +1,28 @@
// thermodynamics, chemistry
#if OPENFOAM_VERSION_MAJOR < 6
Info<< "Creating combustion model\n" << endl;
autoPtr<combustionModels::rhoCombustionModel> combustion
(
combustionModels::rhoCombustionModel::New(mesh)
);
rhoReactionThermo& thermo = combustion->thermo();
#else
Info<< "Reading thermophysical properties\n" << endl;
autoPtr<rhoReactionThermo> pThermo(rhoReactionThermo::New(mesh));
rhoReactionThermo& thermo = pThermo();
#endif
thermo.validate(args.executable(), "h", "e");
basicSpecieMixture& composition = thermo.composition();
PtrList<volScalarField>& Y = composition.Y();
// read molecular weight
#if OPENFOAM_VERSION_MAJOR < 6
volScalarField W(composition.W());
#else
volScalarField W(thermo.W());
#endif
bool propagateInertSpecie = true;
@ -198,6 +206,14 @@
)
);
#if OPENFOAM_VERSION_MAJOR >= 6
Info<< "Creating combustion model\n" << endl;
autoPtr<CombustionModel<rhoReactionThermo>> combustion
(
CombustionModel<rhoReactionThermo>::New(thermo, turbulence())
);
#endif
Info<< "Creating field dpdt\n" << endl;
volScalarField dpdt
(
@ -214,6 +230,7 @@
Info<< "Creating field kinetic energy K\n" << endl;
volScalarField K("K", 0.5*magSqr(U));
#if OPENFOAM_VERSION_MAJOR < 5
volScalarField dQ
(
IOobject
@ -227,6 +244,21 @@
mesh,
dimensionedScalar("dQ", dimEnergy/dimTime, 0.0)
);
#else
volScalarField Qdot
(
IOobject
(
"Qdot",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
mesh,
dimensionedScalar("Qdot", dimEnergy/dimVolume/dimTime, 0.0)
);
#endif
Info<< "\nReading momentum exchange field Ksl\n" << endl;
volScalarField Ksl

View File

@ -23,4 +23,4 @@ EXE_LIBS = \
-lmeshTools \
-l$(CFDEM_LIB_NAME) \
$(CFDEM_ADD_LIB_PATHS) \
$(CFDEM_ADD_LIBS)
$(CFDEM_ADD_LIBS)

View File

@ -29,11 +29,6 @@ Description
\*---------------------------------------------------------------------------*/
// #include "fvCFD.H"
// #include "singlePhaseTransportModel.H"
// #include "turbulentTransportModel.H"
// #include "fvOptions.H"
#include "recBase.H"
#include "recStatAnalysis.H"

View File

@ -1,3 +0,0 @@
rcfdemSolverHeattransfer.C
EXE=$(CFDEM_APP_DIR)/rcfdemSolverHeattransfer

View File

@ -1,27 +0,0 @@
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs
EXE_INC = \
-I$(CFDEM_OFVERSION_DIR) \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/turbulenceModels/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/incompressible/lnInclude \
-I$(LIB_SRC)/transportModels \
-I$(LIB_SRC)/transportModels/incompressible/singlePhaseTransportModel \
-I$(CFDEM_SRC_DIR)/lagrangian/cfdemParticle/lnInclude \
-I$(CFDEM_SRC_DIR)/lagrangian/cfdemParticle/cfdTools \
-I$(CFDEM_SRC_DIR)/recurrence/lnInclude \
-I$(CFDEM_SRC_DIR)/lagrangian/cfdemParticle/derived/cfdemCloudRec \
EXE_LIBS = \
-L$(CFDEM_LIB_DIR)\
-lrecurrence \
-lturbulenceModels \
-lincompressibleTurbulenceModels \
-lincompressibleTransportModels \
-lfiniteVolume \
-lmeshTools \
-lfvOptions \
-l$(CFDEM_LIB_NAME) \
$(CFDEM_ADD_LIB_PATHS) \
$(CFDEM_ADD_LIBS)

View File

@ -1,40 +0,0 @@
volScalarField rhoeps = rhoRec*voidfractionRec;
particleCloud.energyContributions(Qsource);
particleCloud.energyCoefficients(QCoeff);
K = 0.5*magSqr(URec);
addSource = fvc::ddt(rhoeps, K) + fvc::div(phiRec, K) +
fvc::div
(
fvc::absolute(phiRec/fvc::interpolate(rhoRec), voidfractionRec*URec), pRec
);
fvScalarMatrix TEqn =
(
fvm::ddt(rhoeps, T)
+ fvm::div(phiRec, T)
+ addSource/Cv
- fvm::laplacian(voidfractionRec*thCond/Cv, T)
- Qsource/Cv
- fvm::Sp(QCoeff/Cv, T)
==
fvOptions(rhoeps, T) // no fvOptions support yet
);
//TEqn.relax(relaxCoeff);
fvOptions.constrain(TEqn); // no fvOptions support yet
TEqn.solve();
particleCloud.clockM().start(31,"postFlow");
counter++;
if((counter - couplingSubStep) % dtDEM2dtCFD == 0)
particleCloud.postFlow();
particleCloud.clockM().stop("postFlow");

View File

@ -1,206 +0,0 @@
// dummy fields
Info << "\nCreating dummy pressure field\n" << endl;
volScalarField pRec
(
IOobject
(
"pRec",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
// recurrence fields
Info << "\nCreating recurrence fields.\n" << endl;
volScalarField rhoRec
(
IOobject
(
"rhoRec",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh//,
//dimensionedScalar("rhoRec", dimensionSet(1, -3, 0, 0, 0), 1.0)
);
volVectorField URec
(
IOobject
(
"URec",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
volScalarField voidfractionRec
(
IOobject
(
"voidfractionRec",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
volVectorField UsRec
(
IOobject
(
"UsRec",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
// heat transfer fields
Info << "\nCreating heat transfer fields.\n" << endl;
volScalarField Qsource
(
IOobject
(
"Qsource",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
mesh,
dimensionedScalar("zero", dimensionSet(1,-1,-3,0,0,0,0), 0.0)
);
volScalarField QCoeff
(
IOobject
(
"QCoeff",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
mesh,
dimensionedScalar("zero", dimensionSet(1,-1,-3,-1,0,0,0), 0.0)
);
volScalarField thCond
(
IOobject
(
"thCond",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
mesh,
dimensionedScalar("zero", dimensionSet(1,1,-3,-1,0,0,0), 0.0),
"zeroGradient"
);
volScalarField T
(
IOobject
(
"T",
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::AUTO_WRITE
),
mesh
);
// calculated fields
Info << "\nCreating fields subject to calculation\n" << endl;
volScalarField voidfraction
(
IOobject
(
"voidfraction",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
voidfractionRec
);
volVectorField Us
(
IOobject
(
"Us",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
UsRec
);
// write fields for t=t_start
voidfraction.write();
Us.write();
//===============================
Info << "Calculating face flux field phiRec\n" << endl;
surfaceScalarField phiRec
(
IOobject
(
"phiRec",
runTime.timeName(),
mesh,
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
linearInterpolate(URec*voidfractionRec*rhoRec) & mesh.Sf()
);
phiRec.write();
singlePhaseTransportModel laminarTransport(URec, phiRec);
autoPtr<incompressible::turbulenceModel> turbulence
(
incompressible::turbulenceModel::New(URec, phiRec, laminarTransport)
);
const IOdictionary& transportProps = mesh.lookupObject<IOdictionary>("transportProperties");
dimensionedScalar Cv(transportProps.lookup("Cv"));
volScalarField addSource
(
IOobject
(
"addSource",
runTime.timeName(),
mesh,
IOobject::NO_READ,
IOobject::AUTO_WRITE
),
mesh,
dimensionedScalar("zero", dimensionSet(1,-1,-3,0,0,0,0), 0.0)
);
Info << "Creating field kinetic energy K\n" << endl;
volScalarField K("K", 0.5*magSqr(URec));

View File

@ -1,128 +0,0 @@
/*---------------------------------------------------------------------------*\
CFDEMcoupling academic - Open Source CFD-DEM coupling
Contributing authors:
Thomas Lichtenegger
Copyright (C) 2015- Johannes Kepler University, Linz
-------------------------------------------------------------------------------
License
This file is part of CFDEMcoupling academic.
CFDEMcoupling academic is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CFDEMcoupling academic is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with CFDEMcoupling academic. If not, see <http://www.gnu.org/licenses/>.
Application
rcfdemSolverHeattransfer
Description
Solves heat transfer between fluid and particles based on rCFD
\*---------------------------------------------------------------------------*/
#include "fvCFD.H"
#include "singlePhaseTransportModel.H"
#include "turbulentTransportModel.H"
#include "fvOptions.H"
#include "cfdemCloudRec.H"
#include "recBase.H"
#include "recModel.H"
#include "cfdemCloudEnergy.H"
#include "clockModel.H"
#include "thermCondModel.H"
#include "energyModel.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
{
#include "postProcess.H"
#include "setRootCase.H"
#include "createTime.H"
#include "createMesh.H"
#include "createControl.H"
#include "createFields.H"
#include "createFvOptions.H"
cfdemCloudRec<cfdemCloudEnergy> particleCloud(mesh);
recBase recurrenceBase(mesh);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
Info << "\nCalculating particle trajectories based on recurrence statistics\n" << endl;
label recTimeIndex = 0;
scalar recTimeStep = recurrenceBase.recM().recTimeStep();
scalar startTime = runTime.startTime().value();
// control coupling behavior in case of substepping
// assumes constant timestep size
label counter = 0;
label couplingSubStep = recurrenceBase.couplingSubStep();//5;//3;
double dtProp = particleCloud.dataExchangeM().couplingTime() / runTime.deltaTValue();
label dtDEM2dtCFD = int(dtProp + 0.5);
Info << "deltaT_DEM / deltaT_CFD = " << dtDEM2dtCFD << endl;
if (dtDEM2dtCFD > 1)
Info << "coupling at substep " << couplingSubStep << endl;
while (runTime.run())
{
runTime++;
// do stuff (every lagrangian time step)
particleCloud.clockM().start(1,"Global");
Info << "Time = " << runTime.timeName() << nl << endl;
particleCloud.clockM().start(2,"Coupling");
particleCloud.evolve(voidfraction,Us,URec);
particleCloud.clockM().stop("Coupling");
particleCloud.clockM().start(26,"Flow");
#include "TEqImp.H"
particleCloud.clockM().stop("Flow");
particleCloud.clockM().start(32,"ReadFields");
if ( runTime.timeOutputValue() - startTime - (recTimeIndex+1)*recTimeStep + 1.0e-5 > 0.0 )
{
recurrenceBase.updateRecFields();
#include "updateFields.H"
recTimeIndex++;
}
particleCloud.clockM().stop("ReadFields");
particleCloud.clockM().start(33,"Output");
runTime.write();
particleCloud.clockM().stop("Output");
particleCloud.clockM().stop("Global");
Info << "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
<< " ClockTime = " << runTime.elapsedClockTime() << " s"
<< nl << endl;
}
Info << "End\n" << endl;
return 0;
}
// ************************************************************************* //

View File

@ -1,14 +0,0 @@
recurrenceBase.recM().exportVolScalarField("voidfraction",voidfractionRec);
recurrenceBase.recM().exportVolScalarField("rho",rhoRec);
recurrenceBase.recM().exportVolScalarField("p",pRec);
recurrenceBase.recM().exportVolVectorField("Us",UsRec);
recurrenceBase.recM().exportVolVectorField("U",URec);
recurrenceBase.recM().exportSurfaceScalarField("phi",phiRec);
{
volScalarField& NuField(const_cast<volScalarField&>(mesh.lookupObject<volScalarField> ("NuField")));
recurrenceBase.recM().exportVolScalarField("NuField",NuField);
}
#include "updateRho.H"

View File

@ -1,32 +0,0 @@
// work-around for transient properties
// needs to be specified for each case
// case 1
forAll(rhoRec,cellI)
{
if (mesh.C()[cellI].z() < 0.00228)
rhoRec[cellI] = 1.18+(1.085-1.18)*Foam::exp(-0.065*runTime.timeOutputValue());
else if (mesh.C()[cellI].z() < 0.00456)
rhoRec[cellI] = 1.18+(1.01-1.18)*Foam::exp(-0.05*runTime.timeOutputValue());
else if (mesh.C()[cellI].z() < 0.00684)
rhoRec[cellI] = 1.18+(0.98-1.18)*Foam::exp(-0.0425*runTime.timeOutputValue());
else
rhoRec[cellI] = 1.18+(0.955-1.18)*Foam::exp(-0.0425*runTime.timeOutputValue());
}
// case 2
/*
forAll(rhoRec,cellI)
{
if (mesh.C()[cellI].z() < 0.00228)
rhoRec[cellI] = 1.18+(1.115-1.18)*Foam::exp(-0.065*runTime.timeOutputValue());
else if (mesh.C()[cellI].z() < 0.00456)
rhoRec[cellI] = 1.18+(1.04-1.18)*Foam::exp(-0.05*runTime.timeOutputValue());
else if (mesh.C()[cellI].z() < 0.00684)
rhoRec[cellI] = 1.18+(1.005-1.18)*Foam::exp(-0.0425*runTime.timeOutputValue());
else
rhoRec[cellI] = 1.18+(0.96-1.18)*Foam::exp(-0.0425*runTime.timeOutputValue());
}
*/

View File

@ -1,5 +1,7 @@
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
PFLAGS+= -Dcompre
EXE_INC = \

View File

@ -69,15 +69,15 @@ int main(int argc, char *argv[])
#include "createFvOptions.H"
// create cfdemCloud
// #include "readGravitationalAcceleration.H"
//#include "readGravitationalAcceleration.H"
cfdemCloudRec<cfdemCloudEnergy> particleCloud(mesh);
#include "checkModelType.H"
recBase recurrenceBase(mesh);
#include "updateFields.H"
turbulence->validate();
// #include "compressibleCourantNo.H"
// #include "setInitialDeltaT.H"
//#include "compressibleCourantNo.H"
//#include "setInitialDeltaT.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -108,8 +108,8 @@ int main(int argc, char *argv[])
particleCloud.clockM().start(2,"Coupling");
bool hasEvolved = particleCloud.evolve(voidfraction,Us,U);
//voidfraction = voidfractionRec;
//Us = UsRec;
//voidfraction = voidfractionRec;
//Us = UsRec;
if(hasEvolved)
{
@ -137,7 +137,11 @@ int main(int argc, char *argv[])
while (pimple.loop())
{
// if needed, perform drag update here
#if OPENFOAM_VERSION_MAJOR < 6
if (pimple.nCorrPIMPLE() <= 1)
#else
if (pimple.nCorrPimple() <= 1)
#endif
{
#include "rhoEqn.H"
}

View File

@ -20,7 +20,7 @@ In the CFDEMcoupling repository take a look at the file
src/lagrangian/cfdemParticle/cfdTools/versionInfo.H :pre
to find out the latest tested version of LIGGGHTS and OpenFOAM that work with
CFDEMcoupling. As of this writing the version of OpenFOAM to be used is 4.x.
CFDEMcoupling. As of this writing the version of OpenFOAM to be used is 6.
You can then basically follow the instructions at
"openfoam.org"_https://openfoam.org/download/source/, cloning OpenFOAM from the
@ -29,11 +29,11 @@ git repository.
cd $HOME
mkdir OpenFOAM
cd OpenFOAM
git clone https://github.com/OpenFOAM/OpenFOAM-4.x.git :pre
git clone https://github.com/OpenFOAM/OpenFOAM-6.git :pre
Clone the corresponding third party packages to the OpenFOAM folder.
git clone https://github.com/OpenFOAM/ThirdParty-4.x.git :pre
git clone https://github.com/OpenFOAM/ThirdParty-6.git :pre
Switch to root user with sudo
@ -42,9 +42,8 @@ sudo su - :pre
Install dependent packages required for OpenFOAM on Ubuntu by executing the
following commands:
apt-get install build-essential flex bison cmake zlib1g-dev libboost-system-dev libboost-thread-dev libopenmpi-dev openmpi-bin gnuplot libreadline-dev libncurses-dev libxt-dev
apt-get install qt4-dev-tools libqt4-dev libqt4-opengl-dev freeglut3-dev libqtwebkit-dev
apt-get install libcgal-dev :pre
apt-get install build-essential flex bison git-core cmake zlib1g-dev libboost-system-dev libboost-thread-dev libopenmpi-dev openmpi-bin gnuplot libreadline-dev libncurses-dev libxt-dev
apt-get install libqt5x11extras5-dev libxt-dev qt5-default qttools5-dev curl :pre
2.1.2 Setup the environment :h5
@ -56,7 +55,7 @@ gedit ~/.bashrc :pre
and add the following lines:
source $HOME/OpenFOAM/OpenFOAM-4.x/etc/bashrc
source $HOME/OpenFOAM/OpenFOAM-6/etc/bashrc
export WM_NCOMPPROCS=4 :pre
Save the file and reload it:
@ -67,7 +66,7 @@ source ~/.bashrc :pre
[Additional check]
Open ~/OpenFOAM/OpenFOAM-4.x/etc/bashrc and make sure that {WM_MPLIB} is set
Open ~/OpenFOAM/OpenFOAM-6/etc/bashrc and make sure that {WM_MPLIB} is set
correctly:
export WM_MPLIB=SYSTEMOPENMPI :pre
@ -89,7 +88,7 @@ cd $WM_THIRD_PARTY_DIR
Paraview is a third-party software provided for graphical post-processing in
OpenFOAM. Its compilation is automated using a script called makeParaView in the
ThirdParty-4.x directory.
ThirdParty-6 directory.
Before installing Paraview, check the version of cmake that is installed on the
system. This can be done by typing

View File

@ -27,7 +27,8 @@ The "averagingModel"_averagingModel.html keyword entry defines the model used to
map the Lagrangian data to Eulerian values.
"dense"_averagingModel_dense.html,
"dilute"_averagingModel_dilute.html :tb(c=2,ea=c)
"dilute"_averagingModel_dilute.html,
off :tb(c=2,ea=c)
6.3 Chemistry models :h4
@ -69,8 +70,9 @@ that performs the data exchange between the DEM code and the CFD code.
The {energyModels} keyword specifies a list of energy models used for e.g.
compressible, reacting flows.
heatTransferGranConduction,
heatTransferGunn,
heatTransferGunnPartField,
heatTransferRanzMarshall,
reactionHeat :tb(c=2,ea=c)
@ -92,15 +94,23 @@ Fines,
"MeiLift"_forceModel_MeiLift.html,
"SchillerNaumannDrag"_forceModel_SchillerNaumannDrag.html,
"ShirgaonkarIB"_forceModel_ShirgaonkarIB.html,
dSauter,
deactivateForce,
directedDiffusiveRelaxation,
evaluateFluctuations,
"fieldStore"_forceModel_fieldStore.html,
"fieldTimeAverage"_forceModel_fieldTimeAverage.html,
freeStreaming,
"gradPForce"_forceModel_gradPForce.html,
"gradPForceSmooth"_forceModel_gradPForceSmooth.html,
granKineticEnergy,
"interface"_forceModel_interface.html,
isotropicFluctuations,
"noDrag"_forceModel_noDrag.html,
"particleCellVolume"_forceModel_particleCellVolume.html,
particleDeformation,
"pdCorrelation"_forceModel_pdCorrelation.html,
potentialRelaxation,
"surfaceTensionForce"_forceModel_surfaceTensionForce.html,
"virtualMassForce"_forceModel_virtualMassForce.html,
"viscForce"_forceModel_viscForce.html,
@ -153,6 +163,7 @@ used to manipulate the CFD mesh according to the DEM mesh motion.
The "momCoupleModels"_momCoupleModel.html keyword specifies a list of models
used for momentum exchange between DEM and CFD simulation
deactivateCouple,
"explicitCouple"_momCoupleModel_explicitCouple.html,
"implicitCouple"_momCoupleModel_implicitCouple.html,
"off"_momCoupleModel_noCouple.html :tb(c=2,ea=c)
@ -214,5 +225,7 @@ accounting for the volume of the particles in the CFD domain.
"IB"_voidFractionModel_IBVoidFraction.html,
"bigParticle"_voidFractionModel_bigParticleVoidFraction.html,
"centre"_voidFractionModel_centreVoidFraction.html,
"divided"_voidFractionModel_dividedVoidFraction.html :tb(c=2,ea=c)
"divided"_voidFractionModel_dividedVoidFraction.html,
off,
trilinear :tb(c=2,ea=c)

View File

@ -15,5 +15,11 @@ This section lists all CFDEMcoupling solvers alphabetically.
"cfdemSolverPisoScalar"_cfdemSolverPisoScalar.html,
"cfdemSolverRhoPimple"_cfdemSolverRhoPimple.html,
"cfdemSolverRhoPimpleChem"_cfdemSolverRhoPimpleChem.html,
"cfdemSolverRhoSimple"_cfdemSolverRhoSimple.html :tb(c=2,ea=c)
rStatAnalysis,
rcfdemSolverBase,
rcfdemSolverCoupledHeattransfer,
rcfdemSolverRhoSteadyPimple,
recSolverTurbTransport,
rtfmSolverSpecies,
testTwoFluidRecurrenceTurbulence :tb(c=2,ea=c)

View File

@ -29,6 +29,8 @@ END_RST -->
For more details, see "Vångö et al. (2018)"_#Vångö2018.
IMPORTANT NOTE: This solver requires OpenFOAM 4.x or 5.x to work properly.
:line
:link(Vångö2018)

View File

@ -1,59 +0,0 @@
<!-- HTML_ONLY -->
<HEAD>
<META CHARSET="utf-8">
</HEAD>
<!-- END_HTML_ONLY -->
"CFDEMproject Website"_lws - "Main Page"_main :c
:link(lws,http://www.cfdem.com)
:link(main,CFDEMcoupling_Manual.html)
:line
cfdemSolverRhoSimple command :h3
[Description:]
<!-- HTML_ONLY -->
"cfdemSolverRhoSimple" is a coupled CFD-DEM solver using the CFDEMcoupling
framework. Based on the OpenFOAM&reg;(*) solver rhoSimpleFoam, this is a
steady-state solver for turbulent flow of compressible fluids coupled with the
DEM code LIGGGHTS for solid particles.
<!-- END_HTML_ONLY -->
<!-- RST
"cfdemSolverRhoSimple" is a coupled CFD-DEM solver using the CFDEMcoupling
framework. Based on the OpenFOAM\ |reg|\ (*) solver rhoSimpleFoam, this is a
steady-state solver for turbulent flow of compressible fluids coupled with the
DEM code LIGGGHTS for solid particles.
.. |reg| unicode:: U+000AE .. REGISTERED SIGN
END_RST -->
:line
<!-- HTML_ONLY -->
NOTE:
(*) This offering is not approved or endorsed by OpenCFD Limited, producer and
distributor of the OpenFOAM software via www.openfoam.com, and owner of the
OPENFOAM&reg; and OpenCFD&reg; trade marks.
OPENFOAM&reg; is a registered trade mark of OpenCFD Limited, producer and
distributor of the OpenFOAM software via www.openfoam.com.
<!-- END_HTML_ONLY -->
<!-- RST
.. note::
(*) This offering is not approved or endorsed by OpenCFD Limited, producer
and distributor of the OpenFOAM software via www.openfoam.com, and owner of
the OPENFOAM\ |reg| and OpenCFD\ |reg| trade marks.
OPENFOAM\ |reg| is a registered trade mark of OpenCFD Limited, producer and
distributor of the OpenFOAM software via www.openfoam.com.
.. |reg| unicode:: U+000AE .. REGISTERED SIGN
END_RST -->

View File

@ -22,11 +22,14 @@ particleCellVolumeProps
lowerThreshold scalar2;
startTime scalar3;
verbose;
writeToFile switch1;
\} :pre
{scalar1} = only cells with a field value (magnitude) lower than this upper threshold are considered :l
{scalar2} = only cells with a field value (magnitude) greater than this lower threshold are considered :l
{scalar3} = (optional, default 0) start time of volume calculation and output :l
{verbose} = (optional, default false) keyword only (mostly used for debugging) :l
{switch1} = (optional, default false) switch for file output :l
:ule
[Examples:]

View File

@ -30,6 +30,7 @@ volWeightedAverageProps
upperThreshold scalar1;
lowerThreshold scalar2;
verbose;
writeToFile switch1;
\} :pre
{time} = (optional, default 0.) time to start the averaging :ulb,l
@ -38,6 +39,7 @@ volWeightedAverageProps
{scalar1} = only cells with a field value (magnitude) lower than this upper threshold are considered :l
{scalar2} = only cells with a field value (magnitude) greater than this lower threshold are considered :l
{verbose} = (optional, default false) keyword only (mostly used for debugging) :l
{switch1} = (optional, default false) switch for file output :l
:ule
[Examples:]

View File

@ -17,7 +17,7 @@
#------------------------------------------------------------------------------
export CFDEM_PROJECT=CFDEM
export CFDEM_VERSION=19.09
export CFDEM_VERSION=20.05
################################################################################
# USER EDITABLE PART: Changes made here may be lost with the next upgrade
@ -25,7 +25,7 @@ export CFDEM_VERSION=19.09
# Please set to the appropriate path if the default is not correct.
#
# activate compatible OpenFOAM version
. $HOME/OpenFOAM/OpenFOAM-4.x/etc/bashrc
. $HOME/OpenFOAM/OpenFOAM-6/etc/bashrc
#
# CFDEMcoupling
export CFDEM_INST_DIR=$HOME/$CFDEM_PROJECT
@ -199,6 +199,9 @@ alias cfdemCleanCFDEM='bash $CFDEM_PROJECT_DIR/etc/cleanCFDEMcoupling.sh'
#- shortcut to compile LIGGGHTS + sublibraries
alias cfdemCompLIG='bash $CFDEM_PROJECT_DIR/etc/compileLIGGGHTS.sh'
#- shortcut to compile LIGGGHTS sublibraries
alias cfdemCompLIGlib='bash $CFDEM_PROJECT_DIR/etc/compileLIGGGHTS_lib.sh'
#- shortcut to compile CFDEMcoupling +LIGGGHTS
alias cfdemCompCFDEMall='bash $CFDEM_PROJECT_DIR/etc/compileCFDEMcoupling_all.sh'

View File

@ -15,7 +15,7 @@
#------------------------------------------------------------------------------
setenv CFDEM_PROJECT CFDEM
setenv CFDEM_VERSION 19.09
setenv CFDEM_VERSION 20.05
################################################################################
# USER EDITABLE PART: Changes made here may be lost with the next upgrade
@ -23,7 +23,7 @@ setenv CFDEM_VERSION 19.09
# Please set to the appropriate path if the default is not correct.
#
# activate compatible OpenFOAM version
. $HOME/OpenFOAM/OpenFOAM-4.x/etc/cshrc
. $HOME/OpenFOAM/OpenFOAM-6/etc/cshrc
#
# CFDEMcoupling
setenv CFDEM_INST_DIR $HOME/$CFDEM_PROJECT
@ -232,6 +232,9 @@ alias cfdemCleanCFDEM 'bash $CFDEM_PROJECT_DIR/etc/cleanCFDEMcoupling.sh'
#- shortcut to compile LIGGGHTS + sublibraries
alias cfdemCompLIG 'bash $CFDEM_PROJECT_DIR/etc/compileLIGGGHTS.sh'
#- shortcut to compile LIGGGHTS sublibraries
alias cfdemCompLIGlib 'bash $CFDEM_PROJECT_DIR/etc/compileLIGGGHTS_lib.sh'
#- shortcut to compile CFDEMcoupling +LIGGGHTS
alias cfdemCompCFDEMall 'bash $CFDEM_PROJECT_DIR/etc/compileCFDEMcoupling_all.sh'

View File

@ -1,5 +1,4 @@
rcfdemSolverRhoSteadyPimple/dir
rcfdemSolverHeattransfer/dir
rcfdemSolverCoupledHeattransfer/dir
rStatAnalysis/dir
rcfdemSolverBase/dir
@ -7,7 +6,6 @@ rtfmSolverSpecies/dir
cfdemSolverPisoMS/dir
cfdemSolverPiso/dir
cfdemSolverRhoPimple/dir
cfdemSolverRhoSimple/dir
cfdemSolverIB/dir
cfdemSolverPisoScalar/dir
cfdemSolverRhoPimpleChem/dir

View File

@ -3,6 +3,8 @@ sinclude $(RULES)/mplib$(WM_MPLIB)
GIT_VERSION := $(shell git describe --dirty --always --tags)
PFLAGS+= -DGITVERSION=\"$(GIT_VERSION)\"
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs

View File

@ -34,9 +34,9 @@ Description
#ifndef versionInfo_H
#define versionInfo_H
word CFDEMversion="PFM 19.09";
word compatibleLIGGGHTSversion="PFM 19.09";
word OFversion="4.x";
word CFDEMversion="PFM 20.05";
word compatibleLIGGGHTSversion="PFM 20.05";
word OFversion="6";
Info << "\nCFDEMcoupling version: " << CFDEMversion << endl;
Info << "compatible to LIGGGHTS version: " << compatibleLIGGGHTSversion << endl;

View File

@ -84,10 +84,12 @@ cfdemCloud::cfdemCloud
ignore_(couplingProperties_.found("ignore")),
allowCFDsubTimestep_(true),
limitDEMForces_(couplingProperties_.found("limitDEMForces")),
phaseInForces_(couplingProperties_.found("phaseInForcesTime")),
getParticleDensities_(couplingProperties_.lookupOrDefault<bool>("getParticleDensities",false)),
getParticleEffVolFactors_(couplingProperties_.lookupOrDefault<bool>("getParticleEffVolFactors",false)),
getParticleTypes_(couplingProperties_.lookupOrDefault<bool>("getParticleTypes",false)),
maxDEMForce_(0.),
phaseInForcesTime_(couplingProperties_.lookupOrDefault<scalar>("phaseInForcesTime",0.0)),
modelType_(couplingProperties_.lookup("modelType")),
positions_(NULL),
velocities_(NULL),
@ -368,6 +370,12 @@ cfdemCloud::cfdemCloud
if (verbose_) Info << "nPatchesNonCyclic=" << nPatchesNonCyclic << ", nPatchesCyclic=" << nPatchesCyclic << endl;
Warning << "Periodic handing is disabled because the domain is not fully periodic!\n" << endl;
}
// check that the phasing-in time exists and is meaningful
if (phaseInForces_ && phaseInForcesTime_ < SMALL)
{
FatalError << "phasing-in time too small" << endl;
}
}
// * * * * * * * * * * * * * * * * Destructors * * * * * * * * * * * * * * //
@ -468,6 +476,19 @@ void cfdemCloud::setForces()
}
Info << "largest particle-fluid interaction on particle: " << maxF << endl;
}
if (phaseInForces_)
{
scalar tfrac = (mesh_.time().timeOutputValue()-mesh_.time().startTime().value())/phaseInForcesTime_;
if (tfrac <= 1.0)
{
for (int index = 0;index < numberOfParticles(); ++index)
{
for(int i=0;i<3;i++) DEMForces_[index][i] *= tfrac;
Cds_[index][0] *= tfrac;
}
}
}
}
void cfdemCloud::setParticleForceField()

View File

@ -96,6 +96,8 @@ protected:
const bool limitDEMForces_;
const bool phaseInForces_;
const bool getParticleDensities_;
const bool getParticleEffVolFactors_;
@ -104,6 +106,8 @@ protected:
scalar maxDEMForce_;
scalar phaseInForcesTime_;
const word modelType_;
double **positions_; // particle positions

View File

@ -30,6 +30,7 @@ Description
\*---------------------------------------------------------------------------*/
#include "error.H"
#include "particle.H"
#include "IOModel.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -99,13 +100,22 @@ void IOModel::streamDataToPath(const fileName& path, const double* const* array,
OFstream fileStream(path/name);
fileStream
<< "FoamFile\n{\n"
<< " version " << fileStream.version() << ";\n"
<< " format " << fileStream.format() << ";\n"
<< " class " << className << ";\n"
<< " location " << 0 << ";\n"
<< " object " << name << ";\n"
<< "}" << nl;
<< "/*--------------------------------*- C++ -*----------------------------------*\\" << nl
<< " ========= |" << nl
<< " \\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox" << nl
<< " \\\\ / O peration | Website: https://openfoam.org" << nl
<< " \\\\ / A nd | Version: " << FOAMversion << nl
<< " \\\\/ M anipulation |" << nl
<< "\\*---------------------------------------------------------------------------*/" << nl
<< "FoamFile" << nl
<< "{" << nl
<< " version " << fileStream.version() << ";" << nl
<< " format " << fileStream.format() << ";" << nl
<< " class " << className << ";" << nl
<< " location " << 0 << ";" << nl
<< " object " << name << ";" << nl
<< "}" << nl
<< "// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //" << nl << nl;
fileStream << nPProc << "\n";
@ -129,7 +139,19 @@ void IOModel::streamDataToPath(const fileName& path, const double* const* array,
}
else if (type == "position")
{
fileStream << "( "<< array[index][0] << " " << array[index][1] << " " << array[index][2] << " ) " << cellIDs[index][0] << " \n";
#if OPENFOAM_VERSION_MAJOR < 5
fileStream << "( " << array[index][0] << " " << array[index][1] << " " << array[index][2] << " ) " << cellIDs[index][0] << nl;
#else
particle part
(
particleCloud_.mesh(),
vector(array[index][0],array[index][1],array[index][2]),
cellIDs[index][0]
);
part.writePosition(fileStream);
fileStream << nl;
#endif
}
else if (type == "vector")
{

View File

@ -68,6 +68,18 @@ twoWayMPI::twoWayMPI
:
dataExchangeModel(dict,sm),
propsDict_(dict.subDict(typeName + "Props")),
DEMVariableNames_(propsDict_.lookupOrDefault<wordList>("DEMVariables",wordList(0))),
DEMVariables_
(
IOobject
(
"DEMVariables",
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::NO_READ,
IOobject::NO_WRITE
)
),
lmp(NULL)
{
Info << "Starting up LIGGGHTS for first time execution" << endl;
@ -92,10 +104,38 @@ twoWayMPI::twoWayMPI
twoWayMPI::~twoWayMPI()
{
    // Optionally run a final LIGGGHTS input script (e.g. to write restart
    // data) before the DEM instance is destroyed.
    //
    // Guard against lmp being NULL: it is initialised to NULL in the
    // constructor initialiser list, so a partially constructed object must
    // not dereference it here. (delete on NULL is already safe.)
    if (lmp && propsDict_.found("liggghtsEndOfRunPath"))
    {
        const fileName liggghtsEndOfRunPath(propsDict_.lookup("liggghtsEndOfRunPath"));
        lmp->input->file(liggghtsEndOfRunPath.c_str());
    }

    delete lmp;
}
// * * * * * * * * * * * * * * * private Member Functions * * * * * * * * * * * * * //
void twoWayMPI::updateDEMVariables()
{
scalar variablevalue = 0.0;
word variablename("");
DEMVariables_.clear();
for (label i=0; i<DEMVariableNames_.size(); i++)
{
variablename = DEMVariableNames_[i];
variablevalue = DEMVariableValue(variablename);
DEMVariables_.append(variablevalue);
}
}
// Query the current value of a LIGGGHTS/LAMMPS variable by name.
// Thin wrapper around the LIGGGHTS library call on the running DEM
// instance; returns whatever liggghts_get_variable() reports.
double twoWayMPI::DEMVariableValue(word variablename)
{
return liggghts_get_variable(lmp,variablename.c_str());
}
// * * * * * * * * * * * * * * * public Member Functions * * * * * * * * * * * * * //
void twoWayMPI::getData
(
word name,
@ -350,6 +390,9 @@ bool twoWayMPI::couple(int i)
particleCloud_.clockM().start(4,"LIGGGHTS_reallocArrays");
particleCloud_.reAllocArrays();
particleCloud_.clockM().stop("LIGGGHTS_reallocArrays");
// retrieve DEM variables if present
updateDEMVariables();
}
return coupleNow;

View File

@ -44,6 +44,7 @@ SourceFiles
#include <mpi.h>
#include "dataExchangeModel.H"
#include "scalarIOList.H"
//=================================//
//LAMMPS/LIGGGHTS
@ -71,7 +72,14 @@ private:
dictionary propsDict_;
MPI_Comm comm_liggghts;
wordList DEMVariableNames_;
scalarIOList DEMVariables_;
// private member functions
double DEMVariableValue(word);
void updateDEMVariables();
protected:
LAMMPS_NS::LAMMPS *lmp;
@ -146,7 +154,6 @@ public:
int getNumberOfTypes() const;
double* getTypeVol() const;
scalar getCG() const { return lmp->force->cg(); }
};

View File

@ -163,6 +163,12 @@ twoWayMany2Many::~twoWayMany2Many()
delete lmp2foam_vec_;
delete foam2lmp_vec_;
delete foam2lmp_;
if (propsDict_.found("liggghtsEndOfRunPath"))
{
const fileName liggghtsEndOfRunPath(propsDict_.lookup("liggghtsEndOfRunPath"));
lmp->input->file(liggghtsEndOfRunPath.c_str());
}
delete lmp;
}

View File

@ -242,6 +242,11 @@ twoWayOne2One::~twoWayOne2One()
destroy(prev_cell_ids_);
destroy(dbl_cell_ids_);
if (propsDict_.found("liggghtsEndOfRunPath"))
{
const fileName liggghtsEndOfRunPath(propsDict_.lookup("liggghtsEndOfRunPath"));
lmp->input->file(liggghtsEndOfRunPath.c_str());
}
delete lmp;
}
@ -961,6 +966,10 @@ void twoWayOne2One::extractCollected(T*& src, T**& dst, int width) const
}
}
// Number of particles currently tracked by the coupled cloud.
// Delegates to the cfdemCloud that owns this data exchange model.
int twoWayOne2One::getNumberOfParticles() const
{
return particleCloud_.numberOfParticles();
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -208,6 +208,8 @@ public:
template <typename T>
void extractCollected(T*&, T**&, int width=1) const;
int getNumberOfParticles() const;
scalar getCG() const { return lmp->force->cg(); }
};

View File

@ -51,6 +51,8 @@ heatTransferGunn::heatTransferGunn
implicit_(propsDict_.lookupOrDefault<bool>("implicit",true)),
calcTotalHeatFlux_(propsDict_.lookupOrDefault<bool>("calcTotalHeatFlux",true)),
initPartTemp_(propsDict_.lookupOrDefault<bool>("initPartTemp",false)),
Tmin_(propsDict_.lookupOrDefault<scalar>("Tmin",0.0)),
Tmax_(propsDict_.lookupOrDefault<scalar>("Tmax",1e6)),
totalHeatFlux_(0.0),
NusseltScalingFactor_(1.0),
QPartFluidName_(propsDict_.lookupOrDefault<word>("QPartFluidName","QPartFluid")),
@ -148,7 +150,8 @@ heatTransferGunn::heatTransferGunn
partRe_(NULL),
partNu_(NULL),
scaleDia_(1.),
typeCG_(propsDict_.lookupOrDefault<scalarList>("coarseGrainingFactors",scalarList(1,1.0)))
typeCG_(propsDict_.lookupOrDefault<scalarList>("coarseGrainingFactors",scalarList(1,1.0))),
maxTypeCG_(typeCG_.size())
{
allocateMyArrays();
@ -207,7 +210,10 @@ heatTransferGunn::heatTransferGunn
scaleDia_=scalar(readScalar(propsDict_.lookup("scale")));
typeCG_[0] = scaleDia_;
}
else if (typeCG_.size()>1) multiTypes_ = true;
else if (typeCG_.size()>1)
{
multiTypes_ = true;
}
if (initPartTemp_ && !partTempField_.headerOk())
{
@ -338,6 +344,10 @@ void heatTransferGunn::calcEnergyContribution()
if (multiTypes_)
{
partType = particleCloud_.particleType(index);
if (partType > maxTypeCG_)
{
FatalError<< "Too few coarse-graining factors provided." << abort(FatalError);
}
cg = typeCG_[partType - 1];
scaleDia3 = cg*cg*cg;
}
@ -596,12 +606,22 @@ void heatTransferGunn::partTempField()
void heatTransferGunn::initPartTemp()
{
    // One-time initialisation of particle temperatures from the Eulerian
    // temperature field: each particle inherits the temperature of the cell
    // it resides in, clipped to the user-defined range [Tmin_, Tmax_].
    // Particles without a valid cell (cellI < 0) are left untouched.
    //
    // Fix: removed a leftover unclamped store of partTempField_[cellI] that
    // was immediately overwritten by the clamped value below (dead store).
    label cellI = 0;
    scalar T = 0.0;

    for (int index = 0; index < particleCloud_.numberOfParticles(); ++index)
    {
        cellI = particleCloud_.cellIDs()[index][0];

        if (cellI >= 0)
        {
            // clamp the sampled temperature to the admissible range
            T = partTempField_[cellI];
            if (T < Tmin_)
            {
                T = Tmin_;
            }
            else if (T > Tmax_)
            {
                T = Tmax_;
            }
            partTemp_[index][0] = T;
        }
    }
}

View File

@ -60,6 +60,10 @@ protected:
bool initPartTemp_;
scalar Tmin_;
scalar Tmax_;
scalar totalHeatFlux_;
scalar NusseltScalingFactor_;
@ -122,6 +126,8 @@ protected:
scalarList typeCG_;
const label maxTypeCG_;
void allocateMyArrays() const;
void partTempField();

View File

@ -49,6 +49,9 @@ heatTransferRanzMarshall::heatTransferRanzMarshall
verbose_(propsDict_.lookupOrDefault<bool>("verbose",false)),
implicit_(propsDict_.lookupOrDefault<bool>("implicit",true)),
calcTotalHeatFlux_(propsDict_.lookupOrDefault<bool>("calcTotalHeatFlux",true)),
initPartTemp_(propsDict_.lookupOrDefault<bool>("initPartTemp",false)),
Tmin_(propsDict_.lookupOrDefault<scalar>("Tmin",0.0)),
Tmax_(propsDict_.lookupOrDefault<scalar>("Tmax",1e6)),
totalHeatFlux_(0.0),
NusseltScalingFactor_(1.0),
QPartFluidName_(propsDict_.lookupOrDefault<word>("QPartFluidName","QPartFluid")),
@ -146,7 +149,8 @@ heatTransferRanzMarshall::heatTransferRanzMarshall
partRe_(NULL),
partNu_(NULL),
scaleDia_(1.),
typeCG_(propsDict_.lookupOrDefault<scalarList>("coarseGrainingFactors",scalarList(1,1.0)))
typeCG_(propsDict_.lookupOrDefault<scalarList>("coarseGrainingFactors",scalarList(1,1.0))),
maxTypeCG_(typeCG_.size())
{
allocateMyArrays();
@ -205,7 +209,15 @@ heatTransferRanzMarshall::heatTransferRanzMarshall
scaleDia_=scalar(readScalar(propsDict_.lookup("scale")));
typeCG_[0] = scaleDia_;
}
else if (typeCG_.size()>1) multiTypes_ = true;
else if (typeCG_.size()>1)
{
multiTypes_ = true;
}
if (initPartTemp_ && !partTempField_.headerOk())
{
FatalError <<"Trying to initialize particle temperatures, but no field found.\n" << abort(FatalError);
}
}
@ -252,8 +264,18 @@ void heatTransferRanzMarshall::calcEnergyContribution()
// reset Scalar field
QPartFluid_.primitiveFieldRef() = 0.0;
// get DEM data
particleCloud_.dataExchangeM().getData(partTempName_,"scalar-atom",partTemp_);
if (initPartTemp_)
{
// if particle temperatures are to be initialized from field, do a one-time push to DEM
initPartTemp();
particleCloud_.dataExchangeM().giveData("Temp","scalar-atom", partTemp_);
initPartTemp_ = false;
}
else
{
// get DEM data
particleCloud_.dataExchangeM().getData(partTempName_,"scalar-atom",partTemp_);
}
#ifdef compre
const volScalarField mufField = particleCloud_.turbulence().mu();
@ -322,6 +344,10 @@ void heatTransferRanzMarshall::calcEnergyContribution()
if (multiTypes_)
{
partType = particleCloud_.particleType(index);
if (partType > maxTypeCG_)
{
FatalError<< "Too few coarse-graining factors provided." << abort(FatalError);
}
cg = typeCG_[partType - 1];
scaleDia3 = cg*cg*cg;
}
@ -564,6 +590,29 @@ void heatTransferRanzMarshall::partTempField()
Info << "heatTransferRanzMarshall: average part. temp = " << partTempAve_.value() << endl;
}
void heatTransferRanzMarshall::initPartTemp()
{
label cellI = 0;
scalar T = 0.0;
for(int index = 0;index < particleCloud_.numberOfParticles(); ++index)
{
cellI = particleCloud_.cellIDs()[index][0];
if(cellI >= 0)
{
T = partTempField_[cellI];
if (T < Tmin_)
{
T = Tmin_;
}
else if (T > Tmax_)
{
T = Tmax_;
}
partTemp_[index][0] = T;
}
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam

View File

@ -57,6 +57,12 @@ protected:
bool calcTotalHeatFlux_;
bool initPartTemp_;
scalar Tmin_;
scalar Tmax_;
scalar totalHeatFlux_;
scalar NusseltScalingFactor_;
@ -119,6 +125,8 @@ protected:
scalarList typeCG_;
const label maxTypeCG_;
void allocateMyArrays() const;
void partTempField();
@ -129,6 +137,8 @@ protected:
virtual void heatFlux(label, scalar, scalar, scalar, scalar cg3 = 1.0);
virtual void initPartTemp();
public:
//- Runtime type information

View File

@ -58,8 +58,9 @@ BeetstraDrag::BeetstraDrag
minVoidfraction_(propsDict_.lookupOrDefault<scalar>("minVoidfraction",0.1)),
UsFieldName_(propsDict_.lookup("granVelFieldName")),
UsField_(sm.mesh().lookupObject<volVectorField> (UsFieldName_)),
scaleDia_(1.),
maxTypeCG_(1),
typeCG_(propsDict_.lookupOrDefault<scalarList>("coarseGrainingFactors",scalarList(1,1.0))),
scaleDia_(1.),
scaleDrag_(1.),
rhoP_(0.),
rho_(0.),
@ -102,7 +103,11 @@ BeetstraDrag::BeetstraDrag
scaleDrag_=scalar(readScalar(propsDict_.lookup("scaleDrag")));
}
if (typeCG_.size()>1) multiTypes_ = true;
if (typeCG_.size()>1)
{
multiTypes_ = true;
maxTypeCG_ = typeCG_.size();
}
if (propsDict_.found("useFilteredDragModel"))
{
@ -213,12 +218,14 @@ void BeetstraDrag::setForce() const
voidfraction = voidfraction_[cellI];
Ufluid = U_[cellI];
}
// in case a fines phase is present, void fraction needs to be adapted
adaptVoidfraction(voidfraction, cellI);
if (multiTypes_)
{
partType = particleCloud_.particleType(index);
if (partType > maxTypeCG_)
{
FatalError<< "Too few coarse-graining factors provided." << abort(FatalError);
}
cg = typeCG_[partType - 1];
scaleDia3 = cg*cg*cg;
}

View File

@ -64,10 +64,12 @@ protected:
const volVectorField& UsField_;
mutable scalar scaleDia_;
mutable label maxTypeCG_;
mutable scalarList typeCG_;
mutable scalar scaleDia_;
mutable scalar scaleDrag_;
mutable scalar rhoP_;
@ -88,8 +90,6 @@ protected:
bool usePC_;
virtual void adaptVoidfraction(double&, label) const {}
virtual scalar effDiameter(double d, label cellI, label index) const {return d;}
virtual scalar meanSauterDiameter(double d, label cellI) const {return d;}

View File

@ -49,23 +49,26 @@ BeetstraDragPoly::BeetstraDragPoly
:
BeetstraDrag(dict,sm),
fines_(propsDict_.lookupOrDefault<bool>("fines",false)),
dFine_(1.0)
dFine_(1.0),
alphaP_(NULL),
alphaSt_(NULL),
dSauter_(NULL)
{
// if fines are present, take mixture dSauter, otherwise normal dSauter
if (fines_)
{
dFine_ = readScalar(propsDict_.lookup("dFine"));
volScalarField& alphaP(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> ("alphaP")));
alphaP_.set(&alphaP);
alphaP_ = &alphaP;
volScalarField& alphaSt(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> ("alphaSt")));
alphaSt_.set(&alphaSt);
alphaSt_ = &alphaSt;
volScalarField& dSauter(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> ("dSauterMix")));
dSauter_.set(&dSauter);
dSauter_ = &dSauter;
}
else
{
volScalarField& dSauter(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> ("dSauter")));
dSauter_.set(&dSauter);
dSauter_ = &dSauter;
}
}
@ -77,22 +80,15 @@ BeetstraDragPoly::~BeetstraDragPoly()
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
void BeetstraDragPoly::adaptVoidfraction(double& voidfraction, label cellI) const
{
if (fines_) voidfraction -= alphaSt_()[cellI];
if (voidfraction < minVoidfraction_) voidfraction = minVoidfraction_;
}
scalar BeetstraDragPoly::effDiameter(double d, label cellI, label index) const
{
scalar dS = dSauter_()[cellI];
scalar dS = (*dSauter_)[cellI];
scalar effD = d*d / dS + 0.064*d*d*d*d / (dS*dS*dS);
if (fines_)
{
scalar fineCorr = dFine_*dFine_ / dS + 0.064*dFine_*dFine_*dFine_*dFine_ / (dS*dS*dS);
fineCorr *= d*d*d / (dFine_*dFine_*dFine_) * alphaSt_()[cellI] / alphaP_()[cellI];
fineCorr *= d*d*d / (dFine_*dFine_*dFine_) * (*alphaSt_)[cellI] / (*alphaP_)[cellI];
effD += fineCorr;
}
@ -107,7 +103,7 @@ scalar BeetstraDragPoly::effDiameter(double d, label cellI, label index) const
// Mean Sauter diameter in cell cellI, read from the dSauter field this
// model was bound to at construction (dSauterMix with fines, dSauter
// otherwise); the particle diameter d is unused here.
//
// Fix: the block contained two conflicting return statements (merge
// residue). The autoPtr-style call dSauter_()[cellI] no longer compiles
// against the raw-pointer member volScalarField* dSauter_, so only the
// pointer-dereference form is kept.
scalar BeetstraDragPoly::meanSauterDiameter(double d, label cellI) const
{
    return (*dSauter_)[cellI];
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -46,13 +46,11 @@ protected:
scalar dFine_;
autoPtr<volScalarField> alphaP_;
volScalarField* alphaP_;
autoPtr<volScalarField> alphaSt_;
volScalarField* alphaSt_;
autoPtr<volScalarField> dSauter_;
void adaptVoidfraction(double&, label) const;
volScalarField* dSauter_;
scalar effDiameter(double, label, label) const;

View File

@ -50,6 +50,11 @@ Fines::Fines
forceModel(dict,sm),
finesFields_(dict,sm)
{
// safety check: fines should be first force model in list because they correct the voidfraction field
if (particleCloud_.forceModels()[0] != "dSauter" || particleCloud_.forceModels()[1] != "Fines")
{
FatalError <<"Force models dSauter and Fines need to be first in list.\n" << abort(FatalError);
}
}

View File

@ -45,11 +45,20 @@ FinesFields::FinesFields
propsDict_(dict.subDict("FinesFieldsProps")),
smoothing_(propsDict_.lookupOrDefault<bool>("smoothing",false)),
verbose_(propsDict_.lookupOrDefault<bool>("verbose",false)),
clogKin_(propsDict_.lookupOrDefault<bool>("kineticClogging",false)),
clogStick_(propsDict_.lookupOrDefault<bool>("stickyClogging",false)),
movingBed_(propsDict_.lookupOrDefault<bool>("movingBed",true)),
useDepositionLength_(false),
fluxFieldName_(propsDict_.lookupOrDefault<word>("fluxFieldName","phi")),
phi_(sm.mesh().lookupObject<surfaceScalarField> (fluxFieldName_)),
velFieldName_(propsDict_.lookupOrDefault<word>("velFieldName","U")),
U_(sm.mesh().lookupObject<volVectorField> (velFieldName_)),
voidfractionFieldName_(propsDict_.lookupOrDefault<word>("voidfractionFieldName","voidfraction")),
// this is probably really bad
voidfraction_(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> (voidfractionFieldName_))),
#if OPENFOAM_VERSION_MAJOR < 5
voidfraction_(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField>(voidfractionFieldName_))),
#else
voidfraction_(sm.mesh().lookupObjectRef<volScalarField> (voidfractionFieldName_)),
#endif
UsFieldName_(propsDict_.lookupOrDefault<word>("granVelFieldName","Us")),
UsField_(sm.mesh().lookupObject<volVectorField> (UsFieldName_)),
pFieldName_(propsDict_.lookupOrDefault<word>("pFieldName","p")),
@ -200,7 +209,6 @@ FinesFields::FinesFields
),
sm.mesh(),
dimensionedScalar("zero", dimensionSet(1,0,-1,0,0), 0)
//dimensionedVector("zero", dimensionSet(1,-2,-1,0,0), vector::zero)
),
uDyn_
( IOobject
@ -219,54 +227,134 @@ FinesFields::FinesFields
rhoFine_("rhoFine",dimensionSet(1,-3,0,0,0),0.0),
g_("g",dimensionSet(0,1,-2,0,0),vector(0,0,-9.81)),
alphaDynMax_(0.1),
alphaMax_(readScalar(propsDict_.lookup ("alphaMax"))),
critVoidfraction_(readScalar(propsDict_.lookup ("critVoidfraction"))),
depRate_(readScalar(propsDict_.lookup ("depRate"))),
alphaMax_(propsDict_.lookupOrDefault<scalar>("alphaMax",0.95)),
alphaMinClog_(propsDict_.lookupOrDefault<scalar>("alphaMinClog",0.1)),
critVoidfraction_(propsDict_.lookupOrDefault<scalar>("critVoidfraction", 0.05)),
deltaT_(voidfraction_.mesh().time().deltaTValue()),
depositionLength_(0.0),
exponent_(-1.33),
nCrit_(readScalar(propsDict_.lookup ("nCrit"))),
poresizeWidth_(readScalar(propsDict_.lookup ("poresizeWidth"))),
nCrit_(0.0),
poresizeWidth_(0.0),
prefactor_(10.5),
ratioHydraulicPore_(1.5)
ratioHydraulicPore_(1.5),
tauDeposition_(0.0),
tauRelease_(readScalar(propsDict_.lookup ("tauRelease"))),
uBindHigh_(propsDict_.lookupOrDefault<scalar>("uBindHigh",0.0)),
uBindLow_(propsDict_.lookupOrDefault<scalar>("uBindLow",0.0)),
uMin_(0.001)
{
Sds_.write();
if (propsDict_.found("prefactor"))
prefactor_=readScalar(propsDict_.lookup ("prefactor"));
if (propsDict_.found("exponent"))
exponent_=readScalar(propsDict_.lookup ("exponent"));
if (propsDict_.found("dFine"))
dFine_.value()=readScalar(propsDict_.lookup ("dFine"));
else
FatalError <<"Please specify dFine.\n" << abort(FatalError);
if (propsDict_.found("diffCoeff"))
diffCoeff_.value()=readScalar(propsDict_.lookup ("diffCoeff"));
if (propsDict_.found("rhoFine"))
rhoFine_.value()=readScalar(propsDict_.lookup ("rhoFine"));
else
FatalError <<"Please specify rhoFine.\n" << abort(FatalError);
if (propsDict_.found("nuAve"))
nuAve_.value()=readScalar(propsDict_.lookup ("nuAve"));
if (propsDict_.found("alphaDynMax"))
alphaDynMax_=readScalar(propsDict_.lookup ("alphaDynMax"));
if(verbose_)
{
alphaG_.writeOpt() = IOobject::AUTO_WRITE;
alphaG_.write();
alphaP_.writeOpt() = IOobject::AUTO_WRITE;
alphaP_.write();
deltaAlpha_.writeOpt() = IOobject::AUTO_WRITE;
deltaAlpha_.write();
dHydMix_.writeOpt() = IOobject::AUTO_WRITE;
dHydMix_.write();
DragCoeff_.writeOpt() = IOobject::AUTO_WRITE;
DragCoeff_.write();
dSauterMix_.writeOpt() = IOobject::AUTO_WRITE;
dSauterMix_.write();
FanningCoeff_.writeOpt() = IOobject::AUTO_WRITE;
FanningCoeff_.write();
Froude_.writeOpt() = IOobject::AUTO_WRITE;
Froude_.write();
prefactor_=readScalar(propsDict_.lookup ("prefactor"));
}
if (propsDict_.found("exponent"))
{
exponent_=readScalar(propsDict_.lookup ("exponent"));
}
if (propsDict_.found("dFine"))
{
dFine_.value()=readScalar(propsDict_.lookup ("dFine"));
}
else
{
FatalError <<"Please specify dFine.\n" << abort(FatalError);
}
if (propsDict_.found("diffCoeff"))
{
diffCoeff_.value()=readScalar(propsDict_.lookup ("diffCoeff"));
}
if (propsDict_.found("rhoFine"))
{
rhoFine_.value()=readScalar(propsDict_.lookup ("rhoFine"));
}
else
{
FatalError <<"Please specify rhoFine.\n" << abort(FatalError);
}
if (propsDict_.found("nuAve"))
{
nuAve_.value()=readScalar(propsDict_.lookup ("nuAve"));
}
if (propsDict_.found("alphaDynMax"))
{
alphaDynMax_=readScalar(propsDict_.lookup ("alphaDynMax"));
}
if (clogKin_)
{
nCrit_ = readScalar(propsDict_.lookup ("nCrit"));
poresizeWidth_ = readScalar(propsDict_.lookup ("poresizeWidth"));
}
if (clogStick_)
{
if (uBindHigh_ - uBindLow_ < SMALL)
{
FatalError <<"No reasonable values for critical binding velocities.\n" << abort(FatalError);
}
uReconstructed_.set
(
new volVectorField
(
IOobject
(
"uReconstructed",
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::NO_READ,
IOobject::NO_WRITE
),
U_//sm.mesh()
)
);
}
if (propsDict_.found("depositionLength") && propsDict_.found("tauDeposition"))
{
FatalError <<"You cannot specify both a deposition length and time.\n" << abort(FatalError);
}
else if (propsDict_.found("depositionLength"))
{
depositionLength_ = readScalar(propsDict_.lookup ("depositionLength"));
useDepositionLength_ = true;
}
else if (propsDict_.found("tauDeposition"))
{
tauDeposition_ = readScalar(propsDict_.lookup ("tauDeposition"));
useDepositionLength_ = false;
}
else
{
FatalError <<"Specify either a deposition length or time.\n" << abort(FatalError);
}
if (verbose_)
{
alphaG_.writeOpt() = IOobject::AUTO_WRITE;
alphaG_.write();
alphaP_.writeOpt() = IOobject::AUTO_WRITE;
alphaP_.write();
deltaAlpha_.writeOpt() = IOobject::AUTO_WRITE;
deltaAlpha_.write();
dHydMix_.writeOpt() = IOobject::AUTO_WRITE;
dHydMix_.write();
DragCoeff_.writeOpt() = IOobject::AUTO_WRITE;
DragCoeff_.write();
dSauterMix_.writeOpt() = IOobject::AUTO_WRITE;
dSauterMix_.write();
FanningCoeff_.writeOpt() = IOobject::AUTO_WRITE;
FanningCoeff_.write();
Froude_.writeOpt() = IOobject::AUTO_WRITE;
Froude_.write();
if (clogStick_)
{
uReconstructed_().writeOpt() = IOobject::AUTO_WRITE;
uReconstructed_().write();
}
}
}
@ -281,76 +369,100 @@ FinesFields::~FinesFields()
void FinesFields::update()
{
if(verbose_) Info << "FinesFields: Updating alphaP.\n" << endl;
if (verbose_) Info << "FinesFields: Updating alphaP.\n" << endl;
updateAlphaP();
if(verbose_) Info << "FinesFields: Updating alphaG.\n" << endl;
if (verbose_) Info << "FinesFields: Updating alphaG.\n" << endl;
updateAlphaG();
if(verbose_) Info << "FinesFields: Calculating source terms.\n" << endl;
if (clogStick_)
{
if (verbose_) Info << "FinesFields: Updating uReconstructed.\n" << endl;
updateUReconstructed();
}
if (verbose_) Info << "FinesFields: Calculating source terms.\n" << endl;
calcSource();
if(verbose_) Info << "FinesFields: Updating dSauter.\n" << endl;
if (verbose_) Info << "FinesFields: Updating dSauter.\n" << endl;
updateDSauter();
if(verbose_) Info << "FinesFields: Updating dHydMix.\n" << endl;
if (verbose_) Info << "FinesFields: Updating dHydMix.\n" << endl;
updateDHydMix();
if(verbose_) Info << "FinesFields: Updating Froude.\n" << endl;
if (verbose_) Info << "FinesFields: Updating Froude.\n" << endl;
updateFroude();
if(verbose_) Info << "FinesFields: Updating FanningCoeff.\n" << endl;
if (verbose_) Info << "FinesFields: Updating FanningCoeff.\n" << endl;
updateFanningCoeff();
if(verbose_) Info << "FinesFields: Updating DragCoeff.\n" << endl;
if (verbose_) Info << "FinesFields: Updating DragCoeff.\n" << endl;
updateDragCoeff();
if(verbose_) Info << "FinesFields: Updating uDyn.\n" << endl;
if (verbose_) Info << "FinesFields: Updating uDyn.\n" << endl;
updateUDyn();
if(verbose_) Info << "FinesFields: Integrating alphas.\n" << endl;
if (verbose_) Info << "FinesFields: Integrating alphas.\n" << endl;
integrateFields();
if(verbose_) Info << "FinesFields: Update finished.\n" << endl;
if (verbose_) Info << "FinesFields: Update finished.\n" << endl;
}
void FinesFields::calcSource()
{
if (!clogKin_ & !clogStick_) return;
Sds_.primitiveFieldRef() = 0;
deltaAlpha_.primitiveFieldRef() = 0.0;
scalar f(0.0);
scalar critpore(0.0);
scalar dmean(0.0);
scalar d1(0.0);
scalar d2(0.0);
scalar fKin = 0.0;
scalar fStick = 0.0;
scalar critpore = 0.0;
scalar dmean = 0.0;
scalar d1 = 0.0;
scalar d2 = 0.0;
scalar magU = 0.0;
scalar tauDeposition = 0.0;
forAll(Sds_,cellI)
{
// calculate everything in units of dSauter
critpore = nCrit_*dFine_.value()/dSauter_[cellI];
// pore size from hydraulic radius
dmean = 2 * (1 - alphaP_[cellI]) / ( (1 + poresizeWidth_*poresizeWidth_/3) * 3 * alphaP_[cellI] );
// Sweeney and Martin, Acta Materialia 51 (2003): ratio of hydraulic to pore throat radius
dmean /= ratioHydraulicPore_;
d1 = dmean * (1 - poresizeWidth_);
d2 = dmean * (1 + poresizeWidth_);
fKin = 0.0;
fStick = 0.0;
if (clogKin_ && alphaP_[cellI] > alphaMinClog_) // no clogging in dilute regions
{
// calculate everything in units of dSauter
critpore = nCrit_*dFine_.value()/dSauter_[cellI];
// pore size from hydraulic radius
dmean = 2 * (1 - alphaP_[cellI]) / ( (1 + poresizeWidth_*poresizeWidth_/3) * 3 * alphaP_[cellI] );
// Sweeney and Martin, Acta Materialia 51 (2003): ratio of hydraulic to pore throat radius
dmean /= ratioHydraulicPore_;
d1 = dmean * (1 - poresizeWidth_);
d2 = dmean * (1 + poresizeWidth_);
f = (critpore*critpore*critpore - d1 * d1 * d1) / (d2 * d2 * d2 - d1 * d1 * d1);
if (f < 0)
{
f = 0.0;
fKin = (critpore*critpore*critpore - d1 * d1 * d1) / (d2 * d2 * d2 - d1 * d1 * d1);
if (fKin < 0) fKin = 0.0;
else if (fKin > 1.0) fKin = 1.0;
}
else if (f > 1.0)
if (clogStick_ && alphaP_[cellI] > alphaMinClog_) // no clogging in dilute regions
{
f = 1.0;
magU = mag(uReconstructed_()[cellI]); // use U reconstructed from phi to suppress oscillations at interfaces
// fStick = 1.0 / ( 1.0 + magU/uBind_) * alphaP_[cellI] / 0.65;
if (magU < uBindLow_) fStick = 1.0;
else if (magU > uBindHigh_) fStick = 0.0;
else fStick = 1.0 - (magU - uBindLow_) / (uBindHigh_ - uBindLow_);
// if (fStick > 1.0) fStick = 1.0;
fStick *= alphaP_[cellI] / 0.65;
}
// at this point, voidfraction is still calculated from the true particle sizes
deltaAlpha_[cellI] = f * (alphaMax_ - alphaP_[cellI]) - alphaSt_[cellI];
// too much volume occupied: release it (50% per time step)
deltaAlpha_[cellI] = max(fKin,fStick) * (alphaMax_ - alphaP_[cellI]) - alphaSt_[cellI];
// too much volume occupied: release it
if (deltaAlpha_[cellI] < 0.0)
{
Sds_[cellI] = 0.5*deltaAlpha_[cellI];
}
// volume to occupy available: deposit at most 80% of dyn hold up
else if (depRate_ * deltaAlpha_[cellI] > 0.8 * alphaDyn_[cellI])
{
Sds_[cellI] = 0.8 * alphaDyn_[cellI];
Sds_[cellI] = deltaAlpha_[cellI] / tauRelease_;
}
// volume to occupy available
else
{
Sds_[cellI] = depRate_ * deltaAlpha_[cellI];
if (useDepositionLength_)
{
tauDeposition = depositionLength_ / max(mag(U_[cellI]),uMin_);
}
else
{
tauDeposition = tauDeposition_;
}
if (tauDeposition < 4.0*deltaT_) tauDeposition = 4.0*deltaT_; // do not deposit all dyn hold-up within less than 3 time steps
Sds_[cellI] = alphaDyn_[cellI] / tauDeposition;
}
}
}
@ -364,11 +476,12 @@ void FinesFields::integrateFields()
fvScalarMatrix alphaStEqn
(
fvm::ddt(alphaSt_)
+ fvm::div(phiSt,alphaSt_)
fvm::ddt(alphaSt_)
==
Sds_
);
if (movingBed_) alphaStEqn += fvm::div(phiSt,alphaSt_);
fvScalarMatrix alphaDynEqn
(
fvm::ddt(alphaDyn_)
@ -377,10 +490,11 @@ void FinesFields::integrateFields()
==
-Sds_
);
alphaStEqn.solve();
alphaDynEqn.solve();
if(smoothing_)
if (smoothing_)
particleCloud_.smoothingM().smoothen(alphaDyn_);
// limit hold-ups, should be done more elegantly
@ -422,13 +536,14 @@ void FinesFields::integrateFields()
}
void FinesFields::updateAlphaG()
void FinesFields::updateAlphaG() // called after updateAlphaP() - correct voidfraction by removing space occupied by fines
{
alphaG_ = max(voidfraction_ - alphaSt_ - alphaDyn_, critVoidfraction_);
voidfraction_ = alphaG_;
}
void FinesFields::updateAlphaP()
void FinesFields::updateAlphaP() // called first in the update cycle - voidfraction_ is current with DEM data
{
alphaP_ = 1.0 - voidfraction_ + SMALL;
}
@ -439,7 +554,7 @@ void FinesFields::updateDHydMix()
forAll(dHydMix_,cellI)
{
scalar aPSt = alphaP_[cellI] + alphaSt_[cellI];
if(aPSt < SMALL || aPSt > 1 - SMALL)
if (aPSt < SMALL || aPSt > 1 - SMALL)
dHydMix_[cellI] = SMALL;
else
dHydMix_[cellI] = 2*(1 - aPSt) / (3*aPSt ) * dSauterMix_[cellI];
@ -459,11 +574,11 @@ void FinesFields::updateDragCoeff()
forAll(DragCoeff_,cellI)
{
Ref1 = Ref[cellI];
if(Ref1 <= SMALL)
if (Ref1 <= SMALL)
Cd = 24.0 / SMALL;
else if(Ref1 <= 1.0)
else if (Ref1 <= 1.0)
Cd = 24.0 / Ref1;
else if(Ref1 <= 1000)
else if (Ref1 <= 1000)
Cd = 24 * (1.0 + 0.15 * Foam::pow(Ref1,0.687) ) / Ref1;
else
Cd = 0.44;
@ -475,11 +590,11 @@ void FinesFields::updateDragCoeff()
forAll(DragCoeff_.boundaryField()[patchI], faceI)
{
Ref1 = Ref.boundaryField()[patchI][faceI];
if(Ref1 <= SMALL)
if (Ref1 <= SMALL)
Cd = 24.0 / SMALL;
else if(Ref1 <= 1.0)
else if (Ref1 <= 1.0)
Cd = 24.0 / Ref1;
else if(Ref1 <= 1000)
else if (Ref1 <= 1000)
Cd = 24 * (1.0 + 0.15 * Foam::pow(Ref1,0.687) ) / Ref1;
else
Cd = 0.44;
@ -496,9 +611,9 @@ void FinesFields::updateDSauter()
{
scalar aP = alphaP_[cellI];
scalar aSt = alphaSt_[cellI];
if(aSt < SMALL)
if (aSt < SMALL)
dSauterMix_[cellI] = dSauter_[cellI];
else if(aP < SMALL)
else if (aP < SMALL)
dSauterMix_[cellI] = dFine_.value();
else
dSauterMix_[cellI] = (aP + aSt) / (aP / dSauter_[cellI] + aSt / dFine_.value() );
@ -541,7 +656,7 @@ void FinesFields::updateUDyn()
{
scalar mU(mag(U_[cellI]));
scalar muDyn(mag(uDyn_[cellI]));
if(muDyn > mU && muDyn > SMALL)
if (muDyn > mU && muDyn > SMALL)
{
uDyn_[cellI] *= mU / muDyn;
}
@ -552,13 +667,24 @@ void FinesFields::updateUDyn()
{
scalar mU(mag(U_.boundaryField()[patchI][faceI]));
scalar muDyn(mag(uDyn_.boundaryField()[patchI][faceI]));
if(muDyn > mU && muDyn > SMALL)
if (muDyn > mU && muDyn > SMALL)
{
uDyn_.boundaryFieldRef()[patchI][faceI] *= mU / muDyn;
}
}
}
void FinesFields::updateUReconstructed()
{
    // Rebuild a cell-centred velocity from the face flux phi_ (used instead
    // of U_ to suppress oscillations at porosity steps).
    if (phi_.dimensions() != dimensionSet(1, 0, -1, 0, 0))
    {
        // volumetric flux: divide out the void fraction only
        uReconstructed_() = fvc::reconstruct(phi_) / voidfraction_;
    }
    else
    {
        // mass flux (compressible): the gas density has to be divided out too
        uReconstructed_() = fvc::reconstruct(phi_) / (rhoG_ * voidfraction_);
    }
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam

View File

@ -46,6 +46,18 @@ private:
bool verbose_;
bool clogKin_;
bool clogStick_;
bool movingBed_;
bool useDepositionLength_;
word fluxFieldName_;
const surfaceScalarField& phi_;
word velFieldName_;
const volVectorField& U_;
@ -90,11 +102,12 @@ private:
volScalarField Sds_;
//volVectorField massFluxDyn_;
surfaceScalarField massFluxDyn_;
volVectorField uDyn_;
autoPtr<volVectorField> uReconstructed_; // velocity field can show oscillations at porosity steps
dimensionedScalar dFine_;
dimensionedScalar diffCoeff_;
@ -109,9 +122,13 @@ private:
scalar alphaMax_;
scalar alphaMinClog_;
scalar critVoidfraction_;
scalar depRate_;
scalar deltaT_;
scalar depositionLength_;
scalar exponent_;
@ -123,6 +140,16 @@ private:
scalar ratioHydraulicPore_;
scalar tauDeposition_;
scalar tauRelease_;
scalar uBindHigh_;
scalar uBindLow_;
scalar uMin_;
void calcSource();
void integrateFields();
@ -143,6 +170,8 @@ private:
void updateUDyn();
void updateUReconstructed();
public:
//- Runtime type information

View File

@ -291,7 +291,11 @@ void KochHillRWDrag::setForce() const
// modify current fluid velocity
for (int dim=0; dim<3; dim++)
{
#if OPENFOAM_VERSION_MAJOR < 6
partUfluct_[index][dim] = RanGen_.GaussNormal()*sqrt(2.*k/3.);
#else
partUfluct_[index][dim] = RanGen_.scalarNormal()*sqrt(2.*k/3.);
#endif
//Pout << "RW-TEST: Ufluid[" << dim << "] = " << Ufluid[dim] << " Ufluct = " << partUfluct_[index][dim] << " k = " << k << endl; // TEST-Output
Ufluid[dim] = Ufluid[dim] + partUfluct_[index][dim];
}

View File

@ -66,7 +66,9 @@ SchillerNaumannDrag::SchillerNaumannDrag
propsDict_(dict.subDict(typeName + "Props")),
verbose_(propsDict_.found("verbose")),
velFieldName_(propsDict_.lookup("velFieldName")),
U_(sm.mesh().lookupObject<volVectorField> (velFieldName_))
U_(sm.mesh().lookupObject<volVectorField> (velFieldName_)),
scaleDia_(1.),
scaleDrag_(1.)
{
//Append the field names to be probed
particleCloud_.probeM().initialize(typeName, typeName+".logDat");
@ -85,7 +87,12 @@ SchillerNaumannDrag::SchillerNaumannDrag
// read those switches defined above, if provided in dict
forceSubM(0).readSwitches();
particleCloud_.checkCG(false);
particleCloud_.checkCG(true);
if (propsDict_.found("scale"))
scaleDia_ = scalar(readScalar(propsDict_.lookup("scale")));
if (propsDict_.found("scaleDrag"))
scaleDrag_ = scalar(readScalar(propsDict_.lookup("scaleDrag")));
}
@ -99,6 +106,16 @@ SchillerNaumannDrag::~SchillerNaumannDrag()
void SchillerNaumannDrag::setForce() const
{
if (scaleDia_ > 1.0)
{
Info << "SchillerNaumann using scale = " << scaleDia_ << endl;
}
else if (particleCloud_.cg() > 1.0)
{
scaleDia_ = particleCloud_.cg();
Info << "SchillerNaumann using scale from liggghts cg = " << scaleDia_ << endl;
}
#include "setupProbeModel.H"
const volScalarField& nufField = forceSubM(0).nuField();
@ -117,6 +134,7 @@ void SchillerNaumannDrag::setForce() const
vector Us = particleCloud_.velocity(index);
vector Ur = U_[cellI]-Us;
scalar ds = 2*particleCloud_.radius(index);
scalar ds_scaled = ds/scaleDia_;
scalar nuf = nufField[cellI];
scalar rho = rhoField[cellI];
scalar voidfraction = particleCloud_.voidfraction(index);
@ -127,30 +145,33 @@ void SchillerNaumannDrag::setForce() const
if (magUr > 0)
{
// calc particle Re Nr
Rep = ds*magUr/nuf;
Rep = ds_scaled*magUr/nuf;
// calc fluid drag Coeff
Cd = max(0.44,24.0/Rep*(1.0+0.15*pow(Rep,0.687)));
// calc particle's drag
drag = 0.125*Cd*rho*M_PI*ds*ds*magUr*Ur;
drag = 0.125*Cd*rho*M_PI*ds*ds*scaleDia_*magUr*Ur*scaleDrag_;
if (modelType_=="B")
drag /= voidfraction;
}
if(verbose_ && index >100 && index <102)
if(verbose_ && index >=100 && index <102)
{
Info << "index = " << index << endl;
Info << "Us = " << Us << endl;
Info << "Ur = " << Ur << endl;
Info << "ds = " << ds << endl;
Info << "rho = " << rho << endl;
Info << "nuf = " << nuf << endl;
Info << "voidfraction = " << voidfraction << endl;
Info << "Rep = " << Rep << endl;
Info << "Cd = " << Cd << endl;
Info << "drag = " << drag << endl;
Pout << "cellI = " << cellI << endl;
Pout << "index = " << index << endl;
Pout << "Ufluid = " << U_[cellI] << endl;
Pout << "Us = " << Us << endl;
Pout << "Ur = " << Ur << endl;
Pout << "ds = " << ds << endl;
Pout << "ds/scale = " << ds_scaled << endl;
Pout << "rho = " << rho << endl;
Pout << "nuf = " << nuf << endl;
Pout << "voidfraction = " << voidfraction << endl;
Pout << "Rep = " << Rep << endl;
Pout << "Cd = " << Cd << endl;
Pout << "drag = " << drag << endl;
}
//Set value fields and write the probe

View File

@ -66,6 +66,10 @@ private:
const volVectorField& U_;
mutable scalar scaleDia_;
mutable scalar scaleDrag_;
public:
//- Runtime type information

View File

@ -56,6 +56,7 @@ dSauter::dSauter
multiTypes_(false),
d2_(NULL),
d3_(NULL),
maxTypeCG_(1),
typeCG_(propsDict_.lookupOrDefault<scalarList>("coarseGrainingFactors",scalarList(1,1.0))),
d2Field_
( IOobject
@ -95,7 +96,11 @@ dSauter::dSauter
"zeroGradient"
)
{
if (typeCG_.size()>1) multiTypes_ = true;
if (typeCG_.size()>1)
{
multiTypes_ = true;
maxTypeCG_ = typeCG_.size();
}
allocateMyArrays();
dSauter_.write();
@ -153,6 +158,10 @@ void dSauter::setForce() const
if (multiTypes_)
{
partType = particleCloud_.particleType(index);
if (partType > maxTypeCG_)
{
FatalError<< "Too few coarse-graining factors provided." << abort(FatalError);
}
cg = typeCG_[partType - 1];
}
ds = particleCloud_.d(index);

View File

@ -49,6 +49,8 @@ private:
mutable double **d3_;
label maxTypeCG_;
scalarList typeCG_;
mutable volScalarField d2Field_;

View File

@ -69,7 +69,8 @@ forceModel::forceModel
IOobject::AUTO_WRITE
),
sm.mesh(),
dimensionedVector("zero", dimensionSet(1,1,-2,0,0), vector::zero) // N
dimensionedVector("zero", dimensionSet(1,1,-2,0,0), vector::zero),
"zeroGradient"
),
expParticleForces_
( IOobject
@ -81,7 +82,8 @@ forceModel::forceModel
IOobject::AUTO_WRITE
),
sm.mesh(),
dimensionedVector("zero", dimensionSet(1,1,-2,0,0), vector::zero) // N
dimensionedVector("zero", dimensionSet(1,1,-2,0,0), vector::zero),
"zeroGradient"
),
coupleForce_(true),
modelType_(sm.modelType()),

View File

@ -1,6 +1,6 @@
/*---------------------------------------------------------------------------*\
CFDEMcoupling academic - Open Source CFD-DEM coupling
Contributing authors:
Thomas Lichtenegger
Copyright (C) 2015- Johannes Kepler University, Linz
@ -82,8 +82,9 @@ isotropicFluctuations::isotropicFluctuations
sm.mesh(),
dimensionedScalar("D0", dimensionSet(0,0,0,0,0,0,0), D0_)
),
maxDisplacement_(propsDict_.lookupOrDefault<scalar>("maxDisplacement", -1.0)),
dtDEM_(particleCloud_.dataExchangeM().DEMts()),
ranGen_(osRandomInteger())
ranGen_(clock::getTime()+pid())
{
if(ignoreCellsName_ != "none")
{
@ -122,10 +123,13 @@ void isotropicFluctuations::setForce() const
vector flucU(0,0,0);
label cellI=0;
scalar relVolfractionExcess(0.0);
interpolationCellPoint<scalar> voidfractionInterpolator_(voidfraction_);
interpolationCellPoint<scalar> voidfractionRecInterpolator_(voidfractionRec_);
scalar maxVel = maxDisplacement_ / dtDEM_;
scalar magFlucU = 0.0;
for(int index = 0;index < particleCloud_.numberOfParticles(); ++index)
{
cellI = particleCloud_.cellIDs()[index][0];
@ -151,14 +155,22 @@ void isotropicFluctuations::setForce() const
voidfractionRec = voidfractionRec_[cellI];
}
// write particle based data to global array
deltaVoidfrac=voidfractionRec-voidfraction;
relVolfractionExcess=deltaVoidfrac/(1-voidfraction+SMALL);
if(deltaVoidfrac>0)
{
D = D0Field_[cellI];
flucU=unitFlucDir()*fluctuationMag(relVolfractionExcess,D);
}
magFlucU = fluctuationMag(relVolfractionExcess,D);
if (maxVel > 0.0 && magFlucU > maxVel)
{
flucU=unitFlucDir()*maxVel;
}
else
{
flucU=unitFlucDir()*fluctuationMag(relVolfractionExcess,D);
}
}
// write particle based data to global array
for(int j=0;j<3;j++)
@ -168,11 +180,11 @@ void isotropicFluctuations::setForce() const
}
}
}
if (measureDiff_)
{
dimensionedScalar diff( fvc::domainIntegrate( sqr( voidfraction_ - voidfractionRec_ ) ) );
scalar t = particleCloud_.mesh().time().timeOutputValue();
scalar t = particleCloud_.mesh().time().timeOutputValue();
recErrorFile_ << t << "\t" << diff.value() << endl;
}
}

View File

@ -1,6 +1,6 @@
/*---------------------------------------------------------------------------*\
CFDEMcoupling academic - Open Source CFD-DEM coupling
Contributing authors:
Thomas Lichtenegger
Copyright (C) 2015- Johannes Kepler University, Linz
@ -46,24 +46,24 @@ class isotropicFluctuations
{
protected:
dictionary propsDict_;
bool interpolate_;
bool measureDiff_;
mutable OFstream recErrorFile_;
// ignore fluctuations in region
word ignoreCellsName_;
autoPtr<cellSet> ignoreCells_;
bool existIgnoreCells_;
word voidfractionFieldName_;
const volScalarField& voidfraction_;
word voidfractionRecFieldName_;
const volScalarField& voidfractionRec_;
@ -76,6 +76,8 @@ protected:
volScalarField D0Field_;
scalar maxDisplacement_;
const scalar dtDEM_;
virtual vector unitFlucDir() const;

View File

@ -94,8 +94,17 @@ particleCellVolume::particleCellVolume
),
upperThreshold_(readScalar(propsDict_.lookup("upperThreshold"))),
lowerThreshold_(readScalar(propsDict_.lookup("lowerThreshold"))),
verbose_(propsDict_.found("verbose"))
verbose_(propsDict_.found("verbose")),
writeToFile_(propsDict_.lookupOrDefault<Switch>("writeToFile",false)),
filePtr_()
{
// create the path and output file
if(writeToFile_)
{
fileName path(particleCloud_.IOM().createTimeDir("postProcessing/particleCellVolume"));
filePtr_.set(new OFstream(path/"particleCellVolume.txt"));
filePtr_() << "# time | total particle volume in cells | total volume of cells with particles | average volume fraction | min(voidfraction) | max(voidfraction)" << endl;
}
}
@ -120,6 +129,8 @@ void particleCellVolume::setForce() const
scalar fieldValue=-1;
scalar cellVol=-1;
scalar minFieldVal=1e18;
scalar maxFieldVal=-1e18;
forAll(field,cellI)
{
@ -129,6 +140,8 @@ void particleCellVolume::setForce() const
cellVol = mesh_.V()[cellI];
scalarField_[cellI] = (1-fieldValue) * cellVol;
scalarField2_[cellI] = cellVol;
minFieldVal = min(minFieldVal, fieldValue);
maxFieldVal = max(maxFieldVal, fieldValue);
}
else
{
@ -138,6 +151,8 @@ void particleCellVolume::setForce() const
}
scalarField_.ref() = gSum(scalarField_);
scalarField2_.ref() = gSum(scalarField2_);
reduce(minFieldVal, minOp<scalar>());
reduce(maxFieldVal, maxOp<scalar>());
if(verbose_)
{
@ -147,8 +162,20 @@ void particleCellVolume::setForce() const
<< ", and > " << lowerThreshold_
<< ",\n the total volume of cells holding particles = " << scalarField2_[0]
<< ",\n this results in an average volume fraction of:" << scalarField_[0]/(scalarField2_[0]+SMALL)
<< ",\n the min occurring " << scalarFieldName_ << " is:" << minFieldVal
<< ",\n the max occurring " << scalarFieldName_ << " is:" << maxFieldVal
<< endl;
}
if(writeToFile_)
{
filePtr_() << mesh_.time().value() << " "
<< scalarField_[0] << " "
<< scalarField2_[0] << " "
<< scalarField_[0]/(scalarField2_[0]+SMALL) << " "
<< minFieldVal << " "
<< maxFieldVal << endl;
}
}// end if time >= startTime_
}

View File

@ -74,6 +74,10 @@ private:
const Switch verbose_;
const Switch writeToFile_;
mutable autoPtr<OFstream> filePtr_;
public:
//- Runtime type information

View File

@ -20,6 +20,7 @@ License
#include "particleDeformation.H"
#include "addToRunTimeSelectionTable.H"
#include "dataExchangeModel.H"
#include "OFstream.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -52,9 +53,13 @@ particleDeformation::particleDeformation
initialExec_(true),
refFieldName_(propsDict_.lookup("refFieldName")),
refField_(),
partType_(propsDict_.lookupOrDefault<label>("partType",0)),
lowerBound_(readScalar(propsDict_.lookup ("lowerBound"))),
upperBound_(readScalar(propsDict_.lookup ("upperBound"))),
defaultDeformCellsName_(propsDict_.lookupOrDefault<word>("defaultDeformCellsName","none")),
defaultDeformCells_(),
existDefaultDeformCells_(false),
defaultDeformation_(propsDict_.lookupOrDefault<scalar>("defaultDeformation",1.0)),
partTypes_(propsDict_.lookupOrDefault<labelList>("partTypes",labelList(1,-1))),
lowerBounds_(propsDict_.lookupOrDefault<scalarList>("lowerBounds",scalarList(1,-1.0))),
upperBounds_(propsDict_.lookupOrDefault<scalarList>("upperBounds",scalarList(1,-1.0))),
partDeformations_(NULL)
{
allocateMyArrays();
@ -70,6 +75,44 @@ particleDeformation::particleDeformation
forceSubM(0).readSwitches();
particleCloud_.checkCG(false);
if(defaultDeformCellsName_ != "none")
{
defaultDeformCells_.set(new cellSet(particleCloud_.mesh(),defaultDeformCellsName_));
existDefaultDeformCells_ = true;
Info << "particleDeformation: default deformation of " << defaultDeformation_ << " in cellSet " << defaultDeformCells_().name() <<
" with " << defaultDeformCells_().size() << " cells." << endl;
if (defaultDeformation_ < 0.0 || defaultDeformation_ > 1.0)
{
defaultDeformation_ = min(max(defaultDeformation_,0.0),1.0);
Info << "Resetting defaultDeformation to range [0;1]" << endl;
}
}
// check if only single value instead of list was provided
if (propsDict_.found("partType"))
{
partTypes_[0] = readLabel(propsDict_.lookup("partType"));
}
if (propsDict_.found("lowerBound"))
{
lowerBounds_[0] = readScalar(propsDict_.lookup("lowerBound"));
}
if (propsDict_.found("upperBound"))
{
upperBounds_[0] = readScalar(propsDict_.lookup("upperBound"));
}
if (partTypes_.size() != lowerBounds_.size() || partTypes_.size() != upperBounds_.size())
{
FatalError << "Inconsistent number of particle types and/or bounds provided." << abort(FatalError);
}
Info << "partTypes: " << partTypes_ << endl;
Info << "lowerBounds: " << lowerBounds_ << endl;
Info << "upperBounds: " << upperBounds_ << endl;
}
@ -87,6 +130,12 @@ void particleDeformation::allocateMyArrays() const
double initVal = 0.0;
particleCloud_.dataExchangeM().allocateArray(partDeformations_,initVal,1);
}
bool particleDeformation::defaultDeformCell(label cell) const
{
    // A cell uses the default deformation only when the optional cellSet was
    // provided AND this cell is a member of it; short-circuit keeps the set
    // untouched when it does not exist.
    return existDefaultDeformCells_ && defaultDeformCells_()[cell];
}
// * * * * * * * * * * * * * * * public Member Functions * * * * * * * * * * * * * //
void particleDeformation::setForce() const
@ -103,36 +152,44 @@ void particleDeformation::setForce() const
label partType = -1;
scalar refFieldValue = 0.0;
scalar deformationDegree = 0.0;
interpolationCellPoint<scalar> refFieldInterpolator_(refField_());
for(int index = 0; index < particleCloud_.numberOfParticles(); ++index)
{
cellI = particleCloud_.cellIDs()[index][0];
partType = particleCloud_.particleType(index);
if (cellI >= 0 && partType == partType_)
label listIndex = getListIndex(partType);
if (cellI >= 0 && listIndex >= 0)
{
if (forceSubM(0).interpolation())
if (defaultDeformCell(cellI))
{
vector position = particleCloud_.position(index);
refFieldValue = refFieldInterpolator_.interpolate(position,cellI);
deformationDegree = defaultDeformation_;
}
else
{
refFieldValue = refField_()[cellI];
}
if (forceSubM(0).interpolation())
{
vector position = particleCloud_.position(index);
refFieldValue = refFieldInterpolator_.interpolate(position,cellI);
}
else
{
refFieldValue = refField_()[cellI];
}
if (refFieldValue <= lowerBound_)
{
deformationDegree = 0.0;
}
else if (refFieldValue >= upperBound_)
{
deformationDegree = 1.0;
}
else
{
deformationDegree = (refFieldValue - lowerBound_) / (upperBound_ - lowerBound_);
if (refFieldValue <= lowerBounds_[listIndex])
{
deformationDegree = 0.0;
}
else if (refFieldValue >= upperBounds_[listIndex])
{
deformationDegree = 1.0;
}
else
{
deformationDegree = (refFieldValue - lowerBounds_[listIndex]) / (upperBounds_[listIndex] - lowerBounds_[listIndex]);
}
}
partDeformations_[index][0] = deformationDegree;
@ -155,7 +212,6 @@ void particleDeformation::init() const
// check if ref field with corresponding name has been read by some other class or if it needs to be newly created
if (particleCloud_.mesh().foundObject<volScalarField> (refFieldName_))
{
// volScalarField& refField(particleCloud_.mesh().lookupObject<volScalarField> (refFieldName_));
volScalarField& refField(const_cast<volScalarField&>(particleCloud_.mesh().lookupObject<volScalarField> (refFieldName_)));
refField_.set(&refField);
}
@ -181,6 +237,15 @@ void particleDeformation::init() const
}
}
label particleDeformation::getListIndex(label testElement) const
{
    // Position of testElement within partTypes_, or -1 when the particle
    // type is not handled by this model.
    const label nTypes = partTypes_.size();
    for (label i = 0; i < nTypes; i++)
    {
        if (partTypes_[i] == testElement)
        {
            return i;
        }
    }
    return -1;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam

View File

@ -34,7 +34,8 @@ SourceFiles
#include "forceModel.H"
#include "averagingModel.H"
#include "interpolationCellPoint.H"
#include "autoPtr.H"
#include "cellSet.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
@ -58,18 +59,31 @@ private:
mutable autoPtr<volScalarField> refField_;
label partType_;
// default deformation in region
word defaultDeformCellsName_;
scalar lowerBound_;
autoPtr<cellSet> defaultDeformCells_;
scalar upperBound_;
bool existDefaultDeformCells_;
scalar defaultDeformation_;
labelList partTypes_;
scalarList lowerBounds_;
scalarList upperBounds_;
mutable double **partDeformations_;
label getListIndex(label) const;
void allocateMyArrays() const;
void init() const;
bool defaultDeformCell(label) const;
public:
//- Runtime type information

View File

@ -122,14 +122,6 @@ pdCorrelation::pdCorrelation
sm.mesh(),
dimensionedScalar("zero", dimless, 0)
),
typeCG_
(
propsDict_.lookupOrDefault
(
"coarseGrainingFactors",
scalarList(1, sm.cg())
)
),
particleDensities_
(
propsDict_.lookupOrDefault
@ -138,6 +130,15 @@ pdCorrelation::pdCorrelation
scalarList(1, -1.)
)
),
typeCG_
(
propsDict_.lookupOrDefault
(
"coarseGrainingFactors",
scalarList(1, sm.cg())
)
),
maxTypeCG_(typeCG_.size()),
constantCG_(typeCG_.size() < 2),
CG_(!constantCG_ || typeCG_[0] > 1. + SMALL),
runOnWriteOnly_(propsDict_.lookupOrDefault("runOnWriteOnly", false))
@ -222,6 +223,10 @@ void pdCorrelation::setForce() const
typei = particleCloud_.particleTypes()[pi][0] - 1;
rhop = densityFromList ? particleDensities_[typei]
: particleCloud_.particleDensity(pi);
if (!constantCG_ && typei >= maxTypeCG_)
{
FatalError<< "Too few coarse-graining factors provided." << abort(FatalError);
}
cg = constantCG_ ? typeCG_[0] : typeCG_[typei];
}

View File

@ -58,8 +58,9 @@ private:
mutable volScalarField cg3Field_;
const scalarList typeCG_;
const scalarList particleDensities_;
const scalarList typeCG_;
const label maxTypeCG_;
const Switch constantCG_;
const Switch CG_;
const Switch runOnWriteOnly_;

View File

@ -69,7 +69,9 @@ volWeightedAverage::volWeightedAverage
vectorFieldNames_(propsDict_.lookup("vectorFieldNames")),
upperThreshold_(readScalar(propsDict_.lookup("upperThreshold"))),
lowerThreshold_(readScalar(propsDict_.lookup("lowerThreshold"))),
verbose_(propsDict_.found("verbose"))
verbose_(propsDict_.found("verbose")),
writeToFile_(propsDict_.lookupOrDefault<Switch>("writeToFile", false)),
filePtr_()
{
// create vol weighted average scalar fields
scalarFields_.setSize(scalarFieldNames_.size());
@ -124,6 +126,13 @@ volWeightedAverage::volWeightedAverage
)
);
}
// create the path and output file
if(writeToFile_)
{
fileName path(particleCloud_.IOM().createTimeDir("postProcessing/volWeightedAverage"));
filePtr_.set(new OFstream(path/"volWeightedAverage.txt"));
}
}
@ -141,6 +150,8 @@ void volWeightedAverage::setForce() const
{
if(verbose_) Info << "volWeightedAverage.C - setForce()" << endl;
if(writeToFile_) filePtr_() << mesh_.time().value() << " ";
for (int i=0;i < scalarFieldNames_.size(); i++)
{
// get reference to actual field
@ -171,7 +182,7 @@ void volWeightedAverage::setForce() const
MPI_Allreduce(&totVol, &totVol_all, 3, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
integralValue = gSum(scalarFields_[i]);
volWeightedAverage = integralValue / (totVol_all+SMALL);
scalarFields_[i].ref() = volWeightedAverage;
scalarFields_[i].primitiveFieldRef() = volWeightedAverage;
if(verbose_)
{
@ -184,6 +195,8 @@ void volWeightedAverage::setForce() const
<< ",\n considering cells where the field < " << upperThreshold_
<< ", and > " << lowerThreshold_ << endl;
}
if(writeToFile_) filePtr_() << volWeightedAverage << " ";
}
for (int i=0;i < vectorFieldNames_.size(); i++)
@ -225,7 +238,11 @@ void volWeightedAverage::setForce() const
<< ",\n considering cells where the mag(field) < " << upperThreshold_
<< ", and > " << lowerThreshold_ << endl;
}
if(writeToFile_) filePtr_() << volWeightedAverage << " ";
}
if(writeToFile_) filePtr_() << endl;
}// end if time >= startTime_
}

View File

@ -76,6 +76,10 @@ private:
const Switch verbose_;
const Switch writeToFile_;
mutable autoPtr<OFstream> filePtr_;
public:
//- Runtime type information

View File

@ -65,14 +65,70 @@ constDiffSmoothing::constDiffSmoothing
propsDict_(dict.subDict(typeName + "Props")),
lowerLimit_(readScalar(propsDict_.lookup("lowerLimit"))),
upperLimit_(readScalar(propsDict_.lookup("upperLimit"))),
smoothingLength_(dimensionedScalar("smoothingLength", dimLength, readScalar(propsDict_.lookup("smoothingLength")))),
smoothingLengthReferenceField_(dimensionedScalar("smoothingLengthReferenceField", dimLength, readScalar(propsDict_.lookup("smoothingLength")))),
DT_("DT", dimensionSet(0,2,-1,0,0), 0.),
smoothingLength_(propsDict_.lookupOrDefault<scalar>("smoothingLength", -1.0)),
smoothingLengthReference_(propsDict_.lookupOrDefault<scalar>("smoothingLengthReference",smoothingLength_)),
smoothingLengthFieldName_(propsDict_.lookupOrDefault<word>("smoothingLengthFieldName","smoothingLengthField")),
smoothingLengthField_
( IOobject
(
smoothingLengthFieldName_,
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
sm.mesh(),
dimensionedScalar("smoothingLength", dimensionSet(0,1,0,0,0,0,0), smoothingLength_),
"zeroGradient"
),
smoothingLengthReferenceFieldName_(propsDict_.lookupOrDefault<word>("smoothingLengthReferenceFieldName","smoothingLengthReferenceField")),
smoothingLengthReferenceField_
( IOobject
(
smoothingLengthReferenceFieldName_,
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
sm.mesh(),
dimensionedScalar("smoothingLengthReference", dimensionSet(0,1,0,0,0,0,0), smoothingLengthReference_),
"zeroGradient"
),
DT_
( IOobject
(
"DT",
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::NO_READ,
IOobject::NO_WRITE
),
sm.mesh(),
dimensionedScalar("DT", dimensionSet(0,2,-1,0,0,0,0), 0.0),
"zeroGradient"
),
verbose_(propsDict_.found("verbose"))
{
if(propsDict_.found("smoothingLengthReferenceField"))
// either use scalar or field parameters for smoothing
if (smoothingLength_ > 0.0 || smoothingLengthReference_ > 0.0)
{
smoothingLengthReferenceField_.value() = double(readScalar(propsDict_.lookup("smoothingLengthReferenceField")));
if (smoothingLengthField_.headerOk() || smoothingLengthReferenceField_.headerOk())
{
FatalError <<"constDiffSmoothing: Either use scalar or field parameter for smoothing.\n" << abort(FatalError);
}
}
if (smoothingLength_ < 0.0 && !smoothingLengthField_.headerOk())
{
FatalError <<"constDiffSmoothing: Provide scalar or field parameter for smoothing.\n" << abort(FatalError);
}
// if no scalar length for smoothing wrt reference field is provided and no
// such field, use smoothingLengthField
if (smoothingLengthReference_ < 0.0 && !smoothingLengthReferenceField_.headerOk())
{
smoothingLengthReferenceField_ = smoothingLengthField_;
}
checkFields(sSmoothField_);
@ -104,8 +160,8 @@ void constDiffSmoothing::smoothen(volScalarField& fieldSrc) const
sSmoothField.oldTime()=fieldSrc;
sSmoothField.oldTime().correctBoundaryConditions();
double deltaT = sSmoothField.mesh().time().deltaTValue();
DT_.value() = smoothingLength_.value() * smoothingLength_.value() / deltaT;
dimensionedScalar deltaT = sSmoothField.mesh().time().deltaT();
DT_ = smoothingLengthField_ * smoothingLengthField_ / deltaT;
// do smoothing
solve
@ -145,8 +201,8 @@ void constDiffSmoothing::smoothen(volVectorField& fieldSrc) const
vSmoothField.oldTime()=fieldSrc;
vSmoothField.oldTime().correctBoundaryConditions();
double deltaT = vSmoothField_.mesh().time().deltaTValue();
DT_.value() = smoothingLength_.value() * smoothingLength_.value() / deltaT;
dimensionedScalar deltaT = vSmoothField.mesh().time().deltaT();
DT_ = smoothingLengthField_ * smoothingLengthField_ / deltaT;
// do smoothing
solve
@ -183,9 +239,8 @@ void constDiffSmoothing::smoothenReferenceField(volVectorField& fieldSrc) const
double sourceStrength = 1e5; //large number to keep reference values constant
dimensionedScalar deltaT = vSmoothField.mesh().time().deltaT();
DT_.value() = smoothingLengthReferenceField_.value()
* smoothingLengthReferenceField_.value() / deltaT.value();
DT_ = smoothingLengthReferenceField_ * smoothingLengthReferenceField_ / deltaT;
tmp<volScalarField> NLarge
(
new volScalarField

View File

@ -60,11 +60,25 @@ class constDiffSmoothing
private:
dictionary propsDict_;
const scalar lowerLimit_;
const scalar upperLimit_;
const dimensionedScalar smoothingLength_;
dimensionedScalar smoothingLengthReferenceField_;
mutable dimensionedScalar DT_;
const scalar smoothingLength_;
const scalar smoothingLengthReference_;
word smoothingLengthFieldName_;
mutable volScalarField smoothingLengthField_;
word smoothingLengthReferenceFieldName_;
mutable volScalarField smoothingLengthReferenceField_;
mutable volScalarField DT_;
const bool verbose_;
public:

View File

@ -50,31 +50,9 @@ SyamlalThermCond::SyamlalThermCond
:
thermCondModel(dict,sm),
propsDict_(dict.subDict(typeName + "Props")),
thermCondFieldName_(propsDict_.lookupOrDefault<word>("thermCondFieldName","thCond")),
thermCondField_(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> (thermCondFieldName_))),
voidfractionFieldName_(propsDict_.lookupOrDefault<word>("voidfractionFieldName","voidfraction")),
voidfraction_(sm.mesh().lookupObject<volScalarField> (voidfractionFieldName_)),
wallQFactorName_(propsDict_.lookupOrDefault<word>("wallQFactorName","wallQFactor")),
wallQFactor_
( IOobject
(
wallQFactorName_,
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
sm.mesh(),
dimensionedScalar("zero", dimensionSet(0,0,0,0,0,0,0), 1.0)
),
hasWallQFactor_(false)
{
if (wallQFactor_.headerOk())
{
Info << "Found field for scaling wall heat flux.\n" << endl;
hasWallQFactor_ = true;
}
}
voidfraction_(sm.mesh().lookupObject<volScalarField> (voidfractionFieldName_))
{}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
@ -95,13 +73,7 @@ void SyamlalThermCond::calcThermCond()
thermCondField_.correctBoundaryConditions();
// if a wallQFactor field is present, use it to scale heat transport through a patch
if (hasWallQFactor_)
{
wallQFactor_.correctBoundaryConditions();
forAll(wallQFactor_.boundaryField(), patchi)
thermCondField_.boundaryFieldRef()[patchi] *= wallQFactor_.boundaryField()[patchi];
}
calcBoundaryCorrections();
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -53,21 +53,10 @@ private:
dictionary propsDict_;
word thermCondFieldName_;
volScalarField& thermCondField_;
word voidfractionFieldName_;
const volScalarField& voidfraction_;
word wallQFactorName_;
// ratio of half-cell-size and near-wall film
mutable volScalarField wallQFactor_;
bool hasWallQFactor_;
public:
//- Runtime type information

View File

@ -55,21 +55,7 @@ ZehnerSchluenderThermCond::ZehnerSchluenderThermCond
voidfractionFieldName_(propsDict_.lookupOrDefault<word>("voidfractionFieldName","voidfraction")),
voidfraction_(sm.mesh().lookupObject<volScalarField> (voidfractionFieldName_)),
typeKs_(propsDict_.lookupOrDefault<scalarList>("thermalConductivities",scalarList(1,-1.0))),
partKs_(NULL),
wallQFactorName_(propsDict_.lookupOrDefault<word>("wallQFactorName","wallQFactor")),
wallQFactor_
( IOobject
(
wallQFactorName_,
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::AUTO_WRITE
),
sm.mesh(),
dimensionedScalar("zero", dimensionSet(0,0,0,0,0,0,0), 1.0)
),
hasWallQFactor_(false)
partKs_(NULL)
{
if (typeKs_[0] < 0.0)
{
@ -81,12 +67,6 @@ ZehnerSchluenderThermCond::ZehnerSchluenderThermCond
{
partKsField_ *= typeKs_[0];
}
if (wallQFactor_.headerOk())
{
Info << "Found field for scaling wall heat flux.\n" << endl;
hasWallQFactor_ = true;
}
}

View File

@ -66,13 +66,6 @@ private:
mutable double **partKs_;
word wallQFactorName_;
// ratio of half-cell-size and near-wall film
mutable volScalarField wallQFactor_;
bool hasWallQFactor_;
void allocateMyArrays() const;
void calcPartKsField() const;

View File

@ -47,6 +47,7 @@ thermCondModel::thermCondModel
:
dict_(dict),
particleCloud_(sm),
mesh_(particleCloud_.mesh()),
transportProperties_
(
IOobject
@ -58,8 +59,72 @@ thermCondModel::thermCondModel
IOobject::NO_WRITE
)
),
kf0_(transportProperties_.lookup("kf"))
{}
kf0_(transportProperties_.lookup("kf")),
thermCondField_(const_cast<volScalarField&>(sm.mesh().lookupObject<volScalarField> ("thCond"))),
wallQFactor_
( IOobject
(
"wallQFactor",
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
sm.mesh(),
dimensionedScalar("zero", dimensionSet(0,0,0,0,0,0,0), 1.0)
),
hasWallQFactor_(false),
wallBoundaryLayerThickness_
( IOobject
(
"wallBoundaryLayerThickness",
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
sm.mesh(),
dimensionedScalar("zero", dimensionSet(0,1,0,0,0,0,0), 1.0)
),
hasWallBoundaryLayerThickness_(false),
wallHeatLoss_
( IOobject
(
"wallHeatLoss",
sm.mesh().time().timeName(),
sm.mesh(),
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE
),
sm.mesh(),
dimensionedScalar("zero", dimensionSet(1,0,-3,-1,0,0,0), 0.0)
),
hasWallHeatLoss_(false)
{
if (wallQFactor_.headerOk())
{
Info << "Found field for scaling wall heat flux.\n" << endl;
hasWallQFactor_ = true;
wallQFactor_.writeOpt() = IOobject::AUTO_WRITE;
}
if (wallBoundaryLayerThickness_.headerOk())
{
Info << "Found field near-wall boundary layer thickness.\n" << endl;
hasWallBoundaryLayerThickness_ = true;
wallBoundaryLayerThickness_.writeOpt() = IOobject::AUTO_WRITE;
}
if (wallHeatLoss_.headerOk())
{
Info << "Found field for wall heat loss.\n" << endl;
hasWallHeatLoss_ = true;
wallHeatLoss_.writeOpt() = IOobject::AUTO_WRITE;
}
if (hasWallQFactor_ + hasWallBoundaryLayerThickness_ + hasWallHeatLoss_ > 1)
{
FatalError << "thermCondModel: cannot use more than one option of wallQFactor, wallBoundaryLayerThickness and wallHeatLoss." << abort(FatalError);
}
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
@ -67,8 +132,54 @@ thermCondModel::thermCondModel
thermCondModel::~thermCondModel()
{}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Apply at most one of the three optional near-wall corrections to the
// boundary values of thermCondField_. The constructor guarantees that no
// more than one of the has* flags is set.
void thermCondModel::calcBoundaryCorrections()
{
// nothing to do when none of the optional wall fields was found on disk
if(!hasWallQFactor_ && !hasWallHeatLoss_ && !hasWallBoundaryLayerThickness_) return;
const fvPatchList& patches = mesh_.boundary();
// if a wallQFactor field is present, use it to scale heat transport through a patch
if (hasWallQFactor_)
{
wallQFactor_.correctBoundaryConditions();
forAll(patches, patchi)
{
const fvPatch& curPatch = patches[patchi];
forAll(curPatch, facei)
{
// scale the adjacent-cell conductivity by the per-face factor
label faceCelli = curPatch.faceCells()[facei];
thermCondField_.boundaryFieldRef()[patchi][facei] = thermCondField_[faceCelli]*wallQFactor_.boundaryField()[patchi][facei];
}
}
}
else if (hasWallHeatLoss_)
{
wallHeatLoss_.correctBoundaryConditions();
forAll(patches, patchi)
{
// convert the prescribed wall heat loss to an effective conductivity
// by dividing by deltaCoeffs (1/distance to the wall-adjacent cell);
// since not explicitly looped over all faces, need to use == to force assignment
thermCondField_.boundaryFieldRef()[patchi] == wallHeatLoss_.boundaryField()[patchi] / mesh_.deltaCoeffs().boundaryField()[patchi];
}
}
else if (hasWallBoundaryLayerThickness_)
{
wallBoundaryLayerThickness_.correctBoundaryConditions();
forAll(patches, patchi)
{
const fvPatch& curPatch = patches[patchi];
forAll(curPatch, facei)
{
// reduce conductivity as if the heat had to cross an unresolved
// boundary layer of the given thickness (thickness * deltaCoeffs
// is the layer thickness relative to the near-wall cell distance)
label faceCelli = curPatch.faceCells()[facei];
thermCondField_.boundaryFieldRef()[patchi][facei] = thermCondField_[faceCelli] /
(wallBoundaryLayerThickness_.boundaryField()[patchi][facei] * mesh_.deltaCoeffs().boundaryField()[patchi][facei]);
}
}
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

View File

@ -19,6 +19,12 @@ License
Description
model for thermal conductivity of fluid phase in presence of particles
special treatment for near-wall behavior:
a) wallQFactor can be used to impose scaling factor for thermal conductivity at the boundaries,
e.g. to model a thin boundary layer below grid resolution
b) explicitly specify a boundary layer thickness
c) specify given heat loss in W / (m K)
ATTENTION: if zeroGradient BCs are used in thCondField, they cannot be overwritten with numerical data
Class
thermCondModel
@ -53,10 +59,29 @@ protected:
cfdemCloud& particleCloud_;
const fvMesh& mesh_;
IOdictionary transportProperties_;
dimensionedScalar kf0_;
volScalarField& thermCondField_;
// ratio of half-cell-size and near-wall film
mutable volScalarField wallQFactor_;
bool hasWallQFactor_;
// thickness of near-wall boundary layer, can be used instead of Q factor
mutable volScalarField wallBoundaryLayerThickness_;
bool hasWallBoundaryLayerThickness_;
// heat loss through the near-wall layer, can be used instead of Q factor or layer thickness
mutable volScalarField wallHeatLoss_;
bool hasWallHeatLoss_;
public:
//- Runtime type information
@ -105,6 +130,8 @@ public:
virtual void calcThermCond() {}
virtual void calcBoundaryCorrections();
};

View File

@ -261,9 +261,9 @@ void trilinearVoidFraction::setvoidFraction(double** const& mask,double**& voidf
// find x,y,z
// TODO changes needed here when generalized for quader cells
offsetOrigin = particleCloud_.mesh().C()[i000] - (partPos + posShift);
x = mag(offsetOrigin[0]) / cellLength_;
y = mag(offsetOrigin[1]) / cellLength_;
z = mag(offsetOrigin[2]) / cellLength_;
x = std::min(1.0, mag(offsetOrigin[0]) / cellLength_);
y = std::min(1.0, mag(offsetOrigin[1]) / cellLength_);
z = std::min(1.0, mag(offsetOrigin[2]) / cellLength_);
// calculate the mapping coeffs
C000 = (1 - x) * (1 - y) * (1 - z);
@ -350,12 +350,6 @@ void trilinearVoidFraction::setvoidFraction(double** const& mask,double**& voidf
alphaLimited = true;
}
if(index==0 && alphaLimited) Info<<"alpha limited to" <<alphaMin_<<endl;*/
// store voidFraction for each particle
voidfractions[index][0] = voidfractionNext_[cellI];
// store cellweight for each particle - this should not live here
particleWeights[index][0] = 1.;
}
}
voidfractionNext_.correctBoundaryConditions();
@ -363,23 +357,18 @@ void trilinearVoidFraction::setvoidFraction(double** const& mask,double**& voidf
// bring voidfraction from Eulerian Field to particle array
for (int index = 0; index < particleCloud_.numberOfParticles(); ++index)
{
label cellID = particleCloud_.cellIDs()[index][0];
for (int subcell = 0; subcell < maxCellsPerParticle_; ++subcell)
{
label cellID = particleCloud_.cellIDs()[index][subcell];
if (cellID >= 0)
{
voidfractions[index][0] = voidfractionNext_[i000];
voidfractions[index][1] = voidfractionNext_[i100];
voidfractions[index][2] = voidfractionNext_[i110];
voidfractions[index][3] = voidfractionNext_[i010];
voidfractions[index][4] = voidfractionNext_[i001];
voidfractions[index][5] = voidfractionNext_[i101];
voidfractions[index][6] = voidfractionNext_[i111];
voidfractions[index][7] = voidfractionNext_[i011];
}
else
{
for (int i = 0; i < 8; ++i)
voidfractions[index][i] = -1.;
if (cellID >= 0)
{
voidfractions[index][subcell] = voidfractionNext_[cellID];
}
else
{
voidfractions[index][subcell] = -1.;
}
}
}
}

View File

@ -6,6 +6,8 @@ sinclude $(RULES)/mplib$(WM_MPLIB)
GIT_VERSION := $(shell git describe --dirty --always --tags)
PFLAGS+= -DGITVERSION=\"$(GIT_VERSION)\"
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
PFLAGS+= -Dcompre

View File

@ -12,11 +12,10 @@ recNorm/readNorm/readNorm.C
recNorm/sqrDiffNorm/sqrDiffNorm.C
recNorm/noRecNorm/noRecNorm.C
recPath/recPath/recPath.C
recPath/recPath/newRecPath.C
recPath/simpleRandomPath/simpleRandomPath.C
recPath/recPath/newRecPath.C
recPath/noPath/noPath.C
recPath/MarkovPath/MarkovPath.C
recPath/multiIntervalPath/multiIntervalPath.C
recPath/predefinedPath/predefinedPath.C
recStatAnalysis/recStatAnalysis/recStatAnalysis.C
recStatAnalysis/recStatAnalysis/newRecStatAnalysis.C

View File

@ -1,6 +1,9 @@
GIT_VERSION := $(shell git describe --abbrev=4 --dirty --always --tags)
PFLAGS+= -DGITVERSION=\"$(GIT_VERSION)\"
FOAM_VERSION_MAJOR := $(word 1,$(subst ., ,$(WM_PROJECT_VERSION)))
PFLAGS+= -DOPENFOAM_VERSION_MAJOR=$(FOAM_VERSION_MAJOR)
include $(CFDEM_ADD_LIBS_DIR)/additionalLibs
EXE_INC = \

View File

@ -83,14 +83,20 @@ recBase::recBase
),
couplingSubStep_(recProperties_.lookupOrDefault<label>("couplingSubStep",0))
{
recModel_ -> readFieldSeries();
recNorm_ -> computeRecMatrix();
recPath_ -> getRecPath();
recModel_ -> readFieldSeries();
if (!recStatAnalysis_->suppressMatrixAndPath())
{
recNorm_ -> computeRecMatrix();
recPath_ -> getRecPath();
}
recModel_ -> init();
recModel_ -> writeRecMatrix();
recModel_ -> writeRecPath();
recModel_ -> init();
if (!recStatAnalysis_->suppressMatrixAndPath())
{
recModel_ -> writeRecMatrix();
recModel_ -> writeRecPath();
}
}
recBase::recBase
(

View File

@ -226,7 +226,11 @@ scalar gerhardsRecModel::checkTimeStep()
}
// set deltaT
#if OPENFOAM_VERSION_MAJOR < 6
recTime.setDeltaT(dtCur, false);
#else
recTime.setDeltaT(dtCur);
#endif
if (verbose_)
{
@ -296,7 +300,11 @@ void gerhardsRecModel::readFieldSeries()
for OpenFOAM versions prior to OpenFOAM-5.0
Do this the other way around for OpenFOAM-5.0 and potentially later versions
*/
#if OPENFOAM_VERSION_MAJOR < 5
if (! header.headerOk())
#else
if (! header.typeHeaderOk<volScalarField>(true))
#endif
{
FatalError
<< "Field " << volScalarFieldNames_[i] << " not found"
@ -320,7 +328,11 @@ void gerhardsRecModel::readFieldSeries()
for OpenFOAM versions prior to OpenFOAM-5.0
Do this the other way around for OpenFOAM-5.0 and potentially later versions
*/
#if OPENFOAM_VERSION_MAJOR < 5
if (! header.headerOk())
#else
if (! header.typeHeaderOk<volVectorField>(true))
#endif
{
FatalError
<< "Field " << volVectorFieldNames_[i] << " not found"
@ -344,7 +356,11 @@ void gerhardsRecModel::readFieldSeries()
for OpenFOAM versions prior to OpenFOAM-5.0
Do this the other way around for OpenFOAM-5.0 and potentially later versions
*/
#if OPENFOAM_VERSION_MAJOR < 5
if (! header.headerOk())
#else
if (! header.typeHeaderOk<surfaceScalarField>(true))
#endif
{
FatalError
<< "Field " << surfaceScalarFieldNames_[i] << " not found"
@ -851,6 +867,27 @@ const surfaceScalarField& gerhardsRecModel::exportSurfaceScalarField(word fieldn
}
// Export the stored snapshot series of a volScalarField by name.
// Returns a non-const reference to the internal list, so callers can read
// or destructively transfer() the recorded snapshots.
PtrList<volScalarField>& gerhardsRecModel::exportVolScalarFieldList(word fieldname)
{
    // resolve the field name to its slot in the internal storage
    const label fieldI = getVolScalarFieldIndex(fieldname);
    return volScalarFieldList_[fieldI];
}
// Export the stored snapshot series of a volVectorField by name.
// Returns a non-const reference to the internal list, so callers can read
// or destructively transfer() the recorded snapshots.
PtrList<volVectorField>& gerhardsRecModel::exportVolVectorFieldList(word fieldname)
{
    // resolve the field name to its slot in the internal storage
    const label fieldI = getVolVectorFieldIndex(fieldname);
    return volVectorFieldList_[fieldI];
}
// Export the stored snapshot series of a surfaceScalarField by name.
// Returns a non-const reference to the internal list, so callers can read
// or destructively transfer() the recorded snapshots.
PtrList<surfaceScalarField>& gerhardsRecModel::exportSurfaceScalarFieldList(word fieldname)
{
    // resolve the field name to its slot in the internal storage
    const label fieldI = getSurfaceScalarFieldIndex(fieldname);
    return surfaceScalarFieldList_[fieldI];
}
SymmetricSquareMatrix<scalar>& gerhardsRecModel::recurrenceMatrix()

View File

@ -100,9 +100,9 @@ protected:
scalar checkTimeStep();
inline label getVolScalarFieldIndex(word, label) const;
inline label getVolVectorFieldIndex(word, label) const;
inline label getSurfaceScalarFieldIndex(word, label) const;
inline label getVolScalarFieldIndex(word, label=0) const;
inline label getVolVectorFieldIndex(word, label=0) const;
inline label getSurfaceScalarFieldIndex(word, label=0) const;
void readFieldSeries();
@ -143,7 +143,11 @@ public:
const volVectorField& exportVolVectorField(word, label);
const surfaceScalarField& exportSurfaceScalarField(word, label);
// tmp<surfaceScalarField> exportAveragedSurfaceScalarField(word, scalar, label index = -1);
PtrList<volScalarField>& exportVolScalarFieldList(word);
PtrList<volVectorField>& exportVolVectorFieldList(word);
PtrList<surfaceScalarField>& exportSurfaceScalarFieldList(word);
// tmp<surfaceScalarField> exportAveragedSurfaceScalarField(word, scalar, label index = -1);
void exportAveragedVolVectorField(volVectorField&, word, scalar, label index = -1) const;
SymmetricSquareMatrix<scalar>& recurrenceMatrix();

View File

@ -92,7 +92,7 @@ MarkovPath::MarkovPath
}
intervalSizesCumulative_[i] = sum1;
}
// check if meanIntervalSteps and correlationSteps are reasonable
label critLength = meanIntervalSteps_ + 2 * correlationSteps_;
for(int i=0;i<numIntervals_;i++)
@ -150,7 +150,7 @@ void MarkovPath::computeRecPath()
void MarkovPath::extendPath()
{
Random ranGen(osRandomInteger());
Random ranGen(clock::getTime()+pid());
SymmetricSquareMatrix<scalar>& recurrenceMatrix( base_.recM().recurrenceMatrix() );

View File

@ -1,194 +0,0 @@
/*---------------------------------------------------------------------------*\
CFDEMcoupling academic - Open Source CFD-DEM coupling
Contributing authors:
Thomas Lichtenegger
Copyright (C) 2015- Johannes Kepler University, Linz
-------------------------------------------------------------------------------
License
This file is part of CFDEMcoupling academic.
CFDEMcoupling academic is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CFDEMcoupling academic is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with CFDEMcoupling academic. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "error.H"
#include "multiIntervalPath.H"
#include "Random.H"
#include "recModel.H"
#include "addToRunTimeSelectionTable.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
defineTypeNameAndDebug(multiIntervalPath, 0);
addToRunTimeSelectionTable
(
recPath,
multiIntervalPath,
dictionary
);
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from components
// Construct from components.
// Reads meanIntervalSteps and intervalWeights from the multiIntervalPathProps
// sub-dictionary, queries the recurrence model for the interval layout,
// normalizes the weights, and precomputes cumulative sums and the per-step
// jump probability Pjump_.
multiIntervalPath::multiIntervalPath
(
    const dictionary& dict,
    recBase& base
)
:
    recPath(dict, base),
    propsDict_(dict.subDict(typeName + "Props")),
    meanIntervalSteps_(propsDict_.lookupOrDefault<label>("meanIntervalSteps",-1)),
    numIntervals_(base.recM().numIntervals()),
    intervalSizes_(numIntervals_),
    intervalSizesCumulative_(numIntervals_),
    Pjump_(0.0),
    intervalWeights_(propsDict_.lookupOrDefault<scalarList>("intervalWeights",scalarList(numIntervals_,1.0))),
    intervalWeightsCumulative_(intervalWeights_)
{
    // fetch the interval sizes first and accumulate the weight normalization;
    // intervalSizes_ is needed below for the meanIntervalSteps_ default
    scalar wsum = 0.0;
    for(int i=0;i<numIntervals_;i++)
    {
        intervalSizes_[i] = base.recM().numRecFields(i);
        wsum += intervalWeights_[i];
    }

    if(meanIntervalSteps_<0)
    {
        // if no mean interval length for consecutive steps is specified, use 1/5 from first interval
        // BUGFIX: this was previously evaluated before intervalSizes_ was
        // filled, i.e. on uninitialized data; the size query now happens above
        meanIntervalSteps_ = static_cast<label>(0.2 * intervalSizes_[0]);
    }

    // normalize weights so they sum to one
    for(int i=0;i<numIntervals_;i++)
    {
        intervalWeights_[i] /= wsum;
    }

    // cumulative sums of weights and sizes via running totals (O(N))
    scalar weightSum = 0.0;
    label sizeSum = 0;
    for(int i=0;i<numIntervals_;i++)
    {
        weightSum += intervalWeights_[i];
        sizeSum += intervalSizes_[i];
        intervalWeightsCumulative_[i] = weightSum;
        intervalSizesCumulative_[i] = sizeSum;
    }

    // given a jump probability of P, the probability of finding a chain of length N is
    // P(N) = (1 - P)^N * P, and the mean length E(N) = (1 - P) / P
    Pjump_ = 1.0 / (1 + meanIntervalSteps_);
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
// Empty destructor: all members release their own resources.
multiIntervalPath::~multiIntervalPath()
{}
// * * * * * * * * * * * * * protected Member Functions * * * * * * * * * * * * //
// Assemble the recurrence path as a list of (start,end) index pairs in
// virtualTimeIndexList_. The path is a biased random walk through the
// recurrence database: mostly consecutive steps, with occasional jumps
// (probability Pjump_ per step) to the most similar state in a randomly
// chosen interval, weighted by intervalWeightsCumulative_.
void multiIntervalPath::computeRecPath()
{
    Info << "\nComputing recurrence path\n" << endl;

    // seed from OS entropy so each run produces a different path
    Random ranGen(osRandomInteger());

    label virtualTimeIndex=0;    // current position in the database
    label recSteps=0;            // number of path steps generated so far
    label seqStart=0;            // first index of the current consecutive sequence
    bool prevStepWasJump = true; // forces a consecutive step right after a jump

    SymmetricSquareMatrix<scalar>& recurrenceMatrix( base_.recM().recurrenceMatrix() );

    // degenerate database: a single snapshot, nothing to compute
    if(base_.recM().totRecSteps() == 1)
    {
        Info << "\nPrimitive recurrence path with one element.\n" << endl;
        return;
    }

    while(recSteps <= base_.recM().totRecSteps() )
    {
        scalar randJump = ranGen.scalar01();

        // check if current virtualTimeIndex is close to separation time
        // (the last index of any interval is a border and must not be crossed)
        bool intervalBorder = false;
        label sep = 0;
        for(int i = 0;i < numIntervals_; i++)
        {
            sep += intervalSizes_[i];
            if (sep - 1 == virtualTimeIndex) intervalBorder=true;
        }

        if ((randJump > Pjump_ && !intervalBorder) || prevStepWasJump)
        {
            // consecutive step: advance by one snapshot
            virtualTimeIndex++;
            prevStepWasJump = false;
        }
        else
        {
            // before jump, complete former consecutive interval
            labelPair seqStartEnd(seqStart,virtualTimeIndex);
            virtualTimeIndexList_.append(seqStartEnd);
            recSteps += virtualTimeIndex - seqStart + 1;

            // now jump
            // identify interval to jump to by sampling the cumulative weights
            scalar randInterval = ranGen.scalar01();
            label interval = numIntervals_-1;
            for(int i = numIntervals_-2 ;i >= 0; i--)
            {
                if (randInterval < intervalWeightsCumulative_[i]) interval=i;
            }

            // candidate range within the chosen interval; the upper end is
            // shortened by meanIntervalSteps_ so the new sequence has room
            label startLoop = 0;
            if (interval > 0) startLoop = intervalSizesCumulative_[interval-1];
            label endLoop = intervalSizesCumulative_[interval] - meanIntervalSteps_;

            // pick the candidate with the smallest recurrence-matrix entry
            // (most similar state), skipping indices within meanIntervalSteps_
            // of the current position
            scalar nextMinimum(GREAT);
            for (label j = startLoop; j <= endLoop; j++)
            {
                if(abs(j - virtualTimeIndex) < meanIntervalSteps_) continue;
                if (recurrenceMatrix[j][virtualTimeIndex] < nextMinimum)
                {
                    nextMinimum = recurrenceMatrix[j][virtualTimeIndex];
                    // continue from the snapshot after the best match
                    seqStart = j+1;
                }
            }
            virtualTimeIndex = seqStart;
            prevStepWasJump = true;
        }
    }

    Info << "\nComputing recurrence path done\n" << endl;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

View File

@ -1,103 +0,0 @@
/*---------------------------------------------------------------------------*\
CFDEMcoupling academic - Open Source CFD-DEM coupling
Contributing authors:
Thomas Lichtenegger
Copyright (C) 2015- Johannes Kepler University, Linz
-------------------------------------------------------------------------------
License
This file is part of CFDEMcoupling academic.
CFDEMcoupling academic is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CFDEMcoupling academic is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with CFDEMcoupling academic. If not, see <http://www.gnu.org/licenses/>.
Description
A recurrence database consisting of N separate intervals is assumed with separation
times t0 (start time), t1, ... tN (end time) and weights w0, ... wN-1.
\*---------------------------------------------------------------------------*/
#ifndef multiIntervalPath_H
#define multiIntervalPath_H
#include "recPath.H"
#include "scalarList.H"
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class multiIntervalPath Declaration
\*---------------------------------------------------------------------------*/
class multiIntervalPath
:
    public recPath
{
protected:

    // Protected data

        //- sub-dictionary "multiIntervalPathProps" holding user settings
        dictionary propsDict_;

        //- build the recurrence path (implements the recPath interface)
        void computeRecPath();

        //- mean number of consecutive steps between jumps; if not given,
        //  defaults to 1/5 of the first interval's size (see constructor)
        label meanIntervalSteps_;

        //- number of separate intervals in the recurrence database
        label numIntervals_;

        //- number of snapshots in each interval
        labelList intervalSizes_;

        //- cumulative (running) sum of intervalSizes_
        labelList intervalSizesCumulative_;

        //- per-step jump probability, 1/(1 + meanIntervalSteps_)
        scalar Pjump_;

        //- relative weight of each interval when choosing a jump target
        scalarList intervalWeights_;

        //- cumulative (running) sum of the normalized intervalWeights_
        scalarList intervalWeightsCumulative_;

public:

    //- Runtime type information
    TypeName("multiIntervalPath");

    // Constructors

        //- Construct from components
        multiIntervalPath
        (
            const dictionary& dict,
            recBase& base
        );

    // Destructor

        virtual ~multiIntervalPath();

    // Member Functions

};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -69,15 +69,18 @@ void simpleRandomPath::computeRecPath()
{
Info << "\nComputing recurrence path\n" << endl;
Random ranGen(osRandomInteger());
Random ranGen(clock::getTime()+pid());
label virtualTimeIndex = 0;
label recSteps = 0;
label seqStart = 0;
label lowerSeqLim( base_.recM().lowerSeqLim() );
label upperSeqLim( base_.recM().upperSeqLim() );
#if OPENFOAM_VERSION_MAJOR < 6
label seqLength = ranGen.integer(lowerSeqLim, upperSeqLim);
#else
label seqLength = ranGen.sampleAB(lowerSeqLim, upperSeqLim);
#endif
virtualTimeIndex = seqEnd(seqStart,seqLength);
labelPair seqStartEnd(seqStart,virtualTimeIndex);
@ -123,7 +126,11 @@ void simpleRandomPath::computeRecPath()
}
}
#if OPENFOAM_VERSION_MAJOR < 6
seqLength = ranGen.integer(lowerSeqLim, upperSeqLim);
#else
seqLength = ranGen.sampleAB(lowerSeqLim, upperSeqLim);
#endif
virtualTimeIndex = seqEnd(seqStart,seqLength);
labelPair seqStartEnd(seqStart,virtualTimeIndex);
virtualTimeIndexList_.append(seqStartEnd);

View File

@ -73,7 +73,8 @@ autocorrelation::autocorrelation
),
base.mesh(),
dimensionedScalar("zero", dimensionSet(0,0,0,0,0), 0.0)
)
),
suppressMatrixAndPath_(propsDict_.lookupOrDefault<bool>("suppressMatrixAndPath",false))
{
if (fieldtype_ != "scalar" && fieldtype_ != "vector")
{
@ -108,31 +109,23 @@ void autocorrelation::statistics()
void autocorrelation::autocorr()
{
scalar res = 0.0;
PtrList<volScalarField> scalarFieldList;
PtrList<volVectorField> vectorFieldList;
if (fieldtype_ == "scalar") scalarFieldList.transfer(base_.recM().exportVolScalarFieldList(fieldname_));
else vectorFieldList.transfer(base_.recM().exportVolVectorFieldList(fieldname_));
if (fieldtype_ == "scalar") scalarFieldList_.transfer(base_.recM().exportVolScalarFieldList(fieldname_));
else vectorFieldList_.transfer(base_.recM().exportVolVectorFieldList(fieldname_));
label tmax = base_.recM().totRecSteps();
for (label ti = delaySteps_; ti < tmax; ti++)
{
forAll(autoCorrField_, cellI)
{
if (fieldtype_ == "scalar")
{
res = scalarFieldList[ti-delaySteps_][refCell_] * scalarFieldList[ti][cellI];
}
else
{
res = vectorFieldList[ti-delaySteps_][refCell_] & vectorFieldList[ti][cellI];
}
autoCorrField_[cellI] += res;
autoCorrField_[cellI] += autocorrSummand(ti-delaySteps_,ti,refCell_,cellI);
}
}
autoCorrField_ /= (tmax - delaySteps_);
autoCorrField_ -= meanProd();
if (normalize_)
{
volScalarField meanProd(autoCorrField_);
@ -159,17 +152,55 @@ void autocorrelation::autocorr()
dimensionSet fieldDim(0,0,0,0,0);
if (fieldtype_ == "scalar")
{
fieldDim.reset(scalarFieldList[0].dimensions());
fieldDim.reset(scalarFieldList_[0].dimensions());
}
else
{
fieldDim.reset(vectorFieldList[0].dimensions());
fieldDim.reset(vectorFieldList_[0].dimensions());
}
fieldDim = fieldDim * fieldDim;
fieldDim.reset(fieldDim * fieldDim);
autoCorrField_.dimensions().reset(fieldDim);
}
autoCorrField_.write();
}
// One summand of the autocorrelation sum: the product (scalar multiply or
// vector inner product, depending on fieldtype_) of the field value at
// (time t1, cell c1) with the value at (time t2, cell c2).
scalar autocorrelation::autocorrSummand(label t1, label t2, label c1, label c2)
{
    if (fieldtype_ == "scalar")
    {
        return scalarFieldList_[t1][c1] * scalarFieldList_[t2][c2];
    }

    // vector fields: contract with the inner product
    return vectorFieldList_[t1][c1] & vectorFieldList_[t2][c2];
}
// Product of the time-averaged field at each cell with its value at the
// reference cell; subtracted from the raw correlation sum to obtain the
// covariance. Initialized from autoCorrField_ to inherit mesh and metadata.
volScalarField autocorrelation::meanProd()
{
    volScalarField meanProd(autoCorrField_);

    if (fieldtype_ == "scalar")
    {
        // NOTE(review): export presumably returns the averaged field (or a
        // tmp thereof); constructed by value here — confirm a copy is intended
        const volScalarField aveField(base_.recM().exportVolScalarFieldAve(fieldname_));

        forAll(meanProd, cellI)
        {
            // BUGFIX: was aveField()[cellI] — aveField is declared as a
            // volScalarField, not a tmp, so it must be indexed directly
            meanProd[cellI] = aveField[cellI] * aveField[refCell_];
        }
    }
    else
    {
        const volVectorField aveField(base_.recM().exportVolVectorFieldAve(fieldname_));

        forAll(meanProd, cellI)
        {
            // inner product for vector fields
            meanProd[cellI] = aveField[cellI] & aveField[refCell_];
        }
    }

    return meanProd;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -85,9 +85,21 @@ private:
bool normalize_;
PtrList<volScalarField> scalarFieldList_;
PtrList<volVectorField> vectorFieldList_;
volScalarField autoCorrField_;
bool suppressMatrixAndPath_;
void autocorr();
scalar autocorrSummand(label, label, label, label);
volScalarField meanProd();
bool suppressMatrixAndPath() {return suppressMatrixAndPath_;}
};

View File

@ -102,6 +102,8 @@ public:
virtual void statistics() = 0;
virtual bool suppressMatrixAndPath() {return false;}
};

View File

@ -23,7 +23,7 @@ solvers
cAlpha 1;
}
pcorr
"pcorr.*"
{
solver PCG;
preconditioner

View File

@ -23,7 +23,7 @@ solvers
cAlpha 1;
}
pcorr
"pcorr.*"
{
solver PCG;
preconditioner

View File

@ -23,7 +23,7 @@ solvers
cAlpha 1;
}
pcorr
"pcorr.*"
{
solver PCG;
preconditioner

View File

@ -14,6 +14,9 @@ name outlet;
fields (alpha.air alpha.water alpha.oil);
operation average;
#includeEtc "caseDicts/postProcessing/surfaceRegion/patch.cfg"
// OF4
//#includeEtc "caseDicts/postProcessing/surfaceRegion/patch.cfg"
// OF5,OF6
#includeEtc "caseDicts/postProcessing/surfaceFieldValue/patch.cfg"
// ************************************************************************* //

View File

@ -0,0 +1,122 @@
/*---------------------------------------------------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 1.4 |
| \\ / A nd | Web: http://www.openfoam.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
root "";
case "";
instance "";
local "";
class dictionary;
object couplingProperties;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
//===========================================================================//
// sub-models & settings
syncMode false;
modelType "A";
couplingInterval 100;
voidFractionModel divided;
locateModel engineSearchMany2Many;
meshMotionModel noMeshMotion;
regionModel allRegion;
IOModel basicIO;
probeModel off;
dataExchangeModel twoWayOne2One;
averagingModel dense;
clockModel standardClock;
smoothingModel off;
forceModels
(
gradPForce
viscForce
KochHillDrag
);
momCoupleModels
(
implicitCouple
);
turbulenceModelType "turbulenceProperties";
//===========================================================================//
// sub-model properties
dividedProps
{
alphaMin 0.01;
scaleUpVol 1.0;
}
engineSearchMany2ManyProps
{
engineProps
{
treeSearch true;
}
}
twoWayOne2OneProps
{
liggghtsPath "../DEM/in.liggghts_run";
verbose true;
}
gradPForceProps
{
pFieldName "p";
voidfractionFieldName "voidfraction";
velocityFieldName "U";
interpolation true;
}
viscForceProps
{
velocityFieldName "U";
interpolation true;
}
KochHillDragProps
{
verbose true;
velFieldName "U";
voidfractionFieldName "voidfraction";
interpolation true;
implForceDEM true;
}
implicitCoupleProps
{
velFieldName "U";
granVelFieldName "Us";
voidfractionFieldName "voidfraction";
}
// ************************************************************************* //

View File

@ -0,0 +1,67 @@
# Pour granular particles into a cylinder, then induce flow
log ../DEM/log.liggghts
thermo_log ../DEM/post/thermo.txt
atom_style granular
atom_modify map array
communicate single vel yes
boundary m m m
newton off
units si
processors 2 2 1
# read the restart file
read_restart ../DEM/post/restart/liggghts.restart
neighbor 0.0005 bin
neigh_modify delay 0
# Material properties required for granular pair styles
fix m1 all property/global youngsModulus peratomtype 5.e6
fix m2 all property/global poissonsRatio peratomtype 0.45
fix m3 all property/global coefficientRestitution peratomtypepair 1 0.3
fix m4 all property/global coefficientFriction peratomtypepair 1 0.5
# pair style
pair_style gran model hertz tangential history
pair_coeff * *
# timestep, gravity
timestep 0.00001
fix gravi all gravity 9.81 vector 0.0 0.0 -1.0
# walls
fix zwalls1 all wall/gran model hertz tangential history primitive type 1 zplane 0.0
fix zwalls2 all wall/gran model hertz tangential history primitive type 1 zplane 0.0553
fix cylwalls all wall/gran model hertz tangential history primitive type 1 zcylinder 0.01385 0. 0.
# change the particles density
set group all density 2000
# cfd coupling
fix cfd all couple/cfd couple_every 100 one2one
fix cfd2 all couple/cfd/force/implicit
#fix cfd2 all couple/cfd/force/implicit/accumulated #CrankNicolson 0.5
# apply nve integration to all particles that are inserted as single particles
fix integr all nve/sphere
# center of mass
compute centerOfMass all com
# compute total dragforce
compute dragtotal all reduce sum f_dragforce[1] f_dragforce[2] f_dragforce[3]
# screen output
compute rke all erotate/sphere
thermo_style custom step atoms ke c_rke vol c_centerOfMass[3] c_dragtotal[1] c_dragtotal[2] c_dragtotal[3]
thermo 10
thermo_modify lost ignore norm no
compute_modify thermo_temp dynamic yes
dump dmp all custom 5000 ../DEM/post/dump*.liggghts_run id type x y z vx vy vz fx fy fz f_dragforce[1] f_dragforce[2] f_dragforce[3] radius
run 1

View File

@ -0,0 +1,16 @@
#!/bin/bash

# Switch the tutorial case to the one2one coupling variant and build the
# CFD mesh with blockMesh if it does not exist yet.

cp ./CFD/constant/couplingProperties.one2one ./CFD/constant/couplingProperties
cp ./DEM/in.liggghts_run.one2one ./DEM/in.liggghts_run

#- define variables
# BUGFIX: quote ${BASH_SOURCE[0]} and all later expansions so paths
# containing spaces do not break word splitting
casePath="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"

# check if mesh was built
if [ -f "$casePath/CFD/constant/polyMesh/boundary" ]; then
    echo "mesh was built before - using old mesh"
else
    echo "mesh needs to be built"
    # abort instead of running blockMesh in the wrong directory if cd fails
    cd "$casePath/CFD" || exit 1
    blockMesh
fi

View File

@ -14,6 +14,15 @@
"nprocs" : 4,
"pre_scripts" : ["prerun.sh"],
"post_scripts" : ["postrun.sh"]
},
{
"name" : "cfdemrun-one2one",
"depends_on" : "liggghts-init",
"solver" : "cfdemSolverPiso",
"type" : "CFDEMcoupling/mpi",
"nprocs" : 4,
"pre_scripts" : ["prerun_one2one.sh"],
"post_scripts" : ["postrun.sh"]
}
]
}

Some files were not shown because too many files have changed in this diff Show More