Compare commits

...

17 Commits

Author SHA1 Message Date
8862a0b3b2 Replaced all integer variables with labels, and all std containers with the corresponding OpenFOAM containers. 2023-02-13 16:16:07 +02:00
c7477885a5 This commit contains a new decomposition method which extends the capabilities of the multi-level decomposition method found within OpenFOAM, to allow a better control over the domain decomposition between the different nodes. 2023-02-13 16:16:07 +02:00
cc6ba5c1a0 Merge branch 'fix-ATC-extraConvection' into 'master'
BUG: extraConvection in ATC missing a multiplication with ATClimiter

See merge request Development/openfoam!591
2023-02-03 15:36:17 +00:00
26420a88d7 BUG: extraConvection in ATC missing a multiplication with ATClimiter
In the 'standard' and 'UaGradU' options for the ATC term of the adjoint
equations, there is an option to add 'artificial dissipation', by
adding and subtracting a multiple of the adjoint convection term with
different discretizations. The implicit part was not multiplied with the
ATClimiter whereas the explicit one was, leading to mismatched
contributions in the areas affected by the ATClimiter, which could
affect the sensitivity derivatives.
2023-02-03 15:35:49 +00:00
ff13cdd39d BUG: ensightWrite not reading "excludeFields" (fixes #2696)
- field blocking/exclusion added in commit d9ab5d54ef,
  but was incorrectly doing a lookup for "blockField" for ensight
  although "excludeFields" was documented (and expected).

  Now corrected to use "excludeFields"
2023-02-02 15:17:24 +01:00
988ec18ecc COMP: backslash instead of slash in Make/options 2023-01-30 11:55:35 +01:00
0339e5ee0d BUG: expression field functionObject 'store' keyword ignored 2023-01-26 21:49:59 +01:00
07c69fdf0d COMP: add static libgcc, libstdc++ linking for mingw (fixes #2680)
- this solves some 'dangling' dependency problems that plagued earlier
  versions (when MS-MPI was not also installed).
2023-01-24 18:21:05 +01:00
03ab6c1a9d COMP: remove commented Make/options item (#2668)
COMP: update include for CGAL-5.5 (#2665)

  old:  Robust_circumcenter_filtered_traits_3
  new:  Robust_weighted_circumcenter_filtered_traits_3

COMP: adjust CGAL rule for OSX (#2664)

- since CGAL is now header-only, the previous OSX-specific rules have
  become redundant
2023-01-23 09:39:30 +01:00
e62b031f26 BUG: Casson coefficients not re-read (fixes #2681) 2023-01-22 18:28:00 +01:00
33f0ff8145 BUG: limitVelocity: specify default entry for U (fixes #2679) 2023-01-13 15:33:19 +00:00
166164da93 BUG: vtk::coordSetWriter produces incorrect VTK legacy format (fixes #2678)
- has a special purpose beginPiece() method, but was missing an update
  on the numberOfPoints, numberOfCells values required by the base class.
2023-01-12 17:10:13 +01:00
1f68e7f9b3 CONFIG: bump patch level 2023-01-11 13:00:30 +01:00
e346620a62 BUG: inconsistent finiteArea proc-boundary contributions (fixes #2672) 2023-01-11 12:31:16 +01:00
df808ad3f2 BUG: overset: fix layerRelax input. Fixes #2670 2023-01-04 17:13:59 +00:00
51ed7a6034 TUT: Added simpleFoam/rotatingCylinders validation case - fixes #2563 2023-01-03 21:20:48 +00:00
0031cb1efa CONFIG: set API level to 2212 2022-12-28 09:36:06 +00:00
51 changed files with 1948 additions and 469 deletions

View File

@ -1,2 +1,2 @@
api=2212
patch=0
patch=230110

View File

@ -9,7 +9,7 @@ EXE_INC = \
-I$(LIB_SRC)/thermophysicalModels/basic/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/turbulenceModels/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/compressible/lnInclude \
-I$(LIB_SRC)/regionFaModels\lnInclude
-I$(LIB_SRC)/regionFaModels/lnInclude
EXE_LIBS = \
-lfiniteVolume \

View File

@ -7,7 +7,7 @@ EXE_INC = \
-I$(LIB_SRC)/thermophysicalModels/basic/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/turbulenceModels/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/compressible/lnInclude \
-I$(LIB_SRC)/regionFaModels\lnInclude
-I$(LIB_SRC)/regionFaModels/lnInclude
EXE_LIBS = \
-lfiniteVolume \

View File

@ -10,7 +10,7 @@ EXE_INC = \
-I$(LIB_SRC)/TurbulenceModels/compressible/lnInclude \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/dynamicFvMesh/lnInclude \
-I$(LIB_SRC)/regionFaModels\lnInclude
-I$(LIB_SRC)/regionFaModels/lnInclude
EXE_LIBS = \
-lfiniteVolume \

View File

@ -7,7 +7,7 @@ EXE_INC = \
-I$(LIB_SRC)/thermophysicalModels/radiation/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/turbulenceModels/lnInclude \
-I$(LIB_SRC)/TurbulenceModels/compressible/lnInclude \
-I$(LIB_SRC)/regionFaModels\lnInclude
-I$(LIB_SRC)/regionFaModels/lnInclude
EXE_LIBS = \
-lfiniteVolume \

View File

@ -19,7 +19,7 @@ EXE_INC = \
-I$(LIB_SRC)/TurbulenceModels/compressible/lnInclude \
-I$(LIB_SRC)/thermophysicalModels/radiation/lnInclude \
-I$(LIB_SRC)/regionModels/regionModel/lnInclude \
-I$(LIB_SRC)/regionFaModels\lnInclude
-I$(LIB_SRC)/regionFaModels/lnInclude
EXE_LIBS = \

View File

@ -8,7 +8,7 @@ EXE_INC = \
-I$(LIB_SRC)/transportModels/incompressible/singlePhaseTransportModel \
-I$(LIB_SRC)/dynamicMesh/lnInclude \
-I$(LIB_SRC)/dynamicFvMesh/lnInclude \
-I$(LIB_SRC)/regionFaModels\lnInclude
-I$(LIB_SRC)/regionFaModels/lnInclude
EXE_LIBS = \
-lfiniteVolume \

View File

@ -30,8 +30,8 @@ Description
\*---------------------------------------------------------------------------*/
#ifndef CGALTriangulation3DKernel_H
#define CGALTriangulation3DKernel_H
#ifndef Foam_CGALTriangulation3DKernel_H
#define Foam_CGALTriangulation3DKernel_H
// Silence boost bind deprecation warnings (before CGAL-5.2.1)
#include "CGAL/version.h"
@ -54,9 +54,19 @@ Description
// #include "CGAL/Robust_circumcenter_traits_3.h"
// typedef CGAL::Robust_circumcenter_traits_3<baseK> K;
#if defined(CGAL_VERSION_NR) && (CGAL_VERSION_NR < 1050500000)
// Prior to CGAL-5.5
#include "CGAL/Robust_circumcenter_filtered_traits_3.h"
typedef CGAL::Robust_circumcenter_filtered_traits_3<baseK> K;
#else
#include "CGAL/Robust_weighted_circumcenter_filtered_traits_3.h"
typedef CGAL::Robust_weighted_circumcenter_filtered_traits_3<baseK> K;
#endif
#else
// Very robust but expensive kernel

View File

@ -4,7 +4,6 @@ EXE_INC = \
-Wno-old-style-cast \
$(COMP_FLAGS) \
${CGAL_INC} \
-DCGAL_HEADER_ONLY \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/surfMesh/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
@ -12,7 +11,6 @@ EXE_INC = \
EXE_LIBS = \
/* ${CGAL_LIBS} */ \
-lfiniteVolume \
-lsurfMesh \
-lmeshTools \

View File

@ -1,23 +1,23 @@
## Getting the code
Links to all code packs are available on https://dl.openfoam.com. For OpenFOAM-v2206:
Links to all code packs are available on https://dl.openfoam.com. For OpenFOAM-v2212:
- https://dl.openfoam.com/source/latest/
- Source: https://dl.openfoam.com/source/v2206/OpenFOAM-v2206.tgz
- ThirdParty: https://dl.openfoam.com/source/v2206/ThirdParty-v2206.tgz
- Source: https://dl.openfoam.com/source/v2212/OpenFOAM-v2212.tgz
- ThirdParty: https://dl.openfoam.com/source/v2212/ThirdParty-v2212.tgz
## OpenFOAM&reg; Quick Build Guide
Prior to building, ensure that the [system requirements][link openfoam-require]
are satisfied (including any special [cross-compiling][wiki-cross-compile]
considerations), and source the correct OpenFOAM environment.
For example, for the OpenFOAM-v2206 version:
For example, for the OpenFOAM-v2212 version:
```
source <installation path>/OpenFOAM-v2206/etc/bashrc
source <installation path>/OpenFOAM-v2212/etc/bashrc
```
e.g. if installed under the `~/openfoam` directory
```
source ~/openfoam/OpenFOAM-v2206/etc/bashrc
source ~/openfoam/OpenFOAM-v2212/etc/bashrc
```

View File

@ -79,8 +79,8 @@ bool Foam::laminarModels::generalizedNewtonianViscosityModels::Casson::read
coeffs.readEntry("m", m_);
coeffs.readEntry("tau0", tau0_);
coeffs.readEntry("nuMin_", nuMin_);
coeffs.readEntry("nuMax_", nuMax_);
coeffs.readEntry("nuMin", nuMin_);
coeffs.readEntry("nuMax", nuMax_);
return true;
}

View File

@ -55,7 +55,6 @@ $(basicFaPatchFields)/transform/transformFaPatchScalarField.C
constraintFaPatchFields = $(faPatchFields)/constraint
$(constraintFaPatchFields)/empty/emptyFaPatchFields.C
$(constraintFaPatchFields)/processor/processorFaPatchFields.C
$(constraintFaPatchFields)/processor/processorFaPatchScalarField.C
$(constraintFaPatchFields)/wedge/wedgeFaPatchFields.C
$(constraintFaPatchFields)/wedge/wedgeFaPatchScalarField.C
$(constraintFaPatchFields)/cyclic/cyclicFaPatchFields.C

View File

@ -181,13 +181,6 @@ public:
// Coupled interface functionality
//- Transform given patch component field
virtual void transformCoupleField
(
solveScalarField& f,
const direction cmpt
) const = 0;
//- Update result field based on interface functionality
virtual void updateInterfaceMatrix
(

View File

@ -1,58 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 Wikki Ltd
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#ifndef transformFaPatchFieldsFwd_H
#define transformFaPatchFieldsFwd_H
#include "fieldTypes.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type> class transformFaPatchField;
// typedef transformFaPatchField<scalar> transformFaPatchScalarField;
// typedef transformFaPatchField<vector> transformFaPatchVectorField;
// typedef transformFaPatchField<tensor> transformFaPatchTensorField;
// template<class Type> class transformFaPatchField;
makeFaPatchTypeFieldTypedefs(transform)
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 Wikki Ltd
Copyright (C) 2019 OpenCFD Ltd.
Copyright (C) 2019-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -187,20 +187,7 @@ void Foam::cyclicFaPatchField<Type>::updateInterfaceMatrix
transformCoupleField(pnf, cmpt);
// Multiply the field by coefficients and add into the result
if (add)
{
forAll(faceCells, elemI)
{
result[faceCells[elemI]] += coeffs[elemI]*pnf[elemI];
}
}
else
{
forAll(faceCells, elemI)
{
result[faceCells[elemI]] -= coeffs[elemI]*pnf[elemI];
}
}
this->addToInternalField(result, !add, faceCells, coeffs, pnf);
}
@ -227,21 +214,11 @@ void Foam::cyclicFaPatchField<Type>::updateInterfaceMatrix
pnf[facei + sizeby2] = psiInternal[faceCells[facei]];
}
// Transform according to the transformation tensors
transformCoupleField(pnf);
// Multiply the field by coefficients and add into the result
if (add)
{
forAll(faceCells, elemI)
{
result[faceCells[elemI]] += coeffs[elemI]*pnf[elemI];
}
}
else
{
forAll(faceCells, elemI)
{
result[faceCells[elemI]] -= coeffs[elemI]*pnf[elemI];
}
}
this->addToInternalField(result, !add, faceCells, coeffs, pnf);
}

View File

@ -164,16 +164,6 @@ public:
// Coupled interface functionality
//- Transform neighbour field
virtual void transformCoupleField
(
solveScalarField& f,
const direction cmpt
) const
{
cyclicLduInterfaceField::transformCoupleField(f, cmpt);
}
//- Update result field based on interface functionality
virtual void updateInterfaceMatrix
(

View File

@ -130,13 +130,6 @@ Foam::processorFaPatchField<Type>::processorFaPatchField
{}
// * * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * //
template<class Type>
Foam::processorFaPatchField<Type>::~processorFaPatchField()
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
@ -203,6 +196,8 @@ void Foam::processorFaPatchField<Type>::initInterfaceMatrixUpdate
commsType,
this->patch().patchInternalField(psiInternal)()
);
const_cast<processorFaPatchField<Type>&>(*this).updatedMatrix() = false;
}
@ -219,32 +214,28 @@ void Foam::processorFaPatchField<Type>::updateInterfaceMatrix
const Pstream::commsTypes commsType
) const
{
if (this->updatedMatrix())
{
return;
}
const labelUList& faceCells = this->patch().edgeFaces();
solveScalarField pnf
(
procPatch_.receive<solveScalar>(commsType, this->size())()
procPatch_.receive<solveScalar>(commsType, this->size())
);
// Transform according to the transformation tensor
transformCoupleField(pnf, cmpt);
if (!std::is_arithmetic<Type>::value)
{
// Transform non-scalar data according to the transformation tensor
transformCoupleField(pnf, cmpt);
}
// Multiply the field by coefficients and add into the result
this->addToInternalField(result, !add, faceCells, coeffs, pnf);
const labelUList& edgeFaces = this->patch().edgeFaces();
if (add)
{
forAll(edgeFaces, elemI)
{
result[edgeFaces[elemI]] += coeffs[elemI]*pnf[elemI];
}
}
else
{
forAll(edgeFaces, elemI)
{
result[edgeFaces[elemI]] -= coeffs[elemI]*pnf[elemI];
}
}
const_cast<processorFaPatchField<Type>&>(*this).updatedMatrix() = true;
}
@ -265,6 +256,8 @@ void Foam::processorFaPatchField<Type>::initInterfaceMatrixUpdate
commsType,
this->patch().patchInternalField(psiInternal)()
);
const_cast<processorFaPatchField<Type>&>(*this).updatedMatrix() = false;
}
@ -280,29 +273,25 @@ void Foam::processorFaPatchField<Type>::updateInterfaceMatrix
const Pstream::commsTypes commsType
) const
{
if (this->updatedMatrix())
{
return;
}
const labelUList& faceCells = this->patch().edgeFaces();
Field<Type> pnf
(
procPatch_.receive<Type>(commsType, this->size())()
procPatch_.receive<Type>(commsType, this->size())
);
// Transform according to the transformation tensor
transformCoupleField(pnf);
// Multiply the field by coefficients and add into the result
this->addToInternalField(result, !add, faceCells, coeffs, pnf);
const labelUList& edgeFaces = this->patch().edgeFaces();
if (add)
{
forAll(edgeFaces, elemI)
{
result[edgeFaces[elemI]] += coeffs[elemI]*pnf[elemI];
}
}
else
{
forAll(edgeFaces, elemI)
{
result[edgeFaces[elemI]] -= coeffs[elemI]*pnf[elemI];
}
}
const_cast<processorFaPatchField<Type>&>(*this).updatedMatrix() = true;
}

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 Wikki Ltd
Copyright (C) 2019 OpenCFD Ltd.
Copyright (C) 2019-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -38,8 +38,8 @@ SourceFiles
\*---------------------------------------------------------------------------*/
#ifndef processorFaPatchField_H
#define processorFaPatchField_H
#ifndef Foam_processorFaPatchField_H
#define Foam_processorFaPatchField_H
#include "coupledFaPatchField.H"
#include "processorLduInterfaceField.H"
@ -139,9 +139,8 @@ public:
}
// Destructor
~processorFaPatchField();
//- Destructor
~processorFaPatchField() = default;
// Member functions
@ -169,17 +168,8 @@ public:
//- Return patch-normal gradient
virtual tmp<Field<Type>> snGrad() const;
// Coupled interface functionality
//- Transform neighbour field
virtual void transformCoupleField
(
solveScalarField& f,
const direction cmpt
) const
{
processorLduInterfaceField::transformCoupleField(f, cmpt);
}
// Coupled interface functionality
//- Initialise neighbour matrix update
virtual void initInterfaceMatrixUpdate

View File

@ -25,10 +25,10 @@ License
\*---------------------------------------------------------------------------*/
#ifndef processorFaPatchFields_H
#define processorFaPatchFields_H
#ifndef Foam_processorFaPatchFields_H
#define Foam_processorFaPatchFields_H
#include "processorFaPatchScalarField.H"
#include "processorFaPatchField.H"
#include "fieldTypes.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -1,52 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 Wikki Ltd
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#ifndef processorFaPatchFieldsFwd_H
#define processorFaPatchFieldsFwd_H
#include "fieldTypes.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type> class processorFaPatchField;
makeFaPatchTypeFieldTypedefs(processor)
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -1,100 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 Wikki Ltd
Copyright (C) 2019 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "processorFaPatchScalarField.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<>
void Foam::processorFaPatchField<Foam::scalar>::transformCoupleField
(
solveScalarField& f,
const direction cmpt
) const
{}
template<>
void Foam::processorFaPatchField<Foam::scalar>::initInterfaceMatrixUpdate
(
solveScalarField& result,
const bool add,
const lduAddressing& lduAddr,
const label patchId,
const solveScalarField& psiInternal,
const scalarField& coeffs,
const direction,
const Pstream::commsTypes commsType
) const
{
procPatch_.send
(
commsType,
patch().patchInternalField(psiInternal)()
);
}
template<>
void Foam::processorFaPatchField<Foam::scalar>::updateInterfaceMatrix
(
solveScalarField& result,
const bool add,
const lduAddressing& lduAddr,
const label patchId,
const solveScalarField&,
const scalarField& coeffs,
const direction,
const Pstream::commsTypes commsType
) const
{
solveScalarField pnf
(
procPatch_.receive<solveScalar>(commsType, this->size())()
);
const labelUList& edgeFaces = patch().edgeFaces();
if (add)
{
forAll(edgeFaces, facei)
{
result[edgeFaces[facei]] += coeffs[facei]*pnf[facei];
}
}
else
{
forAll(edgeFaces, facei)
{
result[edgeFaces[facei]] -= coeffs[facei]*pnf[facei];
}
}
}
// ************************************************************************* //

View File

@ -1,86 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 Wikki Ltd
Copyright (C) 2019 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#ifndef processorFaPatchScalarField_H
#define processorFaPatchScalarField_H
#include "processorFaPatchField.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<>
void processorFaPatchField<scalar>::transformCoupleField
(
solveScalarField& f,
const direction cmpt
) const;
template<>
void processorFaPatchField<scalar>::initInterfaceMatrixUpdate
(
solveScalarField& result,
const bool add,
const lduAddressing& lduAddr,
const label patchId,
const solveScalarField&,
const scalarField& coeffs,
const direction,
const Pstream::commsTypes commsType
) const;
template<>
void processorFaPatchField<scalar>::updateInterfaceMatrix
(
solveScalarField& result,
const bool add,
const lduAddressing& lduAddr,
const label patchId,
const solveScalarField&,
const scalarField& coeffs,
const direction,
const Pstream::commsTypes commsType
) const;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -389,7 +389,7 @@ bool Foam::functionObjects::fvExpressionField::read(const dictionary& dict)
}
autowrite_ = dict.getOrDefault("autowrite", false);
store_ = dict.getOrDefault("autowrite", true);
store_ = dict.getOrDefault("store", true);
// "dimensions" is optional
dimensions_.clear();

View File

@ -103,7 +103,7 @@ bool Foam::functionObjects::ensightWrite::readSelection(const dictionary& dict)
selectFields_.uniq();
blockFields_.clear();
dict.readIfPresent("blockFields", blockFields_);
dict.readIfPresent("excludeFields", blockFields_);
blockFields_.uniq();
// Actions to define selection

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2017 OpenFOAM Foundation
Copyright (C) 2018-2022 OpenCFD Ltd.
Copyright (C) 2018-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -71,7 +71,7 @@ Foam::fv::limitVelocity::limitVelocity
:
fv::cellSetOption(name, modelType, dict, mesh),
writeFile(mesh, name, typeName, dict, false),
UName_(word::null),
UName_("U"),
max_(0)
{
read(dict);

View File

@ -80,6 +80,19 @@ void Foam::vtk::coordSetWriter::beginPiece()
}
}
// Update sizes, similar to
// vtk::polyWriter::beginPiece(const pointField&, const edgeList&)
numberOfPoints_ = nLocalPoints_;
numberOfCells_ = nLocalLines_;
// if (parallel_)
// {
// reduce(numberOfPoints_, sumOp<label>());
// reduce(numberOfCells_, sumOp<label>());
// }
// Nothing else to do for legacy
if (legacy()) return;
@ -88,7 +101,7 @@ void Foam::vtk::coordSetWriter::beginPiece()
format().openTag
(
vtk::fileTag::PIECE,
vtk::fileAttr::NUMBER_OF_POINTS, nLocalPoints_
vtk::fileAttr::NUMBER_OF_POINTS, numberOfPoints_
);
if (nLocalVerts_)
{
@ -105,7 +118,7 @@ void Foam::vtk::coordSetWriter::beginPiece()
void Foam::vtk::coordSetWriter::writePoints()
{
this->beginPoints(nLocalPoints_);
this->beginPoints(numberOfPoints_); //<- same as nLocalPoints_
{
for (const pointField& pts : points_)

View File

@ -5,8 +5,8 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2007-2021 PCOpt/NTUA
Copyright (C) 2013-2021 FOSS GP
Copyright (C) 2007-2023 PCOpt/NTUA
Copyright (C) 2013-2023 FOSS GP
Copyright (C) 2019 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
@ -86,10 +86,9 @@ void ATCUaGradU::addATC(fvVectorMatrix& UaEqn)
if (extraConvection_ > 0)
{
// Implicit part added to increase diagonal dominance
// Note: Maybe this needs to be multiplied with the ATClimiter ??
UaEqn += extraConvection_*fvm::div(-phi, Ua);
UaEqn += ATClimiter_*extraConvection_*fvm::div(-phi, Ua);
// correct rhs due to implicitly augmenting the adjoint convection
// Correct rhs due to implicitly augmenting the adjoint convection
ATC_ += extraConvection_*(fvc::grad(UaForATC(), "gradUaATC")().T() & U);
}

View File

@ -5,8 +5,8 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2007-2021 PCOpt/NTUA
Copyright (C) 2013-2021 FOSS GP
Copyright (C) 2007-2023 PCOpt/NTUA
Copyright (C) 2013-2023 FOSS GP
Copyright (C) 2019 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
@ -90,10 +90,9 @@ void ATCstandard::addATC(fvVectorMatrix& UaEqn)
if (extraConvection_ > 0)
{
// Implicit part added to increase diagonal dominance
// Note: Maybe this needs to be multiplied with the ATClimiter ??
UaEqn += extraConvection_*fvm::div(-phi, Ua);
UaEqn += ATClimiter_*extraConvection_*fvm::div(-phi, Ua);
// correct rhs due to implicitly augmenting the adjoint convection
// Correct rhs due to implicitly augmenting the adjoint convection
ATC_ += extraConvection_*(fvc::grad(Ua, "gradUaATC")().T() & U);
}

View File

@ -411,7 +411,7 @@ void Foam::cellCellStencil::setUpFrontOnOversetPatch
void Foam::cellCellStencil::walkFront
(
const globalIndex& globalCells,
const label layerRelax,
const scalar layerRelax,
const labelListList& allStencil,
labelList& allCellTypes,
scalarField& allWeight,
@ -611,9 +611,11 @@ void Foam::cellCellStencil::walkFront
allWeightWork[nei] = fraction[facei];
allCellTypesWork[nei] = INTERPOLATED;
const label donorId = compactStencil[nei][0];
const label donorId =
compactStencil[nei][0];
volRatio[nei] = V[nei]/compactCellVol[donorId];
volRatio[nei] =
V[nei]/compactCellVol[donorId];
seedCell
(

View File

@ -276,7 +276,7 @@ public:
void walkFront
(
const globalIndex& globalCells,
const label layerRelax,
const scalar layerRelax,
const labelListList& allStencil,
labelList& allCellTypes,
scalarField& allWeight,

View File

@ -1798,7 +1798,7 @@ Foam::cellCellStencils::inverseDistance::~inverseDistance()
bool Foam::cellCellStencils::inverseDistance::update()
{
label layerRelax(dict_.getOrDefault("layerRelax", 1));
scalar layerRelax(dict_.getOrDefault("layerRelax", 1.0));
scalar tol = dict_.getOrDefault("tolerance", 1e-10);
smallVec_ = mesh_.bounds().span()*tol;

View File

@ -4,6 +4,7 @@ simpleGeomDecomp/simpleGeomDecomp.C
hierarchGeomDecomp/hierarchGeomDecomp.C
manualDecomp/manualDecomp.C
multiLevelDecomp/multiLevelDecomp.C
multiNodeDecomp/multiNodeDecomp.C
metisLikeDecomp/metisLikeDecomp.C
structuredDecomp/structuredDecomp.C
randomDecomp/randomDecomp.C

View File

@ -0,0 +1,58 @@
# New Multi-Level Decomposition
The multi-node decomposition is an extension of the existing multi-level decomposition. It supports the syntax of the current multi-level decomposition, but allows you to change the decomposition tree as you wish. For example, you may split into unbalanced nodes, set the weights of some nodes to be larger than others, or use a different decomposition method for some nodes.
You may set up the decomposition in two ways:
1. Using a domains list and a default method:
```
numberOfSubdomains 8;
multiNodeCoeffs {
domains (2 4);
method metis;
}
```
2. Using a dictionary for each level:
```
numberOfSubdomains 8;
multiLevelCoeffs {
nodes {
numberOfSubdomains 2;
method metis;
}
cores {
numberOfSubdomains 4;
method scotch;
}
}
```
Note that if the total number of subdomains does not match the product of the number of subdomains at each level, but a default method is provided, a new level will be inferred in order to match the total number of subdomains.
This creates a "decomposition tree" - for example, the dictionaries above create a tree where the root has two children, and each child has four children (which are the leaves of the tree). Every leaf in the tree is a subdomain in the final decomposition.
After setting up the decomposition, we may edit specific nodes or ranges of nodes. For example, suppose we want to split into two nodes, the first one having four subdomains and the second having eight subdomains. We can use the above dictionaries, and then use:
```
domains[1] (8);
```
The square brackets indicate which nodes in the tree should be edited - here we want the second child of the root (the indexing starts from zero). If we wanted to change the first two children of the third child of the root, we would write:
```
domains[2][0-1] (8);
```
Note that the total number of subdomains must match the number of subdomains declared after all modifications. In addition, note that the decomposition into two nodes will be done as if they were of the same size, hence the first four subdomains will be bigger than the other eight. In order to fix this, we may:
1. Change the weight of the second node to twice the default weight:
```
weight[1] 2;
```
2. Set the weights initialization to relative - this will cause the weights of the children to first be computed from the number of leaves in their subtrees. Note that this updates the initialization of the whole subtree, but using the `weight` parameter we can override this initialization.
```
weightsInitialization[1] relative;
```
We may also set a special method dictionary that decomposes differently for some nodes:
```
method[2-4] {
numberOfSubdomains 4;
method metis;
coeffs {
...
}
}
```

View File

@ -0,0 +1,788 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
Copyright (C) 2017-2021 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "multiNodeDecomp.H"
#include "addToRunTimeSelectionTable.H"
#include "IFstream.H"
#include "globalIndex.H"
#include "mapDistribute.H"
#include "DynamicList.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
// Register multiNodeDecomp with the runtime-selection mechanism so it can be
// chosen via the "method" keyword in decomposeParDict (debug switch level 0).
namespace Foam
{
defineTypeNameAndDebug(multiNodeDecomp, 0);
addToRunTimeSelectionTable
(
decompositionMethod,
multiNodeDecomp,
dictionary
);
}
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
namespace Foam {
// Read the coeffs dictionary and build the decomposition-metadata tree
// (rootMetadata_). Two input styles are supported:
//  1. Short-cut: a "method" word plus a "domains" list (one entry per level).
//  2. Full: one sub-dictionary per level, each with "numberOfSubdomains".
// After the base tree is built, per-node overrides of the form
// domains[...]/method[...]/weight[...]/weightsInitialization[...] are applied
// in that order, then the decompositionMethod objects are constructed.
void multiNodeDecomp::initializeMetadata(const dictionary& coeffsDict) {
word defaultMethod;
dictionary defaultMethodDict;
// Build a default method dictionary (method name + its coeffs, if present)
// that leaf-less levels fall back to.
if(coeffsDict.readIfPresent("method", defaultMethod, keyType::LITERAL)) {
defaultMethodDict.add("method",defaultMethod);
const dictionary& subMethodCoeffsDict
(
findCoeffsDict
(
coeffsDict,
defaultMethod + "Coeffs",
selectionType::NULL_DICT
)
);
if(subMethodCoeffsDict.size())
defaultMethodDict.add(subMethodCoeffsDict.dictName(), subMethodCoeffsDict);
}
labelList domains;
label nTotal = 0;
label nLevels = 0;
//Check if any meta argument is changed using the new syntax.
//If they are, we cannot infer an additional level of decomposition,
//as it may interfere with the indices.
List<string> domainChanges = metaParser::getEntries(coeffsDict, "domains");
List<string> methodChanges = metaParser::getEntries(coeffsDict, "method");
List<string> weightChanges = metaParser::getEntries(coeffsDict, "weight");
//We can parse weightMode without brackets too
List<string> weightModeChanges = metaParser::getEntries(coeffsDict, "weightsInitialization", true);
bool bChangesDomains = !domainChanges.empty();
bool bChangesArguments = bChangesDomains
|| (!methodChanges.empty())
|| (!weightChanges.empty())
|| (!weightModeChanges.empty());
bool bMetadataInitialized = false;
// Found (non-recursive, no patterns) "method" and "domains" ?
// Allow as quick short-cut entry
if
(
// non-recursive, no patterns
coeffsDict.readIfPresent("method", defaultMethod, keyType::LITERAL)
// non-recursive, no patterns
&& coeffsDict.readIfPresent("domains", domains, keyType::LITERAL)
)
{
// Short-cut version specified by method, domains only
// nTotal = product of all level sizes; 0 when "domains" is empty.
nTotal = (domains.empty() ? 0 : 1);
for (const label n : domains)
{
nTotal *= n;
++nLevels;
}
//Update domains here
// Apply domains[...] overrides before validating the total, since they
// change the leaf count of the affected subtrees.
if(nTotal != 0 && bChangesDomains) {
rootMetadata_.initialize(
domains,
&defaultMethodDict
);
bMetadataInitialized = true;
for(string key : domainChanges)
rootMetadata_.updateDomains( key,
coeffsDict.get<labelList>(key, keyType::LITERAL));
nTotal = rootMetadata_.getSize();
}
if (nTotal == 1)
{
// Emit Warning
// Degenerate "domains (1)": fall back to a single level covering all
// requested domains.
nTotal = nDomains();
nLevels = 1;
domains.setSize(1);
domains[0] = nTotal;
}
//If bChangesDomains is true, we do not want to add another dimension as this
//may affect the user's assignments of domains/weights/methods later on.
else if (nTotal > 0 && nTotal < nDomains() && !(nDomains() % nTotal) && !bChangesArguments)
{
// nTotal < nDomains, but with an integral factor,
// which we insert as level 0
++nLevels;
labelList old(std::move(domains));
domains.setSize(old.size()+1);
domains[0] = nDomains() / nTotal;
forAll(old, i)
{
domains[i+1] = old[i];
}
nTotal *= domains[0];
Info<<" inferred level 0 with " << domains[0]
<< " domains" << nl << nl;
}
if (!nLevels || nTotal != nDomains())
{
FatalErrorInFunction
<< "Top level decomposition specifies " << nDomains()
<< " domains which is not equal to the product of"
<< " all sub domains " << nTotal
<< exit(FatalError);
}
// Not yet initialized when there were no domains[...] overrides.
if(!bMetadataInitialized) {
bMetadataInitialized = true;
rootMetadata_.initialize(
domains,
&defaultMethodDict
);
}
}
else
{
// Specified by full dictionaries
// Create editable methods dictionaries
// - Only consider sub-dictionaries with a "numberOfSubdomains" entry
// This automatically filters out any coeffs dictionaries
// NOTE(review): this local nTotal shadows the outer nTotal declared above;
// intentional here since the outer one is unused in this branch.
label nTotal = 1;
List<const dictionary*> methods;
for (const entry& dEntry : coeffsDict)
{
word methodName;
if
(
dEntry.isDict()
// non-recursive, no patterns
&& dEntry.dict().found("numberOfSubdomains", keyType::LITERAL)
)
{
domains.append(dEntry.dict().get<label>("numberOfSubdomains"));
nTotal *= domains.last();
// No method specified? can use a default method?
const bool addDefaultMethod
(
!(dEntry.dict().found("method", keyType::LITERAL))
&& !defaultMethod.empty()
);
if(!(dEntry.dict().found("method",keyType::LITERAL)) && defaultMethod.empty()) {
FatalErrorInFunction <<
dEntry.keyword() <<
" dictionary does not contain method, and no default method is specified."
<< nl << exit(FatalError);
}
// Per-level editable copy; ownership released after
// setLeveledDictionaries below.
dictionary* levelDict = new dictionary(dEntry.dict());
levelDict->remove("numberOfSubdomains");
if(addDefaultMethod) levelDict->add("method", defaultMethod);
methods.append(levelDict);
}
}
if(domains.empty())
nTotal = 0;
// NOTE(review): if no level dictionaries were found, methods is empty and
// methods[0] below is out of range — assumes at least one level dictionary
// exists; verify against callers/input validation.
rootMetadata_.initialize(domains, methods[0]);
bMetadataInitialized = true;
for(string key : domainChanges)
rootMetadata_.updateDomains( key,
coeffsDict.get<labelList>(key, keyType::LITERAL));
if(nTotal != nDomains()) {
FatalErrorInFunction
<< "Top level decomposition specifies " << nDomains()
<< " domains which is not equal to the product of"
<< " all sub domains " << nTotal << " manually defined by dictionaries. "
<< exit(FatalError);
}
rootMetadata_.setLeveledDictionaries(methods);
// setLeveledDictionaries copies the dictionaries; safe to delete here.
for(const dictionary* method : methods)
delete method;
}
// Apply the remaining per-node overrides (after all domain changes,
// so indices refer to the final tree shape).
for(string key : methodChanges)
rootMetadata_.updateMethod(key, coeffsDict.subDict(key, keyType::LITERAL));
for(string key : weightChanges)
rootMetadata_.updateWeight(key, coeffsDict.get<label>(key, keyType::LITERAL));
for(string key : weightModeChanges) {
word value = coeffsDict.get<word>(key, keyType::LITERAL);
WeightsInitialization newValue = UNKNOWN;
if(value=="uniform")
newValue = UNIFORM;
else if(value == "relative")
newValue = RELATIVE;
else
FatalErrorInFunction <<
"unknown weights initialization (" << value << "). Must be one of: relative, uniform."
<< nl << exit(FatalError);
rootMetadata_.updateWeightsInitialization(key, newValue);
}
// Finally instantiate the decompositionMethod objects for every
// internal node of the tree.
if(!rootMetadata_.isLeaf())
rootMetadata_.constructMethods();
}
// Given a subset of cells determine the new global indices. The problem
// is in the cells from neighbouring processors which need to be renumbered.
// Restrict the global cell-cell connectivity to the subset 'set' and
// renumber it into a globally-compact numbering over the subset.
//  nDomains      : number of domains at the current level
//  domainI       : the domain being extracted (unused here beyond context)
//  dist          : destination domain of every local cell
//  cellCells     : full connectivity (global numbering)
//  set           : local cell ids belonging to the subset
//  subCellCells  : (out) subset connectivity, compact global numbering
//  cutConnections: (out) per-domain count of connections cut by the subset
void multiNodeDecomp::subsetGlobalCellCells
(
const label nDomains,
const label domainI,
const labelList& dist,
const labelListList& cellCells,
const labelList& set,
labelListList& subCellCells,
labelList& cutConnections
) const
{
// Determine new index for cells by inverting subset
labelList oldToNew(invert(cellCells.size(), set));
globalIndex globalCells(cellCells.size());
// Subset locally the elements for which I have data
subCellCells = UIndirectList<labelList>(cellCells, set);
// Get new indices for neighbouring processors
List<Map<label>> compactMap;
mapDistribute map(globalCells, subCellCells, compactMap);
map.distribute(oldToNew);
labelList allDist(dist);
map.distribute(allDist);
// Now we have:
// oldToNew : the locally-compact numbering of all our cellCells. -1 if
// cellCell is not in set.
// allDist : destination domain for all our cellCells
// subCellCells : indexes into oldToNew and allDist
// Globally compact numbering for cells in set.
globalIndex globalSubCells(set.size());
// Now subCellCells contains indices into oldToNew which are the
// new locations of the neighbouring cells.
cutConnections.setSize(nDomains);
cutConnections = 0;
forAll(subCellCells, subCelli)
{
labelList& cCells = subCellCells[subCelli];
// Keep the connections to valid mapped cells
label newI = 0;
forAll(cCells, i)
{
// Get locally-compact cell index of neighbouring cell
const label nbrCelli = oldToNew[cCells[i]];
if (nbrCelli == -1)
{
// Neighbour left the subset: count it as a cut edge towards
// the domain it went to.
cutConnections[allDist[cCells[i]]]++;
}
else
{
// Reconvert local cell index into global one
// Get original neighbour
const label celli = set[subCelli];
const label oldNbrCelli = cellCells[celli][i];
// Get processor from original neighbour
const label proci = globalCells.whichProcID(oldNbrCelli);
// Convert into global compact numbering
cCells[newI++] = globalSubCells.toGlobal(proci, nbrCelli);
}
}
// Trim the dropped (cut) connections.
cCells.setSize(newI);
}
}
// Recursively decompose the point subset described by pointMap, writing the
// final domain id of every original point into finalDecomp.
//  pointPoints   : connectivity of the current subset (compact numbering)
//  points        : coordinates of the current subset
//  pointWeights  : weights of the current subset
//  pointMap      : map from subset indices back to the original points
//  decomposeData : metadata node whose method decomposes this level
//  leafOffset    : first final-domain id assigned to this subtree
//  finalDecomp   : (in/out) final domain id per original point
void multiNodeDecomp::decompose
(
    const labelListList& pointPoints,
    const pointField& points,
    const scalarField& pointWeights,
    const labelUList& pointMap, // map back to original points
    const nodeMetadata& decomposeData,
    const label leafOffset,
    labelList& finalDecomp
) const
{
    // Level-local distribution of the subset over [0..nCurrDomains-1]
    labelList dist
    (
        decomposeData.getMethod()->decompose
        (
            pointPoints,
            points,
            pointWeights
        )
    );

    // Number of domains at the current level
    const label nCurrDomains = decomposeData.nDomains();

    // Calculate the domain remapping.
    // The decompose() method delivers a distribution of [0..nDomains-1]
    // which we map to the final location according to the decomposition
    // leaf we are on: child i starts right after all leaves of
    // children 0..i-1.
    labelList domainOffsets(nCurrDomains);
    domainOffsets[0] = leafOffset;
    for (label nDomain = 1; nDomain < nCurrDomains; ++nDomain)
    {
        domainOffsets[nDomain] =
            domainOffsets[nDomain-1]
          + decomposeData.getChild(nDomain-1)->getSize();
    }

    // Provisionally assign every point the starting id of its domain;
    // the recursion below refines this for non-leaf children.
    forAll(pointMap, i)
    {
        finalDecomp[pointMap[i]] = domainOffsets[dist[i]];
    }

    if (nCurrDomains > 0)
    {
        // Recurse into every non-leaf child with its subset of points.
        // (An unused invertOneToMany() domain-to-points table was removed
        // here: it was computed each level but never read.)
        for (label domainI = 0; domainI < nCurrDomains; ++domainI)
        {
            if (decomposeData.getChild(domainI)->isLeaf()) continue;

            // Extract elements for current domain
            const labelList domainPoints(findIndices(dist, domainI));

            // Subset point-wise data.
            pointField subPoints(points, domainPoints);
            scalarField subWeights(pointWeights, domainPoints);
            labelList subPointMap(labelUIndList(pointMap, domainPoints));

            // Subset point-point addressing (adapt global numbering)
            labelListList subPointPoints;
            labelList nOutsideConnections;
            subsetGlobalCellCells
            (
                nCurrDomains,
                domainI,
                dist,
                pointPoints,
                domainPoints,
                subPointPoints,
                nOutsideConnections
            );

            decompose
            (
                subPointPoints,
                subPoints,
                subWeights,
                subPointMap,
                *decomposeData.getChild(domainI),
                domainOffsets[domainI], // The offset for this level and leaf
                finalDecomp
            );
        }
    }
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from the top-level decomposition dictionary and region name,
// then read the mandatory "multiNodeDecompCoeffs" sub-dictionary to build
// the decomposition-metadata tree.
multiNodeDecomp::multiNodeDecomp
(
const dictionary& decompDict,
const word& regionName
)
:
decompositionMethod(decompDict, regionName),
rootMetadata_()
{
// Exact-name, mandatory lookup: a missing coeffs dictionary is fatal.
const dictionary& coeffsDict(
findCoeffsDict(
typeName + "Coeffs",
(selectionType::EXACT | selectionType::MANDATORY)
)
);
initializeMetadata(coeffsDict);
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Parallel aware iff every decomposition method in the metadata tree is.
bool multiNodeDecomp::parallelAware() const
{
return rootMetadata_.parallelAware();
}
// Top-level mesh decomposition: build the (parallel) cell-cell connectivity
// and recurse through the metadata tree starting at the root with offset 0.
// Returns the final domain id for every cell.
labelList multiNodeDecomp::decompose
(
const polyMesh& mesh,
const pointField& cc,
const scalarField& cWeights
) const
{
CompactListList<label> cellCells;
calcCellCells(mesh, identity(cc.size()), cc.size(), true, cellCells);
labelList finalDecomp(cc.size(), Zero);
// Identity map: at the top level every cell maps to itself.
labelList cellMap(identity(cc.size()));
decompose
(
cellCells.unpack(),
cc,
cWeights,
cellMap, // map back to original cells
rootMetadata_,
0,
finalDecomp
);
return finalDecomp;
}
// Point-based decomposition overload: same recursion as the mesh overload,
// but the caller supplies the (global) point-point connectivity directly.
labelList multiNodeDecomp::decompose
(
const labelListList& globalPointPoints,
const pointField& points,
const scalarField& pointWeights
) const
{
labelList finalDecomp(points.size(), Zero);
// Identity map: at the top level every point maps to itself.
labelList pointMap(identity(points.size()));
decompose
(
globalPointPoints,
points,
pointWeights,
pointMap, // map back to original points
rootMetadata_,
0,
finalDecomp
);
return finalDecomp;
}
// * * * * * * * * * * * * Meta Parser Class * * * * * * * * * * * * * //
// Collect every keyword in dict that refers to the given meta argument.
// A keyword matches when it begins with "<argument>[" (indexed form), or,
// if allowWithoutBrackets is set, when it equals the bare argument name.
List<string> multiNodeDecomp::metaParser::getEntries(const dictionary& dict, const string& argument, bool allowWithoutBrackets) {
    const string bracketed = argument + "[";
    DynamicList<string, 4> matches;
    for (const auto& dictEntry : dict)
    {
        const auto& kw = dictEntry.keyword();
        const bool bareMatch = allowWithoutBrackets && kw == argument;
        if (kw.starts_with(bracketed) || bareMatch)
        {
            matches.push_back(kw);
        }
    }
    return matches;
}
// Parse the "[a][b-c]..." index ranges out of a meta-argument keyword.
// Each bracket pair yields one (left,right) pair; "[]" yields [0,-1],
// meaning "all children at that level". Any malformed input is fatal.
//  key : a keyword as returned by getEntries (argument name + brackets).
List<Pair<label>> multiNodeDecomp::metaParser::parseRanges(const string& key) {
    // First, discard the argument and process the indices only.
    // The current syntax is argument[...]
    // Assuming that this key was returned in getEntries,
    // if there is no '[', it is OK and we use the
    // empty string (update the root).
    string indices = "";
    const auto firstBracket = key.find('[');
    if (firstBracket != string::npos)
    {
        indices = key.substr(firstBracket);
    }
    // All checks print an error message if failed, explaining why.
    DynamicList<Pair<label>, 4> result;
    label nCurPtr = 0;
    const label nIndicesLength = indices.size();
    // As long as there are more ranges to parse.
    while (nCurPtr != nIndicesLength)
    {
        // First, check if there is an opening bracket.
        if (indices[nCurPtr] != '[')
            FatalError
                << "Error when parsing indices "
                << indices << ": Expected '[', found "
                << indices[nCurPtr] << ". Aborting\n"
                << exit(FatalError);
        // Then, find the matching close bracket.
        // BUG FIX: std::string::find returns npos (not the string length)
        // on failure. The old comparison against the length never fired,
        // so a missing ']' produced a bogus negative end index instead of
        // the intended fatal error. Compare against npos before narrowing.
        const auto closePos = indices.find(']', nCurPtr);
        if (closePos == string::npos)
        {
            FatalError
                << "Error when parsing indices "
                << indices << ": Expected ']' after '['. Aborting\n"
                << exit(FatalError);
        }
        const label nEndIndex = closePos;
        // Read inside the brackets, mark the hyphen if it exists, and make
        // sure every character is either a digit or a hyphen.
        // Note that only one hyphen may exist.
        label nHyphenIdx = -1;
        for (label nCurIndex = nCurPtr+1; nCurIndex < nEndIndex; ++nCurIndex)
        {
            if (!isdigit(indices[nCurIndex]) && indices[nCurIndex] != '-')
            {
                FatalError
                    << "Error when parsing indices "
                    << indices << ": Expected digit/'-'/']', found "
                    << indices[nCurIndex] << ". Aborting\n"
                    << exit(FatalError);
            }
            if (indices[nCurIndex] == '-')
            {
                if (nHyphenIdx != -1)
                    FatalError
                        << "Error when parsing indices "
                        << indices << ": Found two hyphens(-) inside an index. Aborting\n"
                        << exit(FatalError);
                nHyphenIdx = nCurIndex;
            }
        }
        label nLeft, nRight;
        if (nHyphenIdx == -1)
        {
            // Not a range - a single index, or empty brackets "[]"
            // (encoded as [0,-1], meaning the whole range).
            if (nCurPtr+1 == nEndIndex)
            {
                nLeft = 0;
                nRight = -1;
            }
            else
            {
                string sNum = indices.substr(nCurPtr+1, nEndIndex-nCurPtr-1);
                nLeft = nRight = atoi(sNum.c_str());
            }
        }
        else
        {
            // A range of indices.
            // Assert that the hyphen is not right next to the brackets.
            if (nHyphenIdx+1 == nEndIndex || nCurPtr+1 == nHyphenIdx)
                FatalError
                    << "Error when parsing indices "
                    << indices << ": Expected number, found "
                    << (nCurPtr+1==nHyphenIdx?'-':']')
                    << ". Aborting\n"
                    << exit(FatalError);
            // Parse the numbers
            string sLeftNum = indices.substr(nCurPtr+1, nHyphenIdx-nCurPtr-1);
            string sRightNum = indices.substr(nHyphenIdx+1, nEndIndex-nHyphenIdx-1);
            nLeft = atoi(sLeftNum.c_str());
            nRight = atoi(sRightNum.c_str());
            // Make sure left endpoint is at most the right endpoint
            if (nLeft > nRight)
                FatalError
                    << "Error when parsing indices "
                    << indices << ": right endpoint("<< nRight
                    << ") cannot be smaller than left endpoint("
                    << nLeft << "). Aborting\n"
                    << exit(FatalError);
        }
        // Move the pointer after the closing bracket and append the range.
        nCurPtr = nEndIndex + 1;
        result.push_back({nLeft, nRight});
    }
    return result;
}
// * * * * * * * * * * * * Node Metadata Class * * * * * * * * * * * * * //
// Public entry point: assign the per-level dictionaries starting at the
// root, which is level 0.
void multiNodeDecomp::nodeMetadata::setLeveledDictionaries(const List<const dictionary*>& dictionaries) {
setLeveledDictionaries(dictionaries, 0);
}
bool multiNodeDecomp::nodeMetadata::parallelAware() const {
// The decomposition tree is parallel aware if and only if all methods used are parallel aware.
// If this is a leaf, we are OK.
if(children.empty())
return true;
// Otherwise, check if the method used in this node is parallel aware.
if(!method->parallelAware())
return false;
// Check recursively, and if any child is not parallel aware - return false.
for(auto& child : children)
if(!child->parallelAware())
return false;
return true;
}
// Derive a "processorWeights" list for this node's method from the children's
// weights and the weights-initialization mode, unless the user already
// supplied one (in "<method>Coeffs" or in "coeffs").
// NOTE(review): subDictOrAdd() creates an empty sub-dictionary as a side
// effect when none exists — confirm this is harmless for downstream readers
// of coeffsDict.
void multiNodeDecomp::nodeMetadata::updateProcessorWeights() {
label nDom = nDomains();
word methodCoeffsName = coeffsDict->get<word>("method") + "Coeffs";
// If processorWeights were set by the user, we do not modify them.
if(
// Check if the user did not specify processorWeights under the coeffs dictionary or the methodCoeffs dictionary
!(coeffsDict->subDictOrAdd(methodCoeffsName).found("processorWeights", keyType::LITERAL)
|| coeffsDict->subDictOrAdd("coeffs").found("processorWeights", keyType::LITERAL))) {
// Then we should compute weights on our own
Field<float> processorWeights(nDom);
forAll(children, i) {
// An explicit per-node weight (anything but the default 1)
// overrides the initialization mode.
if(children[i]->weight != 1)
processorWeights[i] = children[i]->weight;
else switch(weightsInitialization) {
case RELATIVE:
// Weight by the number of leaves in the child's subtree.
processorWeights[i] = children[i]->size;
break;
case UNIFORM:
processorWeights[i] = 1;
break;
default:
// UNKNOWN is rejected while parsing, so this is unreachable
// unless a new mode is added without updating this switch.
FatalError
<< "Weights initialization is not handled in updateProcessorWeights. Aborting\n"
<< exit(FatalError);
}
}
coeffsDict->subDictOrAdd(methodCoeffsName).add("processorWeights", processorWeights);
}
}
// Instantiate the decompositionMethod for this node and, recursively, for
// every non-leaf descendant. Must be called after the tree shape and all
// per-node overrides are final.
void multiNodeDecomp::nodeMetadata::constructMethods() {
// Special handling of nDomains = 1, because some decomposition methods crash when decomposing to one domain.
label nDom = nDomains();
if(nDom==1) {
coeffsDict->clear();
coeffsDict->add("method","none");
} else
updateProcessorWeights();
coeffsDict->add("numberOfSubdomains",nDom);
// Non-verbose construction of decomposition methods would be nice
method = decompositionMethod::New(*coeffsDict).release();
// Cannot release coeffsDict from memory because method uses a reference that must stay alive
forAll(children, i) {
if(!children[i]->isLeaf())
children[i]->constructMethods();
}
}
// Recursively construct the decomposition tree, given the list of dimensions and a default method.
// Recursively (re)construct the subtree rooted at this node to match the
// given list of per-level dimensions, reusing existing children where
// possible and deleting/allocating as the level width shrinks/grows.
// NOTE(review): with an empty dims list the existing children are kept and
// size is left unchanged — confirm callers never pass an empty list to
// collapse a node back into a leaf.
void multiNodeDecomp::nodeMetadata::constructRecursive(const labelList& dims, const dictionary* defaultMethod) {
if(!dims.empty()) {
// The list of dimensions of the children is the current list without the first element.
labelList newDims(dims.size() - 1);
forAll(newDims, i)
newDims[i] = dims[i+1];
// Construct children recursively
// First, resize existing children
// And delete the excess
forAll(children, i) {
if(i < dims[0])
children[i]->constructRecursive(newDims, defaultMethod);
else
delete children[i];
}
label nOldSize = children.size();
children.resize(dims[0]);
// If the new array is bigger we will need to allocate new children.
for(label i = nOldSize; i < dims[0]; ++i)
children[i] = new nodeMetadata(newDims, defaultMethod);
// Compute size (number of leaves in subtree)
// NOTE(review): this assumes all children have equal size, which holds
// when built from a dims list; later per-node updateDomains overrides
// recompute size by summation in updateNodes instead.
size = dims[0];
if(!children.empty())
size *= children[0]->size;
}
}
// Parse the index ranges out of the keyword, then apply 'update' to every
// node matched by those ranges.
void multiNodeDecomp::nodeMetadata::updateNodes(const string& key, const std::function<void(nodeMetadata*)>& update) {
List<Pair<label>> indicesList = metaParser::parseRanges(key);
updateNodes(indicesList, update);
}
// Parse the indices, and apply the update function to all matching nodes.
// nCurPtr is used to indicate the index we are now parsing (instead of sending substrings of indices)
// Walk the tree along the parsed ranges and apply the update function to all
// matching nodes; when all ranges are consumed, this node is the target.
// nCurIdx indicates which range of 'indices' applies at this depth.
// On the way back up, 'size' is recomputed so ancestors see the effect of
// any updateDomains performed below.
void multiNodeDecomp::nodeMetadata::updateNodes(const List<Pair<label>>& indices, const std::function<void(nodeMetadata*)>& update, label nCurIdx) {
if(nCurIdx == label(indices.size())) update(this);
else {
// Otherwise, call recursively.
label nLeft, nRight, nChildren = children.size();
nLeft = indices[nCurIdx].first();
nRight = indices[nCurIdx].second();
// [0,-1] means the entire range.
if(nLeft==0 && nRight == -1)
nRight = nChildren - 1;
// Make sure that the indices do not exceed the number of children.
if(nRight >= nChildren)
FatalError
<< "Error when parsing indices: The #" << (nCurIdx+1)
<< " range ["<< nLeft <<"," << nRight<<"]:\n"
<< " Cannot update indices bigger than number of children("
<< nChildren << "). Aborting\n"
<< exit(FatalError);
for(label nChildIdx = nLeft; nChildIdx <= nRight; ++nChildIdx)
children[nChildIdx]->updateNodes(indices,update, nCurIdx+1);
}
// Recompute size assuming children are updated.
if(!children.empty()) {
size = 0;
forAll(children, i)
size += children[i]->size;
}
}
void multiNodeDecomp::nodeMetadata::setLeveledDictionaries(const List<const dictionary*>& dictionaries, label nLevel) {
// Set the dictionary to this level, and to non-leaf children.
setDict(*dictionaries[nLevel]);
forAll(children, i) {
if(children[i]->nDomains() > 0)
children[i]->setLeveledDictionaries(dictionaries,nLevel+1);
}
}
}
// ************************************************************************* //

View File

@ -0,0 +1,435 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2015 OpenFOAM Foundation
Copyright (C) 2017-2021 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::multiNodeDecomp
Description
Decompose given using consecutive application of decomposers,
to perhaps uneven pieces.
Note: If uneven pieces are required, the decomposition method
used must support the processorWeights argument.
SourceFiles
multiNodeDecomp.C
\*---------------------------------------------------------------------------*/
#ifndef multiNodeDecomp_H
#define multiNodeDecomp_H
#include "decompositionMethod.H"
// #include "List.H"
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class multiNodeDecomp Declaration
\*---------------------------------------------------------------------------*/
class multiNodeDecomp
:
public decompositionMethod
{
// Nested classes declarations
/*---------------------------------------------------------------------------*\
Class metaParser Declaration
\*---------------------------------------------------------------------------*/
// A class responsible for detecting and parsing metadata-related arguments.
class metaParser {
public:
// Detect and return the dictionary keywords referring to a given meta
// argument.
// Input:
// dict - the coeffs dictionary we are looking inside.
// argument - the argument we're looking for.
// allowWithoutBrackets - set to true if the argument can be detected without brackets.
// For example, "domains" must always carry brackets, while
// "weightsInitialization" may also appear bare (applies to the root).
static List<string> getEntries(const dictionary& dict, const string& argument, bool allowWithoutBrackets = false);
// Given a key string of an entry returned from getEntries,
// parse and return all index ranges described in the key.
// Note that it is the caller's responsibility to make sure that the right endpoint
// does not exceed the number of children in each node
// (updateNodes enforces this with a fatal error).
// The user may also write "[]" to specify all children of the node;
// in this case the range returned is [0,-1]. Otherwise, endpoints are always non-negative.
// Input:
// key - a key of an entry we are parsing, perhaps from an entry returned in getEntries.
// Output:
// A list of ranges, where the i-th range corresponds to the i-th level of the decomposition tree.
// i.e. if two ranges are returned, we will traverse each child of the root in the first range, and recursively
// each child in the second range, thus updating nodes at the third level.
static List<Pair<label>> parseRanges(const string& key);
};
// How child weights are derived when the user does not set them explicitly:
// RELATIVE - weight a child by the number of leaves in its subtree;
// UNIFORM - give all children equal weight;
// UNKNOWN - unrecognised user input (rejected with a fatal error while parsing).
enum WeightsInitialization {
RELATIVE,
UNIFORM,
UNKNOWN
};
/*---------------------------------------------------------------------------*\
Class nodeMetadata Declaration
\*---------------------------------------------------------------------------*/
// A class holding all the information necessary for a multi-node decomposition, without building
// the decompositionMethod objects.
// The size indicates the number of processors in this subtree. It will be used
// When computing the offset of the decomposition, and when using the relative weights initialization.
// The weight is a multiplicative factor applied when decomposing. It is 1 by default and can be set by the user.
// If the uniform weights initialization is used, all nodes will have the same weight. If the relative weights
// initialization is used, each node's weight is set relatively to their size.
// Then, the weight field can be used to change the weight of a specific node.
// Note that if the coeffs dictionary contains a processorWeights field, it will not be overwritten.
// We will then construct a new dictionary with the required numberOfSubdomains and processorWeights.
class nodeMetadata {
public:
nodeMetadata() : weight(1), size(1), weightsInitialization(UNIFORM), children(0), coeffsDict(nullptr), method(nullptr) {}
// Constructs a decomposition data tree with dimensions dims and a default method.
// Input: A list of domains for each level, and a default dictionary method.
nodeMetadata(const labelList& dims, const dictionary* defaultMethod) : nodeMetadata() {
initialize(dims, defaultMethod);
}
// Initializes an existing nodeMetadata object.
void initialize(const labelList& dims, const dictionary* defaultMethod) {
setDict(*defaultMethod);
constructRecursive(dims, defaultMethod);
}
~nodeMetadata() {
// Since this class represents a tree, we will need to destruct recursively.
for(nodeMetadata* child : children)
delete child;
// Only delete method and dict if they were assigned.
if(method != nullptr)
delete method;
if(coeffsDict != nullptr)
delete coeffsDict;
}
// Getters
// Get the weight of this node, with respect to the decomposition done in this node's parent
label getWeight() const {
return weight;
}
// Get the coeffs dictionary for the decomposition of this node.
const dictionary* getDict() const {
return coeffsDict;
}
// Get the modifiable coeffs dictionary for the decomposition of this node.
dictionary* getMutableDict() {
return coeffsDict;
}
// Get the number of leaves in this subtree, i.e., number of processors
// created under this node.
label getSize() const {
return size;
}
// Get the decomposition method object of this node.
// Note that construct methods must be called first, otherwise
// a null pointer will be returned.
const Foam::decompositionMethod* getMethod() const {
return method;
}
// Get the current weights initialization mode.
bool getWeightsInitialization() const {
return weightsInitialization;
}
// Get a const pointer to a child of this node.
const nodeMetadata* getChild(label index) const {
return children[index];
}
// Get a non-const pointer to a child of this node.
nodeMetadata* getMutableChild(label index) {
return children[index];
}
// Returns the number of direct subdomains this node has.
label nDomains() const {
return children.size();
}
// Returns whether this node represents a leaf (i.e., has no children)
bool isLeaf() const {
return children.empty();
}
// Setters
// Set the weight of this node, with respect to the decomposition done in this node's parent
void setWeight(label weight) {
this->weight = weight;
}
// Set the coeffs dictionary for the decomposition of this node.
// This creates a copy of the dictionary (and deletes the previous copy created)
void setDict(const dictionary& dict) {
if(coeffsDict != nullptr) {
delete coeffsDict;
}
coeffsDict = new dictionary(dict);
}
// Set the decomposition method object of this node.
void setMethod(Foam::decompositionMethod* method) {
this->method = method;
}
// Sets the weights initialization mode. If setRecursive is true, propagate to the entire subtree (i.e., the root and all of the descendents)
void setWeightsInitialization(WeightsInitialization newMode, bool setRecursive = true) {
weightsInitialization = newMode;
if(setRecursive) {
for(nodeMetadata* child : children)
child->setWeightsInitialization(newMode, true);
}
}
// Updates
// Update the weights of the nodes at the given indices to the given weight.
// Input: A string indicating the indices of the nodes to be updated, and
// the new weight of the nodes.
void updateWeight(const string& indices, label newWeight) {
updateNodes(indices, [newWeight](nodeMetadata* node) {
node->setWeight(newWeight);
});
}
// Update the dimensions array of the nodes at the given indices to the given dimensions array.
// Input: A string indicating the indices of the nodes to be updated, and
// the new list of dimensions.
void updateDomains(const string& indices,const labelList& dims) {
updateNodes(indices, [dims](nodeMetadata* node) {
// Reconstruct using this node's dict.
// Note that first all domain changes are done,
// And only then dictionaries are set.
// So the descendents' dictionaries are not overwritten.
node->constructRecursive(dims,node->getDict());
});
}
// Update the method of the nodes at the given indices to the given method dictionary.
// Input: A string indicating the indices of the nodes to be updated, and
// the new method dictionary.
void updateMethod(const string& indices, const dictionary& dict) {
updateNodes(indices, [dict](nodeMetadata* node) {
node->setDict(dict);
});
}
// Update the weight initialization mode of nodes at the given indices and their descendents to the new mode.
// Input: A string indicating the indices of the nodes to be updated, and
// the new weight mode.
void updateWeightsInitialization(const string& indices, WeightsInitialization newMode) {
updateNodes(indices, [newMode](nodeMetadata* node) {
node->setWeightsInitialization(newMode);
});
}
// Given a list of dictionaries for each level, set the dictionaries accordingly.
// Input: A list of dictionaries for each level.
void setLeveledDictionaries(const List<const dictionary*>& dictionaries);
// To be used within the decompositionMethod's parallelAware function.
// Returns whether all decompositions in this subtree are parallel aware
// (i.e., synchronize domains across proc boundaries)
bool parallelAware() const;
// Calculate (and add to the dictionary) the new processor weights if reqired,
// Using the children's weights and the weight initialization mode.
void updateProcessorWeights();
// Construct the decompositionMethod object for this node and all its descendents.
void constructMethods();
private:
// The weight of this node in the parent's decomposition, relative to the other nodes.
// Overrides weights set by the weights initialization.
label weight;
// The size of the node indicates the total number of subdomains this node has.
label size;
// An enum describing the weights initialization.
WeightsInitialization weightsInitialization;
// The direct descendents.
List<nodeMetadata*> children;
// The dictionary used to construct the decomposition method.
dictionary* coeffsDict;
// The decomposition method of this node.
const Foam::decompositionMethod* method;
// Recursively constructs the subtree rooted at this node
// Input: A list of dimensions and the dictionary of the default method.
void constructRecursive(const labelList& dims, const dictionary* defaultMethod);
// Update all nodes matching the given indices with the given updating function.
// Input: A list of ranges for each level, and a function that receives a pointer to nodeMetadata,
// updates it accordingly and returns nothing.
void updateNodes(const string& key, const std::function<void(nodeMetadata*)>& update);
// Internal implementation of updateNodes.
// The list of ranges are constructed by passing the key argument to the meta parser.
// nCurIdx is an internal variable that indicates our location inside the indices array.
void updateNodes(const List<Pair<label>>& indices, const std::function<void(nodeMetadata*)>& update, label nCurIdx = 0);
// This function is used inside the public setLeveledDictionaries function.
void setLeveledDictionaries(const List<const dictionary*>& dictionaries, label nLevel);
// Parse the range of indices starting at the (string) index nStartIndex.
// Input: The indices string, and the starting position of the range
// (i.e., the position of the opening bracket)
// Returns a pair representing the range if succeeded,
// or crashes the program with an appropriate error message if failed to parse.
Pair<label> parseRange(const string& indices, label nStartIndex) const;
};
// Private Data
//- The decomposition metadata.
nodeMetadata rootMetadata_;
// Private Member Functions
//- Read coeffsDict and construct the decomposition metadata.
void initializeMetadata(const dictionary& coeffsDict);
//- Given connectivity across processors work out connectivity
// for a (consistent) subset
void subsetGlobalCellCells
(
const label nDomains,
const label domainI,
const labelList& dist,
const labelListList& cellCells,
const labelList& set,
labelListList& subCellCells,
labelList& cutConnections
) const;
//- Decompose at 'currLevel' without addressing
void decompose
(
const labelListList& pointPoints,
const pointField& points,
const scalarField& pointWeights,
const labelUList& pointMap, // map back to original points
const nodeMetadata& decomposeData,
const label leafOffset,
labelList& finalDecomp
) const;
//- No copy construct
multiNodeDecomp(const multiNodeDecomp&) = delete;
//- No copy assignment
void operator=(const multiNodeDecomp&) = delete;
public:
//- Runtime type information
TypeName("multiNode");
// Constructors
//- Construct given decomposition dictionary and optional region name
explicit multiNodeDecomp
(
const dictionary& decompDict,
const word& regionName = ""
);
//- Destructor
virtual ~multiNodeDecomp() = default;
// Member Functions
//- Is method parallel aware?
// i.e. does it synchronize domains across proc boundaries
virtual bool parallelAware() const;
//- Inherit decompose from decompositionMethod
using decompositionMethod::decompose;
//- Return for every coordinate the wanted processor number.
// Use the mesh connectivity (if needed)
virtual labelList decompose
(
const polyMesh& mesh,
const pointField& points,
const scalarField& pointWeights
) const;
//- Return for every coordinate the wanted processor number.
// Explicitly provided connectivity - does not use mesh_.
virtual labelList decompose
(
const labelListList& globalCellCells,
const pointField& cc,
const scalarField& cWeights
) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -119,8 +119,8 @@ bool Foam::viscosityModels::Casson::read
CassonCoeffs_.readEntry("m", m_);
CassonCoeffs_.readEntry("tau0", tau0_);
CassonCoeffs_.readEntry("nuMin_", nuMin_);
CassonCoeffs_.readEntry("nuMax_", nuMax_);
CassonCoeffs_.readEntry("nuMin", nuMin_);
CassonCoeffs_.readEntry("nuMax", nuMax_);
return true;
}

View File

@ -0,0 +1,39 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volVectorField;
object U;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 1 -1 0 0 0 0];
internalField uniform (0 0 0);
boundaryField
{
innerWall
{
type noSlip;
}
outerWall
{
type noSlip;
}
frontAndBack
{
type empty;
}
}
// ************************************************************************* //

View File

@ -0,0 +1,34 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object p;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 2 -2 0 0 0 0];
internalField uniform 0;
boundaryField
{
"innerWall|outerWall"
{
type zeroGradient;
}
frontAndBack
{
type empty;
}
}
// ************************************************************************* //

View File

@ -0,0 +1,10 @@
#!/bin/sh
cd "${0%/*}" || exit # Run from this directory
. ${WM_PROJECT_DIR:?}/bin/tools/RunFunctions # Tutorial run functions
#------------------------------------------------------------------------------
# Build the mesh, run the application selected by getApplication (helper
# from RunFunctions), then generate the comparison plots.
runApplication blockMesh
runApplication $(getApplication)
# './plot' is run directly (not through runApplication) so its output is
# not redirected to a log file; it exits non-zero if gnuplot is missing,
# and being the last command, its status becomes the script's status.
./plot
#------------------------------------------------------------------------------

View File

@ -0,0 +1,30 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "constant";
object MRFProperties;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
MRF1
{
cellZone all;
active yes;
nonRotatingPatches (outerWall);
origin (0 0 0);
axis (0 0 1);
omega 100;
}
// ************************************************************************* //

View File

@ -0,0 +1,22 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "constant";
object transportProperties;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
transportModel Newtonian;
nu 1;
// ************************************************************************* //

View File

@ -0,0 +1,20 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "constant";
object turbulenceProperties;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
simulationType laminar;
// ************************************************************************* //

View File

@ -0,0 +1,42 @@
#!/bin/sh
# Plot the sampled pressure and azimuthal velocity along the radial
# centre line and overlay the closed-form steady solution.
# Require gnuplot
command -v gnuplot >/dev/null || {
echo "FOAM FATAL ERROR: gnuplot not found - skipping graph creation" 1>&2
exit 1
}
gnuplot<<EOF
# (Heredoc is unquoted, but no shell-expandable text appears below;
#  '#' lines are gnuplot comments.)
set terminal pngcairo font "helvetica,16" size 800,600
set xlabel "Radius, r"
set xrange [1:2]
set grid
# Angular velocities and radii of the inner (1) and outer (2) cylinders.
# NOTE(review): these must stay in sync with constant/MRFProperties
# (omega) and the blockMeshDict geometry (r0, r1) - confirm on change.
Omega1 = 100.
Omega2 = 0.
R1 = 1.
R2 = 2.
mu = Omega2/Omega1
# Coefficients of the analytical velocity profile u_theta(r) = A*r + B/r
# (presumably the steady circular Couette solution between rotating
# cylinders - verify against a reference if the setup changes).
A = Omega1*(1. - R2*R2*mu/R1/R1)/(1. - R2*R2/R1/R1)
B = R1*R1*Omega1*(1. - mu)/(1. - R1*R1/R2/R2)
analytical_utheta(r) = A*r + B/r
# C anchors the pressure so that analytical_p(R1) = 0.
C = 0.5*A*A*R1*R1 + 2.*A*B*log(R1) - 0.5*B*B/R1/R1
analytical_p(r) = 0.5*A*A*r*r + 2.*A*B*log(r) - 0.5*B*B/r/r - C
set ylabel "Pressure, p"
set output "p.png"
set samples 20
set key bottom right
plot "postProcessing/sample1/1000/centreLine_p.xy" w lines lw 2 lc "black" t "OpenFOAM", \
analytical_p(x) w p ps 2 pt 6 lw 2 lc rgb "red" t "analytical"
set ylabel "U_{/Symbol q}, rad/s"
set output "Utheta.png"
set samples 20
set key top right
# Column 6 of the transformed-U sample holds the azimuthal component.
plot "postProcessing/sample1/1000/centreLine_U_U:Transformed.xy" u 1:6 w lines lw 2 lc "black" t "OpenFOAM", \
analytical_utheta(x) w p ps 2 pt 6 lw 2 lc rgb "red" t "analytical"
EOF

View File

@ -0,0 +1,126 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object blockMeshDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
scale 1;
geom
{
r0 1;
r1 2;
nr 40;
ntheta 40;
g 10;
mr0 #eval{ - $r0 };
mr1 #eval{ - $r1 };
invG #eval{ 1./$g };
}
vertices
(
( $:geom.r0 0 0)
( 0 $geom.r0 0)
($:geom.mr0 0 0)
( 0 $geom.mr0 0)
( $:geom.r1 0 0)
( 0 $geom.r1 0)
($:geom.mr1 0 0)
( 0 $geom.mr1 0)
( $:geom.r0 0 1)
( 0 $geom.r0 1)
($:geom.mr0 0 1)
( 0 $geom.mr0 1)
( $:geom.r1 0 1)
( 0 $geom.r1 1)
($:geom.mr1 0 1)
( 0 $geom.mr1 1)
);
blockInfo
all
($:geom.ntheta $:geom.nr 1)
simpleGrading (1 ((0.5 0.5 $:geom.g)(0.5 0.5 $:geom.invG)) 1);
blocks
(
hex (1 0 4 5 9 8 12 13) $blockInfo
hex (2 1 5 6 10 9 13 14) $blockInfo
hex (3 2 6 7 11 10 14 15) $blockInfo
hex (0 3 7 4 8 11 15 12) $blockInfo
);
edges
(
arc 0 1 origin (0 0 0)
arc 1 2 origin (0 0 0)
arc 2 3 origin (0 0 0)
arc 3 0 origin (0 0 0)
arc 8 9 origin (0 0 1)
arc 9 10 origin (0 0 1)
arc 10 11 origin (0 0 1)
arc 11 8 origin (0 0 1)
arc 4 5 origin (0 0 0)
arc 5 6 origin (0 0 0)
arc 6 7 origin (0 0 0)
arc 7 4 origin (0 0 0)
arc 12 13 origin (0 0 1)
arc 13 14 origin (0 0 1)
arc 14 15 origin (0 0 1)
arc 15 12 origin (0 0 1)
);
defaultPatch
{
name frontAndBack;
type empty;
}
boundary
(
innerWall
{
type wall;
faces
(
(1 0 8 9)
(2 1 9 10)
(3 2 10 11)
(0 3 11 8)
);
}
outerWall
{
type wall;
faces
(
(5 13 12 4)
(6 14 13 5)
(7 15 14 6)
(4 12 15 7)
);
}
);
mergePatchPairs
(
);
// ************************************************************************* //

View File

@ -0,0 +1,93 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object controlDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
application simpleFoam;
startFrom startTime;
startTime 0;
stopAt endTime;
endTime 1000;
deltaT 1;
writeControl timeStep;
writeInterval 10;
purgeWrite 3;
writeFormat ascii;
writePrecision 6;
writeCompression off;
timeFormat general;
timePrecision 6;
runTimeModifiable true;
functions
{
transform1
{
type fieldCoordinateSystemTransform;
libs (fieldFunctionObjects);
writeControl writeTime;
fields (U);
coordinateSystem
{
origin (0 0 0);
coordinateRotation
{
type axesRotation;
e3 (0 0 1);
e1 (1 0 0);
}
}
}
sample1
{
type sets;
libs (sampling);
writeControl writeTime;
fields (U p U:Transformed);
interpolationScheme cellPoint;
setFormat raw;
sets
(
centreLine
{
type uniform;
axis x;
start (1 0 0);
end (2 0 0);
nPoints 20;
}
);
}
}
// ************************************************************************* //

View File

@ -0,0 +1,51 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object fvSchemes;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
ddtSchemes
{
default steadyState;
}
gradSchemes
{
default Gauss linear;
}
divSchemes
{
default none;
div(phi,U) bounded Gauss linearUpwind grad(U);
div((nuEff*dev2(T(grad(U))))) Gauss linear;
}
laplacianSchemes
{
default Gauss linear corrected;
}
interpolationSchemes
{
default linear;
}
snGradSchemes
{
default corrected;
}
// ************************************************************************* //

View File

@ -0,0 +1,57 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v2212 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object fvSolution;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
solvers
{
p
{
solver GAMG;
tolerance 1e-10;
relTol 0.1;
smoother GaussSeidel;
}
U
{
solver smoothSolver;
smoother GaussSeidel;
tolerance 1e-10;
relTol 0.1;
}
}
SIMPLE
{
nNonOrthogonalCorrectors 0;
pRefCell 0;
pRefValue 0;
}
relaxationFactors
{
fields
{
p 0.3;
}
equations
{
U 0.7;
}
}
// ************************************************************************* //

View File

@ -1,21 +1,26 @@
# ----------------------------------------------------------------------------
# CGAL definitions - several possibilities
#
# 0. missing
# 1. header-only
# 2. library, no mpfr
# 3. library, with mpfr (a likely default)
# - missing
# - header-only
# - header-only, no mpfr
# - library, no mpfr
# - library, with mpfr (default for older CGAL)
#
# Dispatch according to the defined 'CGAL_FLAVOUR'
# - names may change [see wmake/scripts/have_cgal]
# (no-cgal | cgal-header | cgal-no-mpfr | cgal-mpfr)
# (no-cgal | cgal-header | cgal-header-no-mpfr | cgal-no-mpfr | cgal-mpfr)
cgal_subrule := cgal-mpfr
ifneq (,$(findstring header,$(CGAL_FLAVOUR)))
cgal_subrule := cgal-header-only
endif
ifneq (,$(findstring no-mpfr,$(CGAL_FLAVOUR)))
cgal_subrule := cgal-no-mpfr
ifneq (,$(findstring header,$(CGAL_FLAVOUR)))
cgal_subrule := cgal-header-no-mpfr
endif
else
ifneq (,$(findstring header,$(CGAL_FLAVOUR)))
cgal_subrule := cgal-header-only
endif
endif
# ----------------------------------------------------------------------------

View File

@ -0,0 +1,18 @@
# -----------------------------------------------------------------------------
# CGAL (header-only version) without mpfr
# Selected when CGAL_FLAVOUR matches 'cgal-header-no-mpfr' (see the CGAL
# rules dispatch); defines only the compile flags - nothing to link
# against since CGAL is header-only here and mpfr is absent.
CGAL_INC = -DCGAL_HEADER_ONLY
CGAL_LIBS =
# NOTE(review): recursive '=' / '+=' keeps expansion late - presumably so
# BOOST_*_DIR / CGAL_INC_DIR may be assigned by wmake after this file is
# included; confirm before converting to ':='.
CGAL_INC += \
$(foreach dir,$(BOOST_INC_DIR),-I$(dir)) \
$(foreach dir,$(CGAL_INC_DIR),-I$(dir))
# Only Boost library directories are added: no -lCGAL or -lmpfr needed.
CGAL_LIBS += \
$(foreach dir,$(BOOST_LIB_DIR),-L$(dir))
# ----
# Extra failsafe - still needed? (2020-05-15)
## CGAL_INC += -I/usr/local/include -I/usr/include
# -----------------------------------------------------------------------------

View File

@ -1,14 +0,0 @@
# ----------------------------------------------------------------------------
# CGAL on Darwin
# CGAL (library version) without mpfr
CGAL_INC = \
$(foreach dir,$(BOOST_INC_DIR),-I$(dir)) \
$(foreach dir,$(CGAL_INC_DIR),-I$(dir))
CGAL_LIBS = \
$(foreach dir,$(BOOST_LIB_DIR),-L$(dir)) \
$(foreach dir,$(CGAL_LIB_DIR),-L$(dir)) \
-lCGAL
# ----------------------------------------------------------------------------

View File

@ -35,6 +35,7 @@ LINKLIBSO = $(CC) $(c++FLAGS) -shared \
-Wl,--strip-all
LINKEXE = $(CC) $(c++FLAGS) \
-static-libgcc -static-libstdc++ \
-Wl,--enable-auto-import \
-Wl,--strip-all \
-Wl,--force-exe-suffix