Compare commits

...

98 Commits

Author SHA1 Message Date
4f50e60556 WIP: short-circuit on direct detect of UNCOLLOCATED (#1946) 2020-12-17 11:21:51 +01:00
249314c0ea BACKPORT (testing purposes) of master read/send (#1946) 2020-12-09 14:54:16 +01:00
fe1fe09ef3 BUG: meshToMesh - corrected mapping of internal field for tgt->src mapping. Fixes #1935 2020-11-25 12:58:24 +01:00
de55ada95d BUG: typo in solidProperties prevents re-reading of "Hf" (fixes #1927) 2020-11-18 19:47:26 +01:00
fbbff15eae BUG: extraneous brackets after Fluent cell types (fixes #1872)
ENH: limit output to 25 cell types per line for readability

- makes it easier to trace potential format errors etc

STYLE: downgrade warning about polyhedrals to a simple info message

- can assume that polyhedral support is widespread enough to not
  warrant a warning.
2020-10-29 11:28:56 +01:00
b68c1fc582 BUG: parallel foamToEnsight fails depending on lagrangian order (fixes #1873)
- the problem arises when output fields are missing on some
  processors.

  When the information is combined, the resulting HashTables can have
  different insertion orders. This poses an issue when there are hash
  key collisions and thus different chaining.

- Use sorted order.
2020-10-08 19:07:08 +02:00
a589f48d09 CONFIG: increment patch level 2020-07-27 11:52:30 +02:00
201c5d2cfa BUG: potential memory leaks in HashPtrTable::set (#1787)
- backported fix from develop
2020-07-27 11:52:30 +02:00
6d6ab639f3 CONFIG: increment patch level 2020-06-04 21:58:21 +02:00
d3f9f1bce4 BUG: missing compilation for some vtk conversion components (fixes #1720) 2020-06-04 21:57:38 +02:00
3eb1fdcf5f COMP: erroneous return from void method (fixes #1716) 2020-05-26 10:41:35 +02:00
579760b7f5 BUG: incorrect startLineNumber for primitiveEntry (fixes #1706) 2020-05-14 17:22:33 +02:00
7cfddaef77 BUG: Pair sort on construct did the opposite (fixes #1701) 2020-05-11 20:25:15 +02:00
3bf486b24b COMP: adjust for CGAL-4.14 changes 2020-05-11 16:34:31 +02:00
28ae07a26a CONFIG: additional packaging helpers, tutorial test helper
- bin/tools/create-mpi-config to query/write values for system openmpi.
  In some cases this can be used to avoid an mpicc requirement at runtime.

- adjust openfoam session to latest version,
  including -test-tutorial forwarding.

- adjust foamConfigurePaths to latest version

- removal of gperftools default config, as per develop
2020-04-20 20:22:27 +02:00
eb2b797d7a BUG: missing output for foamDictionary -includes (closes #1635)
- log to stdout when explicitly enabled
2020-03-16 17:37:17 +01:00
5dde2f5b06 STYLE: foamCreate{Manpage,ModuleInclude} consistency with 1912 2020-03-12 15:40:03 +01:00
3fe4e28194 CONFIG: bump patch level 2020-03-12 11:37:02 +01:00
2b45bec19c BUG: incorrect EnSight lagrangian fields in parallel (fixes #1629) 2020-03-12 11:09:23 +01:00
2bf231860f ENH: add gradientEnergy write() method, including bugfix #1617 2020-03-10 18:39:03 +01:00
e5e39e673a CONFIG: provide separate default settings for clang/gcc (fixes #1566)
- the foamConfigurePaths script is quite simplistic and aggressive in
  what it changes. This was particularly evident when using it to
  change gcc/clang versions.

  Restructured the corresponding compiler settings to define default
  versions (eg, "default_gcc_version"), which limits the scope of the
  changes performed by foamConfigurePaths and makes the settings easier
  to understand when changing them manually.
2020-02-03 11:12:38 +01:00
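A hypothetical sketch of what the restructured compiler defaults described in the commit above could look like in an `etc/config.sh`-style file (variable values and surrounding structure are illustrative only, not the shipped settings):

```
# Hypothetical sketch only: one default version per compiler family,
# adjusted in a single place by foamConfigurePaths or by hand.
default_gcc_version=gcc-9.2.0        # example value
default_clang_version=llvm-9.0.0     # example value

case "$WM_COMPILER" in
Gcc*)   gcc_version="$default_gcc_version" ;;
Clang*) clang_version="$default_clang_version" ;;
esac
```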
564ed65b6c CONFIG: bump patch level 2020-01-31 12:10:51 +01:00
b6d3602888 COMP: avoid -Wstringop-truncation warning 2020-01-31 12:08:31 +01:00
5ef9cc55a7 COMP: backport of updates for gcc-92 compilation
ENH: add typedefs for interpolation tables
2020-01-28 12:46:27 +01:00
033537897a BUG: incorrect Nastran surface output and segmentation faults #1571
- indexing error in the output of values resulted in uniform output in
  most cases.

- allocation error for on-the-fly triangulation

ENH: changed decomposed storage from DynamicList to plain faceList for
clearer allocation control and less overhead
2020-01-27 11:04:25 +01:00
644319d63b COMP: silence gcc 8.2 memcpy warnings
- we know they are already protected by an is_contiguous check,
  so the class-memaccess warning/error can be suppressed.
2019-10-28 16:57:47 +01:00
954d8b5c58 CONFIG: adjust known gcc/clang compiler versions 2019-10-28 12:55:15 +01:00
2ad8e0aed1 STYLE: use const reference for caught exceptions 2019-01-23 09:03:06 +01:00
b2f928d2e3 BUG: ensightReadFile ignores binary string limits (#1511)
ENH: downgrade error on type-mismatch to a warning

- Ensight data files generated by OpenFOAM will start with the name of
  the data type (Eg, "scalar", "vector" etc), but this convention may
  fail for data files that have been generated elsewhere.
2019-11-25 15:37:00 +01:00
1bab2ac4c1 BUG: arraySet: incorrect index->location. Fixes #1506 2019-11-20 13:57:57 +00:00
98eb6183a9 GIT: check-in file #1504 2019-11-19 15:28:01 +01:00
fbaae94902 COMP: declare template specialisation (gcc 4.8.5) 2019-11-19 06:58:38 +01:00
09ed422656 BUG: decomposePar fails with csv table in binary (fixes #1498) 2019-11-18 16:59:51 +01:00
404f9ee69c DOC: update README for weblinks into wiki content
- relocate BuildIssues into doc/
2019-11-01 10:18:24 +01:00
726c8bd272 CONFIG: openfoam session must use /bin/bash, not /bin/sh (fixes #1461) 2019-10-17 09:29:42 +02:00
60a30bba9a CONFIG: bump patch level 2019-10-01 14:01:58 +02:00
01b8d776ab BACKPORT: openfoam shell wrapper (interactive or 'one-shot' use) 2019-09-25 17:36:10 +02:00
88527df40f CONFIG: bump patch level 2019-08-28 17:23:46 +02:00
7f0328112f BUG: missing foamExec causes foamJob failure (#1309, #1411)
- foamExec was removed as part of commit f308aa6174554096, but this
  overlooked the fact that it is still partly needed by foamJob.
  When running in parallel, it is used to source the etc/bashrc env.

  Reinstated a stripped down form, which has been relocated to
  bin/tools directory (it should not be directly called by the user).

  The previous version-switching functionality has nonetheless been
  dropped, since it only worked when a rigid naming convention for
  OpenFOAM installations was followed.

ENH: add foamJob log=, -log-app, -no-log options, improved coding quality
2019-08-28 17:19:11 +02:00
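A rough usage sketch of the new foamJob logging options (option spellings are taken from the commit message above; the solver name and argument placement are only examples):

```
# Usage sketch only - not verified against the actual foamJob help text
foamJob -parallel -log-app simpleFoam        # name the log after the application
foamJob -parallel log=run01.log simpleFoam   # write to an explicitly named log file
foamJob -parallel -no-log simpleFoam         # do not write a log file at all
```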
08c36e0d46 ENH: globalMeshData - added else branch so that in the case of a cyclic baffle
edge it sets the globalEdgeOrientation

See #1367
2019-08-14 12:05:52 +01:00
526522c375 CONFIG: bump patch level 2019-08-13 16:24:03 +02:00
cced7acd00 BUG: edge sync fails with cyclic baffles (fixes #1397)
- synchronization, reduction only makes sense on processor-coupled
  patches. Since cyclic baffles are within a single processor domain,
  they are not reduced. So need to skip the sanity test for these.
2019-08-13 16:09:52 +02:00
acc4a6f71b CONFIG: improve build against paraview on headless nodes
- the use of 'paraview --version' can fail if the build host
  doesn't have the necessary graphics. For this case, try to obtain
  the ParaView API number from the associated include directory.
2019-08-13 10:25:02 +02:00
38cd2cc5da CONFIG: more robust paraview detection 2019-08-09 14:22:30 +02:00
907a2f3495 CONFIG: bump 1812 patch level 2019-08-07 18:37:14 +02:00
9eb9e6a28f CONFIG: accept 'system' for foamConfigurePaths -paraview 2019-08-07 18:36:08 +02:00
961701c50c BUG: incorrect binary read of injectedParticle (fixes #1393)
- the read offset missed the tag_ member entirely, so the entire
  particle information would be corrupted (incorrectly interpreted),
  with potential violation of adjacent (trailing) memory locations.
2019-08-06 10:30:07 +02:00
0acf06dbe9 ENH: use average value for ensight faceZone on proc boundary (#1176) 2019-01-22 12:49:50 +01:00
545e7ef9c6 BUG: bad '#line' directives for dynamicCode (fixes #1282)
- now suppress any '#line' if the input line number is invalid
  (ie, an empty set of tokens)
2019-04-15 12:42:29 +02:00
2a9c6c97e3 BUG: distanceSurface has gaps in mesh (#1374)
- need to be more generous when prefiltering the cell selection
2019-07-12 16:38:04 +02:00
ccec2e0996 BUG: incorrect blocked face synchronisation crashes regionSplit (#1370)
- now catch these and emit a warning.
  Still need to investigate the root cause in the caller(s) or regionSplit.
2019-07-12 13:29:20 +02:00
500cb6d76f CONFIG: bump patch level 2019-07-08 14:04:49 +02:00
9c42139dd4 CONFIG: do not touch LD_PRELOAD contents at all (#1357) 2019-07-05 10:29:18 +02:00
2fdad7c90a BUG: DimensionedField - register new field if it does not have the same name as the copy - see #1348 2019-07-02 10:22:50 +01:00
ed320bcbe4 BUG: scalarBar range ignored in runTimePostProcessing
- was missing range setting for the colour lookup table
2019-05-30 12:02:56 +02:00
088d54b690 GIT: Updated bug issue template 2019-06-14 10:11:17 +01:00
1af2eed0c2 CONFIG: bump patch level 2019-05-31 16:08:24 +02:00
ac699b3fe0 BUG: avoid memory leak caused by IOobjectList::filterObjects (#1286) 2019-05-01 17:22:52 +02:00
f4faa4d713 BUG: Sf field ignored for sumDirection operation (fixes #1287) 2019-04-16 18:32:07 +02:00
ddef3e7502 BUG: inconsistent emissivity for externalWallHeatFlux (fixes #1280)
- the emissivity was ignored for 'hpTa' when there is no solid resistance

STYLE: adjust debug statement in externalWallHeatFlux
2019-04-12 10:24:38 +02:00
076ba4e247 BUG: gmsh: skip empty lines. See #1155.
Fix provided by Gavin Ridley.
2019-04-11 11:07:28 +01:00
487ca69afe BUG: snappyHexMesh: attraction distance mapping. Fixes #941. 2019-04-10 10:34:50 +01:00
bd78649efd BUG: ensight writer using incorrect first mesh time (fixes #1273)
- used fallback of 0 instead of the results time.
  This discrepancy caused the case file to have two timesets that
  only differed by the first (incorrect) entry.
2019-04-08 16:58:47 +02:00
c38117d529 BUG: foamToVTK writes to undecomposed case (closes #1271)
- when running in serial but within a processor directory,
  argList::globalPath() is to be used instead of Time.globalPath().
  For other cases there is no difference.
2019-04-08 11:53:50 +02:00
1fb98e5f7b CONFIG: incorrect location for site appbin, libbin (fixes #1270)
- still had old WM_PROJECT_VERSION settings instead of FOAM_API
  for FOAM_SITE_APPBIN and FOAM_SITE_LIBBIN locations.
2019-04-03 23:55:49 +02:00
0854a74ebe CONFIG: bump patch level 2019-04-03 16:44:32 +02:00
4d4cd02359 STYLE: missing inline specifier (#1263) 2019-04-01 07:51:54 +02:00
ee4817962e TUT: include samples instead of cuttingPlane (closes #1254) 2019-03-29 12:32:21 +01:00
214ba31a9e BUG: PBiCGStab: check minIter. Fixes #1052. 2019-03-27 16:29:52 +00:00
ddd5383f40 BUG: dereference invalid autoPtr in XiEngineFoam (fixes #1250) 2019-03-26 09:42:27 +01:00
7135b02f85 BUG: faMesh: support for multi-region moving meshes. See #1170. 2019-03-04 11:00:52 +00:00
febe3036cb COMP: adjust ARM compilation flags (#1225)
- with -mcpu=native for automatic detection and -armpl for linking in
  the performance libraries

STYLE: relocate -mcpu into compiler instead of compiler-flags (#1225)
2019-03-01 18:20:41 +01:00
b4553b193a COMP: Updated for 64 bit labels 2019-02-18 11:20:40 +00:00
39e25fa9ef COMP: Added randomProcesses-based function objects to build 2019-02-18 11:20:18 +00:00
345282078c CONFIG: missing semicolon in config file (fixes #1246) 2019-03-22 09:08:35 +01:00
d04c51a684 BUG: faMesh: processors without faMesh. See #1090.
pointNormals calculation gets triggered through processor fvPatches,
so it is now bypassed on processors that do not have these; otherwise
the global reduction hangs.
2019-03-04 10:56:41 +00:00
cc7cadcfe6 STYLE: Updated bug report template 2019-02-12 09:09:32 +00:00
522f3b43a7 BUG: Corrected debug output for solarLoad. Fixes #1195 2019-02-11 12:19:39 +00:00
9cbe613eec BUG: foamCreateModuleInclude ignores -output option (fixes #1190)
ENH: add -prefs option to pass in a preferences configuration file
2019-02-04 14:48:03 +01:00
7a2d574572 BUG: foamToEnsightParts reports the time value twice in log output 2019-01-30 15:41:06 +01:00
6165c29bb8 GIT: Added gitlab templates 2019-01-29 16:52:35 +00:00
00d5c604fb ENH: enable MPI library variants (#1153)
- in addition to managing different vendors and versions, it may also
  be necessary or desirable to have a particular variant
  (eg, profiling, release, etc).

  Devise a new meaningful name for the variant and create a
  corresponding wmake rule.

  Eg, SYSTEMOPENMPI-profiling with a corresponding
      "wmake/rules/linux64Gcc/mplibSYSTEMOPENMPI-profiling" file
  that has suitable content for your system.

CONFIG: intel-mpi use intel64/ paths only for config and wmake rules (#1153)

- previously adjusted the config files, but missed the changes
  required for the wmake rules too.

  Now simply migrate to using  "intel64/{include,bin,lib}"
  instead of the older naming  "{include,bin,lib}64"

  These changes work since at least intel-mpi 2015 (5.x), but possibly
  earlier as well
2019-01-28 19:19:09 +01:00
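A minimal sketch of how such a variant rule might be set up, following the SYSTEMOPENMPI-profiling example above (the PFLAGS/PINC/PLIBS contents are placeholders, not the actual rule shipped for SYSTEMOPENMPI):

```
# Sketch only: create a wmake rule for a profiling variant of the system openmpi
cat > "$WM_PROJECT_DIR/wmake/rules/linux64Gcc/mplibSYSTEMOPENMPI-profiling" <<'EOF'
PFLAGS  = -DOMPI_SKIP_MPICXX
PINC    = -I$(MPI_ARCH_PATH)/include
PLIBS   = -L$(MPI_ARCH_PATH)/lib -lmpi
EOF

# Select the variant when building
export WM_MPLIB=SYSTEMOPENMPI-profiling
```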
11ce375a1b BUG: thermalBaffleModel::New from thermalBafflePropertiesDict fails
- retrieved value for the modelType was masked by a local variable
2019-01-23 21:57:20 +01:00
e2e29e2d86 CONFIG: bump patch level 2019-01-23 21:14:02 +01:00
f66baf27eb BUG: registerCopy may fail for copy construct of regIOobject
- checkIn() was incorrectly conditional on the register state of the
  source object.

Partial patch from .org commit 6dc48b62d948
2019-01-23 13:25:18 +01:00
897713dda9 BUG: missing return value for PtrDynList::set 2019-01-23 09:16:59 +01:00
706d713786 CONFIG: drop config support for deprecated ensight reader library 2019-01-17 10:53:44 +01:00
1dc878551d ENH: make use of FOAM_API for environment as well (issue #1158)
- was WM_PROJECT_API in the environment and FOAM_API in dictionaries.

  Make these both consistently FOAM_API.
  This is a non-breaking change, since the value of WM_PROJECT_API
  (added in 1812) and/or FOAM_API is purely informative.
  For the current correct values, always use

    * foamEtcFile -show-api
    * wmakeBuildInfo -show-api
2019-01-10 12:21:19 +01:00
4e1b5c1cd6 ENH: provide config setup for mesa with llvm (issue #1164)
- If using a non-clang compiler suite (gcc, intel, etc) the additional
  libraries required for mesa with llvm pipelines may not be found.

  Provide a mesa_llvm configuration within the 'vtk' config file.
  Can use the usual types of settings

     * mesa_llvm=llvm-4.0.1
     * mesa_llvm=none
     * mesa_llvm=system
2019-01-10 11:50:09 +01:00
e4d4263563 BUG: snappyHexMesh: extrude non-local baffle. Fixes #1175. 2019-01-21 15:27:51 +00:00
f2e0da1309 DEP: Deprecated the ensightFoamReader 2019-01-17 09:35:04 +00:00
a812db29bc BUG: vtk::internalWriter::writeProcIDs could block in parallel
- could be triggered if running in parallel, but requesting procIDs
  to be written with a non-parallel version of the writer.
2019-01-14 16:43:30 +01:00
5eb449222c ENH: export FOAM_API in dictionary (issue #1158)
- uses the value of foamVersion::api, which should be reliable.
2019-01-07 19:04:50 +01:00
a396034634 SUBMODULES: align git information with the respective master branches 2019-01-07 13:45:21 +01:00
0d01f98efc COMP: no vtkDataArray::Fill() method prior to VTK-8 (fixes #1156) 2019-01-07 11:58:33 +01:00
73994899e0 CONFIG: adjust intel-mpi paths (issue #1153)
- and support setting cmake_version within the paraview config files
2019-01-06 15:16:10 +01:00
975eb388bb CONFIG: remove bash dependency from wmakeBuildInfo (fixes #1152)
- looks slightly messier without associative arrays, but improves
  portability. Should now also work with dash.

STYLE: support wmakeBuildInfo -cmp and -check options as equivalent
2019-01-06 10:50:37 +01:00
50ddc5e06b ENH: Reducing tolerance for the cellVolumeWeight search 2019-04-08 09:08:07 -07:00
381 changed files with 4261 additions and 55802 deletions

View File

@ -0,0 +1,73 @@
<!--
*** Please read this first! ***
Before opening a new issue, make sure to search for keywords in the issues
filtered by the "bug" label and check to see if it has already been reported
You can see how your report will be rendered on the platform by using the
"preview" tab above
-->
<!--
All text between these markers are comments and will not be present in the
report
-->
### Summary
<!-- Summarize the bug encountered concisely -->
### Steps to reproduce
<!-- How one can reproduce the issue - this is very important -->
### Example case
<!--
If possible, please create a SMALL example and attach it to your report
If you are using an older version of OpenFOAM this will also determine
whether the bug has been fixed in a more recent version
-->
### What is the current *bug* behaviour?
<!-- What actually happens -->
### What is the expected *correct* behavior?
<!-- What you should see instead -->
### Relevant logs and/or images
<!--
Paste any relevant logs - please use code blocks (```) to format console
output, logs, and code as it's very hard to read otherwise.
-->
### Environment information
<!--
Providing details of your set-up can help us identify any issues, e.g.
OpenFOAM version : v1806|v1812|v1906 etc
Operating system : ubuntu|openSUSE|centos etc
Hardware info : any info that may help?
Compiler : gcc|intel|clang etc
-->
- OpenFOAM version :
- Operating system :
- Hardware info :
- Compiler :
### Possible fixes
<!--
If you can, link to the line of code that might be responsible for the
problem
The "/label ~bug" text is a gitlab flag that will add the "bug" label to this
issue
-->
/label ~bug

View File

@ -0,0 +1,31 @@
### Functionality to add/problem to solve
(Brief scope)
### Target audience
(Who will benefit from the changes?)
(What type of cases?)
### Proposal
(How are we going to solve the problem?)
### What does success look like, and how can we measure that?
(What are the success factors and acceptance criteria? e.g. test cases, error margins)
### Links / references
(Links to literature, supporting information)
### Funding
(Does the functionality already exist/is sponsorship available?)
/label ~feature

View File

@ -0,0 +1,20 @@
### Summary
(Summarize the changes concisely)
### Resolved bugs (If applicable)
(Links to issues)
### Details of new models (If applicable)
(New options, user inputs etc)
(Images are nice :))
### Risks
(Possible regressions?)
(Changes to user inputs?)

View File

@ -24,8 +24,8 @@ command -v mpirun 2>/dev/null || true
echo "========================================"
date "+%Y-%m-%d %H:%M:%S %z" 2>/dev/null || echo "date is unknown"
echo "Starting compile ${WM_PROJECT_DIR##*/} ${0##*}"
echo " $WM_COMPILER $WM_COMPILER_TYPE compiler"
echo "Starting compile ${WM_PROJECT_DIR##*/} ${0##*/}"
echo " $WM_COMPILER ${WM_COMPILER_TYPE:-system} compiler"
echo " ${WM_OPTIONS}, with ${WM_MPLIB} ${FOAM_MPI}"
echo "========================================"
echo
@ -36,9 +36,18 @@ echo
# Compile ThirdParty libraries and applications
if [ -d "$WM_THIRD_PARTY_DIR" ]
then
$WM_THIRD_PARTY_DIR/Allwmake
if [ -e "$WM_THIRD_PARTY_DIR"/Allwmake.override ]
then
if [ -x "$WM_THIRD_PARTY_DIR"/Allwmake.override ]
then "$WM_THIRD_PARTY_DIR"/Allwmake.override
fi
elif [ -x "$WM_THIRD_PARTY_DIR"/Allwmake ]
then "$WM_THIRD_PARTY_DIR"/Allwmake
else
echo "Skip ThirdParty (no Allwmake* files)"
fi
else
echo "No ThirdParty directory found - skipping"
echo "Skip ThirdParty (no directory)"
fi
echo "========================================"
@ -57,13 +66,13 @@ then
echo "========================================"
echo "Compile OpenFOAM modules"
echo
(cd $WM_PROJECT_DIR/modules 2>/dev/null && wmake -all)
(cd "$WM_PROJECT_DIR/modules" 2>/dev/null && wmake -all)
fi
# Count files in given directory. Ignore "Test-*" binaries.
_foamCountDirEntries()
{
(cd "$1" 2>/dev/null && find -mindepth 1 -maxdepth 1 -type f 2>/dev/null) |\
(cd "$1" 2>/dev/null && find . -mindepth 1 -maxdepth 1 -type f 2>/dev/null) |\
sed -e '\@/Test-@d' | wc -l
}
@ -72,13 +81,13 @@ echo
date "+%Y-%m-%d %H:%M:%S %z" 2>/dev/null || echo "date is unknown"
echo "========================================"
echo " ${WM_PROJECT_DIR##*/}"
echo " $WM_COMPILER $WM_COMPILER_TYPE compiler"
echo " $WM_COMPILER ${WM_COMPILER_TYPE:-system} compiler"
echo " ${WM_OPTIONS}, with ${WM_MPLIB} ${FOAM_MPI}"
echo
echo " api = $(wmakeBuildInfo -show-api 2>/dev/null)"
echo " patch = $(wmakeBuildInfo -show-patch 2>/dev/null)"
echo " bin = $(_foamCountDirEntries $FOAM_APPBIN) entries"
echo " lib = $(_foamCountDirEntries $FOAM_LIBBIN) entries"
echo " api = $(etc/openfoam -show-api 2>/dev/null)"
echo " patch = $(etc/openfoam -show-patch 2>/dev/null)"
echo " bin = $(_foamCountDirEntries "$FOAM_APPBIN") entries"
echo " lib = $(_foamCountDirEntries "$FOAM_LIBBIN") entries"
echo
echo "========================================"

View File

@ -1,2 +1,2 @@
api=1812
patch=0
patch=200727

163
README.md
View File

@ -2,7 +2,12 @@
OpenFOAM is a free, open source CFD software [released and developed primarily by OpenCFD Ltd](http://www.openfoam.com) since 2004. It has a large user base across most areas of engineering and science, from both commercial and academic organisations. OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, to acoustics, solid mechanics and electromagnetics. [More...](http://www.openfoam.com/documentation)
OpenFOAM is professionally released every six months to include customer sponsored developments and contributions from the community - individual and group contributors, fork re-integrations including from FOAM-extend and OpenFOAM Foundation Ltd - in this Official Release sanctioned by the OpenFOAM Worldwide Trademark Owner aiming towards one OpenFOAM.
OpenFOAM is professionally released every six months to include
customer sponsored developments and contributions from the community -
individual and group contributors, re-integrations (including from
FOAM-extend and OpenFOAM Foundation Ltd) - in this Official Release
sanctioned by the OpenFOAM Worldwide Trademark Owner aiming towards
one OpenFOAM.
# Copyright
@ -17,6 +22,162 @@ Please [contact OpenCFD](http://www.openfoam.com/contact) if you have any questi
Violations of the Trademark are continuously monitored, and will be duly prosecuted.
# Using OpenFOAM
If OpenFOAM has already been compiled on your system, simply source
the appropriate `etc/bashrc` or `etc/cshrc` file and get started.
For example, for the OpenFOAM-v1906 version:
```
source /installation/path/OpenFOAM-v1906/etc/bashrc
```
# Compiling OpenFOAM
If you are compiling OpenFOAM from source, please see the relevant
guides:
| Location | Readme | Requirements | Build |
|-------------|-----------|--------------|-------|
| [OpenFOAM][repo openfoam] | [readme][link openfoam-readme] | [system requirements][link openfoam-require] | [build][link openfoam-build] |
| [ThirdParty][repo third] | [readme][link third-readme] | [system requirements][link third-require] | [build][link third-build] |
# How do I know which version I am currently using?
The values of `$WM_PROJECT_DIR` or even `$WM_PROJECT_VERSION` are
not guaranteed to have any correspondence to the OpenFOAM release
(API) value. If OpenFOAM has already been compiled, the build-time
information is embedded into each application. For example, as
displayed from `blockMesh -help`:
```
Using: OpenFOAM-v1812.local (1812) (see www.OpenFOAM.com)
Build: 65d6551ff7-20190530 (patch=190531)
Arch: LSB;label=32;scalar=64
```
This output contains all of the more interesting information that we need:
| item | value |
|-----------------------|---------------|
| version | v1812.local |
| api | 1812 |
| commit | 65d6551ff7 |
| author date | 20190530 |
| patch-level | (20)190531 |
| label/scalar size | 32/64 bits |
As can be seen in this example, the git build information is
supplemented by the date when the last change was authored, which can
be helpful when the repository contains local changes. If you simply
wish to know the current API and patch levels directly, the
`wmakeBuildInfo` script provides the relevant information even
when OpenFOAM has not yet been compiled:
```
$ wmakeBuildInfo
make
api = 1812
patch = 190531
branch = master
build = 65d6551ff7-20190530
```
Similar information is available with `foamEtcFile`, using the
`-show-api` or `-show-patch` options. For example,
```
$ foamEtcFile -show-api
1812
$ foamEtcFile -show-patch
190531
```
This output will generally be the easiest to parse for scripts.
The `$FOAM_API` convenience environment variable may not reflect the
patching changes made within the currently active environment and
should be used with caution.
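For example, to compare the cached environment value with what the installation itself reports:

```
echo "$FOAM_API"          # value captured when etc/bashrc was sourced (may be stale)
foamEtcFile -show-api     # value read from the current installation
foamEtcFile -show-patch   # current patch level
```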
# ThirdParty directory
OpenFOAM normally ships with a directory of 3rd-party software and
build scripts for some 3rd-party software that is either necessary or
at least highly useful for OpenFOAM, but which are not necessarily
readily available on every operating system or cluster installation.
These 3rd-party sources are normally located in a directory parallel
to the OpenFOAM directory. For example,
```
/path/parent
|-- OpenFOAM-v1906
\-- ThirdParty-v1906
```
There are, however, many cases where this simple convention is inadequate:
* When no additional 3rd party software is actually required (ie, the
operating system or cluster installation provides it)
* When we have changed the OpenFOAM directory name to some arbitrary
directory name, e.g. openfoam-sandbox1906, etc.
* When we would like any additional 3rd party software to be located
inside of the OpenFOAM directory to ensure that the installation is
encapsulated within a single directory structure. This can be
necessary for cluster installations, or may simply be a convenient
means of performing a software rollout for individual workstations.
* When we have many different OpenFOAM directories for testing or
developing various different features but wish to use or reuse the
same 3rd party software for them all.
The solution for these problems is a newer, more intelligent discovery mechanism for locating the ThirdParty directory, with the following precedence:
1. PROJECT/ThirdParty
* for single-directory installations
2. PREFIX/ThirdParty-VERSION
* this corresponds to the traditional approach
3. PREFIX/ThirdParty-vAPI
* allows for an updated value of VERSION, *eg*, `v1906-myCustom`,
without requiring a renamed ThirdParty. The API value would still
be `1906` and the original `ThirdParty-v1906/` would be found.
4. PREFIX/ThirdParty-API
* this is the same as the previous example, but using an unadorned
API value. This also makes sense if the chosen version name also
uses the unadorned API value in its naming, *eg*,
`1906-patch190131`, `1906.19W03`
5. PREFIX/ThirdParty-common
* permits maximum reuse for various versions, but only for
     experienced users who are aware of potential version
incompatibilities
If none of these directories are found to be suitable, it reverts to using PROJECT/ThirdParty as a dummy location (even if the directory does not exist). This is a safe fallback value since it is within the OpenFOAM directory structure and can be trusted to have no negative side-effects.
In the above, the following notation has been used:
| name | value | meaning |
|---------------|---------------|---------------|
| PROJECT | `$WM_PROJECT_DIR` | The OpenFOAM directory |
| PREFIX | `dirname $WM_PROJECT_DIR` | The OpenFOAM parent directory |
| API | `foamEtcFile -show-api` | The api or release version |
| VERSION | `$WM_PROJECT_VERSION` | The version we've chosen |
To reduce the potential of false positive matches (perhaps some other
software also uses ThirdParty-xxx for its naming), the directory test
is accompanied by an OpenFOAM-specific sanity test. The OpenFOAM
ThirdParty directory will contain either an `Allwmake` file or a
`platforms/` directory.
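The lookup order can be pictured with a small shell sketch (purely illustrative; this is not the actual implementation):

```
# Illustrative sketch of the ThirdParty discovery order described above
project="$WM_PROJECT_DIR"
prefix="${WM_PROJECT_DIR%/*}"
api="$(foamEtcFile -show-api)"
version="$WM_PROJECT_VERSION"

thirdParty="$project/ThirdParty"    # dummy fallback location
for dir in \
    "$project/ThirdParty" \
    "$prefix/ThirdParty-$version" \
    "$prefix/ThirdParty-v$api" \
    "$prefix/ThirdParty-$api" \
    "$prefix/ThirdParty-common"
do
    # sanity check: an OpenFOAM ThirdParty directory has Allwmake or platforms/
    if [ -f "$dir/Allwmake" ] || [ -d "$dir/platforms" ]
    then
        thirdParty="$dir"
        break
    fi
done
echo "Using ThirdParty: $thirdParty"
```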
<!-- OpenFOAM -->
[repo openfoam]: https://develop.openfoam.com/Development/OpenFOAM-plus/
[repo third]: https://develop.openfoam.com/Development/ThirdParty-plus/
[link openfoam-readme]: https://develop.openfoam.com/Development/OpenFOAM-plus/blob/develop/README.md
[link openfoam-issues]: https://develop.openfoam.com/Development/OpenFOAM-plus/blob/develop/doc/BuildIssues.md
[link openfoam-config]: https://develop.openfoam.com/Development/OpenFOAM-plus/blob/develop/doc/Config.md
[link openfoam-build]: https://develop.openfoam.com/Development/OpenFOAM-plus/blob/develop/doc/Build.md
[link openfoam-require]: https://develop.openfoam.com/Development/OpenFOAM-plus/blob/develop/doc/Requirements.md
[link third-readme]: https://develop.openfoam.com/Development/ThirdParty-plus/blob/develop/README.md
[link third-build]: https://develop.openfoam.com/Development/ThirdParty-plus/blob/develop/BUILD.md
[link third-require]: https://develop.openfoam.com/Development/ThirdParty-plus/blob/develop/Requirements.md
# Useful Links
- [Download and installation instructions](http://www.openfoam.com/download/)
- [Documentation](http://www.openfoam.com/documentation)

View File

@ -1,15 +1,22 @@
Info<< "Mean pressure:" << p.weightedAverage(mesh.V()).value() << endl;
Info<< "Mean temperature:" << thermo.T().weightedAverage(mesh.V()).value()
<< endl;
Info<< "Mean u':"
<< (sqrt((2.0/3.0)*turbulence->k()))().weightedAverage(mesh.V()).value()
<< endl;
{
const scalar meanP = p.weightedAverage(mesh.V()).value();
const scalar meanT = thermo.T().weightedAverage(mesh.V()).value();
const scalar meanUp =
(sqrt((2.0/3.0)*turbulence->k()))().weightedAverage(mesh.V()).value();
const scalar meanB = b.weightedAverage(mesh.V()).value();
logSummaryFile()
<< runTime.theta() << tab
<< p.weightedAverage(mesh.V()).value() << tab
<< thermo.T().weightedAverage(mesh.V()).value() << tab
<< (sqrt((2.0/3.0)*turbulence->k()))().weightedAverage(mesh.V()).value()
<< tab
<< 1 - b.weightedAverage(mesh.V()).value()
<< endl;
Info<< "Mean pressure:" << meanP << nl
<< "Mean temperature:" << meanT << nl
<< "Mean u':" << meanUp << endl;
if (Pstream::master())
{
logSummaryFile()
<< runTime.theta() << tab
<< meanP << tab
<< meanT << tab
<< meanUp << tab
<< 1 - meanB
<< endl;
}
}

View File

@ -183,7 +183,7 @@ int main(int argc, char *argv[])
<< "(non-const)\n";
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Failed (expected) " << err << nl << endl;
}
@ -205,7 +205,7 @@ int main(int argc, char *argv[])
Info<< "[20] is false, as expected for const-access\n";
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Failed (expected) " << err << nl << endl;
}
@ -275,7 +275,7 @@ int main(int argc, char *argv[])
list1[16] = 5;
list1[36] = list1.max_value;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Failed (expected) " << err << nl << endl;

View File

@ -201,7 +201,7 @@ int main(int argc, char *argv[])
Info<<"Random position(10,5): "
<< Random().position<label>(10, 5) << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}

View File

@ -108,11 +108,11 @@ void doTest(const dictionary& dict)
basicTests(cs1);
}
catch (Foam::IOerror& err)
catch (const Foam::IOerror& err)
{
Info<< "Caught FatalIOError " << err << nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}
@ -136,11 +136,11 @@ void doTest(const objectRegistry& obr, const dictionary& dict)
basicTests(cs1);
}
catch (Foam::IOerror& err)
catch (const Foam::IOerror& err)
{
Info<< "Caught FatalIOError " << err << nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}

View File

@ -0,0 +1,19 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: v1812 |
| \\ / A nd | Web: www.OpenFOAM.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object testDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
_FOAM_API $FOAM_API;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

View File

@ -61,12 +61,12 @@ scalar try_readScalar(const dictionary& dict, const word& k)
val = readScalar(dict.lookup(k));
Info<< "readScalar(" << k << ") = " << val << nl;
}
catch (Foam::IOerror& err)
catch (const Foam::IOerror& err)
{
Info<< "readScalar(" << k << ") Caught FatalIOError "
<< err << nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "readScalar(" << k << ") Caught FatalError "
<< err << nl << endl;
@ -91,12 +91,12 @@ scalar try_getScalar(const dictionary& dict, const word& k)
val = dict.get<scalar>(k);
Info<< "get<scalar>(" << k << ") = " << val << nl;
}
catch (Foam::IOerror& err)
catch (const Foam::IOerror& err)
{
Info<< "get<scalar>(" << k << ") Caught FatalIOError "
<< err << nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "get<scalar>(" << k << ") Caught FatalError "
<< err << nl << endl;
@ -127,12 +127,12 @@ scalar try_getScalar(const entry* eptr, const word& k)
val = eptr->get<scalar>();
Info<< "entry get<scalar>(" << k << ") = " << val << nl;
}
catch (Foam::IOerror& err)
catch (const Foam::IOerror& err)
{
Info<< "entry get<scalar>(" << k << ") Caught FatalIOError "
<< err << nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "entry get<scalar>(" << k << ") Caught FatalError "
<< err << nl << endl;

View File

@ -1,3 +0,0 @@
udr_checker.c
EXE = $(FOAM_USER_APPBIN)/ensightFoamReader-udr_checker

View File

@ -1,5 +0,0 @@
EXE_INC = \
-DUSERD_API_203
EXE_LIBS = \
-luserd-foam

View File

@ -1,116 +0,0 @@
udr_checker
-----------
udr_checker.c is a routine that can be used to debug EnSight User-defined
readers. It exists because of the difficulty of debugging dynamic shared
libraries when you don't have the source for the calling program (EnSight).
If udr_checker.c is compiled and linked with your reader source code (including
access to any libraries needed, and the global_extern.h file), it will exercise
most options of you reader, giving feedback as it goes. The resulting
executable can be debugged using your favorite debugger. And if you have
memory/bounds checking software (such as purify), you can (and should) run it
with this executable to make sure that you are not overwriting things. Readers
that bash memory will cause problems when run with EnSight!
You will note that the Makefiles provided with the readers in the EnSight
distribution have a "checker" object. If you do a "make checker" instead of
just a "make", the "checker" executable will be produced. You may need to
modify these makefiles slightly if the locations of your reader files are
different from the normal ones.
--------------------------------------
Once the "checker" executable exists, you can run the checker program by simply
invoking it:
> checker
And you will be prompted for the type of information that you provide in the
EnSight Data Reader dialog, namely:
The path
filename_1
[filename_2] Only if your reader uses two fields
swapbytes flag
<toggle flags> Only if your reader implements extra GUI
<pulldown flags> one flag value per line
<field contents> one field string per line
There are certain command line options that you can use to control some aspects
of the checker program. One of the more useful is the ability to provide the
input just described in a file. This is done in this fashion:
> checker -p <playfile>
And <playfile> would be a simple ascii file with 3 [or 4] lines:
line 1: the path
line 2: filename_1
line 3: [filename_2] (if two_fields is TRUE)
line 3 or 4: 0 or 1, for swapbytes (0 is FALSE, 1 is TRUE)
remaining lines:  0 or 1 for toggle disable/enabled
                      (one line for each toggle)
                  0 - num_pulldown_values for pulldown choice
                      (one line for each pulldown)
                  strings
                      (one line for each field)
example playfile for an EnSight Gold reader casefile (entitled cube.play)
could look something like the following: (Note: two_fields is FALSE)
-------------------
/usr/local/bin/data/ens
cube.case
0
And you would invoke checker as:
> checker -p cube.play
Another example playfile
with swapbytes 0,
two enabled toggles,
three pulldowns with the value 0 chosen
and a single field "sample field value"
could look something like the following:
----------------------
/mydirectory/subdir/
myfile
0
1
1
0
0
0
sample field value
Other command line arguments are:
---------------------------------
-server_number For checking server number routines. If you use this
option, you will be prompted for the total number of
servers and the current server number. These will then be
used in the calls to the server number routines.
-gts # For specifying the geometry timestep to test. The default
is step 0.
The # is the (zero based) time step to read for geometry.
-vts # For specifying the variable timestep to test. The default
is step 0.
The # is the (zero based) time step to read for variables.
Testing optional routines using #defines
-----------------------------------------
For optional routines, such as the extra_gui, or var_extract_gui routines, you
must uncomment the proper #define in udr_checker.c
Currently the ones available are:
#define _EGS for extra gui routines
#define _VES for var extract gui routines

View File

@ -1,91 +0,0 @@
#!/bin/sh
#------------------------------------------------------------------------------
# ========= |
# \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
# \\ / O peration |
# \\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
# \\/ M anipulation |
#-------------------------------------------------------------------------------
# License
# This file is part of OpenFOAM.
#
# OpenFOAM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
#
# Script
# ensightFoamReaderTest
#
# Description
# start ensightFoamReader-udr_checker
#
#------------------------------------------------------------------------------
usage() {
while [ "$#" -ge 1 ]; do echo "$1"; shift; done
cat<<USAGE
usage: ${0##*/} [OPTION]
options:
-case dir specify alternative case directory
* start ensightFoamReader-udr_checker
USAGE
exit 1
}
# parse options
while [ "$#" -gt 0 ]
do
case "$1" in
-h | -help)
usage
;;
-case)
[ "$#" -ge 2 ] || usage "'$1' option requires an argument"
cd "$2" 2>/dev/null || usage "directory does not exist: '$2'"
shift 2
;;
*)
usage "unknown option/argument: '$*'"
;;
esac
done
# check existence of essential files
for check in system/controlDict system/fvSchemes system/fvSolution
do
[ -s "$check" ] || usage "file does not exist: '$check'"
done
# export values that might be needed
export FOAM_CASE=$PWD
export FOAM_CASENAME=${PWD##*/}
pathName=${PWD%/*}
playFile=/tmp/ensightFoamReader.$$
trap "rm -f $playFile 2>/dev/null; exit 0" EXIT TERM INT
cat << PLAY_FILE > $playFile
$pathName
$FOAM_CASENAME
0
PLAY_FILE
echo "ensightFoamReader-udr_checker -p $playFile"
ensightFoamReader-udr_checker -p $playFile
#------------------------------------------------------------------------------

View File

@ -1 +0,0 @@
../../utilities/postProcessing/graphics/ensightFoamReader/global_extern.h

View File

@ -1 +0,0 @@
../../utilities/postProcessing/graphics/ensightFoamReader/global_extern_proto.h

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1 +0,0 @@
udr_checker-80.c

View File

@ -55,7 +55,7 @@ int main(int argc, char *argv[])
<< "Error 2"
<< exit(FatalError);
}
catch (Foam::error& fErr)
catch (const Foam::error& fErr)
{
Serr<< "Caught Foam error " << fErr << nl << endl;
}
@ -66,7 +66,7 @@ int main(int argc, char *argv[])
<< "Error# 3"
<< exit(FatalError);
}
catch (Foam::error& fErr)
catch (const Foam::error& fErr)
{
Serr<< "Caught Foam error " << fErr << nl << endl;
}

View File

@ -120,7 +120,7 @@ int main(int argc, char *argv[])
labelledTri l1{ 1, 2, 3, 10, 24 };
Info<< "labelled:" << l1 << nl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
WarningInFunction
<< "Caught FatalError " << err << nl << endl;

View File

@ -783,7 +783,7 @@ int main(int argc, char *argv[])
<< findEtcFile("<very-badName>", true) << nl
<< endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< nl << "findEtcFile() Caught FatalError "
<< err << nl << endl;

View File

@ -75,7 +75,7 @@ unsigned testParsing
{
val = function (str);
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
parsed = false;
errMsg = err.message();

View File

@ -127,7 +127,7 @@ int main(int argc, char *argv[])
}
Info<< endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
continue;
@ -164,7 +164,7 @@ int main(int argc, char *argv[])
Info<< "pass - null pointer is no expression" << endl;
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}
@ -181,7 +181,7 @@ int main(int argc, char *argv[])
Info<< "no match" << endl;
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}
@ -198,7 +198,7 @@ int main(int argc, char *argv[])
Info<< "no match" << endl;
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}
@ -215,7 +215,7 @@ int main(int argc, char *argv[])
Info<< "no match" << endl;
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}
@ -232,7 +232,7 @@ int main(int argc, char *argv[])
Info<< "pass - no match on empty expression" << endl;
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}
@ -249,7 +249,7 @@ int main(int argc, char *argv[])
Info<< "pass - no match on empty expression" << endl;
}
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
Info<< "Caught FatalError " << err << nl << endl;
}

View File

@ -2,8 +2,11 @@
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
Copyright (C) 2020 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -60,13 +63,13 @@ void Foam::fluentFvMesh::writeFluentMesh() const
/ time().caseName() + ".msh"
);
Info<< "Writing Header" << endl;
Info<< "Writing Fluent Mesh" << endl;
fluentMeshFile
<< "(0 \"FOAM to Fluent Mesh File\")" << std::endl << std::endl
<< "(0 \"Dimension:\")" << std::endl
<< "(2 3)" << std::endl << std::endl
<< "(0 \"Grid dimensions:\")" << std::endl;
<< "(0 \"OpenFOAM to Fluent Mesh File\")" << nl << nl
<< "(0 \"Dimension:\")" << nl
<< "(2 3)" << nl << nl
<< "(0 \"Grid dimensions:\")" << nl;
// Writing number of points
fluentMeshFile
@ -215,8 +218,8 @@ void Foam::fluentFvMesh::writeFluentMesh() const
// Writing cells
fluentMeshFile
<< "(12 (1 1 "
<< nCells() << " 1 0)(" << std::endl;
<< "(12 (1 1 " << nCells() << " 1 0)" << nl
<< '(';
const cellModel& hex = cellModel::ref(cellModel::HEX);
const cellModel& prism = cellModel::ref(cellModel::PRISM);
@ -225,44 +228,59 @@ void Foam::fluentFvMesh::writeFluentMesh() const
const cellShapeList& cells = cellShapes();
bool hasWarned = false;
label nPolys = 0;
int nElemPerLine = 25; // Start with linebreak and indent
forAll(cells, celli)
{
if (nElemPerLine == 25)
{
// 25 elements per line with initial indent (readability)
fluentMeshFile << "\n ";
nElemPerLine = 0;
}
else if (!(nElemPerLine % 5))
{
// Format in blocks of 5 (readability)
fluentMeshFile << token::SPACE;
}
fluentMeshFile << token::SPACE;
++nElemPerLine;
if (cells[celli].model() == tet)
{
fluentMeshFile << " " << 2;
fluentMeshFile << 2;
}
else if (cells[celli].model() == hex)
{
fluentMeshFile << " " << 4;
fluentMeshFile << 4;
}
else if (cells[celli].model() == pyr)
{
fluentMeshFile << " " << 5;
fluentMeshFile << 5;
}
else if (cells[celli].model() == prism)
{
fluentMeshFile << " " << 6;
fluentMeshFile << 6;
}
else
{
if (!hasWarned)
{
hasWarned = true;
WarningInFunction
<< "foamMeshToFluent: cell shape for cell "
<< celli << " only supported by Fluent polyhedral meshes."
<< nl
<< " Suppressing any further messages for polyhedral"
<< " cells." << endl;
}
fluentMeshFile << " " << 7;
fluentMeshFile << 7;
++nPolys;
}
}
fluentMeshFile << ")())" << std::endl;
fluentMeshFile
<< nl << "))" << nl;
if (nPolys)
{
Info<< "Mesh had " << nPolys << " polyhedrals." << endl;
}
// Return to dec
fluentMeshFile.setf(ios::dec, ios::basefield);

View File

@ -822,6 +822,12 @@ int main(int argc, char *argv[])
{
string line;
inFile.getLine(line);
if (line.empty())
{
continue;
}
IStringStream lineStr(line);
word tag(lineStr);

View File

@ -141,6 +141,13 @@ public:
typedef indexedVertex<Gt,Vb2> Other;
};
// Generated Methods
//- Copy construct
indexedVertex(const indexedVertex&) = default;
// Constructors
inline indexedVertex();

View File

@ -525,7 +525,7 @@ bool doCommand
}
}
}
catch (Foam::IOerror& fIOErr)
catch (const Foam::IOerror& fIOErr)
{
ok = false;
@ -536,7 +536,7 @@ bool doCommand
Pout<< topoSetSource::usage(sourceType).c_str();
}
}
catch (Foam::error& fErr)
catch (const Foam::error& fErr)
{
ok = false;

View File

@ -65,7 +65,7 @@ int main(int argc, char *argv[])
{
utility.reset(helpType::New(utilityName));
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
utility.clear();

View File

@ -71,10 +71,10 @@ if (doLagrangian)
}
}
forAllConstIters(theseCloudFields, fieldIter)
// Field order may differ on individual processors, so sort by name
for (const word& fieldName : theseCloudFields.sortedToc())
{
const word& fieldName = fieldIter.key();
const word& fieldType = fieldIter.object();
const word& fieldType = theseCloudFields[fieldName];
IOobject fieldObject
(
@ -92,9 +92,13 @@ if (doLagrangian)
// but that combination does not work.
// So check the header and sync globally
const bool parRun = Pstream::parRun();
Pstream::parRun() = false;
fieldExists =
fieldObject.typeHeaderOk<IOField<scalar>>(false);
Pstream::parRun() = parRun;
reduce(fieldExists, orOp<bool>());
}

View File

@ -1,8 +1,8 @@
// Read time index from */uniform/time, but treat 0 and constant specially
// or simply increment from the '-index' option if it was supplied
label timeIndex = 0;
label timeIndex = 0;
{
if (optIndex)
{
timeIndex = indexingNumber++;
@ -37,7 +37,4 @@
continue;
}
}
Info<< nl << "Time [" << timeIndex << "] = " << runTime.timeName() << nl;
// end-of-file
}

View File

@ -47,7 +47,7 @@ if (doFiniteArea)
{
faMeshPtr.reset(new faMesh(meshProxy.baseMesh()));
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
faMeshPtr.clear();
}

View File

@ -94,7 +94,7 @@ if (doLagrangian)
);
Info<< " Lagrangian: "
<< runTime.relativePath(writer.output()) << nl;
<< args.relativePath(writer.output()) << nl;
writer.writeTimeValue(mesh.time().value());
writer.writeGeometry();

View File

@ -107,7 +107,7 @@ Description
);
Info<< " Surface : "
<< runTime.relativePath(writer.output()) << nl;
<< args.relativePath(writer.output()) << nl;
writer.writeTimeValue(timeValue);
@ -211,7 +211,7 @@ Description
);
Info<< " FaceZone : "
<< runTime.relativePath(writer.output()) << nl;
<< args.relativePath(writer.output()) << nl;
writer.beginFile(fz.name());

View File

@ -42,7 +42,7 @@ if (faceSetName.size())
);
Info<< " faceSet : "
<< runTime.relativePath(outputName) << nl;
<< args.relativePath(outputName) << nl;
vtk::writeFaceSet
(
@ -70,7 +70,7 @@ if (pointSetName.size())
);
Info<< " pointSet : "
<< runTime.relativePath(outputName) << nl;
<< args.relativePath(outputName) << nl;
vtk::writePointSet
(

View File

@ -113,7 +113,7 @@ Description
);
Info<< " Internal : "
<< runTime.relativePath(internalWriter->output()) << nl;
<< args.relativePath(internalWriter->output()) << nl;
internalWriter->writeTimeValue(mesh.time().value());
internalWriter->writeGeometry();
@ -163,7 +163,7 @@ Description
);
Info<< " Boundaries: "
<< runTime.relativePath(writer->output()) << nl;
<< args.relativePath(writer->output()) << nl;
writer->writeTimeValue(timeValue);
writer->writeGeometry();
@ -229,7 +229,7 @@ Description
);
Info<< " Boundary : "
<< runTime.relativePath(writer->output()) << nl;
<< args.relativePath(writer->output()) << nl;
writer->writeTimeValue(timeValue);
writer->writeGeometry();

View File

@ -575,7 +575,7 @@ int main(int argc, char *argv[])
fvMeshSubsetProxy::subsetType cellSubsetType = fvMeshSubsetProxy::NONE;
string vtkName = runTime.globalCaseName();
string vtkName = args.globalCaseName();
if (regionNames.size() == 1)
{
@ -628,7 +628,7 @@ int main(int argc, char *argv[])
// Sub-directory for output
const word vtkDirName = args.opt<word>("name", "VTK");
const fileName outputDir(runTime.globalPath()/vtkDirName);
const fileName outputDir(args.globalPath()/vtkDirName);
if (Pstream::master())
{
@ -650,7 +650,7 @@ int main(int argc, char *argv[])
if (args.found("overwrite") && isDir(regionDir))
{
Info<< "Removing old directory "
<< runTime.relativePath(regionDir)
<< args.relativePath(regionDir)
<< nl << endl;
rmDir(regionDir);
}

View File

@ -30,6 +30,7 @@ License
#include "areaFaMesh.H"
#include "faMesh.H"
#include "fvMesh.H"
#include "foamVersion.H"
#include "Time.H"
#include "patchZones.H"
#include "IOobjectList.H"
@ -314,6 +315,9 @@ Foam::vtkPVFoam::vtkPVFoam
fullCasePath = cwd();
}
// OPENFOAM API
setEnv("FOAM_API", std::to_string(foamVersion::api), true);
// The name of the executable, unless already present in the environment
setEnv("FOAM_EXECUTABLE", "paraview", false);

View File

@ -286,12 +286,12 @@ void Foam::vtkPVFoam::convertVolFields
// Convert
convertVolField(patchInterpList, fld);
}
catch (Foam::IOerror& ioErr)
catch (const Foam::IOerror& ioErr)
{
ioErr.write(Warning, false);
Info<< nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
// Bit of trickery to get the original message
err.write(Warning, false);
@ -368,12 +368,12 @@ void Foam::vtkPVFoam::convertDimFields
convertVolField(patchInterpList, volFld);
}
catch (Foam::IOerror& ioErr)
catch (const Foam::IOerror& ioErr)
{
ioErr.write(Warning, false);
Info<< nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
// Bit of trickery to get the original message
err.write(Warning, false);
@ -496,12 +496,12 @@ void Foam::vtkPVFoam::convertAreaFields
dataset->GetCellData()->AddArray(cdata);
}
}
catch (Foam::IOerror& ioErr)
catch (const Foam::IOerror& ioErr)
{
ioErr.write(Warning, false);
Info<< nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
// Bit of trickery to get the original message
err.write(Warning, false);
@ -638,12 +638,12 @@ void Foam::vtkPVFoam::convertPointFields
dataset->GetPointData()->AddArray(pdata);
}
}
catch (Foam::IOerror& ioErr)
catch (const Foam::IOerror& ioErr)
{
ioErr.write(Warning, false);
Info<< nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
// Bit of trickery to get the original message
err.write(Warning, false);
@ -817,12 +817,12 @@ void Foam::vtkPVFoam::convertLagrangianFields
vtkmesh->GetCellData()->AddArray(data);
vtkmesh->GetPointData()->AddArray(data);
}
catch (Foam::IOerror& ioErr)
catch (const Foam::IOerror& ioErr)
{
ioErr.write(Warning, false);
Info<< nl << endl;
}
catch (Foam::error& err)
catch (const Foam::error& err)
{
// Bit of trickery to get the original message
err.write(Warning, false);

View File

@ -3,7 +3,7 @@
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation | Copyright (C) 2017 OpenCFD Ltd.
\\/ M anipulation | Copyright (C) 2017-2019 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -29,6 +29,7 @@ License
// OpenFOAM includes
#include "blockMesh.H"
#include "blockMeshTools.H"
#include "foamVersion.H"
#include "Time.H"
#include "patchZones.H"
#include "StringStream.H"
@ -206,6 +207,9 @@ Foam::vtkPVblockMesh::vtkPVblockMesh
fullCasePath = cwd();
}
// OPENFOAM API
setEnv("FOAM_API", std::to_string(foamVersion::api), true);
// The name of the executable, unless already present in the environment
setEnv("FOAM_EXECUTABLE", "paraview", false);

View File

@ -1,9 +0,0 @@
#!/bin/sh
cd ${0%/*} || exit 1 # Run from this directory
. $WM_PROJECT_DIR/wmake/scripts/AllwmakeParseArguments
#------------------------------------------------------------------------------
wmake $targetType
#------------------------------------------------------------------------------

View File

@ -1,3 +0,0 @@
libuserd.C
LIB = $(FOAM_LIBBIN)/libuserd-foam

View File

@ -1,14 +0,0 @@
EXE_INC = \
-I$(LIB_SRC)/finiteVolume/lnInclude \
-I$(LIB_SRC)/meshTools/lnInclude \
-I$(LIB_SRC)/browser/lnInclude \
-I$(LIB_SRC)/sampling/lnInclude \
-I$(LIB_SRC)/lagrangian/basic/lnInclude
LIB_LIBS = \
-lOpenFOAM \
-lfiniteVolume \
-lmeshTools \
-lgenericPatchFields \
-llagrangian \
$(PROJECT_LIBS)

View File

@ -1,361 +0,0 @@
README_1.0_to_2.0
=================
This document exists to help those who already have a working user defined
reader (using the 1.0 API) to change it into the 2.0 API format - if desired.
Note that you do not have to update your (1.0 API) user defined reader if it
is already working fine for you.
You should consider it if:
- efficiency gains are needed or
- you need access to complex variables or
- you need access to tensor variables or
- you need multiple timeset capability or
- you want to provide your own "border" elements (as opposed to EnSight's
computation of them)
As an indication of the differences that might be realized in efficiency,
consider the following comparison on an unstructured model consisting of:
1,639,058 nodes
7,079,211 elements 240530 tria3
3984 quad4
5927663 tetra4
653 pyramid5
906381 penta6
12 parts
The same model was represented in EnSight6 and EnSight Gold format.
                       |        EnSight6 format into:             | EnSight Gold format into:
                       | EnSight7.1  | EnSight7.2  | EnSight7.1   | EnSight7.2  | EnSight7.2
                       | internal    | internal    | userd reader | internal    | userd reader
                       | reader      | reader      | (API 1.0)    | reader      | (API 2.0)
                       | Time   Mem  | Time   Mem  | Time   Mem   | Time   Mem  | Time   Mem
                       | (sec)  (Mb) | (sec)  (Mb) | (sec)  (Mb)  | (sec)  (Mb) | (sec)  (Mb)
-----------------------|-------------|-------------|--------------|-------------|-------------
@ part loader          |  4.3   27.6 |  3.5   28.4 |  4.0   27.6  |  3.3    8.8 |  3.3    8.9
after loading all      | 14.0  243.4 | 12.8  244.3 | 49.8  475.8  |  6.0  211.5 |  6.2  211.6
12 parts (non-visual)  |             |             |              |             |
after activate of      | 16.8  263.2 | 16.0  264.2 | 52.8  490.7  |  9.1  236.2 |  9.5  236.2
a vector.              |             |             |              |             |

(Compare the two "userd reader" columns: API 1.0 vs API 2.0.)
Significant is the inefficiency of the 1.0 API, and the fact that the
2.0 API has the same improved efficiency (both in speed and memory) as
the gold internal reader!
Note: Structured data will not show much difference between the two API's,
but it was more efficient initially.
=========================================================
A note on philosophical differences between the two API's:
=========================================================
API 1.0 deals with:
-------------------
-> global coordinate array & corresponding
-> global node id array
-> global nodal variables
-> for each part:
-> local element connectivities (grouped by type) & corresponding
-> local element ids
-> local elemental variables
The element connectivities, within parts, reference the global coordinate
array. If node ids are provided, the element connectivities have to be in
terms of the node ids. If node ids are not provided, the connectivities are in
terms of the (one-based) index number of each node in the global coordinate
array. Thus, node ids are more than labels - they are a part of the
connectivity referencing scheme. Element ids are purely labels.
This API was originally set up to try to make the interface to other codes as
straightforward as possible. Efficiency was not the major consideration.
EnSight must do a fair amount of work to get data provided in the manner
described above into the form that it uses internally. There is mapping that
has to be setup and maintained between the global arrays and the local part
arrays so that updating over time can be accomplished efficiently. There is
hashing that is required in order to deal efficently with node ids.
All of this leads to a considerable amount of temporary memory and processing,
in order to get a model read into EnSight.
API 2.0 deals with:
-------------------
-> for each part:
-> part coordinates & corresponding
-> part node ids
-> part nodal variables
-> part element connectivities (grouped by type) & corresponding
-> part element ids
-> part elemental variables
API 2.0 requires that the coordinates and corresponding nodal variables be
provided per part. This eliminates the global to local mapping with all its
associated temporary memory and processing time. The connectivity of the
elements in each part references the node indices of its own (one-based) part
coordinate array; it does not reference the nodes according to node ids.
Node ids (and element ids, for that matter) are purely
labels for screen display and for query operations within EnSight. This
eliminates the need for node id hashing as a model is read.
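For illustration only (this sketch is not part of the original README), the per-part
layout described above might look like the following for a hypothetical part with
4 nodes and 2 triangles; all names and values here are invented for the example:

    /* Per-part data in the 2.0 style (hypothetical values).
       Connectivity indexes the part's own coordinate list (one-based);
       node ids are display labels only and play no role in referencing. */
    static float part_coords_x[4] = {0.0f, 1.0f, 1.0f, 0.0f};
    static float part_coords_y[4] = {0.0f, 0.0f, 1.0f, 1.0f};
    static float part_coords_z[4] = {0.0f, 0.0f, 0.0f, 0.0f};

    static int part_node_ids[4] = {101, 102, 203, 204};   /* labels only */
    static int part_tria3_conn[2][3] =
    {
        {1, 2, 3},    /* local node indices 1..4, not node ids */
        {1, 3, 4}
    };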
The 2.0 API has been created for those needing more efficiency - both in terms
of memory use and speed. The increased efficiency is possible because data is
requested in a manner which more closely represents the way that EnSight
stores and manipulates information internally. The new API requests size
information and allocates the actual internal structures and arrays
accordingly. Pointers to these arrays are passed directly to you in the
routines which gather data, thus eliminating a considerable amount of
temporary memory (and allocation time) that is needed in the old
API. Depending on what you must do to get your data into the form required,
the memory savings and the speed improvement when loading models can be quite
significant!!
Additionally, the ability to handle tensor and complex variables has been
added to the new API, and support for multiple timesets is provided.
------------------------------------------------
So, with that said, if you determine that you want to convert your existing
reader to the new API format, the following may be helpful.
====================
First the Good News! The following routines are identical in both API's!!
==================== ----------------------------------------------------
USERD_bkup
USERD_get_block_coords_by_component
USERD_get_block_iblanking
USERD_get_changing_geometry_status
USERD_get_dataset_query_file_info
USERD_get_element_label_status
USERD_get_name_of_reader
USERD_get_node_label_status
USERD_get_number_of_files_in_dataset
USERD_get_number_of_model_parts
USERD_get_number_of_variables
USERD_set_filenames
USERD_stop_part_building
========================
Second, pretty Good News!   The following routines have minor changes,
=========================   namely a slight name change and the addition
                            of arguments related to complex data, constant
                            type, or self-contained parts vs global coords.
                            The arguments must be added, but depending on
                            your situation, many might simply be placeholders.

                            (Note: the name changes are needed so both
                            API's can exist together.)
-------------------------------------------------------------------------------
-----------------------------------------------------
A) Changes related to imaginary flag for complex data
=====================================================
If you don't deal with complex variables, simply add
this flag to your argument list and ignore its value.
-----------------------------------------------------
API 1.0 API 2.0
------- -------
USERD_get_constant_value USERD_get_constant_val
( (
int which_var int which_var,
int imag_data
) )
USERD_get_description_lines USERD_get_descrip_lines
( (
int which_type, int which_type,
int which_var, int which_var,
int imag_data,
char line1[Z_BUFL], char line1[Z_BUFL],
char line2[Z_BUFL] char line2[Z_BUFL]
) )
USERD_get_variable_value_at_specific USERD_get_var_value_at_specific
( (
int which_var, int which_var,
int which_node_or_elem, int which_node_or_elem,
int which_part, int which_part,
int which_elem_type, int which_elem_type,
int time_step, int time_step,
float values[3] float values[3],
int imag_data
) )
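As an illustration (not part of the original README), an existing 1.0-style routine
can simply accept the new flag and ignore it; the helper name below is hypothetical:

    float USERD_get_constant_val(int which_var, int imag_data)
    {
        (void) imag_data;                  /* no complex variables: ignore the flag */
        return my_get_constant_value_1_0(which_var);  /* assumed existing 1.0 helper */
    }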
---------------------------------------------------------
B) Changes related to complex data info, and constant type
(and some of the multiple timeset support)
=========================================================
If you don't deal with complex variables, simply add the
arguments for var_complex, var_ifilename, and var_freq
and assign var_complex to be FALSE.
The argument var_contran needs to be added, and set
appropriately if you have constant variables, to indicate
if the constant variable is fixed for all time or varies
over time.
The argument var_timeset needs to be added, and set
appropriately.
---------------------------------------------------------
API 1.0 API 2.0
------- -------
USERD_get_variable_info USERD_get_gold_variable_info
( (
char **var_description, char **var_description,
char **var_filename, char **var_filename,
int *var_type, int *var_type,
int *var_classify int *var_classify,
int *var_complex,
char **var_ifilename,
float *var_freq,
int *var_contran,
int *var_timeset
) )
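A minimal sketch (not part of the original README) of the simple defaults described
above, assuming no complex data and a single timeset; Num_variables is an assumed
global count, and only the new arguments are shown:

    /* inside USERD_get_gold_variable_info(), after the existing 1.0-style
       description/filename/type/classify assignments: */
    for (int n = 0; n < Num_variables; ++n)
    {
        var_complex[n] = FALSE;  /* no complex variables; var_ifilename left untouched */
        var_freq[n]    = 0.0f;   /* placeholder, unused when not complex */
        var_contran[n] = FALSE;  /* constants fixed for all time */
        var_timeset[n] = 1;      /* single timeset */
    }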
------------------------------------------------------
C) Changes related to self contained part coordinates
======================================================
The number_of_nodes argument needs to be added and
set for each part. This one is critical for you to do.
------------------------------------------------------
API 1.0 API 2.0
------- -------
USERD_get_part_build_info USERD_get_gold_part_build_info
( (
int *part_numbers, int *part_numbers,
int *part_types, int *part_types,
char *part_description[Z_BUFL], char *part_description[Z_BUFL],
int *number_of_nodes,
int *number_of_elements[Z_MAXTYPE], int *number_of_elements[Z_MAXTYPE],
int *ijk_dimensions[3], int *ijk_dimensions[3],
int *iblanking_options[6] int *iblanking_options[6]
) )
------------------------------------------------------
D) Changes related to multiple timeset support
======================================================
The timeset_number argument needs to be added for the
following three routines.
The multiple timeset support also includes the change
in B) above for USERD_get_gold_variable_info and the
last three new routines in the third section of this
readme file.
------------------------------------------------------
API 1.0 API 2.0
------- -------
USERD_get_number_of_time_steps USERD_get_num_of_time_steps
( (
void int timeset_number
) )
USERD_get_solution_times USERD_get_sol_times
( (
int timeset_number,
float *solution_times float *solution_times
) )
USERD_set_time_step USERD_set_time_set_and_step
( (
int timeset_number,
int time_step int time_step
) )
------------------------------------------------------
E) Changes related to global_extern.h
======================================================
Be sure to include the updated global_extern.h file that comes
with the EnSight 7.2 release (not the one from previous releases).
=================================================================
Third, deleted and new routines. (Here is where the work lies)
Several old routines are gone. You will have to create the new
routines that replace them. I think you will find in most cases
that your old routines will form the basis of the new routines,
and that it isn't too difficult to provide the information in
the new way.
The detailed specifications for these new routines can be found
in README_USERD_2.0 (or the headers in libuserd.c of the
dummy_gold or ensight_gold readers).
=================================================================
API 1.0 API 2.0
------- -------
These routines: replaced by the single routine:
--------------------------- -------------------------------
USERD_get_block_scalar_values USERD_get_var_by_component
USERD_get_block_vector_values_by_component
USERD_get_scalar_values
USERD_get_vector_values
These global coordinate routines: replaced by part coord routines:
--------------------------------- --------------------------------
USERD_get_global_coords USERD_get_part_coords
USERD_get_global_node_ids USERD_get_part_node_ids
USERD_get_number_of_global_nodes
These part connectivity routines: replaced by part by type routines:
--------------------------------- ----------------------------------
USERD_get_element_connectivities_for_part USERD_get_part_elements_by_type
USERD_get_element_ids_for_part USERD_get_part_element_ids_by_type
These are New Routines
----------------------
(Can be a dummy) -> USERD_exit_routine
(Can be a dummy) -> USERD_get_model_extents
(Required) -> USERD_get_reader_version
multiple timeset related:
(Required) -> USERD_get_number_timesets
(Required) -> USERD_get_timeset_description
(Required) -> USERD_get_geom_timeset_number
border provided by the reader option:
(Required) -> USERD_get_border_availability
(Can be a dummy) -> USERD_get_border_elements_by_type
transient model allocation efficiency:
(Can be a dummy) -> USERD_get_maxsize_info
Possible use with Server-of-Servers:
(Can be a dummy) -> USERD_set_server_number

View File

@ -1,2 +0,0 @@
#define DO_READER
#define USERD_API_203

View File

@ -1,14 +0,0 @@
//======================================================================
// backup is not implemented
//======================================================================
int USERD_bkup
(
FILE *archive_file,
int backup_type)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_bkup" << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,11 +0,0 @@
void USERD_exit_routine
(
void
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_exit_routine" << endl
<< flush;
#endif
}

View File

@ -1,17 +0,0 @@
// Not used
int USERD_get_border_availability
(
int part_number,
int number_of_elements[Z_MAXTYPE]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_border_availability for part_number "
<< part_number << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,19 +0,0 @@
// Not called if USERD_get_border_availability returns Z_ERR
int USERD_get_border_elements_by_type
(
int part_number,
int element_type,
int **conn_array,
short *parent_element_type,
int *parent_element_num
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_border_elements_by_type" << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,10 +0,0 @@
//======================================================================
int USERD_get_changing_geometry_status(void)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_changing_geometry_status" << endl << flush;
#endif
// Choose the most general option
return Z_CHANGE_CONN;
}

View File

@ -1,15 +0,0 @@
//======================================================================
// Not in use
//======================================================================
float USERD_get_constant_val
(
int which_var,
int imag_data
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_constant_val" << endl << flush;
#endif
return 0.0;
}

View File

@ -1,8 +0,0 @@
//======================================================================
// NOT SUPPORTED... yet, if ever!
//======================================================================
int USERD_get_dataset_query_file_info(Z_QFILES *qfiles)
{
// just return OK
return Z_OK;
}

View File

@ -1,31 +0,0 @@
//======================================================================
int USERD_get_descrip_lines
(
int which_type,
int which_var,
int imag_data,
char line1[Z_BUFL],
char line2[Z_BUFL]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_descrip_lines" << endl
<< flush;
#endif
if (which_type == Z_GEOM)
{
strncpy(line1, meshName, Z_BUFL);
strncpy(line2, "", Z_BUFL);
}
else
{
strncpy(line1, "WHERE IS THIS LINE USED I WONDER???", Z_BUFL);
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_descrip_lines" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,10 +0,0 @@
//======================================================================
// if TRUE: element ids are set in USERD_get_part_element_ids_by_type
//======================================================================
int USERD_get_element_label_status(void)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_element_label_status" << endl << flush;
#endif
return TRUE;
}

View File

@ -1,14 +0,0 @@
int USERD_get_geom_timeset_number
(
void
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_geom_timeset_number" << endl
<< flush;
#endif
Geom_timeset_number = 1;
return Geom_timeset_number;
}

View File

@ -1,153 +0,0 @@
//======================================================================
// this is based on the current time step.
//======================================================================
int USERD_get_gold_part_build_info
(
int *part_numbers,
int *part_types,
char *part_descriptions[Z_BUFL],
int *number_of_nodes,
int *number_of_elements[Z_MAXTYPE],
int *ijk_dimensions[3],
int *iblanking_options[6]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_gold_part_build_info" << endl << flush;
#endif
const cellShapeList& cellShapes = meshPtr->cellShapes();
const cellList& cells = meshPtr->cells();
label nCells = cells.size();
// all parts are unstructured
for (label n = 0; n<Numparts_available; n++)
{
part_numbers[n] = n + 1;
part_types[n] = Z_UNSTRUCTURED;
}
strncpy(part_descriptions[0], meshName, Z_BUFL);
for (label i=0; i<nPatches; i++)
{
word patchName(meshPtr->boundary()[i].name());
strncpy(part_descriptions[i+1], patchName.c_str(), Z_BUFL);
}
label nHex08 = 0;
label nPen06 = 0;
label nPyr05 = 0;
label nTet04 = 0;
label nFaced = 0;
for (label n=0; n<nCells; n++)
{
label nFacesInCell = cells[n].size();
labelList points = cellShapes[n];
if ((nFacesInCell == 6) && (points.size() == 8))
{
nHex08++;
}
else if ((nFacesInCell == 4) && (points.size() == 4))
{
nTet04++;
}
else if (nFacesInCell == 5)
{
if (points.size() == 6)
{
nPen06++;
}
else if (points.size() == 5)
{
nPyr05++;
}
else
{
nFaced++;
}
}
else
{
nFaced++;
}
}
for (label n=0; n < Z_MAXTYPE; n++)
{
for (label i=0; i<Numparts_available; i++)
{
number_of_elements[i][n] = 0;
}
}
number_of_elements[0][Z_TET04] = nTet04;
number_of_elements[0][Z_PYR05] = nPyr05;
number_of_elements[0][Z_HEX08] = nHex08;
number_of_elements[0][Z_PEN06] = nPen06;
number_of_elements[0][Z_NFACED] = nFaced;
/*
Info<< "nTet04 = " << nTet04 << endl;
Info<< "nPyr05 = " << nPyr05 << endl;
Info<< "nHex08 = " << nHex08 << endl;
Info<< "nPen06 = " << nPen06 << endl;
Info<< "nFaced = " << nFaced << endl;
*/
number_of_nodes[0] = meshPtr->nPoints();
const polyBoundaryMesh& bMesh = meshPtr->boundaryMesh();
for (label i=0; i<nPatches; i++)
{
label nTri03 = 0;
label nQuad04 = 0;
label nPoly = 0;
forAll(bMesh[i], n)
{
label nPoints = bMesh[i][n].size();
if (nPoints == 3)
{
nTri03++;
}
else if (nPoints == 4)
{
nQuad04++;
}
else
{
nPoly++;
}
}
number_of_elements[i+1][Z_TRI03] = nTri03;
number_of_elements[i+1][Z_QUA04] = nQuad04;
number_of_elements[i+1][Z_NSIDED] = nPoly;
number_of_nodes[i+1] = bMesh[i].points().size();
}
if (Numparts_available > nPatches+1)
{
strncpy
(
part_descriptions[nPatches+1],
cloud::prefix.c_str(),
Z_BUFL
);
number_of_elements[nPatches+1][Z_POINT] = sprayPtr->size();
number_of_nodes[nPatches+1] = sprayPtr->size();
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_gold_part_build_info" << endl << flush;
#endif
return Z_OK;
}

View File

@ -1,120 +0,0 @@
//======================================================================
// variable 1 corresponds to var[0], i.e. variables are zero-based
//======================================================================
int USERD_get_gold_variable_info
(
char **var_description,
char **var_filename,
int *var_type,
int *var_classify,
int *var_complex,
char **var_ifilename,
float *var_freq,
int *var_contran,
int *var_timeset
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_gold_variable_info" << endl
<< flush;
#endif
label offset = Num_variables - nSprayVariables;
// scalars first ...
for (label n=0; n<offset; n++)
{
if (isScalar[var2field[n]])
{
var_type[n] = Z_SCALAR;
var_classify[n] = Z_PER_ELEM;
var_complex[n] = FALSE;
var_timeset[n] = 1;
strncpy
(
var_description[n],
fieldNames[var2field[n]].c_str(),
Z_BUFL
);
}
}
// ... and then vectors
for (label n=0; n<offset; n++)
{
if (isVector[var2field[n]])
{
var_type[n] = Z_VECTOR;
var_classify[n] = Z_PER_ELEM;
var_complex[n] = FALSE;
var_timeset[n] = 1;
strncpy
(
var_description[n],
fieldNames[var2field[n]].c_str(),
Z_BUFL
);
}
}
// ... and tensors (NB! all tensors are treated as asymmetric)
for (label n=0; n<offset; n++)
{
if (isTensor[var2field[n]])
{
var_type[n] = Z_TENSOR9;
var_classify[n] = Z_PER_ELEM;
var_complex[n] = FALSE;
var_timeset[n] = 1;
strncpy
(
var_description[n],
fieldNames[var2field[n]].c_str(),
Z_BUFL
);
}
}
if (Numparts_available > nPatches+1)
{
label Ns = lagrangianScalarNames.size();
for (label n=0; n<Ns; n++)
{
var_type[offset + n] = Z_SCALAR;
var_classify[offset + n] = Z_PER_ELEM;
var_complex[offset + n] = FALSE;
var_timeset[offset + n] = 1;
word name = parcelPrepend + lagrangianScalarNames[n];
strncpy
(
var_description[offset + n],
name.c_str(),
Z_BUFL
);
}
forAll(lagrangianVectorNames, n)
{
var_type[offset + Ns + n] = Z_VECTOR;
var_classify[offset + Ns + n] = Z_PER_ELEM;
var_complex[offset + Ns + n] = FALSE;
var_timeset[offset + Ns + n] = 1;
word name = parcelPrepend + lagrangianVectorNames[n];
strncpy
(
var_description[offset + Ns + n],
name.c_str(),
Z_BUFL
);
}
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_gold_variable_info" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,14 +0,0 @@
int USERD_get_matf_set_info
(
int *mat_set_ids,
char **mat_set_name
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_matf_set_info" << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,15 +0,0 @@
int USERD_get_matf_var_info
(
int set_index,
int *mat_ids,
char **mat_desc
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_matf_var_info" << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,102 +0,0 @@
int USERD_get_maxsize_info
(
int *max_number_of_nodes,
int *max_number_of_elements[Z_MAXTYPE],
int *max_ijk_dimensions[3]
)
{
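// Disabled: returns Z_ERR immediately, so the sizing loop below is never reached.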
return Z_ERR;
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_maxsize_info" << endl;
#endif
label maxNPoints = 0;
label maxNParcels = 0;
label nPen06Max = 0;
label nHex08Max = 0;
label nPyr05Max = 0;
label nTet04Max = 0;
Info<< "Checking all time steps for EnSight memory allocation purpose. "
<< "This can take some time." << endl;
for (label timeI=1; timeI < timeDirs.size(); ++timeI)
{
label nPen06 = 0;
label nHex08 = 0;
label nPyr05 = 0;
label nTet04 = 0;
runTimePtr->setTime(timeDirs[timeI], timeI);
Info<< "Checking time = " << runTimePtr->timeName() << endl;
const cellShapeList& cells = meshPtr->cellShapes();
const label nPoints = meshPtr->nPoints();
const label nCells = cells.size();
maxNPoints = max(maxNPoints, nPoints);
for (label n=0; n<nCells;n++)
{
label nFaces = cells[n].nFaces();
const labelList& points = cells[n];
if ((nFaces == 6) && (points.size() == 8))
{
nHex08++;
}
else if ((nFaces == 5) && (points.size() == 6))
{
nPen06++;
}
else if ((nFaces == 5) && (points.size() == 5))
{
nPyr05++;
}
else if ((nFaces == 4) && (points.size() == 4))
{
nTet04++;
}
}
nPen06Max = max(nPen06Max, nPen06);
nHex08Max = max(nHex08Max, nHex08);
nPyr05Max = max(nPyr05Max, nPyr05);
nTet04Max = max(nTet04Max, nTet04);
if (Numparts_available > 1)
{
// Get the maximum number of spray parcels
// and store it
Cloud<passiveParticle> lagrangian(*meshPtr, cloud::defaultName);
if (lagrangian.size() > maxNParcels)
{
maxNParcels = lagrangian.size();
}
}
}
max_number_of_nodes[0] = maxNPoints;
max_number_of_elements[0][Z_HEX08] = nHex08Max;
max_number_of_elements[0][Z_PEN06] = nPen06Max;
max_number_of_elements[0][Z_PYR05] = nPyr05Max;
max_number_of_elements[0][Z_TET04] = nTet04Max;
if (Numparts_available > 1)
{
max_number_of_nodes[1] = maxNParcels;
max_number_of_elements[1][Z_POINT] = maxNParcels;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_maxsize_info" << endl;
#endif
return Z_OK;
}

View File

@ -1,15 +0,0 @@
// Not used. Let EnSight do the job.
int USERD_get_model_extents
(
float extents[6]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_model_extents" << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,18 +0,0 @@
//======================================================================
// Setting name in the gui, and specifying one or two input fields
//======================================================================
int USERD_get_name_of_reader
(
char reader_name[Z_MAX_USERD_NAME],
int *two_fields
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_name_of_reader" << endl << flush;
#endif
strncpy(reader_name, readerName, Z_MAX_USERD_NAME);
*two_fields = FALSE;
return Z_OK;
}

View File

@ -1,79 +0,0 @@
int USERD_get_nfaced_conn
(
int part_number,
int *nfaced_conn_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_nfaced_conn"
<< ", part_number = " << part_number
<< endl
<< flush;
#endif
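// Part 1 (internal mesh): for each polyhedral cell, emit the one-based
// point labels of every face, face by face.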
if (part_number == 1)
{
label nPoint = 0;
const cellShapeList& cellShapes = meshPtr->cellShapes();
const cellList& cells = meshPtr->cells();
const faceList& faces = meshPtr->faces();
label nCells = cellShapes.size();
for (label n=0; n<nCells; n++)
{
label nFacesInCell = cells[n].size();
labelList points = cellShapes[n];
if ((nFacesInCell == 6) && (points.size() == 8))
{}
else if ((nFacesInCell == 4) && (points.size() == 4))
{}
else if (nFacesInCell == 5)
{
if (points.size() == 6)
{}
else if (points.size() == 5)
{}
else
{
for (label i=0; i<nFacesInCell; i++)
{
label facei = cells[n][i];
label nPoints = faces[facei].size();
for (label j=0; j<nPoints; j++)
{
nfaced_conn_array[nPoint++] = faces[facei][j] + 1;
}
}
}
}
else
{
for (label i=0; i<nFacesInCell; i++)
{
label facei = cells[n][i];
label nPoints = faces[facei].size();
for (label j=0; j<nPoints; j++)
{
nfaced_conn_array[nPoint++] = faces[facei][j] + 1;
}
}
}
}
}
else if (part_number < nPatches+2)
{
}
else
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Exiting: USERD_get_nfaced_conn" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,74 +0,0 @@
int USERD_get_nfaced_nodes_per_face
(
int part_number,
int *nfaced_npf_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_nfaced_nodes_per_face"
<< ", part_number = " << part_number
<< endl
<< flush;
#endif
if (part_number == 1)
{
const cellShapeList& cellShapes = meshPtr->cellShapes();
const cellList& cells = meshPtr->cells();
const faceList& faces = meshPtr->faces();
label nCells = cellShapes.size();
label nFaced = 0;
for (label n=0; n<nCells; n++)
{
label nFacesInCell = cells[n].size();
labelList points = cellShapes[n];
label nPoints = points.size();
if ((nFacesInCell == 6) && (nPoints == 8))
{}
else if ((nFacesInCell == 4) && (nPoints == 4))
{}
else if (nFacesInCell == 5)
{
if (nPoints == 6)
{}
else if (nPoints == 5)
{}
else
{
for (label i=0; i<nFacesInCell; i++)
{
label facei = cells[n][i];
label nFacePoints = faces[facei].size();
nfaced_npf_array[nFaced++] = nFacePoints;
}
}
}
else
{
for (label i=0; i<nFacesInCell; i++)
{
label facei = cells[n][i];
label nFacePoints = faces[facei].size();
nfaced_npf_array[nFaced++] = nFacePoints;
}
}
}
}
else if (part_number < nPatches+2)
{
return Z_ERR;
}
else
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Exiting: USERD_get_nfaced_nodes_per_face" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,11 +0,0 @@
//======================================================================
// if TRUE: node ids are set in USERD_get_part_node_ids
//======================================================================
int USERD_get_node_label_status(void)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_node_label_status" << endl << flush;
#endif
return TRUE;
}

View File

@ -1,49 +0,0 @@
int USERD_get_nsided_conn
(
int part_number,
int *nsided_conn_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_nsided_conn"
<< ", part_number = " << part_number
<< endl
<< flush;
#endif
if (part_number == 1)
{
Info<< "************* EEEEEEEEERRRRRRRRRRRRRRRRRR *************** "
<< endl << flush;
}
else if (part_number < nPatches+2)
{
//const cellList& cells = meshPtr->cells();
//const faceList& faces = meshPtr->faces();
label patchi = part_number - 2;
const polyBoundaryMesh& bMesh = meshPtr->boundaryMesh();
label np = 0;
forAll(bMesh[patchi], facei)
{
label nPoints = bMesh[patchi][facei].size();
if ((nPoints != 3) && (nPoints != 4))
{
for (label i=0; i<nPoints; i++)
{
nsided_conn_array[np++] = bMesh[patchi][facei][i] + 1;
}
}
}
}
else if (part_number == nPatches+2)
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Exiting: USERD_get_nsided_conn" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,15 +0,0 @@
//======================================================================
// don't use multiple time sets...NN
//======================================================================
int USERD_get_num_of_time_steps
(
int timeset_number
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_num_of_time_steps" << endl
<< flush;
#endif
return Num_time_steps;
}

View File

@ -1,13 +0,0 @@
//======================================================================
//
//======================================================================
int USERD_get_number_of_files_in_dataset(void)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_number_of_files_in_dataset" << endl << flush;
#endif
// use 1 instead of 0, which gives an unnecessary warning.
Num_dataset_files = 1;
return Num_dataset_files;
}

View File

@ -1,14 +0,0 @@
int USERD_get_number_of_material_sets
(
void
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_number_of_material_sets" << endl
<< flush;
#endif
// No materials
return 0;
}

View File

@ -1,14 +0,0 @@
int USERD_get_number_of_materials
(
int set_index
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_number_of_materials" << endl
<< flush;
#endif
// No materials
return 0;
}

View File

@ -1,9 +0,0 @@
int USERD_get_number_of_model_parts(void)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_number_of_model_parts" << endl << flush;
#endif
return Numparts_available;
}

View File

@ -1,9 +0,0 @@
//======================================================================
int USERD_get_number_of_variables(void)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_number_of_variables" << endl << flush;
#endif
return Num_variables;
}

View File

@ -1,13 +0,0 @@
int USERD_get_number_of_timesets
(
void
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_number_of_timesets" << endl
<< flush;
#endif
Num_timesets = 1;
return Num_timesets;
}

View File

@ -1,64 +0,0 @@
// Note: coord_array is 1-based.
int USERD_get_part_coords
(
int part_number,
float **coord_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_part_coords" << endl <<
"part_number = " << part_number << endl << flush;
#endif
if (part_number == 1)
{
const vectorField& points = meshPtr->points();
label nPoints = points.size();
for (label indx=0; indx<nPoints; indx++)
{
coord_array[0][indx+1] = float(points[indx].x());
coord_array[1][indx+1] = float(points[indx].y());
coord_array[2][indx+1] = float(points[indx].z());
}
}
else if (part_number < nPatches+2)
{
label patchi = part_number-2;
const polyBoundaryMesh& bMesh = meshPtr->boundaryMesh();
const vectorField& points = bMesh[patchi].points();
label nPoints = points.size();
for (label indx=0; indx<nPoints; indx++)
{
coord_array[0][indx+1] = float(points[indx].x());
coord_array[1][indx+1] = float(points[indx].y());
coord_array[2][indx+1] = float(points[indx].z());
}
}
else if (part_number == nPatches+2)
{
label indx = 1;
forAllConstIter(Cloud<passiveParticle>, *sprayPtr, iter)
{
coord_array[0][indx] = float(iter().position().x());
coord_array[1][indx] = float(iter().position().y());
coord_array[2][indx] = float(iter().position().z());
indx++;
}
}
else
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_part_coords" << endl << flush;
#endif
return Z_OK;
}

View File

@ -1,162 +0,0 @@
int USERD_get_part_element_ids_by_type
(
int part_number,
int element_type,
int *elemid_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_part_element_ids_by_type" << endl
<< "part_number = " << part_number << endl
<< "element_type = " << element_type << endl << flush;
#endif
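// Element ids are simply one-based cell indices (internal mesh), face indices
// (patches) or parcel indices (cloud), grouped by element type.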
if (part_number == 1)
{
const cellShapeList& cellShapes = meshPtr->cellShapes();
const cellList& cells = meshPtr->cells();
label nCells = cells.size();
label nPen06 = 0;
label nHex08 = 0;
label nPyr05 = 0;
label nTet04 = 0;
label nFaced = 0;
if (element_type == Z_HEX08)
{
for (label n=0; n<nCells; n++)
{
label nFaces = cells[n].size();
labelList points = cellShapes[n];
if ((nFaces == 6) && (points.size() == 8))
{
elemid_array[nHex08++] = n + 1;
}
}
}
else if (element_type == Z_PEN06)
{
for (label n=0; n<nCells; n++)
{
label nFaces = cells[n].size();
labelList points = cellShapes[n];
if ((nFaces == 5) && (points.size() == 6))
{
elemid_array[nPen06++] = n + 1;
}
}
}
else if (element_type == Z_PYR05)
{
for (label n=0; n<nCells; n++)
{
label nFaces = cells[n].size();
labelList points = cellShapes[n];
if ((nFaces == 5) && (points.size() == 5))
{
elemid_array[nPyr05++] = n + 1;
}
}
}
else if (element_type == Z_TET04)
{
for (label n=0; n<nCells; n++)
{
label nFaces = cells[n].size();
labelList points = cellShapes[n];
if ((nFaces == 4) && (points.size() == 4))
{
elemid_array[nTet04++] = n + 1;
}
}
}
else if (element_type == Z_NFACED)
{
for (label n=0; n<nCells; n++)
{
label nFaces = cells[n].size();
labelList points = cellShapes[n];
if ((nFaces == 6) && (points.size() == 8))
{}
else if ((nFaces == 5) && (points.size() == 6))
{}
else if ((nFaces == 5) && (points.size() == 5))
{}
else if ((nFaces == 4) && (points.size() == 4))
{}
else
{
elemid_array[nFaced++] = n + 1;
}
}
}
}
else if (part_number < nPatches+2)
{
const polyBoundaryMesh& bMesh = meshPtr->boundaryMesh();
label patchi = part_number - 2;
label nTri03 = 0;
label nQuad04 = 0;
label nPoly = 0;
if (element_type == Z_TRI03)
{
forAll(bMesh[patchi], facei)
{
if (bMesh[patchi][facei].size() == 3)
{
elemid_array[nTri03++] = facei + 1;
}
}
}
else if (element_type == Z_QUA04)
{
forAll(bMesh[patchi], facei)
{
if (bMesh[patchi][facei].size() == 4)
{
elemid_array[nQuad04++] = facei + 1;
}
}
}
else if (element_type == Z_NSIDED)
{
forAll(bMesh[patchi], facei)
{
label nPoints = bMesh[patchi][facei].size();
if ((nPoints != 3) && (nPoints != 4))
{
elemid_array[nPoly++] = facei + 1;
}
}
}
}
else if (part_number == nPatches+2)
{
forAll(*sprayPtr, n)
{
elemid_array[n] = n + 1;
}
}
else
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_part_element_ids_by_type" << endl << flush;
#endif
return Z_OK;
}

View File

@ -1,252 +0,0 @@
int USERD_get_part_elements_by_type
(
int part_number,
int element_type,
int **conn_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_part_elements_by_type" << nl
<< "part_number = " << part_number << nl
<< "element_type = " << element_type;
if (element_type == Z_HEX08)
{
Info<< " Z_HEX08";
}
else if (element_type == Z_PEN06)
{
Info<< " Z_PEN06";
}
else if (element_type == Z_PYR05)
{
Info<< " Z_PYR05";
}
else if (element_type == Z_TET04)
{
Info<< " Z_TET04";
}
else if (element_type == Z_TRI03)
{
Info<< " Z_TRI03";
}
else if (element_type == Z_QUA04)
{
Info<< " Z_QUA04";
}
else if (element_type == Z_NFACED)
{
Info<< " Z_NFACED";
}
else if (element_type == Z_NSIDED)
{
Info<< " Z_NSIDED";
}
else
{
Info<< " unknown";
}
Info<< endl << flush;
#endif
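// Part 1: internal mesh connectivity per cell model; parts 2..nPatches+1:
// patch faces; part nPatches+2: lagrangian parcels as point elements.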
if (part_number == 1)
{
const cellShapeList& cellShapes = meshPtr->cellShapes();
//================================
// hexahedron
//================================
if (element_type == Z_HEX08)
{
const cellModel& hex = cellModel::ref(cellModel::HEX);
label nHex08 = 0;
forAll(cellShapes, celli)
{
const cellShape& cellShape = cellShapes[celli];
const cellModel& cellModel = cellShape.model();
if (cellModel == hex)
{
forAll(cellShape, ip)
{
conn_array[nHex08][ip] = cellShape[ip] + 1;
}
nHex08++;
}
}
}
//================================
// pentahedron
//================================
else if (element_type == Z_PEN06)
{
const cellModel& prism = cellModel::ref(cellModel::PRISM);
label nPen06 = 0;
forAll(cellShapes, celli)
{
const cellShape& cellShape = cellShapes[celli];
const cellModel& cellModel = cellShape.model();
if (cellModel == prism)
{
forAll(cellShape, ip)
{
conn_array[nPen06][ip] = cellShape[ip] + 1;
}
nPen06++;
}
}
}
//================================
// pyramid
//================================
else if (element_type == Z_PYR05)
{
const cellModel& pyr = cellModel::ref(cellModel::PYR);
label nPyr05 = 0;
forAll(cellShapes, celli)
{
const cellShape& cellShape = cellShapes[celli];
const cellModel& cellModel = cellShape.model();
if (cellModel == pyr)
{
forAll(cellShape, ip)
{
conn_array[nPyr05][ip] = cellShape[ip] + 1;
}
nPyr05++;
}
}
}
//================================
// tetrahedron
//================================
else if (element_type == Z_TET04)
{
const cellModel& tet = cellModel::ref(cellModel::TET);
label nTet04 = 0;
forAll(cellShapes, celli)
{
const cellShape& cellShape = cellShapes[celli];
const cellModel& cellModel = cellShape.model();
if (cellModel == tet)
{
forAll(cellShape, ip)
{
conn_array[nTet04][ip] = cellShape[ip] + 1;
}
nTet04++;
}
}
}
//================================
// polyhedra
//================================
else
{
label nCells = cellShapes.size();
label nFaced = 0;
const cellList& cells = meshPtr->cells();
for (label n=0; n<nCells; n++)
{
label nFacesInCell = cells[n].size();
labelList points = cellShapes[n];
if ((nFacesInCell == 6) && (points.size() == 8))
{}
else if ((nFacesInCell == 4) && (points.size() == 4))
{}
else if (nFacesInCell == 5)
{
if (points.size() == 6)
{}
else if (points.size() == 5)
{}
else
{
conn_array[nFaced++][0] = nFacesInCell;
}
}
else
{
conn_array[nFaced++][0] = nFacesInCell;
}
}
}
}
else if (part_number < nPatches+2)
{
label patchi = part_number - 2;
const polyBoundaryMesh& bMesh = meshPtr->boundaryMesh();
label nTri03 = 0;
label nQuad04 = 0;
if (element_type == Z_TRI03)
{
forAll(bMesh[patchi], n)
{
label nPoints = bMesh[patchi][n].size();
if (nPoints == 3)
{
for (label i=0; i<nPoints; i++)
{
label ip = bMesh[patchi][n][i];
conn_array[nTri03][i] = ip + 1;
}
nTri03++;
}
}
}
else if (element_type == Z_QUA04)
{
forAll(bMesh[patchi], n)
{
label nPoints = bMesh[patchi][n].size();
if (nPoints == 4)
{
for (label i=0; i<nPoints; i++)
{
label ip = bMesh[patchi][n][i];
conn_array[nQuad04][i] = ip + 1;
}
nQuad04++;
}
}
}
else if (element_type == Z_NSIDED)
{
label nPoly = 0;
forAll(bMesh[patchi], n)
{
label nPoints = bMesh[patchi][n].size();
if ((nPoints != 3) && (nPoints != 4))
{
conn_array[nPoly++][0] = nPoints;
}
}
}
}
else if (part_number == nPatches+2)
{
forAll(*sprayPtr, n)
{
conn_array[n][0] = n + 1;
}
}
else
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_part_elements_by_type" << endl;
#endif
return Z_OK;
}

View File

@ -1,56 +0,0 @@
int USERD_get_part_node_ids
(
int part_number,
int *nodeid_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_part_node_ids" << endl
<< "part_number = " << part_number << endl
<< flush;
#endif
if (part_number == 1)
{
for (label indx=0; indx<Num_global_nodes; indx++)
{
nodeid_array[indx] = indx + 1;
}
}
else if (part_number < nPatches+2)
{
label patchi = part_number-2;
const polyBoundaryMesh& bMesh = meshPtr->boundaryMesh();
const vectorField& points = bMesh[patchi].points();
label nPoints = points.size();
for (label indx=0; indx<nPoints; indx++)
{
nodeid_array[indx] = indx + 1;
}
}
else if (part_number == nPatches+2)
{
label indx = 0;
forAllConstIter(Cloud<passiveParticle>, *sprayPtr, iter)
{
nodeid_array[indx] = indx + 1;
indx++;
}
}
else
{
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_part_node_ids" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,27 +0,0 @@
int USERD_get_reader_release
(
char release_number[Z_MAX_USERD_NAME]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_reader_release" << endl;
#endif
strncpy
(
release_number,
#if OPENFOAM
("OpenFOAM-" + std::to_string(OPENFOAM)).c_str(),
#else
"OpenFOAM-unknown",
#endif
Z_MAX_USERD_NAME
);
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_reader_release" << endl;
#endif
return Z_OK;
}

View File

@ -1,18 +0,0 @@
int USERD_get_reader_version
(
char version_number[Z_MAX_USERD_NAME]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_reader_version" << endl;
#endif
strncpy(version_number, readerVersion, Z_MAX_USERD_NAME);
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_reader_version" << endl;
#endif
return Z_OK;
}

View File

@ -1,43 +0,0 @@
//======================================================================
// Negative time values are not allowed in EnSight.
// So for engines, where the time is in crank-angle degrees (CAD),
// we need to correct it so that all CADs are positive. NN
//======================================================================
int USERD_get_sol_times
(
int timeset_number,
float *solution_times
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_sol_times\n" << timeDirs << endl;
#endif
for (label n=0; n<Num_time_steps;n++)
{
solution_times[n] = timeDirs[n+1].value();
}
if (timeDirs[1].value() < 0)
{
scalar addCAD = 360.0;
while (timeDirs[1].value() + addCAD < 0.0)
{
addCAD += 360.0;
}
for (label n=0; n<Num_time_steps;n++)
{
solution_times[n] += addCAD;
Info<< "Time[" << n << "] = " << timeDirs[n+1].value()
<< " was corrected to " << solution_times[n] << endl;
}
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_sol_times" << endl;
#endif
return Z_OK;
}

View File

@ -1,26 +0,0 @@
int USERD_get_timeset_description
(
int timeset_number,
char timeset_description[Z_BUFL]
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_timeset_description" << endl;
#endif
if (timeDirs[1].value() < 0)
{
strncpy(timeset_description, "CAD", Z_BUFL);
}
else
{
strncpy(timeset_description, "seconds", Z_BUFL);
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_timeset_description" << endl;
#endif
return Z_OK;
}

View File

@ -1,103 +0,0 @@
int USERD_get_var_by_component
(
int which_variable,
int which_part,
int var_type,
int which_type,
int imag_data,
int component,
float *var_array
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_var_by_component" << endl
<< "which_variable = " << which_variable << endl
<< "which_part = " << which_part << endl
<< "var_type = " << var_type << endl
<< "which_type = " << which_type << endl
<< "component = " << component << endl
<< flush;
#endif
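// Dispatch on variable type and part; the included getField*, getPatchField*
// and getLagrangian* headers fill var_array.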
label nVar = which_variable - 1;
Time& runTime = *runTimePtr;
fvMesh& mesh = *meshPtr;
const cellShapeList& cells = mesh.cellShapes();
label nCells = cells.size();
if (var_type == Z_SCALAR)
{
if (which_part == 1)
{
#include "getFieldScalar.H"
}
else if (which_part < nPatches+2)
{
#include "getPatchFieldScalar.H"
}
else if (which_part == nPatches+2)
{
#include "getLagrangianScalar.H"
}
else
{
return Z_ERR;
}
}
else if (var_type == Z_VECTOR)
{
if (which_part == 1)
{
#include "getFieldVector.H"
}
else if (which_part < nPatches+2)
{
#include "getPatchFieldVector.H"
}
else if (which_part == nPatches+2)
{
#include "getLagrangianVector.H"
}
else
{
return Z_ERR;
}
}
else if (var_type == Z_TENSOR9)
{
// all tensor are treated as asymmetric tensors here
if (which_part == 1)
{
#include "getFieldTensor.H"
}
else if (which_part < nPatches+2)
{
#include "getPatchFieldTensor.H"
}
else if (which_part == nPatches+2)
{
return Z_UNDEF;
}
else
{
return Z_ERR;
}
}
else
{
return Z_UNDEF;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_var_by_component" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,70 +0,0 @@
//======================================================================
int USERD_get_var_value_at_specific
(
int which_var,
int which_node_or_elem,
int which_part,
int which_elem_type,
int time_step,
float values[3],
int imag_data
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_get_var_value_at_specific" << endl
<< flush;
#endif
// Not sure if it is 0 or 1 based
label nNode = which_node_or_elem;
label nVar = which_var - 1;
fvMesh& mesh = *meshPtr;
if (nVar < Num_variables - nSprayVariables)
{
Time& runTime = *runTimePtr;
IOobject fieldObject
(
fieldNames[var2field[nVar]],
runTime.timeName(),
mesh,
IOobject::MUST_READ,
IOobject::NO_WRITE
);
if (isScalar[nVar])
{
volScalarField scalarField(fieldObject,mesh);
values[0] = scalarField[nNode];
}
else if (isVector[nVar])
{
volVectorField vectorField(fieldObject,mesh);
values[0] = vectorField[nNode].x();
values[1] = vectorField[nNode].y();
values[2] = vectorField[nNode].z();
}
else
{
Info<< "ERROR in USERD_get_variable_value_at_specific. "
<< "No available variable???"
<< endl;
return Z_ERR;
}
}
else
{
Info<< "This functionality is not implemented yet."
<< endl;
return Z_ERR;
}
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_get_var_value_at_specific" << endl
<< flush;
#endif
return Z_OK;
}

View File

@ -1,18 +0,0 @@
int USERD_load_matf_data
(
int set_index,
int part_id,
int wtyp,
int mat_type,
int *ids_list,
float *val_list
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_load_matf_data" << endl
<< flush;
#endif
return Z_ERR;
}

View File

@ -1,222 +0,0 @@
//======================================================================
// Setting filenames
//======================================================================
int USERD_set_filenames
(
char filename_1[],
char filename_2[],
char the_path[],
int swapbytes
)
{
#ifdef ENSIGHTDEBUG
Info<< "Entering: USERD_set_filenames" << endl << flush;
#endif
char tmp[100];
label lRoot = strlen(the_path);
label lCase = strlen(filename_1);
bool cleared = false;
while (!cleared)
{
lRoot = strlen(the_path);
lCase = strlen(filename_1);
// remove the last '/' from rootDir
if (the_path[lRoot-1] == '/')
{
the_path[lRoot-1] = '\0';
}
else
{
cleared = true;
}
}
rootDir = the_path;
// the path is prepended to filename_1
// filename_1 is the 'Geometry'; filename_2 the 'Result', which is null here
// since two_fields is FALSE
for (label i=0; i<lCase-lRoot;i++)
{
tmp[i] = filename_1[i+1+lRoot];
}
caseDir = tmp;
if (!isDir(rootDir/caseDir))
{
Info<< rootDir/caseDir << " is not a valid directory."
<< endl;
return Z_ERR;
}
// construct the global pointers to the database and mesh
delete meshPtr;
delete runTimePtr;
runTimePtr = new Time
(
Time::controlDictName,
rootDir,
caseDir
);
Time& runTime = *runTimePtr;
meshPtr = new fvMesh
(
IOobject
(
fvMesh::defaultRegion,
runTime.timeName(),
runTime
)
);
// set the available number of time-steps
timeDirs = Foam::Time::findTimes(rootDir/caseDir);
Num_time_steps = timeDirs.size() - 1;
nPatches = meshPtr->boundaryMesh().size();
// set the number of fields and store their names
// a valid field must exist for all time-steps
runTime.setTime(timeDirs.last(), timeDirs.size()-1);
IOobjectList objects(*meshPtr, runTime.timeName());
fieldNames = objects.names();
// because of the spray being a 'field' ...
// get the available number of variables and
// check for type (scalar/vector/tensor)
label nVar = 0;
wordList scalars = objects.names(scalarName);
forAll(fieldNames, n)
{
bool isitScalar = false;
forAll(scalars,i)
{
if (fieldNames[n] == scalars[i])
{
isitScalar = true;
var2field[nVar++] = n;
}
}
isScalar[n] = isitScalar;
}
wordList vectors = objects.names(vectorName);
forAll(fieldNames, n)
{
bool isitVector = false;
forAll(vectors,i)
{
if (fieldNames[n] == vectors[i])
{
isitVector = true;
var2field[nVar++] = n;
}
}
isVector[n] = isitVector;
}
wordList tensors = objects.names(tensorName);
forAll(fieldNames, n)
{
bool isitTensor = false;
forAll(tensors,i)
{
if (fieldNames[n] == tensors[i])
{
isitTensor = true;
var2field[nVar++] = n;
}
}
isTensor[n] = isitTensor;
}
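// Step through the time directories until a lagrangian cloud is found,
// so its fields can be registered as additional (spray) variables.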
bool lagrangianNamesFound = false;
label n = 0;
while (!lagrangianNamesFound && n < Num_time_steps)
{
runTime.setTime(timeDirs[n+1], n+1);
Cloud<passiveParticle> lagrangian(*meshPtr, cloud::defaultName);
n++;
if (lagrangian.size())
{
lagrangianNamesFound = true;
}
}
IOobject positionsHeader
(
"positions",
runTime.timeName(),
cloud::prefix,
runTime,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
);
IOobject coordinatesHeader
(
"coordinates",
runTime.timeName(),
cloud::prefix,
runTime,
IOobject::NO_READ,
IOobject::NO_WRITE,
false
);
if
(
positionsHeader.typeHeaderOk<Cloud<passiveParticle>>(false)
|| coordinatesHeader.typeHeaderOk<Cloud<passiveParticle>>(false)
)
{
Info<< "[Found lagrangian]" << endl;
delete sprayPtr;
sprayPtr = new Cloud<passiveParticle>(*meshPtr, cloud::defaultName);
IOobjectList objects(*meshPtr, runTime.timeName(), cloud::prefix);
lagrangianScalarNames = objects.names(sprayScalarFieldName);
lagrangianVectorNames = objects.names(sprayVectorFieldName);
isSpray[fieldNames.size()] = true;
nSprayVariables += lagrangianScalarNames.size();
nSprayVariables += lagrangianVectorNames.size();
Num_unstructured_parts++;
}
Current_time_step = Num_time_steps;
runTime.setTime(timeDirs[Current_time_step], Current_time_step);
Num_variables = nVar + nSprayVariables;
Numparts_available =
Num_unstructured_parts + Num_structured_parts + nPatches;
#ifdef ENSIGHTDEBUG
Info<< "Leaving: USERD_set_filenames" << endl << flush;
#endif
return Z_OK;
}

Some files were not shown because too many files have changed in this diff.