ENH: refactor coordSet writers (#2347)

- the very old 'writer' class was fully stateless and always templated
  on a particular output type.

  This is now replaced with a 'coordSetWriter', following the same
  concepts previously introduced for surface writers (#1206).

  - writers change from being a generic, stateless set of routines to
    conforming more closely to the normal notion of a writer
    (see the sketch after this list).

  - Parallel data handling is done *outside* of the writers, since they
    are used in a wide variety of contexts and the caller is currently
    still in a better position to decide how to combine parallel data.
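
  For illustration, a minimal sketch of the stateful calling pattern
  this implies. The method names follow the surfaceWriter analogue
  (#1206) and are assumptions here, not verified coordSetWriter
  signatures:

  ```
  // Hedged sketch only: the format name, the coords/values data and
  // the exact method signatures are assumptions, not the final API.
  autoPtr<coordSetWriter> writerPtr = coordSetWriter::New("vtk");
  auto& writer = *writerPtr;

  writer.open(coords, outputPath);   // attach geometry and output target
  writer.write("T", values);         // write a field via writer state
  writer.close();                    // flush output, reset state
  ```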

ENH: update sampleSets to sample on per-field basis (#2347)

- sample/write a field in a single step.

- support for 'sampleOnExecute' to obtain values at execution
  intervals without writing (see the example after this list).

- support 'sets' input as a dictionary entry (as well as a list),
  which mirrors the corresponding change for sampled surfaces and
  permits the use of changeDictionary to modify the content.

- use globalIndex for gathering, which reduces parallel communication
  and code.

- qualify the sampleSet results (properties) with the name of the set.
  The sample results were previously unqualified, which meant that
  only the last property value was actually saved (earlier ones were
  overwritten).

  For example,
  ```
    sample1
    {
        scalar
        {
            average(line,T)         349.96521;
            min(line,T)             349.9544281;
            max(line,T)             350;
            average(cells,T)        349.9854619;
            min(cells,T)            349.6589286;
            max(cells,T)            350.4967271;
            average(line,epsilon)   0.04947733869;
            min(line,epsilon)       0.04449639927;
            max(line,epsilon)       0.06452856475;
        }
        label
        {
            size(line,T)            79;
            size(cells,T)           1720;
            size(line,epsilon)      79;
        }
    }
  ```
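
  To illustrate both new input options, a hedged sketch of a 'sets'
  function object. The 'sampleOnExecute' entry and the dictionary form
  of 'sets' are the point here; the set type and the remaining entries
  are routine placeholders, not taken from this commit:

  ```
    sample1
    {
        type            sets;
        libs            (sampling);

        // sample at execution intervals, without writing
        sampleOnExecute true;

        setFormat       raw;
        interpolationScheme cellPoint;
        fields          (T epsilon);

        // 'sets' as a dictionary (a list also remains possible),
        // which allows changeDictionary to modify the content
        sets
        {
            line
            {
                type    uniform;
                axis    distance;
                start   (0 0 0);
                end     (1 0 0);
                nPoints 100;
            }
        }
    }
  ```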

ENH: update particleTracks application

- use globalIndex to manage original parcel addressing and
  for gathering. Simplify the code by introducing a helper class and
  by storing intermediate fields in hash tables instead of separate
  lists (see the sketch below).
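
  As a rough sketch of the globalIndex pattern referred to above (the
  variable names and the gather() signature are assumptions from
  memory, not taken from this commit):

  ```
  // Offsets built from the local size on each rank
  const globalIndex globalParcels(localValues.size());

  // Map a local index to a unique global one
  const label globalId = globalParcels.toGlobal(localId);

  // Gather values onto the master in one step, replacing
  // hand-rolled Pstream exchanges
  scalarField allValues;
  globalParcels.gather(localValues, allValues);
  ```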

ADDITIONAL NOTES:

- the regionSizeDistribution largely retains separate writers, since
  the utility of placing sum/dev/count for all fields into a single
  file is questionable.

- the streamline writing remains a "soft" upgrade: scalar and vector
  fields are still collected a priori and not on-the-fly. This is due
  to how the streamline infrastructure is currently handled (it should
  be upgraded in the future).
Commit: c3e14ffdd5 (parent: 8a9ae839ce)
Author: Mark Olesen
Date:   2022-02-25 14:50:01 +01:00
96 changed files with 8735 additions and 5178 deletions

@@ -6,6 +6,7 @@
      \\/     M anipulation  |
 -------------------------------------------------------------------------------
     Copyright (C) 2011-2016 OpenFOAM Foundation
+    Copyright (C) 2022 OpenCFD Ltd.
 -------------------------------------------------------------------------------
 License
     This file is part of OpenFOAM.
@@ -45,73 +46,112 @@ Description
 #include "Time.H"
 #include "timeSelector.H"
 #include "OFstream.H"
-#include "passiveParticleCloud.H"
 #include "labelPairHashes.H"
-#include "SortableList.H"
+#include "IOField.H"
 #include "IOobjectList.H"
-#include "PtrList.H"
-#include "Field.H"
+#include "SortableList.H"
+#include "passiveParticleCloud.H"
 #include "steadyParticleTracksTemplates.H"

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

+using namespace Foam;
+
 namespace Foam
 {

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

-label validateFields
+// Extract list of IOobjects, modifying the input IOobjectList in the
+// process
+IOobjectList preFilterFields
 (
-    const List<word>& userFields,
-    const IOobjectList& cloudObjs
+    IOobjectList& cloudObjects,
+    const wordRes& acceptFields,
+    const wordRes& excludeFields
 )
 {
-    List<bool> ok(userFields.size(), false);
+    IOobjectList filteredObjects(cloudObjects.capacity());
+    DynamicList<label> missed(acceptFields.size());

-    forAll(userFields, i)
-    {
-        ok[i] = ok[i] || fieldOk<label>(cloudObjs, userFields[i]);
-        ok[i] = ok[i] || fieldOk<scalar>(cloudObjs, userFields[i]);
-        ok[i] = ok[i] || fieldOk<vector>(cloudObjs, userFields[i]);
-        ok[i] = ok[i] || fieldOk<sphericalTensor>(cloudObjs, userFields[i]);
-        ok[i] = ok[i] || fieldOk<symmTensor>(cloudObjs, userFields[i]);
-        ok[i] = ok[i] || fieldOk<tensor>(cloudObjs, userFields[i]);
-    }
+    // Selection here is slightly different than usual
+    // - an empty accept filter means "ignore everything"

-    label nOk = 0;
-    forAll(ok, i)
+    if (!acceptFields.empty())
     {
-        if (ok[i])
+        const wordRes::filter pred(acceptFields, excludeFields);
+        const wordList allNames(cloudObjects.sortedNames());
+
+        // Detect missing fields
+        forAll(acceptFields, i)
         {
-            ++nOk;
+            if
+            (
+                acceptFields[i].isLiteral()
+             && !allNames.found(acceptFields[i])
+            )
+            {
+                missed.append(i);
+            }
         }
-        else
+
+        for (const word& fldName : allNames)
         {
-            Info << "\n*** Warning: user specified field '" << userFields[i]
-                << "' unavailable" << endl;
+            const auto iter = cloudObjects.cfind(fldName);
+
+            if (!pred(fldName) || !iter.found())
+            {
+                continue;  // reject
+            }
+
+            const IOobject& io = *(iter.val());
+
+            if
+            (
+                //OR: fieldTypes::basic.found(io.headerClassName())
+                io.headerClassName() == IOField<label>::typeName
+             || io.headerClassName() == IOField<scalar>::typeName
+             || io.headerClassName() == IOField<vector>::typeName
+             || io.headerClassName() == IOField<sphericalTensor>::typeName
+             || io.headerClassName() == IOField<symmTensor>::typeName
+             || io.headerClassName() == IOField<tensor>::typeName
+            )
+            {
+                // Transfer from cloudObjects -> filteredObjects
+                filteredObjects.add(cloudObjects.remove(fldName));
+            }
         }
     }

-    return nOk;
+    if (missed.size())
+    {
+        WarningInFunction
+            << nl
+            << "Cannot find field file matching "
+            << UIndirectList<wordRe>(acceptFields, missed) << endl;
+    }
+
+    return filteredObjects;
 }


-template<>
-void writeVTK(OFstream& os, const label& value)
+void readFieldsAndWriteVTK
+(
+    OFstream& os,
+    const List<labelList>& particleMap,
+    const IOobjectList& filteredObjects
+)
 {
-    os << value;
+    processFields<label>(os, particleMap, filteredObjects);
+    processFields<scalar>(os, particleMap, filteredObjects);
+    processFields<vector>(os, particleMap, filteredObjects);
+    processFields<sphericalTensor>(os, particleMap, filteredObjects);
+    processFields<symmTensor>(os, particleMap, filteredObjects);
+    processFields<tensor>(os, particleMap, filteredObjects);
 }

+} // End namespace Foam

-template<>
-void writeVTK(OFstream& os, const scalar& value)
-{
-    os << value;
-}
-
-}
-
-using namespace Foam;

 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -153,37 +193,31 @@ int main(int argc, char *argv[])
         const fileName vtkTimePath(vtkPath/runTime.timeName());
         mkDir(vtkTimePath);

-        Info<< "    Reading particle positions" << endl;
-        PtrList<passiveParticle> particles(0);
+        pointField particlePosition;
+        labelList particleToTrack;
+        label nTracks = 0;

-        // Transfer particles to (more convenient) list
         {
-            passiveParticleCloud ppc(mesh, cloudName);
-            Info<< "\n    Read " << returnReduce(ppc.size(), sumOp<label>())
+            Info<< "    Reading particle positions" << endl;
+            passiveParticleCloud myCloud(mesh, cloudName);
+            Info<< "\n    Read " << returnReduce(myCloud.size(), sumOp<label>())
                 << " particles" << endl;

-            particles.setSize(ppc.size());
+            const label nParticles = myCloud.size();

-            label i = 0;
-            forAllIters(ppc, iter)
+            particlePosition.resize(nParticles);
+            particleToTrack.resize(nParticles);
+
+            LabelPairMap<label> trackTable;
+
+            label np = 0;
+            for (const passiveParticle& p : myCloud)
             {
-                particles.set(i++, ppc.remove(&iter()));
-            }
-            // myCloud should now be empty
-        }
-
-        List<label> particleToTrack(particles.size());
-        label nTracks = 0;
-
-        {
-            labelPairLookup trackTable;
-
-            forAll(particles, i)
-            {
-                const label origProc = particles[i].origProc();
-                const label origId = particles[i].origId();
+                const label origId = p.origId();
+                const label origProc = p.origProc();
+
+                particlePosition[np] = p.position();

                 const labelPair key(origProc, origId);
@@ -191,17 +225,19 @@ int main(int argc, char *argv[])
                 if (iter.found())
                 {
-                    particleToTrack[i] = *iter;
+                    particleToTrack[np] = *iter;
                 }
                 else
                 {
-                    particleToTrack[i] = nTracks;
-                    trackTable.insert(key, nTracks);
-                    ++nTracks;
+                    particleToTrack[np] = trackTable.size();
+                    trackTable.insert(key, trackTable.size());
                 }
-            }
-        }
+
+                ++np;
+            }
+
+            nTracks = trackTable.size();
+        }

         if (nTracks == 0)
         {
@@ -230,7 +266,7 @@ int main(int argc, char *argv[])
         }

         // Store the particle age per track
-        IOobjectList cloudObjs
+        IOobjectList cloudObjects
         (
             mesh,
             runTime.timeName(),
@@ -239,25 +275,30 @@ int main(int argc, char *argv[])
         // TODO: gather age across all procs
         {
-            tmp<scalarField> tage =
-                readParticleField<scalar>("age", cloudObjs);
+            tmp<IOField<scalar>> tage =
+                readParticleField<scalar>("age", cloudObjects);

-            const scalarField& age = tage();
+            const auto& age = tage();

             labelList trackSamples(nTracks, Zero);

             forAll(particleToTrack, i)
             {
-                const label trackI = particleToTrack[i];
-                const label sampleI = trackSamples[trackI];
-                agePerTrack[trackI][sampleI] = age[i];
-                particleMap[trackI][sampleI] = i;
-                trackSamples[trackI]++;
+                const label tracki = particleToTrack[i];
+                const label samplei = trackSamples[tracki];
+                agePerTrack[tracki][samplei] = age[i];
+                particleMap[tracki][samplei] = i;
+                ++trackSamples[tracki];
             }

             tage.clear();
         }

+        const IOobjectList filteredObjects
+        (
+            preFilterFields(cloudObjects, acceptFields, excludeFields)
+        );
+
         if (Pstream::master())
         {
             OFstream os(vtkTimePath/"particleTracks.vtk");
@@ -295,7 +336,7 @@ int main(int argc, char *argv[])
                 forAll(ids, j)
                 {
                     const label localId = particleIds[j];
-                    const vector pos(particles[localId].position());
+                    const point& pos = particlePosition[localId];
                     os << pos.x() << ' ' << pos.y() << ' ' << pos.z()
                         << nl;
                 }
@@ -330,22 +371,14 @@ int main(int argc, char *argv[])
             }

-            const label nFields = validateFields(userFields, cloudObjs);
+            const label nFields = filteredObjects.size();

             os << "POINT_DATA " << nPoints << nl
                 << "FIELD attributes " << nFields << nl;

             Info<< "\n    Processing fields" << nl << endl;

-            processFields<label>(os, particleMap, userFields, cloudObjs);
-            processFields<scalar>(os, particleMap, userFields, cloudObjs);
-            processFields<vector>(os, particleMap, userFields, cloudObjs);
-            processFields<sphericalTensor>
-                (os, particleMap, userFields, cloudObjs);
-            processFields<symmTensor>
-                (os, particleMap, userFields, cloudObjs);
-            processFields<tensor>(os, particleMap, userFields, cloudObjs);
+            readFieldsAndWriteVTK(os, particleMap, filteredObjects);
         }
     }

     Info<< endl;