ENH: refactor coordSet writers (#2347)

- the very old 'writer' class was fully stateless and always templated
  on a particular output type.

  This is now replaced with a 'coordSetWriter' with similar concepts
  as previously introduced for surface writers (#1206).

  - writers change from being a generic state-less set of routines to
    more properly conforming to the normal notion of a writer.

  - Parallel data is done *outside* of the writers, since they are used
    in a wide variety of contexts and the caller is currently still in
    a better position for deciding how to combine parallel data.

ENH: update sampleSets to sample on per-field basis (#2347)

- sample/write a field in a single step.

- support for 'sampleOnExecute' to obtain values at execution
  intervals without writing.

- support 'sets' input as a dictionary entry (as well as a list),
  which is similar to the changes for sampled-surface and permits use
  of changeDictionary to modify content.

- globalIndex for gather to reduce parallel communication, less code

- qualify the sampleSet results (properties) with the name of the set.
  The sample results were previously without a qualifier, which meant
  that only the last property value was actually saved (previous ones
  overwritten).

  For example,
  ```
    sample1
    {
        scalar
        {
            average(line,T) 349.96521;
            min(line,T)     349.9544281;
            max(line,T)     350;
            average(cells,T) 349.9854619;
            min(cells,T)    349.6589286;
            max(cells,T)    350.4967271;
            average(line,epsilon) 0.04947733869;
            min(line,epsilon) 0.04449639927;
            max(line,epsilon) 0.06452856475;
        }
        label
        {
            size(line,T)    79;
            size(cells,T)   1720;
            size(line,epsilon) 79;
        }
    }
  ```

ENH: update particleTracks application

- use globalIndex to manage original parcel addressing and
  for gathering. Simplify code by introducing a helper class,
  storing intermediate fields in hash tables instead of
  separate lists.

ADDITIONAL NOTES:

- the regionSizeDistribution largely retains separate writers since
  the utility of placing sum/dev/count for all fields into a single file
  is questionable.

- the streamline writing remains a "soft" upgrade, which means that
  scalar and vector fields are still collected a priori and not
  on-the-fly.  This is due to how the streamline infrastructure is
  currently handled (should be upgraded in the future).
This commit is contained in:
Mark Olesen
2022-02-25 14:50:01 +01:00
parent 8a9ae839ce
commit c3e14ffdd5
96 changed files with 8735 additions and 5178 deletions

View File

@ -17,7 +17,12 @@ label maxTracks(propsDict.getOrDefault<label>("maxTracks", -1));
word setFormat(propsDict.getOrDefault<word>("setFormat", "vtk"));
// Optional - if empty, select all
const wordRes fieldNames(propsDict.getOrDefault<wordRes>("fields", wordRes()));
wordRes acceptFields;
propsDict.readIfPresent("fields", acceptFields);
// Optional
wordRes excludeFields;
propsDict.readIfPresent("exclude", excludeFields);
const word UName(propsDict.getOrDefault<word>("U", "U"));

View File

@ -42,199 +42,34 @@ Description
#include "fvMesh.H"
#include "Time.H"
#include "timeSelector.H"
#include "OFstream.H"
#include "coordSetWriter.H"
#include "passiveParticleCloud.H"
#include "writer.H"
#include "ListOps.H"
#include "particleTracksSampler.H"
#define createTrack(field, trackValues) \
createTrackField \
( \
field, \
sampleFrequency, \
maxPositions, \
startIds, \
allOrigProcs, \
allOrigIds, \
trackValues \
);
#define setFields(fields, fieldNames) \
setTrackFields \
( \
obr, \
fields, \
fieldNames, \
nTracks, \
startIds, \
allOrigProcs, \
allOrigIds, \
maxPositions, \
sampleFrequency \
);
#define writeFields(fields, fieldNames, tracks, times, dirs) \
writeTrackFields \
( \
fields, \
fieldNames, \
tracks, \
times, \
dirs, \
setFormat, \
formatOptions, \
cloudName \
);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
using namespace Foam;
template<class Type>
void createTrackField
(
const Field<Type>& values,
const label sampleFrequency,
const label maxPositions,
const labelList& startIds,
const List<labelField>& allOrigProcs,
const List<labelField>& allOrigIds,
List<DynamicList<Type>>& trackValues
)
{
List<Field<Type>> procField(Pstream::nProcs());
procField[Pstream::myProcNo()] = values;
Pstream::gatherList(procField);
if (!Pstream::master())
{
return;
}
const label nTracks = trackValues.size();
forAll(procField, proci)
{
forAll(procField[proci], i)
{
const label globalId =
startIds[allOrigProcs[proci][i]] + allOrigIds[proci][i];
if (globalId % sampleFrequency == 0)
{
const label trackId = globalId/sampleFrequency;
if
(
trackId < nTracks
&& trackValues[trackId].size() < maxPositions
)
{
trackValues[trackId].append(procField[proci][i]);
}
}
}
}
}
template<class Type>
void writeTrackFields
(
List<List<DynamicList<Type>>>& fieldValues,
const wordList& fieldNames,
const PtrList<coordSet>& tracks,
const List<scalarField>& times,
const List<vectorField>& dirs,
const word& setFormat,
const dictionary& formatOptions,
const word& cloudName
coordSetWriter& writer,
HashTable<List<DynamicList<Type>>>& fieldTable
)
{
if (fieldValues.empty())
for (const word& fieldName : fieldTable.sortedToc())
{
return;
}
// Steal and reshape from List<DynamicList> to List<Field>
auto& input = fieldTable[fieldName];
auto writerPtr = writer<Type>::New(setFormat, formatOptions);
const fileName outFile(writerPtr().getFileName(tracks[0], wordList(0)));
const fileName outPath
(
functionObject::outputPrefix/cloud::prefix/cloudName/"particleTracks"
);
mkDir(outPath);
OFstream os(outPath/(pTraits<Type>::typeName & "tracks." + outFile.ext()));
Info<< "Writing " << pTraits<Type>::typeName << " particle tracks in "
<< setFormat << " format to " << os.name() << endl;
List<List<Field<Type>>> fields(fieldValues.size());
forAll(fields, fieldi)
{
fields[fieldi].setSize(fieldValues[fieldi].size());
forAll(fields[fieldi], tracki)
List<Field<Type>> fields(input.size());
forAll(input, tracki)
{
fields[fieldi][tracki].transfer(fieldValues[fieldi][tracki]);
fields[tracki].transfer(input[tracki]);
}
writer.write(fieldName, fields);
}
writerPtr().write(true, times, tracks, fieldNames, fields, os);
}
template<class Type>
Foam::label setTrackFields
(
const objectRegistry& obr,
List<List<DynamicList<Type>>>& fields,
List<word>& fieldNames,
const label nTracks,
const labelList& startIds,
const List<labelField>& allOrigProcs,
const List<labelField>& allOrigIds,
const label maxPositions,
const label sampleFrequency
)
{
const auto availableFieldPtrs = obr.lookupClass<IOField<Type>>();
fieldNames = availableFieldPtrs.toc();
if (Pstream::parRun())
{
Pstream::combineGather(fieldNames, ListOps::uniqueEqOp<word>());
Pstream::combineScatter(fieldNames);
Foam::sort(fieldNames);
}
const label nFields = fieldNames.size();
if (fields.empty())
{
fields.setSize(nFields);
fieldNames.setSize(nFields);
forAll(fields, i)
{
fields[i].setSize(nTracks);
}
}
forAll(fieldNames, fieldi)
{
const word& fieldName = fieldNames[fieldi];
const auto* fldPtr = obr.cfindObject<IOField<Type>>(fieldName);
createTrack
(
fldPtr ? static_cast<const Field<Type>>(*fldPtr) : Field<Type>(),
fields[fieldi]
);
}
return nFields;
}
@ -267,6 +102,14 @@ int main(int argc, char *argv[])
"Override the sample-frequency"
);
argList::addOption
(
"fields",
"wordRes",
"Specify single or multiple fields to write "
"(default: all or 'fields' from dictionary)\n"
"Eg, 'T' or '( \"U.*\" )'"
);
argList::addOption
(
"format",
"name",
@ -285,137 +128,147 @@ int main(int argc, char *argv[])
#include "createControls.H"
args.readListIfPresent<wordRe>("fields", acceptFields);
args.readIfPresent("format", setFormat);
args.readIfPresent("stride", sampleFrequency);
sampleFrequency = max(1, sampleFrequency); // sanity
// Setup the writer
auto writerPtr =
coordSetWriter::New
(
setFormat,
formatOptions.optionalSubDict(setFormat, keyType::LITERAL)
);
writerPtr().useTracks(true);
if (args.verbose())
{
writerPtr().verbose(true);
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
const fileName vtkPath(runTime.rootPath()/runTime.globalCaseName()/"VTK");
mkDir(vtkPath);
particleTracksSampler trackSampler;
Info<< "Scanning times to determine track data for cloud " << cloudName
<< nl << endl;
labelList maxIds(Pstream::nProcs(), -1);
forAll(timeDirs, timei)
{
runTime.setTime(timeDirs[timei], timei);
Info<< "Time = " << runTime.timeName() << endl;
Info<< " Reading particle positions" << endl;
passiveParticleCloud myCloud(mesh, cloudName);
Info<< " Read " << returnReduce(myCloud.size(), sumOp<label>())
<< " particles" << endl;
for (const passiveParticle& p : myCloud)
labelList maxIds(Pstream::nProcs(), -1);
forAll(timeDirs, timei)
{
const label origId = p.origId();
const label origProc = p.origProc();
runTime.setTime(timeDirs[timei], timei);
Info<< "Time = " << runTime.timeName() << endl;
// Handle case where we are processing particle data generated in
// parallel using more cores than when running this application.
if (origProc >= maxIds.size())
passiveParticleCloud myCloud(mesh, cloudName);
Info<< " Read " << returnReduce(myCloud.size(), sumOp<label>())
<< " particles" << endl;
for (const passiveParticle& p : myCloud)
{
maxIds.setSize(origProc+1, -1);
}
const label origId = p.origId();
const label origProc = p.origProc();
maxIds[origProc] = max(maxIds[origProc], origId);
// Handle the case where the particle data was generated in
// parallel using more cores than used by this application.
if (origProc >= maxIds.size())
{
maxIds.resize(origProc+1, -1);
}
maxIds[origProc] = max(maxIds[origProc], origId);
}
}
const label maxNProcs = returnReduce(maxIds.size(), maxOp<label>());
maxIds.resize(maxNProcs, -1);
Pstream::listCombineGather(maxIds, maxEqOp<label>());
Pstream::listCombineScatter(maxIds);
// From ids to count
const labelList numIds = maxIds + 1;
// Set parcel addressing
trackSampler.reset(numIds);
Info<< nl
<< "Detected particles originating from "
<< maxNProcs << " processors." << nl
<< "Particle statistics:" << endl;
if (Pstream::master())
{
const globalIndex& parcelAddr = trackSampler.parcelAddr();
for (const label proci : parcelAddr.allProcs())
{
Info<< " Found " << parcelAddr.localSize(proci)
<< " particles originating"
<< " from processor " << proci << nl;
}
}
}
label maxNProcs = returnReduce(maxIds.size(), maxOp<label>());
Info<< "Detected particles originating from " << maxNProcs
<< " processors." << nl << endl;
maxIds.setSize(maxNProcs, -1);
Pstream::listCombineGather(maxIds, maxEqOp<label>());
Pstream::listCombineScatter(maxIds);
labelList numIds = maxIds + 1;
Info<< nl << "Particle statistics:" << endl;
forAll(maxIds, proci)
{
Info<< " Found " << numIds[proci] << " particles originating"
<< " from processor " << proci << endl;
}
Info<< nl << endl;
// Calculate starting ids for particles on each processor
labelList startIds(numIds.size(), Zero);
for (label i = 0; i < numIds.size()-1; ++i)
{
startIds[i+1] += startIds[i] + numIds[i];
}
label nParticle = startIds.last() + numIds[startIds.size()-1];
trackSampler.setSampleRate(sampleFrequency, maxPositions, maxTracks);
// Number of tracks to generate
const label nTracks =
maxTracks > 0
? min(nParticle/sampleFrequency, maxTracks)
: nParticle/sampleFrequency;
const label nTracks = trackSampler.nTracks();
// Storage for all particle tracks
List<DynamicList<vector>> allTracks(nTracks);
List<DynamicList<point>> allTrackPos(nTracks);
List<DynamicList<scalar>> allTrackTimes(nTracks);
// Lists of field, tracki, trackValues
//List<List<DynamicList<label>>> labelFields;
List<List<DynamicList<scalar>>> scalarFields;
List<List<DynamicList<vector>>> vectorFields;
List<List<DynamicList<sphericalTensor>>> sphTensorFields;
List<List<DynamicList<symmTensor>>> symTensorFields;
List<List<DynamicList<tensor>>> tensorFields;
//List<word> labelFieldNames;
List<word> scalarFieldNames;
List<word> vectorFieldNames;
List<word> sphTensorFieldNames;
List<word> symTensorFieldNames;
List<word> tensorFieldNames;
// Track field values by name/type
HashTable<List<DynamicList<label>>> labelFields; // <- mostly unused
HashTable<List<DynamicList<scalar>>> scalarFields;
HashTable<List<DynamicList<vector>>> vectorFields;
HashTable<List<DynamicList<sphericalTensor>>> sphericalTensorFields;
HashTable<List<DynamicList<symmTensor>>> symmTensorFields;
HashTable<List<DynamicList<tensor>>> tensorFields;
Info<< "\nGenerating " << nTracks << " particle tracks for cloud "
<< cloudName << nl << endl;
Info<< nl
<< "Generating " << nTracks
<< " particle tracks for cloud " << cloudName << nl << endl;
forAll(timeDirs, timei)
{
runTime.setTime(timeDirs[timei], timei);
Info<< "Time = " << runTime.timeName() << endl;
Info<< "Time = " << runTime.timeName() << " (processing)" << endl;
// Read particles. Will be size 0 if no particles.
Info<< " Reading particle positions" << endl;
passiveParticleCloud myCloud(mesh, cloudName);
pointField localPositions(myCloud.size(), Zero);
scalarField localTimes(myCloud.size(), Zero);
pointField localPositions(myCloud.size());
scalarField localTimes(myCloud.size(), runTime.value());
List<labelField> allOrigIds(Pstream::nProcs());
List<labelField> allOrigProcs(Pstream::nProcs());
// Collect the track data on all processors that have positions
allOrigIds[Pstream::myProcNo()].setSize(myCloud.size(), Zero);
allOrigProcs[Pstream::myProcNo()].setSize(myCloud.size(), Zero);
label i = 0;
for (const passiveParticle& p : myCloud)
// Gather track data from all processors that have positions
trackSampler.resetCloud(myCloud.size());
{
allOrigIds[Pstream::myProcNo()][i] = p.origId();
allOrigProcs[Pstream::myProcNo()][i] = p.origProc();
localPositions[i] = p.position();
localTimes[i] = runTime.value();
++i;
labelField& origIds = trackSampler.origParcelIds_;
labelField& origProcs = trackSampler.origProcIds_;
label np = 0;
for (const passiveParticle& p : myCloud)
{
origIds[np] = p.origId();
origProcs[np] = p.origProc();
localPositions[np] = p.position();
++np;
}
trackSampler.gatherInplace(origIds);
trackSampler.gatherInplace(origProcs);
}
// Collect the track data on the master processor
Pstream::gatherList(allOrigIds);
Pstream::gatherList(allOrigProcs);
// Read cloud fields (from disk) into object registry
objectRegistry obr
(
IOobject
@ -426,63 +279,122 @@ int main(int argc, char *argv[])
)
);
myCloud.readFromFiles(obr, fieldNames);
myCloud.readFromFiles(obr, acceptFields, excludeFields);
// Create track positions and track time fields
// (not registered as IOFields)
// Note: createTrack is a local #define to reduce arg count...
createTrack(localPositions, allTracks);
createTrack(localTimes, allTrackTimes);
trackSampler.createTrackField(localPositions, allTrackPos);
trackSampler.createTrackField(localTimes, allTrackTimes);
// Create the track fields
// Note: setFields is a local #define to reduce arg count...
//setFields(labelFields, labelFieldNames);
setFields(scalarFields, scalarFieldNames);
setFields(vectorFields, vectorFieldNames);
setFields(sphTensorFields, sphTensorFieldNames);
setFields(symTensorFields, symTensorFieldNames);
setFields(tensorFields, tensorFieldNames);
///trackSampler.setTrackFields(obr, labelFields);
trackSampler.setTrackFields(obr, scalarFields);
trackSampler.setTrackFields(obr, vectorFields);
trackSampler.setTrackFields(obr, sphericalTensorFields);
trackSampler.setTrackFields(obr, symmTensorFields);
trackSampler.setTrackFields(obr, tensorFields);
}
const label nFields =
(
labelFields.size()
+ scalarFields.size()
+ vectorFields.size()
+ sphericalTensorFields.size()
+ symmTensorFields.size()
+ tensorFields.size()
);
Info<< nl
<< "Extracted " << nFields << " cloud fields" << nl;
if (nFields)
{
#undef doLocalCode
#define doLocalCode(FieldContent) \
if (!FieldContent.empty()) \
{ \
Info<< " "; \
for (const word& fieldName : FieldContent.sortedToc()) \
{ \
Info<< ' ' << fieldName; \
} \
Info<< nl; \
}
doLocalCode(labelFields);
doLocalCode(scalarFields);
doLocalCode(vectorFields);
doLocalCode(sphericalTensorFields);
doLocalCode(symmTensorFields);
doLocalCode(tensorFields);
#undef doLocalCode
}
Info<< nl
<< "Writing particle tracks (" << setFormat << " format)" << nl;
if (Pstream::master())
{
PtrList<coordSet> tracks(allTracks.size());
List<scalarField> times(tracks.size());
PtrList<coordSet> tracks(allTrackPos.size());
List<scalarField> times(allTrackPos.size());
forAll(tracks, tracki)
{
tracks.set
(
tracki,
new coordSet("track" + Foam::name(tracki), "distance")
new coordSet("track" + Foam::name(tracki), "xyz")
);
tracks[tracki].transfer(allTracks[tracki]);
tracks[tracki].transfer(allTrackPos[tracki]);
times[tracki].transfer(allTrackTimes[tracki]);
if (!tracki) tracks[0].rename("tracks");
}
Info<< nl;
/// Currently unused
/// List<vectorField> dirs(nTracks);
/// const auto Uiter = vectorFields.cfind(UName);
/// if (Uiter.found())
/// {
/// const auto& UTracks = *Uiter;
/// forAll(UTracks, tracki)
/// {
/// const auto& U = UTracks[tracki];
/// dirs[tracki] = U/(mag(U) + ROOTVSMALL);
/// }
/// }
const label Uid = vectorFieldNames.find(UName);
List<vectorField> dirs(nTracks);
if (Uid != -1)
// Write tracks with fields
if (nFields)
{
const auto& UTracks = vectorFields[Uid];
forAll(UTracks, tracki)
{
const auto& U = UTracks[tracki];
dirs[tracki] = U/(mag(U) + ROOTVSMALL);
}
}
auto& writer = *writerPtr;
// Write track fields
// Note: writeFields is a local #define to reduce arg count...
//writeFields(allLabelFields, labelFieldNames, tracks);
writeFields(scalarFields, scalarFieldNames, tracks, times, dirs);
writeFields(vectorFields, vectorFieldNames, tracks, times, dirs);
writeFields(sphTensorFields, sphTensorFieldNames, tracks, times, dirs);
writeFields(symTensorFields, symTensorFieldNames, tracks, times, dirs);
writeFields(tensorFields, tensorFieldNames, tracks, times, dirs);
const fileName outputPath
(
functionObject::outputPrefix/cloud::prefix/cloudName
/ "particleTracks" / "tracks"
);
Info<< " "
<< runTime.relativePath(outputPath) << endl;
writer.open(tracks, outputPath);
writer.setTrackTimes(times);
writer.nFields(nFields);
///writeTrackFields(writer, labelFields);
writeTrackFields(writer, scalarFields);
writeTrackFields(writer, vectorFields);
writeTrackFields(writer, sphericalTensorFields);
writeTrackFields(writer, symmTensorFields);
writeTrackFields(writer, tensorFields);
}
else
{
Info<< "Warning: no fields, did not write" << endl;
}
}
Info<< nl << "End\n" << endl;

View File

@ -0,0 +1,190 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::particleTracksSampler
Description
Helper class when generating particle tracks.
The interface is fairly rudimentary.
SourceFiles
particleTracksSamplerTemplates.C
\*---------------------------------------------------------------------------*/
#ifndef Foam_particleTracksSampler_H
#define Foam_particleTracksSampler_H
#include "globalIndex.H"
#include "fieldTypes.H"
#include "IOField.H"
#include "labelField.H"
#include "DynamicList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// Forward Declarations
class objectRegistry;
/*---------------------------------------------------------------------------*\
Class particleTracksSampler Declaration
\*---------------------------------------------------------------------------*/
class particleTracksSampler
{
    // Private Data

        //- The sampling interval (every nth parcel starts a track)
        label stride_ = 1;

        //- The number of tracks
        label nTracks_ = 1;

        //- Upper limit for number of track positions
        label maxPositions_ = 10000;

        //- The bin for sampled parcels.
        labelField trackIds_;

        //- The addressing for gathering cloud fields etc
        globalIndex cloudGather_;

        //- The originating parcel addressing
        globalIndex origParcelAddr_;

public:

    // Data Members

        //- The originating parcel ids
        labelField origParcelIds_;

        //- The originating processor ids
        labelField origProcIds_;

    // Member Functions

        //- The original parcel addressing
        const globalIndex& parcelAddr() const noexcept
        {
            return origParcelAddr_;
        }

        //- Total number of particles
        label nParticle() const
        {
            return origParcelAddr_.totalSize();
        }

        //- Number of tracks to generate
        label nTracks() const noexcept
        {
            return nTracks_;
        }

        //- Define the orig parcel mappings, clearing any previous state
        void reset(const labelUList& origParcelCounts)
        {
            origParcelAddr_.reset(origParcelCounts);
            origParcelIds_.clear();
            origProcIds_.clear();
            trackIds_.clear();
        }

        //- Set the sampling stride, upper limits
        //  \return Number of tracks to generate
        label setSampleRate
        (
            const label sampleFreq,
            const label maxPositions,
            const label maxTracks = -1
        )
        {
            // numTracks = numParticles/stride
            stride_ = max(1, sampleFreq);
            maxPositions_ = maxPositions;
            nTracks_ = (origParcelAddr_.totalSize()/stride_);
            if (maxTracks > 0)
            {
                nTracks_ = min(nTracks_, maxTracks);
            }
            return nTracks_;
        }

        //- Reset gather addressing and size the parcel id buffers
        //- for the current local cloud size
        void resetCloud(const label localCloudSize)
        {
            cloudGather_.reset(localCloudSize, globalIndex::gatherOnly{});
            origParcelIds_.resize_nocopy(localCloudSize);
            origProcIds_.resize_nocopy(localCloudSize);
        }

        //- Gather list contributions (in-place) using cloud addressing
        template<class Type>
        void gatherInplace(List<Type>& fld) const
        {
            cloudGather_.gatherInplace(fld);
        }

        //- Distribute sampled values onto their respective tracks
        template<class Type>
        void createTrackField
        (
            const UList<Type>& values,
            List<DynamicList<Type>>& trackValues
        ) const;

        //- Lookup IOField<Type> fields from the registry and sample
        //- them onto the tracks
        //  \return the number of fields in the table
        template<class Type>
        label setTrackFields
        (
            const objectRegistry& obr,
            HashTable<List<DynamicList<Type>>>& fieldTable
        ) const;
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#ifdef NoRepository
#include "particleTracksSamplerTemplates.C"
#endif
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -0,0 +1,108 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "objectRegistry.H"
#include "ListOps.H"
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
void Foam::particleTracksSampler::createTrackField
(
const UList<Type>& values,
List<DynamicList<Type>>& trackValues
) const
{
List<Type> allValues(cloudGather_.gather(values));
const label nTracks = trackValues.size();
forAll(allValues, i)
{
const label globalId =
origParcelAddr_.toGlobal(origProcIds_[i], origParcelIds_[i]);
if (globalId % stride_ == 0)
{
const label trackId = globalId/stride_;
if
(
trackId < nTracks
&& trackValues[trackId].size() < maxPositions_
)
{
trackValues[trackId].append(allValues[i]);
}
}
}
}
template<class Type>
Foam::label Foam::particleTracksSampler::setTrackFields
(
    const objectRegistry& obr,
    HashTable<List<DynamicList<Type>>>& fieldTable
) const
{
    // On the first call: discover the field names and create the
    // per-track storage slots for each of them
    if (fieldTable.empty())
    {
        wordList names = obr.names<IOField<Type>>();

        if (Pstream::parRun())
        {
            // A field may not be present on every rank - merge the names
            Pstream::combineGather(names, ListOps::uniqueEqOp<word>());
            Pstream::combineScatter(names);
        }

        for (const word& name : names)
        {
            fieldTable(name).resize(nTracks());
        }
    }

    // Visit the fields in sorted order (parallel-consistent)
    for (const word& name : fieldTable.sortedToc())
    {
        const auto* ioptr = obr.cfindObject<IOField<Type>>(name);

        this->createTrackField
        (
            (ioptr ? static_cast<const UList<Type>&>(*ioptr) : UList<Type>::null()),
            fieldTable[name]
        );
    }

    return fieldTable.size();
}
// ************************************************************************* //

View File

@ -8,7 +8,12 @@ IOdictionary propsDict(dictIO);
const word cloudName(propsDict.get<word>("cloud"));
List<word> userFields(propsDict.lookup("fields"));
// Mandatory - if empty, select none
wordRes acceptFields(propsDict.get<wordRes>("fields"));
// Optional
wordRes excludeFields;
propsDict.readIfPresent("exclude", excludeFields);
const dictionary formatOptions
(

View File

@ -6,6 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
Copyright (C) 2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -45,73 +46,112 @@ Description
#include "Time.H"
#include "timeSelector.H"
#include "OFstream.H"
#include "passiveParticleCloud.H"
#include "labelPairHashes.H"
#include "SortableList.H"
#include "IOField.H"
#include "IOobjectList.H"
#include "PtrList.H"
#include "Field.H"
#include "SortableList.H"
#include "passiveParticleCloud.H"
#include "steadyParticleTracksTemplates.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
using namespace Foam;
namespace Foam
{
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
label validateFields
// Extract list of IOobjects, modifying the input IOobjectList in the
// process
IOobjectList preFilterFields
(
const List<word>& userFields,
const IOobjectList& cloudObjs
IOobjectList& cloudObjects,
const wordRes& acceptFields,
const wordRes& excludeFields
)
{
List<bool> ok(userFields.size(), false);
IOobjectList filteredObjects(cloudObjects.capacity());
DynamicList<label> missed(acceptFields.size());
forAll(userFields, i)
{
ok[i] = ok[i] || fieldOk<label>(cloudObjs, userFields[i]);
ok[i] = ok[i] || fieldOk<scalar>(cloudObjs, userFields[i]);
ok[i] = ok[i] || fieldOk<vector>(cloudObjs, userFields[i]);
ok[i] = ok[i] || fieldOk<sphericalTensor>(cloudObjs, userFields[i]);
ok[i] = ok[i] || fieldOk<symmTensor>(cloudObjs, userFields[i]);
ok[i] = ok[i] || fieldOk<tensor>(cloudObjs, userFields[i]);
}
// Selection here is slightly different than usual
// - an empty accept filter means "ignore everything"
label nOk = 0;
forAll(ok, i)
if (!acceptFields.empty())
{
if (ok[i])
const wordRes::filter pred(acceptFields, excludeFields);
const wordList allNames(cloudObjects.sortedNames());
// Detect missing fields
forAll(acceptFields, i)
{
++nOk;
if
(
acceptFields[i].isLiteral()
&& !allNames.found(acceptFields[i])
)
{
missed.append(i);
}
}
else
for (const word& fldName : allNames)
{
Info << "\n*** Warning: user specified field '" << userFields[i]
<< "' unavailable" << endl;
const auto iter = cloudObjects.cfind(fldName);
if (!pred(fldName) || !iter.found())
{
continue; // reject
}
const IOobject& io = *(iter.val());
if
(
//OR: fieldTypes::basic.found(io.headerClassName())
io.headerClassName() == IOField<label>::typeName
|| io.headerClassName() == IOField<scalar>::typeName
|| io.headerClassName() == IOField<vector>::typeName
|| io.headerClassName() == IOField<sphericalTensor>::typeName
|| io.headerClassName() == IOField<symmTensor>::typeName
|| io.headerClassName() == IOField<tensor>::typeName
)
{
// Transfer from cloudObjects -> filteredObjects
filteredObjects.add(cloudObjects.remove(fldName));
}
}
}
return nOk;
if (missed.size())
{
WarningInFunction
<< nl
<< "Cannot find field file matching "
<< UIndirectList<wordRe>(acceptFields, missed) << endl;
}
return filteredObjects;
}
template<>
void writeVTK(OFstream& os, const label& value)
void readFieldsAndWriteVTK
(
OFstream& os,
const List<labelList>& particleMap,
const IOobjectList& filteredObjects
)
{
os << value;
processFields<label>(os, particleMap, filteredObjects);
processFields<scalar>(os, particleMap, filteredObjects);
processFields<vector>(os, particleMap, filteredObjects);
processFields<sphericalTensor>(os, particleMap, filteredObjects);
processFields<symmTensor>(os, particleMap, filteredObjects);
processFields<tensor>(os, particleMap, filteredObjects);
}
} // End namespace Foam
template<>
void writeVTK(OFstream& os, const scalar& value)
{
os << value;
}
}
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -153,37 +193,31 @@ int main(int argc, char *argv[])
const fileName vtkTimePath(vtkPath/runTime.timeName());
mkDir(vtkTimePath);
Info<< " Reading particle positions" << endl;
PtrList<passiveParticle> particles(0);
pointField particlePosition;
labelList particleToTrack;
label nTracks = 0;
// Transfer particles to (more convenient) list
{
passiveParticleCloud ppc(mesh, cloudName);
Info<< "\n Read " << returnReduce(ppc.size(), sumOp<label>())
Info<< " Reading particle positions" << endl;
passiveParticleCloud myCloud(mesh, cloudName);
Info<< "\n Read " << returnReduce(myCloud.size(), sumOp<label>())
<< " particles" << endl;
particles.setSize(ppc.size());
const label nParticles = myCloud.size();
label i = 0;
forAllIters(ppc, iter)
particlePosition.resize(nParticles);
particleToTrack.resize(nParticles);
LabelPairMap<label> trackTable;
label np = 0;
for (const passiveParticle& p : myCloud)
{
particles.set(i++, ppc.remove(&iter()));
}
// myCloud should now be empty
}
List<label> particleToTrack(particles.size());
label nTracks = 0;
{
labelPairLookup trackTable;
forAll(particles, i)
{
const label origProc = particles[i].origProc();
const label origId = particles[i].origId();
const label origId = p.origId();
const label origProc = p.origProc();
particlePosition[np] = p.position();
const labelPair key(origProc, origId);
@ -191,17 +225,19 @@ int main(int argc, char *argv[])
if (iter.found())
{
particleToTrack[i] = *iter;
particleToTrack[np] = *iter;
}
else
{
particleToTrack[i] = nTracks;
trackTable.insert(key, nTracks);
++nTracks;
particleToTrack[np] = trackTable.size();
trackTable.insert(key, trackTable.size());
}
}
}
++np;
}
nTracks = trackTable.size();
}
if (nTracks == 0)
{
@ -230,7 +266,7 @@ int main(int argc, char *argv[])
}
// Store the particle age per track
IOobjectList cloudObjs
IOobjectList cloudObjects
(
mesh,
runTime.timeName(),
@ -239,25 +275,30 @@ int main(int argc, char *argv[])
// TODO: gather age across all procs
{
tmp<scalarField> tage =
readParticleField<scalar>("age", cloudObjs);
tmp<IOField<scalar>> tage =
readParticleField<scalar>("age", cloudObjects);
const scalarField& age = tage();
const auto& age = tage();
labelList trackSamples(nTracks, Zero);
forAll(particleToTrack, i)
{
const label trackI = particleToTrack[i];
const label sampleI = trackSamples[trackI];
agePerTrack[trackI][sampleI] = age[i];
particleMap[trackI][sampleI] = i;
trackSamples[trackI]++;
const label tracki = particleToTrack[i];
const label samplei = trackSamples[tracki];
agePerTrack[tracki][samplei] = age[i];
particleMap[tracki][samplei] = i;
++trackSamples[tracki];
}
tage.clear();
}
const IOobjectList filteredObjects
(
preFilterFields(cloudObjects, acceptFields, excludeFields)
);
if (Pstream::master())
{
OFstream os(vtkTimePath/"particleTracks.vtk");
@ -295,7 +336,7 @@ int main(int argc, char *argv[])
forAll(ids, j)
{
const label localId = particleIds[j];
const vector pos(particles[localId].position());
const point& pos = particlePosition[localId];
os << pos.x() << ' ' << pos.y() << ' ' << pos.z()
<< nl;
}
@ -330,22 +371,14 @@ int main(int argc, char *argv[])
}
const label nFields = validateFields(userFields, cloudObjs);
const label nFields = filteredObjects.size();
os << "POINT_DATA " << nPoints << nl
<< "FIELD attributes " << nFields << nl;
Info<< "\n Processing fields" << nl << endl;
processFields<label>(os, particleMap, userFields, cloudObjs);
processFields<scalar>(os, particleMap, userFields, cloudObjs);
processFields<vector>(os, particleMap, userFields, cloudObjs);
processFields<sphericalTensor>
(os, particleMap, userFields, cloudObjs);
processFields<symmTensor>
(os, particleMap, userFields, cloudObjs);
processFields<tensor>(os, particleMap, userFields, cloudObjs);
readFieldsAndWriteVTK(os, particleMap, filteredObjects);
}
}
Info<< endl;

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
Copyright (C) 2016-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -31,113 +31,71 @@ License
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type>
bool Foam::fieldOk(const IOobjectList& cloudObjs, const word& name)
{
return cloudObjs.cfindObject<IOField<Type>>(name) != nullptr;
}
template<class Type>
Foam::tmp<Foam::IOField<Type>> Foam::readParticleField
(
    const word& fieldName,
    const IOobjectList& cloudObjects
)
{
    // Locate the cloud field header, requiring an exact IOField<Type> match
    const IOobject* io = cloudObjects.cfindObject<IOField<Type>>(fieldName);

    if (io)
    {
        // Read the field from disk; tmp manages the lifetime for the caller
        return tmp<IOField<Type>>::New(*io);
    }

    FatalErrorInFunction
        << "Cloud field name " << fieldName
        << " not found or the incorrect type"
        << abort(FatalError);

    return nullptr;
}
template<class Type>
void Foam::writeVTK(OFstream& os, const Type& value)
{
    // Write the components of a single value, space-separated,
    // without a trailing newline (the caller controls line breaks)
    os << component(value, 0);
    for (label d=1; d < pTraits<Type>::nComponents; ++d)
    {
        os << ' ' << component(value, d);
    }
}
template<class Type>
void Foam::writeVTKFields
void Foam::writeVTKField
(
OFstream& os,
const PtrList<List<Type>>& values,
const List<List<label>>& addr,
const List<word>& fieldNames
const IOField<Type>& field,
const List<labelList>& addr
)
{
label step = max(floor(8/pTraits<Type>::nComponents), 1);
const label step = max(1, floor(8/pTraits<Type>::nComponents));
forAll(values, fieldi)
Info<< " writing field " << field.name() << endl;
os << nl << field.name() << ' '
<< int(pTraits<Type>::nComponents) << ' '
<< field.size() << " float" << nl;
///label offset = 0;
for (const labelList& ids : addr)
{
Info<< " writing field " << fieldNames[fieldi] << endl;
os << nl << fieldNames[fieldi] << ' '
<< int(pTraits<Type>::nComponents) << ' '
<< values[fieldi].size() << " float" << nl;
label offset = 0;
forAll(addr, tracki)
List<Type> data(UIndirectList<Type>(field, ids));
label nData = data.size() - 1;
forAll(data, i)
{
const List<label> ids(addr[tracki]);
List<Type> data(UIndirectList<Type>(values[fieldi], ids));
label nData = data.size() - 1;
forAll(data, i)
writeVTK<Type>(os, data[i]);
if (((i + 1) % step == 0) || (i == nData))
{
writeVTK<Type>(os, data[i]);
if (((i + 1) % step == 0) || (i == nData))
{
os << nl;
}
else
{
os << ' ';
}
os << nl;
}
else
{
os << ' ';
}
offset += ids.size();
}
/// offset += ids.size();
}
}
@ -146,36 +104,28 @@ template<class Type>
void Foam::processFields
(
OFstream& os,
const List<List<label>>& addr,
const List<word>& userFieldNames,
const IOobjectList& cloudObjs
const List<labelList>& addr,
const IOobjectList& cloudObjects
)
{
IOobjectList objects(cloudObjs.lookupClass(IOField<Type>::typeName));
if (objects.size())
for (const word& fldName : cloudObjects.sortedNames<IOField<Type>>())
{
DynamicList<word> fieldNames(objects.size());
forAll(userFieldNames, i)
const IOobject* io = cloudObjects.cfindObject<IOField<Type>>(fldName);
if (!io)
{
const IOobject* obj = objects.findObject(userFieldNames[i]);
if (obj != nullptr)
{
fieldNames.append(obj->name());
}
FatalErrorInFunction
<< "Could not read field:" << fldName
<< " type:" << IOField<Type>::typeName
<< abort(FatalError);
}
fieldNames.shrink();
else
{
Info<< " reading field " << fldName << endl;
IOField<Type> field(*io);
PtrList<List<Type>> values(fieldNames.size());
readFields<Type>(values, fieldNames, cloudObjs);
writeVTKFields<Type>
(
os,
values,
addr,
fieldNames
);
writeVTKField<Type>(os, field, addr);
}
}
}

View File

@ -6,6 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
Copyright (C) 2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -25,13 +26,12 @@ License
\*---------------------------------------------------------------------------*/
#ifndef Foam_steadyParticleTracksTemplates_H
#define Foam_steadyParticleTracksTemplates_H

#include "OFstream.H"
#include "IOobjectList.H"
#include "PtrList.H"
#include "Field.H"
#include "IOField.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -41,42 +41,29 @@ namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type>
bool fieldOk(const IOobjectList& cloudObjs, const word& name);
template<class Type>
tmp<Field<Type>> readParticleField
tmp<IOField<Type>> readParticleField
(
const word& name,
const IOobjectList cloudObjs
);
template<class Type>
void readFields
(
PtrList<List<Type>>& values,
const List<word>& fields,
const IOobjectList& cloudObjs
const word& fieldName,
const IOobjectList& cloudObjects
);
template<class Type>
void writeVTK(OFstream& os, const Type& value);
template<class Type>
void writeVTKFields
void writeVTKField
(
OFstream& os,
const PtrList<List<Type>>& values,
const List<List<label>>& addr,
const List<word>& fieldNames
const IOField<Type>& field,
const List<labelList>& addr
);
template<class Type>
void processFields
(
OFstream& os,
const List<List<label>>& addr,
const List<word>& userFieldNames,
const IOobjectList& cloudObjs
const List<labelList>& addr,
const IOobjectList& cloudObjects
);
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //