/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2015-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

Application
    redistributePar

Group
    grpParallelUtilities

Description
    Redistributes existing decomposed mesh and fields according to the current
    settings in the decomposeParDict file.

    Must be run on the maximum of the number of source and destination
    processors. Balances the mesh and writes the new mesh to a new time
    directory.

    Can optionally run in decompose/reconstruct mode to decompose/reconstruct
    mesh and fields.

Usage
    \b redistributePar [OPTION]

    Options:
      - \par -decompose
        Remove any existing \a processor subdirectories and decompose the
        mesh. Equivalent to running without processor subdirectories.

      - \par -reconstruct
        Reconstruct mesh and fields (like reconstructParMesh+reconstructPar).

      - \par -newTimes
        (in combination with -reconstruct) reconstruct only new times.

      - \par -dry-run
        (not in combination with -reconstruct) Test without actually
        decomposing.

      - \par -cellDist
        (not in combination with -reconstruct) Write the cell distribution
        as a labelList, for use with the 'manual' decomposition method, and
        as a volScalarField for visualization.

      - \par -region \<regionName\>
        Distribute named region.

      - \par -allRegions
        Distribute all regions in regionProperties. Does not check for
        existence of processor*.

\*---------------------------------------------------------------------------*/
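
// Typical invocations, sketched from the options documented above (the MPI
// launcher and the core count are illustrative assumptions, not
// prescriptions):
//
//     # Decompose a serial case onto 4 processors
//     mpirun -np 4 redistributePar -decompose -parallel
//
//     # Redistribute an existing decomposition per decomposeParDict
//     mpirun -np 4 redistributePar -parallel
//
//     # Reconstruct processor directories into the undecomposed case
//     mpirun -np 4 redistributePar -reconstruct -parallel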

#include "argList.H"
#include "sigFpe.H"
#include "Time.H"
#include "fvMesh.H"
#include "fvMeshTools.H"
#include "fvMeshDistribute.H"
#include "decompositionMethod.H"
#include "decompositionModel.H"
#include "timeSelector.H"
#include "PstreamReduceOps.H"
#include "volFields.H"
#include "surfaceFields.H"
#include "IOmapDistributePolyMesh.H"
#include "IOobjectList.H"
#include "globalIndex.H"
#include "loadOrCreateMesh.H"
#include "processorFvPatchField.H"
#include "zeroGradientFvPatchFields.H"
#include "topoSet.H"
#include "regionProperties.H"
#include "parFvFieldReconstructor.H"
#include "parLagrangianRedistributor.H"
#include "unmappedPassivePositionParticleCloud.H"
#include "hexRef8Data.H"
#include "meshRefinement.H"
#include "pointFields.H"
#include "cyclicACMIFvPatch.H"
#include "masterUncollatedFileOperation.H"
#include "uncollatedFileOperation.H"
#include "collatedFileOperation.H"

using namespace Foam;

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

const int debug(::Foam::debug::debugSwitch("redistributePar", 0));


void createTimeDirs(const fileName& path)
{
    // Get current set of local processor's time directories. Uses
    // fileHandler
    const instantList localTimeDirs(Time::findTimes(path, "constant"));

    instantList masterTimeDirs;
    if (Pstream::master())
    {
        //const bool oldParRun = Pstream::parRun(false);
        //timeDirs = Time::findTimes(path, "constant");
        //Pstream::parRun(oldParRun);    // Restore parallel state
        masterTimeDirs = localTimeDirs;
    }
    Pstream::scatter(masterTimeDirs);
    //DebugVar(masterTimeDirs);
    //DebugVar(localTimeDirs);

    // Sync any cached times (e.g. masterUncollatedFileOperation::times_)
    // since only master would have done the findTimes
    for (const instant& t : masterTimeDirs)
    {
        if (!localTimeDirs.found(t))
        {
            const fileName timePath(path/t.name());

            //Pout<< "Time:" << t << nl
            //    << "    raw :" << timePath << nl
            //    << endl;
            mkDir(timePath);
        }
    }

    // Just to make sure: remove all state and re-scan
    fileHandler().flush();
    (void)Time::findTimes(path, "constant");
}
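
// Worked example of the effect (hypothetical layout): if master finds time
// directories 0/, 0.1/ and 0.2/ but some rank only has 0/ and 0.1/, that
// rank creates an empty 0.2/ directory so that cached findTimes() results
// agree on all processors afterwards.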

void copyUniform
(
    const fileOperation& fh,
    const bool decompose,
    const bool reconstruct,
    const word& readTimeName,
    const objectRegistry& readDb,
    const objectRegistry& writeDb
)
{
    // Detect uniform/ at original database + time
    const IOobject readIO("uniform", readTimeName, readDb);
    const fileName readPath
    (
        fh.dirPath
        (
            false,          // local directory
            readIO,
            false           // do not search in time
        )
    );

    //if (Pstream::master() && !readPath.empty())
    if (!readPath.empty())
    {
        Info<< "Detected additional non-decomposed files in "
            << readPath << endl;

        // readPath: searching is the same for all file handlers. Typical:
        //  <case>/0.1/uniform              (parent dir, decompose mode)
        //  <case>/processor1/0.1/uniform   (redistribute/reconstruct mode)
        //  <case>/processors2/0.1/uniform  ,,
        // writePath:
        //  uncollated : <case>/0.1/uniform (reconstruct mode). Should only
        //               be done by master
        //  uncollated : <case>/processorXXX/0.1/uniform. Should be done by
        //               all.
        //  collated   : <case>/processors2/0.1/uniform. Should be done by
        //               local master only.

        // See what local directory
        const IOobject writeIO("uniform", writeDb.time().timeName(), writeDb);
        const fileName writePath
        (
            fh.objectPath
            (
                writeIO,
                word::null
            )
        );

        // Do we already have this directory?
        const fileName currentPath(fh.dirPath(false, writeIO, false));

        if (::debug)
        {
            Pout<< "    readPath   :" << readPath << endl;
            Pout<< "    writePath  :" << writePath << endl;
            Pout<< "    currentPath:" << currentPath << endl;
        }

        if (readPath == writePath)
        {
            return;
        }

        if (currentPath.empty())
        {
            if (decompose)
            {
                // All processors copy to destination
                fh.cp(readPath, writePath);
            }
            else if (reconstruct)
            {
                // Only master
                if (Pstream::master())
                {
                    const bool oldParRun = Pstream::parRun(false);
                    fh.cp(readPath, writePath);
                    Pstream::parRun(oldParRun);
                }
            }
            else
            {
                // Redistribute. If same destination path do only on master,
                // if different path do on all processors. For now check
                // if collated file handler only. tbd.
                if (isA<fileOperations::collatedFileOperation>(fh))
                {
                    // Collated
                    if (Pstream::master())
                    {
                        const bool oldParRun = Pstream::parRun(false);
                        fh.cp(readPath, writePath);
                        Pstream::parRun(oldParRun);
                    }
                }
                else
                {
                    // Assume uncollated
                    fh.cp(readPath, writePath);
                }
            }
        }
    }
}


void printMeshData(const polyMesh& mesh)
{
    // Collect all data on master

    globalIndex globalCells(mesh.nCells());
    labelListList patchNeiProcNo(Pstream::nProcs());
    labelListList patchSize(Pstream::nProcs());
    const labelList& pPatches = mesh.globalData().processorPatches();
    patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size());
    patchSize[Pstream::myProcNo()].setSize(pPatches.size());
    forAll(pPatches, i)
    {
        const processorPolyPatch& ppp = refCast<const processorPolyPatch>
        (
            mesh.boundaryMesh()[pPatches[i]]
        );
        patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo();
        patchSize[Pstream::myProcNo()][i] = ppp.size();
    }
    Pstream::gatherList(patchNeiProcNo);
    Pstream::gatherList(patchSize);

    // Print stats

    globalIndex globalBoundaryFaces(mesh.nBoundaryFaces());

    label maxProcCells = 0;
    label totProcFaces = 0;
    label maxProcPatches = 0;
    label totProcPatches = 0;
    label maxProcFaces = 0;

    for (const int procI : Pstream::allProcs())
    {
        Info<< nl
            << "Processor " << procI << nl
            << "    Number of cells = " << globalCells.localSize(procI)
            << endl;

        label nProcFaces = 0;
        const labelList& nei = patchNeiProcNo[procI];

        forAll(patchNeiProcNo[procI], i)
        {
            Info<< "    Number of faces shared with processor "
                << patchNeiProcNo[procI][i] << " = "
                << patchSize[procI][i] << endl;

            nProcFaces += patchSize[procI][i];
        }

        Info<< "    Number of processor patches = " << nei.size() << nl
            << "    Number of processor faces = " << nProcFaces << nl
            << "    Number of boundary faces = "
            << globalBoundaryFaces.localSize(procI)-nProcFaces << endl;

        maxProcCells = max(maxProcCells, globalCells.localSize(procI));
        totProcFaces += nProcFaces;
        totProcPatches += nei.size();
        maxProcPatches = max(maxProcPatches, nei.size());
        maxProcFaces = max(maxProcFaces, nProcFaces);
    }

    // Stats

    scalar avgProcCells = scalar(globalCells.size())/Pstream::nProcs();
    scalar avgProcPatches = scalar(totProcPatches)/Pstream::nProcs();
    scalar avgProcFaces = scalar(totProcFaces)/Pstream::nProcs();

    // In case of all faces on one processor. Just to avoid division by 0.
    if (totProcPatches == 0)
    {
        avgProcPatches = 1;
    }
    if (totProcFaces == 0)
    {
        avgProcFaces = 1;
    }

    // Imbalance reported as percentage above the average, i.e.
    // 100*(max-avg)/avg
    Info<< nl
        << "Number of processor faces = " << totProcFaces/2 << nl
        << "Max number of cells = " << maxProcCells
        << " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
        << "% above average " << avgProcCells << ")" << nl
        << "Max number of processor patches = " << maxProcPatches
        << " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
        << "% above average " << avgProcPatches << ")" << nl
        << "Max number of faces between processors = " << maxProcFaces
        << " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
        << "% above average " << avgProcFaces << ")" << nl
        << endl;
}


// Debugging: write volScalarField with decomposition for post processing.
void writeDecomposition
(
    const word& name,
    const fvMesh& mesh,
    const labelUList& decomp
)
{
    // Write the decomposition as labelList for use with 'manual'
    // decomposition method.
    labelIOList cellDecomposition
    (
        IOobject
        (
            "cellDecomposition",
            mesh.facesInstance(),   // mesh read from facesInstance
            mesh,
            IOobject::NO_READ,
            IOobject::NO_WRITE,
            false
        ),
        decomp
    );
    cellDecomposition.write();

    Info<< "Writing wanted cell distribution to volScalarField " << name
        << " for postprocessing purposes." << nl << endl;

    volScalarField procCells
    (
        IOobject
        (
            name,
            mesh.time().timeName(),
            mesh,
            IOobject::NO_READ,
            IOobject::AUTO_WRITE,
            false                   // do not register
        ),
        mesh,
        dimensionedScalar(name, dimless, -1),
        zeroGradientFvPatchScalarField::typeName
    );

    forAll(procCells, cI)
    {
        procCells[cI] = decomp[cI];
    }

    procCells.correctBoundaryConditions();
    procCells.write();
}
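
// The cellDecomposition labelList written above can drive a subsequent
// 'manual' decomposition. A minimal decomposeParDict sketch (syntax assumed
// from the standard manual decomposition method):
//
//     numberOfSubdomains  4;
//     method              manual;
//     coeffs
//     {
//         dataFile    "cellDecomposition";
//     }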

void determineDecomposition
(
    const Time& baseRunTime,
    const fileName& decompDictFile, // optional location for decomposeParDict
    const bool decompose,       // decompose, i.e. read from undecomposed case
    const fileName& proc0CaseName,
    const fvMesh& mesh,
    const bool writeCellDist,

    label& nDestProcs,
    labelList& decomp
)
{
    // Read decomposeParDict (on all processors)
    const decompositionModel& method = decompositionModel::New
    (
        mesh,
        decompDictFile
    );

    decompositionMethod& decomposer = method.decomposer();

    if (!decomposer.parallelAware())
    {
        WarningInFunction
            << "You have selected decomposition method "
            << decomposer.typeName << " which does" << nl
            << "not synchronise the decomposition across"
            << " processor patches." << nl
            << "    You might want to select a decomposition method"
            << " which is aware of this. Continuing." << endl;
    }

    Time& tm = const_cast<Time&>(mesh.time());

    const bool oldProcCase = tm.processorCase();
    if (Pstream::master() && decompose)
    {
        Info<< "Setting caseName to " << baseRunTime.caseName()
            << " to read decomposeParDict" << endl;
        tm.caseName() = baseRunTime.caseName();
        tm.processorCase(false);
    }

    scalarField cellWeights;
    if (method.found("weightField"))
    {
        word weightName = method.get<word>("weightField");

        volScalarField weights
        (
            IOobject
            (
                weightName,
                tm.timeName(),
                mesh,
                IOobject::MUST_READ,
                IOobject::NO_WRITE
            ),
            mesh
        );
        cellWeights = weights.internalField();
    }

    nDestProcs = decomposer.nDomains();
    decomp = decomposer.decompose(mesh, cellWeights);

    if (Pstream::master() && decompose)
    {
        Info<< "Restoring caseName to " << proc0CaseName << endl;
        tm.caseName() = proc0CaseName;
        tm.processorCase(oldProcCase);
    }

    // Dump decomposition to volScalarField
    if (writeCellDist)
    {
        // Note: on master make sure to write to processor0
        if (decompose)
        {
            if (Pstream::master())
            {
                const bool oldParRun = Pstream::parRun(false);

                Info<< "Setting caseName to " << baseRunTime.caseName()
                    << " to write undecomposed cellDist" << endl;

                tm.caseName() = baseRunTime.caseName();
                tm.processorCase(false);
                writeDecomposition("cellDist", mesh, decomp);
                Info<< "Restoring caseName to " << proc0CaseName << endl;
                tm.caseName() = proc0CaseName;
                tm.processorCase(oldProcCase);

                Pstream::parRun(oldParRun);
            }
        }
        else
        {
            writeDecomposition("cellDist", mesh, decomp);
        }
    }
}
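
// The optional weightField entry read above biases the decomposition with a
// per-cell weight. A decomposeParDict sketch ('cellWeights' is a
// hypothetical field name; any readable volScalarField works):
//
//     numberOfSubdomains  4;
//     method              scotch;
//     weightField         cellWeights;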

// Generic mesh-based field reading
template<class GeoField>
void readField
(
    const IOobject& io,
    const fvMesh& mesh,
    const label i,
    PtrList<GeoField>& fields
)
{
    fields.set(i, new GeoField(io, mesh));
}


// Definition of readField for GeometricFields only
template<class Type, template<class> class PatchField, class GeoMesh>
void readField
(
    const IOobject& io,
    const fvMesh& mesh,
    const label i,
    PtrList<GeometricField<Type, PatchField, GeoMesh>>& fields
)
{
    fields.set
    (
        i,
        new GeometricField<Type, PatchField, GeoMesh>(io, mesh, false)
    );
}


// Read vol or surface fields
template<class GeoField>
void readFields
(
    const boolList& haveMesh,
    const fvMesh& mesh,
    const autoPtr<fvMeshSubset>& subsetterPtr,
    IOobjectList& allObjects,
    PtrList<GeoField>& fields
)
{
    // Get my objects of type
    IOobjectList objects(allObjects.lookupClass(GeoField::typeName));

    // Check that we all have all objects
    wordList objectNames = objects.sortedNames();

    // Get master names
    wordList masterNames(objectNames);
    Pstream::scatter(masterNames);

    if (haveMesh[Pstream::myProcNo()] && objectNames != masterNames)
    {
        FatalErrorInFunction
            << "Objects not synchronised across processors." << nl
            << "Master has " << flatOutput(masterNames) << nl
            << "Processor " << Pstream::myProcNo()
            << " has " << flatOutput(objectNames)
            << exit(FatalError);
    }

    fields.setSize(masterNames.size());

    // Have master send all fields to processors that don't have a mesh. The
    // issue is if a patchField does any parallel operations inside its
    // construct-from-dictionary. This will not work when going to more
    // processors (e.g. decompose = 1 -> many)! We could make a special
    // exception for decomposePar but nicer would be to have a
    // read-communicator ... For now detect if decomposing & disable parRun
    if (Pstream::master())
    {
        // Work out if we're decomposing - none of the subprocs has a mesh
        bool decompose = true;
        for (const int procI : Pstream::subProcs())
        {
            if (haveMesh[procI])
            {
                decompose = false;
            }
        }

        forAll(masterNames, i)
        {
            const word& name = masterNames[i];
            IOobject& io = *objects[name];
            io.writeOpt(IOobject::AUTO_WRITE);

            // Load field (but not oldTime)
            const bool oldParRun = Pstream::parRun();
            if (decompose)
            {
                Pstream::parRun(false);
            }
            readField(io, mesh, i, fields);
            if (decompose)
            {
                Pstream::parRun(oldParRun);
            }

            // Create zero sized field and send
            if (subsetterPtr)
            {
                const bool oldParRun = Pstream::parRun(false);
                tmp<GeoField> tsubfld = subsetterPtr().interpolate(fields[i]);
                Pstream::parRun(oldParRun);

                // Send to all processors that don't have a mesh
                for (const int procI : Pstream::subProcs())
                {
                    if (!haveMesh[procI])
                    {
                        OPstream toProc(Pstream::commsTypes::blocking, procI);
                        toProc<< tsubfld();
                    }
                }
            }
        }
    }
    else if (!haveMesh[Pstream::myProcNo()])
    {
        // Don't have mesh (nor fields). Receive empty field from master.

        forAll(masterNames, i)
        {
            const word& name = masterNames[i];

            // Receive field
            IPstream fromMaster
            (
                Pstream::commsTypes::blocking,
                Pstream::masterNo()
            );
            dictionary fieldDict(fromMaster);

            fields.set
            (
                i,
                new GeoField
                (
                    IOobject
                    (
                        name,
                        mesh.time().timeName(),
                        mesh,
                        IOobject::NO_READ,
                        IOobject::AUTO_WRITE
                    ),
                    mesh,
                    fieldDict
                )
            );

            //// Write it for next time round (since mesh gets written as well)
            //fields[i].write();
        }
    }
    else
    {
        // Have mesh so just try to load
        forAll(masterNames, i)
        {
            const word& name = masterNames[i];
            IOobject& io = *objects[name];
            io.writeOpt(IOobject::AUTO_WRITE);

            // Load field (but not oldtime)
            readField(io, mesh, i, fields);
        }
    }
}


// Variant of GeometricField::correctBoundaryConditions that only
// evaluates selected patch fields
template<class GeoField, class CoupledPatchType>
void correctCoupledBoundaryConditions(fvMesh& mesh)
{
    HashTable<GeoField*> flds
    (
        mesh.objectRegistry::lookupClass<GeoField>()
    );

    for (const word& fldName : flds.sortedToc())
    {
        GeoField& fld = *(flds[fldName]);
        fld.boundaryFieldRef().template evaluateCoupled<CoupledPatchType>();
    }
}
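
// Typical instantiation, matching the calls made after distribution below:
//
//     correctCoupledBoundaryConditions<volScalarField, processorFvPatch>
//     (
//         mesh
//     );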

// Inplace redistribute mesh and any fields
autoPtr<mapDistributePolyMesh> redistributeAndWrite
(
    autoPtr<fileOperation>&& writeHandler,
    const Time& baseRunTime,
    const boolList& haveMesh,
    const fileName& meshSubDir,
    const bool doReadFields,
    const bool decompose,       // decompose, i.e. read from undecomposed case
    const bool reconstruct,
    const bool overwrite,
    const fileName& proc0CaseName,
    const label nDestProcs,
    const labelList& decomp,
    const fileName& masterInstDir,
    fvMesh& mesh
)
{
    Time& runTime = const_cast<Time&>(mesh.time());
    const bool oldProcCase = runTime.processorCase();

    //// Print some statistics
    //Info<< "Before distribution:" << endl;
    //printMeshData(mesh);

    PtrList<volScalarField> volScalarFields;
    PtrList<volVectorField> volVectorFields;
    PtrList<volSphericalTensorField> volSphereTensorFields;
    PtrList<volSymmTensorField> volSymmTensorFields;
    PtrList<volTensorField> volTensorFields;

    PtrList<surfaceScalarField> surfScalarFields;
    PtrList<surfaceVectorField> surfVectorFields;
    PtrList<surfaceSphericalTensorField> surfSphereTensorFields;
    PtrList<surfaceSymmTensorField> surfSymmTensorFields;
    PtrList<surfaceTensorField> surfTensorFields;

    PtrList<DimensionedField<scalar, volMesh>> dimScalarFields;
    PtrList<DimensionedField<vector, volMesh>> dimVectorFields;
    PtrList<DimensionedField<sphericalTensor, volMesh>> dimSphereTensorFields;
    PtrList<DimensionedField<symmTensor, volMesh>> dimSymmTensorFields;
    PtrList<DimensionedField<tensor, volMesh>> dimTensorFields;

    DynamicList<word> pointFieldNames;

    if (doReadFields)
    {
        // Create 0 sized mesh to do all the generation of zero sized
        // fields on processors that have zero sized meshes. Note that this is
        // only necessary on master but since polyMesh construction with
        // Pstream::parRun does parallel comms we have to do it on all
        // processors
        autoPtr<fvMeshSubset> subsetterPtr;

        const bool allHaveMesh = !haveMesh.found(false);
        if (!allHaveMesh)
        {
            // Find last non-processor patch.
            const polyBoundaryMesh& patches = mesh.boundaryMesh();

            const label nonProcI = (patches.nNonProcessor() - 1);

            if (nonProcI < 0)
            {
                FatalErrorInFunction
                    << "Cannot find non-processor patch on processor "
                    << Pstream::myProcNo() << nl
                    << " Current patches:" << patches.names()
                    << abort(FatalError);
            }

            // Subset 0 cells, no parallel comms.
            // This is used to create zero-sized fields.
            subsetterPtr.reset(new fvMeshSubset(mesh, zero{}));
        }

        // Get original objects (before incrementing time!)
        if (Pstream::master() && decompose)
        {
            runTime.caseName() = baseRunTime.caseName();
            runTime.processorCase(false);
        }
        IOobjectList objects(mesh, runTime.timeName());
        if (Pstream::master() && decompose)
        {
            runTime.caseName() = proc0CaseName;
            runTime.processorCase(oldProcCase);
        }

        Info<< "From time " << runTime.timeName()
            << " mesh:" << mesh.objectRegistry::objectPath()
            << " have objects:" << objects.names() << endl;

        // We don't want to map the decomposition (mapping already tested when
        // mapping the cell centre field)
        auto iter = objects.find("cellDist");
        if (iter.found())
        {
            objects.erase(iter);
        }

        // volFields

        if (Pstream::master() && decompose)
        {
            runTime.caseName() = baseRunTime.caseName();
            runTime.processorCase(false);
        }

        readFields(haveMesh, mesh, subsetterPtr, objects, volScalarFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, volVectorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, volSphereTensorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, volSymmTensorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, volTensorFields);

        // surfaceFields

        readFields(haveMesh, mesh, subsetterPtr, objects, surfScalarFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, surfVectorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, surfSphereTensorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, surfSymmTensorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, surfTensorFields);

        // Dimensioned internal fields

        readFields(haveMesh, mesh, subsetterPtr, objects, dimScalarFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, dimVectorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, dimSphereTensorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, dimSymmTensorFields);
        readFields(haveMesh, mesh, subsetterPtr, objects, dimTensorFields);
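
        // Note: the readFields calls above enumerate the five tensor ranks
        // (scalar, vector, sphericalTensor, symmTensor, tensor) for each
        // field category: volFields, surfaceFields and dimensioned internal
        // fields.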

        // pointFields currently not supported. Read their names so we
        // can delete them.
        {
            // Get my objects of type
            pointFieldNames.append
            (
                objects.lookupClass(pointScalarField::typeName).sortedNames()
            );
            pointFieldNames.append
            (
                objects.lookupClass(pointVectorField::typeName).sortedNames()
            );
            pointFieldNames.append
            (
                objects.lookupClass
                (
                    pointSphericalTensorField::typeName
                ).sortedNames()
            );
            pointFieldNames.append
            (
                objects.lookupClass
                (
                    pointSymmTensorField::typeName
                ).sortedNames()
            );
            pointFieldNames.append
            (
                objects.lookupClass(pointTensorField::typeName).sortedNames()
            );

            // Make sure all processors have the same set
            Pstream::scatter(pointFieldNames);
        }

        if (Pstream::master() && decompose)
        {
            runTime.caseName() = proc0CaseName;
            runTime.processorCase(oldProcCase);
        }
    }


    // Mesh distribution engine
    fvMeshDistribute distributor(mesh);

    // Do all the distribution of mesh and fields
    autoPtr<mapDistributePolyMesh> rawMap = distributor.distribute(decomp);

    // Print some statistics
    Info<< "After distribution:" << endl;
    printMeshData(mesh);

    // Get other side of processor boundaries
    correctCoupledBoundaryConditions<volScalarField, processorFvPatch>(mesh);
    correctCoupledBoundaryConditions<volVectorField, processorFvPatch>(mesh);
    correctCoupledBoundaryConditions
    <
        volSphericalTensorField,
        processorFvPatch
    >(mesh);
    correctCoupledBoundaryConditions<volSymmTensorField, processorFvPatch>(mesh);
    correctCoupledBoundaryConditions<volTensorField, processorFvPatch>(mesh);
    // No update of surface fields

    // Set the minimum write precision
    IOstream::defaultPrecision(max(10u, IOstream::defaultPrecision()));

    if (!overwrite)
    {
        ++runTime;
        mesh.setInstance(runTime.timeName());
    }
    else
    {
        mesh.setInstance(masterInstDir);
    }

    IOmapDistributePolyMesh map
    (
        IOobject
        (
            "procAddressing",
            mesh.facesInstance(),
            polyMesh::meshSubDir,
            mesh,
            IOobject::NO_READ,
            IOobject::AUTO_WRITE
        )
    );
    map.transfer(rawMap());
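
    // The two write paths below differ in where output lands. Reconstruct
    // mode uses the master-only serial I/O idiom, a sketch of which is:
    //
    //     if (Pstream::master())
    //     {
    //         const bool oldParRun = Pstream::parRun(false);  // serial I/O
    //         // ... write to the undecomposed case ...
    //         Pstream::parRun(oldParRun);                     // restore
    //     }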

    if (reconstruct)
    {
        if (Pstream::master())
        {
            Info<< "Setting caseName to " << baseRunTime.caseName()
                << " to write reconstructed mesh and fields." << endl;
            runTime.caseName() = baseRunTime.caseName();
            const bool oldProcCase(runTime.processorCase(false));
            const bool oldParRun = Pstream::parRun(false);

            mesh.write();
            topoSet::removeFiles(mesh);
            for (const word& fieldName : pointFieldNames)
            {
                IOobject io
                (
                    fieldName,
                    runTime.timeName(),
                    mesh
                );

                const fileName fieldFile(io.objectPath());
                if (topoSet::debug) DebugVar(fieldFile);
                rm(fieldFile);
            }
            Pstream::parRun(oldParRun);

            // Now we've written all. Reset caseName on master
            Info<< "Restoring caseName to " << proc0CaseName << endl;
            runTime.caseName() = proc0CaseName;
            runTime.processorCase(oldProcCase);
        }
    }
    else
    {
        autoPtr<fileOperation> defaultHandler;
        if (writeHandler)
        {
            defaultHandler = fileHandler(std::move(writeHandler));
        }

        mesh.write();

        if (defaultHandler)
        {
            writeHandler = fileHandler(std::move(defaultHandler));
        }
        topoSet::removeFiles(mesh);
        for (const word& fieldName : pointFieldNames)
        {
            IOobject io
            (
                fieldName,
                runTime.timeName(),
                mesh
            );

            const fileName fieldFile(io.objectPath());
            if (topoSet::debug) DebugVar(fieldFile);
            rm(fieldFile);
        }
    }
    Info<< "Written redistributed mesh to " << mesh.facesInstance() << nl
        << endl;

    if (decompose || reconstruct)
    {
        // Decompose (1 -> N) or reconstruct (N -> 1)
        // so {boundary,cell,face,point}ProcAddressing have meaning
        fvMeshTools::writeProcAddressing
        (
            mesh,
            map,
            decompose,
            std::move(writeHandler)
        );
    }
    else
    {
        // Redistribute (N -> M)
        // {boundary,cell,face,point}ProcAddressing would be incorrect
        // - can either remove or redistribute previous
        removeProcAddressing(mesh);
    }

    // Refinement data
    {
        // Read refinement data
        if (Pstream::master() && decompose)
        {
            runTime.caseName() = baseRunTime.caseName();
            runTime.processorCase(false);
        }

        IOobject io
        (
            "dummy",
            mesh.facesInstance(),
            polyMesh::meshSubDir,
            mesh,
            IOobject::READ_IF_PRESENT,
            IOobject::NO_WRITE,
            false
        );

        hexRef8Data refData(io);
        if (Pstream::master() && decompose)
        {
            runTime.caseName() = proc0CaseName;
            runTime.processorCase(oldProcCase);
        }

        // Make sure all processors have valid data (since only some will
        // read)
        refData.sync(io);

        // Distribute
        refData.distribute(map);

        // Now we've read the refinement data we can remove it
        meshRefinement::removeFiles(mesh);

        if (reconstruct)
        {
            if (Pstream::master())
            {
                const bool oldParRun = Pstream::parRun(false);

                Info<< "Setting caseName to " << baseRunTime.caseName()
                    << " to write reconstructed refinement data." << endl;
                runTime.caseName() = baseRunTime.caseName();
                const bool oldProcCase(runTime.processorCase(false));

                refData.write();

                // Now we've written all. Reset caseName on master
                Info<< "Restoring caseName to " << proc0CaseName << endl;
                runTime.caseName() = proc0CaseName;
                runTime.processorCase(oldProcCase);

                Pstream::parRun(oldParRun);
            }
        }
        else
        {
            refData.write();
        }
    }
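
    // Background note (based on the hexRef8 machinery; not stated in this
    // file): hexRef8Data bundles the refinement bookkeeping written by
    // hexRef8-style refinement - cellLevel, pointLevel, level0Edge and
    // refinementHistory - which is why it is read, synced, distributed and
    // rewritten alongside the mesh above.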

    //// Sets. Disabled for now.
    //{
    //    // Read sets
    //    if (Pstream::master() && decompose)
    //    {
    //        runTime.caseName() = baseRunTime.caseName();
    //        runTime.processorCase(false);
    //    }
    //    IOobjectList objects(mesh, mesh.facesInstance(), "polyMesh/sets");
    //
    //    PtrList<cellSet> cellSets;
    //    ReadFields(objects, cellSets);
    //
    //    if (Pstream::master() && decompose)
    //    {
    //        runTime.caseName() = proc0CaseName;
    //        runTime.processorCase(oldProcCase);
    //    }
    //
    //    forAll(cellSets, i)
    //    {
    //        cellSets[i].distribute(map);
    //    }
    //
    //    if (reconstruct)
    //    {
    //        if (Pstream::master())
    //        {
    //            Info<< "Setting caseName to " << baseRunTime.caseName()
    //                << " to write reconstructed refinement data." << endl;
    //            runTime.caseName() = baseRunTime.caseName();
    //            const bool oldProcCase(runTime.processorCase(false));
    //
    //            forAll(cellSets, i)
    //            {
    //                cellSets[i].distribute(map);
    //            }
    //
    //            // Now we've written all. Reset caseName on master
    //            Info<< "Restoring caseName to " << proc0CaseName << endl;
    //            runTime.caseName() = proc0CaseName;
    //            runTime.processorCase(oldProcCase);
    //        }
    //    }
    //    else
    //    {
    //        forAll(cellSets, i)
    //        {
    //            cellSets[i].distribute(map);
    //        }
    //    }
    //}

    return autoPtr<mapDistributePolyMesh>::New(std::move(map));
}


// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//   Field Mapping
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

void reconstructMeshFields
(
    const parFvFieldReconstructor& fvReconstructor,
    const IOobjectList& objects,
    const wordRes& selectedFields
)
{
    // Dimensioned fields

    fvReconstructor.reconstructFvVolumeInternalFields<scalar>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeInternalFields<vector>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeInternalFields<sphericalTensor>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeInternalFields<symmTensor>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeInternalFields<tensor>
    (
        objects,
        selectedFields
    );

    // volFields

    fvReconstructor.reconstructFvVolumeFields<scalar>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeFields<vector>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeFields<sphericalTensor>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeFields<symmTensor>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvVolumeFields<tensor>
    (
        objects,
        selectedFields
    );

    // surfaceFields

    fvReconstructor.reconstructFvSurfaceFields<scalar>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvSurfaceFields<vector>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvSurfaceFields<sphericalTensor>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvSurfaceFields<symmTensor>
    (
        objects,
        selectedFields
    );
    fvReconstructor.reconstructFvSurfaceFields<tensor>
    (
        objects,
        selectedFields
    );
}


void reconstructLagrangian
(
    autoPtr<parLagrangianRedistributor>& lagrangianReconstructorPtr,
    const fvMesh& baseMesh,
    const fvMesh& mesh,
    const mapDistributePolyMesh& distMap,
    const wordRes& selectedLagrangianFields
)
{
    // Clouds (note: might not be present on all processors)

    wordList cloudNames;
    List<wordList> fieldNames;

    // Find all cloudNames on all processors
    parLagrangianRedistributor::findClouds(mesh, cloudNames, fieldNames);

    if (cloudNames.size())
    {
        if (!lagrangianReconstructorPtr)
        {
            lagrangianReconstructorPtr.reset
            (
                new parLagrangianRedistributor
                (
                    mesh,
                    baseMesh,
                    mesh.nCells(),  // range of cell indices in clouds
                    distMap
                )
            );
        }
        const parLagrangianRedistributor& lagrangianReconstructor =
            *lagrangianReconstructorPtr;

        for (const word& cloudName : cloudNames)
        {
            Info<< "Reconstructing lagrangian fields for cloud "
                << cloudName << nl << endl;

            autoPtr<mapDistributeBase> lagrangianMapPtr =
                lagrangianReconstructor.redistributeLagrangianPositions
                (
                    cloudName
                );
            const mapDistributeBase& lagrangianMap = *lagrangianMapPtr;

            IOobjectList cloudObjs
            (
                mesh,
                mesh.time().timeName(),
                cloud::prefix/cloudName
            );

            lagrangianReconstructor.redistributeFields