From 912009c458b51b6a5a71375d4a6d3ad473c4f7bb Mon Sep 17 00:00:00 2001
From: mattijs
Date: Thu, 2 May 2019 16:49:48 +0100
Subject: [PATCH] ENH: overset: insert remote interpolation into lduMatrix

All remote contributions to interpolation stencils now get added as
'processor' type lduInterfaces. This guarantees a consistent matrix,
e.g. the initial residual is normalised to 1.

The second change is the normalisation of the interpolation
discretisation, which uses the diagonal from the unmodified equation.
This helps GAMG.
---
 .../test/sliceRange/Test-sliceRange.C | 2 +-
 src/overset/Make/files | 12 +-
 .../dynamicOversetFvMesh.C | 294 ++++++++++--
 .../dynamicOversetFvMesh.H | 46 +-
 .../dynamicOversetFvMeshTemplates.C | 450 ++++++++++++------
 .../fvMeshPrimitiveLduAddressing.C | 2 +-
 .../GAMG/calculatedProcessorGAMGInterface.C | 221 +++++++++
 .../GAMG/calculatedProcessorGAMGInterface.H | 199 ++++++++
 .../calculatedProcessorGAMGInterfaceField.C | 206 ++++++++
 .../calculatedProcessorGAMGInterfaceField.H | 208 ++++++++
 .../calculatedProcessorFvPatchField.C | 348 ++++++++++++++
 .../calculatedProcessorFvPatchField.H | 306 ++++++++++++
 .../calculatedProcessorFvPatchFields.C | 52 ++
 .../calculatedProcessorFvPatchFields.H | 49 ++
 .../calculatedProcessorFvPatchFieldsFwd.H | 50 ++
 .../lduPrimitiveProcessorInterface.C | 2 +-
 .../lduPrimitiveProcessorInterface.H | 13 +-
 src/overset/oversetPolyPatch/oversetFvPatch.C | 73 +--
 src/overset/oversetPolyPatch/oversetFvPatch.H | 60 +--
 .../oversetPolyPatch/oversetFvPatchField.C | 278 +++++------
 .../oversetPolyPatch/oversetFvPatchField.H | 64 +--
 .../oversetPolyPatch/oversetGAMGInterface.C | 8 +-
 .../oversetGAMGInterfaceField.C | 144 +++---
 .../semiImplicitOversetFvPatchField.C | 132 ++---
 .../semiImplicitOversetFvPatchField.H | 48 +-
 .../semiImplicitOversetGAMGInterfaceField.C | 2 +
 26 files changed, 2617 insertions(+), 652 deletions(-)
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/GAMG/calculatedProcessorGAMGInterface.C
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/GAMG/calculatedProcessorGAMGInterface.H
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/GAMG/calculatedProcessorGAMGInterfaceField.C
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/GAMG/calculatedProcessorGAMGInterfaceField.H
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/calculatedProcessorFvPatchField.C
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/calculatedProcessorFvPatchField.H
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/calculatedProcessorFvPatchFields.C
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/calculatedProcessorFvPatchFields.H
 create mode 100644 src/overset/lduPrimitiveProcessorInterface/calculatedProcessorFvPatchFieldsFwd.H

diff --git a/applications/test/sliceRange/Test-sliceRange.C b/applications/test/sliceRange/Test-sliceRange.C
index f398c6fbbe..382c01c34a 100644
--- a/applications/test/sliceRange/Test-sliceRange.C
+++ b/applications/test/sliceRange/Test-sliceRange.C
@@ -69,7 +69,7 @@ void printInfo(const sliceCoeffs& coeffs)
 
     const auto endIter = range.cend();
 
-    for (const label i : {-1, (range.size()/2), range.size()})
+    for (const label i : {label(-1), (range.size()/2), range.size()})
     {
         const auto iter = range.at(i);
 
diff --git a/src/overset/Make/files b/src/overset/Make/files
index 14ce43d1f2..e3ba8d7f42 100644
--- a/src/overset/Make/files
+++ b/src/overset/Make/files
@@ -13,23 +13,27 @@ dynamicOversetFvMesh/dynamicOversetFvMesh.C
 
 fvMeshPrimitiveLduAddressing/fvMeshPrimitiveLduAddressing.C
 
 oversetPolyPatch/oversetPolyPatch.C
-oversetPolyPatch/oversetLduInterface.C
 oversetPolyPatch/oversetFvPatch.C
 oversetPolyPatch/oversetFvPatchFields.C
 oversetPolyPatch/oversetFvsPatchFields.C
-oversetPolyPatch/oversetGAMGInterface.C
-oversetPolyPatch/oversetGAMGInterfaceField.C
 oversetPolyPatch/oversetPointPatch.C
 oversetPolyPatch/oversetPointPatchFields.C
+/*
 oversetPolyPatch/semiImplicitOversetFvPatchFields.C
+oversetPolyPatch/oversetLduInterface.C
+oversetPolyPatch/oversetGAMGInterface.C
+oversetPolyPatch/oversetGAMGInterfaceField.C
 oversetPolyPatch/semiImplicitOversetGAMGInterfaceField.C
+*/
 
 oversetAdjustPhi/oversetAdjustPhi.C
 
 regionsToCell/regionsToCell.C
 
 lduPrimitiveProcessorInterface/lduPrimitiveProcessorInterface.C
-
+lduPrimitiveProcessorInterface/calculatedProcessorFvPatchFields.C
+lduPrimitiveProcessorInterface/GAMG/calculatedProcessorGAMGInterface.C
+lduPrimitiveProcessorInterface/GAMG/calculatedProcessorGAMGInterfaceField.C
 
 LIB = $(FOAM_LIBBIN)/liboverset
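An aside, not part of the patch itself: the commit message states that the interpolation discretisation is now normalised with the diagonal from the unmodified equation. As I read it, the interpolated cell's constraint row (cell value = weighted sum of donor values) is scaled so its coefficients have the same magnitude as the surrounding flow-equation rows, which keeps the initial-residual normalisation meaningful and gives GAMG sensibly scaled coefficients. The standalone C++ sketch below only illustrates that scaling idea on a made-up 5-cell matrix; the values, weights and names are illustrative and do not appear in the patch.

// Standalone sketch (not part of the patch): scale an interpolated cell's
// constraint row by the diagonal of the unmodified equation so it matches
// the magnitude of the surrounding flow-equation rows. Illustrative only.
#include <cstdio>
#include <vector>

int main()
{
    // A tiny 1-D "flow" matrix: 5 cells, Laplacian-like coefficients.
    const int n = 5;
    std::vector<std::vector<double>> A(n, std::vector<double>(n, 0.0));
    for (int i = 0; i < n; ++i)
    {
        A[i][i] = 2000.0;                   // typical flow-equation diagonal
        if (i > 0)   A[i][i-1] = -1000.0;
        if (i < n-1) A[i][i+1] = -1000.0;
    }

    // Cell 2 is an interpolated (overset) cell: x2 = 0.5*x1 + 0.5*x3.
    const double w[2] = {0.5, 0.5};

    // Naive constraint row: unit diagonal, -w off-diagonals.
    std::vector<double> naive(n, 0.0);
    naive[2] = 1.0; naive[1] = -w[0]; naive[3] = -w[1];

    // Scaled constraint row: multiply by the diagonal of the unmodified
    // equation so the row is comparable in size to its neighbours.
    const double norm = A[2][2];
    std::vector<double> scaled(n, 0.0);
    scaled[2] = norm; scaled[1] = -norm*w[0]; scaled[3] = -norm*w[1];

    std::printf("flow-row diagonal      : %g\n", A[1][1]);
    std::printf("naive constraint diag  : %g\n", naive[2]);
    std::printf("scaled constraint diag : %g\n", scaled[2]);
    return 0;
}

Compiled with any C++ compiler, the sketch just prints the diagonal magnitude of a flow row next to the naive and the scaled constraint rows, showing why the scaled variant keeps the matrix consistently normalised.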
diff --git a/src/overset/dynamicOversetFvMesh/dynamicOversetFvMesh.C b/src/overset/dynamicOversetFvMesh/dynamicOversetFvMesh.C
index 3f59a96aeb..a8530dfba1 100644
--- a/src/overset/dynamicOversetFvMesh/dynamicOversetFvMesh.C
+++ b/src/overset/dynamicOversetFvMesh/dynamicOversetFvMesh.C
@@ -91,41 +91,168 @@ bool Foam::dynamicOversetFvMesh::updateAddressing() const
             << " nExtraFaces:" << nExtraFaces << endl;
     }
 
-    // Extract relevant remote processors
-    labelList nbrProcs(localFaceCells.size());
+
+    // Now for the tricky bits. We want to hand out processor faces according
+    // to the localFaceCells/remoteFaceCells. Ultimately we need
+    // per entry in stencil:
+    // - the patch (or -1 for internal faces)
+    // - the face (is either an internal face index or a patch face index)
+
+    stencilPatches_.setSize(stencilFaces_.size());
+
+    // Per processor to owner (local)/neighbour (remote)
+    List<DynamicList<label>> procOwner(Pstream::nProcs());
+    List<DynamicList<label>> dynProcNeighbour(Pstream::nProcs());
+    forAll(stencil, celli)
     {
-        label nbrI = 0;
-        forAll(localFaceCells, procI)
+        const labelList& nbrs = stencil[celli];
+        stencilPatches_[celli].setSize(nbrs.size(), -1);
+
+        forAll(nbrs, nbri)
         {
-            if (localFaceCells[procI].size())
+            const label nbrCelli = nbrs[nbri];
+            if (stencilFaces_[celli][nbri] == -1)
             {
-                //Pout<< " from proc:" << procI
-                //    << " want its local cells " << remoteFaceCells[procI]
-                //    << " to add to my local cells:" << localFaceCells[procI]
-                //    << endl;
-                nbrProcs[nbrI++] = procI;
+                label globalNbr = globalCellIDs[nbrCelli];
+                label proci = globalNumbering.whichProcID(globalNbr);
+                label remoteCelli = globalNumbering.toLocal(proci, globalNbr);
+
+                // Overwrite the face to be a patch face
+                stencilFaces_[celli][nbri] = procOwner[proci].size();
+                stencilPatches_[celli][nbri] = proci;
+                procOwner[proci].append(celli);
+                dynProcNeighbour[proci].append(remoteCelli);
+
+                //Pout<< "From neighbour proc:" << proci
+                //    << " allocating patchFace:" << stencilFaces_[celli][nbri]
+                //    << " to get remote cell " << remoteCelli
+                //    << " onto local cell " << celli << endl;
             }
         }
-        nbrProcs.setSize(nbrI);
+    }
+    labelListList procNeighbour(dynProcNeighbour.size());
+    forAll(procNeighbour, i)
+    {
+        procNeighbour[i] = std::move(dynProcNeighbour[i]);
+    }
+    labelListList mySendCells;
+    Pstream::exchange(procNeighbour, mySendCells);
+
+    label nbri = 0;
+    forAll(procOwner, proci)
+    {
+        if (procOwner[proci].size())
+        {
+            nbri++;
+        }
+        if (mySendCells[proci].size())
+        {
+            nbri++;
+        }
+    }
+
+    remoteStencilInterfaces_.setSize(nbri);
+    nbri = 0;
+
+    // E.g. if proc1 needs some data from proc2 and proc2 needs some data from
+    // proc1. We first want the pair : proc1 receive and proc2 send
+    // and then the pair : proc1 send, proc2 receive
+
+
+    labelList procToInterface(Pstream::nProcs(), -1);
+
+    forAll(procOwner, proci)
+    {
+        if (proci < Pstream::myProcNo() && procOwner[proci].size())
+        {
+            //Pout<< "Adding interface " << nbri
+            //    << " to receive my " << procOwner[proci]
+            //    << " from " << proci << endl;
+
+            procToInterface[proci] = nbri;
+            remoteStencilInterfaces_.set
+            (
+                nbri++,
+                new lduPrimitiveProcessorInterface
+                (
+                    procOwner[proci],
+                    Pstream::myProcNo(),
+                    proci,
+                    tensorField(0),
+                    Pstream::msgType()+2
+                )
+            );
+        }
+        else if (proci > Pstream::myProcNo() && mySendCells[proci].size())
+        {
+            //Pout<< "Adding interface " << nbri
+            //    << " to send my " << mySendCells[proci]
+            //    << " to " << proci << endl;
+            remoteStencilInterfaces_.set
+            (
+                nbri++,
+                new lduPrimitiveProcessorInterface
+                (
+                    mySendCells[proci],
+                    Pstream::myProcNo(),
+                    proci,
+                    tensorField(0),
+                    Pstream::msgType()+2
+                )
+            );
+        }
+    }
+    forAll(procOwner, proci)
+    {
+        if (proci > Pstream::myProcNo() && procOwner[proci].size())
+        {
+            //Pout<< "Adding interface " << nbri
+            //    << " to receive my " << procOwner[proci]
+            //    << " from " << proci << endl;
+            procToInterface[proci] = nbri;
+            remoteStencilInterfaces_.set
+            (
+                nbri++,
+                new lduPrimitiveProcessorInterface
+                (
+                    procOwner[proci],
+                    Pstream::myProcNo(),
+                    proci,
+                    tensorField(0),
+                    Pstream::msgType()+2
+                )
+            );
+        }
+        else if (proci < Pstream::myProcNo() && mySendCells[proci].size())
+        {
+            //Pout<< "Adding interface " << nbri
+            //    << " to send my " << mySendCells[proci]
+            //    << " to " << proci << endl;
+            remoteStencilInterfaces_.set
+            (
+                nbri++,
+                new lduPrimitiveProcessorInterface
+                (
+                    mySendCells[proci],
+                    Pstream::myProcNo(),
+                    proci,
+                    tensorField(0),
+                    Pstream::msgType()+2
+                )
+            );
+        }
     }
 
-    // Construct interfaces
-    remoteStencilInterfaces_.setSize(nbrProcs.size());
-    forAll(nbrProcs, i)
+
+    // Rewrite stencilPatches now we know the actual interface (procToInterface)
+    for (auto& patches : stencilPatches_)
     {
-        label procI = nbrProcs[i];
-        remoteStencilInterfaces_.set
-        (
-            i,
-            new lduPrimitiveProcessorInterface
-            (
-                localFaceCells[procI],
-                Pstream::myProcNo(),
-                procI,
-                tensorField(0),
-                Pstream::msgType()
-            )
-        );
+        for (auto& interface : patches)
+        {
+            if (interface != -1)
+            {
+                interface = procToInterface[interface]+boundary().size();
+            }
+        }
     }
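An aside, not part of the patch itself: the two forAll loops above construct the remote-stencil processor interfaces in a fixed two-pass order so that each pair of ranks builds its matching send/receive interfaces consistently. The standalone sketch below mimics that two-pass ordering for three hypothetical ranks, with procOwner/mySendCells reduced to a single boolean "needs" matrix; all names and data are illustrative and not taken from the patch.

// Standalone sketch (not part of the patch): the two-pass interface
// ordering, simulated for 3 "ranks". needs[a][b] == true means rank a
// needs donor data from rank b. Illustrative only.
#include <cstdio>

int main()
{
    const int nProcs = 3;
    // Example coupling: 0 needs from 1, 1 needs from 0 and 2, 2 needs from 1.
    const bool needs[3][3] =
    {
        {false, true,  false},
        {true,  false, true },
        {false, true,  false}
    };

    for (int myProc = 0; myProc < nProcs; ++myProc)
    {
        std::printf("rank %d interfaces, in construction order:\n", myProc);

        // Pass 1: receive from lower ranks, send to higher ranks.
        for (int proci = 0; proci < nProcs; ++proci)
        {
            if (proci < myProc && needs[myProc][proci])
            {
                std::printf("  receive from %d\n", proci);
            }
            else if (proci > myProc && needs[proci][myProc])
            {
                std::printf("  send to %d\n", proci);
            }
        }

        // Pass 2: receive from higher ranks, send to lower ranks.
        for (int proci = 0; proci < nProcs; ++proci)
        {
            if (proci > myProc && needs[myProc][proci])
            {
                std::printf("  receive from %d\n", proci);
            }
            else if (proci < myProc && needs[proci][myProc])
            {
                std::printf("  send to %d\n", proci);
            }
        }
    }
    return 0;
}

For the example coupling, each direction of exchange between a pair of ranks is created in the same pass on both sides (e.g. rank 0's "send to 1" and rank 1's "receive from 0" both appear in the first pass), which is the kind of matched pairing the code comment above is after.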
@@ -218,6 +345,104 @@ bool Foam::dynamicOversetFvMesh::updateAddressing() const
 }
 
 
+void Foam::dynamicOversetFvMesh::writeAgglomeration
+(
+    const GAMGAgglomeration& agglom
+) const
+{
+    labelList cellToCoarse(identity(nCells()));
+    labelListList coarseToCell(invertOneToMany(nCells(), cellToCoarse));
+
+    // Write initial agglomeration
+    {
+        volScalarField scalarAgglomeration
+        (
+            IOobject
+            (
+                "agglomeration",
+                this->time().timeName(),
+                *this,
+                IOobject::NO_READ,
+                IOobject::AUTO_WRITE
+            ),
+            *this,
+            dimensionedScalar(dimless, Zero)
+        );
+        scalarField& fld = scalarAgglomeration.primitiveFieldRef();
+        forAll(fld, celli)
+        {
+            fld[celli] = cellToCoarse[celli];
+        }
+        fld /= max(fld);
+        correctBoundaryConditions
+        <
+            volScalarField,
+            oversetFvPatchField<scalar>
+        >(scalarAgglomeration.boundaryFieldRef(), false);
+        scalarAgglomeration.write();
+
+        Info<< "Writing initial cell distribution to "
+            << this->time().timeName() << endl;
+    }
+
+
+    for (label level = 0; level < agglom.size(); level++)
+    {
+        const labelList& addr = agglom.restrictAddressing(level);
+        label coarseSize = max(addr)+1;
+
+        Info<< "Level : " << level << endl
+            << returnReduce(addr.size(), sumOp