Merge branch 'master' of ssh://dm/home/dm4/OpenFOAM/OpenFOAM-dev

Author: Henry
Date:   2013-05-11 11:57:21 +01:00
11 changed files with 295 additions and 187 deletions

View File

@@ -2,7 +2,7 @@
   =========                 |
   \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
    \\    /   O peration     |
-    \\  /    A nd           | Copyright (C) 2011-2012 OpenFOAM Foundation
+    \\  /    A nd           | Copyright (C) 2011-2013 OpenFOAM Foundation
     \\/     M anipulation  |
 -------------------------------------------------------------------------------
 License
@@ -22,7 +22,7 @@ License
     along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

 Application
-    uncoupledKinematicParcelFoam
+    icoUncoupledKinematicParcelFoam

 Description
     Transient solver for the passive transport of a single kinematic

View File

@@ -2,7 +2,7 @@
   =========                 |
   \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
    \\    /   O peration     |
-    \\  /    A nd           | Copyright (C) 2011-2012 OpenFOAM Foundation
+    \\  /    A nd           | Copyright (C) 2011-2013 OpenFOAM Foundation
     \\/     M anipulation  |
 -------------------------------------------------------------------------------
 License
@@ -26,7 +26,7 @@ Application
 Description
     Transient solver for the passive transport of a single kinematic
-    particle could.
+    particle cloud.

     Uses a pre-calculated velocity field to evolve the cloud.

View File

@@ -488,7 +488,10 @@ void Foam::GAMGAgglomeration::procAgglomerateRestrictAddressing
         comm,
         procIDs,
         restrictAddressing_[levelIndex],
-        procRestrictAddressing
+        procRestrictAddressing,
+        UPstream::msgType(),
+        Pstream::nonBlocking    //Pstream::scheduled
     );

View File

@@ -126,7 +126,15 @@ void Foam::GAMGAgglomeration::restrictField
         const List<int>& procIDs = agglomProcIDs(coarseLevelIndex);
         const labelList& offsets = cellOffsets(coarseLevelIndex);

-        globalIndex::gather(offsets, fineComm, procIDs, cf);
+        globalIndex::gather
+        (
+            offsets,
+            fineComm,
+            procIDs,
+            cf,
+            UPstream::msgType(),
+            Pstream::nonBlocking    //Pstream::scheduled
+        );
     }
 }
@@ -194,7 +202,16 @@ void Foam::GAMGAgglomeration::prolongField
         label localSize = nCells_[levelIndex];
         Field<Type> allCf(localSize);

-        globalIndex::scatter(offsets, coarseComm, procIDs, cf, allCf);
+        globalIndex::scatter
+        (
+            offsets,
+            coarseComm,
+            procIDs,
+            cf,
+            allCf,
+            UPstream::msgType(),
+            Pstream::nonBlocking    //Pstream::scheduled
+        );

         forAll(fineToCoarse, i)
         {

View File

@@ -109,7 +109,7 @@ bool Foam::manualGAMGProcAgglomeration::agglomerate()
             // My processor id
             const label myProcID = Pstream::myProcNo(levelMesh.comm());

-            const List<clusterAndMaster>& clusters =
+            const List<labelList>& clusters =
                 procAgglomMaps_[i].second();

             // Coarse to fine master processor
@@ -125,8 +125,8 @@ bool Foam::manualGAMGProcAgglomeration::agglomerate()
             forAll(clusters, coarseI)
             {
-                const labelList& cluster = clusters[coarseI].first();
-                coarseToMaster[coarseI] = clusters[coarseI].second();
+                const labelList& cluster = clusters[coarseI];
+                coarseToMaster[coarseI] = cluster[0];

                 forAll(cluster, i)
                 {

View File

@@ -30,19 +30,23 @@ Description
     In the GAMG control dictionary:

         processorAgglomerator    manual;
+        // List of level+procagglomeration where
+        // procagglomeration is a set of labelLists. Each labelList is
+        // a cluster of processor which gets combined onto the first element
+        // in the list.
         processorAgglomeration
         (
             (
                 3           //at level 3
                 (
-                    ((0 1) 0)   //coarse 0 from 0,1 (and moved onto 0)
-                    ((2 3) 3)   //coarse 1 from 2,3 (and moved onto 3)
+                    (0 1)       //coarse 0 from 0,1 (and moved onto 0)
+                    (3 2)       //coarse 1 from 2,3 (and moved onto 3)
                 )
             )
             (
                 6           //at level6
                 (
-                    ((0 1) 0)   //coarse 0 from 0,1 (and moved onto 0)
+                    (0 1)       //coarse 0 from 0,1 (and moved onto 0)
                 )
             )
         );
@@ -76,10 +80,8 @@ class manualGAMGProcAgglomeration
 {
     // Private data

-        typedef Tuple2<labelList, label> clusterAndMaster;
-
         //- Per level the agglomeration map
-        const List<Tuple2<label, List<clusterAndMaster> > > procAgglomMaps_;
+        const List<Tuple2<label, List<labelList> > > procAgglomMaps_;

         //- Any allocated communicators
         DynamicList<label> comms_;
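
Note: a hedged sketch of where this setting would typically live in a case
(the solver name "p" and the surrounding GAMG settings are illustrative only;
the cluster lists use the new labelList format documented above, with the
first element of each cluster acting as the master):

    p
    {
        solver                  GAMG;
        smoother                GaussSeidel;
        tolerance               1e-06;
        relTol                  0.01;

        processorAgglomerator   manual;
        processorAgglomeration
        (
            (
                3                   // at level 3
                (
                    (0 1)           // processors 0,1 combined onto 0
                    (3 2)           // processors 2,3 combined onto 3
                )
            )
        );
    }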

View File

@@ -114,13 +114,6 @@ Foam::solverPerformance Foam::GAMGSolver::solve
                 cmpt
             );

-            //Pout<< "finestCorrection:" << finestCorrection << endl;
-            //Pout<< "finestResidual:" << finestResidual << endl;
-            //Pout<< "psi:" << psi << endl;
-            //Pout<< "Apsi:" << Apsi << endl;
-
             // Calculate finest level residual field
             matrix_.Amul(Apsi, psi, interfaceBouCoeffs_, interfaces_, cmpt);
             finestResidual = source;
@@ -205,7 +198,6 @@ void Foam::GAMGSolver::Vcycle
                 scratch1,
                 coarseCorrFields[leveli].size()
             );
-            //scalarField ACf(coarseCorrFields[leveli].size(), VGREAT);

             // Scale coarse-grid correction field
             // but not on the coarsest level because it evaluates to 1
@@ -218,8 +210,6 @@ void Foam::GAMGSolver::Vcycle
                     (
                         ACf.operator const scalarField&()
                     ),
-                    //ACf,
                     matrixLevels_[leveli],
                     interfaceLevelsBouCoeffs_[leveli],
                     interfaceLevels_[leveli],
@@ -235,7 +225,6 @@ void Foam::GAMGSolver::Vcycle
                     (
                         ACf.operator const scalarField&()
                     ),
-                    //ACf,
                     coarseCorrFields[leveli],
                     interfaceLevelsBouCoeffs_[leveli],
                     interfaceLevels_[leveli],
@@ -294,11 +283,6 @@ void Foam::GAMGSolver::Vcycle
                 scratch2,
                 coarseCorrFields[leveli].size()
             );
-            //scalarField preSmoothedCoarseCorrField
-            //(
-            //    coarseCorrFields[leveli].size(),
-            //    VGREAT
-            //);

             // Only store the preSmoothedCoarseCorrField if pre-smoothing is
             // used
@@ -328,12 +312,6 @@ void Foam::GAMGSolver::Vcycle
             );
             scalarField& ACfRef =
                 const_cast<scalarField&>(ACf.operator const scalarField&());
-            //scalarField ACfRef
-            //(
-            //    coarseCorrFields[leveli].size(),
-            //    VGREAT
-            //);

             if (interpolateCorrection_)
             {

View File

@@ -160,7 +160,8 @@ public:
             const labelList& procIDs,
             const UList<Type>& fld,
             List<Type>& allFld,
-            const int tag = UPstream::msgType()
+            const int tag = UPstream::msgType(),
+            const Pstream::commsTypes commsType=Pstream::nonBlocking
         );

         //- Collect data in processor order on master (== procIDs[0]).
@@ -172,10 +173,11 @@ public:
             const labelList& procIDs,
             const UList<Type>& fld,
             List<Type>& allFld,
-            const int tag = UPstream::msgType()
+            const int tag = UPstream::msgType(),
+            const Pstream::commsTypes commsType=Pstream::nonBlocking
         ) const
         {
-            gather(offsets_, comm, procIDs, fld, allFld, tag);
+            gather(offsets_, comm, procIDs, fld, allFld, tag, commsType);
         }

         //- Inplace collect data in processor order on master
@@ -187,7 +189,8 @@ public:
             const label comm,
             const labelList& procIDs,
             List<Type>& fld,
-            const int tag = UPstream::msgType()
+            const int tag = UPstream::msgType(),
+            const Pstream::commsTypes commsType=Pstream::nonBlocking
         );

         //- Inplace collect data in processor order on master
@@ -198,10 +201,11 @@ public:
             const label comm,
             const labelList& procIDs,
             List<Type>& fld,
-            const int tag = UPstream::msgType()
+            const int tag = UPstream::msgType(),
+            const Pstream::commsTypes commsType=Pstream::nonBlocking
         ) const
         {
-            gather(offsets_, comm, procIDs, fld, tag);
+            gather(offsets_, comm, procIDs, fld, tag, commsType);
         }

         //- Distribute data in processor order. Requires fld to be sized!
@@ -213,7 +217,8 @@ public:
             const labelList& procIDs,
             const UList<Type>& allFld,
             UList<Type>& fld,
-            const int tag = UPstream::msgType()
+            const int tag = UPstream::msgType(),
+            const Pstream::commsTypes commsType=Pstream::nonBlocking
         );

         //- Distribute data in processor order. Requires fld to be sized!
@@ -224,10 +229,11 @@ public:
             const labelList& procIDs,
             const UList<Type>& allFld,
             UList<Type>& fld,
-            const int tag = UPstream::msgType()
+            const int tag = UPstream::msgType(),
+            const Pstream::commsTypes commsType=Pstream::nonBlocking
         ) const
         {
-            scatter(offsets_, comm, procIDs, allFld, fld, tag);
+            scatter(offsets_, comm, procIDs, allFld, fld, tag, commsType);
         }
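
Note: a minimal caller-side sketch of the extended interface; gatherToMaster()
and its arguments are hypothetical, only the globalIndex/Pstream calls are
from the library. With the new default of Pstream::nonBlocking only contiguous
types can be exchanged, so a caller gathering non-contiguous data would pass
Pstream::scheduled explicitly:

    #include "globalIndex.H"
    #include "scalarField.H"

    using namespace Foam;

    void gatherToMaster
    (
        const globalIndex& gi,      // per-processor offsets for comm
        const label comm,
        const labelList& procIDs,
        const scalarField& localFld,
        List<scalar>& allFld        // filled on procIDs[0] only
    )
    {
        // scalar is contiguous, so the default Pstream::nonBlocking applies
        gi.gather(comm, procIDs, localFld, allFld);

        // For a non-contiguous Type, request scheduled transfers instead:
        // gi.gather(comm, procIDs, fld, allFld, UPstream::msgType(),
        //           Pstream::scheduled);
    }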

View File

@@ -30,30 +30,75 @@ License
 template<class Type>
 void Foam::globalIndex::gather
 (
-    const labelUList& offsets,
+    const labelUList& off,
     const label comm,
     const labelList& procIDs,
     const UList<Type>& fld,
     List<Type>& allFld,
-    const int tag
+    const int tag,
+    const Pstream::commsTypes commsType
 )
 {
     if (Pstream::myProcNo(comm) == procIDs[0])
     {
-        allFld.setSize(offsets.last());
+        allFld.setSize(off.last());

         // Assign my local data
         SubList<Type>(allFld, fld.size(), 0).assign(fld);

-        for (label i = 1; i < procIDs.size(); i++)
-        {
-            SubList<Type> procSlot(allFld, offsets[i+1]-offsets[i], offsets[i]);
-
-            if (contiguous<Type>())
-            {
-                IPstream::read
-                (
-                    Pstream::scheduled,
-                    procIDs[i],
-                    reinterpret_cast<char*>(procSlot.begin()),
-                    procSlot.byteSize(),
+        if (commsType == Pstream::scheduled || commsType == Pstream::blocking)
+        {
+            for (label i = 1; i < procIDs.size(); i++)
+            {
+                SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
+
+                if (contiguous<Type>())
+                {
+                    IPstream::read
+                    (
+                        commsType,
+                        procIDs[i],
+                        reinterpret_cast<char*>(procSlot.begin()),
+                        procSlot.byteSize(),
+                        tag,
+                        comm
+                    );
+                }
+                else
+                {
+                    IPstream fromSlave
+                    (
+                        commsType,
+                        procIDs[i],
+                        0,
+                        tag,
+                        comm
+                    );
+                    fromSlave >> procSlot;
+                }
+            }
+        }
+        else
+        {
+            // nonBlocking
+
+            if (!contiguous<Type>())
+            {
+                FatalErrorIn("globalIndex::gather(..)")
+                    << "nonBlocking not supported for non-contiguous data"
+                    << exit(FatalError);
+            }
+
+            label startOfRequests = Pstream::nRequests();
+
+            // Set up reads
+            for (label i = 1; i < procIDs.size(); i++)
+            {
+                SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
+                IPstream::read
+                (
+                    commsType,
+                    procIDs[i],
+                    reinterpret_cast<char*>(procSlot.begin()),
+                    procSlot.byteSize(),
@@ -61,45 +106,66 @@ void Foam::globalIndex::gather
                     comm
                 );
             }
-            else
-            {
-                IPstream fromSlave
-                (
-                    Pstream::scheduled,
-                    procIDs[i],
-                    0,
-                    tag,
-                    comm
-                );
-                fromSlave >> procSlot;
-            }
+
+            // Wait for all to finish
+            Pstream::waitRequests(startOfRequests);
         }
     }
     else
     {
-        if (contiguous<Type>())
+        if (commsType == Pstream::scheduled || commsType == Pstream::blocking)
         {
+            if (contiguous<Type>())
+            {
+                OPstream::write
+                (
+                    commsType,
+                    procIDs[0],
+                    reinterpret_cast<const char*>(fld.begin()),
+                    fld.byteSize(),
+                    tag,
+                    comm
+                );
+            }
+            else
+            {
+                OPstream toMaster
+                (
+                    commsType,
+                    procIDs[0],
+                    0,
+                    tag,
+                    comm
+                );
+                toMaster << fld;
+            }
+        }
+        else
+        {
+            // nonBlocking
+
+            if (!contiguous<Type>())
+            {
+                FatalErrorIn("globalIndex::gather(..)")
+                    << "nonBlocking not supported for non-contiguous data"
+                    << exit(FatalError);
+            }
+
+            label startOfRequests = Pstream::nRequests();
+
+            // Set up write
             OPstream::write
             (
-                Pstream::scheduled,
+                commsType,
                 procIDs[0],
                 reinterpret_cast<const char*>(fld.begin()),
                 fld.byteSize(),
                 tag,
                 comm
             );
-        }
-        else
-        {
-            OPstream toMaster
-            (
-                Pstream::scheduled,
-                procIDs[0],
-                0,
-                tag,
-                comm
-            );
-            toMaster << fld;
+
+            // Wait for all to finish
+            Pstream::waitRequests(startOfRequests);
         }
     }
 }
@@ -108,111 +174,104 @@ void Foam::globalIndex::gather
 template<class Type>
 void Foam::globalIndex::gather
 (
-    const labelUList& offsets,
+    const labelUList& off,
     const label comm,
     const labelList& procIDs,
     List<Type>& fld,
-    const int tag
+    const int tag,
+    const Pstream::commsTypes commsType
 )
 {
+    List<Type> allFld;
+
+    gather(off, comm, procIDs, fld, allFld, tag, commsType);
+
     if (Pstream::myProcNo(comm) == procIDs[0])
     {
-        List<Type> allFld(offsets.last());
-
-        // Assign my local data
-        SubList<Type>(allFld, fld.size(), 0).assign(fld);
-
-        for (label i = 1; i < procIDs.size(); i++)
-        {
-            SubList<Type> procSlot(allFld, offsets[i+1]-offsets[i], offsets[i]);
-
-            if (contiguous<Type>())
-            {
-                IPstream::read
-                (
-                    Pstream::scheduled,
-                    procIDs[i],
-                    reinterpret_cast<char*>(procSlot.begin()),
-                    procSlot.byteSize(),
-                    tag,
-                    comm
-                );
-            }
-            else
-            {
-                IPstream fromSlave
-                (
-                    Pstream::scheduled,
-                    procIDs[i],
-                    0,
-                    tag,
-                    comm
-                );
-                fromSlave >> procSlot;
-            }
-        }
-
         fld.transfer(allFld);
     }
-    else
-    {
-        if (contiguous<Type>())
-        {
-            OPstream::write
-            (
-                Pstream::scheduled,
-                procIDs[0],
-                reinterpret_cast<const char*>(fld.begin()),
-                fld.byteSize(),
-                tag,
-                comm
-            );
-        }
-        else
-        {
-            OPstream toMaster
-            (
-                Pstream::scheduled,
-                procIDs[0],
-                0,
-                tag,
-                comm
-            );
-            toMaster << fld;
-        }
-    }
 }


 template<class Type>
 void Foam::globalIndex::scatter
 (
-    const labelUList& offsets,
+    const labelUList& off,
     const label comm,
     const labelList& procIDs,
     const UList<Type>& allFld,
     UList<Type>& fld,
-    const int tag
+    const int tag,
+    const Pstream::commsTypes commsType
 )
 {
     if (Pstream::myProcNo(comm) == procIDs[0])
     {
-        fld.assign(SubList<Type>(allFld, offsets[1]-offsets[0]));
+        fld.assign(SubList<Type>(allFld, off[1]-off[0]));

-        for (label i = 1; i < procIDs.size(); i++)
+        if (commsType == Pstream::scheduled || commsType == Pstream::blocking)
         {
-            const SubList<Type> procSlot
-            (
-                allFld,
-                offsets[i+1]-offsets[i],
-                offsets[i]
-            );
-
-            if (contiguous<Type>())
-            {
+            for (label i = 1; i < procIDs.size(); i++)
+            {
+                const SubList<Type> procSlot
+                (
+                    allFld,
+                    off[i+1]-off[i],
+                    off[i]
+                );
+
+                if (contiguous<Type>())
+                {
+                    OPstream::write
+                    (
+                        commsType,
+                        procIDs[i],
+                        reinterpret_cast<const char*>(procSlot.begin()),
+                        procSlot.byteSize(),
+                        tag,
+                        comm
+                    );
+                }
+                else
+                {
+                    OPstream toSlave
+                    (
+                        commsType,
+                        procIDs[i],
+                        0,
+                        tag,
+                        comm
+                    );
+                    toSlave << procSlot;
+                }
+            }
+        }
+        else
+        {
+            // nonBlocking
+
+            if (!contiguous<Type>())
+            {
+                FatalErrorIn("globalIndex::scatter(..)")
+                    << "nonBlocking not supported for non-contiguous data"
+                    << exit(FatalError);
+            }
+
+            label startOfRequests = Pstream::nRequests();
+
+            // Set up writes
+            for (label i = 1; i < procIDs.size(); i++)
+            {
+                const SubList<Type> procSlot
+                (
+                    allFld,
+                    off[i+1]-off[i],
+                    off[i]
+                );
                 OPstream::write
                 (
-                    Pstream::scheduled,
+                    commsType,
                     procIDs[i],
                     reinterpret_cast<const char*>(procSlot.begin()),
                     procSlot.byteSize(),
@@ -220,45 +279,66 @@ void Foam::globalIndex::scatter
                     comm
                 );
             }
-            else
-            {
-                OPstream toSlave
-                (
-                    Pstream::scheduled,
-                    procIDs[i],
-                    0,
-                    tag,
-                    comm
-                );
-                toSlave << procSlot;
-            }
+
+            // Wait for all to finish
+            Pstream::waitRequests(startOfRequests);
         }
     }
     else
     {
-        if (contiguous<Type>())
+        if (commsType == Pstream::scheduled || commsType == Pstream::blocking)
         {
+            if (contiguous<Type>())
+            {
+                IPstream::read
+                (
+                    commsType,
+                    procIDs[0],
+                    reinterpret_cast<char*>(fld.begin()),
+                    fld.byteSize(),
+                    tag,
+                    comm
+                );
+            }
+            else
+            {
+                IPstream fromMaster
+                (
+                    commsType,
+                    procIDs[0],
+                    0,
+                    tag,
+                    comm
+                );
+                fromMaster >> fld;
+            }
+        }
+        else
+        {
+            // nonBlocking
+
+            if (!contiguous<Type>())
+            {
+                FatalErrorIn("globalIndex::scatter(..)")
+                    << "nonBlocking not supported for non-contiguous data"
+                    << exit(FatalError);
+            }
+
+            label startOfRequests = Pstream::nRequests();
+
+            // Set up read
             IPstream::read
             (
-                Pstream::scheduled,
+                commsType,
                 procIDs[0],
                 reinterpret_cast<char*>(fld.begin()),
                 fld.byteSize(),
                 tag,
                 comm
             );
-        }
-        else
-        {
-            IPstream fromMaster
-            (
-                Pstream::scheduled,
-                procIDs[0],
-                0,
-                tag,
-                comm
-            );
-            fromMaster >> fld;
+
+            // Wait for all to finish
+            Pstream::waitRequests(startOfRequests);
         }
     }
 }
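
Note: the non-blocking branches above all follow the same request pattern;
a condensed sketch (receiveInto() and its arguments are made up, only the
Pstream/IPstream calls are from the library):

    #include "Pstream.H"
    #include "IPstream.H"

    void receiveInto
    (
        Foam::List<Foam::scalar>& buf,  // contiguous storage to receive into
        const int fromProcNo,
        const int tag,
        const Foam::label comm
    )
    {
        // Remember how many requests were outstanding before this exchange
        Foam::label startOfRequests = Foam::Pstream::nRequests();

        // Queue the non-blocking receive (contiguous data only)
        Foam::IPstream::read
        (
            Foam::Pstream::nonBlocking,
            fromProcNo,
            reinterpret_cast<char*>(buf.begin()),
            buf.byteSize(),
            tag,
            comm
        );

        // ... further reads/writes could be queued here ...

        // Block until every request queued since startOfRequests has completed
        Foam::Pstream::waitRequests(startOfRequests);
    }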

View File

@@ -2,7 +2,7 @@
   =========                 |
   \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
    \\    /   O peration     |
-    \\  /    A nd           | Copyright (C) 2012 OpenFOAM Foundation
+    \\  /    A nd           | Copyright (C) 2012-2013 OpenFOAM Foundation
     \\/     M anipulation  |
 -------------------------------------------------------------------------------
 License
@@ -241,6 +241,20 @@ Foam::Ostream& Foam::OBJstream::write(const linePointRef& ln)
 }


+Foam::Ostream& Foam::OBJstream::write
+(
+    const linePointRef& ln,
+    const vector& n0,
+    const vector& n1
+)
+{
+    write(ln.start(), n0);
+    write(ln.end(), n1);
+    write("l ") << nVertices_-1 << ' ' << nVertices_ << nl;
+    return *this;
+}
+
+
 Foam::Ostream& Foam::OBJstream::write
 (
     const triPointRef& f,
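
Note: a hedged usage sketch for the new overload (file name and geometry are
made up); it writes the two end points as 'v'/'vn' records followed by an
'l' line record:

    #include "OBJstream.H"
    #include "line.H"

    using namespace Foam;

    void writeEdgeWithNormals()
    {
        OBJstream os("edge.obj");

        const point p0(0, 0, 0);
        const point p1(1, 0, 0);

        os.write
        (
            linePointRef(p0, p1),
            vector(0, 0, 1),    // normal at the start point
            vector(0, 1, 0)     // normal at the end point
        );
    }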

View File

@@ -2,7 +2,7 @@
   =========                 |
   \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
    \\    /   O peration     |
-    \\  /    A nd           | Copyright (C) 2012 OpenFOAM Foundation
+    \\  /    A nd           | Copyright (C) 2012-2013 OpenFOAM Foundation
     \\/     M anipulation  |
 -------------------------------------------------------------------------------
 License
@@ -92,7 +92,7 @@ public:
     // Access

-        //- Return the name of the stream
+        //- Return the number of vertices written
         label nVertices() const
         {
             return nVertices_;
@@ -135,6 +135,14 @@ public:
             //- Write line
             Ostream& write(const linePointRef&);

+            //- Write line with points and vector normals ('vn')
+            Ostream& write
+            (
+                const linePointRef&,
+                const vector& n0,
+                const vector& n1
+            );
+
             //- Write triangle as points with lines or filled polygon
             Ostream& write(const triPointRef&, const bool lines = true);