Simplified twoWayMPI
The code seems to be derived from the LAMMPS COUPLE library. The original COUPLE code allowed using a subset of the global processors for the coupled code. Some fragments of that code remained, but on their own they no longer make sense. Since no additional colors are assigned to processors, MPI_Comm_split effectively just duplicates the global communicator, which can be done directly with MPI_Comm_dup. The second simplification concerns the attempt to limit I/O to MPI rank 0: the filename of the input script was read by one MPI rank and then broadcast to all other ranks. While from the outside this seems to make sense from an MPI programmer's standpoint, it does not take the OpenFOAM implementation into account. IOdictionary is already multi-processor aware: reading operations are performed only on the master processor, which means the dictionary is already in memory on every MPI process at this point in time, and lookup() is an in-memory operation.
This commit is contained in:
@ -63,41 +63,20 @@ twoWayMPI::twoWayMPI
|
||||
)
|
||||
:
|
||||
dataExchangeModel(dict,sm),
|
||||
propsDict_(dict.subDict(typeName + "Props"))
|
||||
propsDict_(dict.subDict(typeName + "Props")),
|
||||
lmp(NULL)
|
||||
{
|
||||
Info<<"Starting up LIGGGHTS for first time execution"<<endl;
|
||||
|
||||
MPI_Comm_rank(MPI_COMM_WORLD,&me);
|
||||
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
|
||||
MPI_Comm_dup(MPI_COMM_WORLD, &comm_liggghts);
|
||||
|
||||
if (me < nprocs) liggghts = 1;
|
||||
else liggghts = MPI_UNDEFINED;
|
||||
|
||||
MPI_Comm_split(MPI_COMM_WORLD,liggghts,0,&comm_liggghts);
|
||||
// read path from dictionary
|
||||
const fileName liggghtsPath(propsDict_.lookup("liggghtsPath"));
|
||||
|
||||
// open LIGGGHTS input script
|
||||
char *liggghtsPathChar = new char[256];
|
||||
int n = 0;
|
||||
if (me == 0)
|
||||
{
|
||||
// read path from dictionary
|
||||
const fileName liggghtsPath(propsDict_.lookup("liggghtsPath"));
|
||||
strcpy(liggghtsPathChar, liggghtsPath.c_str());
|
||||
n = strlen(liggghtsPathChar) + 1;
|
||||
|
||||
Info<<"Executing input script '"<< liggghtsPath.c_str() <<"'"<<endl;
|
||||
}
|
||||
|
||||
if (liggghts == 1) lmp = new LAMMPS_NS::LAMMPS(0,NULL,comm_liggghts);
|
||||
|
||||
MPI_Bcast(&n,1,MPI_INT,0,MPI_COMM_WORLD);
|
||||
if (n > 0)
|
||||
{
|
||||
MPI_Bcast(liggghtsPathChar,n,MPI_CHAR,0,MPI_COMM_WORLD);
|
||||
if (liggghts == 1) lmp->input->file(liggghtsPathChar);
|
||||
}
|
||||
|
||||
delete [] liggghtsPathChar;
|
||||
Info<<"Executing input script '"<< liggghtsPath.c_str() <<"'"<<endl;
|
||||
lmp = new LAMMPS_NS::LAMMPS(0,NULL,comm_liggghts);
|
||||
lmp->input->file(liggghtsPath.c_str());
|
||||
|
||||
// get DEM time step size
|
||||
DEMts_ = lmp->update->dt;
|
||||
@ -109,7 +88,7 @@ twoWayMPI::twoWayMPI
|
||||
|
||||
twoWayMPI::~twoWayMPI()
|
||||
{
|
||||
if (liggghts == 1) delete lmp;
|
||||
delete lmp;
|
||||
}
|
||||
|
||||
// * * * * * * * * * * * * * * * public Member Functions * * * * * * * * * * * * * //
|
||||
@ -240,8 +219,6 @@ bool Foam::twoWayMPI::couple(int i) const
|
||||
coupleNow = true;
|
||||
|
||||
// start liggghts
|
||||
if (liggghts == 1)
|
||||
{
|
||||
// run commands from liggghtsCommands dict
|
||||
Info<<"Starting up LIGGGHTS" << endl;
|
||||
particleCloud_.clockM().start(3,"LIGGGHTS");
|
||||
@ -366,7 +343,6 @@ bool Foam::twoWayMPI::couple(int i) const
|
||||
|
||||
particleCloud_.clockM().stop("LIGGGHTS");
|
||||
Info<<"LIGGGHTS finished"<<endl;
|
||||
}
|
||||
|
||||
// give nr of particles to cloud
|
||||
double newNpart = liggghts_get_maxtag(lmp);
|
||||
|
||||
@ -81,14 +81,6 @@ private:
|
||||
|
||||
// private data
|
||||
dictionary propsDict_;
|
||||
|
||||
// variables
|
||||
int me;
|
||||
|
||||
int nprocs;
|
||||
|
||||
int liggghts;
|
||||
|
||||
MPI_Comm comm_liggghts;
|
||||
|
||||
// private member functions
|
||||
|
||||
Reference in New Issue
Block a user