From c4425a1b0ebee13f036e2a76c1766eadb07d491c Mon Sep 17 00:00:00 2001 From: Steve Plimpton Date: Wed, 23 Mar 2022 11:17:51 -0600 Subject: [PATCH] debugging plugin mode --- examples/mdi/README | 15 +- examples/mdi/series_driver.py | 360 +++++++++++++++++++--------------- lib/mdi/Install.py | 2 +- src/MDI/library_mdi.cpp | 6 + src/MDI/mdi_engine.cpp | 8 + 5 files changed, 232 insertions(+), 159 deletions(-) diff --git a/examples/mdi/README b/examples/mdi/README index 5983c0f95e..f1afb46491 100644 --- a/examples/mdi/README +++ b/examples/mdi/README @@ -99,7 +99,7 @@ the info is copied here: Run with TCP: 1 proc each -% python3 series_driver.py -mdi "-role DRIVER -name aimd -method TCP -port 8021" +% python3 series_driver.py -mdi "-role DRIVER -name series -method TCP -port 8021" % lmp_mpi -mdi "-role ENGINE -name LAMMPS -method TCP -port 8021 -hostname localhost" -log log.series -in in.series @@ -107,7 +107,7 @@ Run with TCP: 1 proc each Run with TCP: 2 proc + 4 procs -% mpirun -np 2 python3 series_driver.py -mdi "-role DRIVER -name aimd -method TCP -port 8021" +% mpirun -np 2 python3 series_driver.py -mdi "-role DRIVER -name series -method TCP -port 8021" % mpirun -np 4 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method TCP -port 8021 -hostname localhost" -log log.series -in in.series @@ -115,12 +115,19 @@ Run with TCP: 2 proc + 4 procs Run with MPI: 1 proc each -% mpirun -np 1 python3 series_driver.py -mdi "-role DRIVER -name aimd -method MPI" : -np 1 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.series -in in.series +% mpirun -np 1 python3 series_driver.py -mdi "-role DRIVER -name series -method MPI" : -np 1 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.series -in in.series --- Run with MPI: 2 procs + 3 procs -% mpirun -np 2 python3 series_driver.py -mdi "-role DRIVER -name aimd -method MPI" : -np 3 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.series -in in.series +% mpirun -np 2 python3 series_driver.py -mdi "-role 
DRIVER -name series -method MPI" : -np 3 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.series -in in.series + +--- + +Run in plugin mode: 1 proc + +% python3 series_driver.py -plugin lammps -mdi "-role DRIVER -name series -method LINK -plugin_path /home/sjplimp/lammps/git/src" -plugin_args "-log log.series -in in.series" +mpiexec -n 1 python3 plugin_driver.py --plugin_name "lammps" --mdi "-role DRIVER -name driver -method LINK -plugin_path /home/sjplimp/lammps/git/src" --plugin_command_line "foo -in in.series" diff --git a/examples/mdi/series_driver.py b/examples/mdi/series_driver.py index f8ee41b03f..5ced1b7375 100644 --- a/examples/mdi/series_driver.py +++ b/examples/mdi/series_driver.py @@ -1,10 +1,20 @@ # MDI driver to perform a series of independent calculations -# using LAMMPS as an engine +# using LAMMPS as a standalone engine # Syntax: python3 series_driver.py switch arg switch arg ... # possible switches: # -mdi "-role DRIVER ..." # required switch +# example for stand-alone mode: +# -mdi "-role DRIVER -name sequence -method TCP -port 8021" +# example for plugin mode: +# -mdi "-role DRIVER -name sequence -method LINK +# -plugin_path /home/sjplimp/lammps/src/" +# -plugin name +# name of plugin library, only when using plugin mode +# -plugin_args arglist +# args to add when launching plugin library, only when using plugin mode +# enclose arglist in quotes if multiple words # -n 10 # number of calculations to perform, default = 1 # -mode eval/run/min @@ -15,7 +25,7 @@ # -rho 0.75 0.1 # reduced density and random variation thereof, default = 0.75 0.1 # -delta 0.1 -# randomly perturb atoms initially by this distance, default 0.0 +# randomly perturb atoms initially by this distance, default 0.1 # -nsteps 100 # number of timesteps in dynamics runs, default = 100 # -temp 1.0 @@ -36,20 +46,176 @@ def error(txt=None): if txt: raise Exception(txt) raise Exception("Syntax: python3 series_driver.py switch arg switch arg ...") -# send a LAMMPS input 
script command to MDI engine +# loop over all the tasks to exchange MDI Sends/Receives with the engine +# for standalone mode, this is called by main program below +# for plugin mode, this is a callback function invoked by MDI -def send_command(cmd): - mdi.MDI_Send_Command("NBYTES",mdicomm) - mdi.MDI_Send(len(cmd),1,mdi.MDI_INT,mdicomm) - mdi.MDI_Send_Command("COMMAND",mdicomm) - mdi.MDI_Send(cmd,len(cmd)+1,mdi.MDI_CHAR,mdicomm) +def perform_tasks(world,mdicomm,dummy): -# parse command-line args + print("PT start",world,mdicomm,dummy) + + me = world.Get_rank() + nprocs = world.Get_size() + + # allocate vectors for per-atom types, coords, vels, forces + + natoms = nx * ny * nz + atypes = np.zeros(natoms,dtype=np.int) + coords = np.zeros(3*natoms,dtype=np.float64) + vels = np.zeros(3*natoms,dtype=np.float64) + forces = np.zeros(3*natoms,dtype=np.float64) + + atypes[:] = 1 + + # initialize RN generator + + random.seed(seed) + + # loop over sequence of calculations + + for icalc in range(ncalc): + + # define simulation box + + onerho = rho + (random.random()-0.5)*rhodelta; + sigma = pow(1.0/onerho,1.0/3.0) + + xlo = ylo = zlo = 0.0 + xhi = nx * sigma + yhi = ny * sigma + zhi = nz * sigma + + # send simulation box to engine + + vec = [xhi-xlo,0.0,0.0] + [0.0,yhi-ylo,0.0] + [0.0,0.0,zhi-zlo] + print("PRE-CELL",mdicomm) + mdi.MDI_Send_command(">CELL",mdicomm) + mdi.MDI_Send(vec,9,mdi.MDI_DOUBLE,mdicomm) + print("POST-CELL") + + # create atoms on perfect lattice + + m = 0 + for k in range(nz): + for j in range(ny): + for i in range(nx): + coords[m] = i * sigma + coords[m+1] = j * sigma + coords[m+2] = k * sigma + m += 3 + + # perturb lattice + + for m in range(3*natoms): + coords[m] += 2.0*random.random()*delta - delta + + # define initial velocities + + for m in range(3*natoms): + vels[m] = random.random() - 0.5 + + tcurrent = 0.0 + for m in range(3*natoms): + tcurrent += vels[m]*vels[m] + tcurrent /= 3*(natoms-1) + + factor = math.sqrt(tinitial/tcurrent) + + for m in 
range(3*natoms): + vels[m] *= factor + + # send atoms and their properties to engine + + mdi.MDI_Send_command(">NATOMS",mdicomm) + mdi.MDI_Send(natoms,1,mdi.MDI_INT,mdicomm) + mdi.MDI_Send_command(">TYPES",mdicomm) + mdi.MDI_Send(atypes,natoms,mdi.MDI_INT,mdicomm) + mdi.MDI_Send_command(">COORDS",mdicomm) + mdi.MDI_Send(coords,3*natoms,mdi.MDI_DOUBLE,mdicomm) + mdi.MDI_Send_command(">VELOCITIES",mdicomm) + mdi.MDI_Send(vels,3*natoms,mdi.MDI_DOUBLE,mdicomm) + + # eval or run or minimize + + if mode == "eval": + pass + elif mode == "run": + mdi.MDI_Send_command("@INIT_MD",mdicomm) + mdi.MDI_Send_command(">NITERATE",mdicomm) + mdi.MDI_Send(nsteps,1,mdi.MDI_INT,mdicomm) + mdi.MDI_Send_command("@DEFAULT",mdicomm) + elif mode == "min": + mdi.MDI_Send_command("@INIT_OPTG",mdicomm) + mdi.MDI_Send_command(">TOLERANCE",mdicomm) + params = [1.0e-4,1.0e-4,100.0,100.0] + mdi.MDI_Send(params,4,mdi.MDI_DOUBLE,mdicomm) + mdi.MDI_Send_command("@DEFAULT",mdicomm) + + # request potential energy + + print("PRE-PE") + + mdi.MDI_Send_command(" narg: error() - mdiarg = iarg + 1 + mdiarg = args[iarg+1] + iarg += 2 + elif args[iarg] == "-plugin": + if iarg+2 > narg: error() + plugin = args[iarg+1] + iarg += 2 + elif args[iarg] == "-plugin_args": + if iarg+2 > narg: error() + plugin_args = args[iarg+1] iarg += 2 elif args[iarg] == "-n": if iarg+2 > narg: error() @@ -118,153 +294,29 @@ while iarg < narg: if not mdiarg: error() -# initialize MDI Library +# LAMMPS engine is a stand-alone code +# world = MPI communicator for just this driver +# invoke perform_tasks() directly -mdi.MDI_Init(args[mdiarg]) +if not plugin: + mdi.MDI_Init(mdiarg) + world = mdi.MDI_MPI_get_world_comm() -# MPI communicator for just the driver + # connect to engine -world = mdi.MDI_MPI_get_world_comm() -me = world.Get_rank() -nprocs = world.Get_size() + mdicomm = mdi.MDI_Accept_Communicator() -# connect to engine + perform_tasks(world,mdicomm,None) -mdicomm = mdi.MDI_Accept_Communicator() +# LAMMPS engine is a plugin 
library +# launch plugin +# MDI will call back to perform_tasks() -# allocate vectors for per-atom types, coords, vels, forces +print("PRE PLUGIN"); -natoms = nx * ny * nz -atypes = np.zeros(natoms,dtype=np.int) -coords = np.zeros(3*natoms,dtype=np.float64) -vels = np.zeros(3*natoms,dtype=np.float64) -forces = np.zeros(3*natoms,dtype=np.float64) - -atypes[:] = 1 - -# initialize RN generator - -random.seed(seed) - -# loop over sequence of calculations - -for icalc in range(ncalc): - - # define simulation box - - onerho = rho + (random.random()-0.5)*rhodelta; - sigma = pow(1.0/onerho,1.0/3.0) - - xlo = ylo = zlo = 0.0 - xhi = nx * sigma - yhi = ny * sigma - zhi = nz * sigma - - # send simulation box to engine - - vec = [xhi-xlo,0.0,0.0] + [0.0,yhi-ylo,0.0] + [0.0,0.0,zhi-zlo] - mdi.MDI_Send_command(">CELL",mdicomm) - mdi.MDI_Send(vec,9,mdi.MDI_DOUBLE,mdicomm) - - # create atoms on perfect lattice - - m = 0 - for k in range(nz): - for j in range(ny): - for i in range(nx): - coords[m] = i * sigma - coords[m+1] = j * sigma - coords[m+2] = k * sigma - m += 3 - - # perturb lattice - - for m in range(3*natoms): - coords[m] += 2.0*random.random()*delta - delta - - # define initial velocities - - for m in range(3*natoms): - vels[m] = random.random() - 0.5 - - tcurrent = 0.0 - for m in range(3*natoms): - tcurrent += vels[m]*vels[m] - tcurrent /= 3*(natoms-1) - - factor = math.sqrt(tinitial/tcurrent) - - for m in range(3*natoms): - vels[m] *= factor - - # send atoms and their properties to engine - - mdi.MDI_Send_command(">NATOMS",mdicomm) - mdi.MDI_Send(natoms,1,mdi.MDI_INT,mdicomm) - mdi.MDI_Send_command(">TYPES",mdicomm) - mdi.MDI_Send(atypes,natoms,mdi.MDI_INT,mdicomm) - mdi.MDI_Send_command(">COORDS",mdicomm) - mdi.MDI_Send(coords,3*natoms,mdi.MDI_DOUBLE,mdicomm) - mdi.MDI_Send_command(">VELOCITIES",mdicomm) - mdi.MDI_Send(vels,3*natoms,mdi.MDI_DOUBLE,mdicomm) - - # eval or run or minimize - - if mode == "eval": - pass - elif mode == "run": - 
mdi.MDI_Send_command("@INIT_MD",mdicomm) - mdi.MDI_Send_command(">NITERATE",mdicomm) - mdi.MDI_Send(nsteps,1,mdi.MDI_INT,mdicomm) - mdi.MDI_Send_command("@DEFAULT",mdicomm) - elif mode == "min": - mdi.MDI_Send_command("@INIT_OPTG",mdicomm) - mdi.MDI_Send_command(">TOLERANCE",mdicomm) - params = [1.0e-4,1.0e-4,100.0,100.0] - mdi.MDI_Send(params,4,mdi.MDI_DOUBLE,mdicomm) - mdi.MDI_Send_command("@DEFAULT",mdicomm) - - # request potential energy - - - mdi.MDI_Send_command(" 0) lmp = lammps_open(mdi_argc, mdi_argv, mpi_world_comm, nullptr); diff --git a/src/MDI/mdi_engine.cpp b/src/MDI/mdi_engine.cpp index 913c24ad99..bbef471951 100644 --- a/src/MDI/mdi_engine.cpp +++ b/src/MDI/mdi_engine.cpp @@ -174,6 +174,8 @@ void MDIEngine::mdi_engine(int narg, char **arg) MDI_Accept_communicator(&mdicomm); if (mdicomm <= 0) error->all(FLERR,"Unable to connect to MDI driver"); + printf("ENG post accept MDI comm\n"); + // endless engine loop, responding to driver commands mode = DEFAULT; @@ -235,6 +237,8 @@ void MDIEngine::engine_node(const char *node) { int ierr; + printf("ENG ENODE %s\n",node); + // do not process commands if engine and driver request are not the same strncpy(node_engine,node,MDI_COMMAND_LENGTH); @@ -249,9 +253,13 @@ void MDIEngine::engine_node(const char *node) // read the next command from the driver // all procs call this, but only proc 0 receives the command + printf("ENG PRE-RECV %d\n",mdicomm); + ierr = MDI_Recv_command(mdicmd,mdicomm); if (ierr) error->all(FLERR,"MDI: Unable to receive command from driver"); + printf("ENG POST-RECV %s\n",mdicmd); + // broadcast command to the other MPI tasks MPI_Bcast(mdicmd,MDI_COMMAND_LENGTH,MPI_CHAR,0,world);