Merge remote-tracking branch 'lammps-ro/master' into lammps-icms
# Resolved Conflicts: # doc/Manual.txt # doc/improper_distance.txt # doc/pair_mdf.txt # python/lammps.py # src/MANYBODY/pair_polymorphic.cpp # src/MANYBODY/pair_polymorphic.h
This commit is contained in:
@ -39,7 +39,8 @@ scripts in the examples sub-directory:
|
||||
|
||||
trivial.py read/run a LAMMPS input script thru Python
|
||||
demo.py invoke various LAMMPS library interface routines
|
||||
simple.py mimic operation of examples/COUPLE/simple/simple.cpp
|
||||
simple.py parallel example, mimicking examples/COUPLE/simple/simple.cpp
|
||||
split.py parallel example
|
||||
mc.py Monte Carlo energy relaxation wrapper on LAMMPS
|
||||
gui.py GUI go/stop/temperature-slider to control LAMMPS
|
||||
plot.py real-time temperature plot with GnuPlot via Pizza.py
|
||||
@ -77,30 +78,53 @@ The latter link is to the open-source version.
|
||||
|
||||
-------------------------------------------------------------------
|
||||
|
||||
Each example script has more documentation in the file that explains
|
||||
how to use it and what it is doing.
|
||||
|
||||
You can run a particular script in either of the following ways:
|
||||
|
||||
% trivial.py in.trivial
|
||||
% python -i trivial.py in.trivial
|
||||
|
||||
The former assumes that you have changed the first line of the script
|
||||
to point to the Python installed on your box.
|
||||
to point to the Python installed on your box and made the script
|
||||
executable (e.g. chmod +x trivial.py).
|
||||
|
||||
Run the Python scripts with the following LAMMPS input scripts and
|
||||
arguments:
|
||||
The example scripts take the following arguments. The in.* args are
|
||||
LAMMPS input scripts.
|
||||
|
||||
trivial.py in.trivial
|
||||
demo.py
|
||||
simple.py in.simple
|
||||
simple.py in.simple # can run in parallel (see below)
|
||||
split.py in.simple # can run in parallel (see below)
|
||||
|
||||
gui.py in.gui 100
|
||||
plot.py in.plot 10 1000 thermo_temp
|
||||
viz_tool.py in.viz 100 5000
|
||||
vizplotgui_tool.py in.viz 100 thermo_temp
|
||||
|
||||
You can un-comment the Pypar calls if you want to run these in
|
||||
parallel. Then, for example, you can type:
|
||||
To run LAMMPS in parallel from Python, do something like this:
|
||||
|
||||
% mpirun -np 4 simple.py in.simple
|
||||
% mpirun -np 4 python split.py in.simple
|
||||
|
||||
If you run simple.py as-is, this will invoke P instances of a
|
||||
one-processor run, where both Python and LAMMPS will run on single
|
||||
processors. Each running job will read the same input file, and write
|
||||
to same log.lammps file, which isn't too useful.
|
||||
|
||||
However, if you have either the Pypar or mpi4py packages installed in
|
||||
your Python, and uncomment the Pypar or mpi4py code in simple.py, then
|
||||
the above commands will invoke 1 instance of a P-processor run. Both
|
||||
Python and LAMMPS will run on P processors. The job will read the
|
||||
input file and write a single log.lammps file.
|
||||
|
||||
The split.py script can also be run in parallel. It uses mpi4py
|
||||
version 2.0.0 (or later), which makes it possible to pass a
|
||||
communicator when creating the LAMMPS object and thus run multiple
|
||||
instances of LAMMPS at the same time, each on a different subset of
|
||||
MPI ranks. Or run LAMMPS on one subset and some other program on the
|
||||
rest of the MPI ranks, concurrently. See comments in the split.py
|
||||
script for more details.
|
||||
|
||||
% mpirun -np 4 trivial.py in.trivial
|
||||
% mpirun -np 4 python trivial.py in.trivial
|
||||
|
||||
Each script has more documentation in the file that explains how to
|
||||
use it and what it is doing.
|
||||
|
||||
@ -15,21 +15,14 @@ if len(argv) != 1:
|
||||
print "Syntax: demo.py"
|
||||
sys.exit()
|
||||
|
||||
me = 0
|
||||
# uncomment if running in parallel via Pypar
|
||||
#import pypar
|
||||
#me = pypar.rank()
|
||||
#nprocs = pypar.size()
|
||||
|
||||
from lammps import lammps
|
||||
|
||||
lmp = lammps()
|
||||
|
||||
# test out various library functions after running in.demo
|
||||
|
||||
lmp.file("in.demo")
|
||||
|
||||
if me == 0: print "\nPython output:"
|
||||
print "\nPython output:"
|
||||
|
||||
natoms = lmp.extract_global("natoms",0)
|
||||
mass = lmp.extract_atom("mass",2)
|
||||
@ -55,7 +48,3 @@ xc[0] = xc[0] + 1.0
|
||||
lmp.scatter_atoms("x",1,3,xc)
|
||||
|
||||
print "Changed x[0][0] via scatter_atoms =",x[0][0]
|
||||
|
||||
# uncomment if running in parallel via Pypar
|
||||
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
|
||||
#pypar.finalize()
|
||||
|
||||
@ -2,9 +2,14 @@
|
||||
# preceding line should have path for Python on your machine
|
||||
|
||||
# simple.py
|
||||
# Purpose: mimic operation of couple/simple/simple.cpp via Python
|
||||
# Syntax: simple.py in.lammps
|
||||
# in.lammps = LAMMPS input script
|
||||
# Purpose: mimic operation of examples/COUPLE/simple/simple.cpp via Python
|
||||
|
||||
# Serial syntax: simple.py in.lammps
|
||||
# in.lammps = LAMMPS input script
|
||||
|
||||
# Parallel syntax: mpirun -np 4 simple.py in.lammps
|
||||
# in.lammps = LAMMPS input script
|
||||
# also need to uncomment either Pypar or mpi4py sections below
|
||||
|
||||
import sys
|
||||
|
||||
@ -19,11 +24,16 @@ infile = sys.argv[1]
|
||||
|
||||
me = 0
|
||||
|
||||
# uncomment if running in parallel via Pypar
|
||||
# uncomment this if running in parallel via Pypar
|
||||
#import pypar
|
||||
#me = pypar.rank()
|
||||
#nprocs = pypar.size()
|
||||
|
||||
# uncomment this if running in parallel via mpi4py
|
||||
#from mpi4py import MPI
|
||||
#me = MPI.COMM_WORLD.Get_rank()
|
||||
#nprocs = MPI.COMM_WORLD.Get_size()
|
||||
|
||||
from lammps import lammps
|
||||
lmp = lammps()
|
||||
|
||||
@ -54,3 +64,7 @@ print "Force on 1 atom via extract_variable:",fx[0]
|
||||
# uncomment if running in parallel via Pypar
|
||||
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
|
||||
#pypar.finalize()
|
||||
|
||||
# uncomment if running in parallel via mpi4py
|
||||
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
|
||||
#MPI.Finalize()
|
||||
|
||||
79
python/examples/split.py
Executable file
79
python/examples/split.py
Executable file
@ -0,0 +1,79 @@
|
||||
#!/usr/bin/env python -i
|
||||
# preceding line should have path for Python on your machine
|
||||
|
||||
# split.py
|
||||
# Purpose: similar to simple.py, but first the world communicator
|
||||
# is split in two halves and LAMMPS is run only on one partition
|
||||
# Syntax: split.py in.lammps
|
||||
# in.lammps = LAMMPS input script
|
||||
|
||||
import sys
|
||||
|
||||
# parse command line
|
||||
|
||||
argv = sys.argv
|
||||
if len(argv) != 2:
|
||||
print "Syntax: simple.py in.lammps"
|
||||
sys.exit()
|
||||
|
||||
infile = sys.argv[1]
|
||||
|
||||
me = 0
|
||||
|
||||
# this example *only* works with mpi4py version 2.0.0 or later
|
||||
|
||||
from mpi4py import MPI
|
||||
comm = MPI.COMM_WORLD
|
||||
me = comm.Get_rank()
|
||||
nprocs = comm.Get_size()
|
||||
|
||||
# create two subcommunicators
|
||||
|
||||
if me < nprocs // 2: color = 0
|
||||
else: color = 1
|
||||
|
||||
split = comm.Split(color,key=0)
|
||||
|
||||
if color == 0:
|
||||
from lammps import lammps
|
||||
lmp = lammps(comm=split)
|
||||
|
||||
# run infile one line at a time
|
||||
|
||||
lines = open(infile,'r').readlines()
|
||||
for line in lines: lmp.command(line)
|
||||
|
||||
# run 10 more steps
|
||||
# get coords from LAMMPS
|
||||
# change coords of 1st atom
|
||||
# put coords back into LAMMPS
|
||||
# run a single step with changed coords
|
||||
|
||||
lmp.command("run 10")
|
||||
x = lmp.gather_atoms("x",1,3)
|
||||
epsilon = 0.1
|
||||
x[0] += epsilon
|
||||
lmp.scatter_atoms("x",1,3,x)
|
||||
lmp.command("run 1");
|
||||
|
||||
f = lmp.extract_atom("f",3)
|
||||
print "Force on 1 atom via extract_atom: ",f[0][0]
|
||||
|
||||
fx = lmp.extract_variable("fx","all",1)
|
||||
print "Force on 1 atom via extract_variable:",fx[0]
|
||||
print "Proc %d out of %d procs has" % (me,nprocs), lmp
|
||||
print "Calculation on partition 0 complete"
|
||||
|
||||
else:
|
||||
# could run a 2nd calculation on second partition
|
||||
# with different LAMMPS instance or another code
|
||||
# in this case, just sleep on second partition
|
||||
|
||||
import time
|
||||
time.sleep(2)
|
||||
print "Calculation on partition 1 complete"
|
||||
|
||||
# shutdown mpi4py
|
||||
|
||||
comm.Barrier()
|
||||
MPI.Finalize()
|
||||
@ -17,12 +17,6 @@ if len(argv) != 2:
|
||||
|
||||
infile = sys.argv[1]
|
||||
|
||||
me = 0
|
||||
# uncomment if running in parallel via Pypar
|
||||
#import pypar
|
||||
#me = pypar.rank()
|
||||
#nprocs = pypar.size()
|
||||
|
||||
from lammps import lammps
|
||||
lmp = lammps()
|
||||
|
||||
@ -30,11 +24,7 @@ lmp = lammps()
|
||||
|
||||
lmp.file(infile)
|
||||
|
||||
# run infile one line at a time
|
||||
# or run infile one line at a time
|
||||
|
||||
#lines = open(infile,'r').readlines()
|
||||
#for line in lines: lmp.command(line)
|
||||
|
||||
# uncomment if running in parallel via Pypar
|
||||
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
|
||||
#pypar.finalize()
|
||||
|
||||
@ -18,9 +18,10 @@ from ctypes import *
|
||||
from os.path import dirname,abspath,join
|
||||
from inspect import getsourcefile
|
||||
|
||||
|
||||
class lammps:
|
||||
# detect, if we use a version of mpi4py that can pass a communicator
|
||||
|
||||
# detect if Python is using version of mpi4py that can pass a communicator
|
||||
|
||||
has_mpi4py_v2 = False
|
||||
try:
|
||||
from mpi4py import MPI
|
||||
@ -30,9 +31,12 @@ class lammps:
|
||||
except:
|
||||
pass
|
||||
|
||||
# create instance of LAMMPS
|
||||
|
||||
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
|
||||
|
||||
# determine module location
|
||||
|
||||
modpath = dirname(abspath(getsourcefile(lambda:0)))
|
||||
|
||||
# load liblammps.so by default
|
||||
@ -56,9 +60,10 @@ class lammps:
|
||||
# just convert it to ctypes ptr and store in self.lmp
|
||||
|
||||
if not ptr:
|
||||
# with mpi4py we can pass communicators into the LAMMPS object but
|
||||
# we need to adjust type for the MPI communicator object depending
|
||||
# on whether it is an int (like MPICH) or a void* (like OpenMPI)
|
||||
# with mpi4py v2, can pass MPI communicator to LAMMPS
|
||||
# need to adjust for type of MPI communicator object
|
||||
# allow for int (like MPICH) or void* (like OpenMPI)
|
||||
|
||||
if lammps.has_mpi4py_v2 and comm != None:
|
||||
if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int):
|
||||
MPI_Comm = c_int
|
||||
@ -97,6 +102,7 @@ class lammps:
|
||||
self.lib.lammps_open_no_mpi(0,None,byref(self.lmp))
|
||||
# could use just this if LAMMPS lib interface supported it
|
||||
# self.lmp = self.lib.lammps_open_no_mpi(0,None)
|
||||
|
||||
else:
|
||||
self.opened = 0
|
||||
# magic to convert ptr to ctypes ptr
|
||||
|
||||
Reference in New Issue
Block a user