Merge branch 'master' into less-compiler-warnings

This commit is contained in:
Axel Kohlmeyer
2018-09-04 08:59:12 -04:00
128 changed files with 22762 additions and 117 deletions

View File

@ -43,6 +43,29 @@ function(validate_option name values)
endif()
endfunction(validate_option)
function(get_lammps_version version_header variable)
file(READ ${version_header} line)
set(MONTHS x Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec)
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\1" day "${line}")
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\2" month "${line}")
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\3" year "${line}")
string(STRIP ${day} day)
string(STRIP ${month} month)
string(STRIP ${year} year)
list(FIND MONTHS "${month}" month)
string(LENGTH ${day} day_length)
string(LENGTH ${month} month_length)
if(day_length EQUAL 1)
set(day "0${day}")
endif()
if(month_length EQUAL 1)
set(month "0${month}")
endif()
set(${variable} "${year}${month}${day}" PARENT_SCOPE)
endfunction()
get_lammps_version(${LAMMPS_SOURCE_DIR}/version.h LAMMPS_VERSION)
# Cmake modules/macros are in a subdirectory to keep this file cleaner
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/Modules)
@ -121,10 +144,10 @@ if(BUILD_LIB)
if(BUILD_SHARED_LIBS) # for all pkg libs, mpi_stubs and linalg
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
set(LIB_SUFFIX "" CACHE STRING "Suffix to append to liblammps and pkg-config file")
mark_as_advanced(LIB_SUFFIX)
if(LIB_SUFFIX)
set(LIB_SUFFIX "_${LIB_SUFFIX}")
set(LAMMPS_LIB_SUFFIX "" CACHE STRING "Suffix to append to liblammps and pkg-config file")
mark_as_advanced(LAMMPS_LIB_SUFFIX)
if(LAMMPS_LIB_SUFFIX)
set(LAMMPS_LIB_SUFFIX "_${LAMMPS_LIB_SUFFIX}")
endif()
endif()
@ -188,7 +211,7 @@ if(ENABLE_TESTING)
endif(ENABLE_TESTING)
set(DEFAULT_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE GRANULAR
KSPACE MANYBODY MC MEAM MISC MOLECULE PERI REAX REPLICA RIGID SHOCK SPIN SNAP
KSPACE MANYBODY MC MEAM MESSAGE MISC MOLECULE PERI REAX REPLICA RIGID SHOCK SPIN SNAP
SRD KIM PYTHON MSCG MPIIO VORONOI POEMS LATTE USER-ATC USER-AWPMD USER-BOCS
USER-CGDNA USER-MESO USER-CGSDK USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE
USER-EFF USER-FEP USER-H5MD USER-LB USER-MANIFOLD USER-MEAMC USER-MGPT USER-MISC
@ -481,6 +504,39 @@ if(PKG_KIM)
include_directories(${KIM_INCLUDE_DIRS})
endif()
if(PKG_MESSAGE)
option(MESSAGE_ZMQ "Use ZeroMQ in MESSAGE package" OFF)
file(GLOB_RECURSE cslib_SOURCES ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/*.F
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/*.c ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/*.cpp)
if(BUILD_SHARED_LIBS)
add_library(cslib SHARED ${cslib_SOURCES})
else()
add_library(cslib STATIC ${cslib_SOURCES})
endif()
if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()
if(MESSAGE_ZMQ)
target_compile_definitions(cslib PRIVATE -DZMQ_YES)
find_package(ZMQ REQUIRED)
target_include_directories(cslib PRIVATE ${ZMQ_INCLUDE_DIRS})
target_link_libraries(cslib PUBLIC ${ZMQ_LIBRARIES})
else()
target_compile_definitions(cslib PRIVATE -DZMQ_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_ZMQ)
endif()
list(APPEND LAMMPS_LINK_LIBS cslib)
include_directories(${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src)
endif()
if(PKG_MSCG)
find_package(GSL REQUIRED)
option(DOWNLOAD_MSCG "Download MSCG (instead of using the system's one)" OFF)
@ -1040,14 +1096,14 @@ if(BUILD_LIB)
if(LAMMPS_DEPS)
add_dependencies(lammps ${LAMMPS_DEPS})
endif()
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LIB_SUFFIX})
if(BUILD_SHARED_LIBS)
set_target_properties(lammps PROPERTIES SOVERSION ${SOVERSION})
install(TARGETS lammps LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(FILES ${LAMMPS_SOURCE_DIR}/library.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps)
configure_file(pkgconfig/liblammps.pc.in ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LIB_SUFFIX}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LIB_SUFFIX}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
endif()
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LAMMPS_LIB_SUFFIX})
set_target_properties(lammps PROPERTIES SOVERSION ${SOVERSION})
install(TARGETS lammps LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(FILES ${LAMMPS_SOURCE_DIR}/library.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps)
configure_file(pkgconfig/liblammps.pc.in ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_LIB_SUFFIX}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/liblammps${LAMMPS_LIB_SUFFIX}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
configure_file(FindLAMMPS.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/FindLAMMPS${LAMMPS_LIB_SUFFIX}.cmake @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/FindLAMMPS${LAMMPS_LIB_SUFFIX}.cmake DESTINATION ${CMAKE_INSTALL_DATADIR}/cmake/Module)
else()
list(APPEND LMP_SOURCES ${LIB_SOURCES})
endif()
@ -1177,7 +1233,7 @@ endif()
###############################################################################
# Print package summary
###############################################################################
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES})
foreach(PKG ${DEFAULT_PACKAGES} ${ACCEL_PACKAGES} ${OTHER_PACKAGES})
if(PKG_${PKG})
message(STATUS "Building package: ${PKG}")
endif()

48
cmake/FindLAMMPS.cmake.in Normal file
View File

@ -0,0 +1,48 @@
# - Find liblammps
# Find the native liblammps headers and libraries.
#
# The following variables will be set:
# LAMMPS_INCLUDE_DIRS - where to find lammps/library.h, etc.
# LAMMPS_LIBRARIES - List of libraries when using lammps.
# LAMMPS_API_DEFINES - lammps library api defines
# LAMMPS_VERSION - lammps library version
# LAMMPS_FOUND - True if liblammps found.
#
# In addition, a LAMMPS::LAMMPS imported target is created.
#
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
#
find_package(PkgConfig)
pkg_check_modules(PC_LAMMPS liblammps@LAMMPS_LIB_SUFFIX@)
find_path(LAMMPS_INCLUDE_DIR lammps/library.h HINTS ${PC_LAMMPS_INCLUDE_DIRS} @CMAKE_INSTALL_FULL_INCLUDEDIR@)
set(LAMMPS_VERSION @LAMMPS_VERSION@)
set(LAMMPS_API_DEFINES @LAMMPS_API_DEFINES@)
find_library(LAMMPS_LIBRARY NAMES lammps@LAMMPS_LIB_SUFFIX@ HINTS ${PC_LAMMPS_LIBRARY_DIRS} @CMAKE_INSTALL_FULL_LIBDIR@)
set(LAMMPS_INCLUDE_DIRS "${LAMMPS_INCLUDE_DIR}")
set(LAMMPS_LIBRARIES "${LAMMPS_LIBRARY}")
include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set LAMMPS_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args(LAMMPS REQUIRED_VARS LAMMPS_LIBRARY LAMMPS_INCLUDE_DIR VERSION_VAR LAMMPS_VERSION)
mark_as_advanced(LAMMPS_INCLUDE_DIR LAMMPS_LIBRARY)
if(LAMMPS_FOUND AND NOT TARGET LAMMPS::LAMMPS)
add_library(LAMMPS::LAMMPS UNKNOWN IMPORTED)
set_target_properties(LAMMPS::LAMMPS PROPERTIES IMPORTED_LOCATION "${LAMMPS_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${LAMMPS_INCLUDE_DIR}" INTERFACE_COMPILE_DEFINITIONS "${LAMMPS_API_DEFINES}")
endif()

View File

@ -0,0 +1,8 @@
find_path(ZMQ_INCLUDE_DIR zmq.h)
find_library(ZMQ_LIBRARY NAMES zmq)
set(ZMQ_LIBRARIES ${ZMQ_LIBRARY})
set(ZMQ_INCLUDE_DIRS ${ZMQ_INCLUDE_DIR})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ZMQ DEFAULT_MSG ZMQ_LIBRARY ZMQ_INCLUDE_DIR)

View File

@ -4,15 +4,15 @@
# after you added @CMAKE_INSTALL_FULL_LIBDIR@/pkg-config to PKG_CONFIG_PATH,
# e.g. export PKG_CONFIG_PATH=@CMAKE_INSTALL_FULL_LIBDIR@/pkgconfig
prefix=@CMAKE_INSTALL_FULL_PREFIX@
prefix=@CMAKE_INSTALL_PREFIX@
libdir=@CMAKE_INSTALL_FULL_LIBDIR@
includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
Name: liblammps@LAMMPS_MACHINE@
Description: Large-scale Atomic/Molecular Massively Parallel Simulator Library
URL: http://lammps.sandia.gov
Version:
Version: @LAMMPS_VERSION@
Requires:
Libs: -L${libdir} -llammps@LIB_SUFFIX@@
Libs: -L${libdir} -llammps@LAMMPS_LIB_SUFFIX@
Libs.private: -lm
Cflags: -I${includedir} @LAMMPS_API_DEFINES@

View File

@ -31,6 +31,7 @@ This is the list of packages that may require additional steps.
"KOKKOS"_#kokkos,
"LATTE"_#latte,
"MEAM"_#meam,
"MESSAGE"_#message,
"MSCG"_#mscg,
"OPT"_#opt,
"POEMS"_#poems,
@ -361,6 +362,10 @@ make lib-meam args="-m mpi" # build with default Fortran compiler compatible
make lib-meam args="-m serial" # build with compiler compatible with "make serial" (GNU Fortran)
make lib-meam args="-m ifort" # build with Intel Fortran compiler using Makefile.ifort :pre
NOTE: You should test building the MEAM library with both the Intel
and GNU compilers to see if a simulation runs faster with one versus
the other on your system.
The build should produce two files: lib/meam/libmeam.a and
lib/meam/Makefile.lammps. The latter is copied from an existing
Makefile.lammps.* and has settings needed to link C++ (LAMMPS) with
@ -373,6 +378,35 @@ file.
:line
MESSAGE package :h4,link(message)
This package can optionally include support for messaging via sockets,
using the open-source "ZeroMQ library"_http://zeromq.org, which must
be installed on your system.
[CMake build]:
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
[Traditional make]:
Before building LAMMPS, you must build the CSlib library in
lib/message. You can build the CSlib library manually if you prefer;
follow the instructions in lib/message/README. You can also do it in
one step from the lammps/src dir, using one of these commands, which
simply invoke the lib/message/Install.py script with the specified args:
make lib-message # print help message
make lib-message args="-m -z" # build with MPI and socket (ZMQ) support
make lib-message args="-s" # build as serial lib with no ZMQ support
The build should produce two files: lib/message/cslib/src/libmessage.a
and lib/message/Makefile.lammps. The latter is copied from an
existing Makefile.lammps.* and has settings to link with the ZeroMQ
library if requested in the build.
:line
MSCG package :h4,link(mscg)
To build with this package, you must download and build the MS-CG

View File

@ -42,6 +42,7 @@ packages:
"KOKKOS"_Build_extras.html#kokkos,
"LATTE"_Build_extras.html#latte,
"MEAM"_Build_extras.html#meam,
"MESSAGE"_#Build_extras.html#message,
"MSCG"_Build_extras.html#mscg,
"OPT"_Build_extras.html#opt,
"POEMS"_Build_extras.html#poems,

View File

@ -71,6 +71,7 @@ An alphabetic list of all LAMMPS commands.
"lattice"_lattice.html,
"log"_log.html,
"mass"_mass.html,
"message"_message.html,
"minimize"_minimize.html,
"min_modify"_min_modify.html,
"min_style"_min_style.html,
@ -103,6 +104,7 @@ An alphabetic list of all LAMMPS commands.
"restart"_restart.html,
"run"_run.html,
"run_style"_run_style.html,
"server"_server.html,
"set"_set.html,
"shell"_shell.html,
"special_bonds"_special_bonds.html,

View File

@ -54,6 +54,7 @@ General howto :h3
Howto_replica
Howto_library
Howto_couple
Howto_client_server
END_RST -->
@ -64,7 +65,8 @@ END_RST -->
"Run multiple simulations from one input script"_Howto_multiple.html
"Multi-replica simulations"_Howto_replica.html
"Library interface to LAMMPS"_Howto_library.html
"Couple LAMMPS to other codes"_Howto_couple.html :all(b)
"Couple LAMMPS to other codes"_Howto_couple.html
"Using LAMMPS in client/server mode"_Howto_client_server.html :all(b)
<!-- END_HTML_ONLY -->

View File

@ -0,0 +1,131 @@
"Higher level section"_Howto.html - "LAMMPS WWW Site"_lws - "LAMMPS
Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
Using LAMMPS in client/server mode :h3
Client/server coupling of two codes is where one code is the "client"
and sends request messages to a "server" code. The server responds to
each request with a reply message. This enables the two codes to work
in tandem to perform a simulation. LAMMPS can act as either a client
or server code.
Some advantages of client/server coupling are that the two codes run
as stand-alone executables; they are not linked together. Thus
neither code needs to have a library interface. This often makes it
easier to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code that implements the
client-side protocol can be used in tandem with any code that
implements the server-side protocol, without the two codes needing to
know anything more specific about each other.
A simple example of client/server coupling is where LAMMPS is the
client code performing MD timestepping. Each timestep it sends a
message to a server quantum code containing current coords of all the
atoms. The quantum code computes energy and forces based on the
coords. It returns them as a message to LAMMPS, which completes the
timestep.
Alternate methods for code coupling with LAMMPS are described on
the "Howto couple"_Howto_couple.html doc page.
LAMMPS support for client/server coupling is in its "MESSAGE
package"_Packages_details.html#PKG-MESSAGE which implements several
commands that enable LAMMPS to act as a client or server, as discussed
below. The MESSAGE package also wraps a client/server library called
CSlib which enables two codes to exchange messages in different ways,
either via files, sockets, or MPI. The CSlib is provided with LAMMPS
in the lib/message dir. The CSlib has its own
"website"_http://cslib.sandia.gov with documentation and test
programs.
NOTE: For client/server coupling to work between LAMMPS and another
code, the other code also has to use the CSlib. This can sometimes be
done without any modifications to the other code by simply wrapping it
with a Python script that exchanges CSlib messages with LAMMPS and
prepares input for or processes output from the other code. The other
code also has to implement a matching protocol for the format and
content of messages that LAMMPS exchanges with it.
These are the commands currently in the MESSAGE package for two
protocols, MD and MC (Monte Carlo). New protocols can easily be
defined and added to this directory, where LAMMPS acts as either the
client or server.
"message"_message.html
"fix client md"_fix_client_md.html = LAMMPS is a client for running MD
"server md"_server_md.html = LAMMPS is a server for computing MD forces
"server mc"_server_mc.html = LAMMPS is a server for computing a Monte Carlo energy
The server doc files give details of the message protocols
for data that is exchanged between the client and server.
These example directories illustrate how to use LAMMPS as either a
client or server code:
examples/message
examples/COUPLE/README
examples/COUPLE/lammps_mc
examples/COUPLE/lammps_vasp :ul
The examples/message dir couples a client instance of LAMMPS to a
server instance of LAMMPS.
The lammps_mc dir shows how to couple LAMMPS as a server to a simple
Monte Carlo client code as the driver.
The lammps_vasp dir shows how to couple LAMMPS as a client code
running MD timestepping to VASP acting as a server providing quantum
DFT forces, thru a Python wrapper script on VASP.
Here is how to launch a client and server code together for any of the
4 modes of message exchange that the "message"_message.html command
and the CSlib support. Here LAMMPS is used as both the client and
server code. Another code could be substituted for either.
The examples below show launching both codes from the same window (or
batch script), using the "&" character to launch the first code in the
background. For all modes except {mpi/one}, you could also launch the
codes in separate windows on your desktop machine. It does not
matter whether you launch the client or server first.
In these examples either code can be run on one or more processors.
If running in a non-MPI mode (file or zmq) you can launch a code on a
single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch both codes via
mpirun, even if one or both of them runs on a single processor. This
is so that MPI can figure out how to connect both MPI processes
together to exchange MPI messages between them.
For message exchange in {file}, {zmq}, or {mpi/two} modes:
% mpirun -np 1 lmp_mpi -log log.client < in.client &
% mpirun -np 2 lmp_mpi -log log.server < in.server :pre
% mpirun -np 4 lmp_mpi -log log.client < in.client &
% mpirun -np 1 lmp_mpi -log log.server < in.server :pre
% mpirun -np 2 lmp_mpi -log log.client < in.client &
% mpirun -np 4 lmp_mpi -log log.server < in.server :pre
For message exchange in {mpi/one} mode:
Launch both codes in a single mpirun command:
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -log log.server
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is an integer
label that will be used to distinguish one executable from another in
the multiple executables that the mpirun command launches. In this
example the client was colored with a 0, and the server with a 1.

View File

@ -16,10 +16,12 @@ atoms and pass those forces to LAMMPS. Or a continuum finite element
nodal points, compute a FE solution, and return interpolated forces on
MD atoms.
LAMMPS can be coupled to other codes in at least 3 ways. Each has
LAMMPS can be coupled to other codes in at least 4 ways. Each has
advantages and disadvantages, which you'll have to think about in the
context of your application.
:line
(1) Define a new "fix"_fix.html command that calls the other code. In
this scenario, LAMMPS is the driver code. During its timestepping,
the fix is invoked, and can make library calls to the other code,
@ -32,6 +34,8 @@ LAMMPS.
:link(poems,http://www.rpi.edu/~anderk5/lab)
:line
(2) Define a new LAMMPS command that calls the other code. This is
conceptually similar to method (1), but in this case LAMMPS and the
other code are on a more equal footing. Note that now the other code
@ -52,6 +56,8 @@ command writes and reads.
See the "Modify command"_Modify_command.html doc page for info on how
to add a new command to LAMMPS.
:line
(3) Use LAMMPS as a library called by another code. In this case the
other code is the driver and calls LAMMPS as needed. Or a wrapper
code could link and call both LAMMPS and another code as libraries.
@ -102,3 +108,9 @@ on all the processors. Or it might allocate half the processors to
LAMMPS and half to the other code and run both codes simultaneously
before syncing them up periodically. Or it might instantiate multiple
instances of LAMMPS to perform different calculations.
:line
(4) Couple LAMMPS with another code in a client/server mode. This is
described on the "Howto client/server"_Howto_client_server.html doc
page.

View File

@ -1,7 +1,7 @@
<!-- HTML_ONLY -->
<HEAD>
<TITLE>LAMMPS Users Manual</TITLE>
<META NAME="docnumber" CONTENT="22 Aug 2018 version">
<META NAME="docnumber" CONTENT="31 Aug 2018 version">
<META NAME="author" CONTENT="http://lammps.sandia.gov - Sandia National Laboratories">
<META NAME="copyright" CONTENT="Copyright (2003) Sandia Corporation. This software and manual is distributed under the GNU General Public License.">
</HEAD>
@ -21,7 +21,7 @@
:line
LAMMPS Documentation :c,h1
22 Aug 2018 version :c,h2
31 Aug 2018 version :c,h2
"What is a LAMMPS version?"_Manual_version.html

View File

@ -549,10 +549,6 @@ This package has "specific installation
instructions"_Build_extras.html#gpu on the "Build
extras"_Build_extras.html doc page.
NOTE: You should test building the MEAM library with both the Intel
and GNU compilers to see if a simulation runs faster with one versus
the other on your system.
[Supporting info:]
src/MEAM: filenames -> commands
@ -563,6 +559,31 @@ examples/meam :ul
:line
MESSAGE package :link(PKG-MESSAGE),h4
[Contents:]
Commands to use LAMMPS as either a client or server and couple it to
another application.
[Install:]
This package has "specific installation
instructions"_Build_extras.html#message on the "Build
extras"_Build_extras.html doc page.
[Supporting info:]
src/MESSAGE: filenames -> commands
lib/message/README
"message"_message.html
"fix client/md"_fix_client_md.html
"server md"_server_md.html
"server mc"_server_mc.html
examples/message :ul
:line
MISC package :link(PKG-MISC),h4
[Contents:]

View File

@ -47,6 +47,7 @@ Package, Description, Doc page, Example, Library
"MANYBODY"_Packages_details.html#PKG-MANYBODY, many-body potentials, "pair_style tersoff"_pair_tersoff.html, shear, no
"MC"_Packages_details.html#PKG-MC, Monte Carlo options, "fix gcmc"_fix_gcmc.html, n/a, no
"MEAM"_Packages_details.html#PKG-MEAM, modified EAM potential, "pair_style meam"_pair_meam.html, meam, int
"MESSAGE"_Packages_details.html#PKG-MESSAGE, client/server messaging, "message"_message.html, message, int
"MISC"_Packages_details.html#PKG-MISC, miscellaneous single-file commands, n/a, no, no
"MOLECULE"_Packages_details.html#PKG-MOLECULE, molecular system force fields, "Howto bioFF"_Howto_bioFF.html, peptide, no
"MPIIO"_Packages_details.html#PKG-MPIIO, MPI parallel I/O dump and restart, "dump"_dump.html, n/a, no

View File

@ -18,6 +18,7 @@ letter abbreviation can be used:
"-i or -in"_#file
"-k or -kokkos"_#run-kokkos
"-l or -log"_#log
"-m or -mpicolor"_#mpicolor
"-nc or -nocite"_#nocite
"-pk or -package"_#package
"-p or -partition"_#partition
@ -175,6 +176,30 @@ Option -plog will override the name of the partition log files file.N.
:line
[-mpicolor] color :link(mpicolor)
If used, this must be the first command-line argument after the LAMMPS
executable name. It is only used when LAMMPS is launched by an mpirun
command which also launches one or more other executables at the same
time. (The other executable could be LAMMPS as well.) The color is an
integer value which should be different for each executable (another
application may set this value in a different way). LAMMPS and the
other executable(s) perform an MPI_Comm_split() with their own colors
to shrink the MPI_COMM_WORLD communicator to the subset of
processors they are actually running on.
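As a rough, standalone illustration of that split (not LAMMPS source code; the
argument parsing is simplified and assumes the color value is the second
command-line argument), consider this minimal C++/MPI sketch:
#include <mpi.h>
#include <cstdlib>
int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int color = std::atoi(argv[2]);   // simplified: launched as "exe -mpicolor N ..."
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm mycomm;                  // ranks launched with the same color share mycomm
  MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mycomm);
  // ... the application now communicates on mycomm, not MPI_COMM_WORLD ...
  MPI_Comm_free(&mycomm);
  MPI_Finalize();
  return 0;
} :pre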
Currently, this is only used in LAMMPS to perform client/server
messaging with another application. LAMMPS can act as either a client
or server (or both). More details are given on the "Howto
client/server"_Howto_client_server.html doc page.
Specifically, this refers to the "mpi/one" mode of messaging provided
by the "message"_message.html command and the CSlib library LAMMPS
links with from the lib/message directory. See the
"message"_message.html command for more details.
:line
[-nocite] :link(nocite)
Disable writing the log.cite file which is normally written to list

View File

@ -56,6 +56,7 @@ Commands :h1
lattice
log
mass
message
min_modify
min_style
minimize
@ -87,6 +88,7 @@ Commands :h1
restart
run
run_style
server
set
shell
special_bonds

106
doc/src/fix_client_md.txt Normal file
View File

@ -0,0 +1,106 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
fix client/md command :h3
[Syntax:]
fix ID group-ID client/md :pre
ID, group-ID are documented in "fix"_fix.html command
client/md = style name of this fix command :ul
[Examples:]
fix 1 all client/md :pre
[Description:]
This fix style enables LAMMPS to run as a "client" code and
communicate each timestep with a separate "server" code to perform an
MD simulation together.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When using this fix, LAMMPS (as the client code) passes the current
coordinates of all particles to the server code each timestep, which
computes their interaction, and returns the energy, forces, and virial
for the interacting particles to LAMMPS, so it can complete the
timestep.
The server code could be a quantum code, or another classical MD code
which encodes a force field (pair_style in LAMMPS lingo) which LAMMPS
does not have. In the quantum case, this fix is a mechanism for
running {ab initio} MD with quantum forces.
The group associated with this fix is ignored.
The protocol and "units"_units.html for message format and content
that LAMMPS exchanges with the server code is defined on the "server
md"_server_md.html doc page.
Note that when using LAMMPS as an MD client, your LAMMPS input script
should not normally contain force field commands, like a
"pair_style"_doc/pair_style.html, "bond_style"_doc/bond_style.html, or
"kspace_style"_kspace_style.html commmand. However it is possible for
a server code to only compute a portion of the full force-field, while
LAMMPS computes the remaining part. Your LAMMPS script can also
specify boundary conditions or force constraints in the usual way,
which will be added to the per-atom forces returned by the server
code.
See the examples/message dir for example scripts where LAMMPS is both
the "client" and/or "server" code for this kind of client/server MD
simulation. The examples/message/README file explains how to launch
LAMMPS and another code in tandem to perform a coupled simulation.
:line
[Restart, fix_modify, output, run start/stop, minimize info:]
No information about this fix is written to "binary restart
files"_restart.html.
The "fix_modify"_fix_modify.html {energy} option is supported by this
fix to add the potential energy computed by the server application to
the system's potential energy as part of "thermodynamic
output"_thermo_style.html.
The "fix_modify"_fix_modify.html {virial} option is supported by this
fix to add the server application's contribution to the system's
virial as part of "thermodynamic output"_thermo_style.html. The
default is {virial yes}.
This fix computes a global scalar which can be accessed by various
"output commands"_Howto_output.html. The scalar is the potential
energy discussed above. The scalar value calculated by this fix is
"extensive".
No parameter of this fix can be used with the {start/stop} keywords of
the "run"_run.html command. This fix is not invoked during "energy
minimization"_minimize.html.
[Restrictions:]
This fix is part of the MESSAGE package. It is only enabled if LAMMPS
was built with that package. See the "Build
package"_Build_package.html doc page for more info.
A script that uses this command must also use the
"message"_message.html command to setup the messaging protocol with
the other server code.
[Related commands:]
"message"_message.html, "server"_server.html
[Default:] none

View File

@ -167,6 +167,7 @@ label.html
lattice.html
log.html
mass.html
message.html
min_modify.html
min_style.html
minimize.html
@ -194,6 +195,9 @@ reset_timestep.html
restart.html
run.html
run_style.html
server.html
server_mc.html
server_md.html
set.html
shell.html
special_bonds.html
@ -241,6 +245,7 @@ fix_bond_create.html
fix_bond_react.html
fix_bond_swap.html
fix_box_relax.html
fix_client_md.html
fix_cmap.html
fix_colvars.html
fix_controller.html

162
doc/src/message.txt Normal file
View File

@ -0,0 +1,162 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
message command :h3
[Syntax:]
message which protocol mode arg :pre
which = {client} or {server} :ulb,l
protocol = {md} or {mc} :l
mode = {file} or {zmq} or {mpi/one} or {mpi/two} :l
{file} arg = filename
filename = file used for message exchanges
{zmq} arg = socket-ID
socket-ID for client = localhost:5555, see description below
socket-ID for server = *:5555, see description below
{mpi/one} arg = none
{mpi/two} arg = filename
filename = file used to establish communication between 2 MPI jobs :pre
:ule
[Examples:]
message client md file tmp.couple
message server md file tmp.couple :pre
message client md zmq localhost:5555
message server md zmq *:5555 :pre
message client md mpi/one
message server md mpi/one :pre
message client md mpi/two tmp.couple
message server md mpi/two tmp.couple :pre
[Description:]
Establish a messaging protocol between LAMMPS and another code for the
purpose of client/server coupling.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
:line
The {which} argument defines LAMMPS to be the client or the server.
:line
The {protocol} argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
md = run dynamics with another code
mc = perform Monte Carlo moves with another code :ul
For protocol {md}, LAMMPS can be either a client or server. See the
"server md"_server_md.html doc page for details on the protocol.
For protocol {mc}, LAMMPS can be the server. See the "server
mc"_server_mc.html doc page for details on the protocol.
:line
The {mode} argument specifies how messages are exchanged between the
client and server codes. Both codes must use the same mode and use
consistent parameters.
For mode {file}, the 2 codes communicate via binary files. They must
use the same filename, which is actually a file prefix. Several files
with that prefix will be created and deleted as a simulation runs.
The filename can include a path. Both codes must be able to access
the path/file in a common filesystem.
For mode {zmq}, the 2 codes communicate via a socket on the server
code's machine. Support for socket messaging is provided by the
open-source "ZeroMQ library"_http://zeromq.org, which must be
installed on your system. The client specifies an IP address (IPv4
format) or the DNS name of the machine the server code is running on,
followed by a 4-digit port ID for the socket, separated by a colon.
E.g.
localhost:5555 # client and server running on same machine
192.168.1.1:5555 # server is 192.168.1.1
deptbox.uni.edu:5555 # server is deptbox.uni.edu :pre
The server specifies "*:5555" where "*" represents all available
interfaces on the server's machine, and the port ID must match
what the client specifies.
NOTE: What are allowed port IDs?
NOTE: Additional explanation is needed here about how to use the {zmq}
mode on a parallel machine, e.g. a cluster with many nodes.
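The sockets themselves are managed inside the CSlib; the standalone C++ sketch
below only illustrates the addressing convention described above (bind to
"*:5555" on the server, connect to "host:5555" on the client) using the ZeroMQ
C API, with a placeholder payload rather than the CSlib message format:
#include <zmq.h>
int main()
{
  void *ctx = zmq_ctx_new();
  void *rep = zmq_socket(ctx, ZMQ_REP);     // "server" side
  zmq_bind(rep, "tcp://*:5555");            // listen on all interfaces, port 5555
  void *req = zmq_socket(ctx, ZMQ_REQ);     // "client" side
  zmq_connect(req, "tcp://localhost:5555"); // server host name or IP, same port
  zmq_send(req, "ping", 4, 0);              // placeholder request
  char buf[16];
  zmq_recv(rep, buf, sizeof(buf), 0);       // placeholder receive on the server
  zmq_close(req);
  zmq_close(rep);
  zmq_ctx_term(ctx);
  return 0;
} :pre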
For mode {mpi/one}, the 2 codes communicate via MPI and are launched
by the same mpirun command, e.g. with this syntax for OpenMPI:
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.client -log log.client : -np 4 othercode args # LAMMPS is client
mpirun -np 2 othercode args : -np 4 lmp_mpi -mpicolor 1 -in in.server # LAMMPS is server :pre
Note the use of the "-mpicolor color" command-line argument with
LAMMPS. See the "command-line args"_Run_options.html doc page for
further explanation.
For mode {mpi/two}, the 2 codes communicate via MPI, but are launched
by 2 separate mpirun commands. The specified {filename} argument is a
file the 2 MPI processes will use to exchange info so that an MPI
inter-communicator can be established to enable the 2 codes to send
MPI messages to each other. Both codes must be able to access the
path/file in a common filesystem.
:line
Normally, the message command should be used at the top of a LAMMPS
input script. It performs an initial handshake with the other code to
set up messaging and to verify that both codes are using the same
message protocol and mode. Assuming both codes are launched at
(nearly) the same time, the other code should perform the same kind of
initialization.
If LAMMPS is the client code, it will begin sending messages when a
LAMMPS client command begins its operation. E.g. for the "fix
client/md"_fix_client_md.html command, it is when a "run"_run.html
command is executed.
If LAMMPS is the server code, it will begin receiving messages when
the "server"_server.html command is invoked.
A fix client command will terminate its messaging with the server when
LAMMPS ends, or the fix is deleted via the "unfix"_unfix.html command. The
server command will terminate its messaging with the client when the
client signals it. Then the remainder of the LAMMPS input script will
be processed.
If both codes do something similar, this means a new round of
client/server messaging can be initiated after termination by re-using
a 2nd message command in your LAMMPS input script, followed by a new
fix client or server command.
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
[Related commands:]
"server"_server.html, "fix client/md"_fix_client_md.html
[Default:] none

71
doc/src/server.txt Normal file
View File

@ -0,0 +1,71 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
server command :h3
[Syntax:]
server protocol :pre
protocol = {md} or {mc} :ul
[Examples:]
server md :pre
[Description:]
This command starts LAMMPS running in "server" mode, where it receives
messages from a separate "client" code and responds by sending a reply
message back to the client. The specified {protocol} determines the
format and content of messages LAMMPS expects to receive and how it
responds.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The {protocol} argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
"md"_server_md.html = run dynamics with another code
"mc"_server_mc.html = perform Monte Carlo moves with another code :ul
For protocol {md}, LAMMPS can be either a client (via the "fix
client/md"_fix_client_md.html command) or server. See the "server
md"_server_md.html doc page for details on the protocol.
For protocol {mc}, LAMMPS can be the server. See the "server
mc"_server_mc.html doc page for details on the protocol.
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
A script that uses this command must also use the
"message"_message.html command to setup the messaging protocol with
the other client code.
[Related commands:]
"message"_message.html, "fix client/md"_fix_client_md.html
[Default:] none

116
doc/src/server_mc.txt Normal file
View File

@ -0,0 +1,116 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
server mc command :h3
[Syntax:]
server mc :pre
mc = the protocol argument to the "server"_server.html command
[Examples:]
server mc :pre
[Description:]
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the {mc}
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The "server"_server.html doc page gives other options for using LAMMPS
See an example of how this command is used in
examples/COUPLE/lammps_mc/in.server.
:line
When using this command, LAMMPS (as the server code) receives
instructions from a Monte Carlo (MC) driver to displace random atoms,
compute the energy before and after displacement, and run dynamics to
equilibrate the system.
The MC driver performs the random displacements on random atoms,
accepts or rejects the move in an MC sense, and orchestrates the MD
runs.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the "CSlib website"_http://cslib.sandia.gov doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_mc.cpp file for details on how LAMMPS uses
these messages. See the examples/COUPLE/lammps_mc/mc.cpp file for an
example of how an MC driver code can use these messages.
Let NATOMS=1, EINIT=2, DISPLACE=3, ACCEPT=4, RUN=5.
[Client sends one of these kinds of message]:
cs->send(NATOMS,0) # msgID = 1 with no fields :pre
cs->send(EINIT,0) # msgID = 2 with no fields :pre
cs->send(DISPLACE,2) # msgID = 3 with 2 fields
cs->pack_int(1,ID) # 1st field = ID of atom to displace
cs->pack(2,3,xnew) # 2nd field = new xyz coords of displaced atom :pre
cs->send(ACCEPT,1) # msgID = 4 with 1 field
cs->pack_int(1,flag) # 1st field = accept/reject flag :pre
cs->send(RUN,1) # msgID = 5 with 1 field
cs->pack_int(1,nsteps) # 1st field = # of timesteps to run MD :pre
[Server replies]:
cs->send(NATOMS,1) # msgID = 1 with 1 field
cs->pack_int(1,natoms) # 1st field = number of atoms :pre
cs->send(EINIT,2) # msgID = 2 with 2 fields
cs->pack_double(1,poteng) # 1st field = potential energy of system
cs->pack(2,3*natoms,x) # 2nd field = 3N coords of Natoms :pre
cs->send(DISPLACE,1) # msgID = 3 with 1 field
cs->pack_double(1,poteng) # 1st field = new potential energy of system :pre
cs->send(ACCEPT,0) # msgID = 4 with no fields
cs->send(RUN,0) # msgID = 5 with no fields
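For orientation, one possible MC cycle on the driver (client) side looks like
this, written in the same simplified pseudo-code style as above; the reply
handling is indicated only in comments, and the exact CSlib receive/unpack
calls are not shown (see the CSlib documentation for the real API):
cs->send(EINIT,0)            # reply gives eold = current potential energy
cs->send(DISPLACE,2)         # propose a move for one randomly chosen atom
cs->pack_int(1,ID)           # ID of the atom picked by the driver
cs->pack(2,3,xnew)           # its displaced xyz coords
                             # reply gives enew = new potential energy
flag = 1 if rand() < exp(-(enew-eold)/T) else 0   # Metropolis test
cs->send(ACCEPT,1)
cs->pack_int(1,flag)
cs->send(RUN,1)              # occasionally equilibrate with an MD run
cs->pack_int(1,nsteps) :pre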
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
A script that uses this command must also use the
"message"_message.html command to setup the messaging protocol with
the other client code.
[Related commands:]
"message"_message.html
[Default:] none

147
doc/src/server_md.txt Normal file
View File

@ -0,0 +1,147 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Commands_all.html)
:line
server md command :h3
[Syntax:]
server md :pre
md = the protocol argument to the "server"_server.html command
[Examples:]
server md :pre
[Description:]
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the {md}
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The "Howto client/server"_Howto_client_server.html doc page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The "server"_server.html doc page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/message/in.message.server.
:line
When using this command, LAMMPS (as the server code) receives the
current coordinates of all particles from the client code each
timestep, computes their interaction, and returns the energy, forces,
and pressure for the interacting particles to the client code, so it
can complete the timestep. This command could also be used with a
client code that performs energy minimization, using the server to
compute forces and energy each iteration of its minimizer.
When using the "fix client/md" command, LAMMPS (as the client code)
does the timestepping and receives needed energy, forces, and pressure
values from the server code.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the "CSlib website"_http://cslib.sandia.gov doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_md.cpp and src/MESSAGE/fix_client_md.cpp
files for details on how LAMMPS uses these messages. See the
examples/COUPLE/lammps_vasp/vasp_wrapper.py file for an example of how
a quantum code (VASP) can use these messages.
The following pseudo-code uses these values, defined as enums.
enum{SETUP=1,STEP};
enum{DIM=1,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE};
enum{FORCES=1,ENERGY,PRESSURE,ERROR}; :pre
[Client sends 2 kinds of messages]:
# required fields: DIM, PERIODICITY, ORIGIN, BOX, NATOMS, NTYPES, TYPES, COORDS
# optional fields: UNITS, CHARGE :pre
cs->send(SETUP,nfields) # msgID with nfields :pre
cs->pack_int(DIM,dim) # dimension (2,3) of simulation
cs->pack(PERIODICITY,3,xyz) # periodicity flags in 3 dims
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
cs->pack_int(NATOMS,natoms) # total number of atoms
cs->pack_int(NTYPES,ntypes) # number of atom types
cs->pack(TYPES,natoms,type) # vector of per-atom types
cs->pack(COORDS,3*natoms,x) # vector of 3N atom coords
cs->pack_string(UNITS,units) # units = "lj", "real", "metal", etc
cs->pack(CHARGE,natoms,q) # vector of per-atom charge :pre
# required fields: COORDS
# optional fields: ORIGIN, BOX :pre
cs->send(STEP,nfields) # msgID with nfields :pre
cs->pack(COORDS,3*natoms,x) # vector of 3N atom coords
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
[Server replies to either kind of message]:
# required fields: FORCES, ENERGY, PRESSURE
# optional fields: ERROR :pre
cs->send(msgID,nfields) # msgID with nfields
cs->pack(FORCES,3*Natoms,f) # vector of 3N forces on atoms
cs->pack(ENERGY,1,poteng) # total potential energy of system
cs->pack(PRESSURE,6,press) # global pressure tensor (6-vector)
cs->pack_int(ERROR,flag) # server had an error (e.g. DFT non-convergence) :pre
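Putting these pieces together, a client-side driver would typically send one
SETUP message before the first step and one STEP message per timestep, along
the lines of this pseudo-code (same simplified conventions as above; reply
handling is indicated only in comments):
cs->send(SETUP,8)            # once, with the 8 required fields
cs->pack_int(DIM,dim)
cs->pack(PERIODICITY,3,xyz)
cs->pack(ORIGIN,3,origin)
cs->pack(BOX,9,box)
cs->pack_int(NATOMS,natoms)
cs->pack_int(NTYPES,ntypes)
cs->pack(TYPES,natoms,type)
cs->pack(COORDS,3*natoms,x)
                             # reply: FORCES, ENERGY, PRESSURE
loop over timesteps:         # advance positions, then request new forces
  cs->send(STEP,1)
  cs->pack(COORDS,3*natoms,x)
                             # reply: FORCES, ENERGY, PRESSURE :pre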
:line
The units for various quantities that are sent and received via
messages are defined for atomic-scale simulations in the table below.
The client and server codes (including LAMMPS) can use internal units
different from these (e.g. "real units"_units.html in LAMMPS), so long
as they convert to these units for messaging.
COORDS, ORIGIN, BOX = Angstroms
CHARGE = multiple of electron charge (1.0 is a proton)
ENERGY = eV
FORCES = eV/Angstrom
PRESSURE = bars :ul
Note that these are "metal units"_units.html in LAMMPS.
If you wish to run LAMMPS in other, non-atomic units, e.g. "lj
units"_units.html, then the client and server should exchange a UNITS
message as indicated above, and both the client and server should
agree on the units for the data they exchange.
:line
[Restrictions:]
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the "Build
package"_Build_package.html doc page for more info.
[Related commands:]
"message"_message.html, "fix client/md"_fix_client_md.html
[Default:] none

View File

@ -10,6 +10,7 @@ See these sections of the LAMMPS manual for details:
2.5 Building LAMMPS as a library (doc/Section_start.html#start_5)
6.10 Coupling LAMMPS to other codes (doc/Section_howto.html#howto_10)
6.29 Using LAMMPS in client/server mode (doc/Section_howto.html#howto_29)
In all of the examples included here, LAMMPS must first be built as a
library. Basically, in the src dir you type one of
@ -33,9 +34,13 @@ These are the sub-directories included in this directory:
simple simple example of driver code calling LAMMPS as a lib
multiple example of driver code calling multiple instances of LAMMPS
lammps_mc client/server coupling of Monte Carlo client
with LAMMPS server for energy evaluation
lammps_quest MD with quantum forces, coupling to Quest DFT code
lammps_spparks grain-growth Monte Carlo with strain via MD,
coupling to SPPARKS kinetic MC code
lammps_vasp client/server coupling of LAMMPS client with
VASP quantum DFT as server for quantum forces
library collection of useful inter-code communication routines
fortran a simple wrapper on the LAMMPS library API that
can be called from Fortran

View File

@ -0,0 +1,33 @@
# Makefile for MC
SHELL = /bin/sh
SRC = mc.cpp random_park.cpp
OBJ = $(SRC:.cpp=.o)
# change this line for your machine to path for CSlib src dir
CSLIB = /home/sjplimp/lammps/lib/message/cslib/src
# compiler/linker settings
CC = g++
CCFLAGS = -g -O3 -I$(CSLIB)
LINK = g++
LINKFLAGS = -g -O -L$(CSLIB)
# targets
mc: $(OBJ)
# first line if you built the CSlib within lib/message with ZMQ support
# second line if you built the CSlib without ZMQ support
$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -lzmq -o mc
# $(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -o mc
clean:
@rm -f *.o mc
# rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<

View File

@ -0,0 +1,128 @@
Sample Monte Carlo (MC) wrapper on LAMMPS via client/server coupling
See the MESSAGE package (doc/Section_messages.html#MESSAGE)
and Section_howto.html#howto10 for more details on how
client/server coupling works in LAMMPS.
In this dir, the mc.cpp/h files are a standalone "client" MC code. It
should be run on a single processor, though it could become a parallel
program at some point. LAMMPS is also run as a standalone executable
as a "server" on as many processors as desired using its "server mc"
command; see its doc page for details.
Messages are exchanged between MC and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the MC
program became parallel, data could also be exchanged via MPI.
The MC code makes simple MC moves by displacing a single random atom
by a small random amount. It uses LAMMPS to calculate the energy
change, and to run dynamics between MC moves.
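The accept/reject decision is the standard Metropolis criterion. Below is a
minimal, self-contained C++ sketch of that rule only; it is illustrative, not
taken from mc.cpp, and the numbers are placeholders.
// metropolis.cpp - illustrative sketch of the Metropolis accept/reject rule
// (not taken from mc.cpp; compile with: g++ metropolis.cpp -o metropolis)
#include <cmath>
#include <cstdio>
#include <random>
int main()
{
  std::mt19937 rng(12345);                      // RNG seed, as in in.mc
  std::uniform_real_distribution<double> uni(0.0, 1.0);
  double temperature = 1.0;                     // LJ reduced temperature
  double eold = -4.62, enew = -4.60;            // placeholder energies from the server
  double de = enew - eold;
  // accept if the energy drops, otherwise with probability exp(-dE/T)
  bool accept = (de <= 0.0) || (uni(rng) < std::exp(-de / temperature));
  std::printf("dE = %g -> %s\n", de, accept ? "accept" : "reject");
  return 0;
}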
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
% cd lammps/lib/message
% python Install.py -m -z # build CSlib with MPI and ZMQ support
% cd lammps/src
% make yes-message
% make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the MC client code
The source files for the MC code are in this dir. It links with the
CSlib library in lib/message/cslib.
You must first build the CSlib in serial mode, e.g.
% cd lammps/lib/message/cslib/src
% make lib # build serial and parallel lib with ZMQ support
% make lib zmq=no # build serial and parallel lib without ZMQ support
Then edit the Makefile in this dir. The CSLIB variable should be the
path to where the LAMMPS lib/message/cslib/src dir is on your system.
If you built the CSlib without ZMQ support you will also need to
comment/uncomment one line. Then you can just type
% make
and you should get an "mc" executable.
----------------
To run in client/server mode:
Both the client (MC) and server (LAMMPS) must use the same messaging
mode, namely file or zmq. This is an argument to the MC code; it can
be selected by setting the "mode" variable when you run LAMMPS. The
default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The MC code is always run in serial.
When you run, the server should print out thermodynamic info
for every MD run it performs (between MC moves). The client
will print nothing until the simulation ends, then it will
print stats about the accepted MC moves.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 1 lmp_mpi -v mode file < in.mc.server
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 4 lmp_mpi -v mode file < in.mc.server
ZMQ mode of messaging:
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 1 lmp_mpi -v mode zmq < in.mc.server
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 4 lmp_mpi -v mode zmq < in.mc.server
--------------
The input script for the MC program is in.mc. You can edit it to run
longer simulations.
500 nsteps = total # of steps of MD
100 ndynamics = # of MD steps between MC moves
0.1 delta = displacement size of MC move
1.0 temperature = used in MC Boltzmann factor
12345 seed = random number seed
--------------
The problem size that LAMMPS is computing the MC energy for and
running dynamics on is set by the x,y,z variables in the LAMMPS
in.mc.server script. The default size is 500 particles. You can
adjust the size as follows:
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles

View File

@ -0,0 +1,7 @@
# MC params
500 nsteps
100 ndynamics
0.1 delta
1.0 temperature
12345 seed

View File

@ -0,0 +1,36 @@
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then &
"message server mc file tmp.couple" &
elif "${mode} == zmq" &
"message server mc zmq *:5555" &
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc

View File

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000649929 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:02

View File

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000592947 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 3.8147e-06 on 4 procs for 0 steps with 500 atoms
59.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.815e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
106.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.75509e-06 on 4 procs for 0 steps with 500 atoms
113.2% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.755e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.563e-06 on 4 procs for 0 steps with 500 atoms
117.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.563e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 3.99351e-06 on 4 procs for 0 steps with 500 atoms
93.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.994e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 3.57628e-06 on 4 procs for 0 steps with 500 atoms
111.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.576e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:02

View File

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000741005 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.19209e-06 on 1 procs for 0 steps with 500 atoms
83.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
104.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:00

View File

@ -0,0 +1,254 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000576019 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 4.76837e-06 on 4 procs for 0 steps with 500 atoms
89.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.768e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.45707e-06 on 4 procs for 0 steps with 500 atoms
94.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.457e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
115.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.38419e-06 on 4 procs for 0 steps with 500 atoms
125.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.384e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 2.44379e-06 on 4 procs for 0 steps with 500 atoms
112.5% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.444e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 2.14577e-06 on 4 procs for 0 steps with 500 atoms
139.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:00

View File

@ -0,0 +1,263 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
// MC code used with LAMMPS in client/server mode
// MC is the client, LAMMPS is the server
// Syntax: mc infile mode modearg
// mode = file, zmq
// modearg = filename for file, localhost:5555 for zmq
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "mc.h"
#include "random_park.h"
#include "cslib.h"
using namespace CSLIB_NS;
void error(const char *);
CSlib *cs_create(char *, char *);
#define MAXLINE 256
/* ---------------------------------------------------------------------- */
// main program
int main(int narg, char **arg)
{
if (narg != 4) {
error("Syntax: mc infile mode modearg");
exit(1);
}
// initialize CSlib
CSlib *cs = cs_create(arg[2],arg[3]);
// create MC class and perform run
MC *mc = new MC(arg[1],cs);
mc->run();
// final MC stats
int naccept = mc->naccept;
int nattempt = mc->nattempt;
printf("------ MC stats ------\n");
printf("MC attempts = %d\n",nattempt);
printf("MC accepts = %d\n",naccept);
printf("Acceptance ratio = %g\n",1.0*naccept/nattempt);
// clean up
delete cs;
delete mc;
}
/* ---------------------------------------------------------------------- */
void error(const char *str)
{
printf("ERROR: %s\n",str);
exit(1);
}
/* ---------------------------------------------------------------------- */
CSlib *cs_create(char *mode, char *arg)
{
CSlib *cs = new CSlib(0,mode,arg,NULL);
// initial handshake to agree on protocol
cs->send(0,1);
cs->pack_string(1,(char *) "mc");
int msgID,nfield;
int *fieldID,*fieldtype,*fieldlen;
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
return cs;
}
// ----------------------------------------------------------------------
// MC class
// ----------------------------------------------------------------------
MC::MC(char *mcfile, void *cs_caller)
//MC::MC(char *mcfile, CSlib *cs_caller)
{
cs_void = cs_caller;
// setup MC params
options(mcfile);
// random # generator
random = new RanPark(seed);
}
/* ---------------------------------------------------------------------- */
MC::~MC()
{
free(x);
delete random;
}
/* ---------------------------------------------------------------------- */
void MC::run()
{
int iatom,accept,msgID,nfield;
double pe_initial,pe_final,edelta;
double dx,dy,dz;
double xold[3],xnew[3];
int *fieldID,*fieldtype,*fieldlen;
enum{NATOMS=1,EINIT,DISPLACE,ACCEPT,RUN};
CSlib *cs = (CSlib *) cs_void;
// one-time request for atom count from MD
// allocate 1d coord buffer
cs->send(NATOMS,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
natoms = cs->unpack_int(1);
x = (double *) malloc(3*natoms*sizeof(double));
// loop over MC moves
naccept = nattempt = 0;
for (int iloop = 0; iloop < nloop; iloop++) {
// request current energy from MD
// recv energy, coords from MD
cs->send(EINIT,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_initial = cs->unpack_double(1);
double *x = (double *) cs->unpack(2);
// perform simple MC event
// displace a single atom by random amount
iatom = (int) natoms*random->uniform();
xold[0] = x[3*iatom+0];
xold[1] = x[3*iatom+1];
xold[2] = x[3*iatom+2];
dx = 2.0*delta*random->uniform() - delta;
dy = 2.0*delta*random->uniform() - delta;
dz = 2.0*delta*random->uniform() - delta;
xnew[0] = xold[0] + dx;
xnew[1] = xold[1] + dy;
xnew[2] = xold[2] + dz;
// send atom ID and its new coords to MD
// recv new energy
cs->send(DISPLACE,2);
cs->pack_int(1,iatom+1);
cs->pack(2,4,3,xnew);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_final = cs->unpack_double(1);
// decide whether to accept/reject MC event
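// Metropolis criterion: downhill moves are always accepted; uphill moves are
// accepted with probability exp(natoms*(pe_initial-pe_final)/temperature),
// the natoms factor scaling the per-atom energies reported by the MD code
// up to a total energy difference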
if (pe_final <= pe_initial) accept = 1;
else if (temperature == 0.0) accept = 0;
else if (random->uniform() >
exp(natoms*(pe_initial-pe_final)/temperature)) accept = 0;
else accept = 1;
nattempt++;
if (accept) naccept++;
// send accept (1) or reject (0) flag to MD
cs->send(ACCEPT,1);
cs->pack_int(1,accept);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
// send dynamics timesteps
cs->send(RUN,1);
cs->pack_int(1,ndynamics);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
// send exit message to MD
cs->send(-1,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
/* ---------------------------------------------------------------------- */
void MC::options(char *filename)
{
// default params
nsteps = 0;
ndynamics = 100;
delta = 0.1;
temperature = 1.0;
seed = 12345;
// read and parse file
FILE *fp = fopen(filename,"r");
if (fp == NULL) error("Could not open MC file");
char line[MAXLINE];
char *keyword,*value;
char *eof = fgets(line,MAXLINE,fp);
while (eof) {
if (line[0] == '#') { // comment line
eof = fgets(line,MAXLINE,fp);
continue;
}
value = strtok(line," \t\n\r\f");
if (value == NULL) { // blank line
eof = fgets(line,MAXLINE,fp);
continue;
}
keyword = strtok(NULL," \t\n\r\f");
if (keyword == NULL) error("Missing keyword in MC file");
if (strcmp(keyword,"nsteps") == 0) nsteps = atoi(value);
else if (strcmp(keyword,"ndynamics") == 0) ndynamics = atoi(value);
else if (strcmp(keyword,"delta") == 0) delta = atof(value);
else if (strcmp(keyword,"temperature") == 0) temperature = atof(value);
else if (strcmp(keyword,"seed") == 0) seed = atoi(value);
else error("Unknown param in MC file");
eof = fgets(line,MAXLINE,fp);
}
// derived params
nloop = nsteps/ndynamics;
}

View File

@ -0,0 +1,40 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
#ifndef MC_H
#define MC_H
/* ---------------------------------------------------------------------- */
class MC {
public:
int naccept; // # of accepted MC events
int nattempt; // # of attempted MC events
MC(char *, void *);
~MC();
void run();
private:
int nsteps; // total # of MD steps
int ndynamics; // steps in one short dynamics run
int nloop; // nsteps/ndynamics
int natoms; // # of MD atoms
double delta; // MC displacement distance
double temperature; // MC temperature for Boltzmann criterion
double *x; // atom coords as 3N 1d vector
double energy; // global potential energy
int seed; // RNG seed
class RanPark *random;
void *cs_void; // messaging library
void options(char *);
};
#endif

View File

@ -0,0 +1,72 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
// Park/Miller RNG
#include <math.h>
#include "random_park.h"
//#include "error.h"
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
/* ---------------------------------------------------------------------- */
RanPark::RanPark(int seed_init)
{
//if (seed_init <= 0)
// error->one(FLERR,"Invalid seed for Park random # generator");
seed = seed_init;
save = 0;
}
/* ----------------------------------------------------------------------
uniform RN
------------------------------------------------------------------------- */
double RanPark::uniform()
{
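// Schrage's method: computes (IA*seed) mod IM without overflowing 32-bit ints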
int k = seed/IQ;
seed = IA*(seed-k*IQ) - IR*k;
if (seed < 0) seed += IM;
double ans = AM*seed;
return ans;
}
/* ----------------------------------------------------------------------
gaussian RN
------------------------------------------------------------------------- */
double RanPark::gaussian()
{
double first,v1,v2,rsq,fac;
if (!save) {
do {
v1 = 2.0*uniform()-1.0;
v2 = 2.0*uniform()-1.0;
rsq = v1*v1 + v2*v2;
} while ((rsq >= 1.0) || (rsq == 0.0));
fac = sqrt(-2.0*log(rsq)/rsq);
second = v1*fac;
first = v2*fac;
save = 1;
} else {
first = second;
save = 0;
}
return first;
}

View File

@ -0,0 +1,28 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef RANPARK_H
#define RANPARK_H
class RanPark {
public:
RanPark(int);
double uniform();
double gaussian();
private:
int seed,save;
double second;
};
#endif

View File

@ -0,0 +1,53 @@
# Startparameter for this run:
NWRITE = 2 write-flag & timer
PREC = normal normal or accurate (medium, high low for compatibility)
ISTART = 0 job : 0-new 1-cont 2-samecut
ICHARG = 2 charge: 1-file 2-atom 10-const
ISPIN = 1 spin polarized calculation?
LSORBIT = F spin-orbit coupling
INIWAV = 1 electr: 0-lowe 1-rand 2-diag
# Electronic Relaxation 1
ENCUT = 600.0 eV #Plane wave energy cutoff
ENINI = 600.0 initial cutoff
NELM = 100; NELMIN= 2; NELMDL= -5 # of ELM steps
EDIFF = 0.1E-05 stopping-criterion for ELM
# Ionic relaxation
EDIFFG = 0.1E-02 stopping-criterion for IOM
NSW = 0 number of steps for IOM
NBLOCK = 1; KBLOCK = 1 inner block; outer block
IBRION = -1 ionic relax: 0-MD 1-quasi-New 2-CG #No ion relaxation with -1
NFREE = 0 steps in history (QN), initial steepest desc. (CG)
ISIF = 2 stress and relaxation # 2: F-yes Sts-yes RlxIon-yes cellshape-no cellvol-no
IWAVPR = 10 prediction: 0-non 1-charg 2-wave 3-comb # 10: TMPCAR stored in memory rather than file
POTIM = 0.5000 time-step for ionic-motion
TEBEG = 3500.0; TEEND = 3500.0 temperature during run # Finite Temperature variables if AI-MD is on
SMASS = -3.00 Nose mass-parameter (am)
estimated Nose-frequenzy (Omega) = 0.10E-29 period in steps =****** mass= -0.366E-27a.u.
PSTRESS= 0.0 pullay stress
# DOS related values:
EMIN = 10.00; EMAX =-10.00 energy-range for DOS
EFERMI = 0.00
ISMEAR = 0; SIGMA = 0.10 broadening in eV -4-tet -1-fermi 0-gaus
# Electronic relaxation 2 (details)
IALGO = 48 algorithm
# Write flags
LWAVE = T write WAVECAR
LCHARG = T write CHGCAR
LVTOT = F write LOCPOT, total local potential
LVHAR = F write LOCPOT, Hartree potential only
LELF = F write electronic localiz. function (ELF)
# Dipole corrections
LMONO = F monopole corrections only (constant potential shift)
LDIPOL = F correct potential (dipole corrections)
IDIPOL = 0 1-x, 2-y, 3-z, 4-all directions
EPSILON= 1.0000000 bulk dielectric constant
# Exchange correlation treatment:
GGA = -- GGA type

View File

@ -0,0 +1,6 @@
K-Points
0
Monkhorst Pack
15 15 15
0 0 0

View File

@ -0,0 +1,11 @@
W unit cell
1.0
3.16 0.00000000 0.00000000
0.00000000 3.16 0.00000000
0.00000000 0.00000000 3.16
W
2
Direct
0.00000000 0.00000000 0.00000000
0.50000000 0.50000000 0.50000000

View File

@ -0,0 +1,149 @@
Sample LAMMPS MD wrapper on VASP quantum DFT via client/server
coupling
See the MESSAGE package (doc/Section_messages.html#MESSAGE) and
Section_howto.html#howto10 for more details on how client/server
coupling works in LAMMPS.
In this dir, the vasp_wrap.py is a wrapper on the VASP quantum DFT
code so it can work as a "server" code which LAMMPS drives as a
"client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends VASP a current set of coordinates each timestep,
VASP computes forces and energy and virial and returns that info to
LAMMPS.
Messages are exchanged between LAMMPS and VASP via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
vasp_wrap.py program became parallel, or the CSlib library calls were
integrated into VASP directly, then data could also be exchanged via
MPI.
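As a rough sketch of the server side of this exchange, the lines below show
only how a Python server opens the CSlib connection and performs the initial
handshake. They use the same CSlib calls as the vasp_wrap.py script included
below; the "tmp.couple" file name, the "*:5555" port, and the "md" protocol
string are taken from that script, and the per-timestep message loop is
omitted.
import sys
from cslib import CSlib
mode = "file"    # or "zmq", matching how the LAMMPS client was launched
if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
else: cs = CSlib(1,mode,"*:5555",None)
# initial handshake: the client sends message 0 containing the protocol string
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0 or cs.unpack_string(1) != "md":
    sys.exit("Bad client/server handshake")
cs.send(0,0)     # acknowledge, then enter the per-timestep recv/send loop
The full server loop in vasp_wrap.py follows this same recv/unpack/pack/send
pattern for every timestep.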
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the vasp_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This will make shared library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it cannot, you will get an error when
you run vasp_wrap.py.
You can do this by augmenting two environment variables, either
from the command line, or in your shell start-up script.
Here is the sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
----------------
Prepare to use VASP and the vasp_wrap.py script
You can run the vasp_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
vasprun.xml file output by a previous VASP run.
But note that the as-is version of vasp_wrap.py will not attempt to
run VASP.
To have the wrapper actually launch VASP, edit the 1st vaspcmd line at the top of
vasp_wrap.py to be the launch command needed to run VASP on your
system. It can be a command to run VASP in serial or in parallel,
e.g. an mpirun command. Then comment out the 2nd vaspcmd line
immediately following it.
Ensure you have the necessary VASP input files in this
directory, suitable for the VASP calculation you want to perform:
INCAR
KPOINTS
POSCAR_template
POTCAR
Examples of all but the POTCAR file are provided. As explained below,
POSCAR_W is an input file for a 2-atom unit cell of tungsten and can
be used to test the LAMMPS/VASP coupling. The POTCAR file is a
proprietary VASP file, so use one from your VASP installation.
Note that the POSCAR_template file should be matched to the LAMMPS
input script (# of atoms and atom types, box size, etc). The provided
POSCAR_W matches in.client.W.
Once you run VASP yourself, the vasprun.xml file will be overwritten.
----------------
To run in client/server mode:
NOTE: The vasp_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (vasp_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
vasp_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The vasp_wrap.py code is always run in serial, but it
launches VASP from Python via an mpirun command which can run VASP
itself in parallel.
When you run, the LAMMPS client should print out thermodynamic info every
timestep which corresponds to the forces and virial computed by VASP.
VASP will also generate output files each timestep. The vasp_wrap.py
script could be generalized to archive these.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file < in.client.W
% python vasp_wrap.py file POSCAR_W
% mpirun -np 2 lmp_mpi -v mode file < in.client.W
% python vasp_wrap.py file POSCAR_W
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq < in.client.W
% python vasp_wrap.py zmq POSCAR_W
% mpirun -np 2 lmp_mpi -v mode zmq < in.client.W
% python vasp_wrap.py zmq POSCAR_W

View File

@ -0,0 +1,15 @@
LAMMPS W data file
2 atoms
1 atom types
0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi
Atoms
1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58

View File

@ -0,0 +1,34 @@
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

View File

@ -0,0 +1,76 @@
LAMMPS (22 Aug 2018)
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md zmq localhost:5555
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
2 atoms
Time spent = 0.000148058 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -48.030793 -78159.503
1 298.24318 0 0 -48.03102 -78167.19
2 296.85584 0 0 -48.031199 -78173.26
3 295.83795 0 0 -48.031331 -78177.714
Loop time of 0.457491 on 2 procs for 3 steps with 2 atoms
Performance: 0.567 ns/day, 42.360 hours/ns, 6.558 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 1.3828e-05 | 2.9922e-05 | 4.6015e-05 | 0.0 | 0.01
Output | 7.5817e-05 | 9.3937e-05 | 0.00011206 | 0.0 | 0.02
Modify | 0.45735 | 0.45736 | 0.45736 | 0.0 | 99.97
Other | | 1.204e-05 | | | 0.00
Nlocal: 1 ave 1 max 1 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 4 ave 4 max 4 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:01:21

View File

@ -0,0 +1,300 @@
#!/usr/bin/env python
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------
# Syntax: vasp_wrap.py file/zmq POSCARfile
# wrapper on VASP to act as server program using CSlib
# receives message with list of coords from client
# creates VASP inputs
# invokes VASP to calculate self-consistent energy of that config
# reads VASP outputs
# sends message with energy, forces, pressure to client
# NOTES:
# check to insure basic VASP input files are in place?
# could archive VASP input/output in special filenames or dirs?
# need to check that POTCAR file is consistent with atom ordering?
# could make syntax for launching VASP more flexible
# e.g. command-line arg for # of procs
# detect if VASP had an error and return ERROR field, e.g. non-convergence ??
from __future__ import print_function
import sys
version = sys.version_info[0]
if version == 3:
sys.exit("The CSlib python wrapper does not yet support python 3")
import subprocess
import xml.etree.ElementTree as ET
from cslib import CSlib
# comment out 2nd line once 1st line is correct for your system
vaspcmd = "srun -N 1 --ntasks-per-node=4 " + \
"-n 4 /projects/vasp/2017-build/cts1/vasp5.4.4/vasp_tfermi/bin/vasp_std"
vaspcmd = "touch tmp"
# enums matching FixClientMD class in LAMMPS
SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)
# -------------------------------------
# functions
# error message and exit
def error(txt):
print("ERROR:",txt)
sys.exit(1)
# -------------------------------------
# read initial VASP POSCAR file to setup problem
# return natoms,ntypes,box
def vasp_setup(poscar):
ps = open(poscar,'r').readlines()
# box size
words = ps[2].split()
xbox = float(words[0])
words = ps[3].split()
ybox = float(words[1])
words = ps[4].split()
zbox = float(words[2])
box = [xbox,ybox,zbox]
ntypes = 0
natoms = 0
words = ps[6].split()
for word in words:
if word == '#': break
ntypes += 1
natoms += int(word)
return natoms,ntypes,box
# -------------------------------------
# write a new POSCAR file for VASP
def poscar_write(poscar,natoms,ntypes,types,coords,box):
psold = open(poscar,'r').readlines()
psnew = open("POSCAR",'w')
# header, including box size
psnew.write(psold[0])
psnew.write(psold[1])
psnew.write("%g %g %g\n" % (box[0],box[1],box[2]))
psnew.write("%g %g %g\n" % (box[3],box[4],box[5]))
psnew.write("%g %g %g\n" % (box[6],box[7],box[8]))
psnew.write(psold[5])
psnew.write(psold[6])
# per-atom coords
# grouped by types
psnew.write("Cartesian\n")
for itype in range(1,ntypes+1):
for i in range(natoms):
if types[i] != itype: continue
x = coords[3*i+0]
y = coords[3*i+1]
z = coords[3*i+2]
aline = " %g %g %g\n" % (x,y,z)
psnew.write(aline)
psnew.close()
# -------------------------------------
# read a VASP output vasprun.xml file
# uses ElementTree module
# see https://docs.python.org/2/library/xml.etree.elementtree.html
def vasprun_read():
tree = ET.parse('vasprun.xml')
root = tree.getroot()
#fp = open("vasprun.xml","r")
#root = ET.parse(fp)
scsteps = root.findall('calculation/scstep')
energy = scsteps[-1].find('energy')
for child in energy:
if child.attrib["name"] == "e_0_energy":
eout = float(child.text)
fout = []
sout = []
varrays = root.findall('calculation/varray')
for varray in varrays:
if varray.attrib["name"] == "forces":
forces = varray.findall("v")
for line in forces:
fxyz = line.text.split()
fxyz = [float(value) for value in fxyz]
fout += fxyz
if varray.attrib["name"] == "stress":
tensor = varray.findall("v")
stensor = []
for line in tensor:
sxyz = line.text.split()
sxyz = [float(value) for value in sxyz]
stensor.append(sxyz)
sxx = stensor[0][0]
syy = stensor[1][1]
szz = stensor[2][2]
# symmetrize off-diagonal components
sxy = 0.5 * (stensor[0][1] + stensor[1][0])
sxz = 0.5 * (stensor[0][2] + stensor[2][0])
syz = 0.5 * (stensor[1][2] + stensor[2][1])
sout = [sxx,syy,szz,sxy,sxz,syz]
#fp.close()
return eout,fout,sout
# -------------------------------------
# main program
# command-line args
if len(sys.argv) != 3:
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
sys.exit(1)
mode = sys.argv[1]
poscar_template = sys.argv[2]
if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
elif mode == "zmq": cs = CSlib(1,mode,"*:5555",None)
else:
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
sys.exit(1)
natoms,ntypes,box = vasp_setup(poscar_template)
# initial message for MD protocol
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0: error("Bad initial client/server handshake")
protocol = cs.unpack_string(1)
if protocol != "md": error("Mismatch in client/server protocol")
cs.send(0,0)
# endless server loop
while 1:
# recv message from client
# msgID = 0 = all-done message
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID < 0: break
# SETUP receive at beginning of each run
# required fields: DIM, PERIODICITY, ORIGIN, BOX,
# NATOMS, NTYPES, TYPES, COORDS
# optional fields: others in enum above, but VASP ignores them
if msgID == SETUP:
origin = []
box = []
natoms_recv = ntypes_recv = 0
types = []
coords = []
for field in fieldID:
if field == DIM:
dim = cs.unpack_int(DIM)
if dim != 3: error("VASP only performs 3d simulations")
elif field == PERIODICITY:
periodicity = cs.unpack(PERIODICITY,1)
if not periodicity[0] or not periodicity[1] or not periodicity[2]:
error("VASP wrapper only currently supports fully periodic systems")
elif field == ORIGIN:
origin = cs.unpack(ORIGIN,1)
elif field == BOX:
box = cs.unpack(BOX,1)
elif field == NATOMS:
natoms_recv = cs.unpack_int(NATOMS)
if natoms != natoms_recv:
error("VASP wrapper mis-match in number of atoms")
elif field == NTYPES:
ntypes_recv = cs.unpack_int(NTYPES)
if ntypes != ntypes_recv:
error("VASP wrapper mis-match in number of atom types")
elif field == TYPES:
types = cs.unpack(TYPES,1)
elif field == COORDS:
coords = cs.unpack(COORDS,1)
if not origin or not box or not natoms or not ntypes or \
not types or not coords:
error("Required VASP wrapper setup field not received");
# STEP receive at each timestep of run or minimization
# required fields: COORDS
# optional fields: ORIGIN, BOX
elif msgID == STEP:
coords = []
for field in fieldID:
if field == COORDS:
coords = cs.unpack(COORDS,1)
elif field == ORIGIN:
origin = cs.unpack(ORIGIN,1)
elif field == BOX:
box = cs.unpack(BOX,1)
if not coords: error("Required VASP wrapper step field not received");
else: error("VASP wrapper received unrecognized message")
# create POSCAR file
poscar_write(poscar_template,natoms,ntypes,types,coords,box)
# invoke VASP
print("\nLaunching VASP ...")
print(vaspcmd)
subprocess.check_output(vaspcmd,stderr=subprocess.STDOUT,shell=True)
# process VASP output
energy,forces,virial = vasprun_read()
# convert VASP kilobars to bars
for i,value in enumerate(virial): virial[i] *= 1000.0
# return forces, energy, pressure to client
cs.send(msgID,3);
cs.pack(FORCES,4,3*natoms,forces)
cs.pack_double(ENERGY,energy)
cs.pack(VIRIAL,4,6,virial)
# final reply to client
cs.send(0,0)
# clean-up
del cs

File diff suppressed because it is too large

View File

@ -83,6 +83,7 @@ kim: use of potentials in Knowledge Base for Interatomic Models (KIM)
latte: use of LATTE density-functional tight-binding quantum code
meam: MEAM test for SiC and shear (same as shear examples)
melt: rapid melt of 3d LJ system
message: client/server coupling of 2 codes
micelle: self-assembly of small lipid-like molecules into 2d bilayers
min: energy minimization of 2d LJ melt
mscg: parameterize a multi-scale coarse-graining (MSCG) model

examples/message/README
View File

@ -0,0 +1,117 @@
This dir contains scripts that demonstrate how to use LAMMPS as both a
client and server code to run a simple MD simulation. LAMMPS as a
client performs the MD timestepping. LAMMPS as a server provides the
energy and forces between interacting particles. Every timestep the
LAMMPS client sends a message to the LAMMPS server and receives a
response message in return.
Another code could replace LAMMPS as the client, e.g. another MD code
which wants to use a LAMMPS potential. Another code could replace
LAMMPS as the server, e.g. a quantum code computing quantum forces, so
that ab initio MD could be performed. See an example of the latter in
examples/COUPLE/lammps_vasp.
See the doc pages for the "MESSAGE package"
(Package_details.html#PKG-MESSAGE) and "Howto client/server"
(Howto_client_server.html) for more details on how client/server
coupling works in LAMMPS.
--------------
Note that you can adjust the problem size run by these scripts by
setting "x,y,z" variables when you run LAMMPS. The default problem size
is x = y = z = 5, which is 500 particles.
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles
This applies to either in.message or in.message.client
You can also run the in.message scripts with an NPT integrator
instead of NVE, if you comment/uncomment the correct lines.
The client and server scripts define a "mode" variable
which can be set to file, zmq, mpione, or mpitwo,
as illustrated below.
--------------
To run this problem in the traditional way (no client/server coupling)
do one of these:
% lmp_serial < in.message
% mpirun -np 4 lmp_mpi < in.message
Or run with in.message.tilt.
--------------
To run in client/server mode:
Both the client and server script must use the same messaging mode.
This can be selected by setting the "mode" variable when you run
LAMMPS. The default mode = file. The other options for the mode
variable are zmq, mpione, mpitwo.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means any of
the 4 messaging modes can be used.
The next sections illustrate how to launch LAMMPS twice, once as a
client, once as a server, for each of the messaging modes.
In all cases, the client should print out thermodynamic info for 50
steps. The server should print out setup info, then print nothing until
the client exits, at which point the server should also exit.
The examples below show launching LAMMPS twice from the same window
(or batch script), using the "&" character to launch the first time in
the background. For all modes except mpi/one, you could also launch
twice in separate windows on your desktop machine. It does not matter
whether you launch the client or server first.
In these examples either the client or server can be run on one or
more processors. If running in a non-MPI mode (file or zmq) you can
launch LAMMPS on a single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch LAMMPS both
times via mpirun, even if one or both of them runs on a single
processor. This is so that MPI can figure out how to connect both MPI
processes together to exchange MPI messages between them.
--------------
NOTE: the Script.sh file has commands to perform all the
runs listed below.
--------------
File or ZMQ or mpi/two modes of messaging:
% mpirun -np 1 lmp_mpi -v mode file -log log.client < in.message.client &
% mpirun -np 2 lmp_mpi -v mode file -log log.server < in.message.server
% mpirun -np 4 lmp_mpi -v mode zmq -log log.client < in.message.client &
% mpirun -np 1 lmp_mpi -v mode zmq -log log.server < in.message.server
% mpirun -np 2 lmp_mpi -v mode mpitwo -log log.client < in.message.client &
% mpirun -np 4 lmp_mpi -v mode mpitwo -log log.server < in.message.server
Or run with in.message.tilt.client/server.
Don't run the tilt files with the "file" mode; they run too slow.
--------------
Mpi/one mode of messaging:
Launch LAMMPS twice in a single mpirun command:
% mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -v mode mpione -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -v mode mpione -log log.server
Or run with in.message.tilt.client/server.
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is set to one
integer value for the 1st app, and another value for the 2nd app.

View File

@ -0,0 +1,55 @@
# sample launch script
# message on 1 proc each
mpirun -np 1 lmp_mpi -log log.message.g++.1 < in.message
mpirun -np 1 lmp_mpi -v mode file -log log.message.client.file.g++.1 < in.message.client &
mpirun -np 1 lmp_mpi -v mode file -log log.message.server.file.g++.1 < in.message.server
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.client.zmq.g++.1 < in.message.client &
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.server.zmq.g++.1 < in.message.server
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.client.mpitwo.g++.1 < in.message.client &
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.server.mpitwo.g++.1 < in.message.server
mpirun -np 1 lmp_mpi -m 0 -in in.message.client -v mode mpione -log log.message.client.mpione.g++.1 : -np 1 lmp_mpi -m 1 -in in.message.server -v mode mpione -log log.message.server.mpione.g++.1
# message on 2/4 procs each
mpirun -np 4 lmp_mpi -log log.message.g++.4 < in.message
mpirun -np 2 lmp_mpi -v mode file -log log.message.client.file.g++.2 < in.message.client &
mpirun -np 4 lmp_mpi -v mode file -log log.message.server.file.g++.4 < in.message.server
mpirun -np 2 lmp_mpi -v mode zmq -log log.message.client.zmq.g++.2 < in.message.client &
mpirun -np 4 lmp_mpi -v mode zmq -log log.message.server.zmq.g++.4 < in.message.server
mpirun -np 2 lmp_mpi -v mode mpitwo -log log.message.client.mpitwo.g++.2 < in.message.client &
mpirun -np 4 lmp_mpi -v mode mpitwo -log log.message.server.mpitwo.g++.4 < in.message.server
mpirun -np 2 lmp_mpi -m 0 -in in.message.client -v mode mpione -log log.message.client.mpione.g++.2 : -np 4 lmp_mpi -m 1 -in in.message.server -v mode mpione -log log.message.server.mpione.g++.4
# message.tilt on 1 proc each
mpirun -np 1 lmp_mpi -log log.message.tilt.g++.1 < in.message.tilt
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.tilt.client.zmq.g++.1 < in.message.tilt.client &
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.tilt.server.zmq.g++.1 < in.message.tilt.server
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.tilt.client.mpitwo.g++.1 < in.message.tilt.client &
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.tilt.server.mpitwo.g++.1 < in.message.tilt.server
mpirun -np 1 lmp_mpi -m 0 -in in.message.tilt.client -v mode mpione -log log.message.tilt.client.mpione.g++.1 : -np 1 lmp_mpi -m 1 -in in.message.tilt.server -v mode mpione -log log.message.tilt.server.mpione.g++.1
# message.tilt on 2/4 procs each
mpirun -np 4 lmp_mpi -log log.message.tilt.g++.4 < in.message.tilt
mpirun -np 2 lmp_mpi -v mode zmq -log log.message.tilt.client.zmq.g++.2 < in.message.tilt.client &
mpirun -np 4 lmp_mpi -v mode zmq -log log.message.tilt.server.zmq.g++.4 < in.message.tilt.server
mpirun -np 2 lmp_mpi -v mode mpitwo -log log.message.tilt.client.mpitwo.g++.2 < in.message.tilt.client &
mpirun -np 4 lmp_mpi -v mode mpitwo -log log.message.tilt.server.mpitwo.g++.4 < in.message.tilt.server
mpirun -np 2 lmp_mpi -m 0 -in in.message.tilt.client -v mode mpione -log log.message.tilt.client.mpione.g++.2 : -np 4 lmp_mpi -m 1 -in in.message.tilt.server -v mode mpione -log log.message.tilt.server.mpione.g++.4

View File

@ -0,0 +1,29 @@
# 3d Lennard-Jones melt - no client/server mode
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
thermo 10
run 50

View File

@ -0,0 +1,41 @@
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
elif "${mode} == mpione" &
"message client md mpi/one" &
elif "${mode} == mpitwo" &
"message client md mpi/two tmp.couple"
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50

View File

@ -0,0 +1,29 @@
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then &
"message server md file tmp.couple" &
elif "${mode} == zmq" &
"message server md zmq *:5555" &
elif "${mode} == mpione" &
"message server md mpi/one" &
elif "${mode} == mpitwo" &
"message server md mpi/two tmp.couple"
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 1 0 1 0 1
create_box 1 box
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md

View File

@ -0,0 +1,30 @@
# 2d NEMD simulation - no client/server mode
units lj
atom_style atomic
dimension 2
lattice sq2 0.8442
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
create_atoms 1 box
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000

View File

@ -0,0 +1,42 @@
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
elif "${mode} == mpione" &
"message client md mpi/one" &
elif "${mode} == mpitwo" &
"message client md mpi/two tmp.couple"
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
create_atoms 1 box
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000

View File

@ -0,0 +1,31 @@
# 2d NEMD simulation - server script
variable mode index file
if "${mode} == file" then &
"message server md file tmp.couple" &
elif "${mode} == zmq" &
"message server md zmq *:5555" &
elif "${mode} == mpione" &
"message server md mpi/one" &
elif "${mode} == mpitwo" &
"message server md mpi/two tmp.couple"
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
create_atoms 1 box
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000752926 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 5.0251 on 1 procs for 50 steps with 500 atoms
Performance: 4298.421 tau/day, 9.950 timesteps/s
0.1% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 9.7752e-06 | 9.7752e-06 | 9.7752e-06 | 0.0 | 0.00
Comm | 0.00014925 | 0.00014925 | 0.00014925 | 0.0 | 0.00
Output | 0.00023127 | 0.00023127 | 0.00023127 | 0.0 | 0.00
Modify | 5.0242 | 5.0242 | 5.0242 | 0.0 | 99.98
Other | | 0.0004668 | | | 0.01
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:05

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000613928 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 5.02384 on 2 procs for 50 steps with 500 atoms
Performance: 4299.499 tau/day, 9.953 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 1.9073e-06 | 3.3379e-06 | 4.7684e-06 | 0.0 | 0.00
Comm | 0.00020742 | 0.00021136 | 0.00021529 | 0.0 | 0.00
Output | 0.00026989 | 0.00048053 | 0.00069118 | 0.0 | 0.01
Modify | 5.0171 | 5.0199 | 5.0228 | 0.1 | 99.92
Other | | 0.003203 | | | 0.06
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:05

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000540018 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0403891 on 1 procs for 50 steps with 500 atoms
Performance: 534798.272 tau/day, 1237.959 timesteps/s
99.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 4.7684e-06 | 4.7684e-06 | 4.7684e-06 | 0.0 | 0.01
Comm | 6.3181e-05 | 6.3181e-05 | 6.3181e-05 | 0.0 | 0.16
Output | 9.5367e-05 | 9.5367e-05 | 9.5367e-05 | 0.0 | 0.24
Modify | 0.040053 | 0.040053 | 0.040053 | 0.0 | 99.17
Other | | 0.0001726 | | | 0.43
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000475883 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0208495 on 2 procs for 50 steps with 500 atoms
Performance: 1035997.740 tau/day, 2398.143 timesteps/s
99.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 3.0994e-06 | 4.53e-06 | 5.9605e-06 | 0.0 | 0.02
Comm | 0.00012422 | 0.00012457 | 0.00012493 | 0.0 | 0.60
Output | 5.7697e-05 | 7.987e-05 | 0.00010204 | 0.0 | 0.38
Modify | 0.020463 | 0.020464 | 0.020466 | 0.0 | 98.15
Other | | 0.0001761 | | | 0.84
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/two tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000603914 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.069119 on 1 procs for 50 steps with 500 atoms
Performance: 312504.627 tau/day, 723.390 timesteps/s
42.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 7.1526e-06 | 7.1526e-06 | 7.1526e-06 | 0.0 | 0.01
Comm | 0.0001049 | 0.0001049 | 0.0001049 | 0.0 | 0.15
Output | 0.00014019 | 0.00014019 | 0.00014019 | 0.0 | 0.20
Modify | 0.068602 | 0.068602 | 0.068602 | 0.0 | 99.25
Other | | 0.0002651 | | | 0.38
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/two tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000667095 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0190214 on 2 procs for 50 steps with 500 atoms
Performance: 1135563.588 tau/day, 2628.619 timesteps/s
58.5% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 1.9073e-06 | 2.861e-06 | 3.8147e-06 | 0.0 | 0.02
Comm | 0.00017238 | 0.00017989 | 0.0001874 | 0.0 | 0.95
Output | 0.00012803 | 0.00015497 | 0.00018191 | 0.0 | 0.81
Modify | 0.018065 | 0.018181 | 0.018297 | 0.1 | 95.58
Other | | 0.0005029 | | | 2.64
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md zmq localhost:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000734091 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0778341 on 1 procs for 50 steps with 500 atoms
Performance: 277513.222 tau/day, 642.392 timesteps/s
11.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 7.8678e-06 | 7.8678e-06 | 7.8678e-06 | 0.0 | 0.01
Comm | 8.3685e-05 | 8.3685e-05 | 8.3685e-05 | 0.0 | 0.11
Output | 0.00011373 | 0.00011373 | 0.00011373 | 0.0 | 0.15
Modify | 0.07734 | 0.07734 | 0.07734 | 0.0 | 99.37
Other | | 0.0002885 | | | 0.37
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,79 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md zmq localhost:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000570059 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0416595 on 2 procs for 50 steps with 500 atoms
Performance: 518489.499 tau/day, 1200.207 timesteps/s
56.5% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 2.861e-06 | 3.3379e-06 | 3.8147e-06 | 0.0 | 0.01
Comm | 0.00013399 | 0.00013685 | 0.00013971 | 0.0 | 0.33
Output | 8.6784e-05 | 0.00011206 | 0.00013733 | 0.0 | 0.27
Modify | 0.040948 | 0.04103 | 0.041112 | 0.0 | 98.49
Other | | 0.0003769 | | | 0.90
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,85 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - no client/server mode
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000682831 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
thermo 10
run 50
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.143 | 3.143 | 3.143 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
10 1.1347688 -6.3153532 0 -4.6166043 -2.6072847
20 0.628166 -5.5624945 0 -4.62213 1.0186262
30 0.73767593 -5.7297655 0 -4.6254647 0.49629637
40 0.69517962 -5.6660345 0 -4.6253506 0.69303877
50 0.70150496 -5.6761362 0 -4.6259832 0.59551518
Loop time of 0.039681 on 1 procs for 50 steps with 500 atoms
Performance: 544341.699 tau/day, 1260.050 timesteps/s
99.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.029993 | 0.029993 | 0.029993 | 0.0 | 75.59
Neigh | 0.0073051 | 0.0073051 | 0.0073051 | 0.0 | 18.41
Comm | 0.0012736 | 0.0012736 | 0.0012736 | 0.0 | 3.21
Output | 0.00012803 | 0.00012803 | 0.00012803 | 0.0 | 0.32
Modify | 0.00053287 | 0.00053287 | 0.00053287 | 0.0 | 1.34
Other | | 0.000448 | | | 1.13
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1946 ave 1946 max 1946 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18820 ave 18820 max 18820 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18820
Ave neighs/atom = 37.64
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,85 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - no client/server mode
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000656843 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
thermo 10
run 50
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.109 | 3.109 | 3.109 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
10 1.1347688 -6.3153532 0 -4.6166043 -2.6072847
20 0.628166 -5.5624945 0 -4.62213 1.0186262
30 0.73767593 -5.7297655 0 -4.6254647 0.49629637
40 0.69517962 -5.6660345 0 -4.6253506 0.69303877
50 0.70150496 -5.6761362 0 -4.6259832 0.59551518
Loop time of 0.0131519 on 4 procs for 50 steps with 500 atoms
Performance: 1642350.242 tau/day, 3801.737 timesteps/s
97.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.006074 | 0.0065379 | 0.0072589 | 0.6 | 49.71
Neigh | 0.0014219 | 0.0015552 | 0.0017018 | 0.3 | 11.82
Comm | 0.003546 | 0.0043943 | 0.0049584 | 0.8 | 33.41
Output | 0.000108 | 0.00012845 | 0.00016999 | 0.0 | 0.98
Modify | 0.00014353 | 0.00014949 | 0.00015569 | 0.0 | 1.14
Other | | 0.0003865 | | | 2.94
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Nghost: 1091 ave 1094 max 1089 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Neighs: 4705 ave 4849 max 4648 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Total # of neighbors = 18820
Ave neighs/atom = 37.64
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md file tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:05

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md file tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:05

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/one
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/one
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/two tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/two tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md zmq *:5555
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -0,0 +1,44 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md zmq *:5555
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -0,0 +1,130 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000485897 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0086399 0 1.3760711 6.8772078 0.61567613
2000 1.0707188 0 1.3744107 6.628097 1.2313523
3000 1.0627515 0 1.310096 6.1647179 1.8470284
4000 0.94091054 0 1.268976 6.4723215 2.4627045
5000 1.0218949 0 1.2430242 5.6945977 3.0783806
6000 0.98649481 0 1.1997565 5.2870413 3.6940568
7000 0.9047957 0 1.1461262 5.9291636 4.3097329
8000 0.85697614 0 1.0437412 5.0652097 4.925409
9000 0.84208329 0 1.109218 6.1749808 5.5410852
10000 0.86418108 0 1.1594773 6.2581867 6.1567613
11000 0.95136356 0 1.1650901 5.8389085 6.7724374
12000 0.94571583 0 1.2210342 6.2498816 7.3881135
13000 0.95994288 0 1.2172042 5.9608165 -7.3881135
14000 0.99053999 0 1.2925597 6.9994446 -6.7724374
15000 1.0316726 0 1.3346023 6.6902672 -6.1567613
16000 0.99537481 0 1.3227696 7.0301123 -5.5410852
17000 1.0306843 0 1.3101457 6.4750102 -4.925409
18000 1.071154 0 1.2947547 5.695888 -4.3097329
19000 0.97120752 0 1.3035465 7.3945362 -3.6940568
20000 0.97198994 0 1.2244663 6.0047605 -3.0783806
21000 0.97943545 0 1.2393394 6.3871012 -2.4627045
22000 0.98550707 0 1.1768148 5.019967 -1.8470284
23000 0.96920052 0 1.1730698 5.7944947 -1.2313523
24000 0.94069959 0 1.184119 5.8434876 -0.61567613
25000 0.91569312 0 1.1642118 5.668997 0
26000 0.98882932 0 1.1999248 5.0115507 0.61567613
27000 0.8972608 0 1.2556546 7.0837158 1.2313523
28000 0.93554756 0 1.2221911 5.9302841 1.8470284
29000 0.97894608 0 1.2168736 5.5766766 2.4627045
30000 0.97877055 0 1.2575839 6.4308887 3.0783806
31000 1.0002387 0 1.2338069 5.3873124 3.6940568
32000 0.89608618 0 1.2382021 6.7892204 4.3097329
33000 0.87439302 0 1.2252635 7.078134 4.925409
34000 1.076102 0 1.2991393 5.5556892 5.5410852
35000 1.0018689 0 1.272105 6.1320483 6.1567613
36000 0.93327214 0 1.2428039 7.0030867 6.7724374
37000 1.0770236 0 1.3002931 5.4996076 7.3881135
38000 0.98715132 0 1.215562 5.5958335 -7.3881135
39000 0.95028417 0 1.2566706 6.4133713 -6.7724374
40000 1.0445585 0 1.241151 5.3589806 -6.1567613
41000 0.93799713 0 1.2109086 6.4957845 -5.5410852
42000 0.99231038 0 1.2228781 5.9363471 -4.925409
43000 0.97913815 0 1.1854842 5.8837987 -4.3097329
44000 0.86748838 0 1.1616201 6.8991278 -3.6940568
45000 0.96284421 0 1.1549383 5.1226785 -3.0783806
46000 0.98701623 0 1.170581 4.9719567 -2.4627045
47000 0.92618683 0 1.2146576 6.7100075 -1.8470284
48000 1.0092593 0 1.2523988 5.7067864 -1.2313523
49000 1.0187472 0 1.271608 5.3355092 -0.61567613
50000 1.0194881 0 1.2831094 6.2449759 0
Loop time of 1.74559 on 1 procs for 50000 steps with 160 atoms
Performance: 12374053.445 tau/day, 28643.642 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0020533 | 0.0020533 | 0.0020533 | 0.0 | 0.12
Comm | 0.015517 | 0.015517 | 0.015517 | 0.0 | 0.89
Output | 0.00052404 | 0.00052404 | 0.00052404 | 0.0 | 0.03
Modify | 1.6784 | 1.6784 | 1.6784 | 0.0 | 96.15
Other | | 0.04905 | | | 2.81
Nlocal: 160 ave 160 max 160 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5270
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,130 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000453949 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0876599 0 1.3637783 6.063363 0.61567613
2000 1.0722996 0 1.4108622 7.0518942 1.2313523
3000 1.0580774 0 1.3845895 6.0395275 1.8470284
4000 1.0068006 0 1.3804387 7.0944598 2.4627045
5000 0.95391814 0 1.2578438 6.1878831 3.0783806
6000 0.93492945 0 1.2711287 7.0440517 3.6940568
7000 0.94107853 0 1.2384371 6.1438077 4.3097329
8000 0.89711744 0 1.145748 6.3558305 4.925409
9000 0.90190304 0 1.0860684 4.957098 5.5410852
10000 0.84255749 0 1.0376892 5.2351795 6.1567613
11000 0.90250882 0 1.0497695 4.3844838 6.7724374
12000 0.83461274 0 1.0281949 5.1534361 7.3881135
13000 0.80315331 0 1.0226333 5.757222 -7.3881135
14000 0.81820939 0 0.99276466 4.6943725 -6.7724374
15000 0.8239631 0 1.0408289 5.1669006 -6.1567613
16000 0.88908894 0 1.1554855 6.3510278 -5.5410852
17000 0.98268136 0 1.2086981 5.6302847 -4.925409
18000 1.0098162 0 1.3687676 7.5243587 -4.3097329
19000 1.0795086 0 1.4562691 7.639418 -3.6940568
20000 1.1932155 0 1.5278988 7.0668432 -3.0783806
21000 1.2424296 0 1.6048792 7.959585 -2.4627045
22000 1.297169 0 1.7421262 8.9432388 -1.8470284
23000 1.2863494 0 1.7236774 8.3584973 -1.2313523
24000 1.4084347 0 1.7370339 7.2729078 -0.61567613
25000 1.3339728 0 1.6883255 7.529098 0
26000 1.1483243 0 1.5155578 7.3869994 0.61567613
27000 1.1372606 0 1.4368323 7.0580136 1.2313523
28000 1.0518579 0 1.355311 6.478857 1.8470284
29000 1.0581145 0 1.2535509 5.3697479 2.4627045
30000 0.93612564 0 1.185532 5.5520142 3.0783806
31000 0.94387516 0 1.1716454 5.8655485 3.6940568
32000 0.83953515 0 1.0737746 5.5551953 4.3097329
33000 0.84862926 0 1.0564042 5.7795428 4.925409
34000 0.83621877 0 1.079387 5.7514243 5.5410852
35000 0.86938506 0 1.031783 4.5897825 6.1567613
36000 0.88990609 0 1.0807597 5.3751744 6.7724374
37000 0.89534631 0 1.1238882 5.2400355 7.3881135
38000 0.98545003 0 1.2121125 5.7783854 -7.3881135
39000 0.96737778 0 1.2472934 6.1139 -6.7724374
40000 1.0664194 0 1.2956811 5.7353661 -6.1567613
41000 1.0681494 0 1.3269435 6.3102722 -5.5410852
42000 1.0875422 0 1.3963739 7.1208066 -4.925409
43000 1.0968173 0 1.3388062 6.1704339 -4.3097329
44000 1.1182109 0 1.3773214 7.0872686 -3.6940568
45000 1.1243261 0 1.432186 6.884782 -3.0783806
46000 1.039713 0 1.4389721 7.5585257 -2.4627045
47000 1.0816108 0 1.4100361 6.4611126 -1.8470284
48000 0.97637127 0 1.3605389 7.3992744 -1.2313523
49000 1.0361978 0 1.2721873 5.8166109 -0.61567613
50000 0.92367087 0 1.1875669 6.4685214 0
Loop time of 2.82785 on 2 procs for 50000 steps with 160 atoms
Performance: 7638300.565 tau/day, 17681.251 timesteps/s
100.0% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0011888 | 0.0012611 | 0.0013335 | 0.2 | 0.04
Comm | 0.024838 | 0.025075 | 0.025312 | 0.1 | 0.89
Output | 0.0003581 | 0.00052559 | 0.00069308 | 0.0 | 0.02
Modify | 2.7209 | 2.7263 | 2.7318 | 0.3 | 96.41
Other | | 0.07465 | | | 2.64
Nlocal: 80 ave 80 max 80 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5257
Dangerous builds = 0
Total wall time: 0:00:02

View File

@ -0,0 +1,130 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/two tmp.couple
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000543118 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0086399 0 1.3760711 6.8772078 0.61567613
2000 1.0707188 0 1.3744107 6.628097 1.2313523
3000 1.0627515 0 1.310096 6.1647179 1.8470284
4000 0.94091054 0 1.268976 6.4723215 2.4627045
5000 1.0218949 0 1.2430242 5.6945977 3.0783806
6000 0.98649481 0 1.1997565 5.2870413 3.6940568
7000 0.9047957 0 1.1461262 5.9291636 4.3097329
8000 0.85697614 0 1.0437412 5.0652097 4.925409
9000 0.84208329 0 1.109218 6.1749808 5.5410852
10000 0.86418108 0 1.1594773 6.2581867 6.1567613
11000 0.95136356 0 1.1650901 5.8389085 6.7724374
12000 0.94571583 0 1.2210342 6.2498816 7.3881135
13000 0.95994288 0 1.2172042 5.9608165 -7.3881135
14000 0.99053999 0 1.2925597 6.9994446 -6.7724374
15000 1.0316726 0 1.3346023 6.6902672 -6.1567613
16000 0.99537481 0 1.3227696 7.0301123 -5.5410852
17000 1.0306843 0 1.3101457 6.4750102 -4.925409
18000 1.071154 0 1.2947547 5.695888 -4.3097329
19000 0.97120752 0 1.3035465 7.3945362 -3.6940568
20000 0.97198994 0 1.2244663 6.0047605 -3.0783806
21000 0.97943545 0 1.2393394 6.3871012 -2.4627045
22000 0.98550707 0 1.1768148 5.019967 -1.8470284
23000 0.96920052 0 1.1730698 5.7944947 -1.2313523
24000 0.94069959 0 1.184119 5.8434876 -0.61567613
25000 0.91569312 0 1.1642118 5.668997 0
26000 0.98882932 0 1.1999248 5.0115507 0.61567613
27000 0.8972608 0 1.2556546 7.0837158 1.2313523
28000 0.93554756 0 1.2221911 5.9302841 1.8470284
29000 0.97894608 0 1.2168736 5.5766766 2.4627045
30000 0.97877055 0 1.2575839 6.4308887 3.0783806
31000 1.0002387 0 1.2338069 5.3873124 3.6940568
32000 0.89608618 0 1.2382021 6.7892204 4.3097329
33000 0.87439302 0 1.2252635 7.078134 4.925409
34000 1.076102 0 1.2991393 5.5556892 5.5410852
35000 1.0018689 0 1.272105 6.1320483 6.1567613
36000 0.93327214 0 1.2428039 7.0030867 6.7724374
37000 1.0770236 0 1.3002931 5.4996076 7.3881135
38000 0.98715132 0 1.215562 5.5958335 -7.3881135
39000 0.95028417 0 1.2566706 6.4133713 -6.7724374
40000 1.0445585 0 1.241151 5.3589806 -6.1567613
41000 0.93799713 0 1.2109086 6.4957845 -5.5410852
42000 0.99231038 0 1.2228781 5.9363471 -4.925409
43000 0.97913815 0 1.1854842 5.8837987 -4.3097329
44000 0.86748838 0 1.1616201 6.8991278 -3.6940568
45000 0.96284421 0 1.1549383 5.1226785 -3.0783806
46000 0.98701623 0 1.170581 4.9719567 -2.4627045
47000 0.92618683 0 1.2146576 6.7100075 -1.8470284
48000 1.0092593 0 1.2523988 5.7067864 -1.2313523
49000 1.0187472 0 1.271608 5.3355092 -0.61567613
50000 1.0194881 0 1.2831094 6.2449759 0
Loop time of 4.80282 on 1 procs for 50000 steps with 160 atoms
Performance: 4497356.047 tau/day, 10410.546 timesteps/s
50.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0032325 | 0.0032325 | 0.0032325 | 0.0 | 0.07
Comm | 0.033613 | 0.033613 | 0.033613 | 0.0 | 0.70
Output | 0.00089812 | 0.00089812 | 0.00089812 | 0.0 | 0.02
Modify | 4.6706 | 4.6706 | 4.6706 | 0.0 | 97.25
Other | | 0.09449 | | | 1.97
Nlocal: 160 ave 160 max 160 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5270
Dangerous builds = 0
Total wall time: 0:00:04

View File

@ -0,0 +1,130 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/two tmp.couple
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000574827 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0876599 0 1.3637783 6.063363 0.61567613
2000 1.0722996 0 1.4108622 7.0518942 1.2313523
3000 1.0580774 0 1.3845895 6.0395275 1.8470284
4000 1.0068006 0 1.3804387 7.0944598 2.4627045
5000 0.95391814 0 1.2578438 6.1878831 3.0783806
6000 0.93492945 0 1.2711287 7.0440517 3.6940568
7000 0.94107853 0 1.2384371 6.1438077 4.3097329
8000 0.89711744 0 1.145748 6.3558305 4.925409
9000 0.90190304 0 1.0860684 4.957098 5.5410852
10000 0.84255749 0 1.0376892 5.2351795 6.1567613
11000 0.90250882 0 1.0497695 4.3844838 6.7724374
12000 0.83461274 0 1.0281949 5.1534361 7.3881135
13000 0.80315331 0 1.0226333 5.757222 -7.3881135
14000 0.81820939 0 0.99276466 4.6943725 -6.7724374
15000 0.8239631 0 1.0408289 5.1669006 -6.1567613
16000 0.88908894 0 1.1554855 6.3510278 -5.5410852
17000 0.98268136 0 1.2086981 5.6302847 -4.925409
18000 1.0098162 0 1.3687676 7.5243587 -4.3097329
19000 1.0795086 0 1.4562691 7.639418 -3.6940568
20000 1.1932155 0 1.5278988 7.0668432 -3.0783806
21000 1.2424296 0 1.6048792 7.959585 -2.4627045
22000 1.297169 0 1.7421262 8.9432388 -1.8470284
23000 1.2863494 0 1.7236774 8.3584973 -1.2313523
24000 1.4084347 0 1.7370339 7.2729078 -0.61567613
25000 1.3339728 0 1.6883255 7.529098 0
26000 1.1483243 0 1.5155578 7.3869994 0.61567613
27000 1.1372606 0 1.4368323 7.0580136 1.2313523
28000 1.0518579 0 1.355311 6.478857 1.8470284
29000 1.0581145 0 1.2535509 5.3697479 2.4627045
30000 0.93612564 0 1.185532 5.5520142 3.0783806
31000 0.94387516 0 1.1716454 5.8655485 3.6940568
32000 0.83953515 0 1.0737746 5.5551953 4.3097329
33000 0.84862926 0 1.0564042 5.7795428 4.925409
34000 0.83621877 0 1.079387 5.7514243 5.5410852
35000 0.86938506 0 1.031783 4.5897825 6.1567613
36000 0.88990609 0 1.0807597 5.3751744 6.7724374
37000 0.89534631 0 1.1238882 5.2400355 7.3881135
38000 0.98545003 0 1.2121125 5.7783854 -7.3881135
39000 0.96737778 0 1.2472934 6.1139 -6.7724374
40000 1.0664194 0 1.2956811 5.7353661 -6.1567613
41000 1.0681494 0 1.3269435 6.3102722 -5.5410852
42000 1.0875422 0 1.3963739 7.1208066 -4.925409
43000 1.0968173 0 1.3388062 6.1704339 -4.3097329
44000 1.1182109 0 1.3773214 7.0872686 -3.6940568
45000 1.1243261 0 1.432186 6.884782 -3.0783806
46000 1.039713 0 1.4389721 7.5585257 -2.4627045
47000 1.0816108 0 1.4100361 6.4611126 -1.8470284
48000 0.97637127 0 1.3605389 7.3992744 -1.2313523
49000 1.0361978 0 1.2721873 5.8166109 -0.61567613
50000 0.92367087 0 1.1875669 6.4685214 0
Loop time of 5.66536 on 2 procs for 50000 steps with 160 atoms
Performance: 3812643.232 tau/day, 8825.563 timesteps/s
48.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0012836 | 0.0015377 | 0.0017917 | 0.6 | 0.03
Comm | 0.037211 | 0.037922 | 0.038633 | 0.4 | 0.67
Output | 0.00052023 | 0.00073683 | 0.00095344 | 0.0 | 0.01
Modify | 5.5101 | 5.5105 | 5.511 | 0.0 | 97.27
Other | | 0.1146 | | | 2.02
Nlocal: 80 ave 80 max 80 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5257
Dangerous builds = 0
Total wall time: 0:00:06

View File

@ -0,0 +1,130 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md zmq localhost:5555
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000530005 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0086399 0 1.3760711 6.8772078 0.61567613
2000 1.0707188 0 1.3744107 6.628097 1.2313523
3000 1.0627515 0 1.310096 6.1647179 1.8470284
4000 0.94091054 0 1.268976 6.4723215 2.4627045
5000 1.0218949 0 1.2430242 5.6945977 3.0783806
6000 0.98649481 0 1.1997565 5.2870413 3.6940568
7000 0.9047957 0 1.1461262 5.9291636 4.3097329
8000 0.85697614 0 1.0437412 5.0652097 4.925409
9000 0.84208329 0 1.109218 6.1749808 5.5410852
10000 0.86418108 0 1.1594773 6.2581867 6.1567613
11000 0.95136356 0 1.1650901 5.8389085 6.7724374
12000 0.94571583 0 1.2210342 6.2498816 7.3881135
13000 0.95994288 0 1.2172042 5.9608165 -7.3881135
14000 0.99053999 0 1.2925597 6.9994446 -6.7724374
15000 1.0316726 0 1.3346023 6.6902672 -6.1567613
16000 0.99537481 0 1.3227696 7.0301123 -5.5410852
17000 1.0306843 0 1.3101457 6.4750102 -4.925409
18000 1.071154 0 1.2947547 5.695888 -4.3097329
19000 0.97120752 0 1.3035465 7.3945362 -3.6940568
20000 0.97198994 0 1.2244663 6.0047605 -3.0783806
21000 0.97943545 0 1.2393394 6.3871012 -2.4627045
22000 0.98550707 0 1.1768148 5.019967 -1.8470284
23000 0.96920052 0 1.1730698 5.7944947 -1.2313523
24000 0.94069959 0 1.184119 5.8434876 -0.61567613
25000 0.91569312 0 1.1642118 5.668997 0
26000 0.98882932 0 1.1999248 5.0115507 0.61567613
27000 0.8972608 0 1.2556546 7.0837158 1.2313523
28000 0.93554756 0 1.2221911 5.9302841 1.8470284
29000 0.97894608 0 1.2168736 5.5766766 2.4627045
30000 0.97877055 0 1.2575839 6.4308887 3.0783806
31000 1.0002387 0 1.2338069 5.3873124 3.6940568
32000 0.89608618 0 1.2382021 6.7892204 4.3097329
33000 0.87439302 0 1.2252635 7.078134 4.925409
34000 1.076102 0 1.2991393 5.5556892 5.5410852
35000 1.0018689 0 1.272105 6.1320483 6.1567613
36000 0.93327214 0 1.2428039 7.0030867 6.7724374
37000 1.0770236 0 1.3002931 5.4996076 7.3881135
38000 0.98715132 0 1.215562 5.5958335 -7.3881135
39000 0.95028417 0 1.2566706 6.4133713 -6.7724374
40000 1.0445585 0 1.241151 5.3589806 -6.1567613
41000 0.93799713 0 1.2109086 6.4957845 -5.5410852
42000 0.99231038 0 1.2228781 5.9363471 -4.925409
43000 0.97913815 0 1.1854842 5.8837987 -4.3097329
44000 0.86748838 0 1.1616201 6.8991278 -3.6940568
45000 0.96284421 0 1.1549383 5.1226785 -3.0783806
46000 0.98701623 0 1.170581 4.9719567 -2.4627045
47000 0.92618683 0 1.2146576 6.7100075 -1.8470284
48000 1.0092593 0 1.2523988 5.7067864 -1.2313523
49000 1.0187472 0 1.271608 5.3355092 -0.61567613
50000 1.0194881 0 1.2831094 6.2449759 0
Loop time of 12.9652 on 1 procs for 50000 steps with 160 atoms
Performance: 1666000.147 tau/day, 3856.482 timesteps/s
23.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0018659 | 0.0018659 | 0.0018659 | 0.0 | 0.01
Comm | 0.020679 | 0.020679 | 0.020679 | 0.0 | 0.16
Output | 0.00052547 | 0.00052547 | 0.00052547 | 0.0 | 0.00
Modify | 12.871 | 12.871 | 12.871 | 0.0 | 99.28
Other | | 0.07062 | | | 0.54
Nlocal: 160 ave 160 max 160 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5270
Dangerous builds = 0
Total wall time: 0:00:12

View File

@ -0,0 +1,130 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md zmq localhost:5555
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000673056 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0876599 0 1.3637783 6.063363 0.61567613
2000 1.0722996 0 1.4108622 7.0518942 1.2313523
3000 1.0580774 0 1.3845895 6.0395275 1.8470284
4000 1.0068006 0 1.3804387 7.0944598 2.4627045
5000 0.95391814 0 1.2578438 6.1878831 3.0783806
6000 0.93492945 0 1.2711287 7.0440517 3.6940568
7000 0.94107853 0 1.2384371 6.1438077 4.3097329
8000 0.89711744 0 1.145748 6.3558305 4.925409
9000 0.90190304 0 1.0860684 4.957098 5.5410852
10000 0.84255749 0 1.0376892 5.2351795 6.1567613
11000 0.90250882 0 1.0497695 4.3844838 6.7724374
12000 0.83461274 0 1.0281949 5.1534361 7.3881135
13000 0.80315331 0 1.0226333 5.757222 -7.3881135
14000 0.81820939 0 0.99276466 4.6943725 -6.7724374
15000 0.8239631 0 1.0408289 5.1669006 -6.1567613
16000 0.88908894 0 1.1554855 6.3510278 -5.5410852
17000 0.98268136 0 1.2086981 5.6302847 -4.925409
18000 1.0098162 0 1.3687676 7.5243587 -4.3097329
19000 1.0795086 0 1.4562691 7.639418 -3.6940568
20000 1.1932155 0 1.5278988 7.0668432 -3.0783806
21000 1.2424296 0 1.6048792 7.959585 -2.4627045
22000 1.297169 0 1.7421262 8.9432388 -1.8470284
23000 1.2863494 0 1.7236774 8.3584973 -1.2313523
24000 1.4084347 0 1.7370339 7.2729078 -0.61567613
25000 1.3339728 0 1.6883255 7.529098 0
26000 1.1483243 0 1.5155578 7.3869994 0.61567613
27000 1.1372606 0 1.4368323 7.0580136 1.2313523
28000 1.0518579 0 1.355311 6.478857 1.8470284
29000 1.0581145 0 1.2535509 5.3697479 2.4627045
30000 0.93612564 0 1.185532 5.5520142 3.0783806
31000 0.94387516 0 1.1716454 5.8655485 3.6940568
32000 0.83953515 0 1.0737746 5.5551953 4.3097329
33000 0.84862926 0 1.0564042 5.7795428 4.925409
34000 0.83621877 0 1.079387 5.7514243 5.5410852
35000 0.86938506 0 1.031783 4.5897825 6.1567613
36000 0.88990609 0 1.0807597 5.3751744 6.7724374
37000 0.89534631 0 1.1238882 5.2400355 7.3881135
38000 0.98545003 0 1.2121125 5.7783854 -7.3881135
39000 0.96737778 0 1.2472934 6.1139 -6.7724374
40000 1.0664194 0 1.2956811 5.7353661 -6.1567613
41000 1.0681494 0 1.3269435 6.3102722 -5.5410852
42000 1.0875422 0 1.3963739 7.1208066 -4.925409
43000 1.0968173 0 1.3388062 6.1704339 -4.3097329
44000 1.1182109 0 1.3773214 7.0872686 -3.6940568
45000 1.1243261 0 1.432186 6.884782 -3.0783806
46000 1.039713 0 1.4389721 7.5585257 -2.4627045
47000 1.0816108 0 1.4100361 6.4611126 -1.8470284
48000 0.97637127 0 1.3605389 7.3992744 -1.2313523
49000 1.0361978 0 1.2721873 5.8166109 -0.61567613
50000 0.92367087 0 1.1875669 6.4685214 0
Loop time of 13.6129 on 2 procs for 50000 steps with 160 atoms
Performance: 1586733.497 tau/day, 3672.994 timesteps/s
59.3% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0015125 | 0.0015551 | 0.0015976 | 0.1 | 0.01
Comm | 0.026598 | 0.028413 | 0.030227 | 1.1 | 0.21
Output | 0.00046492 | 0.00063884 | 0.00081277 | 0.0 | 0.00
Modify | 13.465 | 13.483 | 13.501 | 0.5 | 99.04
Other | | 0.0994 | | | 0.73
Nlocal: 80 ave 80 max 80 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5257
Dangerous builds = 0
Total wall time: 0:00:13

View File

@ -0,0 +1,129 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - no client/server mode
units lj
atom_style atomic
dimension 2
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000437021 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.065 | 3.065 | 3.065 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.1326992 0.25863754 1.3842573 6.0588079 0.61567613
2000 1.0158438 0.33502643 1.3445212 7.2638652 1.2313523
3000 1.0968167 0.3149227 1.4048843 7.0653223 1.8470284
4000 1.0070993 0.40611915 1.4069241 7.7283521 2.4627045
5000 1.1153133 0.2674828 1.3758254 6.2949171 3.0783806
6000 1.0170665 0.25843673 1.2691466 6.049412 3.6940568
7000 1.0224605 0.20974914 1.2258193 5.5104976 4.3097329
8000 0.96149374 0.24035439 1.1958388 5.4179146 4.925409
9000 0.87759014 0.2590493 1.1311545 5.8711239 5.5410852
10000 0.83791968 0.23477897 1.0674617 5.666904 6.1567613
11000 0.87702487 0.22958877 1.1011322 5.9068062 6.7724374
12000 0.81507294 0.26375817 1.0737369 5.9166925 7.3881135
13000 0.85655284 0.24676491 1.0979643 5.6918734 -7.3881135
14000 0.84369293 0.27818471 1.1166046 6.4146184 -6.7724374
15000 0.90052173 0.19836095 1.0932544 5.2690913 -6.1567613
16000 0.83836874 0.26921637 1.1023453 5.9579526 -5.5410852
17000 0.90492897 0.21933098 1.1186041 5.6042194 -4.925409
18000 0.90113412 0.24880908 1.1443111 6.0634846 -4.3097329
19000 1.0160445 0.17252962 1.1822239 5.3149334 -3.6940568
20000 0.96217234 0.2414377 1.1975965 5.476653 -3.0783806
21000 0.98229664 0.27569118 1.2518485 5.9340174 -2.4627045
22000 1.0551763 0.26997615 1.3185576 6.2094112 -1.8470284
23000 1.051999 0.34076639 1.3861904 7.082385 -1.2313523
24000 1.1350071 0.23701844 1.3649317 6.1829742 -0.61567613
25000 1.0946409 0.33366032 1.4214597 7.1907559 0
26000 1.1511799 0.24626808 1.3902531 5.8469984 0.61567613
27000 1.1009203 0.25653085 1.3505704 6.1504287 1.2313523
28000 1.0521302 0.2876798 1.3332342 5.9906187 1.8470284
29000 1.0518465 0.21853 1.2638025 5.6577549 2.4627045
30000 0.97264625 0.28758145 1.2541487 6.5769804 3.0783806
31000 1.0133579 0.31575837 1.3227828 6.6650893 3.6940568
32000 1.0714324 0.28757036 1.3523063 6.2682059 4.3097329
33000 1.0739451 0.28062459 1.3478575 6.6862746 4.925409
34000 1.0056867 0.38289586 1.382297 7.1120131 5.5410852
35000 1.0911349 0.26370939 1.3480247 6.1476048 6.1567613
36000 1.0618618 0.28269593 1.3379211 6.9414608 6.7724374
37000 1.0704991 0.29974994 1.3635585 7.0834346 7.3881135
38000 1.1087507 0.2682201 1.3700411 5.8506019 -7.3881135
39000 1.1303733 0.22362416 1.3469326 5.2500269 -6.7724374
40000 1.0174248 0.28956571 1.3006316 6.4491571 -6.1567613
41000 0.95981887 0.29162143 1.2454414 6.4658646 -5.5410852
42000 0.88302144 0.30432252 1.1818251 6.7401923 -4.925409
43000 0.93164419 0.25110308 1.1769245 5.9067383 -4.3097329
44000 0.98352598 0.23322873 1.2106077 5.5606585 -3.6940568
45000 1.0247245 0.26503082 1.2833508 6.533394 -3.0783806
46000 0.93004532 0.32277782 1.2470104 6.4689179 -2.4627045
47000 1.0653176 0.29185413 1.3505135 6.9534569 -1.8470284
48000 1.0401524 0.3420245 1.3756759 6.8016042 -1.2313523
49000 1.0023407 0.31833091 1.314407 6.7385662 -0.61567613
50000 1.0566272 0.28657142 1.3365947 6.261203 0
Loop time of 1.15672 on 1 procs for 50000 steps with 160 atoms
Performance: 18673426.371 tau/day, 43225.524 timesteps/s
99.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.19267 | 0.19267 | 0.19267 | 0.0 | 16.66
Neigh | 0.15156 | 0.15156 | 0.15156 | 0.0 | 13.10
Comm | 0.044569 | 0.044569 | 0.044569 | 0.0 | 3.85
Output | 0.00057936 | 0.00057936 | 0.00057936 | 0.0 | 0.05
Modify | 0.71051 | 0.71051 | 0.71051 | 0.0 | 61.42
Other | | 0.05684 | | | 4.91
Nlocal: 160 ave 160 max 160 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 71 ave 71 max 71 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 362 ave 362 max 362 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 362
Ave neighs/atom = 2.2625
Neighbor list builds = 5256
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,129 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - no client/server mode
units lj
atom_style atomic
dimension 2
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000396967 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.065 | 3.065 | 3.065 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.1326992 0.25863754 1.3842573 6.0588079 0.61567613
2000 1.0158438 0.33502643 1.3445212 7.2638652 1.2313523
3000 1.0968167 0.3149227 1.4048843 7.0653223 1.8470284
4000 1.0070993 0.40611915 1.4069241 7.7283521 2.4627045
5000 1.1153133 0.2674828 1.3758254 6.2949171 3.0783806
6000 1.0170665 0.25843673 1.2691466 6.049412 3.6940568
7000 1.0224605 0.20974914 1.2258193 5.5104976 4.3097329
8000 0.96149374 0.24035439 1.1958388 5.4179146 4.925409
9000 0.87759014 0.2590493 1.1311545 5.8711239 5.5410852
10000 0.83791968 0.23477897 1.0674617 5.666904 6.1567613
11000 0.87702487 0.22958877 1.1011322 5.9068062 6.7724374
12000 0.81507294 0.26375817 1.0737369 5.9166925 7.3881135
13000 0.85655284 0.24676491 1.0979643 5.6918734 -7.3881135
14000 0.84369293 0.27818471 1.1166046 6.4146184 -6.7724374
15000 0.90052173 0.19836095 1.0932544 5.2690913 -6.1567613
16000 0.83836874 0.26921637 1.1023453 5.9579526 -5.5410852
17000 0.90492897 0.21933098 1.1186041 5.6042194 -4.925409
18000 0.90113412 0.24880908 1.1443111 6.0634846 -4.3097329
19000 1.0160445 0.17252962 1.1822239 5.3149334 -3.6940568
20000 0.96217234 0.2414377 1.1975965 5.476653 -3.0783806
21000 0.98229664 0.27569118 1.2518485 5.9340174 -2.4627045
22000 1.0551763 0.26997615 1.3185576 6.2094112 -1.8470284
23000 1.051999 0.34076639 1.3861904 7.082385 -1.2313523
24000 1.1350071 0.23701844 1.3649317 6.1829742 -0.61567613
25000 1.0946409 0.33366032 1.4214597 7.1907559 0
26000 1.1511799 0.24626808 1.3902531 5.8469984 0.61567613
27000 1.1009203 0.25653085 1.3505704 6.1504287 1.2313523
28000 1.0521302 0.2876798 1.3332342 5.9906187 1.8470284
29000 1.0518465 0.21853 1.2638025 5.6577549 2.4627045
30000 0.97264625 0.28758145 1.2541487 6.5769804 3.0783806
31000 1.0133579 0.31575837 1.3227828 6.6650893 3.6940568
32000 1.0714324 0.28757036 1.3523063 6.2682059 4.3097329
33000 1.0739451 0.28062459 1.3478575 6.6862746 4.925409
34000 1.0056867 0.38289586 1.382297 7.1120131 5.5410852
35000 1.0911349 0.26370939 1.3480247 6.1476048 6.1567613
36000 1.0618618 0.28269593 1.3379211 6.9414608 6.7724374
37000 1.0704991 0.29974994 1.3635585 7.0834346 7.3881135
38000 1.1087507 0.2682201 1.3700411 5.8506019 -7.3881135
39000 1.1303733 0.22362416 1.3469326 5.2500269 -6.7724374
40000 1.0174248 0.28956571 1.3006316 6.4491571 -6.1567613
41000 0.95981887 0.29162143 1.2454414 6.4658646 -5.5410852
42000 0.88302144 0.30432252 1.1818251 6.7401923 -4.925409
43000 0.93164419 0.25110308 1.1769245 5.9067383 -4.3097329
44000 0.98352598 0.23322873 1.2106077 5.5606585 -3.6940568
45000 1.0247245 0.26503082 1.2833508 6.533394 -3.0783806
46000 0.93004532 0.32277782 1.2470104 6.4689179 -2.4627045
47000 1.0653176 0.29185413 1.3505135 6.9534569 -1.8470284
48000 1.0401524 0.3420245 1.3756759 6.8016042 -1.2313523
49000 1.0023407 0.31833091 1.314407 6.7385662 -0.61567613
50000 1.0566272 0.28657142 1.3365947 6.261203 0
Loop time of 0.92653 on 1 procs for 50000 steps with 160 atoms
Performance: 23312793.646 tau/day, 53964.800 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.15063 | 0.15063 | 0.15063 | 0.0 | 16.26
Neigh | 0.11869 | 0.11869 | 0.11869 | 0.0 | 12.81
Comm | 0.035839 | 0.035839 | 0.035839 | 0.0 | 3.87
Output | 0.00064468 | 0.00064468 | 0.00064468 | 0.0 | 0.07
Modify | 0.57422 | 0.57422 | 0.57422 | 0.0 | 61.98
Other | | 0.0465 | | | 5.02
Nlocal: 160 ave 160 max 160 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 71 ave 71 max 71 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 362 ave 362 max 362 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 362
Ave neighs/atom = 2.2625
Neighbor list builds = 5256
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,48 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/one
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000438929 secs
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Server MD calls = 50001
Server MD reneighborings 5073
Total wall time: 0:00:01

View File

@ -0,0 +1,48 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/one
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000455141 secs
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
2 by 2 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Server MD calls = 50001
Server MD reneighborings 5066
Total wall time: 0:00:02

View File

@ -0,0 +1,48 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/two tmp.couple
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000521898 secs
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Server MD calls = 50001
Server MD reneighborings 5073
Total wall time: 0:00:04

View File

@ -0,0 +1,48 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/two tmp.couple
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000378847 secs
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
2 by 2 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Server MD calls = 50001
Server MD reneighborings 5066
Total wall time: 0:00:06

View File

@ -0,0 +1,48 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md zmq *:5555
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000557184 secs
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Server MD calls = 50001
Server MD reneighborings 5073
Total wall time: 0:00:12

View File

@ -0,0 +1,48 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md zmq *:5555
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000586987 secs
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
2 by 2 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.3
ghost atom cutoff = 1.3
binsize = 0.65, bins = 24 19 3
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/2d/newton/tri
bin: standard
Server MD calls = 50001
Server MD reneighborings 5066
Total wall time: 0:00:13

View File

@ -35,6 +35,8 @@ linalg set of BLAS and LAPACK routines needed by USER-ATC package
from Axel Kohlmeyer (Temple U)
meam modified embedded atom method (MEAM) potential, MEAM package
from Greg Wagner (Sandia)
message client/server communication library via MPI, sockets, files
from Steve Plimpton (Sandia)
molfile hooks to VMD molfile plugins, used by the USER-MOLFILE package
from Axel Kohlmeyer (Temple U) and the VMD development team
mscg hooks to the MSCG library, used by fix_mscg command

118
lib/message/Install.py Normal file
View File

@ -0,0 +1,118 @@
#!/usr/bin/env python
# Install.py tool to build the CSlib library
# used to automate the steps described in the README file in this dir
from __future__ import print_function
import sys,os,re,subprocess
# help message
help = """
Syntax from src dir: make lib-message args="-m"
or: make lib-message args="-s -z"
Syntax from lib dir: python Install.py -m
or: python Install.py -s -z
specify zero or more options, order does not matter
-m = parallel build of CSlib library
-s = serial build of CSlib library
-z = build CSlib library with ZMQ socket support, default = no ZMQ support
Example:
make lib-message args="-m -z" # build parallel CSlib with ZMQ support
make lib-message args="-s" # build serial CSlib with no ZMQ support
"""
# print error message or help
def error(str=None):
  if not str: print(help)
  else: print("ERROR",str)
  sys.exit()
# expand to full path name
# process leading '~' or relative path
def fullpath(path):
  return os.path.abspath(os.path.expanduser(path))
def which(program):
  def is_exe(fpath):
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
  fpath, fname = os.path.split(program)
  if fpath:
    if is_exe(program):
      return program
  else:
    for path in os.environ["PATH"].split(os.pathsep):
      path = path.strip('"')
      exe_file = os.path.join(path, program)
      if is_exe(exe_file):
        return exe_file
  return None
# parse args
args = sys.argv[1:]
nargs = len(args)
if nargs == 0: error()
mpiflag = False
serialflag = False
zmqflag = False
iarg = 0
while iarg < nargs:
  if args[iarg] == "-m":
    mpiflag = True
    iarg += 1
  elif args[iarg] == "-s":
    serialflag = True
    iarg += 1
  elif args[iarg] == "-z":
    zmqflag = True
    iarg += 1
  else: error()
if (not mpiflag and not serialflag):
  error("Must use either -m or -s flag")
if (mpiflag and serialflag):
  error("Cannot use -m and -s flag at the same time")
# build CSlib
# copy resulting lib to cslib/src/libmessage.a
# copy appropriate Makefile.lammps.* to Makefile.lammps
print("Building CSlib ...")
srcdir = fullpath("./cslib/src")
if mpiflag and zmqflag:
  cmd = "cd %s; make lib_parallel" % srcdir
elif mpiflag and not zmqflag:
  cmd = "cd %s; make lib_parallel zmq=no" % srcdir
elif not mpiflag and zmqflag:
  cmd = "cd %s; make lib_serial" % srcdir
elif not mpiflag and not zmqflag:
  cmd = "cd %s; make lib_serial zmq=no" % srcdir
print(cmd)
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))
if mpiflag: cmd = "cd %s; cp libcsmpi.a libmessage.a" % srcdir
else: cmd = "cd %s; cp libcsnompi.a libmessage.a" % srcdir
print(cmd)
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))
if zmqflag: cmd = "cp Makefile.lammps.zmq Makefile.lammps"
else: cmd = "cp Makefile.lammps.nozmq Makefile.lammps"
print(cmd)
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))

View File

@ -0,0 +1,5 @@
# Settings that the LAMMPS build will import when this package library is used
message_SYSINC =
message_SYSLIB =
message_SYSPATH =

View File

@ -0,0 +1,5 @@
# Settings that the LAMMPS build will import when this package library is used
message_SYSINC =
message_SYSLIB = -lzmq
message_SYSPATH =

51
lib/message/README Normal file
View File

@ -0,0 +1,51 @@
This directory contains the CSlib library which is required
to use the MESSAGE package and its client/server commands
in a LAMMPS input script.
The CSlib library is included in the LAMMPS distribution. A fuller
version including documentation and test programs is available at
http://cslib.sandia.gov. It was developed by Steve Plimpton at Sandia
National Laboratories.
You can type "make lib-message" from the src directory to see help on
how to build this library via make commands, or you can do the same
thing by typing "python Install.py" from within this directory, or you
can do it manually by following the instructions below.
The CSlib can be optionally built with support for sockets using
the open-source ZeroMQ (ZMQ) library. If it is not installed
on your system, it is easy to download and install.
Go to the ZMQ website for details: http://zeromq.org
-----------------
Instructions:
1. Compile CSlib from within cslib/src with one of the following:
% make lib_parallel # build parallel library with ZMQ socket support
% make lib_serial # build serial library with ZMQ support
% make lib_parallel zmq=no # build parallel lib with no ZMQ support
% make lib_serial zmq=no # build serial lib with no ZMQ support
2. Copy the produced cslib/src/libcsmpi.a or libcsnompi.a file to
cslib/src/libmessage.a
3. Copy either lib/message/Makefile.lammps.zmq or Makefile.lammps.nozmq
to lib/message/Makefile.lammps, depending on whether you
build the library with ZMQ support or not.
If your ZMQ library is not in a place your shell path finds,
you can set the INCLUDE and PATH variables in Makefile.lammps
to point to the dirs where the ZMQ include and library files are.
-----------------
When these steps are complete, you can build LAMMPS
with the MESSAGE package installed:
% cd lammps/src
% make yes-message
% make mpi (or whatever target you wish)
Note that if you download and unpack a new LAMMPS tarball, you will
need to re-build the CSlib in this dir.
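For orientation, here is a minimal, illustrative C++ sketch (not part of the
CSlib or LAMMPS distribution) of a client driving the CSlib directly, based on
the API visible in cslib.cpp later in this patch (the CSlib constructor,
send(), pack_int() and recv()). The "cslib.h" header name is taken from the
#include in cslib.cpp, and the "localhost:5555" port string mirrors the zmq
runs logged above; treat this as a sketch of the calling convention under
those assumptions, not as documented usage. See http://cslib.sandia.gov for
the real API documentation.

// minimal CSlib client sketch (illustrative only)
#include <mpi.h>
#include <stdio.h>
#include "cslib.h"
using namespace CSLIB_NS;

int main(int narg, char **arg)
{
  MPI_Init(&narg,&arg);
  MPI_Comm world = MPI_COMM_WORLD;

  // csflag = 0 -> act as a client; "zmq" mode connects to a server socket
  CSlib *cs = new CSlib(0,"zmq","localhost:5555",&world);

  // one message with ID 1 carrying a single integer field (field ID 1);
  // the message is transmitted once all declared fields are packed
  cs->send(1,1);
  cs->pack_int(1,100);

  // block until the server replies, then inspect the returned field table
  int nfield,*fieldID,*fieldtype,*fieldlen;
  int msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
  printf("received message %d with %d fields\n",msgID,nfield);

  delete cs;
  MPI_Finalize();
  return 0;
}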

32
lib/message/cslib/LICENSE Normal file
View File

@ -0,0 +1,32 @@
Program: CSlib client/server coupling library
Copyright 2018 National Technology & Engineering Solutions of Sandia,
LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
U.S. Government retains certain rights in this software. This
software is distributed under the modified Berkeley Software
Distribution (BSD) License.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Sandia Corporation nor the names of contributors
to this software may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

23
lib/message/cslib/README Normal file
View File

@ -0,0 +1,23 @@
This is the Client/Server messaging library (CSlib).
Only the source directory and license file are included here as part
of the LAMMPS distribution. The full CSlib distribution, including
documentation and test codes, can be found at the website:
http://cslib.sandia.gov (as of Aug 2018).
The contact author is
Steve Plimpton
Sandia National Laboratories
sjplimp@sandia.gov
http://www.sandia.gov/~sjplimp
The CSlib is distributed as open-source code under the modified
Berkeley Software Distribution (BSD) License. See the accompanying
LICENSE file.
This directory contains the following:
README     this file
LICENSE    modified BSD license
src        source files for library

View File

@ -0,0 +1,107 @@
# Makefile for CSlib = client/server messaging library
# type "make help" for options
SHELL = /bin/sh
# ----------------------------------------
# should only need to change this section
# compiler/linker settings
# ----------------------------------------
CC = g++
CCFLAGS = -g -O3 -DZMQ_$(ZMQ) -DMPI_$(MPI)
SHFLAGS = -fPIC
ARCHIVE = ar
ARCHFLAGS = -rc
SHLIBFLAGS = -shared
# files
LIB = libcsmpi.a
SHLIB = libcsmpi.so
SRC = $(wildcard *.cpp)
INC = $(wildcard *.h)
OBJ = $(SRC:.cpp=.o)
# build with ZMQ support or not
zmq = yes
ZMQ = $(shell echo $(zmq) | tr a-z A-Z)
ifeq ($(ZMQ),YES)
ZMQLIB = -lzmq
else
CCFLAGS += -I./STUBS_ZMQ
endif
# build with MPI support or not
mpi = yes
MPI = $(shell echo $(mpi) | tr a-z A-Z)
ifeq ($(MPI),YES)
CC = mpicxx
else
CCFLAGS += -I./STUBS_MPI
LIB = libcsnompi.a
SHLIB = libcsnompi.so
endif
# targets
shlib: shlib_parallel shlib_serial
lib: lib_parallel lib_serial
all: shlib lib
help:
	@echo 'make                 default = shlib'
	@echo 'make shlib           build 2 shared CSlibs: parallel & serial'
	@echo 'make lib             build 2 static CSlibs: parallel & serial'
	@echo 'make all             build 4 CSlibs: shlib and lib'
	@echo 'make shlib_parallel  build shared parallel CSlib'
	@echo 'make shlib_serial    build shared serial CSlib'
	@echo 'make lib_parallel    build static parallel CSlib'
	@echo 'make lib_serial      build static serial CSlib'
	@echo 'make ... zmq=no      build w/out ZMQ support'
	@echo 'make clean           remove all *.o files'
	@echo 'make clean-all       remove *.o and lib files'
	@echo 'make tar             create a tarball, 2 levels up'
shlib_parallel:
	$(MAKE) clean
	$(MAKE) shared zmq=$(zmq) mpi=yes
shlib_serial:
	$(MAKE) clean
	$(MAKE) shared zmq=$(zmq) mpi=no
lib_parallel:
	$(MAKE) clean
	$(MAKE) static zmq=$(zmq) mpi=yes
lib_serial:
	$(MAKE) clean
	$(MAKE) static zmq=$(zmq) mpi=no
static: $(OBJ)
	$(ARCHIVE) $(ARCHFLAGS) $(LIB) $(OBJ)
shared: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) -o $(SHLIB) $(OBJ) $(ZMQLIB)
clean:
	@rm -f *.o *.pyc
clean-all:
	@rm -f *.o *.pyc lib*.a lib*.so
tar:
	cd ../..; tar cvf cslib.tar cslib/README cslib/LICENSE \
	  cslib/doc cslib/src cslib/test
# rules
%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) -c $<

View File

@ -0,0 +1,96 @@
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
// MPI constants and dummy functions
#ifndef MPI_DUMMY_H
#define MPI_DUMMY_H
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
namespace CSLIB_NS {
typedef int MPI_Comm;
typedef int MPI_Fint;
typedef int MPI_Datatype;
typedef int MPI_Status;
typedef int MPI_Op;
typedef int MPI_Info;
#define MPI_COMM_WORLD 0
#define MPI_MAX_PORT_NAME 0
#define MPI_INFO_NULL 0
#define MPI_INT 1
#define MPI_LONG_LONG 2
#define MPI_FLOAT 3
#define MPI_DOUBLE 4
#define MPI_CHAR 5
#define MPI_SUM 0
static void MPI_Init(int *, char ***) {}
static MPI_Comm MPI_Comm_f2c(MPI_Comm world) {return world;}
static void MPI_Comm_rank(MPI_Comm, int *) {}
static void MPI_Comm_size(MPI_Comm, int *) {}
static void MPI_Open_port(MPI_Info, char *) {}
static void MPI_Close_port(const char *) {}
static void MPI_Comm_accept(const char *, MPI_Info, int,
MPI_Comm, MPI_Comm *) {}
static void MPI_Comm_connect(const char *, MPI_Info, int,
MPI_Comm, MPI_Comm *) {}
static void MPI_Comm_split(MPI_Comm, int, int, MPI_Comm *) {}
static void MPI_Comm_free(MPI_Comm *) {}
static void MPI_Send(const void *, int, MPI_Datatype, int, int, MPI_Comm) {}
static void MPI_Recv(void *, int, MPI_Datatype, int, int,
MPI_Comm, MPI_Status *) {}
static void MPI_Allreduce(const void *in, void *out, int, MPI_Datatype type,
MPI_Op op, MPI_Comm)
{
if (type == MPI_INT) *((int *) out) = *((int *) in);
}
static void MPI_Scan(const void *in, void *out, int, MPI_Datatype intype,
MPI_Op op,MPI_Comm)
{
if (intype == MPI_INT) *((int *) out) = *((int *) in);
}
static void MPI_Bcast(void *, int, MPI_Datatype, int, MPI_Comm) {}
static void MPI_Allgather(const void *in, int incount, MPI_Datatype intype,
void *out, int, MPI_Datatype, MPI_Comm)
{
// assuming incount = 1
if (intype == MPI_INT) *((int *) out) = *((int *) in);
}
static void MPI_Allgatherv(const void *in, int incount, MPI_Datatype intype,
void *out, const int *, const int *,
MPI_Datatype, MPI_Comm)
{
if (intype == MPI_INT) memcpy(out,in,incount*sizeof(int));
else if (intype == MPI_LONG_LONG) memcpy(out,in,incount*sizeof(int64_t));
else if (intype == MPI_FLOAT) memcpy(out,in,incount*sizeof(float));
else if (intype == MPI_DOUBLE) memcpy(out,in,incount*sizeof(double));
else if (intype == MPI_CHAR) memcpy(out,in,incount*sizeof(char));
}
static void MPI_Abort(MPI_Comm, int) {exit(1);}
static void MPI_Finalize() {}
}
#endif

View File

@ -0,0 +1,36 @@
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
// ZMQ constants and dummy functions
#ifndef ZMQ_DUMMY_H
#define ZMQ_DUMMY_H
namespace CSLIB_NS {
#define ZMQ_REQ 0
#define ZMQ_REP 0
static void *zmq_ctx_new() {return NULL;}
static void *zmq_connect(void *, char *) {return NULL;}
static int zmq_bind(void *, char *) {return 0;}
static void *zmq_socket(void *,int) {return NULL;}
static void zmq_close(void *) {}
static void zmq_ctx_destroy(void *) {}
static void zmq_send(void *, void *, int, int) {}
static void zmq_recv(void *, void *, int, int) {}
};
#endif

View File

@ -0,0 +1,768 @@
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#include <mpi.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <limits.h>
#include "cslib.h"
#include "msg_file.h"
#include "msg_zmq.h"
#include "msg_mpi_one.h"
#include "msg_mpi_two.h"
using namespace CSLIB_NS;
#define MAXTYPE 5 // # of defined field data types
/* ---------------------------------------------------------------------- */
CSlib::CSlib(int csflag, const char *mode, const void *ptr, const void *pcomm)
{
if (pcomm) myworld = (uint64_t) *((MPI_Comm *) pcomm);
else myworld = 0;
#ifdef MPI_NO
if (pcomm)
error_all("constructor(): CSlib invoked with MPI_Comm "
"but built w/out MPI support");
#endif
#ifdef MPI_YES // NOTE: this could be OK to allow ??
// would allow a parallel app to invoke CSlib
// in parallel and/or in serial
if (!pcomm)
error_all("constructor(): CSlib invoked w/out MPI_Comm "
"but built with MPI support");
#endif
client = server = 0;
if (csflag == 0) client = 1;
else if (csflag == 1) server = 1;
else error_all("constructor(): Invalid client/server arg");
if (pcomm == NULL) {
me = 0;
nprocs = 1;
if (strcmp(mode,"file") == 0) msg = new MsgFile(csflag,ptr);
else if (strcmp(mode,"zmq") == 0) msg = new MsgZMQ(csflag,ptr);
else if (strcmp(mode,"mpi/one") == 0)
error_all("constructor(): No mpi/one mode for serial lib usage");
else if (strcmp(mode,"mpi/two") == 0)
error_all("constructor(): No mpi/two mode for serial lib usage");
else error_all("constructor(): Unknown mode");
} else if (pcomm) {
MPI_Comm world = (MPI_Comm) myworld;
MPI_Comm_rank(world,&me);
MPI_Comm_size(world,&nprocs);
if (strcmp(mode,"file") == 0) msg = new MsgFile(csflag,ptr,world);
else if (strcmp(mode,"zmq") == 0) msg = new MsgZMQ(csflag,ptr,world);
else if (strcmp(mode,"mpi/one") == 0) msg = new MsgMPIOne(csflag,ptr,world);
else if (strcmp(mode,"mpi/two") == 0) msg = new MsgMPITwo(csflag,ptr,world);
else error_all("constructor(): Unknown mode");
}
maxfield = 0;
fieldID = fieldtype = fieldlen = fieldoffset = NULL;
maxheader = 0;
header = NULL;
maxbuf = 0;
buf = NULL;
recvcounts = displs = NULL;
maxglobal = 0;
allids = NULL;
maxfieldbytes = 0;
fielddata = NULL;
pad = "\0\0\0\0\0\0\0"; // just length 7 since will have trailing NULL
nsend = nrecv = 0;
}
/* ---------------------------------------------------------------------- */
CSlib::~CSlib()
{
deallocate_fields();
sfree(header);
sfree(buf);
sfree(recvcounts);
sfree(displs);
sfree(allids);
sfree(fielddata);
delete msg;
}
/* ---------------------------------------------------------------------- */
void CSlib::send(int msgID_caller, int nfield_caller)
{
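// caller protocol: send() just records the message ID and the expected
// number of fields; the caller then makes one pack_*() call per field,
// and send_message() transmits the assembled message as soon as
// fieldcount reaches nfield (immediately below if nfield = 0)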
if (nfield_caller < 0) error_all("send(): Invalid nfield");
msgID = msgID_caller;
nfield = nfield_caller;
allocate_fields();
fieldcount = 0;
nbuf = 0;
if (fieldcount == nfield) send_message();
}
/* ---------------------------------------------------------------------- */
void CSlib::pack_int(int id, int value)
{
pack(id,1,1,&value);
}
/* ---------------------------------------------------------------------- */
void CSlib::pack_int64(int id, int64_t value)
{
pack(id,2,1,&value);
}
/* ---------------------------------------------------------------------- */
void CSlib::pack_float(int id, float value)
{
pack(id,3,1,&value);
}
/* ---------------------------------------------------------------------- */
void CSlib::pack_double(int id, double value)
{
pack(id,4,1,&value);
}
/* ---------------------------------------------------------------------- */
void CSlib::pack_string(int id, char *value)
{
pack(id,5,strlen(value)+1,value);
}
/* ---------------------------------------------------------------------- */
void CSlib::pack(int id, int ftype, int flen, void *data)
{
if (find_field(id,fieldcount) >= 0)
error_all("pack(): Reuse of field ID");
if (ftype < 1 || ftype > MAXTYPE) error_all("pack(): Invalid ftype");
if (flen < 0) error_all("pack(): Invalid flen");
fieldID[fieldcount] = id;
fieldtype[fieldcount] = ftype;
fieldlen[fieldcount] = flen;
int nbytes,nbytesround;
onefield(ftype,flen,nbytes,nbytesround);
memcpy(&buf[nbuf],data,nbytes);
memcpy(&buf[nbuf+nbytes],pad,nbytesround-nbytes);
nbuf += nbytesround;
fieldcount++;
if (fieldcount == nfield) send_message();
}
/* ---------------------------------------------------------------------- */
void CSlib::pack_parallel(int id, int ftype,
int nlocal, int *ids, int nper, void *data)
{
int i,j,k,m;
if (find_field(id,fieldcount) >= 0)
error_all("pack_parallel(): Reuse of field ID");
if (ftype < 1 || ftype > MAXTYPE) error_all("pack_parallel(): Invalid ftype");
if (nlocal < 0) error_all("pack_parallel(): Invalid nlocal");
if (nper < 1) error_all("pack_parallel(): Invalid nper");
MPI_Comm world = (MPI_Comm) myworld;
// NOTE: check for overflow of maxglobal and flen
int nglobal;
MPI_Allreduce(&nlocal,&nglobal,1,MPI_INT,MPI_SUM,world);
int flen = nper*nglobal;
fieldID[fieldcount] = id;
fieldtype[fieldcount] = ftype;
fieldlen[fieldcount] = flen;
// nlocal datums, each of nper length, from all procs
// final data in buf = datums for all natoms, ordered by ids
if (recvcounts == NULL) {
recvcounts = (int *) smalloc(nprocs*sizeof(int));
displs = (int *) smalloc(nprocs*sizeof(int));
}
MPI_Allgather(&nlocal,1,MPI_INT,recvcounts,1,MPI_INT,world);
displs[0] = 0;
for (int iproc = 1; iproc < nprocs; iproc++)
displs[iproc] = displs[iproc-1] + recvcounts[iproc-1];
if (ids && nglobal > maxglobal) {
sfree(allids);
maxglobal = nglobal;
// NOTE: maxglobal*sizeof(int) could overflow int
allids = (int *) smalloc(maxglobal*sizeof(int));
}
MPI_Allgatherv(ids,nlocal,MPI_INT,allids,
recvcounts,displs,MPI_INT,world);
int nlocalsize = nper*nlocal;
MPI_Allgather(&nlocalsize,1,MPI_INT,recvcounts,1,MPI_INT,world);
displs[0] = 0;
for (int iproc = 1; iproc < nprocs; iproc++)
displs[iproc] = displs[iproc-1] + recvcounts[iproc-1];
int nbytes,nbytesround;
onefield(ftype,flen,nbytes,nbytesround);
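// for each datatype: gather every proc's chunk with MPI_Allgatherv into a
// contiguous array, then (when per-datum IDs were supplied) scatter the
// gathered datums into buf in global-ID order, so the finished field is
// ordered by ID regardless of which proc owned each datum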
if (ftype == 1) {
int *alldata;
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (int *) fielddata;
} else alldata = (int *) &buf[nbuf];
MPI_Allgatherv(data,nlocalsize,MPI_INT,alldata,
recvcounts,displs,MPI_INT,world);
if (ids) {
int *bufptr = (int *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
} else if (ftype == 2) {
int64_t *alldata;
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (int64_t *) fielddata;
} else alldata = (int64_t *) &buf[nbuf];
// NOTE: may be just MPI_LONG on some machines
MPI_Allgatherv(data,nlocalsize,MPI_LONG_LONG,alldata,
recvcounts,displs,MPI_LONG_LONG,world);
if (ids) {
int64_t *bufptr = (int64_t *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
} else if (ftype == 3) {
float *alldata;
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (float *) fielddata;
} else alldata = (float *) &buf[nbuf];
MPI_Allgatherv(data,nlocalsize,MPI_FLOAT,alldata,
recvcounts,displs,MPI_FLOAT,world);
if (ids) {
float *bufptr = (float *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
} else if (ftype == 4) {
double *alldata;
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (double *) fielddata;
} else alldata = (double *) &buf[nbuf];
MPI_Allgatherv(data,nlocalsize,MPI_DOUBLE,alldata,
recvcounts,displs,MPI_DOUBLE,world);
if (ids) {
double *bufptr = (double *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
/* eventually ftype = BYTE, but not yet
} else if (ftype == 5) {
char *alldata;
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (char *) fielddata;
} else alldata = (char *) &buf[nbuf];
MPI_Allgatherv(data,nlocalsize,MPI_CHAR,alldata,
recvcounts,displs,MPI_CHAR,world);
if (ids) {
char *bufptr = (char *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
memcpy(&bufptr[j],&alldata[m],nper);
m += nper;
}
}
*/
}
memcpy(&buf[nbuf+nbytes],pad,nbytesround-nbytes);
nbuf += nbytesround;
fieldcount++;
if (fieldcount == nfield) send_message();
}
/* ---------------------------------------------------------------------- */
void CSlib::send_message()
{
// setup header message
int m = 0;
header[m++] = msgID;
header[m++] = nfield;
for (int ifield = 0; ifield < nfield; ifield++) {
header[m++] = fieldID[ifield];
header[m++] = fieldtype[ifield];
header[m++] = fieldlen[ifield];
}
msg->send(nheader,header,nbuf,buf);
nsend++;
}
/* ---------------------------------------------------------------------- */
int CSlib::recv(int &nfield_caller, int *&fieldID_caller,
int *&fieldtype_caller, int *&fieldlen_caller)
{
msg->recv(maxheader,header,maxbuf,buf);
nrecv++;
// unpack header message
int m = 0;
msgID = header[m++];
nfield = header[m++];
allocate_fields();
int nbytes,nbytesround;
nbuf = 0;
for (int ifield = 0; ifield < nfield; ifield++) {
fieldID[ifield] = header[m++];
fieldtype[ifield] = header[m++];
fieldlen[ifield] = header[m++];
fieldoffset[ifield] = nbuf;
onefield(fieldtype[ifield],fieldlen[ifield],nbytes,nbytesround);
nbuf += nbytesround;
}
// return message parameters
nfield_caller = nfield;
fieldID_caller = fieldID;
fieldtype_caller = fieldtype;
fieldlen_caller = fieldlen;
return msgID;
}
/* ---------------------------------------------------------------------- */
int CSlib::unpack_int(int id)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack_int(): Unknown field ID");
if (fieldtype[ifield] != 1) error_all("unpack_int(): Mis-match of ftype");
if (fieldlen[ifield] != 1) error_all("unpack_int(): Flen is not 1");
int *ptr = (int *) unpack(id);
return *ptr;
}
/* ---------------------------------------------------------------------- */
int64_t CSlib::unpack_int64(int id)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack_int64(): Unknown field ID");
if (fieldtype[ifield] != 2) error_all("unpack_int64(): Mis-match of ftype");
if (fieldlen[ifield] != 1) error_all("unpack_int64(): Flen is not 1");
int64_t *ptr = (int64_t *) unpack(id);
return *ptr;
}
/* ---------------------------------------------------------------------- */
float CSlib::unpack_float(int id)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack_float(): Unknown field ID");
if (fieldtype[ifield] != 3) error_all("unpack_float(): Mis-match of ftype");
if (fieldlen[ifield] != 1) error_all("unpack_float(): Flen is not 1");
float *ptr = (float *) unpack(id);
return *ptr;
}
/* ---------------------------------------------------------------------- */
double CSlib::unpack_double(int id)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack_double(): Unknown field ID");
if (fieldtype[ifield] != 4) error_all("unpack_double(): Mis-match of ftype");
if (fieldlen[ifield] != 1) error_all("unpack_double(): Flen is not 1");
double *ptr = (double *) unpack(id);
return *ptr;
}
/* ---------------------------------------------------------------------- */
char *CSlib::unpack_string(int id)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack_string(): Unknown field ID");
if (fieldtype[ifield] != 5) error_all("unpack_string(): Mis-match of ftype");
char *ptr = (char *) unpack(id);
return ptr;
}
/* ---------------------------------------------------------------------- */
void *CSlib::unpack(int id)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack(): Unknown field ID");
return &buf[fieldoffset[ifield]];
}
/* ---------------------------------------------------------------------- */
void CSlib::unpack(int id, void *data)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack(): Unknown field ID");
int ftype = fieldtype[ifield];
int nbytes = fieldlen[ifield];
if (ftype == 1) nbytes *= sizeof(int);
else if (ftype == 2) nbytes *= sizeof(int64_t);
else if (ftype == 3) nbytes *= sizeof(float);
else if (ftype == 4) nbytes *= sizeof(double);
memcpy(data,&buf[fieldoffset[ifield]],nbytes);
}
/* ---------------------------------------------------------------------- */
void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
{
int i,j,k,m;
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack_parallel(): Unknown field ID");
if (nlocal < 0) error_all("unpack_parallel(): Invalid nlocal");
if (nper < 1) error_all("pack_parallel(): Invalid nper");
MPI_Comm world = (MPI_Comm) myworld;
int upto;
if (!ids) {
MPI_Scan(&nlocal,&upto,1,MPI_INT,MPI_SUM,world);
upto -= nlocal;
}
if (fieldtype[ifield] == 1) {
int *local = (int *) data;
int *global = (int *) &buf[fieldoffset[ifield]];
if (!ids) memcpy(local,&global[nper*upto],nper*nlocal*sizeof(int));
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
} else if (fieldtype[ifield] == 2) {
int64_t *local = (int64_t *) data;
int64_t *global = (int64_t *) &buf[fieldoffset[ifield]];
if (!ids) memcpy(local,&global[nper*upto],nper*nlocal*sizeof(int64_t));
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
} else if (fieldtype[ifield] == 3) {
float *local = (float *) data;
float *global = (float *) &buf[fieldoffset[ifield]];
if (!ids) memcpy(local,&global[nper*upto],nper*nlocal*sizeof(float));
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
} else if (fieldtype[ifield] == 4) {
double *local = (double *) data;
double *global = (double *) &buf[fieldoffset[ifield]];
if (!ids) memcpy(local,&global[nper*upto],nper*nlocal*sizeof(double));
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
/* eventually ftype = BYTE, but not yet
} else if (fieldtype[ifield] == 5) {
char *local = (char *) data;
char *global = (char *) &buf[fieldoffset[ifield]];
if (!ids) memcpy(local,&global[nper*upto],nper*nlocal*sizeof(char));
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
memcpy(&local[m],&global[j],nper);
m += nper;
}
}
*/
}
}
/* ---------------------------------------------------------------------- */
int CSlib::extract(int flag)
{
if (flag == 1) return nsend;
if (flag == 2) return nrecv;
error_all("extract(): Invalid flag");
return 0;
}
/* ---------------------------------------------------------------------- */
void CSlib::onefield(int ftype, int flen, int &nbytes, int &nbytesround)
{
int64_t bigbytes,bigbytesround;
int64_t biglen = flen;
if (ftype == 1) bigbytes = biglen * sizeof(int);
else if (ftype == 2) bigbytes = biglen * sizeof(int64_t);
else if (ftype == 3) bigbytes = biglen * sizeof(float);
else if (ftype == 4) bigbytes = biglen * sizeof(double);
else if (ftype == 5) bigbytes = biglen * sizeof(char);
bigbytesround = roundup(bigbytes,8);
if (nbuf + bigbytesround > INT_MAX)
error_all("pack(): Message size exceeds 32-bit integer limit");
nbytes = (int) bigbytes;
nbytesround = (int) bigbytesround;
if (nbuf + nbytesround > maxbuf) {
maxbuf = nbuf + nbytesround;
buf = (char *) srealloc(buf,maxbuf);
}
}
/* ---------------------------------------------------------------------- */
int CSlib::find_field(int id, int n)
{
int ifield;
for (ifield = 0; ifield < n; ifield++)
if (id == fieldID[ifield]) return ifield;
return -1;
}
/* ---------------------------------------------------------------------- */
void CSlib::allocate_fields()
{
int64_t bigbytes = (2 + 3*((int64_t) nfield)) * sizeof(int);
if (bigbytes > INT_MAX)
error_all("send(): Message header size exceeds 32-bit integer limit");
nheader = 2;
nheader += 3 * nfield;
if (nfield > maxfield) {
deallocate_fields();
maxfield = nfield;
fieldID = new int[maxfield];
fieldtype = new int[maxfield];
fieldlen = new int[maxfield];
fieldoffset = new int[maxfield];
}
if (nheader > maxheader) {
sfree(header);
maxheader = nheader;
header = (int *) smalloc(maxheader*sizeof(int));
}
}
/* ---------------------------------------------------------------------- */
void CSlib::deallocate_fields()
{
delete [] fieldID;
delete [] fieldtype;
delete [] fieldlen;
delete [] fieldoffset;
}
/* ---------------------------------------------------------------------- */
void *CSlib::smalloc(int nbytes)
{
if (nbytes == 0) return NULL;
void *ptr = malloc(nbytes);
if (ptr == NULL) {
char str[128];
sprintf(str,"malloc(): Failed to allocate %d bytes",nbytes);
error_one(str);
}
return ptr;
}
/* ---------------------------------------------------------------------- */
void *CSlib::srealloc(void *ptr, int nbytes)
{
if (nbytes == 0) {
sfree(ptr);
return NULL;
}
ptr = realloc(ptr,nbytes);
if (ptr == NULL) {
char str[128];
sprintf(str,"realloc(): Failed to reallocate %d bytes",nbytes);
error_one(str);
}
return ptr;
}
/* ---------------------------------------------------------------------- */
void CSlib::sfree(void *ptr)
{
if (ptr == NULL) return;
free(ptr);
}
/* ---------------------------------------------------------------------- */
void CSlib::error_all(const char *str)
{
if (me == 0) printf("CSlib ERROR: %s\n",str);
MPI_Comm world = (MPI_Comm) myworld;
MPI_Abort(world,1);
}
/* ---------------------------------------------------------------------- */
void CSlib::error_one(const char *str)
{
printf("CSlib ERROR: %s\n",str);
MPI_Comm world = (MPI_Comm) myworld;
MPI_Abort(world,1);
}
/* ----------------------------------------------------------------------
round N up to multiple of nalign and return it
NOTE: see mapreduce/src/keyvalue.cpp for doing this as uint64_t
------------------------------------------------------------------------- */
int64_t CSlib::roundup(int64_t n, int nalign)
{
if (n % nalign == 0) return n;
n = (n/nalign + 1) * nalign;
return n;
}
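
A minimal usage sketch of the protocol the implementation above enforces: send() declares how many fields the message will contain, each pack_*() call adds one field, and the message is dispatched automatically once the last declared field is packed; the receiver mirrors this with recv() plus unpack_*() by field ID. The csflag/mode/filename arguments and the field IDs below are illustrative assumptions, not values taken from this diff.

#include <mpi.h>
#include <stdio.h>
#include "cslib.h"
using namespace CSLIB_NS;

void couple(MPI_Comm comm)
{
  // assumed arguments: 0 = client side, "file" exchange mode, scratch filename
  CSlib cs(0,"file","tmp.couple",&comm);

  // two-field message: dispatched inside the second pack_*() call
  cs.send(1,2);
  cs.pack_int(1,100);                      // field ID 1, one int
  cs.pack_string(2,(char *) "hello");      // field ID 2, a string

  // receive the reply and unpack one field by its ID
  int nfield,*fieldID,*fieldtype,*fieldlen;
  int msgID = cs.recv(nfield,fieldID,fieldtype,fieldlen);
  double reply = cs.unpack_double(1);
  printf("reply msgID %d value %g\n",msgID,reply);
}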

View File

@@ -0,0 +1,87 @@
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifndef CSLIB_H
#define CSLIB_H
#include <stdint.h>
namespace CSLIB_NS {
class CSlib {
public:
int nsend,nrecv;
CSlib(int, const char *, const void *, const void *);
~CSlib();
void send(int, int);
void pack_int(int, int);
void pack_int64(int, int64_t);
void pack_float(int, float);
void pack_double(int, double);
void pack_string(int, char *);
void pack(int, int, int, void *);
void pack_parallel(int, int, int, int *, int, void *);
int recv(int &, int *&, int *&, int *&);
int unpack_int(int);
int64_t unpack_int64(int);
float unpack_float(int);
double unpack_double(int);
char *unpack_string(int);
void *unpack(int);
void unpack(int, void *);
void unpack_parallel(int, int, int *, int, void *);
int extract(int);
private:
uint64_t myworld; // really MPI_Comm, but avoids use of mpi.h in this file
// so apps can include this file w/ no MPI on system
int me,nprocs;
int client,server;
int nfield,maxfield;
int msgID,fieldcount;
int nheader,maxheader;
int nbuf,maxbuf;
int maxglobal,maxfieldbytes;
int *fieldID,*fieldtype,*fieldlen,*fieldoffset;
int *header;
int *recvcounts,*displs; // nprocs size for Allgathers
int *allids; // nglobal size for pack_parallel()
char *buf; // maxbuf size for msg with all fields
char *fielddata; // maxfieldbytes size for one global field
const char *pad;
class Msg *msg;
void send_message();
void onefield(int, int, int &, int &);
int find_field(int, int);
void allocate_fields();
void deallocate_fields();
int64_t roundup(int64_t, int);
void *smalloc(int);
void *srealloc(void *, int);
void sfree(void *);
void error_all(const char *);
void error_one(const char *);
};
}
#endif
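
A sketch of the pack_parallel()/unpack_parallel() pattern declared above, assuming each rank owns nlocal items tagged with 1-based global IDs: the packed field is assembled in global-ID order (per the (allids[i]-1)*nper indexing in cslib.cpp), and unpack_parallel() scatters a received field back into each rank's local slots. The helper name, field IDs, and nper = 3 are illustrative.

#include "cslib.h"
using namespace CSLIB_NS;

// hypothetical helper: exchange per-item 3-vectors keyed by 1-based global IDs
void exchange(CSlib &cs, int nlocal, int *globalID, double *xlocal, double *xnew)
{
  cs.send(2,1);                                    // one field in this message
  cs.pack_parallel(1,4,nlocal,globalID,3,xlocal);  // ftype 4 = double, 3 values per ID

  int nfield,*fieldID,*fieldtype,*fieldlen;
  cs.recv(nfield,fieldID,fieldtype,fieldlen);
  cs.unpack_parallel(1,nlocal,globalID,3,xnew);    // fills this rank's IDs only
}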

View File

@@ -0,0 +1,362 @@
# ------------------------------------------------------------------------
# CSlib - Client/server library for code coupling
# http://cslib.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright 2018 National Technology & Engineering Solutions of
# Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
# This software is distributed under the modified Berkeley Software
# Distribution (BSD) License.
#
# See the README file in the top-level CSlib directory.
# -------------------------------------------------------------------------
# Python wrapper on CSlib library via ctypes
# ctypes and Numpy data types:
# 32-bit int = c_int = np.intc = np.int32
# 64-bit int = c_longlong = np.int64
# 32-bit floating point = c_float = np.float32
# 64-bit floating point = c_double = np.float = np.float64
import sys,traceback
from ctypes import *
# Numpy and mpi4py packages may not exist
try:
import numpy as np
numpyflag = 1
except:
numpyflag = 0
try:
from mpi4py import MPI
mpi4pyflag = 1
except:
mpi4pyflag = 0
# wrapper class
class CSlib:
# instantiate CSlib thru its C-interface
def __init__(self,csflag,mode,ptr,comm):
# load libcslib.so
try:
if comm: self.lib = CDLL("libcsmpi.so",RTLD_GLOBAL)
else: self.lib = CDLL("libcsnompi.so",RTLD_GLOBAL)
except:
etype,value,tb = sys.exc_info()
traceback.print_exception(etype,value,tb)
raise OSError,"Could not load CSlib dynamic library"
# define ctypes API for each library method
self.lib.cslib_open.argtypes = [c_int,c_char_p,c_void_p,c_void_p,
POINTER(c_void_p)]
self.lib.cslib_open.restype = None
self.lib.cslib_close.argtypes = [c_void_p]
self.lib.cslib_close.restype = None
self.lib.cslib_send.argtypes = [c_void_p,c_int,c_int]
self.lib.cslib_send.restype = None
self.lib.cslib_pack_int.argtypes = [c_void_p,c_int,c_int]
self.lib.cslib_pack_int.restype = None
self.lib.cslib_pack_int64.argtypes = [c_void_p,c_int,c_longlong]
self.lib.cslib_pack_int64.restype = None
self.lib.cslib_pack_float.argtypes = [c_void_p,c_int,c_float]
self.lib.cslib_pack_float.restype = None
self.lib.cslib_pack_double.argtypes = [c_void_p,c_int,c_double]
self.lib.cslib_pack_double.restype = None
self.lib.cslib_pack_string.argtypes = [c_void_p,c_int,c_char_p]
self.lib.cslib_pack_string.restype = None
self.lib.cslib_pack.argtypes = [c_void_p,c_int,c_int,c_int,c_void_p]
self.lib.cslib_pack.restype = None
self.lib.cslib_pack_parallel.argtypes = [c_void_p,c_int,c_int,c_int,
POINTER(c_int),c_int,c_void_p]
self.lib.cslib_pack_parallel.restype = None
self.lib.cslib_recv.argtypes = [c_void_p,POINTER(c_int),
POINTER(POINTER(c_int)),
POINTER(POINTER(c_int)),
POINTER(POINTER(c_int))]
self.lib.cslib_recv.restype = c_int
self.lib.cslib_unpack_int.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack_int.restype = c_int
self.lib.cslib_unpack_int64.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack_int64.restype = c_longlong
self.lib.cslib_unpack_float.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack_float.restype = c_float
self.lib.cslib_unpack_double.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack_double.restype = c_double
self.lib.cslib_unpack_string.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack_string.restype = c_char_p
# override return in unpack()
self.lib.cslib_unpack.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack.restype = c_void_p
self.lib.cslib_unpack_data.argtypes = [c_void_p,c_int,c_void_p]
self.lib.cslib_unpack_data.restype = None
# override last arg in unpack_parallel()
self.lib.cslib_unpack_parallel.argtypes = [c_void_p,c_int,c_int,
POINTER(c_int),c_int,c_void_p]
self.lib.cslib_unpack_parallel.restype = None
self.lib.cslib_extract.argtypes = [c_void_p,c_int]
self.lib.cslib_extract.restype = c_int
# create an instance of CSlib with or w/out MPI communicator
self.cs = c_void_p()
if not comm:
self.lib.cslib_open(csflag,mode,ptr,None,byref(self.cs))
elif not mpi4pyflag:
print "Cannot pass MPI communicator to CSlib w/out mpi4py package"
sys.exit()
else:
address = MPI._addressof(comm)
comm_ptr = c_void_p(address)
if mode == "mpi/one":
address = MPI._addressof(ptr)
ptrcopy = c_void_p(address)
else: ptrcopy = ptr
self.lib.cslib_open(csflag,mode,ptrcopy,comm_ptr,byref(self.cs))
# destroy instance of CSlib
def __del__(self):
if self.cs: self.lib.cslib_close(self.cs)
def close(self):
self.lib.cslib_close(self.cs)
self.lib = None
# send a message
def send(self,msgID,nfield):
self.nfield = nfield
self.lib.cslib_send(self.cs,msgID,nfield)
# pack one field of message
def pack_int(self,id,value):
self.lib.cslib_pack_int(self.cs,id,value)
def pack_int64(self,id,value):
self.lib.cslib_pack_int64(self.cs,id,value)
def pack_float(self,id,value):
self.lib.cslib_pack_float(self.cs,id,value)
def pack_double(self,id,value):
self.lib.cslib_pack_double(self.cs,id,value)
def pack_string(self,id,value):
self.lib.cslib_pack_string(self.cs,id,value)
def pack(self,id,ftype,flen,data):
cdata = self.data_convert(ftype,flen,data)
self.lib.cslib_pack(self.cs,id,ftype,flen,cdata)
def pack_parallel(self,id,ftype,nlocal,ids,nper,data):
cids = self.data_convert(1,nlocal,ids)
cdata = self.data_convert(ftype,nper*nlocal,data)
self.lib.cslib_pack_parallel(self.cs,id,ftype,nlocal,cids,nper,cdata)
# convert input data to a ctypes vector to pass to CSlib
def data_convert(self,ftype,flen,data):
# tflag = type of data
# tflag = 1 if data is list or tuple
# tflag = 2 if data is Numpy array
# tflag = 3 if data is ctypes vector
# same usage of tflag as in unpack function
txttype = str(type(data))
if "numpy" in txttype: tflag = 2
elif "c_" in txttype: tflag = 3
else: tflag = 1
# create ctypes vector out of data to pass to lib
# cdata = ctypes vector to return
# NOTE: error check on ftype and tflag everywhere, also flen
if ftype == 1:
if tflag == 1: cdata = (flen * c_int)(*data)
elif tflag == 2: cdata = data.ctypes.data_as(POINTER(c_int))
elif tflag == 3: cdata = data
elif ftype == 2:
if tflag == 1: cdata = (flen * c_longlong)(*data)
elif tflag == 2: cdata = data.ctypes.data_as(POINTER(c_longlong))
elif tflag == 3: cdata = data
elif ftype == 3:
if tflag == 1: cdata = (flen * c_float)(*data)
elif tflag == 2: cdata = data.ctypes.data_as(POINTER(c_float))
elif tflag == 3: cdata = data
elif ftype == 4:
if tflag == 1: cdata = (flen * c_double)(*data)
elif tflag == 2: cdata = data.ctypes.data_as(POINTER(c_double))
elif tflag == 3: cdata = data
return cdata
# receive a message
def recv(self):
self.lib.cslib_recv.restype = c_int
nfield = c_int()
fieldID = POINTER(c_int)()
fieldtype = POINTER(c_int)()
fieldlen = POINTER(c_int)()
msgID = self.lib.cslib_recv(self.cs,byref(nfield),
byref(fieldID),byref(fieldtype),byref(fieldlen))
# copy returned C args to native Python int and lists
# store them in class so unpack() methods can access the info
self.nfield = nfield = nfield.value
self.fieldID = fieldID[:nfield]
self.fieldtype = fieldtype[:nfield]
self.fieldlen = fieldlen[:nfield]
return msgID,self.nfield,self.fieldID,self.fieldtype,self.fieldlen
# unpack one field of message
# tflag = type of data to return
# 3 = ctypes vector is default, since no conversion required
def unpack_int(self,id):
return self.lib.cslib_unpack_int(self.cs,id)
def unpack_int64(self,id):
return self.lib.cslib_unpack_int64(self.cs,id)
def unpack_float(self,id):
return self.lib.cslib_unpack_float(self.cs,id)
def unpack_double(self,id):
return self.lib.cslib_unpack_double(self.cs,id)
def unpack_string(self,id):
return self.lib.cslib_unpack_string(self.cs,id)
def unpack(self,id,tflag=3):
index = self.fieldID.index(id)
# reset data type of return so can morph by tflag
# cannot do this for the generic c_void_p returned by CSlib
if self.fieldtype[index] == 1:
self.lib.cslib_unpack.restype = POINTER(c_int)
elif self.fieldtype[index] == 2:
self.lib.cslib_unpack.restype = POINTER(c_longlong)
elif self.fieldtype[index] == 3:
self.lib.cslib_unpack.restype = POINTER(c_float)
elif self.fieldtype[index] == 4:
self.lib.cslib_unpack.restype = POINTER(c_double)
#elif self.fieldtype[index] == 5:
# self.lib.cslib_unpack.restype = POINTER(c_char)
cdata = self.lib.cslib_unpack(self.cs,id)
# tflag = user-requested type of data to return
# tflag = 1 to return data as list
# tflag = 2 to return data as Numpy array
# tflag = 3 to return data as ctypes vector
# same usage of tflag as in pack functions
# tflag = 2,3 should NOT perform a data copy
if tflag == 1:
data = cdata[:self.fieldlen[index]]
elif tflag == 2:
if numpyflag == 0:
print "Cannot return Numpy array w/out numpy package"
sys.exit()
data = np.ctypeslib.as_array(cdata,shape=(self.fieldlen[index],))
elif tflag == 3:
data = cdata
return data
# handle data array like pack() or unpack_parallel() ??
def unpack_data(self,id,tflag=3):
index = self.fieldID.index(id)
# unpack one field of message in parallel
# tflag = type of data to return
# 3 = ctypes vector is default, since no conversion required
# NOTE: allow direct use of user array (e.g. Numpy), if user provides data arg?
# as opposed to creating this cdata
# does that make any performance difference ?
# e.g. should we allow CSlib to populate an existing Numpy array's memory
def unpack_parallel(self,id,nlocal,ids,nper,tflag=3):
cids = self.data_convert(1,nlocal,ids)
# allocate memory for the returned data
# pass cdata ptr to the memory to CSlib unpack_parallel()
# this resets data type of last unpack_parallel() arg
index = self.fieldID.index(id)
if self.fieldtype[index] == 1: cdata = (nper*nlocal * c_int)()
elif self.fieldtype[index] == 2: cdata = (nlocal*nper * c_longlong)()
elif self.fieldtype[index] == 3: cdata = (nlocal*nper * c_float)()
elif self.fieldtype[index] == 4: cdata = (nlocal*nper * c_double)()
#elif self.fieldtype[index] == 5: cdata = (nlocal*nper * c_char)()
self.lib.cslib_unpack_parallel(self.cs,id,nlocal,cids,nper,cdata)
# tflag = user-requested type of data to return
# tflag = 1 to return data as list
# tflag = 2 to return data as Numpy array
# tflag = 3 to return data as ctypes vector
# same usage of tflag as in pack functions
if tflag == 1:
data = cdata[:nper*nlocal]
elif tflag == 2:
if numpyflag == 0:
print "Cannot return Numpy array w/out numpy package"
sys.exit()
# NOTE: next line gives ctypes warning for fieldtype = 2 = 64-bit int
# not sure why, reported as bug between ctypes and Numpy here:
# https://stackoverflow.com/questions/4964101/pep-3118-
# warning-when-using-ctypes-array-as-numpy-array
# but why not same warning when just using unpack() ??
# in Python these lines give same warning:
# >>> import ctypes,numpy
# >>> a = (10 * ctypes.c_longlong)()
# >>> b = numpy.ctypeslib.as_array(a)
data = np.ctypeslib.as_array(cdata,shape=(nlocal*nper,))
elif tflag == 3:
data = cdata
return data
# extract a library value
def extract(self,flag):
return self.lib.cslib_extract(self.cs,flag)

View File

@@ -0,0 +1,239 @@
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
// C style library interface to CSlib class
#include <mpi.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "cslib_wrap.h"
#include "cslib.h"
using namespace CSLIB_NS;
// ----------------------------------------------------------------------
void cslib_open(int csflag, const char *mode, const void *ptr,
const void *pcomm, void **csptr)
{
CSlib *cs = new CSlib(csflag,mode,ptr,pcomm);
*csptr = (void *) cs;
}
// ----------------------------------------------------------------------
void cslib_open_fortran(int csflag, const char *mode, const char *str,
const void *pcomm, void **csptr)
{
MPI_Comm ccomm;
void *pccomm = NULL;
if (pcomm) {
MPI_Fint *fcomm = (MPI_Fint *) pcomm;
ccomm = MPI_Comm_f2c(*fcomm);
pccomm = &ccomm;
}
CSlib *cs = new CSlib(csflag,mode,str,pccomm);
*csptr = (void *) cs;
}
// ----------------------------------------------------------------------
void cslib_open_fortran_mpi_one(int csflag, const char *mode,
const void *pboth, const void *pcomm,
void **csptr)
{
MPI_Comm ccomm,cboth;
void *pccomm,*pcboth;
MPI_Fint *fcomm = (MPI_Fint *) pcomm;
ccomm = MPI_Comm_f2c(*fcomm);
pccomm = &ccomm;
MPI_Fint *fboth = (MPI_Fint *) pboth;
cboth = MPI_Comm_f2c(*fboth);
pcboth = &cboth;
CSlib *cs = new CSlib(csflag,mode,pcboth,pccomm);
*csptr = (void *) cs;
}
// ----------------------------------------------------------------------
void cslib_close(void *ptr)
{
CSlib *cs = (CSlib *) ptr;
delete cs;
}
// ----------------------------------------------------------------------
void cslib_send(void *ptr, int msgID, int nfield)
{
CSlib *cs = (CSlib *) ptr;
cs->send(msgID,nfield);
}
// ----------------------------------------------------------------------
void cslib_pack_int(void *ptr, int id, int value)
{
CSlib *cs = (CSlib *) ptr;
cs->pack_int(id,value);
}
// ----------------------------------------------------------------------
void cslib_pack_int64(void *ptr, int id, int64_t value)
{
CSlib *cs = (CSlib *) ptr;
cs->pack_int64(id,value);
}
// ----------------------------------------------------------------------
void cslib_pack_float(void *ptr, int id, float value)
{
CSlib *cs = (CSlib *) ptr;
cs->pack_float(id,value);
}
// ----------------------------------------------------------------------
void cslib_pack_double(void *ptr, int id, double value)
{
CSlib *cs = (CSlib *) ptr;
cs->pack_double(id,value);
}
// ----------------------------------------------------------------------
void cslib_pack_string(void *ptr, int id, char *value)
{
CSlib *cs = (CSlib *) ptr;
cs->pack_string(id,value);
}
// ----------------------------------------------------------------------
void cslib_pack(void *ptr, int id, int ftype, int flen, void *data)
{
CSlib *cs = (CSlib *) ptr;
cs->pack(id,ftype,flen,data);
}
// ----------------------------------------------------------------------
void cslib_pack_parallel(void *ptr, int id, int ftype,
int nlocal, int *ids, int nper, void *data)
{
CSlib *cs = (CSlib *) ptr;
cs->pack_parallel(id,ftype,nlocal,ids,nper,data);
}
// ----------------------------------------------------------------------
int cslib_recv(void *ptr, int *nfield_caller,
int **fieldID_caller, int **fieldtype_caller,
int **fieldlen_caller)
{
CSlib *cs = (CSlib *) ptr;
int nfield;
int *fieldID,*fieldtype,*fieldlen;
int msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
*nfield_caller = nfield;
*fieldID_caller = fieldID;
*fieldtype_caller = fieldtype;
*fieldlen_caller = fieldlen;
return msgID;
}
// ----------------------------------------------------------------------
int cslib_unpack_int(void *ptr, int id)
{
CSlib *cs = (CSlib *) ptr;
return cs->unpack_int(id);
}
// ----------------------------------------------------------------------
int64_t cslib_unpack_int64(void *ptr, int id)
{
CSlib *cs = (CSlib *) ptr;
return cs->unpack_int64(id);
}
// ----------------------------------------------------------------------
float cslib_unpack_float(void *ptr, int id)
{
CSlib *cs = (CSlib *) ptr;
return cs->unpack_float(id);
}
// ----------------------------------------------------------------------
double cslib_unpack_double(void *ptr, int id)
{
CSlib *cs = (CSlib *) ptr;
return cs->unpack_double(id);
}
// ----------------------------------------------------------------------
char *cslib_unpack_string(void *ptr, int id)
{
CSlib *cs = (CSlib *) ptr;
return cs->unpack_string(id);
}
// ----------------------------------------------------------------------
void *cslib_unpack(void *ptr, int id)
{
CSlib *cs = (CSlib *) ptr;
return cs->unpack(id);
}
// ----------------------------------------------------------------------
void cslib_unpack_data(void *ptr, int id, void *data)
{
CSlib *cs = (CSlib *) ptr;
cs->unpack(id,data);
}
// ----------------------------------------------------------------------
void cslib_unpack_parallel(void *ptr, int id, int nlocal, int *ids,
int nper, void *data)
{
CSlib *cs = (CSlib *) ptr;
cs->unpack_parallel(id,nlocal,ids,nper,data);
}
// ----------------------------------------------------------------------
int cslib_extract(void *ptr, int flag)
{
CSlib *cs = (CSlib *) ptr;
return cs->extract(flag);
}
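
The same round trip driven through the C-style interface above via the opaque handle returned by cslib_open(); the csflag/mode arguments and field IDs are assumptions for illustration only.

#include <mpi.h>
#include <stdio.h>
#include "cslib_wrap.h"

void couple_c(MPI_Comm comm)
{
  void *cs;
  cslib_open(0,"file","tmp.couple",&comm,&cs);   // assumed: client side, file mode

  cslib_send(cs,1,1);                            // one-field message
  cslib_pack_int(cs,1,100);

  int nfield,*fieldID,*fieldtype,*fieldlen;
  int msgID = cslib_recv(cs,&nfield,&fieldID,&fieldtype,&fieldlen);
  printf("reply msgID %d, first field ID %d\n",msgID,fieldID[0]);

  cslib_close(cs);
}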

View File

@@ -0,0 +1,147 @@
! ISO_C_binding wrapper on CSlib C interface
module cslib_wrap
interface
subroutine cslib_open_fortran(csflag,mode,str,pcomm,ptr) bind(c)
use iso_c_binding
integer(c_int), value :: csflag
character(c_char) :: mode(*),str(*)
type(c_ptr), value :: pcomm
type(c_ptr) :: ptr
end subroutine cslib_open_fortran
subroutine cslib_open_fortran_mpi_one(csflag,mode,pboth,pcomm,ptr) bind(c)
use iso_c_binding
integer(c_int), value :: csflag
character(c_char) :: mode(*)
type(c_ptr), value :: pboth,pcomm
type(c_ptr) :: ptr
end subroutine cslib_open_fortran_mpi_one
subroutine cslib_close(ptr) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
end subroutine cslib_close
subroutine cslib_send(ptr,msgID,nfield) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: msgID,nfield
end subroutine cslib_send
subroutine cslib_pack_int(ptr,id,value) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id
integer(c_int), value :: value
end subroutine cslib_pack_int
subroutine cslib_pack_int64(ptr,id,value) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id
integer(c_int64_t), value :: value
end subroutine cslib_pack_int64
subroutine cslib_pack_float(ptr,id,value) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id
real(c_float), value :: value
end subroutine cslib_pack_float
subroutine cslib_pack_double(ptr,id,value) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id
real(c_double), value :: value
end subroutine cslib_pack_double
subroutine cslib_pack_string(ptr,id,value) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id
character(c_char) :: value(*)
end subroutine cslib_pack_string
subroutine cslib_pack(ptr,id,ftype,flen,data) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id,ftype,flen
type(c_ptr), value :: data
end subroutine cslib_pack
subroutine cslib_pack_parallel(ptr,id,ftype,nlocal,ids,nper,data) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id,ftype,nlocal,nper
type(c_ptr), value :: ids,data
end subroutine cslib_pack_parallel
function cslib_recv(ptr,nfield,fieldID,fieldtype,fieldlen) bind(c)
use iso_c_binding
integer(c_int) :: cslib_recv
type(c_ptr), value :: ptr
integer(c_int) :: nfield
type(c_ptr) :: fieldID,fieldtype,fieldlen
end function cslib_recv
function cslib_unpack_int(ptr,id) bind(c)
use iso_c_binding
integer(c_int) :: cslib_unpack_int
type(c_ptr), value :: ptr
integer(c_int), value :: id
end function cslib_unpack_int
function cslib_unpack_int64(ptr,id) bind(c)
use iso_c_binding
integer(c_int64_t) :: cslib_unpack_int64
type(c_ptr), value :: ptr
integer(c_int), value :: id
end function cslib_unpack_int64
function cslib_unpack_float(ptr,id) bind(c)
use iso_c_binding
real(c_float) :: cslib_unpack_float
type(c_ptr), value :: ptr
integer(c_int), value :: id
end function cslib_unpack_float
function cslib_unpack_double(ptr,id) bind(c)
use iso_c_binding
real(c_double) :: cslib_unpack_double
type(c_ptr), value :: ptr
integer(c_int), value :: id
end function cslib_unpack_double
function cslib_unpack_string(ptr,id) bind(c)
use iso_c_binding
type(c_ptr) :: cslib_unpack_string
type(c_ptr), value :: ptr
integer(c_int), value :: id
end function cslib_unpack_string
function cslib_unpack(ptr,id) bind(c)
use iso_c_binding
type(c_ptr) :: cslib_unpack
type(c_ptr), value :: ptr
integer(c_int), value :: id
end function cslib_unpack
subroutine cslib_unpack_parallel(ptr,id,nlocal,ids,nper,data) bind(c)
use iso_c_binding
type(c_ptr), value :: ptr
integer(c_int), value :: id,nlocal,nper
type(c_ptr), value :: ids,data
end subroutine cslib_unpack_parallel
function cslib_extract(ptr,flag) bind(c)
use iso_c_binding
integer(c_int) :: cslib_extract
type(c_ptr), value :: ptr
integer(c_int), value :: flag
end function cslib_extract
end interface
end module cslib_wrap

View File

@@ -0,0 +1,54 @@
/* ----------------------------------------------------------------------
CSlib - Client/server library for code coupling
http://cslib.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright 2018 National Technology & Engineering Solutions of
Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
NTESS, the U.S. Government retains certain rights in this software.
This software is distributed under the modified Berkeley Software
Distribution (BSD) License.
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
/* C style library interface to CSlib class
ifdefs allow this file to be included in a C program
*/
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
void cslib_open(int, const char *, const void *, const void *, void **);
void cslib_open_fortran(int, const char *, const char *, const void *, void **);
void cslib_open_fortran_mpi_one(int, const char *, const void *,
const void *, void **);
void cslib_close(void *);
void cslib_send(void *, int, int);
void cslib_pack_int(void *, int, int);
void cslib_pack_int64(void *, int, int64_t);
void cslib_pack_float(void *, int, float);
void cslib_pack_double(void *, int, double);
void cslib_pack_string(void *, int, char *);
void cslib_pack(void *, int, int, int, void *);
void cslib_pack_parallel(void *, int, int, int, int *, int, void *);
int cslib_recv(void *, int *, int **, int **, int **);
int cslib_unpack_int(void *, int);
int64_t cslib_unpack_int64(void *, int);
float cslib_unpack_float(void *, int);
double cslib_unpack_double(void *, int);
char *cslib_unpack_string(void *, int);
void *cslib_unpack(void *, int);
void cslib_unpack_data(void *, int, void *);
void cslib_unpack_parallel(void *, int, int, int *, int, void *);
int cslib_extract(void *, int);
#ifdef __cplusplus
}
#endif
