Merge branch 'develop' into BPM

Axel Kohlmeyer
2022-04-11 12:18:34 -04:00
1084 changed files with 38368 additions and 47018 deletions

View File

@ -26,6 +26,7 @@ jobs:
shell: bash
run: |
python3 -m pip install numpy
python3 -m pip install pyyaml
cmake -C cmake/presets/windows.cmake \
-D PKG_PYTHON=on \
-S cmake -B build \

View File

@ -37,6 +37,7 @@ jobs:
working-directory: build
run: |
ccache -z
python3 -m pip install pyyaml
cmake -C ../cmake/presets/clang.cmake \
-C ../cmake/presets/most.cmake \
-D CMAKE_CXX_COMPILER_LAUNCHER=ccache \

View File

@ -4,10 +4,8 @@ This directory contains 5 benchmark problems which are discussed in
the Benchmark section of the LAMMPS documentation, and on the
Benchmark page of the LAMMPS WWW site (https://www.lammps.org/bench.html).
This directory also has several sub-directories:
This directory also has one sub-directory:
FERMI benchmark scripts for desktop machine with Fermi GPUs (Tesla)
KEPLER benchmark scripts for GPU cluster with Kepler GPUs
POTENTIALS benchmarks scripts for various potentials in LAMMPS
The results for all of these benchmarks are displayed and discussed on

View File

@ -209,7 +209,6 @@ set(STANDARD_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP
@ -452,7 +451,7 @@ else()
endif()
foreach(PKG_WITH_INCL KSPACE PYTHON ML-IAP VORONOI COLVARS ML-HDNNP MDI MOLFILE NETCDF
PLUMED QMMM ML-QUIP SCAFACOS MACHDYN VTK KIM LATTE MESSAGE MSCG COMPRESS ML-PACE)
PLUMED QMMM ML-QUIP SCAFACOS MACHDYN VTK KIM LATTE MSCG COMPRESS ML-PACE)
if(PKG_${PKG_WITH_INCL})
include(Packages/${PKG_WITH_INCL})
endif()
@ -811,6 +810,7 @@ endif()
include(FeatureSummary)
feature_summary(DESCRIPTION "The following tools and libraries have been found and configured:" WHAT PACKAGES_FOUND)
message(STATUS "<<< Build configuration >>>
LAMMPS Version: ${PROJECT_VERSION}
Operating System: ${CMAKE_SYSTEM_NAME} ${CMAKE_LINUX_DISTRO} ${CMAKE_DISTRO_VERSION}
Build type: ${LAMMPS_BUILD_TYPE}
Install path: ${CMAKE_INSTALL_PREFIX}

View File

@ -347,6 +347,10 @@ elseif(GPU_API STREQUAL "HIP")
target_link_libraries(gpu PRIVATE hip::host)
if(HIP_USE_DEVICE_SORT)
if(HIP_PLATFORM STREQUAL "amd")
# newer versions of ROCm (5.1+) require C++14 for rocprim
set_property(TARGET gpu PROPERTY CXX_STANDARD 14)
endif()
# add hipCUB
target_include_directories(gpu PRIVATE ${HIP_ROOT_DIR}/../include)
target_compile_definitions(gpu PRIVATE -DUSE_HIP_DEVICE_SORT)

View File

@ -1,5 +1,14 @@
enable_language(C)
# we don't use the parallel i/o interface.
set(HDF5_PREFER_PARALLEL FALSE)
find_package(HDF5 REQUIRED)
# parallel HDF5 will import incompatible MPI headers with a serial build
if((NOT BUILD_MPI) AND HDF5_IS_PARALLEL)
message(FATAL_ERROR "Serial LAMMPS build and parallel HDF5 library are not compatible")
endif()
target_link_libraries(h5md PRIVATE ${HDF5_LIBRARIES})
target_include_directories(h5md PUBLIC ${HDF5_INCLUDE_DIRS})

View File

@ -8,8 +8,8 @@ option(DOWNLOAD_MDI "Download and compile the MDI library instead of using an al
if(DOWNLOAD_MDI)
message(STATUS "MDI download requested - we will build our own")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.2.9.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "ddfa46d6ee15b4e59cfd527ec7212184" CACHE STRING "MD5 checksum for MDI tarball")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.3.0.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "8a8da217148bd9b700083b67d795af5e" CACHE STRING "MD5 checksum for MDI tarball")
mark_as_advanced(MDI_URL)
mark_as_advanced(MDI_MD5)
enable_language(C)

View File

@ -1,31 +0,0 @@
if(LAMMPS_SIZES STREQUAL "BIGBIG")
message(FATAL_ERROR "The MESSAGE Package is not compatible with -DLAMMPS_BIGBIG")
endif()
option(MESSAGE_ZMQ "Use ZeroMQ in MESSAGE package" OFF)
file(GLOB_RECURSE cslib_SOURCES
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.cpp)
add_library(cslib STATIC ${cslib_SOURCES})
target_compile_definitions(cslib PRIVATE -DLAMMPS_${LAMMPS_SIZES})
set_target_properties(cslib PROPERTIES OUTPUT_NAME lammps_cslib${LAMMPS_MACHINE})
if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
target_link_libraries(cslib PRIVATE MPI::MPI_CXX)
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_MPI)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()
if(MESSAGE_ZMQ)
target_compile_definitions(cslib PRIVATE -DZMQ_YES)
find_package(ZMQ REQUIRED)
target_link_libraries(cslib PUBLIC ZMQ::ZMQ)
else()
target_compile_definitions(cslib PRIVATE -DZMQ_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_ZMQ)
endif()
target_link_libraries(lammps PRIVATE cslib)
target_include_directories(lammps PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src)

View File

@ -42,16 +42,10 @@ if(DOWNLOAD_N2P2)
if(NOT BUILD_MPI)
set(N2P2_PROJECT_OPTIONS "-DN2P2_NO_MPI")
else()
# get path to MPI include directory when cross-compiling to windows
if((CMAKE_SYSTEM_NAME STREQUAL Windows) AND CMAKE_CROSSCOMPILING)
# get path to MPI include directory
get_target_property(N2P2_MPI_INCLUDE MPI::MPI_CXX INTERFACE_INCLUDE_DIRECTORIES)
set(N2P2_PROJECT_OPTIONS "-I${N2P2_MPI_INCLUDE}")
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
get_target_property(N2P2_MPI_INCLUDE MPI::MPI_CXX INTERFACE_INCLUDE_DIRECTORIES)
set(N2P2_PROJECT_OPTIONS "-I${N2P2_MPI_INCLUDE}")
endif()
endif()
# prefer GNU make, if available. N2P2 lib seems to need it.
find_program(N2P2_MAKE NAMES gmake make)

View File

@ -48,7 +48,6 @@ set(ALL_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP

View File

@ -50,7 +50,6 @@ set(ALL_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP

View File

@ -0,0 +1,15 @@
# preset that enables KOKKOS and selects SYCL compilation with OpenMP
# enabled as well. Also sets some performance related compiler flags.
set(PKG_KOKKOS ON CACHE BOOL "" FORCE)
set(Kokkos_ENABLE_SERIAL ON CACHE BOOL "" FORCE)
set(Kokkos_ENABLE_OPENMP ON CACHE BOOL "" FORCE)
set(Kokkos_ENABLE_CUDA OFF CACHE BOOL "" FORCE)
set(Kokkos_ENABLE_SYCL ON CACHE BOOL "" FORCE)
set(Kokkos_ARCH_MAXWELL50 on CACHE BOOL "" FORCE)
set(BUILD_OMP ON CACHE BOOL "" FORCE)
set(CMAKE_CXX_COMPILER clang++ CACHE STRING "" FORCE)
set(MPI_CXX_COMPILER "mpicxx" CACHE STRING "" FORCE)
set(CMAKE_CXX_STANDARD 17 CACHE STRING "" FORCE)
set(CMAKE_SHARED_LINKER_FLAGS "-Xsycl-target-frontend -O3" CACHE STRING "" FORCE)
set(CMAKE_TUNE_FLAGS "-fgpu-inline-threshold=100000 -Xsycl-target-frontend -O3 -Xsycl-target-frontend -ffp-contract=on -Wno-unknown-cuda-version" CACHE STRING "" FORCE)

View File

@ -15,7 +15,6 @@ set(PACKAGES_WITH_LIB
MACHDYN
MDI
MESONT
MESSAGE
ML-HDNNP
ML-PACE
ML-QUIP

View File

@ -1,7 +1,7 @@
.TH LAMMPS "1" "17 February 2022" "2022-2-17"
.TH LAMMPS "1" "24 March 2022" "2022-3-24"
.SH NAME
.B LAMMPS
\- Molecular Dynamics Simulator.
\- Molecular Dynamics Simulator. Version 24 March 2022
.SH SYNOPSIS
.B lmp
@ -297,7 +297,7 @@ the chapter on errors in the
manual gives some additional information about error messages, if possible.
.SH COPYRIGHT
© 2003--2021 Sandia Corporation
© 2003--2022 Sandia Corporation
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as

View File

@ -98,7 +98,7 @@ msi2lmp decane -c 0 -f oplsaa
.SH COPYRIGHT
© 2003--2021 Sandia Corporation
© 2003--2022 Sandia Corporation
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as

View File

@ -45,7 +45,6 @@ This is the list of packages that may require additional steps.
* :ref:`MACHDYN <machdyn>`
* :ref:`MDI <mdi>`
* :ref:`MESONT <mesont>`
* :ref:`MESSAGE <message>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-IAP <mliap>`
* :ref:`ML-PACE <ml-pace>`
@ -638,13 +637,14 @@ This list was last updated for version 3.5.0 of the Kokkos library.
-D CMAKE_CXX_COMPILER=${HOME}/lammps/lib/kokkos/bin/nvcc_wrapper
To simplify compilation, three preset files are included in the
To simplify compilation, four preset files are included in the
``cmake/presets`` folder, ``kokkos-serial.cmake``,
``kokkos-openmp.cmake``, and ``kokkos-cuda.cmake``. They will
enable the KOKKOS package and enable some hardware choice. So to
compile with OpenMP host parallelization, CUDA device
parallelization (for GPUs with CC 5.0 and up) with some common
packages enabled, you can do the following:
``kokkos-openmp.cmake``, ``kokkos-cuda.cmake``, and
``kokkos-sycl.cmake``. They will enable the KOKKOS package and
enable some hardware choice. So to compile with OpenMP host
parallelization, CUDA device parallelization (for GPUs with CC 5.0
and up) with some common packages enabled, you can do the
following:
.. code-block:: bash
@ -796,47 +796,6 @@ library.
----------
.. _message:
MESSAGE package
-----------------------------
This package can optionally include support for messaging via sockets,
using the open-source `ZeroMQ library <http://zeromq.org>`_, which must
be installed on your system.
.. tabs::
.. tab:: CMake build
.. code-block:: bash
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
-D ZMQ_LIBRARY=path # ZMQ library file (only needed if a custom location)
-D ZMQ_INCLUDE_DIR=path # ZMQ include directory (only needed if a custom location)
.. tab:: Traditional make
Before building LAMMPS, you must build the CSlib library in
``lib/message``\ . You can build the CSlib library manually if
you prefer; follow the instructions in ``lib/message/README``\ .
You can also do it in one step from the ``lammps/src`` dir, using
a command like these, which simply invoke the
``lib/message/Install.py`` script with the specified args:
.. code-block:: bash
$ make lib-message # print help message
$ make lib-message args="-m -z" # build with MPI and socket (ZMQ) support
$ make lib-message args="-s" # build as serial lib with no ZMQ support
The build should produce two files: ``lib/message/cslib/src/libmessage.a``
and ``lib/message/Makefile.lammps``. The latter is copied from an
existing ``Makefile.lammps.*`` and has settings to link with the ZeroMQ
library if requested in the build.
----------
.. _mliap:
ML-IAP package

View File

@ -45,7 +45,6 @@ packages:
* :ref:`KOKKOS <kokkos>`
* :ref:`LATTE <latte>`
* :ref:`MACHDYN <machdyn>`
* :ref:`MESSAGE <message>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-PACE <ml-pace>`
* :ref:`ML-QUIP <ml-quip>`

View File

@ -73,8 +73,9 @@ configuration should set this up automatically, but is untested.
In case of problems, you are recommended to contact somebody with
experience in using Cygwin. If you do come across portability problems
requiring changes to the LAMMPS source code, or figure out corrections
yourself, please report them on the lammps-users mailing list, or file
them as an issue or pull request on the LAMMPS GitHub project.
yourself, please report them on the
`LAMMPS forum at MatSci <https://matsci.org/c/lammps/lammps-development/>`_,
or file them as an issue or pull request on the LAMMPS GitHub project.
.. _msvc:
@ -117,8 +118,8 @@ LAMMPS with MPI enabled.
.. note::
This is work in progress and you should contact the LAMMPS developers
via GitHub, the forum, or the mailing list, if you have questions or
LAMMPS specific problems.
via GitHub or the `LAMMPS forum at MatSci <https://matsci.org/c/lammps/lammps-development/>`_,
if you have questions or LAMMPS specific problems.
.. _cross:

View File

@ -67,8 +67,7 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`lattice <lattice>`
* :doc:`log <log>`
* :doc:`mass <mass>`
* :doc:`mdi/engine <mdi_engine>`
* :doc:`message <message>`
* :doc:`mdi <mdi>`
* :doc:`minimize <minimize>`
* :doc:`min_modify <min_modify>`
* :doc:`min_style <min_style>`
@ -105,7 +104,6 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`restart <restart>`
* :doc:`run <run>`
* :doc:`run_style <run_style>`
* :doc:`server <server>`
* :doc:`set <set>`
* :doc:`shell <shell>`
* :doc:`special_bonds <special_bonds>`

View File

@ -63,6 +63,7 @@ KOKKOS, o = OPENMP, t = OPT.
* :doc:`event/displace <compute_event_displace>`
* :doc:`fabric <compute_fabric>`
* :doc:`fep <compute_fep>`
* :doc:`fep/ta <compute_fep_ta>`
* :doc:`force/tally <compute_tally>`
* :doc:`fragment/atom <compute_cluster_atom>`
* :doc:`global/atom <compute_global_atom>`

View File

@ -51,10 +51,10 @@ OPT.
* :doc:`bond/swap <fix_bond_swap>`
* :doc:`box/relax <fix_box_relax>`
* :doc:`charge/regulation <fix_charge_regulation>`
* :doc:`client/md <fix_client_md>`
* :doc:`cmap <fix_cmap>`
* :doc:`colvars <fix_colvars>`
* :doc:`controller <fix_controller>`
* :doc:`damping/cundall <fix_damping_cundall>`
* :doc:`deform (k) <fix_deform>`
* :doc:`deposit <fix_deposit>`
* :doc:`dpd/energy (k) <fix_dpd_energy>`
@ -100,7 +100,7 @@ OPT.
* :doc:`lb/viscous <fix_lb_viscous>`
* :doc:`lineforce <fix_lineforce>`
* :doc:`manifoldforce <fix_manifoldforce>`
* :doc:`mdi/engine <fix_mdi_engine>`
* :doc:`mdi/aimd <fix_mdi_aimd>`
* :doc:`meso/move <fix_meso_move>`
* :doc:`mol/swap <fix_mol_swap>`
* :doc:`momentum (k) <fix_momentum>`
@ -245,6 +245,7 @@ OPT.
* :doc:`vector <fix_vector>`
* :doc:`viscosity <fix_viscosity>`
* :doc:`viscous <fix_viscous>`
* :doc:`viscous/sphere <fix_viscous_sphere>`
* :doc:`wall/body/polygon <fix_wall_body_polygon>`
* :doc:`wall/body/polyhedron <fix_wall_body_polyhedron>`
* :doc:`wall/colloid <fix_wall>`

View File

@ -88,12 +88,12 @@ OPT.
* :doc:`coul/tt <pair_coul_tt>`
* :doc:`coul/wolf (ko) <pair_coul>`
* :doc:`coul/wolf/cs <pair_cs>`
* :doc:`dpd (gio) <pair_dpd>`
* :doc:`dpd (giko) <pair_dpd>`
* :doc:`dpd/fdt <pair_dpd_fdt>`
* :doc:`dpd/ext <pair_dpd_ext>`
* :doc:`dpd/ext/tstat <pair_dpd_ext>`
* :doc:`dpd/ext (k) <pair_dpd_ext>`
* :doc:`dpd/ext/tstat (k) <pair_dpd_ext>`
* :doc:`dpd/fdt/energy (k) <pair_dpd_fdt>`
* :doc:`dpd/tstat (go) <pair_dpd>`
* :doc:`dpd/tstat (gko) <pair_dpd>`
* :doc:`dsmc <pair_dsmc>`
* :doc:`e3b <pair_e3b>`
* :doc:`drip <pair_drip>`

View File

@ -68,7 +68,7 @@ Members of ``lammpsplugin_t``
* - author
- String with the name and email of the author
* - creator.v1
- Pointer to factory function for pair, bond, angle, dihedral, improper or command styles
- Pointer to factory function for pair, bond, angle, dihedral, improper, kspace, or command styles
* - creator.v2
- Pointer to factory function for compute, fix, or region styles
* - handle
@ -262,3 +262,24 @@ A plugin may be registered under an existing style name. In that case
the plugin will override the existing code. This can be used to modify
the behavior of existing styles or to debug new versions of them without
having to re-compile or re-install all of LAMMPS.
Compiling plugins
^^^^^^^^^^^^^^^^^
Plugins need to be compiled with the same compilers and libraries
(e.g. MPI) and compilation settings (MPI on/off, OpenMP, integer sizes)
as the LAMMPS executable and library. Otherwise the plugin will likely
not load due to mismatches in the function signatures (LAMMPS is C++, so
scope, type, and number of arguments are encoded into the symbol names,
and thus differences in them will lead to failed plugin load commands).
Compilation of the plugin can be managed with either CMake or
traditional GNU makefiles. Some examples that can be used as a template
are in the ``examples/plugins`` folder. The CMake script code has some
small adjustments to allow building the plugins for running unit tests
with them. Another example that converts the KIM package into a plugin
can be found in the ``examples/kim/plugin`` folder. No changes to the
sources of the KIM package themselves are needed; only the plugin
interface and loader code needs to be added. This example only supports
building with CMake, but is probably a more typical case. To compile,
you need to run CMake with -DLAMMPS_SOURCE_DIR=<path/to/lammps/src/folder>.
Other configuration settings are identical to those for compiling LAMMPS.

View File

@ -133,6 +133,9 @@ and parsing files or arguments.
.. doxygenfunction:: trim_comment
:project: progguide
.. doxygenfunction:: star_subst
:project: progguide
.. doxygenfunction:: has_utf8
:project: progguide

View File

@ -17,9 +17,8 @@ the steps outlined below:
if your issue has already been reported and if it is still open.
* Check the `GitHub Pull Requests page <https://github.com/lammps/lammps/pulls>`_
to see if there is already a fix for your bug pending.
* Check the `mailing list archives <https://www.lammps.org/mail.html>`_ or
the `LAMMPS forum <https://www.lammps.org/forum.html>`_ to see if the
issue has been discussed before.
* Check the `LAMMPS forum at MatSci <https://matsci.org/lammps/>`_
to see if the issue has been discussed before.
If none of these steps yields any useful information, please file a new
bug report on the `GitHub Issue page <https://github.com/lammps/lammps/issues>`_.
@ -38,12 +37,9 @@ generate this restart from a data file or a simple additional input.
This input deck can be used with tools like a debugger or `valgrind
<https://valgrind.org>`_ to further :doc:`debug the crash <Errors_debug>`.
You may also send an email to the LAMMPS mailing list at
"lammps-users at lists.sourceforge.net" describing the problem with the
same kind of information. The mailing list can provide a faster response,
especially if the bug reported is actually expected behavior. But because
of the high volume of the mailing list, it can happen that your e-mail
is overlooked and then forgotten. Issues on GitHub have to be explicitly
closed, so that will *guarantee* that at least one LAMMPS developer will
have looked at it.
You may also post a message in the `development category of the LAMMPS
forum at MatSci <https://matsci.org/c/lammps/lammps-development/>`_
describing the problem with the same kind of information. The forum can
provide a faster response, especially if the bug reported is actually
expected behavior or other LAMMPS users have come across it before.

View File

@ -96,12 +96,12 @@ Lowercase directories
+-------------+------------------------------------------------------------------+
| latte | examples for using fix latte for DFTB via the LATTE library |
+-------------+------------------------------------------------------------------+
| mdi | use of the MDI package and MolSSI MDI code coupling library |
+-------------+------------------------------------------------------------------+
| meam | MEAM test for SiC and shear (same as shear examples) |
+-------------+------------------------------------------------------------------+
| melt | rapid melt of 3d LJ system |
+-------------+------------------------------------------------------------------+
| message | demos for LAMMPS client/server coupling with the MESSAGE package |
+-------------+------------------------------------------------------------------+
| micelle | self-assembly of small lipid-like molecules into 2d bilayers |
+-------------+------------------------------------------------------------------+
| min | energy minimization of 2d LJ melt |

View File

@ -22,7 +22,6 @@ General howto
Howto_replica
Howto_library
Howto_couple
Howto_client_server
Howto_mdi
Howto_broken_bonds

View File

@ -1,163 +0,0 @@
Using LAMMPS in client/server mode
==================================
Client/server coupling of two codes is where one code is the "client"
and sends request messages to a "server" code. The server responds to
each request with a reply message. This enables the two codes to work
in tandem to perform a simulation. LAMMPS can act as either a client
or server code.
Some advantages of client/server coupling are that the two codes run
as stand-alone executables; they are not linked together. Thus
neither code needs to have a library interface. This often makes it
easier to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code that implements the
client-side protocol can be used in tandem with any code that
implements the server-side protocol, without the two codes needing to
know anything more specific about each other.
A simple example of client/server coupling is where LAMMPS is the
client code performing MD timestepping. Each timestep it sends a
message to a server quantum code containing current coords of all the
atoms. The quantum code computes energy and forces based on the
coords. It returns them as a message to LAMMPS, which completes the
timestep.
A more complex example is where LAMMPS is the client code and
processes a series of data files, sending each configuration to a
quantum code to compute energy and forces. Or LAMMPS runs dynamics
with an atomistic force field, but pauses every N steps to ask the
quantum code to compute energy and forces.
Alternate methods for code coupling with LAMMPS are described on
the :doc:`Howto couple <Howto_couple>` doc page.
The protocol for using LAMMPS as a client is to use these 3 commands
in this order (other commands may come in between):
* :doc:`message client <message>` # initiate client/server interaction
* :doc:`fix client/md <fix_client_md>` # any client fix which makes specific requests to the server
* :doc:`message quit <message>` # terminate client/server interaction
In between the two message commands, a client fix command and
:doc:`unfix <unfix>` command can be used multiple times. Similarly,
this sequence of 3 commands can be repeated multiple times, assuming
the server program operates in a similar fashion, to initiate and
terminate client/server communication.
The protocol for using LAMMPS as a server is to use these 2 commands
in this order (other commands may come in between):
* :doc:`message server <message>` # initiate client/server interaction
* :doc:`server md <server_md>` # any server command which responds to specific requests from the client
This sequence of 2 commands can be repeated multiple times, assuming
the client program operates in a similar fashion, to initiate and
terminate client/server communication.
LAMMPS support for client/server coupling is in its :ref:`MESSAGE package <PKG-MESSAGE>` which implements several
commands that enable LAMMPS to act as a client or server, as discussed
below. The MESSAGE package also wraps a client/server library called
CSlib which enables two codes to exchange messages in different ways,
either via files, sockets, or MPI. The CSlib is provided with LAMMPS
in the lib/message dir. The CSlib has its own
`website <https://cslib.sandia.gov>`_ with documentation and test
programs.
.. note::
For client/server coupling to work between LAMMPS and another
code, the other code also has to use the CSlib. This can sometimes be
done without any modifications to the other code by simply wrapping it
with a Python script that exchanges CSlib messages with LAMMPS and
prepares input for or processes output from the other code. The other
code also has to implement a matching protocol for the format and
content of messages that LAMMPS exchanges with it.
These are the commands currently in the MESSAGE package for two
protocols, MD and MC (Monte Carlo). New protocols can easily be
defined and added to this directory, where LAMMPS acts as either the
client or server.
* :doc:`message <message>`
* :doc:`fix client md <fix_client_md>` = LAMMPS is a client for running MD
* :doc:`server md <server_md>` = LAMMPS is a server for computing MD forces
* :doc:`server mc <server_mc>` = LAMMPS is a server for computing a Monte Carlo energy
The server doc files give details of the message protocols
for data that is exchanged between the client and server.
These example directories illustrate how to use LAMMPS as either a
client or server code:
* examples/message
* examples/COUPLE/README
* examples/COUPLE/lammps_mc
* examples/COUPLE/lammps_nwchem
* examples/COUPLE/lammps_vasp
The examples/message directory couples a client instance of LAMMPS to a
server instance of LAMMPS.
The files in the *lammps_mc* folder show how to couple LAMMPS as
a server to a simple Monte Carlo client code as the driver.
The files in the *lammps_nwchem* folder show how to couple LAMMPS
as a client code running MD timestepping to NWChem acting as a
server providing quantum DFT forces, through a Python wrapper script
on NWChem.
The files in the *lammps_vasp* folder show how to couple LAMMPS as
a client code running MD timestepping to VASP acting as a server
providing quantum DFT forces, through a Python wrapper script on VASP.
Here is how to launch a client and server code together for any of the
4 modes of message exchange that the :doc:`message <message>` command
and the CSlib support. Here LAMMPS is used as both the client and
server code. Another code could be substituted for either.
The examples below show launching both codes from the same window (or
batch script), using the "&" character to launch the first code in the
background. For all modes except *mpi/one*, you could also launch the
codes in separate windows on your desktop machine. It does not
matter whether you launch the client or server first.
In these examples either code can be run on one or more processors.
If running in a non-MPI mode (file or zmq) you can launch a code on a
single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch both codes via
mpirun, even if one or both of them runs on a single processor. This
is so that MPI can figure out how to connect both MPI processes
together to exchange MPI messages between them.
For message exchange in *file*, *zmq*, or *mpi/two* modes:
.. code-block:: bash
% mpirun -np 1 lmp_mpi -log log.client < in.client &
% mpirun -np 2 lmp_mpi -log log.server < in.server
% mpirun -np 4 lmp_mpi -log log.client < in.client &
% mpirun -np 1 lmp_mpi -log log.server < in.server
% mpirun -np 2 lmp_mpi -log log.client < in.client &
% mpirun -np 4 lmp_mpi -log log.server < in.server
For message exchange in *mpi/one* mode:
Launch both codes in a single mpirun command:
.. code-block:: bash
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -log log.server
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is an integer
label that will be used to distinguish one executable from another in
the multiple executables that the mpirun command launches. In this
example the client was colored with a 0, and the server with a 1.

View File

@ -12,16 +12,16 @@ LAMMPS can be coupled to other codes in at least 4 ways. Each has
advantages and disadvantages, which you will have to think about in the
context of your application.
1. Define a new :doc:`fix <fix>` command that calls the other code. In
this scenario, LAMMPS is the driver code. During timestepping,
1. Define a new :doc:`fix <fix>` command that calls the other code.
In this scenario, LAMMPS is the driver code. During timestepping,
the fix is invoked, and can make library calls to the other code,
which has been linked to LAMMPS as a library. This is the way how the
which has been linked to LAMMPS as a library. This is the way the
:ref:`LATTE <PKG-LATTE>` package, which performs density-functional
tight-binding calculations using the `LATTE software <https://github.com/lanl/LATTE>`_
to compute forces, is hooked to LAMMPS.
See the :doc:`fix latte <fix_latte>` command for more details.
Also see the :doc:`Modify <Modify>` doc pages for info on how to
add a new fix to LAMMPS.
tight-binding calculations using the `LATTE software
<https://github.com/lanl/LATTE>`_ to compute forces, is hooked to
LAMMPS. See the :doc:`fix latte <fix_latte>` command for more
details. Also see the :doc:`Modify <Modify>` doc pages for info on
how to add a new fix to LAMMPS.
.. spacer
@ -58,6 +58,12 @@ context of your application.
.. spacer
4. Couple LAMMPS with another code in a client/server mode. This is
described on the :doc:`Howto client/server <Howto_client_server>` doc
page.
4. Couple LAMMPS with another code in a client/server fashion, using
the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
developed by the `Molecular Sciences Software Institute (MolSSI)
<https://molssi.org>`_ to run LAMMPS as either an MDI driver
(client) or an MDI engine (server). The MDI driver issues commands
to the MDI server to exchange data between them. See the
:doc:`Howto mdi <Howto_mdi>` page for more information about how
LAMMPS can operate in either of these modes.

View File

@ -1,132 +1,144 @@
Using LAMMPS with the MDI library for code coupling
===================================================
.. note::
This Howto page will eventually replace the
:doc:`Howto client/server <Howto_client_server>` doc page.
Client/server coupling of two codes is where one code is the "client"
and sends request messages (data) to a "server" code. The server
responds to each request with a reply message. This enables the two
codes to work in tandem to perform a simulation. LAMMPS can act as
either a client or server code; it does this by using the `MolSSI
Driver Interface (MDI) library
Client/server coupling of two (or more) codes is where one code is the
"client" and sends request messages (data) to one (or more) "server"
code(s). A server responds to each request with a reply message
(data). This enables two (or more) codes to work in tandem to perform
a simulation. LAMMPS can act as either a client or server code; it
does this by using the `MolSSI Driver Interface (MDI) library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_,
developed by the `Molecular Sciences Software Institute (MolSSI)
<https://molssi.org>`_.
<https://molssi.org>`_, which is supported by the :ref:`MDI <PKG-MDI>`
package.
Alternate methods for code coupling with LAMMPS are described on the
:doc:`Howto couple <Howto_couple>` doc page.
Some advantages of client/server coupling are that the two codes can run
Some advantages of client/server coupling are that the codes can run
as stand-alone executables; they need not be linked together. Thus
neither code needs to have a library interface. This also makes it easy
to run the two codes on different numbers of processors. If a message
protocol (format and content) is defined for a particular kind of
simulation, then in principle any code which implements the client-side
protocol can be used in tandem with any code which implements the
server-side protocol. Neither code needs to know what specific other
code it is working with.
neither code needs to have a library interface. This also makes it
easy to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code which implements the
client-side protocol can be used in tandem with any code which
implements the server-side protocol. Neither code needs to know what
specific other code it is working with.
In MDI nomenclature, a client code is the "driver", and a server code is
an "engine". One driver code can communicate with one or more instances
of one or more engine codes. Driver and engine codes can be written in
any language: C, C++, Fortran, Python, etc.
In addition to allowing driver and engine(s) running to run as
stand-alone executables, MDI also enables a server code to be a
"plugin" to the client code. In this scenario, server code(s) are
compiled as shared libraries, and one (or more) instances of the
server are instantiated by the driver code. If the driver code runs
in parallel, it can split its MPI communicator into multiple
sub-communicators, and launch each plugin engine instance on a
sub-communicator. Driver processors in that sub-communicator exchange
messages with that engine instance, and can also send MPI messages to
other processors in the driver. The driver code can also destroy
engine instances and re-instantiate them.
In addition to allowing driver and engine(s) to run as stand-alone
executables, MDI also enables an engine to be a *plugin* to the client
code. In this scenario, server code(s) are compiled as shared
libraries, and one (or more) instances of the server are instantiated
by the driver code. If the driver code runs in parallel, it can split
its MPI communicator into multiple sub-communicators, and launch each
plugin engine instance on a sub-communicator. Driver processors
within that sub-communicator exchange messages with the corresponding
engine instance, and can also send MPI messages to other processors in
the driver. The driver code can also destroy engine instances and
re-instantiate them. LAMMPS can operate as either a stand-alone or
plugin MDI engine. When it operates as a driver, it can use either
stand-alone or plugin MDI engines.
The way that a driver communicates with an engine is by making
MDI_Send() and MDI_Recv() calls, which are conceptually similar to
MPI_Send() and MPI_Recv() calls. Each send or receive has a string
which identifies the command name, and optionally some data, which can
be a single value or vector of values of any data type. Inside the
MDI library, data is exchanged between the driver and engine via MPI
calls or sockets. This a run-time choice by the user.
The way in which an MDI driver communicates with an MDI engine is by
making MDI_Send() and MDI_Recv() calls, which are conceptually similar
to MPI_Send() and MPI_Recv() calls. Each send or receive operation
uses a string to identify the command name, and optionally some data,
which can be a single value or vector of values of any data type.
Inside the MDI library, data is exchanged between the driver and
engine via MPI calls or sockets. This is a run-time choice by the user.
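As an illustration (not taken from the LAMMPS or MDI sources), here is
a sketch of one driver-side exchange written with the MDI Python
bindings. It assumes the bindings expose the library's documented
calls (``MDI_Init``, ``MDI_Accept_Communicator``, ``MDI_Send_Command``,
``MDI_Send``, ``MDI_Recv``); check the MDI documentation for the exact
signatures:

.. code-block:: python

import mdi

# hypothetical minimal driver; the options string follows the examples on this page
mdi.MDI_Init("-role DRIVER -name d -method TCP -port 8021")
comm = mdi.MDI_Accept_Communicator()   # wait for an engine to connect

natoms = 8                             # assumed here; a real driver queries the engine
coords = [0.0] * (3 * natoms)

mdi.MDI_Send_Command(">COORDS", comm)  # announce that coordinates follow
mdi.MDI_Send(coords, 3 * natoms, mdi.MDI_DOUBLE, comm)
mdi.MDI_Send_Command("<FORCES", comm)  # ask the engine for forces
forces = mdi.MDI_Recv(3 * natoms, mdi.MDI_DOUBLE, comm)

mdi.MDI_Send_Command("EXIT", comm)     # shut down the engine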
----------
The :ref:`MDI <PKG-MDI>` package provides a :doc:`mdi engine <mdi>`
command which enables LAMMPS to operate as an MDI engine. Its doc
page explains the variety of standard and custom MDI commands which
the LAMMPS engine recognizes and can respond to.
The package also provides a :doc:`mdi plugin <mdi>` command which
enables LAMMPS to operate as an MDI driver and load an MDI engine as a
plugin library.
The package also has a :doc:`fix mdi/aimd <fix_mdi_aimd>` command in which
LAMMPS operates as an MDI driver to perform *ab initio* MD simulations
in conjunction with a quantum mechanics code. Its post_force() method
illustrates how a driver issues MDI commands to another code. This
command can be used to couple to an MDI engine which is either a
stand-alone code or a plugin library.
----------
The examples/mdi directory contains Python scripts and LAMMPS input
scripts which use LAMMPS as either an MDI driver or engine or both.
Three example use cases are provided:
* Run ab initio MD (AIMD) using 2 instances of LAMMPS, one as driver
and one as an engine. As an engine, LAMMPS is a surrogate for a
quantum code.
* A Python script driver invokes a sequence of unrelated LAMMPS
calculations. Calculations can be single-point energy/force
evaluations, MD runs, or energy minimizations.
* Run AIMD with a Python driver code and 2 LAMMPS instances as
engines. The first LAMMPS instance performs MD timestepping. The
second LAMMPS instance acts as a surrogate QM code to compute
forces.
Note that in any of these examples where LAMMPS is used as an engine,
an actual QM code (which supports MDI) could be used in its place,
without modifying other code or scripts, except to specify the name of
the QM code.
The examples/mdi/README file explains how to launch both driver and
engine codes so that they communicate using the MDI library via either
MPI or sockets.
-------------
As an example, LAMMPS and the ``pw.x`` command from Quantum Espresso (a
suite of quantum DFT codes) can work together via the MDI library to
perform an ab initio MD (AIMD) simulation, where LAMMPS runs an MD
simulation and sends a message each timestep to ``pw.x`` asking it to
compute quantum forces on the current configuration of atoms. Here is
how the 2 codes are launched to communicate by MPI:
Currently there are two quantum DFT codes which have direct MDI
support, `Quantum ESPRESSO (QE) <https://www.quantum-espresso.org/>`_
and `INQ <https://qsg.llnl.gov/node/101.html>`_. There are also
several QM codes which have indirect support through QCEngine or i-PI.
The former means they require a wrapper program (QCEngine) with MDI
support which writes/reads files to pass data to the quantum code
itself. The list of QCEngine-supported and i-PI-supported quantum
codes is on the `MDI webpage
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_.
Here is how to build QE as a stand-alone ``pw.x`` executable which can
be used in stand-alone mode:
.. code-block:: bash
% mpirun -np 2 lmp_mpi -mdi "-role DRIVER -name d -method MPI" \
-in in.aimd : -np 16 pw.x -in qe.in -mdi "-role ENGINE -name e -method MPI"
% git clone --branch mdi_plugin https://github.com/MolSSI-MDI/q-e.git <base_path>/q-e
% build the executable pw.x, following the `QE build guide <https://gitlab.com/QEF/q-e/-/wikis/Developers/CMake-build-system>`_
In this case LAMMPS runs on 2 processors (MPI tasks), while ``pw.x``
runs on 16 processors.
Here is how the 2 codes are launched to communicate by sockets:
Here is how to build QE as a shared library which can be used in plugin mode,
which results in a libqemdi.so file in <base_path>/q-e/MDI/src:
.. code-block:: bash
% mpirun -np 2 lmp_mpi -mdi "-role DRIVER -name d -method TCP -port 8021" -in in.aimd
% mpirun -np 16 pw.x -in qe.in -mdi "-role ENGINE -name e -method TCP -port 8021 -hostname localhost"
% git clone --branch mdi_plugin https://github.com/MolSSI-MDI/q-e.git <base_path>/q-e
% cd <base_path>/q-e
% ./configure --enable-parallel --enable-openmp --enable-shared FFLAGS="-fPIC" FCFLAGS="-fPIC" CFLAGS="-fPIC" foxflags="-fPIC" try_foxflags="-fPIC"
% make -j 4 mdi
These commands could be issued in different windows on a desktop
machine. Or in the same window, if the first command is ended with
"&" so as to run in the background. If "localhost" is replaced by an
IP address, ``pw.x`` could be run on another machine on the same network, or
even on another machine across the country.
INQ cannot be built as a stand-alone code; it is by design a library.
Here is how to build INQ as a shared library which can be used in
plugin mode, which results in a libinqmdi.so file in
<base_path>/inq/build/examples:
After both codes initialize themselves to model the same system, this is
what occurs each timestep:
.. code-block:: bash
* LAMMPS sends a ">COORDS" message to ``pw.x`` with a 3*N vector of current atom coords
* ``pw.x`` receives the message/coords and computes quantum forces on all the atoms
* LAMMPS sends a "<FORCES" message to ``pw.x`` and waits for the result
* ``pw.x`` receives the message (after its computation finishes) and sends a 3*N vector of forces
* LAMMPS receives the forces and time integrates to complete a single timestep
-------------
Examples scripts for using LAMMPS as an MDI engine are in the
examples/mdi directory. See the README file in that directory for
instructions on how to run the examples.
.. note::
Work is underway to add commands that allow LAMMPS to be used as an
MDI driver, e.g. for the AIMD example discussed above. Example
scripts for this usage mode will be added to the same directory when
available.
If LAMMPS is used as a stand-alone engine it should set up the system
it will be modeling in its input script, then invoke the
:doc:`mdi/engine <mdi_engine>` command. This will put LAMMPS into
"engine mode" where it waits for messages and data from the driver.
When the driver sends an "EXIT" command, LAMMPS will exit engine mode
and the input script will continue.
If LAMMPS is used as a plugin engine it operates the same way, except
that the driver will pass LAMMPS an input script to initialize itself.
Upon receiving the "EXIT" command, LAMMPS will exit engine mode and the
input script will continue. After finishing execution of the input
script, the instance of LAMMPS will be destroyed.
LAMMPS supports the full set of MD-appropriate engine commands defined
by the MDI library. See the :doc:`mdi/engine <mdi_engine>` page for
a list of these.
If those commands are not sufficient for a user-developed driver to use
LAMMPS as an engine, then new commands can be easily added. See these
two files which implement the definition of MDI commands and the logic
for responding to them:
* src/MDI/mdi_engine.cpp
* src/MDI/fix_mdi_engine.cpp
% git clone --branch mdi --recurse-submodules https://gitlab.com/taylor-a-barnes/inq.git <base_path>/inq
% cd <base_path>/inq
% mkdir -p build
% cd build
% ../configure --prefix=<install_path>/install
% make -j 4
% make install

View File

@ -79,6 +79,10 @@ This data can be extracted and parsed from a log file using python with:
.. code-block:: python
import re, yaml
try:
from yaml import CSafeLoader as Loader, CSafeDumper as Dumper
except ImportError:
from yaml import SafeLoader as Loader, SafeDumper as Dumper
docs = ""
with open("log.lammps") as f:
@ -86,7 +90,7 @@ This data can be extracted and parsed from a log file using python with:
m = re.search(r"^(keywords:.*$|data:$|---$|\.\.\.$| - \[.*\]$)", line)
if m: docs += m.group(0) + '\n'
thermo = list(yaml.load_all(docs, Loader=yaml.SafeLoader))
thermo = list(yaml.load_all(docs, Loader=Loader))
print("Number of runs: ", len(thermo))
print(thermo[1]['keywords'][4], ' = ', thermo[1]['data'][2][4])
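The parsed ``thermo`` list can then be reshaped into per-keyword
columns for plotting or further analysis. A small sketch continuing
from the snippet above; the available keyword names depend on the
:doc:`thermo_style <thermo_style>` settings of the run:

.. code-block:: python

# reshape the rows of the first run into {keyword: [values ...]} columns
run = thermo[0]
cols = {key: [row[i] for row in run['data']]
        for i, key in enumerate(run['keywords'])}
print(list(cols.keys()))
print(cols['Step'])   # 'Step' is present with default thermo settings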

View File

@ -5,7 +5,7 @@ LAMMPS can be downloaded, built, and configured for OS X on a Mac with
`Homebrew <homebrew_>`_. (Alternatively, see the install instructions for
:doc:`Download an executable via Conda <Install_conda>`.) The following LAMMPS
packages are unavailable at this time because of additional needs not yet met:
GPU, KOKKOS, LATTE, MSCG, MESSAGE, MPIIO POEMS VORONOI.
GPU, KOKKOS, LATTE, MSCG, MPIIO, POEMS, VORONOI.
After installing Homebrew, you can install LAMMPS on your system with
the following commands:

View File

@ -6,7 +6,7 @@ of the `LAMMPS website <lws_>`_.
.. _download: https://www.lammps.org/download.html
.. _bug: https://www.lammps.org/bug.html
.. _older: https://www.lammps.org/tars
.. _older: https://download.lammps.org/tars/
.. _lws: https://www.lammps.org
You have two choices of tarballs, either the most recent stable

View File

@ -46,7 +46,7 @@ In addition there are DOIs for individual stable releases. Currently there are:
- 3 March 2020 version: `DOI:10.5281/zenodo.3726417 <https://dx.doi.org/10.5281/zenodo.3726417>`_
- 29 October 2020 version: `DOI:10.5281/zenodo.4157471 <https://dx.doi.org/10.5281/zenodo.4157471>`_
- 29 September 2021 version: `DOI:10.5281/zenodo.6386596 <https://dx.doi.org/10.5281/zenodo.6386596>`_
Home page
^^^^^^^^^

View File

@ -20,7 +20,6 @@ available online are listed below.
* `Glossary of terms relevant to LAMMPS <https://www.lammps.org/glossary.html>`_
* `LAMMPS highlights with images <https://www.lammps.org/pictures.html>`_
* `LAMMPS highlights with movies <https://www.lammps.org/movies.html>`_
* `Mailing list <https://www.lammps.org/mail.html>`_
* `LAMMPS forum <https://www.lammps.org/forum.html>`_
* `Workshops <https://www.lammps.org/workshops.html>`_
* `Tutorials <https://www.lammps.org/tutorials.html>`_

View File

@ -14,11 +14,11 @@ LAMMPS is an open-source code, distributed freely under the terms of
the GNU Public License Version 2 (GPLv2).
The `LAMMPS website <lws_>`_ has a variety of information about the
code. It includes links to an on-line version of this manual, a
`mailing list <https://www.lammps.org/mail.html>`_ and
`online forum <https://www.lammps.org/forum.html>`_ where users can
post questions, and a `GitHub site <https://github.com/lammps/lammps>`_
where all LAMMPS development is coordinated.
code. It includes links to an on-line version of this manual, an
`online forum <https://www.lammps.org/forum.html>`_ where users can post
questions and discuss LAMMPS, and a `GitHub site
<https://github.com/lammps/lammps>`_ where all LAMMPS development is
coordinated.
----------

View File

@ -27,11 +27,10 @@ join the `LAMMPS developers on Slack <https://lammps.slack.com>`_. This
slack work space is by invitation only. Thus for access, please send an
e-mail to ``slack@lammps.org`` explaining what part of LAMMPS you are
working on. Only discussions related to LAMMPS development are
tolerated in that work space, so this is **NOT** for people that look for
help with compiling, installing, or using LAMMPS. Please post a message
to the `lammps-users mailing list <https://www.lammps.org/mail.html>`_
or the `LAMMPS forum <https://www.lammps.org/forum.html>`_ for those
purposes.
tolerated in that work space, so this is **NOT** for people that look
for help with compiling, installing, or using LAMMPS. Please post a
message to the `LAMMPS forum <https://www.lammps.org/forum.html>`_ for
those purposes.
Packages versus individual files
--------------------------------

View File

@ -73,7 +73,6 @@ page gives those details.
* :ref:`MDI <PKG-MDI>`
* :ref:`MEAM <PKG-MEAM>`
* :ref:`MESONT <PKG-MESONT>`
* :ref:`MESSAGE <PKG-MESSAGE>`
* :ref:`MGPT <PKG-MGPT>`
* :ref:`MISC <PKG-MISC>`
* :ref:`ML-HDNNP <PKG-ML-HDNNP>`
@ -1424,17 +1423,25 @@ MDI package
**Contents:**
A LAMMPS command and fix to allow client-server coupling of LAMMPS to
other atomic or molecular simulation codes via the `MolSSI Driver Interface
A LAMMPS command and fixes to allow client-server coupling of LAMMPS
to other atomic or molecular simulation codes or materials modeling
workflows via the `MolSSI Driver Interface
(MDI) library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_.
**Author:** Taylor Barnes - MolSSI, taylor.a.barnes at gmail.com
**Install:**
This package has :ref:`specific installation instructions <mdi>` on
the :doc:`Build extras <Build_extras>` page.
**Supporting info:**
* src/MDI/README
* :doc:`mdi/engine <mdi_engine>`
* :doc:`fix mdi/engine <fix_mdi_engine>`
* lib/mdi/README
* :doc:`Howto MDI <Howto_mdi>`
* :doc:`mdi <mdi>`
* :doc:`fix mdi/aimd <fix_mdi_aimd>`
* examples/PACKAGES/mdi
----------
@ -1511,32 +1518,6 @@ Philipp Kloza (U Cambridge)
----------
.. _PKG-MESSAGE:
MESSAGE package
---------------
**Contents:**
Commands to use LAMMPS as either a client or server and couple it to
another application.
**Install:**
This package has :ref:`specific installation instructions <message>` on the :doc:`Build extras <Build_extras>` page.
**Supporting info:**
* src/MESSAGE: filenames -> commands
* lib/message/README
* :doc:`message <message>`
* :doc:`fix client/md <fix_client_md>`
* :doc:`server md <server_md>`
* :doc:`server mc <server_mc>`
* examples/message
----------
.. _PKG-MGPT:
MGPT package

View File

@ -249,7 +249,7 @@ whether an extra library is needed to build and use the package:
- n/a
- no
* - :ref:`MDI <PKG-MDI>`
- client-server coupling
- client-server code coupling
- :doc:`MDI Howto <Howto_mdi>`
- PACKAGES/mdi
- ext
@ -263,11 +263,6 @@ whether an extra library is needed to build and use the package:
- pair styles :doc:`mesont/tpm <pair_mesont_tpm>`, :doc:`mesocnt <pair_mesocnt>`
- PACKAGES/mesont
- int
* - :ref:`MESSAGE <PKG-MESSAGE>`
- client/server messaging
- :doc:`message <message>`
- message
- int
* - :ref:`MGPT <PKG-MGPT>`
- fast MGPT multi-ion potentials
- :doc:`pair_style mgpt <pair_mgpt>`

View File

@ -226,15 +226,6 @@ other executable(s) perform an MPI_Comm_split() with their own colors
to shrink the MPI_COMM_WORLD communication to be the subset of
processors they are actually running on.
Currently, this is only used in LAMMPS to perform client/server
messaging with another application. LAMMPS can act as either a client
or server (or both). More details are given on the :doc:`Howto client/server <Howto_client_server>` doc page.
Specifically, this refers to the "mpi/one" mode of messaging provided
by the :doc:`message <message>` command and the CSlib library LAMMPS
links with from the lib/message directory. See the
:doc:`message <message>` command for more details.
----------
.. _cite:

View File

@ -59,8 +59,7 @@ Commands
lattice
log
mass
mdi_engine
message
mdi
min_modify
min_spin
min_style
@ -96,9 +95,6 @@ Commands
restart
run
run_style
server
server_mc
server_md
set
shell
special_bonds

View File

@ -208,7 +208,8 @@ The individual style names on the :doc:`Commands compute <Commands_compute>` pag
* :doc:`erotate/sphere/atom <compute_erotate_sphere_atom>` - rotational energy for each spherical particle
* :doc:`event/displace <compute_event_displace>` - detect event on atom displacement
* :doc:`fabric <compute_fabric>` - calculates fabric tensors from pair interactions
* :doc:`fep <compute_fep>` -
* :doc:`fep <compute_fep>` - compute free energies for alchemical transformation from perturbation theory
* :doc:`fep/ta <compute_fep_ta>` - compute free energies for a test area perturbation
* :doc:`force/tally <compute_tally>` - force between two groups of atoms via the tally callback mechanism
* :doc:`fragment/atom <compute_cluster_atom>` - fragment ID for each atom
* :doc:`global/atom <compute_global_atom>` -

View File

@ -0,0 +1,99 @@
.. index:: compute fep/ta
compute fep/ta command
======================
Syntax
""""""
.. parsed-literal::
compute ID group-ID fep/ta temp plane scale_factor keyword value ...
* ID, group-ID are documented in the :doc:`compute <compute>` command
* fep/ta = name of this compute command
* temp = external temperature (as specified for constant-temperature run)
* plane = *xy* or *xz* or *yz*
* scale_factor = multiplicative factor for change in plane area
* zero or more keyword/value pairs may be appended
* keyword = *tail*
.. parsed-literal::
*tail* value = *no* or *yes*
*no* = ignore tail correction to pair energies (usually small in fep)
*yes* = include tail correction to pair energies
Examples
""""""""
.. code-block:: LAMMPS
compute 1 all fep/ta 298 xy 1.0005
Description
"""""""""""
Define a computation that calculates the change in the free energy due
to a test-area (TA) perturbation :ref:`(Gloor) <Gloor>`. The test-area
approach can be used to determine the interfacial tension of the system
in a single simulation:
.. math::
\gamma = \lim_{\Delta \mathcal{A} \to 0} \left( \frac{\Delta A_{0 \to 1 }}{\Delta \mathcal{A}}\right)_{N,V,T}
= - \frac{kT}{\Delta \mathcal{A}} \ln \left< \exp(-(U_1 - U_0)/kT) \right>_0
During the perturbation, both axes of *plane* are scaled by multiplying
by :math:`\sqrt{scale\_factor}`, while the other axis is divided by
*scale_factor* such that the overall volume of the system is maintained.
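As a concrete check, take *plane* = *xy*, box edge lengths
:math:`L_x, L_y, L_z`, and a scale factor :math:`s`:

.. math::

\mathcal{A}_1 = \left(\sqrt{s}\,L_x\right)\left(\sqrt{s}\,L_y\right) = s\,L_x L_y,
\qquad
\Delta\mathcal{A} = (s - 1)\,L_x L_y,
\qquad
V_1 = s\,L_x L_y \cdot \frac{L_z}{s} = V_0

so the *plane* area changes by the factor *scale_factor* while the box
volume is unchanged.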
The *tail* keyword controls the calculation of the tail correction to
"van der Waals" pair energies beyond the cutoff, if this has been
activated via the :doc:`pair_modify <pair_modify>` command. If the
perturbation is small, the tail contribution to the energy difference
between the reference and perturbed systems should be negligible.
----------
Output info
"""""""""""
This compute calculates a global vector of length 3 which contains the
energy difference ( :math:`U_1-U_0` ) as c_ID[1], the Boltzmann factor
:math:`\exp(-(U_1-U_0)/kT)` as c_ID[2], and the change in the *plane*
area :math:`\Delta \mathcal{A}` as c_ID[3]. :math:`U_1` is the potential
energy of the perturbed state and :math:`U_0` is the potential energy of
the reference state. The energies include kspace terms if these are
used in the simulation.
These output results can be used by any command that uses a global
scalar or vector from a compute as input. See the :doc:`Howto output
<Howto_output>` page for an overview of LAMMPS output options. For
example, the computed values can be averaged using :doc:`fix ave/time
<fix_ave_time>`.
Restrictions
""""""""""""
Constraints, like fix shake, may lead to incorrect values for the energy difference.
This compute is distributed as the FEP package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`compute fep <compute_fep>`
Default
"""""""
The option defaults are *tail* = *no*\ .
----------
.. _Gloor:
**(Gloor)** Gloor, J Chem Phys, 123, 134703 (2005)

View File

@ -75,10 +75,11 @@ solids undergoing thermal motion.
.. note::
Initial coordinates are stored in "unwrapped" form, by using the
image flags associated with each atom. See the :doc:`dump custom <dump>` command for a discussion of "unwrapped" coordinates.
See the Atoms section of the :doc:`read_data <read_data>` command for a
discussion of image flags and how they are set for each atom. You can
reset the image flags (e.g. to 0) before invoking this compute by
image flags associated with each atom. See the :doc:`dump custom
<dump>` command for a discussion of "unwrapped" coordinates. See the
Atoms section of the :doc:`read_data <read_data>` command for a
discussion of image flags and how they are set for each atom. You
can reset the image flags (e.g. to 0) before invoking this compute by
using the :doc:`set image <set>` command.
.. note::
@ -108,7 +109,8 @@ distance\^2 :doc:`units <units>`.
Restrictions
""""""""""""
none
Compute *msd* cannot be used with a dynamic group.
Related commands
""""""""""""""""

View File

@ -74,8 +74,11 @@ the third is dimensionless.
Restrictions
""""""""""""
This compute is part of the EXTRA-COMPUTE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Compute *msd/nongauss* cannot be used with a dynamic group.
This compute is part of the EXTRA-COMPUTE package. It is only enabled
if LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""

View File

@ -36,7 +36,7 @@ Syntax
* ID = user-assigned name for the dump
* group-ID = ID of the group of atoms to be dumped
* style = *atom* or *atom/gz* or *atom/zstd or *atom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *dcd* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *xyz/mpiio*
* style = *atom* or *atom/gz* or *atom/zstd* or *atom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *dcd* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *xyz/mpiio* or *yaml*
* N = dump every this many timesteps
* file = name of file to write dump info to
* args = list of arguments for a particular style
@ -68,8 +68,9 @@ Syntax
*xyz/gz* args = none
*xyz/zstd* args = none
*xyz/mpiio* args = none
*yaml* args = same as *custom* args, see below
* *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *netcdf* or *netcdf/mpiio* args = list of atom attributes
* *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *netcdf* or *netcdf/mpiio* or *yaml* args = list of atom attributes
.. parsed-literal::
@ -386,6 +387,70 @@ from using the (numerical) atom type to an element name (or some
other label). This will help many visualization programs to guess
bonds and colors.
Dump style *yaml* has the same command syntax as style *custom* and
writes YAML format files that can be easily parsed by a variety of data
processing tools and programming languages. Each timestep will be
written as a YAML "document" (i.e. starts with "---" and ends with
"..."). The style supports writing one file per timestep through the
"\*" wildcard but not multi-processor outputs with the "%" token in the
filename. In addition to per-atom data, :doc:`thermo <thermo>` data can
be included in the *yaml* style dump file using the :doc:`dump_modify
thermo yes <dump_modify>` option. The data included in the dump file
uses the "thermo" tag and is otherwise identical to data specified by
the :doc:`thermo_style <thermo_style>` command.
Below is an example for a YAML format dump created by the following commands.
.. code-block:: LAMMPS
dump out all yaml 100 dump.yaml id type x y z vx vy vz ix iy iz
dump_modify out time yes units yes thermo yes format 1 %5d format "% 10.6e"
The tags "time", "units", and "thermo" are optional and enabled by the
dump_modify command. The list under the "box" tag has 3 lines for
orthogonal boxes and 4 lines for triclinic boxes, where the first 3 are
the box boundaries and the 4th the three tilt factors (xy, xz, yz). The
"thermo" data follows the format of the *yaml* thermo style. The
"keywords" tag lists the per-atom properties contained in the "data"
columns, which contain a list with one line per atom. The keywords may
be renamed using the dump_modify command, the same as for the *custom*
dump style.
.. code-block:: yaml
---
timestep: 0
units: lj
time: 0
natoms: 4000
boundary: [ p, p, p, p, p, p, ]
thermo:
- keywords: [ Step, Temp, E_pair, E_mol, TotEng, Press, ]
- data: [ 0, 0, -27093.472213010766, 0, 0, 0, ]
box:
- [ 0, 16.795961913825074 ]
- [ 0, 16.795961913825074 ]
- [ 0, 16.795961913825074 ]
- [ 0, 0, 0 ]
keywords: [ id, type, x, y, z, vx, vy, vz, ix, iy, iz, ]
data:
- [ 1 , 1 , 0.000000e+00 , 0.000000e+00 , 0.000000e+00 , -1.841579e-01 , -9.710036e-01 , -2.934617e+00 , 0 , 0 , 0, ]
- [ 2 , 1 , 8.397981e-01 , 8.397981e-01 , 0.000000e+00 , -1.799591e+00 , 2.127197e+00 , 2.298572e+00 , 0 , 0 , 0, ]
- [ 3 , 1 , 8.397981e-01 , 0.000000e+00 , 8.397981e-01 , -1.807682e+00 , -9.585130e-01 , 1.605884e+00 , 0 , 0 , 0, ]
[...]
...
---
timestep: 100
units: lj
time: 0.5
[...]
...
----------
Note that *atom*, *custom*, *dcd*, *xtc*, and *xyz* style dump files
can be read directly by `VMD <http://www.ks.uiuc.edu/Research/vmd>`_, a
popular molecular viewing program.
@ -427,9 +492,9 @@ If a "%" character appears in the filename, then each of P processors
writes a portion of the dump file, and the "%" character is replaced
with the processor ID from 0 to P-1. For example, tmp.dump.% becomes
tmp.dump.0, tmp.dump.1, ... tmp.dump.P-1, etc. This creates smaller
files and can be a fast mode of output on parallel machines that
support parallel I/O for output. This option is not available for the
*dcd*, *xtc*, and *xyz* styles.
files and can be a fast mode of output on parallel machines that support
parallel I/O for output. This option is **not** available for the *dcd*,
*xtc*, *xyz*, and *yaml* styles.
By default, P = the number of processors meaning one file per
processor, but P can be set to a smaller value via the *nfile* or
@ -722,8 +787,8 @@ are part of the MPIIO package. They are only enabled if LAMMPS was
built with that package. See the :doc:`Build package <Build_package>`
doc page for more info.
The *xtc* and *dcd* styles are part of the EXTRA-DUMP package. They
are only enabled if LAMMPS was built with that package. See the
The *xtc*, *dcd* and *yaml* styles are part of the EXTRA-DUMP package.
They are only enabled if LAMMPS was built with that package. See the
:doc:`Build package <Build_package>` page for more info.
Related commands

View File

@ -35,13 +35,21 @@ Examples
Description
"""""""""""
Dump a snapshot of atom coordinates every N timesteps in the
`ADIOS <adios_>`_ based "BP" file format, or using different I/O solutions in ADIOS,
to a stream that can be read on-line by another program.
Dump a snapshot of atom coordinates every N timesteps in the `ADIOS
<adios_>`_ based "BP" file format, or using different I/O solutions in
ADIOS, to a stream that can be read on-line by another program.
ADIOS-BP files are binary, portable and self-describing.
.. _adios: https://github.com/ornladios/ADIOS2
.. note::
To be able to use ADIOS, a file ``adios2_config.xml`` with specific
configuration settings is expected in the current working directory.
If the file is not present, LAMMPS will try to create a minimal
default file. Please refer to the ADIOS documentation for details on
how to adjust this file for optimal performance and desired features.
**Use from write_dump:**
It is possible to use these dump styles with the

View File

@ -26,6 +26,10 @@ Syntax
N = index of frame written upon first dump
*balance* arg = *yes* or *no*
*buffer* arg = *yes* or *no*
*colname* values = ID string, or *default*
string = new column header name
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a custom dump keyword or reference to compute, fix, property or variable.
*delay* arg = Dstep
Dstep = delay output until this timestep
*element* args = E1 E2 ... EN, where N = # of atom types
@ -40,9 +44,10 @@ Syntax
Np = write one file for every this many processors
*first* arg = *yes* or *no*
*flush* arg = *yes* or *no*
*format* args = *line* string, *int* string, *float* string, M string, or *none*
*format* args = *line* string, *int* string, *float* string, ID string, or *none*
string = C-style format string
M = integer from 1 to N, where N = # of per-atom quantities being output
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a custom dump keyword or reference to compute, fix, property or variable.
*header* arg = *yes* or *no*
*yes* to write the header
*no* to not write the header
@ -375,6 +380,29 @@ performed with dump style *xtc*\ .
----------
The *colname* keyword can be used to change the default header keyword
for dump styles: *atom*, *custom*, and *cfg* and their compressed, ADIOS,
and MPIIO variants. The setting for *ID string* replaces the default
text with the provided string. *ID* can be a positive integer when it
represents the column number counting from the left, a negative integer
when it represents the column number from the right (i.e. -1 is the last
column/keyword), or a custom dump keyword (or compute, fix, property, or
variable reference) and then it replaces the string for that specific
keyword. For *atom* dump styles only the keywords "id", "type", "x",
"y", "z", "ix", "iy", "iz" can be accessed via their string names,
regardless of whether scaled or unwrapped coordinates were enabled or
disabled, and the column indexing always assumes 8 columns regardless
of whether image flags are enabled or not. For dump style *cfg* only
the "auxiliary" keywords (6th or later keyword) may be changed and the
column indexing considers only them (i.e. the 6th keyword is the 1st
column).
The *colname* keyword can be used multiple times. If multiple *colname*
settings refer to the same keyword, the last setting has precedence. A
setting of *default* clears all previous settings, reverting all values
to their default names.
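As an illustration, here is a minimal hypothetical example consistent
with the syntax described above (the replacement header names are
arbitrary):

.. code-block:: LAMMPS

   dump        out all custom 100 dump.custom id type x y z
   dump_modify out colname 1 atom-id colname -1 z-coord colname x x-coord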
----------
The *format* keyword can be used to change the default numeric format output
by the text-based dump styles: *atom*, *local*, *custom*, *cfg*, and
*xyz* styles, and their MPIIO variants. Only the *line* or *none*
@ -685,8 +713,8 @@ run, this option is ignored since the output is already balanced.
----------
The *thermo* keyword only applies the dump *netcdf* style. It
triggers writing of :doc:`thermo <thermo>` information to the dump file
The *thermo* keyword only applies to the dump styles *netcdf* and *yaml*.
It triggers writing of :doc:`thermo <thermo>` information to the dump file
alongside per-atom data. The values included in the dump file are
identical to the values specified by :doc:`thermo_style <thermo_style>`.

View File

@ -194,10 +194,10 @@ accelerated styles exist.
* :doc:`bond/swap <fix_bond_swap>` - Monte Carlo bond swapping
* :doc:`box/relax <fix_box_relax>` - relax box size during energy minimization
* :doc:`charge/regulation <fix_charge_regulation>` - Monte Carlo sampling of charge regulation
* :doc:`client/md <fix_client_md>` - MD client for client/server simulations
* :doc:`cmap <fix_cmap>` - enables CMAP cross-terms of the CHARMM force field
* :doc:`colvars <fix_colvars>` - interface to the collective variables "Colvars" library
* :doc:`controller <fix_controller>` - apply control loop feedback mechanism
* :doc:`damping/cundall <fix_damping_cundall>` - Cundall non-viscous damping for granular simulations
* :doc:`deform <fix_deform>` - change the simulation box size/shape
* :doc:`deposit <fix_deposit>` - add new atoms above a surface
* :doc:`dpd/energy <fix_dpd_energy>` - constant energy dissipative particle dynamics
@ -243,7 +243,7 @@ accelerated styles exist.
* :doc:`lb/viscous <fix_lb_viscous>` -
* :doc:`lineforce <fix_lineforce>` - constrain atoms to move in a line
* :doc:`manifoldforce <fix_manifoldforce>` - restrain atoms to a manifold during minimization
* :doc:`mdi/engine <fix_mdi_engine>` - connect LAMMPS to external programs via the MolSSI Driver Interface (MDI)
* :doc:`mdi/aimd <fix_mdi_aimd>` - LAMMPS operates as driver for ab initio MD (AIMD) via the MolSSI Driver Interface (MDI)
* :doc:`meso/move <fix_meso_move>` - move mesoscopic SPH/SDPD particles in a prescribed fashion
* :doc:`mol/swap <fix_mol_swap>` - Monte Carlo atom type swapping with a molecule
* :doc:`momentum <fix_momentum>` - zero the linear and/or angular momentum of a group of atoms
@ -388,6 +388,7 @@ accelerated styles exist.
* :doc:`vector <fix_vector>` - accumulate a global vector every N timesteps
* :doc:`viscosity <fix_viscosity>` - Muller-Plathe momentum exchange for viscosity calculation
* :doc:`viscous <fix_viscous>` - viscous damping for granular simulations
* :doc:`viscous/sphere <fix_viscous_sphere>` - viscous damping on angular velocity for granular simulations
* :doc:`wall/body/polygon <fix_wall_body_polygon>` -
* :doc:`wall/body/polyhedron <fix_wall_body_polyhedron>` -
* :doc:`wall/colloid <fix_wall>` - Lennard-Jones wall interacting with finite-size particles

View File

@ -19,6 +19,12 @@ Syntax
* cutlo,cuthi = lo and hi cutoff for Taper radius
* tolerance = precision to which charges will be equilibrated
* params = reaxff or a filename
* one or more keywords or keyword/value pairs may be appended
.. parsed-literal::
keyword = *maxiter*
*maxiter* N = limit the number of iterations to *N*
Examples
""""""""
@ -26,7 +32,7 @@ Examples
.. code-block:: LAMMPS
fix 1 all acks2/reaxff 1 0.0 10.0 1.0e-6 reaxff
fix 1 all acks2/reaxff 1 0.0 10.0 1.0e-6 param.acks2
fix 1 all acks2/reaxff 1 0.0 10.0 1.0e-6 param.acks2 maxiter 500
Description
"""""""""""
@ -48,10 +54,10 @@ with their neighbors. It requires some parameters for each atom type.
If the *params* setting above is the word "reaxff", then these are
extracted from the :doc:`pair_style reaxff <pair_reaxff>` command and
the ReaxFF force field file it reads in. If a file name is specified
for *params*\ , then the parameters are taken from the specified file
and the file must contain one line for each atom type. The latter form
must be used when performing QeQ with a non-ReaxFF potential. The lines
should be formatted as follows:
for *params*, then the parameters are taken from the specified file
and the file must contain one line for each atom type. The latter
form must be used when performing QeQ with a non-ReaxFF potential.
The lines should be formatted as follows:
.. parsed-literal::
@ -67,13 +73,25 @@ ReaxFF potential file, except that eta is defined here as twice the eta
value in the ReaxFF file. Note that unlike the rest of LAMMPS, the units
of this fix are hard-coded to be A, eV, and electronic charge.
**Restart, fix_modify, output, run start/stop, minimize info:**
The optional *maxiter* keyword allows changing the max number
of iterations in the linear solver. The default value is 200.
.. note::
In order to solve the self-consistent equations for electronegativity
equalization, LAMMPS imposes the additional constraint that all the
charges in the fix group must add up to zero. The initial charge
assignments should also satisfy this constraint. LAMMPS will print a
warning if that is not the case.
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`. No global scalar or vector or per-atom quantities are
stored by this fix for access by various :doc:`output commands
<Howto_output>`. No parameter of this fix can be used with the
*start/stop* keywords of the :doc:`run <run>` command.
<restart>`. This fix computes a global scalar (the number of
iterations) for access by various :doc:`output commands <Howto_output>`.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
This fix is invoked during :doc:`energy minimization <minimize>`.
@ -86,9 +104,9 @@ This fix is invoked during :doc:`energy minimization <minimize>`.
Restrictions
""""""""""""
This fix is part of the REAXFF package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` doc page for more info.
This fix is part of the REAXFF package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
This fix does not correctly handle interactions involving multiple
periodic images of the same atom. Hence, it should not be used for
@ -105,7 +123,10 @@ Related commands
:doc:`pair_style reaxff <pair_reaxff>`, :doc:`fix qeq/reaxff <fix_qeq_reaxff>`
**Default:** none
Default
"""""""
maxiter 200
----------

View File

@ -23,7 +23,7 @@ Syntax
* temp = temperature
* seed = random number generator seed
* one or more keyword/value pairs may be appended
* keyword = *rng* or *dipole* or *gamma_r_eigen* or *gamma_t_eigen* or *gamma_r* or *gamma_t*
* keyword = *rng* or *dipole* or *gamma_r_eigen* or *gamma_t_eigen* or *gamma_r* or *gamma_t* or *rotation_temp* or *planar_rotation*
.. parsed-literal::
@ -41,7 +41,9 @@ Syntax
*gt1*, *gt2*, and *gt3* = diagonal entries of body frame translational friction tensor
*gamma_t* values = *gt* for *brownian* and *brownian/sphere*
*gt* = magnitude of the (isotropic) translational friction tensor
*rotation_temp* values = *T* for *brownian/sphere* and *brownian/asphere*
*T* = rotation temperature, which can be different than *temp* when out of equilibrium
*planar_rotation* values = None (constrains rotational diffusion to be in xy plane if in 3D)
Examples
""""""""
@ -86,12 +88,45 @@ For the style *brownian/sphere*, the positions of the particles are
updated, and a dipole slaved to the spherical orientation is also
updated. This style therefore requires the hybrid atom style
:doc:`atom_style dipole <atom_style>` and :doc:`atom_style sphere
<atom_style>`.
<atom_style>`. The equation of motion for the dipole is
.. math::
\mathbf{\mu}(t+dt) = \frac{\mathbf{\mu}(t) + \mathbf{\omega} \times \mathbf{\mu}\,dt}{|\mathbf{\mu}(t) + \mathbf{\omega} \times \mathbf{\mu}\,dt|}
which correctly reproduces a Boltzmann distribution of orientations and
rotational diffusion moments (see :ref:`(Ilie) <Ilie1>`) when
.. math::
\mathbf{\omega} = \frac{\mathbf{T}}{\gamma_r} + \sqrt{\frac{2 k_B T_{rot}}{\gamma_r}}\,\frac{d\mathbf{W}}{dt},
with :math:`d\mathbf{W}` being a random number with zero mean and variance :math:`dt`,
and :math:`T_{rot}` being the *rotation_temp* value.
For the style *brownian/asphere*, the center of mass positions and the
quaternions of ellipsoidal particles are updated. This fix style is
suitable for equations of motion where the rotational and translational
friction tensors can be diagonalized in a certain (body) reference frame.
friction tensors can be diagonalized in a certain (body) reference
frame. In this case, the rotational equation of motion is updated via
the quaternion
.. math::
\mathbf{q}(t+dt) = \frac{\mathbf{q}(t) + d\mathbf{q}}{|\mathbf{q}(t) + d\mathbf{q}|}
which correctly reproduces a Boltzmann distribution of orientations and rotational
diffusion moments (see :ref:`(Ilie) <Ilie1>`) when the quaternion step is given by
.. math::
d\mathbf{q} = \mathbf{\Psi}\mathbf{\omega}dt
where :math:`\mathbf{\Psi}` has rows :math:`(-q_1,-q_2,-q_3)`, :math:`(q_0,-q_3,q_2)`,
:math:`(q_3,q_0,-q_1)`, and :math:`(-q_2,q_1,q_0)`. :math:`\mathbf{\omega}` is
evaluated in the body frame of reference where the friction tensor is diagonal.
See :ref:`(Delong) <Delong1>` for more details of a similar algorithm.
---------
@ -99,13 +134,16 @@ friction tensors can be diagonalized in a certain (body) reference frame.
.. note::
This integrator does not by default assume a relationship between the
rotational and translational friction tensors, though such a relationship
should exist in the case of no-slip boundary conditions between the particles and
the surrounding (implicit) solvent. E.g. in the case of spherical particles,
the condition :math:`\gamma_t=3\gamma_r/\sigma^2` must be explicitly
accounted for by setting *gamma_t* to 3x and *gamma_r* to x (where
:math:`\sigma` is the spherical diameter). A similar (though more complex)
relationship holds for ellipsoids and rod-like particles.
rotational and translational friction tensors, though such a
relationship should exist in the case of no-slip boundary conditions
between the particles and the surrounding (implicit) solvent. E.g. in
the case of spherical particles, the condition
:math:`\gamma_t=3\gamma_r/\sigma^2` must be explicitly accounted for
by setting *gamma_t* to 3x and *gamma_r* to x (where :math:`\sigma`
is the spherical diameter). A similar (though more complex)
relationship holds for ellipsoids and rod-like particles. The
translational diffusion and rotational diffusion are given by
*temp/gamma_t* and *rotation_temp/gamma_r*.
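For example, a minimal sketch (with hypothetical values, in reduced
units with spheres of diameter :math:`\sigma = 1`) that satisfies the
no-slip relation :math:`\gamma_t=3\gamma_r/\sigma^2` would be:

.. code-block:: LAMMPS

   # sigma = 1, so gamma_t = 3*gamma_r: gamma_r = 1.0 and gamma_t = 3.0
   fix 1 all brownian/sphere 1.0 12908410 gamma_t 3.0 gamma_r 1.0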
---------
@ -113,10 +151,10 @@ friction tensors can be diagonalized in a certain (body) reference frame.
Temperature computation using the :doc:`compute temp <compute_temp>`
will not correctly compute temperature of these overdamped dynamics
since we are explicitly neglecting inertial effects.
Furthermore, this time integrator does not add the stochastic terms or
viscous terms to the force and/or torques. Rather, they are just added
in to the equations of motion to update the degrees of freedom.
since we are explicitly neglecting inertial effects. Furthermore,
this time integrator does not add the stochastic terms or viscous
terms to the force and/or torques. Rather, they are just added in to
the equations of motion to update the degrees of freedom.
---------
@ -145,14 +183,22 @@ The units of *gamma_r* are mass*length**2/time.
The *gamma_r_eigen*, and *gamma_t_eigen* keywords are the eigenvalues of
the rotational and viscous damping tensors (having the same units as
their isotropic counterparts). Required for (and only compatible with)
*brownian/asphere*. For a 2D system, the first two values of *gamma_r_eigen*
must be inf (only rotation in xy plane), and the third value of *gamma_t_eigen*
must be inf (only diffusion in xy plane).
*brownian/asphere*. For a 2D system, the first two values of
*gamma_r_eigen* must be inf (only rotation in xy plane), and the third
value of *gamma_t_eigen* must be inf (only diffusion in xy plane).
If the *dipole* keyword is used, then the dipole moments of the particles
are updated as described above. Only compatible with *brownian/asphere*
(as *brownian/sphere* updates dipoles automatically).
If the *rotation_temp* keyword is used, then the rotational diffusion
will occur at this prescribed temperature instead of *temp*. Only
compatible with *brownian/sphere* and *brownian/asphere*.
If the *planar_rotation* keyword is used, then rotation is constrained
to the xy plane in a 3D simulation. Only compatible with
*brownian/sphere* and *brownian/asphere* in 3D.
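A hypothetical sketch combining these two keywords (all values
illustrative only) could look like this:

.. code-block:: LAMMPS

   # rotational diffusion at T = 0.5 instead of temp = 1.0,
   # constrained to the xy plane of a 3d simulation
   fix 1 all brownian/sphere 1.0 12908410 gamma_t 3.0 gamma_r 1.0 rotation_temp 0.5 planar_rotation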
----------
.. note::
@ -165,9 +211,9 @@ are updated as described above. Only compatible with *brownian/asphere*
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files <restart>`.
No global or per-atom quantities are stored
by this fix for access by various :doc:`output commands <Howto_output>`.
No information about this fix is written to :doc:`binary restart files
<restart>`. No global or per-atom quantities are stored by this fix for
access by various :doc:`output commands <Howto_output>`.
No parameter of this fix can be used with the *start/stop* keywords of
@ -177,16 +223,17 @@ the :doc:`run <run>` command. This fix is not invoked during
Restrictions
""""""""""""
The style *brownian/sphere* fix requires that atoms store torque and angular velocity (omega)
as defined by the :doc:`atom_style sphere <atom_style>` command.
The style *brownian/asphere* fix requires that atoms store torque and quaternions
as defined by the :doc:`atom_style ellipsoid <atom_style>` command.
If the *dipole* keyword is used, they must also store a dipole moment
as defined by the :doc:`atom_style dipole <atom_style>` command.
The style *brownian/sphere* fix requires that atoms store torque and
angular velocity (omega) as defined by the :doc:`atom_style sphere
<atom_style>` command. The style *brownian/asphere* fix requires that
atoms store torque and quaternions as defined by the :doc:`atom_style
ellipsoid <atom_style>` command. If the *dipole* keyword is used, they
must also store a dipole moment as defined by the :doc:`atom_style
dipole <atom_style>` command.
This fix is part of the BROWNIAN package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>`
doc page for more info.
This fix is part of the BROWNIAN package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` doc page for more info.
Related commands
""""""""""""""""
@ -197,8 +244,8 @@ Related commands
Default
"""""""
The default for *rng* is *uniform*. The default for the rotational and translational friction
tensors are the identity tensor.
The default for *rng* is *uniform*. The default for the rotational and
translational friction tensors are the identity tensor.
----------

View File

@ -1,118 +0,0 @@
.. index:: fix client/md
fix client/md command
=====================
Syntax
""""""
.. parsed-literal::
fix ID group-ID client/md
* ID, group-ID are documented in :doc:`fix <fix>` command
* client/md = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all client/md
Description
"""""""""""
This fix style enables LAMMPS to run as a "client" code and
communicate each timestep with a separate "server" code to perform an
MD simulation together.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When using this fix, LAMMPS (as the client code) passes the current
coordinates of all particles to the server code each timestep, which
computes their interaction, and returns the energy, forces, and virial
for the interacting particles to LAMMPS, so it can complete the
timestep.
Note that the server code can be a quantum code, or another classical
MD code which encodes a force field (pair_style in LAMMPS lingo) which
LAMMPS does not have. In the quantum case, this fix is a mechanism
for running *ab initio* MD with quantum forces.
The group associated with this fix is ignored.
The protocol and :doc:`units <units>` for message format and content
that LAMMPS exchanges with the server code is defined on the
:doc:`server md <server_md>` doc page.
Note that when using LAMMPS as an MD client, your LAMMPS input script
should not normally contain force field commands, like a
:doc:`pair_style <pair_style>`, :doc:`bond_style <bond_style>`, or
:doc:`kspace_style <kspace_style>` command. However it is possible
for a server code to only compute a portion of the full force-field,
while LAMMPS computes the remaining part. Your LAMMPS script can also
specify boundary conditions or force constraints in the usual way,
which will be added to the per-atom forces returned by the server
code.
See the examples/message directory for example scripts where LAMMPS is both
the "client" and/or "server" code for this kind of client/server MD
simulation. The examples/message/README file explains how to launch
LAMMPS and another code in tandem to perform a coupled simulation.
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`.
The :doc:`fix_modify <fix_modify>` *energy* option is supported by
this fix to add the potential energy set by the server application to
the global potential energy of the system as part of
:doc:`thermodynamic output <thermo_style>`. The default setting for
this fix is :doc:`fix_modify energy yes <fix_modify>`.
The :doc:`fix_modify <fix_modify>` *virial* option is supported by
this fix to add the contribution computed by the server application to
the global pressure of the system via the :doc:`compute pressure
<compute_pressure>` command. This can be accessed by
:doc:`thermodynamic output <thermo_style>`. The default setting for
this fix is :doc:`fix_modify virial yes <fix_modify>`.
This fix computes a global scalar which can be accessed by various
:doc:`output commands <Howto_output>`. The scalar is the potential
energy discussed above. The scalar value calculated by this fix is
"extensive".
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
This fix is not invoked during :doc:`energy minimization <minimize>`.
Restrictions
""""""""""""
This fix is part of the MESSAGE package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to setup and shut down the messaging
protocol with the server code.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`server <server>`
Default
"""""""
none

View File

@ -0,0 +1,149 @@
.. index:: fix damping/cundall
fix damping/cundall command
===========================
Syntax
""""""
.. parsed-literal::
fix ID group-ID damping/cundall gamma_l gamma_a keyword values ...
* ID, group-ID are documented in :doc:`fix <fix>` command
* damping/cundall = style name of this fix command
* gamma_l = linear damping coefficient (dimensionless)
* gamma_a = angular damping coefficient (dimensionless)
* zero or more keyword/value pairs may be appended
.. parsed-literal::
keyword = *scale*
*scale* values = *type ratio* or *v_name*
type = atom type (1-N)
ratio = factor to scale the damping coefficients by
v_name = reference to atom style variable *name*
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all damping/cundall 0.8 0.8
fix 1 all damping/cundall 0.8 0.5 scale 3 2.5
fix a all damping/cundall 0.8 0.5 scale v_radscale
Description
"""""""""""
Add damping force and torque to finite-size spherical particles in the group
following the model of :ref:`Cundall, 1987 <Cundall1987>`, as implemented in other
granular physics codes (e.g., :ref:`Yade-DEM <YadeDEM>`, :ref:`PFC <PFC>`).
The damping is constructed to always have negative mechanical power with respect
to the current velocity/angular velocity to ensure dissipation of kinetic energy.
If used without additional thermostatting (to add kinetic energy to the system),
it has the effect of slowly (or rapidly) freezing the system; hence it can also
be used as a simple energy minimization technique.
The magnitude of the damping force/torque :math:`F_d`/:math:`T_d` is a fraction
:math:`\gamma \in [0;1]` of the current force/torque :math:`F`/:math:`T` on the
particle. Damping is applied component-by-component in each direction
:math:`k\in\{x, y, z\}`:
.. math::
{F_d}_k = - \gamma_l \, F_k \, \mathrm{sign}(F_k v_k)
.. math::
{T_d}_k = - \gamma_a \, T_k \, \mathrm{sign}(T_k \omega_k)
The larger the coefficients, the faster the kinetic energy is reduced.
If the optional keyword *scale* is used, :math:`\gamma_l` and :math:`\gamma_a`
can be scaled up or down by the specified factor for selected atoms. This
factor can be set separately for each atom type, in which case the *scale*
keyword may be used multiple times, each followed by an atom type and the
associated scale factor. Alternatively, the scaling factor can be computed
for each atom (e.g. based on its radius) by using an :doc:`atom-style
variable <variable>`, as in the sketch below.
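A minimal sketch of the atom-style variable approach (the variable name
and the scaling formula are hypothetical):

.. code-block:: LAMMPS

   # scale the damping coefficients in proportion to each particle radius
   variable radscale atom radius/0.5
   fix      1 all damping/cundall 0.8 0.5 scale v_radscale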
.. note::
The damping force/torque is computed based on the force/torque at the moment
this fix is invoked. Any force/torque added after this fix, e.g., by
:doc:`fix addforce <fix_addforce>` or :doc:`fix addtorque <fix_addtorque>`
will not be damped. When performing simulations with gravity, invoking
:doc:`fix gravity <fix_gravity>` after this fix will maintain the specified
gravitational acceleration.
.. note::
This scheme is dependent on the coordinate system and does not correspond to
realistic physical processes. It is constructed for numerical convenience and
efficacy.
This non-viscous damping presents the following advantages:
1. damping is independent of velocity, equally damping regions with distinct natural frequencies,
2. damping affects acceleration and vanishes for steady uniform motion of the particles,
3. damping parameter :math:`\gamma` is dimensionless and does not require scaling.
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`. None of the :doc:`fix_modify <fix_modify>` options are
relevant to this fix. No global or per-atom quantities are stored by
this fix for access by various :doc:`output commands <Howto_output>`.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
The :doc:`fix_modify <fix_modify>` *respa* option is supported by this
fix. This allows one to set at which level of the :doc:`r-RESPA <run_style>`
integrator the fix modifies forces/torques. The default is the outermost level.
The forces/torques due to this fix are imposed during an energy minimization,
invoked by the :doc:`minimize <minimize>` command. This fix should only
be used with damped dynamics minimizers that allow for
non-conservative forces. See the :doc:`min_style <min_style>` command
for details.
Restrictions
""""""""""""
This fix is part of the GRANULAR package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
This fix requires that atoms store torque and a radius as defined by the
:doc:`atom_style sphere <atom_style>` command.
Related commands
""""""""""""""""
:doc:`fix viscous <fix_viscous>`, :doc:`fix viscous/sphere <fix_viscous_sphere>`
Default
"""""""
none
References
""""""""""
.. _Cundall1987:
**(Cundall, 1987)** Cundall, P. A. Distinct Element Models of Rock and Soil
Structure, in Analytical and Computational Methods in Engineering Rock
Mechanics, Ch. 4, pp. 129-163. E. T. Brown, ed. London: Allen & Unwin., 1987.
.. _PFC:
**(PFC)** PFC Particle Flow Code 6.0 Documentation. Itasca Consulting Group.
.. _YadeDEM:
**(Yade-DEM)** V. Smilauer et al. (2021), Yade Documentation 3rd ed.
The Yade Project. DOI:10.5281/zenodo.5705394 (https://yade-dem.org/doc/)

View File

@ -70,7 +70,7 @@ Syntax
*remap* value = *x* or *v* or *none*
x = remap coords of atoms in group into deforming box
v = remap velocities of all atoms when they cross periodic boundaries
v = remap velocities of atoms in group when they cross periodic boundaries
none = no remapping of x or v
*flip* value = *yes* or *no*
allow or disallow box flips when it becomes highly skewed

View File

@ -8,7 +8,7 @@ Syntax
.. parsed-literal::
fix ID group-ID lb/fluid nevery LBtype viscosity density keyword values ...
fix ID group-ID lb/fluid nevery viscosity density keyword values ...
* ID, group-ID are documented in :doc:`fix <fix>` command
* lb/fluid = style name of this fix command

103
doc/src/fix_mdi_aimd.rst Normal file
View File

@ -0,0 +1,103 @@
.. index:: fix mdi/aimd
fix mdi/aimd command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/aimd keyword
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/aimd = style name of this fix command
* optional keyword = *plugin*
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/aimd
fix 1 all mdi/aimd plugin
Description
"""""""""""
This command enables LAMMPS to act as a client to a separate server
code, coupling the two codes together to perform ab initio MD (AIMD)
simulations.
More specifically, this command causes LAMMPS to begin using the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
to run as an MDI driver (client), which sends MDI commands to an
external MDI engine code (server) which in the case of AIMD is a
quantum mechanics (QM) code, or could be LAMMPS itself, acting as a
surrogate for a QM code. See the :doc:`Howto mdi <Howto_mdi>` page
for more information about how LAMMPS can operate as either an MDI
driver or engine.
The examples/mdi directory contains input scripts performing AIMD in
this manner with LAMMPS acting as both a driver and an engine
(surrogate for a QM code). The examples/mdi/README file explains how
to launch both driver and engine codes so that they communicate using
the MDI library via either MPI or sockets. Any QM code that supports
MDI could be used in place of LAMMPS acting as a QM surrogate. See
the :doc:`Howto mdi <Howto_mdi>` page for a current list (March 2022)
of such QM codes.
The engine code can run either as a stand-alone code, launched at the
same time as LAMMPS, or as a plugin library. See the :doc:`mdi plugin
<mdi>` command for how to trigger LAMMPS to load the plugin library.
Again, the examples/mdi/README file explains how to launch both driver
and engine codes so that engine is used in plugin mode.
To use this fix with a plugin engine, you must specify the
*plugin* keyword as the last argument, as illustrated above.
.. note::
As of April 2022, the *plugin* keyword is needed. In a future
version of the MDI library it will no longer be necessary.
----------
This fix performs the timestepping portion of an AIMD simulation.
Both LAMMPS and the engine code (QM or LAMMPS) should define the same
system (simulation box, atoms and their types) in their respective
input scripts. LAMMPS then begins its timestepping.
At the point in each timestep when LAMMPS needs the force on each
atom, it communicates with the engine code. It sends the current
simulation box size and shape (if they change dynamically, e.g. during
an NPT simulation), and the current atom coordinates. The engine code
computes quantum forces on each atom and returns them to LAMMPS. If
LAMMPS also needs the system energy and/or virial, it requests those
values from the engine code as well.
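A minimal hypothetical driver input for such an AIMD run might look
like the following (the data file name and run length are placeholders;
the engine is launched separately as described in examples/mdi/README):

.. code-block:: LAMMPS

   units      real
   atom_style atomic
   read_data  data.aimd      # must define the same system as the engine
   fix        1 all nve      # time integration done by LAMMPS
   fix        2 all mdi/aimd # forces, energy, virial come from the engine
   run        100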
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
To use LAMMPS as an MDI driver in conjunction with other MDI-enabled
atomistic codes, the :doc:`units <units>` command should be used to
specify *real* or *metal* units. This will ensure the correct unit
conversions between LAMMPS and MDI units, which the other codes will
also perform in their preferred units.
LAMMPS can also be used as an MDI driver in other unit choices it
supports, e.g. *lj*, but then no unit conversion is performed.
Related commands
""""""""""""""""
:doc:`mdi engine <mdi>`
Default
"""""""
none

View File

@ -1,59 +0,0 @@
.. index:: fix mdi/engine
fix mdi/engine command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/engine
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/engine = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/engine
Description
"""""""""""
This fix is used along with the :doc:`mdi/engine <mdi_engine>` command
to enable LAMMPS to use the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_ to run as
an MDI engine. The fix provides hooks that enable MDI driver codes to
communicate with LAMMPS at various points within a LAMMPS timestep.
It is not generally necessary to add this fix to a LAMMPS input file,
even when using the :doc:`mdi/engine <mdi_engine>` command. If the
:doc:`mdi/engine <mdi_engine>` command is executed and this fix is not
present, it will automatically be added and applied as a new fix for
all atoms for the duration of the command. Thus it is only necessary
to add this fix to an input file when you want to modify the group-ID
or the ordering of this fix relative to other fixes in the input script.
For more information about running LAMMPS as an MDI engine, see the
:doc:`mdi/engine <mdi_engine>` command and the :doc:`Howto mdi
<Howto_mdi>` doc page.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`mdi/engine <mdi_engine>`
Default
"""""""
none

View File

@ -12,7 +12,7 @@ Syntax
* ID, group-ID are documented in :doc:`fix <fix>` command
* move = style name of this fix command
* style = *linear* or *wiggle* or *rotate* or *variable*
* style = *linear* or *wiggle* or *rotate* or *transrot* or *variable*
.. parsed-literal::
@ -25,6 +25,11 @@ Syntax
Px,Py,Pz = origin point of axis of rotation (distance units)
Rx,Ry,Rz = axis of rotation vector
period = period of rotation (time units)
*transrot* args = Vx Vy Vz Px Py Pz Rx Ry Rz period
Vx,Vy,Vz = components of velocity vector (velocity units)
Px,Py,Pz = origin point of axis of rotation (distance units)
Rx,Ry,Rz = axis of rotation vector
period = period of rotation (time units)
*variable* args = v_dx v_dy v_dz v_vx v_vy v_vz
v_dx,v_dy,v_dz = 3 variable names that calculate x,y,z displacement as function of time, any component can be specified as NULL
v_vx,v_vy,v_vz = 3 variable names that calculate x,y,z velocity as function of time, any component can be specified as NULL
@ -44,6 +49,7 @@ Examples
fix 1 boundary move wiggle 3.0 0.0 0.0 1.0 units box
fix 2 boundary move rotate 0.0 0.0 0.0 0.0 0.0 1.0 5.0
fix 2 boundary move variable v_myx v_myy NULL v_VX v_VY NULL
fix 3 boundary move transrot 0.1 0.1 0.0 0.0 0.0 0.0 0.0 0.0 1.0 5.0 units box
Description
"""""""""""
@ -55,15 +61,17 @@ whose movement can influence nearby atoms.
.. note::
The atoms affected by this fix should not normally be time
integrated by other fixes (e.g. :doc:`fix nve <fix_nve>`, :doc:`fix nvt <fix_nh>`), since that will change their positions and
velocities twice.
The atoms affected by this fix should not normally be time integrated
by other fixes (e.g. :doc:`fix nve <fix_nve>`, :doc:`fix nvt
<fix_nh>`), since that will change their positions and velocities
twice.
.. note::
As atoms move due to this fix, they will pass through periodic
boundaries and be remapped to the other side of the simulation box,
just as they would during normal time integration (e.g. via the :doc:`fix nve <fix_nve>` command). It is up to you to decide whether
just as they would during normal time integration (e.g. via the
:doc:`fix nve <fix_nve>` command). It is up to you to decide whether
periodic boundaries are appropriate with the kind of atom motion you
are prescribing with this fix.
@ -73,11 +81,11 @@ whose movement can influence nearby atoms.
position at the time the fix is specified. These initial coordinates
are stored by the fix in "unwrapped" form, by using the image flags
associated with each atom. See the :doc:`dump custom <dump>` command
for a discussion of "unwrapped" coordinates. See the Atoms section of
the :doc:`read_data <read_data>` command for a discussion of image flags
and how they are set for each atom. You can reset the image flags
(e.g. to 0) before invoking this fix by using the :doc:`set image <set>`
command.
for a discussion of "unwrapped" coordinates. See the Atoms section
of the :doc:`read_data <read_data>` command for a discussion of image
flags and how they are set for each atom. You can reset the image
flags (e.g. to 0) before invoking this fix by using the :doc:`set
image <set>` command.
----------
@ -118,13 +126,13 @@ notation as
where *X0* = (x0,y0,z0) is their position at the time the fix is
specified, *A* is the specified amplitude vector with components
(Ax,Ay,Az), *omega* is 2 PI / *period*, and *delta* is the time
elapsed since the fix was specified. This style also sets the
velocity of each atom to the time derivative of this expression. If
any of the amplitude components is specified as NULL, then the
position and velocity of that component is time integrated the same as
the :doc:`fix nve <fix_nve>` command would perform, using the
corresponding force component on the atom.
(Ax,Ay,Az), *omega* is 2 PI / *period*, and *delta* is the time elapsed
since the fix was specified. This style also sets the velocity of each
atom to the time derivative of this expression. If any of the amplitude
components is specified as NULL, then the position and velocity of that
component is time integrated the same as the :doc:`fix nve <fix_nve>`
command would perform, using the corresponding force component on the
atom.
Note that the *wiggle* style is identical to using the *variable*
style with :doc:`equal-style variables <variable>` that use the
@ -139,21 +147,29 @@ swiggle() and cwiggle() functions. E.g.
variable v equal v_omega*($A-cwiggle(0.0,$A,$T))
fix 1 boundary move variable v_x NULL NULL v_v NULL NULL
The *rotate* style rotates atoms around a rotation axis *R* =
(Rx,Ry,Rz) that goes through a point *P* = (Px,Py,Pz). The *period* of
the rotation is also specified. The direction of rotation for the
atoms around the rotation axis is consistent with the right-hand rule:
if your right-hand thumb points along *R*, then your fingers wrap
around the axis in the direction of rotation.
The *rotate* style rotates atoms around a rotation axis *R* = (Rx,Ry,Rz)
that goes through a point *P* = (Px,Py,Pz). The *period* of the
rotation is also specified. The direction of rotation for the atoms
around the rotation axis is consistent with the right-hand rule: if your
right-hand thumb points along *R*, then your fingers wrap around the
axis in the direction of rotation.
This style also sets the velocity of each atom to (omega cross Rperp)
where omega is its angular velocity around the rotation axis and Rperp
is a perpendicular vector from the rotation axis to the atom. If the
defined :doc:`atom_style <atom_style>` assigns an angular velocity or
angular momentum or orientation to each atom (:doc:`atom styles <atom_style>` sphere, ellipsoid, line, tri, body), then
angular momentum or orientation to each atom (:doc:`atom styles
<atom_style>` sphere, ellipsoid, line, tri, body), then
those properties are also updated appropriately to correspond to the
atom's motion and rotation over time.
The *transrot* style combines the effects of *rotate* and *linear* so
that it is possible to prescribe a rotating group of atoms that also
moves at a constant velocity. The arguments are for the translation
first and then for the rotation. Since the rotation affects all
coordinate components, it is not possible to set any of the
translation vector components to NULL.
The *variable* style allows the position and velocity components of
each atom to be set by formulas specified via the
:doc:`variable <variable>` command. Each of the 6 variables is
@ -165,10 +181,10 @@ Each variable must be of either the *equal* or *atom* style.
a function of the timestep as well as of other simulation values.
*Atom*\ -style variables compute a numeric quantity for each atom, that
can be a function per-atom quantities, such as the atom's position, as
well as of the timestep and other simulation values. Note that this
fix stores the original coordinates of each atom (see note below) so
that per-atom quantity can be used in an atom-style variable formula.
See the :doc:`variable <variable>` command for details.
well as of the timestep and other simulation values. Note that this fix
stores the original coordinates of each atom (see note below) so that
this per-atom quantity can be used in an atom-style variable formula. See
the :doc:`variable <variable>` command for details.
The first 3 variables (v_dx,v_dy,v_dz) specified for the *variable*
style are used to calculate a displacement from the atom's original
@ -206,8 +222,9 @@ spacings can be different in x,y,z.
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
This fix writes the original coordinates of moving atoms to :doc:`binary restart files <restart>`, as well as the initial timestep, so that
the motion can be continuous in a restarted simulation. See the
This fix writes the original coordinates of moving atoms to :doc:`binary
restart files <restart>`, as well as the initial timestep, so that the
motion can be continuous in a restarted simulation. See the
:doc:`read_restart <read_restart>` command for info on how to re-specify
a fix in an input script that reads a restart file, so that the
operation of the fix continues in an uninterrupted fashion.
@ -224,11 +241,12 @@ fix.
This fix produces a per-atom array which can be accessed by various
:doc:`output commands <Howto_output>`. The number of columns for each
atom is 3, and the columns store the original unwrapped x,y,z coords
of each atom. The per-atom values can be accessed on any timestep.
atom is 3, and the columns store the original unwrapped x,y,z coords of
each atom. The per-atom values can be accessed on any timestep.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command. This fix is not invoked during :doc:`energy minimization <minimize>`.
the :doc:`run <run>` command. This fix is not invoked during
:doc:`energy minimization <minimize>`.
For :doc:`rRESPA time integration <run_style>`, this fix adjusts the
position and velocity of atoms on the outermost rRESPA level.

View File

@ -29,10 +29,10 @@ Description
"""""""""""
Perform targeted molecular dynamics (TMD) on a group of atoms. A
holonomic constraint is used to force the atoms to move towards (or
away from) the target configuration. The parameter "rho" is
monotonically decreased (or increased) from its initial value to
rho_final at the end of the run.
holonomic constraint is used to force the atoms to move towards (or away
from) the target configuration. The parameter "rho" is monotonically
decreased (or increased) from its initial value to rho_final at the end
of the run.
Rho has distance units and is a measure of the root-mean-squared
distance (RMSD) between the current configuration of the atoms in the
@ -55,22 +55,25 @@ a .gz suffix). The format of the target file1 is as follows:
The first 3 lines may or may not be needed, depending on the format of
the atoms to follow. If image flags are included with the atoms, the
first 3 lo/hi lines must appear in the file. If image flags are not
included, the first 3 lines should not appear. The 3 lines contain the
first 3 lo/hi lines **must** appear in the file. If image flags are not
included, the first 3 lines **must not** appear. The 3 lines contain the
simulation box dimensions for the atom coordinates, in the same format
as in a LAMMPS data file (see the :doc:`read_data <read_data>` command).
The remaining lines each contain an atom ID and its target x,y,z
coordinates. The atom lines (all or none of them) can optionally be
followed by 3 integer values: nx,ny,nz. For periodic dimensions, they
followed by 3 integer values: nx,ny,nz. For periodic dimensions, they
specify which image of the box the atom is considered to be in, i.e. a
value of N (positive or negative) means add N times the box length to
the coordinate to get the true value.
the coordinate to get the true value. Those 3 integers must either be
given for all atoms or for none.
The atom lines can be listed in any order, but every atom in the group
must be listed in the file. Atoms not in the fix group may also be
listed; they will be ignored.
Comments starting with '#' and empty lines may be included as well.
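As an illustration, a hypothetical target file for a 3-atom group, with
box dimensions and image flags included, could look like this:

.. parsed-literal::

   # hypothetical target configuration
   -10.0 10.0 xlo xhi
   -10.0 10.0 ylo yhi
   -10.0 10.0 zlo zhi
   1  2.5 -1.0  0.0   0 0 0
   2  3.0  1.5  0.0   0 0 1
   3 -2.0  0.5  0.5   1 0 0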
TMD statistics are written to file2 every N timesteps, unless N is
specified as 0, which means no statistics.

View File

@ -154,7 +154,7 @@ of the electronic specific heat, but ignored temperature dependencies
of any of the other parameters. See more discussion below for fix
ttm/mod.
..note::
.. note::
These fixes do not perform time integration of the atoms in the fix
group, they only rescale their velocities. Thus a time integration
@ -164,7 +164,7 @@ ttm/mod.
fix, e.g. :doc:`fix nvt <fix_nh>` or :doc:`fix langevin
<fix_langevin>`.
..note::
.. note::
These fixes require use of an orthogonal 3d simulation box with
periodic boundary conditions in all dimensions. They also require
@ -305,8 +305,8 @@ is calculated as
where lambda is the electron mean free path (see :ref:`(Norman) <Norman>`,
:ref:`(Pisarev) <Pisarev>`).
The fix ttm/mod parameter file *init_file* has the following syntax/
Every line with the odd number is considered as a comment and
The fix ttm/mod parameter file *init_file* has the following syntax.
Every line with an odd number is considered as a comment and
ignored. The lines with the even numbers are treated as follows:
.. parsed-literal::

View File

@ -106,12 +106,15 @@ for details.
Restrictions
""""""""""""
none
none
Related commands
""""""""""""""""
:doc:`fix langevin <fix_langevin>`
:doc:`fix langevin <fix_langevin>`,
:doc:`fix viscous/sphere <fix_viscous_sphere>`,
:doc:`fix damping/cundall <fix_damping_cundall>`
Default
"""""""

View File

@ -0,0 +1,111 @@
.. index:: fix viscous/sphere
fix viscous/sphere command
==========================
Syntax
""""""
.. parsed-literal::
fix ID group-ID viscous/sphere gamma keyword values ...
* ID, group-ID are documented in :doc:`fix <fix>` command
* viscous/sphere = style name of this fix command
* gamma = damping coefficient (torque/angular velocity units)
* zero or more keyword/value pairs may be appended
.. parsed-literal::
keyword = *scale*
*scale* values = *type ratio* or *v_name*
type = atom type (1-N)
ratio = factor to scale the damping coefficients by
v_name = reference to atom style variable *name*
Examples
""""""""
.. code-block:: LAMMPS
fix 1 flow viscous/sphere 0.1
fix 1 damp viscous/sphere 0.5 scale 3 2.5
fix 1 damp viscous/sphere 0.5 scale v_radscale
Description
"""""""""""
Add a viscous damping torque to finite-size spherical particles in the group
that is proportional to the angular velocity of the atom. In granular
simulations this can be useful for draining the rotational kinetic energy from
the system in a controlled fashion. If used without additional thermostatting
(to add kinetic energy to the system), it has the effect of slowly (or rapidly)
freezing the system; hence it can also be used as a simple energy minimization
technique.
The damping torque :math:`T_i` is given by :math:`T_i = - \gamma \omega_i`.
The larger the coefficient, the faster the rotational kinetic energy is reduced.
If the optional keyword *scale* is used, :math:`\gamma` can be scaled up
or down by the specified factor for selected atoms. This factor can be
set separately for each atom type, in which case the *scale* keyword may
be used multiple times, each followed by an atom type and the associated
scale factor. Alternatively, the scaling factor can be computed for each
atom (e.g. based on its radius) by using an :doc:`atom-style variable
<variable>`.
.. note::
You should specify gamma in torque/angular velocity units. This is not
the same as mass/time units, at least for some of the LAMMPS
:doc:`units <units>` options like "real" or "metal" that are not
self-consistent.
In the current implementation, rather than have the user specify a viscosity,
:math:`\gamma` is specified directly in torque/angular velocity units.
If needed, :math:`\gamma` can be adjusted for atoms of different sizes
(i.e. :math:`\sigma`) by using the *scale* keyword.
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`. None of the :doc:`fix_modify <fix_modify>` options are
relevant to this fix. No global or per-atom quantities are stored by
this fix for access by various :doc:`output commands <Howto_output>`.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
The :doc:`fix_modify <fix_modify>` *respa* option is supported by this
fix. This allows one to set at which level of the :doc:`r-RESPA <run_style>`
integrator the fix modifies torques. The default is the outermost level.
The torques due to this fix are imposed during an energy minimization,
invoked by the :doc:`minimize <minimize>` command. This fix should only
be used with damped dynamics minimizers that allow for
non-conservative forces. See the :doc:`min_style <min_style>` command
for details.
Restrictions
""""""""""""
This fix is part of the EXTRA-FIX package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
This fix requires that atoms store torque and angular velocity (omega)
and a radius as defined by the :doc:`atom_style sphere <atom_style>`
command.
All particles in the group must be finite-size spheres. They cannot
be point particles.
Related commands
""""""""""""""""
:doc:`fix viscous <fix_viscous>`, :doc:`fix damping/cundall <fix_damping_cundall>`
Default
"""""""
none

View File

@ -294,11 +294,13 @@ group and running further.
.. note::
All fixes and computes take a group ID as an argument, but they
do not all allow for use of a dynamic group. If you get an error
All fixes and computes take a group ID as an argument, but they do
not all allow for use of a dynamic group. If you get an error
message that this is not allowed, but feel that it should be for the
fix or compute in question, then please post your reasoning to the
LAMMPS mail list and we can change it.
`LAMMPS forum at MatSci <https://matsci.org/c/lammps-development/>`_
and we can look into changing it. The same applies if you come
across inconsistent behavior when dynamic groups are allowed.
The *static* style removes the setting for a dynamic group, converting
it to a static group (the default). The atoms in the static group are

312
doc/src/mdi.rst Normal file
View File

@ -0,0 +1,312 @@
.. index:: mdi
mdi command
==================
Syntax
""""""
.. parsed-literal::
mdi mode args
* mode = *engine* or *plugin*
.. parsed-literal::
*engine* args = none
*plugin* args = name keyword value keyword value
name = name of plugin library, e.g. lammps means a liblammps.so library will be loaded
keywords = *mdi* or *infile* or *extra* or *command*
*mdi* value = args passed to MDI for driver to operate with plugins
*infile* value = filename the engine will read at start-up
*extra* value = additional command-line args to pass to engine library when loaded
*command* value = a LAMMPS input script command to execute
Examples
""""""""
.. code-block:: LAMMPS
mdi engine
mdi plugin lammps mdi "-role ENGINE -name lammps -method LINK" &
infile in.aimd.engine extra "-log log.aimd.engine.plugin" &
command "run 5"
Description
"""""""""""
This command implements two high-level operations within LAMMPS to use
the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_ for
coupling to other codes in a client/server protocol.
The *engine* mode enables LAMMPS to act as an MDI engine (server),
responding to requests from an MDI driver (client) code.
The *plugin* mode enables LAMMPS to act as an MDI driver (client) and
load the MDI engine (server) code as a library plugin. The MDI engine
can also be a stand-alone code, launched separately from LAMMPS, in
which case the mdi plugin command is not used.
See the :doc:`Howto mdi <Howto_mdi>` page for a discussion of all the
different ways two or more codes can interact via MDI.
The examples/mdi directory has examples which use LAMMPS in 4
different modes: as a driver using an engine as either a stand-alone
code or as a plugin, and as an engine operating as either a
stand-alone code or as a plugin. The README file in that directory
shows how to launch and couple the codes for all 4 usage modes, so
that they communicate via the MDI library using either MPI or sockets.
----------
The *mdi engine* command is used to make LAMMPS operate as an MDI
engine. It is typically used in an input script after LAMMPS has set
up the system it is going to model, consistent with what the driver
code expects. Depending on when the driver code tells the LAMMPS
engine to exit, other commands can be executed after this command, but
typically it is used at the end of a LAMMPS input script.
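A minimal hypothetical engine input following this pattern (system
setup first, the mdi engine command last; file names and force field
settings are placeholders):

.. code-block:: LAMMPS

   units      lj
   atom_style atomic
   read_data  data.engine     # must match the system the driver expects
   pair_style lj/cut 2.5
   pair_coeff 1 1 1.0 1.0
   mdi        engine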
When acting as an MDI engine operating as an MD code (or surrogate QM
code), LAMMPS currently recognizes the following list of standard MDI
commands issued by a driver code. Using standard commands defined by
the MDI library means that a driver code can work interchangeably with
LAMMPS, other MD codes, or QM codes which support the MDI standard.
See more details about these commands in the `MDI library documentation
<https://molssi-mdi.github.io/MDI_Library/html/mdi_standard.html>`_.
These commands are valid at the @DEFAULT node defined by MDI.
Commands that start with ">" mean the driver is sending information to
LAMMPS. Commands that start with "<" are requests by the driver for
LAMMPS to send it information. Commands that start with an alphabetic
letter perform actions. Commands that start with "@" are MDI "node"
commands, which are described further below.
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >CELL or <CELL
- Send/request 3 simulation box edge vectors (9 values)
* - >CELL_DISPL or <CELL_DISPL
- Send/request displacement of the simulation box from the origin (3 values)
* - >CHARGES or <CHARGES
- Send/request charge on each atom (N values)
* - >COORDS or <COORDS
- Send/request coordinates of each atom (3N values)
* - <ENERGY
- Request total energy (potential + kinetic) of the system (1 value)
* - >FORCES or <FORCES
- Send/request forces on each atom (3N values)
* - >+FORCES
- Send forces to add to each atom (3N values)
* - <LABELS
- Request string label of each atom (N values)
* - <MASSES
- Request mass of each atom (N values)
* - MD
- Perform an MD simulation for N timesteps (most recent >NSTEPS value)
* - OPTG
- Perform an energy minimization to convergence (most recent >TOLERANCE values)
* - >NATOMS or <NATOMS
- Send/request number of atoms in the system (1 value)
* - >NSTEPS
- Send number of timesteps for next MD dynamics run via MD command
* - <PE
- Request potential energy of the system (1 value)
* - <STRESS
- Request stress tensor (virial) of the system (6 values)
* - >TOLERANCE
- Send 4 tolerance parameters for next MD minimization via OPTG command
* - >TYPES or <TYPES
- Send/request the numeric type of each atom (N values)
* - >VELOCITIES or <VELOCITIES
- Send/request the velocity of each atom (3N values)
* - @INIT_MD or @INIT_OPTG
- Driver tells LAMMPS to start single-step dynamics or minimization (see below)
* - EXIT
- Driver tells LAMMPS to exit engine mode
.. note::
The <ENERGY, <FORCES, <PE, and <STRESS commands trigger LAMMPS to
compute atomic interactions for the current configuration of atoms
and size/shape of the simulation box. I.e. LAMMPS invokes its
pair, bond, angle, ..., kspace styles. If the driver is updating
the atom coordinates and/or box incrementally (as in an MD
simulation which the driver is managing), then the LAMMPS engine
will do the same, and only occasionally trigger neighbor list
builds. If the change in atom positions is large (since the
previous >COORDS command), then LAMMPS will do a more expensive
operation to migrate atoms to new processors as needed and
re-neighbor. If the >NATOMS or >TYPES commands have been sent
(since the previous >COORDS command), then LAMMPS assumes the
system is new and re-initializes an entirely new simulation.
The MD and OPTG commands perform an entire MD simulation or energy
minimization (to convergence) with no communication from the driver
until the simulation is complete. By contrast, the @INIT_MD and
@INIT_OPTG commands allow the driver to communicate with the engine at
each timestep of a dynamics run or iteration of a minimization; see
more info below.
The MD command performs a simulation using the most recent >NSTEPS
value. The OPTG command performs a minimization using the 4
convergence parameters from the most recent >TOLERANCE command. The 4
parameters sent are those used by the :doc:`minimize <minimize>`
command in LAMMPS: etol, ftol, maxiter, and maxeval.
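As a minimal sketch (not part of the LAMMPS distribution), a driver
could issue these two commands through the MDI Library's Python
interface as follows; the connection options, step count, and
tolerance values are assumptions chosen for illustration:
.. code-block:: python
   import mdi
   # illustrative connection options; adjust -method/-port to your setup
   mdi.MDI_Init("-role DRIVER -name driver -method TCP -port 8021")
   comm = mdi.MDI_Accept_Communicator()  # connect to the LAMMPS engine
   # run 1000 MD steps in the engine
   mdi.MDI_Send_Command(">NSTEPS", comm)
   mdi.MDI_Send(1000, 1, mdi.MDI_INT, comm)
   mdi.MDI_Send_Command("MD", comm)
   # minimize using etol, ftol, maxiter, maxeval (as for the minimize command)
   mdi.MDI_Send_Command(">TOLERANCE", comm)
   mdi.MDI_Send([1.0e-4, 1.0e-6, 1000.0, 10000.0], 4, mdi.MDI_DOUBLE, comm)
   mdi.MDI_Send_Command("OPTG", comm)
   mdi.MDI_Send_Command("EXIT", comm)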
The mdi engine command also implements the following custom MDI
commands which are LAMMPS-specific. These commands are also valid at
the @DEFAULT node defined by MDI:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >NBYTES
- Send # of datums in a subsequent command (1 value)
* - >COMMAND
- Send a LAMMPS input script command as a string (Nbytes in length)
* - >COMMANDS
- Send multiple LAMMPS input script commands as a newline-separated string (Nbytes in length)
* - >INFILE
- Send filename of an input script to execute (filename Nbytes in length)
* - <KE
- Request kinetic energy of the system (1 value)
Note that other custom commands can easily be added if these are not
sufficient to support what a user-written driver code needs. Code to
support new commands can be added to the MDI package within LAMMPS,
specifically to the src/MDI/mdi_engine.cpp file.
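For illustration only, a hypothetical Python driver using the MDI
Library could use the >NBYTES handshake to send a single LAMMPS
command string and then query a custom value; the connection options
and exact buffer handling are assumptions for this sketch:
.. code-block:: python
   import mdi
   mdi.MDI_Init("-role DRIVER -name driver -method TCP -port 8021")
   comm = mdi.MDI_Accept_Communicator()
   cmd = "run 100"
   # announce the length of the string, then send the command itself
   mdi.MDI_Send_Command(">NBYTES", comm)
   mdi.MDI_Send(len(cmd), 1, mdi.MDI_INT, comm)
   mdi.MDI_Send_Command(">COMMAND", comm)
   mdi.MDI_Send(cmd, len(cmd), mdi.MDI_CHAR, comm)
   # request the kinetic energy computed by the engine
   mdi.MDI_Send_Command("<KE", comm)
   ke = mdi.MDI_Recv(1, mdi.MDI_DOUBLE, comm)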
MDI also defines a standard mechanism for the driver to request that
an MD engine (LAMMPS) perform a dynamics simulation one step at a time
or an energy minimization one iteration at a time. This is so that
the driver can (optionally) communicate with LAMMPS at intermediate
points of the timestep or iteration by issuing MDI node commands which
start with "@".
To tell LAMMPS to run dynamics in single-step mode, the driver sends
an @INIT_MD command followed by these commands. The driver can
interact with LAMMPS at 3 node locations within each timestep:
@COORDS, @FORCES, and @ENDSTEP:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - @COORDS
- Proceed to next @COORDS node = post-integrate location in LAMMPS timestep
* - @FORCES
- Proceed to next @FORCES node = post-force location in LAMMPS timestep
* - @ENDSTEP
- Proceed to next @ENDSTEP node = end-of-step location in LAMMPS timestep
* - @DEFAULT
- Exit MD simulation, return to @DEFAULT node
* - EXIT
- Driver tells LAMMPS to exit the MD simulation and engine mode
To tell LAMMPS to run an energy minimization in single-iteration
mode, the driver sends an @INIT_OPTG command followed by these
commands. The driver can interact with LAMMPS at 2 node locations
within each iteration of the minimizer: @COORDS and @FORCES:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - @COORDS
- Proceed to next @COORDS node = min-pre-force location in LAMMPS min iteration
* - @FORCES
- Proceed to next @FORCES node = min-post-force location in LAMMPS min iteration
* - @DEFAULT
- Exit minimization, return to @DEFAULT node
* - EXIT
- Driver tells LAMMPS to exit the minimization and engine mode
While LAMMPS is at its @COORDS node, the following standard MDI
commands are supported, as documented above: >COORDS or <COORDS,
@COORDS, @FORCES, @ENDSTEP, @DEFAULT, EXIT.
While LAMMPS is at its @FORCES node, the following standard MDI
commands are supported, as documented above: <COORDS, <ENERGY, >FORCES
or >+FORCES or <FORCES, <KE, <PE, <STRESS, @COORDS, @FORCES, @ENDSTEP,
@DEFAULT, EXIT.
While LAMMPS is at its @ENDSTEP node, the following standard MDI
commands are supported, as documented above: <ENERGY, <FORCES, <KE,
<PE, <STRESS, @COORDS, @FORCES, @ENDSTEP, @DEFAULT, EXIT.
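The sketch below, again using the MDI Library's Python interface with
assumed connection options, shows how a driver might step the engine
through a short dynamics run one timestep at a time, reading the
forces at each @FORCES node:
.. code-block:: python
   import mdi
   mdi.MDI_Init("-role DRIVER -name driver -method TCP -port 8021")
   comm = mdi.MDI_Accept_Communicator()
   mdi.MDI_Send_Command("<NATOMS", comm)
   natoms = mdi.MDI_Recv(1, mdi.MDI_INT, comm)
   mdi.MDI_Send_Command("@INIT_MD", comm)      # enter single-step dynamics
   for step in range(10):
       mdi.MDI_Send_Command("@FORCES", comm)   # advance to the post-force node
       mdi.MDI_Send_Command("<FORCES", comm)   # request the current forces
       forces = mdi.MDI_Recv(3 * natoms, mdi.MDI_DOUBLE, comm)
       mdi.MDI_Send_Command("@ENDSTEP", comm)  # finish this timestep
   mdi.MDI_Send_Command("@DEFAULT", comm)      # exit the MD loop
   mdi.MDI_Send_Command("EXIT", comm)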
----------
The *mdi plugin* command is used to make LAMMPS operate as an MDI
driver which loads an MDI engine as a plugin library. It is typically
used in an input script after LAMMPS has set up the system it is
going to model, consistent with what the engine code expects.
The *name* argument specifies which plugin library to load. A name
like "lammps" is converted to a filename liblammps.so. The path for
where this file is located is specified by the -plugin_path switch
within the -mdi command-line switch, which is specified when LAMMPS is
launched. See the examples/mdi/README files for examples of how this
is done.
The *mdi* keyword is required and is used as the -mdi argument passed
to the library when it is launched. The -role and -method settings
are required. The -name setting can be anything you choose. MDI
drivers and engines can query their names to verify they are the
values they expect.
The *infile* keyword is also required. It is the name of an input
script which the engine will open and process. MDI will pass it as a
command-line argument to the library when it is launched. The file
typically contains settings that an MD or QM code will use for its
subsequent calculations.
The *extra* keyword is optional. It contains additional command-line
arguments which MDI will pass to the library when it is launched.
The *command* keyword is required. It specifies a LAMMPS input script
command (as a single argument in quotes if it is multiple words).
Once the plugin library is launched, LAMMPS will execute this command.
Other previously-defined commands in the input script, such as the
:doc:`fix mdi/aimd <fix_mdi_aimd>` command, should perform MDI
communication with the engine, while the specified *command* executes.
Note that if *command* is an :doc:`include <include>` command, then it
could specify a filename with multiple LAMMPS commands.
.. note::
When the single *command* is complete, LAMMPS will send an MDI
EXIT command to the plugin engine and the plugin will be removed.
The "mdi plugin" command will then exit and the next command
(if any) in the LAMMPS input script will be processed. A subsequent
"mdi plugin" command could then load the same library plugin or
a different one if desired.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
To use LAMMPS in conjunction with other MDI-enabled atomistic codes,
the :doc:`units <units>` command should be used to specify *real* or
*metal* units. This will ensure the correct unit conversions between
LAMMPS and MDI units; the other codes will likewise convert between
MDI units and their own preferred units.
LAMMPS can also be used as an MDI engine in other unit choices it
supports, e.g. *lj*, but then no unit conversion is performed.
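As a hedged illustration of what such a conversion involves, the MDI
Library itself exposes conversion factors between its supported units;
the function and unit names below follow the MDI documentation and are
not part of the LAMMPS sources:
.. code-block:: python
   import mdi
   # convert a length from Angstroms (LAMMPS real/metal units) to Bohr
   # (MDI's internal atomic units)
   angstrom_to_bohr = mdi.MDI_Conversion_Factor("angstrom", "bohr")
   print(10.0 * angstrom_to_bohr)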
Related commands
""""""""""""""""
:doc:`fix mdi/aimd <fix_mdi_aimd>`
Default
"""""""
None

View File

@ -1,88 +0,0 @@
.. index:: mdi/engine
mdi_engine command
==================
Syntax
""""""
.. parsed-literal::
mdi_engine
Description
"""""""""""
This command is used to have LAMMPS act as a server with another
client code to effectively couple the two codes together in
client/server mode.
More specifically, this command causes LAMMPS to begin using the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
to run as an MDI engine (server), responding to commands made by an
external MDI driver code (client). See the :doc:`Howto mdi
<Howto_mdi>` page for more information about how LAMMPS can work
as both an MDI driver or engine.
General information about launching codes that communicate using the
MDI Library can be found in the `corresponding page
<https://molssi-mdi.github.io/MDI_Library/html/library_page.html#library_launching_sec>`_
of the MDI Library's documentation.
----------
This command should typically be used in an input script after LAMMPS
has setup the system it is going to model in collaboration with the
driver code. Depending on how the driver code tells the LAMMPS engine
to exit, other commands can be executed after this command, but
typically it should be used at the end of the LAMMPS input script.
To act as an MD-based MDI engine, this is the list of MDI commands from
a driver code which LAMMPS currently recognizes. See more details
about these commands in the `MDI library documentation
<https://molssi-mdi.github.io/MDI_Library/html/mdi_standard.html>`_
.. NOTE: Taylor - is this the best link for this info? Can we flesh this
.. out with the full list of supported commands? Maybe the distinction
.. of what "node" the commands refer to is not needed in this table?
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >NATOMS
- Driver sends the number of atoms in the system
* - <NATOMS
- Driver requests the number of atoms in the system
* - <COORDS
- Driver requests 3*N double-precision atom coordinates
* - >FORCES
- Driver sends 3*N double-precision atom forces
* - <FORCES
- Driver requests 3*N double-precision atom forces
* - EXIT
- Driver tells the engine (LAMMPS) to exit engine mode
If these commands are not sufficient to support what a driver you
write needs, additional commands can be defined by simply using a
new command name not in this list. Code to support the new command
needs to be added to the MDI package within LAMMPS; see its
src/MDI/mdi_engine.cpp and fix_mdi_engine.cpp files.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`fix mdi/engine <fix_mdi_engine>`
Default
"""""""
None

View File

@ -1,204 +0,0 @@
.. index:: message
message command
===============
Syntax
""""""
.. parsed-literal::
message which protocol mode arg
* which = *client* or *server* or *quit*
* protocol = *md* or *mc*
* mode = *file* or *zmq* or *mpi/one* or *mpi/two*
.. parsed-literal::
*file* arg = filename
filename = file used for message exchanges
*zmq* arg = socket-ID
socket-ID for client = localhost:5555, see description below
socket-ID for server = \*:5555, see description below
*mpi/one* arg = none
*mpi/two* arg = filename
filename = file used to establish communication between 2 MPI jobs
Examples
""""""""
.. code-block:: LAMMPS
message client md file tmp.couple
message server md file tmp.couple
message client md zmq localhost:5555
message server md zmq *:5555
message client md mpi/one
message server md mpi/one
message client md mpi/two tmp.couple
message server md mpi/two tmp.couple
message quit
Description
"""""""""""
Establish a messaging protocol between LAMMPS and another code for the
purpose of client/server coupling.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
----------
The *which* argument defines LAMMPS to be the client or the server.
As explained below the *quit* option should be used when LAMMPS is
finished as a client. It sends a message to the server to tell it to
shut down.
----------
The *protocol* argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
* md = run dynamics with another code
* mc = perform Monte Carlo moves with another code
For protocol *md*, LAMMPS can be either a client or server. See the
:doc:`server md <server_md>` page for details on the protocol.
For protocol *mc*, LAMMPS can be the server. See the :doc:`server mc <server_mc>` page for details on the protocol.
----------
The *mode* argument specifies how messages are exchanged between the
client and server codes. Both codes must use the same mode and use
consistent parameters.
For mode *file*, the 2 codes communicate via binary files. They must
use the same filename, which is actually a file prefix. Several files
with that prefix will be created and deleted as a simulation runs.
The filename can include a path. Both codes must be able to access
the path/file in a common filesystem.
For mode *zmq*, the 2 codes communicate via a socket on the server
code's machine. Support for socket messaging is provided by the
open-source `ZeroMQ library <http://zeromq.org>`_, which must be
installed on your system. The client specifies an IP address (IPv4
format) or the DNS name of the machine the server code is running on,
followed by a 4 or 5 digit port ID for the socket, separated by a colon.
E.g.
.. parsed-literal::
localhost:5555 # client and server running on same machine
192.168.1.1:5555 # server is 192.168.1.1
deptbox.uni.edu:5555 # server is deptbox.uni.edu
The server specifies "\*:5555" where "\*" represents all available
interfaces on the server's machine, and the port ID must match
what the client specifies.
.. note::
On Linux or Unix machines port IDs below 1024 are reserved to the
superuser and thus not available. Other ports may already be in
use and cannot be opened by a second process. On a Linux machine
the commands "netstat -t4an" or "ss -t4an" will list all locally
used port IDs for IPv4 addresses.
.. note::
On many machines (and sometimes on local networks) port IDs may
also be blocked by default through firewalls. In that case either
access to the required port (or a desired range of ports) has to
be selectively enabled, or the firewall disabled (the latter is
usually not a good idea unless you are on a (small) local network
that is already protected from outside access).
.. note::
Additional explanation is needed here about how to use the *zmq*
mode on a parallel machine, e.g. a cluster with many nodes.
For mode *mpi/one*, the 2 codes communicate via MPI and are launched
by the same mpirun command, e.g. with this syntax for OpenMPI:
.. code-block:: bash
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.client -log log.client : -np 4 othercode args # LAMMPS is client
mpirun -np 2 othercode args : -np 4 lmp_mpi -mpicolor 1 -in in.server # LAMMPS is server
Note the use of the "-mpicolor color" command-line argument with
LAMMPS. See the :doc:`command-line args <Run_options>` page for
further explanation.
For mode *mpi/two*, the 2 codes communicate via MPI, but are launched
by 2 separate mpirun commands. The specified *filename* argument is a
file the 2 MPI processes will use to exchange info so that an MPI
inter-communicator can be established to enable the 2 codes to send
MPI messages to each other. Both codes must be able to access the
path/file in a common filesystem.
----------
Normally, the message client or message server command should be used
at the top of a LAMMPS input script. It performs an initial handshake
with the other code to setup messaging and to verify that both codes
are using the same message protocol and mode. Assuming both codes are
launched at (nearly) the same time, the other code should perform the
same kind of initialization.
If LAMMPS is the client code, it will begin sending messages when a
LAMMPS client command begins its operation. E.g. for the :doc:`fix client/md <fix_client_md>` command, it is when a :doc:`run <run>`
command is executed.
If LAMMPS is the server code, it will begin receiving messages when
the :doc:`server <server>` command is invoked.
If LAMMPS is being used as a client, the message quit command will
terminate its messaging with the server. If you do not use this
command and just allow LAMMPS to exit, then the server will continue
to wait for further messages. This may not be a problem, but if both
the client and server programs were launched in the same batch script,
then if the server runs indefinitely, it may consume the full allocation
of computer time, even if the calculation finishes sooner.
Note that if LAMMPS is the client or server, it will continue
processing the rest of its input script after client/server
communication terminates.
If both codes cooperate in this manner, a new round of client/server
messaging can be initiated after termination by issuing a second message
command in your LAMMPS input script, followed by a new fix client or
server command, followed by another message quit command (if LAMMPS is
the client). As an example, this can be performed in a loop to use a
quantum code as a server to compute quantum forces for multiple LAMMPS
data files or periodic snapshots while running dynamics.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`server <server>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -71,7 +71,7 @@ Syntax
*no_affinity* values = none
*kokkos* args = keyword value ...
zero or more keyword/value pairs may be appended
keywords = *neigh* or *neigh/qeq* or *neigh/thread* or *newton* or *binsize* or *comm* or *comm/exchange* or *comm/forward* *pair/comm/forward* *fix/comm/forward* or *comm/reverse* or *gpu/aware* or *pair/only*
keywords = *neigh* or *neigh/qeq* or *neigh/thread* or *neigh/transpose* or *newton* or *binsize* or *comm* or *comm/exchange* or *comm/forward* or *comm/pair/forward* or *comm/fix/forward* or *comm/reverse* or *gpu/aware* or *pair/only*
*neigh* value = *full* or *half*
full = full neighbor list
half = half neighbor list built in thread-safe manner
@ -81,17 +81,20 @@ Syntax
*neigh/thread* value = *off* or *on*
off = thread only over atoms
on = thread over both atoms and neighbors
*neigh/transpose* value = *off* or *on*
off = use same memory layout for GPU neigh list build as pair style
on = use transposed memory layout for GPU neigh list build
*newton* = *off* or *on*
off = set Newton pairwise and bonded flags off
on = set Newton pairwise and bonded flags on
*binsize* value = size
size = bin size for neighbor list construction (distance units)
*comm* value = *no* or *host* or *device*
use value for comm/exchange and comm/forward and pair/comm/forward and fix/comm/forward and comm/reverse
use value for comm/exchange and comm/forward and comm/pair/forward and comm/fix/forward and comm/reverse
*comm/exchange* value = *no* or *host* or *device*
*comm/forward* value = *no* or *host* or *device*
*pair/comm/forward* value = *no* or *device*
*fix/comm/forward* value = *no* or *device*
*comm/pair/forward* value = *no* or *device*
*comm/fix/forward* value = *no* or *device*
*comm/reverse* value = *no* or *host* or *device*
no = perform communication pack/unpack in non-KOKKOS mode
host = perform pack/unpack on host (e.g. with OpenMP threading)
@ -463,6 +466,16 @@ potentials support this keyword yet, and only thread over atoms. Many
simple pairwise potentials such as Lennard-Jones do support threading
over both atoms and neighbors.
If the *neigh/transpose* keyword is set to *off*, then the KOKKOS
package will use the same memory layout for building the neighbor list on
GPUs as used for the pair style. When this keyword is set to *on* it
will use a different (transposed) memory layout to build the neighbor
list on GPUs. This can be faster in some cases (e.g. ReaxFF HNS
benchmark) but slower in others (e.g. Lennard Jones benchmark). The
copy between different memory layouts is done out of place and
therefore doubles the memory overhead of the neighbor list, which can
be significant.
The *newton* keyword sets the Newton flags for pairwise and bonded
interactions to *off* or *on*, the same as the :doc:`newton <newton>`
command allows. The default for GPUs is *off* because this will almost
@ -471,11 +484,12 @@ computation is done, but less communication. However, when running on
CPUs a value of *on* is the default since it can often be faster, just
as it is for non-accelerated pair styles.
The *binsize* keyword sets the size of bins used to bin atoms in
neighbor list builds. The same value can be set by the :doc:`neigh_modify binsize <neigh_modify>` command. Making it an option in the package
kokkos command allows it to be set from the command line. The default
value for CPUs is 0.0, which means the LAMMPS default will be used,
which is bins = 1/2 the size of the pairwise cutoff + neighbor skin
The *binsize* keyword sets the size of bins used to bin atoms during
neighbor list builds. The same value can be set by the
:doc:`neigh_modify binsize <neigh_modify>` command. Making it an option
in the package kokkos command allows it to be set from the command line.
The default value for CPUs is 0.0, which means the LAMMPS default will be
used, which is bins = 1/2 the size of the pairwise cutoff + neighbor skin
distance. This is fine when neighbor lists are built on the CPU. For GPU
builds, a 2x larger binsize equal to the pairwise cutoff + neighbor skin
is often faster, which is the default. Note that if you use a
@ -485,8 +499,8 @@ because the GPU is faster at performing pairwise interactions, then this
rule of thumb may give too large a binsize and the default should be
overridden with a smaller value.
The *comm* and *comm/exchange* and *comm/forward* and *pair/comm/forward*
and *fix/comm/forward* and comm/reverse*
The *comm* and *comm/exchange* and *comm/forward* and *comm/pair/forward*
and *comm/fix/forward* and *comm/reverse*
keywords determine whether the host or device performs the packing and
unpacking of data when communicating per-atom data between processors.
"Exchange" communication happens only on timesteps that neighbor lists
@ -507,8 +521,8 @@ packing/unpacking data for the communication. A value of *host* means to
use the host, typically a multi-core CPU, and perform the
packing/unpacking in parallel with threads. A value of *device* means to
use the device, typically a GPU, to perform the packing/unpacking
operation. If a value of *host* is used for the *pair/comm/forward* or
*fix/comm/forward* keyword, it will be automatically be changed to *no*
operation. If a value of *host* is used for the *comm/pair/forward* or
*comm/fix/forward* keyword, it will automatically be changed to *no*
since these keywords don't support *host* mode.
The optimal choice for these keywords depends on the input script and
@ -681,15 +695,16 @@ script or via the "-pk intel" :doc:`command-line switch <Run_options>`.
For the KOKKOS package, the option defaults for GPUs are neigh = full,
neigh/qeq = full, newton = off, binsize for GPUs = 2x LAMMPS default
value, comm = device, gpu/aware = on. When LAMMPS can safely detect
that GPU-aware MPI is not available, the default value of gpu/aware
becomes "off". For CPUs or Xeon Phis, the option defaults are neigh =
half, neigh/qeq = half, newton = on, binsize = 0.0, and comm = no. The
option neigh/thread = on when there are 16K atoms or less on an MPI
rank, otherwise it is "off". These settings are made automatically by
the required "-k on" :doc:`command-line switch <Run_options>`. You can
change them by using the package kokkos command in your input script or
via the :doc:`-pk kokkos command-line switch <Run_options>`.
value, comm = device, neigh/transpose = off, gpu/aware = on. When
LAMMPS can safely detect that GPU-aware MPI is not available, the
default value of gpu/aware becomes "off". For CPUs or Xeon Phis, the
option defaults are neigh = half, neigh/qeq = half, newton = on,
binsize = 0.0, and comm = no. The option neigh/thread = on when there
are 16K atoms or less on an MPI rank, otherwise it is "off". These
settings are made automatically by the required "-k on"
:doc:`command-line switch <Run_options>`. You can change them by using
the package kokkos command in your input script or via the :doc:`-pk
kokkos command-line switch <Run_options>`.
For the OMP package, the default is Nthreads = 0 and the option
defaults are neigh = yes. These settings are made automatically if

View File

@ -1,20 +1,22 @@
.. index:: pair_style dpd
.. index:: pair_style dpd/gpu
.. index:: pair_style dpd/intel
.. index:: pair_style dpd/kk
.. index:: pair_style dpd/omp
.. index:: pair_style dpd/tstat
.. index:: pair_style dpd/tstat/gpu
.. index:: pair_style dpd/tstat/kk
.. index:: pair_style dpd/tstat/omp
pair_style dpd command
======================
Accelerator Variants: *dpd/gpu*, *dpd/intel*, *dpd/omp*
Accelerator Variants: *dpd/gpu*, *dpd/intel*, *dpd/kk*, *dpd/omp*
pair_style dpd/tstat command
============================
Accelerator Variants: *dpd/tstat/gpu*, *dpd/tstat/omp*
Accelerator Variants: *dpd/tstat/gpu*, *dpd/tstat/kk*, *dpd/tstat/omp*
Syntax
""""""

View File

@ -1,12 +1,18 @@
.. index:: pair_style dpd/ext
.. index:: pair_style dpd/ext/kk
.. index:: pair_style dpd/ext/tstat
.. index:: pair_style dpd/ext/tstat/kk
pair_style dpd/ext command
==========================
Accelerator Variants: dpd/ext/kk
pair_style dpd/ext/tstat command
================================
Accelerator Variants: dpd/ext/tstat/kk
Syntax
""""""
@ -137,6 +143,10 @@ except that A is not included.
----------
.. include:: accel_styles.rst
----------
**Mixing, shift, table, tail correction, restart, rRESPA info**\ :

View File

@ -75,16 +75,16 @@ N additional arguments after the second filename in the pair_coeff
command, where N is the number of LAMMPS atom types:
* MEAM library file
* Elem1, Elem2, ...
* Element1, Element2, ...
* MEAM parameter file
* N element names = mapping of MEAM elements to atom types
See the :doc:`pair_coeff <pair_coeff>` page for alternate ways
to specify the path for the potential files.
As an example, the potentials/library.meam file has generic MEAM
settings for a variety of elements. The potentials/SiC.meam file has
specific parameter settings for a Si and C alloy system. If your
As an example, the ``potentials/library.meam`` file has generic MEAM
settings for a variety of elements. The ``potentials/SiC.meam`` file
has specific parameter settings for a Si and C alloy system. If your
LAMMPS simulation has 4 atoms types and you want the first 3 to be Si,
and the fourth to be C, you would use the following pair_coeff command:
@ -118,30 +118,30 @@ that will be used with other potentials.
If the second filename is NULL, the element names between the two
filenames can appear in any order, e.g. "Si C" or "C Si" in the
example above. However, if the second filename is not NULL (as in the
example above), it contains settings that are Fortran-indexed for the
elements that precede it. Thus you need to insure you list the
elements between the filenames in an order consistent with how the
example above. However, if the second filename is **not** NULL (as in the
example above), it contains settings that are indexed **by numbers**
for the elements that precede it. Thus you need to ensure that you list
the elements between the filenames in an order consistent with how the
values in the second filename are indexed. See details below on the
syntax for settings in the second file.
The MEAM library file provided with LAMMPS has the name
potentials/library.meam. It is the "meamf" file used by other MD
codes. Aside from blank and comment lines (start with #) which can
appear anywhere, it is formatted as a series of entries, each of which
``potentials/library.meam``. It is the "meamf" file used by other MD
codes. Aside from blank and comment lines (starting with # which can
appear anywhere), it is formatted as a series of entries, each of which
has 19 parameters and can span multiple lines:
elt, lat, z, ielement, atwt, alpha, b0, b1, b2, b3, alat, esub, asub,
t0, t1, t2, t3, rozero, ibar
The "elt" and "lat" parameters are text strings, such as elt = Si or
Cu and lat = dia or fcc. Because the library file is used by Fortran
The *elt* and *lat* parameters are text strings, such as *elt* = Si or
Cu and *lat* = dia or fcc. Because the library file is used by Fortran
MD codes, these strings may be enclosed in single quotes, but this is
not required. The other numeric parameters match values in the
formulas above. The value of the "elt" string is what is used in the
formulas above. The value of the *elt* string is what is used in the
pair_coeff command to identify which settings from the library file
you wish to read in. There can be multiple entries in the library
file with the same "elt" value; LAMMPS reads the first matching entry it
file with the same *elt* value; LAMMPS reads the first matching entry it
finds and ignores the rest.
Other parameters in the MEAM library file correspond to single-element
@ -157,13 +157,13 @@ potential parameters:
esub = energy per atom (eV) in the reference structure at equilibrium
asub = "A" parameter for MEAM (see e.g. :ref:`(Baskes) <Baskes>`)
The alpha, b0, b1, b2, b3, t0, t1, t2, t3 parameters correspond to the
The *alpha*, *b0*, *b1*, *b2*, *b3*, *t0*, *t1*, *t2*, *t3* parameters correspond to the
standard MEAM parameters in the literature :ref:`(Baskes) <Baskes>` (the b
parameters are the standard beta parameters). Note that only parameters
normalized to t0 = 1.0 are supported. The rozero parameter is
normalized to *t0 = 1.0* are supported. The *rozero* parameter is
an element-dependent density scaling that weights the reference
background density (see e.g. equation 4.5 in :ref:`(Gullet) <Gullet>`) and
is typically 1.0 for single-element systems. The ibar parameter
is typically 1.0 for single-element systems. The *ibar* parameter
selects the form of the function G(Gamma) used to compute the electron
density; options are
@ -180,7 +180,7 @@ If used, the MEAM parameter file contains settings that override or
complement the library file settings. Examples of such parameter
files are in the potentials directory with a ".meam" suffix. Their
format is the same as is read by other Fortran MD codes. Aside from
blank and comment lines (start with #) which can appear anywhere, each
blank and comment lines (start with # which can appear anywhere), each
line has one of the following forms. Each line can also have a
trailing comment (starting with #) which is ignored.
@ -213,7 +213,7 @@ The recognized keywords for the parameter file are as follows:
delta(I,J) = heat of formation for I-J alloy; if Ec_IJ is input as
zero, then LAMMPS sets Ec_IJ = (Ec_II + Ec_JJ)/2 - delta_IJ
alpha(I,J) = alpha parameter for pair potential between I and J (can
be computed from bulk modulus of reference structure
be computed from bulk modulus of reference structure)
re(I,J) = equilibrium distance between I and J in the reference
structure
Cmax(I,J,K) = Cmax screening parameter when I-J pair is screened
@ -283,8 +283,8 @@ The recognized keywords for the parameter file are as follows:
1 = rho_bkgd = rho0_meam(a)\*Z_meam(a) (matches DYNAMO)
default = 0
Rc, delr, re are in distance units (Angstroms in the case of metal
units). Ec and delta are in energy units (eV in the case of metal
*Rc*, *delr*, *re* are in distance units (Angstroms in the case of metal
units). *Ec* and *delta* are in energy units (eV in the case of metal
units).
Each keyword represents a quantity which is either a scalar, vector,
@ -299,37 +299,37 @@ Thus these lines
rho0(2) = 2.25
alpha(1,2) = 4.37
set rho0 for the second element to the value 2.25 and set alpha for the
set *rho0* for the second element to the value 2.25 and set *alpha* for the
alloy interaction between elements 1 and 2 to 4.37.
The augt1 parameter is related to modifications in the MEAM
The *augt1* parameter is related to modifications in the MEAM
formulation of the partial electron density function. In recent
literature, an extra term is included in the expression for the
third-order density in order to make the densities orthogonal (see for
example :ref:`(Wang) <Wang2>`, equation 3d); this term is included in the
MEAM implementation in lammps. However, in earlier published work
this term was not included when deriving parameters, including most of
those provided in the library.meam file included with lammps, and to
account for this difference the parameter t1 must be augmented by
3/5\*t3. If augt1=1, the default, this augmentation is done
those provided in the ``library.meam`` file included with lammps, and to
account for this difference the parameter *t1* must be augmented by
3/5\**t3*. If *augt1* = 1, the default, this augmentation is done
automatically. When parameter values are fit using the modified
density function, as in more recent literature, augt1 should be set to
0.
The mixture_ref_t parameter is available to match results with those
The *mixture_ref_t* parameter is available to match results with those
of previous versions of lammps (before January 2011). Newer versions
of lammps, by default, use the single-element values of the t
of lammps, by default, use the single-element values of the *t*
parameters to compute the background reference density. This is the
proper way to compute these parameters. Earlier versions of lammps
used an alloy mixture averaged value of t to compute the background
reference density. Setting mixture_ref_t=1 gives the old behavior.
WARNING: using mixture_ref_t=1 will give results that are demonstrably
used an alloy mixture averaged value of *t* to compute the background
reference density. Setting *mixture_ref_t* = 1 gives the old behavior.
WARNING: using *mixture_ref_t* = 1 will give results that are demonstrably
incorrect for second-neighbor MEAM, and non-standard for
first-neighbor MEAM; this option is included only for matching with
previous versions of lammps and should be avoided if possible.
The parameters attrac and repuls, along with the integer selection
parameter erose_form, can be used to modify the Rose energy function
The parameters *attrac* and *repuls*, along with the integer selection
parameter *erose_form*, can be used to modify the Rose energy function
used to compute the pair potential. This function gives the energy of
the reference state as a function of interatomic spacing. The form of
this function is:
@ -343,19 +343,19 @@ this function is:
a3 = repuls, astar < 0
a3 = attrac, astar >= 0
Most published MEAM parameter sets use the default values attrac=repulse=0.
Setting repuls=attrac=delta corresponds to the form used in several
Most published MEAM parameter sets use the default values *attrac* = *repuls* = 0.
Setting *repuls* = *attrac* = *delta* corresponds to the form used in several
recent published MEAM parameter sets, such as :ref:`(Valone) <Valone>`
.. note::
The default form of the erose expression in LAMMPS was corrected
The default form of the *erose* expression in LAMMPS was corrected
in March 2009. The current version is correct, but may show different
behavior compared with earlier versions of LAMMPS when the *attrac*
and/or *repuls* parameters are non-zero. To obtain the previous default
form, use erose_form = 1 (this form does not seem to appear in the
form, use *erose_form* = 1 (this form does not seem to appear in the
literature). An alternative form (see e.g. :ref:`(Lee2) <Lee2>`) is
available using erose_form = 2.
available using *erose_form* = 2.
----------
@ -364,13 +364,13 @@ Mixing, shift, table, tail correction, restart, rRESPA info
For atom type pairs I,J and I != J, where types I and J correspond to
two different element types, mixing is performed by LAMMPS with
user-specifiable parameters as described above. You never need to
specify a pair_coeff command with I != J arguments for this style.
user-specifiable parameters as described above.
This pair style does not support the :doc:`pair_modify <pair_modify>`
shift, table, and tail options.
*shift*, *table*, and *tail* options.
This pair style does not write its information to :doc:`binary restart files <restart>`, since it is stored in potential files. Thus, you
This pair style does not write its information to :doc:`binary restart files <restart>`,
since it is stored in potential files. Thus, you
need to re-specify the pair_style and pair_coeff commands in an input
script that reads a restart file.

View File

@ -17,7 +17,7 @@ Syntax
*load* file = load plugin(s) from shared object in *file*
*unload* style name = unload plugin *name* of style *style*
*style* = *pair* or *bond* or *angle* or *dihedral* or *improper* or *compute* or *fix* or *region* or *command*
*style* = *pair* or *bond* or *angle* or *dihedral* or *improper* or *kspace* or *compute* or *fix* or *region* or *command*
*list* = print a list of currently loaded plugins
*clear* = unload all currently loaded plugins
@ -70,12 +70,11 @@ Restrictions
""""""""""""
The *plugin* command is part of the PLUGIN package. It is
only enabled if LAMMPS was built with that package.
See the :doc:`Build package <Build_package>` page for
more info. Plugins are not available on Windows.
only enabled if LAMMPS was built with that package. See
the :doc:`Build package <Build_package>` page for more info.
If plugins access functions or classes from a package, LAMMPS must
have been compiled with that package included.
If plugins access functions or classes from a package,
LAMMPS must have been compiled with that package included.
Plugins are dependent on the LAMMPS binary interface (ABI)
and particularly the MPI library used. So they are not guaranteed

View File

@ -1,74 +0,0 @@
.. index:: server
server command
==============
Syntax
""""""
.. parsed-literal::
server protocol
* protocol = *md* or *mc*
Examples
""""""""
.. code-block:: LAMMPS
server md
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it receives
messages from a separate "client" code and responds by sending a reply
message back to the client. The specified *protocol* determines the
format and content of messages LAMMPS expects to receive and how it
responds.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS input script will be
processed.
The *protocol* argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
* :doc:`md <server_md>` = run dynamics with another code
* :doc:`mc <server_mc>` = perform Monte Carlo moves with another code
For protocol *md*, LAMMPS can be either a client (via the :doc:`fix client/md <fix_client_md>` command) or server. See the :doc:`server md <server_md>` page for details on the protocol.
For protocol *mc*, LAMMPS can be the server. See the :doc:`server mc <server_mc>` page for details on the protocol.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to setup the messaging protocol with
the other client code.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -1,126 +0,0 @@
.. index:: server mc
server mc command
=================
Syntax
""""""
.. parsed-literal::
server mc
mc = the protocol argument to the :doc:`server <server>` command
Examples
""""""""
.. code-block:: LAMMPS
server mc
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the *mc*
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The :doc:`server <server>` page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/COUPLE/lammps_mc/in.server.
----------
When using this command, LAMMPS (as the server code) receives
instructions from a Monte Carlo (MC) driver to displace random atoms,
compute the energy before and after displacement, and run dynamics to
equilibrate the system.
The MC driver performs the random displacements on random atoms,
accepts or rejects the move in an MC sense, and orchestrates the MD
runs.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the `CSlib website <https://cslib.sandia.gov>`_ doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_mc.cpp file for details on how LAMMPS uses
these messages. See the examples/COUPLE/lammps_mc/mc.cpp file for an
example of how an MC driver code can use these messages.
Define NATOMS=1, EINIT=2, DISPLACE=3, ACCEPT=4, RUN=5.
**Client sends one of these kinds of message**\ :
.. parsed-literal::
cs->send(NATOMS,0) # msgID = 1 with no fields
cs->send(EINIT,0) # msgID = 2 with no fields
cs->send(DISPLACE,2) # msgID = 3 with 2 fields
cs->pack_int(1,ID) # 1st field = ID of atom to displace
cs->pack(2,3,xnew) # 2nd field = new xyz coords of displaced atom
cs->send(ACCEPT,1) # msgID = 4 with 1 field
cs->pack_int(1,flag) # 1st field = accept/reject flag
cs->send(RUN,1) # msgID = 5 with 1 field
cs->pack_int(1,nsteps) # 1st field = # of timesteps to run MD
**Server replies**\ :
.. parsed-literal::
cs->send(NATOMS,1) # msgID = 1 with 1 field
cs->pack_int(1,natoms) # 1st field = number of atoms
cs->send(EINIT,2) # msgID = 2 with 2 fields
cs->pack_double(1,poteng) # 1st field = potential energy of system
cs->pack(2,3\*natoms,x) # 2nd field = 3N coords of Natoms
cs->send(DISPLACE,1) # msgID = 3 with 1 field
cs->pack_double(1,poteng) # 1st field = new potential energy of system
cs->send(ACCEPT,0) # msgID = 4 with no fields
cs->send(RUN,0) # msgID = 5 with no fields
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to setup the messaging protocol with
the other client code.
Related commands
""""""""""""""""
:doc:`message <message>`
Default
"""""""
none

View File

@ -1,161 +0,0 @@
.. index:: server md
server md command
=================
Syntax
""""""
.. parsed-literal::
server md
md = the protocol argument to the :doc:`server <server>` command
Examples
""""""""
.. code-block:: LAMMPS
server md
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the *md*
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The :doc:`server <server>` page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/message/in.message.server.
----------
When using this command, LAMMPS (as the server code) receives the
current coordinates of all particles from the client code each
timestep, computes their interaction, and returns the energy, forces,
and pressure for the interacting particles to the client code, so it
can complete the timestep. This command could also be used with a
client code that performs energy minimization, using the server to
compute forces and energy each iteration of its minimizer.
When using the :doc:`fix client/md <fix_client_md>` command, LAMMPS (as
the client code) does the timestepping and receives needed energy,
forces, and pressure values from the server code.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the `CSlib website <https://cslib.sandia.gov>`_ doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_md.cpp and src/MESSAGE/fix_client_md.cpp
files for details on how LAMMPS uses these messages. See the
examples/COUPLE/lammps_vasp/vasp_wrap.py or
examples/COUPLE/lammps_nwchem/nwchem_wrap.py files for examples of how
a quantum code (VASP or NWChem) can use these messages.
The following pseudo-code uses these values, defined as enums.
Define:
.. parsed-literal::
SETUP=1, STEP=2
DIM=1, PERIODICITY=2, ORIGIN=3, BOX=4, NATOMS=5, NTYPES=6, TYPES=7, COORDS=8, UNITS=9, CHARGE=10
FORCES=1, ENERGY=2, PRESSURE=3, ERROR=4
**Client sends 2 kinds of messages**\ :
.. parsed-literal::
# required fields: DIM, PERIODICITY, ORIGIN, BOX, NATOMS, NTYPES, TYPES, COORDS
# optional fields: UNITS, CHARGE
cs->send(SETUP,nfields) # msgID with nfields
cs->pack_int(DIM,dim) # dimension (2,3) of simulation
cs->pack(PERIODICITY,3,xyz) # periodicity flags in 3 dims
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
cs->pack_int(NATOMS,natoms) # total number of atoms
cs->pack_int(NTYPES,ntypes) # number of atom types
cs->pack(TYPES,natoms,type) # vector of per-atom types
cs->pack(COORDS,3\*natoms,x) # vector of 3N atom coords
cs->pack_string(UNITS,units) # units = "lj", "real", "metal", etc
cs->pack(CHARGE,natoms,q) # vector of per-atom charge
# required fields: COORDS
# optional fields: ORIGIN, BOX
cs->send(STEP,nfields) # msgID with nfields
cs->pack(COORDS,3\*natoms,x) # vector of 3N atom coords
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
**Server replies to either kind of message**\ :
.. parsed-literal::
# required fields: FORCES, ENERGY, PRESSURE
# optional fields: ERROR
cs->send(msgID,nfields) # msgID with nfields
cs->pack(FORCES,3\*Natoms,f) # vector of 3N forces on atoms
cs->pack(ENERGY,1,poteng) # total potential energy of system
cs->pack(PRESSURE,6,press) # global pressure tensor (6-vector)
cs->pack_int(ERROR,flag) # server had an error (e.g. DFT non-convergence)
----------
The units for various quantities that are sent and received via
messages are defined for atomic-scale simulations in the table below.
The client and server codes (including LAMMPS) can use internal units
different than these (e.g. :doc:`real units <units>` in LAMMPS), so long
as they convert to these units for messaging.
* COORDS, ORIGIN, BOX = Angstroms
* CHARGE = multiple of electron charge (1.0 is a proton)
* ENERGY = eV
* FORCES = eV/Angstrom
* PRESSURE = bars
Note that these are :doc:`metal units <units>` in LAMMPS.
If you wish to run LAMMPS in another of its unit choices, e.g. :doc:`lj units <units>`, then the client and server should exchange a UNITS
message as indicated above, and both the client and server should
agree on the units for the data they exchange.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -21,9 +21,14 @@ Syntax
*norm* value = *yes* or *no*
*flush* value = *yes* or *no*
*line* value = *one* or *multi* or *yaml*
*format* values = *line* string, *int* string, *float* string, M string, or *none*
*colname* values = ID string, or *default*
string = new column header name
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a thermo keyword or reference to compute, fix, property or variable.
*format* values = *line* string, *int* string, *float* string, ID string, or *none*
string = C-style format string
M = integer from 1 to N, where N = # of quantities being output
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a thermo keyword or reference to compute, fix, property or variable.
*temp* value = compute ID that calculates a temperature
*press* value = compute ID that calculates a pressure
@ -36,7 +41,8 @@ Examples
thermo_modify temp myTemp format 3 %15.8g
thermo_modify temp myTemp format line "%ld %g %g %15.8g"
thermo_modify line multi format float %g
themos_modify line yaml format none
thermo_modify line yaml format none
thermo_modify colname 1 Timestep colname -2 Pressure colname f_1[1] AvgDensity
Description
"""""""""""
@ -147,6 +153,20 @@ containing the timestep and CPU time ("multi"), or in a YAML format
block ("yaml"). This modify option overrides the *one*, *multi*, or
*yaml* thermo_style settings.
The *colname* keyword can be used to change the default header keyword
for a column or field of thermodynamic output. The setting for *ID
string* replaces the default text with the provided string. *ID* can be
a positive integer when it represents the column number counting from
the left, a negative integer when it represents the column number from
the right (i.e. -1 is the last column/keyword), or a thermo keyword (or
compute, fix, property, or variable reference), in which case it replaces
the string for that specific thermo keyword.
The *colname* keyword can be used multiple times. If multiple *colname*
settings refer to the same keyword, the last setting has precedence. A
setting of *default* clears all previous settings, reverting all values
to their default values.
The *format* keyword can be used to change the default numeric format of
any of the quantities the :doc:`thermo_style <thermo_style>` command
outputs. All the specified format strings are C-style formats, e.g. as
@ -155,12 +175,16 @@ argument which is the format string for the entire line of thermo
output, with N fields, which you must enclose in quotes if it is more
than one field. The *int* and *float* keywords take a single format
argument and are applied to all integer or floating-point quantities
output. The setting for *M string* also takes a single format argument
which is used for the Mth value output in each line, e.g. the fifth
column is output in high precision for "format 5 %20.15g".
output. The setting for *ID string* also takes a single format argument
which is used for the indexed value in each line. The interpretation is
the same as for *colname*, i.e. a positive integer is the n-th value
corresponding to the n-th thermo keyword, a negative integer is counting
backwards, and a string matches the entry for the given thermo keyword,
e.g. the fifth column is output in high precision for "format 5 %20.15g"
and the pair energy for "format epair %20.15g".
The *format* keyword can be used multiple times. The precedence is
that for each value in a line of output, the *M* format (if specified)
that for each value in a line of output, the *ID* format (if specified)
is used, else the *int* or *float* setting (if specified) is used,
else the *line* setting (if specified) for that value is used, else
the default setting is used. A setting of *none* clears all previous
@ -173,9 +197,10 @@ settings, reverting all values to their default format.
When specifying the *format int* option you can use a "%d"-style
format identifier in the format string and LAMMPS will convert this
to the corresponding 8-byte form when it is applied to those
keywords. However, when specifying the *line* option or *format M
keywords. However, when specifying the *line* option or *format ID
string* option for *step* and *natoms*, you should specify a format
string appropriate for an 8-byte signed integer, e.g. one with "%ld".
string appropriate for an 8-byte signed integer, e.g. one with "%ld"
or "%lld" depending on the platform.
The *temp* keyword is used to determine how thermodynamic temperature is
calculated, which is used by all thermo quantities that require a

View File

@ -65,8 +65,8 @@ Syntax
bound(group,dir,region), gyration(group,region), ke(group,region),
angmom(group,dim,region), torque(group,dim,region),
inertia(group,dimdim,region), omega(group,dim,region)
special functions = sum(x), min(x), max(x), ave(x), trap(x), slope(x), gmask(x), rmask(x), grmask(x,y), next(x), is_file(name)
feature functions = is_available(category,feature), is_active(category,feature), is_defined(category,id)
special functions = sum(x), min(x), max(x), ave(x), trap(x), slope(x), gmask(x), rmask(x), grmask(x,y), next(x), is_file(name), extract_setting(name)
feature functions = is_active(category,feature), is_available(category,feature), is_defined(category,id)
atom value = id[i], mass[i], type[i], mol[i], x[i], y[i], z[i], vx[i], vy[i], vz[i], fx[i], fy[i], fz[i], q[i]
atom vector = id, mass, type, mol, x, y, z, vx, vy, vz, fx, fy, fz, q
compute references = c_ID, c_ID[i], c_ID[i][j], C_ID, C_ID[i]
@ -823,10 +823,15 @@ Special Functions
Special functions take specific kinds of arguments, meaning their
arguments cannot be formulas themselves.
The is_file(x) function is a test whether 'x' is a (readable) file
and returns 1 in this case, otherwise it returns 0. For that 'x'
The is_file(name) function is a test whether *name* is a (readable) file
and returns 1 in this case, otherwise it returns 0. For this test, *name*
is taken as a literal string and must not have any blanks in it.
The extract_setting(name) function allows access to some basic settings
by calling the :cpp:func:`lammps_extract_setting` library function.
For available keywords *name* and their meaning, please see the
documentation of that function.
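As an illustrative (hypothetical) example, the same settings are
reachable from the LAMMPS Python module, which wraps the
lammps_extract_setting() call used by this variable function; the
*world_size* keyword is an assumption taken from that documentation:
.. code-block:: python
   from lammps import lammps
   lmp = lammps()
   # equal-style variable using extract_setting() inside an input script
   lmp.command("variable nmpi equal extract_setting(world_size)")
   # the same value queried directly through the library interface
   nmpi = lmp.extract_setting("world_size")
   print(nmpi)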
The sum(x), min(x), max(x), ave(x), trap(x), and slope(x) functions
each take 1 argument which is of the form "c_ID" or "c_ID[N]" or
"f_ID" or "f_ID[N]" or "v_name". The first two are computes and the
@ -911,30 +916,30 @@ Feature Functions
-----------------
Feature functions allow to probe the running LAMMPS executable for
whether specific features are either active, defined, or available.
The functions take two arguments, a *category* and a corresponding
*argument*\ . The arguments are strings thus cannot be formulas
themselves (only $-style immediate variable expansion is possible).
whether specific features are either active, defined, or available. The
functions take two arguments, a *category* and a corresponding
*argument*\ . The arguments are strings and thus cannot be formulas
themselves; only $-style immediate variable expansion is possible.
Return value is either 1.0 or 0.0 depending on whether the function
evaluates to true or false, respectively.
The *is_active()* function allows to query for active settings which
are grouped by categories. Currently supported categories and
arguments are:
The *is_active(category,feature)* function allows to query for active
settings which are grouped by categories. Currently supported categories
and arguments are:
* *package* (argument = *gpu* or *intel* or *kokkos* or *omp*\ )
* *newton* (argument = *pair* or *bond* or *any*\ )
* *pair* (argument = *single* or *respa* or *manybody* or *tail* or *shift*\ )
* *comm_style* (argument = *brick* or *tiled*\ )
* *min_style* (argument = any of the compiled in minimizer styles)
* *run_style* (argument = any of the compiled in run styles)
* *atom_style* (argument = any of the compiled in atom styles)
* *pair_style* (argument = any of the compiled in pair styles)
* *bond_style* (argument = any of the compiled in bond styles)
* *angle_style* (argument = any of the compiled in angle styles)
* *dihedral_style* (argument = any of the compiled in dihedral styles)
* *improper_style* (argument = any of the compiled in improper styles)
* *kspace_style* (argument = any of the compiled in kspace styles)
* *package*\ : argument = *gpu* or *intel* or *kokkos* or *omp*
* *newton*\ : argument = *pair* or *bond* or *any*
* *pair*\ : argument = *single* or *respa* or *manybody* or *tail* or *shift*
* *comm_style*\ : argument = *brick* or *tiled*
* *min_style*\ : argument = any of the compiled in minimizer styles
* *run_style*\ : argument = any of the compiled in run styles
* *atom_style*\ : argument = any of the compiled in atom styles
* *pair_style*\ : argument = any of the compiled in pair styles
* *bond_style*\ : argument = any of the compiled in bond styles
* *angle_style*\ : argument = any of the compiled in angle styles
* *dihedral_style*\ : argument = any of the compiled in dihedral styles
* *improper_style*\ : argument = any of the compiled in improper styles
* *kspace_style*\ : argument = any of the compiled in kspace styles
Most of the settings are self-explanatory; the *single* argument in the
*pair* category allows to check whether a pair style supports a
@ -943,7 +948,9 @@ features or LAMMPS, *respa* allows to check whether the inner/middle/outer
mode of r-RESPA is supported. In the various style categories,
the checking is also done using suffix flags, if available and enabled.
Example 1: disable use of suffix for pppm when using GPU package (i.e. run it on the CPU concurrently to running the pair style on the GPU), but do use the suffix otherwise (e.g. with OPENMP).
Example 1: disable use of the suffix for pppm when using the GPU package
(i.e. run it on the CPU concurrently with the pair style running on the
GPU), but do use the suffix otherwise (e.g. with OPENMP).
.. code-block:: LAMMPS
@ -951,17 +958,15 @@ Example 1: disable use of suffix for pppm when using GPU package (i.e. run it on
if $(is_active(package,gpu)) then "suffix off"
kspace_style pppm
Example 2: use r-RESPA with inner/outer cutoff, if supported by pair style, otherwise fall back to using pair and reducing the outer time step
Example 2: use r-RESPA with inner/outer cutoff, if supported by the pair
style; otherwise fall back to using pair and reducing the outer time
step.
.. code-block:: LAMMPS
timestep $(2.0*(1.0+2.0*is_active(pair,respa)))
if $(is_active(pair,respa)) then "run_style respa 4 3 2 2 improper 1 inner 2 5.5 7.0 outer 3 kspace 4" else "run_style respa 3 3 2 improper 1 pair 2 kspace 3"
The *is_defined()* function allows to query categories like *compute*,
*dump*, *fix*, *group*, *region*, and *variable* whether an entry
with the provided name or id is defined.
The *is_available(category,name)* function allows to query whether
a specific optional feature is available, i.e. compiled in.
This currently works for the following categories: *command*,
@ -983,6 +988,10 @@ the compiled binary supports it.
if "$(is_available(feature,ffmpeg)" then "dump 3 all movie 25 movie.mp4 type type zoom 1.6 adiam 1.0"
The *is_defined(category,id)* function allows to query categories like
*compute*, *dump*, *fix*, *group*, *region*, and *variable* whether an
entry with the provided name or id is defined.
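For example, a command can be guarded by such a test (the fix ID *relax*
is only a placeholder):

.. code-block:: LAMMPS

   if "$(is_defined(fix,relax))" then "unfix relax"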
----------
Atom Values and Vectors

View File

@ -5,3 +5,4 @@ sphinx_tabs
breathe
Pygments
six
pyyaml

View File

@ -573,6 +573,8 @@ cuFFT
CuH
Cui
Cummins
Cundall
cundall
Curk
Cusentino
customIDs
@ -655,6 +657,7 @@ delocalized
Delong
delr
deltaHf
dem
Dendrimer
dendritic
Denniston
@ -1177,6 +1180,7 @@ Gladky
gld
gle
globbing
Gloor
Glosli
Glotzer
gmail
@ -1414,6 +1418,7 @@ initializations
InitiatorIDs
initio
InP
inq
inregion
instantiation
Institut
@ -1425,6 +1430,7 @@ interal
interatomic
Interatomic
interconvert
interfacial
interial
interlayer
intermolecular
@ -1735,6 +1741,7 @@ libdl
libfftw
libgcc
libgpu
libinqmdi
libjpeg
libkim
liblammps
@ -1749,6 +1756,7 @@ libplumed
libplumedKernel
libpng
libpoems
libqemdi
libqmmm
librar
libreax
@ -1784,6 +1792,7 @@ Liu
Livermore
lj
llammps
lld
LLVM
lm
lmp
@ -2202,6 +2211,7 @@ Nbondtypes
nBOt
nbrhood
Nbtypes
Nbytes
nc
Nc
nchunk
@ -3096,6 +3106,7 @@ smallsmall
smd
SMD
smi
Smilauer
Smirichinski
Smit
smtbq
@ -3481,6 +3492,7 @@ unsplit
unstrained
untar
untilted
Unwin
uparrow
upenn
upto
@ -3644,6 +3656,7 @@ Wittmaack
wn
Wolde
workflow
workflows
Worley
Wriggers
Wuppertal
@ -3701,6 +3714,8 @@ xy
xyz
xz
xzhou
Yade
yade
yaff
YAFF
Yamada
@ -3746,6 +3761,7 @@ zcm
zeeman
Zeeman
Zemer
zenodo
Zepeda
zflag
Zhang

View File

@ -9,6 +9,11 @@ model a realistic problem.
In many of the examples included here, LAMMPS must first be built as a
library.
Also see the Howto_mdi doc page in the LAMMPS manual for a description
of how LAMMPS can be coupled to other codes in a client/server fashion
using the MDI Library created by the MolSSI consortium. The MDI
package in LAMMPS has support for this style of code coupling.
See these sections of the LAMMPS manual for details:
Build LAMMPS as a library (doc/html/Build_basics.html)
@ -28,15 +33,9 @@ These are the sub-directories included in this directory:
simple simple example of driver code calling LAMMPS as a lib
multiple example of driver code calling multiple instances of LAMMPS
plugin example for loading LAMMPS at runtime from a shared library
lammps_mc client/server coupling of Monte Carlo client
with LAMMPS server for energy evaluation
lammps_nwchem client/server coupling of LAMMPS client with
NWChem quantum DFT as server for quantum forces
lammps_quest MD with quantum forces, coupling to Quest DFT code
lammps_spparks grain-growth Monte Carlo with strain via MD,
coupling to SPPARKS kinetic MC code
lammps_vasp client/server coupling of LAMMPS client with
VASP quantum DFT as server for quantum forces
library collection of useful inter-code communication routines
fortran a simple wrapper on the LAMMPS library API that
can be called from Fortran

View File

@ -1,33 +0,0 @@
# Makefile for MC
SHELL = /bin/sh
SRC = mc.cpp random_park.cpp
OBJ = $(SRC:.cpp=.o)
# change this line to the path of the CSlib src dir on your machine
CSLIB = /home/sjplimp/lammps/lib/message/cslib/src
# compiler/linker settings
CC = g++
CCFLAGS = -g -O3 -I$(CSLIB)
LINK = g++
LINKFLAGS = -g -O -L$(CSLIB)
# targets
mc: $(OBJ)
# first line if the CSlib was built within lib/message with ZMQ support
# second line if the CSlib was built without ZMQ support
$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -lzmq -o mc
# $(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -o mc
clean:
@rm -f *.o mc
# rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<

View File

@ -1,128 +0,0 @@
Sample Monte Carlo (MC) wrapper on LAMMPS via client/server coupling
See the MESSAGE package documentation in Build_extras.html#message
for more details on how client/server coupling works in LAMMPS.
In this dir, the mc.cpp/h files are a standalone "client" MC code. It
should be run on a single processor, though it could become a parallel
program at some point. LAMMPS is also run as a standalone executable
as a "server" on as many processors as desired using its "server mc"
command; see its doc page for details.
Messages are exchanged between MC and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the MC
program became parallel, data could also be exchanged via MPI.
The MC code makes simple MC moves, by displacing a single random atom
by a small random amount. It uses LAMMPS to calculate the energy
change, and to run dynamics between MC moves.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
% cd lammps/lib/message
% python Install.py -m -z # build CSlib with MPI and ZMQ support
% cd lammps/src
% make yes-message
% make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the MC client code
The source files for the MC code are in this dir. It links with the
CSlib library in lib/message/cslib.
You must first build the CSlib in serial mode, e.g.
% cd lammps/lib/message/cslib/src
% make lib # build serial and parallel lib with ZMQ support
% make lib zmq=no # build serial and parallel lib without ZMQ support
Then edit the Makefile in this dir. The CSLIB variable should be the
path to where the LAMMPS lib/message/cslib/src dir is on your system.
If you built the CSlib without ZMQ support you will also need to
comment/uncomment one line. Then you can just type
% make
and you should get an "mc" executable.
----------------
To run in client/server mode:
Both the client (MC) and server (LAMMPS) must use the same messaging
mode, namely file or zmq. This is an argument to the MC code; it can
be selected by setting the "mode" variable when you run LAMMPS. The
default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The MC code is always run in serial.
When you run, the server should print out thermodynamic info
for every MD run it performs (between MC moves). The client
will print nothing until the simulation ends, then it will
print stats about the accepted MC moves.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 1 lmp_mpi -v mode file -in in.mc.server
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 4 lmp_mpi -v mode file -in in.mc.server
ZMQ mode of messaging:
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 1 lmp_mpi -v mode zmq -in in.mc.server
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 4 lmp_mpi -v mode zmq -in in.mc.server
--------------
The input script for the MC program is in.mc. You can edit it to run
longer simulations.
500 nsteps = total # of steps of MD
100 ndynamics = # of MD steps between MC moves
0.1 delta = displacement size of MC move
1.0 temperature = used in MC Boltzmann factor
12345 seed = random number seed
--------------
The problem size that LAMMPS is computing the MC energy for and
running dynamics on is set by the x,y,z variables in the LAMMPS
in.mc.server script. The default size is 500 particles. You can
adjust the size as follows:
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles

View File

@ -1,7 +0,0 @@
# MC params
500 nsteps
100 ndynamics
0.1 delta
1.0 temperature
12345 seed

View File

@ -1,36 +0,0 @@
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then &
"message server mc file tmp.couple" &
elif "${mode} == zmq" &
"message server mc zmq *:5555" &
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000649929 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:02

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000592947 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 3.8147e-06 on 4 procs for 0 steps with 500 atoms
59.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.815e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
106.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.75509e-06 on 4 procs for 0 steps with 500 atoms
113.2% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.755e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.563e-06 on 4 procs for 0 steps with 500 atoms
117.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.563e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 3.99351e-06 on 4 procs for 0 steps with 500 atoms
93.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.994e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 3.57628e-06 on 4 procs for 0 steps with 500 atoms
111.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.576e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:02

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000741005 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.19209e-06 on 1 procs for 0 steps with 500 atoms
83.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
104.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:00

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000576019 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 4.76837e-06 on 4 procs for 0 steps with 500 atoms
89.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.768e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.45707e-06 on 4 procs for 0 steps with 500 atoms
94.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.457e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
115.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.38419e-06 on 4 procs for 0 steps with 500 atoms
125.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.384e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 2.44379e-06 on 4 procs for 0 steps with 500 atoms
112.5% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.444e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 2.14577e-06 on 4 procs for 0 steps with 500 atoms
139.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:00

View File

@ -1,263 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
// MC code used with LAMMPS in client/server mode
// MC is the client, LAMMPS is the server
// Syntax: mc infile mode modearg
// mode = file, zmq
// modearg = filename for file, localhost:5555 for zmq
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "mc.h"
#include "random_park.h"
#include "cslib.h"
using namespace CSLIB_NS;
void error(const char *);
CSlib *cs_create(char *, char *);
#define MAXLINE 256
/* ---------------------------------------------------------------------- */
// main program
int main(int narg, char **arg)
{
if (narg != 4) {
error("Syntax: mc infile mode modearg");
exit(1);
}
// initialize CSlib
CSlib *cs = cs_create(arg[2],arg[3]);
// create MC class and perform run
MC *mc = new MC(arg[1],cs);
mc->run();
// final MC stats
int naccept = mc->naccept;
int nattempt = mc->nattempt;
printf("------ MC stats ------\n");
printf("MC attempts = %d\n",nattempt);
printf("MC accepts = %d\n",naccept);
printf("Acceptance ratio = %g\n",1.0*naccept/nattempt);
// clean up
delete cs;
delete mc;
}
/* ---------------------------------------------------------------------- */
void error(const char *str)
{
printf("ERROR: %s\n",str);
exit(1);
}
/* ---------------------------------------------------------------------- */
CSlib *cs_create(char *mode, char *arg)
{
CSlib *cs = new CSlib(0,mode,arg,NULL);
// initial handshake to agree on protocol
cs->send(0,1);
cs->pack_string(1,(char *) "mc");
int msgID,nfield;
int *fieldID,*fieldtype,*fieldlen;
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
return cs;
}
// ----------------------------------------------------------------------
// MC class
// ----------------------------------------------------------------------
MC::MC(char *mcfile, void *cs_caller)
//MC::MC(char *mcfile, CSlib *cs_caller)
{
cs_void = cs_caller;
// setup MC params
options(mcfile);
// random # generator
random = new RanPark(seed);
}
/* ---------------------------------------------------------------------- */
MC::~MC()
{
free(x);
delete random;
}
/* ---------------------------------------------------------------------- */
void MC::run()
{
int iatom,accept,msgID,nfield;
double pe_initial,pe_final,edelta;
double dx,dy,dz;
double xold[3],xnew[3];
int *fieldID,*fieldtype,*fieldlen;
enum{NATOMS=1,EINIT,DISPLACE,ACCEPT,RUN};
CSlib *cs = (CSlib *) cs_void;
// one-time request for atom count from MD
// allocate 1d coord buffer
cs->send(NATOMS,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
natoms = cs->unpack_int(1);
x = (double *) malloc(3*natoms*sizeof(double));
// loop over MC moves
naccept = nattempt = 0;
for (int iloop = 0; iloop < nloop; iloop++) {
// request current energy from MD
// recv energy, coords from MD
cs->send(EINIT,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_initial = cs->unpack_double(1);
double *x = (double *) cs->unpack(2);
// perform simple MC event
// displace a single atom by random amount
iatom = (int) natoms*random->uniform();
xold[0] = x[3*iatom+0];
xold[1] = x[3*iatom+1];
xold[2] = x[3*iatom+2];
dx = 2.0*delta*random->uniform() - delta;
dy = 2.0*delta*random->uniform() - delta;
dz = 2.0*delta*random->uniform() - delta;
xnew[0] = xold[0] + dx;
xnew[1] = xold[1] + dy;
xnew[2] = xold[2] + dz;
// send atom ID and its new coords to MD
// recv new energy
cs->send(DISPLACE,2);
cs->pack_int(1,iatom+1);
cs->pack(2,4,3,xnew);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_final = cs->unpack_double(1);
// decide whether to accept/reject MC event
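// note: with "units lj" the server reports a per-atom normalized energy,
// which is presumably why the exponent below is scaled by natoms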
if (pe_final <= pe_initial) accept = 1;
else if (temperature == 0.0) accept = 0;
else if (random->uniform() >
exp(natoms*(pe_initial-pe_final)/temperature)) accept = 0;
else accept = 1;
nattempt++;
if (accept) naccept++;
// send accept (1) or reject (0) flag to MD
cs->send(ACCEPT,1);
cs->pack_int(1,accept);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
// send dynamics timesteps
cs->send(RUN,1);
cs->pack_int(1,ndynamics);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
// send exit message to MD
cs->send(-1,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
/* ---------------------------------------------------------------------- */
void MC::options(char *filename)
{
// default params
nsteps = 0;
ndynamics = 100;
delta = 0.1;
temperature = 1.0;
seed = 12345;
// read and parse file
FILE *fp = fopen(filename,"r");
if (fp == NULL) error("Could not open MC file");
char line[MAXLINE];
char *keyword,*value;
char *eof = fgets(line,MAXLINE,fp);
while (eof) {
if (line[0] == '#') { // comment line
eof = fgets(line,MAXLINE,fp);
continue;
}
value = strtok(line," \t\n\r\f");
if (value == NULL) { // blank line
eof = fgets(line,MAXLINE,fp);
continue;
}
keyword = strtok(NULL," \t\n\r\f");
if (keyword == NULL) error("Missing keyword in MC file");
if (strcmp(keyword,"nsteps") == 0) nsteps = atoi(value);
else if (strcmp(keyword,"ndynamics") == 0) ndynamics = atoi(value);
else if (strcmp(keyword,"delta") == 0) delta = atof(value);
else if (strcmp(keyword,"temperature") == 0) temperature = atof(value);
else if (strcmp(keyword,"seed") == 0) seed = atoi(value);
else error("Unknown param in MC file");
eof = fgets(line,MAXLINE,fp);
}
// derived params
nloop = nsteps/ndynamics;
}

View File

@ -1,40 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
#ifndef MC_H
#define MC_H
/* ---------------------------------------------------------------------- */
class MC {
public:
int naccept; // # of accepted MC events
int nattempt; // # of attempted MC events
MC(char *, void *);
~MC();
void run();
private:
int nsteps; // total # of MD steps
int ndynamics; // steps in one short dynamics run
int nloop; // nsteps/ndynamics
int natoms; // # of MD atoms
double delta; // MC displacement distance
double temperature; // MC temperature for Boltzmann criterion
double *x; // atom coords as 3N 1d vector
double energy; // global potential energy
int seed; // RNG seed
class RanPark *random;
void *cs_void; // messaging library
void options(char *);
};
#endif

View File

@ -1,72 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
// Park/Miller RNG
#include <math.h>
#include "random_park.h"
//#include "error.h"
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
/* ---------------------------------------------------------------------- */
RanPark::RanPark(int seed_init)
{
//if (seed_init <= 0)
// error->one(FLERR,"Invalid seed for Park random # generator");
seed = seed_init;
save = 0;
}
/* ----------------------------------------------------------------------
uniform RN
------------------------------------------------------------------------- */
double RanPark::uniform()
{
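// Park/Miller minimal standard LCG using Schrage's decomposition (IQ,IR)
// to avoid 32-bit integer overflow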
int k = seed/IQ;
seed = IA*(seed-k*IQ) - IR*k;
if (seed < 0) seed += IM;
double ans = AM*seed;
return ans;
}
/* ----------------------------------------------------------------------
gaussian RN
------------------------------------------------------------------------- */
double RanPark::gaussian()
{
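// Marsaglia polar method: gaussian RNs are generated in pairs;
// the second value is cached in "second" and returned on the next call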
double first,v1,v2,rsq,fac;
if (!save) {
do {
v1 = 2.0*uniform()-1.0;
v2 = 2.0*uniform()-1.0;
rsq = v1*v1 + v2*v2;
} while ((rsq >= 1.0) || (rsq == 0.0));
fac = sqrt(-2.0*log(rsq)/rsq);
second = v1*fac;
first = v2*fac;
save = 1;
} else {
first = second;
save = 0;
}
return first;
}

View File

@ -1,28 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef RANPARK_H
#define RANPARK_H
class RanPark {
public:
RanPark(int);
double uniform();
double gaussian();
private:
int seed,save;
double second;
};
#endif

View File

@ -1,197 +0,0 @@
Sample LAMMPS MD wrapper on NWChem via client/server coupling
See the MESSAGE package documentation in Build_extras.html#message
for more details on how client/server coupling works in LAMMPS.
In this dir, the nwchem_wrap.py is a wrapper on the NWChem electronic
structure code so it can work as a "server" code which LAMMPS drives
as a "client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends NWChem a current set of coordinates each timestep,
NWChem computes forces and energy (and virial) and returns that info
to LAMMPS.
Messages are exchanged between NWChem and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
nwchem_wrap.py program became parallel, or the CSlib library calls were
integrated into NWChem directly, then data could also be exchanged via
MPI.
There are 2 examples provided in the planewave and ao_basis
sub-directories. See details below.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the nwchem_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This will make shared-library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it is not able to do this, you will get an error when
you run nwchem_wrap.py.
You can do this by augmenting two environment variables, either from
the command line, or in your shell start-up script. Here is the
sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
----------------
Prepare to use NWChem and the nwchem_wrap.py script
You can run the nwchem_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
nwchem_lammps.out files output by a previous NWChem run.
But note that the as-is version of nwchem_wrap.py will not attempt to
run NWChem.
To have the wrapper launch NWChem itself, you must edit the 1st
nwchemcmd line at the top of nwchem_wrap.py to be the launch command
needed to run NWChem on your system. It can be a command to run
NWChem in serial or in parallel, e.g. an mpirun command. Then comment
out the 2nd nwchemcmd line immediately following it.
Ensure you have the necessary NWChem input file in this directory,
suitable for the NWChem calculation you want to perform.
Example input files are provided for both atom-centered AO basis sets
and plane-wave basis sets. Note that the NWChem template file should
be matched to the LAMMPS input script (# of atoms and atom types, box
size, etc.).
Once you run NWChem yourself, the nwchem_lammps.out file will be
overwritten.
The syntax of the wrapper is:
nwchem_wrap.py file/zmq ao/pw input_template
* file/zmq = messaging mode, must match the LAMMPS messaging mode
* ao/pw = basis set mode, selects between atom-centered and plane-wave
  basis sets; the "ao" mode supports the scf and dft modules in NWChem,
  the "pw" mode supports the nwpw module. The input_template file must
  correspond to the chosen basis set mode.
* input_template = NWChem input file used as a template; must include a
  "geometry" block with the atoms in the simulation. Dummy xyz
  coordinates should be included (but are not used), and the atom
  ordering must match the LAMMPS input.
During a simulation, the molecular orbitals from the previous timestep
will be used as the initial guess for the next NWChem calculation. If
a file named "nwchem_lammps.movecs" is in the directory the wrapper is
called from, these orbitals will be used as the initial guess orbitals
in the first step of the simulation.
----------------
Example directories
(1) planewave
Demonstrates coupling of the nwpw module in NWChem with LAMMPS. Only fully
periodic boundary conditions and orthogonal simulation boxes are currently
supported by the wrapper. The included files provide an example run using a
2 atom unit cell of tungsten.
Files:
* data.W LAMMPS input with geometry information
* in.client.W LAMMPS simulation input
* log.client.output LAMMPS simulation output
* w.nw NWChem template input file
* nwchem_lammps.out NWChem output
(2) ao_basis
Demonstrates coupling of the scf (or dft) modules in NWChem with
LAMMPS. Only fully aperiodic boundary conditions are currently
supported by the wrapper. The included files provide an example run
using a single water molecule.
Files:
* data.h2o LAMMPS input with geometry information
* in.client.h2o LAMMPS simulation input
* log.client.output LAMMPS simulation output
* h2o.nw NWChem template input file
* nwchem_lammps.out NWChem output
As noted above, you can run the nwchem_wrap.py script as-is to test that
the coupling between it and LAMMPS is functional. This will use the included
nwchem_lammps.out files.
----------------
To run in client/server mode:
NOTE: The nwchem_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (nwchem_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
nwchem_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The nwchem_wrap.py code is always run in serial, but it
launches NWChem from Python via an mpirun command which can run NWChem
itself in parallel.
When you run, the client (LAMMPS) should print out thermodynamic info
every timestep, reflecting the forces and virial computed by NWChem.
NWChem will also generate output files each timestep. Output files from
previous timesteps are archived in a "nwchem_logs" directory.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file -in in.client.W
% python nwchem_wrap.py file pw w.nw
% mpirun -np 2 lmp_mpi -v mode file -in in.client.h2o
% python nwchem_wrap.py file ao h2o.nw
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq -in in.client.W
% python nwchem_wrap.py zmq pw w.nw
% mpirun -np 2 lmp_mpi -v mode zmq -in in.client.h2o
% python nwchem_wrap.py zmq ao h2o.nw

View File

@ -1,20 +0,0 @@
LAMMPS H2O data file
3 atoms
2 atom types
-10.0 10.0 xlo xhi
-10.0 10.0 ylo yhi
-10.0 10.0 zlo zhi
Masses
1 15.994915008544922
2 1.0078250169754028
Atoms
1 1 0.0 0.0 0.0
2 2 0.0 0.756723 -0.585799
3 2 0.0 -0.756723 -0.585799

View File

@ -1,25 +0,0 @@
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym
O 0.0 0.0 0.0
H 1.0 0.5 0.0
H -1.0 0.5 0.0
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient

View File

@ -1,27 +0,0 @@
# H2O with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

View File

@ -1,30 +0,0 @@
# H2O with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
group one id 2
displace_atoms one move 0.1 0.2 0.3
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
minimize 1.0e-6 1.0e-6 10 50

View File

@ -1,66 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000627125 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0276 | 0.0276 | 0.0276 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 0.077556087 10.354878 8000
1 300 0 0 0.077556087 10.354878 8000
2 300 0 0 0.077556087 10.354878 8000
3 300 0 0 0.077556087 10.354878 8000
Loop time of 0.30198 on 1 procs for 3 steps with 3 atoms
Performance: 0.858 ns/day, 27.961 hours/ns, 9.934 timesteps/s
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 2.5979e-06 | 2.5979e-06 | 2.5979e-06 | 0.0 | 0.00
Output | 0.00012053 | 0.00012053 | 0.00012053 | 0.0 | 0.04
Modify | 0.30185 | 0.30185 | 0.30185 | 0.0 | 99.96
Other | | 8.211e-06 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:07

View File

@ -1,66 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000608759 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0276 | 0.0276 | 0.0276 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 -2068.2746 10.354878 8000
1 200.33191 0 0 -2068.2704 6.9147085 8000
2 152.36218 0 0 -2068.269 5.2589726 8000
3 227.40679 0 0 -2068.2722 7.8492321 8000
Loop time of 1.90319 on 1 procs for 3 steps with 3 atoms
Performance: 0.136 ns/day, 176.221 hours/ns, 1.576 timesteps/s
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 3.9274e-06 | 3.9274e-06 | 3.9274e-06 | 0.0 | 0.00
Output | 0.00011798 | 0.00011798 | 0.00011798 | 0.0 | 0.01
Modify | 1.9031 | 1.9031 | 1.9031 | 0.0 | 99.99
Other | | 1.054e-05 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:07

View File

@ -1,82 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000615383 secs
group one id 2
1 atoms in group one
displace_atoms one move 0.1 0.2 0.3
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
minimize 1.0e-6 1.0e-6 10 50
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:174)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0279 | 0.0279 | 0.0279 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 -2067.8909 10.354878 8000
1 300 0 0 -2068.0707 10.354878 8000
2 300 0 0 -2068.252 10.354878 8000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
3 300 0 0 -2068.2797 10.354878 8000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
4 300 0 0 -2068.2799 10.354878 8000
Loop time of 5.71024 on 1 procs for 4 steps with 3 atoms
0.1% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-2067.96847053 -2068.35730416 -2068.35745184
Force two-norm initial, final = 4.54685 0.124714
Force max component initial, final = 3.48924 0.0859263
Final line search alpha, max atom move = 1 0.0859263
Iterations, force evaluations = 4 8
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 6.2305e-07 | 6.2305e-07 | 6.2305e-07 | 0.0 | 0.00
Comm | 1.1522e-05 | 1.1522e-05 | 1.1522e-05 | 0.0 | 0.00
Output | 8.4217e-05 | 8.4217e-05 | 8.4217e-05 | 0.0 | 0.00
Modify | 5.7099 | 5.7099 | 5.7099 | 0.0 | 99.99
Other | | 0.0002355 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 2
Dangerous builds not checked
Total wall time: 0:00:10

View File

@ -1,626 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym nocenter
O 0.00197082 0.0012463 -0.00298048
H -0.0432066 0.769363 -0.596119
H 0.0119282 -0.789143 -0.528177
end
scf
vectors input nwchem_lammps.movecs
end
dft
vectors input nwchem_lammps.movecs
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = almondjoy
program = /home/jboschen/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Fri Jan 31 00:31:00 2020
compiled = Tue_Oct_01_13:20:43_2019
source = /home/jboschen/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 655358 doubles = 5.0 Mbytes
stack = 3014651 doubles = 23.0 Mbytes
global = 5242880 doubles = 40.0 Mbytes (distinct from heap & stack)
total = 8912889 doubles = 68.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = scf
Operation = gradient
Status = ok
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 3 Fri Jan 31 00:30:59 2020
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 ao basis 2 Fri Jan 31 00:30:59 2020
The basis set named "ao basis" is the default AO basis for restart
NWChem Input Module
-------------------
Scaling coordinates for geometry "geometry" by 1.889725989
(inverse scale = 0.529177249)
------
auto-z
------
no constraints, skipping 0.0000000000000000
no constraints, skipping 0.0000000000000000
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 O 8.0000 0.00197082 0.00124630 -0.00298048
2 H 1.0000 -0.04320660 0.76936300 -0.59611900
3 H 1.0000 0.01192820 -0.78914300 -0.52817700
Atomic Mass
-----------
O 15.994910
H 1.007825
Effective nuclear repulsion energy (a.u.) 9.1573270473
Nuclear Dipole moment (a.u.)
----------------------------
X Y Z
---------------- ---------------- ----------------
-0.0293131272 -0.0185374561 -2.1696696942
Z-matrix (autoz)
--------
Units are Angstrom for bonds and degrees for angles
Type Name I J K L M Value
----------- -------- ----- ----- ----- ----- ----- ----------
1 Stretch 1 2 0.97152
2 Stretch 1 3 0.94902
3 Bend 2 1 3 108.72901
XYZ format geometry
-------------------
3
geometry
O 0.00197082 0.00124630 -0.00298048
H -0.04320660 0.76936300 -0.59611900
H 0.01192820 -0.78914300 -0.52817700
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 H | 1 O | 1.83591 | 0.97152
3 H | 1 O | 1.79339 | 0.94902
------------------------------------------------------------------------------
number of included internuclear distances: 2
==============================================================================
==============================================================================
internuclear angles
------------------------------------------------------------------------------
center 1 | center 2 | center 3 | degrees
------------------------------------------------------------------------------
2 H | 1 O | 3 H | 108.73
------------------------------------------------------------------------------
number of included internuclear angles: 1
==============================================================================
Basis "ao basis" -> "" (cartesian)
-----
O (Oxygen)
----------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 5.48467170E+03 0.001831
1 S 8.25234950E+02 0.013950
1 S 1.88046960E+02 0.068445
1 S 5.29645000E+01 0.232714
1 S 1.68975700E+01 0.470193
1 S 5.79963530E+00 0.358521
2 S 1.55396160E+01 -0.110778
2 S 3.59993360E+00 -0.148026
2 S 1.01376180E+00 1.130767
3 P 1.55396160E+01 0.070874
3 P 3.59993360E+00 0.339753
3 P 1.01376180E+00 0.727159
4 S 2.70005800E-01 1.000000
5 P 2.70005800E-01 1.000000
6 D 8.00000000E-01 1.000000
H (Hydrogen)
------------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 1.87311370E+01 0.033495
1 S 2.82539370E+00 0.234727
1 S 6.40121700E-01 0.813757
2 S 1.61277800E-01 1.000000
Summary of "ao basis" -> "" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
NWChem SCF Module
-----------------
ao basis = "ao basis"
functions = 19
atoms = 3
closed shells = 5
open shells = 0
charge = 0.00
wavefunction = RHF
input vectors = ./nwchem_lammps.movecs
output vectors = ./nwchem_lammps.movecs
use symmetry = F
symmetry adapt = F
Summary of "ao basis" -> "ao basis" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
Forming initial guess at 0.0s
Loading old vectors from job with title :
Starting SCF solution at 0.0s
----------------------------------------------
Quadratically convergent ROHF
Convergence threshold : 1.000E-04
Maximum no. of iterations : 100
Final Fock-matrix accuracy: 1.000E-07
----------------------------------------------
#quartets = 1.540D+03 #integrals = 1.424D+04 #direct = 0.0% #cached =100.0%
Integral file = ./nwchem_lammps.aoints.0
Record size in doubles = 65536 No. of integs per rec = 43688
Max. records in memory = 2 Max. records in file = 1392051
No. of bits per label = 8 No. of bits per value = 64
iter energy gnorm gmax time
----- ------------------- --------- --------- --------
1 -76.0095751323 4.63D-02 1.64D-02 0.1
2 -76.0097628164 8.13D-04 2.83D-04 0.1
3 -76.0097629130 3.92D-06 1.55D-06 0.1
Final RHF results
------------------
Total SCF energy = -76.009762913030
One-electron energy = -123.002897732381
Two-electron energy = 37.835807772101
Nuclear repulsion energy = 9.157327047250
Time for solution = 0.0s
Final eigenvalues
-----------------
1
1 -20.5584
2 -1.3367
3 -0.7128
4 -0.5617
5 -0.4959
6 0.2104
7 0.3038
8 1.0409
9 1.1202
10 1.1606
11 1.1691
12 1.3840
13 1.4192
14 2.0312
15 2.0334
ROHF Final Molecular Orbital Analysis
-------------------------------------
Vector 2 Occ=2.000000D+00 E=-1.336749D+00
MO Center= -2.8D-03, -1.3D-02, -1.7D-01, r^2= 5.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
2 0.476636 1 O s 6 0.442369 1 O s
1 -0.210214 1 O s
Vector 3 Occ=2.000000D+00 E=-7.127948D-01
MO Center= -4.9D-03, 3.9D-03, -2.1D-01, r^2= 7.8D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
4 0.504894 1 O py 8 0.303932 1 O py
18 -0.234724 3 H s 16 0.229765 2 H s
Vector 4 Occ=2.000000D+00 E=-5.617306D-01
MO Center= 3.6D-03, 9.0D-03, 5.6D-02, r^2= 6.9D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
5 0.559565 1 O pz 9 0.410981 1 O pz
6 0.315892 1 O s 2 0.157960 1 O s
Vector 5 Occ=2.000000D+00 E=-4.959173D-01
MO Center= 1.4D-03, 6.9D-05, -2.2D-02, r^2= 6.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
3 0.638390 1 O px 7 0.511530 1 O px
Vector 6 Occ=0.000000D+00 E= 2.103822D-01
MO Center= -2.3D-02, 3.5D-02, -7.3D-01, r^2= 2.6D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.416869 1 O s 17 -1.068330 2 H s
19 -1.014775 3 H s 9 -0.490951 1 O pz
5 -0.212990 1 O pz
Vector 7 Occ=0.000000D+00 E= 3.037943D-01
MO Center= -1.8D-02, -8.9D-02, -7.1D-01, r^2= 2.8D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
19 -1.426837 3 H s 17 1.332767 2 H s
8 -0.842141 1 O py 4 -0.327553 1 O py
Vector 8 Occ=0.000000D+00 E= 1.040852D+00
MO Center= -7.4D-03, 1.3D-01, -1.6D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.931594 2 H s 18 -0.747590 3 H s
8 -0.655817 1 O py 17 -0.523035 2 H s
19 0.366407 3 H s 14 -0.357109 1 O dyz
Vector 9 Occ=0.000000D+00 E= 1.120172D+00
MO Center= -6.8D-03, -2.9D-02, -3.1D-01, r^2= 1.5D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.145090 1 O s 18 0.841596 3 H s
2 -0.727471 1 O s 16 0.684927 2 H s
9 0.559191 1 O pz 19 -0.546678 3 H s
17 -0.538778 2 H s 10 -0.344609 1 O dxx
15 -0.250035 1 O dzz
Vector 10 Occ=0.000000D+00 E= 1.160603D+00
MO Center= 1.2D-02, -4.3D-02, 2.5D-01, r^2= 1.0D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.137949 1 O s 5 -0.844233 1 O pz
9 0.595088 1 O pz 2 -0.475986 1 O s
18 -0.455932 3 H s 16 -0.357325 2 H s
13 -0.317117 1 O dyy 15 -0.196968 1 O dzz
Vector 11 Occ=0.000000D+00 E= 1.169054D+00
MO Center= 1.9D-03, 1.2D-03, -6.4D-03, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
7 -1.034653 1 O px 3 0.962043 1 O px
Vector 12 Occ=0.000000D+00 E= 1.384034D+00
MO Center= 6.0D-04, -2.6D-03, -5.0D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
8 1.557767 1 O py 4 -1.035829 1 O py
17 -0.900920 2 H s 19 0.901756 3 H s
Vector 13 Occ=0.000000D+00 E= 1.419205D+00
MO Center= -1.3D-02, -4.9D-02, -5.2D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 3.605136 1 O s 2 -1.454853 1 O s
9 -1.107532 1 O pz 19 -0.874208 3 H s
17 -0.757016 2 H s 13 -0.634436 1 O dyy
5 0.516593 1 O pz 15 -0.401100 1 O dzz
10 -0.319873 1 O dxx 16 -0.260650 2 H s
Vector 14 Occ=0.000000D+00 E= 2.031234D+00
MO Center= 1.9D-03, 2.3D-03, -3.0D-03, r^2= 6.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
11 1.727083 1 O dxy
Vector 15 Occ=0.000000D+00 E= 2.033369D+00
MO Center= 3.4D-03, 3.4D-03, 4.3D-02, r^2= 6.2D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
15 1.012642 1 O dzz 13 -0.512441 1 O dyy
10 -0.438481 1 O dxx 6 -0.226567 1 O s
center of mass
--------------
x = -0.00000001 y = -0.00000003 z = -0.12388979
moments of inertia (a.u.)
------------------
6.378705068992 0.153373998471 -0.069687034145
0.153373998471 2.014476065716 0.150739744400
-0.069687034145 0.150739744400 4.379134195179
Mulliken analysis of the total density
--------------------------------------
Atom Charge Shell Charges
----------- ------ -------------------------------------------------------
1 O 8 8.87 2.00 0.90 2.90 0.92 2.08 0.08
2 H 1 0.56 0.46 0.11
3 H 1 0.56 0.47 0.10
Multipole analysis of the density wrt the origin
------------------------------------------------
L x y z total open nuclear
- - - - ----- ---- -------
0 0 0 0 -0.000000 0.000000 10.000000
1 1 0 0 -0.026417 0.000000 -0.029313
1 0 1 0 -0.023604 0.000000 -0.018537
1 0 0 1 -0.846090 0.000000 -2.169670
2 2 0 0 -5.373227 0.000000 0.007286
2 1 1 0 -0.085617 0.000000 -0.152252
2 1 0 1 0.038215 0.000000 0.069311
2 0 2 0 -2.927589 0.000000 4.337695
2 0 1 1 -0.071410 0.000000 -0.149465
2 0 0 2 -4.159949 0.000000 2.265483
Parallel integral file used 1 records with 0 large values
NWChem Gradients Module
-----------------------
wavefunction = RHF
RHF ENERGY GRADIENTS
atom coordinates gradient
x y z x y z
1 O 0.003724 0.002355 -0.005632 0.000909 -0.019294 0.007866
2 H -0.081649 1.453885 -1.126502 -0.001242 0.025549 -0.011605
3 H 0.022541 -1.491264 -0.998110 0.000333 -0.006255 0.003739
----------------------------------------
| Time | 1-e(secs) | 2-e(secs) |
----------------------------------------
| CPU | 0.00 | 0.03 |
----------------------------------------
| WALL | 0.00 | 0.03 |
----------------------------------------
Task times cpu: 0.1s wall: 0.1s
NWChem Input Module
-------------------
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 182 182 2869 728 468 0 0 68
number of processes/call 1.00e+00 1.00e+00 1.00e+00 0.00e+00 0.00e+00
bytes total: 6.18e+05 3.56e+05 1.04e+05 0.00e+00 0.00e+00 5.44e+02
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 39432 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 18 28
current total bytes 0 0
maximum total bytes 1060104 16000888
maximum total K-bytes 1061 16001
maximum total M-bytes 2 17
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.1s wall: 0.2s

Some files were not shown because too many files have changed in this diff.