Merge branch 'develop' into dielectric-updates

Trung Nguyen
2022-04-12 16:33:02 -05:00
committed by GitHub
952 changed files with 23964 additions and 39343 deletions

View File

@ -26,6 +26,7 @@ jobs:
shell: bash
run: |
python3 -m pip install numpy
python3 -m pip install pyyaml
cmake -C cmake/presets/windows.cmake \
-D PKG_PYTHON=on \
-S cmake -B build \

View File

@ -37,6 +37,7 @@ jobs:
working-directory: build
run: |
ccache -z
python3 -m pip install pyyaml
cmake -C ../cmake/presets/clang.cmake \
-C ../cmake/presets/most.cmake \
-D CMAKE_CXX_COMPILER_LAUNCHER=ccache \

View File

@ -4,10 +4,8 @@ This directory contains 5 benchmark problems which are discussed in
the Benchmark section of the LAMMPS documentation, and on the
Benchmark page of the LAMMPS WWW site (https://www.lammps.org/bench.html).
This directory also has several sub-directories:
This directory also has one sub-directory:
FERMI benchmark scripts for desktop machine with Fermi GPUs (Tesla)
KEPLER benchmark scripts for GPU cluster with Kepler GPUs
POTENTIALS benchmark scripts for various potentials in LAMMPS
The results for all of these benchmarks are displayed and discussed on

View File

@ -208,7 +208,6 @@ set(STANDARD_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP
@ -451,7 +450,7 @@ else()
endif()
foreach(PKG_WITH_INCL KSPACE PYTHON ML-IAP VORONOI COLVARS ML-HDNNP MDI MOLFILE NETCDF
PLUMED QMMM ML-QUIP SCAFACOS MACHDYN VTK KIM LATTE MESSAGE MSCG COMPRESS ML-PACE)
PLUMED QMMM ML-QUIP SCAFACOS MACHDYN VTK KIM LATTE MSCG COMPRESS ML-PACE)
if(PKG_${PKG_WITH_INCL})
include(Packages/${PKG_WITH_INCL})
endif()

View File

@ -347,6 +347,10 @@ elseif(GPU_API STREQUAL "HIP")
target_link_libraries(gpu PRIVATE hip::host)
if(HIP_USE_DEVICE_SORT)
if(HIP_PLATFORM STREQUAL "amd")
# newer versions of ROCm (5.1+) require C++14 for rocprim
set_property(TARGET gpu PROPERTY CXX_STANDARD 14)
endif()
# add hipCUB
target_include_directories(gpu PRIVATE ${HIP_ROOT_DIR}/../include)
target_compile_definitions(gpu PRIVATE -DUSE_HIP_DEVICE_SORT)

View File

@ -1,5 +1,14 @@
enable_language(C)
# we don't use the parallel i/o interface.
set(HDF5_PREFER_PARALLEL FALSE)
find_package(HDF5 REQUIRED)
# parallel HDF5 will import incompatible MPI headers with a serial build
if((NOT BUILD_MPI) AND HDF5_IS_PARALLEL)
message(FATAL_ERROR "Serial LAMMPS build and parallel HDF5 library are not compatible")
endif()
target_link_libraries(h5md PRIVATE ${HDF5_LIBRARIES})
target_include_directories(h5md PUBLIC ${HDF5_INCLUDE_DIRS})

View File

@ -8,8 +8,8 @@ option(DOWNLOAD_MDI "Download and compile the MDI library instead of using an al
if(DOWNLOAD_MDI)
message(STATUS "MDI download requested - we will build our own")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.2.9.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "ddfa46d6ee15b4e59cfd527ec7212184" CACHE STRING "MD5 checksum for MDI tarball")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.3.0.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "8a8da217148bd9b700083b67d795af5e" CACHE STRING "MD5 checksum for MDI tarball")
mark_as_advanced(MDI_URL)
mark_as_advanced(MDI_MD5)
enable_language(C)

View File

@ -1,31 +0,0 @@
if(LAMMPS_SIZES STREQUAL "BIGBIG")
message(FATAL_ERROR "The MESSAGE Package is not compatible with -DLAMMPS_BIGBIG")
endif()
option(MESSAGE_ZMQ "Use ZeroMQ in MESSAGE package" OFF)
file(GLOB_RECURSE cslib_SOURCES
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.cpp)
add_library(cslib STATIC ${cslib_SOURCES})
target_compile_definitions(cslib PRIVATE -DLAMMPS_${LAMMPS_SIZES})
set_target_properties(cslib PROPERTIES OUTPUT_NAME lammps_cslib${LAMMPS_MACHINE})
if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
target_link_libraries(cslib PRIVATE MPI::MPI_CXX)
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_MPI)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()
if(MESSAGE_ZMQ)
target_compile_definitions(cslib PRIVATE -DZMQ_YES)
find_package(ZMQ REQUIRED)
target_link_libraries(cslib PUBLIC ZMQ::ZMQ)
else()
target_compile_definitions(cslib PRIVATE -DZMQ_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_ZMQ)
endif()
target_link_libraries(lammps PRIVATE cslib)
target_include_directories(lammps PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src)

View File

@ -48,7 +48,6 @@ set(ALL_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP

View File

@ -50,7 +50,6 @@ set(ALL_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP

View File

@ -15,7 +15,6 @@ set(PACKAGES_WITH_LIB
MACHDYN
MDI
MESONT
MESSAGE
ML-HDNNP
ML-PACE
ML-QUIP

View File

@ -98,7 +98,7 @@ msi2lmp decane -c 0 -f oplsaa
.SH COPYRIGHT
© 2003--2021 Sandia Corporation
© 2003--2022 Sandia Corporation
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as

View File

@ -45,7 +45,6 @@ This is the list of packages that may require additional steps.
* :ref:`MACHDYN <machdyn>`
* :ref:`MDI <mdi>`
* :ref:`MESONT <mesont>`
* :ref:`MESSAGE <message>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-IAP <mliap>`
* :ref:`ML-PACE <ml-pace>`
@ -797,47 +796,6 @@ library.
----------
.. _message:
MESSAGE package
-----------------------------
This package can optionally include support for messaging via sockets,
using the open-source `ZeroMQ library <http://zeromq.org>`_, which must
be installed on your system.
.. tabs::
.. tab:: CMake build
.. code-block:: bash
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
-D ZMQ_LIBRARY=path # ZMQ library file (only needed if a custom location)
-D ZMQ_INCLUDE_DIR=path # ZMQ include directory (only needed if a custom location)
.. tab:: Traditional make
Before building LAMMPS, you must build the CSlib library in
``lib/message``\ . You can build the CSlib library manually if
you prefer; follow the instructions in ``lib/message/README``\ .
You can also do it in one step from the ``lammps/src`` dir, using
one of the commands below, which simply invoke the
``lib/message/Install.py`` script with the specified args:
.. code-block:: bash
$ make lib-message # print help message
$ make lib-message args="-m -z" # build with MPI and socket (ZMQ) support
$ make lib-message args="-s" # build as serial lib with no ZMQ support
The build should produce two files: ``lib/message/cslib/src/libmessage.a``
and ``lib/message/Makefile.lammps``. The latter is copied from an
existing ``Makefile.lammps.*`` and has settings to link with the ZeroMQ
library if requested in the build.
----------
.. _mliap:
ML-IAP package

View File

@ -45,7 +45,6 @@ packages:
* :ref:`KOKKOS <kokkos>`
* :ref:`LATTE <latte>`
* :ref:`MACHDYN <machdyn>`
* :ref:`MESSAGE <message>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-PACE <ml-pace>`
* :ref:`ML-QUIP <ml-quip>`

View File

@ -67,8 +67,7 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`lattice <lattice>`
* :doc:`log <log>`
* :doc:`mass <mass>`
* :doc:`mdi/engine <mdi_engine>`
* :doc:`message <message>`
* :doc:`mdi <mdi>`
* :doc:`minimize <minimize>`
* :doc:`min_modify <min_modify>`
* :doc:`min_style <min_style>`
@ -105,7 +104,6 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`restart <restart>`
* :doc:`run <run>`
* :doc:`run_style <run_style>`
* :doc:`server <server>`
* :doc:`set <set>`
* :doc:`shell <shell>`
* :doc:`special_bonds <special_bonds>`

View File

@ -63,6 +63,7 @@ KOKKOS, o = OPENMP, t = OPT.
* :doc:`event/displace <compute_event_displace>`
* :doc:`fabric <compute_fabric>`
* :doc:`fep <compute_fep>`
* :doc:`fep/ta <compute_fep_ta>`
* :doc:`force/tally <compute_tally>`
* :doc:`fragment/atom <compute_cluster_atom>`
* :doc:`global/atom <compute_global_atom>`

View File

@ -51,7 +51,6 @@ OPT.
* :doc:`bond/swap <fix_bond_swap>`
* :doc:`box/relax <fix_box_relax>`
* :doc:`charge/regulation <fix_charge_regulation>`
* :doc:`client/md <fix_client_md>`
* :doc:`cmap <fix_cmap>`
* :doc:`colvars <fix_colvars>`
* :doc:`controller <fix_controller>`
@ -101,7 +100,7 @@ OPT.
* :doc:`lb/viscous <fix_lb_viscous>`
* :doc:`lineforce <fix_lineforce>`
* :doc:`manifoldforce <fix_manifoldforce>`
* :doc:`mdi/engine <fix_mdi_engine>`
* :doc:`mdi/aimd <fix_mdi_aimd>`
* :doc:`meso/move <fix_meso_move>`
* :doc:`mol/swap <fix_mol_swap>`
* :doc:`momentum (k) <fix_momentum>`

View File

@ -88,12 +88,12 @@ OPT.
* :doc:`coul/tt <pair_coul_tt>`
* :doc:`coul/wolf (ko) <pair_coul>`
* :doc:`coul/wolf/cs <pair_cs>`
* :doc:`dpd (gio) <pair_dpd>`
* :doc:`dpd (giko) <pair_dpd>`
* :doc:`dpd/fdt <pair_dpd_fdt>`
* :doc:`dpd/ext <pair_dpd_ext>`
* :doc:`dpd/ext/tstat <pair_dpd_ext>`
* :doc:`dpd/ext (k) <pair_dpd_ext>`
* :doc:`dpd/ext/tstat (k) <pair_dpd_ext>`
* :doc:`dpd/fdt/energy (k) <pair_dpd_fdt>`
* :doc:`dpd/tstat (go) <pair_dpd>`
* :doc:`dpd/tstat (gko) <pair_dpd>`
* :doc:`dsmc <pair_dsmc>`
* :doc:`e3b <pair_e3b>`
* :doc:`drip <pair_drip>`

View File

@ -262,3 +262,24 @@ A plugin may be registered under an existing style name. In that case
the plugin will override the existing code. This can be used to modify
the behavior of existing styles or to debug new versions of them without
having to re-compile or re-install all of LAMMPS.
Compiling plugins
^^^^^^^^^^^^^^^^^
Plugins need to be compiled with the same compilers and libraries
(e.g. MPI) and compilation settings (MPI on/off, OpenMP, integer sizes)
as the LAMMPS executable and library. Otherwise the plugin will likely
fail to load due to mismatches in the function signatures (LAMMPS is C++,
so scope, type, and number of arguments are encoded into the symbol names,
and differences in them will lead to failed plugin load commands).
Compilation of a plugin can be managed with either CMake or traditional
GNU makefiles. Some examples that can be used as templates are in the
``examples/plugins`` folder. The CMake script code has some small
adjustments to allow building the plugins for running unit tests with
them. Another example, which converts the KIM package into a plugin, can
be found in the ``examples/kim/plugin`` folder. No changes to the sources
of the KIM package themselves are needed; only the plugin interface and
loader code needs to be added. This example only supports building with
CMake, but is probably the more typical case. To compile, you need to
run CMake with ``-DLAMMPS_SOURCE_DIR=<path/to/lammps/src/folder>``. Other
configuration settings are identical to those for compiling LAMMPS.
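Once a plugin has been built, it can be loaded into LAMMPS at run time
with the plugin command (this requires a LAMMPS build that includes the
PLUGIN package). Below is a minimal sketch using the LAMMPS Python
module; the plugin file name is illustrative and should be replaced with
the shared object produced by your own plugin build.

.. code-block:: python

   # minimal sketch: load a compiled plugin at run time via the LAMMPS Python module
   # (the plugin file name is illustrative; use the .so file produced by your build)
   from lammps import lammps

   lmp = lammps()
   lmp.command("plugin load helloplugin.so")   # register the styles/commands provided by the plugin
   lmp.command("plugin list")                  # print the currently loaded plugins
   lmp.close()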

View File

@ -94,12 +94,12 @@ Lowercase directories
+-------------+------------------------------------------------------------------+
| latte | examples for using fix latte for DFTB via the LATTE library |
+-------------+------------------------------------------------------------------+
| mdi | use of the MDI package and MolSSI MDI code coupling library |
+-------------+------------------------------------------------------------------+
| meam | MEAM test for SiC and shear (same as shear examples) |
+-------------+------------------------------------------------------------------+
| melt | rapid melt of 3d LJ system |
+-------------+------------------------------------------------------------------+
| message | demos for LAMMPS client/server coupling with the MESSAGE package |
+-------------+------------------------------------------------------------------+
| micelle | self-assembly of small lipid-like molecules into 2d bilayers |
+-------------+------------------------------------------------------------------+
| min | energy minimization of 2d LJ melt |

View File

@ -22,7 +22,6 @@ General howto
Howto_replica
Howto_library
Howto_couple
Howto_client_server
Howto_mdi
Settings howto

View File

@ -1,163 +0,0 @@
Using LAMMPS in client/server mode
==================================
Client/server coupling of two codes is where one code is the "client"
and sends request messages to a "server" code. The server responds to
each request with a reply message. This enables the two codes to work
in tandem to perform a simulation. LAMMPS can act as either a client
or server code.
Some advantages of client/server coupling are that the two codes run
as stand-alone executables; they are not linked together. Thus
neither code needs to have a library interface. This often makes it
easier to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code that implements the
client-side protocol can be used in tandem with any code that
implements the server-side protocol, without the two codes needing to
know anything more specific about each other.
A simple example of client/server coupling is where LAMMPS is the
client code performing MD timestepping. Each timestep it sends a
message to a server quantum code containing current coords of all the
atoms. The quantum code computes energy and forces based on the
coords. It returns them as a message to LAMMPS, which completes the
timestep.
A more complex example is where LAMMPS is the client code and
processes a series of data files, sending each configuration to a
quantum code to compute energy and forces. Or LAMMPS runs dynamics
with an atomistic force field, but pauses every N steps to ask the
quantum code to compute energy and forces.
Alternate methods for code coupling with LAMMPS are described on
the :doc:`Howto couple <Howto_couple>` doc page.
The protocol for using LAMMPS as a client is to use these 3 commands
in this order (other commands may come in between):
* :doc:`message client <message>` # initiate client/server interaction
* :doc:`fix client/md <fix_client_md>` # any client fix which makes specific requests to the server
* :doc:`message quit <message>` # terminate client/server interaction
In between the two message commands, a client fix command and
:doc:`unfix <unfix>` command can be used multiple times. Similarly,
this sequence of 3 commands can be repeated multiple times, assuming
the server program operates in a similar fashion, to initiate and
terminate client/server communication.
The protocol for using LAMMPS as a server is to use these 2 commands
in this order (other commands may come in between):
* :doc:`message server <message>` # initiate client/server interaction
* :doc:`server md <server_md>` # any server command which responds to specific requests from the client
This sequence of 2 commands can be repeated multiple times, assuming
the client program operates in a similar fashion, to initiate and
terminate client/server communication.
LAMMPS support for client/server coupling is in its :ref:`MESSAGE package <PKG-MESSAGE>` which implements several
commands that enable LAMMPS to act as a client or server, as discussed
below. The MESSAGE package also wraps a client/server library called
CSlib which enables two codes to exchange messages in different ways,
either via files, sockets, or MPI. The CSlib is provided with LAMMPS
in the lib/message dir. The CSlib has its own
`website <https://cslib.sandia.gov>`_ with documentation and test
programs.
.. note::
For client/server coupling to work between LAMMPS and another
code, the other code also has to use the CSlib. This can sometimes be
done without any modifications to the other code by simply wrapping it
with a Python script that exchanges CSlib messages with LAMMPS and
prepares input for or processes output from the other code. The other
code also has to implement a matching protocol for the format and
content of messages that LAMMPS exchanges with it.
These are the commands currently in the MESSAGE package for two
protocols, MD and MC (Monte Carlo). New protocols can easily be
defined and added to this directory, where LAMMPS acts as either the
client or server.
* :doc:`message <message>`
* :doc:`fix client md <fix_client_md>` = LAMMPS is a client for running MD
* :doc:`server md <server_md>` = LAMMPS is a server for computing MD forces
* :doc:`server mc <server_mc>` = LAMMPS is a server for computing a Monte Carlo energy
The server doc files give details of the message protocols
for data that is exchanged between the client and server.
These example directories illustrate how to use LAMMPS as either a
client or server code:
* examples/message
* examples/COUPLE/README
* examples/COUPLE/lammps_mc
* examples/COUPLE/lammps_nwchem
* examples/COUPLE/lammps_vasp
The examples/message directory couples a client instance of LAMMPS to a
server instance of LAMMPS.
The files in the *lammps_mc* folder show how to couple LAMMPS as
a server to a simple Monte Carlo client code as the driver.
The files in the *lammps_nwchem* folder show how to couple LAMMPS
as a client code running MD timestepping to NWChem acting as a
server providing quantum DFT forces, through a Python wrapper script
on NWChem.
The files in the *lammps_vasp* folder show how to couple LAMMPS as
a client code running MD timestepping to VASP acting as a server
providing quantum DFT forces, through a Python wrapper script on VASP.
Here is how to launch a client and server code together for any of the
4 modes of message exchange that the :doc:`message <message>` command
and the CSlib support. Here LAMMPS is used as both the client and
server code. Another code could be substituted for either.
The examples below show launching both codes from the same window (or
batch script), using the "&" character to launch the first code in the
background. For all modes except *mpi/one*, you could also launch the
codes in separate windows on your desktop machine. It does not
matter whether you launch the client or server first.
In these examples either code can be run on one or more processors.
If running in a non-MPI mode (file or zmq) you can launch a code on a
single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch both codes via
mpirun, even if one or both of them runs on a single processor. This
is so that MPI can figure out how to connect both MPI processes
together to exchange MPI messages between them.
For message exchange in *file*, *zmq*, or *mpi/two* modes:
.. code-block:: bash
% mpirun -np 1 lmp_mpi -log log.client < in.client &
% mpirun -np 2 lmp_mpi -log log.server < in.server
% mpirun -np 4 lmp_mpi -log log.client < in.client &
% mpirun -np 1 lmp_mpi -log log.server < in.server
% mpirun -np 2 lmp_mpi -log log.client < in.client &
% mpirun -np 4 lmp_mpi -log log.server < in.server
For message exchange in *mpi/one* mode:
Launch both codes in a single mpirun command:
.. code-block:: bash
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -log log.server
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option, where color is an integer label that will be used
to distinguish one executable from another among the multiple executables
that the mpirun command launches. In this example the client was colored
with a 0, and the server with a 1.

View File

@ -12,16 +12,16 @@ LAMMPS can be coupled to other codes in at least 4 ways. Each has
advantages and disadvantages, which you will have to think about in the
context of your application.
1. Define a new :doc:`fix <fix>` command that calls the other code. In
this scenario, LAMMPS is the driver code. During timestepping,
1. Define a new :doc:`fix <fix>` command that calls the other code.
In this scenario, LAMMPS is the driver code. During timestepping,
the fix is invoked, and can make library calls to the other code,
which has been linked to LAMMPS as a library. This is the way how the
which has been linked to LAMMPS as a library. This is the way the
:ref:`LATTE <PKG-LATTE>` package, which performs density-functional
tight-binding calculations using the `LATTE software <https://github.com/lanl/LATTE>`_
to compute forces, is hooked to LAMMPS.
See the :doc:`fix latte <fix_latte>` command for more details.
Also see the :doc:`Modify <Modify>` doc pages for info on how to
add a new fix to LAMMPS.
tight-binding calculations using the `LATTE software
<https://github.com/lanl/LATTE>`_ to compute forces, is hooked to
LAMMPS. See the :doc:`fix latte <fix_latte>` command for more
details. Also see the :doc:`Modify <Modify>` doc pages for info on
how to add a new fix to LAMMPS.
.. spacer
@ -58,6 +58,12 @@ context of your application.
.. spacer
4. Couple LAMMPS with another code in a client/server mode. This is
described on the :doc:`Howto client/server <Howto_client_server>` doc
page.
4. Couple LAMMPS with another code in a client/server fashion, using
the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
developed by the `Molecular Sciences Software Institute (MolSSI)
<https://molssi.org>`_ to run LAMMPS as either an MDI driver
(client) or an MDI engine (server). The MDI driver issues commands
to the MDI server to exchange data between them. See the
:doc:`Howto mdi <Howto_mdi>` page for more information about how
LAMMPS can operate in either of these modes.

View File

@ -1,132 +1,144 @@
Using LAMMPS with the MDI library for code coupling
===================================================
.. note::
This Howto page will eventually replace the
:doc:`Howto client/server <Howto_client_server>` doc page.
Client/server coupling of two codes is where one code is the "client"
and sends request messages (data) to a "server" code. The server
responds to each request with a reply message. This enables the two
codes to work in tandem to perform a simulation. LAMMPS can act as
either a client or server code; it does this by using the `MolSSI
Driver Interface (MDI) library
Client/server coupling of two (or more) codes is where one code is the
"client" and sends request messages (data) to one (or more) "server"
code(s). A server responds to each request with a reply message
(data). This enables two (or more) codes to work in tandem to perform
a simulation. LAMMPS can act as either a client or server code; it
does this by using the `MolSSI Driver Interface (MDI) library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_,
developed by the `Molecular Sciences Software Institute (MolSSI)
<https://molssi.org>`_.
<https://molssi.org>`_, which is supported by the :ref:`MDI <PKG-MDI>`
package.
Alternate methods for code coupling with LAMMPS are described on the
:doc:`Howto couple <Howto_couple>` doc page.
Some advantages of client/server coupling are that the two codes can run
Some advantages of client/server coupling are that the codes can run
as stand-alone executables; they need not be linked together. Thus
neither code needs to have a library interface. This also makes it easy
to run the two codes on different numbers of processors. If a message
protocol (format and content) is defined for a particular kind of
simulation, then in principle any code which implements the client-side
protocol can be used in tandem with any code which implements the
server-side protocol. Neither code needs to know what specific other
code it is working with.
neither code needs to have a library interface. This also makes it
easy to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code which implements the
client-side protocol can be used in tandem with any code which
implements the server-side protocol. Neither code needs to know what
specific other code it is working with.
In MDI nomenclature, a client code is the "driver", and a server code is
an "engine". One driver code can communicate with one or more instances
of one or more engine codes. Driver and engine codes can be written in
any language: C, C++, Fortran, Python, etc.
In addition to allowing driver and engine(s) running to run as
stand-alone executables, MDI also enables a server code to be a
"plugin" to the client code. In this scenario, server code(s) are
compiled as shared libraries, and one (or more) instances of the
server are instantiated by the driver code. If the driver code runs
in parallel, it can split its MPI communicator into multiple
sub-communicators, and launch each plugin engine instance on a
sub-communicator. Driver processors in that sub-communicator exchange
messages with that engine instance, and can also send MPI messages to
other processors in the driver. The driver code can also destroy
engine instances and re-instantiate them.
In addition to allowing driver and engine(s) to run as stand-alone
executables, MDI also enables an engine to be a *plugin* to the client
code. In this scenario, server code(s) are compiled as shared
libraries, and one (or more) instances of the server are instantiated
by the driver code. If the driver code runs in parallel, it can split
its MPI communicator into multiple sub-communicators, and launch each
plugin engine instance on a sub-communicator. Driver processors
within that sub-communicator exchange messages with the corresponding
engine instance, and can also send MPI messages to other processors in
the driver. The driver code can also destroy engine instances and
re-instantiate them. LAMMPS can operate as either a stand-alone or
plugin MDI engine. When it operates as a driver, it can use either
stand-alone or plugin MDI engines.
The way that a driver communicates with an engine is by making
MDI_Send() and MDI_Recv() calls, which are conceptually similar to
MPI_Send() and MPI_Recv() calls. Each send or receive has a string
which identifies the command name, and optionally some data, which can
be a single value or vector of values of any data type. Inside the
MDI library, data is exchanged between the driver and engine via MPI
calls or sockets. This a run-time choice by the user.
The way in which an MDI driver communicates with an MDI engine is by
making MDI_Send() and MDI_Recv() calls, which are conceptually similar
to MPI_Send() and MPI_Recv() calls. Each send or receive operation
uses a string to identify the command name, and optionally some data,
which can be a single value or vector of values of any data type.
Inside the MDI library, data is exchanged between the driver and
engine via MPI calls or sockets. This is a run-time choice by the user.
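As an illustration of this message pattern, here is a minimal
driver-side sketch in Python. It assumes the MDI Library's Python
interface (the ``mdi`` module) with its documented MDI_Init(),
MDI_Accept_Communicator(), MDI_Send_Command(), MDI_Send(), and
MDI_Recv() functions; the initialization options and the exact sequence
of commands shown are illustrative, and a real driver should follow the
MDI documentation.

.. code-block:: python

   # illustrative MDI driver sketch; function names follow the MDI Library documentation
   import mdi

   # initialize as a driver that waits for a TCP connection from one engine (e.g. LAMMPS)
   mdi.MDI_Init("-role DRIVER -name driver -method TCP -port 8021")
   comm = mdi.MDI_Accept_Communicator()

   # ask the engine how many atoms it has
   mdi.MDI_Send_Command("<NATOMS", comm)
   natoms = mdi.MDI_Recv(1, mdi.MDI_INT, comm)

   # send new coordinates, then request the forces computed for them
   coords = [0.0] * (3 * natoms)           # placeholder; a real driver supplies its own coordinates
   mdi.MDI_Send_Command(">COORDS", comm)
   mdi.MDI_Send(coords, 3 * natoms, mdi.MDI_DOUBLE, comm)
   mdi.MDI_Send_Command("<FORCES", comm)
   forces = mdi.MDI_Recv(3 * natoms, mdi.MDI_DOUBLE, comm)

   # tell the engine to leave engine mode
   mdi.MDI_Send_Command("EXIT", comm)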
----------
The :ref:`MDI <PKG-MDI>` package provides a :doc:`mdi engine <mdi>`
command which enables LAMMPS to operate as an MDI engine. Its doc
page explains the variety of standard and custom MDI commands which
the LAMMPS engine recognizes and can respond to.
The package also provides a :doc:`mdi plugin <mdi>` command which
enables LAMMPS to operate as an MDI driver and load an MDI engine as a
plugin library.
The package also has a :doc:`fix mdi/aimd <fix_mdi_aimd>` command in which
LAMMPS operates as an MDI driver to perform *ab initio* MD simulations
in conjunction with a quantum mechanics code. Its post_force() method
illustrates how a driver issues MDI commands to another code. This
command can be used to couple to an MDI engine which is either a
stand-alone code or a plugin library.
----------
The examples/mdi directory contains Python scripts and LAMMPS input
scripts which use LAMMPS as either an MDI driver or engine, or both.
Three example use cases are provided:
* Run ab initio MD (AIMD) using 2 instances of LAMMPS, one as driver
and one as an engine. As an engine, LAMMPS is a surrogate for a
quantum code.
* A Python script driver invokes a sequence of unrelated LAMMPS
calculations. Calculations can be single-point energy/force
evaluations, MD runs, or energy minimizations.
* Run AIMD with a Python driver code and 2 LAMMPS instances as
engines. The first LAMMPS instance performs MD timestepping. The
second LAMMPS instance acts as a surrogate QM code to compute
forces.
Note that in any of these examples where LAMMPS is used as an engine,
an actual QM code (which supports MDI) could be used in its place,
without modifying other code or scripts, except to specify the name of
the QM code.
The examples/mdi/README file explains how to launch both driver and
engine codes so that they communicate using the MDI library via either
MPI or sockets.
-------------
As an example, LAMMPS and the ``pw.x`` command from Quantum Espresso (a
suite of quantum DFT codes), can work together via the MDI library to
perform an ab initio MD (AIMD) simulation, where LAMMPS runs an MD
simulation and sends a message each timestep to ``pw.x`` asking it to
compute quantum forces on the current configuration of atoms. Here is
how the 2 codes are launched to communicate by MPI:
Currently there are two quantum DFT codes which have direct MDI
support, `Quantum ESPRESSO (QE) <https://www.quantum-espresso.org/>`_
and `INQ <https://qsg.llnl.gov/node/101.html>`_. There are also
several QM codes which have indirect support through QCEngine or i-PI.
The former means they require a wrapper program (QCEngine) with MDI
support which writes/reads files to pass data to the quantum code
itself. The list of QCEngine-supported and i-PI-supported quantum
codes is on the `MDI webpage
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_.
Here is how to build QE as a ``pw.x`` executable which can be
used in stand-alone mode:
.. code-block:: bash
% mpirun -np 2 lmp_mpi -mdi "-role DRIVER -name d -method MPI" \
-in in.aimd : -np 16 pw.x -in qe.in -mdi "-role ENGINE -name e -method MPI"
% git clone --branch mdi_plugin https://github.com/MolSSI-MDI/q-e.git <base_path>/q-e
% build the executable pw.x, following the `QE build guide <https://gitlab.com/QEF/q-e/-/wikis/Developers/CMake-build-system>`_
In this case LAMMPS runs on 2 processors (MPI tasks), ``pw.x`` runs on 16
processors.
Here is how the 2 codes are launched to communicate by sockets:
Here is how to build QE as a shared library which can be used in plugin mode,
which results in a libqemdi.so file in <base_path>/q-e/MDI/src:
.. code-block:: bash
% mpirun -np 2 lmp_mpi -mdi "-role DRIVER -name d -method TCP -port 8021" -in in.aimd
% mpirun -np 16 pw.x -in qe.in -mdi "-role ENGINE -name e -method TCP -port 8021 -hostname localhost"
% git clone --branch mdi_plugin https://github.com/MolSSI-MDI/q-e.git <base_path>/q-e
% cd <base_path>/q-e
% ./configure --enable-parallel --enable-openmp --enable-shared FFLAGS="-fPIC" FCFLAGS="-fPIC" CFLAGS="-fPIC" foxflags="-fPIC" try_foxflags="-fPIC"
% make -j 4 mdi
These commands could be issued in different windows on a desktop
machine. Or in the same window, if the first command is ended with
"&" so as to run in the background. If "localhost" is replaced by an
IP address, ``pw.x`` could be run on another machine on the same network, or
even on another machine across the country.
INQ cannot be built as a stand-alone code; it is by design a library.
Here is how to build INQ as a shared library which can be used in
plugin mode, which results in a libinqmdi.so file in
<base_path>/inq/build/examples:
After both codes initialize themselves to model the same system, this is
what occurs each timestep:
.. code-block:: bash
* LAMMPS sends a ">COORDS" message to ``pw.x`` with a 3*N vector of current atom coords
* ``pw.x`` receives the message/coords and computes quantum forces on all the atoms
* LAMMPS sends a "<FORCES" message to ``pw.x`` and waits for the result
* ``pw.x`` receives the message (after its computation finishes) and sends a 3*N vector of forces
* LAMMPS receives the forces and time integrates to complete a single timestep
-------------
Examples scripts for using LAMMPS as an MDI engine are in the
examples/mdi directory. See the README file in that directory for
instructions on how to run the examples.
.. note::
Work is underway to add commands that allow LAMMPS to be used as an
MDI driver, e.g. for the AIMD example discussed above. Example
scripts for this usage mode will be added the same directory when
available.
If LAMMPS is used as a stand-alone engine it should set up the system
it will be modeling in its input script, then invoke the
:doc:`mdi/engine <mdi_engine>` command. This will put LAMMPS into
"engine mode" where it waits for messages and data from the driver.
When the driver sends an "EXIT" command, LAMMPS will exit engine mode
and the input script will continue.
If LAMMPS is used as a plugin engine it operates the same way, except
that the driver will pass LAMMPS an input script to initialize itself.
Upon receiving the "EXIT" command, LAMMPS will exit engine mode and the
input script will continue. After finishing execution of the input
script, the instance of LAMMPS will be destroyed.
LAMMPS supports the full set of MD-appropriate engine commands defined
by the MDI library. See the :doc:`mdi/engine <mdi_engine>` page for
a list of these.
If those commands are not sufficient for a user-developed driver to use
LAMMPS as an engine, then new commands can be easily added. See these
two files which implement the definition of MDI commands and the logic
for responding to them:
* src/MDI/mdi_engine.cpp
* src/MDI/fix_mdi_engine.cpp
% git clone --branch mdi --recurse-submodules https://gitlab.com/taylor-a-barnes/inq.git <base_path>/inq
% cd <base_path>/inq
% mkdir -p build
% cd build
% ../configure --prefix=<install_path>/install
% make -j 4
% make install

View File

@ -79,6 +79,10 @@ This data can be extracted and parsed from a log file using python with:
.. code-block:: python
import re, yaml
try:
from yaml import CSafeLoader as Loader, CSafeDumper as Dumper
except ImportError:
from yaml import SafeLoader as Loader, SafeDumper as Dumper
docs = ""
with open("log.lammps") as f:
@ -86,7 +90,7 @@ This data can be extracted and parsed from a log file using python with:
m = re.search(r"^(keywords:.*$|data:$|---$|\.\.\.$| - \[.*\]$)", line)
if m: docs += m.group(0) + '\n'
thermo = list(yaml.load_all(docs, Loader=yaml.SafeLoader))
thermo = list(yaml.load_all(docs, Loader=Loader))
print("Number of runs: ", len(thermo))
print(thermo[1]['keywords'][4], ' = ', thermo[1]['data'][2][4])

View File

@ -5,7 +5,7 @@ LAMMPS can be downloaded, built, and configured for OS X on a Mac with
`Homebrew <homebrew_>`_. (Alternatively, see the install instructions for
:doc:`Download an executable via Conda <Install_conda>`.) The following LAMMPS
packages are unavailable at this time because of additional needs not yet met:
GPU, KOKKOS, LATTE, MSCG, MESSAGE, MPIIO POEMS VORONOI.
GPU, KOKKOS, LATTE, MSCG, MPIIO, POEMS, VORONOI.
After installing Homebrew, you can install LAMMPS on your system with
the following commands:

View File

@ -72,7 +72,6 @@ page gives those details.
* :ref:`MDI <PKG-MDI>`
* :ref:`MEAM <PKG-MEAM>`
* :ref:`MESONT <PKG-MESONT>`
* :ref:`MESSAGE <PKG-MESSAGE>`
* :ref:`MGPT <PKG-MGPT>`
* :ref:`MISC <PKG-MISC>`
* :ref:`ML-HDNNP <PKG-ML-HDNNP>`
@ -1408,17 +1407,25 @@ MDI package
**Contents:**
A LAMMPS command and fix to allow client-server coupling of LAMMPS to
other atomic or molecular simulation codes via the `MolSSI Driver Interface
A LAMMPS command and fixes to allow client-server coupling of LAMMPS
to other atomic or molecular simulation codes or materials modeling
workflows via the `MolSSI Driver Interface
(MDI) library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_.
**Author:** Taylor Barnes - MolSSI, taylor.a.barnes at gmail.com
**Install:**
This package has :ref:`specific installation instructions <mdi>` on
the :doc:`Build extras <Build_extras>` page.
**Supporting info:**
* src/MDI/README
* :doc:`mdi/engine <mdi_engine>`
* :doc:`fix mdi/engine <fix_mdi_engine>`
* lib/mdi/README
* :doc:`Howto MDI <Howto_mdi>`
* :doc:`mdi <mdi>`
* :doc:`fix mdi/aimd <fix_mdi_aimd>`
* examples/PACKAGES/mdi
----------
@ -1495,32 +1502,6 @@ Philipp Kloza (U Cambridge)
----------
.. _PKG-MESSAGE:
MESSAGE package
---------------
**Contents:**
Commands to use LAMMPS as either a client or server and couple it to
another application.
**Install:**
This package has :ref:`specific installation instructions <message>` on the :doc:`Build extras <Build_extras>` page.
**Supporting info:**
* src/MESSAGE: filenames -> commands
* lib/message/README
* :doc:`message <message>`
* :doc:`fix client/md <fix_client_md>`
* :doc:`server md <server_md>`
* :doc:`server mc <server_mc>`
* examples/message
----------
.. _PKG-MGPT:
MGPT package

View File

@ -244,7 +244,7 @@ whether an extra library is needed to build and use the package:
- n/a
- no
* - :ref:`MDI <PKG-MDI>`
- client-server coupling
- client-server code coupling
- :doc:`MDI Howto <Howto_mdi>`
- PACKAGES/mdi
- ext
@ -258,11 +258,6 @@ whether an extra library is needed to build and use the package:
- pair styles :doc:`mesont/tpm <pair_mesont_tpm>`, :doc:`mesocnt <pair_mesocnt>`
- PACKAGES/mesont
- int
* - :ref:`MESSAGE <PKG-MESSAGE>`
- client/server messaging
- :doc:`message <message>`
- message
- int
* - :ref:`MGPT <PKG-MGPT>`
- fast MGPT multi-ion potentials
- :doc:`pair_style mgpt <pair_mgpt>`

View File

@ -226,15 +226,6 @@ other executable(s) perform an MPI_Comm_split() with their own colors
to shrink the MPI_COMM_WORLD communication to be the subset of
processors they are actually running on.
Currently, this is only used in LAMMPS to perform client/server
messaging with another application. LAMMPS can act as either a client
or server (or both). More details are given on the :doc:`Howto client/server <Howto_client_server>` doc page.
Specifically, this refers to the "mpi/one" mode of messaging provided
by the :doc:`message <message>` command and the CSlib library LAMMPS
links with from the lib/message directory. See the
:doc:`message <message>` command for more details.
----------
.. _cite:

View File

@ -59,8 +59,7 @@ Commands
lattice
log
mass
mdi_engine
message
mdi
min_modify
min_spin
min_style
@ -96,9 +95,6 @@ Commands
restart
run
run_style
server
server_mc
server_md
set
shell
special_bonds

View File

@ -208,7 +208,8 @@ The individual style names on the :doc:`Commands compute <Commands_compute>` pag
* :doc:`erotate/sphere/atom <compute_erotate_sphere_atom>` - rotational energy for each spherical particle
* :doc:`event/displace <compute_event_displace>` - detect event on atom displacement
* :doc:`fabric <compute_fabric>` - calculates fabric tensors from pair interactions
* :doc:`fep <compute_fep>` -
* :doc:`fep <compute_fep>` - compute free energies for alchemical transformation from perturbation theory
* :doc:`fep/ta <compute_fep_ta>` - compute free energies for a test area perturbation
* :doc:`force/tally <compute_tally>` - force between two groups of atoms via the tally callback mechanism
* :doc:`fragment/atom <compute_cluster_atom>` - fragment ID for each atom
* :doc:`global/atom <compute_global_atom>` -

View File

@ -0,0 +1,99 @@
.. index:: compute fep/ta
compute fep/ta command
======================
Syntax
""""""
.. parsed-literal::
compute ID group-ID fep/ta temp plane scale_factor keyword value ...
* ID, group-ID are documented in the :doc:`compute <compute>` command
* fep/ta = name of this compute command
* temp = external temperature (as specified for constant-temperature run)
* plane = *xy* or *xz* or *yz*
* scale_factor = multiplicative factor for change in plane area
* zero or more keyword/value pairs may be appended
* keyword = *tail*
.. parsed-literal::
*tail* value = *no* or *yes*
*no* = ignore tail correction to pair energies (usually small in fep)
*yes* = include tail correction to pair energies
Examples
""""""""
.. code-block:: LAMMPS
compute 1 all fep/ta 298 xy 1.0005
Description
"""""""""""
Define a computation that calculates the change in the free energy due
to a test-area (TA) perturbation :ref:`(Gloor) <Gloor>`. The test-area
approach can be used to determine the interfacial tension of the system
in a single simulation:
.. math::
\gamma = \lim_{\Delta \mathcal{A} \to 0} \left( \frac{\Delta A_{0 \to 1 }}{\Delta \mathcal{A}}\right)_{N,V,T}
= - \frac{kT}{\Delta \mathcal{A}} \ln \left< \exp(-(U_1 - U_0)/kT) \right>_0
During the perturbation, both axes of *plane* are scaled by a factor of
:math:`\sqrt{scale\_factor}`, while the other axis is divided by
*scale_factor* so that the overall volume of the system is maintained.
The *tail* keyword controls the calculation of the tail correction to
"van der Waals" pair energies beyond the cutoff, if this has been
activated via the :doc:`pair_modify <pair_modify>` command. If the
perturbation is small, the tail contribution to the energy difference
between the reference and perturbed systems should be negligible.
----------
Output info
"""""""""""
This compute calculates a global vector of length 3 which contains the
energy difference ( :math:`U_1-U_0` ) as c_ID[1], the Boltzmann factor
:math:`\exp(-(U_1-U_0)/kT)` as c_ID[2], and the change in the *plane*
area :math:`\Delta \mathcal{A}` as c_ID[3]. :math:`U_1` is the potential
energy of the perturbed state and :math:`U_0` is the potential energy of
the reference state. The energies include kspace terms if these are
used in the simulation.
These output results can be used by any command that uses a global
scalar or vector from a compute as input. See the :doc:`Howto output
<Howto_output>` page for an overview of LAMMPS output options. For
example, the computed values can be averaged using :doc:`fix ave/time
<fix_ave_time>`.
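As a post-processing sketch, the time-averaged Boltzmann factor and area
change can be converted into a surface tension estimate using the
formula above. The file name, column layout, and :math:`kT` value below
are assumptions; adapt them to your own :doc:`fix ave/time
<fix_ave_time>` output and units.

.. code-block:: python

   # sketch: estimate the surface tension from time-averaged compute fep/ta output
   # (file name and column layout are assumptions; adapt to your fix ave/time setup)
   import math

   kT = 0.5922          # k_B*T in kcal/mol for T = 298 K with real units; adjust to your units
   boltz_sum = 0.0
   area_sum = 0.0
   nsamples = 0

   with open("avg.fep_ta.dat") as f:
       for line in f:
           if line.startswith("#"):
               continue
           cols = line.split()
           boltz_sum += float(cols[1])   # time-averaged Boltzmann factor, c_ID[2]
           area_sum += float(cols[2])    # time-averaged change in plane area, c_ID[3]
           nsamples += 1

   boltz = boltz_sum / nsamples
   dA = area_sum / nsamples
   gamma = -kT / dA * math.log(boltz)    # gamma = -kT/dA * ln < exp(-(U_1-U_0)/kT) >
   print("estimated surface tension:", gamma)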
Restrictions
""""""""""""
Constraints, like fix shake, may lead to incorrect values for the energy difference.
This compute is part of the FEP package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`compute fep <compute_fep>`
Default
"""""""
The option defaults are *tail* = *no*\ .
----------
.. _Gloor:
**(Gloor)** Gloor, J Chem Phys, 123, 134703 (2005)

View File

@ -36,7 +36,7 @@ Syntax
* ID = user-assigned name for the dump
* group-ID = ID of the group of atoms to be dumped
* style = *atom* or *atom/gz* or *atom/zstd or *atom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *dcd* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *xyz/mpiio*
* style = *atom* or *atom/gz* or *atom/zstd* or *atom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *dcd* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *xyz/mpiio* or *yaml*
* N = dump every this many timesteps
* file = name of file to write dump info to
* args = list of arguments for a particular style
@ -68,8 +68,9 @@ Syntax
*xyz/gz* args = none
*xyz/zstd* args = none
*xyz/mpiio* args = none
*yaml* args = same as *custom* args, see below
* *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *netcdf* or *netcdf/mpiio* args = list of atom attributes
* *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *netcdf* or *netcdf/mpiio* or *yaml* args = list of atom attributes
.. parsed-literal::
@ -386,6 +387,70 @@ from using the (numerical) atom type to an element name (or some
other label). This will help many visualization programs to guess
bonds and colors.
Dump style *yaml* has the same command syntax as style *custom* and
writes YAML format files that can be easily parsed by a variety of data
processing tools and programming languages. Each timestep will be
written as a YAML "document" (i.e. starts with "---" and ends with
"..."). The style supports writing one file per timestep through the
"\*" wildcard but not multi-processor outputs with the "%" token in the
filename. In addition to per-atom data, :doc:`thermo <thermo>` data can
be included in the *yaml* style dump file using the :doc:`dump_modify
thermo yes <dump_modify>` command. The data included in the dump file uses the
"thermo" tag and is otherwise identical to data specified by the
:doc:`thermo_style <thermo_style>` command.
Below is an example for a YAML format dump created by the following commands.
.. code-block:: LAMMPS
dump out all yaml 100 dump.yaml id type x y z vx vy vz ix iy iz
dump_modify out time yes units yes thermo yes format 1 %5d format "% 10.6e"
The tags "time", "units", and "thermo" are optional and enabled by the
dump_modify command. The list under the "box" tag has 3 lines for
orthogonal boxes and 4 lines for triclinic boxes, where the first 3 are
the box boundaries and the 4th contains the three tilt factors (xy, xz, yz). The
"thermo" data follows the format of the *yaml* thermo style. The
"keywords" tag lists the per-atom properties contained in the "data"
columns, which contain a list with one line per atom. The keywords may
be renamed using the dump_modify command in the same way as for the
*custom* dump style.
.. code-block:: yaml
---
timestep: 0
units: lj
time: 0
natoms: 4000
boundary: [ p, p, p, p, p, p, ]
thermo:
- keywords: [ Step, Temp, E_pair, E_mol, TotEng, Press, ]
- data: [ 0, 0, -27093.472213010766, 0, 0, 0, ]
box:
- [ 0, 16.795961913825074 ]
- [ 0, 16.795961913825074 ]
- [ 0, 16.795961913825074 ]
- [ 0, 0, 0 ]
keywords: [ id, type, x, y, z, vx, vy, vz, ix, iy, iz, ]
data:
- [ 1 , 1 , 0.000000e+00 , 0.000000e+00 , 0.000000e+00 , -1.841579e-01 , -9.710036e-01 , -2.934617e+00 , 0 , 0 , 0, ]
- [ 2 , 1 , 8.397981e-01 , 8.397981e-01 , 0.000000e+00 , -1.799591e+00 , 2.127197e+00 , 2.298572e+00 , 0 , 0 , 0, ]
- [ 3 , 1 , 8.397981e-01 , 0.000000e+00 , 8.397981e-01 , -1.807682e+00 , -9.585130e-01 , 1.605884e+00 , 0 , 0 , 0, ]
[...]
...
---
timestep: 100
units: lj
time: 0.5
[...]
...
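Such a dump file can be read back with any YAML parser. The following
sketch (assuming the file name ``dump.yaml`` from the example above)
loads all snapshots with Python and accesses the per-atom and thermo
data:

.. code-block:: python

   # sketch: read a YAML format dump file; each snapshot is a separate YAML document
   import yaml

   with open("dump.yaml") as f:
       snapshots = list(yaml.safe_load_all(f))

   for snap in snapshots:
       print("timestep:", snap["timestep"], " natoms:", snap["natoms"])
       # map the per-atom columns to their keywords for the first atom
       first_atom = dict(zip(snap["keywords"], snap["data"][0]))
       print("  first atom:", first_atom)
       # thermo data is present only with dump_modify thermo yes
       if "thermo" in snap:
           thermo = dict(zip(snap["thermo"][0]["keywords"], snap["thermo"][1]["data"]))
           print("  thermo:", thermo)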
----------
Note that *atom*, *custom*, *dcd*, *xtc*, and *xyz* style dump files
can be read directly by `VMD <http://www.ks.uiuc.edu/Research/vmd>`_, a
popular molecular viewing program.
@ -427,9 +492,9 @@ If a "%" character appears in the filename, then each of P processors
writes a portion of the dump file, and the "%" character is replaced
with the processor ID from 0 to P-1. For example, tmp.dump.% becomes
tmp.dump.0, tmp.dump.1, ... tmp.dump.P-1, etc. This creates smaller
files and can be a fast mode of output on parallel machines that
support parallel I/O for output. This option is not available for the
*dcd*, *xtc*, and *xyz* styles.
files and can be a fast mode of output on parallel machines that support
parallel I/O for output. This option is **not** available for the *dcd*,
*xtc*, *xyz*, and *yaml* styles.
By default, P = the number of processors meaning one file per
processor, but P can be set to a smaller value via the *nfile* or
@ -722,8 +787,8 @@ are part of the MPIIO package. They are only enabled if LAMMPS was
built with that package. See the :doc:`Build package <Build_package>`
doc page for more info.
The *xtc* and *dcd* styles are part of the EXTRA-DUMP package. They
are only enabled if LAMMPS was built with that package. See the
The *xtc*, *dcd* and *yaml* styles are part of the EXTRA-DUMP package.
They are only enabled if LAMMPS was built with that package. See the
:doc:`Build package <Build_package>` page for more info.
Related commands

View File

@ -26,6 +26,10 @@ Syntax
N = index of frame written upon first dump
*balance* arg = *yes* or *no*
*buffer* arg = *yes* or *no*
*colname* values = ID string, or *default*
string = new column header name
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a custom dump keyword or reference to compute, fix, property or variable.
*delay* arg = Dstep
Dstep = delay output until this timestep
*element* args = E1 E2 ... EN, where N = # of atom types
@ -40,9 +44,10 @@ Syntax
Np = write one file for every this many processors
*first* arg = *yes* or *no*
*flush* arg = *yes* or *no*
*format* args = *line* string, *int* string, *float* string, M string, or *none*
*format* args = *line* string, *int* string, *float* string, ID string, or *none*
string = C-style format string
M = integer from 1 to N, where N = # of per-atom quantities being output
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a custom dump keyword or reference to compute, fix, property or variable.
*header* arg = *yes* or *no*
*yes* to write the header
*no* to not write the header
@ -375,6 +380,29 @@ performed with dump style *xtc*\ .
----------
The *colname* keyword can be used to change the default header keyword
for dump styles: *atom*, *custom*, and *cfg* and their compressed, ADIOS,
and MPIIO variants. The setting for *ID string* replaces the default
text with the provided string. *ID* can be a positive integer when it
represents the column number counting from the left, a negative integer
when it represents the column number from the right (i.e. -1 is the last
column/keyword), or a custom dump keyword (or compute, fix, property, or
variable reference) and then it replaces the string for that specific
keyword. For *atom* dump styles only the keywords "id", "type", "x",
"y", "z", "ix", "iy", "iz" can be accessed via string regardless of
whether scaled or unwrapped coordinates were enabled or disabled, and
it always assumes 8 columns for indexing regardless of whether image
flags are enabled or not. For dump style *cfg* only the "auxiliary"
keywords (6th or later keyword) may be changed and the column indexing
considers only them (i.e. the 6th keyword is the 1st column).
The *colname* keyword can be used multiple times. If multiple *colname*
settings refer to the same keyword, the last setting has precedence. A
setting of *default* clears all previous settings, reverting all values
to their default names.
----------
The *format* keyword can be used to change the default numeric format output
by the text-based dump styles: *atom*, *local*, *custom*, *cfg*, and
*xyz* styles, and their MPIIO variants. Only the *line* or *none*
@ -684,8 +712,8 @@ run, this option is ignored since the output is already balanced.
----------
The *thermo* keyword only applies the dump *netcdf* style. It
triggers writing of :doc:`thermo <thermo>` information to the dump file
The *thermo* keyword only applies to the dump styles *netcdf* and *yaml*.
It triggers writing of :doc:`thermo <thermo>` information to the dump file
alongside per-atom data. The values included in the dump file are
identical to the values specified by :doc:`thermo_style <thermo_style>`.

View File

@ -194,7 +194,6 @@ accelerated styles exist.
* :doc:`bond/swap <fix_bond_swap>` - Monte Carlo bond swapping
* :doc:`box/relax <fix_box_relax>` - relax box size during energy minimization
* :doc:`charge/regulation <fix_charge_regulation>` - Monte Carlo sampling of charge regulation
* :doc:`client/md <fix_client_md>` - MD client for client/server simulations
* :doc:`cmap <fix_cmap>` - enables CMAP cross-terms of the CHARMM force field
* :doc:`colvars <fix_colvars>` - interface to the collective variables "Colvars" library
* :doc:`controller <fix_controller>` - apply control loop feedback mechanism
@ -244,7 +243,7 @@ accelerated styles exist.
* :doc:`lb/viscous <fix_lb_viscous>` -
* :doc:`lineforce <fix_lineforce>` - constrain atoms to move in a line
* :doc:`manifoldforce <fix_manifoldforce>` - restrain atoms to a manifold during minimization
* :doc:`mdi/engine <fix_mdi_engine>` - connect LAMMPS to external programs via the MolSSI Driver Interface (MDI)
* :doc:`mdi/aimd <fix_mdi_aimd>` - LAMMPS operates as driver for ab initio MD (AIMD) via the MolSSI Driver Interface (MDI)
* :doc:`meso/move <fix_meso_move>` - move mesoscopic SPH/SDPD particles in a prescribed fashion
* :doc:`mol/swap <fix_mol_swap>` - Monte Carlo atom type swapping with a molecule
* :doc:`momentum <fix_momentum>` - zero the linear and/or angular momentum of a group of atoms

View File

@ -1,118 +0,0 @@
.. index:: fix client/md
fix client/md command
=====================
Syntax
""""""
.. parsed-literal::
fix ID group-ID client/md
* ID, group-ID are documented in :doc:`fix <fix>` command
* client/md = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all client/md
Description
"""""""""""
This fix style enables LAMMPS to run as a "client" code and
communicate each timestep with a separate "server" code to perform an
MD simulation together.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When using this fix, LAMMPS (as the client code) passes the current
coordinates of all particles to the server code each timestep, which
computes their interaction, and returns the energy, forces, and virial
for the interacting particles to LAMMPS, so it can complete the
timestep.
Note that the server code can be a quantum code, or another classical
MD code which encodes a force field (pair_style in LAMMPS lingo) which
LAMMPS does not have. In the quantum case, this fix is a mechanism
for running *ab initio* MD with quantum forces.
The group associated with this fix is ignored.
The protocol and :doc:`units <units>` for message format and content
that LAMMPS exchanges with the server code is defined on the
:doc:`server md <server_md>` doc page.
Note that when using LAMMPS as an MD client, your LAMMPS input script
should not normally contain force field commands, like a
:doc:`pair_style <pair_style>`, :doc:`bond_style <bond_style>`, or
:doc:`kspace_style <kspace_style>` command. However it is possible
for a server code to only compute a portion of the full force-field,
while LAMMPS computes the remaining part. Your LAMMPS script can also
specify boundary conditions or force constraints in the usual way,
which will be added to the per-atom forces returned by the server
code.
See the examples/message directory for example scripts where LAMMPS is both
the "client" and/or "server" code for this kind of client/server MD
simulation. The examples/message/README file explains how to launch
LAMMPS and another code in tandem to perform a coupled simulation.
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`.
The :doc:`fix_modify <fix_modify>` *energy* option is supported by
this fix to add the potential energy set by the server application to
the global potential energy of the system as part of
:doc:`thermodynamic output <thermo_style>`. The default setting for
this fix is :doc:`fix_modify energy yes <fix_modify>`.
The :doc:`fix_modify <fix_modify>` *virial* option is supported by
this fix to add the contribution computed by the server application to
the global pressure of the system via the :doc:`compute pressure
<compute_pressure>` command. This can be accessed by
:doc:`thermodynamic output <thermo_style>`. The default setting for
this fix is :doc:`fix_modify virial yes <fix_modify>`.
This fix computes a global scalar which can be accessed by various
:doc:`output commands <Howto_output>`. The scalar is the potential
energy discussed above. The scalar value calculated by this fix is
"extensive".
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
This fix is not invoked during :doc:`energy minimization <minimize>`.
Restrictions
""""""""""""
This fix is part of the MESSAGE package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to setup and shut down the messaging
protocol with the server code.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`server <server>`
Default
"""""""
none

doc/src/fix_mdi_aimd.rst Normal file
View File

@ -0,0 +1,103 @@
.. index:: fix mdi/aimd
fix mdi/aimd command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/aimd keyword
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/aimd = style name of this fix command
* optional keyword = *plugin*
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/aimd
fix 1 all mdi/aimd plugin
Description
"""""""""""
This command enables LAMMPS to act as a client with another server
code to couple the two codes together to perform ab initio MD (AIMD)
simulations.
More specifically, this command causes LAMMPS to begin using the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
to run as an MDI driver (client), which sends MDI commands to an
external MDI engine code (server) which in the case of AIMD is a
quantum mechanics (QM) code, or could be LAMMPS itself, acting as a
surrogate for a QM code. See the :doc:`Howto mdi <Howto_mdi>` page
for more information about how LAMMPS can operate as either an MDI
driver or engine.
The examples/mdi directory contains input scripts performing AIMD in
this manner with LAMMPS acting as both a driver and an engine
(surrogate for a QM code). The examples/mdi/README file explains how
to launch both driver and engine codes so that they communicate using
the MDI library via either MPI or sockets. Any QM code that supports
MDI could be used in place of LAMMPS acting as a QM surrogate. See
the :doc:`Howto mdi <Howto_mdi>` page for a current list (March 2022)
of such QM codes.
The engine code can run either as a stand-alone code, launched at the
same time as LAMMPS, or as a plugin library. See the :doc:`mdi plugin
<mdi>` command for how to trigger LAMMPS to load the plugin library.
Again, the examples/mdi/README file explains how to launch both driver
and engine codes so that the engine is used in plugin mode.
To use this fix with a plugin engine, you must specify the
*plugin* keyword as the last argument, as illustrated above.
.. note::
As of April 2022, the *plugin* keyword is needed. In a future
version of the MDI library it will no longer be necessary.
----------
This fix performs the timestepping portion of an AIMD simulation.
Both LAMMPS and the engine code (QM or LAMMPS) should define the same
system (simulation box, atoms and their types) in their respective
input scripts. LAMMPS then begins its timestepping.
At the point in each timestep when LAMMPS needs the force on each
atom, it communicates with the engine code. It sends the current
simulation box size and shape (if they change dynamically, e.g. during
an NPT simulation), and the current atom coordinates. The engine code
computes quantum forces on each atom and returns them to LAMMPS. If
LAMMPS also needs the system energy and/or virial, it requests those
values from the engine code as well.
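The sketch below shows what a minimal driver-side LAMMPS input using
this fix might look like. The data file name, timestep, and run length
are hypothetical placeholders; see the scripts in examples/mdi for
complete, tested inputs.
.. code-block:: LAMMPS
# hedged sketch of an AIMD driver input: LAMMPS performs the
# timestepping, the MDI engine supplies forces (and energy/virial)
units        metal
atom_style   atomic
read_data    data.aimd          # hypothetical file; must define the same system as the engine
fix          qm all mdi/aimd    # exchange coords/forces with the engine each step
timestep     0.001
thermo       10
run          100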
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
To use LAMMPS as an MDI driver in conjunction with other MDI-enabled
atomistic codes, the :doc:`units <units>` command should be used to
specify *real* or *metal* units. This will ensure the correct unit
conversions between LAMMPS and MDI units, which the other codes will
also perform in their preferred units.
LAMMPS can also be used as an MDI driver in other unit choices it
supports, e.g. *lj*, but then no unit conversion is performed.
Related commands
""""""""""""""""
:doc:`mdi engine <mdi>`
Default
"""""""
none

View File

@ -1,59 +0,0 @@
.. index:: fix mdi/engine
fix mdi/engine command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/engine
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/engine = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/engine
Description
"""""""""""
This fix is used along with the :doc:`mdi/engine <mdi_engine>` command
to enable LAMMPS to use the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_ to run as
an MDI engine. The fix provides hooks that enable MDI driver codes to
communicate with LAMMPS at various points within a LAMMPS timestep.
It is not generally necessary to add this fix to a LAMMPS input file,
even when using the :doc:`mdi/engine <mdi_engine>` command. If the
:doc:`mdi/engine <mdi_engine>` command is executed and this fix is not
present, it will automatically be added and applied as a new fix for
all atoms for the duration of the command. Thus it is only necessary
to add this fix to an input file when you want to modify the group-ID
or the ordering of this fix relative to other fixes in the input script.
For more information about running LAMMPS as an MDI engine, see the
:doc:`mdi/engine <mdi_engine>` command and the :doc:`Howto mdi
<Howto_mdi>` doc page.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`mdi/engine <mdi_engine>`
Default
"""""""
none

View File

@ -29,10 +29,10 @@ Description
"""""""""""
Perform targeted molecular dynamics (TMD) on a group of atoms. A
holonomic constraint is used to force the atoms to move towards (or
away from) the target configuration. The parameter "rho" is
monotonically decreased (or increased) from its initial value to
rho_final at the end of the run.
holonomic constraint is used to force the atoms to move towards (or away
from) the target configuration. The parameter "rho" is monotonically
decreased (or increased) from its initial value to rho_final at the end
of the run.
Rho has distance units and is a measure of the root-mean-squared
distance (RMSD) between the current configuration of the atoms in the
@ -55,22 +55,25 @@ a .gz suffix). The format of the target file1 is as follows:
The first 3 lines may or may not be needed, depending on the format of
the atoms to follow. If image flags are included with the atoms, the
first 3 lo/hi lines must appear in the file. If image flags are not
included, the first 3 lines should not appear. The 3 lines contain the
first 3 lo/hi lines **must** appear in the file. If image flags are not
included, the first 3 lines **must not** appear. The 3 lines contain the
simulation box dimensions for the atom coordinates, in the same format
as in a LAMMPS data file (see the :doc:`read_data <read_data>` command).
The remaining lines each contain an atom ID and its target x,y,z
coordinates. The atom lines (all or none of them) can optionally be
followed by 3 integer values: nx,ny,nz. For periodic dimensions, they
followed by 3 integer values: nx,ny,nz. For periodic dimensions, they
specify which image of the box the atom is considered to be in, i.e. a
value of N (positive or negative) means add N times the box length to
the coordinate to get the true value.
the coordinate to get the true value. Those 3 integers must be given
either for all atoms or for none of them.
The atom lines can be listed in any order, but every atom in the group
must be listed in the file. Atoms not in the fix group may also be
listed; they will be ignored.
Comments starting with '#' and empty lines may be included as well.
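As an illustration, a small hypothetical target file with image flags
included might look as follows; the box bounds and coordinates are
arbitrary example values.
.. parsed-literal::
# target configuration (example values)
0.0 16.796 xlo xhi
0.0 16.796 ylo yhi
0.0 16.796 zlo zhi
1  1.2597 0.0000 3.7791  0 0 0
2  4.1990 2.0995 0.8398  0 0 1
3  6.7184 0.0000 5.0388  1 0 0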
TMD statistics are written to file2 every N timesteps, unless N is
specified as 0, which means no statistics.

View File

@ -154,7 +154,7 @@ of the electronic specific heat, but ignored temperature dependencies
of any of the other parameters. See more discussion below for fix
ttm/mod.
..note::
.. note::
These fixes do not perform time integration of the atoms in the fix
group, they only rescale their velocities. Thus a time integration
@ -164,7 +164,7 @@ ttm/mod.
fix, e.g. :doc:`fix nvt <fix_nh>` or :doc:`fix langevin
<fix_langevin>`.
..note::
.. note::
These fixes require use of an orthogonal 3d simulation box with
periodic boundary conditions in all dimensions. They also require
@ -305,8 +305,8 @@ is calculated as
where lambda is the electron mean free path (see :ref:`(Norman) <Norman>`,
:ref:`(Pisarev) <Pisarev>`)
The fix ttm/mod parameter file *init_file* has the following syntax/
Every line with the odd number is considered as a comment and
The fix ttm/mod parameter file *init_file* has the following syntax.
Every line with an odd number is considered as a comment and
ignored. The lines with the even numbers are treated as follows:
.. parsed-literal::

312
doc/src/mdi.rst Normal file
View File

@ -0,0 +1,312 @@
.. index:: mdi
mdi command
==================
Syntax
""""""
.. parsed-literal::
mdi mode args
* mode = *engine* or *plugin*
.. parsed-literal::
*engine* args = none
*plugin* args = name keyword value keyword value
name = name of plugin library, e.g. lammps means a liblammps.so library will be loaded
keywords = *mdi* or *infile* or *extra* or *command*
*mdi* value = args passed to MDI for driver to operate with plugins
*infile* value = filename the engine will read at start-up
*extra* value = additional command-line args to pass to engine library when loaded
*command* value = a LAMMPS input script command to execute
Examples
""""""""
.. code-block:: LAMMPS
mdi engine
mdi plugin lammps mdi "-role ENGINE -name lammps -method LINK" &
infile in.aimd.engine extra "-log log.aimd.engine.plugin" &
command "run 5"
Description
"""""""""""
This command implements two high-level operations within LAMMPS to use
the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_ for
coupling to other codes in a client/server protocol.
The *engine* mode enables LAMMPS to act as an MDI engine (server),
responding to requests from an MDI driver (client) code.
The *plugin* mode enables LAMMPS to act as an MDI driver (client) and
load the MDI engine (server) code as a plugin library. Alternatively,
the MDI engine can be a stand-alone code, launched separately from
LAMMPS, in which case the mdi plugin command is not used.
See the :doc:`Howto mdi <Howto_mdi>` doc page for a discussion of all
the different ways two or more codes can interact via MDI.
The examples/mdi directory has examples which use LAMMPS in 4
different modes: as a driver using an engine as either a stand-alone
code or as a plugin, and as an engine operating as either a
stand-alone code or as a plugin. The README file in that directory
shows how to launch and couple the codes for all 4 usage modes, so
that they communicate via the MDI library using either MPI or sockets.
----------
The *mdi engine* command is used to make LAMMPS operate as an MDI
engine. It is typically used in an input script after LAMMPS has set
up the system it is going to model, consistent with what the driver
code expects. Depending on when the driver code tells the LAMMPS
engine to exit, other commands can be executed after this command, but
typically it is used at the end of a LAMMPS input script.
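As a concrete illustration, a minimal engine-side input might look
like the following sketch. The Lennard-Jones system here is an
arbitrary example, not a requirement; see examples/mdi for complete,
tested inputs.
.. code-block:: LAMMPS
# hedged sketch: set up a system, then turn control over to the MDI driver
units        lj
atom_style   atomic
atom_modify  map yes            # atom map so per-atom data can be addressed by ID
lattice      fcc 0.8442
region       box block 0 5 0 5 0 5
create_box   1 box
create_atoms 1 box
mass         1 1.0
pair_style   lj/cut 2.5
pair_coeff   1 1 1.0 1.0 2.5
mdi          engine             # respond to driver commands until EXIT is received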
To act as an MDI engine operating as an MD code (or surrogate QM
code), this is the list of standard MDI commands issued by a driver
code which LAMMPS currently recognizes. Using standard commands
defined by the MDI library means that a driver code can work
interchangeably with LAMMPS or other MD codes or with QM codes which
support the MDI standard. See more details about these commands in
the `MDI library documentation
<https://molssi-mdi.github.io/MDI_Library/html/mdi_standard.html>`_.
These commands are valid at the @DEFAULT node defined by MDI.
Commands that start with ">" mean the driver is sending information to
LAMMPS. Commands that start with "<" are requests by the driver for
LAMMPS to send it information. Commands that start with an alphabetic
letter perform actions. Commands that start with "@" are MDI "node"
commands, which are described further below.
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >CELL or <CELL
- Send/request 3 simulation box edge vectors (9 values)
* - >CELL_DISPL or <CELL_DISPL
- Send/request displacement of the simulation box from the origin (3 values)
* - >CHARGES or <CHARGES
- Send/request charge on each atom (N values)
* - >COORDS or <COORDS
- Send/request coordinates of each atom (3N values)
* - <ENERGY
- Request total energy (potential + kinetic) of the system (1 value)
* - >FORCES or <FORCES
- Send/request forces on each atom (3N values)
* - >+FORCES
- Send forces to add to each atom (3N values)
* - <LABELS
- Request string label of each atom (N values)
* - <MASSES
- Request mass of each atom (N values)
* - MD
- Perform an MD simulation for N timesteps (most recent >NSTEPS value)
* - OPTG
- Perform an energy minimization to convergence (most recent >TOLERANCE values)
* - >NATOMS or <NATOMS
- Send/request number of atoms in the system (1 value)
* - >NSTEPS
- Send number of timesteps for next MD dynamics run via MD command
* - <PE
- Request potential energy of the system (1 value)
* - <STRESS
- Request stress tensor (virial) of the system (6 values)
* - >TOLERANCE
- Send 4 tolerance parameters for next MD minimization via OPTG command
* - >TYPES or <TYPES
- Send/request the numeric type of each atom (N values)
* - >VELOCITIES or <VELOCITIES
- Send/request the velocity of each atom (3N values)
* - @INIT_MD or @INIT_OPTG
- Driver tells LAMMPS to start single-step dynamics or minimization (see below)
* - EXIT
- Driver tells LAMMPS to exit engine mode
.. note::
The <ENERGY, <FORCES, <PE, and <STRESS commands trigger LAMMPS to
compute atomic interactions for the current configuration of atoms
and size/shape of the simulation box. I.e. LAMMPS invokes its
pair, bond, angle, ..., kspace styles. If the driver is updating
the atom coordinates and/or box incrementally (as in an MD
simulation which the driver is managing), then the LAMMPS engine
will do the same, and only occasionally trigger neighbor list
builds. If the change in atom positions is large (since the
previous >COORDS command), then LAMMPS will do a more expensive
operation to migrate atoms to new processors as needed and
re-neighbor. If the >NATOMS or >TYPES commands have been sent
(since the previous >COORDS command), then LAMMPS assumes the
system is new and re-initializes an entirely new simulation.
The MD and OPTG commands perform an entire MD simulation or energy
minimization (to convergence) with no communication from the driver
until the simulation is complete. By contrast, the @INIT_MD and
@INIT_OPTG commands allow the driver to communicate with the engine at
each timestep of a dynamics run or iteration of a minimization; see
more info below.
The MD command performs a simulation using the most recent >NSTEPS
value. The OPTG command performs a minimization using the 4
convergence parameters from the most recent >TOLERANCE command. The 4
parameters sent are those used by the :doc:`minimize <minimize>`
command in LAMMPS: etol, ftol, maxiter, and maxeval.
The mdi engine command also implements the following custom MDI
commands which are LAMMPS-specific. These commands are also valid at
the @DEFAULT node defined by MDI:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >NBYTES
- Send # of datums in a subsequent command (1 value)
* - >COMMAND
- Send a LAMMPS input script command as a string (Nbytes in length)
* - >COMMANDS
- Send multiple LAMMPS input script commands as a newline-separated string (Nbytes in length)
* - >INFILE
- Send filename of an input script to execute (filename Nbytes in length)
* - <KE
- Request kinetic energy of the system (1 value)
Note that other custom commands can easily be added if these are not
sufficient to support what a user-written driver code needs. Code to
support new commands can be added to the MDI package within LAMMPS,
specifically to the src/MDI/mdi_engine.cpp file.
MDI also defines a standard mechanism for the driver to request that
an MD engine (LAMMPS) perform a dynamics simulation one step at a time
or an energy minimization one iteration at a time. This is so that
the driver can (optionally) communicate with LAMMPS at intermediate
points of the timestep or iteration by issuing MDI node commands which
start with "@".
To tell LAMMPS to run dynamics in single-step mode, the driver sends
an @INIT_MD command followed by these commands. The driver
can interact with LAMMPS at 3 node locations within each
timestep: @COORDS, @FORCES, @ENDSTEP:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - @COORDS
- Proceed to next @COORDS node = post-integrate location in LAMMPS timestep
* - @FORCES
- Proceed to next @FORCES node = post-force location in LAMMPS timestep
* - @ENDSTEP
- Proceed to next @ENDSTEP node = end-of-step location in LAMMPS timestep
* - @DEFAULT
- Exit MD simulation, return to @DEFAULT node
* - EXIT
- Driver tells LAMMPS to exit the MD simulation and engine mode
To tell LAMMPS to run an energy minimization in single-iteration mode,
the driver sends an @INIT_OPTG command followed by these commands.
The driver can interact with LAMMPS at 2 node locations within each
iteration of the minimizer: @COORDS, @FORCES:
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - @COORDS
- Proceed to next @COORDS node = min-pre-force location in LAMMPS min iteration
* - @FORCES
- Proceed to next @FORCES node = min-post-force location in LAMMPS min iteration
* - @DEFAULT
- Exit minimization, return to @DEFAULT node
* - EXIT
- Driver tells LAMMPS to exit the minimization and engine mode
While LAMMPS is at its @COORDS node, the following standard MDI
commands are supported, as documented above: >COORDS or <COORDS,
@COORDS, @FORCES, @ENDSTEP, @DEFAULT, EXIT.
While LAMMPS is at its @FORCES node, the following standard MDI
commands are supported, as documented above: <COORDS, <ENERGY, >FORCES
or >+FORCES or <FORCES, <KE, <PE, <STRESS, @COORDS, @FORCES, @ENDSTEP,
@DEFAULT, EXIT.
While LAMMPS is at its @ENDSTEP node, the following standard MDI
commands are supported, as documented above: <ENERGY, <FORCES, <KE,
<PE, <STRESS, @COORDS, @FORCES, @ENDSTEP, @DEFAULT, EXIT.
----------
The *mdi plugin* command is used to make LAMMPS operate as an MDI
driver which loads an MDI engine as a plugin library. It is typically
used in an input script after LAMMPS has set up the system it is going
to model, consistent with the engine code.
The *name* argument specifies which plugin library to load. A name
like "lammps" is converted to a filename liblammps.so. The path for
where this file is located is specified by the -plugin_path switch
within the -mdi command-line switch, which is specified when LAMMPS is
launched. See the examples/mdi/README files for examples of how this
is done.
The *mdi* keyword is required and is used as the -mdi argument passed
to the library when it is launched. The -role and -method settings
are required. The -name setting can be anything you choose. MDI
drivers and engines can query their names to verify they are values
they expect.
The *infile* keyword is also required. It is the name of an input
script which the engine will open and process. MDI will pass it as a
command-line argument to the library when it is launched. The file
typically contains settings that an MD or QM code will use for its
subsequent calculations.
The *extra* keyword is optional. It contains additional command-line
arguments which MDI will pass to the library when it is launched.
The *command* keyword is required. It specifies a LAMMPS input script
command (as a single argument in quotes if it is multiple words).
Once the plugin library is launched, LAMMPS will execute this command.
Other previously-defined commands in the input script, such as the
:doc:`fix mdi/aimd <fix_mdi_aimd>` command, should perform MDI
communication with the engine, while the specified *command* executes.
Note that if *command* is an :doc:`include <include>` command, then it
could specify a filename with multiple LAMMPS commands.
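For example, a driver input that runs AIMD with the engine loaded as a
plugin could combine the two commands along the following lines. This
is a sketch assembled from the examples above; the file name
in.aimd.engine is taken from the example at the top of this page.
.. code-block:: LAMMPS
# hedged sketch: fix mdi/aimd performs the MDI communication while the
# "run 5" passed via the *command* keyword executes
fix  qm all mdi/aimd plugin
mdi  plugin lammps mdi "-role ENGINE -name lammps -method LINK" &
infile in.aimd.engine command "run 5"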
.. note::
When the single *command* is complete, LAMMPS will send an MDI
EXIT command to the plugin engine and the plugin will be removed.
The "mdi plugin" command will then exit and the next command
(if any) in the LAMMPS input script will be processed. A subsequent
"mdi plugin" command could then load the same library plugin or
a different one if desired.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
To use LAMMPS in conjunction with other MDI-enabled atomistic codes,
the :doc:`units <units>` command should be used to specify *real* or
*metal* units. This will ensure the correct unit conversions between
LAMMPS and MDI units, which the other codes will also perform in their
preferred units.
LAMMPS can also be used as an MDI engine in other unit choices it
supports, e.g. *lj*, but then no unit conversion is performed.
Related commands
""""""""""""""""
:doc:`fix mdi/aimd <fix_mdi_aimd>`
Default
"""""""
None

View File

@ -1,88 +0,0 @@
.. index:: mdi/engine
mdi_engine command
==================
Syntax
""""""
.. parsed-literal::
mdi_engine
Description
"""""""""""
This command is used to have LAMMPS act as a server with another
client code to effectively couple the two codes together in
client/server mode.
More specifically, this command causes LAMMPS to begin using the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
to run as an MDI engine (server), responding to commands made by an
external MDI driver code (client). See the :doc:`Howto mdi
<Howto_mdi>` page for more information about how LAMMPS can work
as both an MDI driver or engine.
General information about launching codes that communicate using the
MDI Library can be found in the `corresponding page
<https://molssi-mdi.github.io/MDI_Library/html/library_page.html#library_launching_sec>`_
of the MDI Library's documentation.
----------
This command should typically be used in an input script after LAMMPS
has setup the system it is going to model in collaboration with the
driver code. Depending on how the driver code tells the LAMMPS engine
to exit, other commands can be executed after this command, but
typically it should be used at the end of the LAMMPS input script.
To act as a MD-based MDI engine, this is the list of MDI commands from
a driver code which LAMMPS currently recognizes. See more details
about these commands in the `MDI library documentation
<https://molssi-mdi.github.io/MDI_Library/html/mdi_standard.html>`_
.. NOTE: Taylor - is this the best link for this info? Can we flesh this
.. out with the full list of supported commands? Maybe the distinction
.. of what "node" the commands refer to is not needed in this table?
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >NATOMS
- Driver sends the number of atoms in the system
* - <NATOMS
- Driver requests the number of atoms in the system
* - <COORDS
- Driver requests 3*N double-precision atom coordinates
* - >FORCES
- Driver sends 3*N double-precision atom forces
* - <FORCES
- Driver requests 3*N double-precision atom forces
* - EXIT
- Driver tells the engine (LAMMPS) to exit engine mode
If these commands are not sufficient to support what a driver which
you write needs, additional commands can be defined by simply using a
new command name not in this list. Code to support the new command
needs to be added to the MDI package within LAMMPS; see its
src/MDI/mdi_engine.cpp and fix_mdi_engine.cpp files.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`fix mdi/engine <fix_mdi_engine>`
Default
"""""""
None

View File

@ -1,204 +0,0 @@
.. index:: message
message command
===============
Syntax
""""""
.. parsed-literal::
message which protocol mode arg
* which = *client* or *server* or *quit*
* protocol = *md* or *mc*
* mode = *file* or *zmq* or *mpi/one* or *mpi/two*
.. parsed-literal::
*file* arg = filename
filename = file used for message exchanges
*zmq* arg = socket-ID
socket-ID for client = localhost:5555, see description below
socket-ID for server = \*:5555, see description below
*mpi/one* arg = none
*mpi/two* arg = filename
filename = file used to establish communication between 2 MPI jobs
Examples
""""""""
.. code-block:: LAMMPS
message client md file tmp.couple
message server md file tmp.couple
message client md zmq localhost:5555
message server md zmq *:5555
message client md mpi/one
message server md mpi/one
message client md mpi/two tmp.couple
message server md mpi/two tmp.couple
message quit
Description
"""""""""""
Establish a messaging protocol between LAMMPS and another code for the
purpose of client/server coupling.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
----------
The *which* argument defines LAMMPS to be the client or the server.
As explained below the *quit* option should be used when LAMMPS is
finished as a client. It sends a message to the server to tell it to
shut down.
----------
The *protocol* argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
* md = run dynamics with another code
* mc = perform Monte Carlo moves with another code
For protocol *md*, LAMMPS can be either a client or server. See the
:doc:`server md <server_md>` page for details on the protocol.
For protocol *mc*, LAMMPS can be the server. See the :doc:`server mc <server_mc>` page for details on the protocol.
----------
The *mode* argument specifies how messages are exchanged between the
client and server codes. Both codes must use the same mode and use
consistent parameters.
For mode *file*, the 2 codes communicate via binary files. They must
use the same filename, which is actually a file prefix. Several files
with that prefix will be created and deleted as a simulation runs.
The filename can include a path. Both codes must be able to access
the path/file in a common filesystem.
For mode *zmq*, the 2 codes communicate via a socket on the server
code's machine. Support for socket messaging is provided by the
open-source `ZeroMQ library <http://zeromq.org>`_, which must be
installed on your system. The client specifies an IP address (IPv4
format) or the DNS name of the machine the server code is running on,
followed by a 4 or 5 digit port ID for the socket, separated by a colon.
E.g.
.. parsed-literal::
localhost:5555 # client and server running on same machine
192.168.1.1:5555 # server is 192.168.1.1
deptbox.uni.edu:5555 # server is deptbox.uni.edu
The server specifies "\*:5555" where "\*" represents all available
interfaces on the server's machine, and the port ID must match
what the client specifies.
.. note::
On Linux or Unix machines port IDs below 1024 are reserved to the
superuser and thus not available. Other ports may already be in
use and cannot be opened by a second process. On a Linux machine
the commands "netstat -t4an" or "ss -t4an" will list all locally
used port IDs for IPv4 addresses.
.. note::
On many machines (and sometimes on local networks) port IDs may
also be blocked by default through firewalls. In that case either
access to the required port (or a desired range of ports) has to
be selectively enabled or the firewall disabled (the latter is
usually not a good idea unless you are on a (small) local network
that is already protected from outside access).
.. note::
Additional explanation is needed here about how to use the *zmq*
mode on a parallel machine, e.g. a cluster with many nodes.
For mode *mpi/one*, the 2 codes communicate via MPI and are launched
by the same mpirun command, e.g. with this syntax for OpenMPI:
.. code-block:: bash
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.client -log log.client : -np 4 othercode args # LAMMPS is client
mpirun -np 2 othercode args : -np 4 lmp_mpi -mpicolor 1 -in in.server # LAMMPS is server
Note the use of the "-mpicolor color" command-line argument with
LAMMPS. See the :doc:`command-line args <Run_options>` page for
further explanation.
For mode *mpi/two*, the 2 codes communicate via MPI, but are launched
by 2 separate mpirun commands. The specified *filename* argument is a
file the 2 MPI processes will use to exchange info so that an MPI
inter-communicator can be established to enable the 2 codes to send
MPI messages to each other. Both codes must be able to access the
path/file in a common filesystem.
----------
Normally, the message client or message server command should be used
at the top of a LAMMPS input script. It performs an initial handshake
with the other code to setup messaging and to verify that both codes
are using the same message protocol and mode. Assuming both codes are
launched at (nearly) the same time, the other code should perform the
same kind of initialization.
If LAMMPS is the client code, it will begin sending messages when a
LAMMPS client command begins its operation. E.g. for the :doc:`fix client/md <fix_client_md>` command, it is when a :doc:`run <run>`
command is executed.
If LAMMPS is the server code, it will begin receiving messages when
the :doc:`server <server>` command is invoked.
If LAMMPS is being used as a client, the message quit command will
terminate its messaging with the server. If you do not use this
command and just allow LAMMPS to exit, then the server will continue
to wait for further messages. This may not be a problem, but if both
the client and server programs were launched in the same batch script,
then if the server runs indefinitely, it may consume the full allocation
of computer time, even if the calculation finishes sooner.
Note that if LAMMPS is the client or server, it will continue
processing the rest of its input script after client/server
communication terminates.
If both codes cooperate in this manner, a new round of client/server
messaging can be initiated after termination by re-using a second message
command in your LAMMPS input script, followed by a new fix client or
server command, followed by another message quit command (if LAMMPS is
the client). As an example, this can be performed in a loop to use a
quantum code as a server to compute quantum forces for multiple LAMMPS
data files or periodic snapshots while running dynamics.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`server <server>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -467,14 +467,14 @@ simple pairwise potentials such as Lennard-Jones do support threading
over both atoms and neighbors.
If the *neigh/transpose* keyword is set to *off*, then the KOKKOS
package will use the same memory layout for building the neigh list on
package will use the same memory layout for building the neighbor list on
GPUs as used for the pair style. When this keyword is set to *on* it
will use a different (transposed) memory layout to build the neigh
will use a different (transposed) memory layout to build the neighbor
list on GPUs. This can be faster in some cases (e.g. ReaxFF HNS
benchmark) but slower in others (e.g. Lennard Jones benchmark). The
copy between different memory layouts is done out of place and
therefore doubles the memory overhead of the neigh list, which can be
signicant.
therefore doubles the memory overhead of the neighbor list, which can
be significant.
The *newton* keyword sets the Newton flags for pairwise and bonded
interactions to *off* or *on*, the same as the :doc:`newton <newton>`
@ -484,11 +484,12 @@ computation is done, but less communication. However, when running on
CPUs a value of *on* is the default since it can often be faster, just
as it is for non-accelerated pair styles
The *binsize* keyword sets the size of bins used to bin atoms in
neighbor list builds. The same value can be set by the :doc:`neigh_modify binsize <neigh_modify>` command. Making it an option in the package
kokkos command allows it to be set from the command line. The default
value for CPUs is 0.0, which means the LAMMPS default will be used,
which is bins = 1/2 the size of the pairwise cutoff + neighbor skin
The *binsize* keyword sets the size of bins used to bin atoms during
neighbor list builds. The same value can be set by the
:doc:`neigh_modify binsize <neigh_modify>` command. Making it an option
in the package kokkos command allows it to be set from the command line.
The default value for CPUs is 0.0, which means the LAMMPS default will be
used, which is bins = 1/2 the size of the pairwise cutoff + neighbor skin
distance. This is fine when neighbor lists are built on the CPU. For GPU
builds, a 2x larger binsize equal to the pairwise cutoff + neighbor skin
is often faster, which is the default. Note that if you use a

View File

@ -1,20 +1,22 @@
.. index:: pair_style dpd
.. index:: pair_style dpd/gpu
.. index:: pair_style dpd/intel
.. index:: pair_style dpd/kk
.. index:: pair_style dpd/omp
.. index:: pair_style dpd/tstat
.. index:: pair_style dpd/tstat/gpu
.. index:: pair_style dpd/tstat/kk
.. index:: pair_style dpd/tstat/omp
pair_style dpd command
======================
Accelerator Variants: *dpd/gpu*, *dpd/intel*, *dpd/omp*
Accelerator Variants: *dpd/gpu*, *dpd/intel*, *dpd/kk*, *dpd/omp*
pair_style dpd/tstat command
============================
Accelerator Variants: *dpd/tstat/gpu*, *dpd/tstat/omp*
Accelerator Variants: *dpd/tstat/gpu*, *dpd/tstat/kk*, *dpd/tstat/omp*
Syntax
""""""

View File

@ -1,12 +1,18 @@
.. index:: pair_style dpd/ext
.. index:: pair_style dpd/ext/kk
.. index:: pair_style dpd/ext/tstat
.. index:: pair_style dpd/ext/tstat/kk
pair_style dpd/ext command
==========================
Accelerator Variants: dpd/ext/kk
pair_style dpd/ext/tstat command
================================
Accelerator Variants: dpd/ext/tstat/kk
Syntax
""""""
@ -137,6 +143,10 @@ except that A is not included.
----------
.. include:: accel_styles.rst
----------
**Mixing, shift, table, tail correction, restart, rRESPA info**\ :

View File

@ -70,12 +70,11 @@ Restrictions
""""""""""""
The *plugin* command is part of the PLUGIN package. It is
only enabled if LAMMPS was built with that package.
See the :doc:`Build package <Build_package>` page for
more info. Plugins are not available on Windows.
only enabled if LAMMPS was built with that package. See
the :doc:`Build package <Build_package>` page for more info.
If plugins access functions or classes from a package, LAMMPS must
have been compiled with that package included.
If plugins access functions or classes from a package,
LAMMPS must have been compiled with that package included.
Plugins are dependent on the LAMMPS binary interface (ABI)
and particularly the MPI library used. So they are not guaranteed

View File

@ -1,74 +0,0 @@
.. index:: server
server command
==============
Syntax
""""""
.. parsed-literal::
server protocol
* protocol = *md* or *mc*
Examples
""""""""
.. code-block:: LAMMPS
server md
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it receives
messages from a separate "client" code and responds by sending a reply
message back to the client. The specified *protocol* determines the
format and content of messages LAMMPS expects to receive and how it
responds.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS input script will be
processed.
The *protocol* argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
* :doc:`md <server_md>` = run dynamics with another code
* :doc:`mc <server_mc>` = perform Monte Carlo moves with another code
For protocol *md*, LAMMPS can be either a client (via the :doc:`fix client/md <fix_client_md>` command) or server. See the :doc:`server md <server_md>` page for details on the protocol.
For protocol *mc*, LAMMPS can be the server. See the :doc:`server mc <server_mc>` page for details on the protocol.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to setup the messaging protocol with
the other client code.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -1,126 +0,0 @@
.. index:: server mc
server mc command
=================
Syntax
""""""
.. parsed-literal::
server mc
mc = the protocol argument to the :doc:`server <server>` command
Examples
""""""""
.. code-block:: LAMMPS
server mc
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the *mc*
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The :doc:`server <server>` page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/COUPLE/lammps_mc/in.server.
----------
When using this command, LAMMPS (as the server code) receives
instructions from a Monte Carlo (MC) driver to displace random atoms,
compute the energy before and after displacement, and run dynamics to
equilibrate the system.
The MC driver performs the random displacements on random atoms,
accepts or rejects the move in an MC sense, and orchestrates the MD
runs.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the `CSlib website <https://cslib.sandia.gov>`_ doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_mc.cpp file for details on how LAMMPS uses
these messages. See the examples/COUPLE/lammps_mc/mc.cpp file for an
example of how an MC driver code can use these messages.
Define NATOMS=1, EINIT=2, DISPLACE=3, ACCEPT=4, RUN=5.
**Client sends one of these kinds of message**\ :
.. parsed-literal::
cs->send(NATOMS,0) # msgID = 1 with no fields
cs->send(EINIT,0) # msgID = 2 with no fields
cs->send(DISPLACE,2) # msgID = 3 with 2 fields
cs->pack_int(1,ID) # 1st field = ID of atom to displace
cs->pack(2,3,xnew) # 2nd field = new xyz coords of displaced atom
cs->send(ACCEPT,1) # msgID = 4 with 1 field
cs->pack_int(1,flag) # 1st field = accept/reject flag
cs->send(RUN,1) # msgID = 5 with 1 field
cs->pack_int(1,nsteps) # 1st field = # of timesteps to run MD
**Server replies**\ :
.. parsed-literal::
cs->send(NATOMS,1) # msgID = 1 with 1 field
cs->pack_int(1,natoms) # 1st field = number of atoms
cs->send(EINIT,2) # msgID = 2 with 2 fields
cs->pack_double(1,poteng) # 1st field = potential energy of system
cs->pack(2,3\*natoms,x) # 2nd field = 3N coords of Natoms
cs->send(DISPLACE,1) # msgID = 3 with 1 field
cs->pack_double(1,poteng) # 1st field = new potential energy of system
cs->send(ACCEPT,0) # msgID = 4 with no fields
cs->send(RUN,0) # msgID = 5 with no fields
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to setup the messaging protocol with
the other client code.
Related commands
""""""""""""""""
:doc:`message <message>`
Default
"""""""
none

View File

@ -1,161 +0,0 @@
.. index:: server md
server md command
=================
Syntax
""""""
.. parsed-literal::
server md
md = the protocol argument to the :doc:`server <server>` command
Examples
""""""""
.. code-block:: LAMMPS
server md
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the *md*
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The :doc:`server <server>` page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/message/in.message.server.
----------
When using this command, LAMMPS (as the server code) receives the
current coordinates of all particles from the client code each
timestep, computes their interaction, and returns the energy, forces,
and pressure for the interacting particles to the client code, so it
can complete the timestep. This command could also be used with a
client code that performs energy minimization, using the server to
compute forces and energy each iteration of its minimizer.
When using the :doc:`fix client/md <fix_client_md>` command, LAMMPS (as
the client code) does the timestepping and receives needed energy,
forces, and pressure values from the server code.
The format and content of the exchanged messages are explained here in
a conceptual sense. Python-style pseudo code for the library calls to
the CSlib is shown, which performs the actual message exchange between
the two codes. See the `CSlib website <https://cslib.sandia.gov>`_ doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_md.cpp and src/MESSAGE/fix_client_md.cpp
files for details on how LAMMPS uses these messages. See the
examples/COUPLE/lammps_vasp/vasp_wrap.py or
examples/COUPLE/lammps_nwchem/nwchem_wrap.py files for examples of how
a quantum code (VASP or NWChem) can use these messages.
The following pseudo-code uses these values, defined as enums.
Define:
.. parsed-literal::
SETUP=1, STEP=2
DIM=1, PERIODICITY=2, ORIGIN=3, BOX=4, NATOMS=5, NTYPES=6, TYPES=7, COORDS=8, UNITS=9, CHARGE=10
FORCES=1, ENERGY=2, PRESSURE=3, ERROR=4
**Client sends 2 kinds of messages**\ :
.. parsed-literal::
# required fields: DIM, PERIODICITY, ORIGIN, BOX, NATOMS, NTYPES, TYPES, COORDS
# optional fields: UNITS, CHARGE
cs->send(SETUP,nfields) # msgID with nfields
cs->pack_int(DIM,dim) # dimension (2,3) of simulation
cs->pack(PERIODICITY,3,xyz) # periodicity flags in 3 dims
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
cs->pack_int(NATOMS,natoms) # total number of atoms
cs->pack_int(NTYPES,ntypes) # number of atom types
cs->pack(TYPES,natoms,type) # vector of per-atom types
cs->pack(COORDS,3\*natoms,x) # vector of 3N atom coords
cs->pack_string(UNITS,units) # units = "lj", "real", "metal", etc
cs->pack(CHARGE,natoms,q) # vector of per-atom charge
# required fields: COORDS
# optional fields: ORIGIN, BOX
cs->send(STEP,nfields) # msgID with nfields
cs->pack(COORDS,3\*natoms,x) # vector of 3N atom coords
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
**Server replies to either kind of message**\ :
.. parsed-literal::
# required fields: FORCES, ENERGY, PRESSURE
# optional fields: ERROR
cs->send(msgID,nfields) # msgID with nfields
cs->pack(FORCES,3\*Natoms,f) # vector of 3N forces on atoms
cs->pack(ENERGY,1,poteng) # total potential energy of system
cs->pack(PRESSURE,6,press) # global pressure tensor (6-vector)
cs->pack_int(ERROR,flag) # server had an error (e.g. DFT non-convergence)
----------
The units for various quantities that are sent and received via
messages are defined for atomic-scale simulations in the table below.
The client and server codes (including LAMMPS) can use internal units
different than these (e.g. :doc:`real units <units>` in LAMMPS), so long
as they convert to these units for messaging.
* COORDS, ORIGIN, BOX = Angstroms
* CHARGE = multiple of electron charge (1.0 is a proton)
* ENERGY = eV
* FORCES = eV/Angstrom
* PRESSURE = bars
Note that these are :doc:`metal units <units>` in LAMMPS.
If you wish to run LAMMPS in one of its non-atomic unit systems, e.g. :doc:`lj units <units>`, then the client and server should exchange a UNITS
message as indicated above, and both the client and server should
agree on the units for the data they exchange.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -21,9 +21,14 @@ Syntax
*norm* value = *yes* or *no*
*flush* value = *yes* or *no*
*line* value = *one* or *multi* or *yaml*
*format* values = *line* string, *int* string, *float* string, M string, or *none*
*colname* values = ID string, or *default*
string = new column header name
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a thermo keyword or reference to compute, fix, property or variable.
*format* values = *line* string, *int* string, *float* string, ID string, or *none*
string = C-style format string
M = integer from 1 to N, where N = # of quantities being output
ID = integer from 1 to N, or integer from -1 to -N, where N = # of quantities being output
*or* a thermo keyword or reference to compute, fix, property or variable.
*temp* value = compute ID that calculates a temperature
*press* value = compute ID that calculates a pressure
@ -36,7 +41,8 @@ Examples
thermo_modify temp myTemp format 3 %15.8g
thermo_modify temp myTemp format line "%ld %g %g %15.8g"
thermo_modify line multi format float %g
themos_modify line yaml format none
thermo_modify line yaml format none
thermo_modify colname 1 Timestep colname -2 Pressure colname f_1[1] AvgDensity
Description
"""""""""""
@ -147,6 +153,20 @@ containing the timestep and CPU time ("multi"), or in a YAML format
block ("yaml"). This modify option overrides the *one*, *multi*, or
*yaml* thermo_style settings.
The *colname* keyword can be used to change the default header keyword
for a column or field of thermodynamic output. The setting for *ID
string* replaces the default text with the provided string. *ID* can be
a positive integer, which selects the column number counting from the
left, a negative integer, which selects the column number counting from
the right (i.e. -1 is the last column/keyword), or a thermo keyword (or
a reference to a compute, fix, property, or variable), in which case it
replaces the header string for that specific keyword.
The *colname* keyword can be used multiple times. If multiple *colname*
settings refer to the same keyword, the last setting has precedence. A
setting of *default* clears all previous settings, reverting all values
to their default values.
The *format* keyword can be used to change the default numeric format of
any of quantities the :doc:`thermo_style <thermo_style>` command
outputs. All the specified format strings are C-style formats, e.g. as
@ -155,12 +175,16 @@ argument which is the format string for the entire line of thermo
output, with N fields, which you must enclose in quotes if it is more
than one field. The *int* and *float* keywords take a single format
argument and are applied to all integer or floating-point quantities
output. The setting for *M string* also takes a single format argument
which is used for the Mth value output in each line, e.g. the fifth
column is output in high precision for "format 5 %20.15g".
output. The setting for *ID string* also takes a single format argument
which is used for the indexed value in each line. The interpretation is
the same as for *colname*, i.e. a positive integer selects the n-th
value corresponding to the n-th thermo keyword, a negative integer
counts backwards from the last column, and a string matches the entry
for that thermo keyword, e.g. the fifth column is output in high
precision for "format 5 %20.15g" and the pair energy for "format epair
%20.15g".
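For example, the *colname* and *format* keywords can be combined as in
the following sketch; the thermo keywords chosen are arbitrary.
.. code-block:: LAMMPS
thermo_style  custom step temp epair press
# rename the "epair" column and print it in high precision; print the
# last column (press) with a fixed-point format
thermo_modify colname epair PairEnergy format epair %20.15g format -1 %10.4f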
The *format* keyword can be used multiple times. The precedence is
that for each value in a line of output, the *M* format (if specified)
that for each value in a line of output, the *ID* format (if specified)
is used, else the *int* or *float* setting (if specified) is used,
else the *line* setting (if specified) for that value is used, else
the default setting is used. A setting of *none* clears all previous
@ -173,9 +197,10 @@ settings, reverting all values to their default format.
When specifying the *format int* option you can use a "%d"-style
format identifier in the format string and LAMMPS will convert this
to the corresponding 8-byte form when it is applied to those
keywords. However, when specifying the *line* option or *format M
keywords. However, when specifying the *line* option or *format ID
string* option for *step* and *natoms*, you should specify a format
string appropriate for an 8-byte signed integer, e.g. one with "%ld".
string appropriate for an 8-byte signed integer, e.g. one with "%ld"
or "%lld" depending on the platform.
The *temp* keyword is used to determine how thermodynamic temperature is
calculated, which is used by all thermo quantities that require a

View File

@ -5,3 +5,4 @@ sphinx_tabs
breathe
Pygments
six
pyyaml

View File

@ -1180,6 +1180,7 @@ Gladky
gld
gle
globbing
Gloor
Glosli
Glotzer
gmail
@ -1417,6 +1418,7 @@ initializations
InitiatorIDs
initio
InP
inq
inregion
instantiation
Institut
@ -1428,6 +1430,7 @@ interal
interatomic
Interatomic
interconvert
interfacial
interial
interlayer
intermolecular
@ -1738,6 +1741,7 @@ libdl
libfftw
libgcc
libgpu
libinqmdi
libjpeg
libkim
liblammps
@ -1752,6 +1756,7 @@ libplumed
libplumedKernel
libpng
libpoems
libqemdi
libqmmm
librar
libreax
@ -1787,6 +1792,7 @@ Liu
Livermore
lj
llammps
lld
LLVM
lm
lmp
@ -2205,6 +2211,7 @@ Nbondtypes
nBOt
nbrhood
Nbtypes
Nbytes
nc
Nc
nchunk
@ -3649,6 +3656,7 @@ Wittmaack
wn
Wolde
workflow
workflows
Worley
Wriggers
Wuppertal

View File

@ -9,6 +9,11 @@ model a realistic problem.
In many of the examples included here, LAMMPS must first be built as a
library.
Also see the Howto_mdi doc page in the LAMMPS manual for a description
of how LAMMPS can be coupled to other codes in a client/server fashion
using the MDI Library created by the MolSSI consortium. The MDI
package in LAMMPS has support for this style of code coupling.
See these sections of the LAMMPS manual for details:
Build LAMMPS as a library (doc/html/Build_basics.html)
@ -28,15 +33,9 @@ These are the sub-directories included in this directory:
simple simple example of driver code calling LAMMPS as a lib
multiple example of driver code calling multiple instances of LAMMPS
plugin example for loading LAMMPS at runtime from a shared library
lammps_mc client/server coupling of Monte Carlo client
with LAMMPS server for energy evaluation
lammps_nwchem client/server coupling of LAMMPS client with
NWChem quantum DFT as server for quantum forces
lammps_quest MD with quantum forces, coupling to Quest DFT code
lammps_spparks grain-growth Monte Carlo with strain via MD,
coupling to SPPARKS kinetic MC code
lammps_vasp client/server coupling of LAMMPS client with
VASP quantum DFT as server for quantum forces
library collection of useful inter-code communication routines
fortran a simple wrapper on the LAMMPS library API that
can be called from Fortran

View File

@ -1,33 +0,0 @@
# Makefile for MC
SHELL = /bin/sh
SRC = mc.cpp random_park.cpp
OBJ = $(SRC:.cpp=.o)
# change this line for your machine to path for CSlib src dir
CSLIB = /home/sjplimp/lammps/lib/message/cslib/src
# compiler/linker settings
CC = g++
CCFLAGS = -g -O3 -I$(CSLIB)
LINK = g++
LINKFLAGS = -g -O -L$(CSLIB)
# targets
mc: $(OBJ)
# first line if built the CSlib within lib/message with ZMQ support
# second line if built the CSlib without ZMQ support
$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -lzmq -o mc
# $(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -o mc
clean:
@rm -f *.o mc
# rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<

View File

@ -1,128 +0,0 @@
Sample Monte Carlo (MC) wrapper on LAMMPS via client/server coupling
See the MESSAGE package documentation in Build_extras.html#message for
more details on how client/server coupling works in LAMMPS.
In this dir, the mc.cpp/h files are a standalone "client" MC code. It
should be run on a single processor, though it could become a parallel
program at some point. LAMMPS is also run as a standalone executable
as a "server" on as many processors as desired using its "server mc"
command; see its doc page for details.
Messages are exchanged between MC and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the MC
program became parallel, data could also be exchanged via MPI.
The MC code makes simple MC moves, by displacing a single random atom
by a small random amount. It uses LAMMPS to calculate the energy
change, and to run dynamics between MC moves.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
% cd lammps/lib/message
% python Install.py -m -z # build CSlib with MPI and ZMQ support
% cd lammps/src
% make yes-message
% make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the MC client code
The source files for the MC code are in this dir. It links with the
CSlib library in lib/message/cslib.
You must first build the CSlib in serial mode, e.g.
% cd lammps/lib/message/cslib/src
% make lib # build serial and parallel lib with ZMQ support
% make lib zmq=no # build serial and parallel lib without ZMQ support
Then edit the Makefile in this dir. The CSLIB variable should be the
path to where the LAMMPS lib/message/cslib/src dir is on your system.
If you built the CSlib without ZMQ support you will also need to
comment/uncomment one line. Then you can just type
% make
and you should get an "mc" executable.
----------------
To run in client/server mode:
Both the client (MC) and server (LAMMPS) must use the same messaging
mode, namely file or zmq. This is an argument to the MC code; it can
be selected by setting the "mode" variable when you run LAMMPS. The
default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The MC code is always run in serial.
When you run, the server should print out thermodynamic info
for every MD run it performs (between MC moves). The client
will print nothing until the simulation ends, then it will
print stats about the accepted MC moves.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 1 lmp_mpi -v mode file -in in.mc.server
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 4 lmp_mpi -v mode file -in in.mc.server
ZMQ mode of messaging:
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 1 lmp_mpi -v mode zmq -in in.mc.server
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 4 lmp_mpi -v mode zmq -in in.mc.server
--------------
The input script for the MC program is in.mc. You can edit it to run
longer simulations.
500 nsteps = total # of steps of MD
100 ndynamics = # of MD steps between MC moves
0.1 delta = displacement size of MC move
1.0 temperature = used in MC Boltzman factor
12345 seed = random number seed
--------------
The problem size that LAMMPS computes the MC energy for and runs
dynamics on is set by the x,y,z variables in the LAMMPS in.mc.server
script. These give the box dimensions in fcc unit cells (4 atoms per
cell), so the default 5x5x5 box contains 500 particles. You can
adjust the size as follows:
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles

View File

@ -1,7 +0,0 @@
# MC params
500 nsteps
100 ndynamics
0.1 delta
1.0 temperature
12345 seed

View File

@ -1,36 +0,0 @@
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then &
"message server mc file tmp.couple" &
elif "${mode} == zmq" &
"message server mc zmq *:5555" &
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000649929 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:02

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000592947 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 3.8147e-06 on 4 procs for 0 steps with 500 atoms
59.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.815e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
106.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.75509e-06 on 4 procs for 0 steps with 500 atoms
113.2% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.755e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.563e-06 on 4 procs for 0 steps with 500 atoms
117.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.563e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 3.99351e-06 on 4 procs for 0 steps with 500 atoms
93.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.994e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 3.57628e-06 on 4 procs for 0 steps with 500 atoms
111.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.576e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:02

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000741005 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.19209e-06 on 1 procs for 0 steps with 500 atoms
83.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
104.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:00

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000576019 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 4.76837e-06 on 4 procs for 0 steps with 500 atoms
89.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.768e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.45707e-06 on 4 procs for 0 steps with 500 atoms
94.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.457e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
115.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.38419e-06 on 4 procs for 0 steps with 500 atoms
125.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.384e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 2.44379e-06 on 4 procs for 0 steps with 500 atoms
112.5% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.444e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 2.14577e-06 on 4 procs for 0 steps with 500 atoms
139.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:00

View File

@ -1,263 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
// MC code used with LAMMPS in client/server mode
// MC is the client, LAMMPS is the server
// Syntax: mc infile mode modearg
// mode = file, zmq
// modearg = filename for file, localhost:5555 for zmq
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "mc.h"
#include "random_park.h"
#include "cslib.h"
using namespace CSLIB_NS;
void error(const char *);
CSlib *cs_create(char *, char *);
#define MAXLINE 256
/* ---------------------------------------------------------------------- */
// main program
int main(int narg, char **arg)
{
if (narg != 4) {
error("Syntax: mc infile mode modearg");
exit(1);
}
// initialize CSlib
CSlib *cs = cs_create(arg[2],arg[3]);
// create MC class and perform run
MC *mc = new MC(arg[1],cs);
mc->run();
// final MC stats
int naccept = mc->naccept;
int nattempt = mc->nattempt;
printf("------ MC stats ------\n");
printf("MC attempts = %d\n",nattempt);
printf("MC accepts = %d\n",naccept);
printf("Acceptance ratio = %g\n",1.0*naccept/nattempt);
// clean up
delete cs;
delete mc;
}
/* ---------------------------------------------------------------------- */
void error(const char *str)
{
printf("ERROR: %s\n",str);
exit(1);
}
/* ---------------------------------------------------------------------- */
CSlib *cs_create(char *mode, char *arg)
{
CSlib *cs = new CSlib(0,mode,arg,NULL);
// initial handshake to agree on protocol
cs->send(0,1);
cs->pack_string(1,(char *) "mc");
int msgID,nfield;
int *fieldID,*fieldtype,*fieldlen;
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
return cs;
}
// ----------------------------------------------------------------------
// MC class
// ----------------------------------------------------------------------
MC::MC(char *mcfile, void *cs_caller)
//MC::MC(char *mcfile, CSlib *cs_caller)
{
cs_void = cs_caller;
// setup MC params
options(mcfile);
// random # generator
random = new RanPark(seed);
}
/* ---------------------------------------------------------------------- */
MC::~MC()
{
free(x);
delete random;
}
/* ---------------------------------------------------------------------- */
void MC::run()
{
int iatom,accept,msgID,nfield;
double pe_initial,pe_final,edelta;
double dx,dy,dz;
double xold[3],xnew[3];
int *fieldID,*fieldtype,*fieldlen;
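// message IDs for requests sent to the LAMMPS "server mc" command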
enum{NATOMS=1,EINIT,DISPLACE,ACCEPT,RUN};
CSlib *cs = (CSlib *) cs_void;
// one-time request for atom count from MD
// allocate 1d coord buffer
cs->send(NATOMS,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
natoms = cs->unpack_int(1);
x = (double *) malloc(3*natoms*sizeof(double));
// loop over MC moves
naccept = nattempt = 0;
for (int iloop = 0; iloop < nloop; iloop++) {
// request current energy from MD
// recv energy, coords from MD
cs->send(EINIT,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_initial = cs->unpack_double(1);
double *x = (double *) cs->unpack(2);
// perform simple MC event
// displace a single atom by random amount
iatom = (int) natoms*random->uniform();
xold[0] = x[3*iatom+0];
xold[1] = x[3*iatom+1];
xold[2] = x[3*iatom+2];
dx = 2.0*delta*random->uniform() - delta;
dy = 2.0*delta*random->uniform() - delta;
dz = 2.0*delta*random->uniform() - delta;
xnew[0] = xold[0] + dx;
xnew[1] = xold[1] + dy;
xnew[2] = xold[2] + dz;
// send atom ID and its new coords to MD
// recv new energy
cs->send(DISPLACE,2);
cs->pack_int(1,iatom+1);
cs->pack(2,4,3,xnew);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_final = cs->unpack_double(1);
// decide whether to accept/reject MC event
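// Metropolis rule: always accept if the energy decreases; otherwise
// accept with probability exp(natoms*(pe_initial-pe_final)/temperature)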
if (pe_final <= pe_initial) accept = 1;
else if (temperature == 0.0) accept = 0;
else if (random->uniform() >
exp(natoms*(pe_initial-pe_final)/temperature)) accept = 0;
else accept = 1;
nattempt++;
if (accept) naccept++;
// send accept (1) or reject (0) flag to MD
cs->send(ACCEPT,1);
cs->pack_int(1,accept);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
// send dynamics timesteps
cs->send(RUN,1);
cs->pack_int(1,ndynamics);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
// send exit message to MD
cs->send(-1,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
/* ---------------------------------------------------------------------- */
void MC::options(char *filename)
{
// default params
nsteps = 0;
ndynamics = 100;
delta = 0.1;
temperature = 1.0;
seed = 12345;
// read and parse file
FILE *fp = fopen(filename,"r");
if (fp == NULL) error("Could not open MC file");
char line[MAXLINE];
char *keyword,*value;
char *eof = fgets(line,MAXLINE,fp);
while (eof) {
if (line[0] == '#') { // comment line
eof = fgets(line,MAXLINE,fp);
continue;
}
value = strtok(line," \t\n\r\f");
if (value == NULL) { // blank line
eof = fgets(line,MAXLINE,fp);
continue;
}
keyword = strtok(NULL," \t\n\r\f");
if (keyword == NULL) error("Missing keyword in MC file");
if (strcmp(keyword,"nsteps") == 0) nsteps = atoi(value);
else if (strcmp(keyword,"ndynamics") == 0) ndynamics = atoi(value);
else if (strcmp(keyword,"delta") == 0) delta = atof(value);
else if (strcmp(keyword,"temperature") == 0) temperature = atof(value);
else if (strcmp(keyword,"seed") == 0) seed = atoi(value);
else error("Unknown param in MC file");
eof = fgets(line,MAXLINE,fp);
}
// derived params
nloop = nsteps/ndynamics;
}

View File

@ -1,40 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
#ifndef MC_H
#define MC_H
/* ---------------------------------------------------------------------- */
class MC {
public:
int naccept; // # of accepted MC events
int nattempt; // # of attempted MC events
MC(char *, void *);
~MC();
void run();
private:
int nsteps; // total # of MD steps
int ndynamics; // steps in one short dynamics run
int nloop; // nsteps/ndynamics
int natoms; // # of MD atoms
double delta; // MC displacement distance
double temperature; // MC temperature for Boltzmann criterion
double *x; // atom coords as 3N 1d vector
double energy; // global potential energy
int seed; // RNG seed
class RanPark *random;
void *cs_void; // messaging library
void options(char *);
};
#endif

View File

@ -1,72 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
// Park/Miller RNG
#include <math.h>
#include "random_park.h"
//#include "error.h"
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
/* ---------------------------------------------------------------------- */
RanPark::RanPark(int seed_init)
{
//if (seed_init <= 0)
// error->one(FLERR,"Invalid seed for Park random # generator");
seed = seed_init;
save = 0;
}
/* ----------------------------------------------------------------------
uniform RN
------------------------------------------------------------------------- */
double RanPark::uniform()
{
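// Park/Miller "minimal standard" LCG, evaluated with Schrage's method
// so the intermediate products never overflow a 32-bit int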
int k = seed/IQ;
seed = IA*(seed-k*IQ) - IR*k;
if (seed < 0) seed += IM;
double ans = AM*seed;
return ans;
}
/* ----------------------------------------------------------------------
gaussian RN
------------------------------------------------------------------------- */
double RanPark::gaussian()
{
double first,v1,v2,rsq,fac;
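// Marsaglia polar method: produces Gaussian deviates in pairs and
// caches the second value for the next call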
if (!save) {
do {
v1 = 2.0*uniform()-1.0;
v2 = 2.0*uniform()-1.0;
rsq = v1*v1 + v2*v2;
} while ((rsq >= 1.0) || (rsq == 0.0));
fac = sqrt(-2.0*log(rsq)/rsq);
second = v1*fac;
first = v2*fac;
save = 1;
} else {
first = second;
save = 0;
}
return first;
}

View File

@ -1,28 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef RANPARK_H
#define RANPARK_H
class RanPark {
public:
RanPark(int);
double uniform();
double gaussian();
private:
int seed,save;
double second;
};
#endif

View File

@ -1,197 +0,0 @@
Sample LAMMPS MD wrapper on NWChem via client/server coupling
See the MESSAGE package documentation (Build_extras.html#message)
and the Howto client/server doc page for more details on how
client/server coupling works in LAMMPS.
In this dir, the nwchem_wrap.py is a wrapper on the NWChem electronic
structure code so it can work as a "server" code which LAMMPS drives
as a "client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends NWChem a current set of coordinates each timestep,
NWChem computes forces and energy (and virial) and returns that info
to LAMMPS.
Messages are exchanged between NWChem and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
nwchem_wrap.py program became parallel, or the CSlib library calls were
integrated into NWChem directly, then data could also be exchanged via
MPI.
Two examples are provided, in the planewave and ao_basis
sub-directories. See details below.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the nwchem_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This builds shared-library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it cannot, you will get an error when you run
nwchem_wrap.py.
You can do this by augmenting two environment variables, either from
the command line, or in your shell start-up script. Here is the
sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
----------------
Prepare to use NWChem and the nwchem_wrap.py script
You can run the nwchem_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
nwchem_lammps.out files output by a previous NWChem run.
But note that the as-is version of nwchem_wrap.py will not attempt to
run NWChem.
To have the wrapper actually launch NWChem, you must edit the 1st
nwchemcmd line at the top of nwchem_wrap.py to be the launch command
needed to run NWChem on your system. It can be a command to run
NWChem in serial or in parallel, e.g. an mpirun command. Then comment
out the 2nd nwchemcmd line immediately following it.
Ensure you have the necessary NWChem input file in this directory,
suitable for the NWChem calculation you want to perform.
Example input files are provided for both atom-centered AO basis sets
and plane-wave basis sets. Note that the NWChem template file should
be matched to the LAMMPS input script (# of atoms and atom types, box
size, etc).
Once you run NWChem yourself, the nwchem_lammps.out file will be
overwritten.
The syntax of the wrapper is:
nwchem_wrap.py file/zmq ao/pw input_template
* file/zmq = messaging mode, must match the LAMMPS messaging mode
* ao/pw = basis set mode, selects between atom-centered and plane-wave
the input_template file must correspond to the appropriate basis set mode:
the "ao" mode supports the scf and dft modules in NWChem,
the "pw" mode supports the nwpw module.
* input_template = NWChem input file used as a template; it must include a
"geometry" block with the atoms in the simulation. Dummy
xyz coordinates should be included (they are not used), and the
atom ordering must match the LAMMPS input.
During a simulation, the molecular orbitals from the previous timestep
will be used as the initial guess for the next NWChem calculation. If
a file named "nwchem_lammps.movecs" is in the directory the wrapper is
called from, these orbitals will be used as the initial guess orbitals
in the first step of the simulation.
----------------
Example directories
(1) planewave
Demonstrates coupling of the nwpw module in NWChem with LAMMPS. Only fully
periodic boundary conditions and orthogonal simulation boxes are currently
supported by the wrapper. The included files provide an example run using a
2 atom unit cell of tungsten.
Files:
* data.W LAMMPS input with geometry information
* in.client.W LAMMPS simulation input
* log.client.output LAMMPS simulation output
* w.nw NWChem template input file
* nwchem_lammps.out NWChem output
(2) ao_basis
Demonstrates coupling of the scf (or dft) modules in NWChem with
LAMMPS. Only fully aperiodic boundary conditions are currently
supported by the wrapper. The included files provide an example run
using a single water molecule.
Files:
* data.h2o LAMMPS input with geometry information
* in.client.h2o LAMMPS simulation input
* log.client.output LAMMPS simulation output
* h2o.nw NWChem template input file
* nwchem_lammps.out NWChem output
As noted above, you can run the nwchem_wrap.py script as-is to test that
the coupling between it and LAMMPS is functional. This will use the included
nwchem_lammps.out files.
----------------
To run in client/server mode:
NOTE: The nwchem_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (nwchem_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
nwchem_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The nwchem_wrap.py code is always run in serial, but it
launches NWChem from Python via an mpirun command which can run NWChem
itself in parallel.
When you run, the client (LAMMPS) should print thermodynamic info
every timestep, corresponding to the forces and virial computed by
NWChem. NWChem will also generate output files each timestep. Output
files from previous timesteps are archived in a "nwchem_logs" directory.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file -in in.client.W
% python nwchem_wrap.py file pw w.nw
% mpirun -np 2 lmp_mpi -v mode file -in in.client.h2o
% python nwchem_wrap.py file ao h2o.nw
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq -in in.client.W
% python nwchem_wrap.py zmq pw w.nw
% mpirun -np 2 lmp_mpi -v mode zmq -in in.client.h2o
% python nwchem_wrap.py zmq ao h2o.nw

View File

@ -1,20 +0,0 @@
LAMMPS H2O data file
3 atoms
2 atom types
-10.0 10.0 xlo xhi
-10.0 10.0 ylo yhi
-10.0 10.0 zlo zhi
Masses
1 15.994915008544922
2 1.0078250169754028
Atoms
1 1 0.0 0.0 0.0
2 2 0.0 0.756723 -0.585799
3 2 0.0 -0.756723 -0.585799

View File

@ -1,25 +0,0 @@
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym
O 0.0 0.0 0.0
H 1.0 0.5 0.0
H -1.0 0.5 0.0
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient

View File

@ -1,27 +0,0 @@
# H2O with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

View File

@ -1,30 +0,0 @@
# H2O with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
group one id 2
displace_atoms one move 0.1 0.2 0.3
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
minimize 1.0e-6 1.0e-6 10 50

View File

@ -1,66 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000627125 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0276 | 0.0276 | 0.0276 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 0.077556087 10.354878 8000
1 300 0 0 0.077556087 10.354878 8000
2 300 0 0 0.077556087 10.354878 8000
3 300 0 0 0.077556087 10.354878 8000
Loop time of 0.30198 on 1 procs for 3 steps with 3 atoms
Performance: 0.858 ns/day, 27.961 hours/ns, 9.934 timesteps/s
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 2.5979e-06 | 2.5979e-06 | 2.5979e-06 | 0.0 | 0.00
Output | 0.00012053 | 0.00012053 | 0.00012053 | 0.0 | 0.04
Modify | 0.30185 | 0.30185 | 0.30185 | 0.0 | 99.96
Other | | 8.211e-06 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:07

View File

@ -1,66 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000608759 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0276 | 0.0276 | 0.0276 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 -2068.2746 10.354878 8000
1 200.33191 0 0 -2068.2704 6.9147085 8000
2 152.36218 0 0 -2068.269 5.2589726 8000
3 227.40679 0 0 -2068.2722 7.8492321 8000
Loop time of 1.90319 on 1 procs for 3 steps with 3 atoms
Performance: 0.136 ns/day, 176.221 hours/ns, 1.576 timesteps/s
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 3.9274e-06 | 3.9274e-06 | 3.9274e-06 | 0.0 | 0.00
Output | 0.00011798 | 0.00011798 | 0.00011798 | 0.0 | 0.01
Modify | 1.9031 | 1.9031 | 1.9031 | 0.0 | 99.99
Other | | 1.054e-05 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:07

View File

@ -1,82 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000615383 secs
group one id 2
1 atoms in group one
displace_atoms one move 0.1 0.2 0.3
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
minimize 1.0e-6 1.0e-6 10 50
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:174)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0279 | 0.0279 | 0.0279 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 -2067.8909 10.354878 8000
1 300 0 0 -2068.0707 10.354878 8000
2 300 0 0 -2068.252 10.354878 8000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
3 300 0 0 -2068.2797 10.354878 8000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
4 300 0 0 -2068.2799 10.354878 8000
Loop time of 5.71024 on 1 procs for 4 steps with 3 atoms
0.1% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-2067.96847053 -2068.35730416 -2068.35745184
Force two-norm initial, final = 4.54685 0.124714
Force max component initial, final = 3.48924 0.0859263
Final line search alpha, max atom move = 1 0.0859263
Iterations, force evaluations = 4 8
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 6.2305e-07 | 6.2305e-07 | 6.2305e-07 | 0.0 | 0.00
Comm | 1.1522e-05 | 1.1522e-05 | 1.1522e-05 | 0.0 | 0.00
Output | 8.4217e-05 | 8.4217e-05 | 8.4217e-05 | 0.0 | 0.00
Modify | 5.7099 | 5.7099 | 5.7099 | 0.0 | 99.99
Other | | 0.0002355 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 2
Dangerous builds not checked
Total wall time: 0:00:10

View File

@ -1,626 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym nocenter
O 0.00197082 0.0012463 -0.00298048
H -0.0432066 0.769363 -0.596119
H 0.0119282 -0.789143 -0.528177
end
scf
vectors input nwchem_lammps.movecs
end
dft
vectors input nwchem_lammps.movecs
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = almondjoy
program = /home/jboschen/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Fri Jan 31 00:31:00 2020
compiled = Tue_Oct_01_13:20:43_2019
source = /home/jboschen/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 655358 doubles = 5.0 Mbytes
stack = 3014651 doubles = 23.0 Mbytes
global = 5242880 doubles = 40.0 Mbytes (distinct from heap & stack)
total = 8912889 doubles = 68.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = scf
Operation = gradient
Status = ok
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 3 Fri Jan 31 00:30:59 2020
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 ao basis 2 Fri Jan 31 00:30:59 2020
The basis set named "ao basis" is the default AO basis for restart
NWChem Input Module
-------------------
Scaling coordinates for geometry "geometry" by 1.889725989
(inverse scale = 0.529177249)
------
auto-z
------
no constraints, skipping 0.0000000000000000
no constraints, skipping 0.0000000000000000
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 O 8.0000 0.00197082 0.00124630 -0.00298048
2 H 1.0000 -0.04320660 0.76936300 -0.59611900
3 H 1.0000 0.01192820 -0.78914300 -0.52817700
Atomic Mass
-----------
O 15.994910
H 1.007825
Effective nuclear repulsion energy (a.u.) 9.1573270473
Nuclear Dipole moment (a.u.)
----------------------------
X Y Z
---------------- ---------------- ----------------
-0.0293131272 -0.0185374561 -2.1696696942
Z-matrix (autoz)
--------
Units are Angstrom for bonds and degrees for angles
Type Name I J K L M Value
----------- -------- ----- ----- ----- ----- ----- ----------
1 Stretch 1 2 0.97152
2 Stretch 1 3 0.94902
3 Bend 2 1 3 108.72901
XYZ format geometry
-------------------
3
geometry
O 0.00197082 0.00124630 -0.00298048
H -0.04320660 0.76936300 -0.59611900
H 0.01192820 -0.78914300 -0.52817700
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 H | 1 O | 1.83591 | 0.97152
3 H | 1 O | 1.79339 | 0.94902
------------------------------------------------------------------------------
number of included internuclear distances: 2
==============================================================================
==============================================================================
internuclear angles
------------------------------------------------------------------------------
center 1 | center 2 | center 3 | degrees
------------------------------------------------------------------------------
2 H | 1 O | 3 H | 108.73
------------------------------------------------------------------------------
number of included internuclear angles: 1
==============================================================================
Basis "ao basis" -> "" (cartesian)
-----
O (Oxygen)
----------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 5.48467170E+03 0.001831
1 S 8.25234950E+02 0.013950
1 S 1.88046960E+02 0.068445
1 S 5.29645000E+01 0.232714
1 S 1.68975700E+01 0.470193
1 S 5.79963530E+00 0.358521
2 S 1.55396160E+01 -0.110778
2 S 3.59993360E+00 -0.148026
2 S 1.01376180E+00 1.130767
3 P 1.55396160E+01 0.070874
3 P 3.59993360E+00 0.339753
3 P 1.01376180E+00 0.727159
4 S 2.70005800E-01 1.000000
5 P 2.70005800E-01 1.000000
6 D 8.00000000E-01 1.000000
H (Hydrogen)
------------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 1.87311370E+01 0.033495
1 S 2.82539370E+00 0.234727
1 S 6.40121700E-01 0.813757
2 S 1.61277800E-01 1.000000
Summary of "ao basis" -> "" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
NWChem SCF Module
-----------------
ao basis = "ao basis"
functions = 19
atoms = 3
closed shells = 5
open shells = 0
charge = 0.00
wavefunction = RHF
input vectors = ./nwchem_lammps.movecs
output vectors = ./nwchem_lammps.movecs
use symmetry = F
symmetry adapt = F
Summary of "ao basis" -> "ao basis" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
Forming initial guess at 0.0s
Loading old vectors from job with title :
Starting SCF solution at 0.0s
----------------------------------------------
Quadratically convergent ROHF
Convergence threshold : 1.000E-04
Maximum no. of iterations : 100
Final Fock-matrix accuracy: 1.000E-07
----------------------------------------------
#quartets = 1.540D+03 #integrals = 1.424D+04 #direct = 0.0% #cached =100.0%
Integral file = ./nwchem_lammps.aoints.0
Record size in doubles = 65536 No. of integs per rec = 43688
Max. records in memory = 2 Max. records in file = 1392051
No. of bits per label = 8 No. of bits per value = 64
iter energy gnorm gmax time
----- ------------------- --------- --------- --------
1 -76.0095751323 4.63D-02 1.64D-02 0.1
2 -76.0097628164 8.13D-04 2.83D-04 0.1
3 -76.0097629130 3.92D-06 1.55D-06 0.1
Final RHF results
------------------
Total SCF energy = -76.009762913030
One-electron energy = -123.002897732381
Two-electron energy = 37.835807772101
Nuclear repulsion energy = 9.157327047250
Time for solution = 0.0s
Final eigenvalues
-----------------
1
1 -20.5584
2 -1.3367
3 -0.7128
4 -0.5617
5 -0.4959
6 0.2104
7 0.3038
8 1.0409
9 1.1202
10 1.1606
11 1.1691
12 1.3840
13 1.4192
14 2.0312
15 2.0334
ROHF Final Molecular Orbital Analysis
-------------------------------------
Vector 2 Occ=2.000000D+00 E=-1.336749D+00
MO Center= -2.8D-03, -1.3D-02, -1.7D-01, r^2= 5.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
2 0.476636 1 O s 6 0.442369 1 O s
1 -0.210214 1 O s
Vector 3 Occ=2.000000D+00 E=-7.127948D-01
MO Center= -4.9D-03, 3.9D-03, -2.1D-01, r^2= 7.8D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
4 0.504894 1 O py 8 0.303932 1 O py
18 -0.234724 3 H s 16 0.229765 2 H s
Vector 4 Occ=2.000000D+00 E=-5.617306D-01
MO Center= 3.6D-03, 9.0D-03, 5.6D-02, r^2= 6.9D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
5 0.559565 1 O pz 9 0.410981 1 O pz
6 0.315892 1 O s 2 0.157960 1 O s
Vector 5 Occ=2.000000D+00 E=-4.959173D-01
MO Center= 1.4D-03, 6.9D-05, -2.2D-02, r^2= 6.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
3 0.638390 1 O px 7 0.511530 1 O px
Vector 6 Occ=0.000000D+00 E= 2.103822D-01
MO Center= -2.3D-02, 3.5D-02, -7.3D-01, r^2= 2.6D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.416869 1 O s 17 -1.068330 2 H s
19 -1.014775 3 H s 9 -0.490951 1 O pz
5 -0.212990 1 O pz
Vector 7 Occ=0.000000D+00 E= 3.037943D-01
MO Center= -1.8D-02, -8.9D-02, -7.1D-01, r^2= 2.8D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
19 -1.426837 3 H s 17 1.332767 2 H s
8 -0.842141 1 O py 4 -0.327553 1 O py
Vector 8 Occ=0.000000D+00 E= 1.040852D+00
MO Center= -7.4D-03, 1.3D-01, -1.6D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.931594 2 H s 18 -0.747590 3 H s
8 -0.655817 1 O py 17 -0.523035 2 H s
19 0.366407 3 H s 14 -0.357109 1 O dyz
Vector 9 Occ=0.000000D+00 E= 1.120172D+00
MO Center= -6.8D-03, -2.9D-02, -3.1D-01, r^2= 1.5D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.145090 1 O s 18 0.841596 3 H s
2 -0.727471 1 O s 16 0.684927 2 H s
9 0.559191 1 O pz 19 -0.546678 3 H s
17 -0.538778 2 H s 10 -0.344609 1 O dxx
15 -0.250035 1 O dzz
Vector 10 Occ=0.000000D+00 E= 1.160603D+00
MO Center= 1.2D-02, -4.3D-02, 2.5D-01, r^2= 1.0D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.137949 1 O s 5 -0.844233 1 O pz
9 0.595088 1 O pz 2 -0.475986 1 O s
18 -0.455932 3 H s 16 -0.357325 2 H s
13 -0.317117 1 O dyy 15 -0.196968 1 O dzz
Vector 11 Occ=0.000000D+00 E= 1.169054D+00
MO Center= 1.9D-03, 1.2D-03, -6.4D-03, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
7 -1.034653 1 O px 3 0.962043 1 O px
Vector 12 Occ=0.000000D+00 E= 1.384034D+00
MO Center= 6.0D-04, -2.6D-03, -5.0D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
8 1.557767 1 O py 4 -1.035829 1 O py
17 -0.900920 2 H s 19 0.901756 3 H s
Vector 13 Occ=0.000000D+00 E= 1.419205D+00
MO Center= -1.3D-02, -4.9D-02, -5.2D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 3.605136 1 O s 2 -1.454853 1 O s
9 -1.107532 1 O pz 19 -0.874208 3 H s
17 -0.757016 2 H s 13 -0.634436 1 O dyy
5 0.516593 1 O pz 15 -0.401100 1 O dzz
10 -0.319873 1 O dxx 16 -0.260650 2 H s
Vector 14 Occ=0.000000D+00 E= 2.031234D+00
MO Center= 1.9D-03, 2.3D-03, -3.0D-03, r^2= 6.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
11 1.727083 1 O dxy
Vector 15 Occ=0.000000D+00 E= 2.033369D+00
MO Center= 3.4D-03, 3.4D-03, 4.3D-02, r^2= 6.2D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
15 1.012642 1 O dzz 13 -0.512441 1 O dyy
10 -0.438481 1 O dxx 6 -0.226567 1 O s
center of mass
--------------
x = -0.00000001 y = -0.00000003 z = -0.12388979
moments of inertia (a.u.)
------------------
6.378705068992 0.153373998471 -0.069687034145
0.153373998471 2.014476065716 0.150739744400
-0.069687034145 0.150739744400 4.379134195179
Mulliken analysis of the total density
--------------------------------------
Atom Charge Shell Charges
----------- ------ -------------------------------------------------------
1 O 8 8.87 2.00 0.90 2.90 0.92 2.08 0.08
2 H 1 0.56 0.46 0.11
3 H 1 0.56 0.47 0.10
Multipole analysis of the density wrt the origin
------------------------------------------------
L x y z total open nuclear
- - - - ----- ---- -------
0 0 0 0 -0.000000 0.000000 10.000000
1 1 0 0 -0.026417 0.000000 -0.029313
1 0 1 0 -0.023604 0.000000 -0.018537
1 0 0 1 -0.846090 0.000000 -2.169670
2 2 0 0 -5.373227 0.000000 0.007286
2 1 1 0 -0.085617 0.000000 -0.152252
2 1 0 1 0.038215 0.000000 0.069311
2 0 2 0 -2.927589 0.000000 4.337695
2 0 1 1 -0.071410 0.000000 -0.149465
2 0 0 2 -4.159949 0.000000 2.265483
Parallel integral file used 1 records with 0 large values
NWChem Gradients Module
-----------------------
wavefunction = RHF
RHF ENERGY GRADIENTS
atom coordinates gradient
x y z x y z
1 O 0.003724 0.002355 -0.005632 0.000909 -0.019294 0.007866
2 H -0.081649 1.453885 -1.126502 -0.001242 0.025549 -0.011605
3 H 0.022541 -1.491264 -0.998110 0.000333 -0.006255 0.003739
----------------------------------------
| Time | 1-e(secs) | 2-e(secs) |
----------------------------------------
| CPU | 0.00 | 0.03 |
----------------------------------------
| WALL | 0.00 | 0.03 |
----------------------------------------
Task times cpu: 0.1s wall: 0.1s
NWChem Input Module
-------------------
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 182 182 2869 728 468 0 0 68
number of processes/call 1.00e+00 1.00e+00 1.00e+00 0.00e+00 0.00e+00
bytes total: 6.18e+05 3.56e+05 1.04e+05 0.00e+00 0.00e+00 5.44e+02
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 39432 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 18 28
current total bytes 0 0
maximum total bytes 1060104 16000888
maximum total K-bytes 1061 16001
maximum total M-bytes 2 17
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.1s wall: 0.2s

View File

@ -1,626 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym nocenter
O -0.00836667 0.0010006 0.0866404
H 0.0968795 0.837453 -0.346117
H 0.0114839 -0.638453 -0.612122
end
scf
vectors input nwchem_lammps.movecs
end
dft
vectors input nwchem_lammps.movecs
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = almondjoy
program = /home/jboschen/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Fri Jan 31 00:33:40 2020
compiled = Tue_Oct_01_13:20:43_2019
source = /home/jboschen/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 655358 doubles = 5.0 Mbytes
stack = 3014651 doubles = 23.0 Mbytes
global = 5242880 doubles = 40.0 Mbytes (distinct from heap & stack)
total = 8912889 doubles = 68.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = scf
Operation = gradient
Status = ok
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 3 Fri Jan 31 00:33:40 2020
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 ao basis 2 Fri Jan 31 00:33:40 2020
The basis set named "ao basis" is the default AO basis for restart
NWChem Input Module
-------------------
Scaling coordinates for geometry "geometry" by 1.889725989
(inverse scale = 0.529177249)
------
auto-z
------
no constraints, skipping 0.0000000000000000
no constraints, skipping 0.0000000000000000
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 O 8.0000 -0.00836667 0.00100060 0.08664040
2 H 1.0000 0.09687950 0.83745300 -0.34611700
3 H 1.0000 0.01148390 -0.63845300 -0.61212200
Atomic Mass
-----------
O 15.994910
H 1.007825
Effective nuclear repulsion energy (a.u.) 9.2881144400
Nuclear Dipole moment (a.u.)
----------------------------
X Y Z
---------------- ---------------- ----------------
0.0782914233 0.3911823503 -0.5009962172
Z-matrix (autoz)
--------
Units are Angstrom for bonds and degrees for angles
Type Name I J K L M Value
----------- -------- ----- ----- ----- ----- ----- ----------
1 Stretch 1 2 0.94763
2 Stretch 1 3 0.94740
3 Bend 2 1 3 104.86952
XYZ format geometry
-------------------
3
geometry
O -0.00836667 0.00100060 0.08664040
H 0.09687950 0.83745300 -0.34611700
H 0.01148390 -0.63845300 -0.61212200
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 H | 1 O | 1.79077 | 0.94763
3 H | 1 O | 1.79032 | 0.94740
------------------------------------------------------------------------------
number of included internuclear distances: 2
==============================================================================
==============================================================================
internuclear angles
------------------------------------------------------------------------------
center 1 | center 2 | center 3 | degrees
------------------------------------------------------------------------------
2 H | 1 O | 3 H | 104.87
------------------------------------------------------------------------------
number of included internuclear angles: 1
==============================================================================
Basis "ao basis" -> "" (cartesian)
-----
O (Oxygen)
----------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 5.48467170E+03 0.001831
1 S 8.25234950E+02 0.013950
1 S 1.88046960E+02 0.068445
1 S 5.29645000E+01 0.232714
1 S 1.68975700E+01 0.470193
1 S 5.79963530E+00 0.358521
2 S 1.55396160E+01 -0.110778
2 S 3.59993360E+00 -0.148026
2 S 1.01376180E+00 1.130767
3 P 1.55396160E+01 0.070874
3 P 3.59993360E+00 0.339753
3 P 1.01376180E+00 0.727159
4 S 2.70005800E-01 1.000000
5 P 2.70005800E-01 1.000000
6 D 8.00000000E-01 1.000000
H (Hydrogen)
------------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 1.87311370E+01 0.033495
1 S 2.82539370E+00 0.234727
1 S 6.40121700E-01 0.813757
2 S 1.61277800E-01 1.000000
Summary of "ao basis" -> "" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
NWChem SCF Module
-----------------
ao basis = "ao basis"
functions = 19
atoms = 3
closed shells = 5
open shells = 0
charge = 0.00
wavefunction = RHF
input vectors = ./nwchem_lammps.movecs
output vectors = ./nwchem_lammps.movecs
use symmetry = F
symmetry adapt = F
Summary of "ao basis" -> "ao basis" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
Forming initial guess at 0.0s
Loading old vectors from job with title :
Starting SCF solution at 0.0s
----------------------------------------------
Quadratically convergent ROHF
Convergence threshold : 1.000E-04
Maximum no. of iterations : 100
Final Fock-matrix accuracy: 1.000E-07
----------------------------------------------
#quartets = 1.540D+03 #integrals = 1.424D+04 #direct = 0.0% #cached =100.0%
Integral file = ./nwchem_lammps.aoints.0
Record size in doubles = 65536 No. of integs per rec = 43688
Max. records in memory = 2 Max. records in file = 1392051
No. of bits per label = 8 No. of bits per value = 64
iter energy gnorm gmax time
----- ------------------- --------- --------- --------
1 -76.0107350035 4.75D-05 2.49D-05 0.1
Final RHF results
------------------
Total SCF energy = -76.010735003510
One-electron energy = -123.220958992568
Two-electron energy = 37.922109549024
Nuclear repulsion energy = 9.288114440035
Time for solution = 0.0s
Final eigenvalues
-----------------
1
1 -20.5583
2 -1.3466
3 -0.7130
4 -0.5721
5 -0.4985
6 0.2129
7 0.3068
8 1.0286
9 1.1338
10 1.1678
11 1.1807
12 1.3845
13 1.4334
14 2.0187
15 2.0311
ROHF Final Molecular Orbital Analysis
-------------------------------------
Vector 2 Occ=2.000000D+00 E=-1.346587D+00
MO Center= 1.1D-02, 3.1D-02, -8.5D-02, r^2= 5.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
2 0.475648 1 O s 6 0.435095 1 O s
1 -0.209463 1 O s
Vector 3 Occ=2.000000D+00 E=-7.129747D-01
MO Center= 1.5D-02, 3.8D-02, -1.3D-01, r^2= 7.6D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
4 0.500246 1 O py 8 0.299047 1 O py
16 0.232138 2 H s 18 -0.232195 3 H s
Vector 4 Occ=2.000000D+00 E=-5.720760D-01
MO Center= -1.5D-02, -9.7D-03, 1.5D-01, r^2= 6.8D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
5 0.545527 1 O pz 9 0.395332 1 O pz
6 0.326735 1 O s 2 0.164593 1 O s
Vector 5 Occ=2.000000D+00 E=-4.984552D-01
MO Center= -6.2D-03, 4.4D-03, 6.7D-02, r^2= 6.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
3 0.634559 1 O px 7 0.507891 1 O px
Vector 6 Occ=0.000000D+00 E= 2.128732D-01
MO Center= 7.5D-02, 1.3D-01, -6.6D-01, r^2= 2.6D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.437795 1 O s 17 -1.050892 2 H s
19 -1.050374 3 H s 9 -0.494696 1 O pz
5 -0.208359 1 O pz
Vector 7 Occ=0.000000D+00 E= 3.067764D-01
MO Center= 7.1D-02, 1.3D-01, -6.3D-01, r^2= 2.7D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
17 1.413885 2 H s 19 -1.414835 3 H s
8 -0.824411 1 O py 4 -0.320355 1 O py
Vector 8 Occ=0.000000D+00 E= 1.028607D+00
MO Center= 7.1D-03, 2.6D-02, -5.2D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.839269 2 H s 18 -0.838060 3 H s
8 -0.692349 1 O py 17 -0.426291 2 H s
19 0.425092 3 H s 14 -0.319117 1 O dyz
Vector 9 Occ=0.000000D+00 E= 1.133833D+00
MO Center= -2.7D-02, -2.9D-02, 2.6D-01, r^2= 1.5D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.621086 1 O s 2 -0.910150 1 O s
9 0.744864 1 O pz 16 0.490586 2 H s
18 0.491102 3 H s 5 -0.484186 1 O pz
17 -0.426087 2 H s 19 -0.425823 3 H s
10 -0.375325 1 O dxx 15 -0.317874 1 O dzz
Vector 10 Occ=0.000000D+00 E= 1.167849D+00
MO Center= -8.0D-03, 1.6D-03, 8.3D-02, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
7 -1.028149 1 O px 3 0.955686 1 O px
Vector 11 Occ=0.000000D+00 E= 1.180721D+00
MO Center= 1.8D-02, 4.2D-02, -1.5D-01, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.710073 2 H s 18 0.711177 3 H s
5 0.704677 1 O pz 17 -0.389719 2 H s
19 -0.389376 3 H s 6 -0.326170 1 O s
9 -0.288739 1 O pz 13 0.229749 1 O dyy
Vector 12 Occ=0.000000D+00 E= 1.384514D+00
MO Center= -7.4D-04, 1.3D-02, 1.8D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
8 1.510506 1 O py 4 -1.021750 1 O py
17 -0.934844 2 H s 19 0.935260 3 H s
9 0.272171 1 O pz 5 -0.184286 1 O pz
Vector 13 Occ=0.000000D+00 E= 1.433397D+00
MO Center= 4.7D-02, 8.7D-02, -4.1D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 3.628985 1 O s 2 -1.436467 1 O s
9 -1.143870 1 O pz 17 -0.805578 2 H s
19 -0.806493 3 H s 13 -0.635948 1 O dyy
5 0.489050 1 O pz 15 -0.410417 1 O dzz
16 -0.312860 2 H s 18 -0.312722 3 H s
Vector 14 Occ=0.000000D+00 E= 2.018721D+00
MO Center= -1.4D-02, -7.1D-03, 1.3D-01, r^2= 6.2D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
15 0.947149 1 O dzz 14 -0.531399 1 O dyz
13 -0.526961 1 O dyy 10 -0.358371 1 O dxx
12 -0.297495 1 O dxz 6 -0.233087 1 O s
Vector 15 Occ=0.000000D+00 E= 2.031133D+00
MO Center= -8.4D-03, 1.0D-03, 8.7D-02, r^2= 6.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
11 1.681563 1 O dxy 12 0.314688 1 O dxz
center of mass
--------------
x = -0.00258245 y = 0.02272235 z = 0.04407491
moments of inertia (a.u.)
------------------
6.155330507195 -0.266185800841 0.185335033231
-0.266185800841 2.211585220634 -0.350250164177
0.185335033231 -0.350250164177 4.020009073007
Mulliken analysis of the total density
--------------------------------------
Atom Charge Shell Charges
----------- ------ -------------------------------------------------------
1 O 8 8.87 2.00 0.90 2.91 0.91 2.06 0.08
2 H 1 0.57 0.47 0.10
3 H 1 0.57 0.47 0.10
Multipole analysis of the density wrt the origin
------------------------------------------------
L x y z total open nuclear
- - - - ----- ---- -------
0 0 0 0 -0.000000 0.000000 10.000000
1 1 0 0 0.094145 0.000000 0.078291
1 0 1 0 0.148179 0.000000 0.391182
1 0 0 1 -0.851621 0.000000 -0.500996
2 2 0 0 -5.338111 0.000000 0.035987
2 1 1 0 0.149191 0.000000 0.263306
2 1 0 1 -0.084723 0.000000 -0.165556
2 0 2 0 -3.114464 0.000000 3.960160
2 0 1 1 0.205130 0.000000 0.362991
2 0 0 2 -4.329185 0.000000 1.980308
Parallel integral file used 1 records with 0 large values
NWChem Gradients Module
-----------------------
wavefunction = RHF
RHF ENERGY GRADIENTS
atom coordinates gradient
x y z x y z
1 O -0.015811 0.001891 0.163727 -0.000201 -0.000505 0.001671
2 H 0.183076 1.582557 -0.654066 0.000065 -0.000505 -0.001056
3 H 0.021701 -1.206501 -1.156743 0.000136 0.001011 -0.000616
----------------------------------------
| Time | 1-e(secs) | 2-e(secs) |
----------------------------------------
| CPU | 0.00 | 0.03 |
----------------------------------------
| WALL | 0.00 | 0.03 |
----------------------------------------
Task times cpu: 0.1s wall: 0.1s
NWChem Input Module
-------------------
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 46 46 2296 477 27 0 0 68
number of processes/call 1.00e+00 1.00e+00 1.00e+00 0.00e+00 0.00e+00
bytes total: 2.70e+05 1.39e+05 2.27e+04 0.00e+00 0.00e+00 5.44e+02
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 37544 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 18 28
current total bytes 0 0
maximum total bytes 1060104 16000888
maximum total K-bytes 1061 16001
maximum total M-bytes 2 17
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.1s wall: 0.1s

View File

@ -1,447 +0,0 @@
#!/usr/bin/env python
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# https://www.lammps.org/ Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------
# Syntax: nwchem_wrap.py file/zmq ao/pw input_template
#   file/zmq = messaging mode, must match LAMMPS messaging mode
#   ao/pw = basis set mode, selects between atom-centered and plane-wave
#     the input_template file must correspond to the appropriate basis set mode:
#     the "ao" mode supports the scf and dft modules in NWChem,
#     the "pw" mode supports the nwpw module.
#   input_template = NWChem input file used as template, must include a
#     "geometry" block with the atoms in the simulation; dummy
#     xyz coordinates should be included (but are not used).
#     Atom ordering must match LAMMPS input.

# wrapper on NWChem
#   receives message with list of coords
#   creates NWChem inputs
#   invokes NWChem to calculate self-consistent energy of that config
#   reads NWChem outputs
#   sends message with energy, forces, pressure to client
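# NOTE (added annotation, not part of the original wrapper): this script is
# the server half of the LAMMPS client/server coupling. The matching client
# side is a LAMMPS input that issues "message client md file tmp.couple"
# (or the zmq variant) and defines "fix client/md", as in the in.client
# scripts shown later in this example set. A hedged launch sketch is given
# after the end of this script.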
from __future__ import print_function
import sys
version = sys.version_info[0]
if version == 3:
  sys.exit("The CSlib python wrapper does not yet support python 3")
import subprocess
import re
import os
import shutil
from cslib import CSlib
# comment out 2nd line once 1st line is correct for your system
nwchemcmd = "mpirun -np 1 /usr/bin/nwchem"
nwchemcmd = "touch tmp"
# enums matching FixClientMD class in LAMMPS
SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)
# -------------------------------------
# functions
# error message and exit
def error(txt):
  print("ERROR:",txt)
  sys.exit(1)
# -------------------------------------
# read initial input file to setup problem
# return natoms
def nwchem_setup_ao(input):
  template = open(input,'r')
  geometry_block = False
  natoms = 0
  while True:
    line = template.readline()
    if not line: break
    if geometry_block and re.search("end",line):
      geometry_block = False
    if geometry_block and not re.match("#",line):
      natoms += 1
    if re.search("geometry",line):
      geometry_block = True
  return natoms
# -------------------------------------
# write a new input file for NWChem
# assumes the NWChem input geometry is to be specified in angstroms
def nwchem_input_write_ao(input,coords):
  template = open(input,'r')
  new_input = open("nwchem_lammps.nw",'w')
  geometry_block = False
  i = 0
  while True:
    line = template.readline()
    if not line: break
    if geometry_block and not re.match("#",line) and re.search("end",line):
      geometry_block = False
      if os.path.exists("nwchem_lammps.movecs"):
        # The below is hacky, but one of these lines will be ignored
        # by NWChem depending on if the input file is for scf/dft.
        append = "\nscf\n vectors input nwchem_lammps.movecs\nend\n"
        append2 = "\ndft\n vectors input nwchem_lammps.movecs\nend\n"
        line = line + append + append2
    if geometry_block and not re.match("#",line):
      x = coords[3*i+0]
      y = coords[3*i+1]
      z = coords[3*i+2]
      coord_string = " %g %g %g \n" % (x,y,z)
      atom_string = line.split()[0]
      line = atom_string + coord_string
      i += 1
    if (not re.match("#",line)) and re.search("geometry",line):
      geometry_block = True
      line = "geometry units angstrom noautosym nocenter\n"
    print(line,file=new_input,end='')
  new_input.close()
# -------------------------------------
# read a NWChem output nwchem_lammps.out file
def nwchem_read_ao(natoms, log):
  nwchem_output = open(log, 'r')
  energy_pattern = r"Total \w+ energy"
  gradient_pattern = "x y z x y z"
  eout = 0.0
  fout = []
  while True:
    line = nwchem_output.readline()
    if not line: break
    # pattern match for energy
    if re.search(energy_pattern,line):
      eout = float(line.split()[4])
    # pattern match for forces
    if re.search(gradient_pattern, line):
      for i in range(natoms):
        line = nwchem_output.readline()
        forces = line.split()
        fout += [float(forces[5]), float(forces[6]), float(forces[7])]
  # convert units
  hartree2eV = 27.21138602
  bohr2angstrom = 0.52917721092
  eout = eout * hartree2eV
  fout = [i * -hartree2eV/bohr2angstrom for i in fout]
  return eout,fout
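# --- Illustrative sketch, not part of the original wrapper: a minimal,
# --- call-by-hand check of nwchem_read_ao() on a fabricated one-atom log.
# --- The header line is written to literally match gradient_pattern above,
# --- and the gradient row uses the 8-column layout of the NWChem
# --- "RHF ENERGY GRADIENTS" table (index, tag, x, y, z, gx, gy, gz).
def _demo_read_ao():
  import tempfile
  fake_log = ("Total SCF energy = -76.009762913030\n"
              "x y z x y z\n"
              "1 O 0.003724 0.002355 -0.005632 0.000909 -0.019294 0.007866\n")
  tmp = tempfile.NamedTemporaryFile(mode='w',suffix='.out',delete=False)
  tmp.write(fake_log)
  tmp.close()
  energy,forces = nwchem_read_ao(1,tmp.name)
  os.remove(tmp.name)
  print("energy (eV):",energy)
  print("forces (eV/Angstrom):",forces)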
# -------------------------------------
# read initial planewave input file to setup problem
# return natoms,box
def nwchem_setup_pw(input):
  template = open(input,'r')
  geometry_block = False
  system_block = False
  coord_pattern = r"^\s*\w{1,2}(?:\s+-?(?:\d+.?\d*|\d*.?\d+)){3}"
  natoms = 0
  box = []
  while True:
    line = template.readline()
    if not line: break
    if geometry_block and re.search("system crystal",line):
      system_block = True
      for i in range(3):
        line = template.readline()
        line = re.sub(r'd|D', 'e', line)
        box += [float(line.split()[1])]
    if geometry_block and not system_block and re.match("#",line) and re.search("end",line):
      geometry_block = False
    if system_block and re.search("end",line):
      system_block = False
    if geometry_block and not re.match("#",line) and re.search(coord_pattern,line):
      natoms += 1
    if re.search("geometry",line) and not re.match("#",line):
      geometry_block = True
  return natoms,box
# -------------------------------------
# write a new planewave input file for NWChem
# assumes the NWChem input geometry is to be specified fractional coordinates
def nwchem_input_write_pw(input,coords,box):
  template = open(input,'r')
  new_input = open("nwchem_lammps.nw",'w')
  writing_atoms = False
  geometry_block = False
  system_block = False
  coord_pattern = r"^\s*\w{1,2}(?:\s+-?(?:\d+.?\d*|\d*.?\d+)){3}"
  i = 0
  while True:
    line = template.readline()
    if not line: break
    if geometry_block and re.search("system crystal",line):
      system_block = True
    if geometry_block and not system_block and not re.match("#",line) and re.search("end",line):
      geometry_block = False
      if os.path.exists("nwchem_lammps.movecs"):
        append = "\nnwpw\n vectors input nwchem_lammps.movecs\nend\n"
        line = line + append
    if system_block and re.search("end",line):
      system_block = False
    if geometry_block and not re.match("#",line) and re.search(coord_pattern,line):
      x = coords[3*i+0] / box[0]
      y = coords[3*i+1] / box[1]
      z = coords[3*i+2] / box[2]
      coord_string = " %g %g %g \n" % (x,y,z)
      atom_string = line.split()[0]
      line = atom_string + coord_string
      i += 1
    if re.search("geometry",line) and not re.match("#",line):
      geometry_block = True
    print(line,file=new_input,end='')
  new_input.close()
# -------------------------------------
# read a NWChem output nwchem_lammps.out file for planewave calculation
def nwchem_read_pw(log):
  nw_output = open(log, 'r')
  eout = 0.0
  sout = []
  fout = []
  reading_forces = False
  while True:
    line = nw_output.readline()
    if not line: break
    # pattern match for energy
    if re.search("PSPW energy",line):
      eout = float(line.split()[4])
    # pattern match for forces
    if re.search("C\.O\.M", line):
      reading_forces = False
    if reading_forces:
      forces = line.split()
      fout += [float(forces[3]), float(forces[4]), float(forces[5])]
    if re.search("Ion Forces",line):
      reading_forces = True
    # pattern match for stress
    if re.search("=== total gradient ===",line):
      stensor = []
      for i in range(3):
        line = nw_output.readline()
        line = line.replace("S ="," ")
        stress = line.split()
        stensor += [float(stress[1]), float(stress[2]), float(stress[3])]
      sxx = stensor[0]
      syy = stensor[4]
      szz = stensor[8]
      sxy = 0.5 * (float(stensor[1]) + float(stensor[3]))
      sxz = 0.5 * (stensor[2] + stensor[6])
      syz = 0.5 * (stensor[5] + stensor[7])
      sout = [sxx,syy,szz,sxy,sxz,syz]
  # convert units
  hartree2eV = 27.21138602
  bohr2angstrom = 0.52917721092
  austress2bar = 294210156.97
  eout = eout * hartree2eV
  fout = [i * hartree2eV/bohr2angstrom for i in fout]
  sout = [i * austress2bar for i in sout]
  return eout,fout,sout
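# Note (added annotation, not part of the original wrapper): the 3x3 PSPW
# stress tensor parsed above is symmetrized componentwise, e.g.
# sxy = 0.5*(S12 + S21), and returned in Voigt-style order
# (xx,yy,zz,xy,xz,yz); austress2bar converts the atomic pressure unit
# Hartree/Bohr^3 (~2.9421e8 bar) to bars, matching LAMMPS metal units.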
# -------------------------------------
# main program
# command-line args
#
if len(sys.argv) != 4:
  print("Syntax: python nwchem_wrap.py file/zmq ao/pw input_template")
  sys.exit(1)
comm_mode = sys.argv[1]
basis_type = sys.argv[2]
input_template = sys.argv[3]
if comm_mode == "file": cs = CSlib(1,comm_mode,"tmp.couple",None)
elif comm_mode == "zmq": cs = CSlib(1,comm_mode,"*:5555",None)
else:
  print("Syntax: python nwchem_wrap.py file/zmq")
  sys.exit(1)
natoms = 0
box = []
if basis_type == "ao":
natoms = nwchem_setup_ao(input_template)
elif basis_type == "pw":
natoms,box = nwchem_setup_pw(input_template)
# initial message for AIMD protocol
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0: error("Bad initial client/server handshake")
protocol = cs.unpack_string(1)
if protocol != "md": error("Mismatch in client/server protocol")
cs.send(0,0)
# endless server loop
i = 0
if not os.path.exists("nwchem_logs"):
  os.mkdir("nwchem_logs")
while 1:
  # recv message from client
  # msgID = 0 = all-done message
  msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
  if msgID < 0: break
  # SETUP receive at beginning of each run
  # required fields: DIM, PERIODICITY, ORIGIN, BOX,
  #                  NATOMS, COORDS
  # optional fields: others in enum above, but NWChem ignores them
  if msgID == SETUP:
    origin = []
    box_lmp = []
    natoms_recv = ntypes_recv = 0
    types = []
    coords = []
    for field in fieldID:
      if field == DIM:
        dim = cs.unpack_int(DIM)
        if dim != 3: error("NWChem only performs 3d simulations")
      elif field == PERIODICITY:
        periodicity = cs.unpack(PERIODICITY,1)
        if basis_type == "ao":
          if periodicity[0] or periodicity[1] or periodicity[2]:
            error("NWChem AO basis wrapper only currently supports fully aperiodic systems")
        elif basis_type == "pw":
          if not periodicity[0] or not periodicity[1] or not periodicity[2]:
            error("NWChem PW basis wrapper only currently supports fully periodic systems")
      elif field == ORIGIN:
        origin = cs.unpack(ORIGIN,1)
      elif field == BOX:
        box_lmp = cs.unpack(BOX,1)
        if (basis_type == "pw"):
          if (box[0] != box_lmp[0] or box[1] != box_lmp[4] or box[2] != box_lmp[8]):
            error("NWChem wrapper mismatch in box dimensions")
      elif field == NATOMS:
        natoms_recv = cs.unpack_int(NATOMS)
        if natoms != natoms_recv:
          error("NWChem wrapper mismatch in number of atoms")
      elif field == COORDS:
        coords = cs.unpack(COORDS,1)
    if not origin or not box_lmp or not natoms or not coords:
      error("Required NWChem wrapper setup field not received");
  # STEP receive at each timestep of run or minimization
  # required fields: COORDS
  # optional fields: ORIGIN, BOX
  elif msgID == STEP:
    coords = []
    for field in fieldID:
      if field == COORDS:
        coords = cs.unpack(COORDS,1)
    if not coords: error("Required NWChem wrapper step field not received");
  else: error("NWChem wrapper received unrecognized message")
  # unpack coords from client
  # create NWChem input
  if basis_type == "ao":
    nwchem_input_write_ao(input_template,coords)
  elif basis_type == "pw":
    nwchem_input_write_pw(input_template,coords,box)
  # invoke NWChem
  i += 1
  log = "nwchem_lammps.out"
  archive = "nwchem_logs/nwchem_lammps" + str(i) + ".out"
  cmd = nwchemcmd + " nwchem_lammps.nw > " + log
  print("\nLaunching NWChem ...")
  print(cmd)
  subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
  shutil.copyfile(log,archive)
  # process NWChem output
  if basis_type == "ao":
    energy,forces = nwchem_read_ao(natoms,log)
    virial = [0,0,0,0,0,0]
  elif basis_type == "pw":
    energy,forces,virial = nwchem_read_pw(log)
  # return forces, energy to client
  cs.send(msgID,3)
  cs.pack(FORCES,4,3*natoms,forces)
  cs.pack_double(ENERGY,energy)
  cs.pack(VIRIAL,4,6,virial)
# final reply to client
cs.send(0,0)
# clean-up
del cs
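# --- Illustrative launch sketch, not part of the original wrapper. This
# --- script is started first as the server; a LAMMPS client then runs one
# --- of the in.client inputs from this example set with "-v mode file" (or
# --- "zmq"). The template filename, client input name, and LAMMPS binary
# --- name ("lmp_serial") below are placeholders, not files defined here.
#
#   import subprocess
#   server = subprocess.Popen(
#       ["python", "nwchem_wrap.py", "file", "ao", "nwchem_template.nw"])
#   client = subprocess.Popen(
#       ["lmp_serial", "-v", "mode", "file", "-in", "in.client.h2o"])
#   client.wait()
#   server.wait()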

View File

@ -1,15 +0,0 @@
LAMMPS W data file
2 atoms
1 atom types
0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi
Atoms
1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58

View File

@ -1,34 +0,0 @@
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

View File

@ -1,38 +0,0 @@
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
group one id 2
displace_atoms one move 0.1 0.2 0.3
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
dump 1 all custom 1 dump.W.min id type x y z
thermo 1
minimize 1.0e-6 1.0e-6 10 50

View File

@ -1,76 +0,0 @@
LAMMPS (18 Sep 2018)
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
2 atoms
Time spent = 0.000187325 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -549.75686 36815830
1 300 0 0 -549.75686 36815830
2 300 0 0 -549.75686 36815830
3 300 0 0 -549.75686 36815830
Loop time of 0.400933 on 1 procs for 3 steps with 2 atoms
Performance: 0.646 ns/day, 37.123 hours/ns, 7.483 timesteps/s
0.1% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 4.755e-06 | 4.755e-06 | 4.755e-06 | 0.0 | 0.00
Output | 0.00010114 | 0.00010114 | 0.00010114 | 0.0 | 0.03
Modify | 0.40082 | 0.40082 | 0.40082 | 0.0 | 99.97
Other | | 1.232e-05 | | | 0.00
Nlocal: 2 ave 2 max 2 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 7 ave 7 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:09

View File

@ -1,78 +0,0 @@
LAMMPS (19 Sep 2019)
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
reading atoms ...
2 atoms
read_data CPU = 0.0014801 secs
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
2 atoms
replicate CPU = 0.000123978 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 1.801 | 1.801 | 1.801 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -549.75686 36815830
1 298.93216 0 0 -549.75686 36815825
2 295.76254 0 0 -549.75687 36814830
3 290.55935 0 0 -549.75687 36811865
Loop time of 2.60414 on 1 procs for 3 steps with 2 atoms
Performance: 0.100 ns/day, 241.124 hours/ns, 1.152 timesteps/s
0.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 7.1526e-06 | 7.1526e-06 | 7.1526e-06 | 0.0 | 0.00
Output | 0.00012779 | 0.00012779 | 0.00012779 | 0.0 | 0.00
Modify | 2.604 | 2.604 | 2.604 | 0.0 | 99.99
Other | | 9.06e-06 | | | 0.00
Nlocal: 2 ave 2 max 2 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 7 ave 7 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:05

View File

@ -1,92 +0,0 @@
LAMMPS (19 Sep 2019)
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
reading atoms ...
2 atoms
read_data CPU = 0.00183487 secs
mass 1 183.85
group one id 2
1 atoms in group one
displace_atoms one move 0.1 0.2 0.3
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
2 atoms
replicate CPU = 0.000159979 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
dump 1 all custom 1 tmp.dump id type x y z
thermo 1
minimize 1.0e-6 1.0e-6 10 50
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (../min.cpp:174)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 4.676 | 4.676 | 4.676 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -547.52142 28510277
1 300 0 0 -549.43104 35614471
2 300 0 0 -549.75661 36815830
3 300 0 0 -549.75662 36815830
Loop time of 7.71121 on 1 procs for 3 steps with 2 atoms
0.0% CPU use with 1 MPI tasks x no OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-547.560202518 -549.795386038 -549.795398827
Force two-norm initial, final = 16.0041 0.00108353
Force max component initial, final = 9.57978 0.000719909
Final line search alpha, max atom move = 1 0.000719909
Iterations, force evaluations = 3 5
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 9.5367e-07 | 9.5367e-07 | 9.5367e-07 | 0.0 | 0.00
Comm | 1.3113e-05 | 1.3113e-05 | 1.3113e-05 | 0.0 | 0.00
Output | 0.00017023 | 0.00017023 | 0.00017023 | 0.0 | 0.00
Modify | 7.7109 | 7.7109 | 7.7109 | 0.0 |100.00
Other | | 0.0001729 | | | 0.00
Nlocal: 2 ave 2 max 2 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds not checked
Total wall time: 0:00:19

File diff suppressed because it is too large

View File

@ -1,817 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
#**** Enter the geometry using fractional coordinates ****
geometry units angstrom noautosym
system crystal
lat_a 3.16d0
lat_b 3.16d0
lat_c 3.16d0
end
W 0.999335 0.99967 0.998875
W 0.500665 0.50033 0.501125
end
nwpw
vectors input nwchem_lammps.movecs
end
#***** setup the nwpw gamma point code ****
nwpw
simulation_cell
ngrid 16 16 16
end
ewald_ncut 8
mulliken
lcao #old default
end
nwpw
tolerances 1.0d-9 1.0d-9
end
task pspw stress
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = singsing
program = /home/sjplimp/tools/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Thu Oct 3 16:57:17 2019
compiled = Wed_Oct_02_09:25:27_2019
source = /home/sjplimp/tools/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 13107200 doubles = 100.0 Mbytes
stack = 13107197 doubles = 100.0 Mbytes
global = 26214400 doubles = 200.0 Mbytes (distinct from heap & stack)
total = 52428797 doubles = 400.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = pspw
Operation = stress
Status = unknown
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 2 Thu Oct 3 16:57:16 2019
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
There are no basis sets in the database
NWChem Input Module
-------------------
!!!!!!!!! geom_3d NEEDS TESTING !!!!!!!!!!
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 W 74.0000 3.15789860 3.15895720 3.15644500
2 W 74.0000 1.58210140 1.58104280 1.58355500
Lattice Parameters
------------------
lattice vectors in angstroms (scale by 1.889725989 to convert to a.u.)
a1=< 3.160 0.000 0.000 >
a2=< 0.000 3.160 0.000 >
a3=< 0.000 0.000 3.160 >
a= 3.160 b= 3.160 c= 3.160
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 31.6
reciprocal lattice vectors in a.u.
b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
Atomic Mass
-----------
W 183.951000
XYZ format geometry
-------------------
2
geometry
W 3.15789860 3.15895720 3.15644500
W 1.58210140 1.58104280 1.58355500
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 W | 1 W | 5.15689 | 2.72891
------------------------------------------------------------------------------
number of included internuclear distances: 1
==============================================================================
>>>> PSPW Parallel Module - stress <<<<
****************************************************
* *
* NWPW PSPW Calculation *
* *
* [ (Grassmann/Stiefel manifold implementation) ] *
* *
* [ NorthWest Chemistry implementation ] *
* *
* version #5.10 06/12/02 *
* *
* This code was developed by Eric J. Bylaska, *
* and was based upon algorithms and code *
* developed by the group of Prof. John H. Weare *
* *
****************************************************
>>> JOB STARTED AT Thu Oct 3 16:57:17 2019 <<<
================ input data ========================
input psi filename:./nwchem_lammps.movecs
initializing pspw_APC data structure
------------------------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
number of processors used: 1
processor grid : 1 x 1
parallel mapping :2d hilbert
parallel mapping : balanced
number of threads : 1
parallel io : off
options:
boundary conditions = periodic (version3)
electron spin = restricted
exchange-correlation = LDA (Vosko et al) parameterization
elements involved in the cluster:
1: W valence charge: 6.0000 lmax= 2
comment : Troullier-Martins pseudopotential
pseudpotential type : 0
highest angular component : 2
local potential used : 0
number of non-local projections: 8
semicore corrections included : 1.800 (radius) 4.538 (charge)
cutoff = 2.389 3.185 2.244
total charge: 0.000
atomic composition:
W : 2
number of electrons: spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
number of orbitals : spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
supercell:
cell_name: cell_default
lattice: a1=< 5.972 0.000 0.000 >
a2=< 0.000 5.972 0.000 >
a3=< 0.000 0.000 5.972 >
reciprocal: b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
lattice: a= 5.972 b= 5.972 c= 5.972
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 212.9
density cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
wavefnc cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
Ewald summation: cut radius= 1.90 and 8
Madelung Wigner-Seitz= 1.76011888 (alpha= 2.83729748 rs= 3.70444413)
technical parameters:
time step= 5.80 fictitious mass= 400000.0
tolerance=0.100E-08 (energy) 0.100E-08 (density)
maximum iterations = 1000 ( 10 inner 100 outer )
== Energy Calculation ==
====== Grassmann conjugate gradient iteration ======
>>> ITERATION STARTED AT Thu Oct 3 16:57:17 2019 <<<
iter. Energy DeltaE DeltaRho
------------------------------------------------------
10 -0.2020457267E+02 -0.12753E-06 0.54770E-09
20 -0.2020457281E+02 -0.96520E-09 0.65680E-11
*** tolerance ok. iteration terminated
>>> ITERATION ENDED AT Thu Oct 3 16:57:18 2019 <<<
== Summary Of Results ==
number of electrons: spin up= 6.00000 down= 6.00000 (real space)
total energy : -0.2020457281E+02 ( -0.10102E+02/ion)
total orbital energy: 0.5093546150E+01 ( 0.84892E+00/electron)
hartree energy : 0.2903382088E+00 ( 0.48390E-01/electron)
exc-corr energy : -0.9445078100E+01 ( -0.15742E+01/electron)
ion-ion energy : -0.2193939674E+02 ( -0.10970E+02/ion)
kinetic (planewave) : 0.1441586264E+02 ( 0.24026E+01/electron)
V_local (planewave) : 0.1156111351E+02 ( 0.19269E+01/electron)
V_nl (planewave) : -0.1508741234E+02 ( -0.25146E+01/electron)
V_Coul (planewave) : 0.5806764176E+00 ( 0.96779E-01/electron)
V_xc. (planewave) : -0.6376694082E+01 ( -0.10628E+01/electron)
Virial Coefficient : -0.6466707350E+00
orbital energies:
0.5414291E+00 ( 14.733eV)
0.5414285E+00 ( 14.733eV)
0.5414070E+00 ( 14.733eV)
0.3596871E+00 ( 9.788eV)
0.3596781E+00 ( 9.787eV)
0.2031433E+00 ( 5.528eV)
Total PSPW energy : -0.2020457281E+02
=== Spin Contamination ===
<Sexact^2> = 0.0000000000000000
<S^2> = 0.0000000000000000
== Center of Charge ==
spin up ( -0.0030, -0.0015, -0.0050 )
spin down ( -0.0030, -0.0015, -0.0050 )
total ( -0.0030, -0.0015, -0.0050 )
ionic ( -1.4929, -1.4929, -1.4929 )
== Molecular Dipole wrt Center of Mass ==
mu = ( -17.8792, -17.8970, -17.8547 ) au
|mu| = 30.9638 au, 78.6976 Debye
Translation force removed: ( -0.00000 -0.00000 -0.00000)
============= Ion Gradients =================
Ion Forces:
1 W ( 0.002737 0.001358 0.004631 )
2 W ( -0.002737 -0.001358 -0.004631 )
C.O.M. ( 0.000000 0.000000 0.000000 )
===============================================
|F| = 0.784689E-02
|F|/nion = 0.392344E-02
max|Fatom|= 0.554859E-02 ( 0.285eV/Angstrom)
======================
= Stress calculation =
======================
============= total gradient ==============
S = ( 0.12512 0.00000 0.00000 )
( 0.00000 0.12512 0.00001 )
( 0.00000 0.00001 0.12511 )
===================================================
|S| = 0.21671E+00
pressure = 0.125E+00 au
= 0.368E+02 Mbar
= 0.368E+04 GPa
= 0.363E+08 atm
dE/da = 0.12512
dE/db = 0.12512
dE/dc = 0.12511
dE/dalpha = -0.00003
dE/dbeta = -0.00002
dE/dgamma = -0.00001
*************************************************************
** **
** PSPW Mulliken analysis **
** **
** Population analysis algorithm devloped by Ryoichi Kawai **
** **
** Thu Oct 3 16:57 **
** **
*************************************************************
== XYZ OUTPUT ==
2
W -0.002101 -0.001043 -0.003555
W -1.577898 -1.578956 -1.576444
== Atomic Orbital Expansion ==
W nodamping
=====================================================
| POPULATION ANALYSIS OF FILLED MOLECULAR ORBITALS |
=====================================================
== Using pseudoatomic orbital expansion ==
------------------------------------------------------------------------------
*** ORBITAL= 1*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54143E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00000 -0.00018 -0.00011 0.00005
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49999 0.00003 -0.68532 0.00001 0.10591 0.13824
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00001 -0.00003 -0.00187 -0.00238 -0.00028 0.00001 0.00000 -0.00017
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 0.00018 0.00011 -0.00005
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49999 0.00003 -0.68532 0.00001 0.10591 0.13824
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00001 -0.00003 0.00187 0.00238 0.00028 -0.00001 -0.00000 0.00017
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 2*** SPIN=BOTH SUM= 0.12472E+01 E= 0.54143E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00000 0.00002 -0.00005 -0.00011
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49998 -0.00001 -0.02322 0.00001 -0.61187 0.35363
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00002 -0.00001 0.00071 -0.00049 -0.00015 -0.00283 0.00006 0.00266
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00002 0.00005 0.00011
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49998 -0.00001 -0.02322 0.00001 -0.61187 0.35363
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00002 -0.00001 -0.00071 0.00049 0.00015 0.00283 -0.00006 -0.00266
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 3*** SPIN=BOTH SUM= 0.12472E+01 E= 0.54141E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 0.00010 0.00006 0.00020
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49999 0.00000 0.17259 0.00000 0.33820 0.59651
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00001 0.00000 0.00001 -0.00015 0.00015 -0.00033 -0.00325 -0.00033
s
2 W 0 0.00000 -0.00000
px pz py
2 W 1 0.00000 -0.00010 -0.00006 -0.00020
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49999 0.00000 0.17259 0.00000 0.33820 0.59651
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00001 0.00000 -0.00001 0.00015 -0.00015 0.00033 0.00325 0.00033
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 4*** SPIN=BOTH SUM= 0.14577E+01 E= 0.35969E+00 ( 9.788eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00002 0.00162 -0.00440 0.00049
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.48998 -0.09896 0.00001 0.69296 0.00001 -0.00001
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00999 -0.09993 0.00031 -0.00131 -0.00234 -0.00064 0.00000 0.00022
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00002 0.00162 -0.00440 0.00049
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.48998 0.09896 -0.00001 -0.69296 -0.00001 0.00001
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00999 0.09993 0.00031 -0.00131 -0.00234 -0.00064 0.00000 0.00022
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9800 0.0200
------------------------------------------------------------------------------
*** ORBITAL= 5*** SPIN=BOTH SUM= 0.14616E+01 E= 0.35968E+00 ( 9.787eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00001 0.00206 0.00063 -0.00121
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.48871 -0.69206 -0.00002 -0.09883 0.00001 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.01129 -0.10621 0.00214 0.00009 0.00033 0.00014 0.00000 0.00063
s
2 W 0 0.00000 -0.00000
px pz py
2 W 1 0.00001 0.00206 0.00063 -0.00121
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.48871 0.69206 0.00002 0.09883 -0.00001 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.01129 0.10621 0.00214 0.00009 0.00033 0.00014 0.00000 0.00063
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9774 0.0226
------------------------------------------------------------------------------
*** ORBITAL= 6*** SPIN=BOTH SUM= 0.19540E+01 E= 0.20314E+00 ( 5.528eV)
NO ATOM L POPULATION
s
1 W 0 0.49974 -0.70692
px pz py
1 W 1 0.00000 0.00028 0.00047 0.00014
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.00000 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00026 0.01609 -0.00000 -0.00007 0.00021 -0.00003 0.00000 -0.00004
s
2 W 0 0.49974 -0.70692
px pz py
2 W 1 0.00000 -0.00028 -0.00047 -0.00014
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.00000 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00026 0.01609 0.00000 0.00007 -0.00021 0.00003 -0.00000 0.00004
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.9995 0.0000 0.0000 0.0005
========================================
| POPULATION ANALYSIS ON EACH ATOM |
========================================
NO ATOM SPIN TOTAL s p d f
1 W UP 3.00000 0.49974 0.00003 2.47866 0.02157
1 W DOWN 3.00000 0.49974 0.00003 2.47866 0.02157
2 W UP 3.00000 0.49974 0.00003 2.47866 0.02157
2 W DOWN 3.00000 0.49974 0.00003 2.47866 0.02157
=== TOTAL ANGULAR MOMENTUM POPULATION ===
SPIN s p d f
UP 16.66% 0.00% 82.62% 0.72%
UP 16.66% 0.00% 82.62% 0.72%
TOTAL 16.66% 0.00% 82.62% 0.72%
*************************************************************
** **
** PSPW Atomic Point Charge (APC) Analysis **
** **
** Point charge analysis based on paper by P.E. Blochl **
** (J. Chem. Phys. vol 103, page 7422, 1995) **
** **
*************************************************************
pspw_APC data structure
-----------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
charge analysis on each atom
----------------------------
no atom Qelc Qion Qtotal
-- ---- ------- ------- -------
1 W -6.000 6.000 -0.000
2 W -6.000 6.000 -0.000
Total Q -12.000 12.000 -0.000
gaussian coefficients of model density
--------------------------------------
no atom g=0.000 g=0.600 g=0.900 g=1.350
-- ---- ------- ------- ------- -------
1 W 6.000 -7.235 17.653 -16.419
2 W 6.000 -7.235 17.653 -16.419
=== Electric Field at Atoms ===
1 W Atomic Electric Field =( -0.00022 -0.00011 -0.00038 )
(ion) =( 0.00094 0.00047 0.00159 )
(electronic) =( -0.00116 -0.00058 -0.00197 )
2 W Atomic Electric Field =( 0.00022 0.00011 0.00038 )
(ion) =( -0.00094 -0.00047 -0.00159 )
(electronic) =( 0.00116 0.00058 0.00197 )
output psi filename:./nwchem_lammps.movecs
== Timing ==
cputime in seconds
prologue : 0.114428E+00
main loop : 0.475396E+00
epilogue : 0.316691E-01
total : 0.621493E+00
cputime/step: 0.559289E-02 ( 85 evalulations, 20 linesearches)
Time spent doing total step percent
total time : 0.623259E+00 0.733246E-02 100.0 %
i/o time : 0.103071E-01 0.121260E-03 1.7 %
FFTs : 0.348712E-01 0.410250E-03 5.6 %
dot products : 0.981057E-02 0.115418E-03 1.6 %
geodesic : 0.696999E-01 0.819999E-03 11.2 %
ffm_dgemm : 0.104145E-02 0.122523E-04 0.2 %
fmf_dgemm : 0.565297E-01 0.665055E-03 9.1 %
mmm_dgemm : 0.129490E-03 0.152342E-05 0.0 %
m_diagonalize : 0.701885E-03 0.825747E-05 0.1 %
exchange correlation : 0.764353E-01 0.899239E-03 12.3 %
local pseudopotentials : 0.439882E-03 0.517509E-05 0.1 %
non-local pseudopotentials : 0.271890E-01 0.319871E-03 4.4 %
hartree potentials : 0.202482E-02 0.238214E-04 0.3 %
ion-ion interaction : 0.104062E+00 0.122426E-02 16.7 %
structure factors : 0.152984E-01 0.179981E-03 2.5 %
phase factors : 0.107278E-04 0.126210E-06 0.0 %
masking and packing : 0.304392E-01 0.358108E-03 4.9 %
queue fft : 0.111536E+00 0.131219E-02 17.9 %
queue fft (serial) : 0.708244E-01 0.833228E-03 11.4 %
queue fft (message passing): 0.360800E-01 0.424470E-03 5.8 %
non-local psp FFM : 0.860008E-02 0.101177E-03 1.4 %
non-local psp FMF : 0.111482E-01 0.131155E-03 1.8 %
non-local psp FFM A : 0.214632E-02 0.252509E-04 0.3 %
non-local psp FFM B : 0.560879E-02 0.659858E-04 0.9 %
>>> JOB COMPLETED AT Thu Oct 3 16:57:18 2019 <<<
Task times cpu: 0.6s wall: 0.6s
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 0 0 0 0 0 0 0 0
number of processes/call 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes total: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 0 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 294 17
current total bytes 0 0
maximum total bytes 4879496 351944
maximum total K-bytes 4880 352
maximum total M-bytes 5 1
NWChem Input Module
-------------------
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.6s wall: 0.7s

View File

@ -1,816 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
#**** Enter the geometry using fractional coordinates ****
geometry units angstrom noautosym
system crystal
lat_a 3.16d0
lat_b 3.16d0
lat_c 3.16d0
end
W 0.0158218 0.0316436 0.0474661
W 0.515824 0.531647 0.547471
end
nwpw
vectors input nwchem_lammps.movecs
end
#***** setup the nwpw gamma point code ****
nwpw
simulation_cell
ngrid 16 16 16
end
ewald_ncut 8
mulliken
lcao #old default
end
nwpw
tolerances 1.0d-9 1.0d-9
end
task pspw stress
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = singsing
program = /home/sjplimp/tools/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Thu Oct 3 16:58:54 2019
compiled = Wed_Oct_02_09:25:27_2019
source = /home/sjplimp/tools/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 13107200 doubles = 100.0 Mbytes
stack = 13107197 doubles = 100.0 Mbytes
global = 26214400 doubles = 200.0 Mbytes (distinct from heap & stack)
total = 52428797 doubles = 400.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = pspw
Operation = stress
Status = unknown
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 2 Thu Oct 3 16:58:53 2019
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
There are no basis sets in the database
NWChem Input Module
-------------------
!!!!!!!!! geom_3d NEEDS TESTING !!!!!!!!!!
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 W 74.0000 0.04999689 0.09999378 0.14999288
2 W 74.0000 1.63000384 1.68000452 1.73000836
Lattice Parameters
------------------
lattice vectors in angstroms (scale by 1.889725989 to convert to a.u.)
a1=< 3.160 0.000 0.000 >
a2=< 0.000 3.160 0.000 >
a3=< 0.000 0.000 3.160 >
a= 3.160 b= 3.160 c= 3.160
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 31.6
reciprocal lattice vectors in a.u.
b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
Atomic Mass
-----------
W 183.951000
XYZ format geometry
-------------------
2
geometry
W 0.04999689 0.09999378 0.14999288
W 1.63000384 1.68000452 1.73000836
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 W | 1 W | 5.17154 | 2.73666
------------------------------------------------------------------------------
number of included internuclear distances: 1
==============================================================================
>>>> PSPW Parallel Module - stress <<<<
****************************************************
* *
* NWPW PSPW Calculation *
* *
* [ (Grassmann/Stiefel manifold implementation) ] *
* *
* [ NorthWest Chemistry implementation ] *
* *
* version #5.10 06/12/02 *
* *
* This code was developed by Eric J. Bylaska, *
* and was based upon algorithms and code *
* developed by the group of Prof. John H. Weare *
* *
****************************************************
>>> JOB STARTED AT Thu Oct 3 16:58:54 2019 <<<
================ input data ========================
input psi filename:./nwchem_lammps.movecs
initializing pspw_APC data structure
------------------------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
number of processors used: 1
processor grid : 1 x 1
parallel mapping :2d hilbert
parallel mapping : balanced
number of threads : 1
parallel io : off
options:
boundary conditions = periodic (version3)
electron spin = restricted
exchange-correlation = LDA (Vosko et al) parameterization
elements involved in the cluster:
1: W valence charge: 6.0000 lmax= 2
comment : Troullier-Martins pseudopotential
pseudpotential type : 0
highest angular component : 2
local potential used : 0
number of non-local projections: 8
semicore corrections included : 1.800 (radius) 4.538 (charge)
cutoff = 2.389 3.185 2.244
total charge: 0.000
atomic composition:
W : 2
number of electrons: spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
number of orbitals : spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
supercell:
cell_name: cell_default
lattice: a1=< 5.972 0.000 0.000 >
a2=< 0.000 5.972 0.000 >
a3=< 0.000 0.000 5.972 >
reciprocal: b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
lattice: a= 5.972 b= 5.972 c= 5.972
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 212.9
density cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
wavefnc cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
Ewald summation: cut radius= 1.90 and 8
Madelung Wigner-Seitz= 1.76011888 (alpha= 2.83729748 rs= 3.70444413)
technical parameters:
time step= 5.80 fictitious mass= 400000.0
tolerance=0.100E-08 (energy) 0.100E-08 (density)
maximum iterations = 1000 ( 10 inner 100 outer )
== Energy Calculation ==
====== Grassmann conjugate gradient iteration ======
>>> ITERATION STARTED AT Thu Oct 3 16:58:54 2019 <<<
iter. Energy DeltaE DeltaRho
------------------------------------------------------
10 -0.2020460841E+02 -0.37164E-09 0.13892E-11
*** tolerance ok. iteration terminated
>>> ITERATION ENDED AT Thu Oct 3 16:58:54 2019 <<<
== Summary Of Results ==
number of electrons: spin up= 6.00000 down= 6.00000 (real space)
total energy : -0.2020460841E+02 ( -0.10102E+02/ion)
total orbital energy: 0.5093526999E+01 ( 0.84892E+00/electron)
hartree energy : 0.2902689593E+00 ( 0.48378E-01/electron)
exc-corr energy : -0.9445045626E+01 ( -0.15742E+01/electron)
ion-ion energy : -0.2193948849E+02 ( -0.10970E+02/ion)
kinetic (planewave) : 0.1441573280E+02 ( 0.24026E+01/electron)
V_local (planewave) : 0.1156119613E+02 ( 0.19269E+01/electron)
V_nl (planewave) : -0.1508727219E+02 ( -0.25145E+01/electron)
V_Coul (planewave) : 0.5805379185E+00 ( 0.96756E-01/electron)
V_xc. (planewave) : -0.6376667662E+01 ( -0.10628E+01/electron)
Virial Coefficient : -0.6466688811E+00
orbital energies:
0.5414223E+00 ( 14.733eV)
0.5414201E+00 ( 14.733eV)
0.5414174E+00 ( 14.733eV)
0.3596809E+00 ( 9.787eV)
0.3596804E+00 ( 9.787eV)
0.2031424E+00 ( 5.528eV)
Total PSPW energy : -0.2020460841E+02
=== Spin Contamination ===
<Sexact^2> = 0.0000000000000000
<S^2> = 0.0000000000000000
== Center of Charge ==
spin up ( 0.0106, 0.0203, 0.0283 )
spin down ( 0.0106, 0.0203, 0.0283 )
total ( 0.0106, 0.0203, 0.0283 )
ionic ( -1.3984, -1.3039, -1.2094 )
== Molecular Dipole wrt Center of Mass ==
mu = ( -16.9083, -15.8910, -14.8528 ) au
|mu| = 27.5503 au, 70.0218 Debye
Translation force removed: ( -0.00002 0.00000 0.00002)
============= Ion Gradients =================
Ion Forces:
1 W ( -0.000001 0.000005 0.000014 )
2 W ( 0.000001 -0.000005 -0.000014 )
C.O.M. ( -0.000000 0.000000 0.000000 )
===============================================
|F| = 0.216488E-04
|F|/nion = 0.108244E-04
max|Fatom|= 0.153080E-04 ( 0.001eV/Angstrom)
======================
= Stress calculation =
======================
============= total gradient ==============
S = ( 0.12513 0.00001 -0.00003 )
( 0.00001 0.12513 -0.00001 )
( -0.00003 -0.00001 0.12513 )
===================================================
|S| = 0.21673E+00
pressure = 0.125E+00 au
= 0.368E+02 Mbar
= 0.368E+04 GPa
= 0.363E+08 atm
dE/da = 0.12513
dE/db = 0.12513
dE/dc = 0.12513
dE/dalpha = 0.00006
dE/dbeta = 0.00020
dE/dgamma = -0.00008
*************************************************************
** **
** PSPW Mulliken analysis **
** **
** Population analysis algorithm devloped by Ryoichi Kawai **
** **
** Thu Oct 3 16:58 **
** **
*************************************************************
== XYZ OUTPUT ==
2
W 0.049997 0.099994 0.149993
W -1.529995 -1.479995 -1.429991
== Atomic Orbital Expansion ==
W nodamping
=====================================================
| POPULATION ANALYSIS OF FILLED MOLECULAR ORBITALS |
=====================================================
== Using pseudoatomic orbital expansion ==
------------------------------------------------------------------------------
*** ORBITAL= 1*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54142E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00000 0.00000 0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.50000 -0.00001 -0.03953 0.00002 0.50309 0.49532
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00000 -0.00001 -0.00000 -0.00000 0.00000 0.00000 -0.00001 -0.00000
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00000 -0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.50000 -0.00001 -0.03953 0.00002 0.50309 0.49532
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00000 -0.00001 0.00000 0.00000 -0.00000 -0.00000 0.00001 0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 2*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54142E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 0.00000 0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.50000 0.00004 0.62658 0.00003 -0.20360 0.25680
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00000 -0.00004 0.00000 0.00000 -0.00000 -0.00000 -0.00001 0.00000
s
2 W 0 0.00000 -0.00000
px pz py
2 W 1 0.00000 -0.00000 -0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.50000 0.00004 0.62658 0.00003 -0.20360 0.25680
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00000 -0.00004 -0.00000 -0.00000 -0.00000 0.00000 0.00001 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 3*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54142E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00001
px pz py
1 W 1 0.00000 0.00000 -0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.50000 -0.00001 -0.32532 -0.00000 -0.45327 0.43441
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00000 0.00001 0.00000 -0.00000 -0.00000 -0.00000 -0.00000 0.00000
s
2 W 0 0.00000 0.00001
px pz py
2 W 1 0.00000 -0.00000 0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.50000 -0.00001 -0.32532 -0.00000 -0.45327 0.43441
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00000 0.00001 -0.00000 0.00001 0.00000 0.00000 0.00000 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 4*** SPIN=BOTH SUM= 0.14785E+01 E= 0.35968E+00 ( 9.787eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 -0.00000 0.00001 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.48310 0.33381 0.00000 -0.60965 0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.01690 0.13001 -0.00000 0.00000 0.00000 0.00000 0.00000 -0.00000
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00000 0.00001 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.48310 -0.33381 -0.00000 0.60965 -0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.01690 -0.13001 -0.00000 0.00000 0.00000 0.00000 0.00000 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9662 0.0338
------------------------------------------------------------------------------
*** ORBITAL= 5*** SPIN=BOTH SUM= 0.14407E+01 E= 0.35968E+00 ( 9.787eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 -0.00000 -0.00000 0.00001
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49580 0.61761 -0.00000 0.33817 0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00420 0.06484 -0.00001 0.00000 -0.00000 -0.00000 -0.00000 -0.00000
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00000 -0.00000 0.00001
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49580 -0.61761 0.00000 -0.33817 -0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00420 -0.06484 -0.00001 0.00000 -0.00000 -0.00000 -0.00000 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9916 0.0084
------------------------------------------------------------------------------
*** ORBITAL= 6*** SPIN=BOTH SUM= 0.19540E+01 E= 0.20314E+00 ( 5.528eV)
NO ATOM L POPULATION
s
1 W 0 0.49974 -0.70692
px pz py
1 W 1 0.00000 0.00000 -0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.00000 0.00000 -0.00000 -0.00000 -0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00026 0.01609 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000
s
2 W 0 0.49974 -0.70692
px pz py
2 W 1 0.00000 -0.00000 0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.00000 0.00000 -0.00000 -0.00000 -0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00026 0.01609 0.00000 0.00000 -0.00000 0.00000 0.00000 0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.9995 0.0000 0.0000 0.0005
========================================
| POPULATION ANALYSIS ON EACH ATOM |
========================================
NO ATOM SPIN TOTAL s p d f
1 W UP 3.00000 0.49974 0.00000 2.47889 0.02137
1 W DOWN 3.00000 0.49974 0.00000 2.47889 0.02137
2 W UP 3.00000 0.49974 0.00000 2.47889 0.02137
2 W DOWN 3.00000 0.49974 0.00000 2.47889 0.02137
=== TOTAL ANGULAR MOMENTUM POPULATION ===
SPIN s p d f
UP 16.66% 0.00% 82.63% 0.71%
UP 16.66% 0.00% 82.63% 0.71%
TOTAL 16.66% 0.00% 82.63% 0.71%
*************************************************************
** **
** PSPW Atomic Point Charge (APC) Analysis **
** **
** Point charge analysis based on paper by P.E. Blochl **
** (J. Chem. Phys. vol 103, page 7422, 1995) **
** **
*************************************************************
pspw_APC data structure
-----------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
charge analysis on each atom
----------------------------
no atom Qelc Qion Qtotal
-- ---- ------- ------- -------
1 W -6.000 6.000 -0.000
2 W -6.000 6.000 0.000
Total Q -12.000 12.000 -0.000
gaussian coefficients of model density
--------------------------------------
no atom g=0.000 g=0.600 g=0.900 g=1.350
-- ---- ------- ------- ------- -------
1 W 6.000 -7.235 17.654 -16.419
2 W 6.000 -7.234 17.651 -16.418
=== Electric Field at Atoms ===
1 W Atomic Electric Field =( -0.00002 0.00000 0.00001 )
(ion) =( 0.00000 0.00000 0.00000 )
(electronic) =( -0.00002 -0.00000 0.00001 )
2 W Atomic Electric Field =( -0.00002 0.00000 0.00002 )
(ion) =( -0.00000 -0.00000 -0.00000 )
(electronic) =( -0.00002 0.00000 0.00002 )
output psi filename:./nwchem_lammps.movecs
== Timing ==
cputime in seconds
prologue : 0.991130E-01
main loop : 0.101190E+00
epilogue : 0.203540E-01
total : 0.220657E+00
cputime/step: 0.252975E-01 ( 4 evalulations, 1 linesearches)
Time spent doing total step percent
total time : 0.222262E+00 0.555655E-01 100.0 %
i/o time : 0.847340E-02 0.211835E-02 3.8 %
FFTs : 0.576015E-02 0.144004E-02 2.6 %
dot products : 0.157053E-02 0.392634E-03 0.7 %
geodesic : 0.203228E-02 0.508070E-03 0.9 %
ffm_dgemm : 0.641376E-04 0.160344E-04 0.0 %
fmf_dgemm : 0.202988E-02 0.507471E-03 0.9 %
mmm_dgemm : 0.286302E-05 0.715756E-06 0.0 %
m_diagonalize : 0.101088E-03 0.252721E-04 0.0 %
exchange correlation : 0.287819E-02 0.719547E-03 1.3 %
local pseudopotentials : 0.346661E-03 0.866652E-04 0.2 %
non-local pseudopotentials : 0.268912E-02 0.672280E-03 1.2 %
hartree potentials : 0.163791E-03 0.409476E-04 0.1 %
ion-ion interaction : 0.699389E-01 0.174847E-01 31.5 %
structure factors : 0.889608E-02 0.222402E-02 4.0 %
phase factors : 0.102510E-04 0.256275E-05 0.0 %
masking and packing : 0.839656E-02 0.209914E-02 3.8 %
queue fft : 0.418949E-02 0.104737E-02 1.9 %
queue fft (serial) : 0.264608E-02 0.661519E-03 1.2 %
queue fft (message passing): 0.136477E-02 0.341193E-03 0.6 %
non-local psp FFM : 0.391964E-03 0.979910E-04 0.2 %
non-local psp FMF : 0.407219E-03 0.101805E-03 0.2 %
non-local psp FFM A : 0.144235E-03 0.360588E-04 0.1 %
non-local psp FFM B : 0.216961E-03 0.542402E-04 0.1 %
>>> JOB COMPLETED AT Thu Oct 3 16:58:54 2019 <<<
Task times cpu: 0.2s wall: 0.2s
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 0 0 0 0 0 0 0 0
number of processes/call 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes total: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 0 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 294 17
current total bytes 0 0
maximum total bytes 4879496 351944
maximum total K-bytes 4880 352
maximum total M-bytes 5 1
NWChem Input Module
-------------------
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.2s wall: 0.3s

View File

@ -1,28 +0,0 @@
echo
#**** Enter the geometry using fractional coordinates ****
geometry units angstrom noautosym
system crystal
lat_a 3.16d0
lat_b 3.16d0
lat_c 3.16d0
end
W 0.0 0.0 0.0
W 0.5 0.5 0.5
end
#***** setup the nwpw gamma point code ****
nwpw
simulation_cell
ngrid 16 16 16
end
ewald_ncut 8
mulliken
lcao #old default
end
nwpw
tolerances 1.0d-9 1.0d-9
end
task pspw stress

View File

@ -1,53 +0,0 @@
# Startparameter for this run:
NWRITE = 2 write-flag & timer
PREC = normal normal or accurate (medium, high low for compatibility)
ISTART = 0 job : 0-new 1-cont 2-samecut
ICHARG = 2 charge: 1-file 2-atom 10-const
ISPIN = 1 spin polarized calculation?
LSORBIT = F spin-orbit coupling
INIWAV = 1 electr: 0-lowe 1-rand 2-diag
# Electronic Relaxation 1
ENCUT = 600.0 eV #Plane wave energy cutoff
ENINI = 600.0 initial cutoff
NELM = 100; NELMIN= 2; NELMDL= -5 # of ELM steps
EDIFF = 0.1E-05 stopping-criterion for ELM
# Ionic relaxation
EDIFFG = 0.1E-02 stopping-criterion for IOM
NSW = 0 number of steps for IOM
NBLOCK = 1; KBLOCK = 1 inner block; outer block
IBRION = -1 ionic relax: 0-MD 1-quasi-New 2-CG #No ion relaxation with -1
NFREE = 0 steps in history (QN), initial steepest desc. (CG)
ISIF = 2 stress and relaxation # 2: F-yes Sts-yes RlxIon-yes cellshape-no cellvol-no
IWAVPR = 10 prediction: 0-non 1-charg 2-wave 3-comb # 10: TMPCAR stored in memory rather than file
POTIM = 0.5000 time-step for ionic-motion
TEBEG = 3500.0; TEEND = 3500.0 temperature during run # Finite Temperature variables if AI-MD is on
SMASS = -3.00 Nose mass-parameter (am)
estimated Nose-frequenzy (Omega) = 0.10E-29 period in steps =****** mass= -0.366E-27a.u.
PSTRESS= 0.0 pullay stress
# DOS related values:
EMIN = 10.00; EMAX =-10.00 energy-range for DOS
EFERMI = 0.00
ISMEAR = 0; SIGMA = 0.10 broadening in eV -4-tet -1-fermi 0-gaus
# Electronic relaxation 2 (details)
IALGO = 48 algorithm
# Write flags
LWAVE = T write WAVECAR
LCHARG = T write CHGCAR
LVTOT = F write LOCPOT, total local potential
LVHAR = F write LOCPOT, Hartree potential only
LELF = F write electronic localiz. function (ELF)
# Dipole corrections
LMONO = F monopole corrections only (constant potential shift)
LDIPOL = F correct potential (dipole corrections)
IDIPOL = 0 1-x, 2-y, 3-z, 4-all directions
EPSILON= 1.0000000 bulk dielectric constant
# Exchange correlation treatment:
GGA = -- GGA type

View File

@ -1,6 +0,0 @@
K-Points
0
Monkhorst Pack
15 15 15
0 0 0

View File

@ -1,11 +0,0 @@
W unit cell
1.0
3.16 0.00000000 0.00000000
0.00000000 3.16 0.00000000
0.00000000 0.00000000 3.16
W
2
Direct
0.00000000 0.00000000 0.00000000
0.50000000 0.50000000 0.50000000

View File

@ -1,149 +0,0 @@
Sample LAMMPS MD wrapper on VASP quantum DFT via client/server
coupling
See the MESSAGE package documentation in Build_extras.html#message for
more details on how client/server coupling works in LAMMPS.
In this dir, the vasp_wrap.py is a wrapper on the VASP quantum DFT
code so it can work as a "server" code which LAMMPS drives as a
"client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends VASP a current set of coordinates each timestep,
VASP computes forces and energy and virial and returns that info to
LAMMPS.
Messages are exchanged between the vasp_wrap.py wrapper and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
vasp_wrap.py program became parallel, or the CSlib library calls were
integrated into VASP directly, then data could also be exchanged via
MPI.
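For orientation, here is a minimal sketch of the server-side message loop
that vasp_wrap.py implements. The CSlib calls and field enums mirror the
full script further below; compute_forces() is only a placeholder for the
quantum calculation, so treat this as an illustration of the protocol
rather than a working server.

from cslib import CSlib

# field enums matching the FixClientMD class in LAMMPS (same values as in vasp_wrap.py)
SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)

def compute_forces(coords):
    # placeholder: a real server would run the QM code here and parse its output
    natoms = len(coords)//3
    return 0.0, [0.0]*(3*natoms), [0.0]*6

cs = CSlib(1,"file","tmp.couple",None)      # or CSlib(1,"zmq","*:5555",None)

# initial handshake: client announces the "md" protocol, server acknowledges
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
protocol = cs.unpack_string(1)
if protocol != "md": raise SystemExit("unexpected client/server protocol")
cs.send(0,0)

while 1:
    msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
    if msgID < 0: break                     # all-done message from the client
    coords = cs.unpack(COORDS,1)            # coordinates sent with SETUP or STEP
    energy,forces,virial = compute_forces(coords)
    cs.send(msgID,3)                        # reply with 3 fields
    cs.pack(FORCES,4,len(coords),forces)
    cs.pack_double(ENERGY,energy)
    cs.pack(VIRIAL,4,6,virial)

cs.send(0,0)                                # final empty reply to the client
del cs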
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the vasp_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This will make shared library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it cannot find them, you will get an error when
you run vasp_wrap.py.
You can do this by augmenting two environment variables, either
from the command line, or in your shell start-up script.
Here is the sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
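For bash-type shells the equivalent syntax is (assuming the same directory):
export PYTHONPATH=${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src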
----------------
Prepare to use VASP and the vasp_wrapper.py script
You can run the vasp_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
vasprun.xml file output by a previous VASP run.
But note that the as-is version of vasp_wrap.py will not attempt to
run VASP.
To have the wrapper launch VASP, you must edit the 1st vaspcmd line at the
top of vasp_wrap.py to be the launch command needed to run VASP on your
system. It can be a command to run VASP in serial or in parallel,
e.g. an mpirun command. Then comment out the 2nd vaspcmd line
immediately following it.
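For example, the edited line might look like this (a hypothetical launch
command; use the VASP executable path and MPI settings for your system):
vaspcmd = "mpirun -np 8 /path/to/vasp_std"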
Ensure you have the necessary VASP input files in this
directory, suitable for the VASP calculation you want to perform:
INCAR
KPOINTS
POSCAR_template
POTCAR
Examples of all but the POTCAR file are provided. As explained below,
POSCAR_W is an input file for a 2-atom unit cell of tungsten and can
be used to test the LAMMPS/VASP coupling. The POTCAR file is a
proprietary VASP file, so use one from your VASP installation.
Note that the POSCAR_template file should be matched to the LAMMPS
input script (# of atoms and atom types, box size, etc). The provided
POSCAR_W matches in.client.W.
Once you run VASP yourself, the vasprun.xml file will be overwritten.
----------------
To run in client/server mode:
NOTE: The vasp_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (vasp_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
vasp_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The vasp_wrap.py code is always run in serial, but it
launches VASP from Python via an mpirun command which can run VASP
itself in parallel.
When you run, the server should print out thermodynamic info every
timestep which corresponds to the forces and virial computed by VASP.
VASP will also generate output files each timestep. The vasp_wrap.py
script could be generalized to archive these.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file -in in.client.W
% python vasp_wrap.py file POSCAR_W
% mpirun -np 2 lmp_mpi -v mode file -in in.client.W
% python vasp_wrap.py file POSCAR_W
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq -in in.client.W
% python vasp_wrap.py zmq POSCAR_W
% mpirun -np 2 lmp_mpi -v mode zmq -in in.client.W
% python vasp_wrap.py zmq POSCAR_W

View File

@ -1,15 +0,0 @@
LAMMPS W data file
2 atoms
1 atom types
0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi
Atoms
1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58

View File

@ -1,35 +0,0 @@
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
message quit

View File

@ -1,76 +0,0 @@
LAMMPS (22 Aug 2018)
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md zmq localhost:5555
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
2 atoms
Time spent = 0.000148058 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -48.030793 -78159.503
1 298.24318 0 0 -48.03102 -78167.19
2 296.85584 0 0 -48.031199 -78173.26
3 295.83795 0 0 -48.031331 -78177.714
Loop time of 0.457491 on 2 procs for 3 steps with 2 atoms
Performance: 0.567 ns/day, 42.360 hours/ns, 6.558 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 1.3828e-05 | 2.9922e-05 | 4.6015e-05 | 0.0 | 0.01
Output | 7.5817e-05 | 9.3937e-05 | 0.00011206 | 0.0 | 0.02
Modify | 0.45735 | 0.45736 | 0.45736 | 0.0 | 99.97
Other | | 1.204e-05 | | | 0.00
Nlocal: 1 ave 1 max 1 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 4 ave 4 max 4 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:01:21

View File

@ -1,300 +0,0 @@
#!/usr/bin/env python
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# https://www.lammps.org/ Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------
# Syntax: vasp_wrap.py file/zmq POSCARfile
# wrapper on VASP to act as server program using CSlib
# receives message with list of coords from client
# creates VASP inputs
# invokes VASP to calculate self-consistent energy of that config
# reads VASP outputs
# sends message with energy, forces, pressure to client
# NOTES:
# check to ensure basic VASP input files are in place?
# could archive VASP input/output in special filenames or dirs?
# need to check that POTCAR file is consistent with atom ordering?
# could make syntax for launching VASP more flexible
# e.g. command-line arg for # of procs
# detect if VASP had an error and return ERROR field, e.g. non-convergence ??
from __future__ import print_function
import sys
version = sys.version_info[0]
if version == 3:
sys.exit("The CSlib python wrapper does not yet support python 3")
import subprocess
import xml.etree.ElementTree as ET
from cslib import CSlib
# comment out 2nd line once 1st line is correct for your system
vaspcmd = "srun -N 1 --ntasks-per-node=4 " + \
"-n 4 /projects/vasp/2017-build/cts1/vasp5.4.4/vasp_tfermi/bin/vasp_std"
vaspcmd = "touch tmp"
# enums matching FixClientMD class in LAMMPS
SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)
# -------------------------------------
# functions
# error message and exit
def error(txt):
print("ERROR:",txt)
sys.exit(1)
# -------------------------------------
# read initial VASP POSCAR file to setup problem
# return natoms,ntypes,box
def vasp_setup(poscar):
ps = open(poscar,'r').readlines()
# box size
words = ps[2].split()
xbox = float(words[0])
words = ps[3].split()
ybox = float(words[1])
words = ps[4].split()
zbox = float(words[2])
box = [xbox,ybox,zbox]
ntypes = 0
natoms = 0
words = ps[6].split()
for word in words:
if word == '#': break
ntypes += 1
natoms += int(word)
return natoms,ntypes,box
# -------------------------------------
# write a new POSCAR file for VASP
def poscar_write(poscar,natoms,ntypes,types,coords,box):
psold = open(poscar,'r').readlines()
psnew = open("POSCAR",'w')
# header, including box size
psnew.write(psold[0])
psnew.write(psold[1])
psnew.write("%g %g %g\n" % (box[0],box[1],box[2]))
psnew.write("%g %g %g\n" % (box[3],box[4],box[5]))
psnew.write("%g %g %g\n" % (box[6],box[7],box[8]))
psnew.write(psold[5])
psnew.write(psold[6])
# per-atom coords
# grouped by types
psnew.write("Cartesian\n")
for itype in range(1,ntypes+1):
for i in range(natoms):
if types[i] != itype: continue
x = coords[3*i+0]
y = coords[3*i+1]
z = coords[3*i+2]
aline = " %g %g %g\n" % (x,y,z)
psnew.write(aline)
psnew.close()
# -------------------------------------
# read a VASP output vasprun.xml file
# uses ElementTree module
# see https://docs.python.org/2/library/xml.etree.elementtree.html
def vasprun_read():
tree = ET.parse('vasprun.xml')
root = tree.getroot()
#fp = open("vasprun.xml","r")
#root = ET.parse(fp)
scsteps = root.findall('calculation/scstep')
energy = scsteps[-1].find('energy')
for child in energy:
if child.attrib["name"] == "e_0_energy":
eout = float(child.text)
fout = []
sout = []
varrays = root.findall('calculation/varray')
for varray in varrays:
if varray.attrib["name"] == "forces":
forces = varray.findall("v")
for line in forces:
fxyz = line.text.split()
fxyz = [float(value) for value in fxyz]
fout += fxyz
if varray.attrib["name"] == "stress":
tensor = varray.findall("v")
stensor = []
for line in tensor:
sxyz = line.text.split()
sxyz = [float(value) for value in sxyz]
stensor.append(sxyz)
sxx = stensor[0][0]
syy = stensor[1][1]
szz = stensor[2][2]
# symmetrize off-diagonal components
sxy = 0.5 * (stensor[0][1] + stensor[1][0])
sxz = 0.5 * (stensor[0][2] + stensor[2][0])
syz = 0.5 * (stensor[1][2] + stensor[2][1])
sout = [sxx,syy,szz,sxy,sxz,syz]
#fp.close()
return eout,fout,sout
# -------------------------------------
# main program
# command-line args
if len(sys.argv) != 3:
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
sys.exit(1)
mode = sys.argv[1]
poscar_template = sys.argv[2]
if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
elif mode == "zmq": cs = CSlib(1,mode,"*:5555",None)
else:
print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
sys.exit(1)
natoms,ntypes,box = vasp_setup(poscar_template)
# initial message for MD protocol
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0: error("Bad initial client/server handshake")
protocol = cs.unpack_string(1)
if protocol != "md": error("Mismatch in client/server protocol")
cs.send(0,0)
# endless server loop
while 1:
# recv message from client
# msgID = 0 = all-done message
msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID < 0: break
# SETUP receive at beginning of each run
# required fields: DIM, PERIODICITY, ORIGIN, BOX,
# NATOMS, NTYPES, TYPES, COORDS
# optional fields: others in enum above, but VASP ignores them
if msgID == SETUP:
origin = []
box = []
natoms_recv = ntypes_recv = 0
types = []
coords = []
for field in fieldID:
if field == DIM:
dim = cs.unpack_int(DIM)
if dim != 3: error("VASP only performs 3d simulations")
elif field == PERIODICITY:
periodicity = cs.unpack(PERIODICITY,1)
if not periodicity[0] or not periodicity[1] or not periodicity[2]:
error("VASP wrapper only currently supports fully periodic systems")
elif field == ORIGIN:
origin = cs.unpack(ORIGIN,1)
elif field == BOX:
box = cs.unpack(BOX,1)
elif field == NATOMS:
natoms_recv = cs.unpack_int(NATOMS)
if natoms != natoms_recv:
error("VASP wrapper mis-match in number of atoms")
elif field == NTYPES:
ntypes_recv = cs.unpack_int(NTYPES)
if ntypes != ntypes_recv:
error("VASP wrapper mis-match in number of atom types")
elif field == TYPES:
types = cs.unpack(TYPES,1)
elif field == COORDS:
coords = cs.unpack(COORDS,1)
if not origin or not box or not natoms or not ntypes or \
not types or not coords:
error("Required VASP wrapper setup field not received");
# STEP receive at each timestep of run or minimization
# required fields: COORDS
# optional fields: ORIGIN, BOX
elif msgID == STEP:
coords = []
for field in fieldID:
if field == COORDS:
coords = cs.unpack(COORDS,1)
elif field == ORIGIN:
origin = cs.unpack(ORIGIN,1)
elif field == BOX:
box = cs.unpack(BOX,1)
if not coords: error("Required VASP wrapper step field not received");
else: error("VASP wrapper received unrecognized message")
# create POSCAR file
poscar_write(poscar_template,natoms,ntypes,types,coords,box)
# invoke VASP
print("\nLaunching VASP ...")
print(vaspcmd)
subprocess.check_output(vaspcmd,stderr=subprocess.STDOUT,shell=True)
# process VASP output
energy,forces,virial = vasprun_read()
# convert VASP kilobars to bars
for i,value in enumerate(virial): virial[i] *= 1000.0
# return forces, energy, pressure to client
cs.send(msgID,3);
cs.pack(FORCES,4,3*natoms,forces)
cs.pack_double(ENERGY,energy)
cs.pack(VIRIAL,4,6,virial)
# final reply to client
cs.send(0,0)
# clean-up
del cs

File diff suppressed because it is too large

View File

@ -1,10 +1,16 @@
Examples for Extended Dissipative Particle Dynamics (DPD)
---------------------------------------------------------
This directory contains examples for extended DPD simulations
Examples for Basic Dissipative Particle Dynamics (DPD)
------------------------------------------------------
This directory contains examples for DPD simulations using
pair styles from the DPD-BASIC package.
1) 'dpdext' - test case (DPD fluid) for 'dpdext' pair style (in.dpdext) and an initial
configuration (dpdext.data)
1) 'dpd' - simple example (DPD fluid) for 'dpd' pair style (in.dpd)
2) 'dpdext_tstat' - test case (coarse-grained SPC/E water) for 'dpdext/tstat' pair style
(in.cg_spce), an initial configuration (dpdext.data) and tabulated potential
2) 'dpd_tstat' - coarse-grained SPC/E water example for 'dpd/tstat' pair style
(in.dpd_tstat), an initial configuration (dpdext.data) and tabulated potential
(cg_spce_table.pot) obtained by bottom-up coarse-graining of the atomistic SPC/E water.
3) 'dpdext' - simple example (DPD fluid) for 'dpd/ext' pair style (in.dpdext)
4) 'dpdext_tstat' - coarse-grained SPC/E water example for 'dpd/ext/tstat' pair style
(in.dpdext_tstat), an initial configuration (dpdext.data) and tabulated potential
(cg_spce_table.pot) obtained by bottom-up coarse-graining of the atomistic SPC/E water.
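
Each of these example inputs can also be driven from the LAMMPS Python module instead of the lmp executable. A minimal sketch, assuming the lammps Python package and the DPD-BASIC package are installed, and using the in.dpd input listed under item 1:

  from lammps import lammps

  lmp = lammps()        # create a serial LAMMPS instance
  lmp.file("in.dpd")    # run the DPD fluid example input
  lmp.close()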

View File

@ -0,0 +1,43 @@
# DPD Fluid
variable T equal 1.0
variable rc equal 1.0
variable rcD equal 1.2
units lj
boundary p p p
atom_style atomic
dimension 3
newton on
comm_modify vel yes
### create box and configuration
variable L equal 5.0
lattice fcc 3.0
region simBox block 0 ${L} 0 ${L} 0 ${L}
create_box 2 simBox
#create_atoms 1 region simBox
create_atoms 1 random 100 12456 simBox
create_atoms 2 random 100 13245 simBox
mass 1 1.0
mass 2 2.0
###
pair_style dpd ${T} ${rc} 3854262
pair_coeff 1 1 25.0 4.5 1.2
pair_coeff 1 2 25.1 4.51 1.21
pair_coeff 2 2 25.2 4.52 1.22
timestep 0.01
run_style verlet
velocity all create ${T} 68768932
thermo_style custom step time temp press
thermo 100
fix 1 all nve
run 5000

View File

@ -0,0 +1,154 @@
LAMMPS (24 Mar 2022)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# DPD Fluid
variable T equal 1.0
variable rc equal 1.0
variable rcD equal 1.2
units lj
boundary p p p
atom_style atomic
dimension 3
newton on
comm_modify vel yes
### create box and configuration
variable L equal 5.0
lattice fcc 3.0
Lattice spacing in x,y,z = 1.1006424 1.1006424 1.1006424
region simBox block 0 ${L} 0 ${L} 0 ${L}
region simBox block 0 5 0 ${L} 0 ${L}
region simBox block 0 5 0 5 0 ${L}
region simBox block 0 5 0 5 0 5
create_box 2 simBox
Created orthogonal box = (0 0 0) to (5.5032121 5.5032121 5.5032121)
1 by 1 by 1 MPI processor grid
#create_atoms 1 region simBox
create_atoms 1 random 100 12456 simBox
Created 100 atoms
using lattice units in orthogonal box = (0 0 0) to (5.5032121 5.5032121 5.5032121)
create_atoms CPU = 0.000 seconds
create_atoms 2 random 100 13245 simBox
Created 100 atoms
using lattice units in orthogonal box = (0 0 0) to (5.5032121 5.5032121 5.5032121)
create_atoms CPU = 0.000 seconds
mass 1 1.0
mass 2 2.0
###
pair_style dpd ${T} ${rc} 3854262
pair_style dpd 1 ${rc} 3854262
pair_style dpd 1 1 3854262
pair_coeff 1 1 25.0 4.5 1.2
pair_coeff 1 2 25.1 4.51 1.21
pair_coeff 2 2 25.2 4.52 1.22
timestep 0.01
run_style verlet
velocity all create ${T} 68768932
velocity all create 1 68768932
thermo_style custom step time temp press
thermo 100
fix 1 all nve
run 5000
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.52
ghost atom cutoff = 1.52
binsize = 0.76, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair dpd, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.083 | 3.083 | 3.083 Mbytes
Step Time Temp Press
0 0 1 9.5226009
100 1 1.9913643 9.2036029
200 2 1.6321732 9.2787957
300 3 1.3533438 8.3081433
400 4 1.2125884 8.0809065
500 5 1.0682216 8.0877925
600 6 0.99100473 8.1100319
700 7 0.99731243 7.8225195
800 8 1.0597693 7.8368218
900 9 0.99038588 7.9450569
1000 10 1.077129 7.5857015
1100 11 0.99070336 7.5138128
1200 12 1.013894 7.2794857
1300 13 1.0433203 7.7439871
1400 14 1.0285528 7.5662235
1500 15 0.99180601 7.8376313
1600 16 0.98059071 8.0243735
1700 17 1.0070947 8.3186893
1800 18 0.99507569 7.0786393
1900 19 1.0040168 7.8120389
2000 20 0.98636164 7.472185
2100 21 0.95811165 7.7085985
2200 22 0.93568327 6.9424246
2300 23 0.92804144 8.1239435
2400 24 0.94940276 7.6108611
2500 25 1.0535153 8.0772721
2600 26 1.0902144 7.5609768
2700 27 1.0737336 7.8706755
2800 28 0.93074581 7.3699993
2900 29 1.0440705 7.6454988
3000 30 0.93868164 7.841168
3100 31 1.0172025 7.6856163
3200 32 1.0405368 7.5325735
3300 33 0.96721201 7.8262809
3400 34 0.90430758 7.1693921
3500 35 0.89938433 7.865845
3600 36 0.9907178 7.3462971
3700 37 1.0311879 7.8876401
3800 38 0.98339132 7.3413929
3900 39 1.2111264 8.0968408
4000 40 1.062489 7.7315959
4100 41 0.94737492 7.3386028
4200 42 1.0453816 8.2017304
4300 43 0.97024897 7.7379624
4400 44 0.9553861 7.8047635
4500 45 1.043252 7.7486215
4600 46 0.98611474 8.1237053
4700 47 0.98624285 8.5801642
4800 48 0.97176754 7.1540299
4900 49 1.0165401 7.3853841
5000 50 0.88359115 7.5541592
Loop time of 0.359916 on 1 procs for 5000 steps with 200 atoms
Performance: 12002788.048 tau/day, 13892.116 timesteps/s
99.5% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.24932 | 0.24932 | 0.24932 | 0.0 | 69.27
Neigh | 0.068726 | 0.068726 | 0.068726 | 0.0 | 19.10
Comm | 0.028691 | 0.028691 | 0.028691 | 0.0 | 7.97
Output | 0.00066318 | 0.00066318 | 0.00066318 | 0.0 | 0.18
Modify | 0.0078062 | 0.0078062 | 0.0078062 | 0.0 | 2.17
Other | | 0.004713 | | | 1.31
Nlocal: 200 ave 200 max 200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 546 ave 546 max 546 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1649 ave 1649 max 1649 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1649
Ave neighs/atom = 8.245
Neighbor list builds = 500
Dangerous builds = 500
Total wall time: 0:00:00

Some files were not shown because too many files have changed in this diff