Merge pull request #3207 from lammps/remove-message-package

remove MESSAGE package
Axel Kohlmeyer
2022-04-09 03:20:26 -04:00
committed by GitHub
155 changed files with 45 additions and 29167 deletions

View File

@ -26,6 +26,7 @@ jobs:
shell: bash
run: |
python3 -m pip install numpy
python3 -m pip install pyyaml
cmake -C cmake/presets/windows.cmake \
-D PKG_PYTHON=on \
-S cmake -B build \

View File

@ -37,6 +37,7 @@ jobs:
working-directory: build
run: |
ccache -z
python3 -m pip install pyyaml
cmake -C ../cmake/presets/clang.cmake \
-C ../cmake/presets/most.cmake \
-D CMAKE_CXX_COMPILER_LAUNCHER=ccache \

View File

@ -208,7 +208,6 @@ set(STANDARD_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP
@ -451,7 +450,7 @@ else()
endif()
foreach(PKG_WITH_INCL KSPACE PYTHON ML-IAP VORONOI COLVARS ML-HDNNP MDI MOLFILE NETCDF
PLUMED QMMM ML-QUIP SCAFACOS MACHDYN VTK KIM LATTE MESSAGE MSCG COMPRESS ML-PACE)
PLUMED QMMM ML-QUIP SCAFACOS MACHDYN VTK KIM LATTE MSCG COMPRESS ML-PACE)
if(PKG_${PKG_WITH_INCL})
include(Packages/${PKG_WITH_INCL})
endif()

View File

@ -1,31 +0,0 @@
if(LAMMPS_SIZES STREQUAL "BIGBIG")
message(FATAL_ERROR "The MESSAGE Package is not compatible with -DLAMMPS_BIGBIG")
endif()
option(MESSAGE_ZMQ "Use ZeroMQ in MESSAGE package" OFF)
file(GLOB_RECURSE cslib_SOURCES
${LAMMPS_LIB_SOURCE_DIR}/message/cslib/[^.]*.cpp)
add_library(cslib STATIC ${cslib_SOURCES})
target_compile_definitions(cslib PRIVATE -DLAMMPS_${LAMMPS_SIZES})
set_target_properties(cslib PROPERTIES OUTPUT_NAME lammps_cslib${LAMMPS_MACHINE})
if(BUILD_MPI)
target_compile_definitions(cslib PRIVATE -DMPI_YES)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
target_link_libraries(cslib PRIVATE MPI::MPI_CXX)
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_MPI)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()
if(MESSAGE_ZMQ)
target_compile_definitions(cslib PRIVATE -DZMQ_YES)
find_package(ZMQ REQUIRED)
target_link_libraries(cslib PUBLIC ZMQ::ZMQ)
else()
target_compile_definitions(cslib PRIVATE -DZMQ_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_ZMQ)
endif()
target_link_libraries(lammps PRIVATE cslib)
target_include_directories(lammps PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src)

View File

@ -48,7 +48,6 @@ set(ALL_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP

View File

@ -50,7 +50,6 @@ set(ALL_PACKAGES
MDI
MEAM
MESONT
MESSAGE
MGPT
MISC
ML-HDNNP

View File

@ -15,7 +15,6 @@ set(PACKAGES_WITH_LIB
MACHDYN
MDI
MESONT
MESSAGE
ML-HDNNP
ML-PACE
ML-QUIP

View File

@ -45,7 +45,6 @@ This is the list of packages that may require additional steps.
* :ref:`MACHDYN <machdyn>`
* :ref:`MDI <mdi>`
* :ref:`MESONT <mesont>`
* :ref:`MESSAGE <message>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-IAP <mliap>`
* :ref:`ML-PACE <ml-pace>`
@ -797,47 +796,6 @@ library.
----------
.. _message:
MESSAGE package
-----------------------------
This package can optionally include support for messaging via sockets,
using the open-source `ZeroMQ library <http://zeromq.org>`_, which must
be installed on your system.
.. tabs::
.. tab:: CMake build
.. code-block:: bash
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
-D ZMQ_LIBRARY=path # ZMQ library file (only needed if a custom location)
-D ZMQ_INCLUDE_DIR=path # ZMQ include directory (only needed if a custom location)
.. tab:: Traditional make
Before building LAMMPS, you must build the CSlib library in
``lib/message``\ . You can build the CSlib library manually if
you prefer; follow the instructions in ``lib/message/README``\ .
You can also do it in one step from the ``lammps/src`` dir, using
one of the commands below, which simply invoke the
``lib/message/Install.py`` script with the specified args:
.. code-block:: bash
$ make lib-message # print help message
$ make lib-message args="-m -z" # build with MPI and socket (ZMQ) support
$ make lib-message args="-s" # build as serial lib with no ZMQ support
The build should produce two files: ``lib/message/cslib/src/libmessage.a``
and ``lib/message/Makefile.lammps``. The latter is copied from an
existing ``Makefile.lammps.*`` and has settings to link with the ZeroMQ
library if requested in the build.
----------
.. _mliap:
ML-IAP package

View File

@ -45,7 +45,6 @@ packages:
* :ref:`KOKKOS <kokkos>`
* :ref:`LATTE <latte>`
* :ref:`MACHDYN <machdyn>`
* :ref:`MESSAGE <message>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-PACE <ml-pace>`
* :ref:`ML-QUIP <ml-quip>`

View File

@ -68,7 +68,6 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`log <log>`
* :doc:`mass <mass>`
* :doc:`mdi <mdi>`
* :doc:`message <message>`
* :doc:`minimize <minimize>`
* :doc:`min_modify <min_modify>`
* :doc:`min_style <min_style>`
@ -105,7 +104,6 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`restart <restart>`
* :doc:`run <run>`
* :doc:`run_style <run_style>`
* :doc:`server <server>`
* :doc:`set <set>`
* :doc:`shell <shell>`
* :doc:`special_bonds <special_bonds>`

View File

@ -51,7 +51,6 @@ OPT.
* :doc:`bond/swap <fix_bond_swap>`
* :doc:`box/relax <fix_box_relax>`
* :doc:`charge/regulation <fix_charge_regulation>`
* :doc:`client/md <fix_client_md>`
* :doc:`cmap <fix_cmap>`
* :doc:`colvars <fix_colvars>`
* :doc:`controller <fix_controller>`

View File

@ -100,8 +100,6 @@ Lowercase directories
+-------------+------------------------------------------------------------------+
| melt | rapid melt of 3d LJ system |
+-------------+------------------------------------------------------------------+
| message | demos for LAMMPS client/server coupling with the MESSAGE package |
+-------------+------------------------------------------------------------------+
| micelle | self-assembly of small lipid-like molecules into 2d bilayers |
+-------------+------------------------------------------------------------------+
| min | energy minimization of 2d LJ melt |

View File

@ -22,7 +22,6 @@ General howto
Howto_replica
Howto_library
Howto_couple
Howto_client_server
Howto_mdi
Settings howto

View File

@ -1,163 +0,0 @@
Using LAMMPS in client/server mode
==================================
Client/server coupling of two codes is where one code is the "client"
and sends request messages to a "server" code. The server responds to
each request with a reply message. This enables the two codes to work
in tandem to perform a simulation. LAMMPS can act as either a client
or server code.
Some advantages of client/server coupling are that the two codes run
as stand-alone executables; they are not linked together. Thus
neither code needs to have a library interface. This often makes it
easier to run the two codes on different numbers of processors. If a
message protocol (format and content) is defined for a particular kind
of simulation, then in principle any code that implements the
client-side protocol can be used in tandem with any code that
implements the server-side protocol, without the two codes needing to
know anything more specific about each other.
A simple example of client/server coupling is where LAMMPS is the
client code performing MD timestepping. Each timestep it sends a
message to a server quantum code containing current coords of all the
atoms. The quantum code computes energy and forces based on the
coords. It returns them as a message to LAMMPS, which completes the
timestep.
A more complex example is where LAMMPS is the client code and
processes a series of data files, sending each configuration to a
quantum code to compute energy and forces. Or LAMMPS runs dynamics
with an atomistic force field, but pauses every N steps to ask the
quantum code to compute energy and forces.
Alternate methods for code coupling with LAMMPS are described on
the :doc:`Howto couple <Howto_couple>` doc page.
The protocol for using LAMMPS as a client is to use these 3 commands
in this order (other commands may come in between):
* :doc:`message client <message>` # initiate client/server interaction
* :doc:`fix client/md <fix_client_md>` # any client fix which makes specific requests to the server
* :doc:`message quit <message>` # terminate client/server interaction
In between the two message commands, a client fix command and
:doc:`unfix <unfix>` command can be used multiple times. Similarly,
this sequence of 3 commands can be repeated multiple times, assuming
the server program operates in a similar fashion, to initiate and
terminate client/server communication.
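Put together, a minimal client-side script might look like the sketch
below (the *md* protocol, *file* mode, and the filename tmp.couple are
illustrative choices, not requirements):
.. code-block:: LAMMPS
message client md file tmp.couple # initiate client/server interaction
fix 2 all client/md # client fix that issues requests during the run
run 100 # messages are exchanged each timestep
unfix 2
message quit # terminate client/server interaction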
The protocol for using LAMMPS as a server is to use these 2 commands
in this order (other commands may come in between):
* :doc:`message server <message>` # initiate client/server interaction
* :doc:`server md <server_md>` # any server command which responds to specific requests from the client
This sequence of 2 commands can be repeated multiple times, assuming
the client program operates in a similar fashion, to initiate and
terminate client/server communication.
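Put together, a minimal server-side script might look like this
sketch (again assuming the *md* protocol in *file* mode; the system
must be fully defined before the server command enters its message
loop):
.. code-block:: LAMMPS
message server md file tmp.couple # initiate client/server interaction
# ... commands defining units, box, atoms, and force field ...
server md # respond to client requests until the client quits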
LAMMPS support for client/server coupling is in its :ref:`MESSAGE package <PKG-MESSAGE>` which implements several
commands that enable LAMMPS to act as a client or server, as discussed
below. The MESSAGE package also wraps a client/server library called
CSlib which enables two codes to exchange messages in different ways,
either via files, sockets, or MPI. The CSlib is provided with LAMMPS
in the lib/message dir. The CSlib has its own
`website <https://cslib.sandia.gov>`_ with documentation and test
programs.
.. note::
For client/server coupling to work between LAMMPS and another
code, the other code also has to use the CSlib. This can sometimes be
done without any modifications to the other code by simply wrapping it
with a Python script that exchanges CSlib messages with LAMMPS and
prepares input for or processes output from the other code. The other
code also has to implement a matching protocol for the format and
content of messages that LAMMPS exchanges with it.
These are the commands currently in the MESSAGE package for two
protocols, MD and MC (Monte Carlo). New protocols can easily be
defined and added to the package, with LAMMPS acting as either the
client or server.
* :doc:`message <message>`
* :doc:`fix client md <fix_client_md>` = LAMMPS is a client for running MD
* :doc:`server md <server_md>` = LAMMPS is a server for computing MD forces
* :doc:`server mc <server_mc>` = LAMMPS is a server for computing a Monte Carlo energy
The server doc files give details of the message protocols
for data that is exchanged between the client and server.
These example directories illustrate how to use LAMMPS as either a
client or server code:
* examples/message
* examples/COUPLE/README
* examples/COUPLE/lammps_mc
* examples/COUPLE/lammps_nwchem
* examples/COUPLE/lammps_vasp
The examples/message directory couples a client instance of LAMMPS to a
server instance of LAMMPS.
The files in the *lammps_mc* folder show how to couple LAMMPS as
a server to a simple Monte Carlo client code as the driver.
The files in the *lammps_nwchem* folder show how to couple LAMMPS
as a client code running MD timestepping to NWChem acting as a
server providing quantum DFT forces, through a Python wrapper script
on NWChem.
The files in the *lammps_vasp* folder show how to couple LAMMPS as
a client code running MD timestepping to VASP acting as a server
providing quantum DFT forces, through a Python wrapper script on VASP.
Here is how to launch a client and server code together for any of the
4 modes of message exchange that the :doc:`message <message>` command
and the CSlib support. Here LAMMPS is used as both the client and
server code. Another code could be substituted for either.
The examples below show launching both codes from the same window (or
batch script), using the "&" character to launch the first code in the
background. For all modes except *mpi/one*, you could also launch the
codes in separate windows on your desktop machine. It does not
matter whether you launch the client or server first.
In these examples either code can be run on one or more processors.
If running in a non-MPI mode (file or zmq) you can launch a code on a
single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch both codes via
mpirun, even if one or both of them runs on a single processor. This
is so that MPI can figure out how to connect both MPI processes
together to exchange MPI messages between them.
For message exchange in *file*, *zmq*, or *mpi/two* modes:
.. code-block:: bash
% mpirun -np 1 lmp_mpi -log log.client < in.client &
% mpirun -np 2 lmp_mpi -log log.server < in.server
% mpirun -np 4 lmp_mpi -log log.client < in.client &
% mpirun -np 1 lmp_mpi -log log.server < in.server
% mpirun -np 2 lmp_mpi -log log.client < in.client &
% mpirun -np 4 lmp_mpi -log log.server < in.server
For message exchange in *mpi/one* mode:
Launch both codes in a single mpirun command:
.. code-block:: bash
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -log log.server
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is an integer
label used to distinguish one executable from another among the
multiple executables that the mpirun command launches. In this
example the client was colored with 0, and the server with 1.

View File

@ -5,7 +5,7 @@ LAMMPS can be downloaded, built, and configured for OS X on a Mac with
`Homebrew <homebrew_>`_. (Alternatively, see the install instructions for
:doc:`Download an executable via Conda <Install_conda>`.) The following LAMMPS
packages are unavailable at this time because of additional needs not yet met:
GPU, KOKKOS, LATTE, MSCG, MESSAGE, MPIIO POEMS VORONOI.
GPU, KOKKOS, LATTE, MSCG, MPIIO, POEMS, VORONOI.
After installing Homebrew, you can install LAMMPS on your system with
the following commands:

View File

@ -72,7 +72,6 @@ page gives those details.
* :ref:`MDI <PKG-MDI>`
* :ref:`MEAM <PKG-MEAM>`
* :ref:`MESONT <PKG-MESONT>`
* :ref:`MESSAGE <PKG-MESSAGE>`
* :ref:`MGPT <PKG-MGPT>`
* :ref:`MISC <PKG-MISC>`
* :ref:`ML-HDNNP <PKG-ML-HDNNP>`
@ -1405,7 +1404,8 @@ workflows via the `MolSSI Driver Interface
**Install:**
This package has :ref:`specific installation instructions <message>` on the :doc:`Build extras <Build_extras>` page.
This package has :ref:`specific installation instructions <mdi>` on
the :doc:`Build extras <Build_extras>` page.
**Supporting info:**
@ -1490,32 +1490,6 @@ Philipp Kloza (U Cambridge)
----------
.. _PKG-MESSAGE:
MESSAGE package
---------------
**Contents:**
Commands to use LAMMPS as either a client or server and couple it to
another application.
**Install:**
This package has :ref:`specific installation instructions <message>` on the :doc:`Build extras <Build_extras>` page.
**Supporting info:**
* src/MESSAGE: filenames -> commands
* lib/message/README
* :doc:`message <message>`
* :doc:`fix client/md <fix_client_md>`
* :doc:`server md <server_md>`
* :doc:`server mc <server_mc>`
* examples/message
----------
.. _PKG-MGPT:
MGPT package

View File

@ -258,11 +258,6 @@ whether an extra library is needed to build and use the package:
- pair styles :doc:`mesont/tpm <pair_mesont_tpm>`, :doc:`mesocnt <pair_mesocnt>`
- PACKAGES/mesont
- int
* - :ref:`MESSAGE <PKG-MESSAGE>`
- client/server messaging
- :doc:`message <message>`
- message
- int
* - :ref:`MGPT <PKG-MGPT>`
- fast MGPT multi-ion potentials
- :doc:`pair_style mgpt <pair_mgpt>`

View File

@ -226,15 +226,6 @@ other executable(s) perform an MPI_Comm_split() with their own colors
to shrink the MPI_COMM_WORLD communicator to the subset of
processors they are actually running on.
Currently, this is only used in LAMMPS to perform client/server
messaging with another application. LAMMPS can act as either a client
or server (or both). More details are given on the :doc:`Howto client/server <Howto_client_server>` doc page.
Specifically, this refers to the "mpi/one" mode of messaging provided
by the :doc:`message <message>` command and the CSlib library LAMMPS
links with from the lib/message directory. See the
:doc:`message <message>` command for more details.
----------
.. _cite:

View File

@ -60,7 +60,6 @@ Commands
log
mass
mdi
message
min_modify
min_spin
min_style
@ -96,9 +95,6 @@ Commands
restart
run
run_style
server
server_mc
server_md
set
shell
special_bonds

View File

@ -194,7 +194,6 @@ accelerated styles exist.
* :doc:`bond/swap <fix_bond_swap>` - Monte Carlo bond swapping
* :doc:`box/relax <fix_box_relax>` - relax box size during energy minimization
* :doc:`charge/regulation <fix_charge_regulation>` - Monte Carlo sampling of charge regulation
* :doc:`client/md <fix_client_md>` - MD client for client/server simulations
* :doc:`cmap <fix_cmap>` - enables CMAP cross-terms of the CHARMM force field
* :doc:`colvars <fix_colvars>` - interface to the collective variables "Colvars" library
* :doc:`controller <fix_controller>` - apply control loop feedback mechanism

View File

@ -1,118 +0,0 @@
.. index:: fix client/md
fix client/md command
=====================
Syntax
""""""
.. parsed-literal::
fix ID group-ID client/md
* ID, group-ID are documented in :doc:`fix <fix>` command
* client/md = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all client/md
Description
"""""""""""
This fix style enables LAMMPS to run as a "client" code and
communicate each timestep with a separate "server" code to perform an
MD simulation together.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When using this fix, LAMMPS (as the client code) passes the current
coordinates of all particles to the server code each timestep. The
server computes their interaction and returns the energy, forces, and
virial for the interacting particles to LAMMPS, so it can complete the
timestep.
Note that the server code can be a quantum code, or another classical
MD code which encodes a force field (pair_style in LAMMPS lingo) which
LAMMPS does not have. In the quantum case, this fix is a mechanism
for running *ab initio* MD with quantum forces.
The group associated with this fix is ignored.
The protocol and :doc:`units <units>` for the format and content of
messages that LAMMPS exchanges with the server code are defined on the
:doc:`server md <server_md>` doc page.
Note that when using LAMMPS as an MD client, your LAMMPS input script
should not normally contain force field commands, like a
:doc:`pair_style <pair_style>`, :doc:`bond_style <bond_style>`, or
:doc:`kspace_style <kspace_style>` command. However it is possible
for a server code to only compute a portion of the full force-field,
while LAMMPS computes the remaining part. Your LAMMPS script can also
specify boundary conditions or force constraints in the usual way,
which will be added to the per-atom forces returned by the server
code.
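As an illustration, here is a sketch of a client script that combines
this fix with an extra force constraint (the zmq address and the
addforce fix with its values are hypothetical examples, not part of
the protocol):
.. code-block:: LAMMPS
# note: no pair_style or bond_style; the server computes those forces
message client md zmq localhost:5555
fix 1 all client/md
fix 2 all addforce 0.0 0.0 -0.1 # extra per-atom force added by LAMMPS
run 1000
message quit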
See the examples/message directory for example scripts where LAMMPS
acts as the "client" and/or the "server" code in this kind of client/server
MD simulation. The examples/message/README file explains how to launch
LAMMPS and another code in tandem to perform a coupled simulation.
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`.
The :doc:`fix_modify <fix_modify>` *energy* option is supported by
this fix to add the potential energy set by the server application to
the global potential energy of the system as part of
:doc:`thermodynamic output <thermo_style>`. The default setting for
this fix is :doc:`fix_modify energy yes <fix_modify>`.
The :doc:`fix_modify <fix_modify>` *virial* option is supported by
this fix to add the contribution computed by the server application to
the global pressure of the system via the :doc:`compute pressure
<compute_pressure>` command. This can be accessed by
:doc:`thermodynamic output <thermo_style>`. The default setting for
this fix is :doc:`fix_modify virial yes <fix_modify>`.
This fix computes a global scalar which can be accessed by various
:doc:`output commands <Howto_output>`. The scalar is the potential
energy discussed above. The scalar value calculated by this fix is
"extensive".
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
This fix is not invoked during :doc:`energy minimization <minimize>`.
Restrictions
""""""""""""
This fix is part of the MESSAGE package. It is only enabled if LAMMPS
was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to set up and shut down the messaging
protocol with the server code.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`server <server>`
Default
"""""""
none

View File

@ -1,204 +0,0 @@
.. index:: message
message command
===============
Syntax
""""""
.. parsed-literal::
message which protocol mode arg
* which = *client* or *server* or *quit*
* protocol = *md* or *mc*
* mode = *file* or *zmq* or *mpi/one* or *mpi/two*
.. parsed-literal::
*file* arg = filename
filename = file used for message exchanges
*zmq* arg = socket-ID
socket-ID for client = localhost:5555, see description below
socket-ID for server = \*:5555, see description below
*mpi/one* arg = none
*mpi/two* arg = filename
filename = file used to establish communication between 2 MPI jobs
Examples
""""""""
.. code-block:: LAMMPS
message client md file tmp.couple
message server md file tmp.couple
message client md zmq localhost:5555
message server md zmq *:5555
message client md mpi/one
message server md mpi/one
message client md mpi/two tmp.couple
message server md mpi/two tmp.couple
message quit
Description
"""""""""""
Establish a messaging protocol between LAMMPS and another code for the
purpose of client/server coupling.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
----------
The *which* argument defines LAMMPS to be the client or the server.
As explained below the *quit* option should be used when LAMMPS is
finished as a client. It sends a message to the server to tell it to
shut down.
----------
The *protocol* argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
* md = run dynamics with another code
* mc = perform Monte Carlo moves with another code
For protocol *md*, LAMMPS can be either a client or server. See the
:doc:`server md <server_md>` page for details on the protocol.
For protocol *mc*, LAMMPS can be the server. See the :doc:`server mc <server_mc>` page for details on the protocol.
----------
The *mode* argument specifies how messages are exchanged between the
client and server codes. Both codes must use the same mode and use
consistent parameters.
For mode *file*, the 2 codes communicate via binary files. They must
use the same filename, which is actually a file prefix. Several files
with that prefix will be created and deleted as a simulation runs.
The filename can include a path. Both codes must be able to access
the path/file in a common filesystem.
For mode *zmq*, the 2 codes communicate via a socket on the server
code's machine. Support for socket messaging is provided by the
open-source `ZeroMQ library <http://zeromq.org>`_, which must be
installed on your system. The client specifies an IP address (IPv4
format) or the DNS name of the machine the server code is running on,
followed by a 4 or 5 digit port ID for the socket, separated by a colon.
E.g.
.. parsed-literal::
localhost:5555 # client and server running on same machine
192.168.1.1:5555 # server is 192.168.1.1
deptbox.uni.edu:5555 # server is deptbox.uni.edu
The server specifies "\*:5555" where "\*" represents all available
interfaces on the server's machine, and the port ID must match
what the client specifies.
.. note::
On Linux or Unix machines port IDs below 1024 are reserved to the
superuser and thus not available. Other ports may already be in
use and cannot be opened by a second process. On a Linux machine
the commands "netstat -t4an" or "ss -t4an" will list all locally
used port IDs for IPv4 addresses.
.. note::
On many machines (and sometimes on local networks) port IDs may
also be blocked by default through firewalls. In that case either
access to the required port (or a desired range of ports) has to
be selectively enabled, or the firewall disabled (the latter is
usually not a good idea unless you are on a small local network
that is already protected from outside access).
.. note::
Additional explanation is needed here about how to use the *zmq*
mode on a parallel machine, e.g. a cluster with many nodes.
For mode *mpi/one*, the 2 codes communicate via MPI and are launched
by the same mpirun command, e.g. with this syntax for OpenMPI:
.. code-block:: bash
mpirun -np 2 lmp_mpi -mpicolor 0 -in in.client -log log.client : -np 4 othercode args # LAMMPS is client
mpirun -np 2 othercode args : -np 4 lmp_mpi -mpicolor 1 -in in.server # LAMMPS is server
Note the use of the "-mpicolor color" command-line argument with
LAMMPS. See the :doc:`command-line args <Run_options>` page for
further explanation.
For mode *mpi/two*, the 2 codes communicate via MPI, but are launched
by 2 separate mpirun commands. The specified *filename* argument is a
file the 2 MPI processes will use to exchange info so that an MPI
inter-communicator can be established to enable the 2 codes to send
MPI messages to each other. Both codes must be able to access the
path/file in a common filesystem.
----------
Normally, the message client or message server command should be used
at the top of a LAMMPS input script. It performs an initial handshake
with the other code to set up messaging and to verify that both codes
are using the same message protocol and mode. Assuming both codes are
launched at (nearly) the same time, the other code should perform the
same kind of initialization.
If LAMMPS is the client code, it will begin sending messages when a
LAMMPS client command begins its operation. E.g. for the :doc:`fix client/md <fix_client_md>` command, it is when a :doc:`run <run>`
command is executed.
If LAMMPS is the server code, it will begin receiving messages when
the :doc:`server <server>` command is invoked.
If LAMMPS is being used as a client, the message quit command will
terminate its messaging with the server. If you do not use this
command and just allow LAMMPS to exit, then the server will continue
to wait for further messages. This may not be a problem, but if both
the client and server programs were launched in the same batch script,
a server that runs indefinitely may consume the full allocation of
computer time, even if the calculation finishes sooner.
Note that if LAMMPS is the client or server, it will continue
processing the rest of its input script after client/server
communication terminates.
If both codes cooperate in this manner, a new round of client/server
messaging can be initiated after termination by issuing a second
message command in your LAMMPS input script, followed by a new fix
client or server command, followed by another message quit command (if
LAMMPS is the client). As an example, this can be performed in a loop
to use a quantum code as a server to compute quantum forces for
multiple LAMMPS data files or periodic snapshots while running
dynamics.
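A hypothetical sketch of such a loop follows (the filenames, loop
count, and *file* messaging mode are illustrative assumptions, not
part of the protocol):
.. code-block:: LAMMPS
variable i loop 10
label snapshot
clear
read_data config.${i}.data
message client md file tmp.couple
fix 1 all client/md
run 0 # one energy/force evaluation served by the other code
message quit
next i
jump SELF snapshot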
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`server <server>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -485,7 +485,7 @@ CPUs a value of *on* is the default since it can often be faster, just
as it is for non-accelerated pair styles
The *binsize* keyword sets the size of bins used to bin atoms during
neighbor list builds. The same value can be set by the
neighbor list builds. The same value can be set by the
:doc:`neigh_modify binsize <neigh_modify>` command. Making it an option
in the package kokkos command allows it to be set from the command line.
The default value for CPUs is 0.0, which means the LAMMPS default will be

View File

@ -1,74 +0,0 @@
.. index:: server
server command
==============
Syntax
""""""
.. parsed-literal::
server protocol
* protocol = *md* or *mc*
Examples
""""""""
.. code-block:: LAMMPS
server md
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it receives
messages from a separate "client" code and responds by sending a reply
message back to the client. The specified *protocol* determines the
format and content of messages LAMMPS expects to receive and how it
responds.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS input script will be
processed.
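As a sketch (assuming the *md* protocol in *file* mode), any commands
that follow the server command are processed only after the client has
quit:
.. code-block:: LAMMPS
message server md file tmp.couple
# ... commands defining the system ...
server md # endless message loop until the client signals it is done
print "client/server session finished" # processed after the loop exits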
The *protocol* argument defines the format and content of messages
that will be exchanged between the two codes. The current options
are:
* :doc:`md <server_md>` = run dynamics with another code
* :doc:`mc <server_mc>` = perform Monte Carlo moves with another code
For protocol *md*, LAMMPS can be either a client (via the :doc:`fix client/md <fix_client_md>` command) or server. See the :doc:`server md <server_md>` page for details on the protocol.
For protocol *mc*, LAMMPS can be the server. See the :doc:`server mc <server_mc>` page for details on the protocol.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to set up the messaging protocol with
the other client code.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -1,126 +0,0 @@
.. index:: server mc
server mc command
=================
Syntax
""""""
.. parsed-literal::
server mc
mc = the protocol argument to the :doc:`server <server>` command
Examples
""""""""
.. code-block:: LAMMPS
server mc
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the *mc*
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The :doc:`server <server>` page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/COUPLE/lammps_mc/in.server.
----------
When using this command, LAMMPS (as the server code) receives
instructions from a Monte Carlo (MC) driver to displace random atoms,
compute the energy before and after displacement, and run dynamics to
equilibrate the system.
The MC driver performs the random displacements on random atoms,
accepts or rejects the move in an MC sense, and orchestrates the MD
runs.
The format and content of the exchanged messages are explained here in
a conceptual sense, together with C++-style pseudo-code for the calls
to the CSlib library, which performs the actual message exchange
between the two codes. See the `CSlib website <https://cslib.sandia.gov>`_ doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_mc.cpp file for details on how LAMMPS uses
these messages. See the examples/COUPLE/lammps_mc/mc.cpp file for an
example of how an MC driver code can use these messages.
Define NATOMS=1, EINIT=2, DISPLACE=3, ACCEPT=4, RUN=5.
**Client sends one of these kinds of message**\ :
.. parsed-literal::
cs->send(NATOMS,0) # msgID = 1 with no fields
cs->send(EINIT,0) # msgID = 2 with no fields
cs->send(DISPLACE,2) # msgID = 3 with 2 fields
cs->pack_int(1,ID) # 1st field = ID of atom to displace
cs->pack(2,3,xnew) # 2nd field = new xyz coords of displaced atom
cs->send(ACCEPT,1) # msgID = 4 with 1 field
cs->pack_int(1,flag) # 1st field = accept/reject flag
cs->send(RUN,1) # msgID = 5 with 1 field
cs->pack_int(1,nsteps) # 1st field = # of timesteps to run MD
**Server replies**\ :
.. parsed-literal::
cs->send(NATOMS,1) # msgID = 1 with 1 field
cs->pack_int(1,natoms) # 1st field = number of atoms
cs->send(EINIT,2) # msgID = 2 with 2 fields
cs->pack_double(1,poteng) # 1st field = potential energy of system
cs->pack(2,3\*natoms,x) # 2nd field = 3N coords of Natoms
cs->send(DISPLACE,1) # msgID = 3 with 1 field
cs->pack_double(1,poteng) # 1st field = new potential energy of system
cs->send(ACCEPT,0) # msgID = 4 with no fields
cs->send(RUN,0) # msgID = 5 with no fields
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
A script that uses this command must also use the
:doc:`message <message>` command to set up the messaging protocol with
the other client code.
Related commands
""""""""""""""""
:doc:`message <message>`
Default
"""""""
none

View File

@ -1,161 +0,0 @@
.. index:: server md
server md command
=================
Syntax
""""""
.. parsed-literal::
server md
md = the protocol argument to the :doc:`server <server>` command
Examples
""""""""
.. code-block:: LAMMPS
server md
Description
"""""""""""
This command starts LAMMPS running in "server" mode, where it will
expect messages from a separate "client" code that match the *md*
protocol for format and content explained below. For each message
LAMMPS receives it will send a message back to the client.
The :doc:`Howto client/server <Howto_client_server>` page gives an
overview of client/server coupling of LAMMPS with another code where
one code is the "client" and sends request messages to a "server"
code. The server responds to each request with a reply message. This
enables the two codes to work in tandem to perform a simulation.
When this command is invoked, LAMMPS will run in server mode in an
endless loop, waiting for messages from the client code. The client
signals when it is done sending messages to LAMMPS, at which point the
loop will exit, and the remainder of the LAMMPS script will be
processed.
The :doc:`server <server>` page gives other options for using LAMMPS
in server mode. See an example of how this command is used in
examples/message/in.message.server.
----------
When using this command, LAMMPS (as the server code) receives the
current coordinates of all particles from the client code each
timestep, computes their interaction, and returns the energy, forces,
and pressure for the interacting particles to the client code, so it
can complete the timestep. This command could also be used with a
client code that performs energy minimization, using the server to
compute forces and energy each iteration of its minimizer.
When using the :doc:`fix client/md <fix_client_md>` command, LAMMPS (as
the client code) does the timestepping and receives needed energy,
forces, and pressure values from the server code.
The format and content of the exchanged messages are explained here in
a conceptual sense, together with C++-style pseudo-code for the calls
to the CSlib library, which performs the actual message exchange
between the two codes. See the `CSlib website <https://cslib.sandia.gov>`_ doc
pages for more details on the actual library syntax. The "cs" object
in this pseudo code is a pointer to an instance of the CSlib.
See the src/MESSAGE/server_md.cpp and src/MESSAGE/fix_client_md.cpp
files for details on how LAMMPS uses these messages. See the
examples/COUPLE/lammps_vasp/vasp_wrap.py or
examples/COUPLE/lammps_nwchem/nwchem_wrap.py files for examples of how
a quantum code (VASP or NWChem) can use these messages.
The following pseudo-code uses these values, defined as enums.
Define:
.. parsed-literal::
SETUP=1, STEP=2
DIM=1, PERIODICITY=2, ORIGIN=3, BOX=4, NATOMS=5, NTYPES=6, TYPES=7, COORDS=8, UNITS=9, CHARGE=10
FORCES=1, ENERGY=2, PRESSURE=3, ERROR=4
**Client sends 2 kinds of messages**\ :
.. parsed-literal::
# required fields: DIM, PERIODICITY, ORIGIN, BOX, NATOMS, NTYPES, TYPES, COORDS
# optional fields: UNITS, CHARGE
cs->send(SETUP,nfields) # msgID with nfields
cs->pack_int(DIM,dim) # dimension (2,3) of simulation
cs->pack(PERIODICITY,3,xyz) # periodicity flags in 3 dims
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
cs->pack_int(NATOMS,natoms) # total number of atoms
cs->pack_int(NTYPES,ntypes) # number of atom types
cs->pack(TYPES,natoms,type) # vector of per-atom types
cs->pack(COORDS,3\*natoms,x) # vector of 3N atom coords
cs->pack_string(UNITS,units) # units = "lj", "real", "metal", etc
cs->pack(CHARGE,natoms,q) # vector of per-atom charge
# required fields: COORDS
# optional fields: ORIGIN, BOX
cs->send(STEP,nfields) # msgID with nfields
cs->pack(COORDS,3\*natoms,x) # vector of 3N atom coords
cs->pack(ORIGIN,3,origin) # lower-left corner of simulation box
cs->pack(BOX,9,box) # 3 edge vectors of simulation box
**Server replies to either kind of message**\ :
.. parsed-literal::
# required fields: FORCES, ENERGY, PRESSURE
# optional fields: ERROR
cs->send(msgID,nfields) # msgID with nfields
cs->pack(FORCES,3\*Natoms,f) # vector of 3N forces on atoms
cs->pack(ENERGY,1,poteng) # total potential energy of system
cs->pack(PRESSURE,6,press) # global pressure tensor (6-vector)
cs->pack_int(ERROR,flag) # server had an error (e.g. DFT non-convergence)
----------
The units for various quantities that are sent and received via
messages are defined for atomic-scale simulations in the table below.
The client and server codes (including LAMMPS) can use internal units
different than these (e.g. :doc:`real units <units>` in LAMMPS), so long
as they convert to these units for messaging.
* COORDS, ORIGIN, BOX = Angstroms
* CHARGE = multiple of electron charge (1.0 is a proton)
* ENERGY = eV
* FORCES = eV/Angstrom
* PRESSURE = bars
Note that these are :doc:`metal units <units>` in LAMMPS.
If you wish to run LAMMPS in one of its non-atomic unit styles, e.g. :doc:`lj units <units>`, then the client and server should exchange a UNITS
message as indicated above, and both the client and server should
agree on the units for the data they exchange.
----------
Restrictions
""""""""""""
This command is part of the MESSAGE package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands
""""""""""""""""
:doc:`message <message>`, :doc:`fix client/md <fix_client_md>`
Default
"""""""
none

View File

@ -5,3 +5,4 @@ sphinx_tabs
breathe
Pygments
six
pyyaml

View File

@ -9,6 +9,11 @@ model a realistic problem.
In many of the examples included here, LAMMPS must first be built as a
library.
Also see the Howto_mdi doc page in the LAMMPS manual for a description
of how LAMMPS can be coupled to other codes in a client/server fashion
using the MDI Library created by the MolSSI consortium. The MDI
package in LAMMPS has support for this style of code coupling.
See these sections of the LAMMPS manual for details:
Build LAMMPS as a library (doc/html/Build_basics.html)
@ -28,15 +33,9 @@ These are the sub-directories included in this directory:
simple simple example of driver code calling LAMMPS as a lib
multiple example of driver code calling multiple instances of LAMMPS
plugin example for loading LAMMPS at runtime from a shared library
lammps_mc client/server coupling of Monte Carlo client
with LAMMPS server for energy evaluation
lammps_nwchem client/server coupling of LAMMPS client with
NWChem quantum DFT as server for quantum forces
lammps_quest MD with quantum forces, coupling to Quest DFT code
lammps_spparks grain-growth Monte Carlo with strain via MD,
coupling to SPPARKS kinetic MC code
lammps_vasp client/server coupling of LAMMPS client with
VASP quantum DFT as server for quantum forces
library collection of useful inter-code communication routines
fortran a simple wrapper on the LAMMPS library API that
can be called from Fortran

View File

@ -1,33 +0,0 @@
# Makefile for MC
SHELL = /bin/sh
SRC = mc.cpp random_park.cpp
OBJ = $(SRC:.cpp=.o)
# change this line for your machine to path for CSlib src dir
CSLIB = /home/sjplimp/lammps/lib/message/cslib/src
# compiler/linker settings
CC = g++
CCFLAGS = -g -O3 -I$(CSLIB)
LINK = g++
LINKFLAGS = -g -O -L$(CSLIB)
# targets
mc: $(OBJ)
# first line if you built the CSlib within lib/message with ZMQ support
# second line if you built the CSlib without ZMQ support
$(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -lzmq -o mc
# $(LINK) $(LINKFLAGS) $(OBJ) -lcsnompi -o mc
clean:
@rm -f *.o mc
# rules
%.o:%.cpp
$(CC) $(CCFLAGS) -c $<

View File

@ -1,128 +0,0 @@
Sample Monte Carlo (MC) wrapper on LAMMPS via client/server coupling
See the MESSAGE package documentation (Build_extras.html#message) for
more details on how client/server coupling works in LAMMPS.
In this dir, the mc.cpp/h files are a standalone "client" MC code. It
should be run on a single processor, though it could become a parallel
program at some point. LAMMPS is also run as a standalone executable
as a "server" on as many processors as desired using its "server mc"
command; see its doc page for details.
Messages are exchanged between MC and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the MC
program became parallel, data could also be exchanged via MPI.
The MC code makes simple MC moves, by displacing a single random atom
by a small random amount. It uses LAMMPS to calculate the energy
change, and to run dynamics between MC moves.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
% cd lammps/lib/message
% python Install.py -m -z # build CSlib with MPI and ZMQ support
% cd lammps/src
% make yes-message
% make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the MC client code
The source files for the MC code are in this dir. It links with the
CSlib library in lib/message/cslib.
You must first build the CSlib in serial mode, e.g.
% cd lammps/lib/message/cslib/src
% make lib # build serial and parallel lib with ZMQ support
% make lib zmq=no # build serial and parallel lib without ZMQ support
Then edit the Makefile in this dir. The CSLIB variable should be the
path to where the LAMMPS lib/message/cslib/src dir is on your system.
If you built the CSlib without ZMQ support you will also need to
comment/uncomment one line. Then you can just type
% make
and you should get an "mc" executable.
----------------
To run in client/server mode:
Both the client (MC) and server (LAMMPS) must use the same messaging
mode, namely file or zmq. This is an argument to the MC code; it can
be selected by setting the "mode" variable when you run LAMMPS. The
default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The MC code is always run in serial.
When you run, the server should print out thermodynamic info
for every MD run it performs (between MC moves). The client
will print nothing until the simulation ends, then it will
print stats about the accepted MC moves.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 1 lmp_mpi -v mode file -in in.mc.server
% mpirun -np 1 mc in.mc file tmp.couple
% mpirun -np 4 lmp_mpi -v mode file -in in.mc.server
ZMQ mode of messaging:
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 1 lmp_mpi -v mode zmq -in in.mc.server
% mpirun -np 1 mc in.mc zmq localhost:5555
% mpirun -np 4 lmp_mpi -v mode zmq -in in.mc.server
--------------
The input script for the MC program is in.mc. You can edit it to run
longer simulations.
500 nsteps = total # of steps of MD
100 ndynamics = # of MD steps between MC moves
0.1 delta = displacement size of MC move
1.0 temperature = used in MC Boltzmann factor
12345 seed = random number seed
--------------
The problem size that LAMMPS is computing the MC energy for and
running dynamics on is set by the x,y,z variables in the LAMMPS
in.mc.server script. The default size is 500 particles. You can
adjust the size as follows:
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles

View File

@ -1,7 +0,0 @@
# MC params
500 nsteps
100 ndynamics
0.1 delta
1.0 temperature
12345 seed

View File

@ -1,36 +0,0 @@
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then &
"message server mc file tmp.couple" &
elif "${mode} == zmq" &
"message server mc zmq *:5555" &
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000649929 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
93.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 2.14577e-06 on 1 procs for 0 steps with 500 atoms
139.8% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
157.3% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:02

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000592947 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 3.8147e-06 on 4 procs for 0 steps with 500 atoms
59.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.815e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
106.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.75509e-06 on 4 procs for 0 steps with 500 atoms
113.2% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.755e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.563e-06 on 4 procs for 0 steps with 500 atoms
117.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.563e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 3.99351e-06 on 4 procs for 0 steps with 500 atoms
93.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.994e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 3.57628e-06 on 4 procs for 0 steps with 500 atoms
111.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.576e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:02

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000741005 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 1.90735e-06 on 1 procs for 0 steps with 500 atoms
52.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.907e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19501 ave 19501 max 19501 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70239211 -5.6763152 0 -4.6248342 0.59544428
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.7565768 0 -4.6240944 0.22436405
Loop time of 1.19209e-06 on 1 procs for 0 steps with 500 atoms
83.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.192e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1939 ave 1939 max 1939 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18757 ave 18757 max 18757 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18757
Ave neighs/atom = 37.514
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.7565013 -5.757431 0 -4.6249485 0.21982657
150 0.76110797 -5.7664315 0 -4.6270529 0.16005254
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7181381 0 -4.6177585 0.37629943
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18699 ave 18699 max 18699 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73505651 -5.7266069 0 -4.6262273 0.34189744
250 0.73052476 -5.7206316 0 -4.627036 0.39287516
300 0.76300831 -5.7675007 0 -4.6252773 0.16312925
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
104.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1903 ave 1903 max 1903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18715 ave 18715 max 18715 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18715
Ave neighs/atom = 37.43
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76300831 -5.768304 0 -4.6260806 0.15954325
350 0.72993309 -5.7193261 0 -4.6266162 0.3358374
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
run 0
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.7077332 0 -4.6228655 0.47669832
Loop time of 9.53674e-07 on 1 procs for 0 steps with 500 atoms
209.7% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1899 ave 1899 max 1899 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18683 ave 18683 max 18683 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18683
Ave neighs/atom = 37.366
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.658 | 2.658 | 2.658 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.72469448 -5.713463 0 -4.6285954 0.44859547
450 0.75305735 -5.7518283 0 -4.6245015 0.34658587
500 0.73092571 -5.7206337 0 -4.6264379 0.43715809
Total wall time: 0:00:00

View File

@ -1,254 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones Monte Carlo server script
variable mode index file
if "${mode} == file" then "message server mc file tmp.couple" elif "${mode} == zmq" "message server mc zmq *:5555"
message server mc zmq *:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000576019 secs
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
velocity all create 1.44 87287 loop geom
fix 1 all nve
thermo 50
server mc
run 0
Neighbor list info ...
update every 20 steps, delay 0 steps, check no
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
Loop time of 4.76837e-06 on 4 procs for 0 steps with 500 atoms
89.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.768e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875 ave 4875 max 4875 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Dangerous builds not checked
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
Loop time of 3.45707e-06 on 4 procs for 0 steps with 500 atoms
94.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.457e-06 | | |100.00
Nlocal: 125 ave 125 max 125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 1099 ave 1099 max 1099 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 4875.25 ave 4885 max 4866 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Total # of neighbors = 19501
Ave neighs/atom = 39.002
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7723127 0 -4.6166327 -5.015531
50 0.70210225 -5.6759068 0 -4.6248598 0.59609192
100 0.75891559 -5.7611234 0 -4.6250267 0.20841608
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
Loop time of 3.03984e-06 on 4 procs for 0 steps with 500 atoms
115.1% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.04e-06 | | |100.00
Nlocal: 125 ave 126 max 124 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 1085.25 ave 1089 max 1079 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Neighs: 4690.25 ave 4996 max 4401 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 18761
Ave neighs/atom = 37.522
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
100 0.75891559 -5.7609392 0 -4.6248426 0.20981291
150 0.75437991 -5.7558622 0 -4.6265555 0.20681722
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7143906 0 -4.6199151 0.37126023
Loop time of 2.38419e-06 on 4 procs for 0 steps with 500 atoms
125.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.384e-06 | | |100.00
Nlocal: 125 ave 126 max 123 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 1068.5 ave 1076 max 1063 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 4674.75 ave 4938 max 4419 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18699
Ave neighs/atom = 37.398
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
200 0.73111257 -5.7193748 0 -4.6248993 0.35230715
250 0.73873144 -5.7312505 0 -4.6253696 0.33061033
300 0.76392796 -5.7719207 0 -4.6283206 0.18197874
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
Loop time of 2.44379e-06 on 4 procs for 0 steps with 500 atoms
112.5% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.444e-06 | | |100.00
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1069 ave 1080 max 1055 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 4672 ave 4803 max 4600 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 18688
Ave neighs/atom = 37.376
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
300 0.76392796 -5.7725589 0 -4.6289588 0.17994628
350 0.71953041 -5.7041632 0 -4.6270261 0.44866153
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
run 0
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7158168 0 -4.6201554 0.49192039
Loop time of 2.14577e-06 on 4 procs for 0 steps with 500 atoms
139.8% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.146e-06 | | |100.00
Nlocal: 125 ave 132 max 118 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 1057.5 ave 1068 max 1049 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 4685.75 ave 5045 max 4229 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 18743
Ave neighs/atom = 37.486
Neighbor list builds = 0
Dangerous builds not checked
Per MPI rank memory allocation (min/avg/max) = 2.619 | 2.619 | 2.619 Mbytes
Step Temp E_pair E_mol TotEng Press
400 0.7319047 -5.7216051 0 -4.6259438 0.46321355
450 0.74503154 -5.7405318 0 -4.6252196 0.33211879
500 0.70570501 -5.6824439 0 -4.6260035 0.62020788
Total wall time: 0:00:00

View File

@ -1,263 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
// MC code used with LAMMPS in client/server mode
// MC is the client, LAMMPS is the server
// Syntax: mc infile mode modearg
// mode = file, zmq
// modearg = filename for file, localhost:5555 for zmq
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "mc.h"
#include "random_park.h"
#include "cslib.h"
using namespace CSLIB_NS;
void error(const char *);
CSlib *cs_create(char *, char *);
#define MAXLINE 256
/* ---------------------------------------------------------------------- */
// main program
int main(int narg, char **arg)
{
if (narg != 4) {
error("Syntax: mc infile mode modearg");
exit(1);
}
// initialize CSlib
CSlib *cs = cs_create(arg[2],arg[3]);
// create MC class and perform run
MC *mc = new MC(arg[1],cs);
mc->run();
// final MC stats
int naccept = mc->naccept;
int nattempt = mc->nattempt;
printf("------ MC stats ------\n");
printf("MC attempts = %d\n",nattempt);
printf("MC accepts = %d\n",naccept);
printf("Acceptance ratio = %g\n",1.0*naccept/nattempt);
// clean up
delete cs;
delete mc;
}
/* ---------------------------------------------------------------------- */
void error(const char *str)
{
printf("ERROR: %s\n",str);
exit(1);
}
/* ---------------------------------------------------------------------- */
CSlib *cs_create(char *mode, char *arg)
{
CSlib *cs = new CSlib(0,mode,arg,NULL);
// initial handshake to agree on protocol
cs->send(0,1);
cs->pack_string(1,(char *) "mc");
int msgID,nfield;
int *fieldID,*fieldtype,*fieldlen;
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
return cs;
}
// ----------------------------------------------------------------------
// MC class
// ----------------------------------------------------------------------
MC::MC(char *mcfile, void *cs_caller)
//MC::MC(char *mcfile, CSlib *cs_caller)
{
cs_void = cs_caller;
// setup MC params
options(mcfile);
// random # generator
random = new RanPark(seed);
}
/* ---------------------------------------------------------------------- */
MC::~MC()
{
free(x);
delete random;
}
/* ---------------------------------------------------------------------- */
void MC::run()
{
int iatom,accept,msgID,nfield;
double pe_initial,pe_final,edelta;
double dx,dy,dz;
double xold[3],xnew[3];
int *fieldID,*fieldtype,*fieldlen;
enum{NATOMS=1,EINIT,DISPLACE,ACCEPT,RUN};
CSlib *cs = (CSlib *) cs_void;
// one-time request for atom count from MD
// allocate 1d coord buffer
cs->send(NATOMS,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
natoms = cs->unpack_int(1);
x = (double *) malloc(3*natoms*sizeof(double));
// loop over MC moves
naccept = nattempt = 0;
for (int iloop = 0; iloop < nloop; iloop++) {
// request current energy from MD
// recv energy, coords from MD
cs->send(EINIT,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_initial = cs->unpack_double(1);
double *x = (double *) cs->unpack(2);
// perform simple MC event
// displace a single atom by random amount
iatom = (int) natoms*random->uniform();
xold[0] = x[3*iatom+0];
xold[1] = x[3*iatom+1];
xold[2] = x[3*iatom+2];
dx = 2.0*delta*random->uniform() - delta;
dy = 2.0*delta*random->uniform() - delta;
dz = 2.0*delta*random->uniform() - delta;
xnew[0] = xold[0] + dx;
xnew[1] = xold[1] + dy;
xnew[2] = xold[2] + dz;
// send atom ID and its new coords to MD
// recv new energy
cs->send(DISPLACE,2);
cs->pack_int(1,iatom+1);
cs->pack(2,4,3,xnew);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
pe_final = cs->unpack_double(1);
// decide whether to accept/reject MC event via the Metropolis criterion:
// always accept downhill moves; accept uphill moves with probability
// exp(-natoms*(pe_final-pe_initial)/temperature)
// (pe is a per-atom energy, so it is scaled by natoms)
if (pe_final <= pe_initial) accept = 1;
else if (temperature == 0.0) accept = 0;
else if (random->uniform() >
exp(natoms*(pe_initial-pe_final)/temperature)) accept = 0;
else accept = 1;
nattempt++;
if (accept) naccept++;
// send accept (1) or reject (0) flag to MD
cs->send(ACCEPT,1);
cs->pack_int(1,accept);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
// send dynamics timesteps
cs->send(RUN,1);
cs->pack_int(1,ndynamics);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
// send exit message to MD
cs->send(-1,0);
msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
}
/* ---------------------------------------------------------------------- */
void MC::options(char *filename)
{
// default params
nsteps = 0;
ndynamics = 100;
delta = 0.1;
temperature = 1.0;
seed = 12345;
// read and parse file
FILE *fp = fopen(filename,"r");
if (fp == NULL) error("Could not open MC file");
char line[MAXLINE];
char *keyword,*value;
char *eof = fgets(line,MAXLINE,fp);
while (eof) {
if (line[0] == '#') { // comment line
eof = fgets(line,MAXLINE,fp);
continue;
}
value = strtok(line," \t\n\r\f");
if (value == NULL) { // blank line
eof = fgets(line,MAXLINE,fp);
continue;
}
keyword = strtok(NULL," \t\n\r\f");
if (keyword == NULL) error("Missing keyword in MC file");
if (strcmp(keyword,"nsteps") == 0) nsteps = atoi(value);
else if (strcmp(keyword,"ndynamics") == 0) ndynamics = atoi(value);
else if (strcmp(keyword,"delta") == 0) delta = atof(value);
else if (strcmp(keyword,"temperature") == 0) temperature = atof(value);
else if (strcmp(keyword,"seed") == 0) seed = atoi(value);
else error("Unknown param in MC file");
eof = fgets(line,MAXLINE,fp);
}
// derived params
nloop = nsteps/ndynamics;
}
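For reference, MC::options() above parses a simple whitespace-separated
file in which, as the parser is written, the first token on each line is
the value and the second is the keyword. A minimal parameter file
consistent with that parser might look like this (the values shown are
illustrative only, not taken from the original distribution):
# sample MC input (value keyword per line, as MC::options() expects)
3000 nsteps
100 ndynamics
0.1 delta
1.0 temperature
12345 seed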

View File

@ -1,40 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
------------------------------------------------------------------------- */
#ifndef MC_H
#define MC_H
/* ---------------------------------------------------------------------- */
class MC {
public:
int naccept; // # of accepted MC events
int nattempt; // # of attempted MC events
MC(char *, void *);
~MC();
void run();
private:
int nsteps; // total # of MD steps
int ndynamics; // steps in one short dynamics run
int nloop; // nsteps/ndynamics
int natoms; // # of MD atoms
double delta; // MC displacement distance
double temperature; // MC temperature for Boltzmann criterion
double *x; // atom coords as 3N 1d vector
double energy; // global potential energy
int seed; // RNG seed
class RanPark *random;
void *cs_void; // messaging library
void options(char *);
};
#endif

View File

@ -1,72 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
// Park/Miller RNG
#include <math.h>
#include "random_park.h"
//#include "error.h"
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
/* ---------------------------------------------------------------------- */
RanPark::RanPark(int seed_init)
{
//if (seed_init <= 0)
// error->one(FLERR,"Invalid seed for Park random # generator");
seed = seed_init;
save = 0;
}
/* ----------------------------------------------------------------------
uniform RN
------------------------------------------------------------------------- */
double RanPark::uniform()
{
// Schrage's factorization: computes (IA*seed) % IM without overflow
// in 32-bit signed arithmetic, using IM = IA*IQ + IR with IR < IQ
int k = seed/IQ;
seed = IA*(seed-k*IQ) - IR*k;
if (seed < 0) seed += IM;
double ans = AM*seed;
return ans;
}
/* ----------------------------------------------------------------------
gaussian RN
------------------------------------------------------------------------- */
double RanPark::gaussian()
{
double first,v1,v2,rsq,fac;
// Marsaglia polar method: each pass generates two Gaussian RNs;
// return one now and cache the other in "second" for the next call
if (!save) {
do {
v1 = 2.0*uniform()-1.0;
v2 = 2.0*uniform()-1.0;
rsq = v1*v1 + v2*v2;
} while ((rsq >= 1.0) || (rsq == 0.0));
fac = sqrt(-2.0*log(rsq)/rsq);
second = v1*fac;
first = v2*fac;
save = 1;
} else {
first = second;
save = 0;
}
return first;
}
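The constants above implement the Park/Miller "minimal standard"
generator s' = 16807 * s mod (2^31 - 1). Schrage's factorization, as
used in uniform(), avoids intermediate overflow in 32-bit signed
arithmetic; with a = IA, m = IM, q = IQ = floor(m/a), and
r = IR = m mod a (note r < q), the update is:

  a s \bmod m =
  \begin{cases}
    a(s \bmod q) - r\lfloor s/q \rfloor     & \text{if nonnegative} \\
    a(s \bmod q) - r\lfloor s/q \rfloor + m & \text{otherwise}
  \end{cases}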

View File

@ -1,28 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/ Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef RANPARK_H
#define RANPARK_H
class RanPark {
public:
RanPark(int);
double uniform();
double gaussian();
private:
int seed,save;
double second;
};
#endif
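A minimal, hypothetical driver showing how the RanPark class declared
above is used, assuming it is compiled together with random_park.cpp:

// toy driver for RanPark (hypothetical, for illustration only)
#include <cstdio>
#include "random_park.h"

int main()
{
  RanPark rng(12345);               // seed must be positive
  for (int i = 0; i < 3; i++)
    printf("uniform %g  gaussian %g\n", rng.uniform(), rng.gaussian());
  return 0;
}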

View File

@ -1,197 +0,0 @@
Sample LAMMPS MD wrapper on NWChem via client/server coupling
See the MESSAGE package documentation in Build_extras.html#message
for more details on how client/server coupling works in LAMMPS.
In this dir, the nwchem_wrap.py is a wrapper on the NWChem electronic
structure code so it can work as a "server" code which LAMMPS drives
as a "client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends NWChem a current set of coordinates each timestep,
NWChem computes forces and energy (and virial) and returns that info
to LAMMPS.
Messages are exchanged between NWChem and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
nwchem_wrap.py program were made parallel, or if the CSlib calls were
integrated into NWChem directly, then data could also be exchanged via
MPI.
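As a rough illustration of what a CSlib client looks like, here is a
minimal C++ sketch of the create/handshake/teardown pattern, modeled on
the cs_create() and run() functions in the mc.cpp file removed by this
same commit; it assumes the CSlib header and library from lib/message
are on the include and link paths:

// minimal CSlib client sketch (modeled on mc.cpp in this commit)
#include "cslib.h"
using namespace CSLIB_NS;

int main()
{
  // mode = "file" or "zmq"; second arg = filename or host:port
  CSlib *cs = new CSlib(0,(char *) "file",(char *) "tmp.couple",NULL);

  // initial handshake: msgID 0 with one string field naming the protocol
  cs->send(0,1);
  cs->pack_string(1,(char *) "mc");

  int nfield,*fieldID,*fieldtype,*fieldlen;
  cs->recv(nfield,fieldID,fieldtype,fieldlen);

  // ... pack/unpack application messages here, as in MC::run() ...

  // msgID -1 tells the server to exit
  cs->send(-1,0);
  cs->recv(nfield,fieldID,fieldtype,fieldlen);
  delete cs;
  return 0;
}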
There are 2 examples provided in the planewave and ao_basis
sub-directories. See details below.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the nwchem_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This will make shared-library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it cannot, you will get an error when
you run nwchem_wrap.py.
You can do this by augmenting two environment variables, either from
the command line, or in your shell start-up script. Here is the
sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
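For bash-compatible shells, the equivalent lines are:
export PYTHONPATH=${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src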
----------------
Prepare to use NWChem and the nwchem_wrap.py script
You can run the nwchem_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
nwchem_lammps.out files output by a previous NWChem run.
But note that the as-is version of nwchem_wrap.py will not attempt to
run NWChem.
To have the wrapper actually launch NWChem, edit the first nwchemcmd
line at the top of nwchem_wrap.py to be the launch command needed to
run NWChem on your system. It can be a command to run NWChem in serial
or in parallel, e.g. an mpirun command. Then comment out the second
nwchemcmd line immediately following it.
Ensure you have the necessary NWChem input file in this directory,
suitable for the NWChem calculation you want to perform.
Example input files are provided for both atom-centered AO basis sets
and plane-wave basis sets. Note that the NWChem template file should
be matched to the LAMMPS input script (# of atoms and atom types, box
size, etc).
Once you run NWChem yourself, the nwchem_lammps.out file will be
overwritten.
The syntax of the wrapper is:
nwchem_wrap.py file/zmq ao/pw input_template
* file/zmq = messaging mode, must match the LAMMPS messaging mode
* ao/pw = basis set mode, selects between atom-centered and plane-wave;
  the input_template file must correspond to the chosen basis set mode:
  the "ao" mode supports the scf and dft modules in NWChem,
  the "pw" mode supports the nwpw module
* input_template = NWChem input file used as template; it must include a
  "geometry" block with the atoms in the simulation; dummy
  xyz coordinates should be included (but are not used), and
  atom ordering must match the LAMMPS input
During a simulation, the molecular orbitals from the previous timestep
will be used as the initial guess for the next NWChem calculation. If
a file named "nwchem_lammps.movecs" is in the directory the wrapper is
called from, these orbitals will be used as the initial guess orbitals
in the first step of the simulation.
----------------
Example directories
(1) planewave
Demonstrates coupling of the nwpw module in NWChem with LAMMPS. Only fully
periodic boundary conditions and orthogonal simulation boxes are currently
supported by the wrapper. The included files provide an example run using a
2 atom unit cell of tungsten.
Files:
* data.W LAMMPS input with geometry information
* in.client.W LAMMPS simulation input
* log.client.output LAMMPS simulation output
* w.nw NWChem template input file
* nwchem_lammps.out NWChem output
(2) ao_basis
Demonstrates coupling of the scf (or dft) modules in NWChem with
LAMMPS. Only fully aperiodic boundary conditions are currently
supported by the wrapper. The included files provide an example run
using a single water molecule.
Files:
* data.h2o LAMMPS input with geometry information
* in.client.h2o LAMMPS simulation input
* log.client.output LAMMPS simulation output
* h2o.nw NWChem template input file
* nwchem_lammps.out NWChem output
As noted above, you can run the nwchem_wrap.py script as-is to test that
the coupling between it and LAMMPS is functional. This will use the included
nwchem_lammps.out files.
----------------
To run in client/server mode:
NOTE: The nwchem_wrap.py script must be run with Python version 2, not
3, because it uses the CSlib Python wrapper, which only supports
version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (nwchem_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
nwchem_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The nwchem_wrap.py code is always run in serial, but it
launches NWChem from Python via an mpirun command which can run NWChem
itself in parallel.
When you run, LAMMPS should print out thermodynamic info every
timestep, corresponding to the forces and virial computed by NWChem.
NWChem will also generate output files each timestep. Output files from
previous timesteps are archived in a "nwchem_logs" directory.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file -in in.client.W
% python nwchem_wrap.py file pw w.nw
% mpirun -np 2 lmp_mpi -v mode file -in in.client.h2o
% python nwchem_wrap.py file ao h2o.nw
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq -in in.client.W
% python nwchem_wrap.py zmq pw w.nw
% mpirun -np 2 lmp_mpi -v mode zmq -in in.client.h2o
% python nwchem_wrap.py zmq ao h2o.nw

View File

@ -1,20 +0,0 @@
LAMMPS H2O data file
3 atoms
2 atom types
-10.0 10.0 xlo xhi
-10.0 10.0 ylo yhi
-10.0 10.0 zlo zhi
Masses
1 15.994915008544922
2 1.0078250169754028
Atoms
1 1 0.0 0.0 0.0
2 2 0.0 0.756723 -0.585799
3 2 0.0 -0.756723 -0.585799

View File

@ -1,25 +0,0 @@
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym
O 0.0 0.0 0.0
H 1.0 0.5 0.0
H -1.0 0.5 0.0
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient

View File

@ -1,27 +0,0 @@
# H2O with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

View File

@ -1,30 +0,0 @@
# H2O with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
group one id 2
displace_atoms one move 0.1 0.2 0.3
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
minimize 1.0e-6 1.0e-6 10 50

View File

@ -1,66 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000627125 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0276 | 0.0276 | 0.0276 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 0.077556087 10.354878 8000
1 300 0 0 0.077556087 10.354878 8000
2 300 0 0 0.077556087 10.354878 8000
3 300 0 0 0.077556087 10.354878 8000
Loop time of 0.30198 on 1 procs for 3 steps with 3 atoms
Performance: 0.858 ns/day, 27.961 hours/ns, 9.934 timesteps/s
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 2.5979e-06 | 2.5979e-06 | 2.5979e-06 | 0.0 | 0.00
Output | 0.00012053 | 0.00012053 | 0.00012053 | 0.0 | 0.04
Modify | 0.30185 | 0.30185 | 0.30185 | 0.0 | 99.96
Other | | 8.211e-06 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:07

View File

@ -1,66 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000608759 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0276 | 0.0276 | 0.0276 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 -2068.2746 10.354878 8000
1 200.33191 0 0 -2068.2704 6.9147085 8000
2 152.36218 0 0 -2068.269 5.2589726 8000
3 227.40679 0 0 -2068.2722 7.8492321 8000
Loop time of 1.90319 on 1 procs for 3 steps with 3 atoms
Performance: 0.136 ns/day, 176.221 hours/ns, 1.576 timesteps/s
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 3.9274e-06 | 3.9274e-06 | 3.9274e-06 | 0.0 | 0.00
Output | 0.00011798 | 0.00011798 | 0.00011798 | 0.0 | 0.01
Modify | 1.9031 | 1.9031 | 1.9031 | 0.0 | 99.99
Other | | 1.054e-05 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:07

View File

@ -1,82 +0,0 @@
LAMMPS (19 Sep 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:93)
using 1 OpenMP thread(s) per MPI task
# H2O with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
boundary m m m
read_data data.h2o
orthogonal box = (-10 -10 -10) to (10 10 10)
1 by 1 by 1 MPI processor grid
reading atoms ...
3 atoms
read_data CPU = 0.000615383 secs
group one id 2
1 atoms in group one
displace_atoms one move 0.1 0.2 0.3
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
minimize 1.0e-6 1.0e-6 10 50
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:174)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 0.0279 | 0.0279 | 0.0279 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 300 0 0 -2067.8909 10.354878 8000
1 300 0 0 -2068.0707 10.354878 8000
2 300 0 0 -2068.252 10.354878 8000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
3 300 0 0 -2068.2797 10.354878 8000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:166)
4 300 0 0 -2068.2799 10.354878 8000
Loop time of 5.71024 on 1 procs for 4 steps with 3 atoms
0.1% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-2067.96847053 -2068.35730416 -2068.35745184
Force two-norm initial, final = 4.54685 0.124714
Force max component initial, final = 3.48924 0.0859263
Final line search alpha, max atom move = 1 0.0859263
Iterations, force evaluations = 4 8
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 6.2305e-07 | 6.2305e-07 | 6.2305e-07 | 0.0 | 0.00
Comm | 1.1522e-05 | 1.1522e-05 | 1.1522e-05 | 0.0 | 0.00
Output | 8.4217e-05 | 8.4217e-05 | 8.4217e-05 | 0.0 | 0.00
Modify | 5.7099 | 5.7099 | 5.7099 | 0.0 | 99.99
Other | | 0.0002355 | | | 0.00
Nlocal: 3 ave 3 max 3 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 2
Dangerous builds not checked
Total wall time: 0:00:10

View File

@ -1,626 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym nocenter
O 0.00197082 0.0012463 -0.00298048
H -0.0432066 0.769363 -0.596119
H 0.0119282 -0.789143 -0.528177
end
scf
vectors input nwchem_lammps.movecs
end
dft
vectors input nwchem_lammps.movecs
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = almondjoy
program = /home/jboschen/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Fri Jan 31 00:31:00 2020
compiled = Tue_Oct_01_13:20:43_2019
source = /home/jboschen/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 655358 doubles = 5.0 Mbytes
stack = 3014651 doubles = 23.0 Mbytes
global = 5242880 doubles = 40.0 Mbytes (distinct from heap & stack)
total = 8912889 doubles = 68.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = scf
Operation = gradient
Status = ok
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 3 Fri Jan 31 00:30:59 2020
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 ao basis 2 Fri Jan 31 00:30:59 2020
The basis set named "ao basis" is the default AO basis for restart
NWChem Input Module
-------------------
Scaling coordinates for geometry "geometry" by 1.889725989
(inverse scale = 0.529177249)
------
auto-z
------
no constraints, skipping 0.0000000000000000
no constraints, skipping 0.0000000000000000
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 O 8.0000 0.00197082 0.00124630 -0.00298048
2 H 1.0000 -0.04320660 0.76936300 -0.59611900
3 H 1.0000 0.01192820 -0.78914300 -0.52817700
Atomic Mass
-----------
O 15.994910
H 1.007825
Effective nuclear repulsion energy (a.u.) 9.1573270473
Nuclear Dipole moment (a.u.)
----------------------------
X Y Z
---------------- ---------------- ----------------
-0.0293131272 -0.0185374561 -2.1696696942
Z-matrix (autoz)
--------
Units are Angstrom for bonds and degrees for angles
Type Name I J K L M Value
----------- -------- ----- ----- ----- ----- ----- ----------
1 Stretch 1 2 0.97152
2 Stretch 1 3 0.94902
3 Bend 2 1 3 108.72901
XYZ format geometry
-------------------
3
geometry
O 0.00197082 0.00124630 -0.00298048
H -0.04320660 0.76936300 -0.59611900
H 0.01192820 -0.78914300 -0.52817700
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 H | 1 O | 1.83591 | 0.97152
3 H | 1 O | 1.79339 | 0.94902
------------------------------------------------------------------------------
number of included internuclear distances: 2
==============================================================================
==============================================================================
internuclear angles
------------------------------------------------------------------------------
center 1 | center 2 | center 3 | degrees
------------------------------------------------------------------------------
2 H | 1 O | 3 H | 108.73
------------------------------------------------------------------------------
number of included internuclear angles: 1
==============================================================================
Basis "ao basis" -> "" (cartesian)
-----
O (Oxygen)
----------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 5.48467170E+03 0.001831
1 S 8.25234950E+02 0.013950
1 S 1.88046960E+02 0.068445
1 S 5.29645000E+01 0.232714
1 S 1.68975700E+01 0.470193
1 S 5.79963530E+00 0.358521
2 S 1.55396160E+01 -0.110778
2 S 3.59993360E+00 -0.148026
2 S 1.01376180E+00 1.130767
3 P 1.55396160E+01 0.070874
3 P 3.59993360E+00 0.339753
3 P 1.01376180E+00 0.727159
4 S 2.70005800E-01 1.000000
5 P 2.70005800E-01 1.000000
6 D 8.00000000E-01 1.000000
H (Hydrogen)
------------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 1.87311370E+01 0.033495
1 S 2.82539370E+00 0.234727
1 S 6.40121700E-01 0.813757
2 S 1.61277800E-01 1.000000
Summary of "ao basis" -> "" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
NWChem SCF Module
-----------------
ao basis = "ao basis"
functions = 19
atoms = 3
closed shells = 5
open shells = 0
charge = 0.00
wavefunction = RHF
input vectors = ./nwchem_lammps.movecs
output vectors = ./nwchem_lammps.movecs
use symmetry = F
symmetry adapt = F
Summary of "ao basis" -> "ao basis" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
Forming initial guess at 0.0s
Loading old vectors from job with title :
Starting SCF solution at 0.0s
----------------------------------------------
Quadratically convergent ROHF
Convergence threshold : 1.000E-04
Maximum no. of iterations : 100
Final Fock-matrix accuracy: 1.000E-07
----------------------------------------------
#quartets = 1.540D+03 #integrals = 1.424D+04 #direct = 0.0% #cached =100.0%
Integral file = ./nwchem_lammps.aoints.0
Record size in doubles = 65536 No. of integs per rec = 43688
Max. records in memory = 2 Max. records in file = 1392051
No. of bits per label = 8 No. of bits per value = 64
iter energy gnorm gmax time
----- ------------------- --------- --------- --------
1 -76.0095751323 4.63D-02 1.64D-02 0.1
2 -76.0097628164 8.13D-04 2.83D-04 0.1
3 -76.0097629130 3.92D-06 1.55D-06 0.1
Final RHF results
------------------
Total SCF energy = -76.009762913030
One-electron energy = -123.002897732381
Two-electron energy = 37.835807772101
Nuclear repulsion energy = 9.157327047250
Time for solution = 0.0s
Final eigenvalues
-----------------
1
1 -20.5584
2 -1.3367
3 -0.7128
4 -0.5617
5 -0.4959
6 0.2104
7 0.3038
8 1.0409
9 1.1202
10 1.1606
11 1.1691
12 1.3840
13 1.4192
14 2.0312
15 2.0334
ROHF Final Molecular Orbital Analysis
-------------------------------------
Vector 2 Occ=2.000000D+00 E=-1.336749D+00
MO Center= -2.8D-03, -1.3D-02, -1.7D-01, r^2= 5.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
2 0.476636 1 O s 6 0.442369 1 O s
1 -0.210214 1 O s
Vector 3 Occ=2.000000D+00 E=-7.127948D-01
MO Center= -4.9D-03, 3.9D-03, -2.1D-01, r^2= 7.8D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
4 0.504894 1 O py 8 0.303932 1 O py
18 -0.234724 3 H s 16 0.229765 2 H s
Vector 4 Occ=2.000000D+00 E=-5.617306D-01
MO Center= 3.6D-03, 9.0D-03, 5.6D-02, r^2= 6.9D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
5 0.559565 1 O pz 9 0.410981 1 O pz
6 0.315892 1 O s 2 0.157960 1 O s
Vector 5 Occ=2.000000D+00 E=-4.959173D-01
MO Center= 1.4D-03, 6.9D-05, -2.2D-02, r^2= 6.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
3 0.638390 1 O px 7 0.511530 1 O px
Vector 6 Occ=0.000000D+00 E= 2.103822D-01
MO Center= -2.3D-02, 3.5D-02, -7.3D-01, r^2= 2.6D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.416869 1 O s 17 -1.068330 2 H s
19 -1.014775 3 H s 9 -0.490951 1 O pz
5 -0.212990 1 O pz
Vector 7 Occ=0.000000D+00 E= 3.037943D-01
MO Center= -1.8D-02, -8.9D-02, -7.1D-01, r^2= 2.8D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
19 -1.426837 3 H s 17 1.332767 2 H s
8 -0.842141 1 O py 4 -0.327553 1 O py
Vector 8 Occ=0.000000D+00 E= 1.040852D+00
MO Center= -7.4D-03, 1.3D-01, -1.6D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.931594 2 H s 18 -0.747590 3 H s
8 -0.655817 1 O py 17 -0.523035 2 H s
19 0.366407 3 H s 14 -0.357109 1 O dyz
Vector 9 Occ=0.000000D+00 E= 1.120172D+00
MO Center= -6.8D-03, -2.9D-02, -3.1D-01, r^2= 1.5D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.145090 1 O s 18 0.841596 3 H s
2 -0.727471 1 O s 16 0.684927 2 H s
9 0.559191 1 O pz 19 -0.546678 3 H s
17 -0.538778 2 H s 10 -0.344609 1 O dxx
15 -0.250035 1 O dzz
Vector 10 Occ=0.000000D+00 E= 1.160603D+00
MO Center= 1.2D-02, -4.3D-02, 2.5D-01, r^2= 1.0D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.137949 1 O s 5 -0.844233 1 O pz
9 0.595088 1 O pz 2 -0.475986 1 O s
18 -0.455932 3 H s 16 -0.357325 2 H s
13 -0.317117 1 O dyy 15 -0.196968 1 O dzz
Vector 11 Occ=0.000000D+00 E= 1.169054D+00
MO Center= 1.9D-03, 1.2D-03, -6.4D-03, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
7 -1.034653 1 O px 3 0.962043 1 O px
Vector 12 Occ=0.000000D+00 E= 1.384034D+00
MO Center= 6.0D-04, -2.6D-03, -5.0D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
8 1.557767 1 O py 4 -1.035829 1 O py
17 -0.900920 2 H s 19 0.901756 3 H s
Vector 13 Occ=0.000000D+00 E= 1.419205D+00
MO Center= -1.3D-02, -4.9D-02, -5.2D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 3.605136 1 O s 2 -1.454853 1 O s
9 -1.107532 1 O pz 19 -0.874208 3 H s
17 -0.757016 2 H s 13 -0.634436 1 O dyy
5 0.516593 1 O pz 15 -0.401100 1 O dzz
10 -0.319873 1 O dxx 16 -0.260650 2 H s
Vector 14 Occ=0.000000D+00 E= 2.031234D+00
MO Center= 1.9D-03, 2.3D-03, -3.0D-03, r^2= 6.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
11 1.727083 1 O dxy
Vector 15 Occ=0.000000D+00 E= 2.033369D+00
MO Center= 3.4D-03, 3.4D-03, 4.3D-02, r^2= 6.2D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
15 1.012642 1 O dzz 13 -0.512441 1 O dyy
10 -0.438481 1 O dxx 6 -0.226567 1 O s
center of mass
--------------
x = -0.00000001 y = -0.00000003 z = -0.12388979
moments of inertia (a.u.)
------------------
6.378705068992 0.153373998471 -0.069687034145
0.153373998471 2.014476065716 0.150739744400
-0.069687034145 0.150739744400 4.379134195179
Mulliken analysis of the total density
--------------------------------------
Atom Charge Shell Charges
----------- ------ -------------------------------------------------------
1 O 8 8.87 2.00 0.90 2.90 0.92 2.08 0.08
2 H 1 0.56 0.46 0.11
3 H 1 0.56 0.47 0.10
Multipole analysis of the density wrt the origin
------------------------------------------------
L x y z total open nuclear
- - - - ----- ---- -------
0 0 0 0 -0.000000 0.000000 10.000000
1 1 0 0 -0.026417 0.000000 -0.029313
1 0 1 0 -0.023604 0.000000 -0.018537
1 0 0 1 -0.846090 0.000000 -2.169670
2 2 0 0 -5.373227 0.000000 0.007286
2 1 1 0 -0.085617 0.000000 -0.152252
2 1 0 1 0.038215 0.000000 0.069311
2 0 2 0 -2.927589 0.000000 4.337695
2 0 1 1 -0.071410 0.000000 -0.149465
2 0 0 2 -4.159949 0.000000 2.265483
Parallel integral file used 1 records with 0 large values
NWChem Gradients Module
-----------------------
wavefunction = RHF
RHF ENERGY GRADIENTS
atom coordinates gradient
x y z x y z
1 O 0.003724 0.002355 -0.005632 0.000909 -0.019294 0.007866
2 H -0.081649 1.453885 -1.126502 -0.001242 0.025549 -0.011605
3 H 0.022541 -1.491264 -0.998110 0.000333 -0.006255 0.003739
----------------------------------------
| Time | 1-e(secs) | 2-e(secs) |
----------------------------------------
| CPU | 0.00 | 0.03 |
----------------------------------------
| WALL | 0.00 | 0.03 |
----------------------------------------
Task times cpu: 0.1s wall: 0.1s
NWChem Input Module
-------------------
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 182 182 2869 728 468 0 0 68
number of processes/call 1.00e+00 1.00e+00 1.00e+00 0.00e+00 0.00e+00
bytes total: 6.18e+05 3.56e+05 1.04e+05 0.00e+00 0.00e+00 5.44e+02
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 39432 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 18 28
current total bytes 0 0
maximum total bytes 1060104 16000888
maximum total K-bytes 1061 16001
maximum total M-bytes 2 17
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.1s wall: 0.2s

View File

@ -1,626 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
memory global 40 mb stack 23 mb heap 5 mb
geometry units angstrom noautosym nocenter
O -0.00836667 0.0010006 0.0866404
H 0.0968795 0.837453 -0.346117
H 0.0114839 -0.638453 -0.612122
end
scf
vectors input nwchem_lammps.movecs
end
dft
vectors input nwchem_lammps.movecs
end
basis
O library 6-31g*
H library 6-31g*
end
scf
maxiter 100
end
#dft
# xc b3lyp
#end
task scf gradient
#task dft gradient
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = almondjoy
program = /home/jboschen/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Fri Jan 31 00:33:40 2020
compiled = Tue_Oct_01_13:20:43_2019
source = /home/jboschen/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 655358 doubles = 5.0 Mbytes
stack = 3014651 doubles = 23.0 Mbytes
global = 5242880 doubles = 40.0 Mbytes (distinct from heap & stack)
total = 8912889 doubles = 68.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = scf
Operation = gradient
Status = ok
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 3 Fri Jan 31 00:33:40 2020
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 ao basis 2 Fri Jan 31 00:33:40 2020
The basis set named "ao basis" is the default AO basis for restart
NWChem Input Module
-------------------
Scaling coordinates for geometry "geometry" by 1.889725989
(inverse scale = 0.529177249)
------
auto-z
------
no constraints, skipping 0.0000000000000000
no constraints, skipping 0.0000000000000000
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 O 8.0000 -0.00836667 0.00100060 0.08664040
2 H 1.0000 0.09687950 0.83745300 -0.34611700
3 H 1.0000 0.01148390 -0.63845300 -0.61212200
Atomic Mass
-----------
O 15.994910
H 1.007825
Effective nuclear repulsion energy (a.u.) 9.2881144400
Nuclear Dipole moment (a.u.)
----------------------------
X Y Z
---------------- ---------------- ----------------
0.0782914233 0.3911823503 -0.5009962172
Z-matrix (autoz)
--------
Units are Angstrom for bonds and degrees for angles
Type Name I J K L M Value
----------- -------- ----- ----- ----- ----- ----- ----------
1 Stretch 1 2 0.94763
2 Stretch 1 3 0.94740
3 Bend 2 1 3 104.86952
XYZ format geometry
-------------------
3
geometry
O -0.00836667 0.00100060 0.08664040
H 0.09687950 0.83745300 -0.34611700
H 0.01148390 -0.63845300 -0.61212200
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 H | 1 O | 1.79077 | 0.94763
3 H | 1 O | 1.79032 | 0.94740
------------------------------------------------------------------------------
number of included internuclear distances: 2
==============================================================================
==============================================================================
internuclear angles
------------------------------------------------------------------------------
center 1 | center 2 | center 3 | degrees
------------------------------------------------------------------------------
2 H | 1 O | 3 H | 104.87
------------------------------------------------------------------------------
number of included internuclear angles: 1
==============================================================================
Basis "ao basis" -> "" (cartesian)
-----
O (Oxygen)
----------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 5.48467170E+03 0.001831
1 S 8.25234950E+02 0.013950
1 S 1.88046960E+02 0.068445
1 S 5.29645000E+01 0.232714
1 S 1.68975700E+01 0.470193
1 S 5.79963530E+00 0.358521
2 S 1.55396160E+01 -0.110778
2 S 3.59993360E+00 -0.148026
2 S 1.01376180E+00 1.130767
3 P 1.55396160E+01 0.070874
3 P 3.59993360E+00 0.339753
3 P 1.01376180E+00 0.727159
4 S 2.70005800E-01 1.000000
5 P 2.70005800E-01 1.000000
6 D 8.00000000E-01 1.000000
H (Hydrogen)
------------
Exponent Coefficients
-------------- ---------------------------------------------------------
1 S 1.87311370E+01 0.033495
1 S 2.82539370E+00 0.234727
1 S 6.40121700E-01 0.813757
2 S 1.61277800E-01 1.000000
Summary of "ao basis" -> "" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
NWChem SCF Module
-----------------
ao basis = "ao basis"
functions = 19
atoms = 3
closed shells = 5
open shells = 0
charge = 0.00
wavefunction = RHF
input vectors = ./nwchem_lammps.movecs
output vectors = ./nwchem_lammps.movecs
use symmetry = F
symmetry adapt = F
Summary of "ao basis" -> "ao basis" (cartesian)
------------------------------------------------------------------------------
Tag Description Shells Functions and Types
---------------- ------------------------------ ------ ---------------------
O 6-31g* 6 15 3s2p1d
H 6-31g* 2 2 2s
Forming initial guess at 0.0s
Loading old vectors from job with title :
Starting SCF solution at 0.0s
----------------------------------------------
Quadratically convergent ROHF
Convergence threshold : 1.000E-04
Maximum no. of iterations : 100
Final Fock-matrix accuracy: 1.000E-07
----------------------------------------------
#quartets = 1.540D+03 #integrals = 1.424D+04 #direct = 0.0% #cached =100.0%
Integral file = ./nwchem_lammps.aoints.0
Record size in doubles = 65536 No. of integs per rec = 43688
Max. records in memory = 2 Max. records in file = 1392051
No. of bits per label = 8 No. of bits per value = 64
iter energy gnorm gmax time
----- ------------------- --------- --------- --------
1 -76.0107350035 4.75D-05 2.49D-05 0.1
Final RHF results
------------------
Total SCF energy = -76.010735003510
One-electron energy = -123.220958992568
Two-electron energy = 37.922109549024
Nuclear repulsion energy = 9.288114440035
Time for solution = 0.0s
Final eigenvalues
-----------------
1
1 -20.5583
2 -1.3466
3 -0.7130
4 -0.5721
5 -0.4985
6 0.2129
7 0.3068
8 1.0286
9 1.1338
10 1.1678
11 1.1807
12 1.3845
13 1.4334
14 2.0187
15 2.0311
ROHF Final Molecular Orbital Analysis
-------------------------------------
Vector 2 Occ=2.000000D+00 E=-1.346587D+00
MO Center= 1.1D-02, 3.1D-02, -8.5D-02, r^2= 5.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
2 0.475648 1 O s 6 0.435095 1 O s
1 -0.209463 1 O s
Vector 3 Occ=2.000000D+00 E=-7.129747D-01
MO Center= 1.5D-02, 3.8D-02, -1.3D-01, r^2= 7.6D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
4 0.500246 1 O py 8 0.299047 1 O py
16 0.232138 2 H s 18 -0.232195 3 H s
Vector 4 Occ=2.000000D+00 E=-5.720760D-01
MO Center= -1.5D-02, -9.7D-03, 1.5D-01, r^2= 6.8D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
5 0.545527 1 O pz 9 0.395332 1 O pz
6 0.326735 1 O s 2 0.164593 1 O s
Vector 5 Occ=2.000000D+00 E=-4.984552D-01
MO Center= -6.2D-03, 4.4D-03, 6.7D-02, r^2= 6.0D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
3 0.634559 1 O px 7 0.507891 1 O px
Vector 6 Occ=0.000000D+00 E= 2.128732D-01
MO Center= 7.5D-02, 1.3D-01, -6.6D-01, r^2= 2.6D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.437795 1 O s 17 -1.050892 2 H s
19 -1.050374 3 H s 9 -0.494696 1 O pz
5 -0.208359 1 O pz
Vector 7 Occ=0.000000D+00 E= 3.067764D-01
MO Center= 7.1D-02, 1.3D-01, -6.3D-01, r^2= 2.7D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
17 1.413885 2 H s 19 -1.414835 3 H s
8 -0.824411 1 O py 4 -0.320355 1 O py
Vector 8 Occ=0.000000D+00 E= 1.028607D+00
MO Center= 7.1D-03, 2.6D-02, -5.2D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.839269 2 H s 18 -0.838060 3 H s
8 -0.692349 1 O py 17 -0.426291 2 H s
19 0.425092 3 H s 14 -0.319117 1 O dyz
Vector 9 Occ=0.000000D+00 E= 1.133833D+00
MO Center= -2.7D-02, -2.9D-02, 2.6D-01, r^2= 1.5D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 1.621086 1 O s 2 -0.910150 1 O s
9 0.744864 1 O pz 16 0.490586 2 H s
18 0.491102 3 H s 5 -0.484186 1 O pz
17 -0.426087 2 H s 19 -0.425823 3 H s
10 -0.375325 1 O dxx 15 -0.317874 1 O dzz
Vector 10 Occ=0.000000D+00 E= 1.167849D+00
MO Center= -8.0D-03, 1.6D-03, 8.3D-02, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
7 -1.028149 1 O px 3 0.955686 1 O px
Vector 11 Occ=0.000000D+00 E= 1.180721D+00
MO Center= 1.8D-02, 4.2D-02, -1.5D-01, r^2= 1.1D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
16 0.710073 2 H s 18 0.711177 3 H s
5 0.704677 1 O pz 17 -0.389719 2 H s
19 -0.389376 3 H s 6 -0.326170 1 O s
9 -0.288739 1 O pz 13 0.229749 1 O dyy
Vector 12 Occ=0.000000D+00 E= 1.384514D+00
MO Center= -7.4D-04, 1.3D-02, 1.8D-02, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
8 1.510506 1 O py 4 -1.021750 1 O py
17 -0.934844 2 H s 19 0.935260 3 H s
9 0.272171 1 O pz 5 -0.184286 1 O pz
Vector 13 Occ=0.000000D+00 E= 1.433397D+00
MO Center= 4.7D-02, 8.7D-02, -4.1D-01, r^2= 1.4D+00
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
6 3.628985 1 O s 2 -1.436467 1 O s
9 -1.143870 1 O pz 17 -0.805578 2 H s
19 -0.806493 3 H s 13 -0.635948 1 O dyy
5 0.489050 1 O pz 15 -0.410417 1 O dzz
16 -0.312860 2 H s 18 -0.312722 3 H s
Vector 14 Occ=0.000000D+00 E= 2.018721D+00
MO Center= -1.4D-02, -7.1D-03, 1.3D-01, r^2= 6.2D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
15 0.947149 1 O dzz 14 -0.531399 1 O dyz
13 -0.526961 1 O dyy 10 -0.358371 1 O dxx
12 -0.297495 1 O dxz 6 -0.233087 1 O s
Vector 15 Occ=0.000000D+00 E= 2.031133D+00
MO Center= -8.4D-03, 1.0D-03, 8.7D-02, r^2= 6.1D-01
Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
----- ------------ --------------- ----- ------------ ---------------
11 1.681563 1 O dxy 12 0.314688 1 O dxz
center of mass
--------------
x = -0.00258245 y = 0.02272235 z = 0.04407491
moments of inertia (a.u.)
------------------
6.155330507195 -0.266185800841 0.185335033231
-0.266185800841 2.211585220634 -0.350250164177
0.185335033231 -0.350250164177 4.020009073007
Mulliken analysis of the total density
--------------------------------------
Atom Charge Shell Charges
----------- ------ -------------------------------------------------------
1 O 8 8.87 2.00 0.90 2.91 0.91 2.06 0.08
2 H 1 0.57 0.47 0.10
3 H 1 0.57 0.47 0.10
Multipole analysis of the density wrt the origin
------------------------------------------------
L x y z total open nuclear
- - - - ----- ---- -------
0 0 0 0 -0.000000 0.000000 10.000000
1 1 0 0 0.094145 0.000000 0.078291
1 0 1 0 0.148179 0.000000 0.391182
1 0 0 1 -0.851621 0.000000 -0.500996
2 2 0 0 -5.338111 0.000000 0.035987
2 1 1 0 0.149191 0.000000 0.263306
2 1 0 1 -0.084723 0.000000 -0.165556
2 0 2 0 -3.114464 0.000000 3.960160
2 0 1 1 0.205130 0.000000 0.362991
2 0 0 2 -4.329185 0.000000 1.980308
Parallel integral file used 1 records with 0 large values
NWChem Gradients Module
-----------------------
wavefunction = RHF
RHF ENERGY GRADIENTS
atom coordinates gradient
x y z x y z
1 O -0.015811 0.001891 0.163727 -0.000201 -0.000505 0.001671
2 H 0.183076 1.582557 -0.654066 0.000065 -0.000505 -0.001056
3 H 0.021701 -1.206501 -1.156743 0.000136 0.001011 -0.000616
----------------------------------------
| Time | 1-e(secs) | 2-e(secs) |
----------------------------------------
| CPU | 0.00 | 0.03 |
----------------------------------------
| WALL | 0.00 | 0.03 |
----------------------------------------
Task times cpu: 0.1s wall: 0.1s
NWChem Input Module
-------------------
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 46 46 2296 477 27 0 0 68
number of processes/call 1.00e+00 1.00e+00 1.00e+00 0.00e+00 0.00e+00
bytes total: 2.70e+05 1.39e+05 2.27e+04 0.00e+00 0.00e+00 5.44e+02
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 37544 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 18 28
current total bytes 0 0
maximum total bytes 1060104 16000888
maximum total K-bytes 1061 16001
maximum total M-bytes 2 17
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.1s wall: 0.1s

View File

@ -1,447 +0,0 @@
#!/usr/bin/env python
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# https://www.lammps.org/ Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------
# Syntax: nwchem_wrap.py file/zmq ao/pw input_template
# file/zmq = messaging mode, must match LAMMPS messaging mode
# ao/pw = basis set mode, selects between atom-centered and plane-wave
# the input_template file must correspond to the appropriate basis set mode:
# the "ao" mode supports the scf and dft modules in NWChem,
# the "pw" mode supports the nwpw module.
# input_template = NWChem input file used as template, must include a
# "geometry" block with the atoms in the simulation, dummy
# xyz coordinates should be included (but are not used).
# Atom ordering must match LAMMPS input.
# wrapper on NWChem
# receives message with list of coords
# creates NWChem inputs
# invokes NWChem to calculate self-consistent energy of that config
# reads NWChem outputs
# sends message with energy, forces, pressure to client
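# Example invocations (the input file names below are hypothetical; the
# LAMMPS client must issue the matching "message client md file tmp.couple"
# or "message client md zmq localhost:5555" command):
#   python nwchem_wrap.py file ao water.nw
#   python nwchem_wrap.py zmq pw tungsten.nw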
from __future__ import print_function
import sys
version = sys.version_info[0]
if version == 3:
    sys.exit("The CSlib python wrapper does not yet support python 3")
import subprocess
import re
import os
import shutil
from cslib import CSlib
# the 2nd line overrides the 1st with a dummy command for testing;
# comment out the 2nd line once the 1st line is correct for your system
nwchemcmd = "mpirun -np 1 /usr/bin/nwchem"
nwchemcmd = "touch tmp"
# enums matching FixClientMD class in LAMMPS
SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)
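# these 1-based integers are the CSlib field IDs passed to cs.pack() and
# cs.unpack() below; they must stay in sync with the corresponding C++ enums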
# -------------------------------------
# functions
# error message and exit
def error(txt):
    print("ERROR:",txt)
    sys.exit(1)
# -------------------------------------
# read initial input file to setup problem
# return natoms
def nwchem_setup_ao(input):
    template = open(input,'r')
    geometry_block = False
    natoms = 0
    while True:
        line = template.readline()
        if not line: break
        if geometry_block and re.search("end",line):
            geometry_block = False
        if geometry_block and not re.match("#",line):
            natoms += 1
        if re.search("geometry",line):
            geometry_block = True
    return natoms
# -------------------------------------
# write a new input file for NWChem
# assumes the NWChem input geometry is to be specified in angstroms
def nwchem_input_write_ao(input,coords):
    template = open(input,'r')
    new_input = open("nwchem_lammps.nw",'w')
    geometry_block = False
    i = 0
    while True:
        line = template.readline()
        if not line: break
        if geometry_block and not re.match("#",line) and re.search("end",line):
            geometry_block = False
            if os.path.exists("nwchem_lammps.movecs"):
                # The below is hacky, but one of these lines will be ignored
                # by NWChem depending on if the input file is for scf/dft.
                append = "\nscf\n vectors input nwchem_lammps.movecs\nend\n"
                append2 = "\ndft\n vectors input nwchem_lammps.movecs\nend\n"
                line = line + append + append2
        if geometry_block and not re.match("#",line):
            x = coords[3*i+0]
            y = coords[3*i+1]
            z = coords[3*i+2]
            coord_string = " %g %g %g \n" % (x,y,z)
            atom_string = line.split()[0]
            line = atom_string + coord_string
            i += 1
        if (not re.match("#",line)) and re.search("geometry",line):
            geometry_block = True
            line = "geometry units angstrom noautosym nocenter\n"
        print(line,file=new_input,end='')
    new_input.close()
# -------------------------------------
# read a NWChem output nwchem_lammps.out file
def nwchem_read_ao(natoms, log):
    nwchem_output = open(log, 'r')
    energy_pattern = r"Total \w+ energy"
    gradient_pattern = "x y z x y z"
    eout = 0.0
    fout = []
    while True:
        line = nwchem_output.readline()
        if not line: break
        # pattern match for energy
        if re.search(energy_pattern,line):
            eout = float(line.split()[4])
        # pattern match for forces
        if re.search(gradient_pattern, line):
            for i in range(natoms):
                line = nwchem_output.readline()
                forces = line.split()
                fout += [float(forces[5]), float(forces[6]), float(forces[7])]
    # convert units
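    # NWChem reports the total energy in Hartree and gradients dE/dx in
    # Hartree/Bohr; LAMMPS metal units expect eV and eV/Angstrom, and the
    # sign flip below converts gradients to forces (F = -dE/dx)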
    hartree2eV = 27.21138602
    bohr2angstrom = 0.52917721092
    eout = eout * hartree2eV
    fout = [i * -hartree2eV/bohr2angstrom for i in fout]
    return eout,fout
# -------------------------------------
# read initial planewave input file to setup problem
# return natoms,box
def nwchem_setup_pw(input):
    template = open(input,'r')
    geometry_block = False
    system_block = False
    coord_pattern = r"^\s*\w{1,2}(?:\s+-?(?:\d+\.?\d*|\d*\.?\d+)){3}"
    natoms = 0
    box = []
    while True:
        line = template.readline()
        if not line: break
        if geometry_block and re.search("system crystal",line):
            system_block = True
            for i in range(3):
                line = template.readline()
                line = re.sub(r'd|D', 'e', line)
                box += [float(line.split()[1])]
        if geometry_block and not system_block and not re.match("#",line) and re.search("end",line):
            geometry_block = False
        if system_block and re.search("end",line):
            system_block = False
        if geometry_block and not re.match("#",line) and re.search(coord_pattern,line):
            natoms += 1
        if re.search("geometry",line) and not re.match("#",line):
            geometry_block = True
    return natoms,box
# -------------------------------------
# write a new planewave input file for NWChem
# assumes the NWChem input geometry is to be specified fractional coordinates
def nwchem_input_write_pw(input,coords,box):
    template = open(input,'r')
    new_input = open("nwchem_lammps.nw",'w')
    writing_atoms = False
    geometry_block = False
    system_block = False
    coord_pattern = r"^\s*\w{1,2}(?:\s+-?(?:\d+\.?\d*|\d*\.?\d+)){3}"
    i = 0
    while True:
        line = template.readline()
        if not line: break
        if geometry_block and re.search("system crystal",line):
            system_block = True
        if geometry_block and not system_block and not re.match("#",line) and re.search("end",line):
            geometry_block = False
            if os.path.exists("nwchem_lammps.movecs"):
                append = "\nnwpw\n vectors input nwchem_lammps.movecs\nend\n"
                line = line + append
        if system_block and re.search("end",line):
            system_block = False
        if geometry_block and not re.match("#",line) and re.search(coord_pattern,line):
            x = coords[3*i+0] / box[0]
            y = coords[3*i+1] / box[1]
            z = coords[3*i+2] / box[2]
            coord_string = " %g %g %g \n" % (x,y,z)
            atom_string = line.split()[0]
            line = atom_string + coord_string
            i += 1
        if re.search("geometry",line) and not re.match("#",line):
            geometry_block = True
        print(line,file=new_input,end='')
    new_input.close()
# -------------------------------------
# read a NWChem output nwchem_lammps.out file for planewave calculation
def nwchem_read_pw(log):
    nw_output = open(log, 'r')
    eout = 0.0
    sout = []
    fout = []
    reading_forces = False
    while True:
        line = nw_output.readline()
        if not line: break
        # pattern match for energy
        if re.search("PSPW energy",line):
            eout = float(line.split()[4])
        # pattern match for forces
        if re.search(r"C\.O\.M", line):
            reading_forces = False
        if reading_forces:
            forces = line.split()
            fout += [float(forces[3]), float(forces[4]), float(forces[5])]
        if re.search("Ion Forces",line):
            reading_forces = True
        # pattern match for stress
        if re.search("=== total gradient ===",line):
            stensor = []
            for i in range(3):
                line = nw_output.readline()
                line = line.replace("S ="," ")
                stress = line.split()
                stensor += [float(stress[1]), float(stress[2]), float(stress[3])]
            sxx = stensor[0]
            syy = stensor[4]
            szz = stensor[8]
            sxy = 0.5 * (stensor[1] + stensor[3])
            sxz = 0.5 * (stensor[2] + stensor[6])
            syz = 0.5 * (stensor[5] + stensor[7])
            sout = [sxx,syy,szz,sxy,sxz,syz]
    # convert units
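    # the "Ion Forces" block already holds forces rather than gradients, so
    # no sign flip is applied here; the stress tensor is converted from
    # atomic units to bar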
    hartree2eV = 27.21138602
    bohr2angstrom = 0.52917721092
    austress2bar = 294210156.97
    eout = eout * hartree2eV
    fout = [i * hartree2eV/bohr2angstrom for i in fout]
    sout = [i * austress2bar for i in sout]
    return eout,fout,sout
# -------------------------------------
# main program
# command-line args
#
if len(sys.argv) != 4:
    print("Syntax: python nwchem_wrap.py file/zmq ao/pw input_template")
    sys.exit(1)

comm_mode = sys.argv[1]
basis_type = sys.argv[2]
input_template = sys.argv[3]

if comm_mode == "file": cs = CSlib(1,comm_mode,"tmp.couple",None)
elif comm_mode == "zmq": cs = CSlib(1,comm_mode,"*:5555",None)
else:
    print("Syntax: python nwchem_wrap.py file/zmq ao/pw input_template")
    sys.exit(1)

natoms = 0
box = []
if basis_type == "ao":
    natoms = nwchem_setup_ao(input_template)
elif basis_type == "pw":
    natoms,box = nwchem_setup_pw(input_template)

# initial message for AIMD protocol

msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0: error("Bad initial client/server handshake")
protocol = cs.unpack_string(1)
if protocol != "md": error("Mismatch in client/server protocol")
cs.send(0,0)

# endless server loop

i = 0
if not os.path.exists("nwchem_logs"):
    os.mkdir("nwchem_logs")

while 1:

    # recv message from client
    # msgID = 0 = all-done message

    msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
    if msgID < 0: break

    # SETUP receive at beginning of each run
    # required fields: DIM, PERIODICITY, ORIGIN, BOX,
    #                  NATOMS, COORDS
    # optional fields: others in enum above, but NWChem ignores them

    if msgID == SETUP:
        origin = []
        box_lmp = []
        natoms_recv = ntypes_recv = 0
        types = []
        coords = []
        for field in fieldID:
            if field == DIM:
                dim = cs.unpack_int(DIM)
                if dim != 3: error("NWChem only performs 3d simulations")
            elif field == PERIODICITY:
                periodicity = cs.unpack(PERIODICITY,1)
                if basis_type == "ao":
                    if periodicity[0] or periodicity[1] or periodicity[2]:
                        error("NWChem AO basis wrapper only currently supports fully aperiodic systems")
                elif basis_type == "pw":
                    if not periodicity[0] or not periodicity[1] or not periodicity[2]:
                        error("NWChem PW basis wrapper only currently supports fully periodic systems")
            elif field == ORIGIN:
                origin = cs.unpack(ORIGIN,1)
            elif field == BOX:
                box_lmp = cs.unpack(BOX,1)
                if (basis_type == "pw"):
                    if (box[0] != box_lmp[0] or box[1] != box_lmp[4] or box[2] != box_lmp[8]):
                        error("NWChem wrapper mismatch in box dimensions")
            elif field == NATOMS:
                natoms_recv = cs.unpack_int(NATOMS)
                if natoms != natoms_recv:
                    error("NWChem wrapper mismatch in number of atoms")
            elif field == COORDS:
                coords = cs.unpack(COORDS,1)
        if not origin or not box_lmp or not natoms or not coords:
            error("Required NWChem wrapper setup field not received")

    # STEP receive at each timestep of run or minimization
    # required fields: COORDS
    # optional fields: ORIGIN, BOX

    elif msgID == STEP:
        coords = []
        for field in fieldID:
            if field == COORDS:
                coords = cs.unpack(COORDS,1)
        if not coords: error("Required NWChem wrapper step field not received")

    else: error("NWChem wrapper received unrecognized message")

    # unpack coords from client
    # create NWChem input

    if basis_type == "ao":
        nwchem_input_write_ao(input_template,coords)
    elif basis_type == "pw":
        nwchem_input_write_pw(input_template,coords,box)

    # invoke NWChem

    i += 1
    log = "nwchem_lammps.out"
    archive = "nwchem_logs/nwchem_lammps" + str(i) + ".out"
    cmd = nwchemcmd + " nwchem_lammps.nw > " + log
    print("\nLaunching NWChem ...")
    print(cmd)
    subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
    shutil.copyfile(log,archive)

    # process NWChem output

    if basis_type == "ao":
        energy,forces = nwchem_read_ao(natoms,log)
        virial = [0,0,0,0,0,0]
    elif basis_type == "pw":
        energy,forces,virial = nwchem_read_pw(log)

    # return forces, energy to client

    cs.send(msgID,3)
    cs.pack(FORCES,4,3*natoms,forces)
    cs.pack_double(ENERGY,energy)
    cs.pack(VIRIAL,4,6,virial)

# final reply to client

cs.send(0,0)

# clean-up

del cs

View File

@ -1,15 +0,0 @@
LAMMPS W data file
2 atoms
1 atom types
0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi
Atoms
1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58

View File

@ -1,34 +0,0 @@
# small W unit cell for use with NWChem
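# run this client together with the NWChem server wrapper, e.g.
# (hypothetical shell commands; binary and script names vary by setup):
#   lmp_serial -v mode file -in in.client.W &
#   python nwchem_wrap.py file pw nwchem_lammps.nw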
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3

View File

@ -1,38 +0,0 @@
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
group one id 2
displace_atoms one move 0.1 0.2 0.3
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
dump 1 all custom 1 dump.W.min id type x y z
thermo 1
minimize 1.0e-6 1.0e-6 10 50

View File

@ -1,76 +0,0 @@
LAMMPS (18 Sep 2018)
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
2 atoms
Time spent = 0.000187325 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -549.75686 36815830
1 300 0 0 -549.75686 36815830
2 300 0 0 -549.75686 36815830
3 300 0 0 -549.75686 36815830
Loop time of 0.400933 on 1 procs for 3 steps with 2 atoms
Performance: 0.646 ns/day, 37.123 hours/ns, 7.483 timesteps/s
0.1% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 4.755e-06 | 4.755e-06 | 4.755e-06 | 0.0 | 0.00
Output | 0.00010114 | 0.00010114 | 0.00010114 | 0.0 | 0.03
Modify | 0.40082 | 0.40082 | 0.40082 | 0.0 | 99.97
Other | | 1.232e-05 | | | 0.00
Nlocal: 2 ave 2 max 2 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 7 ave 7 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:09

View File

@ -1,78 +0,0 @@
LAMMPS (19 Sep 2019)
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
reading atoms ...
2 atoms
read_data CPU = 0.0014801 secs
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
2 atoms
replicate CPU = 0.000123978 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 1.801 | 1.801 | 1.801 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -549.75686 36815830
1 298.93216 0 0 -549.75686 36815825
2 295.76254 0 0 -549.75687 36814830
3 290.55935 0 0 -549.75687 36811865
Loop time of 2.60414 on 1 procs for 3 steps with 2 atoms
Performance: 0.100 ns/day, 241.124 hours/ns, 1.152 timesteps/s
0.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 7.1526e-06 | 7.1526e-06 | 7.1526e-06 | 0.0 | 0.00
Output | 0.00012779 | 0.00012779 | 0.00012779 | 0.0 | 0.00
Modify | 2.604 | 2.604 | 2.604 | 0.0 | 99.99
Other | | 9.06e-06 | | | 0.00
Nlocal: 2 ave 2 max 2 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 7 ave 7 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:00:05

View File

@ -1,92 +0,0 @@
LAMMPS (19 Sep 2019)
# small W unit cell for use with NWChem
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md file tmp.couple
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
reading atoms ...
2 atoms
read_data CPU = 0.00183487 secs
mass 1 183.85
group one id 2
1 atoms in group one
displace_atoms one move 0.1 0.2 0.3
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 1 MPI processor grid
2 atoms
replicate CPU = 0.000159979 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
dump 1 all custom 1 tmp.dump id type x y z
thermo 1
minimize 1.0e-6 1.0e-6 10 50
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (../min.cpp:174)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:166)
Per MPI rank memory allocation (min/avg/max) = 4.676 | 4.676 | 4.676 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -547.52142 28510277
1 300 0 0 -549.43104 35614471
2 300 0 0 -549.75661 36815830
3 300 0 0 -549.75662 36815830
Loop time of 7.71121 on 1 procs for 3 steps with 2 atoms
0.0% CPU use with 1 MPI tasks x no OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-547.560202518 -549.795386038 -549.795398827
Force two-norm initial, final = 16.0041 0.00108353
Force max component initial, final = 9.57978 0.000719909
Final line search alpha, max atom move = 1 0.000719909
Iterations, force evaluations = 3 5
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 9.5367e-07 | 9.5367e-07 | 9.5367e-07 | 0.0 | 0.00
Comm | 1.3113e-05 | 1.3113e-05 | 1.3113e-05 | 0.0 | 0.00
Output | 0.00017023 | 0.00017023 | 0.00017023 | 0.0 | 0.00
Modify | 7.7109 | 7.7109 | 7.7109 | 0.0 |100.00
Other | | 0.0001729 | | | 0.00
Nlocal: 2 ave 2 max 2 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds not checked
Total wall time: 0:00:19

File diff suppressed because it is too large

View File

@ -1,817 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
#**** Enter the geometry using fractional coordinates ****
geometry units angstrom noautosym
system crystal
lat_a 3.16d0
lat_b 3.16d0
lat_c 3.16d0
end
W 0.999335 0.99967 0.998875
W 0.500665 0.50033 0.501125
end
nwpw
vectors input nwchem_lammps.movecs
end
#***** setup the nwpw gamma point code ****
nwpw
simulation_cell
ngrid 16 16 16
end
ewald_ncut 8
mulliken
lcao #old default
end
nwpw
tolerances 1.0d-9 1.0d-9
end
task pspw stress
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = singsing
program = /home/sjplimp/tools/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Thu Oct 3 16:57:17 2019
compiled = Wed_Oct_02_09:25:27_2019
source = /home/sjplimp/tools/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 13107200 doubles = 100.0 Mbytes
stack = 13107197 doubles = 100.0 Mbytes
global = 26214400 doubles = 200.0 Mbytes (distinct from heap & stack)
total = 52428797 doubles = 400.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = pspw
Operation = stress
Status = unknown
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 2 Thu Oct 3 16:57:16 2019
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
There are no basis sets in the database
NWChem Input Module
-------------------
!!!!!!!!! geom_3d NEEDS TESTING !!!!!!!!!!
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 W 74.0000 3.15789860 3.15895720 3.15644500
2 W 74.0000 1.58210140 1.58104280 1.58355500
Lattice Parameters
------------------
lattice vectors in angstroms (scale by 1.889725989 to convert to a.u.)
a1=< 3.160 0.000 0.000 >
a2=< 0.000 3.160 0.000 >
a3=< 0.000 0.000 3.160 >
a= 3.160 b= 3.160 c= 3.160
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 31.6
reciprocal lattice vectors in a.u.
b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
Atomic Mass
-----------
W 183.951000
XYZ format geometry
-------------------
2
geometry
W 3.15789860 3.15895720 3.15644500
W 1.58210140 1.58104280 1.58355500
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 W | 1 W | 5.15689 | 2.72891
------------------------------------------------------------------------------
number of included internuclear distances: 1
==============================================================================
>>>> PSPW Parallel Module - stress <<<<
****************************************************
* *
* NWPW PSPW Calculation *
* *
* [ (Grassmann/Stiefel manifold implementation) ] *
* *
* [ NorthWest Chemistry implementation ] *
* *
* version #5.10 06/12/02 *
* *
* This code was developed by Eric J. Bylaska, *
* and was based upon algorithms and code *
* developed by the group of Prof. John H. Weare *
* *
****************************************************
>>> JOB STARTED AT Thu Oct 3 16:57:17 2019 <<<
================ input data ========================
input psi filename:./nwchem_lammps.movecs
initializing pspw_APC data structure
------------------------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
number of processors used: 1
processor grid : 1 x 1
parallel mapping :2d hilbert
parallel mapping : balanced
number of threads : 1
parallel io : off
options:
boundary conditions = periodic (version3)
electron spin = restricted
exchange-correlation = LDA (Vosko et al) parameterization
elements involved in the cluster:
1: W valence charge: 6.0000 lmax= 2
comment : Troullier-Martins pseudopotential
pseudpotential type : 0
highest angular component : 2
local potential used : 0
number of non-local projections: 8
semicore corrections included : 1.800 (radius) 4.538 (charge)
cutoff = 2.389 3.185 2.244
total charge: 0.000
atomic composition:
W : 2
number of electrons: spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
number of orbitals : spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
supercell:
cell_name: cell_default
lattice: a1=< 5.972 0.000 0.000 >
a2=< 0.000 5.972 0.000 >
a3=< 0.000 0.000 5.972 >
reciprocal: b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
lattice: a= 5.972 b= 5.972 c= 5.972
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 212.9
density cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
wavefnc cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
Ewald summation: cut radius= 1.90 and 8
Madelung Wigner-Seitz= 1.76011888 (alpha= 2.83729748 rs= 3.70444413)
technical parameters:
time step= 5.80 fictitious mass= 400000.0
tolerance=0.100E-08 (energy) 0.100E-08 (density)
maximum iterations = 1000 ( 10 inner 100 outer )
== Energy Calculation ==
====== Grassmann conjugate gradient iteration ======
>>> ITERATION STARTED AT Thu Oct 3 16:57:17 2019 <<<
iter. Energy DeltaE DeltaRho
------------------------------------------------------
10 -0.2020457267E+02 -0.12753E-06 0.54770E-09
20 -0.2020457281E+02 -0.96520E-09 0.65680E-11
*** tolerance ok. iteration terminated
>>> ITERATION ENDED AT Thu Oct 3 16:57:18 2019 <<<
== Summary Of Results ==
number of electrons: spin up= 6.00000 down= 6.00000 (real space)
total energy : -0.2020457281E+02 ( -0.10102E+02/ion)
total orbital energy: 0.5093546150E+01 ( 0.84892E+00/electron)
hartree energy : 0.2903382088E+00 ( 0.48390E-01/electron)
exc-corr energy : -0.9445078100E+01 ( -0.15742E+01/electron)
ion-ion energy : -0.2193939674E+02 ( -0.10970E+02/ion)
kinetic (planewave) : 0.1441586264E+02 ( 0.24026E+01/electron)
V_local (planewave) : 0.1156111351E+02 ( 0.19269E+01/electron)
V_nl (planewave) : -0.1508741234E+02 ( -0.25146E+01/electron)
V_Coul (planewave) : 0.5806764176E+00 ( 0.96779E-01/electron)
V_xc. (planewave) : -0.6376694082E+01 ( -0.10628E+01/electron)
Virial Coefficient : -0.6466707350E+00
orbital energies:
0.5414291E+00 ( 14.733eV)
0.5414285E+00 ( 14.733eV)
0.5414070E+00 ( 14.733eV)
0.3596871E+00 ( 9.788eV)
0.3596781E+00 ( 9.787eV)
0.2031433E+00 ( 5.528eV)
Total PSPW energy : -0.2020457281E+02
=== Spin Contamination ===
<Sexact^2> = 0.0000000000000000
<S^2> = 0.0000000000000000
== Center of Charge ==
spin up ( -0.0030, -0.0015, -0.0050 )
spin down ( -0.0030, -0.0015, -0.0050 )
total ( -0.0030, -0.0015, -0.0050 )
ionic ( -1.4929, -1.4929, -1.4929 )
== Molecular Dipole wrt Center of Mass ==
mu = ( -17.8792, -17.8970, -17.8547 ) au
|mu| = 30.9638 au, 78.6976 Debye
Translation force removed: ( -0.00000 -0.00000 -0.00000)
============= Ion Gradients =================
Ion Forces:
1 W ( 0.002737 0.001358 0.004631 )
2 W ( -0.002737 -0.001358 -0.004631 )
C.O.M. ( 0.000000 0.000000 0.000000 )
===============================================
|F| = 0.784689E-02
|F|/nion = 0.392344E-02
max|Fatom|= 0.554859E-02 ( 0.285eV/Angstrom)
======================
= Stress calculation =
======================
============= total gradient ==============
S = ( 0.12512 0.00000 0.00000 )
( 0.00000 0.12512 0.00001 )
( 0.00000 0.00001 0.12511 )
===================================================
|S| = 0.21671E+00
pressure = 0.125E+00 au
= 0.368E+02 Mbar
= 0.368E+04 GPa
= 0.363E+08 atm
dE/da = 0.12512
dE/db = 0.12512
dE/dc = 0.12511
dE/dalpha = -0.00003
dE/dbeta = -0.00002
dE/dgamma = -0.00001
*************************************************************
** **
** PSPW Mulliken analysis **
** **
** Population analysis algorithm devloped by Ryoichi Kawai **
** **
** Thu Oct 3 16:57 **
** **
*************************************************************
== XYZ OUTPUT ==
2
W -0.002101 -0.001043 -0.003555
W -1.577898 -1.578956 -1.576444
== Atomic Orbital Expansion ==
W nodamping
=====================================================
| POPULATION ANALYSIS OF FILLED MOLECULAR ORBITALS |
=====================================================
== Using pseudoatomic orbital expansion ==
------------------------------------------------------------------------------
*** ORBITAL= 1*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54143E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00000 -0.00018 -0.00011 0.00005
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49999 0.00003 -0.68532 0.00001 0.10591 0.13824
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00001 -0.00003 -0.00187 -0.00238 -0.00028 0.00001 0.00000 -0.00017
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 0.00018 0.00011 -0.00005
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49999 0.00003 -0.68532 0.00001 0.10591 0.13824
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00001 -0.00003 0.00187 0.00238 0.00028 -0.00001 -0.00000 0.00017
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 2*** SPIN=BOTH SUM= 0.12472E+01 E= 0.54143E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00000 0.00002 -0.00005 -0.00011
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49998 -0.00001 -0.02322 0.00001 -0.61187 0.35363
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00002 -0.00001 0.00071 -0.00049 -0.00015 -0.00283 0.00006 0.00266
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00002 0.00005 0.00011
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49998 -0.00001 -0.02322 0.00001 -0.61187 0.35363
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00002 -0.00001 -0.00071 0.00049 0.00015 0.00283 -0.00006 -0.00266
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 3*** SPIN=BOTH SUM= 0.12472E+01 E= 0.54141E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 0.00010 0.00006 0.00020
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49999 0.00000 0.17259 0.00000 0.33820 0.59651
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00001 0.00000 0.00001 -0.00015 0.00015 -0.00033 -0.00325 -0.00033
s
2 W 0 0.00000 -0.00000
px pz py
2 W 1 0.00000 -0.00010 -0.00006 -0.00020
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49999 0.00000 0.17259 0.00000 0.33820 0.59651
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00001 0.00000 -0.00001 0.00015 -0.00015 0.00033 0.00325 0.00033
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 4*** SPIN=BOTH SUM= 0.14577E+01 E= 0.35969E+00 ( 9.788eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00002 0.00162 -0.00440 0.00049
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.48998 -0.09896 0.00001 0.69296 0.00001 -0.00001
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00999 -0.09993 0.00031 -0.00131 -0.00234 -0.00064 0.00000 0.00022
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00002 0.00162 -0.00440 0.00049
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.48998 0.09896 -0.00001 -0.69296 -0.00001 0.00001
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00999 0.09993 0.00031 -0.00131 -0.00234 -0.00064 0.00000 0.00022
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9800 0.0200
------------------------------------------------------------------------------
*** ORBITAL= 5*** SPIN=BOTH SUM= 0.14616E+01 E= 0.35968E+00 ( 9.787eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00001 0.00206 0.00063 -0.00121
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.48871 -0.69206 -0.00002 -0.09883 0.00001 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.01129 -0.10621 0.00214 0.00009 0.00033 0.00014 0.00000 0.00063
s
2 W 0 0.00000 -0.00000
px pz py
2 W 1 0.00001 0.00206 0.00063 -0.00121
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.48871 0.69206 0.00002 0.09883 -0.00001 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.01129 0.10621 0.00214 0.00009 0.00033 0.00014 0.00000 0.00063
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9774 0.0226
------------------------------------------------------------------------------
*** ORBITAL= 6*** SPIN=BOTH SUM= 0.19540E+01 E= 0.20314E+00 ( 5.528eV)
NO ATOM L POPULATION
s
1 W 0 0.49974 -0.70692
px pz py
1 W 1 0.00000 0.00028 0.00047 0.00014
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.00000 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00026 0.01609 -0.00000 -0.00007 0.00021 -0.00003 0.00000 -0.00004
s
2 W 0 0.49974 -0.70692
px pz py
2 W 1 0.00000 -0.00028 -0.00047 -0.00014
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.00000 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00026 0.01609 0.00000 0.00007 -0.00021 0.00003 -0.00000 0.00004
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.9995 0.0000 0.0000 0.0005
========================================
| POPULATION ANALYSIS ON EACH ATOM |
========================================
NO ATOM SPIN TOTAL s p d f
1 W UP 3.00000 0.49974 0.00003 2.47866 0.02157
1 W DOWN 3.00000 0.49974 0.00003 2.47866 0.02157
2 W UP 3.00000 0.49974 0.00003 2.47866 0.02157
2 W DOWN 3.00000 0.49974 0.00003 2.47866 0.02157
=== TOTAL ANGULAR MOMENTUM POPULATION ===
SPIN s p d f
UP 16.66% 0.00% 82.62% 0.72%
DOWN 16.66% 0.00% 82.62% 0.72%
TOTAL 16.66% 0.00% 82.62% 0.72%
*************************************************************
** **
** PSPW Atomic Point Charge (APC) Analysis **
** **
** Point charge analysis based on paper by P.E. Blochl **
** (J. Chem. Phys. vol 103, page 7422, 1995) **
** **
*************************************************************
pspw_APC data structure
-----------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
charge analysis on each atom
----------------------------
no atom Qelc Qion Qtotal
-- ---- ------- ------- -------
1 W -6.000 6.000 -0.000
2 W -6.000 6.000 -0.000
Total Q -12.000 12.000 -0.000
gaussian coefficients of model density
--------------------------------------
no atom g=0.000 g=0.600 g=0.900 g=1.350
-- ---- ------- ------- ------- -------
1 W 6.000 -7.235 17.653 -16.419
2 W 6.000 -7.235 17.653 -16.419
=== Electric Field at Atoms ===
1 W Atomic Electric Field =( -0.00022 -0.00011 -0.00038 )
(ion) =( 0.00094 0.00047 0.00159 )
(electronic) =( -0.00116 -0.00058 -0.00197 )
2 W Atomic Electric Field =( 0.00022 0.00011 0.00038 )
(ion) =( -0.00094 -0.00047 -0.00159 )
(electronic) =( 0.00116 0.00058 0.00197 )
output psi filename:./nwchem_lammps.movecs
== Timing ==
cputime in seconds
prologue : 0.114428E+00
main loop : 0.475396E+00
epilogue : 0.316691E-01
total : 0.621493E+00
cputime/step: 0.559289E-02 ( 85 evaluations, 20 linesearches)
Time spent doing total step percent
total time : 0.623259E+00 0.733246E-02 100.0 %
i/o time : 0.103071E-01 0.121260E-03 1.7 %
FFTs : 0.348712E-01 0.410250E-03 5.6 %
dot products : 0.981057E-02 0.115418E-03 1.6 %
geodesic : 0.696999E-01 0.819999E-03 11.2 %
ffm_dgemm : 0.104145E-02 0.122523E-04 0.2 %
fmf_dgemm : 0.565297E-01 0.665055E-03 9.1 %
mmm_dgemm : 0.129490E-03 0.152342E-05 0.0 %
m_diagonalize : 0.701885E-03 0.825747E-05 0.1 %
exchange correlation : 0.764353E-01 0.899239E-03 12.3 %
local pseudopotentials : 0.439882E-03 0.517509E-05 0.1 %
non-local pseudopotentials : 0.271890E-01 0.319871E-03 4.4 %
hartree potentials : 0.202482E-02 0.238214E-04 0.3 %
ion-ion interaction : 0.104062E+00 0.122426E-02 16.7 %
structure factors : 0.152984E-01 0.179981E-03 2.5 %
phase factors : 0.107278E-04 0.126210E-06 0.0 %
masking and packing : 0.304392E-01 0.358108E-03 4.9 %
queue fft : 0.111536E+00 0.131219E-02 17.9 %
queue fft (serial) : 0.708244E-01 0.833228E-03 11.4 %
queue fft (message passing): 0.360800E-01 0.424470E-03 5.8 %
non-local psp FFM : 0.860008E-02 0.101177E-03 1.4 %
non-local psp FMF : 0.111482E-01 0.131155E-03 1.8 %
non-local psp FFM A : 0.214632E-02 0.252509E-04 0.3 %
non-local psp FFM B : 0.560879E-02 0.659858E-04 0.9 %
>>> JOB COMPLETED AT Thu Oct 3 16:57:18 2019 <<<
Task times cpu: 0.6s wall: 0.6s
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 0 0 0 0 0 0 0 0
number of processes/call 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes total: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 0 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 294 17
current total bytes 0 0
maximum total bytes 4879496 351944
maximum total K-bytes 4880 352
maximum total M-bytes 5 1
NWChem Input Module
-------------------
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.6s wall: 0.7s

View File

@ -1,816 +0,0 @@
argument 1 = nwchem_lammps.nw
============================== echo of input deck ==============================
echo
#**** Enter the geometry using fractional coordinates ****
geometry units angstrom noautosym
system crystal
lat_a 3.16d0
lat_b 3.16d0
lat_c 3.16d0
end
W 0.0158218 0.0316436 0.0474661
W 0.515824 0.531647 0.547471
end
nwpw
vectors input nwchem_lammps.movecs
end
#***** setup the nwpw gamma point code ****
nwpw
simulation_cell
ngrid 16 16 16
end
ewald_ncut 8
mulliken
lcao #old default
end
nwpw
tolerances 1.0d-9 1.0d-9
end
task pspw stress
================================================================================
Northwest Computational Chemistry Package (NWChem) 6.8
------------------------------------------------------
Environmental Molecular Sciences Laboratory
Pacific Northwest National Laboratory
Richland, WA 99352
Copyright (c) 1994-2018
Pacific Northwest National Laboratory
Battelle Memorial Institute
NWChem is an open-source computational chemistry package
distributed under the terms of the
Educational Community License (ECL) 2.0
A copy of the license is included with this distribution
in the LICENSE.TXT file
ACKNOWLEDGMENT
--------------
This software and its documentation were developed at the
EMSL at Pacific Northwest National Laboratory, a multiprogram
national laboratory, operated for the U.S. Department of Energy
by Battelle under Contract Number DE-AC05-76RL01830. Support
for this work was provided by the Department of Energy Office
of Biological and Environmental Research, Office of Basic
Energy Sciences, and the Office of Advanced Scientific Computing.
Job information
---------------
hostname = singsing
program = /home/sjplimp/tools/nwchem-6.8.1-release/bin/LINUX64/nwchem
date = Thu Oct 3 16:58:54 2019
compiled = Wed_Oct_02_09:25:27_2019
source = /home/sjplimp/tools/nwchem-6.8.1-release
nwchem branch = Development
nwchem revision = N/A
ga revision = 5.6.5
use scalapack = F
input = nwchem_lammps.nw
prefix = nwchem_lammps.
data base = ./nwchem_lammps.db
status = restart
nproc = 1
time left = -1s
Memory information
------------------
heap = 13107200 doubles = 100.0 Mbytes
stack = 13107197 doubles = 100.0 Mbytes
global = 26214400 doubles = 200.0 Mbytes (distinct from heap & stack)
total = 52428797 doubles = 400.0 Mbytes
verify = yes
hardfail = no
Directory information
---------------------
0 permanent = .
0 scratch = .
Previous task information
-------------------------
Theory = pspw
Operation = stress
Status = unknown
Qmmm = F
Ignore = F
Geometries in the database
--------------------------
Name Natoms Last Modified
-------------------------------- ------ ------------------------
1 geometry 2 Thu Oct 3 16:58:53 2019
The geometry named "geometry" is the default for restart
Basis sets in the database
--------------------------
There are no basis sets in the database
NWChem Input Module
-------------------
!!!!!!!!! geom_3d NEEDS TESTING !!!!!!!!!!
Geometry "geometry" -> ""
-------------------------
Output coordinates in angstroms (scale by 1.889725989 to convert to a.u.)
No. Tag Charge X Y Z
---- ---------------- ---------- -------------- -------------- --------------
1 W 74.0000 0.04999689 0.09999378 0.14999288
2 W 74.0000 1.63000384 1.68000452 1.73000836
Lattice Parameters
------------------
lattice vectors in angstroms (scale by 1.889725989 to convert to a.u.)
a1=< 3.160 0.000 0.000 >
a2=< 0.000 3.160 0.000 >
a3=< 0.000 0.000 3.160 >
a= 3.160 b= 3.160 c= 3.160
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 31.6
reciprocal lattice vectors in a.u.
b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
Atomic Mass
-----------
W 183.951000
XYZ format geometry
-------------------
2
geometry
W 0.04999689 0.09999378 0.14999288
W 1.63000384 1.68000452 1.73000836
==============================================================================
internuclear distances
------------------------------------------------------------------------------
center one | center two | atomic units | angstroms
------------------------------------------------------------------------------
2 W | 1 W | 5.17154 | 2.73666
------------------------------------------------------------------------------
number of included internuclear distances: 1
==============================================================================
>>>> PSPW Parallel Module - stress <<<<
****************************************************
* *
* NWPW PSPW Calculation *
* *
* [ (Grassmann/Stiefel manifold implementation) ] *
* *
* [ NorthWest Chemistry implementation ] *
* *
* version #5.10 06/12/02 *
* *
* This code was developed by Eric J. Bylaska, *
* and was based upon algorithms and code *
* developed by the group of Prof. John H. Weare *
* *
****************************************************
>>> JOB STARTED AT Thu Oct 3 16:58:54 2019 <<<
================ input data ========================
input psi filename:./nwchem_lammps.movecs
initializing pspw_APC data structure
------------------------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
number of processors used: 1
processor grid : 1 x 1
parallel mapping :2d hilbert
parallel mapping : balanced
number of threads : 1
parallel io : off
options:
boundary conditions = periodic (version3)
electron spin = restricted
exchange-correlation = LDA (Vosko et al) parameterization
elements involved in the cluster:
1: W valence charge: 6.0000 lmax= 2
comment : Troullier-Martins pseudopotential
pseudopotential type : 0
highest angular component : 2
local potential used : 0
number of non-local projections: 8
semicore corrections included : 1.800 (radius) 4.538 (charge)
cutoff = 2.389 3.185 2.244
total charge: 0.000
atomic composition:
W : 2
number of electrons: spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
number of orbitals : spin up= 6 ( 6 per task) down= 6 ( 6 per task) (Fourier space)
supercell:
cell_name: cell_default
lattice: a1=< 5.972 0.000 0.000 >
a2=< 0.000 5.972 0.000 >
a3=< 0.000 0.000 5.972 >
reciprocal: b1=< 1.052 0.000 -0.000 >
b2=< -0.000 1.052 -0.000 >
b3=< 0.000 0.000 1.052 >
lattice: a= 5.972 b= 5.972 c= 5.972
alpha= 90.000 beta= 90.000 gamma= 90.000
omega= 212.9
density cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
wavefnc cutoff= 35.427 fft= 16x 16x 16( 1052 waves 1052 per task)
Ewald summation: cut radius= 1.90 and 8
Madelung Wigner-Seitz= 1.76011888 (alpha= 2.83729748 rs= 3.70444413)
technical parameters:
time step= 5.80 fictitious mass= 400000.0
tolerance=0.100E-08 (energy) 0.100E-08 (density)
maximum iterations = 1000 ( 10 inner 100 outer )
== Energy Calculation ==
====== Grassmann conjugate gradient iteration ======
>>> ITERATION STARTED AT Thu Oct 3 16:58:54 2019 <<<
iter. Energy DeltaE DeltaRho
------------------------------------------------------
10 -0.2020460841E+02 -0.37164E-09 0.13892E-11
*** tolerance ok. iteration terminated
>>> ITERATION ENDED AT Thu Oct 3 16:58:54 2019 <<<
== Summary Of Results ==
number of electrons: spin up= 6.00000 down= 6.00000 (real space)
total energy : -0.2020460841E+02 ( -0.10102E+02/ion)
total orbital energy: 0.5093526999E+01 ( 0.84892E+00/electron)
hartree energy : 0.2902689593E+00 ( 0.48378E-01/electron)
exc-corr energy : -0.9445045626E+01 ( -0.15742E+01/electron)
ion-ion energy : -0.2193948849E+02 ( -0.10970E+02/ion)
kinetic (planewave) : 0.1441573280E+02 ( 0.24026E+01/electron)
V_local (planewave) : 0.1156119613E+02 ( 0.19269E+01/electron)
V_nl (planewave) : -0.1508727219E+02 ( -0.25145E+01/electron)
V_Coul (planewave) : 0.5805379185E+00 ( 0.96756E-01/electron)
V_xc. (planewave) : -0.6376667662E+01 ( -0.10628E+01/electron)
Virial Coefficient : -0.6466688811E+00
orbital energies:
0.5414223E+00 ( 14.733eV)
0.5414201E+00 ( 14.733eV)
0.5414174E+00 ( 14.733eV)
0.3596809E+00 ( 9.787eV)
0.3596804E+00 ( 9.787eV)
0.2031424E+00 ( 5.528eV)
Total PSPW energy : -0.2020460841E+02
=== Spin Contamination ===
<Sexact^2> = 0.0000000000000000
<S^2> = 0.0000000000000000
== Center of Charge ==
spin up ( 0.0106, 0.0203, 0.0283 )
spin down ( 0.0106, 0.0203, 0.0283 )
total ( 0.0106, 0.0203, 0.0283 )
ionic ( -1.3984, -1.3039, -1.2094 )
== Molecular Dipole wrt Center of Mass ==
mu = ( -16.9083, -15.8910, -14.8528 ) au
|mu| = 27.5503 au, 70.0218 Debye
Translation force removed: ( -0.00002 0.00000 0.00002)
============= Ion Gradients =================
Ion Forces:
1 W ( -0.000001 0.000005 0.000014 )
2 W ( 0.000001 -0.000005 -0.000014 )
C.O.M. ( -0.000000 0.000000 0.000000 )
===============================================
|F| = 0.216488E-04
|F|/nion = 0.108244E-04
max|Fatom|= 0.153080E-04 ( 0.001eV/Angstrom)
======================
= Stress calculation =
======================
============= total gradient ==============
S = ( 0.12513 0.00001 -0.00003 )
( 0.00001 0.12513 -0.00001 )
( -0.00003 -0.00001 0.12513 )
===================================================
|S| = 0.21673E+00
pressure = 0.125E+00 au
= 0.368E+02 Mbar
= 0.368E+04 GPa
= 0.363E+08 atm
dE/da = 0.12513
dE/db = 0.12513
dE/dc = 0.12513
dE/dalpha = 0.00006
dE/dbeta = 0.00020
dE/dgamma = -0.00008
*************************************************************
** **
** PSPW Mulliken analysis **
** **
** Population analysis algorithm developed by Ryoichi Kawai **
** **
** Thu Oct 3 16:58 **
** **
*************************************************************
== XYZ OUTPUT ==
2
W 0.049997 0.099994 0.149993
W -1.529995 -1.479995 -1.429991
== Atomic Orbital Expansion ==
W nodamping
=====================================================
| POPULATION ANALYSIS OF FILLED MOLECULAR ORBITALS |
=====================================================
== Using pseudoatomic orbital expansion ==
------------------------------------------------------------------------------
*** ORBITAL= 1*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54142E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00000
px pz py
1 W 1 0.00000 0.00000 0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.50000 -0.00001 -0.03953 0.00002 0.50309 0.49532
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00000 -0.00001 -0.00000 -0.00000 0.00000 0.00000 -0.00001 -0.00000
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00000 -0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.50000 -0.00001 -0.03953 0.00002 0.50309 0.49532
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00000 -0.00001 0.00000 0.00000 -0.00000 -0.00000 0.00001 0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 2*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54142E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 0.00000 0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.50000 0.00004 0.62658 0.00003 -0.20360 0.25680
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00000 -0.00004 0.00000 0.00000 -0.00000 -0.00000 -0.00001 0.00000
s
2 W 0 0.00000 -0.00000
px pz py
2 W 1 0.00000 -0.00000 -0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.50000 0.00004 0.62658 0.00003 -0.20360 0.25680
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00000 -0.00004 -0.00000 -0.00000 -0.00000 0.00000 0.00001 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 3*** SPIN=BOTH SUM= 0.12471E+01 E= 0.54142E+00 ( 14.733eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 0.00001
px pz py
1 W 1 0.00000 0.00000 -0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.50000 -0.00001 -0.32532 -0.00000 -0.45327 0.43441
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00000 0.00001 0.00000 -0.00000 -0.00000 -0.00000 -0.00000 0.00000
s
2 W 0 0.00000 0.00001
px pz py
2 W 1 0.00000 -0.00000 0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.50000 -0.00001 -0.32532 -0.00000 -0.45327 0.43441
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00000 0.00001 -0.00000 0.00001 0.00000 0.00000 0.00000 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 1.0000 0.0000
------------------------------------------------------------------------------
*** ORBITAL= 4*** SPIN=BOTH SUM= 0.14785E+01 E= 0.35968E+00 ( 9.787eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 -0.00000 0.00001 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.48310 0.33381 0.00000 -0.60965 0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.01690 0.13001 -0.00000 0.00000 0.00000 0.00000 0.00000 -0.00000
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00000 0.00001 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.48310 -0.33381 -0.00000 0.60965 -0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.01690 -0.13001 -0.00000 0.00000 0.00000 0.00000 0.00000 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9662 0.0338
------------------------------------------------------------------------------
*** ORBITAL= 5*** SPIN=BOTH SUM= 0.14407E+01 E= 0.35968E+00 ( 9.787eV)
NO ATOM L POPULATION
s
1 W 0 0.00000 -0.00000
px pz py
1 W 1 0.00000 -0.00000 -0.00000 0.00001
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.49580 0.61761 -0.00000 0.33817 0.00000 -0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00420 0.06484 -0.00001 0.00000 -0.00000 -0.00000 -0.00000 -0.00000
s
2 W 0 0.00000 0.00000
px pz py
2 W 1 0.00000 -0.00000 -0.00000 0.00001
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.49580 -0.61761 0.00000 -0.33817 -0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00420 -0.06484 -0.00001 0.00000 -0.00000 -0.00000 -0.00000 -0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.0000 0.0000 0.9916 0.0084
------------------------------------------------------------------------------
*** ORBITAL= 6*** SPIN=BOTH SUM= 0.19540E+01 E= 0.20314E+00 ( 5.528eV)
NO ATOM L POPULATION
s
1 W 0 0.49974 -0.70692
px pz py
1 W 1 0.00000 0.00000 -0.00000 0.00000
dx2-y2 dzx d3z2-1 dyz dxy
1 W 2 0.00000 0.00000 -0.00000 -0.00000 -0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
1 W 3 0.00026 0.01609 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000 -0.00000
s
2 W 0 0.49974 -0.70692
px pz py
2 W 1 0.00000 -0.00000 0.00000 -0.00000
dx2-y2 dzx d3z2-1 dyz dxy
2 W 2 0.00000 0.00000 -0.00000 -0.00000 -0.00000 0.00000
fx(x2-3y2) fz(5z2-1) fx(5z2-1) fz(5z2-3) fy(5z2-1) fxyz fy(3x2-y2)
2 W 3 0.00026 0.01609 0.00000 0.00000 -0.00000 0.00000 0.00000 0.00000
=== DISTRIBUTION ===
1(W ) 0.5000 2(W ) 0.5000
== ANGULAR MOMENTUM POPULATIONS ===
s p d f
0.9995 0.0000 0.0000 0.0005
========================================
| POPULATION ANALYSIS ON EACH ATOM |
========================================
NO ATOM SPIN TOTAL s p d f
1 W UP 3.00000 0.49974 0.00000 2.47889 0.02137
1 W DOWN 3.00000 0.49974 0.00000 2.47889 0.02137
2 W UP 3.00000 0.49974 0.00000 2.47889 0.02137
2 W DOWN 3.00000 0.49974 0.00000 2.47889 0.02137
=== TOTAL ANGULAR MOMENTUM POPULATION ===
SPIN s p d f
UP 16.66% 0.00% 82.63% 0.71%
DOWN 16.66% 0.00% 82.63% 0.71%
TOTAL 16.66% 0.00% 82.63% 0.71%
*************************************************************
** **
** PSPW Atomic Point Charge (APC) Analysis **
** **
** Point charge analysis based on paper by P.E. Blochl **
** (J. Chem. Phys. vol 103, page 7422, 1995) **
** **
*************************************************************
pspw_APC data structure
-----------------------
nga, ngs: 3 6
Gc : 2.5000000000000000
APC gamma: 1 0.59999999999999998
APC gamma: 2 0.90000000000000002
APC gamma: 3 1.3500000000000001
charge analysis on each atom
----------------------------
no atom Qelc Qion Qtotal
-- ---- ------- ------- -------
1 W -6.000 6.000 -0.000
2 W -6.000 6.000 0.000
Total Q -12.000 12.000 -0.000
gaussian coefficients of model density
--------------------------------------
no atom g=0.000 g=0.600 g=0.900 g=1.350
-- ---- ------- ------- ------- -------
1 W 6.000 -7.235 17.654 -16.419
2 W 6.000 -7.234 17.651 -16.418
=== Electric Field at Atoms ===
1 W Atomic Electric Field =( -0.00002 0.00000 0.00001 )
(ion) =( 0.00000 0.00000 0.00000 )
(electronic) =( -0.00002 -0.00000 0.00001 )
2 W Atomic Electric Field =( -0.00002 0.00000 0.00002 )
(ion) =( -0.00000 -0.00000 -0.00000 )
(electronic) =( -0.00002 0.00000 0.00002 )
output psi filename:./nwchem_lammps.movecs
== Timing ==
cputime in seconds
prologue : 0.991130E-01
main loop : 0.101190E+00
epilogue : 0.203540E-01
total : 0.220657E+00
cputime/step: 0.252975E-01 ( 4 evaluations, 1 linesearches)
Time spent doing total step percent
total time : 0.222262E+00 0.555655E-01 100.0 %
i/o time : 0.847340E-02 0.211835E-02 3.8 %
FFTs : 0.576015E-02 0.144004E-02 2.6 %
dot products : 0.157053E-02 0.392634E-03 0.7 %
geodesic : 0.203228E-02 0.508070E-03 0.9 %
ffm_dgemm : 0.641376E-04 0.160344E-04 0.0 %
fmf_dgemm : 0.202988E-02 0.507471E-03 0.9 %
mmm_dgemm : 0.286302E-05 0.715756E-06 0.0 %
m_diagonalize : 0.101088E-03 0.252721E-04 0.0 %
exchange correlation : 0.287819E-02 0.719547E-03 1.3 %
local pseudopotentials : 0.346661E-03 0.866652E-04 0.2 %
non-local pseudopotentials : 0.268912E-02 0.672280E-03 1.2 %
hartree potentials : 0.163791E-03 0.409476E-04 0.1 %
ion-ion interaction : 0.699389E-01 0.174847E-01 31.5 %
structure factors : 0.889608E-02 0.222402E-02 4.0 %
phase factors : 0.102510E-04 0.256275E-05 0.0 %
masking and packing : 0.839656E-02 0.209914E-02 3.8 %
queue fft : 0.418949E-02 0.104737E-02 1.9 %
queue fft (serial) : 0.264608E-02 0.661519E-03 1.2 %
queue fft (message passing): 0.136477E-02 0.341193E-03 0.6 %
non-local psp FFM : 0.391964E-03 0.979910E-04 0.2 %
non-local psp FMF : 0.407219E-03 0.101805E-03 0.2 %
non-local psp FFM A : 0.144235E-03 0.360588E-04 0.1 %
non-local psp FFM B : 0.216961E-03 0.542402E-04 0.1 %
>>> JOB COMPLETED AT Thu Oct 3 16:58:54 2019 <<<
Task times cpu: 0.2s wall: 0.2s
Summary of allocated global arrays
-----------------------------------
No active global arrays
GA Statistics for process 0
------------------------------
create destroy get put acc scatter gather read&inc
calls: 0 0 0 0 0 0 0 0
number of processes/call 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes total: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
bytes remote: 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
Max memory consumed for GA by this process: 0 bytes
MA_summarize_allocated_blocks: starting scan ...
MA_summarize_allocated_blocks: scan completed: 0 heap blocks, 0 stack blocks
MA usage statistics:
allocation statistics:
heap stack
---- -----
current number of blocks 0 0
maximum number of blocks 294 17
current total bytes 0 0
maximum total bytes 4879496 351944
maximum total K-bytes 4880 352
maximum total M-bytes 5 1
NWChem Input Module
-------------------
CITATION
--------
Please cite the following reference when publishing
results obtained with NWChem:
M. Valiev, E.J. Bylaska, N. Govind, K. Kowalski,
T.P. Straatsma, H.J.J. van Dam, D. Wang, J. Nieplocha,
E. Apra, T.L. Windus, W.A. de Jong
"NWChem: a comprehensive and scalable open-source
solution for large scale molecular simulations"
Comput. Phys. Commun. 181, 1477 (2010)
doi:10.1016/j.cpc.2010.04.018
AUTHORS
-------
E. Apra, E. J. Bylaska, W. A. de Jong, N. Govind, K. Kowalski,
T. P. Straatsma, M. Valiev, H. J. J. van Dam, D. Wang, T. L. Windus,
J. Hammond, J. Autschbach, K. Bhaskaran-Nair, J. Brabec, K. Lopata,
S. A. Fischer, S. Krishnamoorthy, M. Jacquelin, W. Ma, M. Klemm, O. Villa,
Y. Chen, V. Anisimov, F. Aquino, S. Hirata, M. T. Hackler, V. Konjkov,
D. Mejia-Rodriguez, T. Risthaus, M. Malagoli, A. Marenich,
A. Otero-de-la-Roza, J. Mullin, P. Nichols, R. Peverati, J. Pittner, Y. Zhao,
P.-D. Fan, A. Fonari, M. J. Williamson, R. J. Harrison, J. R. Rehr,
M. Dupuis, D. Silverstein, D. M. A. Smith, J. Nieplocha, V. Tipparaju,
M. Krishnan, B. E. Van Kuiken, A. Vazquez-Mayagoitia, L. Jensen, M. Swart,
Q. Wu, T. Van Voorhis, A. A. Auer, M. Nooijen, L. D. Crosby, E. Brown,
G. Cisneros, G. I. Fann, H. Fruchtl, J. Garza, K. Hirao, R. A. Kendall,
J. A. Nichols, K. Tsemekhman, K. Wolinski, J. Anchell, D. E. Bernholdt,
P. Borowski, T. Clark, D. Clerc, H. Dachsel, M. J. O. Deegan, K. Dyall,
D. Elwood, E. Glendening, M. Gutowski, A. C. Hess, J. Jaffe, B. G. Johnson,
J. Ju, R. Kobayashi, R. Kutteh, Z. Lin, R. Littlefield, X. Long, B. Meng,
T. Nakajima, S. Niu, L. Pollack, M. Rosing, K. Glaesemann, G. Sandrone,
M. Stave, H. Taylor, G. Thomas, J. H. van Lenthe, A. T. Wong, Z. Zhang.
Total times cpu: 0.2s wall: 0.3s

View File

@ -1,28 +0,0 @@
echo
#**** Enter the geometry using fractional coordinates ****
geometry units angstrom noautosym
system crystal
lat_a 3.16d0
lat_b 3.16d0
lat_c 3.16d0
end
W 0.0 0.0 0.0
W 0.5 0.5 0.5
end
#***** setup the nwpw gamma point code ****
nwpw
simulation_cell
ngrid 16 16 16
end
ewald_ncut 8
mulliken
lcao #old default
end
nwpw
tolerances 1.0d-9 1.0d-9
end
task pspw stress

View File

@ -1,53 +0,0 @@
# Startparameter for this run:
NWRITE = 2 write-flag & timer
PREC = normal normal or accurate (medium, high low for compatibility)
ISTART = 0 job : 0-new 1-cont 2-samecut
ICHARG = 2 charge: 1-file 2-atom 10-const
ISPIN = 1 spin polarized calculation?
LSORBIT = F spin-orbit coupling
INIWAV = 1 electr: 0-lowe 1-rand 2-diag
# Electronic Relaxation 1
ENCUT = 600.0 eV #Plane wave energy cutoff
ENINI = 600.0 initial cutoff
NELM = 100; NELMIN= 2; NELMDL= -5 # of ELM steps
EDIFF = 0.1E-05 stopping-criterion for ELM
# Ionic relaxation
EDIFFG = 0.1E-02 stopping-criterion for IOM
NSW = 0 number of steps for IOM
NBLOCK = 1; KBLOCK = 1 inner block; outer block
IBRION = -1 ionic relax: 0-MD 1-quasi-New 2-CG #No ion relaxation with -1
NFREE = 0 steps in history (QN), initial steepest desc. (CG)
ISIF = 2 stress and relaxation # 2: F-yes Sts-yes RlxIon-yes cellshape-no cellvol-no
IWAVPR = 10 prediction: 0-non 1-charg 2-wave 3-comb # 10: TMPCAR stored in memory rather than file
POTIM = 0.5000 time-step for ionic-motion
TEBEG = 3500.0; TEEND = 3500.0 temperature during run # Finite Temperature variables if AI-MD is on
SMASS = -3.00 Nose mass-parameter (am)
estimated Nose-frequency (Omega) = 0.10E-29 period in steps =****** mass= -0.366E-27a.u.
PSTRESS= 0.0 Pulay stress
# DOS related values:
EMIN = 10.00; EMAX =-10.00 energy-range for DOS
EFERMI = 0.00
ISMEAR = 0; SIGMA = 0.10 broadening in eV -4-tet -1-fermi 0-gaus
# Electronic relaxation 2 (details)
IALGO = 48 algorithm
# Write flags
LWAVE = T write WAVECAR
LCHARG = T write CHGCAR
LVTOT = F write LOCPOT, total local potential
LVHAR = F write LOCPOT, Hartree potential only
LELF = F write electronic localiz. function (ELF)
# Dipole corrections
LMONO = F monopole corrections only (constant potential shift)
LDIPOL = F correct potential (dipole corrections)
IDIPOL = 0 1-x, 2-y, 3-z, 4-all directions
EPSILON= 1.0000000 bulk dielectric constant
# Exchange correlation treatment:
GGA = -- GGA type

View File

@ -1,6 +0,0 @@
K-Points
0
Monkhorst Pack
15 15 15
0 0 0

View File

@ -1,11 +0,0 @@
W unit cell
1.0
3.16 0.00000000 0.00000000
0.00000000 3.16 0.00000000
0.00000000 0.00000000 3.16
W
2
Direct
0.00000000 0.00000000 0.00000000
0.50000000 0.50000000 0.50000000

View File

@ -1,149 +0,0 @@
Sample LAMMPS MD wrapper on VASP quantum DFT via client/server
coupling
See the MESSAGE package documentation in Build_extras.html#message
for more details on how client/server coupling works in LAMMPS.
In this dir, the vasp_wrap.py is a wrapper on the VASP quantum DFT
code so it can work as a "server" code which LAMMPS drives as a
"client" code to perform ab initio MD. LAMMPS performs the MD
timestepping, sends VASP a current set of coordinates each timestep,
VASP computes forces and energy and virial and returns that info to
LAMMPS.
Messages are exchanged between VASP and LAMMPS via a client/server
library (CSlib), which is included in the LAMMPS distribution in
lib/message. As explained below you can choose to exchange data
between the two programs either via files or sockets (ZMQ). If the
vasp_wrap.py program became parallel, or the CSlib library calls were
integrated into VASP directly, then data could also be exchanged via
MPI.
----------------
Build LAMMPS with its MESSAGE package installed:
See the Build extras doc page and its MESSAGE package
section for details.
CMake:
-D PKG_MESSAGE=yes # include the MESSAGE package
-D MESSAGE_ZMQ=value # build with ZeroMQ support, value = no (default) or yes
Traditional make:
cd lammps/lib/message
python Install.py -m -z # build CSlib with MPI and ZMQ support
cd lammps/src
make yes-message
make mpi
You can leave off the -z if you do not have ZMQ on your system.
----------------
Build the CSlib in a form usable by the vasp_wrap.py script:
% cd lammps/lib/message/cslib/src
% make shlib # build serial and parallel shared lib with ZMQ support
% make shlib zmq=no # build serial and parallel shared lib w/out ZMQ support
This will make shared library versions of the CSlib, which Python
requires. Python must be able to find both the cslib.py script and
the libcsnompi.so library in your lammps/lib/message/cslib/src
directory. If it is not able to do this, you will get an error when
you run vasp_wrap.py.
You can do this by augmenting two environment variables, either
from the command line, or in your shell start-up script.
Here is the sample syntax for the csh or tcsh shells:
setenv PYTHONPATH ${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
setenv LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src
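For bash-family shells, the equivalent (a direct translation of the
csh lines above, assuming the same paths) would be:

export PYTHONPATH=${PYTHONPATH}:/home/sjplimp/lammps/lib/message/cslib/src
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/home/sjplimp/lammps/lib/message/cslib/src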
----------------
Prepare to use VASP and the vasp_wrapper.py script
You can run the vasp_wrap.py script as-is to test that the coupling
between it and LAMMPS is functional. This will use the included
vasprun.xml file output by a previous VASP run.
But note that the as-is version of vasp_wrap.py will not attempt to
run VASP.
To have the wrapper actually launch VASP, you must edit the 1st
vaspcmd line at the top of vasp_wrap.py to be the launch command
needed to run VASP on your system. It can be a command to run VASP in
serial or in parallel, e.g. an mpirun command. Then comment out the
2nd vaspcmd line immediately following it.
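For example, with a hypothetical VASP install path and processor
count (illustration only, not a tested command), the top of
vasp_wrap.py would then read:

vaspcmd = "mpirun -np 8 /path/to/vasp_std"   # actual VASP launch command
#vaspcmd = "touch tmp"                       # dummy command, now commented out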
Ensure you have the necessary VASP input files in this
directory, suitable for the VASP calculation you want to perform:
INCAR
KPOINTS
POSCAR_template
POTCAR
Examples of all but the POTCAR file are provided. As explained below,
POSCAR_W is an input file for a 2-atom unit cell of tungsten and can
be used to test the LAMMPS/VASP coupling. The POTCAR file is a
proprietary VASP file, so use one from your VASP installation.
Note that the POSCAR_template file should be matched to the LAMMPS
input script (# of atoms and atom types, box size, etc). The provided
POSCAR_W matches in.client.W.
Once you run VASP yourself, the vasprun.xml file will be overwritten.
----------------
To run in client/server mode:
NOTE: The vasp_wrap.py script must be run with Python version 2, not
3. This is because it uses the CSlib python wrapper, which only
supports version 2. We plan to upgrade CSlib to support Python 3.
Both the client (LAMMPS) and server (vasp_wrap.py) must use the same
messaging mode, namely file or zmq. This is an argument to the
vasp_wrap.py code; it can be selected by setting the "mode" variable
when you run LAMMPS. The default mode = file.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means either of
the messaging modes can be used and LAMMPS can be run in serial or
parallel. The vasp_wrap.py code is always run in serial, but it
launches VASP from Python via an mpirun command which can run VASP
itself in parallel.
When you run, the server should print out thermodynamic info every
timestep which corresponds to the forces and virial computed by VASP.
VASP will also generate output files each timestep. The vasp_wrap.py
script could be generalized to archive these.
The examples below are commands you should use in two different
terminal windows. The order of the two commands (client or server
launch) does not matter. You can run them both in the same window if
you append a "&" character to the first one to run it in the
background.
--------------
File mode of messaging:
% mpirun -np 1 lmp_mpi -v mode file -in in.client.W
% python vasp_wrap.py file POSCAR_W
% mpirun -np 2 lmp_mpi -v mode file -in in.client.W
% python vasp_wrap.py file POSCAR_W
ZMQ mode of messaging:
% mpirun -np 1 lmp_mpi -v mode zmq -in in.client.W
% python vasp_wrap.py zmq POSCAR_W
% mpirun -np 2 lmp_mpi -v mode zmq -in in.client.W
% python vasp_wrap.py zmq POSCAR_W

View File

@ -1,15 +0,0 @@
LAMMPS W data file
2 atoms
1 atom types
0.0 3.16 xlo xhi
0.0 3.16 ylo yhi
0.0 3.16 zlo zhi
Atoms
1 1 0.000 0.000 0.000
2 1 1.58 1.58 1.58

View File

@ -1,35 +0,0 @@
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
mass 1 183.85
replicate $x $y $z
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
message quit

View File

@ -1,76 +0,0 @@
LAMMPS (22 Aug 2018)
# small W unit cell for use with VASP
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555"
message client md zmq localhost:5555
variable x index 1
variable y index 1
variable z index 1
units metal
atom_style atomic
atom_modify sort 0 0.0 map yes
read_data data.W
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
reading atoms ...
2 atoms
mass 1 183.85
replicate $x $y $z
replicate 1 $y $z
replicate 1 1 $z
replicate 1 1 1
orthogonal box = (0 0 0) to (3.16 3.16 3.16)
1 by 1 by 2 MPI processor grid
2 atoms
Time spent = 0.000148058 secs
velocity all create 300.0 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 10 check no
fix 1 all nve
fix 2 all client/md
fix_modify 2 energy yes
thermo 1
run 3
Per MPI rank memory allocation (min/avg/max) = 1.8 | 1.8 | 1.8 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 0 0 -48.030793 -78159.503
1 298.24318 0 0 -48.03102 -78167.19
2 296.85584 0 0 -48.031199 -78173.26
3 295.83795 0 0 -48.031331 -78177.714
Loop time of 0.457491 on 2 procs for 3 steps with 2 atoms
Performance: 0.567 ns/day, 42.360 hours/ns, 6.558 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 1.3828e-05 | 2.9922e-05 | 4.6015e-05 | 0.0 | 0.01
Output | 7.5817e-05 | 9.3937e-05 | 0.00011206 | 0.0 | 0.02
Modify | 0.45735 | 0.45736 | 0.45736 | 0.0 | 99.97
Other | | 1.204e-05 | | | 0.00
Nlocal: 1 ave 1 max 1 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 4 ave 4 max 4 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds not checked
Total wall time: 0:01:21

View File

@ -1,300 +0,0 @@
#!/usr/bin/env python

# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# https://www.lammps.org/ Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
# ----------------------------------------------------------------------

# Syntax: vasp_wrap.py file/zmq POSCARfile
# wrapper on VASP to act as server program using CSlib
#   receives message with list of coords from client
#   creates VASP inputs
#   invokes VASP to calculate self-consistent energy of that config
#   reads VASP outputs
#   sends message with energy, forces, pressure to client

# NOTES:
# check to ensure basic VASP input files are in place?
# could archive VASP input/output in special filenames or dirs?
# need to check that POTCAR file is consistent with atom ordering?
# could make syntax for launching VASP more flexible
#   e.g. command-line arg for # of procs
# detect if VASP had an error and return ERROR field, e.g. non-convergence ??

from __future__ import print_function
import sys

version = sys.version_info[0]
if version == 3:
    sys.exit("The CSlib python wrapper does not yet support python 3")

import subprocess
import xml.etree.ElementTree as ET
from cslib import CSlib

# comment out 2nd line once 1st line is correct for your system

vaspcmd = "srun -N 1 --ntasks-per-node=4 " + \
          "-n 4 /projects/vasp/2017-build/cts1/vasp5.4.4/vasp_tfermi/bin/vasp_std"
vaspcmd = "touch tmp"

# enums matching FixClientMD class in LAMMPS

SETUP,STEP = range(1,2+1)
DIM,PERIODICITY,ORIGIN,BOX,NATOMS,NTYPES,TYPES,COORDS,UNITS,CHARGE = range(1,10+1)
FORCES,ENERGY,VIRIAL,ERROR = range(1,4+1)

# -------------------------------------
# functions

# error message and exit

def error(txt):
    print("ERROR:",txt)
    sys.exit(1)

# -------------------------------------
# read initial VASP POSCAR file to setup problem
# return natoms,ntypes,box

def vasp_setup(poscar):
    ps = open(poscar,'r').readlines()

    # box size

    words = ps[2].split()
    xbox = float(words[0])
    words = ps[3].split()
    ybox = float(words[1])
    words = ps[4].split()
    zbox = float(words[2])
    box = [xbox,ybox,zbox]

    ntypes = 0
    natoms = 0
    words = ps[6].split()
    for word in words:
        if word == '#': break
        ntypes += 1
        natoms += int(word)

    return natoms,ntypes,box

# -------------------------------------
# write a new POSCAR file for VASP

def poscar_write(poscar,natoms,ntypes,types,coords,box):
    psold = open(poscar,'r').readlines()
    psnew = open("POSCAR",'w')

    # header, including box size

    psnew.write(psold[0])
    psnew.write(psold[1])
    psnew.write("%g %g %g\n" % (box[0],box[1],box[2]))
    psnew.write("%g %g %g\n" % (box[3],box[4],box[5]))
    psnew.write("%g %g %g\n" % (box[6],box[7],box[8]))
    psnew.write(psold[5])
    psnew.write(psold[6])

    # per-atom coords
    # grouped by types

    psnew.write("Cartesian\n")

    for itype in range(1,ntypes+1):
        for i in range(natoms):
            if types[i] != itype: continue
            x = coords[3*i+0]
            y = coords[3*i+1]
            z = coords[3*i+2]
            aline = " %g %g %g\n" % (x,y,z)
            psnew.write(aline)

    psnew.close()

# -------------------------------------
# read a VASP output vasprun.xml file
# uses ElementTree module
# see https://docs.python.org/2/library/xml.etree.elementtree.html

def vasprun_read():
    tree = ET.parse('vasprun.xml')
    root = tree.getroot()

    #fp = open("vasprun.xml","r")
    #root = ET.parse(fp)

    scsteps = root.findall('calculation/scstep')
    energy = scsteps[-1].find('energy')
    for child in energy:
        if child.attrib["name"] == "e_0_energy":
            eout = float(child.text)

    fout = []
    sout = []

    varrays = root.findall('calculation/varray')
    for varray in varrays:
        if varray.attrib["name"] == "forces":
            forces = varray.findall("v")
            for line in forces:
                fxyz = line.text.split()
                fxyz = [float(value) for value in fxyz]
                fout += fxyz
        if varray.attrib["name"] == "stress":
            tensor = varray.findall("v")
            stensor = []
            for line in tensor:
                sxyz = line.text.split()
                sxyz = [float(value) for value in sxyz]
                stensor.append(sxyz)
            sxx = stensor[0][0]
            syy = stensor[1][1]
            szz = stensor[2][2]
            # symmetrize off-diagonal components
            sxy = 0.5 * (stensor[0][1] + stensor[1][0])
            sxz = 0.5 * (stensor[0][2] + stensor[2][0])
            syz = 0.5 * (stensor[1][2] + stensor[2][1])
            sout = [sxx,syy,szz,sxy,sxz,syz]

    #fp.close()
    return eout,fout,sout

# -------------------------------------
# main program

# command-line args

if len(sys.argv) != 3:
    print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
    sys.exit(1)

mode = sys.argv[1]
poscar_template = sys.argv[2]

if mode == "file": cs = CSlib(1,mode,"tmp.couple",None)
elif mode == "zmq": cs = CSlib(1,mode,"*:5555",None)
else:
    print("Syntax: python vasp_wrap.py file/zmq POSCARfile")
    sys.exit(1)

natoms,ntypes,box = vasp_setup(poscar_template)

# initial message for MD protocol

msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
if msgID != 0: error("Bad initial client/server handshake")
protocol = cs.unpack_string(1)
if protocol != "md": error("Mismatch in client/server protocol")
cs.send(0,0)

# endless server loop

while 1:

    # recv message from client
    # msgID = 0 = all-done message

    msgID,nfield,fieldID,fieldtype,fieldlen = cs.recv()
    if msgID < 0: break

    # SETUP receive at beginning of each run
    # required fields: DIM, PERIODICITY, ORIGIN, BOX,
    #                  NATOMS, NTYPES, TYPES, COORDS
    # optional fields: others in enum above, but VASP ignores them

    if msgID == SETUP:

        origin = []
        box = []
        natoms_recv = ntypes_recv = 0
        types = []
        coords = []

        for field in fieldID:
            if field == DIM:
                dim = cs.unpack_int(DIM)
                if dim != 3: error("VASP only performs 3d simulations")
            elif field == PERIODICITY:
                periodicity = cs.unpack(PERIODICITY,1)
                if not periodicity[0] or not periodicity[1] or not periodicity[2]:
                    error("VASP wrapper only currently supports fully periodic systems")
            elif field == ORIGIN:
                origin = cs.unpack(ORIGIN,1)
            elif field == BOX:
                box = cs.unpack(BOX,1)
            elif field == NATOMS:
                natoms_recv = cs.unpack_int(NATOMS)
                if natoms != natoms_recv:
                    error("VASP wrapper mis-match in number of atoms")
            elif field == NTYPES:
                ntypes_recv = cs.unpack_int(NTYPES)
                if ntypes != ntypes_recv:
                    error("VASP wrapper mis-match in number of atom types")
            elif field == TYPES:
                types = cs.unpack(TYPES,1)
            elif field == COORDS:
                coords = cs.unpack(COORDS,1)

        if not origin or not box or not natoms or not ntypes or \
           not types or not coords:
            error("Required VASP wrapper setup field not received")

    # STEP receive at each timestep of run or minimization
    # required fields: COORDS
    # optional fields: ORIGIN, BOX

    elif msgID == STEP:

        coords = []
        for field in fieldID:
            if field == COORDS:
                coords = cs.unpack(COORDS,1)
            elif field == ORIGIN:
                origin = cs.unpack(ORIGIN,1)
            elif field == BOX:
                box = cs.unpack(BOX,1)

        if not coords: error("Required VASP wrapper step field not received")

    else: error("VASP wrapper received unrecognized message")

    # create POSCAR file

    poscar_write(poscar_template,natoms,ntypes,types,coords,box)

    # invoke VASP

    print("\nLaunching VASP ...")
    print(vaspcmd)
    subprocess.check_output(vaspcmd,stderr=subprocess.STDOUT,shell=True)

    # process VASP output

    energy,forces,virial = vasprun_read()

    # convert VASP kilobars to bars

    for i,value in enumerate(virial): virial[i] *= 1000.0

    # return forces, energy, pressure to client

    cs.send(msgID,3)
    cs.pack(FORCES,4,3*natoms,forces)
    cs.pack_double(ENERGY,energy)
    cs.pack(VIRIAL,4,6,virial)

# final reply to client

cs.send(0,0)

# clean-up

del cs

File diff suppressed because it is too large

View File

@ -86,7 +86,6 @@ mc: MC package models: GCMC, Widom, fix mol/swap
mdi: use of the MDI package and MolSSI MDI code coupling library
meam: MEAM test for SiC and shear (same as shear examples)
melt: rapid melt of 3d LJ system
message: client/server coupling of 2 codes
micelle: self-assembly of small lipid-like molecules into 2d bilayers
min: energy minimization of 2d LJ melt
mliap: examples for using several bundled MLIAP potentials

View File

@ -1,116 +0,0 @@
This dir contains scripts that demonstrate how to use LAMMPS as both a
client and server code to run a simple MD simulation. LAMMPS as a
client performs the MD timestepping. LAMMPS as a server provides the
energy and forces between interacting particles. Every timestep the
LAMMPS client sends a message to the LAMMPS server and receives a
response message in return.
Another code could replace LAMMPS as the client, e.g. another MD code
which wants to use a LAMMPS potential. Another code could replace
LAMMPS as the server, e.g. a quantum code computing quantum forces, so
that ab initio MD could be performed. See an example of the latter in
examples/COUPLE/lammps_vasp.
See the MESSAGE package documentation in Build_extras.html#message
for more details on how client/server coupling works in LAMMPS.
--------------
Note that you can adjust the problem size run by these scripts by
setting "x,y,z" variables when you run LAMMPS. The default problem size
is x = y = z = 5, which is 500 particles.
lmp_mpi -v x 10 -v y 10 -v z 20 # 8000 particles
This applies to either in.message or in.message.client
You can also run the in.message scripts with an NPT integrator
instead of NVE, if you comment/uncomment the correct lines.
The client and server script define a "mode" variable
which can be set to file, zmq, mpi/one, or mpi/two,
as illustrated below.
--------------
To run this problem in the traditional way (no client/server coupling)
do one of these:
% lmp_serial -in in.message
% mpirun -np 4 lmp_mpi -in in.message
Or run with in.message.tilt.
--------------
To run in client/server mode:
Both the client and server script must use the same messaging mode.
This can be selected by setting the "mode" variable when you run
LAMMPS. The default mode = file. The other options for the mode
variable are zmq, mpione, mpitwo.
Here we assume LAMMPS was built to run in parallel, and the MESSAGE
package was installed with socket (ZMQ) support. This means any of
the 4 messaging modes can be used.
The next sections illustrate how to launch LAMMPS twice, once as a
client, once as a server, for each of the messaging modes.
In all cases, the client should print out thermodynamic info for 50
steps. The server should print out setup info, then print nothing until
the client exits, at which point the server should also exit.
The examples below show launching LAMMPS twice from the same window
(or batch script), using the "&" character to launch the first time in
the background. For all modes except mpi/one, you could also launch
twice in separate windows on your desktop machine. It does not matter
whether you launch the client or server first.
In these examples either the client or server can be run on one or
more processors. If running in a non-MPI mode (file or zmq) you can
launch LAMMPS on a single processor without using mpirun.
IMPORTANT: If you run in mpi/two mode, you must launch LAMMPS both
times via mpirun, even if one or both of them runs on a single
processor. This is so that MPI can figure out how to connect both MPI
processes together to exchange MPI messages between them.
--------------
NOTE: the Script.sh file has commands to perform all the
runs listed below.
--------------
File or ZMQ or mpi/two modes of messaging:
% mpirun -np 1 lmp_mpi -v mode file -log log.client -in in.message.client &
% mpirun -np 2 lmp_mpi -v mode file -log log.server -in in.message.server
% mpirun -np 4 lmp_mpi -v mode zmq -log log.client -in in.message.client &
% mpirun -np 1 lmp_mpi -v mode zmq -log log.server -in in.message.server
% mpirun -np 2 lmp_mpi -v mode mpitwo -log log.client -in in.message.client &
% mpirun -np 4 lmp_mpi -v mode mpitwo -log log.server -in in.message.server
Or run with in.message.tilt.client/server.
Don't run the tilt files with the "file" mode; they run too slow.
--------------
Mpi/one mode of messaging:
Launch LAMMPS twice in a single mpirun command:
% mpirun -np 2 lmp_mpi -mpicolor 0 -in in.message.client -v mode mpione -log log.client : -np 4 lmp_mpi -mpicolor 1 -in in.message.server -v mode mpione -log log.server
Or run with in.message.tilt.client/server.
The two -np values determine how many procs the client and the server
run on.
A LAMMPS executable run in this manner must use the -mpicolor color
command-line option as its first option, where color is set to one
integer value for the 1st app, and another value for the 2nd app.

View File

@ -1,55 +0,0 @@
# sample launch script
# message on 1 proc each
mpirun -np 1 lmp_mpi -log log.message.g++.1 < in.message
mpirun -np 1 lmp_mpi -v mode file -log log.message.client.file.g++.1 < in.message.client &
mpirun -np 1 lmp_mpi -v mode file -log log.message.server.file.g++.1 < in.message.server
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.client.zmq.g++.1 < in.message.client &
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.server.zmq.g++.1 < in.message.server
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.client.mpitwo.g++.1 < in.message.client &
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.server.mpitwo.g++.1 < in.message.server
mpirun -np 1 lmp_mpi -m 0 -in in.message.client -v mode mpione -log log.message.client.mpione.g++.1 : -np 1 lmp_mpi -m 1 -in in.message.server -v mode mpione -log log.message.server.mpione.g++.1
# message on 2/4 procs each
mpirun -np 4 lmp_mpi -log log.message.g++.4 < in.message
mpirun -np 2 lmp_mpi -v mode file -log log.message.client.file.g++.2 < in.message.client &
mpirun -np 4 lmp_mpi -v mode file -log log.message.server.file.g++.4 < in.message.server
mpirun -np 2 lmp_mpi -v mode zmq -log log.message.client.zmq.g++.2 < in.message.client &
mpirun -np 4 lmp_mpi -v mode zmq -log log.message.server.zmq.g++.4 < in.message.server
mpirun -np 2 lmp_mpi -v mode mpitwo -log log.message.client.mpitwo.g++.2 < in.message.client &
mpirun -np 4 lmp_mpi -v mode mpitwo -log log.message.server.mpitwo.g++.4 < in.message.server
mpirun -np 2 lmp_mpi -m 0 -in in.message.client -v mode mpione -log log.message.client.mpione.g++.2 : -np 4 lmp_mpi -m 1 -in in.message.server -v mode mpione -log log.message.server.mpione.g++.4
# message.tilt on 1 proc each
mpirun -np 1 lmp_mpi -log log.message.tilt.g++.1 < in.message.tilt
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.tilt.client.zmq.g++.1 < in.message.tilt.client &
mpirun -np 1 lmp_mpi -v mode zmq -log log.message.tilt.server.zmq.g++.1 < in.message.tilt.server
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.tilt.client.mpitwo.g++.1 < in.message.tilt.client &
mpirun -np 1 lmp_mpi -v mode mpitwo -log log.message.tilt.server.mpitwo.g++.1 < in.message.tilt.server
mpirun -np 1 lmp_mpi -m 0 -in in.message.tilt.client -v mode mpione -log log.message.tilt.client.mpione.g++.1 : -np 1 lmp_mpi -m 1 -in in.message.tilt.server -v mode mpione -log log.message.tilt.server.mpione.g++.1
# message.tilt on 2/4 procs each
mpirun -np 4 lmp_mpi -log log.message.tilt.g++.4 < in.message.tilt
mpirun -np 2 lmp_mpi -v mode zmq -log log.message.tilt.client.zmq.g++.2 < in.message.tilt.client &
mpirun -np 4 lmp_mpi -v mode zmq -log log.message.tilt.server.zmq.g++.4 < in.message.tilt.server
mpirun -np 2 lmp_mpi -v mode mpitwo -log log.message.tilt.client.mpitwo.g++.2 < in.message.tilt.client &
mpirun -np 4 lmp_mpi -v mode mpitwo -log log.message.tilt.server.mpitwo.g++.4 < in.message.tilt.server
mpirun -np 2 lmp_mpi -m 0 -in in.message.tilt.client -v mode mpione -log log.message.tilt.client.mpione.g++.2 : -np 4 lmp_mpi -m 1 -in in.message.tilt.server -v mode mpione -log log.message.tilt.server.mpione.g++.4

View File

@ -1,29 +0,0 @@
# 3d Lennard-Jones melt - no client/server mode
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
thermo 10
run 50

View File

@ -1,43 +0,0 @@
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
elif "${mode} == mpione" &
"message client md mpi/one" &
elif "${mode} == mpitwo" &
"message client md mpi/two tmp.couple"
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
message quit

View File

@ -1,29 +0,0 @@
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then &
"message server md file tmp.couple" &
elif "${mode} == zmq" &
"message server md zmq *:5555" &
elif "${mode} == mpione" &
"message server md mpi/one" &
elif "${mode} == mpitwo" &
"message server md mpi/two tmp.couple"
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
region box block 0 1 0 1 0 1
create_box 1 box
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
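A hedged aside on file mode (and the mpi/two handshake): client and server rendezvous through the tmp.couple file named in the message command, so a stale file left behind by an aborted run can stall the next one. A minimal precaution before re-running; the wildcard for auxiliary files is an assumption, not something documented here:

rm -f tmp.couple tmp.couple.*   # clear stale rendezvous files before file/mpi-two runs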

View File

@ -1,30 +0,0 @@
# 2d NEMD simulation - no client/server mode
units lj
atom_style atomic
dimension 2
lattice sq2 0.8442
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
create_atoms 1 box
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000

View File

@ -1,44 +0,0 @@
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then &
"message client md file tmp.couple" &
elif "${mode} == zmq" &
"message client md zmq localhost:5555" &
elif "${mode} == mpione" &
"message client md mpi/one" &
elif "${mode} == mpitwo" &
"message client md mpi/two tmp.couple"
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
create_atoms 1 box
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
message quit

View File

@ -1,31 +0,0 @@
# 2d NEMD simulation - server script
variable mode index file
if "${mode} == file" then &
"message server md file tmp.couple" &
elif "${mode} == zmq" &
"message server md zmq *:5555" &
elif "${mode} == mpione" &
"message server md mpi/one" &
elif "${mode} == mpitwo" &
"message server md mpi/two tmp.couple"
units lj
atom_style atomic
dimension 2
atom_modify map yes
lattice sq2 0.8442
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
create_atoms 1 box
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.0
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
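In mpi/one mode the pair runs as a single MPMD-style mpirun in which -m 0 and -m 1 (presumably the abbreviated -mpicolor switch) give client and server their own partitions of MPI_COMM_WORLD. The same 2/4-proc tilt command from the README above, reflowed here only for readability:

mpirun -np 2 lmp_mpi -m 0 -in in.message.tilt.client -v mode mpione -log log.message.tilt.client.mpione.g++.2 : \
       -np 4 lmp_mpi -m 1 -in in.message.tilt.server -v mode mpione -log log.message.tilt.server.mpione.g++.4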

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000752926 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 5.0251 on 1 procs for 50 steps with 500 atoms
Performance: 4298.421 tau/day, 9.950 timesteps/s
0.1% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 9.7752e-06 | 9.7752e-06 | 9.7752e-06 | 0.0 | 0.00
Comm | 0.00014925 | 0.00014925 | 0.00014925 | 0.0 | 0.00
Output | 0.00023127 | 0.00023127 | 0.00023127 | 0.0 | 0.00
Modify | 5.0242 | 5.0242 | 5.0242 | 0.0 | 99.98
Other | | 0.0004668 | | | 0.01
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:05
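All four messaging modes should reproduce the same trajectory, so the step-50 thermo line ought to be identical across the client logs (as it is in the listings that follow). A minimal bash/awk check, assuming the log file names used in this diff:

for f in log.message.client.*.g++.1; do
  printf '%s: ' "$f"; awk '$1 == 50' "$f" | tail -1
done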

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md file tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000613928 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 5.02384 on 2 procs for 50 steps with 500 atoms
Performance: 4299.499 tau/day, 9.953 timesteps/s
50.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 1.9073e-06 | 3.3379e-06 | 4.7684e-06 | 0.0 | 0.00
Comm | 0.00020742 | 0.00021136 | 0.00021529 | 0.0 | 0.00
Output | 0.00026989 | 0.00048053 | 0.00069118 | 0.0 | 0.01
Modify | 5.0171 | 5.0199 | 5.0228 | 0.1 | 99.92
Other | | 0.003203 | | | 0.06
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:05

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000540018 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0403891 on 1 procs for 50 steps with 500 atoms
Performance: 534798.272 tau/day, 1237.959 timesteps/s
99.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 4.7684e-06 | 4.7684e-06 | 4.7684e-06 | 0.0 | 0.01
Comm | 6.3181e-05 | 6.3181e-05 | 6.3181e-05 | 0.0 | 0.16
Output | 9.5367e-05 | 9.5367e-05 | 9.5367e-05 | 0.0 | 0.24
Modify | 0.040053 | 0.040053 | 0.040053 | 0.0 | 99.17
Other | | 0.0001726 | | | 0.43
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000475883 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0208495 on 2 procs for 50 steps with 500 atoms
Performance: 1035997.740 tau/day, 2398.143 timesteps/s
99.1% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 3.0994e-06 | 4.53e-06 | 5.9605e-06 | 0.0 | 0.02
Comm | 0.00012422 | 0.00012457 | 0.00012493 | 0.0 | 0.60
Output | 5.7697e-05 | 7.987e-05 | 0.00010204 | 0.0 | 0.38
Modify | 0.020463 | 0.020464 | 0.020466 | 0.0 | 98.15
Other | | 0.0001761 | | | 0.84
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/two tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000603914 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.069119 on 1 procs for 50 steps with 500 atoms
Performance: 312504.627 tau/day, 723.390 timesteps/s
42.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 7.1526e-06 | 7.1526e-06 | 7.1526e-06 | 0.0 | 0.01
Comm | 0.0001049 | 0.0001049 | 0.0001049 | 0.0 | 0.15
Output | 0.00014019 | 0.00014019 | 0.00014019 | 0.0 | 0.20
Modify | 0.068602 | 0.068602 | 0.068602 | 0.0 | 99.25
Other | | 0.0002651 | | | 0.38
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/two tmp.couple
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000667095 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0190214 on 2 procs for 50 steps with 500 atoms
Performance: 1135563.588 tau/day, 2628.619 timesteps/s
58.5% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 1.9073e-06 | 2.861e-06 | 3.8147e-06 | 0.0 | 0.02
Comm | 0.00017238 | 0.00017989 | 0.0001874 | 0.0 | 0.95
Output | 0.00012803 | 0.00015497 | 0.00018191 | 0.0 | 0.81
Modify | 0.018065 | 0.018181 | 0.018297 | 0.1 | 95.58
Other | | 0.0005029 | | | 2.64
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md zmq localhost:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000734091 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.303 | 2.303 | 2.303 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0778341 on 1 procs for 50 steps with 500 atoms
Performance: 277513.222 tau/day, 642.392 timesteps/s
11.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 7.8678e-06 | 7.8678e-06 | 7.8678e-06 | 0.0 | 0.01
Comm | 8.3685e-05 | 8.3685e-05 | 8.3685e-05 | 0.0 | 0.11
Output | 0.00011373 | 0.00011373 | 0.00011373 | 0.0 | 0.15
Modify | 0.07734 | 0.07734 | 0.07734 | 0.0 | 99.37
Other | | 0.0002885 | | | 0.37
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,79 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md zmq localhost:5555
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
atom_modify sort 0 0.0 map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000570059 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
fix 2 all client/md
fix_modify 2 energy yes
thermo 10
run 50
Per MPI rank memory allocation (min/avg/max) = 2.302 | 2.302 | 2.302 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 -4.6176881 -5.0221006
10 1.1347688 0 0 -4.6166043 -2.6072847
20 0.628166 0 0 -4.62213 1.0186262
30 0.73767593 0 0 -4.6254647 0.49629637
40 0.69517962 0 0 -4.6253506 0.69303877
50 0.70150496 0 0 -4.6259832 0.59551518
Loop time of 0.0416595 on 2 procs for 50 steps with 500 atoms
Performance: 518489.499 tau/day, 1200.207 timesteps/s
56.5% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 2.861e-06 | 3.3379e-06 | 3.8147e-06 | 0.0 | 0.01
Comm | 0.00013399 | 0.00013685 | 0.00013971 | 0.0 | 0.33
Output | 8.6784e-05 | 0.00011206 | 0.00013733 | 0.0 | 0.27
Modify | 0.040948 | 0.04103 | 0.041112 | 0.0 | 98.49
Other | | 0.0003769 | | | 0.90
Nlocal: 250 ave 255 max 245 min
Histogram: 1 0 0 0 0 0 0 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,85 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - no client/server mode
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000682831 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
thermo 10
run 50
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.143 | 3.143 | 3.143 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
10 1.1347688 -6.3153532 0 -4.6166043 -2.6072847
20 0.628166 -5.5624945 0 -4.62213 1.0186262
30 0.73767593 -5.7297655 0 -4.6254647 0.49629637
40 0.69517962 -5.6660345 0 -4.6253506 0.69303877
50 0.70150496 -5.6761362 0 -4.6259832 0.59551518
Loop time of 0.039681 on 1 procs for 50 steps with 500 atoms
Performance: 544341.699 tau/day, 1260.050 timesteps/s
99.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.029993 | 0.029993 | 0.029993 | 0.0 | 75.59
Neigh | 0.0073051 | 0.0073051 | 0.0073051 | 0.0 | 18.41
Comm | 0.0012736 | 0.0012736 | 0.0012736 | 0.0 | 3.21
Output | 0.00012803 | 0.00012803 | 0.00012803 | 0.0 | 0.32
Modify | 0.00053287 | 0.00053287 | 0.00053287 | 0.0 | 1.34
Other | | 0.000448 | | | 1.13
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1946 ave 1946 max 1946 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18820 ave 18820 max 18820 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18820
Ave neighs/atom = 37.64
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,85 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - no client/server mode
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.39798 8.39798 8.39798)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
Time spent = 0.000656843 secs
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
# same with NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1 1 1.0
thermo 10
run 50
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.109 | 3.109 | 3.109 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6176881 -5.0221006
10 1.1347688 -6.3153532 0 -4.6166043 -2.6072847
20 0.628166 -5.5624945 0 -4.62213 1.0186262
30 0.73767593 -5.7297655 0 -4.6254647 0.49629637
40 0.69517962 -5.6660345 0 -4.6253506 0.69303877
50 0.70150496 -5.6761362 0 -4.6259832 0.59551518
Loop time of 0.0131519 on 4 procs for 50 steps with 500 atoms
Performance: 1642350.242 tau/day, 3801.737 timesteps/s
97.9% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.006074 | 0.0065379 | 0.0072589 | 0.6 | 49.71
Neigh | 0.0014219 | 0.0015552 | 0.0017018 | 0.3 | 11.82
Comm | 0.003546 | 0.0043943 | 0.0049584 | 0.8 | 33.41
Output | 0.000108 | 0.00012845 | 0.00016999 | 0.0 | 0.98
Modify | 0.00014353 | 0.00014949 | 0.00015569 | 0.0 | 1.14
Other | | 0.0003865 | | | 2.94
Nlocal: 125 ave 128 max 121 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Nghost: 1091 ave 1094 max 1089 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Neighs: 4705 ave 4849 max 4648 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Total # of neighbors = 18820
Ave neighs/atom = 37.64
Neighbor list builds = 4
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md file tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:05
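"Server MD calls = 51" is consistent with the client's run 50: presumably 50 timestep requests plus the initial setup call. The server logs for the other modes should report identical counts, which a quick grep can confirm (log names as above):

grep 'Server MD' log.message.server.*.g++.*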

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md file tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:05

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/one
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/one
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/two tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md mpi/two tmp.couple
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md zmq *:5555
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 1 by 1 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 1 by 1 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -1,44 +0,0 @@
LAMMPS (22 Aug 2018)
# 3d Lennard-Jones melt - server script
variable mode index file
if "${mode} == file" then "message server md file tmp.couple" elif "${mode} == zmq" "message server md zmq *:5555" elif "${mode} == mpione" "message server md mpi/one" elif "${mode} == mpitwo" "message server md mpi/two tmp.couple"
message server md zmq *:5555
units lj
atom_style atomic
atom_modify map yes
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6796 1.6796 1.6796)
1 by 2 by 2 MPI processor grid
mass * 1.0 # masses not used by server
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
server md
1 by 2 by 2 MPI processor grid
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Server MD calls = 51
Server MD reneighborings 5
Total wall time: 0:00:00

View File

@ -1,130 +0,0 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000485897 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0086399 0 1.3760711 6.8772078 0.61567613
2000 1.0707188 0 1.3744107 6.628097 1.2313523
3000 1.0627515 0 1.310096 6.1647179 1.8470284
4000 0.94091054 0 1.268976 6.4723215 2.4627045
5000 1.0218949 0 1.2430242 5.6945977 3.0783806
6000 0.98649481 0 1.1997565 5.2870413 3.6940568
7000 0.9047957 0 1.1461262 5.9291636 4.3097329
8000 0.85697614 0 1.0437412 5.0652097 4.925409
9000 0.84208329 0 1.109218 6.1749808 5.5410852
10000 0.86418108 0 1.1594773 6.2581867 6.1567613
11000 0.95136356 0 1.1650901 5.8389085 6.7724374
12000 0.94571583 0 1.2210342 6.2498816 7.3881135
13000 0.95994288 0 1.2172042 5.9608165 -7.3881135
14000 0.99053999 0 1.2925597 6.9994446 -6.7724374
15000 1.0316726 0 1.3346023 6.6902672 -6.1567613
16000 0.99537481 0 1.3227696 7.0301123 -5.5410852
17000 1.0306843 0 1.3101457 6.4750102 -4.925409
18000 1.071154 0 1.2947547 5.695888 -4.3097329
19000 0.97120752 0 1.3035465 7.3945362 -3.6940568
20000 0.97198994 0 1.2244663 6.0047605 -3.0783806
21000 0.97943545 0 1.2393394 6.3871012 -2.4627045
22000 0.98550707 0 1.1768148 5.019967 -1.8470284
23000 0.96920052 0 1.1730698 5.7944947 -1.2313523
24000 0.94069959 0 1.184119 5.8434876 -0.61567613
25000 0.91569312 0 1.1642118 5.668997 0
26000 0.98882932 0 1.1999248 5.0115507 0.61567613
27000 0.8972608 0 1.2556546 7.0837158 1.2313523
28000 0.93554756 0 1.2221911 5.9302841 1.8470284
29000 0.97894608 0 1.2168736 5.5766766 2.4627045
30000 0.97877055 0 1.2575839 6.4308887 3.0783806
31000 1.0002387 0 1.2338069 5.3873124 3.6940568
32000 0.89608618 0 1.2382021 6.7892204 4.3097329
33000 0.87439302 0 1.2252635 7.078134 4.925409
34000 1.076102 0 1.2991393 5.5556892 5.5410852
35000 1.0018689 0 1.272105 6.1320483 6.1567613
36000 0.93327214 0 1.2428039 7.0030867 6.7724374
37000 1.0770236 0 1.3002931 5.4996076 7.3881135
38000 0.98715132 0 1.215562 5.5958335 -7.3881135
39000 0.95028417 0 1.2566706 6.4133713 -6.7724374
40000 1.0445585 0 1.241151 5.3589806 -6.1567613
41000 0.93799713 0 1.2109086 6.4957845 -5.5410852
42000 0.99231038 0 1.2228781 5.9363471 -4.925409
43000 0.97913815 0 1.1854842 5.8837987 -4.3097329
44000 0.86748838 0 1.1616201 6.8991278 -3.6940568
45000 0.96284421 0 1.1549383 5.1226785 -3.0783806
46000 0.98701623 0 1.170581 4.9719567 -2.4627045
47000 0.92618683 0 1.2146576 6.7100075 -1.8470284
48000 1.0092593 0 1.2523988 5.7067864 -1.2313523
49000 1.0187472 0 1.271608 5.3355092 -0.61567613
50000 1.0194881 0 1.2831094 6.2449759 0
Loop time of 1.74559 on 1 procs for 50000 steps with 160 atoms
Performance: 12374053.445 tau/day, 28643.642 timesteps/s
100.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0020533 | 0.0020533 | 0.0020533 | 0.0 | 0.12
Comm | 0.015517 | 0.015517 | 0.015517 | 0.0 | 0.89
Output | 0.00052404 | 0.00052404 | 0.00052404 | 0.0 | 0.03
Modify | 1.6784 | 1.6784 | 1.6784 | 0.0 | 96.15
Other | | 0.04905 | | | 2.81
Nlocal: 160 ave 160 max 160 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5270
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -1,130 +0,0 @@
LAMMPS (22 Aug 2018)
# 2d NEMD simulation - client script
variable mode index file
if "${mode} == file" then "message client md file tmp.couple" elif "${mode} == zmq" "message client md zmq localhost:5555" elif "${mode} == mpione" "message client md mpi/one" elif "${mode} == mpitwo" "message client md mpi/two tmp.couple"
message client md mpi/one
units lj
atom_style atomic
dimension 2
atom_modify sort 0 0.0 map yes
lattice sq2 0.8442
Lattice spacing in x,y,z = 1.53919 1.53919 1.53919
region box prism 0 10 0 8 -0.5 0.5 0 0 0
create_box 2 box
Created triclinic box = (0 0 -0.769595) to (15.3919 12.3135 0.769595) with tilt (0 0 0)
2 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 160 atoms
Time spent = 0.000453949 secs
mass * 1.0
velocity all create 1.44 87287 loop geom
region slice block 4 6 INF INF INF INF
set region slice type 2
40 settings made for type
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nvt/sllod temp 1.0 1.0 1.0 tchain 1
fix 2 all deform 1 xy erate 0.01 remap v
fix 3 all client/md
fix_modify 3 energy yes
thermo_style custom step temp epair etotal press xy
thermo 1000
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0
ghost atom cutoff = 0
binsize = 15.3919, bins = 1 1 1
0 neighbor lists, perpetual/occasional/extra = 0 0 0
Per MPI rank memory allocation (min/avg/max) = 2.308 | 2.308 | 2.308 Mbytes
Step Temp E_pair TotEng Press Xy
0 1.44 0 1.431 1.2080502 0
1000 1.0876599 0 1.3637783 6.063363 0.61567613
2000 1.0722996 0 1.4108622 7.0518942 1.2313523
3000 1.0580774 0 1.3845895 6.0395275 1.8470284
4000 1.0068006 0 1.3804387 7.0944598 2.4627045
5000 0.95391814 0 1.2578438 6.1878831 3.0783806
6000 0.93492945 0 1.2711287 7.0440517 3.6940568
7000 0.94107853 0 1.2384371 6.1438077 4.3097329
8000 0.89711744 0 1.145748 6.3558305 4.925409
9000 0.90190304 0 1.0860684 4.957098 5.5410852
10000 0.84255749 0 1.0376892 5.2351795 6.1567613
11000 0.90250882 0 1.0497695 4.3844838 6.7724374
12000 0.83461274 0 1.0281949 5.1534361 7.3881135
13000 0.80315331 0 1.0226333 5.757222 -7.3881135
14000 0.81820939 0 0.99276466 4.6943725 -6.7724374
15000 0.8239631 0 1.0408289 5.1669006 -6.1567613
16000 0.88908894 0 1.1554855 6.3510278 -5.5410852
17000 0.98268136 0 1.2086981 5.6302847 -4.925409
18000 1.0098162 0 1.3687676 7.5243587 -4.3097329
19000 1.0795086 0 1.4562691 7.639418 -3.6940568
20000 1.1932155 0 1.5278988 7.0668432 -3.0783806
21000 1.2424296 0 1.6048792 7.959585 -2.4627045
22000 1.297169 0 1.7421262 8.9432388 -1.8470284
23000 1.2863494 0 1.7236774 8.3584973 -1.2313523
24000 1.4084347 0 1.7370339 7.2729078 -0.61567613
25000 1.3339728 0 1.6883255 7.529098 0
26000 1.1483243 0 1.5155578 7.3869994 0.61567613
27000 1.1372606 0 1.4368323 7.0580136 1.2313523
28000 1.0518579 0 1.355311 6.478857 1.8470284
29000 1.0581145 0 1.2535509 5.3697479 2.4627045
30000 0.93612564 0 1.185532 5.5520142 3.0783806
31000 0.94387516 0 1.1716454 5.8655485 3.6940568
32000 0.83953515 0 1.0737746 5.5551953 4.3097329
33000 0.84862926 0 1.0564042 5.7795428 4.925409
34000 0.83621877 0 1.079387 5.7514243 5.5410852
35000 0.86938506 0 1.031783 4.5897825 6.1567613
36000 0.88990609 0 1.0807597 5.3751744 6.7724374
37000 0.89534631 0 1.1238882 5.2400355 7.3881135
38000 0.98545003 0 1.2121125 5.7783854 -7.3881135
39000 0.96737778 0 1.2472934 6.1139 -6.7724374
40000 1.0664194 0 1.2956811 5.7353661 -6.1567613
41000 1.0681494 0 1.3269435 6.3102722 -5.5410852
42000 1.0875422 0 1.3963739 7.1208066 -4.925409
43000 1.0968173 0 1.3388062 6.1704339 -4.3097329
44000 1.1182109 0 1.3773214 7.0872686 -3.6940568
45000 1.1243261 0 1.432186 6.884782 -3.0783806
46000 1.039713 0 1.4389721 7.5585257 -2.4627045
47000 1.0816108 0 1.4100361 6.4611126 -1.8470284
48000 0.97637127 0 1.3605389 7.3992744 -1.2313523
49000 1.0361978 0 1.2721873 5.8166109 -0.61567613
50000 0.92367087 0 1.1875669 6.4685214 0
Loop time of 2.82785 on 2 procs for 50000 steps with 160 atoms
Performance: 7638300.565 tau/day, 17681.251 timesteps/s
100.0% CPU use with 2 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0011888 | 0.0012611 | 0.0013335 | 0.2 | 0.04
Comm | 0.024838 | 0.025075 | 0.025312 | 0.1 | 0.89
Output | 0.0003581 | 0.00052559 | 0.00069308 | 0.0 | 0.02
Modify | 2.7209 | 2.7263 | 2.7318 | 0.3 | 96.41
Other | | 0.07465 | | | 2.64
Nlocal: 80 ave 80 max 80 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 2 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 5257
Dangerous builds = 0
Total wall time: 0:00:02

Some files were not shown because too many files have changed in this diff.