Fix merge conflicts

rohskopf
2022-07-07 09:11:28 -06:00
272 changed files with 42151 additions and 3135 deletions


@ -6,6 +6,9 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
"Please remove CMakeCache.txt and CMakeFiles first.")
endif()
set(LAMMPS_THIRDPARTY_URL "https://download.lammps.org/thirdparty"
CACHE STRING "URL for thirdparty package downloads")
# global LAMMPS/plugin build settings
set(LAMMPS_SOURCE_DIR "" CACHE PATH "Location of LAMMPS sources folder")
if(NOT LAMMPS_SOURCE_DIR)
@ -78,6 +81,13 @@ function(get_newest_file path variable)
set(${variable} ${_bestfile} PARENT_SCOPE)
endfunction()
# get LAMMPS version date
function(get_lammps_version version_header variable)
file(STRINGS ${version_header} line REGEX LAMMPS_VERSION)
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\1\\2\\3" date "${line}")
set(${variable} "${date}" PARENT_SCOPE)
endfunction()
#################################################################################
# LAMMPS C++ interface. We only need the header related parts except on windows.
add_library(lammps INTERFACE)
@ -89,6 +99,7 @@ endif()
################################################################################
# MPI configuration
if(NOT CMAKE_CROSSCOMPILING)
set(MPI_CXX_SKIP_MPICXX TRUE)
find_package(MPI QUIET)
option(BUILD_MPI "Build MPI version" ${MPI_FOUND})
else()


@ -47,8 +47,8 @@ if(DOWNLOAD_KOKKOS)
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_CXX_EXTENSIONS=${CMAKE_CXX_EXTENSIONS}")
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}")
include(ExternalProject)
set(KOKKOS_URL "https://github.com/kokkos/kokkos/archive/3.6.00.tar.gz" CACHE STRING "URL for KOKKOS tarball")
set(KOKKOS_MD5 "b5c44ea961031795f434002cd7b31c20" CACHE STRING "MD5 checksum of KOKKOS tarball")
set(KOKKOS_URL "https://github.com/kokkos/kokkos/archive/3.6.01.tar.gz" CACHE STRING "URL for KOKKOS tarball")
set(KOKKOS_MD5 "0ec97fc0c356dd65bd2487defe81a7bf" CACHE STRING "MD5 checksum of KOKKOS tarball")
mark_as_advanced(KOKKOS_URL)
mark_as_advanced(KOKKOS_MD5)
ExternalProject_Add(kokkos_build
@ -72,7 +72,7 @@ if(DOWNLOAD_KOKKOS)
add_dependencies(LAMMPS::KOKKOSCORE kokkos_build)
add_dependencies(LAMMPS::KOKKOSCONTAINERS kokkos_build)
elseif(EXTERNAL_KOKKOS)
find_package(Kokkos 3.6.01 REQUIRED CONFIG)
target_link_libraries(lammps PRIVATE Kokkos::kokkos)
target_link_libraries(lmp PRIVATE Kokkos::kokkos)
else()


@ -8,8 +8,8 @@ option(DOWNLOAD_MDI "Download and compile the MDI library instead of using an al
if(DOWNLOAD_MDI)
message(STATUS "MDI download requested - we will build our own")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.3.2.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "836f5da400d8cff0f0e4435640f9454f" CACHE STRING "MD5 checksum for MDI tarball")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.4.1.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "f9505fccd4c79301a619f6452dad4ad9" CACHE STRING "MD5 checksum for MDI tarball")
mark_as_advanced(MDI_URL)
mark_as_advanced(MDI_MD5)
enable_language(C)
@ -44,7 +44,7 @@ if(DOWNLOAD_MDI)
ExternalProject_Add(mdi_build
URL ${MDI_URL}
URL_MD5 ${MDI_MD5}
CMAKE_ARGS
-DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
@ -54,6 +54,7 @@ if(DOWNLOAD_MDI)
-Dlanguage=C
-Dlibtype=STATIC
-Dmpi=${MDI_USE_MPI}
-Dplugins=ON
-Dpython_plugins=${MDI_USE_PYTHON_PLUGINS}
UPDATE_COMMAND ""
INSTALL_COMMAND ""


@ -44,7 +44,9 @@ if(DOWNLOAD_N2P2)
else()
# get path to MPI include directory
get_target_property(N2P2_MPI_INCLUDE MPI::MPI_CXX INTERFACE_INCLUDE_DIRECTORIES)
foreach (_INCL ${N2P2_MPI_INCLUDE})
set(N2P2_PROJECT_OPTIONS "${N2P2_PROJECT_OPTIONS} -I${_INCL}")
endforeach()
endif()
# prefer GNU make, if available. N2P2 lib seems to need it.
@ -75,7 +77,7 @@ if(DOWNLOAD_N2P2)
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
PATCH_COMMAND sed -i -e "s/\\(MPI_\\(P\\|Unp\\)ack(\\)/\\1(void *) /" src/libnnpif/LAMMPS/InterfaceLammps.cpp
BUILD_COMMAND ${N2P2_MAKE} -C <SOURCE_DIR>/src -f makefile libnnpif ${N2P2_BUILD_OPTIONS}
BUILD_ALWAYS YES
INSTALL_COMMAND ""
BUILD_IN_SOURCE 1


@ -138,6 +138,8 @@ KOKKOS, o = OPENMP, t = OPT.
* :doc:`smd/vol <compute_smd_vol>`
* :doc:`snap <compute_sna_atom>`
* :doc:`sna/atom <compute_sna_atom>`
* :doc:`sna/grid <compute_sna_atom>`
* :doc:`sna/grid/local <compute_sna_atom>`
* :doc:`snad/atom <compute_sna_atom>`
* :doc:`snav/atom <compute_sna_atom>`
* :doc:`sph/e/atom <compute_sph_e_atom>`


@ -103,7 +103,7 @@ OPT.
* :doc:`lb/viscous <fix_lb_viscous>`
* :doc:`lineforce <fix_lineforce>`
* :doc:`manifoldforce <fix_manifoldforce>`
* :doc:`mdi/qm <fix_mdi_qm>`
* :doc:`meso/move <fix_meso_move>`
* :doc:`mol/swap <fix_mol_swap>`
* :doc:`momentum (k) <fix_momentum>`


@ -194,7 +194,7 @@ OPT.
* :doc:`lubricateU/poly <pair_lubricateU>`
* :doc:`mdpd <pair_mesodpd>`
* :doc:`mdpd/rhosum <pair_mesodpd>`
* :doc:`meam (k) <pair_meam>`
* :doc:`meam/spline (o) <pair_meam_spline>`
* :doc:`meam/sw/spline <pair_meam_sw_spline>`
* :doc:`mesocnt <pair_mesocnt>`


@ -470,6 +470,12 @@ This will most likely cause errors in kinetic fluctuations.
*More than one compute sna/atom*
Self-explanatory.
*More than one compute sna/grid*
Self-explanatory.
*More than one compute sna/grid/local*
Self-explanatory.
*More than one compute snad/atom*
Self-explanatory.


@ -5,9 +5,9 @@ Client/server coupling of two (or more) codes is where one code is the
"client" and sends request messages (data) to one (or more) "server"
code(s). A server responds to each request with a reply message
(data). This enables two (or more) codes to work in tandem to perform
a simulation. In this context, LAMMPS can act as either a client or
server code. It does this by using the `MolSSI Driver Interface (MDI)
library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_,
developed by the `Molecular Sciences Software Institute (MolSSI)
<https://molssi.org>`_, which is supported by the :ref:`MDI <PKG-MDI>`
package.
@ -63,22 +63,39 @@ The package also provides a :doc:`mdi plugin <mdi>` command which
enables LAMMPS to operate as an MDI driver and load an MDI engine as a
plugin library.
The package also has a :doc:`fix mdi/qm <fix_mdi_qm>` command in which
LAMMPS operates as an MDI driver in conjunction with a quantum
mechanics code as an MDI engine. The post_force() method of the
fix_mdi_qm.cpp file shows how a driver issues MDI commands to another
code. This command can be used to couple to an MDI engine which is
either a stand-alone code or a plugin library.
As explained on the :doc:`fix mdi/qm <fix_mdi_qm>` command doc page, it can
be used to perform *ab initio* MD simulations or energy minimizations,
or to evaluate the quantum energy and forces for a series of
independent systems. The examples/mdi directory has example input
scripts for all of these use cases.
----------
The examples/mdi directory contains Python scripts and LAMMPS input
scripts which use LAMMPS as either an MDI driver or engine or both.
Currently, 5 example use cases are provided:
* Run ab initio MD (AIMD) using 2 instances of LAMMPS. As a driver
LAMMPS performs the timestepping in either NVE or NPT mode. As an
engine, LAMMPS computes forces and is a surrogate for a quantum
code.
* As a driver, LAMMPS runs an MD simulation. Every N steps it passes
the current snapshot to an MDI engine to evaluate the energy,
virial, and peratom forces. As the engine LAMMPS is a surrogate for
a quantum code.
* As a driver, LAMMPS loops over a series of data files and passes the
configuration to an MDI engine to evaluate the energy, virial, and
peratom forces. As the engine LAMMPS is a surrogate for a quantum
code.
* A Python script driver invokes a sequence of unrelated LAMMPS
calculations. Calculations can be single-point energy/force
@ -91,20 +108,22 @@ Three example use cases are provided:
Note that in any of these examples where LAMMPS is used as an engine,
an actual QM code (which supports MDI) could be used in its place,
without modifying the input scripts or launch commands, except to
specify the name of the QM code.
The examples/mdi/Run.sh file illustrates how to launch both driver and
engine codes so that they communicate using the MDI library via either
MPI or sockets, and how to use the engine as either a stand-alone code
or a plugin library.
-------------
Currently there are at least two quantum DFT codes which have direct
MDI support, `Quantum ESPRESSO (QE)
<https://www.quantum-espresso.org/>`_ and `INQ
<https://qsg.llnl.gov/node/101.html>`_. There are also several QM
codes which have indirect support through QCEngine or i-PI. The
former means they require a wrapper program (QCEngine) with MDI
support which writes/reads files to pass data to the quantum code
itself. The list of QCEngine-supported and i-PI-supported quantum
codes is on the `MDI webpage


@ -68,7 +68,8 @@ liquid Ar via the GK formalism:
# Sample LAMMPS input script for viscosity of liquid Ar
units real
variable T equal 200.0 # run temperature
variable Tinit equal 250.0 # equilibration temperature
variable V equal vol
variable dt equal 4.0
variable p equal 400 # correlation length
@ -99,12 +100,14 @@ liquid Ar via the GK formalism:
# equilibration and thermalization
velocity all create ${Tinit} 102486 mom yes rot yes dist gaussian
fix NVT all nvt temp ${Tinit} ${Tinit} 10 drag 0.2
run 8000
# viscosity calculation, switch to NVE if desired
velocity all create $T 102486 mom yes rot yes dist gaussian
fix NVT all nvt temp $T $T 10 drag 0.2
#unfix NVT
#fix NVE all nve
@ -122,7 +125,7 @@ liquid Ar via the GK formalism:
run 100000
variable v equal (v_v11+v_v22+v_v33)/3.0
variable ndens equal count(all)/vol
print "average viscosity: $v [Pa.s] @ $T K, ${ndens} /A^3"
print "average viscosity: $v [Pa.s] @ $T K, ${ndens} atoms/A^3"
The fifth method is related to the above Green-Kubo method,
but uses the Einstein formulation, analogous to the Einstein
@ -131,9 +134,9 @@ time-integrated momentum fluxes play the role of Cartesian
coordinates, whose mean-square displacement increases linearly
with time at sufficiently long times.
The sixth is the periodic perturbation method, which is also a non-equilibrium MD method.
However, instead of measuring the momentum flux in response to an applied velocity gradient,
it measures the velocity profile in response to applied stress.
A cosine-shaped periodic acceleration is added to the system via the
:doc:`fix accelerate/cos <fix_accelerate_cos>` command,
and the :doc:`compute viscosity/cos<compute_viscosity_cos>` command is used to monitor the


@ -1479,7 +1479,7 @@ the :doc:`Build extras <Build_extras>` page.
* lib/mdi/README
* :doc:`Howto MDI <Howto_mdi>`
* :doc:`mdi <mdi>`
* :doc:`fix mdi/qm <fix_mdi_qm>`
* examples/PACKAGES/mdi
----------
@ -1801,6 +1801,8 @@ computes which analyze attributes of the potential.
* src/ML-SNAP: filenames -> commands
* :doc:`pair_style snap <pair_snap>`
* :doc:`compute sna/atom <compute_sna_atom>`
* :doc:`compute sna/grid <compute_sna_atom>`
* :doc:`compute sna/grid/local <compute_sna_atom>`
* :doc:`compute snad/atom <compute_sna_atom>`
* :doc:`compute snav/atom <compute_sna_atom>`
* examples/snap


@ -284,6 +284,8 @@ The individual style names on the :doc:`Commands compute <Commands_compute>` pag
* :doc:`smd/vol <compute_smd_vol>` - per-particle volumes and their sum in Smooth Mach Dynamics
* :doc:`snap <compute_sna_atom>` - gradients of SNAP energy and forces w.r.t. linear coefficients and related quantities for fitting SNAP potentials
* :doc:`sna/atom <compute_sna_atom>` - bispectrum components for each atom
* :doc:`sna/grid <compute_sna_atom>` - global array of bispectrum components on a regular grid
* :doc:`sna/grid/local <compute_sna_atom>` - local array of bispectrum components on a regular grid
* :doc:`snad/atom <compute_sna_atom>` - derivative of bispectrum components for each atom
* :doc:`snav/atom <compute_sna_atom>` - virial contribution from bispectrum components for each atom
* :doc:`sph/e/atom <compute_sph_e_atom>` - per-atom internal energy of Smooth-Particle Hydrodynamics atoms


@ -2,6 +2,8 @@
.. index:: compute snad/atom
.. index:: compute snav/atom
.. index:: compute snap
.. index:: compute sna/grid
.. index:: compute sna/grid/local
compute sna/atom command
========================
@ -15,6 +17,12 @@ compute snav/atom command
compute snap command
====================
compute sna/grid command
========================
compute sna/grid/local command
==============================
Syntax
""""""
@ -24,6 +32,9 @@ Syntax
compute ID group-ID snad/atom rcutfac rfac0 twojmax R_1 R_2 ... w_1 w_2 ... keyword values ...
compute ID group-ID snav/atom rcutfac rfac0 twojmax R_1 R_2 ... w_1 w_2 ... keyword values ...
compute ID group-ID snap rcutfac rfac0 twojmax R_1 R_2 ... w_1 w_2 ... keyword values ...
compute ID group-ID sna/grid nx ny nz rcutfac rfac0 twojmax R_1 R_2 ... w_1 w_2 ... keyword values ...
compute ID group-ID sna/grid/local nx ny nz rcutfac rfac0 twojmax R_1 R_2 ... w_1 w_2 ... keyword values ...
* ID, group-ID are documented in :doc:`compute <compute>` command
* sna/atom = style name of this compute command
@ -32,6 +43,7 @@ Syntax
* twojmax = band limit for bispectrum components (non-negative integer)
* R_1, R_2,... = list of cutoff radii, one for each type (distance units)
* w_1, w_2,... = list of neighbor weights, one for each type
* nx, ny, nz = number of grid points in x, y, and z directions (positive integer)
* zero or more keyword/value pairs may be appended
* keyword = *rmin0* or *switchflag* or *bzeroflag* or *quadraticflag* or *chem* or *bnormflag* or *wselfallflag* or *bikflag* or *switchinnerflag* or *sinner* or *dinner* or *dgradflag*
@ -81,6 +93,7 @@ Examples
compute snap all snap 1.4 0.95 6 2.0 1.0
compute snap all snap 1.0 0.99363 6 3.81 3.83 1.0 0.93 chem 2 0 1
compute snap all snap 1.0 0.99363 6 3.81 3.83 1.0 0.93 switchinnerflag 1 sinner 1.35 1.6 dinner 0.25 0.3
compute bgrid all sna/grid/local 200 200 200 1.4 0.95 6 2.0 1.0
Description
"""""""""""
@ -215,6 +228,46 @@ command:
See section below on output for a detailed explanation of the data
layout in the global array.
The compute *sna/grid* and *sna/grid/local* commands calculate
bispectrum components for a regular grid of points.
These are calculated from the local density of nearby atoms *i'*
around each grid point, as if there were a central atom *i*
at the grid point. This is useful for characterizing fine-scale
structure in a configuration of atoms, and it is used
in the `MALA package <https://github.com/casus/mala>`_
to build machine-learning surrogates for finite-temperature Kohn-Sham
density functional theory (:ref:`Ellis et al. <Ellis2021>`).
Neighbor atoms not in the group do not contribute to the
bispectrum components of the grid points. The distance cutoff :math:`R_{ii'}`
assumes that *i* has the same type as the neighbor atom *i'*.
Compute *sna/grid* calculates a global array containing bispectrum
components for a regular grid of points.
The grid is aligned with the current box dimensions, with the
first point at the box origin, and forming a regular 3d array with
*nx*, *ny*, and *nz* points in the x, y, and z directions. For triclinic
boxes, the array is congruent with the periodic lattice vectors
a, b, and c. The array contains one row for each of the
:math:`nx \times ny \times nz` grid points, looping over the index for *ix* fastest,
then *iy*, and *iz* slowest. Each row of the array contains the *x*, *y*,
and *z* coordinates of the grid point, followed by the bispectrum
components. See section below on output for a detailed explanation of the data
layout in the global array.
Compute *sna/grid/local* calculates bispectrum components of a regular
grid of points similarly to compute *sna/grid* described above.
However, because the array is local, it contains only rows for grid points
that are local to the processor sub-domain. The global grid
of :math:`nx \times ny \times nz` points is still laid out in space the same as for *sna/grid*,
but grid points are strictly partitioned, so that every grid point appears in
one and only one local array. The array contains one row for each of the
local grid points, looping over the global index *ix* fastest,
then *iy*, and *iz* slowest. Each row of the array contains
the global indexes *ix*, *iy*, and *iz* first, followed by the *x*, *y*,
and *z* coordinates of the grid point, followed by the bispectrum
components. See section below on output for a detailed explanation of the data
layout in the global array.
The value of all bispectrum components will be zero for atoms not in
the group. Neighbor atoms not in the group do not contribute to the
bispectrum of atoms in the group.
@ -428,6 +481,21 @@ number of columns in the global array generated by *snap* are 31, and
931, respectively, while the number of rows is 1+3\*\ *N*\ +6, where *N*
is the total number of atoms.
Compute *sna/grid* evaluates a global array.
The array contains one row for each of the
:math:`nx \times ny \times nz` grid points, looping over the index for *ix* fastest,
then *iy*, and *iz* slowest. Each row of the array contains the *x*, *y*,
and *z* coordinates of the grid point, followed by the bispectrum
components.
Compute *sna/grid/local* evaluates a local array.
The array contains one row for each of the
local grid points, looping over the global index *ix* fastest,
then *iy*, and *iz* slowest. Each row of the array contains
the global indexes *ix*, *iy*, and *iz* first, followed by the *x*, *y*,
and *z* coordinates of the grid point, followed by the bispectrum
components.
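To make the ordering concrete: with 0-based grid indexes and 1-based
rows, grid point (*ix*, *iy*, *iz*) of *sna/grid* lands in row
:math:`1 + ix + nx(iy + ny \, iz)`. This formula is deduced from the
layout described above, not separate documented syntax. A sketch of
capturing the global array to a file, using illustrative IDs and a
deliberately coarse grid:

.. code-block:: LAMMPS

   # compute bispectrum components on a 2x2x2 grid and write them out
   compute bgrid all sna/grid 2 2 2 1.4 0.95 6 2.0 1.0
   fix     bout all ave/time 100 1 100 c_bgrid[*] file bgrid.dat mode vector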
If the *quadratic* keyword value is set to 1, then additional columns
are generated, corresponding to the products of all distinct pairs of
bispectrum components. If the number of bispectrum components is *K*,
@ -509,8 +577,7 @@ The optional keyword defaults are *rmin0* = 0,
.. _Thompson20141:
**(Thompson)** Thompson, Swiler, Trott, Foiles, Tucker, J Comp Phys, 285, 316, (2015).
.. _Bartok20101:
@ -531,4 +598,8 @@ of Angular Momentum, World Scientific, Singapore (1987).
.. _Cusentino2020:
**(Cusentino)** Cusentino, Wood, Thompson, J Phys Chem A, 124, 5456, (2020)
.. _Ellis2021:
**(Ellis)** Ellis, Fiedler, Popoola, Modine, Stephens, Thompson, Cangi, Rajamanickam, Phys Rev B, 104, 035120, (2021)


@ -27,6 +27,9 @@ dump command
:doc:`dump custom/adios <dump_adios>` command
=============================================
:doc:`dump cfg/uef <dump_cfg_uef>` command
==========================================
Syntax
""""""
@ -36,7 +39,7 @@ Syntax
* ID = user-assigned name for the dump
* group-ID = ID of the group of atoms to be dumped
* style = *atom* or *atom/gz* or *atom/zstd* or *atom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *cfg/uef* or *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *dcd* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *xyz/mpiio* or *yaml*
* N = dump every this many timesteps
* file = name of file to write dump info to
* args = list of arguments for a particular style
@ -47,22 +50,23 @@ Syntax
*atom/gz* args = none
*atom/zstd* args = none
*atom/mpiio* args = none
*atom/adios* args = none, discussed on :doc:`dump atom/adios <dump_adios>` page
*cfg* args = same as *custom* args, see below
*cfg/gz* args = same as *custom* args, see below
*cfg/zstd* args = same as *custom* args, see below
*cfg/mpiio* args = same as *custom* args, see below
*cfg/uef* args = same as *custom* args, discussed on :doc:`dump cfg/uef <dump_cfg_uef>` page
*custom*, *custom/gz*, *custom/zstd*, *custom/mpiio* args = see below
*custom/adios* args = same as *custom* args, discussed on :doc:`dump custom/adios <dump_adios>` page
*dcd* args = none
*h5md* args = discussed on :doc:`dump h5md <dump_h5md>` page
*image* args = discussed on :doc:`dump image <dump_image>` page
*local*, *local/gz*, *local/zstd* args = see below
*molfile* args = discussed on :doc:`dump molfile <dump_molfile>` page
*movie* args = discussed on :doc:`dump image <dump_image>` page
*netcdf* args = discussed on :doc:`dump netcdf <dump_netcdf>` page
*netcdf/mpiio* args = discussed on :doc:`dump netcdf <dump_netcdf>` page
*vtk* args = same as *custom* args, see below, also :doc:`dump vtk <dump_vtk>` page
*xtc* args = none
*xyz* args = none
*xyz/gz* args = none
@ -155,7 +159,7 @@ timesteps in one of several styles. The *image* and *movie* styles are
the exception: the *image* style renders a JPG, PNG, or PPM image file
of the atom configuration every N timesteps while the *movie* style
combines and compresses them into a movie file; both are discussed in
detail on the :doc:`dump image <dump_image>` page. The timesteps on
which dump output is written can also be controlled by a variable.
See the :doc:`dump_modify every <dump_modify>` command.
@ -194,7 +198,7 @@ or multiple smaller files).
For the *atom*, *custom*, *cfg*, and *local* styles, sorting is off by
default. For the *dcd*, *xtc*, *xyz*, and *molfile* styles, sorting
by atom ID is on by default. See the :doc:`dump_modify <dump_modify>`
page for details.
The *atom/gz*, *cfg/gz*, *custom/gz*, *local/gz*, and *xyz/gz* styles
are identical in command syntax to the corresponding styles without
@ -204,7 +208,7 @@ alternative approach to writing compressed files via a pipe, as done
by the regular dump styles, which may be required on clusters where
the interface to the high-speed network disallows using the fork()
library call (which is needed for a pipe). For the remainder of this
page, you should thus consider the *atom* and *atom/gz* styles
(etc) to be interchangeable, with the exception of the required
filename suffix.
@ -218,7 +222,7 @@ As explained below, the *atom/mpiio*, *cfg/mpiio*, *custom/mpiio*, and
*xyz/mpiio* styles are identical in command syntax and in the format
of the dump files they create, to the corresponding styles without
"mpiio", except the single dump file they produce is written in
parallel via the MPI-IO library. For the remainder of this page,
you should thus consider the *atom* and *atom/mpiio* styles (etc) to
be interchangeable. The one exception is how the filename is
specified for the MPI-IO styles, as explained below.
@ -664,7 +668,7 @@ so that each value is 0.0 to 1.0. If the simulation box is triclinic
(tilted), then all atom coords will still be between 0.0 and 1.0.
I.e. actual unscaled (x,y,z) = xs\*A + ys\*B + zs\*C, where (A,B,C) are
the non-orthogonal vectors of the simulation box edges, as discussed
on the :doc:`Howto triclinic <Howto_triclinic>` page.
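For example, a sketch of a dump that writes scaled coordinates:

.. code-block:: LAMMPS

   dump 1 all custom 1000 tri.dump id type xs ys zs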
Use *xu*, *yu*, *zu* if you want the coordinates "unwrapped" by the
image flags for each atom. Unwrapped means that if the atom has
@ -787,7 +791,7 @@ more info.
The *atom/mpiio*, *cfg/mpiio*, *custom/mpiio*, and *xyz/mpiio* styles
are part of the MPIIO package. They are only enabled if LAMMPS was
built with that package. See the :doc:`Build package <Build_package>`
page for more info.
The *xtc*, *dcd* and *yaml* styles are part of the EXTRA-DUMP package.
They are only enabled if LAMMPS was built with that package. See the
@ -797,12 +801,12 @@ Related commands
""""""""""""""""
:doc:`dump atom/adios <dump_adios>`, :doc:`dump custom/adios <dump_adios>`,
:doc:`dump cfg/uef <dump_cfg_uef>`, :doc:`dump h5md <dump_h5md>`,
:doc:`dump image <dump_image>`, :doc:`dump molfile <dump_molfile>`,
:doc:`dump_modify <dump_modify>`, :doc:`undump <undump>`, :doc:`write_dump <write_dump>`
Default
"""""""
The defaults for the *image* and *movie* styles are listed on the
:doc:`dump image <dump_image>` page.


@ -246,7 +246,7 @@ accelerated styles exist.
* :doc:`lb/viscous <fix_lb_viscous>` -
* :doc:`lineforce <fix_lineforce>` - constrain atoms to move in a line
* :doc:`manifoldforce <fix_manifoldforce>` - restrain atoms to a manifold during minimization
* :doc:`mdi/qm <fix_mdi_qm>` - LAMMPS operates as driver for a quantum code via the MolSSI Driver Interface (MDI)
* :doc:`meso/move <fix_meso_move>` - move mesoscopic SPH/SDPD particles in a prescribed fashion
* :doc:`mol/swap <fix_mol_swap>` - Monte Carlo atom type swapping with a molecule
* :doc:`momentum <fix_momentum>` - zero the linear and/or angular momentum of a group of atoms


@ -116,13 +116,6 @@ potential energy of the system as part of :doc:`thermodynamic output
<thermo_style>`. The default setting for this fix is :doc:`fix_modify
energy yes <fix_modify>`.
The :doc:`fix_modify <fix_modify>` *virial* option is supported by
this fix to add the contribution computed by LATTE to the global
pressure of the system as part of :doc:`thermodynamic output
@ -137,7 +130,7 @@ energy discussed above. The scalar value calculated by this fix is
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
The DFTB forces computed by LATTE via this fix are used during an
energy minimization, invoked by the :doc:`minimize <minimize>`
command.
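For example, a relaxation might look like this sketch (the fix ID and
convergence tolerances are illustrative; the NULL argument follows the
fix latte examples distributed with LAMMPS):

.. code-block:: LAMMPS

   # hedged sketch: minimize a structure with DFTB forces from LATTE
   fix      dftb all latte NULL
   minimize 1.0e-4 1.0e-6 100 1000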


@ -1,93 +0,0 @@
.. index:: fix mdi/aimd
fix mdi/aimd command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/aimd keyword
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/aimd = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/aimd
Description
"""""""""""
This command enables LAMMPS to act as a client with another server
code to couple the two codes together to perform ab initio MD (AIMD)
simulations.
More specifically, this command causes LAMMPS to begin using the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
to run as an MDI driver (client), which sends MDI commands to an
external MDI engine code (server) which in the case of AIMD is a
quantum mechanics (QM) code, or could be LAMMPS itself, acting as a
surrogate for a QM code. See the :doc:`Howto mdi <Howto_mdi>` page
for more information about how LAMMPS can operate as either an MDI
driver or engine.
The examples/mdi directory contains input scripts performing AIMD in
this manner with LAMMPS acting as both a driver and an engine
(surrogate for a QM code). The examples/mdi/README file explains how
to launch both driver and engine codes so that they communicate using
the MDI library via either MPI or sockets. Any QM code that supports
MDI could be used in place of LAMMPS acting as a QM surrogate. See
the :doc:`Howto mdi <Howto_mdi>` page for a current list (March 2022)
of such QM codes.
The engine code can run either as a stand-alone code, launched at the
same time as LAMMPS, or as a plugin library. See the :doc:`mdi plugin
<mdi>` command for how to trigger LAMMPS to load the plugin library.
Again, the examples/mdi/README file explains how to launch both driver
and engine codes so that engine is used in plugin mode.
----------
This fix performs the timestepping portion of an AIMD simulation.
Both LAMMPS and the engine code (QM or LAMMPS) should define the same
system (simulation box, atoms and their types) in their respective
input scripts. LAMMPS then begins its timestepping.
At the point in each timestep when LAMMPS needs the force on each
atom, it communicates with the engine code. It sends the current
simulation box size and shape (if they change dynamically, e.g. during
an NPT simulation), and the current atom coordinates. The engine code
computes quantum forces on each atom and returns them to LAMMPS. If
LAMMPS also needs the system energy and/or virial, it requests those
values from the engine code as well.
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
To use LAMMPS as an MDI driver in conjunction with other MDI-enabled
atomistic codes, the :doc:`units <units>` command should be used to
specify *real* or *metal* units. This will ensure the correct unit
conversions between LAMMPS and MDI units, which the other codes will
also perform in their preferred units.
LAMMPS can also be used as an MDI driver in other unit choices it
supports, e.g. *lj*, but then no unit conversion is performed.
Related commands
""""""""""""""""
:doc:`mdi engine <mdi>`
Default
"""""""
none

doc/src/fix_mdi_qm.rst (new file)

@ -0,0 +1,276 @@
.. index:: fix mdi/qm
fix mdi/qm command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/qm keyword
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/qm = style name of this fix command
* zero or more keyword/value pairs may be appended
* keyword = *virial* or *add* or *every* or *connect* or *elements*
.. parsed-literal::
*virial* args = *yes* or *no*
yes = request virial tensor from server code
no = do not request virial tensor from server code
*add* args = *yes* or *no*
yes = add returned value from server code to LAMMPS quantities
no = do not add returned values to LAMMPS quantities
*every* args = Nevery
Nevery = request values from server code once every Nevery steps
*connect* args = *yes* or *no*
yes = perform a one-time connection to the MDI engine code
no = do not perform the connection operation
*elements* args = N_1 N_2 ... N_ntypes
N_1,N_2,...N_ntypes = atomic number for each of ntypes LAMMPS atom types
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/qm
fix 1 all mdi/qm virial yes
fix 1 all mdi/qm add no every 100 elements 13 29
Description
"""""""""""
This command enables LAMMPS to act as a client with another server
code that will compute the total energy, per-atom forces, and total
virial for atom conformations and simulation box size/shapes that
LAMMPS sends it.
Typically the server code will be a quantum mechanics (QM) code, hence
the name of the fix. However, this is not required; the server code
could be another classical molecular dynamics code or LAMMPS itself.
The server code must support use of the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_ as
explained below.
These are example use cases for this fix, discussed further below:
* perform an ab initio MD (AIMD) simulation with quantum forces
* perform an energy minimization with quantum forces
* perform a nudged elastic band (NEB) calculation with quantum forces
* perform a QM calculation for a series of independent systems which LAMMPS reads or generates
The code coupling performed by this command is done via the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_.
LAMMPS runs as an MDI driver (client), and sends MDI commands to an
external MDI engine code (server), e.g. a QM code which has support
for MDI. See the :doc:`Howto mdi <Howto_mdi>` page for more
information about how LAMMPS can operate as either an MDI driver or
engine.
The examples/mdi directory contains input scripts using this fix in
the various use cases discussed below. In each case, two instances of
LAMMPS are used, once as an MDI driver, once as an MDI engine
(surrogate for a QM code). The examples/mdi/README file explains how
to launch two codes so that they communicate via the MDI library using
either MPI or sockets. Any QM code that supports MDI could be used in
place of LAMMPS acting as a QM surrogate. See the :doc:`Howto mdi
<Howto_mdi>` page for a current list (March 2022) of such QM codes.
Note that an engine code can support MDI in either or both of two
modes. It can be used as a stand-alone code, launched at the same
time as LAMMPS. Or it can be used as a plugin library, which LAMMPS
loads. See the :doc:`mdi plugin <mdi>` command for how to trigger
LAMMPS to load a plugin library. The examples/mdi/README file
explains how to launch the two codes in either mode.
----------
The *virial* keyword setting of yes or no determines whether
LAMMPS will request the QM code to also compute and return
a 6-element symmetric virial tensor for the system.
The *add* keyword setting of *yes* or *no* determines whether the
energy and forces and virial returned by the QM code will be added to
the LAMMPS internal energy and forces and virial or not. If the
setting is *no* then the default :doc:`fix_modify energy <fix_modify>`
and :doc:`fix_modify virial <fix_modify>` settings are also set to
*no* and your input scripts should not set them to yes. See more
details on these fix_modify settings below.
Whatever the setting for the *add* keyword, the QM energy, forces, and
virial will be stored by the fix, so they can be accessed by other
commands. See details below.
The *every* keyword determines how often the QM code will be invoked
during a dynamics run with the current LAMMPS simulation box and
configuration of atoms. The QM code will be called once every
*Nevery* timesteps.
The *connect* keyword determines whether this fix performs a one-time
connection to the QM code. The default is *yes*. The only time a
*no* is needed is if this command is used multiple times in an input
script. E.g. if it is used inside a loop which also uses the :doc:`clear
<clear>` command to destroy the system (including any defined fixes).
See the examples/mdi/in.series.driver script as an example of this,
where LAMMPS is using the QM code to compute energy and forces for a
series of system configurations. In this use case *connect no*
is used along with the :doc:`mdi connect and exit <mdi>` commands
to initiate and terminate the connection once, outside the loop.
The *elements* keyword allows specification of what element each
LAMMPS atom type corresponds to. This is specified by the atomic
number of the element, e.g. 13 for Al. An atomic number must be
specified for each of the ntypes LAMMPS atom types. Ntypes is
typically specified via the create_box command or in the data file
read by the read_data command. If this keyword is not specified, then
this fix will send the LAMMPS atom type for each atom to the MDI
engine. If both the LAMMPS driver and the MDI engine are initialized
so that atom type values are consistent in both codes, then the
*elements* keyword is not needed. Otherwise the keyword can be used
to ensure the two codes are consistent in their definition of atomic
species.
----------
The following 3 example use cases are illustrated in the examples/mdi
directory. See its README file for more details.
(1) To run an ab initio MD (AIMD) dynamics simulation, or an energy
minimization with QM forces, or a multi-replica NEB calculation, use
*add yes* and *every 1* (the defaults). This is so that every time
LAMMPS needs energy and forces, the QM code will be invoked.
Both LAMMPS and the QM code should define the same system (simulation
box, atoms and their types) in their respective input scripts. Note
that in this scenario, it may not be necessary for LAMMPS to define a
pair style or use a neighbor list.
LAMMPS will then perform the timestepping or minimization iterations
for the simulation. At the point in each timestep or iteration when
LAMMPS needs the force on each atom, it communicates with the engine
code. It sends the current simulation box size and shape (if they
change dynamically, e.g. during an NPT simulation), and the current
atom coordinates. The engine code computes quantum forces on each
atom and the total energy of the system and returns them to LAMMPS.
Note that if the AIMD simulation is an NPT or NPH model, or the energy
minimization includes :doc:`fix box relax <fix_box_relax>` to
equilibrate the box size/shape, then LAMMPS computes a pressure. This
means the *virial* keyword should be set to *yes* so that the QM
contribution to the pressure can be included.
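A driver-side sketch of this use case, assuming the simulation box and
atoms have already been defined to match the engine's system:

.. code-block:: LAMMPS

   # AIMD sketch: the QM engine supplies forces every step (add yes, every 1 defaults)
   fix          qm all mdi/qm virial yes
   fix          md all nvt temp 300.0 300.0 0.1
   thermo_style custom step temp pe press
   run          1000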
(2) To run dynamics with a LAMMPS interatomic potential, and evaluate
the QM energy and forces once every 1000 steps, use *add no* and
*every 1000*. This could be useful for using an MD run to generate
randomized configurations which are then passed to the QM code to
produce training data for a machine learning potential. A :doc:`dump
custom <dump>` command could be invoked every 1000 steps to dump the
atom coordinates and QM forces to a file. Likewise the QM energy and
virial could be output with the :doc:`thermo_style custom
<thermo_style>` command.
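A sketch of this use case; the pair style and potential file are
illustrative placeholders, and f_qm refers to the energy and forces
stored by a fix with ID *qm*:

.. code-block:: LAMMPS

   # classical MD, with a QM evaluation every 1000 steps
   pair_style   eam/alloy
   pair_coeff   * * AlCu.eam.alloy Al Cu    # hypothetical potential file
   fix          qm all mdi/qm add no every 1000 virial yes
   fix          md all nve
   dump         cfg all custom 1000 snaps.dump id type x y z f_qm[1] f_qm[2] f_qm[3]
   thermo_style custom step temp pe f_qm
   run          10000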
(3) To do a QM evaluation of energy and forces for a series of *N*
independent systems (simulation box and atoms), use *add no* and
*every 1*. Write a LAMMPS input script which loops over the *N*
systems. See the :doc:`Howto multiple <Howto_multiple>` doc page for
details on looping and removing old systems. The series of systems
could be initialized by reading them from data files with
:doc:`read_data <read_data>` commands. Or, for example, by using the
:doc:`lattice <lattice>` , :doc:`create_atoms <create_atoms>`,
:doc:`delete_atoms <delete_atoms>`, and/or :doc:`displace_atoms
random <displace_atoms>` commands to generate a series of different
systems. At the end of the loop perform :doc:`run 0 <run>` and
:doc:`write_dump <write_dump>` commands to invoke the QM code and
output the QM energy and forces. As in (2), this can be useful to produce
QM data for training a machine learning potential.
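A skeleton of this use case, patterned on the in.series.driver script
described above (data file names are hypothetical):

.. code-block:: LAMMPS

   mdi       connect                  # initiate connection once, outside the loop
   variable  i loop 3
   label     iter
   clear                              # destroys all fixes, hence connect no below
   units     metal                    # re-establish settings wiped by clear
   read_data data.config${i}
   fix       qm all mdi/qm add no connect no
   run       0                        # triggers one QM evaluation
   write_dump all custom forces${i}.dump id type x y z f_qm[1] f_qm[2] f_qm[3]
   next      i
   jump      SELF iter
   mdi       exit                     # terminate connection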
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files
<restart>`.
The :doc:`fix_modify <fix_modify>` *energy* option is supported by
this fix to add the potential energy computed by the QM code to the
global potential energy of the system as part of :doc:`thermodynamic
output <thermo_style>`. The default setting for this fix is
:doc:`fix_modify energy yes <fix_modify>`, unless the *add* keyword is
set to *no*, in which case the default setting is *no*.
The :doc:`fix_modify <fix_modify>` *virial* option is supported by
this fix to add the contribution computed by the QM code to the global
pressure of the system as part of :doc:`thermodynamic output
<thermo_style>`. The default setting for this fix is :doc:`fix_modify
virial yes <fix_modify>`, unless the *add* keyword is set to *no*, in
which case the default setting is *no*.
This fix computes a global scalar which can be accessed by various
:doc:`output commands <Howto_output>`. The scalar is the energy
returned by the QM code. The scalar value calculated by this fix is
"extensive".
This fix also computes a global vector of length 6 which contains
the symmetric virial tensor values returned by the QM code. It can
likewise be accessed by various :doc:`output commands <Howto_output>`.
The ordering of values in the symmetric virial tensor is as follows:
vxx, vyy, vzz, vxy, vxz, vyz. The values will be in pressure
:doc:`units <units>`.
This fix also computes a peratom array with 3 columns which contains
the peratom forces returned by the QM code. It can likewise be
accessed by various :doc:`output commands <Howto_output>`.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
Assuming the *add* keyword is set to *yes* (the default), the forces
computed by the QM code are used during an energy minimization,
invoked by the :doc:`minimize <minimize>` command.
.. note::
If you want the potential energy associated with the QM forces to
be included in the total potential energy of the system (the
quantity being minimized), you MUST not disable the
:doc:`fix_modify <fix_modify>` *energy* option for this fix, which
means the *add* keyword should also be set to *yes* (the default).
Restrictions
""""""""""""
This command is part of the MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` page for more info.
The QM code does not currently compute and return per-atom energy or
per-atom virial contributions. So they will not show up as part of
the calculations performed by the :doc:`compute pe/atom
<compute_pe_atom>` or :doc:`compute stress/atom <compute_stress_atom>`
commands.
To use LAMMPS as an MDI driver in conjunction with other MDI-enabled
codes (MD or QM codes), the :doc:`units <units>` command should be
used to specify *real* or *metal* units. This will ensure the correct
unit conversions between LAMMPS and MDI units. The other code will
also perform similar unit conversions into its preferred units.
LAMMPS can also be used as an MDI driver in other unit choices it
supports, e.g. *lj*, but then no unit conversion is performed.
Related commands
""""""""""""""""
:doc:`mdi plugin <mdi>`, :doc:`mdi engine <mdi>`
Default
"""""""
The defaults for the optional keywords are virial = no, add = yes,
every = 1, connect = yes.


@ -156,27 +156,28 @@ and Boolean operators:
Each A and B is a number or string or a variable reference like $a or
${abc}, or A or B can be another Boolean expression.
Note that all variables used will be substituted for before the
Boolean expression is evaluated. A variable can produce a number,
like an :doc:`equal-style variable <variable>`. Or it can produce a
string, like an :doc:`index-style variable <variable>`.
The Boolean operators "==" and "!=" can operate on a pair or strings
or numbers. They cannot compare a number to a string. All the other
Boolean operations can only operate on numbers.
Expressions are evaluated left to right and have the usual C-style
precedence: the unary logical NOT operator "!" has the highest
precedence, the 4 relational operators "<", "<=", ">", and ">=" are
next; the two remaining relational operators "==" and "!=" are next;
then the logical AND operator "&&"; and finally the logical OR
operator "\|\|" and logical XOR (exclusive or) operator "\|\^" have the
lowest precedence. Parenthesis can be used to group one or more
operator "\|\|" and logical XOR (exclusive or) operator "\|\^" have
the lowest precedence. Parenthesis can be used to group one or more
portions of an expression and/or enforce a different order of
evaluation than what would occur with the default precedence.
When the 6 relational operators (first 6 in list above) compare 2
numbers, they return either a 1.0 or 0.0 depending on whether the
relationship between A and B is TRUE or FALSE.
When the 3 logical operators (last 3 in list above) compare 2 numbers,
they also return either a 1.0 or 0.0 depending on whether the
@ -190,8 +191,16 @@ returns 1.0 if its argument is 0.0, else it returns 0.0. The 3
logical operators can only be used to operate on numbers, not on
strings.
The overall Boolean expression produces a TRUE result if the numeric
result is non-zero. If the result is zero, the expression result is
FALSE.
.. note::
If the Boolean expression is a single numeric value with no Boolean
operators, it will be FALSE if the value = 0.0, otherwise TRUE. If
the Boolean expression is a single string, an error message will be
issued.
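For example (a sketch; the variable and the printed strings are
arbitrary):

.. code-block:: LAMMPS

   variable x equal 10
   if "(${x} > 5) && (${x} != 7)" then "print 'x is large'" else "print 'x is small'"

Here both comparisons are numeric, so each evaluates to 1.0 or 0.0,
and their "&&" is non-zero, making the overall expression TRUE.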
----------


@ -8,21 +8,26 @@ Syntax
.. parsed-literal::
mdi option args
* option = *engine* or *plugin* or *connect* or *exit*
.. parsed-literal::
*engine* args = zero or more keyword arg pairs
keywords = *elements*
*elements* args = N_1 N_2 ... N_ntypes
N_1,N_2,...N_ntypes = atomic number for each of ntypes LAMMPS atom types
*plugin* args = name keyword value keyword value ...
name = name of plugin library, e.g. lammps means a liblammps.so library will be loaded
keyword/value pairs in any order, some are required, some are optional
keywords = *mdi* or *infile* or *extra* or *command*
*mdi* value = args passed to MDI for driver to operate with plugins (required)
*infile* value = filename the engine will read at start-up (optional)
*extra* value = additional command-line args to pass to engine library when loaded
*command* value = a LAMMPS input script command to execute (required)
*connect* args = none
*exit* args = none
Examples
""""""""
@ -30,26 +35,19 @@ Examples
.. code-block:: LAMMPS
mdi engine
mdi engine elements 13 29
mdi plugin lammps mdi "-role ENGINE -name lammps -method LINK" &
infile in.aimd.engine extra "-log log.aimd.engine.plugin" &
command "run 5"
mdi connect
mdi exit
Description
"""""""""""
This command implements operations within LAMMPS to use the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
for coupling to other codes in a client/server protocol.
See the Howto MDI doc page for a discussion of all the different ways
2 or more codes can interact via MDI.
@ -61,6 +59,22 @@ stand-alone code or as a plugin. The README file in that directory
shows how to launch and couple codes for all the 4 usage modes, so that
they communicate via the MDI library using either MPI or sockets.
The scripts in that directory illustrate the use of all the options
for this command.
The *engine* option enables LAMMPS to act as an MDI engine (server),
responding to requests from an MDI driver (client) code.
The *plugin* option enables LAMMPS to act as an MDI driver (client),
and load the MDI engine (server) code as a library plugin. In this
case the MDI engine is a library plugin. An MDI engine can also be a
stand-alone code, launched separately from LAMMPS, in which case the
mdi plugin command is not used.
The *connect* and *exit* options are only used when LAMMPS is acting
as an MDI driver. As explained below, these options are normally not
needed, except for a specific kind of use case.
----------
The *mdi engine* command is used to make LAMMPS operate as an MDI
@ -100,6 +114,8 @@ commands, which are described further below.
- Send/request charge on each atom (N values)
* - >COORDS or <COORDS
- Send/request coordinates of each atom (3N values)
* - >ELEMENTS
- Send elements (atomic numbers) for each atom (N values)
* - <ENERGY
- Request total energy (potential + kinetic) of the system (1 value)
* - >FORCES or <FORCES
@ -121,11 +137,11 @@ commands, which are described further below.
* - <PE
- Request potential energy of the system (1 value)
* - <STRESS
- Request symmetric stress tensor (virial) of the system (9 values)
* - >TOLERANCE
- Send 4 tolerance parameters for next MD minimization via OPTG command
* - >TYPES or <TYPES
- Send/request the LAMMPS atom type for each atom (N values)
* - >VELOCITIES or <VELOCITIES
- Send/request the velocity of each atom (3N values)
* - @INIT_MD or @INIT_OPTG
@ -145,9 +161,25 @@ commands, which are described further below.
builds. If the change in atom positions is large (since the
previous >COORDS command), then LAMMPS will do a more expensive
operation to migrate atoms to new processors as needed and
re-neighbor. If the >NATOMS or >TYPES or >ELEMENTS commands have
been sent (since the previous >COORDS command), then LAMMPS assumes
the system is new and re-initializes an entirely new simulation.
.. note::
The >TYPES or >ELEMENTS commands are how the MDI driver tells the
LAMMPS engine which LAMMPS atom type to assign to each atom. If
both the MDI driver and the LAMMPS engine are initialized so that
atom type values are consistent in both codes, then the >TYPES
command can be used. If not, the optional *elements* keyword can
be used to specify what element each LAMMPS atom type corresponds
to. This is specified by the atomic number of the element, e.g. 13
for Al. An atomic number must be specified for each of the ntypes
LAMMPS atom types. Ntypes is typically specified via the
create_box command or in the data file read by the read_data
command. If this has been done, the MDI driver can send an
>ELEMENTS command to the LAMMPS engine with the atomic number of
each atom.
The MD and OPTG commands perform an entire MD simulation or energy
minimization (to convergence) with no communication from the driver
@ -270,7 +302,7 @@ The *command* keyword is required. It specifies a LAMMPS input script
command (as a single argument in quotes if it is multiple words).
Once the plugin library is launched, LAMMPS will execute this command.
Other previously-defined commands in the input script, such as the
:doc:`fix mdi/qm <fix_mdi_qm>` command, should perform MDI
communication with the engine, while the specified *command* executes.
Note that if *command* is an :doc:`include <include>` command, then it
could specify a filename with multiple LAMMPS commands.
@ -284,6 +316,31 @@ could specify a filename with multiple LAMMPS commands.
"mdi plugin" command could then load the same library plugin or
a different one if desired.
----------
The *mdi connect* and *mdi exit* commands are only used when LAMMPS is
operating as an MDI driver, and when other LAMMPS commands which send
MDI commands and associated data to/from the MDI engine are not able
to initiate and terminate the connection to the engine code.
The only current MDI driver command in LAMMPS is the :doc:`fix mdi/qm
<fix_mdi_qm>` command. If it is only used once in an input script
then it can initiate and terminate the connection. But if it is being
issued multiple times, e.g. in a loop that issues a :doc:`clear
<clear>` command, then it cannot initiate or terminate the connection
multiple times. Instead, the *mdi connect* and *mdi exit* commands
should be used outside the loop to initiate or terminate the connection.
See the examples/mdi/in.series.driver script for an example of how
this is done. The LOOP in that script is reading a series of data
file configurations and passing them to an MDI engine (e.g. quantum
code) for energy and force evaluation. A *clear* command inside the
loop wipes out the current system so a new one can be defined. This
operation also destroys all fixes. So the :doc:`fix mdi/qm
<fix_mdi_qm>` command is issued once per loop iteration. Note that it
includes a "connect no" option which disables the initiate/terminate
logic within that fix.
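Schematically, the pattern looks like this sketch (file names
hypothetical; see examples/mdi/in.series.driver for the full version):

.. code-block:: LAMMPS

   mdi connect                   # initiate the connection once
   variable i loop 3
   label    iter
   clear                         # wipes the system and all fixes
   units    metal                # re-specify settings wiped by clear
   read_data data.series${i}
   fix      qm all mdi/qm add no connect no
   run      0
   next     i
   jump     SELF iter
   mdi exit                      # terminate the connection once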
Restrictions
""""""""""""
@ -304,7 +361,7 @@ supports, e.g. *lj*, but then no unit conversion is performed.
Related commands
""""""""""""""""
:doc:`fix mdi/qm <fix_mdi_qm>`
Default
"""""""


@ -71,7 +71,7 @@ Syntax
*no_affinity* values = none
*kokkos* args = keyword value ...
zero or more keyword/value pairs may be appended
keywords = *neigh* or *neigh/qeq* or *neigh/thread* or *newton* or *binsize* or *comm* or *comm/exchange* or *comm/forward* *comm/pair/forward* *comm/fix/forward* or *comm/reverse* or *comm/pair/reverse* or *gpu/aware* or *pair/only*
*neigh* value = *full* or *half*
full = full neighbor list
half = half neighbor list built in thread-safe manner
@ -96,6 +96,7 @@ Syntax
*comm/pair/forward* value = *no* or *device*
*comm/fix/forward* value = *no* or *device*
*comm/reverse* value = *no* or *host* or *device*
*comm/pair/reverse* value = *no* or *device*
no = perform communication pack/unpack in non-KOKKOS mode
host = perform pack/unpack on host (e.g. with OpenMP threading)
device = perform pack/unpack on device (e.g. on GPU)
@ -500,7 +501,7 @@ rule of thumb may give too large a binsize and the default should be
overridden with a smaller value.
The *comm* and *comm/exchange* and *comm/forward* and *comm/pair/forward*
and *comm/fix/forward* and comm/reverse*
and *comm/fix/forward* and *comm/reverse* and *comm/pair/reverse*
keywords determine whether the host or device performs the packing and
unpacking of data when communicating per-atom data between processors.
"Exchange" communication happens only on timesteps that neighbor lists
@ -521,9 +522,16 @@ packing/unpacking data for the communication. A value of *host* means to
use the host, typically a multi-core CPU, and perform the
packing/unpacking in parallel with threads. A value of *device* means to
use the device, typically a GPU, to perform the packing/unpacking
operation. If a value of *host* is used for the *comm/pair/forward* or
*comm/fix/forward* keyword, it will be automatically be changed to *no*
since these keywords don't support *host* mode.
operation.
For the *comm/pair/forward* or *comm/fix/forward* or *comm/pair/reverse*
keywords, if a value of *host* is used it will automatically be changed
to *no* since these keywords don't support *host* mode. The value of
*no* will also always be used when running on the CPU, i.e. setting the
value to *device* will have no effect if the pair/fix style is running
on the CPU. For the *comm/fix/forward* or *comm/pair/reverse* keywords,
not all styles support *device* mode and in that case they will run in
*no* mode instead.
The optimal choice for these keywords depends on the input script and
the hardware used. The *no* value is useful for verifying that the

View File

@ -1,8 +1,11 @@
.. index:: pair_style meam
.. index:: pair_style meam/kk
pair_style meam command
=========================
Accelerator Variants: *meam/kk*
Syntax
""""""
@ -347,6 +350,12 @@ Most published MEAM parameter sets use the default values *attrac* = *repulse* =
Setting *repuls* = *attrac* = *delta* corresponds to the form used in several
recent published MEAM parameter sets, such as :ref:`(Valone) <Valone>`
----------
.. include:: accel_styles.rst
----------
.. note::
The default form of the *erose* expression in LAMMPS was corrected

View File

@ -373,6 +373,7 @@ Caltech
Camilloni
Camiloni
Campana
Cangi
Cao
Capolungo
Caro
@ -2681,6 +2682,7 @@ polyhedra
Polym
polymorphism
popen
Popoola
Popov
popstore
Poresag
@ -2824,6 +2826,7 @@ radians
Rafferty
rahman
Rahman
Rajamanickam
Ralf
Raman
ramped
@ -3676,13 +3679,19 @@ vx
Vx
vxcm
vxmu
vxx
vxy
vxz
vy
Vy
vycm
vyy
vyz
vz
Vz
vzcm
vzi
vzz
Waals
Wadley
wallstyle

View File

@ -31,6 +31,23 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL "Windows")
if(CMAKE_CROSSCOMPILING)
set_target_properties(paceplugin PROPERTIES LINK_FLAGS "-Wl,--export-all-symbols")
endif()
get_lammps_version(${LAMMPS_SOURCE_DIR}/version.h LAMMPS_VERSION)
find_program(MAKENSIS_PATH makensis)
if(MAKENSIS_PATH)
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/lammps.ico
${CMAKE_SOURCE_DIR}/lammps-text-logo-wide.bmp ${CMAKE_SOURCE_DIR}/paceplugin.nsis ${CMAKE_BINARY_DIR})
if(BUILD_MPI)
add_custom_target(package ${MAKENSIS_PATH} -V1 -DVERSION=${LAMMPS_VERSION}-MPI paceplugin.nsis
DEPENDS paceplugin
BYPRODUCTS LAMMPS-ML-PACE-plugin-${LAMMPS_VERSION}-MPI.exe)
else()
add_custom_target(package ${MAKENSIS_PATH} -V1 -DVERSION=${LAMMPS_VERSION} paceplugin.nsis
COMMAND ${CMAKE_COMMAND} -E echo ${PWD}
DEPENDS paceplugin lammps.ico lammps-text-logo-wide.bmp paceplugin.nsis
BYPRODUCTS LAMMPS-ML-PACE-plugin-${LAMMPS_VERSION}.exe)
endif()
endif()
else()
set_target_properties(paceplugin PROPERTIES LINK_FLAGS "-rdynamic")
endif()

View File

@ -2,10 +2,11 @@ These are examples that use the MDI package in LAMMPS, which uses the
MolSSI MDI library for coupling codes together and communicating
between them with MDI messages.
In MDI lingo, one code is the driver and another code is the engine.
The 2 codes can be written in any language; C++ (LAMMPS) and Python
are illustrated here. The 2 codes can each be stand-alone codes, in
which case they can be run on different numbers of processors. The 2
Within the MDI context, one code is the driver and another code is
the engine. The 2 codes can be written in any language; C++ (LAMMPS)
and Python are illustrated here. The 2 codes can each be stand-alone
codes, in which case they can be run on different numbers of processors.
The 2
codes can communicate either via TCP (sockets) or via MPI. For the
TCP case, the driver and engine need to be launched separately,
e.g. in 2 windows on your desktop machine. For the MPI case, a single
@ -19,87 +20,99 @@ LAMMPS supports operating in all these MDI modes. It can be an engine
operating either as a stand-alone code or as a plugin. It can also be
a driver and couple to an engine that is either a stand-alone code or
a plugin. Examples for all these use cases are in this directory.
The example commands below illustrate how to run all the variants.
The Run.sh file shows how to run in all the modes.  Type "sh Run.sh"
to try them all out.
To use LAMMPS as a plugin engine, you must build it as a shared library.
Something like this, which also builds the normal LAMMPS executable
lmp_mpi:
To use LAMMPS as a plugin engine, you must build it as a shared
library. Something like this with make, which also builds the normal
LAMMPS executable lmp_mpi:
cd src
make yes-mdi
make mode=shlib mpi
To use the serial_driver.py example you will need Python 3 with Numpy
and mpi4py available in your Python. Make sure LAMMPS and Python are
using the same version of MPI.
-------------------------------------------------
Examples 4 and 5 use Python scripts as MDI drivers. For this you will
need Python 3 with Numpy and mpi4py installed. Make sure LAMMPS and
Python/mpi4py are using the same version of MPI.
You will also need MDI installed in your Python.  You cannot use the
LAMMPS build of the MDI library for this, because LAMMPS builds MDI as
a static library, not the shared library that Python requires.
You can install MDI in your Python via conda:
% conda install -c conda-forge pymdi=1.4.1
or via pip:
% pip install pymdi==1.4.1
It is likely fine to leave off the version number, to get the latest
MDI version. But to be safe, 1.4.1 is the version LAMMPS is currently
using.
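As a quick sanity check (a minimal sketch, not one of the example
scripts), you can verify that mpi4py and the MDI package import from
the same Python, and print which MPI library mpi4py was built against:

% python3 -c "from mpi4py import MPI; import mdi; print(MPI.Get_library_version())"

If the MPI library reported here does not match the one LAMMPS was
built with, the MPI-coupled runs below may not work.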
-------------------------------------------------
5 example use-cases are explained below.
In the first 3 examples, results from running with MDI should be
identical to running without MDI (the "alone" log files).  Example #4
has no non-MDI run.  Example #5 results should match the non-MDI run
of example #1.
-------------------------------------------------
* Example #1 = run ab initio MD (AIMD)
Two instances of LAMMPS operate as a driver and engine. As an engine,
LAMMPS is a surrogate for a quantum code.
You can compare the thermo output in log.aimd.alone.1 to the thermo output in
any of the log.aimd.driver* files. It should be identical.
Note that the "alone" and "driver" input scripts have options for
running in NVE vs NPT Comment in/out the appropriate line to make
change. Nothing needs to be changed in the "engine" scripts.
-------------------------------------------------
-------------------------------------------------
* Example #1 = run ab initio MD (AIMD)
Two instances of LAMMPS operate as a driver and engine
As an engine, LAMMPS is a surrogate for a quantum code
* Example #2 = run LAMMPS, compute QM forces on snapshots from a long run
Note that the 2 input scripts in.aimd.alone and in.aimd.driver
have an option for running in NVE vs NPT mode. Comment in/out
the appropriate line to change modes. Nothing needs to be
changed in the in.aimd.engine or in.aimd.engine.plugin scripts.
Two instances of LAMMPS operate as a driver and engine. As an engine,
LAMMPS is a surrogate for a quantum code
---
You can compare the thermo output in log.snapshot.alone.1 to the
thermo output in any of the log.snapshot.driver* files. It should be
identical.
Run the entire calculation with a single instance of LAMMPS by itself;
results should be identical to running this example with MDI:
% lmp_mpi < in.aimd.alone
With MDI, the thermo output of the driver should match the thermo
output of the in.aimd.alone script.
---
Run with TCP: 1 proc each
% lmp_mpi -mdi "-name aimd -role DRIVER -method TCP -port 8021" -log log.aimd.driver -in in.aimd.driver
% lmp_mpi -mdi "-name LAMMPS -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.aimd.engine -in in.aimd.engine
---
Run with TCP: 3 procs + 4 procs
% mpirun -np 3 lmp_mpi -mdi "-name aimd -role DRIVER -method TCP -port 8021" -log log.aimd.driver -in in.aimd.driver
% mpirun -np 4 lmp_mpi -mdi "-name LAMMPS -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.aimd.engine -in in.aimd.engine
---
Run with MPI: 1 proc each
% mpirun -np 1 lmp_mpi -mdi "-name aimd -role DRIVER -method MPI" -log log.aimd.driver -in in.aimd.driver : -np 1 lmp_mpi -mdi "-name LAMMPS -role ENGINE -method MPI" -log log.aimd.engine -in in.aimd.engine
---
Run with MPI: 3 procs + 4 procs
% mpirun -np 3 lmp_mpi -mdi "-name aimd -role DRIVER -method MPI" -log log.aimd.driver -in in.aimd.driver : -np 4 lmp_mpi -mdi "-name LAMMPS -role ENGINE -method MPI" -log log.aimd.engine -in in.aimd.engine
---
Run in plugin mode: 1 proc
% lmp_mpi -mdi "-name aimd -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.aimd.driver.plugin -in in.aimd.driver.plugin
---
Run in plugin mode: 3 procs
% mpirun -np 3 lmp_mpi -mdi "-name aimd -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.aimd.driver.plugin -in in.aimd.driver.plugin
You can compare the dumped forces in dump.snapshot.alone.1 to the
forces in any of the dump.snapshot.* files. They should be identical,
although at step 0 the forces are "zero" and may be epsilon different.
-------------------------------------------------
-------------------------------------------------
* Example #2 = Python driver runs a sequence of unrelated LAMMPS calculations
Each calculation can be a single-point evaluation, MD run, or minimization
* Example #3 = run LAMMPS, compute QM forces on series of independent systems
Two instances of LAMMPS operate as a driver and engine. As an engine,
LAMMPS is a surrogate for a quantum code
You can compare the thermo output in log.series.alone.1 to the thermo
output in any of the log.series.driver* files. It should be
identical.
You can compare the dumped forces in dump.series.alone.1 to the forces
in any of the dump.series.* files.  They should be identical.
-------------------------------------------------
-------------------------------------------------
* Example #4 = Python driver runs a sequence of unrelated LAMMPS calculations
Each calculation can be a single-point evaluation, MD run, or
minimization
The sequence_driver.py code allows for optional switches in addition
to -mdi (required) and the -plugin and -plugin_args switches which are
@ -128,101 +141,31 @@ copied here:
# -seed 12345
# random number seed > 0, default = 12345
---
Run with TCP: 1 proc each
% python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method TCP -port 8021"
% lmp_mpi -mdi "-role ENGINE -name LAMMPS -method TCP -port 8021 -hostname localhost" -log log.sequence -in in.sequence
---
Run with TCP: 2 proc + 4 procs
% mpirun -np 2 python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method TCP -port 8021"
% mpirun -np 4 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method TCP -port 8021 -hostname localhost" -log log.sequence -in in.sequence
---
Run with MPI: 1 proc each
% mpirun -np 1 python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method MPI" : -np 1 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.sequence -in in.sequence
---
Run with MPI: 2 procs + 4 procs
% mpirun -np 2 python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method MPI" : -np 4 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.sequence -in in.sequence
---
Run in plugin mode: 1 proc
% python3 sequence_driver.py -plugin lammps -mdi "-role DRIVER -name sequence -method LINK -plugin_path /home/sjplimp/lammps/git/src" -plugin_args "-log log.sequence -in in.sequence"
---
Run in plugin mode: 3 procs
% mpirun -np 3 python3 sequence_driver.py -plugin lammps -mdi "-role DRIVER -name sequence -method LINK -plugin_path /home/sjplimp/lammps/git/src" -plugin_args "-log log.sequence -in in.sequence"
You can compare the thermo output in the log.sequence.engine.* files
from the different runs.  It should be identical.
-------------------------------------------------
-------------------------------------------------
* Example #3 = run AIMD with Python driver code and 2 LAMMPS instances as engines
First LAMMPS instance performs the MD timestepping
Second LAMMPS instance is surrogate QM = computes forces
* Example #5 = run AIMD with Python driver code and 2 LAMMPS instances as engines
First LAMMPS instance performs the MD timestepping. Second LAMMPS
instance is surrogate QM to compute forces.
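In outline, the per-timestep MDI traffic is the following (a sketch,
not the actual aimd_driver.py source; mmcomm and qmcomm are assumed to
be the MDI communicators for the two engines):

  mdi.MDI_Send_command("<COORDS", mmcomm)                   # get positions from the MM engine
  coords = mdi.MDI_Recv(3*natoms, mdi.MDI_DOUBLE, mmcomm)
  mdi.MDI_Send_command(">COORDS", qmcomm)                   # push positions to the QM engine
  mdi.MDI_Send(coords, 3*natoms, mdi.MDI_DOUBLE, qmcomm)
  mdi.MDI_Send_command("<FORCES", qmcomm)                   # QM engine returns forces
  forces = mdi.MDI_Recv(3*natoms, mdi.MDI_DOUBLE, qmcomm)
  mdi.MDI_Send_command(">FORCES", mmcomm)                   # hand forces to the MM integrator
  mdi.MDI_Send(forces, 3*natoms, mdi.MDI_DOUBLE, mmcomm)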
The aimd_driver.py code allows for an optional switch in addition to
-mdi (required) and the -plugin and -plugin_args switches which are
used to link to the 2 engines as plugin libraries.  The example run
commands below use the default values of the optional switch.  The
switch is also explained at the top of the file; the info is copied here:
# -nsteps 5
# number of timesteps in dynamics runs, default = 5
# -nsteps 10
# number of timesteps in dynamics runs, default = 10
---
This calculation is the same as Example #1, which used a LAMMPS driver
and a LAMMPS engine.  Now there is a Python driver and two LAMMPS engines.
Run the entire calculation with a single instance of LAMMPS by itself;
results should be identical to running this example with MDI:
You can compare the thermo output in log.aimd.alone.1 to the thermo
output in any of the log.aimdpy.* files.  The Total Energy printed out
by the Python driver script should also be identical.
% lmp_mpi < in.aimd.alone
With MDI, the driver prints the QM and Total energies. These should
match the PotEng and TotEng output of the in.aimd.alone script.
---
Run with TCP: 1 proc each
% python3 aimd_driver.py -mdi "-role DRIVER -name aimd -method TCP -port 8021"
% lmp_mpi -mdi "-role ENGINE -name MM -method TCP -port 8021 -hostname localhost" -log log.aimd.mm -in in.aimd.mm
% lmp_mpi -mdi "-role ENGINE -name QM -method TCP -port 8021 -hostname localhost" -log log.aimd.qm -in in.aimd.qm
---
Run with TCP: 2 procs + 2 procs + 3 procs
% mpirun -np 2 python3 aimd_driver.py -mdi "-role DRIVER -name aimd -method TCP -port 8021"
% mpirun -np 2 lmp_mpi -mdi "-role ENGINE -name MM -method TCP -port 8021 -hostname localhost" -log log.aimd.mm -in in.aimd.mm
% mpirun -np 3 lmp_mpi -mdi "-role ENGINE -name QM -method TCP -port 8021 -hostname localhost" -log log.aimd.qm -in in.aimd.qm
---
Run with MPI: 1 proc each
% mpirun -np 1 python3 aimd_driver.py -mdi "-role DRIVER -name aimd -method MPI" : -np 1 lmp_mpi -mdi "-role ENGINE -name MM -method MPI" -log log.aimd.mm -in in.aimd.mm : -np 1 lmp_mpi -mdi "-role ENGINE -name QM -method MPI" -log log.aimd.qm -in in.aimd.qm
---
Run with MPI: 2 procs + 2 procs + 3 procs
% mpirun -np 2 python3 aimd_driver.py -mdi "-role DRIVER -name aimd -method MPI" : -np 2 lmp_mpi -mdi "-role ENGINE -name MM -method MPI" -log log.aimd.mm -in in.aimd.mm : -np 3 lmp_mpi -mdi "-role ENGINE -name QM -method MPI" -log log.aimd.qm -in in.aimd.qm
E.g. Step 10: MM energy 1.69875, QM energy -6.31535, Total energy -4.6166

256
examples/mdi/Run.sh Normal file
View File

@ -0,0 +1,256 @@
# Run all the examples
# -------------------------------------------------
# -------------------------------------------------
# Example 1 = run ab initio MD (AIMD)
# ---
# Run without MDI
lmp_mpi -log log.aimd.alone.1 < in.aimd.alone
# ---
# Run with TCP: 1 proc each
lmp_mpi -mdi "-name LMP1 -role DRIVER -method TCP -port 8021" -log log.aimd.driver.tcp.1 -in in.aimd.driver &
lmp_mpi -mdi "-name LMP2 -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.aimd.engine.tcp.1 -in in.aimd.engine
# ---
# Run with TCP: 3 procs + 4 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method TCP -port 8021" -log log.aimd.driver.tcp.3 -in in.aimd.driver &
mpirun -np 4 lmp_mpi -mdi "-name LMP2 -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.aimd.engine.tcp.4 -in in.aimd.engine
# ---
# Run with MPI: 1 proc each
mpirun -np 1 lmp_mpi -mdi "-name LMP1 -role DRIVER -method MPI" -log log.aimd.driver.mpi.1 -in in.aimd.driver : -np 1 lmp_mpi -mdi "-name LMP2 -role ENGINE -method MPI" -log log.aimd.engine.mpi.1 -in in.aimd.engine
# ---
# Run with MPI: 3 procs + 4 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method MPI" -log log.aimd.driver.mpi.3 -in in.aimd.driver : -np 4 lmp_mpi -mdi "-name LMP2 -role ENGINE -method MPI" -log log.aimd.engine.mpi.4 -in in.aimd.engine
# ---
# Run in plugin mode: 1 proc
lmp_mpi -mdi "-name LMP1 -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.aimd.driver.plugin.1 -in in.aimd.driver.plugin
mv log.aimd.engine.plugin log.aimd.engine.plugin.1
# ---
# Run in plugin mode: 3 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.aimd.driver.plugin.3 -in in.aimd.driver.plugin
mv log.aimd.engine.plugin log.aimd.engine.plugin.3
# -------------------------------------------------
# -------------------------------------------------
# Example 2 = run LAMMPS, compute QM forces on snapshots from a long run
# ---
# Run without MDI
lmp_mpi -log log.snapshot.alone.1 < in.snapshot.alone
mv dump.snapshot.alone dump.snapshot.alone.1
# ---
# Run with TCP: 1 proc each
lmp_mpi -mdi "-name LMP1 -role DRIVER -method TCP -port 8021" -log log.snapshot.driver.tcp.1 -in in.snapshot.driver &
lmp_mpi -mdi "-name LMP2 -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.snapshot.engine.tcp.1 -in in.snapshot.engine
mv dump.snapshot.driver dump.snapshot.driver.tcp.1
# ---
# Run with TCP: 3 procs + 4 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method TCP -port 8021" -log log.snapshot.driver.tcp.3 -in in.snapshot.driver &
mpirun -np 4 lmp_mpi -mdi "-name LMP2 -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.snapshot.engine.tcp.4 -in in.snapshot.engine
mv dump.snapshot.driver dump.snapshot.driver.tcp.4
# ---
# Run with MPI: 1 proc each
mpirun -np 1 lmp_mpi -mdi "-name LMP1 -role DRIVER -method MPI" -log log.snapshot.driver.mpi.1 -in in.snapshot.driver : -np 1 lmp_mpi -mdi "-name LMP2 -role ENGINE -method MPI" -log log.snapshot.engine.mpi.1 -in in.snapshot.engine
mv dump.snapshot.driver dump.snapshot.driver.mpi.1
# ---
# Run with MPI: 3 procs + 4 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method MPI" -log log.snapshot.driver.mpi.3 -in in.snapshot.driver : -np 4 lmp_mpi -mdi "-name LMP2 -role ENGINE -method MPI" -log log.snapshot.engine.mpi.4 -in in.snapshot.engine
mv dump.snapshot.driver dump.snapshot.driver.mpi.4
# ---
# Run in plugin mode: 1 proc
lmp_mpi -mdi "-name LMP1 -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.snapshot.driver.plugin.1 -in in.snapshot.driver.plugin
mv log.snapshot.engine.plugin log.snapshot.engine.plugin.1
mv dump.snapshot.driver.plugin dump.snapshot.driver.plugin.1
# ---
# Run in plugin mode: 3 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.snapshot.driver.plugin.3 -in in.snapshot.driver.plugin
mv log.snapshot.engine.plugin log.snapshot.engine.plugin.3
mv dump.snapshot.driver.plugin dump.snapshot.driver.plugin.3
# -------------------------------------------------
# -------------------------------------------------
# Example 3 = run LAMMPS, compute QM forces on series of independent systems
# ---
# Run without MDI
lmp_mpi -log log.series.alone.1 < in.series.alone
mv dump.series.alone dump.series.alone.1
# ---
# Run with TCP: 1 proc each
lmp_mpi -mdi "-name LMP1 -role DRIVER -method TCP -port 8021" -log log.series.driver.tcp.1 -in in.series.driver &
lmp_mpi -mdi "-name LMP2 -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.series.engine.tcp.1 -in in.series.engine
mv dump.series.driver dump.series.driver.tcp.1
# ---
# Run with TCP: 3 procs + 4 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method TCP -port 8021" -log log.series.driver.tcp.3 -in in.series.driver &
mpirun -np 4 lmp_mpi -mdi "-name LMP2 -role ENGINE -method TCP -port 8021 -hostname localhost" -log log.series.engine.tcp.4 -in in.series.engine
mv dump.series.driver dump.series.driver.tcp.4
# ---
# Run with MPI: 1 proc each
mpirun -np 1 lmp_mpi -mdi "-name LMP1 -role DRIVER -method MPI" -log log.series.driver.mpi.1 -in in.series.driver : -np 1 lmp_mpi -mdi "-name LMP2 -role ENGINE -method MPI" -log log.series.engine.mpi.1 -in in.series.engine
mv dump.series.driver dump.series.driver.mpi.1
# ---
# Run with MPI: 3 procs + 4 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method MPI" -log log.series.driver.mpi.3 -in in.series.driver : -np 4 lmp_mpi -mdi "-name LMP2 -role ENGINE -method MPI" -log log.series.engine.mpi.4 -in in.series.engine
mv dump.series.driver dump.series.driver.mpi.4
# ---
# Run in plugin mode: 1 proc
lmp_mpi -mdi "-name LMP1 -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.series.driver.plugin.1 -in in.series.driver.plugin
mv log.series.engine.plugin log.series.engine.plugin.1
mv dump.series.driver.plugin dump.series.driver.plugin.1
# ---
# Run in plugin mode: 3 procs
mpirun -np 3 lmp_mpi -mdi "-name LMP1 -role DRIVER -method LINK -plugin_path /home/sjplimp/lammps/git/src" -log log.series.driver.plugin.3 -in in.series.driver.plugin
mv log.series.engine.plugin log.series.engine.plugin.3
mv dump.series.driver.plugin dump.series.driver.plugin.3
# -------------------------------------------------
# -------------------------------------------------
# Example 4 = Python driver runs a sequence of unrelated LAMMPS calculations
# ---
# Run with TCP: 1 proc each
python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method TCP -port 8021" &
lmp_mpi -mdi "-role ENGINE -name LMP -method TCP -port 8021 -hostname localhost" -log log.sequence.engine.tcp.1 -in in.sequence.python
# ---
# Run with TCP: 2 proc + 4 procs
mpirun -np 2 python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method TCP -port 8021" &
mpirun -np 4 lmp_mpi -mdi "-role ENGINE -name LMP -method TCP -port 8021 -hostname localhost" -log log.sequence.engine.tcp.4 -in in.sequence.python
# ---
# Run with MPI: 1 proc each
mpirun -np 1 python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method MPI" : -np 1 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method MPI" -log log.sequence.engine.mpi.1 -in in.sequence.python
# ---
# Run with MPI: 2 procs + 4 procs
mpirun -np 2 python3 sequence_driver.py -mdi "-role DRIVER -name sequence -method MPI" : -np 4 lmp_mpi -mdi "-role ENGINE -name LMP -method MPI" -log log.sequence.engine.mpi.4 -in in.sequence.python
# ---
# Run in plugin mode: 1 proc
python3 sequence_driver.py -plugin lammps -mdi "-role DRIVER -name sequence -method LINK -plugin_path /home/sjplimp/lammps/git/src" -plugin_args "-log log.sequence.engine.plugin.1 -in in.sequence.python"
# ---
# Run in plugin mode: 3 procs
mpirun -np 3 python3 sequence_driver.py -plugin lammps -mdi "-role DRIVER -name sequence -method LINK -plugin_path /home/sjplimp/lammps/git/src" -plugin_args "-log log.sequence.engine.plugin.3 -in in.sequence.python"
# -------------------------------------------------
# -------------------------------------------------
# Example 5 = run AIMD with Python driver code and 2 LAMMPS instances as engines
# ---
# Run with TCP: 1 proc each
python3 aimd_driver.py -mdi "-role DRIVER -name aimdpy -method TCP -port 8021" > log.aimdpy.driver.tcp.1 &
lmp_mpi -mdi "-role ENGINE -name MM -method TCP -port 8021 -hostname localhost" -log log.aimdpy.mm.tcp.1 -in in.aimdpy.mm &
lmp_mpi -mdi "-role ENGINE -name QM -method TCP -port 8021 -hostname localhost" -log log.aimdpy.qm.tcp.1 -in in.aimdpy.qm
# ---
# Run with TCP: 2 procs + 2 procs + 3 procs
mpirun -np 2 python3 aimd_driver.py -mdi "-role DRIVER -name aimdpy -method TCP -port 8021" > log.aimdpy.driver.tcp.2 &
mpirun -np 2 lmp_mpi -mdi "-role ENGINE -name MM -method TCP -port 8021 -hostname localhost" -log log.aimdpy.mm.tcp.2 -in in.aimdpy.mm &
mpirun -np 3 lmp_mpi -mdi "-role ENGINE -name QM -method TCP -port 8021 -hostname localhost" -log log.aimdpy.qm.tcp.3 -in in.aimdpy.qm
# ---
# Run with MPI: 1 proc each
mpirun -np 1 python3 aimd_driver.py -mdi "-role DRIVER -name aimdpy -method MPI" : -np 1 lmp_mpi -mdi "-role ENGINE -name MM -method MPI" -log log.aimdpy.mm.mpi.1 -in in.aimdpy.mm : -np 1 lmp_mpi -mdi "-role ENGINE -name QM -method MPI" -log log.aimdpy.qm.mpi.1 -in in.aimdpy.qm > log.aimdpy.driver.mpi.1
# ---
# Run with MPI: 2 procs + 2 procs + 3 procs
mpirun -np 2 python3 aimd_driver.py -mdi "-role DRIVER -name aimdpy -method MPI" : -np 2 lmp_mpi -mdi "-role ENGINE -name MM -method MPI" -log log.aimdpy.mm.mpi.2 -in in.aimdpy.mm : -np 3 lmp_mpi -mdi "-role ENGINE -name QM -method MPI" -log log.aimdpy.qm.mpi.3 -in in.aimdpy.qm > log.aimdpy.driver.mpi.2

View File

@ -23,8 +23,8 @@
# -plugin_args arglist
# args to add when launching plugin library, only when using plugin mode
# enclose arglist in quotes if multiple words
# -nsteps 5
# number of timesteps, default = 5
# -nsteps 10
# number of timesteps, default = 10
import sys
import mdi
@ -182,7 +182,7 @@ mdiarg = ""
plugin = ""
plugin_args = ""
nsteps = 5
nsteps = 10
# parse command-line args

1018
examples/mdi/data.series.0.7 Normal file

File diff suppressed because it is too large

1018
examples/mdi/data.series.0.8 Normal file

File diff suppressed because it is too large

1018
examples/mdi/data.series.0.9 Normal file

File diff suppressed because it is too large

1018
examples/mdi/data.snapshot Normal file

File diff suppressed because it is too large

(14 more file diffs suppressed because they are too large)

View File

@ -29,4 +29,4 @@ fix 1 all nve
thermo_style custom step temp pe etotal press vol
thermo 1
run 5
run 10

View File

@ -23,10 +23,9 @@ fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/aimd
fix_modify 2 energy yes virial yes
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
run 5
run 10

View File

@ -23,12 +23,11 @@ fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/aimd
fix_modify 2 energy yes virial yes
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi plugin lammps mdi "-role ENGINE -name lammps -method LINK" &
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" &
infile in.aimd.engine extra "-log log.aimd.engine.plugin" &
command "run 5"
command "run 10"

View File

@ -1,16 +1,11 @@
# 3d Lennard-Jones melt - MDI engine script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
lattice fcc 1.0
region box block 0 1 0 1 0 1
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
@ -19,4 +14,7 @@ pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine

View File

@ -19,3 +19,4 @@ fix 1 all nve
thermo 10
mdi engine

View File

@ -0,0 +1,37 @@
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
displace_atoms all random 0.1 0.1 0.1 48294
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
compute 1 all pressure NULL virial
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
run 0
write_dump all custom dump.series.alone &
id type x y z fx fy fz modify sort id append yes
clear
next rho
jump SELF LOOP

View File

@ -0,0 +1,39 @@
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
mdi connect
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
displace_atoms all random 0.1 0.1 0.1 48294
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
write_dump all custom dump.series.driver &
id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
mdi exit

View File

@ -0,0 +1,38 @@
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
displace_atoms all random 0.1 0.1 0.1 48294
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" &
infile in.series.engine &
extra "-log log.series.engine.plugin" &
command "run 0"
write_dump all custom dump.series.driver.plugin &
id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP

View File

@ -0,0 +1,17 @@
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
region box block 0 1 0 1 0 1
create_box 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
mdi engine

View File

@ -0,0 +1,28 @@
# 3d Lennard-Jones melt - MDI driver script
units lj
atom_style atomic
read_data data.snapshot
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
compute 1 all pressure NULL virial
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
thermo 100
dump 1 all custom 100 dump.snapshot.alone &
id type x y z fx fy fz
dump_modify 1 sort id
run 300

View File

@ -0,0 +1,33 @@
# 3d Lennard-Jones melt - MDI driver script
units lj
atom_style atomic
read_data data.snapshot
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
fix 2 all mdi/qm add no every 100 virial yes
compute 1 all pressure NULL virial
variable evirial equal (f_2[1]+f_2[2]+f_2[3])/3
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
thermo 100
dump 1 all custom 100 dump.snapshot.driver &
id type x y z f_2[1] f_2[2] f_2[3]
dump_modify 1 sort id
run 300 pre no post no every 100 &
"print 'QM eng = $(f_2/atoms)'" &
"print 'QM virial = $(v_evirial) $(f_2[1]) $(f_2[2]) $(f_2[3])'"

View File

@ -0,0 +1,42 @@
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
read_data data.snapshot
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
fix 2 all mdi/qm add no every 100 virial yes
compute 1 all pressure NULL virial
variable evirial equal (f_2[1]+f_2[2]+f_2[3])/3
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
thermo 100
dump 1 all custom 100 dump.snapshot.driver.plugin &
id type x y z f_2[1] f_2[2] f_2[3]
dump_modify 1 sort id
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" &
infile in.snapshot.engine &
extra "-log log.snapshot.engine.plugin" &
command """
run 300 pre no post no every 100
"print 'QM eng = $(f_2/atoms)'"
"print 'QM virial = $(v_evirial) $(f_2[1]) $(f_2[2]) $(f_2[3])'"
"""

View File

@ -0,0 +1,17 @@
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
region box block 0 1 0 1 0 1
create_box 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
mdi engine

View File

@ -1,4 +1,4 @@
LAMMPS (17 Feb 2022)
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
@ -39,8 +39,8 @@ fix 1 all nve
thermo_style custom step temp pe etotal press vol
thermo 1
run 5
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
run 10
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
@ -61,30 +61,35 @@ Step Temp PotEng TotEng Press Volume
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
Loop time of 0.00427098 on 1 procs for 5 steps with 500 atoms
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 0.00919691 on 1 procs for 10 steps with 500 atoms
Performance: 505739.085 tau/day, 1170.692 timesteps/s
73.9% CPU use with 1 MPI tasks x no OpenMP threads
Performance: 469723.136 tau/day, 1087.322 timesteps/s
98.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.0038665 | 0.0038665 | 0.0038665 | 0.0 | 90.53
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.0001297 | 0.0001297 | 0.0001297 | 0.0 | 3.04
Output | 0.00014902 | 0.00014902 | 0.00014902 | 0.0 | 3.49
Modify | 6.5249e-05 | 6.5249e-05 | 6.5249e-05 | 0.0 | 1.53
Other | | 6.054e-05 | | | 1.42
Pair | 0.0066536 | 0.0066536 | 0.0066536 | 0.0 | 72.35
Neigh | 0.0017906 | 0.0017906 | 0.0017906 | 0.0 | 19.47
Comm | 0.0002554 | 0.0002554 | 0.0002554 | 0.0 | 2.78
Output | 0.00029976 | 0.00029976 | 0.00029976 | 0.0 | 3.26
Modify | 9.8718e-05 | 9.8718e-05 | 9.8718e-05 | 0.0 | 1.07
Other | | 9.887e-05 | | | 1.08
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 19500 ave 19500 max 19500 min
Neighs: 19396 ave 19396 max 19396 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19500
Ave neighs/atom = 39
Neighbor list builds = 0
Total # of neighbors = 19396
Ave neighs/atom = 38.792
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,83 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
# NVE
fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
run 10
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp PotEng TotEng Press Volume
0 1.44 -6.7733681 -4.6176881 -5.0221006 592.27671
1 1.4377309 -6.7699814 -4.6176981 -5.0007431 592.27671
2 1.430825 -6.7596844 -4.6177394 -4.9363501 592.27671
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 0.0078442 on 1 procs for 10 steps with 500 atoms
Performance: 550725.026 tau/day, 1274.826 timesteps/s
99.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 1.592e-06 | 1.592e-06 | 1.592e-06 | 0.0 | 0.02
Comm | 2.6703e-05 | 2.6703e-05 | 2.6703e-05 | 0.0 | 0.34
Output | 0.00021168 | 0.00021168 | 0.00021168 | 0.0 | 2.70
Modify | 0.0075488 | 0.0075488 | 0.0075488 | 0.0 | 96.23
Other | | 5.544e-05 | | | 0.71
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,83 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 3 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
# NVE
fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
run 10
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp PotEng TotEng Press Volume
0 1.44 -6.7733681 -4.6176881 -5.0221006 592.27671
1 1.4377309 -6.7699814 -4.6176981 -5.0007431 592.27671
2 1.430825 -6.7596844 -4.6177394 -4.9363501 592.27671
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 0.00523112 on 3 procs for 10 steps with 500 atoms
Performance: 825827.658 tau/day, 1911.638 timesteps/s
98.8% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 5.95e-07 | 1.7073e-06 | 3.907e-06 | 0.0 | 0.03
Comm | 3.8259e-05 | 6.2707e-05 | 7.5974e-05 | 0.0 | 1.20
Output | 0.00017543 | 0.00021238 | 0.00028075 | 0.0 | 4.06
Modify | 0.004815 | 0.0048289 | 0.0048521 | 0.0 | 92.31
Other | | 0.0001254 | | | 2.40
Nlocal: 166.667 ave 176 max 150 min
Histogram: 1 0 0 0 0 0 0 0 0 2
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,84 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
# NVE
fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.aimd.engine extra "-log log.aimd.engine.plugin" command "run 10"
run 10
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp PotEng TotEng Press Volume
0 1.44 -6.7733681 -4.6176881 -5.0221006 592.27671
1 1.4377309 -6.7699814 -4.6176981 -5.0007431 592.27671
2 1.430825 -6.7596844 -4.6177394 -4.9363501 592.27671
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 0.00940117 on 1 procs for 10 steps with 500 atoms
Performance: 459517.175 tau/day, 1063.697 timesteps/s
96.1% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 1.9e-06 | 1.9e-06 | 1.9e-06 | 0.0 | 0.02
Comm | 3.0131e-05 | 3.0131e-05 | 3.0131e-05 | 0.0 | 0.32
Output | 0.00030359 | 0.00030359 | 0.00030359 | 0.0 | 3.23
Modify | 0.0090041 | 0.0090041 | 0.0090041 | 0.0 | 95.78
Other | | 6.144e-05 | | | 0.65
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,84 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 3 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
# NVE
fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.aimd.engine extra "-log log.aimd.engine.plugin" command "run 10"
run 10
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp PotEng TotEng Press Volume
0 1.44 -6.7733681 -4.6176881 -5.0221006 592.27671
1 1.4377309 -6.7699814 -4.6176981 -5.0007431 592.27671
2 1.430825 -6.7596844 -4.6177394 -4.9363501 592.27671
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 0.00613177 on 3 procs for 10 steps with 500 atoms
Performance: 704527.327 tau/day, 1630.850 timesteps/s
99.2% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 7.93e-07 | 1.8723e-06 | 3.996e-06 | 0.0 | 0.03
Comm | 4.4254e-05 | 7.4628e-05 | 9.321e-05 | 0.0 | 1.22
Output | 0.00019476 | 0.00024309 | 0.00032745 | 0.0 | 3.96
Modify | 0.005637 | 0.0056559 | 0.0056903 | 0.0 | 92.24
Other | | 0.0001563 | | | 2.55
Nlocal: 166.667 ave 176 max 150 min
Histogram: 1 0 0 0 0 0 0 0 0 2
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,83 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
# NVE
fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
run 10
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp PotEng TotEng Press Volume
0 1.44 -6.7733681 -4.6176881 -5.0221006 592.27671
1 1.4377309 -6.7699814 -4.6176981 -5.0007431 592.27671
2 1.430825 -6.7596844 -4.6177394 -4.9363501 592.27671
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 2.41182 on 1 procs for 10 steps with 500 atoms
Performance: 1791.180 tau/day, 4.146 timesteps/s
0.1% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 2.596e-06 | 2.596e-06 | 2.596e-06 | 0.0 | 0.00
Comm | 4.9627e-05 | 4.9627e-05 | 4.9627e-05 | 0.0 | 0.00
Output | 0.00063707 | 0.00063707 | 0.00063707 | 0.0 | 0.03
Modify | 2.411 | 2.411 | 2.411 | 0.0 | 99.97
Other | | 0.0001146 | | | 0.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:02

View File

@ -0,0 +1,83 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 3 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
# NVE
fix 1 all nve
# NPT
#fix 1 all npt temp 1.0 1.0 0.1 iso 1.0 1.0 1.0
fix 2 all mdi/qm virial yes
thermo_style custom step temp pe etotal press vol
thermo 1
run 10
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp PotEng TotEng Press Volume
0 1.44 -6.7733681 -4.6176881 -5.0221006 592.27671
1 1.4377309 -6.7699814 -4.6176981 -5.0007431 592.27671
2 1.430825 -6.7596844 -4.6177394 -4.9363501 592.27671
3 1.4189655 -6.7420029 -4.6178116 -4.8276957 592.27671
4 1.4016029 -6.7161132 -4.6179137 -4.6726332 592.27671
5 1.3779738 -6.6808361 -4.6180094 -4.468186 592.27671
6 1.3471497 -6.6344152 -4.6177322 -4.2103477 592.27671
7 1.3081237 -6.5752633 -4.6170021 -3.8956402 592.27671
8 1.2599751 -6.502724 -4.6165412 -3.5228721 592.27671
9 1.2021373 -6.4153971 -4.6157975 -3.0910274 592.27671
10 1.1347688 -6.3153532 -4.6166043 -2.6072847 592.27671
Loop time of 2.39983 on 3 procs for 10 steps with 500 atoms
Performance: 1800.127 tau/day, 4.167 timesteps/s
66.7% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 3.75e-07 | 1.3583e-06 | 2.574e-06 | 0.0 | 0.00
Comm | 5.4167e-05 | 0.00010938 | 0.00015338 | 0.0 | 0.00
Output | 0.00030885 | 0.00042099 | 0.00064497 | 0.0 | 0.02
Modify | 2.3988 | 2.3989 | 2.3989 | 0.0 | 99.96
Other | | 0.0004276 | | | 0.02
Nlocal: 166.667 ave 176 max 150 min
Histogram: 1 0 0 0 0 0 0 0 0 2
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:03

View File

@ -0,0 +1,55 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.144 | 3.144 | 3.144 Mbytes
Step Temp PotEng TotEng Press Volume
0 0 -6.7733681 -6.7733681 -6.2353173 592.27671
1 0 -6.7699814 -6.7699814 -6.2120481 592.27671
2 0 -6.7596844 -6.7596844 -6.1418368 592.27671
3 0 -6.7420029 -6.7420029 -6.0231905 592.27671
4 0 -6.7161132 -6.7161132 -5.8534999 592.27671
5 0 -6.6808361 -6.6808361 -5.6291449 592.27671
6 0 -6.6344152 -6.6344152 -5.3453369 592.27671
7 0 -6.5752633 -6.5752633 -4.9977496 592.27671
8 0 -6.502724 -6.502724 -4.5844158 592.27671
9 0 -6.4153971 -6.4153971 -4.103842 592.27671
10 0 -6.3153532 -6.3153532 -3.5633405 592.27671
Total wall time: 0:00:00

View File

@ -0,0 +1,55 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 2 by 2 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 2 by 2 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.109 | 3.109 | 3.109 Mbytes
Step Temp PotEng TotEng Press Volume
0 0 -6.7733681 -6.7733681 -6.2353173 592.27671
1 0 -6.7699814 -6.7699814 -6.2120481 592.27671
2 0 -6.7596844 -6.7596844 -6.1418368 592.27671
3 0 -6.7420029 -6.7420029 -6.0231905 592.27671
4 0 -6.7161132 -6.7161132 -5.8534999 592.27671
5 0 -6.6808361 -6.6808361 -5.6291449 592.27671
6 0 -6.6344152 -6.6344152 -5.3453369 592.27671
7 0 -6.5752633 -6.5752633 -4.9977496 592.27671
8 0 -6.502724 -6.502724 -4.5844158 592.27671
9 0 -6.4153971 -6.4153971 -4.103842 592.27671
10 0 -6.3153532 -6.3153532 -3.5633405 592.27671
Total wall time: 0:00:00

View File

@ -0,0 +1,55 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.144 | 3.144 | 3.144 Mbytes
Step Temp PotEng TotEng Press Volume
0 0 -6.7733681 -6.7733681 -6.2353173 592.27671
1 0 -6.7699814 -6.7699814 -6.2120481 592.27671
2 0 -6.7596844 -6.7596844 -6.1418368 592.27671
3 0 -6.7420029 -6.7420029 -6.0231905 592.27671
4 0 -6.7161132 -6.7161132 -5.8534999 592.27671
5 0 -6.6808361 -6.6808361 -5.6291449 592.27671
6 0 -6.6344152 -6.6344152 -5.3453369 592.27671
7 0 -6.5752633 -6.5752633 -4.9977496 592.27671
8 0 -6.502724 -6.502724 -4.5844158 592.27671
9 0 -6.4153971 -6.4153971 -4.103842 592.27671
10 0 -6.3153532 -6.3153532 -3.5633405 592.27671
Total wall time: 0:00:00

View File

@ -0,0 +1,55 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 1 by 3 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 3 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.124 | 3.13 | 3.133 Mbytes
Step Temp PotEng TotEng Press Volume
0 0 -6.7733681 -6.7733681 -6.2353173 592.27671
1 0 -6.7699814 -6.7699814 -6.2120481 592.27671
2 0 -6.7596844 -6.7596844 -6.1418368 592.27671
3 0 -6.7420029 -6.7420029 -6.0231905 592.27671
4 0 -6.7161132 -6.7161132 -5.8534999 592.27671
5 0 -6.6808361 -6.6808361 -5.6291449 592.27671
6 0 -6.6344152 -6.6344152 -5.3453369 592.27671
7 0 -6.5752633 -6.5752633 -4.9977496 592.27671
8 0 -6.502724 -6.502724 -4.5844158 592.27671
9 0 -6.4153971 -6.4153971 -4.103842 592.27671
10 0 -6.3153532 -6.3153532 -3.5633405 592.27671
Total wall time: 0:00:00

View File

@ -0,0 +1,55 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.144 | 3.144 | 3.144 Mbytes
Step Temp PotEng TotEng Press Volume
0 0 -6.7733681 -6.7733681 -6.2353173 592.27671
1 0 -6.7699814 -6.7699814 -6.2120481 592.27671
2 0 -6.7596844 -6.7596844 -6.1418368 592.27671
3 0 -6.7420029 -6.7420029 -6.0231905 592.27671
4 0 -6.7161132 -6.7161132 -5.8534999 592.27671
5 0 -6.6808361 -6.6808361 -5.6291449 592.27671
6 0 -6.6344152 -6.6344152 -5.3453369 592.27671
7 0 -6.5752633 -6.5752633 -4.9977496 592.27671
8 0 -6.502724 -6.502724 -4.5844158 592.27671
9 0 -6.4153971 -6.4153971 -4.103842 592.27671
10 0 -6.3153532 -6.3153532 -3.5633405 592.27671
Total wall time: 0:00:02

View File

@ -0,0 +1,55 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 2 by 2 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
thermo_style custom step temp pe etotal press vol
thermo 1
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 2 by 2 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.109 | 3.109 | 3.109 Mbytes
Step Temp PotEng TotEng Press Volume
0 0 -6.7733681 -6.7733681 -6.2353173 592.27671
1 0 -6.7699814 -6.7699814 -6.2120481 592.27671
2 0 -6.7596844 -6.7596844 -6.1418368 592.27671
3 0 -6.7420029 -6.7420029 -6.0231905 592.27671
4 0 -6.7161132 -6.7161132 -5.8534999 592.27671
5 0 -6.6808361 -6.6808361 -5.6291449 592.27671
6 0 -6.6344152 -6.6344152 -5.3453369 592.27671
7 0 -6.5752633 -6.5752633 -4.9977496 592.27671
8 0 -6.502724 -6.502724 -4.5844158 592.27671
9 0 -6.4153971 -6.4153971 -4.103842 592.27671
10 0 -6.3153532 -6.3153532 -3.5633405 592.27671
Total wall time: 0:00:02

View File

@ -0,0 +1,77 @@
LAMMPS (2 Jun 2022)
LAMMPS (2 Jun 2022)
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.000 seconds
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
Per MPI rank memory allocation (min/avg/max) = 3.144 | 3.144 | 3.144 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.7733681 0 -6.7733681 -6.2353173
Step 0: MM energy 2.15568, QM energy -6.77337, Total energy -4.61769
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 2.15568 1.2132167
1 0 -6.7699814 0 -6.7699814 -6.2120481
Step 1: MM energy 2.15228, QM energy -6.76998, Total energy -4.6177
1 1.4377309 0 0 2.1522832 1.211305
2 0 -6.7596844 0 -6.7596844 -6.1418368
Step 2: MM energy 2.14195, QM energy -6.75968, Total energy -4.61774
2 1.430825 0 0 2.141945 1.2054866
3 0 -6.7420029 0 -6.7420029 -6.0231905
Step 3: MM energy 2.12419, QM energy -6.742, Total energy -4.61781
3 1.4189655 0 0 2.1241913 1.1954949
4 0 -6.7161132 0 -6.7161132 -5.8534999
Step 4: MM energy 2.0982, QM energy -6.71611, Total energy -4.61791
4 1.4016029 0 0 2.0981995 1.1808667
5 0 -6.6808361 0 -6.6808361 -5.6291449
Step 5: MM energy 2.06283, QM energy -6.68084, Total energy -4.61801
5 1.3779738 0 0 2.0628267 1.1609589
6 0 -6.6344152 0 -6.6344152 -5.3453369
Step 6: MM energy 2.01668, QM energy -6.63442, Total energy -4.61773
6 1.3471497 0 0 2.016683 1.1349892
7 0 -6.5752633 0 -6.5752633 -4.9977496
Step 7: MM energy 1.95826, QM energy -6.57526, Total energy -4.617
7 1.3081237 0 0 1.9582612 1.1021094
8 0 -6.502724 0 -6.502724 -4.5844158
Step 8: MM energy 1.88618, QM energy -6.50272, Total energy -4.61654
8 1.2599751 0 0 1.8861828 1.0615437
9 0 -6.4153971 0 -6.4153971 -4.103842
Step 9: MM energy 1.7996, QM energy -6.4154, Total energy -4.6158
9 1.2021373 0 0 1.7995995 1.0128146
10 0 -6.3153532 0 -6.3153532 -3.5633405
Step 10: MM energy 1.69875, QM energy -6.31535, Total energy -4.6166
10 1.1347688 0 0 1.6987489 0.95605588
Total wall time: 0:00:00
Total wall time: 0:00:00

View File

@ -0,0 +1,77 @@
LAMMPS (2 Jun 2022)
LAMMPS (2 Jun 2022)
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 2 MPI processor grid
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.000 seconds
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 3 MPI processor grid
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Setting up Verlet run ...
Unit style : lj
Current step : 0
Time step : 0.005
Per MPI rank memory allocation (min/avg/max) = 3.124 | 3.13 | 3.133 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.7733681 0 -6.7733681 -6.2353173
Step 0: MM energy 2.15568, QM energy -6.77337, Total energy -4.61769
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 2.15568 1.2132167
1 0 -6.7699814 0 -6.7699814 -6.2120481
Step 1: MM energy 2.15228, QM energy -6.76998, Total energy -4.6177
1 1.4377309 0 0 2.1522832 1.211305
2 0 -6.7596844 0 -6.7596844 -6.1418368
Step 2: MM energy 2.14195, QM energy -6.75968, Total energy -4.61774
2 1.430825 0 0 2.141945 1.2054866
3 0 -6.7420029 0 -6.7420029 -6.0231905
Step 3: MM energy 2.12419, QM energy -6.742, Total energy -4.61781
3 1.4189655 0 0 2.1241913 1.1954949
4 0 -6.7161132 0 -6.7161132 -5.8534999
Step 4: MM energy 2.0982, QM energy -6.71611, Total energy -4.61791
4 1.4016029 0 0 2.0981995 1.1808667
5 0 -6.6808361 0 -6.6808361 -5.6291449
Step 5: MM energy 2.06283, QM energy -6.68084, Total energy -4.61801
5 1.3779738 0 0 2.0628267 1.1609589
6 0 -6.6344152 0 -6.6344152 -5.3453369
Step 6: MM energy 2.01668, QM energy -6.63442, Total energy -4.61773
6 1.3471497 0 0 2.016683 1.1349892
7 0 -6.5752633 0 -6.5752633 -4.9977496
Step 7: MM energy 1.95826, QM energy -6.57526, Total energy -4.617
7 1.3081237 0 0 1.9582612 1.1021094
8 0 -6.502724 0 -6.502724 -4.5844158
Step 8: MM energy 1.88618, QM energy -6.50272, Total energy -4.61654
8 1.2599751 0 0 1.8861828 1.0615437
9 0 -6.4153971 0 -6.4153971 -4.103842
Step 9: MM energy 1.7996, QM energy -6.4154, Total energy -4.6158
9 1.2021373 0 0 1.7995995 1.0128146
10 0 -6.3153532 0 -6.3153532 -3.5633405
Step 10: MM energy 1.69875, QM energy -6.31535, Total energy -4.6166
10 1.1347688 0 0 1.6987489 0.95605588
Total wall time: 0:00:00
Total wall time: 0:00:00

View File

@ -4,3 +4,8 @@ Step 2: MM energy 2.14195, QM energy -6.75968, Total energy -4.61774
Step 3: MM energy 2.12419, QM energy -6.742, Total energy -4.61781
Step 4: MM energy 2.0982, QM energy -6.71611, Total energy -4.61791
Step 5: MM energy 2.06283, QM energy -6.68084, Total energy -4.61801
Step 6: MM energy 2.01668, QM energy -6.63442, Total energy -4.61773
Step 7: MM energy 1.95826, QM energy -6.57526, Total energy -4.617
Step 8: MM energy 1.88618, QM energy -6.50272, Total energy -4.61654
Step 9: MM energy 1.7996, QM energy -6.4154, Total energy -4.6158
Step 10: MM energy 1.69875, QM energy -6.31535, Total energy -4.6166

View File

@ -4,3 +4,8 @@ Step 2: MM energy 2.14195, QM energy -6.75968, Total energy -4.61774
Step 3: MM energy 2.12419, QM energy -6.742, Total energy -4.61781
Step 4: MM energy 2.0982, QM energy -6.71611, Total energy -4.61791
Step 5: MM energy 2.06283, QM energy -6.68084, Total energy -4.61801
Step 6: MM energy 2.01668, QM energy -6.63442, Total energy -4.61773
Step 7: MM energy 1.95826, QM energy -6.57526, Total energy -4.617
Step 8: MM energy 1.88618, QM energy -6.50272, Total energy -4.61654
Step 9: MM energy 1.7996, QM energy -6.4154, Total energy -4.6158
Step 10: MM energy 1.69875, QM energy -6.31535, Total energy -4.6166

View File

@ -0,0 +1,49 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as MM engine for aimd_driver.py
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
mdi engine
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 2.15568 1.2132167
1 1.4377309 0 0 2.1522832 1.211305
2 1.430825 0 0 2.141945 1.2054866
3 1.4189655 0 0 2.1241913 1.1954949
4 1.4016029 0 0 2.0981995 1.1808667
5 1.3779738 0 0 2.0628267 1.1609589
6 1.3471497 0 0 2.016683 1.1349892
7 1.3081237 0 0 1.9582612 1.1021094
8 1.2599751 0 0 1.8861828 1.0615437
9 1.2021373 0 0 1.7995995 1.0128146
10 1.1347688 0 0 1.6987489 0.95605588
Total wall time: 0:00:00

View File

@ -0,0 +1,49 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as MM engine for aimd_driver.py
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.000 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
mdi engine
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 2.15568 1.2132167
1 1.4377309 0 0 2.1522832 1.211305
2 1.430825 0 0 2.141945 1.2054866
3 1.4189655 0 0 2.1241913 1.1954949
4 1.4016029 0 0 2.0981995 1.1808667
5 1.3779738 0 0 2.0628267 1.1609589
6 1.3471497 0 0 2.016683 1.1349892
7 1.3081237 0 0 1.9582612 1.1021094
8 1.2599751 0 0 1.8861828 1.0615437
9 1.2021373 0 0 1.7995995 1.0128146
10 1.1347688 0 0 1.6987489 0.95605588
Total wall time: 0:00:00

View File

@ -0,0 +1,49 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as MM engine for aimd_driver.py
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
mdi engine
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 2.15568 1.2132167
1 1.4377309 0 0 2.1522832 1.211305
2 1.430825 0 0 2.141945 1.2054866
3 1.4189655 0 0 2.1241913 1.1954949
4 1.4016029 0 0 2.0981995 1.1808667
5 1.3779738 0 0 2.0628267 1.1609589
6 1.3471497 0 0 2.016683 1.1349892
7 1.3081237 0 0 1.9582612 1.1021094
8 1.2599751 0 0 1.8861828 1.0615437
9 1.2021373 0 0 1.7995995 1.0128146
10 1.1347688 0 0 1.6987489 0.95605588
Total wall time: 0:00:02

View File

@ -0,0 +1,49 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as MM engine for aimd_driver.py
variable x index 5
variable y index 5
variable z index 5
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 $x 0 $y 0 $z
region box block 0 5 0 $y 0 $z
region box block 0 5 0 5 0 $z
region box block 0 5 0 5 0 5
create_box 1 box
Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
1 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
mdi engine
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 0 0 2.15568 1.2132167
1 1.4377309 0 0 2.1522832 1.211305
2 1.430825 0 0 2.141945 1.2054866
3 1.4189655 0 0 2.1241913 1.1954949
4 1.4016029 0 0 2.0981995 1.1808667
5 1.3779738 0 0 2.0628267 1.1609589
6 1.3471497 0 0 2.016683 1.1349892
7 1.3081237 0 0 1.9582612 1.1021094
8 1.2599751 0 0 1.8861828 1.0615437
9 1.2021373 0 0 1.7995995 1.0128146
10 1.1347688 0 0 1.6987489 0.95605588
Total wall time: 0:00:02
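
Editor's note: the MM-engine logs above pair with a Python driver (aimd_driver.py). As a rough illustration of the driver side of that exchange, here is a minimal single-engine sketch using the MolSSI MDI Python package; it is not the actual aimd_driver.py, the connection options (TCP, port 8021) and the engine launch flags are assumptions, and only the command names (<NATOMS, <COORDS, <ENERGY, EXIT) come from the MDI standard.

    # Minimal hypothetical MDI driver loop (not the actual aimd_driver.py).
    # Assumes the engine was started with something like:
    #   lmp -mdi "-role ENGINE -name LMP -method TCP -port 8021 -hostname localhost" -in in.aimd.mm
    import mdi

    mdi.MDI_Init("-role DRIVER -name driver -method TCP -port 8021")
    comm = mdi.MDI_Accept_Communicator()            # block until the engine connects

    mdi.MDI_Send_Command("<NATOMS", comm)           # ask the engine for its atom count
    natoms = mdi.MDI_Recv(1, mdi.MDI_INT, comm)

    mdi.MDI_Send_Command("<COORDS", comm)           # pull coordinates (3 doubles per atom)
    coords = mdi.MDI_Recv(3 * natoms, mdi.MDI_DOUBLE, comm)

    mdi.MDI_Send_Command("<ENERGY", comm)           # request the current potential energy
    energy = mdi.MDI_Recv(1, mdi.MDI_DOUBLE, comm)

    mdi.MDI_Send_Command("EXIT", comm)              # shut the engine down cleanly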

View File

@ -1,4 +1,4 @@
LAMMPS (17 Feb 2022)
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as surrogate QM engine for aimd_driver.py
variable x index 5
@ -20,7 +20,7 @@ Created orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms 1 box
Created 500 atoms
using lattice units in orthogonal box = (0 0 0) to (8.397981 8.397981 8.397981)
create_atoms CPU = 0.001 seconds
create_atoms CPU = 0.000 seconds
mass 1 1.0
pair_style lj/cut 2.5
@ -31,7 +31,7 @@ neigh_modify delay 0 every 1 check yes
mdi engine
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
@ -52,4 +52,9 @@ Step Temp E_pair E_mol TotEng Press
3 0 -6.7420029 0 -6.7420029 -6.0231905
4 0 -6.7161132 0 -6.7161132 -5.8534999
5 0 -6.6808361 0 -6.6808361 -5.6291449
6 0 -6.6344152 0 -6.6344152 -5.3453369
7 0 -6.5752633 0 -6.5752633 -4.9977496
8 0 -6.502724 0 -6.502724 -4.5844158
9 0 -6.4153971 0 -6.4153971 -4.103842
10 0 -6.3153532 0 -6.3153532 -3.5633405
Total wall time: 0:00:00

View File

@ -1,4 +1,4 @@
LAMMPS (17 Feb 2022)
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as surrogate QM engine for aimd_driver.py
variable x index 5
@ -31,7 +31,7 @@ neigh_modify delay 0 every 1 check yes
mdi engine
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
@ -52,4 +52,9 @@ Step Temp E_pair E_mol TotEng Press
3 0 -6.7420029 0 -6.7420029 -6.0231905
4 0 -6.7161132 0 -6.7161132 -5.8534999
5 0 -6.6808361 0 -6.6808361 -5.6291449
6 0 -6.6344152 0 -6.6344152 -5.3453369
7 0 -6.5752633 0 -6.5752633 -4.9977496
8 0 -6.502724 0 -6.502724 -4.5844158
9 0 -6.4153971 0 -6.4153971 -4.103842
10 0 -6.3153532 0 -6.3153532 -3.5633405
Total wall time: 0:00:00

View File

@ -1,4 +1,4 @@
LAMMPS (17 Feb 2022)
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as surrogate QM engine for aimd_driver.py
variable x index 5
@ -31,7 +31,7 @@ neigh_modify delay 0 every 1 check yes
mdi engine
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
@ -52,4 +52,9 @@ Step Temp E_pair E_mol TotEng Press
3 0 -6.7420029 0 -6.7420029 -6.0231905
4 0 -6.7161132 0 -6.7161132 -5.8534999
5 0 -6.6808361 0 -6.6808361 -5.6291449
Total wall time: 0:00:01
6 0 -6.6344152 0 -6.6344152 -5.3453369
7 0 -6.5752633 0 -6.5752633 -4.9977496
8 0 -6.502724 0 -6.502724 -4.5844158
9 0 -6.4153971 0 -6.4153971 -4.103842
10 0 -6.3153532 0 -6.3153532 -3.5633405
Total wall time: 0:00:02

View File

@ -1,4 +1,4 @@
LAMMPS (17 Feb 2022)
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - LAMMPS as surrogate QM engine for aimd_driver.py
variable x index 5
@ -31,7 +31,7 @@ neigh_modify delay 0 every 1 check yes
mdi engine
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
@ -52,4 +52,9 @@ Step Temp E_pair E_mol TotEng Press
3 0 -6.7420029 0 -6.7420029 -6.0231905
4 0 -6.7161132 0 -6.7161132 -5.8534999
5 0 -6.6808361 0 -6.6808361 -5.6291449
Total wall time: 0:00:01
6 0 -6.6344152 0 -6.6344152 -5.3453369
7 0 -6.5752633 0 -6.5752633 -4.9977496
8 0 -6.502724 0 -6.502724 -4.5844158
9 0 -6.4153971 0 -6.4153971 -4.103842
10 0 -6.3153532 0 -6.3153532 -3.5633405
Total wall time: 0:00:02

View File

@ -0,0 +1,90 @@
LAMMPS (2 Jun 2022)
# MDI engine script to process a series of evaluate, run, minimize commands
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6795962 1.6795962 1.6795962)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
thermo 10
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.9713146 0 -2.4947521 3.1253597
10 1.2380002 -4.3210346 0 -2.4930499 2.0015258
20 1.173596 -4.2234559 0 -2.4905682 2.3587731
30 1.2989994 -4.4124445 0 -2.4943907 1.903698
40 1.4510255 -4.6467459 0 -2.504216 1.2196259
50 1.4631454 -4.6641774 0 -2.5037518 1.2838406
60 1.2694188 -4.3794267 0 -2.5050505 2.4497113
70 1.3363814 -4.4759884 0 -2.5027378 2.2441463
80 1.402534 -4.5752515 0 -2.5043224 1.9011715
90 1.3870321 -4.5512479 0 -2.5032084 2.0040237
100 1.3635651 -4.5209384 0 -2.5075493 1.9773816
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -4.1934138 0 -2.7168513 0.72358299
10 1.0170498 -4.2225288 0 -2.7207912 0.7556766
20 0.92567967 -4.0920979 0 -2.725274 1.2463143
30 1.0851758 -4.3346599 0 -2.73233 0.53176652
40 1.2163699 -4.5351986 0 -2.7391524 -0.077915153
50 1.2305739 -4.5558134 0 -2.7387942 -0.10711153
60 1.1172288 -4.3979372 0 -2.7482791 0.52752067
70 1.2228415 -4.5540741 0 -2.7484722 0.11937533
80 1.1776333 -4.4870195 0 -2.7481704 0.33904864
90 1.219283 -4.5483185 0 -2.747971 0.17898549
100 1.2138939 -4.5432229 0 -2.7508327 0.3076354
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.8524214 0 -2.3758589 4.6814052
10 1.3356745 -4.3481612 0 -2.3759544 3.061856
20 1.1791601 -4.117932 0 -2.3768284 3.8565
30 1.3435556 -4.3613609 0 -2.3775171 3.0728735
40 1.5628445 -4.6886004 0 -2.3809628 2.0989245
50 1.4735556 -4.5569123 0 -2.3811152 2.6364099
60 1.609387 -4.7581056 0 -2.3817452 1.8988642
70 1.5014902 -4.5938759 0 -2.3768318 2.458161
80 1.3763383 -4.4089865 0 -2.3767369 3.0379808
90 1.498202 -4.5909613 0 -2.3787724 2.5543714
100 1.43934 -4.5009545 0 -2.375679 3.0923444
Total wall time: 0:00:00

View File

@ -0,0 +1,90 @@
LAMMPS (2 Jun 2022)
# MDI engine script to process a series of evaluate, run, minimize commands
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6795962 1.6795962 1.6795962)
1 by 2 by 2 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
thermo 10
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 2 by 2 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.9713146 0 -2.4947521 3.1253597
10 1.2380002 -4.3210346 0 -2.4930499 2.0015258
20 1.173596 -4.2234559 0 -2.4905682 2.3587731
30 1.2989994 -4.4124445 0 -2.4943907 1.903698
40 1.4510255 -4.6467459 0 -2.504216 1.2196259
50 1.4631454 -4.6641774 0 -2.5037518 1.2838406
60 1.2694188 -4.3794267 0 -2.5050505 2.4497113
70 1.3363814 -4.4759884 0 -2.5027378 2.2441463
80 1.402534 -4.5752515 0 -2.5043224 1.9011715
90 1.3870321 -4.5512479 0 -2.5032084 2.0040237
100 1.3635651 -4.5209384 0 -2.5075493 1.9773816
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 2 by 2 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -4.1934138 0 -2.7168513 0.72358299
10 1.0170498 -4.2225288 0 -2.7207912 0.7556766
20 0.92567967 -4.0920979 0 -2.725274 1.2463143
30 1.0851758 -4.3346599 0 -2.73233 0.53176652
40 1.2163699 -4.5351986 0 -2.7391524 -0.077915153
50 1.2305739 -4.5558134 0 -2.7387942 -0.10711153
60 1.1172288 -4.3979372 0 -2.7482791 0.52752067
70 1.2228415 -4.5540741 0 -2.7484722 0.11937533
80 1.1776333 -4.4870195 0 -2.7481704 0.33904864
90 1.219283 -4.5483185 0 -2.747971 0.17898549
100 1.2138939 -4.5432229 0 -2.7508327 0.3076354
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 2 by 2 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.8524214 0 -2.3758589 4.6814052
10 1.3356745 -4.3481612 0 -2.3759544 3.061856
20 1.1791601 -4.117932 0 -2.3768284 3.8565
30 1.3435556 -4.3613609 0 -2.3775171 3.0728735
40 1.5628445 -4.6886004 0 -2.3809628 2.0989245
50 1.4735556 -4.5569123 0 -2.3811152 2.6364099
60 1.609387 -4.7581056 0 -2.3817452 1.8988642
70 1.5014902 -4.5938759 0 -2.3768318 2.458161
80 1.3763383 -4.4089865 0 -2.3767369 3.0379808
90 1.498202 -4.5909613 0 -2.3787724 2.5543714
100 1.43934 -4.5009545 0 -2.375679 3.0923444
Total wall time: 0:00:00

View File

@ -0,0 +1,90 @@
LAMMPS (2 Jun 2022)
# MDI engine script to process a series of evaluate, run, minimize commands
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6795962 1.6795962 1.6795962)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
thermo 10
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.9713146 0 -2.4947521 3.1253597
10 1.2380002 -4.3210346 0 -2.4930499 2.0015258
20 1.173596 -4.2234559 0 -2.4905682 2.3587731
30 1.2989994 -4.4124445 0 -2.4943907 1.903698
40 1.4510255 -4.6467459 0 -2.504216 1.2196259
50 1.4631454 -4.6641774 0 -2.5037518 1.2838406
60 1.2694188 -4.3794267 0 -2.5050505 2.4497113
70 1.3363814 -4.4759884 0 -2.5027378 2.2441463
80 1.402534 -4.5752515 0 -2.5043224 1.9011715
90 1.3870321 -4.5512479 0 -2.5032084 2.0040237
100 1.3635651 -4.5209384 0 -2.5075493 1.9773816
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -4.1934138 0 -2.7168513 0.72358299
10 1.0170498 -4.2225288 0 -2.7207912 0.7556766
20 0.92567967 -4.0920979 0 -2.725274 1.2463143
30 1.0851758 -4.3346599 0 -2.73233 0.53176652
40 1.2163699 -4.5351986 0 -2.7391524 -0.077915153
50 1.2305739 -4.5558134 0 -2.7387942 -0.10711153
60 1.1172288 -4.3979372 0 -2.7482791 0.52752067
70 1.2228415 -4.5540741 0 -2.7484722 0.11937533
80 1.1776333 -4.4870195 0 -2.7481704 0.33904864
90 1.219283 -4.5483185 0 -2.747971 0.17898549
100 1.2138939 -4.5432229 0 -2.7508327 0.3076354
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.8524214 0 -2.3758589 4.6814052
10 1.3356745 -4.3481612 0 -2.3759544 3.061856
20 1.1791601 -4.117932 0 -2.3768284 3.8565
30 1.3435556 -4.3613609 0 -2.3775171 3.0728735
40 1.5628445 -4.6886004 0 -2.3809628 2.0989245
50 1.4735556 -4.5569123 0 -2.3811152 2.6364099
60 1.609387 -4.7581056 0 -2.3817452 1.8988642
70 1.5014902 -4.5938759 0 -2.3768318 2.458161
80 1.3763383 -4.4089865 0 -2.3767369 3.0379808
90 1.498202 -4.5909613 0 -2.3787724 2.5543714
100 1.43934 -4.5009545 0 -2.375679 3.0923444
Total wall time: 0:00:00

View File

@ -0,0 +1,90 @@
LAMMPS (2 Jun 2022)
# MDI engine script to process a series of evaluate, run, minimize commands
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6795962 1.6795962 1.6795962)
1 by 1 by 3 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
thermo 10
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 3 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.07 | 3.07 | 3.071 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.9713146 0 -2.4947521 3.1253597
10 1.2380002 -4.3210346 0 -2.4930499 2.0015258
20 1.173596 -4.2234559 0 -2.4905682 2.3587731
30 1.2989994 -4.4124445 0 -2.4943907 1.903698
40 1.4510255 -4.6467459 0 -2.504216 1.2196259
50 1.4631454 -4.6641774 0 -2.5037518 1.2838406
60 1.2694188 -4.3794267 0 -2.5050505 2.4497113
70 1.3363814 -4.4759884 0 -2.5027378 2.2441463
80 1.402534 -4.5752515 0 -2.5043224 1.9011715
90 1.3870321 -4.5512479 0 -2.5032084 2.0040237
100 1.3635651 -4.5209384 0 -2.5075493 1.9773816
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 3 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.07 | 3.07 | 3.071 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -4.1934138 0 -2.7168513 0.72358299
10 1.0170498 -4.2225288 0 -2.7207912 0.7556766
20 0.92567967 -4.0920979 0 -2.725274 1.2463143
30 1.0851758 -4.3346599 0 -2.73233 0.53176652
40 1.2163699 -4.5351986 0 -2.7391524 -0.077915153
50 1.2305739 -4.5558134 0 -2.7387942 -0.10711153
60 1.1172288 -4.3979372 0 -2.7482791 0.52752067
70 1.2228415 -4.5540741 0 -2.7484722 0.11937533
80 1.1776333 -4.4870195 0 -2.7481704 0.33904864
90 1.219283 -4.5483185 0 -2.747971 0.17898549
100 1.2138939 -4.5432229 0 -2.7508327 0.3076354
delete_atoms group all
Deleted 64 atoms, new total = 0
3 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.07 | 3.07 | 3.071 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.8524214 0 -2.3758589 4.6814052
10 1.3356745 -4.3481612 0 -2.3759544 3.061856
20 1.1791601 -4.117932 0 -2.3768284 3.8565
30 1.3435556 -4.3613609 0 -2.3775171 3.0728735
40 1.5628445 -4.6886004 0 -2.3809628 2.0989245
50 1.4735556 -4.5569123 0 -2.3811152 2.6364099
60 1.609387 -4.7581056 0 -2.3817452 1.8988642
70 1.5014902 -4.5938759 0 -2.3768318 2.458161
80 1.3763383 -4.4089865 0 -2.3767369 3.0379808
90 1.498202 -4.5909613 0 -2.3787724 2.5543714
100 1.43934 -4.5009545 0 -2.375679 3.0923444
Total wall time: 0:00:00

View File

@ -0,0 +1,90 @@
LAMMPS (2 Jun 2022)
# MDI engine script to process a series of evaluate, run, minimize commands
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6795962 1.6795962 1.6795962)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
thermo 10
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.9713146 0 -2.4947521 3.1253597
10 1.2380002 -4.3210346 0 -2.4930499 2.0015258
20 1.173596 -4.2234559 0 -2.4905682 2.3587731
30 1.2989994 -4.4124445 0 -2.4943907 1.903698
40 1.4510255 -4.6467459 0 -2.504216 1.2196259
50 1.4631454 -4.6641774 0 -2.5037518 1.2838406
60 1.2694188 -4.3794267 0 -2.5050505 2.4497113
70 1.3363814 -4.4759884 0 -2.5027378 2.2441463
80 1.402534 -4.5752515 0 -2.5043224 1.9011715
90 1.3870321 -4.5512479 0 -2.5032084 2.0040237
100 1.3635651 -4.5209384 0 -2.5075493 1.9773816
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -4.1934138 0 -2.7168513 0.72358299
10 1.0170498 -4.2225288 0 -2.7207912 0.7556766
20 0.92567967 -4.0920979 0 -2.725274 1.2463143
30 1.0851758 -4.3346599 0 -2.73233 0.53176652
40 1.2163699 -4.5351986 0 -2.7391524 -0.077915153
50 1.2305739 -4.5558134 0 -2.7387942 -0.10711153
60 1.1172288 -4.3979372 0 -2.7482791 0.52752067
70 1.2228415 -4.5540741 0 -2.7484722 0.11937533
80 1.1776333 -4.4870195 0 -2.7481704 0.33904864
90 1.219283 -4.5483185 0 -2.747971 0.17898549
100 1.2138939 -4.5432229 0 -2.7508327 0.3076354
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 1 by 1 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.8524214 0 -2.3758589 4.6814052
10 1.3356745 -4.3481612 0 -2.3759544 3.061856
20 1.1791601 -4.117932 0 -2.3768284 3.8565
30 1.3435556 -4.3613609 0 -2.3775171 3.0728735
40 1.5628445 -4.6886004 0 -2.3809628 2.0989245
50 1.4735556 -4.5569123 0 -2.3811152 2.6364099
60 1.609387 -4.7581056 0 -2.3817452 1.8988642
70 1.5014902 -4.5938759 0 -2.3768318 2.458161
80 1.3763383 -4.4089865 0 -2.3767369 3.0379808
90 1.498202 -4.5909613 0 -2.3787724 2.5543714
100 1.43934 -4.5009545 0 -2.375679 3.0923444
Total wall time: 0:00:00

View File

@ -0,0 +1,90 @@
LAMMPS (2 Jun 2022)
# MDI engine script to process a series of evaluate, run, minimize commands
units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.6795962 1.6795962 1.6795962)
1 by 2 by 2 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all nve
thermo 10
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 2 by 2 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.9713146 0 -2.4947521 3.1253597
10 1.2380002 -4.3210346 0 -2.4930499 2.0015258
20 1.173596 -4.2234559 0 -2.4905682 2.3587731
30 1.2989994 -4.4124445 0 -2.4943907 1.903698
40 1.4510255 -4.6467459 0 -2.504216 1.2196259
50 1.4631454 -4.6641774 0 -2.5037518 1.2838406
60 1.2694188 -4.3794267 0 -2.5050505 2.4497113
70 1.3363814 -4.4759884 0 -2.5027378 2.2441463
80 1.402534 -4.5752515 0 -2.5043224 1.9011715
90 1.3870321 -4.5512479 0 -2.5032084 2.0040237
100 1.3635651 -4.5209384 0 -2.5075493 1.9773816
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 2 by 2 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -4.1934138 0 -2.7168513 0.72358299
10 1.0170498 -4.2225288 0 -2.7207912 0.7556766
20 0.92567967 -4.0920979 0 -2.725274 1.2463143
30 1.0851758 -4.3346599 0 -2.73233 0.53176652
40 1.2163699 -4.5351986 0 -2.7391524 -0.077915153
50 1.2305739 -4.5558134 0 -2.7387942 -0.10711153
60 1.1172288 -4.3979372 0 -2.7482791 0.52752067
70 1.2228415 -4.5540741 0 -2.7484722 0.11937533
80 1.1776333 -4.4870195 0 -2.7481704 0.33904864
90 1.219283 -4.5483185 0 -2.747971 0.17898549
100 1.2138939 -4.5432229 0 -2.7508327 0.3076354
delete_atoms group all
Deleted 64 atoms, new total = 0
1 by 2 by 2 MPI processor grid
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.077 | 3.077 | 3.077 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 -3.8524214 0 -2.3758589 4.6814052
10 1.3356745 -4.3481612 0 -2.3759544 3.061856
20 1.1791601 -4.117932 0 -2.3768284 3.8565
30 1.3435556 -4.3613609 0 -2.3775171 3.0728735
40 1.5628445 -4.6886004 0 -2.3809628 2.0989245
50 1.4735556 -4.5569123 0 -2.3811152 2.6364099
60 1.609387 -4.7581056 0 -2.3817452 1.8988642
70 1.5014902 -4.5938759 0 -2.3768318 2.458161
80 1.3763383 -4.4089865 0 -2.3767369 3.0379808
90 1.498202 -4.5909613 0 -2.3787724 2.5543714
100 1.43934 -4.5009545 0 -2.375679 3.0923444
Total wall time: 0:00:00

View File

@ -0,0 +1,248 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
compute 1 all pressure NULL virial
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.137 | 3.137 | 3.137 Mbytes
Step Temp PotEng c_1 c_1[1] c_1[2] c_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 2.729e-06 on 1 procs for 0 steps with 500 atoms
109.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.729e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 15687 ave 15687 max 15687 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 15687
Ave neighs/atom = 31.374
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.alone id type x y z fx fy fz modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
compute 1 all pressure NULL virial
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.135 | 3.135 | 3.135 Mbytes
Step Temp PotEng c_1 c_1[1] c_1[2] c_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 1.434e-06 on 1 procs for 0 steps with 500 atoms
139.5% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.434e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 18734 ave 18734 max 18734 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18734
Ave neighs/atom = 37.468
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.alone id type x y z fx fy fz modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
compute 1 all pressure NULL virial
thermo_style custom step temp pe c_1 c_1[1] c_1[2] c_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.134 | 3.134 | 3.134 Mbytes
Step Temp PotEng c_1 c_1[1] c_1[2] c_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 1.483e-06 on 1 procs for 0 steps with 500 atoms
134.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.483e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1956 ave 1956 max 1956 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 20023 ave 20023 max 20023 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 20023
Ave neighs/atom = 40.046
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.alone id type x y z fx fy fz modify sort id append yes
clear
next rho
jump SELF LOOP
Total wall time: 0:00:00

View File

@ -0,0 +1,213 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
mdi connect
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 1.919e-06 on 1 procs for 0 steps with 500 atoms
0.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.919e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 1.141e-06 on 1 procs for 0 steps with 500 atoms
87.6% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.141e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.002 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 1.019e-06 on 1 procs for 0 steps with 500 atoms
98.1% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.019e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
mdi exit
Total wall time: 0:00:00
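
Editor's note: the driver logs above (mdi connect ... mdi exit, with fix mdi/qm ... connect no) expect a long-running engine on the other end of the connection. Below is a hedged sketch of starting that engine from Python through the LAMMPS module; the input file name and connection flags are assumptions, while the -mdi command-line switch and the blocking behavior of the "mdi engine" command match the logs.

    # Hypothetical engine-side launch via the LAMMPS Python module.
    # Assumes a LAMMPS build with the MDI package and a driver listening over TCP.
    from lammps import lammps

    args = ["-mdi", "-role ENGINE -name LMP -method TCP -port 8021 -hostname localhost"]
    lmp = lammps(cmdargs=args)        # create a LAMMPS instance with MDI options
    lmp.file("in.series.engine")      # script ends with "mdi engine": LAMMPS now
                                      # serves driver commands until it receives EXIT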

View File

@ -0,0 +1,213 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
mdi connect
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 3 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 4.00933e-06 on 3 procs for 0 steps with 500 atoms
91.5% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 4.009e-06 | | |100.00
Nlocal: 166.667 ave 177 max 150 min
Histogram: 1 0 0 0 0 0 0 0 1 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 3 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 3.11833e-06 on 3 procs for 0 steps with 500 atoms
117.6% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.118e-06 | | |100.00
Nlocal: 166.667 ave 178 max 150 min
Histogram: 1 0 0 0 0 0 0 1 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
3 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 2.85467e-06 on 3 procs for 0 steps with 500 atoms
140.1% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.855e-06 | | |100.00
Nlocal: 166.667 ave 181 max 150 min
Histogram: 1 0 0 0 0 0 1 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
mdi exit
Total wall time: 0:00:00

View File

@ -0,0 +1,212 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 2.359e-06 on 1 procs for 0 steps with 500 atoms
127.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.359e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver.plugin id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 1.468e-06 on 1 procs for 0 steps with 500 atoms
204.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.468e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver.plugin id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 1.378e-06 on 1 procs for 0 steps with 500 atoms
145.1% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.378e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver.plugin id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
Total wall time: 0:00:00

View File

@ -0,0 +1,212 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 3 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 3.50867e-06 on 3 procs for 0 steps with 500 atoms
114.0% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.509e-06 | | |100.00
Nlocal: 166.667 ave 177 max 150 min
Histogram: 1 0 0 0 0 0 0 0 1 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver.plugin id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 3 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 3.225e-06 on 3 procs for 0 steps with 500 atoms
82.7% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.225e-06 | | |100.00
Nlocal: 166.667 ave 178 max 150 min
Histogram: 1 0 0 0 0 0 0 1 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver.plugin id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
3 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.003 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 2.713e-06 on 3 procs for 0 steps with 500 atoms
98.3% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.713e-06 | | |100.00
Nlocal: 166.667 ave 181 max 150 min
Histogram: 1 0 0 0 0 0 1 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver.plugin id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
Total wall time: 0:00:00
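The two logs above use plugin mode instead of a connection: there is no mdi connect / mdi exit pair, the fix drops the "connect no" keyword, and each evaluation is wrapped by an mdi plugin command that loads a second LAMMPS as a shared-library engine, has it read in.series.engine, and executes the quoted driver command while the plugin is live. The per-iteration core, copied from the echoed commands:

fix 1 all mdi/qm add no virial yes
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
# load the engine plugin, run in.series.engine in it, then issue "run 0" on the driver side
mdi plugin lammps mdi "-role ENGINE -name LMP2 -method LINK" infile in.series.engine extra "-log log.series.engine.plugin" command "run 0"

The bare "run 0" echoed right after this line in the logs is that command argument being executed while the plugin is loaded, not a separate script line.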

View File

@ -0,0 +1,213 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
mdi connect
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.005 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 3.017e-06 on 1 procs for 0 steps with 500 atoms
99.4% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.017e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.005 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 1.977e-06 on 1 procs for 0 steps with 500 atoms
101.2% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.977e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 2.042e-06 on 1 procs for 0 steps with 500 atoms
97.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.042e-06 | | |100.00
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
mdi exit
Total wall time: 0:00:00

View File

@ -0,0 +1,213 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI driver script
variable x index 5
variable y index 5
variable z index 5
variable rho index 0.7 0.8 0.9
mdi connect
label LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.7
Reading data file ...
orthogonal box = (0 0 0) to (8.9390354 8.9390354 8.9390354)
1 by 1 by 3 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.005 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -5.200819 -4.5647906 -4.5444385 -4.5699966 -4.5799366
Loop time of 9.08933e-06 on 3 procs for 0 steps with 500 atoms
95.3% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.089e-06 | | |100.00
Nlocal: 166.667 ave 177 max 150 min
Histogram: 1 0 0 0 0 0 0 0 1 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.8
Reading data file ...
orthogonal box = (0 0 0) to (8.5498797 8.5498797 8.5498797)
1 by 1 by 3 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.005 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.0419499 -4.2737827 -4.2865535 -4.2176976 -4.3170971
Loop time of 3.63567e-06 on 3 procs for 0 steps with 500 atoms
100.9% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.636e-06 | | |100.00
Nlocal: 166.667 ave 178 max 150 min
Histogram: 1 0 0 0 0 0 0 1 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
units lj
atom_style atomic
read_data data.series.${rho}
read_data data.series.0.9
Reading data file ...
orthogonal box = (0 0 0) to (8.2207069 8.2207069 8.2207069)
3 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
read_data CPU = 0.004 seconds
displace_atoms all random 0.1 0.1 0.1 48294
Displacing atoms ...
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
fix 1 all mdi/qm add no virial yes connect no
variable evirial equal (f_1[1]+f_1[2]+f_1[3])/3
thermo_style custom step temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
run 0
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (../atom.cpp:2127)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (../comm_brick.cpp:210)
Per MPI rank memory allocation (min/avg/max) = 2.297 | 2.297 | 2.297 Mbytes
Step Temp f_1 v_evirial f_1[1] f_1[2] f_1[3]
0 0 -6.4477578 -1.5268553 -1.5717034 -1.568693 -1.4401696
Loop time of 3.806e-06 on 3 procs for 0 steps with 500 atoms
105.1% CPU use with 3 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 3.806e-06 | | |100.00
Nlocal: 166.667 ave 181 max 150 min
Histogram: 1 0 0 0 0 0 1 0 0 1
Nghost: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 3 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
write_dump all custom dump.series.driver id type x y z f_1[1] f_1[2] f_1[3] modify sort id append yes
clear
next rho
jump SELF LOOP
mdi exit
Total wall time: 0:00:00

View File

@ -0,0 +1,58 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 1 by 1 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 1 by 1 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.137 | 3.137 | 3.137 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -5.200819 0 -5.200819 -4.5647906
delete_atoms group all
Deleted 500 atoms, new total = 0
1 by 1 by 1 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.137 | 3.137 | 3.137 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.0419499 0 -6.0419499 -4.2737827
delete_atoms group all
Deleted 500 atoms, new total = 0
1 by 1 by 1 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.137 | 3.137 | 3.137 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.4477578 0 -6.4477578 -1.5268553
Total wall time: 0:00:00

View File

@ -0,0 +1,58 @@
LAMMPS (2 Jun 2022)
# 3d Lennard-Jones melt - MDI engine script
units lj
atom_style atomic
lattice fcc 1.0
Lattice spacing in x,y,z = 1.5874011 1.5874011 1.5874011
region box block 0 1 0 1 0 1
create_box 1 box
Created orthogonal box = (0 0 0) to (1.5874011 1.5874011 1.5874011)
1 by 2 by 2 MPI processor grid
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
mdi engine
delete_atoms group all
Deleted 0 atoms, new total = 0
1 by 2 by 2 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.103 | 3.103 | 3.103 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -5.200819 0 -5.200819 -4.5647906
delete_atoms group all
Deleted 500 atoms, new total = 0
1 by 2 by 2 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.103 | 3.103 | 3.103 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.0419499 0 -6.0419499 -4.2737827
delete_atoms group all
Deleted 500 atoms, new total = 0
1 by 2 by 2 MPI processor grid
WARNING: No fixes with time integration, atoms won't move (../verlet.cpp:60)
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 3.103 | 3.103 | 3.103 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.4477578 0 -6.4477578 -1.5268553
Total wall time: 0:00:00
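The last two logs are the engine side of the same runs (one and four MPI ranks); the script is identical in both and can be read off the echoed commands. Everything printed after "mdi engine" (the delete_atoms lines, the re-created processor grids, the thermo output) is driven by requests arriving from the driver rather than by further script lines. A minimal reconstruction:

# 3d Lennard-Jones melt - MDI engine script (reconstructed from the two logs above)
units lj
atom_style atomic
lattice fcc 1.0
region box block 0 1 0 1 0 1
create_box 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 1 check yes
mdi engine        # enter the MDI server loop; returns when the driver disconnects

Note that the engine's TotEng and Press columns (-5.200819 / -4.5647906 and so on) reproduce the driver's f_1 and v_evirial values, confirming that the same energies and virials crossed the MDI interface.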
