Merge branch 'master' into prepare-clang-format

# Conflicts:
#	src/KOKKOS/nbin_kokkos.h
#	src/KOKKOS/nbin_ssa_kokkos.h
#	src/MOLECULE/bond_fene_expand.h
#	src/USER-DPD/nbin_ssa.h
#	src/USER-DPD/nstencil_half_bin_2d_ssa.h
#	src/USER-DPD/nstencil_half_bin_3d_ssa.h
#	src/USER-INTEL/nbin_intel.h
#	src/USER-MISC/fix_propel_self.cpp
#	src/USER-OMP/npair_full_multi_old_omp.h
#	src/USER-OMP/npair_half_multi_old_newton_omp.h
#	src/USER-OMP/npair_half_size_multi_newtoff_omp.h
#	src/USER-OMP/npair_halffull_newtoff_omp.h
#	src/USER-OMP/npair_halffull_newton_omp.h
#	src/USER-OMP/npair_skip_omp.h
#	src/main.cpp
#	src/nbin_standard.h
#	src/npair_full_multi_old.h
#	src/npair_halffull_newtoff.h
#	src/npair_halffull_newton.h
#	src/npair_skip.h
#	src/npair_skip_respa.h
#	src/npair_skip_size.h
#	src/npair_skip_size_off2on.h
#	src/npair_skip_size_off2on_oneside.h
#	src/nstencil_full_bin_2d.h
#	src/nstencil_full_bin_3d.h
#	src/nstencil_full_ghost_bin_2d.h
#	src/nstencil_full_ghost_bin_3d.h
#	src/nstencil_full_multi_2d.h
#	src/nstencil_full_multi_3d.h
#	src/nstencil_full_multi_old_2d.h
#	src/nstencil_full_multi_old_3d.h
#	src/nstencil_half_bin_2d_newtoff.cpp
#	src/nstencil_half_bin_3d_newtoff.cpp
#	src/nstencil_half_bin_3d_newton_tri.h
#	src/nstencil_half_ghost_bin_2d_newtoff.cpp
#	src/nstencil_half_ghost_bin_2d_newtoff.h
#	src/nstencil_half_ghost_bin_3d_newtoff.cpp
#	src/nstencil_half_ghost_bin_3d_newtoff.h
#	src/nstencil_half_multi_2d.h
#	src/nstencil_half_multi_2d_newtoff.h
#	src/nstencil_half_multi_2d_newton_tri.h
#	src/nstencil_half_multi_2d_tri.h
#	src/nstencil_half_multi_3d_newtoff.h
#	src/nstencil_half_multi_3d_newton_tri.h
Axel Kohlmeyer
2021-05-14 14:53:49 -04:00
626 changed files with 54551 additions and 12282 deletions

View File

@ -121,10 +121,10 @@ set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE
PLUGIN QEQ REPLICA RIGID SHOCK SPIN SNAP SRD KIM PYTHON MSCG MPIIO VORONOI
USER-ADIOS USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-MESODPD USER-CGSDK
USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-LB USER-MANIFOLD USER-MEAMC USER-MESONT USER-MGPT USER-MISC USER-MOFFF
USER-LB USER-MANIFOLD USER-MDI USER-MEAMC USER-MESONT USER-MGPT USER-MISC USER-MOFFF
USER-MOLFILE USER-NETCDF USER-PHONON USER-PLUMED USER-PTM USER-QTB
USER-REACTION USER-REAXC USER-SCAFACOS USER-SDPD USER-SMD USER-SMTBQ USER-SPH
USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM USER-YAFF USER-PACE)
USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM USER-YAFF USER-PACE USER-BROWNIAN)
set(SUFFIX_PACKAGES CORESHELL GPU KOKKOS OPT USER-INTEL USER-OMP)
@ -324,8 +324,8 @@ else()
set(CUDA_REQUEST_PIC)
endif()
foreach(PKG_WITH_INCL KSPACE PYTHON MLIAP VORONOI USER-COLVARS USER-MOLFILE USER-NETCDF USER-PLUMED USER-QMMM
USER-QUIP USER-SCAFACOS USER-SMD USER-VTK KIM LATTE MESSAGE MSCG COMPRESS USER-PACE)
foreach(PKG_WITH_INCL KSPACE PYTHON MLIAP VORONOI USER-COLVARS USER-MDI USER-MOLFILE USER-NETCDF USER-PLUMED
USER-QMMM USER-QUIP USER-SCAFACOS USER-SMD USER-VTK KIM LATTE MESSAGE MSCG COMPRESS USER-PACE)
if(PKG_${PKG_WITH_INCL})
include(Packages/${PKG_WITH_INCL})
endif()
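For reference, the newly listed packages (USER-MDI and USER-BROWNIAN above) are enabled like any other package through the ``PKG_<NAME>`` options tested in this loop; a minimal, illustrative sketch assuming the standard LAMMPS CMake workflow:

.. code-block:: bash

   # illustrative sketch: enable the new USER-BROWNIAN package via the PKG_<NAME> convention
   mkdir build && cd build
   cmake -D PKG_USER-BROWNIAN=on ../cmake
   cmake --build . --parallel 4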

View File

@ -37,8 +37,8 @@ if(DOWNLOAD_KOKKOS)
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_CXX_EXTENSIONS=${CMAKE_CXX_EXTENSIONS}")
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}")
include(ExternalProject)
set(KOKKOS_URL "https://github.com/kokkos/kokkos/archive/3.3.01.tar.gz" CACHE STRING "URL for KOKKOS tarball")
set(KOKKOS_MD5 "08201d1c7cf5bc458ce0f5b44a629d5a" CACHE STRING "MD5 checksum of KOKKOS tarball")
set(KOKKOS_URL "https://github.com/kokkos/kokkos/archive/3.4.00.tar.gz" CACHE STRING "URL for KOKKOS tarball")
set(KOKKOS_MD5 "c2fdcedb6953e6160c765366f6045abb" CACHE STRING "MD5 checksum of KOKKOS tarball")
mark_as_advanced(KOKKOS_URL)
mark_as_advanced(KOKKOS_MD5)
ExternalProject_Add(kokkos_build
@ -58,7 +58,7 @@ if(DOWNLOAD_KOKKOS)
target_link_libraries(lmp PRIVATE LAMMPS::KOKKOS)
add_dependencies(LAMMPS::KOKKOS kokkos_build)
elseif(EXTERNAL_KOKKOS)
find_package(Kokkos 3.3.01 REQUIRED CONFIG)
find_package(Kokkos 3.4.00 REQUIRED CONFIG)
target_link_libraries(lammps PRIVATE Kokkos::kokkos)
target_link_libraries(lmp PRIVATE Kokkos::kokkos)
else()
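The version bump above applies to both build paths in this file; as an illustrative sketch (assuming the standard LAMMPS CMake workflow and the generic ``CMAKE_PREFIX_PATH`` hint for an external install):

.. code-block:: bash

   # download and build the bundled Kokkos 3.4.00 tarball
   cmake -D PKG_KOKKOS=on -D DOWNLOAD_KOKKOS=on ../cmake
   # or use an already installed Kokkos >= 3.4.00 found via find_package()
   cmake -D PKG_KOKKOS=on -D EXTERNAL_KOKKOS=on -D CMAKE_PREFIX_PATH=/path/to/kokkos ../cmake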

View File

@ -0,0 +1,66 @@
find_package(mdi QUIET)
if(${mdi_FOUND})
set(DOWNLOAD_MDI_DEFAULT OFF)
else()
set(DOWNLOAD_MDI_DEFAULT ON)
endif()
option(DOWNLOAD_MDI "Download and compile the MDI library instead of using an already installed one" ${DOWNLOAD_MDI_DEFAULT})
if(DOWNLOAD_MDI)
message(STATUS "MDI download requested - we will build our own")
set(MDI_URL "https://github.com/MolSSI-MDI/MDI_Library/archive/v1.2.9.tar.gz" CACHE STRING "URL for MDI tarball")
set(MDI_MD5 "ddfa46d6ee15b4e59cfd527ec7212184" CACHE STRING "MD5 checksum for MDI tarball")
mark_as_advanced(MDI_URL)
mark_as_advanced(MDI_MD5)
set(LAMMPS_LIB_MDI_BIN_DIR ${LAMMPS_LIB_BINARY_DIR}/mdi)
include(ExternalProject)
message(STATUS "Building mdi.")
ExternalProject_Add(mdi_external
URL ${MDI_URL}
URL_MD5 ${MDI_MD5}
UPDATE_COMMAND ""
CMAKE_ARGS ${CMAKE_REQUEST_PIC}
-DCMAKE_INSTALL_PREFIX=${LAMMPS_LIB_MDI_BIN_DIR}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_LIBDIR=${CMAKE_INSTALL_LIBDIR}
-DCMAKE_INSTALL_INCLUDEDIR=${CMAKE_INSTALL_INCLUDEDIR}
-DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS}
-Dlanguage=C
CMAKE_CACHE_ARGS -DCMAKE_C_FLAGS:STRING=${CMAKE_C_FLAGS}
-DCMAKE_CXX_FLAGS:STRING=${CMAKE_CXX_FLAGS}
-DTargetOpenMP_FIND_COMPONENTS:STRING=C;CXX)
# Link the lammps library against MDI
target_include_directories(lammps PRIVATE ${LAMMPS_LIB_MDI_BIN_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/mdi)
target_link_directories(lammps PRIVATE ${LAMMPS_LIB_MDI_BIN_DIR}/${CMAKE_INSTALL_LIBDIR}/mdi)
target_link_libraries(lammps PRIVATE mdi)
add_dependencies(lammps mdi_external)
# Link the lammps executable against MDI
target_include_directories(lmp PRIVATE ${LAMMPS_LIB_MDI_BIN_DIR}/${CMAKE_INSTALL_INCLUDEDIR}/mdi)
target_link_directories(lmp PRIVATE ${LAMMPS_LIB_MDI_BIN_DIR}/${CMAKE_INSTALL_LIBDIR}/mdi)
target_link_libraries(lmp PRIVATE mdi)
add_dependencies(lmp mdi_external)
else()
find_package(mdi)
if(NOT mdi_FOUND)
message(FATAL_ERROR "MDI library not found. Help CMake to find it "
"by setting mdi_LIBRARY and mdi_INCLUDE_DIR, or set DOWNLOAD_MDI=ON "
"to download and compile it")
endif()
# Link the lammps library against MDI
target_include_directories(lammps PRIVATE ${mdi_INCLUDE_DIR})
target_link_libraries(lammps PRIVATE ${mdi_LIBRARY})
# Link the lammps executable against MDI
target_include_directories(lmp PRIVATE ${mdi_INCLUDE_DIR})
target_link_libraries(lmp PRIVATE ${mdi_LIBRARY})
endif()
target_compile_definitions(lammps PRIVATE -DLMP_USER_MDI)
target_compile_definitions(lmp PRIVATE -DLMP_USER_MDI)
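As a usage illustration of the options defined in this new file, a typical configure step would enable the package and let CMake fetch MDI; option names other than ``DOWNLOAD_MDI`` follow the usual ``PKG_<NAME>`` convention and are assumptions here, not defined in this file:

.. code-block:: bash

   # illustrative: build LAMMPS with USER-MDI and a freshly downloaded MDI library
   cmake -D PKG_USER-MDI=on -D DOWNLOAD_MDI=on ../cmake
   cmake --build . --parallel 4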

View File

@ -5,9 +5,9 @@
set(ALL_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE
GRANULAR KSPACE MANYBODY MC MISC MLIAP MOLECULE OPT PERI
PLUGIN POEMS PYTHON QEQ REPLICA RIGID SHOCK SNAP SPIN SRD VORONOI
USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS USER-DIFFRACTION
USER-DPD USER-DRUDE USER-EFF USER-FEP USER-MEAMC USER-MESODPD
USER-MISC USER-MOFFF USER-OMP USER-PHONON USER-REACTION
USER-BROWNIAN USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-MEAMC
USER-MESODPD USER-MISC USER-MOFFF USER-OMP USER-PHONON USER-REACTION
USER-REAXC USER-SDPD USER-SPH USER-SMD USER-UEF USER-YAFF)
foreach(PKG ${ALL_PACKAGES})

View File

@ -49,6 +49,7 @@ This is the list of packages that may require additional steps.
* :ref:`USER-COLVARS <user-colvars>`
* :ref:`USER-H5MD <user-h5md>`
* :ref:`USER-INTEL <user-intel>`
* :ref:`USER-MDI <user-mdi>`
* :ref:`USER-MESONT <user-mesont>`
* :ref:`USER-MOLFILE <user-molfile>`
* :ref:`USER-NETCDF <user-netcdf>`
@ -467,6 +468,9 @@ They must be specified in uppercase.
* - ARMV8_THUNDERX2
- HOST
- ARMv8 Cavium ThunderX2 CPU
* - A64FX
- HOST
- ARMv8.2 with SVE Support
* - WSM
- HOST
- Intel Westmere CPU (SSE 4.2)
@ -539,6 +543,9 @@ They must be specified in uppercase.
* - AMPERE80
- GPU
- NVIDIA Ampere generation CC 8.0 GPU
* - AMPERE86
- GPU
- NVIDIA Ampere generation CC 8.6 GPU
* - VEGA900
- GPU
- AMD GPU MI25 GFX900
@ -547,12 +554,12 @@ They must be specified in uppercase.
- AMD GPU MI50/MI60 GFX906
* - VEGA908
- GPU
- AMD GPU GFX908
- AMD GPU MI100 GFX908
* - INTEL_GEN
- GPU
- Intel GPUs Gen9+
This list was last updated for version 3.3 of the Kokkos library.
This list was last updated for version 3.4 of the Kokkos library.
.. tabs::
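For illustration, the new keywords are passed to CMake the same way as the existing ones; a hedged sketch assuming the usual ``Kokkos_ARCH_<KEYWORD>`` and ``Kokkos_ENABLE_<BACKEND>`` option spellings:

.. code-block:: bash

   # CPU-only KOKKOS build tuned for an A64FX host
   cmake -D PKG_KOKKOS=on -D Kokkos_ENABLE_OPENMP=yes -D Kokkos_ARCH_A64FX=yes ../cmake
   # CUDA KOKKOS build targeting an Ampere CC 8.6 GPU
   cmake -D PKG_KOKKOS=on -D Kokkos_ENABLE_CUDA=yes -D Kokkos_ARCH_AMPERE86=yes ../cmake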
@ -1533,6 +1540,35 @@ TBB and MKL.
----------
.. _user-mdi:
USER-MDI package
-----------------------------
.. tabs::
.. tab:: CMake build
.. code-block:: bash
-D DOWNLOAD_MDI=value # download MDI Library for build, value = no (default) or yes
.. tab:: Traditional make
Before building LAMMPS, you must build the MDI Library in
``lib/mdi``\ . You can do this by executing a command like one
of the following from the ``lib/mdi`` directory:
.. code-block:: bash
$ python Install.py -m gcc # build using gcc compiler
$ python Install.py -m icc # build using icc compiler
The build should produce two files: ``lib/mdi/includelink/mdi.h``
and ``lib/mdi/liblink/libmdi.so``\ .
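After that, the package is enabled and compiled as for any other traditional make build; a brief sketch assuming the standard ``make yes-<package>`` convention and an existing machine makefile:

.. code-block:: bash

   $ cd ../../src         # back to the LAMMPS src directory
   $ make yes-user-mdi    # install the USER-MDI package sources
   $ make mpi             # build with the mpi machine makefile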
----------
.. _user-mesont:
USER-MESONT package

View File

@ -67,6 +67,7 @@ An alphabetic list of all general LAMMPS commands.
* :doc:`lattice <lattice>`
* :doc:`log <log>`
* :doc:`mass <mass>`
* :doc:`mdi/engine <mdi_engine>`
* :doc:`message <message>`
* :doc:`minimize <minimize>`
* :doc:`min_modify <min_modify>`

View File

@ -39,6 +39,9 @@ OPT.
* :doc:`ave/time <fix_ave_time>`
* :doc:`aveforce <fix_aveforce>`
* :doc:`balance <fix_balance>`
* :doc:`brownian <fix_brownian>`
* :doc:`brownian/asphere <fix_brownian>`
* :doc:`brownian/sphere <fix_brownian>`
* :doc:`bocs <fix_bocs>`
* :doc:`bond/break <fix_bond_break>`
* :doc:`bond/create <fix_bond_create>`
@ -98,6 +101,7 @@ OPT.
* :doc:`lb/viscous <fix_lb_viscous>`
* :doc:`lineforce <fix_lineforce>`
* :doc:`manifoldforce <fix_manifoldforce>`
* :doc:`mdi/engine <fix_mdi_engine>`
* :doc:`meso/move <fix_meso_move>`
* :doc:`momentum (k) <fix_momentum>`
* :doc:`momentum/chunk <fix_momentum>`

View File

@ -171,7 +171,7 @@ OPT.
* :doc:`lj/sdk/coul/long (go) <pair_sdk>`
* :doc:`lj/sdk/coul/msm (o) <pair_sdk>`
* :doc:`lj/sf/dipole/sf (go) <pair_dipole>`
* :doc:`lj/smooth (o) <pair_lj_smooth>`
* :doc:`lj/smooth (go) <pair_lj_smooth>`
* :doc:`lj/smooth/linear (o) <pair_lj_smooth_linear>`
* :doc:`lj/switch3/coulgauss/long <pair_lj_switch3_coulgauss_long>`
* :doc:`lj96/cut (go) <pair_lj96>`

View File

@ -108,6 +108,8 @@ Lowercase directories
+-------------+------------------------------------------------------------------+
| msst | MSST shock dynamics |
+-------------+------------------------------------------------------------------+
| multi | multi neighboring for systems with large interaction disparities |
+-------------+------------------------------------------------------------------+
| nb3b | use of non-bonded 3-body harmonic pair style |
+-------------+------------------------------------------------------------------+
| neb | nudged elastic band (NEB) calculation for barrier finding |

View File

@ -23,6 +23,7 @@ General howto
Howto_library
Howto_couple
Howto_client_server
Howto_mdi
Settings howto
==============

132
doc/src/Howto_mdi.rst Normal file
View File

@ -0,0 +1,132 @@
Using LAMMPS with the MDI library for code coupling
===================================================
.. note::
This Howto doc page will eventually replace the
:doc:`Howto client/server <Howto_client_server>` doc page.
Client/server coupling of two codes is where one code is the "client"
and sends request messages (data) to a "server" code. The server
responds to each request with a reply message. This enables the two
codes to work in tandem to perform a simulation. LAMMPS can act as
either a client or server code; it does this by using the `MolSSI
Driver Interface (MDI) library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_,
developed by the `Molecular Sciences Software Institute (MolSSI)
<https://molssi.org>`_.
Alternate methods for code coupling with LAMMPS are described on the
:doc:`Howto couple <Howto_couple>` doc page.
Some advantages of client/server coupling are that the two codes can run
as stand-alone executables; they need not be linked together. Thus
neither code needs to have a library interface. This also makes it easy
to run the two codes on different numbers of processors. If a message
protocol (format and content) is defined for a particular kind of
simulation, then in principle any code which implements the client-side
protocol can be used in tandem with any code which implements the
server-side protocol. Neither code needs to know what specific other
code it is working with.
In MDI nomenclature, a client code is the "driver", and a server code is
an "engine". One driver code can communicate with one or more instances
of one or more engine codes. Driver and engine codes can be written in
any language: C, C++, Fortran, Python, etc.
In addition to allowing the driver and engine(s) to run as
stand-alone executables, MDI also enables a server code to be a
"plugin" to the client code. In this scenario, server code(s) are
compiled as shared libraries, and one (or more) instances of the
server are instantiated by the driver code. If the driver code runs
in parallel, it can split its MPI communicator into multiple
sub-communicators, and launch each plugin engine instance on a
sub-communicator. Driver processors in that sub-communicator exchange
messages with that engine instance, and can also send MPI messages to
other processors in the driver. The driver code can also destroy
engine instances and re-instantiate them.
The way that a driver communicates with an engine is by making
MDI_Send() and MDI_Recv() calls, which are conceptually similar to
MPI_Send() and MPI_Recv() calls. Each send or receive has a string
which identifies the command name, and optionally some data, which can
be a single value or vector of values of any data type. Inside the
MDI library, data is exchanged between the driver and engine via MPI
calls or sockets. This is a run-time choice by the user.
-------------
As an example, LAMMPS and the ``pw.x`` command from Quantum Espresso (a
suite of quantum DFT codes) can work together via the MDI library to
perform an ab initio MD (AIMD) simulation, where LAMMPS runs an MD
simulation and sends a message each timestep to ``pw.x`` asking it to
compute quantum forces on the current configuration of atoms. Here is
how the 2 codes are launched to communicate by MPI:
.. code-block:: bash
% mpirun -np 2 lmp_mpi -mdi "-role DRIVER -name d -method MPI" \
-in in.aimd : -np 16 pw.x -in qe.in -mdi "-role ENGINE -name e -method MPI"
In this case LAMMPS runs on 2 processors (MPI tasks) and ``pw.x`` runs on 16
processors.
Here is how the 2 codes are launched to communicate by sockets:
.. code-block:: bash
% mpirun -np 2 lmp_mpi -mdi "-role DRIVER -name d -method TCP -port 8021" -in in.aimd
% mpirun -np 16 pw.x -in qe.in -mdi "-role ENGINE -name e -method TCP -port 8021 -hostname localhost"
These commands could be issued in different windows on a desktop
machine. Or in the same window, if the first command is ended with
"&" so as to run in the background. If "localhost" is replaced by an
IP address, ``pw.x`` could be run on another machine on the same network, or
even on another machine across the country.
After both codes initialize themselves to model the same system, this is
what occurs each timestep:
* LAMMPS sends a ">COORDS" message to ``pw.x`` with a 3*N vector of current atom coords
* ``pw.x`` receives the message/coords and computes quantum forces on all the atoms
* LAMMPS sends a "<FORCES" message to ``pw.x`` and waits for the result
* ``pw.x`` receives the message (after its computation finishes) and sends a 3*N vector of forces
* LAMMPS receives the forces and time integrates to complete a single timestep
-------------
Example scripts for using LAMMPS as an MDI engine are in the
examples/mdi directory. See the README file in that directory for
instructions on how to run the examples.
.. note::
Work is underway to add commands that allow LAMMPS to be used as an
MDI driver, e.g. for the AIMD example discussed above. Example
scripts for this usage mode will be added to the same directory when
available.
If LAMMPS is used as a stand-alone engine it should set up the system
it will be modeling in its input script, then invoke the
:doc:`mdi/engine <mdi_engine>` command. This will put LAMMPS into
engine mode where it waits for messages and data from the driver.
When the driver sends an "EXIT" command, LAMMPS will exit engine mode
and the input script will continue.
If LAMMPS is used as a plugin engine it operates the same way, except
that the driver will pass LAMMPS an input script to initialize itself.
Upon receiving the "EXIT" command, LAMMPS will exit engine mode and the
input script will continue. After finishing execution of the input
script, the instance of LAMMPS will be destroyed.
LAMMPS supports the full set of MD-appropriate engine commands defined
by the MDI library. See the :doc:`mdi/engine <mdi_engine>` doc page for
a list of these.
If those commands are not sufficient for a user-developed driver to use
LAMMPS as an engine, then new commands can be easily added. See these
two files which implement the definition of MDI commands and the logic
for responding to them:
* src/MDI/mdi_engine.cpp
* src/MDI/fix_mdi_engine.cpp

View File

@ -69,6 +69,7 @@ page gives those details.
* :ref:`USER-ATC <PKG-USER-ATC>`
* :ref:`USER-AWPMD <PKG-USER-AWPMD>`
* :ref:`USER-BOCS <PKG-USER-BOCS>`
* :ref:`USER-BROWNIAN <PKG-USER-BROWNIAN>`
* :ref:`USER-CGDNA <PKG-USER-CGDNA>`
* :ref:`USER-CGSDK <PKG-USER-CGSDK>`
* :ref:`USER-COLVARS <PKG-USER-COLVARS>`
@ -81,6 +82,7 @@ page gives those details.
* :ref:`USER-INTEL <PKG-USER-INTEL>`
* :ref:`USER-LB <PKG-USER-LB>`
* :ref:`USER-MANIFOLD <PKG-USER-MANIFOLD>`
* :ref:`USER-MDI <PKG-USER-MDI>`
* :ref:`USER-MEAMC <PKG-USER-MEAMC>`
* :ref:`USER-MESODPD <PKG-USER-MESODPD>`
* :ref:`USER-MESONT <PKG-USER-MESONT>`
@ -1266,6 +1268,26 @@ Example inputs are in the examples/USER/bocs folder.
----------
.. _PKG-USER-BROWNIAN:
USER-BROWNIAN package
---------------------
**Contents:**
This package provides :doc:`fix brownian, fix brownian/sphere, and
fix brownian/asphere <fix_brownian>` as well as
:doc:`fix propel/self <fix_propel_self>`, which allow one to perform
Brownian Dynamics time integration of point, spherical, and aspherical
particles and also support self-propelled particles.
**Authors:** Sam Cameron (University of Bristol),
Stefan Paquay (while at Brandeis University) (initial version of fix propel/self)
Example inputs are in the examples/USER/brownian folder.
----------
.. _PKG-USER-CGDNA:
USER-CGDNA package
@ -1770,6 +1792,28 @@ Waltham, MA, USA)
----------
.. _PKG-USER-MDI:
USER-MDI package
----------------
**Contents:**
A LAMMPS command and fix to allow client-server coupling of LAMMPS to
other atomic or molecular simulation codes via the `MolSSI Driver Interface
(MDI) library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_.
**Author:** Taylor Barnes - MolSSI, taylor.a.barnes at gmail.com
**Supporting info:**
* src/USER-MDI/README
* :doc:`mdi/engine <mdi_engine>`
* :doc:`fix mdi/engine <fix_mdi_engine>`
* examples/USER/mdi
----------
.. _PKG-USER-MEAMC:
USER-MEAMC package

View File

@ -39,6 +39,8 @@ package:
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-BOCS <PKG-USER-BOCS>` | BOCS bottom up coarse graining | :doc:`fix bocs <fix_bocs>` | USER/bocs | no |
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-BROWNIAN <PKG-USER-BROWNIAN>` | Brownian dynamics and self-propelled particles | :doc:`fix brownian <fix_brownian>`, :doc:`fix propel/self <fix_propel_self>` | USER/brownian | no |
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-CGDNA <PKG-USER-CGDNA>` | coarse-grained DNA force fields | src/USER-CGDNA/README | USER/cgdna | no |
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-CGSDK <PKG-USER-CGSDK>` | SDK coarse-graining model | :doc:`pair_style lj/sdk <pair_sdk>` | USER/cgsdk | no |
@ -63,6 +65,8 @@ package:
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-MANIFOLD <PKG-USER-MANIFOLD>` | motion on 2d surfaces | :doc:`fix manifoldforce <fix_manifoldforce>` | USER/manifold | no |
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-MDI <PKG-USER-MDI>` | client-server coupling | :doc:`MDI Howto <Howto_mdi>` | USER/mdi | ext |
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-MEAMC <PKG-USER-MEAMC>` | modified EAM potential (C++) | :doc:`pair_style meam/c <pair_meamc>` | meamc | no |
+------------------------------------------------+-----------------------------------------------------------------+-------------------------------------------------------------------------------+------------------------------------------------------+---------+
| :ref:`USER-MESODPD <PKG-USER-MESODPD>` | mesoscale DPD models | :doc:`pair_style edpd <pair_mesodpd>` | USER/mesodpd | no |

View File

@ -11,13 +11,17 @@ Syntax
comm_modify keyword value ...
* zero or more keyword/value pairs may be appended
* keyword = *mode* or *cutoff* or *cutoff/multi* or *group* or *vel*
* keyword = *mode* or *cutoff* or *cutoff/multi* or *reduce/multi* or *group* or *vel*
.. parsed-literal::
*mode* value = *single* or *multi* = communicate atoms within a single or multiple distances
*mode* value = *single*, *multi*, or *multi/old* = communicate atoms within a single or multiple distances
*cutoff* value = Rcut (distance units) = communicate atoms from this far away
*cutoff/multi* type value
*cutoff/multi* collection value
collection = atom collection or collection range (supports asterisk notation)
value = Rcut (distance units) = communicate atoms for selected types from this far away
*reduce/multi* arg = none = reduce number of communicated ghost atoms for multi style
*cutoff/multi/old* type value
type = atom type or type range (supports asterisk notation)
value = Rcut (distance units) = communicate atoms for selected types from this far away
*group* value = group-ID = only communicate atoms in the group
@ -28,9 +32,9 @@ Examples
.. code-block:: LAMMPS
comm_modify mode multi
comm_modify mode multi reduce/multi
comm_modify mode multi group solvent
comm_modift mode multi cutoff/multi 1 10.0 cutoff/multi 2*4 15.0
comm_modify mode multi cutoff/multi 1 10.0 cutoff/multi 2*4 15.0
comm_modify vel yes
comm_modify mode single cutoff 5.0 vel yes
comm_modify cutoff/multi * 0.0
@ -62,12 +66,18 @@ sub-domain. The distance is by default the maximum of the neighbor
cutoff across all atom type pairs.
For many systems this is an efficient algorithm, but for systems with
widely varying cutoffs for different type pairs, the *multi* mode can
be faster. In this case, each atom type is assigned its own distance
widely varying cutoffs for different type pairs, the *multi* or *multi/old* mode can
be faster. In *multi*, each atom is assigned to a collection which should
correspond to a set of atoms with similar interaction cutoffs.
In this case, each atom collection is assigned its own distance
cutoff for communication purposes, and fewer atoms will be
communicated. See the :doc:`neighbor multi <neighbor>` command for a
neighbor list construction option that may also be beneficial for
simulations of this kind.
communicated. In *multi/old*, a similar technique is used but atoms
are grouped by atom type. See the :doc:`neighbor multi <neighbor>` and
:doc:`neighbor multi/old <neighbor>` commands for
neighbor list construction options that may also be beneficial for
simulations of this kind. The *multi* communication mode is only compatible
with the *multi* neighbor style. The *multi/old* communication mode is compatible
with both the *multi* and *multi/old* neighbor styles.
The *cutoff* keyword allows you to extend the ghost cutoff distance
for communication mode *single*\ , which is the distance from the borders
@ -87,12 +97,26 @@ warning is printed, if this bond based estimate is larger than the
communication cutoff used.
The *cutoff/multi* option is equivalent to *cutoff*\ , but applies to
communication mode *multi* instead. Since in this case the communication
cutoffs are determined per atom type, a type specifier is needed and
cutoff for one or multiple types can be extended. Also ranges of types
using the usual asterisk notation can be given. For granular pair styles,
the default cutoff is set to the sum of the current maximum atomic radii
for each type.
communication mode *multi* instead. Since the communication cutoffs are
determined per atom collection, a collection specifier is needed and the
cutoff for one or multiple collections can be extended. Ranges of
collections can also be given using the usual asterisk notation. Collections
are indexed from 1 to N where N is the total number of collections.
Note that the arguments for *cutoff/multi* are parsed right before each
simulation to account for potential changes in the number of
collections. Custom cutoffs are preserved between runs but if
collections are redefined, one may want to re-specify the communication
cutoffs. For granular pair styles, the default cutoff is set to the sum
of the current maximum atomic radii for each collection. The
*cutoff/multi/old* option is similar to *cutoff/multi* except it
operates on atom types as opposed to collections.
The *reduce/multi* option applies to *multi* and sets the communication
cutoff for a particle equal to the maximum interaction distance between particles
in the same collection. This reduces the number of
ghost atoms that need to be communicated. This method is only compatible with the
*multi* neighbor style and requires a half neighbor list and Newton on.
See the :doc:`neighbor multi <neighbor>` command for more information.
These are simulation scenarios in which it may be useful or even
necessary to set a ghost cutoff > neighbor cutoff:
@ -123,7 +147,7 @@ ghost cutoff should be set.
In the last scenario, a :doc:`fix <fix>` or :doc:`compute <compute>` or
:doc:`pairwise potential <pair_style>` needs to calculate with ghost
atoms beyond the normal pairwise cutoff for some computation it
performs (e.g. locate neighbors of ghost atoms in a multibody pair
performs (e.g. locate neighbors of ghost atoms in a manybody pair
potential). Setting the ghost cutoff appropriately can insure it will
find the needed atoms.

View File

@ -59,6 +59,7 @@ Commands
lattice
log
mass
mdi_engine
message
min_modify
min_spin

View File

@ -182,6 +182,9 @@ accelerated styles exist.
* :doc:`ave/time <fix_ave_time>` - compute/output global time-averaged quantities
* :doc:`aveforce <fix_aveforce>` - add an averaged force to each atom
* :doc:`balance <fix_balance>` - perform dynamic load-balancing
* :doc:`brownian <fix_brownian>` - overdamped translational brownian motion
* :doc:`brownian/asphere <fix_brownian>` - overdamped translational and rotational brownian motion for ellipsoids
* :doc:`brownian/sphere <fix_brownian>` - overdamped translational and rotational brownian motion for spheres
* :doc:`bocs <fix_bocs>` - NPT style time integration with pressure correction
* :doc:`bond/break <fix_bond_break>` - break bonds on the fly
* :doc:`bond/create <fix_bond_create>` - create bonds on the fly

View File

@ -328,8 +328,8 @@ keyword 'ChiralIDs' lists the atom IDs of chiral atoms whose
handedness should be enforced. The fifth optional section begins with
the keyword 'Constraints' and lists additional criteria that must be
satisfied in order for the reaction to occur. Currently, there are
five types of constraints available, as discussed below: 'distance',
'angle', 'dihedral', 'arrhenius', and 'rmsd'.
six types of constraints available, as discussed below: 'distance',
'angle', 'dihedral', 'arrhenius', 'rmsd', and 'custom'.
A sample map file is given below:
@ -500,6 +500,45 @@ example, the molecule fragment could consist of only the backbone
atoms of a polymer chain. This constraint can be used to enforce a
specific relative position and orientation between reacting molecules.
The constraint of type 'custom' has the following syntax:
.. parsed-literal::
custom *varstring*
where 'custom' is the required keyword, and *varstring* is a
variable expression. The expression must be a valid equal-style
variable formula that can be read by the :doc:`variable <variable>` command,
after any special reaction functions are evaluated. If the resulting
expression is zero, the reaction is prevented from occurring;
otherwise, it is permitted to occur. There are two special reaction
functions available, 'rxnsum' and 'rxnave'. These functions operate
over the atoms in a given reaction site, and have one mandatory
argument and one optional argument. The mandatory argument is the
identifier for an atom-style variable. The second, optional argument
is the name of a molecule fragment in the pre-reaction template, and
can be used to operate over a subset of atoms in the reaction site.
The 'rxnsum' function sums the atom-style variable over the reaction
site, while the 'rxnave' returns the average value. For example, a
constraint on the total potential energy of atoms involved in the
reaction can be imposed as follows:
.. code-block:: LAMMPS
compute 1 all pe/atom # in LAMMPS input script
variable my_pe atom c_1 # in LAMMPS input script
.. code-block:: LAMMPS
custom "rxnsum(v_my_pe) > 100" # in Constraints section of map file
The above example prevents the reaction from occurring unless the
total potential energy of the reaction site is above 100. The variable
expression can be interpreted as the probability of the reaction
occurring by using an inequality and the 'random(x,y,z)' function
available as an equal-style variable input, similar to the 'arrhenius'
constraint above.
By default, all constraints must be satisfied for the reaction to
occur. In other words, constraints are evaluated as a series of
logical values using the logical AND operator "&&". More complex logic

216
doc/src/fix_brownian.rst Normal file
View File

@ -0,0 +1,216 @@
.. index:: fix brownian
.. index:: fix brownian/sphere
.. index:: fix brownian/asphere
fix brownian command
===========================
fix brownian/sphere command
===========================
fix brownian/asphere command
============================
Syntax
""""""
.. parsed-literal::
fix ID group-ID style_name temp seed keyword args
* ID, group-ID are documented in :doc:`fix <fix>` command
* style_name = *brownian* or *brownian/sphere* or *brownian/asphere*
* temp = temperature
* seed = random number generator seed
* one or more keyword/value pairs may be appended
* keyword = *rng* or *dipole* or *gamma_r_eigen* or *gamma_t_eigen* or *gamma_r* or *gamma_t*
.. parsed-literal::
*rng* value = *uniform* or *gaussian* or *none*
*uniform* = use uniform random number generator
*gaussian* = use gaussian random number generator
*none* = turn off noise
*dipole* value = *mux* and *muy* and *muz* for *brownian/asphere*
*mux*, *muy*, and *muz* = update orientation of dipole having direction (*mux*,*muy*,*muz*) in body frame of rigid body
*gamma_r_eigen* values = *gr1* and *gr2* and *gr3* for *brownian/asphere*
*gr1*, *gr2*, and *gr3* = diagonal entries of body frame rotational friction tensor
*gamma_r* values = *gr* for *brownian/sphere*
*gr* = magnitude of the (isotropic) rotational friction tensor
*gamma_t_eigen* values = *gt1* and *gt2* and *gt3* for *brownian/asphere*
*gt1*, *gt2*, and *gt3* = diagonal entries of body frame translational friction tensor
*gamma_t* values = *gt* for *brownian* and *brownian/sphere*
*gt* = magnitude of the (isotropic) translational friction tensor
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all brownian 1.0 12908410 gamma_t 1.0
fix 1 all brownian 1.0 12908410 gamma_t 3.0 rng gaussian
fix 1 all brownian/sphere 1.0 1294019 gamma_t 3.0 gamma_r 1.0
fix 1 all brownian/sphere 1.0 19581092 gamma_t 1.0 gamma_r 0.3 rng none
fix 1 all brownian/asphere 1.0 1294019 gamma_t_eigen 1.0 2.0 3.0 gamma_r_eigen 4.0 7.0 8.0 rng gaussian
fix 1 all brownian/asphere 1.0 1294019 gamma_t_eigen 1.0 2.0 3.0 gamma_r_eigen 4.0 7.0 8.0 dipole 1.0 0.0 0.0
Description
"""""""""""
Perform Brownian Dynamics time integration to update position, velocity,
dipole orientation (for spheres) and quaternion orientation (for
ellipsoids, with optional dipole update as well) of all particles in the
fix group in each timestep. Brownian Dynamics uses Newton's laws of
motion in the limit that inertial forces are negligible compared to
viscous forces. The stochastic equation of motion for the center of mass
positions is
.. math::
d\mathbf{r} = \mathbf{\gamma}_t^{-1}\mathbf{F}dt+\sqrt{2k_BT}\mathbf{\gamma}_t^{-1/2}d\mathbf{W}_t,
in the lab-frame (i.e. :math:`\mathbf{\gamma}_t` is not diagonal, but
only depends on orientation and so the noise is still additive).
The rotational motion for the spherical and ellipsoidal particles is not
as simple an expression, but is chosen to replicate the Boltzmann
distribution for the case of conservative torques (see :ref:`(Ilie)
<Ilie1>` or :ref:`(Delong) <Delong1>`).
For the style *brownian*, only the positions of the particles are
updated. This is therefore suitable for point particle simulations.
For the style *brownian/sphere*, the positions of the particles are
updated, and a dipole slaved to the spherical orientation is also
updated. This style therefore requires the hybrid atom style
:doc:`atom_style dipole <atom_style>` and :doc:`atom_style sphere
<atom_style>`.
For the style *brownian/asphere*, the center of mass positions and the
quaternions of ellipsoidal particles are updated. This fix style is
suitable for equations of motion where the rotational and translational
friction tensors can be diagonalized in a certain (body) reference frame.
---------
.. note::
This integrator does not by default assume a relationship between the
rotational and translational friction tensors, though such a relationship
should exist in the case of no-slip boundary conditions between the particles and
the surrounding (implicit) solvent. E.g. in the case of spherical particles,
the condition :math:`\gamma_t=3\gamma_r/\sigma^2` must be explicitly
accounted for by setting *gamma_t* to 3x and *gamma_r* to x (where
:math:`\sigma` is the spherical diameter). A similar (though more complex)
relationship holds for ellipsoids and rod-like particles.
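As a numerical illustration of that relation (assuming reduced units and a diameter :math:`\sigma = 1`), choosing :math:`\gamma_r = 0.3` implies

.. math::

   \gamma_t = \frac{3\gamma_r}{\sigma^2} = \frac{3 \times 0.3}{1^2} = 0.9,

so *gamma_t* 0.9 and *gamma_r* 0.3 would be the matched no-slip pair in that case.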
---------
.. note::
Temperature computation using the :doc:`compute temp <compute_temp>`
will not correctly compute temperature of these overdamped dynamics
since we are explicitly neglecting inertial effects.
Furthermore, this time integrator does not add the stochastic terms or
viscous terms to the force and/or torques. Rather, they are just added
in to the equations of motion to update the degrees of freedom.
---------
If the *rng* keyword is used with the *uniform* value, then the noise
is generated from a uniform distribution (see
:ref:`(Dunweg) <Dunweg7>` for why this works). This is the same method
of noise generation as used in :doc:`fix_langevin <fix_langevin>`.
If the *rng* keyword is used with the *gaussian* value, then the noise
is generated from a gaussian distribution. Typically this added
complexity is unnecessary, and one should be fine using the *uniform*
value for reasons argued in :ref:`(Dunweg) <Dunweg7>`.
If the *rng* keyword is used with the *none* value, then the noise
terms are set to zero.
The *gamma_t* keyword sets the (isotropic) translational viscous damping.
Required for (and only compatible with) *brownian* and *brownian/sphere*.
The units of *gamma_t* are mass/time.
The *gamma_r* keyword sets the (isotropic) rotational viscous damping.
Required for (and only compatible with) *brownian/sphere*.
The units of *gamma_r* are mass*length**2/time.
The *gamma_r_eigen* and *gamma_t_eigen* keywords set the eigenvalues of
the rotational and translational viscous damping tensors (having the same units as
their isotropic counterparts). Required for (and only compatible with)
*brownian/asphere*. For a 2D system, the first two values of *gamma_r_eigen*
must be inf (only rotation in xy plane), and the third value of *gamma_t_eigen*
must be inf (only diffusion in xy plane).
If the *dipole* keyword is used, then the dipole moments of the particles
are updated as described above. Only compatible with *brownian/asphere*
(as *brownian/sphere* updates dipoles automatically).
----------
.. note::
For style *brownian/asphere*, if the components are *gamma_t_eigen* = (x,x,x) and
*gamma_r_eigen* = (y,y,y), the dynamics will replicate those of the
*brownian/sphere* style with *gamma_t* = x and *gamma_r* = y.
----------
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
No information about this fix is written to :doc:`binary restart files <restart>`.
No global or per-atom quantities are stored
by this fix for access by various :doc:`output commands <Howto_output>`.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command. This fix is not invoked during
:doc:`energy minimization <minimize>`.
Restrictions
""""""""""""
The style *brownian/sphere* fix requires that atoms store torque and angular velocity (omega)
as defined by the :doc:`atom_style sphere <atom_style>` command.
The style *brownian/asphere* fix requires that atoms store torque and quaternions
as defined by the :doc:`atom_style ellipsoid <atom_style>` command.
If the *dipole* keyword is used, they must also store a dipole moment
as defined by the :doc:`atom_style dipole <atom_style>` command.
This fix is part of the USER-BROWNIAN package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>`
doc page for more info.
Related commands
""""""""""""""""
:doc:`fix propel/self <fix_propel_self>`,
:doc:`fix langevin <fix_langevin>`, :doc:`fix nve/sphere <fix_nve_sphere>`,
Default
"""""""
The default for *rng* is *uniform*. The default for the rotational and translational friction
tensors is the identity tensor.
----------
.. _Ilie1:
**(Ilie)** Ilie, Briels, den Otter, Journal of Chemical Physics, 142, 114103 (2015).
.. _Delong1:
**(Delong)** Delong, Usabiaga, Donev, Journal of Chemical Physics. 143, 144107 (2015)
.. _Dunweg7:
**(Dunweg)** Dunweg and Paul, Int J of Modern Physics C, 2, 817-27 (1991).

View File

@ -0,0 +1,59 @@
.. index:: fix mdi/engine
fix mdi/engine command
======================
Syntax
""""""
.. parsed-literal::
fix ID group-ID mdi/engine
* ID, group-ID are documented in :doc:`fix <fix>` command
* mdi/engine = style name of this fix command
Examples
""""""""
.. code-block:: LAMMPS
fix 1 all mdi/engine
Description
"""""""""""
This fix is used along with the :doc:`mdi/engine <mdi_engine>` command
to enable LAMMPS to use the `MDI Library
<https://molssi-mdi.github.io/MDI_Library/html/index.html>`_ to run as
an MDI engine. The fix provides hooks that enable MDI driver codes to
communicate with LAMMPS at various points within a LAMMPS timestep.
It is not generally necessary to add this fix to a LAMMPS input file,
even when using the :doc:`mdi/engine <mdi_engine>` command. If the
:doc:`mdi/engine <mdi_engine>` command is executed and this fix is not
present, it will automatically be added and applied as a new fix for
all atoms for the duration of the command. Thus it is only necessary
to add this fix to an input file when you want to modify the group-ID
or the ordering of this fix relative to other fixes in the input script.
For more information about running LAMMPS as an MDI engine, see the
:doc:`mdi/engine <mdi_engine>` command and the :doc:`Howto mdi
<Howto_mdi>` doc page.
Restrictions
""""""""""""
This command is part of the USER-MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` doc page for more info.
Related commands
""""""""""""""""
:doc:`mdi/engine <mdi_engine>`
Default
"""""""
none

View File

@ -8,52 +8,121 @@ Syntax
.. parsed-literal::
fix ID group-ID propel/self mode magnitude keyword values ...
fix ID group-ID propel/self mode magnitude keyword values
* ID, group-ID are documented in :doc:`fix <fix>` command
* propel/self = style name of this fix command
* mode = velocity or quat
* magnitude = magnitude of the active force
* one or more keyword/value pairs may be appended to args
* keyword = *types*
* mode = *dipole* or *velocity* or *quat*
* magnitude = magnitude of self-propulsion force
* zero or one keyword/value pairs may be appended
* keyword = *qvector*
.. parsed-literal::
*qvector* value = direction of force in ellipsoid frame
*sx*, *sy*, *sz* = components of *qvector*
*types* values = one or more atom types
Examples
""""""""
.. code-block:: LAMMPS
fix active_group all propel/self velocity 1.0
fix constant_velocity all viscous 1.0
fix active_group all propel/self quat 1.0
fix active all propel/self quat 1.0 types 1 2 4
fix active all propel/self dipole 40.0
fix active all propel/self velocity 10.0
fix active all propel/self quat 15.7 qvector 1.0 0.0 0.0
Description
"""""""""""
Adds a force of a constant magnitude to each atom in the group. The nature in
which the force is added depends on the mode.
Add a force to each atom in the group due to a self-propulsion force. The
force is given by
For *mode* = *velocity*, the active force acts along the velocity vector of
each atom. This can be interpreted as a velocity-dependent friction,
such as proposed by :ref:`(Erdmann) <Erdmann>`.
.. math::
For *mode* = *quat* the force is applied along the axis obtained
by rotating the x-axis along the atom's quaternion. In other words, the
force is along the x-axis in the atom's body frame. This mode requires
all atoms in the group to have a quaternion, so atom_style should
either be ellipsoid or body. In combination with Langevin thermostat
for translation and rotation in the overdamped regime, the quaternion
mode corresponds to the active Brownian particle model introduced by
:ref:`(Henkes) <Henkes>`, :ref:`(Bialke) <Bialke>` and :ref:`(Fily)
<Fily>`.
F_i = f_P e_i
By default, this fix is applied to all atoms in the group. You can
override this behavior by specifying the atom types the fix should work
on through the *types* keyword.
where *i* is the particle the force is being applied to, :math:`f_P`
is the magnitude of the force, and :math:`e_i` is the vector direction
of the force. The specification of :math:`e_i` is based on which of the
three keywords (*dipole* or *velocity* or *quat*) one selects.
For mode *dipole*, :math:`e_i` is just equal to
the dipole vectors of the atoms in the group. Therefore, if the dipoles
are not unit vectors, the :math:`e_i` will not be unit vectors.
.. note::
If another command changes the magnitude of the dipole, this force will
change accordingly (since :math:`|e_i|` will change, which is physically
equivalent to re-scaling :math:`f_P` while keeping :math:`|e_i|` constant),
and no warning will be provided by LAMMPS. This is almost never what you
want, so ensure you are not changing dipole magnitudes with another LAMMPS
fix or pair style. Furthermore, self-propulsion forces (almost) always
set :math:`e_i` to be a unit vector for all times, so it's best to set
all the dipole magnitudes to 1.0 unless you have a good reason not to
(see the :doc:`set <set>` command on how to do this).
For mode *velocity*, :math:`e_i` points in the direction
of the current velocity (a unit-vector). This can be interpreted as a
velocity-dependent friction, as proposed by e.g. :ref:`(Erdmann) <Erdmann1>`.
For mode *quat*, :math:`e_i` points in the direction of a unit
vector, oriented in the coordinate frame of the ellipsoidal particles,
which defaults to point along the x-direction. This default behavior
can be changed via the *qvector* keyword.
The optional *qvector* keyword specifies the direction of self-propulsion
via a unit vector (*sx*,*sy*,*sz*). The arguments *sx*, *sy*, and *sz* are
defined within the coordinate frame of the atom's
ellipsoid. For instance, for an ellipsoid with long axis along
its x-direction, if one wanted the self-propulsion force to also
be along this axis, set *sx* equal to 1 and *sy*, *sz* both equal
to zero. This keyword may only be specified for mode *quat*.
.. note::
When using the keyword *qvector*, the three arguments *sx*,
*sy*, and *sz* will be automatically normalized to components
of a unit vector internally to avoid users having to explicitly
do so themselves. Therefore, in mode *quat*, the vectors :math:`e_i`
will always be of unit length.
Along with adding a force contribution, this fix can also
contribute to the virial (pressure) of the system, defined as
:math:`f_P \sum_i \langle e_i \cdot r_i \rangle /(d V)`, where :math:`r_i` is the
*unwrapped* coordinate of particle i in the case of periodic
boundary conditions. See :ref:`(Winkler) <Winkler1>` for a
discussion of this active pressure contribution.
For modes *dipole* and *quat*, this fix is by default
included in pressure computations.
For mode *velocity*, this fix is by default not included
in pressure computations.
.. note::
In contrast to equilibrium systems, pressure of active systems
in general depends on the geometry of the container.
The active pressure contribution as calculated in this fix
is only valid for certain boundary conditions (spherical
walls, rectangular walls, or periodic boundary conditions).
For other geometries, the pressure must be measured via
explicit calculation of the force per unit area on a wall,
and so one must not calculate it using this fix.
(Use :doc:`fix_modify <fix_modify>` as described below
to turn off the virial contribution of this fix). Again,
see :ref:`(Winkler) <Winkler1>` for discussion of why this
is the case.
Furthermore, when dealing with active systems, the temperature
is no longer well defined. Therefore, one should ensure that
the *virial* flag is used in the
:doc:`compute pressure <compute_pressure>` command (turning
off temperature contributions).
----------
@ -62,40 +131,48 @@ Restart, fix_modify, output, run start/stop, minimize info
No information about this fix is written to :doc:`binary restart files <restart>`.
This fix is not imposed during minimization.
The :doc:`fix_modify <fix_modify>` *virial* option is supported by this
fix to add the contribution due to the added forces on atoms to the
system's virial as part of :doc:`thermodynamic output <thermo_style>`.
The default is *virial yes* for keywords *dipole* and *quat*. The
default is *virial no* for keyword *velocity*.
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.
Restrictions
""""""""""""
In quat mode, this fix makes use of per-atom quaternions to take
into account the fact that the orientation can rotate and hence the
direction of the active force can change. The quat mode
of this fix only works with atom_style ellipsoid.
With keyword *dipole*, this fix only works when the DIPOLE package is enabled.
See the :doc:`Build package <Build_package>` doc page for more info.
This fix is part of the USER-BROWNIAN package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package <Build_package>`
doc page for more info.
Related commands
""""""""""""""""
:doc:`fix setforce <fix_setforce>`, :doc:`fix addforce <fix_addforce>`
.. _Erdmann:
**(Erdmann)** U. Erdmann , W. Ebeling, L. Schimansky-Geier, and F. Schweitzer,
Eur. Phys. J. B 15, 105-113, 2000.
.. _Henkes:
**(Henkes)** Henkes, S, Fily, Y., and Marchetti, M. C. Phys. Rev. E, 84, 040301(R), 2011.
.. _Bialke:
**(Bialke)** J. Bialke, T. Speck, and H Loewen, Phys. Rev. Lett. 108, 168301, 2012.
.. _Fily:
**(Fily)** Y. Fily and M.C. Marchetti, Phys. Rev. Lett. 108, 235702, 2012.
:doc:`fix efield <fix_efield>` , :doc:`fix setforce <fix_setforce>`,
:doc:`fix addforce <fix_addforce>`
Default
"""""""
types
none
----------
.. _Erdmann1:
**(Erdmann)** U. Erdmann , W. Ebeling, L. Schimansky-Geier, and F. Schweitzer,
Eur. Phys. J. B 15, 105-113, 2000.
.. _Winkler1:
**(Winkler)** Winkler, Wysocki, and Gompper, Soft Matter, 11, 6680 (2015).

88
doc/src/mdi_engine.rst Normal file
View File

@ -0,0 +1,88 @@
.. index:: mdi_engine
mdi_engine command
==================
Syntax
""""""
.. parsed-literal::
mdi_engine
Description
"""""""""""
This command is used to have LAMMPS act as a server with another
client code to effectively couple the two codes together in
client/server mode.
More specifically, this command causes LAMMPS to begin using the `MDI
Library <https://molssi-mdi.github.io/MDI_Library/html/index.html>`_
to run as an MDI engine (server), responding to commands made by an
external MDI driver code (client). See the :doc:`Howto mdi
<Howto_mdi>` doc page for more information about how LAMMPS can work
as either an MDI driver or an engine.
General information about launching codes that communicate using the
MDI Library can be found in the `corresponding page
<https://molssi-mdi.github.io/MDI_Library/html/library_page.html#library_launching_sec>`_
of the MDI Library's documentation.
----------
This command should typically be used in an input script after LAMMPS
has set up the system it is going to model in collaboration with the
driver code. Depending on how the driver code tells the LAMMPS engine
to exit, other commands can be executed after this command, but
typically it should be used at the end of the LAMMPS input script.
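For illustration, a minimal engine-side sketch (the input commands, port, and hostname below are placeholders chosen for this example; the ``-mdi`` launch string follows the form shown on the :doc:`Howto mdi <Howto_mdi>` page):

.. code-block:: bash

   # write a small engine input that sets up the system, then hands control to the driver
   cat > in.engine << 'EOF'
   units       lj
   read_data   data.system
   pair_style  lj/cut 2.5
   pair_coeff  * * 1.0 1.0
   mdi_engine
   EOF
   # launch LAMMPS as an MDI engine communicating over TCP
   mpirun -np 4 lmp_mpi -mdi "-role ENGINE -name LAMMPS -method TCP -port 8021 -hostname localhost" -in in.engine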
To act as an MD-based MDI engine, this is the list of MDI commands from
a driver code which LAMMPS currently recognizes. See more details
about these commands in the `MDI library documentation
<https://molssi-mdi.github.io/MDI_Library/html/mdi_standard.html>`_
.. NOTE: Taylor - is this the best link for this info? Can we flesh this
.. out with the full list of supported commands? Maybe the distinction
.. of what "node" the commands refer to is not needed in this table?
.. list-table::
:widths: 20 80
:header-rows: 1
* - Command name
- Action
* - >NATOMS
- Driver sends the number of atoms in the system
* - <NATOMS
- Driver requests the number of atoms in the system
* - <COORDS
- Driver requests 3*N double-precision atom coordinates
* - >FORCES
- Driver sends 3*N double-precision atom forces
* - <FORCES
- Driver requests 3*N double-precision atom forces
* - EXIT
- Driver tells the engine (LAMMPS) to exit engine mode
If these commands are not sufficient to support what a driver you
write needs, additional commands can be defined by simply using a
new command name not in this list. Code to support the new command
needs to be added to the USER-MDI package within LAMMPS; see its
src/USER-MDI/mdi_engine.cpp and fix_mdi_engine.cpp files.
Restrictions
""""""""""""
This command is part of the USER-MDI package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` doc page for more info.
Related commands
""""""""""""""""
:doc:`fix mdi/engine <fix_mdi_engine>`
Default
"""""""
None

View File

@ -14,7 +14,7 @@ Syntax
.. parsed-literal::
keyword = *delay* or *every* or *check* or *once* or *cluster* or *include* or *exclude* or *page* or *one* or *binsize*
keyword = *delay* or *every* or *check* or *once* or *cluster* or *include* or *exclude* or *page* or *one* or *binsize* or *collection/type* or *collection/interval*
*delay* value = N
N = delay building until this many steps since last build
*every* value = M
@ -47,6 +47,12 @@ Syntax
N = max number of neighbors of one atom
*binsize* value = size
size = bin size for neighbor list construction (distance units)
*collection/type* values = N arg1 ... argN
N = number of custom collections
arg = N separate lists of types (see below)
*collection/interval* values = N arg1 ... argN
N = number of custom collections
arg = N separate cutoffs for intervals (see below)
Examples
""""""""
@ -58,6 +64,8 @@ Examples
neigh_modify exclude group frozen frozen check no
neigh_modify exclude group residue1 chain3
neigh_modify exclude molecule/intra rigid
neigh_modify collection/type 2 1*2,5 3*4
neigh_modify collection/interval 2 1.0 10.0
Description
"""""""""""
@ -188,8 +196,9 @@ atom can have.
The *binsize* option allows you to specify what size of bins will be
used in neighbor list construction to sort and find neighboring atoms.
By default, for :doc:`neighbor style bin <neighbor>`, LAMMPS uses bins
that are 1/2 the size of the maximum pair cutoff. For :doc:`neighbor style multi <neighbor>`, the bins are 1/2 the size of the minimum pair
cutoff. Typically these are good values for minimizing the time for
that are 1/2 the size of the maximum pair cutoff. For :doc:`neighbor style multi <neighbor>`,
the bins are 1/2 the size of the collection interaction cutoff.
Typically these are good values for minimizing the time for
neighbor list construction. This setting overrides the default.
If you make it too big, there is little overhead due to
looping over bins, but more atoms are checked. If you make it too
@ -197,6 +206,31 @@ small, the optimal number of atoms is checked, but bin overhead goes
up. If you set the binsize to 0.0, LAMMPS will use the default
binsize of 1/2 the cutoff.
The *collection/type* option allows you to define collections of atom
types, used by the *multi* neighbor mode. By grouping atom types with
similar physical size or interaction cutoff lengths, one may be able
to improve performance by reducing
overhead. You must first specify the number of collections N to be
defined followed by N lists of types. Each list consists of a series of type
ranges separated by commas. The range can be specified as a
single numeric value, or a wildcard asterisk can be used to specify a range
of values. This takes the form "\*" or "\*n" or "n\*" or "m\*n". For
example, if M = the number of atom types, then an asterisk with no numeric
values means all types from 1 to M. A leading asterisk means all types
from 1 to n (inclusive). A trailing asterisk means all types from n to M
(inclusive). A middle asterisk means all types from m to n (inclusive).
Note that all atom types must be included in exactly one of the N collections.
The *collection/interval* option provides a similar capability. This
command allows a user to define collections by specifying a series of
cutoff intervals. LAMMPS will automatically sort atoms into these
intervals based on their type-dependent cutoffs or their finite size.
You must first specify the number of collections N to be defined
followed by N values representing the upper cutoff of each interval.
This command is particularly useful for granular pair styles where the
interaction distance of particles depends on their radius and may not
depend on their atom type.
Restrictions
""""""""""""

View File

@ -11,7 +11,7 @@ Syntax
neighbor skin style
* skin = extra distance beyond force cutoff (distance units)
* style = *bin* or *nsq* or *multi*
* style = *bin* or *nsq* or *multi* or *multi/old*
Examples
""""""""
@ -55,14 +55,31 @@ For the *bin* style, the bin size is set to 1/2 of
the largest cutoff distance between any pair of atom types and a
single set of bins is defined to search over for all atom types. This
can be inefficient if one pair of types has a very long cutoff, but
other type pairs have a much shorter cutoff. For style *multi* the
bin size is set to 1/2 of the shortest cutoff distance and multiple
sets of bins are defined to search over for different atom types.
other type pairs have a much shorter cutoff. The *multi* style uses
different sized bins for collections of different sized particles, where
"size" may mean the physical size of the particle or its cutoff
distance for interacting with other particles. Different
sets of bins are then used to construct the neighbor lists as further
described by Shire, Hanley, and Stratford :ref:`(Shire) <bytype-Shire>`.
This imposes some extra setup overhead, but the searches themselves
may be much faster for the short-cutoff cases.
See the :doc:`comm_modify mode multi <comm_modify>` command for a
communication option that may also be beneficial for simulations of
this kind.
may be much faster. By default, each atom type defines a separate
collection of particles. For systems where two or more atom types
have the same size (either physical size or cutoff distance), the
definition of collections can be customized, which can result in less
overhead and faster performance. See the :doc:`neigh_modify <neigh_modify>`
command for how to define custom collections. Whether or not the
collection definition is customized, see also the
:doc:`comm_modify mode multi <comm_modify>` command for communication
options that further improve performance in a manner consistent with
neighbor style *multi*.
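As an illustrative, purely hypothetical combination for a system with a wide
spread of particle sizes, one might use
neighbor 1.0 multi
neigh_modify collection/interval 2 1.0 10.0
comm_modify mode multi
where the skin and interval values are placeholders that should be chosen
for the actual model.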
An alternate style, *multi/old*, sets the bin size to 1/2 of the shortest
cutoff distance and defines multiple sets of bins to search over for
different atom types. This algorithm used to be the default *multi*
algorithm in LAMMPS but was found to be significantly slower than the new
approach. For now the old option is kept in case there are use cases
where *multi/old* outperforms the new *multi* style.
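For example, the old algorithm can still be selected explicitly with
neighbor 1.0 multi/old
where 1.0 is an arbitrary skin value in distance units.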
The :doc:`neigh_modify <neigh_modify>` command has additional options
that control how often neighbor lists are built and which pairs are
@ -90,3 +107,9 @@ Default
| 0.001 bin for units = si, skin = 0.001 meters = 1.0 mm
| 0.1 bin for units = cgs, skin = 0.1 cm = 1.0 mm
|
----------
.. _bytype-Shire:
**(Shire)** Shire, Hanley, and Stratford, Comp Part Mech (2020).


@ -1,10 +1,11 @@
.. index:: pair_style lj/smooth
.. index:: pair_style lj/smooth/gpu
.. index:: pair_style lj/smooth/omp
pair_style lj/smooth command
============================
Accelerator Variants: *lj/smooth/omp*
Accelerator Variants: *lj/smooth/gpu*, *lj/smooth/omp*
Syntax
""""""


@ -47,6 +47,7 @@ Agnolin
Ai
Aidan
aij
aimd
airebo
Aj
ajs
@ -196,6 +197,7 @@ Ballenegger
Bammann
Banna
Barashev
barnes
barostat
Barostats
barostatted
@ -312,6 +314,7 @@ br
Branduardi
Branicio
brennan
Briels
Brien
Brilliantov
Broadwell
@ -626,6 +629,7 @@ delflag
Dellago
delocalization
delocalized
Delong
delr
deltaHf
Dendrimer
@ -709,6 +713,7 @@ dodgerblue
dof
doi
Donadio
Donev
dotc
Doty
doxygen
@ -1143,6 +1148,7 @@ GMock
gneb
GNEB
Goldfarb
Gompper
Gonzalez-Melchor
googlemail
googletest
@ -1203,6 +1209,7 @@ Halver
Hamaker
Hamel
Hammerschmidt
Hanley
haptic
Hara
Harpertown
@ -1265,6 +1272,7 @@ holonomic
Homebrew
hooke
Hookean
hostname
hotpink
Houlle
howto
@ -1321,6 +1329,7 @@ ijk
ijkl
ik
Ikeshoji
Ilie
ilmenau
Ilmenau
ilp
@ -1723,6 +1732,7 @@ Lmpsdata
lmptype
LMT
ln
localhost
localTemp
localvectors
Loewen
@ -1853,6 +1863,8 @@ mc
McLachlan
md
mdf
MDI
mdi
mdpd
mDPD
meam
@ -3109,6 +3121,7 @@ Swinburne
Swol
Swope
Sx
sx
sy
Sy
symplectic
@ -3118,6 +3131,7 @@ sysdim
Syst
systemd
Sz
sz
Tabbernor
tabinner
Tadmor
@ -3136,6 +3150,7 @@ Tanmoy
Tartakovsky
taskset
taubi
taylor
tb
tchain
Tchain
@ -3358,6 +3373,7 @@ upenn
upto
Urbakh
Urbana
Usabiaga
usec
uSemiParallel
userguide
@ -3502,6 +3518,7 @@ Wikipedia
Wildcard
wildcard
wildcards
Winkler
Wirnsberger
wirtes
witin
@ -3513,6 +3530,7 @@ Worley
Wriggers
Wuppertal
Wurtzite
Wysocki
www
wx
Wx


@ -0,0 +1,53 @@
##### 2d overdamped brownian dynamics with self-propulsion force in direction of velocity. #####
variable gamma_t equal 1.0
variable temp equal 1.0
variable seed equal 1974019
variable fp equal 4.0
variable params string ${gamma_t}_${temp}_${fp}
units lj
dimension 2
newton off
lattice sq 0.4
region box block -16 16 -16 16 -0.2 0.2
create_box 1 box
create_atoms 1 box
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix step all brownian ${temp} ${seed} gamma_t ${gamma_t}
fix vel all propel/self velocity ${fp}
fix 2 all enforce2d
fix_modify vel virial yes
compute press all pressure NULL virial
thermo_style custom step temp epair c_press
#equilibration
timestep 0.0000000001
thermo 500
run 5000
reset_timestep 0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 12000


@ -0,0 +1,151 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### 2d overdamped brownian dynamics with self-propulsion force in direction of velocity. #####
variable gamma_t equal 1.0
variable temp equal 1.0
variable seed equal 1974019
variable fp equal 4.0
variable params string ${gamma_t}_${temp}_${fp}
variable params string 1_${temp}_${fp}
variable params string 1_1_${fp}
variable params string 1_1_4
units lj
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -16 16 -16 16 -0.2 0.2
create_box 1 box
Created orthogonal box = (-25.298221 -25.298221 -0.31622777) to (25.298221 25.298221 0.31622777)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 1024 atoms
create_atoms CPU = 0.001 seconds
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix step all brownian ${temp} ${seed} gamma_t ${gamma_t}
fix step all brownian 1 ${seed} gamma_t ${gamma_t}
fix step all brownian 1 1974019 gamma_t ${gamma_t}
fix step all brownian 1 1974019 gamma_t 1
fix vel all propel/self velocity ${fp}
fix vel all propel/self velocity 4
fix 2 all enforce2d
fix_modify vel virial yes
compute press all pressure NULL virial
thermo_style custom step temp epair c_press
#equilibration
timestep 0.0000000001
thermo 500
run 5000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.289 | 2.289 | 2.289 Mbytes
Step Temp E_pair c_press
0 1 0 -0.18336111
500 2.0519273e+10 0 -0.048238222
1000 1.9821717e+10 0 -0.4711053
1500 1.9697609e+10 0 -0.13539588
2000 2.0209443e+10 0 0.0094958039
2500 1.9591299e+10 0 0.40117118
3000 2.089566e+10 0 -0.036548251
3500 1.978692e+10 0 0.28282578
4000 2.0657848e+10 0 0.17618064
4500 2.0837353e+10 0 -0.080724651
5000 2.0348316e+10 0 -0.17471195
Loop time of 0.575164 on 1 procs for 5000 steps with 1024 atoms
Performance: 0.075 tau/day, 8693.168 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.0036819 | 0.0036819 | 0.0036819 | 0.0 | 0.64
Output | 0.00027752 | 0.00027752 | 0.00027752 | 0.0 | 0.05
Modify | 0.51999 | 0.51999 | 0.51999 | 0.0 | 90.41
Other | | 0.05121 | | | 8.90
Nlocal: 1024.00 ave 1024 max 1024 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 65.0000 ave 65 max 65 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
reset_timestep 0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 12000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.664 | 2.664 | 2.664 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4] c_press
0 2.0328444e+10 0 0 0 0 0 -0.17471195
1000 197017.59 0 0.018147562 0.019839233 0 0.037986796 -0.71897807
2000 197030.23 0 0.03909867 0.041721342 0 0.080820011 -0.30051929
3000 201997.2 0 0.065694399 0.06235257 0 0.12804697 -0.85167039
4000 199927.76 0 0.085698715 0.080328815 0 0.16602753 0.18493117
5000 198665.7 0 0.10896054 0.097021266 0 0.2059818 -0.090735406
6000 199277.78 0 0.13081111 0.11724814 0 0.24805925 -0.18189034
7000 199850.54 0 0.14721838 0.13806858 0 0.28528696 0.11334674
8000 191577.11 0 0.16582149 0.15935853 0 0.32518002 -0.73284569
9000 197331.29 0 0.17995704 0.18652927 0 0.3664863 -0.015558407
10000 197048.17 0 0.2034106 0.20329856 0 0.40670916 0.36985211
11000 200105.54 0 0.21809835 0.21966463 0 0.43776298 0.36437
12000 203180.39 0 0.23810386 0.23666184 0 0.47476569 -0.072006034
Loop time of 1.37465 on 1 procs for 12000 steps with 1024 atoms
Performance: 7542.303 tau/day, 8729.517 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.00012231 | 0.00012231 | 0.00012231 | 0.0 | 0.01
Comm | 0.0024607 | 0.0024607 | 0.0024607 | 0.0 | 0.18
Output | 0.00068665 | 0.00068665 | 0.00068665 | 0.0 | 0.05
Modify | 1.2479 | 1.2479 | 1.2479 | 0.0 | 90.78
Other | | 0.1235 | | | 8.98
Nlocal: 1024.00 ave 1024 max 1024 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:01


@ -0,0 +1,151 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### 2d overdamped brownian dynamics with self-propulsion force in direction of velocity. #####
variable gamma_t equal 1.0
variable temp equal 1.0
variable seed equal 1974019
variable fp equal 4.0
variable params string ${gamma_t}_${temp}_${fp}
variable params string 1_${temp}_${fp}
variable params string 1_1_${fp}
variable params string 1_1_4
units lj
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -16 16 -16 16 -0.2 0.2
create_box 1 box
Created orthogonal box = (-25.298221 -25.298221 -0.31622777) to (25.298221 25.298221 0.31622777)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 1024 atoms
create_atoms CPU = 0.001 seconds
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix step all brownian ${temp} ${seed} gamma_t ${gamma_t}
fix step all brownian 1 ${seed} gamma_t ${gamma_t}
fix step all brownian 1 1974019 gamma_t ${gamma_t}
fix step all brownian 1 1974019 gamma_t 1
fix vel all propel/self velocity ${fp}
fix vel all propel/self velocity 4
fix 2 all enforce2d
fix_modify vel virial yes
compute press all pressure NULL virial
thermo_style custom step temp epair c_press
#equilibration
timestep 0.0000000001
thermo 500
run 5000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.289 | 2.289 | 2.289 Mbytes
Step Temp E_pair c_press
0 1 0 -0.18336111
500 1.9862591e+10 0 -0.32013566
1000 2.0093184e+10 0 -0.36609742
1500 1.9562283e+10 0 -0.53349351
2000 1.9903977e+10 0 0.63783249
2500 2.0260128e+10 0 0.30046413
3000 1.9948065e+10 0 -0.63093105
3500 1.9507486e+10 0 0.48762848
4000 2.0049087e+10 0 0.40289309
4500 1.9975813e+10 0 0.57649363
5000 2.0129291e+10 0 -0.41288352
Loop time of 0.238949 on 4 procs for 5000 steps with 1024 atoms
Performance: 0.181 tau/day, 20924.952 timesteps/s
92.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.0080078 | 0.024718 | 0.031782 | 6.2 | 10.34
Output | 0.0001812 | 0.00029999 | 0.00063467 | 0.0 | 0.13
Modify | 0.13401 | 0.14401 | 0.15438 | 2.4 | 60.27
Other | | 0.06992 | | | 29.26
Nlocal: 256.000 ave 256 max 256 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 33.0000 ave 33 max 33 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
reset_timestep 0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 12000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.664 | 2.664 | 2.664 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4] c_press
0 2.0109634e+10 0 0 0 0 0 -0.41288352
1000 195711.46 0 0.020076462 0.020523099 0 0.040599561 -0.32125126
2000 203263.85 0 0.039242992 0.039661282 0 0.078904274 0.11008705
3000 197417.54 0 0.064938128 0.057716419 0 0.12265455 0.16967601
4000 200505.97 0 0.086511225 0.074975267 0 0.16148649 0.31338473
5000 199373.77 0 0.10583263 0.098175658 0 0.20400829 0.34205791
6000 192881.14 0 0.12152088 0.11706037 0 0.23858125 -0.27870467
7000 203045.3 0 0.1383248 0.13629503 0 0.27461983 -0.046936646
8000 198544.08 0 0.16064738 0.1582206 0 0.31886798 -0.18803452
9000 205450.74 0 0.17926529 0.1829047 0 0.36216999 0.47191228
10000 200371.73 0 0.20084273 0.20365189 0 0.40449463 0.093098262
11000 202911.93 0 0.21569236 0.22221715 0 0.43790952 -0.38430031
12000 192590.04 0 0.24041439 0.24114487 0 0.48155926 -0.1677052
Loop time of 0.443026 on 4 procs for 12000 steps with 1024 atoms
Performance: 23402.683 tau/day, 27086.439 timesteps/s
97.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 3.2663e-05 | 3.3855e-05 | 3.4809e-05 | 0.0 | 0.01
Comm | 0.0030291 | 0.0030628 | 0.0030825 | 0.0 | 0.69
Output | 0.00027895 | 0.00051624 | 0.001184 | 0.0 | 0.12
Modify | 0.31607 | 0.33372 | 0.37391 | 4.0 | 75.33
Other | | 0.1057 | | | 23.86
Nlocal: 256.000 ave 259 max 253 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Nghost: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 15
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,54 @@
##### overdamped dynamics of non-interacting ellipsoids in 2D #####
variable rng string gaussian
variable seed string 198098
variable temp string 1.0
variable gamma_r_1 string inf
variable gamma_r_2 string inf
variable gamma_r_3 string 0.1
variable gamma_t_1 string 5.0
variable gamma_t_2 string 7.0
variable gamma_t_3 string inf
variable params string ${rng}_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
units lj
atom_style hybrid dipole ellipsoid
dimension 2
newton off
lattice sq 0.4
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
create_atoms 1 box
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * shape 3.0 1.0 1.0
set type * quat/random ${seed}
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/asphere ${temp} ${seed} rng ${rng} &
gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} &
gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} &
dipole 1.0 0.0 0.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type &
# x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000


@ -0,0 +1,54 @@
##### overdamped dynamics of non-interacting ellipsoids in 3D #####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_r_1 string 2.0
variable gamma_r_2 string 0.25
variable gamma_r_3 string 0.1
variable gamma_t_1 string 5.0
variable gamma_t_2 string 7.0
variable gamma_t_3 string 9.0
variable params string ${rng}_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
units lj
atom_style hybrid dipole ellipsoid
dimension 3
newton off
lattice sc 0.4
region box block -8 8 -8 8 -8 8
create_box 1 box
create_atoms 1 box
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * shape 3.0 1.0 1.0
set type * quat/random ${seed}
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/asphere ${temp} ${seed} rng ${rng} &
gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} &
gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} &
dipole 1.0 0.0 0.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type &
# x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000


@ -0,0 +1,145 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of non-interacting ellipsoids in 2D #####
variable rng string gaussian
variable seed string 198098
variable temp string 1.0
variable gamma_r_1 string inf
variable gamma_r_2 string inf
variable gamma_r_3 string 0.1
variable gamma_t_1 string 5.0
variable gamma_t_2 string 7.0
variable gamma_t_3 string inf
variable params string ${rng}_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_5.0_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_5.0_7.0_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_5.0_7.0_inf
units lj
atom_style hybrid dipole ellipsoid
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
Created orthogonal box = (-47.434165 -47.434165 -0.31622777) to (47.434165 47.434165 0.31622777)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.005 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
3600 settings made for dipole/random
set type * shape 3.0 1.0 1.0
Setting atom values ...
3600 settings made for shape
set type * quat/random ${seed}
set type * quat/random 198098
Setting atom values ...
3600 settings made for quat/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/asphere ${temp} ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen 5.0 ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen 5.0 7.0 ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen 5.0 7.0 inf dipole 1.0 0.0 0.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 5.114 | 5.114 | 5.114 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 0.99972222 0 0 0 0 0
100 34376.187 0 0.00034728749 0.00034318997 0 0.00069047747
200 36135.708 0 0.00067452936 0.0006818928 0 0.0013564222
300 34444.929 0 0.0010189319 0.00099849203 0 0.002017424
400 35668.312 0 0.0013648699 0.0013311884 0 0.0026960583
500 35388.615 0 0.0017330203 0.0016077184 0 0.0033407387
600 35180.065 0 0.002052223 0.0019394635 0 0.0039916865
700 34035.38 0 0.0024329956 0.0022664905 0 0.0046994861
800 34581.664 0 0.002783885 0.0025794872 0 0.0053633723
900 34579.945 0 0.003163442 0.0029351952 0 0.0060986372
1000 34158.066 0 0.0035589034 0.0032627605 0 0.0068216639
1100 33453.827 0 0.0038861895 0.003565372 0 0.0074515615
1200 33608.06 0 0.0041325698 0.0038943268 0 0.0080268966
1300 34381.633 0 0.004405682 0.0043294156 0 0.0087350976
1400 32925.746 0 0.0047383547 0.0046803517 0 0.0094187065
1500 34809.764 0 0.0051149571 0.0049309746 0 0.010045932
1600 33580.096 0 0.0054893472 0.0052465377 0 0.010735885
1700 34596.275 0 0.00581894 0.0056500316 0 0.011468972
1800 33926.736 0 0.0062129617 0.0059796125 0 0.012192574
1900 35577.131 0 0.0065668637 0.0062530163 0 0.01281988
2000 34224.967 0 0.0070005917 0.006598912 0 0.013599504
2100 33991.406 0 0.0073134826 0.0069119252 0 0.014225408
2200 34647.054 0 0.007659301 0.0073434715 0 0.015002772
2300 33956.835 0 0.007965191 0.0076318537 0 0.015597045
2400 35272.549 0 0.0082467116 0.007929202 0 0.016175914
2500 33901.494 0 0.0086251299 0.0082790757 0 0.016904206
2600 34138.227 0 0.0089419364 0.0086639744 0 0.017605911
2700 33691.013 0 0.0093083376 0.0090219118 0 0.018330249
2800 34716.817 0 0.0095840095 0.0094118945 0 0.018995904
2900 34473.982 0 0.0099773501 0.0098167668 0 0.019794117
3000 33406.776 0 0.010391969 0.010098625 0 0.020490594
Loop time of 3.67112 on 1 procs for 3000 steps with 3600 atoms
Performance: 706.051 tau/day, 817.189 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.037973 | 0.037973 | 0.037973 | 0.0 | 1.03
Output | 0.0040674 | 0.0040674 | 0.0040674 | 0.0 | 0.11
Modify | 3.515 | 3.515 | 3.515 | 0.0 | 95.75
Other | | 0.1141 | | | 3.11
Nlocal: 3600.00 ave 3600 max 3600 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 121.000 ave 121 max 121 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:03


@ -0,0 +1,145 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of non-interacting ellipsoids in 2D #####
variable rng string gaussian
variable seed string 198098
variable temp string 1.0
variable gamma_r_1 string inf
variable gamma_r_2 string inf
variable gamma_r_3 string 0.1
variable gamma_t_1 string 5.0
variable gamma_t_2 string 7.0
variable gamma_t_3 string inf
variable params string ${rng}_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_5.0_${gamma_t_2}_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_5.0_7.0_${gamma_t_3}
variable params string gaussian_1.0_inf_inf_0.1_5.0_7.0_inf
units lj
atom_style hybrid dipole ellipsoid
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
Created orthogonal box = (-47.434165 -47.434165 -0.31622777) to (47.434165 47.434165 0.31622777)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.007 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
3600 settings made for dipole/random
set type * shape 3.0 1.0 1.0
Setting atom values ...
3600 settings made for shape
set type * quat/random ${seed}
set type * quat/random 198098
Setting atom values ...
3600 settings made for quat/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/asphere ${temp} ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen 5.0 ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen 5.0 7.0 ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng gaussian gamma_r_eigen inf inf 0.1 gamma_t_eigen 5.0 7.0 inf dipole 1.0 0.0 0.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 5.102 | 5.102 | 5.102 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 0.99972222 0 0 0 0 0
100 33874.438 0 0.0003458147 0.00033165629 0 0.00067747099
200 34893.188 0 0.00066290209 0.00068146332 0 0.0013443654
300 34494.226 0 0.0010064902 0.0010276646 0 0.0020341548
400 34537.887 0 0.0013457339 0.0014057042 0 0.0027514381
500 34458.46 0 0.0017006949 0.0017120083 0 0.0034127033
600 33229.977 0 0.0020841613 0.0020485346 0 0.0041326959
700 33288.631 0 0.0024270272 0.0023673304 0 0.0047943576
800 35317.512 0 0.0027924435 0.0026950912 0 0.0054875347
900 33094.299 0 0.0031503627 0.0030789319 0 0.0062292946
1000 35801.751 0 0.003489398 0.0034594626 0 0.0069488607
1100 33427.701 0 0.0038547506 0.0038375809 0 0.0076923316
1200 34675.07 0 0.0041824195 0.0042017298 0 0.0083841493
1300 33080.294 0 0.0045258945 0.0045816356 0 0.0091075301
1400 34927.288 0 0.0048252992 0.0049215701 0 0.0097468693
1500 34338.558 0 0.0051959155 0.0053020102 0 0.010497926
1600 34686.248 0 0.0055111463 0.0056220225 0 0.011133169
1700 34336.158 0 0.0059240394 0.0059060319 0 0.011830071
1800 34315.859 0 0.0063027944 0.0063004467 0 0.012603241
1900 35096.721 0 0.0066098525 0.00672222 0 0.013332073
2000 33544.18 0 0.0069401261 0.007074124 0 0.01401425
2100 33863.219 0 0.0072726502 0.0074175954 0 0.014690246
2200 34705.892 0 0.0075586722 0.0077552683 0 0.015313941
2300 34025.357 0 0.0079046728 0.0081760519 0 0.016080725
2400 34741.849 0 0.008252969 0.0085203087 0 0.016773278
2500 34406.959 0 0.0085370091 0.0088556377 0 0.017392647
2600 34062.63 0 0.0088134153 0.0092536326 0 0.018067048
2700 34677.666 0 0.0090592854 0.0096225881 0 0.018681874
2800 33464.216 0 0.0093984162 0.0099647695 0 0.019363186
2900 32920.721 0 0.0098222985 0.010366517 0 0.020188816
3000 34539.66 0 0.010133317 0.01068102 0 0.020814337
Loop time of 1.12143 on 4 procs for 3000 steps with 3600 atoms
Performance: 2311.341 tau/day, 2675.163 timesteps/s
96.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.036017 | 0.042828 | 0.051558 | 2.7 | 3.82
Output | 0.0012608 | 0.0025993 | 0.0063775 | 4.3 | 0.23
Modify | 0.9002 | 0.93095 | 0.99546 | 3.9 | 83.01
Other | | 0.1451 | | | 12.93
Nlocal: 900.000 ave 900 max 900 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 61.0000 ave 61 max 61 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01


@ -0,0 +1,145 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of non-interacting ellipsoids in 3D #####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_r_1 string 2.0
variable gamma_r_2 string 0.25
variable gamma_r_3 string 0.1
variable gamma_t_1 string 5.0
variable gamma_t_2 string 7.0
variable gamma_t_3 string 9.0
variable params string ${rng}_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_5.0_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_5.0_7.0_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_5.0_7.0_9.0
units lj
atom_style hybrid dipole ellipsoid
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.005 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
4096 settings made for dipole/random
set type * shape 3.0 1.0 1.0
Setting atom values ...
4096 settings made for shape
set type * quat/random ${seed}
set type * quat/random 198098
Setting atom values ...
4096 settings made for quat/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/asphere ${temp} ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen 5.0 ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen 5.0 7.0 ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen 5.0 7.0 9.0 dipole 1.0 0.0 0.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 5.219 | 5.219 | 5.219 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 1.4996338 0 0 0 0 0
100 45690.838 0 0.00029994317 0.00029953902 0.00030002809 0.00089951027
200 45571.166 0 0.00061376797 0.00060955238 0.00061153551 0.0018348559
300 44693.418 0 0.00093058034 0.00089383536 0.00091554588 0.0027399616
400 44831.846 0 0.001250227 0.0012230128 0.0012120517 0.0036852914
500 45028.015 0 0.0015448869 0.0015339549 0.0014978843 0.0045767262
600 45895.442 0 0.0018621952 0.0018169905 0.0018352784 0.0055144641
700 45858.744 0 0.0021617097 0.0021137714 0.0021360394 0.0064115206
800 45155.215 0 0.002428445 0.0024288837 0.0024516737 0.0073090023
900 45427.427 0 0.0027265978 0.0027662531 0.0027329878 0.0082258387
1000 45398.166 0 0.0030685345 0.0030805014 0.0029765916 0.0091256275
1100 44622.428 0 0.0033766954 0.0033976168 0.0032745406 0.010048853
1200 45500.277 0 0.0036410565 0.0036840528 0.0035831659 0.010908275
1300 45265.8 0 0.0039143146 0.0039419334 0.0038761633 0.011732411
1400 45482.435 0 0.0042006542 0.0043373651 0.004164002 0.012702021
1500 45126.629 0 0.0044647379 0.0046021855 0.004487041 0.013553965
1600 45178.172 0 0.0047726618 0.0049110287 0.0048012671 0.014484958
1700 44918.685 0 0.005104787 0.0052522662 0.0050844375 0.015441491
1800 44776.678 0 0.0054395368 0.0056092038 0.0054623875 0.016511128
1900 46035.987 0 0.0057735872 0.0059357043 0.0057296009 0.017438892
2000 45436.517 0 0.0060837459 0.0063485717 0.0059769119 0.018409229
2100 45871.502 0 0.0063736337 0.0066551978 0.0063077439 0.019336575
2200 45511.847 0 0.0066419141 0.0069700452 0.0065553318 0.020167291
2300 45597.047 0 0.0069251517 0.0073015716 0.0068945654 0.021121289
2400 44832.007 0 0.0071894253 0.0076238221 0.0071638554 0.021977103
2500 45668.42 0 0.0074351304 0.0079594991 0.0075390719 0.022933701
2600 45248.483 0 0.007781496 0.008293944 0.0077956068 0.023871047
2700 45308.515 0 0.0080302993 0.0086329679 0.0081457335 0.024809001
2800 45637.72 0 0.0083889026 0.0089173198 0.0086032427 0.025909465
2900 45909.343 0 0.0087169392 0.009181179 0.0088778569 0.026775975
3000 45213.613 0 0.0090508891 0.0094253485 0.0092660321 0.02774227
Loop time of 4.13937 on 1 procs for 3000 steps with 4096 atoms
Performance: 626.183 tau/day, 724.749 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.24709 | 0.24709 | 0.24709 | 0.0 | 5.97
Output | 0.004636 | 0.004636 | 0.004636 | 0.0 | 0.11
Modify | 3.7604 | 3.7604 | 3.7604 | 0.0 | 90.85
Other | | 0.1272 | | | 3.07
Nlocal: 4096.00 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 817.000 ave 817 max 817 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:04


@ -0,0 +1,145 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of non-interacting ellipsoids in 3D #####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_r_1 string 2.0
variable gamma_r_2 string 0.25
variable gamma_r_3 string 0.1
variable gamma_t_1 string 5.0
variable gamma_t_2 string 7.0
variable gamma_t_3 string 9.0
variable params string ${rng}_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_${temp}_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_${gamma_r_1}_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_${gamma_r_2}_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_${gamma_r_3}_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_${gamma_t_1}_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_5.0_${gamma_t_2}_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_5.0_7.0_${gamma_t_3}
variable params string uniform_1.0_2.0_0.25_0.1_5.0_7.0_9.0
units lj
atom_style hybrid dipole ellipsoid
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
2 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.002 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
4096 settings made for dipole/random
set type * shape 3.0 1.0 1.0
Setting atom values ...
4096 settings made for shape
set type * quat/random ${seed}
set type * quat/random 198098
Setting atom values ...
4096 settings made for quat/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/asphere ${temp} ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 ${seed} rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng ${rng} gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen ${gamma_r_1} ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 ${gamma_r_2} ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 ${gamma_r_3} gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen ${gamma_t_1} ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen 5.0 ${gamma_t_2} ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen 5.0 7.0 ${gamma_t_3} dipole 1.0 0.0 0.0
fix 1 all brownian/asphere 1.0 198098 rng uniform gamma_r_eigen 2.0 0.25 0.1 gamma_t_eigen 5.0 7.0 9.0 dipole 1.0 0.0 0.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 5.152 | 5.152 | 5.152 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 1.4996338 0 0 0 0 0
100 45236.508 0 0.00030817418 0.00030717742 0.0003019227 0.0009172743
200 45564.566 0 0.00062027526 0.00062110132 0.0006080391 0.0018494157
300 46232.801 0 0.00091155216 0.00094473459 0.00093009391 0.0027863807
400 45250.414 0 0.0011980791 0.0012538262 0.0012201461 0.0036720513
500 45217.133 0 0.0015186813 0.0015752994 0.001509437 0.0046034177
600 45531.276 0 0.0018194588 0.0019243758 0.0018209246 0.0055647592
700 44834.624 0 0.0021277747 0.0022417115 0.0021352036 0.0065046898
800 45413.998 0 0.0024558838 0.0025741787 0.0024088704 0.0074389329
900 45668.624 0 0.0027366171 0.002858242 0.0027580782 0.0083529374
1000 45809.223 0 0.0030331425 0.003186293 0.0030414906 0.0092609261
1100 45193.019 0 0.0033199824 0.0034668659 0.003298885 0.010085733
1200 44522.927 0 0.0036503132 0.0037490684 0.0036089852 0.011008367
1300 45214.567 0 0.0039958617 0.0040881934 0.0038709079 0.011954963
1400 45217.997 0 0.004276499 0.0044624985 0.0041104891 0.012849487
1500 45497.171 0 0.0045943272 0.0047116875 0.0044113504 0.013717365
1600 45905.187 0 0.0049004996 0.0049982014 0.0047394999 0.014638201
1700 45551.346 0 0.0051540939 0.0053187249 0.0050861052 0.015558924
1800 45347.782 0 0.0054101891 0.0056306 0.0053515873 0.016392376
1900 45107.895 0 0.005743705 0.0059584896 0.0056220384 0.017324233
2000 45043.389 0 0.0059803588 0.006230449 0.005911555 0.018122363
2100 45433.293 0 0.0062610364 0.0066140744 0.0062152977 0.019090408
2200 45804.217 0 0.0064995183 0.0068831274 0.0064971789 0.019879825
2300 45697.516 0 0.0067910846 0.0071845673 0.0068046192 0.020780271
2400 45447.422 0 0.0071022706 0.0074743709 0.0070983185 0.02167496
2500 45395.18 0 0.0073817023 0.0077467991 0.0074263196 0.022554821
2600 45943.044 0 0.0075953233 0.007997707 0.0076508583 0.023243889
2700 45859.978 0 0.0079082128 0.0082090043 0.0078853376 0.024002555
2800 45822.007 0 0.0082607534 0.0084510061 0.0081985549 0.024910314
2900 45438.456 0 0.0085958203 0.0088807705 0.0084755353 0.025952126
3000 45060.957 0 0.0089017992 0.0090966159 0.0086718875 0.026670303
Loop time of 1.23282 on 4 procs for 3000 steps with 4096 atoms
Performance: 2102.502 tau/day, 2433.452 timesteps/s
97.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 8.5831e-06 | 8.5831e-06 | 8.5831e-06 | 0.0 | 0.00
Comm | 0.10931 | 0.11473 | 0.11748 | 0.9 | 9.31
Output | 0.001375 | 0.0018924 | 0.0034099 | 2.0 | 0.15
Modify | 0.97744 | 0.99158 | 1.0089 | 1.3 | 80.43
Other | | 0.1246 | | | 10.11
Nlocal: 1024.00 ave 1035 max 1016 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Nghost: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:01


@ -0,0 +1,43 @@
##### dynamics of non-interacting point particles in 2D #####
variable rng string gaussian
variable seed string 198098
variable temp string 5.0
variable gamma_t string 1.0
variable params string ${rng}_${temp}_${gamma_t}
units lj
atom_style atomic
dimension 2
newton off
lattice sq 0.4
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
create_atoms 1 box
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian ${temp} ${seed} rng ${rng} gamma_t ${gamma_t}
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type &
# x y z xu yu zu fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000


@ -0,0 +1,44 @@
##### overdamped dynamics of non-interacting point particles in 3D #####
variable rng string gaussian
variable seed string 198098
variable temp string 5.0
variable gamma_t string 1.0
variable params string ${rng}_${temp}_${gamma_t}
units lj
atom_style atomic
dimension 3
newton off
lattice sc 0.4
region box block -8 8 -8 8 -8 8
create_box 1 box
create_atoms 1 box
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian ${temp} ${seed} rng ${rng} gamma_t ${gamma_t}
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type &
# x y z xu yu zu fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000


@ -0,0 +1,119 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### dynamics of non-interacting point particles in 2D #####
variable rng string gaussian
variable seed string 198098
variable temp string 5.0
variable gamma_t string 1.0
variable params string ${rng}_${temp}_${gamma_t}
variable params string gaussian_${temp}_${gamma_t}
variable params string gaussian_5.0_${gamma_t}
variable params string gaussian_5.0_1.0
units lj
atom_style atomic
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
Created orthogonal box = (-47.434165 -47.434165 -0.31622777) to (47.434165 47.434165 0.31622777)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.003 seconds
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian ${temp} ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t 1.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type # x y z xu yu zu fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.664 | 2.664 | 2.664 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 0.99972222 0 0 0 0 0
100 1022861.2 0 0.010252464 0.0095481044 0 0.019800568
200 986781.19 0 0.020552091 0.019485252 0 0.040037343
300 1030219 0 0.030642552 0.028377678 0 0.05902023
400 1003322.5 0 0.040610693 0.038179284 0 0.078789978
500 989343.12 0 0.049978908 0.047445856 0 0.097424764
600 1029781.3 0 0.059551719 0.057941149 0 0.11749287
700 999447.72 0 0.06979546 0.067552325 0 0.13734778
800 995373.97 0 0.080049251 0.078006344 0 0.1580556
900 1011991.4 0 0.089753134 0.087065214 0 0.17681835
1000 1006017.1 0 0.10041092 0.097934217 0 0.19834514
1100 997762.63 0 0.11229742 0.10841547 0 0.22071289
1200 1011707.8 0 0.12006388 0.1190115 0 0.23907538
1300 1012099.1 0 0.13097486 0.12996632 0 0.26094117
1400 997602.43 0 0.14345778 0.13830585 0 0.28176362
1500 1005358.1 0 0.15441686 0.14927539 0 0.30369225
1600 1007081.8 0 0.16496828 0.15936363 0 0.3243319
1700 990284.9 0 0.1747286 0.16818246 0 0.34291106
1800 969006.97 0 0.18228778 0.17972813 0 0.3620159
1900 998066.69 0 0.19338277 0.19226121 0 0.38564397
2000 972300.66 0 0.20352485 0.20145928 0 0.40498413
2100 985025.88 0 0.21283854 0.21090075 0 0.42373929
2200 1010964.6 0 0.22279055 0.22110734 0 0.44389789
2300 975819.44 0 0.23128131 0.23226488 0 0.46354619
2400 977043.53 0 0.24284105 0.24301689 0 0.48585794
2500 969708.21 0 0.25415238 0.25354284 0 0.50769522
2600 981969.5 0 0.26457173 0.26318018 0 0.52775192
2700 987261.1 0 0.27497004 0.27761213 0 0.55258218
2800 1005751.1 0 0.28530448 0.28715428 0 0.57245876
2900 975930.11 0 0.29394811 0.29896948 0 0.59291759
3000 997388.08 0 0.30674701 0.31193573 0 0.61868274
Loop time of 1.501 on 1 procs for 3000 steps with 3600 atoms
Performance: 1726.852 tau/day, 1998.672 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.00059271 | 0.00059271 | 0.00059271 | 0.0 | 0.04
Comm | 0.0055437 | 0.0055437 | 0.0055437 | 0.0 | 0.37
Output | 0.0039999 | 0.0039999 | 0.0039999 | 0.0 | 0.27
Modify | 1.3852 | 1.3852 | 1.3852 | 0.0 | 92.28
Other | | 0.1057 | | | 7.04
Nlocal: 3600.00 ave 3600 max 3600 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 21
Dangerous builds = 0
Total wall time: 0:00:01


@ -0,0 +1,119 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### dynamics of non-interacting point particles in 2D #####
variable rng string gaussian
variable seed string 198098
variable temp string 5.0
variable gamma_t string 1.0
variable params string ${rng}_${temp}_${gamma_t}
variable params string gaussian_${temp}_${gamma_t}
variable params string gaussian_5.0_${gamma_t}
variable params string gaussian_5.0_1.0
units lj
atom_style atomic
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
Created orthogonal box = (-47.434165 -47.434165 -0.31622777) to (47.434165 47.434165 0.31622777)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian ${temp} ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t 1.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type # x y z xu yu zu fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.664 | 2.664 | 2.664 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 0.99972222 0 0 0 0 0
100 1017972.1 0 0.010094052 0.0097502899 0 0.019844342
200 1004552.1 0 0.020125116 0.01957629 0 0.039701406
300 1017712.9 0 0.030271373 0.029411656 0 0.059683029
400 1016693.8 0 0.040610061 0.038605869 0 0.07921593
500 999527.84 0 0.049451389 0.049042225 0 0.098493614
600 961157.92 0 0.059691948 0.059033176 0 0.11872512
700 1006804.9 0 0.071205977 0.069972106 0 0.14117808
800 1007321.8 0 0.081136977 0.079825976 0 0.16096295
900 1002801.7 0 0.091236148 0.090833816 0 0.18206996
1000 1010134.7 0 0.10091362 0.10023906 0 0.20115269
1100 990246.55 0 0.1118367 0.11141049 0 0.22324719
1200 1010555.5 0 0.12091736 0.12355456 0 0.24447192
1300 997117.19 0 0.13099592 0.13292775 0 0.26392367
1400 1020817.1 0 0.14167961 0.14172898 0 0.28340859
1500 1015048.1 0 0.15225884 0.15162948 0 0.30388833
1600 990291.98 0 0.16460973 0.16251919 0 0.32712891
1700 980848.58 0 0.17380313 0.17351201 0 0.34731513
1800 1000673.8 0 0.18383991 0.18175453 0 0.36559445
1900 1009388.9 0 0.19411523 0.19367453 0 0.38778976
2000 1005935.9 0 0.2015342 0.20585359 0 0.40738779
2100 985500.56 0 0.21161056 0.21238463 0 0.42399519
2200 997241.34 0 0.21841986 0.22117922 0 0.43959908
2300 1011672.3 0 0.22688099 0.23155741 0 0.4584384
2400 989837.68 0 0.23849839 0.24219 0 0.48068839
2500 1035706.8 0 0.24541408 0.24947563 0 0.49488971
2600 992370.08 0 0.25537803 0.25758332 0 0.51296135
2700 990586.56 0 0.26542605 0.26762286 0 0.53304892
2800 1002767.3 0 0.27570392 0.27874972 0 0.55445363
2900 995307.27 0 0.28580946 0.29115624 0 0.5769657
3000 1024317.7 0 0.29493208 0.30208924 0 0.59702132
Loop time of 0.413047 on 4 procs for 3000 steps with 3600 atoms
Performance: 6275.312 tau/day, 7263.093 timesteps/s
98.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.00016236 | 0.00016338 | 0.00016403 | 0.0 | 0.04
Comm | 0.0026367 | 0.0030084 | 0.0031497 | 0.4 | 0.73
Output | 0.0011849 | 0.0013574 | 0.0018065 | 0.7 | 0.33
Modify | 0.34447 | 0.35223 | 0.36357 | 1.2 | 85.28
Other | | 0.05629 | | | 13.63
Nlocal: 900.000 ave 906 max 891 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Nghost: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 22
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,119 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of non-interacting point particles in 3D #####
variable rng string gaussian
variable seed string 198098
variable temp string 5.0
variable gamma_t string 1.0
variable params string ${rng}_${temp}_${gamma_t}
variable params string gaussian_${temp}_${gamma_t}
variable params string gaussian_5.0_${gamma_t}
variable params string gaussian_5.0_1.0
units lj
atom_style atomic
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.002 seconds
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian ${temp} ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t 1.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type # x y z xu yu zu fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.694 | 2.694 | 2.694 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 1.4996338 0 0 0 0 0
100 1500286.3 0 0.0098123603 0.010352169 0.010242435 0.030406964
200 1488308 0 0.019934427 0.019968198 0.020471735 0.06037436
300 1484472.4 0 0.029397156 0.030749312 0.030121294 0.090267762
400 1517938.7 0 0.039217504 0.041440617 0.040512943 0.12117106
500 1492769.5 0 0.04890343 0.051561801 0.050614941 0.15108017
600 1510159.6 0 0.059770181 0.061650364 0.061298117 0.18271866
700 1485424.1 0 0.070537955 0.071144877 0.071141546 0.21282438
800 1496377.2 0 0.081291995 0.082546059 0.080653381 0.24449144
900 1484409.1 0 0.090940427 0.093298981 0.091328056 0.27556746
1000 1503322.4 0 0.10176921 0.10246052 0.10151773 0.30574747
1100 1503322.4 0 0.11295993 0.11052632 0.11053406 0.33402031
1200 1489236.2 0 0.12509723 0.11961982 0.12146498 0.36618203
1300 1476050.3 0 0.13449034 0.12941323 0.1309765 0.39488007
1400 1520818.7 0 0.14613571 0.13788044 0.14083944 0.42485558
1500 1498936.4 0 0.15752286 0.15057712 0.15063399 0.45873397
1600 1507524.1 0 0.16793678 0.16095681 0.16063531 0.4895289
1700 1480581.2 0 0.17748019 0.172614 0.16922383 0.51931802
1800 1505353.6 0 0.18850931 0.18304171 0.18063119 0.55218221
1900 1491234.7 0 0.19836402 0.19306339 0.1929707 0.58439811
2000 1519868.8 0 0.20698191 0.20211344 0.20328302 0.61237838
2100 1493919.5 0 0.21453524 0.21186097 0.21423293 0.64062914
2200 1517098.6 0 0.2257338 0.22381647 0.22474081 0.67429108
2300 1481270.7 0 0.23499747 0.23348379 0.23498244 0.70346369
2400 1495445.1 0 0.24535894 0.24290239 0.24229161 0.73055293
2500 1522839.3 0 0.25695938 0.25109669 0.25214541 0.76020148
2600 1518697.4 0 0.26680819 0.26120216 0.2604112 0.78842155
2700 1529283.1 0 0.27524422 0.26942681 0.27148042 0.81615146
2800 1500557.5 0 0.28436226 0.27957592 0.27935619 0.84329437
2900 1509711.1 0 0.2948528 0.28562401 0.29055956 0.87103637
3000 1522712.8 0 0.30347033 0.2975063 0.30121685 0.90219348
Loop time of 2.35056 on 1 procs for 3000 steps with 4096 atoms
Performance: 1102.718 tau/day, 1276.293 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.00077772 | 0.00077772 | 0.00077772 | 0.0 | 0.03
Comm | 0.010985 | 0.010985 | 0.010985 | 0.0 | 0.47
Output | 0.0045807 | 0.0045807 | 0.0045807 | 0.0 | 0.19
Modify | 2.2116 | 2.2116 | 2.2116 | 0.0 | 94.09
Other | | 0.1226 | | | 5.22
Nlocal: 4096.00 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 24
Dangerous builds = 0
Total wall time: 0:00:02

View File

@ -0,0 +1,119 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of non-interacting point particles in 3D #####
variable rng string gaussian
variable seed string 198098
variable temp string 5.0
variable gamma_t string 1.0
variable params string ${rng}_${temp}_${gamma_t}
variable params string gaussian_${temp}_${gamma_t}
variable params string gaussian_5.0_${gamma_t}
variable params string gaussian_5.0_1.0
units lj
atom_style atomic
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
2 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.001 seconds
mass * 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian ${temp} ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 ${seed} rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng ${rng} gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t ${gamma_t}
fix 1 all brownian 5.0 198098 rng gaussian gamma_t 1.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type # x y z xu yu zu fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 2.672 | 2.672 | 2.672 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 1.4996338 0 0 0 0 0
100 1515328.2 0 0.010465453 0.010044629 0.0097242319 0.030234314
200 1510820.8 0 0.020658886 0.019954762 0.020008864 0.060622512
300 1482006.5 0 0.030402195 0.029802874 0.030047586 0.090252655
400 1492228.5 0 0.039622543 0.038899144 0.040381854 0.11890354
500 1494985.5 0 0.050523465 0.050022913 0.050186478 0.15073286
600 1516047.4 0 0.061111845 0.061433818 0.059195364 0.18174103
700 1510021.8 0 0.071636778 0.072829755 0.06946406 0.21393059
800 1505964.7 0 0.08240965 0.08433785 0.078799851 0.24554735
900 1491035.9 0 0.093659937 0.094517749 0.08812559 0.27630328
1000 1516599.6 0 0.10436496 0.10431759 0.097480868 0.30616342
1100 1495170.3 0 0.11468757 0.111397 0.1069763 0.33306087
1200 1500630.6 0 0.12360977 0.12264534 0.11583999 0.3620951
1300 1474889.5 0 0.13432447 0.13471694 0.12702491 0.39606632
1400 1487145.8 0 0.14573239 0.14431493 0.13669403 0.42674135
1500 1519496.7 0 0.15610742 0.15505416 0.14600182 0.4571634
1600 1525674.1 0 0.16728653 0.1649354 0.15562133 0.48784325
1700 1540725.4 0 0.17846447 0.17666562 0.16531781 0.52044791
1800 1512334.8 0 0.18872753 0.18538847 0.17450009 0.54861609
1900 1498371.4 0 0.19688928 0.19333299 0.18581712 0.5760394
2000 1546459.4 0 0.20955053 0.20243854 0.19613897 0.60812803
2100 1509712.9 0 0.21922567 0.20940597 0.20567239 0.63430404
2200 1509630.4 0 0.23067999 0.21856734 0.21619911 0.66544645
2300 1483929.1 0 0.24160803 0.231048 0.22617193 0.69882797
2400 1488492.1 0 0.25399491 0.24082678 0.23972356 0.73454526
2500 1508107.9 0 0.26608734 0.25316913 0.2486814 0.76793787
2600 1511952.1 0 0.27523956 0.2623673 0.25706539 0.79467225
2700 1488888.8 0 0.28518299 0.27425585 0.26728622 0.82672506
2800 1515428.4 0 0.29595429 0.28589969 0.27781327 0.85966725
2900 1504312.1 0 0.30393798 0.29533034 0.28725362 0.88652194
3000 1521521.3 0 0.31445132 0.30117607 0.29959324 0.91522062
Loop time of 0.708196 on 4 procs for 3000 steps with 4096 atoms
Performance: 3660.004 tau/day, 4236.115 timesteps/s
97.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.00020647 | 0.00021023 | 0.0002141 | 0.0 | 0.03
Comm | 0.0045607 | 0.0050649 | 0.0053098 | 0.4 | 0.72
Output | 0.0013759 | 0.002265 | 0.0037355 | 1.9 | 0.32
Modify | 0.57353 | 0.58931 | 0.6109 | 1.8 | 83.21
Other | | 0.1113 | | | 15.72
Nlocal: 1024.00 ave 1043 max 1001 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 25
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,45 @@
##### overdamped dynamics of a sphere (with dipole attached to it) in 2D #####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_t string 5.0
variable gamma_r string 0.7
variable params string ${rng}_${temp}_${gamma_r}_${gamma_t}
units lj
atom_style hybrid dipole sphere
dimension 2
newton off
lattice sq 0.4
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
create_atoms 1 box
mass * 1.0
set type * dipole/random ${seed} 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/sphere ${temp} ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type &
# x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
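
Compared with fix brownian, fix brownian/sphere also diffuses the orientation of the attached dipole, controlled by gamma_r. A minimal sketch of the characteristic scales implied by this input, assuming the Einstein relations D_t = kB*T/gamma_t and D_r = kB*T/gamma_r (consistent with the MSD expression quoted in the ideal-ABP example further below) and lj units:

# sketch: characteristic scales for the brownian/sphere run above (lj units, kB = 1)
kB_T    = 1.0   # ${temp}
gamma_t = 5.0   # ${gamma_t}, translational drag
gamma_r = 0.7   # ${gamma_r}, rotational drag on the dipole
d       = 2     # dimension of this run

D_t = kB_T / gamma_t             # translational diffusion constant = 0.2
D_r = kB_T / gamma_r             # rotational diffusion constant ~ 1.43
tau_r = 1.0 / ((d - 1) * D_r)    # dipole decorrelation time: <e(t).e(0)> ~ exp(-t/tau_r)
print(D_t, D_r, tau_r)
print("expected MSD slope 2*d*D_t =", 2 * d * D_t)   # ~0.8 per unit time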

View File

@ -0,0 +1,45 @@
##### overdamped dynamics of a sphere (with dipole attached to it) in 3D#####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_t string 5.0
variable gamma_r string 0.7
variable params string ${rng}_${temp}_${gamma_r}_${gamma_t}
units lj
atom_style hybrid dipole sphere
dimension 3
newton off
lattice sc 0.4
region box block -8 8 -8 8 -8 8
create_box 1 box
create_atoms 1 box
mass * 1.0
set type * dipole/random ${seed} 1.0
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/sphere ${temp} ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type &
# x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000

View File

@ -0,0 +1,126 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of a sphere (with dipole attached to it) in 2D #####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_t string 5.0
variable gamma_r string 0.7
variable params string ${rng}_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_1.0_${gamma_r}_${gamma_t}
variable params string uniform_1.0_0.7_${gamma_t}
variable params string uniform_1.0_0.7_5.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
Created orthogonal box = (-47.434165 -47.434165 -0.31622777) to (47.434165 47.434165 0.31622777)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.005 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
3600 settings made for dipole/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/sphere ${temp} ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t 5.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.664 | 4.664 | 4.664 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 0.99972222 0 0 0 0 0
100 20867.136 0 0.00040006704 0.00039570887 0 0.00079577592
200 20835.491 0 0.00077560901 0.00080455484 0 0.0015801638
300 20813.122 0 0.0011737739 0.0012172689 0 0.0023910428
400 21137.397 0 0.0015587675 0.0016096093 0 0.0031683768
500 21167.188 0 0.0019294105 0.0020251322 0 0.0039545428
600 21345.908 0 0.0023105313 0.0024111742 0 0.0047217054
700 21086.272 0 0.0027236116 0.0027846006 0 0.0055082122
800 20840.906 0 0.0031505299 0.0031810732 0 0.0063316031
900 20916.456 0 0.0035525852 0.0035981301 0 0.0071507153
1000 20752.249 0 0.0039147929 0.0039791172 0 0.0078939101
1100 20643.612 0 0.0042977921 0.0043701484 0 0.0086679405
1200 21085.63 0 0.0045584242 0.0047475091 0 0.0093059332
1300 20900.794 0 0.0049718803 0.0051481706 0 0.010120051
1400 20980.731 0 0.0054234603 0.0054230724 0 0.010846533
1500 20916.308 0 0.0058502946 0.0058114313 0 0.011661726
1600 20949.786 0 0.0062258463 0.006208129 0 0.012433975
1700 20531.205 0 0.0066276219 0.006595921 0 0.013223543
1800 21418.472 0 0.0070077409 0.007030461 0 0.014038202
1900 21291.928 0 0.0074052208 0.0074333041 0 0.014838525
2000 20893.895 0 0.0077407477 0.007901402 0 0.01564215
2100 21218.001 0 0.0080384756 0.0082611258 0 0.016299601
2200 21116.189 0 0.0084325164 0.008617977 0 0.017050493
2300 20718.83 0 0.0089455345 0.0091768161 0 0.018122351
2400 20719.164 0 0.0093666455 0.0095272546 0 0.0188939
2500 20991.382 0 0.009706795 0.0098256506 0 0.019532446
2600 20515.74 0 0.0099247069 0.010329841 0 0.020254548
2700 21001.55 0 0.010448354 0.010693502 0 0.021141855
2800 21363.824 0 0.010990971 0.011142092 0 0.022133063
2900 20497.025 0 0.011399704 0.011504868 0 0.022904573
3000 20726.572 0 0.011785354 0.01187482 0 0.023660175
Loop time of 1.76023 on 1 procs for 3000 steps with 3600 atoms
Performance: 1472.538 tau/day, 1704.326 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.026518 | 0.026518 | 0.026518 | 0.0 | 1.51
Output | 0.0040107 | 0.0040107 | 0.0040107 | 0.0 | 0.23
Modify | 1.6194 | 1.6194 | 1.6194 | 0.0 | 92.00
Other | | 0.1103 | | | 6.27
Nlocal: 3600.00 ave 3600 max 3600 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 121.000 ave 121 max 121 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,126 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of a sphere (with dipole attached to it) in 2D #####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_t string 5.0
variable gamma_r string 0.7
variable params string ${rng}_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_1.0_${gamma_r}_${gamma_t}
variable params string uniform_1.0_0.7_${gamma_t}
variable params string uniform_1.0_0.7_5.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -30 30 -30 30 -0.2 0.2
create_box 1 box
Created orthogonal box = (-47.434165 -47.434165 -0.31622777) to (47.434165 47.434165 0.31622777)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.002 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
3600 settings made for dipole/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/sphere ${temp} ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t 5.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_2d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.664 | 4.664 | 4.664 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 0.99972222 0 0 0 0 0
100 21085.797 0 0.00042014118 0.00040399828 0 0.00082413946
200 20598.717 0 0.00081715618 0.00082613236 0 0.0016432885
300 21040.226 0 0.0012412527 0.0012520475 0 0.0024933002
400 21289.734 0 0.0016129899 0.001634482 0 0.003247472
500 20951.595 0 0.0020104279 0.0020197694 0 0.0040301973
600 20984.974 0 0.0023965593 0.0024277086 0 0.0048242679
700 21252.602 0 0.0028349303 0.0028407812 0 0.0056757114
800 20951.95 0 0.0032674595 0.0032573476 0 0.0065248071
900 20828.611 0 0.003647953 0.0036650963 0 0.0073130493
1000 21073.256 0 0.0040238604 0.0040103537 0 0.0080342142
1100 21104.396 0 0.0043694059 0.0044146515 0 0.0087840574
1200 20580.591 0 0.0047638237 0.0047646659 0 0.0095284896
1300 20667.623 0 0.0051512568 0.0051134445 0 0.010264701
1400 20466.72 0 0.0055921578 0.005517863 0 0.011110021
1500 20842.366 0 0.0059747304 0.0059374031 0 0.011912134
1600 20867.02 0 0.0065493697 0.0064163066 0 0.012965676
1700 21021.077 0 0.0070208005 0.0068164842 0 0.013837285
1800 21191.183 0 0.0073708939 0.0073226521 0 0.014693546
1900 20792.8 0 0.0076984189 0.0077400043 0 0.015438423
2000 21296.326 0 0.0081882545 0.0081503672 0 0.016338622
2100 21085.097 0 0.008596146 0.0086041272 0 0.017200273
2200 20506.523 0 0.0089905439 0.0091045462 0 0.01809509
2300 21068.555 0 0.0094163509 0.0094703314 0 0.018886682
2400 21128.867 0 0.0097349212 0.0098535832 0 0.019588504
2500 21009.514 0 0.010218059 0.010244621 0 0.020462679
2600 21177.52 0 0.01060437 0.010642719 0 0.021247089
2700 20832.56 0 0.011052623 0.011078899 0 0.022131522
2800 21298.053 0 0.011439708 0.011587702 0 0.023027411
2900 21002.28 0 0.011863624 0.01199682 0 0.023860443
3000 20737.939 0 0.012229608 0.012324579 0 0.024554186
Loop time of 0.492798 on 4 procs for 3000 steps with 3600 atoms
Performance: 5259.763 tau/day, 6087.689 timesteps/s
96.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.018005 | 0.021524 | 0.025207 | 2.2 | 4.37
Output | 0.0013187 | 0.0018334 | 0.0032332 | 1.9 | 0.37
Modify | 0.37545 | 0.38775 | 0.40664 | 1.9 | 78.68
Other | | 0.08169 | | | 16.58
Nlocal: 900.000 ave 900 max 900 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 61.0000 ave 61 max 61 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,126 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of a sphere (with dipole attached to it) in 3D#####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_t string 5.0
variable gamma_r string 0.7
variable params string ${rng}_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_1.0_${gamma_r}_${gamma_t}
variable params string uniform_1.0_0.7_${gamma_t}
variable params string uniform_1.0_0.7_5.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.005 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
4096 settings made for dipole/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/sphere ${temp} ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t 5.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.737 | 4.737 | 4.737 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 1.4996338 0 0 0 0 0
100 32032.279 0 0.00040227568 0.00039159837 0.00040147813 0.0011953522
200 31673.093 0 0.00077657885 0.00077292327 0.00079963705 0.0023491392
300 31476.164 0 0.0011712083 0.0011606723 0.0012089537 0.0035408343
400 31911.374 0 0.0015803424 0.001562091 0.0016042321 0.0047466655
500 31182.011 0 0.0019677217 0.0019269105 0.0020015977 0.0058962298
600 31206.05 0 0.0023360975 0.0023134398 0.0024213372 0.0070708745
700 31278.057 0 0.0026966955 0.0027129858 0.0028721373 0.0082818187
800 31677.724 0 0.0031197964 0.003134834 0.0032726303 0.0095272607
900 31312.741 0 0.0035636612 0.0035573653 0.0037328373 0.010853864
1000 31426.075 0 0.0039774626 0.003952159 0.0041879386 0.01211756
1100 31361.699 0 0.0044256852 0.004320566 0.004638132 0.013384383
1200 31559.778 0 0.0048338539 0.0047210601 0.0050296056 0.01458452
1300 31716.797 0 0.0052239651 0.0050796723 0.0054794684 0.015783106
1400 31231.077 0 0.0055890568 0.005472377 0.0059264123 0.016987846
1500 31605.513 0 0.0059876582 0.0058974054 0.0063452478 0.018230311
1600 31551.402 0 0.006413094 0.0062665632 0.0067442106 0.019423868
1700 31725.868 0 0.0068244611 0.0067189707 0.0071424779 0.02068591
1800 31385.794 0 0.0071570297 0.0070502303 0.0075240296 0.02173129
1900 31754.094 0 0.0075638662 0.0074243015 0.0079935325 0.0229817
2000 31668.959 0 0.0080059944 0.0079019753 0.0084000614 0.024308031
2100 31781.994 0 0.0084108141 0.0082719077 0.0088004977 0.02548322
2200 31455.021 0 0.0088844434 0.0086931769 0.0091916929 0.026769313
2300 31273.079 0 0.0093155639 0.0091027782 0.0095364621 0.027954804
2400 31283.781 0 0.0098441686 0.0094496218 0.0099279073 0.029221698
2500 31758.315 0 0.010372129 0.0097843406 0.010334653 0.030491123
2600 31780.442 0 0.010770862 0.010313119 0.010637545 0.031721525
2700 31552.277 0 0.011268703 0.010693437 0.01110762 0.033069759
2800 31124.693 0 0.011661333 0.011100115 0.011480624 0.034242072
2900 31438.795 0 0.012068847 0.011346633 0.011842006 0.035257486
3000 31574.258 0 0.012482632 0.011691477 0.012210207 0.036384317
Loop time of 2.8531 on 1 procs for 3000 steps with 4096 atoms
Performance: 908.486 tau/day, 1051.488 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 3.2425e-05 | 3.2425e-05 | 3.2425e-05 | 0.0 | 0.00
Comm | 0.13219 | 0.13219 | 0.13219 | 0.0 | 4.63
Output | 0.0045686 | 0.0045686 | 0.0045686 | 0.0 | 0.16
Modify | 2.5857 | 2.5857 | 2.5857 | 0.0 | 90.63
Other | | 0.1307 | | | 4.58
Nlocal: 4096.00 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:00:02

View File

@ -0,0 +1,126 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
##### overdamped dynamics of a sphere (with dipole attached to it) in 3D#####
variable rng string uniform
variable seed string 198098
variable temp string 1.0
variable gamma_t string 5.0
variable gamma_r string 0.7
variable params string ${rng}_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_${temp}_${gamma_r}_${gamma_t}
variable params string uniform_1.0_${gamma_r}_${gamma_t}
variable params string uniform_1.0_0.7_${gamma_t}
variable params string uniform_1.0_0.7_5.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
2 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.006 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 198098 1.0
Setting atom values ...
4096 settings made for dipole/random
velocity all create 1.0 1 loop geom
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
pair_style none
fix 1 all brownian/sphere ${temp} ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 ${seed} rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng ${rng} gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r ${gamma_r} gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t ${gamma_t}
fix 1 all brownian/sphere 1.0 198098 rng uniform gamma_r 0.7 gamma_t 5.0
#initialisation for the main run
# MSD
compute msd all msd
thermo_style custom step ke pe c_msd[*]
#dump 1 all custom 1000 dump_${params}_3d.lammpstrj id type # x y z xu yu zu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
timestep 0.00001
thermo 100
# main run
run 3000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.694 | 4.694 | 4.694 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4]
0 1.4996338 0 0 0 0 0
100 30882.707 0 0.00040787161 0.00039391576 0.00040796913 0.0012097565
200 31370.751 0 0.00081742036 0.00078240455 0.0008195167 0.0024193416
300 31469.51 0 0.0012118299 0.0011819412 0.0012477119 0.003641483
400 31696.58 0 0.0015540547 0.0015849689 0.0015836091 0.0047226327
500 31488.269 0 0.0019638041 0.0019659637 0.0020547832 0.005984551
600 30942.589 0 0.0023273784 0.0023572171 0.0024715245 0.00715612
700 31228.473 0 0.0027821732 0.002735338 0.0028734675 0.0083909787
800 31426.92 0 0.0031663838 0.0031092782 0.0033231014 0.0095987634
900 31447.595 0 0.003539588 0.003564381 0.003753036 0.010857005
1000 31363.911 0 0.0039854308 0.003937555 0.0041203919 0.012043378
1100 31522.958 0 0.0043009285 0.0043676491 0.0044799414 0.013148519
1200 31403.033 0 0.0046361199 0.0047513598 0.0049014974 0.014288977
1300 31752.182 0 0.0049824718 0.0051327113 0.0053130614 0.015428244
1400 31336.955 0 0.0054251445 0.0055442325 0.0057472998 0.016716677
1500 31224.306 0 0.0059295596 0.0059920697 0.0061375228 0.018059152
1600 31744.535 0 0.0063845142 0.0063600989 0.0064833215 0.019227935
1700 31472.081 0 0.0068360092 0.0067985824 0.0069464303 0.020581022
1800 31577.334 0 0.0073001079 0.0071355564 0.0073400543 0.021775719
1900 31521.234 0 0.0077178677 0.0074371106 0.007708008 0.022862986
2000 31045.148 0 0.0080515968 0.0078583776 0.0081000219 0.024009996
2100 31289.809 0 0.0084280175 0.0082322226 0.0084475904 0.02510783
2200 31505.455 0 0.008802925 0.0085708943 0.0087648194 0.026138639
2300 31882.722 0 0.0092223105 0.0089242925 0.0092643028 0.027410906
2400 31028.15 0 0.0095737559 0.0093585981 0.0096771837 0.028609538
2500 31581.041 0 0.0099316284 0.009785264 0.010100235 0.029817127
2600 31272.119 0 0.010332986 0.01007291 0.010474606 0.030880502
2700 31537.8 0 0.010751592 0.010565273 0.01093107 0.032247935
2800 31060.697 0 0.011156729 0.011010751 0.011260025 0.033427506
2900 31541.612 0 0.011542003 0.011499419 0.011642873 0.034684295
3000 31305.382 0 0.011876832 0.011866445 0.012052577 0.035795854
Loop time of 1.00142 on 4 procs for 3000 steps with 4096 atoms
Performance: 2588.329 tau/day, 2995.751 timesteps/s
95.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.084416 | 0.10685 | 0.12695 | 5.7 | 10.67
Output | 0.001471 | 0.0019466 | 0.0033245 | 1.8 | 0.19
Modify | 0.66585 | 0.70645 | 0.78995 | 6.0 | 70.54
Other | | 0.1862 | | | 18.59
Nlocal: 1024.00 ave 1024 max 1024 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 353.000 ave 353 max 353 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,60 @@
# 2D overdamped active brownian particle dynamics (ABP)
# with WCA potential
variable gamma_t string 1.0
variable gamma_r string 1.0
variable temp string 1.0
variable seed equal 1974019
variable fp string 4.0
variable params string ${temp}_${gamma_t}_${gamma_r}_${fp}
units lj
atom_style hybrid dipole sphere
dimension 2
newton off
lattice sq 0.4
region box block -16 16 -16 16 -0.2 0.2
create_box 1 box
create_atoms 1 box
mass * 1.0
set type * dipole/random ${seed} 1.0
velocity all create 1.0 1 loop geom
# more careful with neighbors since higher diffusion in abps
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
# WCA potential (purely repulsive)
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.1224
pair_modify shift yes
# overdamped brownian dynamics time-step
fix step all brownian/sphere ${temp} ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
# self-propulsion force along the dipole direction
fix activity all propel/self dipole ${fp}
fix 2 all enforce2d
compute press all pressure NULL virial
thermo_style custom step pe ke c_press
#equilibration
timestep 0.0000000001
thermo 100
run 5000
reset_timestep 0
# MSD
compute msd all msd
thermo_style custom step temp epair c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 20000
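
The pair_coeff cutoff of 1.1224 is 2^(1/6)*sigma, the location of the Lennard-Jones minimum; truncating there and shifting the energy to zero (pair_modify shift yes) keeps only the repulsive branch, which is the WCA potential mentioned in the comment. A short sketch verifying this, assuming the standard 12-6 LJ form:

# sketch: lj/cut truncated at 2^(1/6)*sigma and shifted is the purely repulsive WCA potential
eps, sigma = 1.0, 1.0
rc = 2.0 ** (1.0 / 6.0) * sigma          # ~1.1224, the LJ minimum

def lj(r):
    return 4.0 * eps * ((sigma / r) ** 12 - (sigma / r) ** 6)

print(rc)                  # 1.12246..., matching the pair_coeff cutoff above
print(lj(rc))              # -eps at the minimum, so shifting by +eps gives V(rc) = 0
print(lj(0.9 * rc) + eps)  # > 0: inside the cutoff the shifted potential is purely repulsive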

View File

@ -0,0 +1,67 @@
# 3D overdamped active brownian dynamics with no interactions
variable gamma_t string 3.0
variable gamma_r string 1.0
variable temp string 1.0
variable seed equal 1974019
variable fp string 4.0
variable params string ${temp}_${gamma_t}_${gamma_r}_${fp}
units lj
atom_style hybrid dipole sphere
dimension 3
newton off
lattice sc 0.4
region box block -8 8 -8 8 -8 8
create_box 1 box
create_atoms 1 box
mass * 1.0
set type * dipole/random ${seed} 1.0
velocity all create 1.0 1 loop geom
pair_style none
# overdamped brownian dynamics time-step
fix step all brownian/sphere ${temp} ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
# self-propulsion force along the dipole direction
fix activity all propel/self dipole ${fp}
compute press all pressure NULL virial
thermo_style custom step ke pe c_press
#equilibration
timestep 0.0000000001
thermo 100
run 5000
reset_timestep 0
# MSD to demonstrate expected diffusive behaviour for ideal active
# brownian motion, which is
#
# MSD = (2*d*kb*T/gamma_t + 2*fp**2*gamma_r/(kb*T*gamma_t**2*(d-1)))*t
# + 2*fp**2*gamma_r**2/(gamma_t**2*(d-1)**2*(kb*T)**2)*(e^(-(d-1)*t*kb*T/gamma_r)-1)
#
# with d being simulation dimension
compute msd all msd
thermo_style custom step ke pe c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 12000
# if you want to check that rotational diffusion is behaving as expected,
# uncomment next three lines for dump output and then plot <e(t).e(0)>,
# which should decay exponentially with timescale (d-1)*D_r (with d
# being simulation dimension)
#dump 1 all custom 2000 dump_ideal_${params}_3d.lammpstrj id type &
# x y xu yu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
#run 120000
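
The MSD expression quoted in the comments above can be evaluated directly and compared with the c_msd[4] column of the accompanying logs. A small sketch using the parameters of this input (kB*T = 1.0, gamma_t = 3.0, gamma_r = 1.0, fp = 4.0, d = 3) in lj units:

# sketch: theoretical MSD of an ideal active Brownian particle, as given in the comments above
import math

kB_T, gamma_t, gamma_r, fp, d = 1.0, 3.0, 1.0, 4.0, 3

def msd_theory(t):
    lin = (2 * d * kB_T / gamma_t
           + 2 * fp**2 * gamma_r / (kB_T * gamma_t**2 * (d - 1))) * t
    exp = (2 * fp**2 * gamma_r**2 / (gamma_t**2 * (d - 1)**2 * kB_T**2)
           * (math.exp(-(d - 1) * t * kB_T / gamma_r) - 1.0))
    return lin + exp

t_end = 12000 * 1.0e-5      # 12000 steps at timestep 0.00001
print(msd_theory(t_end))    # ~0.264; the logged c_msd[4] at step 12000 is ~0.267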

View File

@ -0,0 +1,221 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
# 2D overdamped active brownian particle dynamics (ABP)
# with WCA potential
variable gamma_t string 1.0
variable gamma_r string 1.0
variable temp string 1.0
variable seed equal 1974019
variable fp string 4.0
variable params string ${temp}_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_1.0_${gamma_r}_${fp}
variable params string 1.0_1.0_1.0_${fp}
variable params string 1.0_1.0_1.0_4.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -16 16 -16 16 -0.2 0.2
create_box 1 box
Created orthogonal box = (-25.298221 -25.298221 -0.31622777) to (25.298221 25.298221 0.31622777)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 1024 atoms
create_atoms CPU = 0.002 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 1974019 1.0
Setting atom values ...
1024 settings made for dipole/random
velocity all create 1.0 1 loop geom
# more careful with neighbors since higher diffusion in abps
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
# WCA potential (purely repulsive)
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.1224
pair_modify shift yes
# overdamped brownian dynamics time-step
fix step all brownian/sphere ${temp} ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 1.0 gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 1.0 gamma_r 1.0
# self-propulsion force along the dipole direction
fix activity all propel/self dipole ${fp}
fix activity all propel/self dipole 4.0
fix 2 all enforce2d
compute press all pressure NULL virial
thermo_style custom step pe ke c_press
#equilibration
timestep 0.0000000001
thermo 100
run 5000
Neighbor list info ...
update every 1 steps, delay 1 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.1224
ghost atom cutoff = 2.1224
binsize = 1.0612, bins = 48 48 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton off
pair build: half/bin/newtoff
stencil: half/bin/2d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.066 | 5.066 | 5.066 Mbytes
Step PotEng KinEng c_press
0 0 0.99902344 -0.53979198
100 0 1.026585e+10 -0.5398101
200 0 1.0630628e+10 -0.53977393
300 0 1.03483e+10 -0.53977041
400 0 1.049279e+10 -0.53974314
500 0 1.0832067e+10 -0.53979451
600 0 1.0403632e+10 -0.53976233
700 0 1.0334726e+10 -0.53976174
800 0 1.0119596e+10 -0.53969338
900 0 1.0786136e+10 -0.53970415
1000 0 1.0539036e+10 -0.53974577
1100 0 1.0643695e+10 -0.53982431
1200 0 1.0234642e+10 -0.53976823
1300 0 1.036268e+10 -0.53981454
1400 0 1.0605702e+10 -0.53988117
1500 0 1.0517916e+10 -0.53989207
1600 0 1.0564482e+10 -0.53993016
1700 0 1.0460152e+10 -0.53984454
1800 0 1.0468566e+10 -0.53985574
1900 0 1.0474075e+10 -0.53985439
2000 0 1.0683568e+10 -0.53987349
2100 0 1.0269077e+10 -0.53990709
2200 0 1.0386943e+10 -0.53990068
2300 0 1.0406078e+10 -0.53978402
2400 0 1.0482072e+10 -0.53980757
2500 0 1.0442975e+10 -0.53982657
2600 0 1.0292103e+10 -0.53985533
2700 0 1.1106453e+10 -0.53991861
2800 0 1.0395289e+10 -0.53990138
2900 0 1.034021e+10 -0.53992375
3000 0 1.0434718e+10 -0.53995566
3100 0 1.0194094e+10 -0.53993997
3200 0 1.0411552e+10 -0.54000097
3300 0 1.0214175e+10 -0.53999884
3400 0 1.0434719e+10 -0.54000005
3500 0 1.0529638e+10 -0.53998281
3600 0 1.0406541e+10 -0.54000141
3700 0 1.0577151e+10 -0.54002354
3800 0 1.0488249e+10 -0.53996003
3900 0 1.0316153e+10 -0.54002024
4000 0 1.0491289e+10 -0.5400259
4100 0 1.0587981e+10 -0.5399811
4200 0 1.0332035e+10 -0.53997951
4300 0 1.0776469e+10 -0.53994151
4400 0 1.0982142e+10 -0.53983842
4500 0 1.0796919e+10 -0.5398414
4600 0 1.0324249e+10 -0.53979712
4700 0 1.0420899e+10 -0.53981967
4800 0 1.0274188e+10 -0.53976759
4900 0 1.0411535e+10 -0.5397757
5000 0 1.0399215e+10 -0.53980199
Loop time of 1.34285 on 1 procs for 5000 steps with 1024 atoms
Performance: 0.032 tau/day, 3723.422 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.25309 | 0.25309 | 0.25309 | 0.0 | 18.85
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.068734 | 0.068734 | 0.068734 | 0.0 | 5.12
Output | 0.0012887 | 0.0012887 | 0.0012887 | 0.0 | 0.10
Modify | 0.96552 | 0.96552 | 0.96552 | 0.0 | 71.90
Other | | 0.05422 | | | 4.04
Nlocal: 1024.00 ave 1024 max 1024 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 201.000 ave 201 max 201 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 2112.00 ave 2112 max 2112 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 2112
Ave neighs/atom = 2.0625000
Neighbor list builds = 0
Dangerous builds = 0
reset_timestep 0
# MSD
compute msd all msd
thermo_style custom step temp epair c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 20000
Per MPI rank memory allocation (min/avg/max) = 5.441 | 5.441 | 5.441 Mbytes
Step Temp E_pair c_msd[1] c_msd[2] c_msd[3] c_msd[4] c_press
0 1.0409381e+10 0 0 0 0 0 -0.53980199
1000 107022.73 0.0080050427 0.020451432 0.021388798 0 0.04184023 -0.54900967
2000 107475.82 0.017262846 0.040669645 0.044251149 0 0.084920794 -0.42740968
3000 105388.35 0.042257875 0.062828995 0.05845782 0 0.12128682 -0.31792184
4000 106238.38 0.052733384 0.079036841 0.079396453 0 0.15843329 -0.24243699
5000 102904.54 0.088524456 0.095977642 0.099533961 0 0.1955116 -0.093468615
6000 105274.15 0.065334999 0.11591691 0.11675531 0 0.23267222 -0.21904478
7000 108903.41 0.06724271 0.13694218 0.13914947 0 0.27609164 -0.15913012
8000 101451.44 0.097201152 0.15704893 0.16178845 0 0.31883738 -0.055786965
9000 106808.72 0.084301668 0.18029391 0.175753 0 0.3560469 0.014898739
10000 107381.19 0.088583354 0.2000753 0.19569789 0 0.39577319 0.19417596
11000 102105.78 0.081066654 0.22042599 0.21914042 0 0.43956641 0.060574143
12000 105384.94 0.098716908 0.24382064 0.24673594 0 0.49055657 0.17067875
13000 107479.53 0.099989043 0.26942088 0.27207566 0 0.54149654 0.25514896
14000 102938.12 0.093252916 0.28529564 0.28698837 0 0.57228401 0.19976355
15000 104408.02 0.11900926 0.31291315 0.31195058 0 0.62486373 0.36956014
16000 103447.68 0.09627777 0.34145225 0.33159885 0 0.6730511 0.29857404
17000 108400.05 0.11433561 0.36561966 0.36068301 0 0.72630267 0.41922801
18000 103363.68 0.11040153 0.38709746 0.39228677 0 0.77938423 0.38111686
19000 103310.43 0.10660536 0.41406235 0.40975085 0 0.8238132 0.36022184
20000 102692.1 0.13517651 0.43870812 0.44138776 0 0.88009588 0.51144366
Loop time of 5.66207 on 1 procs for 20000 steps with 1024 atoms
Performance: 3051.889 tau/day, 3532.279 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.3123 | 1.3123 | 1.3123 | 0.0 | 23.18
Neigh | 0.011856 | 0.011856 | 0.011856 | 0.0 | 0.21
Comm | 0.2747 | 0.2747 | 0.2747 | 0.0 | 4.85
Output | 0.0011516 | 0.0011516 | 0.0011516 | 0.0 | 0.02
Modify | 3.8451 | 3.8451 | 3.8451 | 0.0 | 67.91
Other | | 0.2169 | | | 3.83
Nlocal: 1024.00 ave 1024 max 1024 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 184.000 ave 184 max 184 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 2558.00 ave 2558 max 2558 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 2558
Ave neighs/atom = 2.4980469
Neighbor list builds = 23
Dangerous builds = 0
Total wall time: 0:00:07

View File

@ -0,0 +1,221 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
# 2D overdamped active brownian particle dynamics (ABP)
# with WCA potential
variable gamma_t string 1.0
variable gamma_r string 1.0
variable temp string 1.0
variable seed equal 1974019
variable fp string 4.0
variable params string ${temp}_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_1.0_${gamma_r}_${fp}
variable params string 1.0_1.0_1.0_${fp}
variable params string 1.0_1.0_1.0_4.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 2
newton off
lattice sq 0.4
Lattice spacing in x,y,z = 1.5811388 1.5811388 1.5811388
region box block -16 16 -16 16 -0.2 0.2
create_box 1 box
Created orthogonal box = (-25.298221 -25.298221 -0.31622777) to (25.298221 25.298221 0.31622777)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 1024 atoms
create_atoms CPU = 0.001 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 1974019 1.0
Setting atom values ...
1024 settings made for dipole/random
velocity all create 1.0 1 loop geom
# more careful with neighbors since higher diffusion in abps
neighbor 1.0 bin
neigh_modify every 1 delay 1 check yes
# WCA potential (purely repulsive)
pair_style lj/cut 2.5
pair_coeff * * 1.0 1.0 1.1224
pair_modify shift yes
# overdamped brownian dynamics time-step
fix step all brownian/sphere ${temp} ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 1.0 gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 1.0 gamma_r 1.0
# self-propulsion force along the dipole direction
fix activity all propel/self dipole ${fp}
fix activity all propel/self dipole 4.0
fix 2 all enforce2d
compute press all pressure NULL virial
thermo_style custom step pe ke c_press
#equilibration
timestep 0.0000000001
thermo 100
run 5000
Neighbor list info ...
update every 1 steps, delay 1 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.1224
ghost atom cutoff = 2.1224
binsize = 1.0612, bins = 48 48 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton off
pair build: half/bin/newtoff
stencil: half/bin/2d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.052 | 5.052 | 5.052 Mbytes
Step PotEng KinEng c_press
0 0 0.99902344 -0.53979198
100 0 1.0503521e+10 -0.53983092
200 0 1.0390343e+10 -0.5398287
300 0 1.0493441e+10 -0.53979247
400 0 1.0545991e+10 -0.53978678
500 0 1.0266398e+10 -0.53986297
600 0 1.0484775e+10 -0.53978746
700 0 1.0583596e+10 -0.53969114
800 0 1.0521919e+10 -0.53968754
900 0 1.0492019e+10 -0.53958643
1000 0 1.0298052e+10 -0.53949872
1100 0 1.0531424e+10 -0.53955431
1200 0 1.0635635e+10 -0.53960048
1300 0 1.0633405e+10 -0.53966331
1400 0 1.0195401e+10 -0.53968849
1500 0 1.0593758e+10 -0.53969763
1600 0 1.0425238e+10 -0.53971936
1700 0 1.0470017e+10 -0.53981957
1800 0 1.0545953e+10 -0.53987747
1900 0 1.0425015e+10 -0.53990412
2000 0 1.0655092e+10 -0.5399511
2100 0 1.0197224e+10 -0.53988687
2200 0 1.0448012e+10 -0.53986066
2300 0 1.0355268e+10 -0.53980415
2400 0 1.0246979e+10 -0.53979737
2500 0 1.0021539e+10 -0.5397919
2600 0 1.0200824e+10 -0.5397575
2700 0 1.0721591e+10 -0.53973512
2800 0 1.0354562e+10 -0.5397127
2900 0 1.0306795e+10 -0.5396946
3000 0 1.0301339e+10 -0.53968642
3100 0 1.0435826e+10 -0.53970945
3200 0 1.019524e+10 -0.53969746
3300 0 1.0550481e+10 -0.53967977
3400 0 1.0283446e+10 -0.53971102
3500 0 1.0956695e+10 -0.53976173
3600 0 1.0271033e+10 -0.53983632
3700 0 1.0389461e+10 -0.53977293
3800 0 1.0680515e+10 -0.53977425
3900 0 1.0072183e+10 -0.53982922
4000 0 1.0458036e+10 -0.53980042
4100 0 1.0588689e+10 -0.53971405
4200 0 1.0068308e+10 -0.5398033
4300 0 1.0502064e+10 -0.53981291
4400 0 1.0590544e+10 -0.5398346
4500 0 1.0411612e+10 -0.5397916
4600 0 1.0518596e+10 -0.53984868
4700 0 1.0386105e+10 -0.53977803
4800 0 1.029525e+10 -0.53970882
4900 0 1.0519112e+10 -0.53969616
5000 0 1.0335841e+10 -0.53976477
Loop time of 0.471663 on 4 procs for 5000 steps with 1024 atoms
Performance: 0.092 tau/day, 10600.781 timesteps/s
95.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.067099 | 0.07105 | 0.077898 | 1.6 | 15.06
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.0581 | 0.066084 | 0.072322 | 2.0 | 14.01
Output | 0.0014644 | 0.002618 | 0.0037239 | 1.6 | 0.56
Modify | 0.24817 | 0.25719 | 0.26697 | 1.3 | 54.53
Other | | 0.07472 | | | 15.84
Nlocal: 256.000 ave 256 max 256 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 105.000 ave 105 max 105 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 544.000 ave 544 max 544 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 2176
Ave neighs/atom = 2.1250000
Neighbor list builds = 0
Dangerous builds = 0
reset_timestep 0
# MSD
compute msd all msd
thermo_style custom step temp epair c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 20000
Per MPI rank memory allocation (min/avg/max) = 5.427 | 5.427 | 5.427 Mbytes
Step Temp E_pair c_msd[1] c_msd[2] c_msd[3] c_msd[4] c_press
0 1.0345945e+10 0 0 0 0 0 -0.53976477
1000 100114.28 0.0029703577 0.020320684 0.020950989 0 0.041271673 -0.43948247
2000 106825.83 0.020969054 0.039616412 0.039459167 0 0.079075578 -0.22765541
3000 105287.4 0.037343571 0.056828177 0.058639835 0 0.11546801 -0.11728136
4000 104522.23 0.052237136 0.080264931 0.080863543 0 0.16112847 0.033230576
5000 103277.94 0.053791862 0.099188864 0.10141444 0 0.20060331 0.073591503
6000 104252.87 0.073304776 0.11964238 0.1215576 0 0.24119999 0.22062305
7000 105184.19 0.089054043 0.13691291 0.14216478 0 0.27907769 0.29015692
8000 104211.82 0.072577918 0.15820522 0.15658491 0 0.31479013 0.25908291
9000 99242.172 0.071616004 0.17658708 0.17479704 0 0.35138412 0.26305532
10000 105070.83 0.077009979 0.20175025 0.19871513 0 0.40046538 0.34120567
11000 106421.07 0.098623061 0.22472634 0.22671582 0 0.45144216 0.44021335
12000 103209.85 0.12032847 0.25004966 0.25368441 0 0.50373406 0.57344873
13000 107156.89 0.1058386 0.27283231 0.2744873 0 0.54731961 0.47957408
14000 108119.3 0.1204768 0.29333677 0.30054535 0 0.59388213 0.51832639
15000 105477.62 0.12510026 0.32217621 0.32806599 0 0.6502422 0.50174158
16000 106676.27 0.10893618 0.34980866 0.36031184 0 0.7101205 0.44769198
17000 103048.41 0.10625673 0.3781797 0.37970499 0 0.75788468 0.42803898
18000 109454.8 0.10555778 0.40997694 0.41396777 0 0.82394471 0.41380982
19000 107459.73 0.11267582 0.43757738 0.43577856 0 0.87335594 0.4917748
20000 101991.9 0.084279008 0.45363612 0.46278076 0 0.91641688 0.41707912
Loop time of 1.80877 on 4 procs for 20000 steps with 1024 atoms
Performance: 9553.439 tau/day, 11057.221 timesteps/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.34461 | 0.36424 | 0.40948 | 4.4 | 20.14
Neigh | 0.0031493 | 0.003215 | 0.0032432 | 0.1 | 0.18
Comm | 0.19538 | 0.20419 | 0.2104 | 1.2 | 11.29
Output | 0.00054121 | 0.00087297 | 0.0018425 | 0.0 | 0.05
Modify | 0.98335 | 1.0156 | 1.0791 | 3.8 | 56.15
Other | | 0.2207 | | | 12.20
Nlocal: 256.000 ave 261 max 252 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Nghost: 93.0000 ave 100 max 83 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Neighs: 662.250 ave 693 max 635 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Total # of neighbors = 2649
Ave neighs/atom = 2.5869141
Neighbor list builds = 23
Dangerous builds = 0
Total wall time: 0:00:02

View File

@ -0,0 +1,210 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
# 3D overdamped active brownian dynamics with no interactions
variable gamma_t string 3.0
variable gamma_r string 1.0
variable temp string 1.0
variable seed equal 1974019
variable fp string 4.0
variable params string ${temp}_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_3.0_${gamma_r}_${fp}
variable params string 1.0_3.0_1.0_${fp}
variable params string 1.0_3.0_1.0_4.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.004 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 1974019 1.0
Setting atom values ...
4096 settings made for dipole/random
velocity all create 1.0 1 loop geom
pair_style none
# overdamped brownian dynamics time-step
fix step all brownian/sphere ${temp} ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 3.0 gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 3.0 gamma_r 1.0
# self-propulsion force along the dipole direction
fix activity all propel/self dipole ${fp}
fix activity all propel/self dipole 4.0
compute press all pressure NULL virial
thermo_style custom step ke pe c_press
#equilibration
timestep 0.0000000001
thermo 100
run 5000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.362 | 4.362 | 4.362 Mbytes
Step KinEng PotEng c_press
0 1.4996338 0 0.068021726
100 5.184227e+09 0 0.06801544
200 5.2165482e+09 0 0.068010729
300 5.2782092e+09 0 0.068009058
400 5.3244927e+09 0 0.068003481
500 5.2376606e+09 0 0.067998237
600 5.2735634e+09 0 0.067998037
700 5.2692439e+09 0 0.068025402
800 5.2667984e+09 0 0.068030143
900 5.242057e+09 0 0.0680246
1000 5.2557468e+09 0 0.068028348
1100 5.2975687e+09 0 0.068029528
1200 5.2081927e+09 0 0.068017542
1300 5.2636873e+09 0 0.068012572
1400 5.2187907e+09 0 0.06802049
1500 5.2349541e+09 0 0.0680373
1600 5.216092e+09 0 0.068056885
1700 5.2598019e+09 0 0.068069504
1800 5.2569065e+09 0 0.068065306
1900 5.2072055e+09 0 0.068074863
2000 5.2092961e+09 0 0.068061619
2100 5.2918572e+09 0 0.068076418
2200 5.2680626e+09 0 0.068072149
2300 5.242958e+09 0 0.06806486
2400 5.2494099e+09 0 0.06805038
2500 5.2055798e+09 0 0.068072194
2600 5.2264829e+09 0 0.068069312
2700 5.3557342e+09 0 0.068064812
2800 5.2186177e+09 0 0.068042942
2900 5.2652497e+09 0 0.068044214
3000 5.1894899e+09 0 0.068044801
3100 5.241524e+09 0 0.068056675
3200 5.1915006e+09 0 0.06805641
3300 5.2367825e+09 0 0.068049946
3400 5.2288011e+09 0 0.068060182
3500 5.2704335e+09 0 0.068070881
3600 5.2886558e+09 0 0.068050439
3700 5.1976022e+09 0 0.068045927
3800 5.1525512e+09 0 0.068054494
3900 5.2212395e+09 0 0.068061432
4000 5.2309575e+09 0 0.068070842
4100 5.2260184e+09 0 0.068078378
4200 5.2829349e+09 0 0.068071652
4300 5.2204917e+09 0 0.068083072
4400 5.255242e+09 0 0.068066175
4500 5.2435681e+09 0 0.068050802
4600 5.2483356e+09 0 0.06805658
4700 5.2365098e+09 0 0.068041845
4800 5.2254325e+09 0 0.068038583
4900 5.1842852e+09 0 0.068028401
5000 5.2240722e+09 0 0.068031544
Loop time of 5.14275 on 1 procs for 5000 steps with 4096 atoms
Performance: 0.008 tau/day, 972.242 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.26842 | 0.26842 | 0.26842 | 0.0 | 5.22
Output | 0.0035088 | 0.0035088 | 0.0035088 | 0.0 | 0.07
Modify | 4.6588 | 4.6588 | 4.6588 | 0.0 | 90.59
Other | | 0.212 | | | 4.12
Nlocal: 4096.00 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 817.000 ave 817 max 817 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
reset_timestep 0
# MSD to demonstrate expected diffusive behaviour for ideal active
# brownian motion, which is
#
# MSD = (2*d*kb*T/gamma_t + 2*fp**2*gamma_r/(kb*T*gamma_t**2*(d-1)))*t
# + 2*fp**2*gamma_r**2/(gamma_t**2*(d-1)**2*(kb*T)**2)*(e^(-(d-1)*t*kb*T/gamma_r)-1)
#
# with d being simulation dimension
compute msd all msd
thermo_style custom step ke pe c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 12000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.737 | 4.737 | 4.737 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4] c_press
0 5.2240722e+09 0 0 0 0 0 0.068031544
1000 52651.581 0 0.0066842466 0.0067977045 0.0066831353 0.020165086 0.060774985
2000 52835.806 0 0.013693443 0.014008773 0.013518945 0.041221161 0.094748037
3000 52097.629 0 0.020666918 0.021696789 0.020665685 0.063029392 0.10673866
4000 52579.452 0 0.028145318 0.028504548 0.02830967 0.084959536 0.13358122
5000 51255.456 0 0.035019271 0.034644123 0.03638843 0.10605182 0.13507609
6000 52730.035 0 0.041412307 0.042689213 0.043339117 0.12744064 0.16497663
7000 52247.642 0 0.048119396 0.050556395 0.050706527 0.14938232 0.16360301
8000 52169.849 0 0.055241196 0.058678631 0.059373122 0.17329295 0.1676169
9000 52520.526 0 0.063519587 0.066592779 0.066988842 0.19710121 0.17142694
10000 53519.297 0 0.07164814 0.074576535 0.075619236 0.22184391 0.15619444
11000 52937.293 0 0.077992504 0.083184462 0.082988794 0.24416576 0.15257327
12000 51762.283 0 0.085959749 0.090992292 0.08984213 0.26679417 0.15996211
Loop time of 11.6748 on 1 procs for 12000 steps with 4096 atoms
Performance: 888.063 tau/day, 1027.851 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.0024164 | 0.0024164 | 0.0024164 | 0.0 | 0.02
Comm | 0.048127 | 0.048127 | 0.048127 | 0.0 | 0.41
Output | 0.0019393 | 0.0019393 | 0.0019393 | 0.0 | 0.02
Modify | 11.12 | 11.12 | 11.12 | 0.0 | 95.24
Other | | 0.5027 | | | 4.31
Nlocal: 4096.00 ave 4096 max 4096 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 75
Dangerous builds = 0
# if you want to check that rotational diffusion is behaving as expected,
# uncomment next three lines for dump output and then plot <e(t).e(0)>,
# which should decay exponentially with timescale (d-1)*D_r (with d
# being simulation dimension)
#dump 1 all custom 2000 dump_ideal_${params}_3d.lammpstrj id type # x y xu yu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
#run 120000
Total wall time: 0:00:16
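
A hedged aside (not part of the commit content): the theoretical MSD quoted in the
input comments of the log above can be evaluated directly and compared with the
c_msd[4] column of the thermo table.  The short Python sketch below assumes the
parameters of this run (kb*T = 1.0, gamma_t = 3.0, gamma_r = 1.0, fp = 4.0, d = 3)
and that time equals step * 0.00001; it is an illustration only.

import numpy as np

def ideal_abp_msd(t, kbT=1.0, gamma_t=3.0, gamma_r=1.0, fp=4.0, d=3):
    """Theoretical MSD of an ideal active Brownian sphere (same expression as the log comment)."""
    linear = (2.0 * d * kbT / gamma_t
              + 2.0 * fp**2 * gamma_r / (kbT * gamma_t**2 * (d - 1))) * t
    transient = (2.0 * fp**2 * gamma_r**2 / (gamma_t**2 * (d - 1)**2 * kbT**2)
                 * (np.exp(-(d - 1) * t * kbT / gamma_r) - 1.0))
    return linear + transient

# times matching the thermo rows (steps 0, 1000, ..., 12000); values should lie
# close to the c_msd[4] column above, up to statistical noise
t = np.arange(0, 12001, 1000) * 1.0e-5
print(ideal_abp_msd(t))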


@ -0,0 +1,210 @@
LAMMPS (8 Apr 2021)
using 1 OpenMP thread(s) per MPI task
# 3D overdamped active brownian dynamics with no interactions
variable gamma_t string 3.0
variable gamma_r string 1.0
variable temp string 1.0
variable seed equal 1974019
variable fp string 4.0
variable params string ${temp}_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_${gamma_t}_${gamma_r}_${fp}
variable params string 1.0_3.0_${gamma_r}_${fp}
variable params string 1.0_3.0_1.0_${fp}
variable params string 1.0_3.0_1.0_4.0
units lj
atom_style hybrid dipole sphere
WARNING: Atom style hybrid defines both, per-type and per-atom masses; both must be set, but only per-atom masses will be used (src/atom_vec_hybrid.cpp:156)
dimension 3
newton off
lattice sc 0.4
Lattice spacing in x,y,z = 1.3572088 1.3572088 1.3572088
region box block -8 8 -8 8 -8 8
create_box 1 box
Created orthogonal box = (-10.857670 -10.857670 -10.857670) to (10.857670 10.857670 10.857670)
2 by 1 by 2 MPI processor grid
create_atoms 1 box
Created 4096 atoms
create_atoms CPU = 0.002 seconds
mass * 1.0
set type * dipole/random ${seed} 1.0
set type * dipole/random 1974019 1.0
Setting atom values ...
4096 settings made for dipole/random
velocity all create 1.0 1 loop geom
pair_style none
# overdamped brownian dynamics time-step
fix step all brownian/sphere ${temp} ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 ${seed} gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t ${gamma_t} gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 3.0 gamma_r ${gamma_r}
fix step all brownian/sphere 1.0 1974019 gamma_t 3.0 gamma_r 1.0
# self-propulsion force along the dipole direction
fix activity all propel/self dipole ${fp}
fix activity all propel/self dipole 4.0
compute press all pressure NULL virial
thermo_style custom step ke pe c_press
#equilibration
timestep 0.0000000001
thermo 100
run 5000
WARNING: No pairwise cutoff or binsize set. Atom sorting therefore disabled. (src/atom.cpp:2141)
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.319 | 4.319 | 4.319 Mbytes
Step KinEng PotEng c_press
0 1.4996338 0 0.068021726
100 5.2484581e+09 0 0.068010768
200 5.277936e+09 0 0.068024039
300 5.2651427e+09 0 0.068014821
400 5.2066432e+09 0 0.068027583
500 5.2250276e+09 0 0.068030242
600 5.239771e+09 0 0.068018406
700 5.1953674e+09 0 0.068017081
800 5.2097107e+09 0 0.068010167
900 5.2559863e+09 0 0.068012923
1000 5.2743197e+09 0 0.068017855
1100 5.1999741e+09 0 0.068014189
1200 5.3216344e+09 0 0.068005604
1300 5.2839264e+09 0 0.067982558
1400 5.2462761e+09 0 0.067977843
1500 5.2208208e+09 0 0.067979594
1600 5.2740284e+09 0 0.067972573
1700 5.1919692e+09 0 0.067974452
1800 5.2497614e+09 0 0.067966417
1900 5.2910442e+09 0 0.067976096
2000 5.27238e+09 0 0.067963979
2100 5.3305398e+09 0 0.06795661
2200 5.205471e+09 0 0.067970212
2300 5.1803713e+09 0 0.067931775
2400 5.2134311e+09 0 0.067941825
2500 5.2367424e+09 0 0.067963456
2600 5.2246738e+09 0 0.067957556
2700 5.2514573e+09 0 0.067960724
2800 5.2601577e+09 0 0.067965167
2900 5.2422855e+09 0 0.067956561
3000 5.1796674e+09 0 0.067946764
3100 5.2308189e+09 0 0.067946585
3200 5.1835395e+09 0 0.067951909
3300 5.2762112e+09 0 0.067963199
3400 5.3224133e+09 0 0.067944918
3500 5.2314242e+09 0 0.06795318
3600 5.2760337e+09 0 0.067958005
3700 5.2549349e+09 0 0.06795228
3800 5.3343065e+09 0 0.067944561
3900 5.2440993e+09 0 0.067947433
4000 5.2565026e+09 0 0.067962624
4100 5.1766738e+09 0 0.067949542
4200 5.2058437e+09 0 0.067959946
4300 5.2777775e+09 0 0.067945883
4400 5.2020331e+09 0 0.067953495
4500 5.1417619e+09 0 0.067944161
4600 5.2672994e+09 0 0.067936777
4700 5.222847e+09 0 0.067943025
4800 5.2467842e+09 0 0.06794191
4900 5.2784378e+09 0 0.067939495
5000 5.2563969e+09 0 0.067940246
Loop time of 1.55848 on 4 procs for 5000 steps with 4096 atoms
Performance: 0.028 tau/day, 3208.260 timesteps/s
97.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.15304 | 0.15677 | 0.16459 | 1.2 | 10.06
Output | 0.0012078 | 0.0021182 | 0.0047011 | 3.2 | 0.14
Modify | 1.1966 | 1.2236 | 1.2761 | 2.8 | 78.51
Other | | 0.176 | | | 11.29
Nlocal: 1024.00 ave 1024 max 1024 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 353.000 ave 353 max 353 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 0
Dangerous builds = 0
reset_timestep 0
# MSD to demonstrate expected diffusive behaviour for ideal active
# brownian motion, which is
#
# MSD = (2*d*kb*T/gamma_t + 2*fp**2*gamma_r/(kb*T*gamma_t**2*(d-1)))*t
# + 2*fp**2*gamma_r**2/(gamma_t**2*(d-1)**2*(kb*T)**2)*(e^(-(d-1)*t*kb*T/gamma_r)-1)
#
# with d being simulation dimension
compute msd all msd
thermo_style custom step ke pe c_msd[*] c_press
timestep 0.00001
thermo 1000
# main run
run 12000
WARNING: Communication cutoff is 0.0. No ghost atoms will be generated. Atoms may get lost. (src/comm_brick.cpp:167)
Per MPI rank memory allocation (min/avg/max) = 4.694 | 4.694 | 4.694 Mbytes
Step KinEng PotEng c_msd[1] c_msd[2] c_msd[3] c_msd[4] c_press
0 5.2563969e+09 0 0 0 0 0 0.067940246
1000 52568.549 0 0.0067249858 0.0066478843 0.0066014231 0.019974293 0.066777589
2000 52836.937 0 0.013611101 0.013799663 0.013161144 0.040571907 0.066769693
3000 52129.467 0 0.020360834 0.02089829 0.01995025 0.061209374 0.060026879
4000 52075.177 0 0.027638751 0.028062314 0.026895904 0.082596969 0.078290387
5000 52203.996 0 0.034087112 0.034933104 0.033832559 0.10285278 0.083657551
6000 52986.764 0 0.041562413 0.042238976 0.040542538 0.12434393 0.11542014
7000 51941.229 0 0.049216989 0.049250201 0.047598008 0.1460652 0.13739893
8000 52618.713 0 0.057198947 0.057409217 0.05404895 0.16865711 0.13681938
9000 52501.332 0 0.066447829 0.065262287 0.062271789 0.19398191 0.14306596
10000 52545.628 0 0.073800792 0.072510553 0.070100713 0.21641206 0.14689578
11000 52416.561 0 0.081881868 0.080638809 0.078969817 0.24149049 0.15608324
12000 52271.578 0 0.090521937 0.088555992 0.08592156 0.26499949 0.1474981
Loop time of 3.13506 on 4 procs for 12000 steps with 4096 atoms
Performance: 3307.113 tau/day, 3827.677 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0.00060225 | 0.00060934 | 0.00061345 | 0.0 | 0.02
Comm | 0.029197 | 0.029376 | 0.029582 | 0.1 | 0.94
Output | 0.00060606 | 0.00087148 | 0.0016448 | 0.0 | 0.03
Modify | 2.84 | 2.8773 | 2.8942 | 1.3 | 91.78
Other | | 0.2269 | | | 7.24
Nlocal: 1024.00 ave 1037 max 999 min
Histogram: 1 0 0 0 0 0 0 1 1 1
Nghost: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0.0000000
Neighbor list builds = 73
Dangerous builds = 0
# if you want to check that rotational diffusion is behaving as expected,
# uncomment next three lines for dump output and then plot <e(t).e(0)>,
# which should decay exponentially with timescale (d-1)*D_r (with d
# being simulation dimension)
#dump 1 all custom 2000 dump_ideal_${params}_3d.lammpstrj id type # x y xu yu mux muy muz fx fy fz
#dump_modify 1 first yes sort id
#run 120000
Total wall time: 0:00:04
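
A hedged aside on the rotational-diffusion check mentioned in the comments above:
with the commented-out dump enabled, <e(t).e(0)> can be computed from the dipole
components mux, muy, muz.  The Python sketch below assumes the orientations have
already been read into a NumPy array e of shape (nframes, natoms, 3) holding unit
vectors; the array name and the file-reading step are assumptions, not part of
this commit.

import numpy as np

def orientation_acf(e):
    """Average <e(t).e(0)> over atoms for each stored frame."""
    e0 = e[0]                                   # orientations at t = 0
    return np.einsum('tni,ni->tn', e, e0).mean(axis=1)

# For an ideal sphere this autocorrelation should decay roughly as
# exp(-(d-1)*D_r*t) with D_r = kb*T/gamma_r, i.e. a decay rate of 2
# for this run (d = 3, kb*T = 1.0, gamma_r = 1.0).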

examples/USER/mdi/README Normal file

@ -0,0 +1,24 @@
This dir contains scripts that demonstrate how to use LAMMPS as an
MDI engine. LAMMPS as an engine performs the MD timestepping.
The driver is a simple Python script. Every timestep the driver
sends one or more commands to LAMMPS.
--------------
The Script.sh file has commands to perform some very simple example
runs.
--------------
More complex calculations using LAMMPS as an MDI engine will
typically require the use of an MDI driver. Several MDI drivers
that support calculations with LAMMPS include:
Ab Initio Molecular Dynamics (AIMD) Driver:
https://github.com/MolSSI-MDI/MDI_AIMD_Driver
Nudged Elastic Band (NEB) Driver:
https://github.com/MolSSI-MDI/MDI_NEB_Driver
Metadynamics Driver:
https://github.com/MolSSI-MDI/MDI_Metadynamics


@ -0,0 +1,16 @@
#!/bin/bash
# sample launch scripts
# TCP, running LAMMPS on one proc
python driver.py -mdi "-name driver -role DRIVER -method TCP -port 8021" &
../../../src/lmp_mdi -mdi "-name LAMMPS -role ENGINE -method TCP -port 8021 -hostname localhost" -in lammps.in > lammps.out &
wait
# TCP, running LAMMPS on two procs
python driver.py -mdi "-name driver -role DRIVER -method TCP -port 8021" &
mpiexec -n 2 ../../../src/lmp_mdi -mdi "-name LAMMPS -role ENGINE -method TCP -port 8021 -hostname localhost" -in lammps.in > lammps.out &
wait
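
# Hedged addition (not part of the original Script.sh): MDI also provides an MPI
# communication method, in which driver and engine share a single MPI launch via
# standard MPMD syntax.  The flags below mirror the TCP examples above but are an
# assumption and may need adjusting for a particular MPI installation.
# MPI, running LAMMPS on two procs
mpiexec -n 1 python driver.py -mdi "-name driver -role DRIVER -method MPI" : \
        -n 2 ../../../src/lmp_mdi -mdi "-name LAMMPS -role ENGINE -method MPI" -in lammps.in > lammps.out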


@ -0,0 +1,24 @@
import sys
import mdi
use_mpi4py = False
try:
from mpi4py import MPI
use_mpi4py = True
except ImportError:
pass
# Initialize the MDI Library
mdi.MDI_Init(sys.argv[2])
# Connect to the engine
comm = mdi.MDI_Accept_communicator()
# Determine the name of the engine
mdi.MDI_Send_Command("<NAME", comm)
name = mdi.MDI_Recv(mdi.MDI_NAME_LENGTH, mdi.MDI_CHAR, comm)
print("Engine name: " + str(name))
# Send the "EXIT" command to the engine
mdi.MDI_Send_Command("EXIT", comm)


@ -0,0 +1,92 @@
LAMMPS data file for water
24 atoms
16 bonds
8 angles
0 dihedrals
0 impropers
2 atom types
1 bond types
1 angle types
0 dihedral types
0 improper types
0.0 4.9325 xlo xhi
0.0 4.9325 ylo yhi
0.0 4.9325 zlo zhi
Masses
1 15.9994
2 1.008
Pair Coeffs
1 0.102 3.188
2 0.000 0.000
Bond Coeffs
1 450 0.9572
Angle Coeffs
1 55.0 104.52
Atoms
1 0 1 -0.83400 2.17919 0.196156 4.15513
2 0 2 0.41700 2.29785 4.8353 0.126003
3 0 2 0.41700 1.82037 1.07996 4.23498
4 0 1 -0.83400 4.65839 0.120414 0.305758
5 0 2 0.41700 4.67446 -0.0220991 4.29186
6 0 2 0.41700 4.28188 0.994196 0.410515
7 0 1 -0.83400 3.65045 2.40907 0.344349
8 0 2 0.41700 3.52052 2.1838 4.35565
9 0 2 0.41700 4.26579 3.14208 0.327669
10 0 1 -0.83400 1.21327 2.62177 4.15519
11 0 2 0.41700 1.47452 3.53837 4.0667
12 0 2 0.41700 1.20743 2.46396 0.16677
13 0 1 -0.83400 4.45777 4.47325 2.74192
14 0 2 0.41700 4.53396 4.49652 1.78804
15 0 2 0.41700 4.21354 3.56943 2.94119
16 0 1 -0.83400 2.04119 4.41585 1.64725
17 0 2 0.41700 2.26934 4.77582 2.50434
18 0 2 0.41700 1.69079 3.54574 1.83793
19 0 1 -0.83400 3.73384 1.97964 2.81949
20 0 2 0.41700 3.41083 2.22014 1.95113
21 0 2 0.41700 3.91914 1.04272 2.75561
22 0 1 -0.83400 1.20859 2.09853 1.68186
23 0 2 0.41700 1.01865 2.25693 2.60655
24 0 2 0.41700 1.16884 1.14674 1.58832
Bonds
1 1 1 2
2 1 1 3
3 1 4 5
4 1 4 6
5 1 7 8
6 1 7 9
7 1 10 11
8 1 10 12
9 1 13 14
10 1 13 15
11 1 16 17
12 1 16 18
13 1 19 20
14 1 19 21
15 1 22 23
16 1 22 24
Angles
1 1 2 1 3
2 1 5 4 6
3 1 8 7 9
4 1 11 10 12
5 1 14 13 15
6 1 17 16 18
7 1 20 19 21
8 1 23 22 24


@ -0,0 +1,28 @@
units real
neigh_modify delay 0 every 1 check yes
atom_style full
bond_style harmonic
angle_style harmonic
pair_style lj/cut/coul/long 10.0
pair_modify mix arithmetic
kspace_style pppm 1e-4
special_bonds amber
atom_modify sort 0 0
read_data lammps.data
timestep 1.0
dump 1 all custom 1 dump.lammpstrj id element xu yu zu
dump 2 all custom 1 dump.force id element fx fy fz
dump 3 all xyz 1 dump.xyz
dump_modify 1 element O H
dump_modify 2 element O H
thermo_style multi
thermo 1
fix 1 all nvt temp 300.0 300.0 70.0
mdi/engine


@ -1,54 +0,0 @@
dimension 2
boundary p p p
variable L equal 20
region total block -$L $L -$L $L -0.5 0.5
lattice hex 0.3
create_box 2 total
create_atoms 1 box
# Set random fraction to passive:
set type 1 type/fraction 2 0.5 1337
# Purely repulsive particles:
variable rc equal "2^(1.0/6.0)"
pair_style lj/cut ${rc}
pair_coeff * * 1.0 1.0
pair_modify shift yes
mass * 1.0
fix step all nve
fix temp all langevin 1.0 1.0 1.0 13
fix twod all enforce2d
neighbor 0.6 bin
dump traj all custom 250 2d_active.dump.bin id type x y z
thermo_style custom time step pe ke etotal temp
thermo 1000
run 5000
group one type 1
group two type 2
compute ke1 one ke
compute ke2 two ke
thermo_style custom step pe ke etotal temp c_ke1 c_ke2
fix active all propel/self velocity 1.0
# With active force there is more motion so increase bin size:
neighbor 1.0 bin
run 10000
# Only make type 1 active:
fix active all propel/self velocity 1.0 types 1
# With active force there is more motion so increase bin size:
neighbor 1.0 bin
run 10000


@ -1,37 +0,0 @@
dimension 2
boundary p p p
variable L equal 20
region total block -$L $L -$L $L -0.5 0.5
lattice hex 0.3
create_box 2 total
create_atoms 1 box
# Set random fraction to passive:
set type 1 type/fraction 2 0.5 1337
# Purely repulsive particles:
variable rc equal "2^(1.0/6.0)"
pair_style lj/cut ${rc}
pair_coeff * * 1.0 1.0
pair_modify shift yes
mass * 1.0
fix step all nve
fix twod all enforce2d
neighbor 0.6 bin
dump traj all custom 250 2d_active.dump.bin id type x y z
thermo_style custom step pe ke etotal temp
thermo 1000
run 10000
fix active all propel/self velocity 1.0
fix fric all viscous 1.0
# With active force there is more motion so increase bin size:
neighbor 1.0 bin
run 10000


@ -1,40 +0,0 @@
dimension 3
boundary p p p
atom_style ellipsoid
variable L equal 20
region total block -$L $L -$L $L -$L $L
lattice sc 0.1
create_box 2 total
create_atoms 1 box
# Set random fraction to passive:
set type 1 type/fraction 2 0.5 1337
# Purely repulsive particles:
variable rc equal "2^(1.0/6.0)"
pair_style lj/cut ${rc}
pair_coeff * * 1.0 1.0
pair_modify shift yes
# mass * 1.0
set type * shape 1.0 1.0 1.0
set type * density 1.9098593171027443
set type * quat 0 0 1 0
fix step all nve/asphere
fix temp all langevin 1.0 1.0 1.0 13 angmom 3.333333333
neighbor 0.6 bin
dump traj all custom 100 3d_active.dump.bin id type x y z fx fy fz
thermo_style custom step pe ke etotal temp
thermo 100
run 500
fix active all propel/self quat 1.0
# With active force there is more motion so increase bin size:
neighbor 1.0 bin
run 500


@ -86,7 +86,7 @@ pair_style gran/hertz/history &
${kn} ${kt} ${gamma_n} ${gamma_t} ${coeffFric} 1
pair_coeff * *
neighbor ${skin} bin
neighbor ${skin} multi
thermo ${logfreq}
comm_style brick


@ -49,7 +49,7 @@ pair_style gran/hertz/history &
${kn} ${kt} ${gamma_n} ${gamma_t} ${coeffFric} 1
pair_coeff * *
neighbor ${skin} bin
neighbor ${skin} multi
thermo ${logfreq}
comm_style brick

examples/multi/data.powerlaw Normal file

File diff suppressed because it is too large.

examples/multi/in.colloid Normal file

@ -0,0 +1,67 @@
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.1
region box block 0 60 0 60 -0.5 0.5
create_box 5 box
create_atoms 1 box
#Roughly equally partition atoms between types 1-4
set group all type/fraction 2 0.500 23984
set group all type/fraction 3 0.333 43684
set group all type/fraction 4 0.250 87811
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 60.0 60.0 0.0 30.0 units box
region sphere2 sphere 130.0 130.0 0.0 30.0 units box
delete_atoms region sphere1
delete_atoms region sphere2
create_atoms 5 single 60.0 60.0 0.0 units box
create_atoms 5 single 130.0 130.0 0.0 units box
set type 1 mass 400
set type 2 mass 1
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi #multi/old
neigh_modify delay 0 collection/type 2 1*4 5
comm_modify mode multi reduce/multi #multi/old
# colloid potential
pair_style colloid 20.0
pair_coeff * * 144.0 1.0 0.0 0.0 3.0
pair_coeff 1 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 2 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 3 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 4 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 5 5 39.5 1.0 20.0 20.0 30.0
fix 1 all npt temp 2.0 2.0 1.0 iso 0.0 1.0 10.0 drag 1.0 &
mtk no pchain 0 tchain 1
fix 2 all enforce2d
#dump 1 all atom 1000 dump.colloid
#dump 2 all image 1000 image.*.jpg type type &
# zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type &
# zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 100
timestep 0.005
run 2000


@ -0,0 +1,50 @@
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.5
region box block 0 60 0 60 -0.5 0.5
create_box 2 box
create_atoms 1 box
change_box all triclinic
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 20.0 20.0 0.0 10.0 units box
region sphere2 sphere 60.0 60.0 0.0 10.0 units box
delete_atoms region sphere1
delete_atoms region sphere2
create_atoms 2 single 20.0 20.0 0.0 units box
create_atoms 2 single 60.0 60.0 0.0 units box
set type 2 mass 400
set type 1 mass 1
set type 2 diameter 20
set type 1 diameter 1
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi #multi/old
neigh_modify delay 0 collection/interval 2 1 20
comm_modify mode multi vel yes reduce/multi #multi/old
# granular potential
pair_style granular
pair_coeff * * hooke 1.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
fix 1 all nph/sphere iso 0.0 1.0 10.0 drag 1.0
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-3
#dump 1 all custom 1000 dump.granular id x y z radius
thermo_style custom step temp epair etotal press vol
thermo 100
timestep 0.005
run 2000


@ -0,0 +1,33 @@
# Shear power-law distributed granular particles
units lj
atom_style sphere
dimension 2
read_data data.powerlaw
change_box all triclinic
# multi neighbor and comm for efficiency
neighbor 1 multi
neigh_modify delay 0 collection/interval 6 1.5 3 10 30 100 200
comm_modify mode multi vel yes reduce/multi
# granular potential
pair_style granular
pair_coeff * * hooke 20.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
# fixes
fix 1 all nve/sphere
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-4
# dump 1 all custom 20000 dump.granular id x y z radius
thermo_style custom step temp epair etotal press vol pxy
thermo 100
timestep 0.005
run 1000


@ -0,0 +1,183 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.1
Lattice spacing in x,y,z = 3.1622777 3.1622777 3.1622777
region box block 0 60 0 60 -0.5 0.5
create_box 5 box
Created orthogonal box = (0.0000000 0.0000000 -1.5811388) to (189.73666 189.73666 1.5811388)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
#Roughly equally partition atoms between types 1-4
set group all type/fraction 2 0.500 23984
Setting atom values ...
1768 settings made for type/fraction
set group all type/fraction 3 0.333 43684
Setting atom values ...
1255 settings made for type/fraction
set group all type/fraction 4 0.250 87811
Setting atom values ...
927 settings made for type/fraction
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 60.0 60.0 0.0 30.0 units box
region sphere2 sphere 130.0 130.0 0.0 30.0 units box
delete_atoms region sphere1
Deleted 289 atoms, new total = 3311
delete_atoms region sphere2
Deleted 287 atoms, new total = 3024
create_atoms 5 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 5 single 130.0 130.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 1 mass 400
Setting atom values ...
753 settings made for mass
set type 2 mass 1
Setting atom values ...
722 settings made for mass
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi #multi/old
neigh_modify delay 0 collection/type 2 1*4 5
comm_modify mode multi reduce/multi #multi/old
# colloid potential
pair_style colloid 20.0
pair_coeff * * 144.0 1.0 0.0 0.0 3.0
pair_coeff 1 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 2 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 3 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 4 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 5 5 39.5 1.0 20.0 20.0 30.0
fix 1 all npt temp 2.0 2.0 1.0 iso 0.0 1.0 10.0 drag 1.0 mtk no pchain 0 tchain 1
fix 2 all enforce2d
#dump 1 all atom 1000 dump.colloid
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 31
ghost atom cutoff = 31
binsize = 2, bins = 95 95 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair colloid, perpetual
attributes: half, newton on
pair build: half/multi/newton
stencil: half/multi/2d
bin: multi
Per MPI rank memory allocation (min/avg/max) = 4.385 | 4.385 | 4.385 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395241 0.121 36000
1000 1.8856066 -0.15771717 1.7272663 0.13840578 42574.399
2000 1.8589993 -0.11434676 1.7440382 0.097157151 58590.69
3000 1.8984314 -0.093445816 1.8043582 0.07444246 77824.12
4000 1.9603204 -0.07451891 1.8851536 0.066010381 90951.299
5000 2.0298924 -0.073898174 1.9553234 0.075791214 90146.92
6000 2.0797015 -0.086800285 1.992214 0.082095164 78182.702
7000 2.0867886 -0.10960963 1.9764893 0.10103655 63990.386
8000 2.0803886 -0.12736298 1.9523381 0.12561727 52648.372
9000 2.0605661 -0.14572043 1.9141648 0.15154081 44589.764
10000 2.0636909 -0.18556771 1.8774412 0.1604707 38996.941
11000 2.0498344 -0.20303461 1.8461224 0.18295046 34927.993
12000 2.0466611 -0.2217963 1.8241884 0.23031182 31760.363
13000 2.0441824 -0.24716826 1.7963386 0.22167931 29178.226
14000 2.047513 -0.26988172 1.7769547 0.24070752 26991.372
15000 2.0154283 -0.26144354 1.7533187 0.27587713 25247.715
16000 2.0160849 -0.28106984 1.7343488 0.32297139 23703.607
17000 2.0184729 -0.31071368 1.7070922 0.29815613 22300.6
18000 2.0237288 -0.33944941 1.6836106 0.3262795 21098.856
19000 2.0329827 -0.35438937 1.6779215 0.33691952 19989.867
20000 2.021113 -0.37316841 1.6472766 0.39687648 18978.666
21000 2.0352439 -0.40857976 1.6259915 0.38632613 18146.277
22000 2.0158566 -0.41271329 1.6024771 0.41480502 17409.593
23000 2.0170409 -0.42611776 1.5902566 0.40446612 16748.968
24000 2.0108878 -0.43899286 1.5712304 0.42075035 16086.941
25000 2.0218394 -0.47012156 1.5510497 0.46655183 15460.154
26000 2.0100713 -0.47985916 1.5295479 0.45575323 15013.774
27000 2.0251738 -0.5016665 1.5228381 0.50151992 14591.521
28000 2.0062966 -0.50284394 1.5027897 0.5462034 14135.093
29000 2.0146666 -0.53126035 1.4827405 0.60379062 13725.945
30000 2.0036455 -0.53246643 1.4705169 0.56784088 13417.305
31000 2.0127662 -0.54487777 1.4672233 0.6427741 13139.392
32000 2.0221816 -0.5625554 1.4589579 0.60695012 12779.609
33000 2.024983 -0.59515221 1.4291616 0.60005385 12584.572
34000 2.0184045 -0.59033569 1.4274018 0.62519753 12355.49
35000 2.0155635 -0.61190466 1.4029927 0.71044196 12106.819
36000 2.0252503 -0.61581601 1.408765 0.68805882 11728.608
37000 2.0112487 -0.64540754 1.3651765 0.66981639 11475.772
38000 2.0147475 -0.64161981 1.3724619 0.71130901 11285.511
39000 2.0213092 -0.67174661 1.3488946 0.6969697 11044.647
40000 2.0178739 -0.67924699 1.3379601 0.77309897 10824.198
41000 1.9952353 -0.67490899 1.3196669 0.76592358 10646.649
42000 2.002415 -0.70533555 1.2964178 0.81084741 10519.804
43000 2.0211625 -0.71370366 1.3067909 0.77355048 10434.893
44000 2.0252106 -0.72635544 1.2981859 0.83770143 10132.262
45000 2.0126446 -0.75197714 1.2600024 0.88927993 9946.7842
46000 2.0431159 -0.78445975 1.257981 0.84492327 9869.8151
47000 2.0199724 -0.76967899 1.2496259 0.90977181 9653.4334
48000 2.0109636 -0.78968551 1.2206135 0.89458323 9496.7246
49000 2.0131059 -0.79687252 1.2155681 0.91239613 9418.3093
50000 2.0073361 -0.79981468 1.206858 0.98524334 9289.4715
Loop time of 19.7532 on 1 procs for 50000 steps with 3026 atoms
Performance: 1093493.133 tau/day, 2531.234 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 10.789 | 10.789 | 10.789 | 0.0 | 54.62
Neigh | 2.6848 | 2.6848 | 2.6848 | 0.0 | 13.59
Comm | 0.53244 | 0.53244 | 0.53244 | 0.0 | 2.70
Output | 0.0010482 | 0.0010482 | 0.0010482 | 0.0 | 0.01
Modify | 4.9599 | 4.9599 | 4.9599 | 0.0 | 25.11
Other | | 0.7856 | | | 3.98
Nlocal: 3026.00 ave 3026 max 3026 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 584.000 ave 584 max 584 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 25892.0 ave 25892 max 25892 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 25892
Ave neighs/atom = 8.5565102
Neighbor list builds = 4330
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:19


@ -0,0 +1,183 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.1
Lattice spacing in x,y,z = 3.1622777 3.1622777 3.1622777
region box block 0 60 0 60 -0.5 0.5
create_box 5 box
Created orthogonal box = (0.0000000 0.0000000 -1.5811388) to (189.73666 189.73666 1.5811388)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
#Roughly equally partition atoms between types 1-4
set group all type/fraction 2 0.500 23984
Setting atom values ...
1768 settings made for type/fraction
set group all type/fraction 3 0.333 43684
Setting atom values ...
1255 settings made for type/fraction
set group all type/fraction 4 0.250 87811
Setting atom values ...
927 settings made for type/fraction
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 60.0 60.0 0.0 30.0 units box
region sphere2 sphere 130.0 130.0 0.0 30.0 units box
delete_atoms region sphere1
Deleted 289 atoms, new total = 3311
delete_atoms region sphere2
Deleted 287 atoms, new total = 3024
create_atoms 5 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 5 single 130.0 130.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 1 mass 400
Setting atom values ...
753 settings made for mass
set type 2 mass 1
Setting atom values ...
722 settings made for mass
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi #multi/old
neigh_modify delay 0 collection/type 2 1*4 5
comm_modify mode multi reduce/multi #multi/old
# colloid potential
pair_style colloid 20.0
pair_coeff * * 144.0 1.0 0.0 0.0 3.0
pair_coeff 1 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 2 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 3 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 4 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 5 5 39.5 1.0 20.0 20.0 30.0
fix 1 all npt temp 2.0 2.0 1.0 iso 0.0 1.0 10.0 drag 1.0 mtk no pchain 0 tchain 1
fix 2 all enforce2d
#dump 1 all atom 1000 dump.colloid
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 31
ghost atom cutoff = 31
binsize = 2, bins = 95 95 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair colloid, perpetual
attributes: half, newton on
pair build: half/multi/newton
stencil: half/multi/2d
bin: multi
Per MPI rank memory allocation (min/avg/max) = 4.327 | 4.329 | 4.330 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395241 0.121 36000
1000 1.8856066 -0.15771717 1.7272663 0.13840578 42574.399
2000 1.8590154 -0.11436231 1.7440387 0.097150798 58590.688
3000 1.8956738 -0.090814176 1.8042332 0.075557943 77825.289
4000 1.9570462 -0.072505537 1.8838939 0.072824365 90931.708
5000 2.0376745 -0.083247829 1.9537533 0.068496975 90055.295
6000 2.0744887 -0.085395371 1.9884077 0.0821927 78070.648
7000 2.1002183 -0.11654617 1.9829781 0.10523249 63934.448
8000 2.0818325 -0.13271654 1.948428 0.11909162 52636.484
9000 2.0693987 -0.16404154 1.9046733 0.14702552 44539.609
10000 2.0667772 -0.19779488 1.8682993 0.17245383 38822.542
11000 2.0640582 -0.22114917 1.842227 0.18083079 34788.927
12000 2.0308462 -0.20353105 1.8266441 0.20640739 31706.009
13000 2.0395895 -0.24217765 1.7967378 0.21832952 29152.654
14000 2.030848 -0.2586169 1.77156 0.26577748 27068.89
15000 2.0222966 -0.27554585 1.7460825 0.2777169 25272.786
16000 2.0398867 -0.31547563 1.723737 0.27763622 23666.792
17000 2.03026 -0.32453791 1.7050512 0.28099246 22272.809
18000 2.0345512 -0.35026242 1.6836164 0.36600779 21023.172
19000 2.0242864 -0.35813231 1.6654851 0.33415432 19941.244
20000 2.0132465 -0.36563904 1.6469422 0.403365 18979.884
21000 2.0280384 -0.4075867 1.6197815 0.37205362 18152.487
22000 2.0206494 -0.40600336 1.6139782 0.42704594 17370.812
23000 2.0395761 -0.45083258 1.5880695 0.40276343 16700.427
24000 2.017203 -0.44930293 1.5672335 0.43867313 16161.79
25000 2.0191846 -0.4672218 1.5512955 0.47031215 15622.756
26000 2.0131624 -0.46436088 1.5481363 0.51717944 15141.645
27000 2.0322461 -0.50659994 1.5249745 0.49218933 14627.657
28000 2.0169304 -0.50555565 1.5107082 0.55547935 14186.079
29000 2.024656 -0.52258414 1.5014028 0.59125812 13759.99
30000 2.0153725 -0.53585947 1.478847 0.57235811 13384.355
31000 2.0163261 -0.56383766 1.4518221 0.58232057 13098.196
32000 2.0109673 -0.56784395 1.4424588 0.58282178 12831.934
33000 2.0099169 -0.57625621 1.4329964 0.65139601 12479.442
34000 2.0238152 -0.60189607 1.4212503 0.62659152 12210.628
35000 2.0359989 -0.62654733 1.4087787 0.67574446 11972.725
36000 2.0222689 -0.62880837 1.3927923 0.66602146 11690.049
37000 1.9982569 -0.62746376 1.3701328 0.71326589 11433.825
38000 1.9969836 -0.63975181 1.3565719 0.72799891 11285.497
39000 2.0071087 -0.65781805 1.3486274 0.79121297 11107.469
40000 2.0243046 -0.6881221 1.3355135 0.77519099 10943.846
41000 2.0351657 -0.70309175 1.3314014 0.68815156 10742.515
42000 2.0224788 -0.70975664 1.3120538 0.80484619 10505.657
43000 2.0123135 -0.70818545 1.3034631 0.84204556 10353.024
44000 1.999883 -0.70981202 1.2894101 0.94070546 10212.224
45000 2.0127291 -0.73338075 1.2786832 0.82095205 10109.959
46000 2.0109037 -0.75130029 1.2589389 0.88538358 9953.4822
47000 1.9879175 -0.73152019 1.2557404 0.92089629 9832.892
48000 2.0108204 -0.76655178 1.2436041 0.95379465 9633.6453
49000 1.9868193 -0.76613798 1.2200247 0.88790224 9504.2918
50000 2.0141467 -0.80029827 1.2131829 1.0064263 9346.3268
Loop time of 6.98615 on 4 procs for 50000 steps with 3026 atoms
Performance: 3091831.080 tau/day, 7157.016 timesteps/s
99.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.2795 | 2.5856 | 2.9414 | 17.4 | 37.01
Neigh | 0.62273 | 0.70156 | 0.76736 | 7.4 | 10.04
Comm | 1.0765 | 1.4945 | 1.8884 | 28.6 | 21.39
Output | 0.00076496 | 0.0008953 | 0.0012832 | 0.0 | 0.01
Modify | 1.718 | 1.7755 | 1.827 | 3.7 | 25.41
Other | | 0.4281 | | | 6.13
Nlocal: 756.500 ave 839 max 673 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 292.500 ave 307 max 282 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 6435.25 ave 7367 max 5493 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 25741
Ave neighs/atom = 8.5066094
Neighbor list builds = 4335
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:07


@ -0,0 +1,183 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.1
Lattice spacing in x,y,z = 3.1622777 3.1622777 3.1622777
region box block 0 60 0 60 -0.5 0.5
create_box 5 box
Created orthogonal box = (0.0000000 0.0000000 -1.5811388) to (189.73666 189.73666 1.5811388)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
#Roughly equally partition atoms between types 1-4
set group all type/fraction 2 0.500 23984
Setting atom values ...
1768 settings made for type/fraction
set group all type/fraction 3 0.333 43684
Setting atom values ...
1255 settings made for type/fraction
set group all type/fraction 4 0.250 87811
Setting atom values ...
927 settings made for type/fraction
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 60.0 60.0 0.0 30.0 units box
region sphere2 sphere 130.0 130.0 0.0 30.0 units box
delete_atoms region sphere1
Deleted 289 atoms, new total = 3311
delete_atoms region sphere2
Deleted 287 atoms, new total = 3024
create_atoms 5 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 5 single 130.0 130.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 1 mass 400
Setting atom values ...
753 settings made for mass
set type 2 mass 1
Setting atom values ...
722 settings made for mass
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi/old #multi
neigh_modify delay 0 #multi/custom 2 1*4 5
comm_modify mode multi/old #multi multi/reduce
# colloid potential
pair_style colloid 20.0
pair_coeff * * 144.0 1.0 0.0 0.0 3.0
pair_coeff 1 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 2 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 3 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 4 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 5 5 39.5 1.0 20.0 20.0 30.0
fix 1 all npt temp 2.0 2.0 1.0 iso 0.0 1.0 10.0 drag 1.0 mtk no pchain 0 tchain 1
fix 2 all enforce2d
#dump 1 all atom 1000 dump.colloid
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 31
ghost atom cutoff = 31
binsize = 2, bins = 95 95 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair colloid, perpetual
attributes: half, newton on
pair build: half/multi/old/newton
stencil: half/multi/old/2d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.496 | 4.496 | 4.496 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395241 0.121 36000
1000 1.8856066 -0.15771717 1.7272663 0.13840578 42574.399
2000 1.8589993 -0.11434676 1.7440382 0.097157151 58590.69
3000 1.8984314 -0.093445816 1.8043582 0.07444246 77824.12
4000 1.9603204 -0.07451891 1.8851536 0.066010381 90951.299
5000 2.0298924 -0.073898174 1.9553234 0.075791214 90146.92
6000 2.0797015 -0.086800284 1.992214 0.082095164 78182.702
7000 2.086794 -0.10961479 1.9764895 0.10103993 63990.387
8000 2.082863 -0.12779588 1.9543788 0.12672452 52629.802
9000 2.0718275 -0.15189022 1.9192526 0.14728063 44541.722
10000 2.0603856 -0.18054161 1.8791631 0.16715133 38940.135
11000 2.046791 -0.20458359 1.841531 0.19532742 34907.116
12000 2.0406846 -0.2252868 1.8147234 0.2036178 31740.208
13000 2.0369763 -0.23721632 1.7990869 0.25542564 29079.901
14000 2.0376121 -0.26282517 1.7741135 0.24722118 26947.344
15000 2.0312772 -0.2851101 1.7454959 0.2801199 25180.963
16000 2.0080448 -0.28992973 1.7174515 0.30099318 23723.043
17000 2.0234993 -0.30440169 1.7184289 0.3193226 22342.977
18000 2.0216103 -0.32036933 1.7005729 0.3460322 21068.99
19000 2.0493952 -0.37711533 1.6716026 0.33804972 20013.325
20000 2.0307894 -0.38462795 1.6454903 0.37041981 19092.745
21000 2.0328577 -0.39442652 1.6377594 0.36327057 18260.298
22000 2.0325613 -0.40481002 1.6270796 0.42756691 17447.199
23000 2.0199358 -0.42175719 1.5975111 0.40948041 16768.71
24000 2.0149952 -0.43618764 1.5781417 0.45406069 16187.334
25000 2.0153221 -0.45884172 1.5558143 0.52717203 15605.577
26000 2.0099026 -0.47080566 1.5384327 0.49181459 15088.041
27000 2.0128537 -0.49799999 1.5141885 0.53907465 14590.392
28000 2.0287266 -0.53112525 1.4969309 0.59750714 14208.419
29000 2.0143609 -0.53175704 1.4819381 0.56118773 13840.642
30000 2.0235262 -0.53923416 1.4836234 0.52579997 13500.15
31000 2.0390444 -0.57976823 1.4586023 0.5760349 13082.091
32000 2.018046 -0.57797686 1.4394022 0.59127933 12761.726
33000 2.0059068 -0.57185148 1.4333925 0.58992758 12473.866
34000 1.9828456 -0.57147221 1.4107181 0.77593228 12208.869
35000 1.9900097 -0.58349168 1.4058604 0.681968 11937.285
36000 2.0271405 -0.64374859 1.382722 0.63152587 11675.264
37000 2.0032809 -0.63520712 1.3674117 0.71639384 11440.274
38000 2.0000566 -0.63941617 1.3599795 0.74099652 11235.252
39000 1.9872705 -0.64765522 1.3389586 0.7575743 11080.857
40000 2.0224403 -0.6795645 1.3422075 0.82918546 10861.905
41000 2.0137595 -0.69863075 1.3144633 0.80397759 10712.981
42000 1.9950915 -0.68892531 1.3055069 0.77631365 10632.931
43000 2.0080851 -0.70534369 1.3020778 0.82408436 10408.82
44000 2.0239806 -0.73189482 1.2914169 0.83228695 10227.18
45000 2.0019542 -0.72613202 1.2751606 0.9145618 10044.013
46000 2.0173095 -0.75370218 1.2629407 0.99791312 9837.9611
47000 1.9921201 -0.75875076 1.232711 1.0047839 9711.2083
48000 2.0283587 -0.79063641 1.237052 0.83617499 9610.9933
49000 2.0051919 -0.79078067 1.2137485 0.95651813 9411.7165
50000 2.0140985 -0.81796958 1.1954634 0.93791038 9296.069
Loop time of 28.5339 on 1 procs for 50000 steps with 3026 atoms
Performance: 756994.490 tau/day, 1752.302 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 10.918 | 10.918 | 10.918 | 0.0 | 38.26
Neigh | 10.375 | 10.375 | 10.375 | 0.0 | 36.36
Comm | 1.2856 | 1.2856 | 1.2856 | 0.0 | 4.51
Output | 0.0010955 | 0.0010955 | 0.0010955 | 0.0 | 0.00
Modify | 5.0132 | 5.0132 | 5.0132 | 0.0 | 17.57
Other | | 0.9412 | | | 3.30
Nlocal: 3026.00 ave 3026 max 3026 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 2292.00 ave 2292 max 2292 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 25767.0 ave 25767 max 25767 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 25767
Ave neighs/atom = 8.5152016
Neighbor list builds = 4332
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:28


@ -0,0 +1,183 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.1
Lattice spacing in x,y,z = 3.1622777 3.1622777 3.1622777
region box block 0 60 0 60 -0.5 0.5
create_box 5 box
Created orthogonal box = (0.0000000 0.0000000 -1.5811388) to (189.73666 189.73666 1.5811388)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
#Roughly equally partition atoms between types 1-4
set group all type/fraction 2 0.500 23984
Setting atom values ...
1768 settings made for type/fraction
set group all type/fraction 3 0.333 43684
Setting atom values ...
1255 settings made for type/fraction
set group all type/fraction 4 0.250 87811
Setting atom values ...
927 settings made for type/fraction
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 60.0 60.0 0.0 30.0 units box
region sphere2 sphere 130.0 130.0 0.0 30.0 units box
delete_atoms region sphere1
Deleted 289 atoms, new total = 3311
delete_atoms region sphere2
Deleted 287 atoms, new total = 3024
create_atoms 5 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 5 single 130.0 130.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 1 mass 400
Setting atom values ...
753 settings made for mass
set type 2 mass 1
Setting atom values ...
722 settings made for mass
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi/old #multi
neigh_modify delay 0 #multi/custom 2 1*4 5
comm_modify mode multi/old #multi multi/reduce
# colloid potential
pair_style colloid 20.0
pair_coeff * * 144.0 1.0 0.0 0.0 3.0
pair_coeff 1 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 2 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 3 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 4 5 75.4 1.0 0.0 20.0 14.0
pair_coeff 5 5 39.5 1.0 20.0 20.0 30.0
fix 1 all npt temp 2.0 2.0 1.0 iso 0.0 1.0 10.0 drag 1.0 mtk no pchain 0 tchain 1
fix 2 all enforce2d
#dump 1 all atom 1000 dump.colloid
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 31
ghost atom cutoff = 31
binsize = 2, bins = 95 95 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair colloid, perpetual
attributes: half, newton on
pair build: half/multi/old/newton
stencil: half/multi/old/2d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.404 | 4.406 | 4.410 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395241 0.121 36000
1000 1.8856066 -0.15771717 1.7272663 0.13840578 42574.399
2000 1.8590154 -0.11436231 1.7440387 0.097150798 58590.688
3000 1.8956738 -0.090814168 1.8042332 0.075557943 77825.289
4000 1.9567884 -0.072243657 1.8838981 0.072836007 90931.521
5000 2.0386455 -0.084279096 1.9536927 0.06867562 90054.581
6000 2.0816461 -0.093158646 1.9877995 0.082802397 78084.994
7000 2.0854943 -0.10553618 1.979269 0.10230351 63886.068
8000 2.0923948 -0.14072173 1.9509816 0.11775174 52590.899
9000 2.0687841 -0.15957251 1.9085279 0.14963059 44575.69
10000 2.0607467 -0.18970216 1.8703636 0.17210861 39016.271
11000 2.0538523 -0.20866031 1.8445133 0.18554787 34992.223
12000 2.0408745 -0.22276635 1.8174337 0.21228473 31794.869
13000 2.0366678 -0.24217764 1.7938171 0.22999314 29186.441
14000 2.0470314 -0.26923854 1.7771164 0.2576977 26941.432
15000 2.0262458 -0.27296827 1.7526079 0.25960813 25184.491
16000 2.0410096 -0.30940081 1.7309343 0.27842776 23619.633
17000 2.027379 -0.32411477 1.7025943 0.32102949 22231.582
18000 2.0338405 -0.34468182 1.6884866 0.3306203 21028.933
19000 2.032206 -0.36558904 1.6659454 0.33926726 19958.945
20000 2.0347643 -0.3915229 1.642569 0.33718716 19054.271
21000 2.0242901 -0.38913219 1.634489 0.38062225 18190.934
22000 2.0207557 -0.41078199 1.6093059 0.40143768 17422.03
23000 2.0069068 -0.42062708 1.5856165 0.40146954 16717.999
24000 2.0300595 -0.4536262 1.5757624 0.49229743 16097.323
25000 2.0347548 -0.47655047 1.5575319 0.46787969 15564.848
26000 2.0180789 -0.46537586 1.5520362 0.48541997 15072.597
27000 2.0150506 -0.4886202 1.5257645 0.53829749 14621.24
28000 2.0175464 -0.50951413 1.5073655 0.50140171 14253.441
29000 2.0186127 -0.53911975 1.4788258 0.52955802 13930.266
30000 2.0006844 -0.52621334 1.4738099 0.60130639 13650.051
31000 2.0179614 -0.54573939 1.4715551 0.58747508 13285.903
32000 2.0333208 -0.57431851 1.4583303 0.62631039 12894.077
33000 2.0017273 -0.57778326 1.4232825 0.61159622 12595.987
34000 2.0063025 -0.58192939 1.4237101 0.66174764 12316.964
35000 2.0174782 -0.60591394 1.4108976 0.63571024 12063.433
36000 2.025112 -0.64319133 1.3812514 0.62829458 11930.246
37000 2.0431268 -0.64342323 1.3990283 0.68038546 11651.664
38000 2.0064271 -0.63716263 1.3686014 0.72167175 11345.421
39000 2.0284014 -0.67236471 1.3553663 0.68693225 11062.293
40000 2.0181711 -0.6962559 1.3212483 0.76033095 10864.176
41000 1.9908152 -0.66607906 1.3240783 0.90250403 10812.599
42000 2.0007084 -0.68853623 1.311511 0.88096905 10627.922
43000 1.998883 -0.69053805 1.3076844 0.81765345 10469.928
44000 2.0197069 -0.72507021 1.2939693 0.87004916 10194.954
45000 2.0112835 -0.72638581 1.284233 0.99236207 9968.2662
46000 2.0195002 -0.75152677 1.2673061 0.92706763 9751.1162
47000 1.983694 -0.75006702 1.2329714 0.8945741 9652.1453
48000 1.9977505 -0.77207122 1.225019 0.92107083 9647.1543
49000 2.0000901 -0.76254934 1.2368798 1.0320945 9536.2823
50000 2.0150929 -0.80463979 1.2097872 0.99556424 9324.0277
Loop time of 10.7578 on 4 procs for 50000 steps with 3026 atoms
Performance: 2007847.166 tau/day, 4647.794 timesteps/s
98.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.3814 | 2.6878 | 2.9507 | 15.2 | 24.98
Neigh | 2.3959 | 2.6615 | 2.9677 | 16.2 | 24.74
Comm | 2.4113 | 2.9894 | 3.5621 | 29.6 | 27.79
Output | 0.00077024 | 0.00091029 | 0.0012971 | 0.0 | 0.01
Modify | 1.7966 | 1.8497 | 1.907 | 3.8 | 17.19
Other | | 0.5686 | | | 5.29
Nlocal: 756.500 ave 838 max 693 min
Histogram: 2 0 0 0 0 0 0 1 0 1
Nghost: 1282.50 ave 1333 max 1216 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Neighs: 6426.25 ave 7350 max 5786 min
Histogram: 2 0 0 0 0 1 0 0 0 1
Total # of neighbors = 25705
Ave neighs/atom = 8.4947125
Neighbor list builds = 4326
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:10


@ -0,0 +1,175 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.5
Lattice spacing in x,y,z = 1.4142136 1.4142136 1.4142136
region box block 0 60 0 60 -0.5 0.5
create_box 2 box
Created orthogonal box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
change_box all triclinic
Changing box ...
triclinic box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678) with tilt (0.0000000 0.0000000 0.0000000)
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 20.0 20.0 0.0 10.0 units box
region sphere2 sphere 60.0 60.0 0.0 10.0 units box
delete_atoms region sphere1
Deleted 154 atoms, new total = 3446
delete_atoms region sphere2
Deleted 158 atoms, new total = 3288
create_atoms 2 single 20.0 20.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 2 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 2 mass 400
Setting atom values ...
2 settings made for mass
set type 1 mass 1
Setting atom values ...
3288 settings made for mass
set type 2 diameter 20
Setting atom values ...
2 settings made for diameter
set type 1 diameter 1
Setting atom values ...
3288 settings made for diameter
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi #multi/old
neigh_modify delay 0 collection/interval 2 1 20
comm_modify mode multi vel yes reduce/multi #multi/old
# colloid potential
pair_style granular
pair_coeff * * hooke 1.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
fix 1 all nph/sphere iso 0.0 1.0 10.0 drag 1.0
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-3
#dump 1 all custom 1000 dump.granular id x y z radius
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 21
ghost atom cutoff = 21
binsize = 1, bins = 85 85 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair granular, perpetual
attributes: half, newton on, size, history
pair build: half/size/multi/newton/tri
stencil: half/multi/2d/tri
bin: multi
Per MPI rank memory allocation (min/avg/max) = 10.29 | 10.29 | 10.29 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395623 0.66837658 7200
1000 0.32604952 0 0.32595042 0.17341597 7862.5013
2000 0.12631038 0 0.12627198 0.069126477 8216.6956
3000 0.069351715 0 0.069330635 0.040799593 8344.1931
4000 0.045023755 0 0.04501007 0.029184795 8239.1832
5000 0.032735149 0 0.0327252 0.025640841 7943.5691
6000 0.026205227 0 0.026197262 0.021206924 7617.6672
7000 0.02165475 0 0.021648168 0.018789365 7255.2897
8000 0.018299317 0 0.018293755 0.019272158 6887.3386
9000 0.016283763 0 0.016278813 0.020434892 6524.0274
10000 0.015148918 0 0.015144313 0.021650465 6168.4941
11000 0.014180465 0 0.014176155 0.022320009 5823.98
12000 0.013505744 0 0.013501639 0.023978674 5492.4853
13000 0.013009585 0 0.01300563 0.024391329 5175.7455
14000 0.012494373 0 0.012490576 0.027331543 4874.3212
15000 0.012057669 0 0.012054004 0.030561239 4589.518
16000 0.011510988 0 0.01150749 0.034613772 4321.1694
17000 0.011198594 0 0.01119519 0.042263536 4070.0115
18000 0.010978603 0 0.010975266 0.053637275 3836.0304
19000 0.010768789 0 0.010765516 0.069472547 3619.75
20000 0.0102256 0 0.010222492 0.085332898 3420.2738
21000 0.0089630315 0 0.0089603072 0.11199196 3236.9821
22000 0.006565581 0 0.0065635854 0.14807426 3071.3012
23000 0.0050916998 0 0.0050901522 0.1903446 2923.4162
24000 0.0040345997 0 0.0040333734 0.237983 2792.2658
25000 0.0032995328 0 0.0032985299 0.29120001 2677.7475
26000 0.0024157863 0 0.002415052 0.33851944 2578.4972
27000 0.0020664445 0 0.0020658164 0.37561848 2491.0264
28000 0.0017843883 0 0.0017838459 0.41119961 2412.3871
29000 0.0011813262 0 0.0011809672 0.44749341 2341.7208
30000 0.00063084711 0 0.00063065536 0.4879202 2279.0452
31000 0.00056027405 0 0.00056010376 0.52932126 2224.9456
32000 0.00053304715 0 0.00053288513 0.56822504 2179.1224
33000 0.00052245707 0 0.00052229826 0.60025509 2140.5345
34000 0.00073726189 0 0.0007370378 0.62001489 2106.3045
35000 0.00075804791 0 0.0007578175 0.6359631 2072.525
36000 0.00052579203 0 0.00052563222 0.65678516 2038.1907
37000 0.00036977909 0 0.0003696667 0.68784389 2005.5831
38000 0.00036252798 0 0.00036241779 0.72116044 1977.7441
39000 0.00036254566 0 0.00036243547 0.74720837 1954.9127
40000 0.00036237175 0 0.00036226161 0.76605408 1934.6006
41000 0.00032453104 0 0.00032443239 0.78424188 1914.1939
42000 0.00025394755 0 0.00025387036 0.80529272 1893.064
43000 0.00021067821 0 0.00021061418 0.82962095 1872.365
44000 0.00017927684 0 0.00017922235 0.85522899 1853.531
45000 0.0001464225 0 0.000146378 0.87925998 1837.2423
46000 0.00012922979 0 0.00012919051 0.8986549 1822.9227
47000 0.0001643557 0 0.00016430575 0.91743602 1809.0605
48000 0.00020154753 0 0.00020148627 0.93686779 1794.9227
49000 0.00017742528 0 0.00017737135 0.94988773 1780.3811
50000 0.00015150521 0 0.00015145916 0.97929588 1764.7507
Loop time of 54.9135 on 1 procs for 50000 steps with 3290 atoms
Performance: 393345.914 tau/day, 910.523 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 44.691 | 44.691 | 44.691 | 0.0 | 81.38
Neigh | 0.21653 | 0.21653 | 0.21653 | 0.0 | 0.39
Comm | 0.75388 | 0.75388 | 0.75388 | 0.0 | 1.37
Output | 0.0011999 | 0.0011999 | 0.0011999 | 0.0 | 0.00
Modify | 8.4718 | 8.4718 | 8.4718 | 0.0 | 15.43
Other | | 0.7794 | | | 1.42
Nlocal: 3290.00 ave 3290 max 3290 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 525.000 ave 525 max 525 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 26732.0 ave 26732 max 26732 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 26732
Ave neighs/atom = 8.1252280
Neighbor list builds = 342
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:54


@ -0,0 +1,175 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.5
Lattice spacing in x,y,z = 1.4142136 1.4142136 1.4142136
region box block 0 60 0 60 -0.5 0.5
create_box 2 box
Created orthogonal box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
change_box all triclinic
Changing box ...
triclinic box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678) with tilt (0.0000000 0.0000000 0.0000000)
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 20.0 20.0 0.0 10.0 units box
region sphere2 sphere 60.0 60.0 0.0 10.0 units box
delete_atoms region sphere1
Deleted 154 atoms, new total = 3446
delete_atoms region sphere2
Deleted 158 atoms, new total = 3288
create_atoms 2 single 20.0 20.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 2 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 2 mass 400
Setting atom values ...
2 settings made for mass
set type 1 mass 1
Setting atom values ...
3288 settings made for mass
set type 2 diameter 20
Setting atom values ...
2 settings made for diameter
set type 1 diameter 1
Setting atom values ...
3288 settings made for diameter
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi #multi/old
neigh_modify delay 0 collection/interval 2 1 20
comm_modify mode multi vel yes reduce/multi #multi/old
# colloid potential
pair_style granular
pair_coeff * * hooke 1.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
fix 1 all nph/sphere iso 0.0 1.0 10.0 drag 1.0
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-3
#dump 1 all custom 1000 dump.granular id x y z radius
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 21
ghost atom cutoff = 21
binsize = 1, bins = 85 85 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair granular, perpetual
attributes: half, newton on, size, history
pair build: half/size/multi/newton/tri
stencil: half/multi/2d/tri
bin: multi
Per MPI rank memory allocation (min/avg/max) = 10.09 | 10.10 | 10.10 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395623 0.66837658 7200
1000 0.32605303 0 0.32595393 0.17341558 7862.5037
2000 0.12631567 0 0.12627728 0.069188881 8216.7174
3000 0.069373812 0 0.069352726 0.040740832 8344.2982
4000 0.045084633 0 0.045070929 0.029328609 8239.3656
5000 0.032681746 0 0.032671813 0.025416741 7943.7831
6000 0.026301239 0 0.026293245 0.021418793 7617.8426
7000 0.021666723 0 0.021660138 0.018961011 7255.9338
8000 0.018141337 0 0.018135823 0.019306113 6887.4963
9000 0.015922309 0 0.015917469 0.020033398 6524.016
10000 0.014744547 0 0.014740066 0.020959503 6168.1945
11000 0.013872852 0 0.013868636 0.021708943 5823.3153
12000 0.013321594 0 0.013317545 0.02332141 5491.4979
13000 0.01269964 0 0.012695779 0.024796428 5174.6263
14000 0.01227055 0 0.012266821 0.027785072 4873.4516
15000 0.012120508 0 0.012116824 0.029656636 4588.8603
16000 0.011612027 0 0.011608498 0.034695109 4320.4674
17000 0.011216697 0 0.011213288 0.042746966 4069.4275
18000 0.010950166 0 0.010946838 0.053528994 3835.5439
19000 0.010887635 0 0.010884325 0.069684492 3619.562
20000 0.010563449 0 0.010560238 0.08654561 3420.2636
21000 0.0092336323 0 0.0092308257 0.11286068 3237.1408
22000 0.006929086 0 0.0069269799 0.15018917 3072.0438
23000 0.0052239156 0 0.0052223277 0.19067193 2924.441
24000 0.0044210081 0 0.0044196644 0.23908686 2793.2426
25000 0.0034916086 0 0.0034905473 0.29112824 2678.7912
26000 0.002549072 0 0.0025482972 0.33567824 2579.3738
27000 0.0020890726 0 0.0020884377 0.37328514 2491.0502
28000 0.001772982 0 0.0017724431 0.41079958 2411.9111
29000 0.001127719 0 0.0011273762 0.44752241 2341.1888
30000 0.00053266563 0 0.00053250373 0.48791815 2278.5611
31000 0.00050278646 0 0.00050263364 0.52896525 2224.5328
32000 0.00051880956 0 0.00051865187 0.56884574 2178.6674
33000 0.00054908167 0 0.00054891477 0.6016387 2140.3696
34000 0.00075213884 0 0.00075191023 0.62070188 2106.6504
35000 0.00081295162 0 0.00081270452 0.63492031 2073.0077
36000 0.00056699821 0 0.00056682587 0.65608409 2038.3251
37000 0.0003540723 0 0.00035396468 0.68803919 2005.497
38000 0.00031139738 0 0.00031130273 0.72103717 1977.7345
39000 0.00034087822 0 0.00034077461 0.74697975 1954.8979
40000 0.00035452426 0 0.0003544165 0.76682035 1934.5695
41000 0.00030882258 0 0.00030872871 0.78390763 1914.3326
42000 0.00025492799 0 0.00025485051 0.80439795 1893.1474
43000 0.00021545017 0 0.00021538468 0.82803644 1872.073
44000 0.00017293257 0 0.00017288 0.85436769 1852.6548
45000 0.00014097725 0 0.0001409344 0.8796181 1836.0087
46000 0.0001139199 0 0.00011388527 0.90006173 1821.7977
47000 0.00012678598 0 0.00012674745 0.90876359 1808.4913
48000 0.00013796773 0 0.00013792579 0.93661523 1793.8082
49000 0.00014723144 0 0.00014718669 0.95869417 1779.1875
50000 0.00013610653 0 0.00013606516 0.97777198 1765.3247
Loop time of 17.7405 on 4 procs for 50000 steps with 3290 atoms
Performance: 1217551.996 tau/day, 2818.407 timesteps/s
100.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 6.6629 | 9.6168 | 12.444 | 76.6 | 54.21
Neigh | 0.049771 | 0.055182 | 0.06133 | 2.0 | 0.31
Comm | 1.7883 | 4.6306 | 7.6179 | 111.5 | 26.10
Output | 0.00085342 | 0.0010606 | 0.0015425 | 0.9 | 0.01
Modify | 2.7244 | 2.895 | 3.0436 | 8.2 | 16.32
Other | | 0.5419 | | | 3.05
Nlocal: 822.500 ave 897 max 779 min
Histogram: 1 1 0 1 0 0 0 0 0 1
Nghost: 190.500 ave 211 max 179 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Neighs: 6665.75 ave 7329 max 6104 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Total # of neighbors = 26663
Ave neighs/atom = 8.1042553
Neighbor list builds = 342
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:17

@@ -0,0 +1,175 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.5
Lattice spacing in x,y,z = 1.4142136 1.4142136 1.4142136
region box block 0 60 0 60 -0.5 0.5
create_box 2 box
Created orthogonal box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.001 seconds
change_box all triclinic
Changing box ...
triclinic box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678) with tilt (0.0000000 0.0000000 0.0000000)
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 20.0 20.0 0.0 10.0 units box
region sphere2 sphere 60.0 60.0 0.0 10.0 units box
delete_atoms region sphere1
Deleted 154 atoms, new total = 3446
delete_atoms region sphere2
Deleted 158 atoms, new total = 3288
create_atoms 2 single 20.0 20.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 2 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 2 mass 400
Setting atom values ...
2 settings made for mass
set type 1 mass 1
Setting atom values ...
3288 settings made for mass
set type 2 diameter 20
Setting atom values ...
2 settings made for diameter
set type 1 diameter 1
Setting atom values ...
3288 settings made for diameter
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi/old #multi
neigh_modify delay 0 #collection/interval 2 1 20
comm_modify mode multi/old vel yes #reduce/multi
# colloid potential
pair_style granular
pair_coeff * * hooke 1.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
fix 1 all nph/sphere iso 0.0 1.0 10.0 drag 1.0
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-3
#dump 1 all custom 1000 dump.granular id x y z radius
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 21
ghost atom cutoff = 21
binsize = 1, bins = 85 85 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair granular, perpetual
attributes: half, newton on, size, history
pair build: half/size/multi/old/newton/tri
stencil: half/multi/old/2d/tri
bin: standard
Per MPI rank memory allocation (min/avg/max) = 10.38 | 10.38 | 10.38 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395623 0.66837658 7200
1000 0.32273428 0 0.32263619 0.17174972 7859.8897
2000 0.12441598 0 0.12437817 0.067078155 8212.9946
3000 0.067389284 0 0.067368801 0.040425551 8336.7112
4000 0.044312733 0 0.044299264 0.028220228 8229.0658
5000 0.032702163 0 0.032692223 0.024302012 7931.1298
6000 0.025856 0 0.025848141 0.021241317 7603.5534
7000 0.021437473 0 0.021430957 0.019285494 7243.5757
8000 0.018129567 0 0.018124057 0.020738727 6877.4816
9000 0.016370159 0 0.016365184 0.020261904 6515.3445
10000 0.01500918 0 0.015004618 0.020551803 6160.4475
11000 0.014156551 0 0.014152248 0.021324815 5815.4665
12000 0.013725406 0 0.013721235 0.021159958 5483.6304
13000 0.013215746 0 0.013211729 0.021685712 5165.4758
14000 0.012398153 0 0.012394384 0.024155434 4862.8657
15000 0.011842796 0 0.011839196 0.028503991 4577.9008
16000 0.011433182 0 0.011429706 0.033564839 4309.8792
17000 0.011166574 0 0.01116318 0.040592677 4058.9964
18000 0.01100067 0 0.010997326 0.04899206 3825.155
19000 0.010224474 0 0.010221366 0.063670337 3607.6577
20000 0.0091360558 0 0.0091332789 0.088230111 3408.5658
21000 0.0077336471 0 0.0077312964 0.11769368 3227.7002
22000 0.0060202357 0 0.0060184059 0.15272492 3064.3986
23000 0.0046705738 0 0.0046691542 0.19450723 2918.0014
24000 0.0040248311 0 0.0040236078 0.24161743 2788.4113
25000 0.0032075267 0 0.0032065518 0.28976925 2674.5604
26000 0.0021358008 0 0.0021351516 0.33635615 2574.9564
27000 0.0016902771 0 0.0016897633 0.37624261 2487.2379
28000 0.0014038216 0 0.0014033949 0.41492061 2409.2461
29000 0.00090262588 0 0.00090235152 0.45392924 2340.0308
30000 0.00049466445 0 0.0004945141 0.49295063 2279.2316
31000 0.00056998139 0 0.00056980814 0.53299532 2226.5683
32000 0.00057327032 0 0.00057309607 0.56856551 2181.7093
33000 0.00044845449 0 0.00044831818 0.59623461 2142.7574
34000 0.00059840346 0 0.00059822157 0.61758978 2107.1254
35000 0.00075311121 0 0.0007528823 0.63756791 2072.7217
36000 0.00053773653 0 0.00053757309 0.66026022 2039.1654
37000 0.00030439696 0 0.00030430444 0.69059127 2007.7901
38000 0.00034435616 0 0.00034425149 0.72166346 1980.7136
39000 0.00039692535 0 0.0003968047 0.7468036 1957.9531
40000 0.0003542502 0 0.00035414252 0.76604173 1937.3834
41000 0.0003094667 0 0.00030937263 0.78323183 1916.7027
42000 0.00027258976 0 0.0002725069 0.80315572 1895.0714
43000 0.00020659987 0 0.00020653707 0.82746098 1873.5408
44000 0.00016023865 0 0.00016018994 0.85418945 1853.8677
45000 0.00016112731 0 0.00016107833 0.87913874 1837.1144
46000 0.00016131366 0 0.00016126463 0.89921653 1822.7355
47000 0.00015754747 0 0.00015749958 0.91653641 1809.0285
48000 0.00017794764 0 0.00017789356 0.93582953 1794.7043
49000 0.00018879338 0 0.000188736 0.95775166 1780.0323
50000 0.00017781117 0 0.00017775712 0.97893641 1765.9442
Loop time of 74.6636 on 1 procs for 50000 steps with 3290 atoms
Performance: 289297.713 tau/day, 669.671 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 56.696 | 56.696 | 56.696 | 0.0 | 75.93
Neigh | 2.2232 | 2.2232 | 2.2232 | 0.0 | 2.98
Comm | 6.1867 | 6.1867 | 6.1867 | 0.0 | 8.29
Output | 0.0012016 | 0.0012016 | 0.0012016 | 0.0 | 0.00
Modify | 8.432 | 8.432 | 8.432 | 0.0 | 11.29
Other | | 1.125 | | | 1.51
Nlocal: 3290.00 ave 3290 max 3290 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6295.00 ave 6295 max 6295 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 53729.0 ave 53729 max 53729 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 53729
Ave neighs/atom = 16.331003
Neighbor list builds = 348
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:01:14

@@ -0,0 +1,175 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Big colloid particles and small LJ particles
units lj
atom_style sphere
dimension 2
lattice sq 0.5
Lattice spacing in x,y,z = 1.4142136 1.4142136 1.4142136
region box block 0 60 0 60 -0.5 0.5
create_box 2 box
Created orthogonal box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 3600 atoms
create_atoms CPU = 0.063 seconds
change_box all triclinic
Changing box ...
triclinic box = (0.0000000 0.0000000 -0.70710678) to (84.852814 84.852814 0.70710678) with tilt (0.0000000 0.0000000 0.0000000)
# remove two spheres of small particles and add large particles in the voids
region sphere1 sphere 20.0 20.0 0.0 10.0 units box
region sphere2 sphere 60.0 60.0 0.0 10.0 units box
delete_atoms region sphere1
Deleted 154 atoms, new total = 3446
delete_atoms region sphere2
Deleted 158 atoms, new total = 3288
create_atoms 2 single 20.0 20.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
create_atoms 2 single 60.0 60.0 0.0 units box
Created 1 atoms
create_atoms CPU = 0.000 seconds
set type 2 mass 400
Setting atom values ...
2 settings made for mass
set type 1 mass 1
Setting atom values ...
3288 settings made for mass
set type 2 diameter 20
Setting atom values ...
2 settings made for diameter
set type 1 diameter 1
Setting atom values ...
3288 settings made for diameter
velocity all create 1.44 87287 loop geom
# multi neighbor and comm for efficiency
neighbor 1 multi/old #multi
neigh_modify delay 0 #collection/interval 2 1 20
comm_modify mode multi/old vel yes #reduce/multi
# colloid potential
pair_style granular
pair_coeff * * hooke 1.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
fix 1 all nph/sphere iso 0.0 1.0 10.0 drag 1.0
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-3
#dump 1 all custom 1000 dump.granular id x y z radius
#dump 2 all image 1000 image.*.jpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 2 pad 5 adiam 1 5.0 adiam 2 1.5
#dump 3 all movie 1000 movie.mpg type type # zoom 1.5 center d 0.5 0.5 0.5
#dump_modify 3 pad 5 adiam 1 5.0 adiam 2 1.5
thermo_style custom step temp epair etotal press vol
thermo 1000
timestep 0.005
run 50000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 21
ghost atom cutoff = 21
binsize = 1, bins = 85 85 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair granular, perpetual
attributes: half, newton on, size, history
pair build: half/size/multi/old/newton/tri
stencil: half/multi/old/2d/tri
bin: standard
Per MPI rank memory allocation (min/avg/max) = 10.20 | 10.20 | 10.20 Mbytes
Step Temp E_pair TotEng Press Volume
0 1.44 0 1.4395623 0.66837658 7200
1000 0.32273428 0 0.32263619 0.17174972 7859.8897
2000 0.12441598 0 0.12437817 0.067078155 8212.9946
3000 0.067389284 0 0.067368801 0.040425551 8336.7112
4000 0.044312733 0 0.044299264 0.028220228 8229.0658
5000 0.032702163 0 0.032692223 0.024302012 7931.1298
6000 0.025856 0 0.025848141 0.021241317 7603.5534
7000 0.021437473 0 0.021430957 0.019285494 7243.5757
8000 0.018129567 0 0.018124057 0.020738727 6877.4816
9000 0.01637016 0 0.016365184 0.020261904 6515.3445
10000 0.01500918 0 0.015004618 0.020551803 6160.4475
11000 0.014156553 0 0.01415225 0.021324818 5815.4665
12000 0.013725412 0 0.01372124 0.021159958 5483.6304
13000 0.013215733 0 0.013211716 0.021685624 5165.4758
14000 0.012398179 0 0.012394411 0.024155572 4862.8657
15000 0.01184269 0 0.01183909 0.028504106 4577.901
16000 0.01143291 0 0.011429435 0.033564204 4309.88
17000 0.011166204 0 0.01116281 0.040588854 4058.9972
18000 0.011000875 0 0.010997532 0.048998904 3825.1569
19000 0.010225905 0 0.010222797 0.063669588 3607.6622
20000 0.0091390255 0 0.0091362477 0.088165402 3408.567
21000 0.0077382041 0 0.0077358521 0.11770474 3227.6936
22000 0.00601731 0 0.0060154811 0.15261994 3064.3873
23000 0.0046667591 0 0.0046653407 0.19453819 2917.9782
24000 0.0040425749 0 0.0040413461 0.24145833 2788.3897
25000 0.0031933217 0 0.0031923511 0.28989713 2674.5164
26000 0.0021138997 0 0.0021132571 0.33598673 2574.9312
27000 0.001700508 0 0.0016999912 0.37665013 2487.1626
28000 0.0014434246 0 0.0014429859 0.41572163 2409.327
29000 0.00089885063 0 0.00089857742 0.453431 2340.2313
30000 0.00048556478 0 0.00048541719 0.49176025 2279.2155
31000 0.00058130972 0 0.00058113303 0.53236818 2226.2349
32000 0.00057749847 0 0.00057732294 0.5691506 2181.2738
33000 0.00044719326 0 0.00044705733 0.59696179 2142.571
34000 0.00060924828 0 0.0006090631 0.61735036 2107.1282
35000 0.00077419805 0 0.00077396273 0.63696098 2072.6008
36000 0.00055752003 0 0.00055735057 0.65981842 2038.8242
37000 0.00031402452 0 0.00031392907 0.69018949 2007.3235
38000 0.00034969879 0 0.0003495925 0.72155053 1980.1706
39000 0.00041434197 0 0.00041421603 0.74680715 1957.3838
40000 0.00037229243 0 0.00037217927 0.76581686 1936.8034
41000 0.00031028842 0 0.00031019411 0.78321059 1916.1108
42000 0.00026623668 0 0.00026615575 0.80267329 1894.4649
43000 0.00020543723 0 0.00020537479 0.82714001 1872.7672
44000 0.0001563321 0 0.00015628458 0.85496396 1853.0284
45000 0.00014981713 0 0.00014977159 0.87924842 1836.4755
46000 0.00015641585 0 0.00015636831 0.89896936 1822.1989
47000 0.00016004701 0 0.00015999837 0.91661933 1808.4606
48000 0.00017437702 0 0.00017432402 0.93565475 1794.1258
49000 0.00018645903 0 0.00018640235 0.95733183 1779.4032
50000 0.00018469122 0 0.00018463508 0.96446925 1765.1534
Loop time of 30.1448 on 4 procs for 50000 steps with 3290 atoms
Performance: 716540.413 tau/day, 1658.658 timesteps/s
90.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 8.7565 | 12.704 | 16.036 | 89.8 | 42.14
Neigh | 0.4494 | 0.56436 | 0.66263 | 11.8 | 1.87
Comm | 9.5962 | 12.989 | 17.006 | 90.8 | 43.09
Output | 0.00088467 | 0.0011022 | 0.0015811 | 0.9 | 0.00
Modify | 2.9732 | 3.0944 | 3.2463 | 7.0 | 10.27
Other | | 0.7918 | | | 2.63
Nlocal: 822.500 ave 859 max 785 min
Histogram: 1 0 1 0 0 0 0 0 1 1
Nghost: 3049.75 ave 3089 max 2999 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Neighs: 13440.5 ave 14459 max 11964 min
Histogram: 1 0 0 0 0 1 0 0 1 1
Total # of neighbors = 53762
Ave neighs/atom = 16.341033
Neighbor list builds = 348
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:30

@@ -0,0 +1,291 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Shear power-law distributed granular particles
units lj
atom_style sphere
dimension 2
read_data data.powerlaw
Reading data file ...
triclinic box = (9.9514336 9.9514336 0.0000000) to (331.81396 331.81396 1.0000000) with tilt (0.0000000 0.0000000 0.0000000)
1 by 1 by 1 MPI processor grid
reading atoms ...
10000 atoms
reading velocities ...
10000 velocities
read_data CPU = 0.027 seconds
change_box all triclinic
Changing box ...
triclinic box = (9.9514336 9.9514336 0.0000000) to (331.81396 331.81396 1.0000000) with tilt (0.0000000 0.0000000 0.0000000)
# multi neighbor and comm for efficiency
neighbor 1 multi
neigh_modify delay 0 collection/interval 6 1.5 3 10 30 100 200
comm_modify mode multi vel yes reduce/multi
# granular potential
pair_style granular
pair_coeff * * hooke 20.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
# fixes
fix 1 all nve/sphere
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-4
# dump 1 all custom 20000 dump.granular id x y z radius
thermo_style custom step temp epair etotal press vol pxy
thermo 1000
timestep 0.005
run 200000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 164.7888
ghost atom cutoff = 164.7888
binsize = 82.3944, bins = 4 4 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair granular, perpetual
attributes: half, newton on, size, history
pair build: half/size/multi/newton/tri
stencil: half/multi/2d/tri
bin: multi
Per MPI rank memory allocation (min/avg/max) = 14.77 | 14.77 | 14.77 Mbytes
Step Temp E_pair TotEng Press Volume Pxy
0 4.9204851e-05 0 4.9199931e-05 0.61204991 103595.49 -0.00083309917
1000 0.00038076464 0 0.00038072657 0.58694623 103595.49 -0.0066712806
2000 0.00027986478 0 0.00027983679 0.5845274 103595.49 -0.008880933
3000 0.00022105227 0 0.00022103017 0.58295464 103595.49 -0.011327442
4000 0.00020888366 0 0.00020886277 0.5826542 103595.49 -0.014147424
5000 0.00019912663 0 0.00019910672 0.58175837 103595.49 -0.015685634
6000 0.0001989441 0 0.00019892421 0.58170841 103595.49 -0.017379973
7000 0.00019307783 0 0.00019305852 0.58133913 103595.49 -0.019556709
8000 0.00018132444 0 0.00018130631 0.58134077 103595.49 -0.021609399
9000 0.00017909088 0 0.00017907297 0.58117179 103595.49 -0.023603514
10000 0.00018391928 0 0.00018390089 0.58070675 103595.49 -0.026026784
11000 0.00018985439 0 0.00018983541 0.58006086 103595.49 -0.028574238
12000 0.00018903569 0 0.00018901678 0.5794232 103595.49 -0.031151884
13000 0.00019070382 0 0.00019068475 0.57890243 103595.49 -0.033469404
14000 0.00019371625 0 0.00019369688 0.5787389 103595.49 -0.035646526
15000 0.00019833475 0 0.00019831492 0.57883166 103595.49 -0.037709788
16000 0.0002011729 0 0.00020115278 0.57875606 103595.49 -0.039452453
17000 0.00020285197 0 0.00020283169 0.5786311 103595.49 -0.040960671
18000 0.00020319174 0 0.00020317142 0.57842387 103595.49 -0.042257072
19000 0.00020290253 0 0.00020288224 0.57795042 103595.49 -0.043364149
20000 0.00020509848 0 0.00020507797 0.5771478 103595.49 -0.044392259
21000 0.00021803258 0 0.00021801078 0.57569003 103595.49 -0.044749043
22000 0.00020751217 0 0.00020749141 0.57477071 103595.49 -0.045719593
23000 0.00022053275 0 0.0002205107 0.57409228 103595.49 -0.047332146
24000 0.00022689646 0 0.00022687377 0.57325004 103595.49 -0.04871759
25000 0.00025224804 0 0.00025222282 0.57283712 103595.49 -0.050254871
26000 0.00025343198 0 0.00025340664 0.57238659 103595.49 -0.051604284
27000 0.00026689801 0 0.00026687132 0.57221042 103595.49 -0.052915257
28000 0.00027867954 0 0.00027865167 0.57197974 103595.49 -0.053832129
29000 0.00028697929 0 0.00028695059 0.57177264 103595.49 -0.054693121
30000 0.00028857612 0 0.00028854727 0.57145453 103595.49 -0.055559611
31000 0.00029228405 0 0.00029225482 0.5711044 103595.49 -0.056492699
32000 0.00029648627 0 0.00029645663 0.57060211 103595.49 -0.05729896
33000 0.00030524162 0 0.00030521109 0.57002519 103595.49 -0.058201322
34000 0.00031725644 0 0.00031722472 0.56920654 103595.49 -0.059128438
35000 0.00032273791 0 0.00032270564 0.56844677 103595.49 -0.060009671
36000 0.00033013013 0 0.00033009712 0.56795943 103595.49 -0.061074282
37000 0.00033942153 0 0.00033938759 0.56749208 103595.49 -0.062058531
38000 0.00035141528 0 0.00035138014 0.56682741 103595.49 -0.062953956
39000 0.00036126777 0 0.00036123164 0.56655193 103595.49 -0.063757684
40000 0.00037765934 0 0.00037762157 0.5661991 103595.49 -0.064535541
41000 0.00040834365 0 0.00040830281 0.56554085 103595.49 -0.064688281
42000 0.00042857233 0 0.00042852948 0.56474014 103595.49 -0.065262664
43000 0.00042692021 0 0.00042687752 0.56362013 103595.49 -0.065276794
44000 0.00040298912 0 0.00040294882 0.5631005 103595.49 -0.065626396
45000 0.00040947381 0 0.00040943286 0.56291946 103595.49 -0.066167734
46000 0.00040202686 0 0.00040198666 0.56273846 103595.49 -0.066543782
47000 0.00038914356 0 0.00038910465 0.56265937 103595.49 -0.067359923
48000 0.00038429737 0 0.00038425894 0.56274908 103595.49 -0.068231096
49000 0.00036912968 0 0.00036909277 0.56261623 103595.49 -0.068791569
50000 0.00035203094 0 0.00035199574 0.56257856 103595.49 -0.069298217
51000 0.00034403223 0 0.00034399783 0.56236537 103595.49 -0.070273225
52000 0.00034132431 0 0.00034129018 0.56220555 103595.49 -0.071740344
53000 0.000335692 0 0.00033565843 0.56194913 103595.49 -0.072834415
54000 0.00033048196 0 0.00033044891 0.56231491 103595.49 -0.073996938
55000 0.00032751145 0 0.00032747869 0.56225025 103595.49 -0.07506664
56000 0.00032696951 0 0.00032693682 0.56285506 103595.49 -0.076445354
57000 0.00033158698 0 0.00033155382 0.56354729 103595.49 -0.077682996
58000 0.00034300009 0 0.00034296579 0.56416964 103595.49 -0.078836604
59000 0.00034340257 0 0.00034336823 0.56490867 103595.49 -0.079658197
60000 0.00034736137 0 0.00034732663 0.56519398 103595.49 -0.080570223
61000 0.00034984523 0 0.00034981025 0.5651693 103595.49 -0.081115325
62000 0.00034995431 0 0.00034991932 0.56534549 103595.49 -0.081486038
63000 0.00033854269 0 0.00033850883 0.56582558 103595.49 -0.081892374
64000 0.00032621515 0 0.00032618253 0.5658388 103595.49 -0.082786608
65000 0.00031773942 0 0.00031770764 0.56576287 103595.49 -0.083706189
66000 0.00031772736 0 0.00031769558 0.56548117 103595.49 -0.084236463
67000 0.0003148631 0 0.00031483161 0.56483795 103595.49 -0.084506082
68000 0.00030359752 0 0.00030356716 0.56446443 103595.49 -0.084985509
69000 0.00030395128 0 0.00030392088 0.56437593 103595.49 -0.085548157
70000 0.00032811658 0 0.00032808376 0.56372411 103595.49 -0.085304154
71000 0.00035494531 0 0.00035490981 0.56326137 103595.49 -0.085047806
72000 0.0003253841 0 0.00032535156 0.56244462 103595.49 -0.085693663
73000 0.00032328895 0 0.00032325662 0.5629287 103595.49 -0.086119464
74000 0.00032650113 0 0.00032646848 0.56306166 103595.49 -0.087182721
75000 0.00034303222 0 0.00034299792 0.56219559 103595.49 -0.086604025
76000 0.00033786129 0 0.0003378275 0.56188071 103595.49 -0.086852177
77000 0.00033559735 0 0.00033556379 0.5619155 103595.49 -0.08689764
78000 0.00032579863 0 0.00032576605 0.56177059 103595.49 -0.087109469
79000 0.00031610815 0 0.00031607654 0.56160391 103595.49 -0.087250861
80000 0.00031246546 0 0.00031243422 0.56181676 103595.49 -0.087117648
81000 0.00029392131 0 0.00029389192 0.56205441 103595.49 -0.087601617
82000 0.00029624453 0 0.00029621491 0.56285229 103595.49 -0.08824145
83000 0.00030538821 0 0.00030535767 0.5627754 103595.49 -0.088318188
84000 0.00029587833 0 0.00029584874 0.56267246 103595.49 -0.08930338
85000 0.00030551128 0 0.00030548073 0.56251282 103595.49 -0.0897211
86000 0.00030000969 0 0.00029997969 0.56249642 103595.49 -0.089920789
87000 0.00030211667 0 0.00030208646 0.56256648 103595.49 -0.090315024
88000 0.00030524995 0 0.00030521943 0.5623007 103595.49 -0.090706456
89000 0.00031961257 0 0.00031958061 0.56210244 103595.49 -0.090852204
90000 0.0003195337 0 0.00031950175 0.56207472 103595.49 -0.090879606
91000 0.00033860446 0 0.0003385706 0.56197196 103595.49 -0.090891252
92000 0.0003327551 0 0.00033272183 0.56172473 103595.49 -0.090725694
93000 0.00032983619 0 0.00032980321 0.5619443 103595.49 -0.090626404
94000 0.00034024354 0 0.00034020952 0.56150371 103595.49 -0.090769983
95000 0.00033201405 0 0.00033198084 0.56145998 103595.49 -0.09102312
96000 0.00032851608 0 0.00032848323 0.56201045 103595.49 -0.09152522
97000 0.0003353172 0 0.00033528367 0.56256203 103595.49 -0.092443634
98000 0.00033453146 0 0.00033449801 0.5632537 103595.49 -0.093069693
99000 0.00034432742 0 0.00034429299 0.56355465 103595.49 -0.093332298
100000 0.00035299312 0 0.00035295782 0.56420115 103595.49 -0.093871701
101000 0.00042149444 0 0.00042145229 0.56424332 103595.49 -0.094001873
102000 0.0004580706 0 0.0004580248 0.56378535 103595.49 -0.093786943
103000 0.00046113464 0 0.00046108853 0.56428549 103595.49 -0.093463429
104000 0.00047583409 0 0.00047578651 0.5645355 103595.49 -0.093225615
105000 0.00048367276 0 0.00048362439 0.56469488 103595.49 -0.092935582
106000 0.00046931008 0 0.00046926315 0.56464923 103595.49 -0.09282958
107000 0.00046460766 0 0.00046456119 0.56502528 103595.49 -0.093077749
108000 0.00046398187 0 0.00046393547 0.56532911 103595.49 -0.09321949
109000 0.00047530523 0 0.0004752577 0.56561281 103595.49 -0.093217991
110000 0.00048531886 0 0.00048527033 0.56549262 103595.49 -0.092956034
111000 0.00049659003 0 0.00049654038 0.56507505 103595.49 -0.092554122
112000 0.00050113619 0 0.00050108607 0.56528891 103595.49 -0.092227508
113000 0.0005138896 0 0.00051383821 0.56550655 103595.49 -0.092096556
114000 0.00052560295 0 0.00052555039 0.56567551 103595.49 -0.09181586
115000 0.00054349317 0 0.00054343882 0.56530917 103595.49 -0.090961623
116000 0.00056022902 0 0.00056017299 0.56482302 103595.49 -0.090810658
117000 0.00055876064 0 0.00055870476 0.56488791 103595.49 -0.090329656
118000 0.00056191427 0 0.00056185808 0.56461166 103595.49 -0.090161067
119000 0.0005488829 0 0.00054882801 0.56437975 103595.49 -0.090328459
120000 0.00054084712 0 0.00054079303 0.564481 103595.49 -0.090602791
121000 0.00053717105 0 0.00053711733 0.56481743 103595.49 -0.090309102
122000 0.00053834163 0 0.00053828779 0.56385259 103595.49 -0.090433254
123000 0.00053319394 0 0.00053314062 0.56335613 103595.49 -0.090723928
124000 0.00053127439 0 0.00053122127 0.5631684 103595.49 -0.091178253
125000 0.00053624623 0 0.00053619261 0.56387166 103595.49 -0.091701174
126000 0.0005253773 0 0.00052532476 0.5639006 103595.49 -0.092033098
127000 0.00052459276 0 0.0005245403 0.56361298 103595.49 -0.092219098
128000 0.00054030806 0 0.00054025403 0.56307203 103595.49 -0.092196938
129000 0.00055474894 0 0.00055469346 0.5622815 103595.49 -0.09178309
130000 0.00057391115 0 0.00057385376 0.56244981 103595.49 -0.09170211
131000 0.00058650769 0 0.00058644904 0.56195859 103595.49 -0.090649841
132000 0.00058529163 0 0.0005852331 0.56162943 103595.49 -0.090167101
133000 0.00062544817 0 0.00062538563 0.5594761 103595.49 -0.088989624
134000 0.00063457749 0 0.00063451403 0.55917757 103595.49 -0.089702278
135000 0.00065371789 0 0.00065365252 0.55885043 103595.49 -0.090030252
136000 0.00070050714 0 0.00070043709 0.55854751 103595.49 -0.08960124
137000 0.0006750775 0 0.00067501 0.55809563 103595.49 -0.090252473
138000 0.00068827043 0 0.0006882016 0.55806674 103595.49 -0.090238994
139000 0.00069748073 0 0.00069741098 0.55734587 103595.49 -0.090118549
140000 0.00071065284 0 0.00071058177 0.55711669 103595.49 -0.090336074
141000 0.00070994204 0 0.00070987104 0.55638115 103595.49 -0.089917062
142000 0.00071514386 0 0.00071507235 0.55614391 103595.49 -0.090392071
143000 0.00071334667 0 0.00071327533 0.55640687 103595.49 -0.091256718
144000 0.00069553102 0 0.00069546147 0.55705702 103595.49 -0.091761396
145000 0.00068849503 0 0.00068842618 0.55692035 103595.49 -0.091895738
146000 0.00068407816 0 0.00068400975 0.55660026 103595.49 -0.092191588
147000 0.00069521557 0 0.00069514605 0.55556456 103595.49 -0.092354739
148000 0.00068349281 0 0.00068342446 0.55537498 103595.49 -0.092914636
149000 0.00067959644 0 0.00067952848 0.55537695 103595.49 -0.093738463
150000 0.00067100566 0 0.00067093856 0.55544851 103595.49 -0.094104003
151000 0.00068044722 0 0.00068037917 0.5554655 103595.49 -0.094943239
152000 0.00068109012 0 0.00068102201 0.55585405 103595.49 -0.095355111
153000 0.00068666181 0 0.00068659314 0.55501583 103595.49 -0.095234652
154000 0.00068283406 0 0.00068276578 0.55644996 103595.49 -0.095902623
155000 0.00069836346 0 0.00069829363 0.55747472 103595.49 -0.096978444
156000 0.00072807264 0 0.00072799984 0.55807332 103595.49 -0.097415305
157000 0.00077300609 0 0.00077292879 0.55871196 103595.49 -0.098034508
158000 0.00081631408 0 0.00081623245 0.558479 103595.49 -0.09825722
159000 0.00079291984 0 0.00079284054 0.55784788 103595.49 -0.097758094
160000 0.0008203256 0 0.00082024357 0.55700259 103595.49 -0.097519328
161000 0.00081471235 0 0.00081463087 0.556622 103595.49 -0.097787992
162000 0.00080692462 0 0.00080684393 0.5566795 103595.49 -0.097210216
163000 0.00081149678 0 0.00081141564 0.55596697 103595.49 -0.097517476
164000 0.00081577795 0 0.00081569637 0.55569684 103595.49 -0.096908869
165000 0.00084604988 0 0.00084596528 0.55492052 103595.49 -0.095481627
166000 0.00082198923 0 0.00082190703 0.5552628 103595.49 -0.09477531
167000 0.00084903108 0 0.00084894618 0.55477991 103595.49 -0.094758799
168000 0.00081613582 0 0.00081605421 0.55508416 103595.49 -0.094804088
169000 0.00083341061 0 0.00083332727 0.55476794 103595.49 -0.094519882
170000 0.00077835092 0 0.00077827308 0.55516626 103595.49 -0.094843673
171000 0.00074843733 0 0.00074836249 0.55417469 103595.49 -0.094731356
172000 0.0007425125 0 0.00074243825 0.55431854 103595.49 -0.095174333
173000 0.00074144093 0 0.00074136678 0.55429464 103595.49 -0.094982598
174000 0.00072375323 0 0.00072368086 0.55421045 103595.49 -0.09489531
175000 0.0007270779 0 0.00072700519 0.55413607 103595.49 -0.094197685
176000 0.00071114682 0 0.00071107571 0.55342226 103595.49 -0.093083865
177000 0.00069325125 0 0.00069318193 0.55441386 103595.49 -0.093289572
178000 0.00067686202 0 0.00067679434 0.55504892 103595.49 -0.093512587
179000 0.00068326039 0 0.00068319206 0.55519365 103595.49 -0.093974329
180000 0.00075070045 0 0.00075062538 0.55415541 103595.49 -0.09327459
181000 0.00077670344 0 0.00077662577 0.55328725 103595.49 -0.092373689
182000 0.00077422781 0 0.00077415038 0.553131 103595.49 -0.092353979
183000 0.00080250542 0 0.00080242517 0.5519122 103595.49 -0.091897169
184000 0.00081235214 0 0.00081227091 0.55172769 103595.49 -0.091906209
185000 0.00078879443 0 0.00078871555 0.55145488 103595.49 -0.091198506
186000 0.00078497746 0 0.00078489896 0.55202944 103595.49 -0.091674987
187000 0.00079483049 0 0.000794751 0.55278073 103595.49 -0.092508295
188000 0.00079056756 0 0.0007904885 0.55362903 103595.49 -0.092801369
189000 0.00079162262 0 0.00079154346 0.55429061 103595.49 -0.092964781
190000 0.00078121133 0 0.00078113321 0.55386716 103595.49 -0.092689851
191000 0.00076574893 0 0.00076567235 0.5546533 103595.49 -0.093414672
192000 0.00076215201 0 0.0007620758 0.55503049 103595.49 -0.093986391
193000 0.00075652635 0 0.0007564507 0.55477696 103595.49 -0.094417347
194000 0.00075725781 0 0.00075718208 0.55457687 103595.49 -0.094241721
195000 0.0007434693 0 0.00074339496 0.55471575 103595.49 -0.094102015
196000 0.00073792493 0 0.00073785114 0.55463671 103595.49 -0.094452279
197000 0.00074673445 0 0.00074665978 0.55459327 103595.49 -0.09463863
198000 0.00072734835 0 0.00072727561 0.55514628 103595.49 -0.094622434
199000 0.00071846919 0 0.00071839734 0.55501969 103595.49 -0.094414887
200000 0.00072384651 0 0.00072377412 0.55533335 103595.49 -0.094159469
Loop time of 443.321 on 1 procs for 200000 steps with 10000 atoms
Performance: 194892.839 tau/day, 451.141 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 362.28 | 362.28 | 362.28 | 0.0 | 81.72
Neigh | 1.737 | 1.737 | 1.737 | 0.0 | 0.39
Comm | 5.0082 | 5.0082 | 5.0082 | 0.0 | 1.13
Output | 0.01774 | 0.01774 | 0.01774 | 0.0 | 0.00
Modify | 64.992 | 64.992 | 64.992 | 0.0 | 14.66
Other | | 9.286 | | | 2.09
Nlocal: 10000.0 ave 10000 max 10000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 487.000 ave 487 max 487 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 34427.0 ave 34427 max 34427 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 34427
Ave neighs/atom = 3.4427000
Neighbor list builds = 244
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:07:23

@@ -0,0 +1,291 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:97)
using 1 OpenMP thread(s) per MPI task
# Shear power-law distributed granular particles
units lj
atom_style sphere
dimension 2
read_data data.powerlaw
Reading data file ...
triclinic box = (9.9514336 9.9514336 0.0000000) to (331.81396 331.81396 1.0000000) with tilt (0.0000000 0.0000000 0.0000000)
2 by 2 by 1 MPI processor grid
reading atoms ...
10000 atoms
reading velocities ...
10000 velocities
read_data CPU = 0.114 seconds
change_box all triclinic
Changing box ...
triclinic box = (9.9514336 9.9514336 0.0000000) to (331.81396 331.81396 1.0000000) with tilt (0.0000000 0.0000000 0.0000000)
# multi neighbor and comm for efficiency
neighbor 1 multi
neigh_modify delay 0 collection/interval 6 1.5 3 10 30 100 200
comm_modify mode multi vel yes reduce/multi
# granular potential
pair_style granular
pair_coeff * * hooke 20.0 0.5 tangential linear_history 1.0 0.5 0.1 damping mass_velocity
# fixes
fix 1 all nve/sphere
fix 2 all enforce2d
fix 3 all deform 1 xy erate 1e-4
# dump 1 all custom 20000 dump.granular id x y z radius
thermo_style custom step temp epair etotal press vol pxy
thermo 1000
timestep 0.005
run 200000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 164.7888
ghost atom cutoff = 164.7888
binsize = 82.3944, bins = 4 4 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair granular, perpetual
attributes: half, newton on, size, history
pair build: half/size/multi/newton/tri
stencil: half/multi/2d/tri
bin: multi
Per MPI rank memory allocation (min/avg/max) = 12.65 | 12.67 | 12.68 Mbytes
Step Temp E_pair TotEng Press Volume Pxy
0 4.9204851e-05 0 4.9199931e-05 0.61204991 103595.49 -0.00083309917
1000 0.00038076464 0 0.00038072657 0.58694623 103595.49 -0.0066712806
2000 0.00027986478 0 0.00027983679 0.5845274 103595.49 -0.008880933
3000 0.00022105227 0 0.00022103017 0.58295464 103595.49 -0.011327442
4000 0.00020888366 0 0.00020886277 0.5826542 103595.49 -0.014147424
5000 0.00019912663 0 0.00019910672 0.58175837 103595.49 -0.015685634
6000 0.0001989441 0 0.00019892421 0.58170841 103595.49 -0.017379973
7000 0.00019307783 0 0.00019305852 0.58133913 103595.49 -0.019556709
8000 0.00018132444 0 0.00018130631 0.58134077 103595.49 -0.021609399
9000 0.00017909088 0 0.00017907297 0.58117179 103595.49 -0.023603514
10000 0.00018391928 0 0.00018390089 0.58070675 103595.49 -0.026026784
11000 0.00018985439 0 0.00018983541 0.58006086 103595.49 -0.028574238
12000 0.00018903569 0 0.00018901678 0.5794232 103595.49 -0.031151884
13000 0.00019070382 0 0.00019068475 0.57890243 103595.49 -0.033469404
14000 0.00019371625 0 0.00019369688 0.5787389 103595.49 -0.035646526
15000 0.00019833475 0 0.00019831492 0.57883166 103595.49 -0.037709788
16000 0.0002011729 0 0.00020115278 0.57875606 103595.49 -0.039452453
17000 0.00020285197 0 0.00020283168 0.5786311 103595.49 -0.040960671
18000 0.00020319173 0 0.00020317141 0.57842387 103595.49 -0.042257076
19000 0.00020290253 0 0.00020288224 0.57795043 103595.49 -0.043364149
20000 0.00020509847 0 0.00020507796 0.57714779 103595.49 -0.04439226
21000 0.0002180326 0 0.0002180108 0.57569003 103595.49 -0.044749042
22000 0.00020751218 0 0.00020749143 0.57477071 103595.49 -0.045719595
23000 0.0002205328 0 0.00022051075 0.57409228 103595.49 -0.047332145
24000 0.00022689643 0 0.00022687374 0.57325004 103595.49 -0.048717593
25000 0.00025224797 0 0.00025222275 0.57283728 103595.49 -0.050255013
26000 0.00025343192 0 0.00025340657 0.5723866 103595.49 -0.051604294
27000 0.0002668981 0 0.00026687141 0.57221051 103595.49 -0.052915314
28000 0.00027867942 0 0.00027865155 0.57197889 103595.49 -0.053831415
29000 0.00028697868 0 0.00028694998 0.57177287 103595.49 -0.054693262
30000 0.00028857623 0 0.00028854737 0.5714545 103595.49 -0.055559583
31000 0.00029228526 0 0.00029225603 0.57110328 103595.49 -0.056491646
32000 0.00029648479 0 0.00029645514 0.57060242 103595.49 -0.057299367
33000 0.00030524226 0 0.00030521174 0.57003089 103595.49 -0.058207205
34000 0.00031725278 0 0.00031722106 0.56920179 103595.49 -0.059123522
35000 0.00032273715 0 0.00032270488 0.56844806 103595.49 -0.06001074
36000 0.00033013214 0 0.00033009912 0.56795631 103595.49 -0.06107304
37000 0.00033942364 0 0.00033938969 0.56749308 103595.49 -0.062060209
38000 0.00035140856 0 0.00035137342 0.56682754 103595.49 -0.062954063
39000 0.00036125739 0 0.00036122126 0.56654839 103595.49 -0.063755554
40000 0.00037765404 0 0.00037761628 0.56619876 103595.49 -0.064535888
41000 0.00040833154 0 0.00040829071 0.56554179 103595.49 -0.064688901
42000 0.0004285629 0 0.00042852004 0.56474124 103595.49 -0.06526276
43000 0.00042691211 0 0.00042686942 0.56362219 103595.49 -0.065275494
44000 0.00040296803 0 0.00040292773 0.56310053 103595.49 -0.065625772
45000 0.00040933842 0 0.00040929749 0.56291338 103595.49 -0.066164396
46000 0.00040202229 0 0.00040198209 0.56273845 103595.49 -0.066541045
47000 0.00038914157 0 0.00038910266 0.562658 103595.49 -0.067357923
48000 0.00038428678 0 0.00038424835 0.5627468 103595.49 -0.068230972
49000 0.00036912501 0 0.0003690881 0.56261857 103595.49 -0.068794933
50000 0.00035203987 0 0.00035200467 0.56258099 103595.49 -0.069290362
51000 0.00034400774 0 0.00034397334 0.56237066 103595.49 -0.070274303
52000 0.00034126666 0 0.00034123253 0.56221436 103595.49 -0.071744779
53000 0.00033563205 0 0.00033559849 0.56195218 103595.49 -0.072836324
54000 0.0003304406 0 0.00033040756 0.5623187 103595.49 -0.073999087
55000 0.00032742828 0 0.00032739553 0.56225383 103595.49 -0.075066978
56000 0.00032696921 0 0.00032693651 0.56285643 103595.49 -0.076445225
57000 0.0003316388 0 0.00033160564 0.56354513 103595.49 -0.077683955
58000 0.00034325202 0 0.0003432177 0.56416895 103595.49 -0.078839054
59000 0.0003433584 0 0.00034332406 0.56490343 103595.49 -0.079658776
60000 0.00034732721 0 0.00034729247 0.5651932 103595.49 -0.080573609
61000 0.00034978913 0 0.00034975415 0.56517379 103595.49 -0.081109788
62000 0.00034995232 0 0.00034991733 0.56534537 103595.49 -0.081491908
63000 0.00033854315 0 0.00033850929 0.56582246 103595.49 -0.081894953
64000 0.0003260452 0 0.00032601259 0.56583259 103595.49 -0.082790811
65000 0.00031763096 0 0.0003175992 0.56576947 103595.49 -0.083707224
66000 0.00031761371 0 0.00031758194 0.56548785 103595.49 -0.084243042
67000 0.00031503681 0 0.0003150053 0.56483862 103595.49 -0.08451056
68000 0.0003036386 0 0.00030360824 0.56444755 103595.49 -0.084967521
69000 0.00030398979 0 0.00030395939 0.56436362 103595.49 -0.085541879
70000 0.0003281569 0 0.00032812409 0.56372598 103595.49 -0.085287975
71000 0.00035614631 0 0.0003561107 0.56322133 103595.49 -0.084970215
72000 0.00032709207 0 0.00032705936 0.56231837 103595.49 -0.085540239
73000 0.00032545048 0 0.00032541793 0.56278947 103595.49 -0.085940822
74000 0.00033285331 0 0.00033282002 0.56288405 103595.49 -0.08695227
75000 0.00034622589 0 0.00034619127 0.56198219 103595.49 -0.086349357
76000 0.00033654825 0 0.0003365146 0.56183659 103595.49 -0.086892729
77000 0.00033550364 0 0.00033547009 0.56197292 103595.49 -0.087018641
78000 0.00032680247 0 0.00032676979 0.56183307 103595.49 -0.087097072
79000 0.00031624495 0 0.00031621333 0.56161689 103595.49 -0.087358849
80000 0.0003124879 0 0.00031245665 0.5618608 103595.49 -0.087165611
81000 0.00029451552 0 0.00029448606 0.56211081 103595.49 -0.087652479
82000 0.00029588468 0 0.00029585509 0.5628096 103595.49 -0.08832193
83000 0.00030483225 0 0.00030480177 0.56261673 103595.49 -0.088586937
84000 0.00029556003 0 0.00029553047 0.56272654 103595.49 -0.089434209
85000 0.00030506369 0 0.00030503319 0.5627918 103595.49 -0.089830152
86000 0.00030015302 0 0.00030012301 0.56240656 103595.49 -0.090100219
87000 0.00030322942 0 0.0003031991 0.56243997 103595.49 -0.090327187
88000 0.00030569181 0 0.00030566124 0.56236256 103595.49 -0.090734148
89000 0.00031220625 0 0.00031217503 0.5621542 103595.49 -0.090898044
90000 0.00032214966 0 0.00032211744 0.56209534 103595.49 -0.090909986
91000 0.00033884101 0 0.00033880712 0.56191673 103595.49 -0.090818046
92000 0.00033260559 0 0.00033257233 0.56172194 103595.49 -0.090647169
93000 0.00032732547 0 0.00032729274 0.5619652 103595.49 -0.090575176
94000 0.00033817734 0 0.00033814352 0.56155436 103595.49 -0.090700379
95000 0.00033009649 0 0.00033006348 0.56147407 103595.49 -0.090940641
96000 0.00032882782 0 0.00032879494 0.56191577 103595.49 -0.091469188
97000 0.00032856078 0 0.00032852793 0.56271585 103595.49 -0.092256803
98000 0.00033030749 0 0.00033027446 0.56340097 103595.49 -0.093188128
99000 0.00033611507 0 0.00033608146 0.56375754 103595.49 -0.093539699
100000 0.00034990568 0 0.00034987069 0.56450225 103595.49 -0.093951624
101000 0.00044441478 0 0.00044437034 0.56437908 103595.49 -0.094161976
102000 0.00045403284 0 0.00045398743 0.56433013 103595.49 -0.093900071
103000 0.00045412317 0 0.00045407776 0.56468095 103595.49 -0.093670567
104000 0.00046494637 0 0.00046489988 0.56478442 103595.49 -0.093397211
105000 0.00047962271 0 0.00047957475 0.56482329 103595.49 -0.093141318
106000 0.00046840864 0 0.0004683618 0.56494359 103595.49 -0.092994704
107000 0.00046432422 0 0.00046427779 0.56543377 103595.49 -0.093135897
108000 0.0004655443 0 0.00046549774 0.5656898 103595.49 -0.093383926
109000 0.0004863785 0 0.00048632986 0.5657434 103595.49 -0.093328929
110000 0.00048804324 0 0.00048799443 0.5656147 103595.49 -0.09302382
111000 0.00050352097 0 0.00050347062 0.56529279 103595.49 -0.092461373
112000 0.00050474509 0 0.00050469461 0.56537494 103595.49 -0.092212501
113000 0.0005125299 0 0.00051247865 0.56547326 103595.49 -0.092304578
114000 0.00052700168 0 0.00052694898 0.56568076 103595.49 -0.092013613
115000 0.00054217865 0 0.00054212444 0.56526328 103595.49 -0.091011537
116000 0.00055122699 0 0.00055117186 0.56489606 103595.49 -0.090688925
117000 0.00055802701 0 0.00055797121 0.56458767 103595.49 -0.090385903
118000 0.00055416633 0 0.00055411091 0.56433528 103595.49 -0.090454192
119000 0.00055519395 0 0.00055513843 0.56411926 103595.49 -0.090495063
120000 0.0005535194 0 0.00055346405 0.56424847 103595.49 -0.090915789
121000 0.00054781097 0 0.00054775619 0.56443756 103595.49 -0.090687173
122000 0.00054528815 0 0.00054523362 0.56401103 103595.49 -0.090443168
123000 0.0005456223 0 0.00054556773 0.56376875 103595.49 -0.090277114
124000 0.00054080131 0 0.00054074723 0.563306 103595.49 -0.091297668
125000 0.00054597 0 0.0005459154 0.56387718 103595.49 -0.091522394
126000 0.000544669 0 0.00054461453 0.56318185 103595.49 -0.091100523
127000 0.00054592361 0 0.00054586902 0.56328758 103595.49 -0.091299714
128000 0.00056246325 0 0.000562407 0.56296852 103595.49 -0.091491356
129000 0.00057655488 0 0.00057649723 0.56242057 103595.49 -0.091474584
130000 0.00060363901 0 0.00060357864 0.56182729 103595.49 -0.091367782
131000 0.00060590757 0 0.00060584698 0.56115572 103595.49 -0.090594163
132000 0.00061689139 0 0.0006168297 0.56029248 103595.49 -0.089857939
133000 0.00063288773 0 0.00063282444 0.55971427 103595.49 -0.08954619
134000 0.00064153654 0 0.00064147238 0.55929877 103595.49 -0.089860563
135000 0.00065473169 0 0.00065466622 0.5590797 103595.49 -0.089932375
136000 0.0006814182 0 0.00068135006 0.55797116 103595.49 -0.08929097
137000 0.00068344911 0 0.00068338077 0.55796657 103595.49 -0.089644888
138000 0.00071510067 0 0.00071502916 0.55752379 103595.49 -0.089734088
139000 0.00074772787 0 0.0007476531 0.55740054 103595.49 -0.089968295
140000 0.00072706311 0 0.0007269904 0.55659113 103595.49 -0.090370844
141000 0.0007179286 0 0.00071785681 0.55659012 103595.49 -0.089976688
142000 0.00072587657 0 0.00072580399 0.55589037 103595.49 -0.090532153
143000 0.00074470967 0 0.0007446352 0.55553128 103595.49 -0.091019969
144000 0.00071737422 0 0.00071730248 0.55555994 103595.49 -0.090926005
145000 0.00070363824 0 0.00070356787 0.55548936 103595.49 -0.0912353
146000 0.00069604487 0 0.00069597527 0.55540516 103595.49 -0.091656715
147000 0.00070047196 0 0.00070040191 0.55466746 103595.49 -0.092101291
148000 0.00069764904 0 0.00069757927 0.55460283 103595.49 -0.092334573
149000 0.00068884707 0 0.00068877819 0.55462796 103595.49 -0.0928736
150000 0.00067704593 0 0.00067697823 0.55520015 103595.49 -0.093512131
151000 0.00067702275 0 0.00067695505 0.55530068 103595.49 -0.094127311
152000 0.000690717 0 0.00069064792 0.55432538 103595.49 -0.094248615
153000 0.00067758953 0 0.00067752177 0.55460446 103595.49 -0.094839924
154000 0.00067748542 0 0.00067741767 0.55532529 103595.49 -0.095832411
155000 0.00068723442 0 0.0006871657 0.55637763 103595.49 -0.096838207
156000 0.00071590663 0 0.00071583504 0.5569485 103595.49 -0.097686166
157000 0.00078378647 0 0.0007837081 0.55755381 103595.49 -0.097968527
158000 0.00080144334 0 0.00080136319 0.55741023 103595.49 -0.098119361
159000 0.00079183165 0 0.00079175247 0.55756142 103595.49 -0.097925888
160000 0.00081212358 0 0.00081204237 0.55669124 103595.49 -0.098171108
161000 0.00082903843 0 0.00082895553 0.55608918 103595.49 -0.097827206
162000 0.00084257416 0 0.0008424899 0.5560239 103595.49 -0.096775743
163000 0.00086279615 0 0.00086270987 0.55550215 103595.49 -0.095981927
164000 0.00092139657 0 0.00092130443 0.55395137 103595.49 -0.095215338
165000 0.00095519936 0 0.00095510384 0.55376787 103595.49 -0.0945666
166000 0.00092201276 0 0.00092192056 0.55373794 103595.49 -0.093531233
167000 0.0008525194 0 0.00085243415 0.55375862 103595.49 -0.09389901
168000 0.00081977785 0 0.00081969587 0.5536646 103595.49 -0.093829746
169000 0.00079692467 0 0.00079684497 0.55416599 103595.49 -0.094433271
170000 0.00077787798 0 0.0007778002 0.55416629 103595.49 -0.095043413
171000 0.0007651362 0 0.00076505969 0.55418872 103595.49 -0.095212376
172000 0.00074631438 0 0.00074623975 0.55403881 103595.49 -0.095463949
173000 0.00074431288 0 0.00074423845 0.55415801 103595.49 -0.095319615
174000 0.00073924649 0 0.00073917257 0.55373682 103595.49 -0.094724272
175000 0.00070973165 0 0.00070966068 0.55393569 103595.49 -0.094201112
176000 0.00069820766 0 0.00069813784 0.55341229 103595.49 -0.093530469
177000 0.00070922657 0 0.00070915564 0.55282781 103595.49 -0.093282587
178000 0.00073688566 0 0.00073681197 0.55223248 103595.49 -0.092190554
179000 0.00072455886 0 0.0007244864 0.55244288 103595.49 -0.09182393
180000 0.00071894558 0 0.00071887369 0.55281517 103595.49 -0.092177032
181000 0.00071752475 0 0.000717453 0.55297293 103595.49 -0.09230934
182000 0.00072247421 0 0.00072240196 0.55287294 103595.49 -0.092465908
183000 0.00070596821 0 0.00070589761 0.5526242 103595.49 -0.092013313
184000 0.00072460909 0 0.00072453663 0.55230817 103595.49 -0.092211243
185000 0.00072499917 0 0.00072492667 0.55273387 103595.49 -0.092920761
186000 0.00072852698 0 0.00072845413 0.55358929 103595.49 -0.093428623
187000 0.00072515668 0 0.00072508417 0.55336733 103595.49 -0.093440126
188000 0.00071355728 0 0.00071348593 0.55408468 103595.49 -0.094819333
189000 0.00071703212 0 0.00071696042 0.55424355 103595.49 -0.096198144
190000 0.00071808849 0 0.00071801668 0.55504118 103595.49 -0.097199842
191000 0.00072458142 0 0.00072450896 0.55493515 103595.49 -0.097564772
192000 0.00071472504 0 0.00071465357 0.55572657 103595.49 -0.097663072
193000 0.00070803966 0 0.00070796886 0.55546116 103595.49 -0.097306162
194000 0.00068236077 0 0.00068229254 0.55581155 103595.49 -0.097141594
195000 0.00067304613 0 0.00067297882 0.55559507 103595.49 -0.096646489
196000 0.00066680808 0 0.0006667414 0.55512029 103595.49 -0.096241456
197000 0.00065829161 0 0.00065822578 0.55545375 103595.49 -0.095808778
198000 0.0006617611 0 0.00066169492 0.55551074 103595.49 -0.095423531
199000 0.00066805655 0 0.00066798975 0.55581298 103595.49 -0.095287337
200000 0.00067263902 0 0.00067257175 0.55601669 103595.49 -0.095551006
Loop time of 145.127 on 4 procs for 200000 steps with 10000 atoms
Performance: 595339.507 tau/day, 1378.101 timesteps/s
100.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 57.89 | 91.946 | 113.82 | 229.7 | 63.36
Neigh | 0.34276 | 0.48607 | 0.56708 | 13.1 | 0.33
Comm | 6.569 | 28.916 | 63.505 | 417.0 | 19.92
Output | 0.0087178 | 0.011935 | 0.01563 | 2.3 | 0.01
Modify | 10.432 | 16.495 | 20.378 | 97.7 | 11.37
Other | | 7.272 | | | 5.01
Nlocal: 2500.00 ave 2990 max 1652 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Nghost: 228.000 ave 254 max 198 min
Histogram: 1 0 1 0 0 0 0 0 0 2
Neighs: 8611.25 ave 10364 max 5676 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Total # of neighbors = 34445
Ave neighs/atom = 3.4445000
Neighbor list builds = 241
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:02:25

@@ -62,7 +62,7 @@ delete_atoms overlap 0.5 small big
 reset_timestep 0
-neighbor 0.3 bin
+neighbor 0.3 multi
 neigh_modify delay 0 every 1 check yes
 comm_modify mode multi group big vel yes

lib/gpu/lal_lj_smooth.cpp (new file, 192 lines)
@@ -0,0 +1,192 @@
/***************************************************************************
lj_smooth.cpp
-------------------
Gurgen Melikyan (HSE University)
Class for acceleration of the lj/smooth pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin :
email : gkmeliyan@edu.hse.ru
***************************************************************************/
#if defined(USE_OPENCL)
#include "lj_smooth_cl.h"
#elif defined(USE_CUDART)
const char *lj_smooth=0;
#else
#include "lj_smooth_cubin.h"
#endif
#include "lal_lj_smooth.h"
#include <cassert>
namespace LAMMPS_AL {
#define LJSMOOTHT LJSMOOTH<numtyp, acctyp>
extern Device<PRECISION,ACC_PRECISION> device;
template <class numtyp, class acctyp>
LJSMOOTHT::LJSMOOTH() : BaseAtomic<numtyp,acctyp>(), _allocated(false) {
}
template <class numtyp, class acctyp>
LJSMOOTHT::~LJSMOOTH() {
clear();
}
template <class numtyp, class acctyp>
int LJSMOOTHT::bytes_per_atom(const int max_nbors) const {
return this->bytes_per_atom_atomic(max_nbors);
}
template <class numtyp, class acctyp>
int LJSMOOTHT::init(const int ntypes,
double **host_cutsq, double **host_lj1,
double **host_lj2, double **host_lj3,
double **host_lj4, double **host_offset,
double *host_special_lj, const int nlocal,
const int nall, const int max_nbors,
const int maxspecial, const double cell_size,
const double gpu_split, FILE *_screen,
double **host_ljsw0, double **host_ljsw1, double **host_ljsw2, double **host_ljsw3,
double **host_ljsw4,
double **cut_inner, double **cut_inner_sq) {
const int max_shared_types=this->device->max_shared_types();
int onetype=0;
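// When no special-bond scaling is in use (maxspecial == 0), check whether
// exactly one type pair has a non-zero cutoff; its packed index is handed
// to init_atomic() so the kernel can be specialized for that single pair.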
#ifdef USE_OPENCL
if (maxspecial==0)
for (int i=1; i<ntypes; i++)
for (int j=i; j<ntypes; j++)
if (host_cutsq[i][j]>0) {
if (onetype>0)
onetype=-1;
else if (onetype==0)
onetype=i*max_shared_types+j;
}
if (onetype<0) onetype=0;
#endif
int success;
success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
_screen,lj_smooth,"k_lj_smooth",onetype);
if (success!=0)
return success;
// If atom type constants fit in shared memory use fast kernel
int lj_types=ntypes;
shared_types=false;
if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
lj_types=max_shared_types;
shared_types=true;
}
_lj_types=lj_types;
// Allocate a host write buffer for data initialization
UCL_H_Vec<numtyp> host_write(lj_types*lj_types*32,*(this->ucl_device),
UCL_WRITE_ONLY);
for (int i=0; i<lj_types*lj_types; i++)
host_write[i]=0.0;
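// Pack the per type-pair coefficients for the device:
//   lj1  = (lj1, lj2, cutsq, cut_inner_sq)
//   lj3  = (lj3, lj4, offset, unused)
//   ljsw = (ljsw1, ljsw2, ljsw3, ljsw4),  ljsw0 = (ljsw0, cut_inner)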
lj1.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
this->atom->type_pack4(ntypes,lj_types,lj1,host_write,host_lj1,host_lj2,
host_cutsq, cut_inner_sq);
lj3.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
this->atom->type_pack4(ntypes,lj_types,lj3,host_write,host_lj3,host_lj4,
host_offset);
ljsw.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
this->atom->type_pack4(ntypes,lj_types,ljsw,host_write,host_ljsw1,host_ljsw2,
host_ljsw3,host_ljsw4);
ljsw0.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
this->atom->type_pack2(ntypes,lj_types,ljsw0,host_write,host_ljsw0,cut_inner);
UCL_H_Vec<double> dview;
sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
dview.view(host_special_lj,4,*(this->ucl_device));
ucl_copy(sp_lj,dview,false);
_allocated=true;
this->_max_bytes=lj1.row_bytes()+lj3.row_bytes()+ljsw.row_bytes()+ljsw0.row_bytes()+sp_lj.row_bytes();
return 0;
}
template <class numtyp, class acctyp>
void LJSMOOTHT::reinit(const int ntypes, double **host_cutsq, double **host_lj1,
double **host_lj2, double **host_lj3,
double **host_lj4, double **host_offset,
double **host_ljsw0, double **host_ljsw1, double **host_ljsw2, double **host_ljsw3,
double **host_ljsw4,
double **cut_inner, double **cut_inner_sq) {
// Allocate a host write buffer for data initialization
UCL_H_Vec<numtyp> host_write(_lj_types*_lj_types*32,*(this->ucl_device),
UCL_WRITE_ONLY);
for (int i=0; i<_lj_types*_lj_types; i++)
host_write[i]=0.0;
this->atom->type_pack4(ntypes,_lj_types,lj1,host_write,host_lj1,host_lj2,
host_cutsq,cut_inner_sq);
this->atom->type_pack4(ntypes,_lj_types,lj3,host_write,host_lj3,host_lj4,
host_offset);
this->atom->type_pack4(ntypes,_lj_types,ljsw,host_write,host_ljsw1,host_ljsw2,
host_ljsw3,host_ljsw4);
this->atom->type_pack2(ntypes,_lj_types,ljsw0,host_write,host_ljsw0,cut_inner);
}
template <class numtyp, class acctyp>
void LJSMOOTHT::clear() {
if (!_allocated)
return;
_allocated=false;
lj1.clear();
lj3.clear();
ljsw.clear();
ljsw0.clear();
sp_lj.clear();
this->clear_atomic();
}
template <class numtyp, class acctyp>
double LJSMOOTHT::host_memory_usage() const {
return this->host_memory_usage_atomic()+sizeof(LJSMOOTH<numtyp,acctyp>);
}
// ---------------------------------------------------------------------------
// Calculate energies, forces, and torques
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int LJSMOOTHT::loop(const int eflag, const int vflag) {
// Compute the block size and grid size to keep all cores busy
const int BX=this->block_size();
int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
(BX/this->_threads_per_atom)));
int ainum=this->ans->inum();
int nbor_pitch=this->nbor->nbor_pitch();
this->time_pair.start();
if (shared_types) {
this->k_pair_sel->set_size(GX,BX);
this->k_pair_sel->run(&this->atom->x, &lj1, &lj3, &ljsw, &ljsw0, &sp_lj,
&this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->ans->force, &this->ans->engv, &eflag,
&vflag, &ainum, &nbor_pitch,
&this->_threads_per_atom);
} else {
this->k_pair.set_size(GX,BX);
this->k_pair.run(&this->atom->x, &lj1, &lj3, &ljsw, &ljsw0, &_lj_types, &sp_lj,
&this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->ans->force, &this->ans->engv, &eflag, &vflag,
&ainum, &nbor_pitch, &this->_threads_per_atom);
}
this->time_pair.stop();
return GX;
}
template class LJSMOOTH<PRECISION,ACC_PRECISION>;
}

259
lib/gpu/lal_lj_smooth.cu Normal file
View File

@ -0,0 +1,259 @@
// **************************************************************************
// lj_smooth.cu
// -------------------
// Gurgen Melikyan (HSE University)
//
// Device code for acceleration of the lj/smooth pair style
//
// __________________________________________________________________________
// This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
// __________________________________________________________________________
//
// begin :
// email : gkmeliyan@edu.hse.ru
// ***************************************************************************
#if defined(NV_KERNEL) || defined(USE_HIP)
#include "lal_aux_fun1.h"
#ifndef _DOUBLE_DOUBLE
_texture( pos_tex,float4);
#else
_texture_2d( pos_tex,int4);
#endif
#else
#define pos_tex x_
#endif
__kernel void k_lj_smooth(const __global numtyp4 *restrict x_,
const __global numtyp4 *restrict lj1,
const __global numtyp4 *restrict lj3,
const __global numtyp4 *restrict ljsw,
const __global numtyp2 *restrict ljsw0,
const int lj_types,
const __global numtyp *restrict sp_lj,
const __global int * dev_nbor,
const __global int * dev_packed,
__global acctyp4 *restrict ans,
__global acctyp *restrict engv,
const int eflag, const int vflag, const int inum,
const int nbor_pitch, const int t_per_atom) {
int tid, ii, offset;
atom_info(t_per_atom,ii,tid,offset);
int n_stride;
local_allocate_store_pair();
acctyp4 f;
f.x=(acctyp)0; f.y=(acctyp)0; f.z=(acctyp)0;
acctyp energy, virial[6];
if (EVFLAG) {
energy=(acctyp)0;
for (int i=0; i<6; i++) virial[i]=(acctyp)0;
}
if (ii<inum) {
int i, numj, nbor, nbor_end;
nbor_info(dev_nbor,dev_packed,nbor_pitch,t_per_atom,ii,offset,i,numj,
n_stride,nbor_end,nbor);
numtyp4 ix; fetch4(ix,i,pos_tex); //x_[i];
int itype=ix.w;
numtyp force, r6inv, factor_lj, forcelj;
numtyp r, t, tsq, fskin;
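// lj/smooth uses the standard 12-6 LJ force inside cut_inner; between
// cut_inner and the outer cutoff the force magnitude follows the cubic
// polynomial fskin = ljsw1 + ljsw2*t + ljsw3*t^2 + ljsw4*t^3 with
// t = r - cut_inner, and the energy branch below uses the integral of that
// polynomial (shifted by ljsw0 and the offset) so force and energy stay
// consistent across the smoothing region.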
for ( ; nbor<nbor_end; nbor+=n_stride) {
int j=dev_packed[nbor];
factor_lj = sp_lj[sbmask(j)];
j &= NEIGHMASK;
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
int jtype=jx.w;
// Compute r12
numtyp delx = ix.x-jx.x;
numtyp dely = ix.y-jx.y;
numtyp delz = ix.z-jx.z;
numtyp rsq = delx*delx+dely*dely+delz*delz;
int mtype=itype*lj_types+jtype;
if (rsq<lj1[mtype].z) {
numtyp r2inv=ucl_recip(rsq);
if (rsq < lj1[mtype].w) {
r6inv = r2inv*r2inv*r2inv;
forcelj = r6inv*(lj1[mtype].x*r6inv-lj1[mtype].y);
}
else {
r = sqrt(rsq);
t = r - ljsw0[mtype].y;
tsq = t*t;
fskin = ljsw[mtype].x + ljsw[mtype].y*t +
ljsw[mtype].z*tsq + ljsw[mtype].w*tsq*t;
forcelj = fskin*r;
}
force = factor_lj*r2inv*forcelj;
f.x+=delx*force;
f.y+=dely*force;
f.z+=delz*force;
if (EVFLAG && eflag) {
numtyp e;
if (rsq < lj1[mtype].w)
e = r6inv * (lj3[mtype].x*r6inv - lj3[mtype].y) - lj3[mtype].z;
else
e = ljsw0[mtype].x - ljsw[mtype].x*t -
ljsw[mtype].y*tsq/2.0 - ljsw[mtype].z*tsq*t/3.0 -
ljsw[mtype].w*tsq*tsq/4.0 - lj3[mtype].z;
//numtyp e=r6inv*(lj3[mtype].x*r6inv-lj3[mtype].y);
energy+=factor_lj*e;
}
if (EVFLAG && vflag) {
virial[0] += delx*delx*force;
virial[1] += dely*dely*force;
virial[2] += delz*delz*force;
virial[3] += delx*dely*force;
virial[4] += delx*delz*force;
virial[5] += dely*delz*force;
}
}
} // for nbor
} // if ii
store_answers(f,energy,virial,ii,inum,tid,t_per_atom,offset,eflag,vflag,
ans,engv);
}
__kernel void k_lj_smooth_fast(const __global numtyp4 *restrict x_,
const __global numtyp4 *restrict lj1_in,
const __global numtyp4 *restrict lj3_in,
const __global numtyp4 *restrict ljsw,
const __global numtyp2 *restrict ljsw0,
const __global numtyp *restrict sp_lj_in,
const __global int * dev_nbor,
const __global int * dev_packed,
__global acctyp4 *restrict ans,
__global acctyp *restrict engv,
const int eflag, const int vflag, const int inum,
const int nbor_pitch, const int t_per_atom) {
int tid, ii, offset;
atom_info(t_per_atom,ii,tid,offset);
#ifndef ONETYPE
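// Without a compiled-in ONETYPE specialization, the first threads of each
// work-group stage the per-type lj1/lj3 tables and the special-bond factors
// into local (shared) memory before the neighbor loop.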
__local numtyp4 lj1[MAX_SHARED_TYPES*MAX_SHARED_TYPES];
__local numtyp4 lj3[MAX_SHARED_TYPES*MAX_SHARED_TYPES];
__local numtyp sp_lj[4];
if (tid<4)
sp_lj[tid]=sp_lj_in[tid];
if (tid<MAX_SHARED_TYPES*MAX_SHARED_TYPES) {
lj1[tid]=lj1_in[tid];
if (EVFLAG && eflag)
lj3[tid]=lj3_in[tid];
}
__syncthreads();
#else
const numtyp lj1x=lj1_in[ONETYPE].x;
const numtyp lj1y=lj1_in[ONETYPE].y;
const numtyp cutsq=lj1_in[ONETYPE].z;
numtyp lj3x, lj3y, lj3z;
if (EVFLAG && eflag) {
lj3x=lj3_in[ONETYPE].x;
lj3y=lj3_in[ONETYPE].y;
lj3z=lj3_in[ONETYPE].z;
}
#endif
int n_stride;
local_allocate_store_pair();
acctyp4 f;
f.x=(acctyp)0; f.y=(acctyp)0; f.z=(acctyp)0;
acctyp energy, virial[6];
if (EVFLAG) {
energy=(acctyp)0;
for (int i=0; i<6; i++) virial[i]=(acctyp)0;
}
if (ii<inum) {
int i, numj, nbor, nbor_end;
nbor_info(dev_nbor,dev_packed,nbor_pitch,t_per_atom,ii,offset,i,numj,
n_stride,nbor_end,nbor);
numtyp4 ix; fetch4(ix,i,pos_tex); //x_[i];
#ifndef ONETYPE
int iw=ix.w;
int itype=fast_mul((int)MAX_SHARED_TYPES,iw);
#endif
// scratch values used on both the shared-table and ONETYPE code paths
numtyp force, r6inv, forcelj;
numtyp r, t, tsq, fskin;
numtyp factor_lj=(numtyp)1.0;
NOUNROLL
for ( ; nbor<nbor_end; nbor+=n_stride) {
int j=dev_packed[nbor];
#ifndef ONETYPE
factor_lj = sp_lj[sbmask(j)];
j &= NEIGHMASK;
#endif
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
#ifndef ONETYPE
int mtype=itype+jx.w;
#endif
// Compute r12
numtyp delx = ix.x-jx.x;
numtyp dely = ix.y-jx.y;
numtyp delz = ix.z-jx.z;
numtyp rsq = delx*delx+dely*dely+delz*delz;
if (rsq<lj1[mtype].z) {
numtyp r2inv=ucl_recip(rsq);
if (rsq < lj1[mtype].w) {
r6inv = r2inv*r2inv*r2inv;
forcelj = r6inv*(lj1[mtype].x*r6inv-lj1[mtype].y);
}
else {
r = sqrt(rsq);
t = r - ljsw0[mtype].y; // t measured from cut_inner (ljsw0.y stores cut_inner)
tsq = t*t;
fskin = ljsw[mtype].x + ljsw[mtype].y*t +
ljsw[mtype].z*tsq + ljsw[mtype].w*tsq*t;
forcelj = fskin*r;
}
force = factor_lj*r2inv*forcelj;
f.x+=delx*force;
f.y+=dely*force;
f.z+=delz*force;
if (EVFLAG && eflag) {
numtyp e;
if (rsq < lj1[mtype].w)
e = r6inv * (lj3[mtype].x*r6inv - lj3[mtype].y) - lj3[mtype].z;
else
e = ljsw0[mtype].x - ljsw[mtype].x*t -
ljsw[mtype].y*tsq/2.0 - ljsw[mtype].z*tsq*t/3.0 -
ljsw[mtype].w*tsq*tsq/4.0 - lj3[mtype].z; // energy from integrating the force polynomial, shifted by the offset
energy+=factor_lj*e;
}
if (EVFLAG && vflag) {
virial[0] += delx*delx*force;
virial[1] += dely*dely*force;
virial[2] += delz*delz*force;
virial[3] += delx*dely*force;
virial[4] += delx*delz*force;
virial[5] += dely*delz*force;
}
}
} // for nbor
} // if ii
store_answers(f,energy,virial,ii,inum,tid,t_per_atom,offset,eflag,vflag,
ans,engv);
}

91
lib/gpu/lal_lj_smooth.h Normal file
View File

@ -0,0 +1,91 @@
/***************************************************************************
lj_smooth.h
-------------------
Gurgen Melikyan (HSE University)
Class for acceleration of the lj/smooth pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin :
email : gkmelikyan@edu.hse.ru
***************************************************************************/
#ifndef LAL_LJ_SMOOTH_H
#define LAL_LJ_SMOOTH_H
#include "lal_base_atomic.h"
namespace LAMMPS_AL {
template <class numtyp, class acctyp>
class LJSMOOTH : public BaseAtomic<numtyp, acctyp> {
public:
LJSMOOTH();
~LJSMOOTH();
/// Clear any previous data and set up for a new LAMMPS run
/** \param max_nbors initial number of rows in the neighbor matrix
* \param cell_size cutoff + skin
* \param gpu_split fraction of particles handled by device
*
* Returns:
* - 0 if successful
* - -1 if fix gpu not found
* - -3 if there is an out of memory error
* - -4 if the GPU library was not compiled for GPU
* - -5 Double precision is not supported on card **/
int init(const int ntypes, double **host_cutsq,
double **host_lj1, double **host_lj2, double **host_lj3,
double **host_lj4, double **host_offset, double *host_special_lj,
const int nlocal, const int nall, const int max_nbors,
const int maxspecial, const double cell_size,
const double gpu_split, FILE *screen,
double **host_ljsw0, double **host_ljsw1, double **host_ljsw2,
double **host_ljsw3, double **host_ljsw4,
double **cut_inner, double **cut_inner_sq);
/// Send updated coeffs from host to device (to be compatible with fix adapt)
void reinit(const int ntypes, double **host_cutsq,
double **host_lj1, double **host_lj2, double **host_lj3,
double **host_lj4, double **host_offset,
double **host_ljsw0, double **host_ljsw1, double **host_ljsw2,
double **host_ljsw3, double **host_ljsw4,
double **cut_inner, double **cut_inner_sq);
/// Clear all host and device data
/** \note This is called at the beginning of the init() routine **/
void clear();
/// Returns memory usage on device per atom
int bytes_per_atom(const int max_nbors) const;
/// Total host memory used by library for pair style
double host_memory_usage() const;
// --------------------------- TYPE DATA --------------------------
/// lj1.x = lj1, lj1.y = lj2, lj1.z = cutsq, lj1.w = cut_inner_sq
UCL_D_Vec<numtyp4> lj1;
/// lj3.x = lj3, lj3.y = lj4, lj3.z = offset
UCL_D_Vec<numtyp4> lj3;
/// ljsw.x = ljsw1, ljsw.y = ljsw2, ljsw.z = ljsw3, ljsw.w = ljsw4
UCL_D_Vec<numtyp4> ljsw;
/// ljsw0.x = ljsw0, ljsw0.y = cut_inner
UCL_D_Vec<numtyp2> ljsw0;
/// Special LJ values
UCL_D_Vec<numtyp> sp_lj;
/// If atom type constants fit in shared memory, use fast kernels
bool shared_types;
/// Number of atom types
int _lj_types;
private:
bool _allocated;
int loop(const int _eflag, const int _vflag);
};
}
#endif

144
lib/gpu/lal_lj_smooth_ext.cpp Normal file
View File

@ -0,0 +1,144 @@
/***************************************************************************
lj_smooth_ext.cpp
-------------------
Gurgen Melikyan (HSE University)
Functions for LAMMPS access to lj/smooth acceleration routines.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin :
email : gkmeliyan@edu.hse.ru
***************************************************************************/
#include <iostream>
#include <cassert>
#include <cmath>
#include "lal_lj_smooth.h"
using namespace std;
using namespace LAMMPS_AL;
static LJSMOOTH<PRECISION,ACC_PRECISION> LJSMTMF;
// ---------------------------------------------------------------------------
// Allocate memory on host and device and copy constants to device
// ---------------------------------------------------------------------------
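// Initialization is done in two passes: world rank 0 initializes and compiles
// first, then one rank per GPU repeats the initialization, with world/gpu
// barriers keeping the processes in step.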
int ljsmt_gpu_init(const int ntypes, double **cutsq, double **host_lj1,
double **host_lj2, double **host_lj3, double **host_lj4,
double **offset, double *special_lj, const int inum,
const int nall, const int max_nbors, const int maxspecial,
const double cell_size, int &gpu_mode, FILE *screen,
double **host_ljsw0, double **host_ljsw1, double **host_ljsw2, double **host_ljsw3,
double **host_ljsw4, double **cut_inner, double **cut_inner_sq) {
LJSMTMF.clear();
gpu_mode=LJSMTMF.device->gpu_mode();
double gpu_split=LJSMTMF.device->particle_split();
int first_gpu=LJSMTMF.device->first_device();
int last_gpu=LJSMTMF.device->last_device();
int world_me=LJSMTMF.device->world_me();
int gpu_rank=LJSMTMF.device->gpu_rank();
int procs_per_gpu=LJSMTMF.device->procs_per_gpu();
LJSMTMF.device->init_message(screen,"lj/smooth",first_gpu,last_gpu);
bool message=false;
if (LJSMTMF.device->replica_me()==0 && screen)
message=true;
if (message) {
fprintf(screen,"Initializing Device and compiling on process 0...");
fflush(screen);
}
int init_ok=0;
if (world_me==0)
init_ok=LJSMTMF.init(ntypes, cutsq, host_lj1, host_lj2, host_lj3,
host_lj4, offset, special_lj, inum, nall, max_nbors,
maxspecial, cell_size, gpu_split, screen,
host_ljsw0, host_ljsw1, host_ljsw2, host_ljsw3, host_ljsw4, cut_inner, cut_inner_sq);
LJSMTMF.device->world_barrier();
if (message)
fprintf(screen,"Done.\n");
for (int i=0; i<procs_per_gpu; i++) {
if (message) {
if (last_gpu-first_gpu==0)
fprintf(screen,"Initializing Device %d on core %d...",first_gpu,i);
else
fprintf(screen,"Initializing Devices %d-%d on core %d...",first_gpu,
last_gpu,i);
fflush(screen);
}
if (gpu_rank==i && world_me!=0)
init_ok=LJSMTMF.init(ntypes, cutsq, host_lj1, host_lj2, host_lj3, host_lj4,
offset, special_lj, inum, nall, max_nbors, maxspecial,
cell_size, gpu_split, screen, host_ljsw0, host_ljsw1, host_ljsw2, host_ljsw3,
host_ljsw4, cut_inner, cut_inner_sq);
LJSMTMF.device->gpu_barrier();
if (message)
fprintf(screen,"Done.\n");
}
if (message)
fprintf(screen,"\n");
if (init_ok==0)
LJSMTMF.estimate_gpu_overhead();
return init_ok;
}
// ---------------------------------------------------------------------------
// Copy updated coeffs from host to device
// ---------------------------------------------------------------------------
void ljsmt_gpu_reinit(const int ntypes, double **cutsq, double **host_lj1,
double **host_lj2, double **host_lj3, double **host_lj4,
double **offset, double **host_ljsw0, double **host_ljsw1, double **host_ljsw2, double **host_ljsw3,
double **host_ljsw4, double **cut_inner, double **cut_inner_sq) {
int world_me=LJSMTMF.device->world_me();
int gpu_rank=LJSMTMF.device->gpu_rank();
int procs_per_gpu=LJSMTMF.device->procs_per_gpu();
if (world_me==0)
LJSMTMF.reinit(ntypes, cutsq, host_lj1, host_lj2, host_lj3, host_lj4, offset, host_ljsw0, host_ljsw1, host_ljsw2, host_ljsw3, host_ljsw4, cut_inner, cut_inner_sq);
LJSMTMF.device->world_barrier();
for (int i=0; i<procs_per_gpu; i++) {
if (gpu_rank==i && world_me!=0)
LJSMTMF.reinit(ntypes, cutsq, host_lj1, host_lj2, host_lj3, host_lj4, offset, host_ljsw0, host_ljsw1, host_ljsw2, host_ljsw3, host_ljsw4, cut_inner, cut_inner_sq);
LJSMTMF.device->gpu_barrier();
}
}
void ljsmt_gpu_clear() {
LJSMTMF.clear();
}
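// Two compute entry points follow: ljsmt_gpu_compute_n is used when the
// neighbor list is built on the device, while ljsmt_gpu_compute reuses a
// neighbor list built on the host.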
int ** ljsmt_gpu_compute_n(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success) {
return LJSMTMF.compute(ago, inum_full, nall, host_x, host_type, sublo,
subhi, tag, nspecial, special, eflag, vflag, eatom,
vatom, host_start, ilist, jnum, cpu_time, success);
}
void ljsmt_gpu_compute(const int ago, const int inum_full, const int nall,
double **host_x, int *host_type, int *ilist, int *numj,
int **firstneigh, const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
const double cpu_time, bool &success) {
LJSMTMF.compute(ago,inum_full,nall,host_x,host_type,ilist,numj,
firstneigh,eflag,vflag,eatom,vatom,host_start,cpu_time,success);
}
double ljsmt_gpu_bytes() {
return LJSMTMF.host_memory_usage();
}

View File

@ -1,5 +1,168 @@
# Change Log
## [3.4.00](https://github.com/kokkos/kokkos/tree/3.4.00) (2021-04-25)
[Full Changelog](https://github.com/kokkos/kokkos/compare/3.3.01...3.4.00)
**Highlights:**
- SYCL Backend Almost Feature Complete
- OpenMPTarget Backend Almost Feature Complete
- Performance Improvements for HIP backend
- Require CMake 3.16 or newer
- Tool Callback Interface Enhancements
- cmath wrapper functions available now in Kokkos::Experimental
**Features:**
- Implement parallel_scan with ThreadVectorRange and Reducer [\#3861](https://github.com/kokkos/kokkos/pull/3861)
- Implement SYCL Random [\#3849](https://github.com/kokkos/kokkos/pull/3849)
- OpenMPTarget: Adding Implementation for nested reducers [\#3845](https://github.com/kokkos/kokkos/pull/3845)
- Implement UniqueToken for SYCL [\#3833](https://github.com/kokkos/kokkos/pull/3833)
- OpenMPTarget: UniqueToken::Global implementation [\#3823](https://github.com/kokkos/kokkos/pull/3823)
- DualView sync's on ExecutionSpaces [\#3822](https://github.com/kokkos/kokkos/pull/3822)
- SYCL outer TeamPolicy parallel_reduce [\#3818](https://github.com/kokkos/kokkos/pull/3818)
- SYCL TeamPolicy::team_scan [\#3815](https://github.com/kokkos/kokkos/pull/3815)
- SYCL MDRangePolicy parallel_reduce [\#3801](https://github.com/kokkos/kokkos/pull/3801)
- Enable use of execution space instances in ScatterView [\#3786](https://github.com/kokkos/kokkos/pull/3786)
- SYCL TeamPolicy nested parallel_reduce [\#3783](https://github.com/kokkos/kokkos/pull/3783)
- OpenMPTarget: MDRange with TagType for parallel_for [\#3781](https://github.com/kokkos/kokkos/pull/3781)
- Adding OpenMPTarget parallel_scan [\#3655](https://github.com/kokkos/kokkos/pull/3655)
- SYCL basic TeamPolicy [\#3654](https://github.com/kokkos/kokkos/pull/3654)
- OpenMPTarget: scratch memory implementation [\#3611](https://github.com/kokkos/kokkos/pull/3611)
**Implemented enhancements Backends and Archs:**
- SYCL choose a specific GPU [\#3918](https://github.com/kokkos/kokkos/pull/3918)
- [HIP] Lock access to scratch memory when using Teams [\#3916](https://github.com/kokkos/kokkos/pull/3916)
- [HIP] fix multithreaded access to get_next_driver [\#3908](https://github.com/kokkos/kokkos/pull/3908)
- Forward declare HIPHostPinnedSpace and SYCLSharedUSMSpace [\#3902](https://github.com/kokkos/kokkos/pull/3902)
- Let SYCL USMObjectMem use SharedAllocationRecord [\#3898](https://github.com/kokkos/kokkos/pull/3898)
- Implement clock_tic for SYCL [\#3893](https://github.com/kokkos/kokkos/pull/3893)
- Don't use a static variable in HIPInternal::scratch_space [\#3866](https://github.com/kokkos/kokkos/pull/3866)
- Reuse memory for SYCL parallel_reduce [\#3873](https://github.com/kokkos/kokkos/pull/3873)
- Update SYCL compiler in CI [\#3826](https://github.com/kokkos/kokkos/pull/3826)
- Introduce HostSharedPtr to manage m_space_instance for Cuda/HIP/SYCL [\#3824](https://github.com/kokkos/kokkos/pull/3824)
- [HIP] Use shuffle for range reduction [\#3811](https://github.com/kokkos/kokkos/pull/3811)
- OpenMPTarget: Changes to the hierarchical parallelism [\#3808](https://github.com/kokkos/kokkos/pull/3808)
- Remove ExtendedReferenceWrapper for SYCL parallel_reduce [\#3802](https://github.com/kokkos/kokkos/pull/3802)
- Eliminate sycl_indirect_launch [\#3777](https://github.com/kokkos/kokkos/pull/3777)
- OpenMPTarget: scratch implementation for parallel_reduce [\#3776](https://github.com/kokkos/kokkos/pull/3776)
- Allow initializing SYCL execution space from sycl::queue and SYCL::impl_static_fence [\#3767](https://github.com/kokkos/kokkos/pull/3767)
- SYCL TeamPolicy scratch memory alternative [\#3763](https://github.com/kokkos/kokkos/pull/3763)
- Alternative implementation for SYCL TeamPolicy [\#3759](https://github.com/kokkos/kokkos/pull/3759)
- Unify handling of synchronous errors in SYCL [\#3754](https://github.com/kokkos/kokkos/pull/3754)
- core/Cuda: Half_t updates for cgsolve [\#3746](https://github.com/kokkos/kokkos/pull/3746)
- Unify HIPParallelLaunch structures [\#3733](https://github.com/kokkos/kokkos/pull/3733)
- Improve performance for SYCL parallel_reduce [\#3732](https://github.com/kokkos/kokkos/pull/3732)
- Use consistent types in Kokkos_OpenMPTarget_Parallel.hpp [\#3703](https://github.com/kokkos/kokkos/pull/3703)
- Implement non-blocking kernel launches for HIP backend [\#3697](https://github.com/kokkos/kokkos/pull/3697)
- Change SYCLInternal::m_queue std::unique_ptr -> std::optional [\#3677](https://github.com/kokkos/kokkos/pull/3677)
- Use alternative SYCL parallel_reduce implementation [\#3671](https://github.com/kokkos/kokkos/pull/3671)
- Use runtime values in KokkosExp_MDRangePolicy.hpp [\#3626](https://github.com/kokkos/kokkos/pull/3626)
- Clean up AnalyzePolicy [\#3564](https://github.com/kokkos/kokkos/pull/3564)
- Changes for indirect launch of SYCL parallel reduce [\#3511](https://github.com/kokkos/kokkos/pull/3511)
**Implemented enhancements BuildSystem:**
- Also require C++14 when building gtest [\#3912](https://github.com/kokkos/kokkos/pull/3912)
- Fix compiling SYCL with OpenMP [\#3874](https://github.com/kokkos/kokkos/pull/3874)
- Require C++17 for SYCL (at configuration time) [\#3869](https://github.com/kokkos/kokkos/pull/3869)
- Add COMPILE_DEFINITIONS argument to kokkos_create_imported_tpl [\#3862](https://github.com/kokkos/kokkos/pull/3862)
- Do not pass arch flags to the linker with no rdc [\#3846](https://github.com/kokkos/kokkos/pull/3846)
- Try compiling C++14 check with C++14 support and print error message [\#3843](https://github.com/kokkos/kokkos/pull/3843)
- Enable HIP with Cray Clang [\#3842](https://github.com/kokkos/kokkos/pull/3842)
- Add an option to disable header self containment tests [\#3834](https://github.com/kokkos/kokkos/pull/3834)
- CMake check for C++14 [\#3809](https://github.com/kokkos/kokkos/pull/3809)
- Prefer -std=* over --std=* [\#3779](https://github.com/kokkos/kokkos/pull/3779)
- Kokkos launch compiler updates [\#3778](https://github.com/kokkos/kokkos/pull/3778)
- Updated comments and enabled no-op for kokkos_launch_compiler [\#3774](https://github.com/kokkos/kokkos/pull/3774)
- Apple's Clang not correctly recognised [\#3772](https://github.com/kokkos/kokkos/pull/3772)
- kokkos_launch_compiler + CUDA auto-detect arch [\#3770](https://github.com/kokkos/kokkos/pull/3770)
- Add Spack test support for Kokkos [\#3753](https://github.com/kokkos/kokkos/pull/3753)
- Split SYCL tests for aot compilation [\#3741](https://github.com/kokkos/kokkos/pull/3741)
- Use consistent OpenMP flag for IntelClang [\#3735](https://github.com/kokkos/kokkos/pull/3735)
- Add support for -Wno-deprecated-gpu-targets [\#3722](https://github.com/kokkos/kokkos/pull/3722)
- Add configuration to target CUDA compute capability 8.6 [\#3713](https://github.com/kokkos/kokkos/pull/3713)
- Added VERSION and SOVERSION to KOKKOS_INTERNAL_ADD_LIBRARY [\#3706](https://github.com/kokkos/kokkos/pull/3706)
- Add fast-math to known NVCC flags [\#3699](https://github.com/kokkos/kokkos/pull/3699)
- Add MI-100 arch string [\#3698](https://github.com/kokkos/kokkos/pull/3698)
- Require CMake >=3.16 [\#3679](https://github.com/kokkos/kokkos/pull/3679)
- KokkosCI.cmake, KokkosCTest.cmake.in, CTestConfig.cmake.in + CI updates [\#2844](https://github.com/kokkos/kokkos/pull/2844)
**Implemented enhancements Tools:**
- Improve readability of the callback invocation in profiling [\#3860](https://github.com/kokkos/kokkos/pull/3860)
- V1.1 Tools Interface: incremental, action-based [\#3812](https://github.com/kokkos/kokkos/pull/3812)
- Enable launch latency simulations [\#3721](https://github.com/kokkos/kokkos/pull/3721)
- Added metadata callback to tools interface [\#3711](https://github.com/kokkos/kokkos/pull/3711)
- MDRange Tile Size Tuning [\#3688](https://github.com/kokkos/kokkos/pull/3688)
- Added support for command-line args for kokkos-tools [\#3627](https://github.com/kokkos/kokkos/pull/3627)
- Query max tile sizes for an MDRangePolicy, and set tile sizes on an existing policy [\#3481](https://github.com/kokkos/kokkos/pull/3481)
**Implemented enhancements Other:**
- Try detecting ndevices in get_gpu [\#3921](https://github.com/kokkos/kokkos/pull/3921)
- Use strcmp to compare names() [\#3909](https://github.com/kokkos/kokkos/pull/3909)
- Add execution space arguments for constructor overloads that might allocate a new underlying View [\#3904](https://github.com/kokkos/kokkos/pull/3904)
- Prefix labels in internal use of kokkos_malloc [\#3891](https://github.com/kokkos/kokkos/pull/3891)
- Prefix labels for internal uses of SharedAllocationRecord [\#3890](https://github.com/kokkos/kokkos/pull/3890)
- Add missing hypot math function [\#3880](https://github.com/kokkos/kokkos/pull/3880)
- Unify algorithm unit tests to avoid code duplication [\#3851](https://github.com/kokkos/kokkos/pull/3851)
- DualView.template view() better matches for Devices in UVMSpace cases [\#3857](https://github.com/kokkos/kokkos/pull/3857)
- More extensive disentangling of Policy Traits [\#3829](https://github.com/kokkos/kokkos/pull/3829)
- Replaced nanosleep and sched_yield with STL routines [\#3825](https://github.com/kokkos/kokkos/pull/3825)
- Constructing Atomic Subviews [\#3810](https://github.com/kokkos/kokkos/pull/3810)
- Metadata Declaration in Core [\#3729](https://github.com/kokkos/kokkos/pull/3729)
- Allow using tagged final functor in parallel_reduce [\#3714](https://github.com/kokkos/kokkos/pull/3714)
- Major duplicate code removal in SharedAllocationRecord specializations [\#3658](https://github.com/kokkos/kokkos/pull/3658)
**Fixed bugs:**
- Provide forward declarations in Kokkos_ViewLayoutTiled.hpp for XL [\#3911](https://github.com/kokkos/kokkos/pull/3911)
- Fixup absolute value of floating points in Kokkos complex [\#3882](https://github.com/kokkos/kokkos/pull/3882)
- Address intel 17 ICE [\#3881](https://github.com/kokkos/kokkos/pull/3881)
- Add missing pow(Kokkos::complex) overloads [\#3868](https://github.com/kokkos/kokkos/pull/3868)
- Fix bug {pow, log}(Kokkos::complex) [\#3866](https://github.com/kokkos/kokkos/pull/3866)
- Cleanup writing to output streams in Cuda [\#3859](https://github.com/kokkos/kokkos/pull/3859)
- Fixup cache CUDA fallback execution space instance used by DualView::sync [\#3856](https://github.com/kokkos/kokkos/pull/3856)
- Fix cmake warning with pthread [\#3854](https://github.com/kokkos/kokkos/pull/3854)
- Fix typo FOUND_CUDA_{DRIVVER -> DRIVER} [\#3852](https://github.com/kokkos/kokkos/pull/3852)
- Fix bug in SYCL team_reduce [\#3848](https://github.com/kokkos/kokkos/pull/3848)
- Atrocious bug in MDRange tuning [\#3803](https://github.com/kokkos/kokkos/pull/3803)
- Fix compiling SYCL with Kokkos_ENABLE_TUNING=ON [\#3800](https://github.com/kokkos/kokkos/pull/3800)
- Fixed command line parsing bug [\#3797](https://github.com/kokkos/kokkos/pull/3797)
- Workaround race condition in SYCL parallel_reduce [\#3782](https://github.com/kokkos/kokkos/pull/3782)
- Fix Atomic{Min,Max} for Kepler30 [\#3780](https://github.com/kokkos/kokkos/pull/3780)
- Fix SYCL typo [\#3755](https://github.com/kokkos/kokkos/pull/3755)
- Fixed Kokkos_install_additional_files macro [\#3752](https://github.com/kokkos/kokkos/pull/3752)
- Fix a typo for Kokkos_ARCH_A64FX [\#3751](https://github.com/kokkos/kokkos/pull/3751)
- OpenMPTarget: fixes and workarounds to work with "Release" build type [\#3748](https://github.com/kokkos/kokkos/pull/3748)
- Fix parsing bug for number of devices command line argument [\#3724](https://github.com/kokkos/kokkos/pull/3724)
- Avoid more warnings with clang and C++20 [\#3719](https://github.com/kokkos/kokkos/pull/3719)
- Fix gcc-10.1 C++20 warnings [\#3718](https://github.com/kokkos/kokkos/pull/3718)
- Fix cuda cache config not being set correct [\#3712](https://github.com/kokkos/kokkos/pull/3712)
- Fix dualview deepcopy perftools [\#3701](https://github.com/kokkos/kokkos/pull/3701)
- use drand instead of frand in drand [\#3696](https://github.com/kokkos/kokkos/pull/3696)
**Incompatibilities:**
- Remove unimplemented member functions of SYCLDevice [\#3919](https://github.com/kokkos/kokkos/pull/3919)
- Replace cl::sycl [\#3896](https://github.com/kokkos/kokkos/pull/3896)
- Get rid of SYCL workaround in Kokkos_Complex.hpp [\#3884](https://github.com/kokkos/kokkos/pull/3884)
- Replace most uses of if_c [\#3883](https://github.com/kokkos/kokkos/pull/3883)
- Remove Impl::enable_if_type [\#3863](https://github.com/kokkos/kokkos/pull/3863)
- Remove HostBarrier test [\#3847](https://github.com/kokkos/kokkos/pull/3847)
- Avoid (void) interface [\#3836](https://github.com/kokkos/kokkos/pull/3836)
- Remove VerifyExecutionCanAccessMemorySpace [\#3813](https://github.com/kokkos/kokkos/pull/3813)
- Avoid duplicated code in ScratchMemorySpace [\#3793](https://github.com/kokkos/kokkos/pull/3793)
- Remove superfluous FunctorFinal specialization [\#3788](https://github.com/kokkos/kokkos/pull/3788)
- Rename cl::sycl -> sycl in Kokkos_MathematicalFunctions.hpp [\#3678](https://github.com/kokkos/kokkos/pull/3678)
- Remove integer_sequence backward compatibility implementation [\#3533](https://github.com/kokkos/kokkos/pull/3533)
**Enabled tests:**
- Fixup re-enable core performance tests [\#3903](https://github.com/kokkos/kokkos/pull/3903)
- Enable more SYCL tests [\#3900](https://github.com/kokkos/kokkos/pull/3900)
- Restrict MDRange Policy tests for Intel GPUs [\#3853](https://github.com/kokkos/kokkos/pull/3853)
- Disable death tests for rawhide [\#3844](https://github.com/kokkos/kokkos/pull/3844)
- OpenMPTarget: Block unit tests that do not pass with the nvidia compiler [\#3839](https://github.com/kokkos/kokkos/pull/3839)
- Enable Bitset container test for SYCL [\#3830](https://github.com/kokkos/kokkos/pull/3830)
- Enable some more SYCL tests [\#3744](https://github.com/kokkos/kokkos/pull/3744)
- Enable SYCL atomic tests [\#3742](https://github.com/kokkos/kokkos/pull/3742)
- Enable more SYCL perf_tests [\#3692](https://github.com/kokkos/kokkos/pull/3692)
- Enable examples for SYCL [\#3691](https://github.com/kokkos/kokkos/pull/3691)
## [3.3.01](https://github.com/kokkos/kokkos/tree/3.3.01) (2021-01-06)
[Full Changelog](https://github.com/kokkos/kokkos/compare/3.3.00...3.3.01)

View File

@ -72,7 +72,7 @@ ENDFUNCTION()
LIST(APPEND CMAKE_MODULE_PATH cmake/Modules)
IF(NOT KOKKOS_HAS_TRILINOS)
cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
set(CMAKE_DISABLE_SOURCE_CHANGES ON)
set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
IF (Spack_WORKAROUND)
@ -111,21 +111,19 @@ ENDIF()
set(Kokkos_VERSION_MAJOR 3)
set(Kokkos_VERSION_MINOR 3)
set(Kokkos_VERSION_PATCH 1)
set(Kokkos_VERSION_MINOR 4)
set(Kokkos_VERSION_PATCH 00)
set(Kokkos_VERSION "${Kokkos_VERSION_MAJOR}.${Kokkos_VERSION_MINOR}.${Kokkos_VERSION_PATCH}")
math(EXPR KOKKOS_VERSION "${Kokkos_VERSION_MAJOR} * 10000 + ${Kokkos_VERSION_MINOR} * 100 + ${Kokkos_VERSION_PATCH}")
IF(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
MESSAGE(STATUS "Setting policy CMP0074 to use <Package>_ROOT variables")
CMAKE_POLICY(SET CMP0074 NEW)
ENDIF()
# Load either the real TriBITS or a TriBITS wrapper
# for certain utility functions that are universal (like GLOBAL_SET)
INCLUDE(${KOKKOS_SRC_PATH}/cmake/fake_tribits.cmake)
IF (Kokkos_ENABLE_CUDA AND ${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14.0")
IF (Kokkos_ENABLE_CUDA)
# If we are building CUDA, we have tricked CMake because we declare a CXX project
# If the default C++ standard for a given compiler matches the requested
# standard, then CMake just omits the -std flag in later versions of CMake
@ -139,15 +137,19 @@ ENDIF()
# I really wish these were regular variables
# but scoping issues can make it difficult
GLOBAL_SET(KOKKOS_COMPILE_OPTIONS)
GLOBAL_SET(KOKKOS_LINK_OPTIONS -DKOKKOS_DEPENDENCE)
GLOBAL_SET(KOKKOS_LINK_OPTIONS)
GLOBAL_SET(KOKKOS_CUDA_OPTIONS)
GLOBAL_SET(KOKKOS_CUDAFE_OPTIONS)
GLOBAL_SET(KOKKOS_XCOMPILER_OPTIONS)
# We need to append text here for making sure TPLs
# we import are available for an installed Kokkos
GLOBAL_SET(KOKKOS_TPL_EXPORTS)
# this could probably be scoped to project
# KOKKOS_DEPENDENCE is used by kokkos_launch_compiler
GLOBAL_SET(KOKKOS_COMPILE_DEFINITIONS KOKKOS_DEPENDENCE)
# MSVC never goes through kokkos_launch_compiler
IF(NOT MSVC)
GLOBAL_APPEND(KOKKOS_LINK_OPTIONS -DKOKKOS_DEPENDENCE)
ENDIF()
# Include a set of Kokkos-specific wrapper functions that
# will either call raw CMake or TriBITS

View File

@ -11,8 +11,8 @@ CXXFLAGS += $(SHFLAGS)
endif
KOKKOS_VERSION_MAJOR = 3
KOKKOS_VERSION_MINOR = 3
KOKKOS_VERSION_PATCH = 1
KOKKOS_VERSION_MINOR = 4
KOKKOS_VERSION_PATCH = 00
KOKKOS_VERSION = $(shell echo $(KOKKOS_VERSION_MAJOR)*10000+$(KOKKOS_VERSION_MINOR)*100+$(KOKKOS_VERSION_PATCH) | bc)
# Options: Cuda,HIP,OpenMP,Pthread,Serial
@ -20,7 +20,7 @@ KOKKOS_DEVICES ?= "OpenMP"
#KOKKOS_DEVICES ?= "Pthread"
# Options:
# Intel: KNC,KNL,SNB,HSW,BDW,SKX
# NVIDIA: Kepler,Kepler30,Kepler32,Kepler35,Kepler37,Maxwell,Maxwell50,Maxwell52,Maxwell53,Pascal60,Pascal61,Volta70,Volta72,Turing75,Ampere80
# NVIDIA: Kepler,Kepler30,Kepler32,Kepler35,Kepler37,Maxwell,Maxwell50,Maxwell52,Maxwell53,Pascal60,Pascal61,Volta70,Volta72,Turing75,Ampere80,Ampere86
# ARM: ARMv80,ARMv81,ARMv8-ThunderX,ARMv8-TX2,A64FX
# IBM: BGQ,Power7,Power8,Power9
# AMD-GPUS: Vega900,Vega906,Vega908
@ -164,17 +164,17 @@ KOKKOS_INTERNAL_OS_DARWIN := $(call kokkos_has_string,$(KOKKOS_OS),Darwin)
KOKKOS_CXX_VERSION := $(strip $(shell $(CXX) --version 2>&1))
KOKKOS_INTERNAL_COMPILER_INTEL := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),Intel Corporation)
KOKKOS_INTERNAL_COMPILER_PGI := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),PGI)
KOKKOS_INTERNAL_COMPILER_XL := $(strip $(shell $(CXX) -qversion 2>&1 | grep XL | wc -l))
KOKKOS_INTERNAL_COMPILER_CRAY := $(strip $(shell $(CXX) -craype-verbose 2>&1 | grep "CC-" | wc -l))
KOKKOS_INTERNAL_COMPILER_NVCC := $(strip $(shell echo "$(shell export OMPI_CXX=$(OMPI_CXX); export MPICH_CXX=$(MPICH_CXX); $(CXX) --version 2>&1 | grep nvcc | wc -l)>0" | bc))
KOKKOS_INTERNAL_COMPILER_XL := $(strip $(shell $(CXX) -qversion 2>&1 | grep -c XL))
KOKKOS_INTERNAL_COMPILER_CRAY := $(strip $(shell $(CXX) -craype-verbose 2>&1 | grep -c "CC-"))
KOKKOS_INTERNAL_COMPILER_NVCC := $(strip $(shell echo "$(shell export OMPI_CXX=$(OMPI_CXX); export MPICH_CXX=$(MPICH_CXX); $(CXX) --version 2>&1 | grep -c nvcc)>0" | bc))
KOKKOS_INTERNAL_COMPILER_CLANG := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),clang)
KOKKOS_INTERNAL_COMPILER_APPLE_CLANG := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),Apple LLVM)
KOKKOS_INTERNAL_COMPILER_APPLE_CLANG := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),Apple clang)
KOKKOS_INTERNAL_COMPILER_HCC := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),HCC)
KOKKOS_INTERNAL_COMPILER_GCC := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),GCC)
# Check Host Compiler if using NVCC through nvcc_wrapper
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVCC), 1)
KOKKOS_INTERNAL_COMPILER_NVCC_WRAPPER := $(strip $(shell echo $(CXX) | grep nvcc_wrapper | wc -l))
KOKKOS_INTERNAL_COMPILER_NVCC_WRAPPER := $(strip $(shell echo $(CXX) | grep -c nvcc_wrapper))
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVCC_WRAPPER), 1)
KOKKOS_CXX_HOST_VERSION := $(strip $(shell $(CXX) $(CXXFLAGS) --host-version 2>&1))
@ -297,11 +297,11 @@ else
#KOKKOS_INTERNAL_CXX1Z_FLAG := -hstd=c++1z
#KOKKOS_INTERNAL_CXX2A_FLAG := -hstd=c++2a
else
KOKKOS_INTERNAL_CXX14_FLAG := --std=c++14
KOKKOS_INTERNAL_CXX1Y_FLAG := --std=c++1y
KOKKOS_INTERNAL_CXX17_FLAG := --std=c++17
KOKKOS_INTERNAL_CXX1Z_FLAG := --std=c++1z
KOKKOS_INTERNAL_CXX2A_FLAG := --std=c++2a
KOKKOS_INTERNAL_CXX14_FLAG := -std=c++14
KOKKOS_INTERNAL_CXX1Y_FLAG := -std=c++1y
KOKKOS_INTERNAL_CXX17_FLAG := -std=c++17
KOKKOS_INTERNAL_CXX1Z_FLAG := -std=c++1z
KOKKOS_INTERNAL_CXX2A_FLAG := -std=c++2a
endif
endif
endif
@ -332,6 +332,7 @@ KOKKOS_INTERNAL_USE_ARCH_VOLTA70 := $(call kokkos_has_string,$(KOKKOS_ARCH),Volt
KOKKOS_INTERNAL_USE_ARCH_VOLTA72 := $(call kokkos_has_string,$(KOKKOS_ARCH),Volta72)
KOKKOS_INTERNAL_USE_ARCH_TURING75 := $(call kokkos_has_string,$(KOKKOS_ARCH),Turing75)
KOKKOS_INTERNAL_USE_ARCH_AMPERE80 := $(call kokkos_has_string,$(KOKKOS_ARCH),Ampere80)
KOKKOS_INTERNAL_USE_ARCH_AMPERE86 := $(call kokkos_has_string,$(KOKKOS_ARCH),Ampere86)
KOKKOS_INTERNAL_USE_ARCH_NVIDIA := $(shell expr $(KOKKOS_INTERNAL_USE_ARCH_KEPLER30) \
+ $(KOKKOS_INTERNAL_USE_ARCH_KEPLER32) \
+ $(KOKKOS_INTERNAL_USE_ARCH_KEPLER35) \
@ -344,7 +345,8 @@ KOKKOS_INTERNAL_USE_ARCH_NVIDIA := $(shell expr $(KOKKOS_INTERNAL_USE_ARCH_KEPLE
+ $(KOKKOS_INTERNAL_USE_ARCH_VOLTA70) \
+ $(KOKKOS_INTERNAL_USE_ARCH_VOLTA72) \
+ $(KOKKOS_INTERNAL_USE_ARCH_TURING75) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMPERE80))
+ $(KOKKOS_INTERNAL_USE_ARCH_AMPERE80) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMPERE86))
#SEK: This seems like a bug to me
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_NVIDIA), 0)
@ -585,10 +587,10 @@ ifeq ($(KOKKOS_INTERNAL_ENABLE_PROFILING_LOAD_PRINT), 1)
endif
ifeq ($(KOKKOS_INTERNAL_ENABLE_TUNING), 1)
tmp := $(call kokkos_append_header,"\#define KOKKOS_ENABLE_TUNING")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_TUNING")
endif
tmp := $(call kokkos_append_header,"\#define KOKKOS_ENABLE_LIBDL")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_LIBDL")
ifeq ($(KOKKOS_INTERNAL_USE_HWLOC), 1)
ifneq ($(KOKKOS_CMAKE), yes)
@ -752,6 +754,14 @@ ifeq ($(KOKKOS_INTERNAL_USE_ARCH_A64FX), 1)
KOKKOS_CXXFLAGS += -march=armv8.2-a+sve
KOKKOS_LDFLAGS += -march=armv8.2-a+sve
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
KOKKOS_CXXFLAGS += -msve-vector-bits=512
KOKKOS_LDFLAGS += -msve-vector-bits=512
endif
ifeq ($(KOKKOS_INTERNAL_COMPILER_GCC), 1)
KOKKOS_CXXFLAGS += -msve-vector-bits=512
KOKKOS_LDFLAGS += -msve-vector-bits=512
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_ZEN), 1)
@ -1100,6 +1110,11 @@ ifeq ($(KOKKOS_INTERNAL_USE_CUDA_ARCH), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE80")
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_80
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMPERE86), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE86")
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_86
endif
ifneq ($(KOKKOS_INTERNAL_USE_ARCH_NVIDIA), 0)
KOKKOS_CXXFLAGS += $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)
@ -1159,7 +1174,7 @@ endif
KOKKOS_INTERNAL_LS_CONFIG := $(shell ls KokkosCore_config.h 2>&1)
ifeq ($(KOKKOS_INTERNAL_LS_CONFIG), KokkosCore_config.h)
KOKKOS_INTERNAL_NEW_CONFIG := $(strip $(shell diff KokkosCore_config.h KokkosCore_config.tmp | grep define | wc -l))
KOKKOS_INTERNAL_NEW_CONFIG := $(strip $(shell diff KokkosCore_config.h KokkosCore_config.tmp | grep -c define))
else
KOKKOS_INTERNAL_NEW_CONFIG := 1
endif
@ -1181,41 +1196,41 @@ tmp := $(call kokkos_update_config_header, KOKKOS_SETUP_HPP_, "KokkosCore_Config
tmp := $(call kokkos_update_config_header, KOKKOS_DECLARE_HPP_, "KokkosCore_Config_DeclareBackend.tmp", "KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_update_config_header, KOKKOS_POST_INCLUDE_HPP_, "KokkosCore_Config_PostInclude.tmp", "KokkosCore_Config_PostInclude.hpp")
ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_CUDA.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_CUDA.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <setup/Kokkos_Setup_Cuda.hpp>","KokkosCore_Config_SetupBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_CUDA.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_CUDA.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <setup/Kokkos_Setup_Cuda.hpp>","KokkosCore_Config_SetupBackend.hpp")
ifeq ($(KOKKOS_INTERNAL_CUDA_USE_UVM), 1)
else
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_OPENMPTARGET.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_OPENMPTARGET.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_OPENMPTARGET.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_OPENMPTARGET.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_HIP.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_HIP.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <setup/Kokkos_Setup_HIP.hpp>","KokkosCore_Config_SetupBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_HIP.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_HIP.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <setup/Kokkos_Setup_HIP.hpp>","KokkosCore_Config_SetupBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMP), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_OPENMP.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_OPENMP.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_OPENMP.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_OPENMP.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_PTHREADS), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_THREADS.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_THREADS.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_THREADS.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_THREADS.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_HPX), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_HPX.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_HPX.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_HPX.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_HPX.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_SERIAL), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_SERIAL.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_SERIAL.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_SERIAL.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_SERIAL.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_MEMKIND), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_HBWSpace.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_HBWSpace.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <fwd/Kokkos_Fwd_HBWSpace.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"$H""include <decl/Kokkos_Declare_HBWSpace.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/impl/*.hpp)
@ -1334,7 +1349,7 @@ ifneq ($(KOKKOS_INTERNAL_USE_SERIAL), 1)
endif
# With Cygwin functions such as fdopen and fileno are not defined
# when strict ansi is enabled. strict ansi gets enabled with --std=c++14
# when strict ansi is enabled. strict ansi gets enabled with -std=c++14
# though. So we hard undefine it here. Not sure if that has any bad side effects
# This is needed for gtest actually, not for Kokkos itself!
ifeq ($(KOKKOS_INTERNAL_OS_CYGWIN), 1)

View File

@ -36,6 +36,8 @@ Kokkos_MemorySpace.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_MemorySpace.cpp
Kokkos_HostSpace_deepcopy.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_HostSpace_deepcopy.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_HostSpace_deepcopy.cpp
Kokkos_NumericTraits.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_NumericTraits.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_NumericTraits.cpp
ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
Kokkos_Cuda_Instance.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Cuda/Kokkos_Cuda_Instance.cpp

View File

@ -668,6 +668,25 @@ struct Random_UniqueIndex<Kokkos::Experimental::HIP> {
};
#endif
#ifdef KOKKOS_ENABLE_SYCL
template <>
struct Random_UniqueIndex<Kokkos::Experimental::SYCL> {
using locks_view_type = View<int*, Kokkos::Experimental::SYCL>;
KOKKOS_FUNCTION
static int get_state_idx(const locks_view_type& locks_) {
#ifdef KOKKOS_ARCH_INTEL_GEN
int i = Kokkos::Impl::clock_tic() % locks_.extent(0);
#else
int i = 0;
#endif
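// Spin over the lock array until a free generator state is claimed: the
// compare-exchange flips locks_(i) from 0 to 1 and returns the old value,
// so the loop advances (modulo the pool size) while the slot is taken.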
while (Kokkos::atomic_compare_exchange(&locks_(i), 0, 1)) {
i = (i + 1) % static_cast<int>(locks_.extent(0));
}
return i;
}
};
#endif
} // namespace Impl
template <class DeviceType>
@ -1028,7 +1047,7 @@ class Random_XorShift1024 {
KOKKOS_INLINE_FUNCTION
double drand(const double& start, const double& end) {
return frand(end - start) + start;
return drand(end - start) + start;
}
// Marsaglia polar method for drawing a standard normal distributed random

View File

@ -3,6 +3,7 @@
KOKKOS_INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
KOKKOS_INCLUDE_DIRECTORIES(REQUIRED_DURING_INSTALLATION_TESTING ${CMAKE_CURRENT_SOURCE_DIR})
KOKKOS_INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../src )
KOKKOS_INCLUDE_DIRECTORIES(${KOKKOS_SOURCE_DIR}/core/unit_test/category_files)
SET(GTEST_SOURCE_DIR ${${PARENT_PACKAGE_NAME}_SOURCE_DIR}/tpls/gtest)
@ -25,7 +26,7 @@ KOKKOS_ADD_TEST_LIBRARY(
TARGET_COMPILE_DEFINITIONS(kokkosalgorithms_gtest PUBLIC GTEST_HAS_TR1_TUPLE=0 GTEST_HAS_PTHREAD=0)
IF((NOT (Kokkos_ENABLE_CUDA AND WIN32)) AND (NOT ("${KOKKOS_CXX_COMPILER_ID}" STREQUAL "Fujitsu")))
TARGET_COMPILE_FEATURES(kokkosalgorithms_gtest PUBLIC cxx_std_11)
TARGET_COMPILE_FEATURES(kokkosalgorithms_gtest PUBLIC cxx_std_14)
ENDIF()
# Suppress clang-tidy diagnostics on code that we do not have control over
@ -33,51 +34,42 @@ IF(CMAKE_CXX_CLANG_TIDY)
SET_TARGET_PROPERTIES(kokkosalgorithms_gtest PROPERTIES CXX_CLANG_TIDY "")
ENDIF()
SET(SOURCES
UnitTestMain.cpp
)
SET(ALGORITHM UnitTestMain.cpp)
IF(Kokkos_ENABLE_OPENMP)
LIST( APPEND SOURCES
TestOpenMP.cpp
LIST(APPEND ALGORITHM_SOURCES
TestOpenMP_Sort1D.cpp
TestOpenMP_Sort3D.cpp
TestOpenMP_SortDynamicView.cpp
TestOpenMP_Random.cpp
)
ENDIF()
IF(Kokkos_ENABLE_HIP)
LIST( APPEND SOURCES
TestHIP.cpp
)
ENDIF()
foreach(Tag Threads;Serial;OpenMP;Cuda;HPX;HIP;SYCL)
# Because there is always an exception to the rule
if(Tag STREQUAL "Threads")
set(DEVICE "PTHREAD")
else()
string(TOUPPER ${Tag} DEVICE)
endif()
IF(Kokkos_ENABLE_CUDA)
LIST( APPEND SOURCES
TestCuda.cpp
if(Kokkos_ENABLE_${DEVICE})
set(dir ${CMAKE_CURRENT_BINARY_DIR})
set(file ${dir}/Test${Tag}.cpp)
# Write to a temporary intermediate file and call configure_file to avoid
# updating timestamps triggering unnecessary rebuilds on subsequent cmake runs.
file(WRITE ${dir}/dummy.cpp
"#include <Test${Tag}_Category.hpp>\n"
"#include <TestRandomCommon.hpp>\n"
"#include <TestSortCommon.hpp>\n"
)
ENDIF()
IF(Kokkos_ENABLE_HPX)
LIST( APPEND SOURCES
TestHPX.cpp
)
ENDIF()
IF(Kokkos_ENABLE_SERIAL)
LIST( APPEND SOURCES
TestSerial.cpp
)
ENDIF()
IF(Kokkos_ENABLE_PTHREAD)
LIST( APPEND SOURCES
TestThreads.cpp
)
ENDIF()
configure_file(${dir}/dummy.cpp ${file})
list(APPEND ALGORITHM_SOURCES ${file})
endif()
endforeach()
KOKKOS_ADD_EXECUTABLE_AND_TEST(
UnitTest
SOURCES ${SOURCES}
SOURCES
UnitTestMain.cpp
${ALGORITHM_SOURCES}
)

View File

@ -20,11 +20,19 @@ override LDFLAGS += -lpthread
include $(KOKKOS_PATH)/Makefile.kokkos
KOKKOS_CXXFLAGS += -I$(GTEST_PATH) -I${KOKKOS_PATH}/algorithms/unit_tests
KOKKOS_CXXFLAGS += -I$(GTEST_PATH) -I${KOKKOS_PATH}/algorithms/unit_tests -I${KOKKOS_PATH}/core/unit_test/category_files
TEST_TARGETS =
TARGETS =
tmp := $(foreach device, $(KOKKOS_DEVICELIST), \
$(if $(filter Test$(device).cpp, $(shell ls Test$(device).cpp 2>/dev/null)),,\
$(shell echo "\#include <Test"${device}"_Category.hpp>" > Test$(device).cpp); \
$(shell echo "\#include <TestRandomCommon.hpp>" >> Test$(device).cpp); \
$(shell echo "\#include <TestSortCommon.hpp>" >> Test$(device).cpp); \
) \
)
ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
OBJ_CUDA = TestCuda.o UnitTestMain.o gtest-all.o
TARGETS += KokkosAlgorithms_UnitTest_Cuda
@ -44,7 +52,7 @@ ifeq ($(KOKKOS_INTERNAL_USE_PTHREADS), 1)
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMP), 1)
OBJ_OPENMP = TestOpenMP.o TestOpenMP_Random.o TestOpenMP_Sort1D.o TestOpenMP_Sort3D.o TestOpenMP_SortDynamicView.o UnitTestMain.o gtest-all.o
OBJ_OPENMP = TestOpenMP.o TestOpenMP_Sort1D.o TestOpenMP_Sort3D.o TestOpenMP_SortDynamicView.o UnitTestMain.o gtest-all.o
TARGETS += KokkosAlgorithms_UnitTest_OpenMP
TEST_TARGETS += test-openmp
endif

View File

@ -59,6 +59,8 @@ TEST(openmp, SortUnsigned1D) {
Impl::test_1D_sort<Kokkos::OpenMP, unsigned>(171);
}
TEST(openmp, SortIssue1160) { Impl::test_issue_1160_sort<Kokkos::OpenMP>(); }
} // namespace Test
#else
void KOKKOS_ALGORITHMS_UNITTESTS_TESTOPENMP_PREVENT_LINK_ERROR() {}

View File

@ -491,6 +491,34 @@ void test_random(unsigned int num_draws) {
}
} // namespace Impl
template <typename ExecutionSpace>
void test_random_xorshift64() {
#if defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_CUDA) || \
defined(KOKKOS_ENABLE_HIP)
const int num_draws = 132141141;
#else // SERIAL, HPX, OPENMP
const int num_draws = 10240000;
#endif
Impl::test_random<Kokkos::Random_XorShift64_Pool<ExecutionSpace>>(num_draws);
Impl::test_random<Kokkos::Random_XorShift64_Pool<
Kokkos::Device<ExecutionSpace, typename ExecutionSpace::memory_space>>>(
num_draws);
}
template <typename ExecutionSpace>
void test_random_xorshift1024() {
#if defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_CUDA) || \
defined(KOKKOS_ENABLE_HIP)
const int num_draws = 52428813;
#else // SERIAL, HPX, OPENMP
const int num_draws = 10130144;
#endif
Impl::test_random<Kokkos::Random_XorShift1024_Pool<ExecutionSpace>>(
num_draws);
Impl::test_random<Kokkos::Random_XorShift1024_Pool<
Kokkos::Device<ExecutionSpace, typename ExecutionSpace::memory_space>>>(
num_draws);
}
} // namespace Test
#endif // KOKKOS_TEST_UNORDERED_MAP_HPP

View File

@ -0,0 +1,60 @@
/*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#ifndef KOKKOS_ALGORITHMS_UNITTESTS_TESTRANDOM_COMMON_HPP
#define KOKKOS_ALGORITHMS_UNITTESTS_TESTRANDOM_COMMON_HPP
#include <TestRandom.hpp>
namespace Test {
TEST(TEST_CATEGORY, Random_XorShift64) {
test_random_xorshift64<TEST_EXECSPACE>();
}
TEST(TEST_CATEGORY, Random_XorShift1024_0) {
test_random_xorshift1024<TEST_EXECSPACE>();
}
} // namespace Test
#endif

View File

@ -42,10 +42,14 @@
//@HEADER
*/
#ifndef KOKKOS_TEST_HIP_HPP
#define KOKKOS_TEST_HIP_HPP
#ifndef KOKKOS_ALGORITHMS_UNITTESTS_TESTSORT_COMMON_HPP
#define KOKKOS_ALGORITHMS_UNITTESTS_TESTSORT_COMMON_HPP
#define TEST_CATEGORY hip
#define TEST_EXECSPACE Kokkos::Experimental::HIP
#include <TestSort.hpp>
namespace Test {
TEST(TEST_CATEGORY, SortUnsigned) {
Impl::test_sort<TEST_EXECSPACE, unsigned>(171);
}
} // namespace Test
#endif

View File

@ -3,8 +3,4 @@ image:
clone_folder: c:\projects\source
build_script:
- cmd: >-
mkdir build &&
cd build &&
cmake c:\projects\source -DKokkos_ENABLE_TESTS=ON &&
cmake --build . --target install &&
ctest -C Debug -V
cmake c:\projects\source -DKokkos_ENABLE_TESTS=ON -DCMAKE_CXX_FLAGS="/W0 /EHsc /d1reportClassLayoutChanges" -DCTEST_ARGS="-C Debug -V --output-on-failure" -DBUILD_NAME=MSVC-2019 -DBUILD_TYPE=Debug -DSITE=AppVeyor -DTARGET=install -P cmake/KokkosCI.cmake

View File

@ -13,6 +13,17 @@
# $1 are 'ar', 'cmake', etc. during the linking phase
#
# emit a message about the underlying command executed
: ${DEBUG:=0}
: ${KOKKOS_DEBUG_LAUNCH_COMPILER:=${DEBUG}}
debug-message()
{
if [ "${KOKKOS_DEBUG_LAUNCH_COMPILER}" -ne 0 ]; then
echo -e "##### $(basename ${BASH_SOURCE[0]}) executing: \"$@\"... #####"
fi
}
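# Setting DEBUG=1 (or KOKKOS_DEBUG_LAUNCH_COMPILER=1) in the build environment
# prints the command actually executed before each compile/link redirection.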
# check the arguments for the KOKKOS_DEPENDENCE compiler definition
KOKKOS_DEPENDENCE=0
for i in ${@}
@ -23,16 +34,30 @@ do
fi
done
# if C++ is not passed, someone is probably trying to invoke it directly
# if Kokkos compiler is not passed, someone is probably trying to invoke it directly
if [ -z "${1}" ]; then
echo -e "\n${BASH_SOURCE[0]} was invoked without the C++ compiler as the first argument."
echo -e "\n${BASH_SOURCE[0]} was invoked without the Kokkos compiler as the first argument."
echo "This script is not indended to be directly invoked by any mechanism other"
echo -e "than through a RULE_LAUNCH_COMPILE or RULE_LAUNCH_LINK property set in CMake\n"
echo -e "than through a RULE_LAUNCH_COMPILE or RULE_LAUNCH_LINK property set in CMake.\n"
exit 1
fi
# if Kokkos compiler is not passed, someone is probably trying to invoke it directly
if [ -z "${2}" ]; then
echo -e "\n${BASH_SOURCE[0]} was invoked without the C++ compiler as the second argument."
echo "This script is not indended to be directly invoked by any mechanism other"
echo -e "than through a RULE_LAUNCH_COMPILE or RULE_LAUNCH_LINK property set in CMake.\n"
exit 1
fi
# if there aren't two args, this isn't necessarily invalid, just a bit strange
if [ -z "${2}" ]; then exit 0; fi
if [ -z "${3}" ]; then exit 0; fi
# store the Kokkos compiler
KOKKOS_COMPILER=${1}
# remove the Kokkos compiler from the arguments
shift
# store the expected C++ compiler
CXX_COMPILER=${1}
@ -40,48 +65,57 @@ CXX_COMPILER=${1}
# remove the expected C++ compiler from the arguments
shift
# after the above shift, $1 is now the exe for the compile or link command, e.g.
# kokkos_launch_compiler g++ gcc -c file.c -o file.o
# NOTE: in below, ${KOKKOS_COMPILER} is usually nvcc_wrapper
#
# after the above shifts, $1 is now the exe for the compile or link command, e.g.
# kokkos_launch_compiler ${KOKKOS_COMPILER} g++ gcc -c file.c -o file.o
# becomes:
# kokkos_launch_compiler gcc -c file.c -o file.o
# Check to see if the executable is the C++ compiler and if it is not, then
# We check to see if the executable is the C++ compiler and if it is not, then
# just execute the command.
#
# Summary:
# kokkos_launch_compiler g++ gcc -c file.c -o file.o
# kokkos_launch_compiler ${KOKKOS_COMPILER} g++ gcc -c file.c -o file.o
# results in this command being executed:
# gcc -c file.c -o file.o
# and
# kokkos_launch_compiler g++ g++ -c file.cpp -o file.o
# kokkos_launch_compiler ${KOKKOS_COMPILER} g++ g++ -c file.cpp -o file.o
# results in this command being executed:
# nvcc_wrapper -c file.cpp -o file.o
# ${KOKKOS_COMPILER} -c file.cpp -o file.o
if [[ "${KOKKOS_DEPENDENCE}" -eq "0" || "${CXX_COMPILER}" != "${1}" ]]; then
# the command does not depend on Kokkos so just execute the command w/o re-directing to nvcc_wrapper
debug-message $@
# the command does not depend on Kokkos so just execute the command w/o re-directing to ${KOKKOS_COMPILER}
eval $@
else
# the executable is the C++ compiler, so we need to re-direct to nvcc_wrapper
# the executable is the C++ compiler, so we need to re-direct to ${KOKKOS_COMPILER}
if [ ! -f "${KOKKOS_COMPILER}" ]; then
echo -e "\nError: the compiler redirect for Kokkos was not found at ${KOKKOS_COMPILER}\n"
exit 1
fi
# find the nvcc_wrapper from the same build/install
NVCC_WRAPPER="$(dirname ${BASH_SOURCE[0]})/nvcc_wrapper"
if [ -z "${NVCC_WRAPPER}" ]; then
echo -e "\nError: nvcc_wrapper not found in $(dirname ${BASH_SOURCE[0]}).\n"
exit 1
if [ "${KOKKOS_COMPILER}" = "${NVCC_WRAPPER}" ]; then
# this should only be valid in the install tree -- it will be set to the CMAKE_CXX_COMPILER used during the Kokkos installation
if [ -z $(echo "@NVCC_WRAPPER_DEFAULT_COMPILER@" | grep 'NVCC_WRAPPER_DEFAULT_COMPILER') ]; then
: ${NVCC_WRAPPER_DEFAULT_COMPILER:="@NVCC_WRAPPER_DEFAULT_COMPILER@"}
fi
# set default nvcc wrapper compiler if not specified
: ${NVCC_WRAPPER_DEFAULT_COMPILER:=${CXX_COMPILER}}
export NVCC_WRAPPER_DEFAULT_COMPILER
# calling itself will cause an infinitely long build
# nvcc_wrapper calling itself will cause an infinitely long build
if [ "${NVCC_WRAPPER}" = "${NVCC_WRAPPER_DEFAULT_COMPILER}" ]; then
echo -e "\nError: NVCC_WRAPPER == NVCC_WRAPPER_DEFAULT_COMPILER. Terminating to avoid infinite loop!\n"
exit 1
fi
fi
# discard the compiler from the command
shift
# execute nvcc_wrapper
${NVCC_WRAPPER} $@
debug-message ${KOKKOS_COMPILER} $@
# execute ${KOKKOS_COMPILER} (again, usually nvcc_wrapper)
${KOKKOS_COMPILER} $@
fi
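For orientation, the first two arguments the launcher strips off (the Kokkos compiler and the expected C++ compiler) are supplied by CMake's compile/link launch rules. A minimal sketch of that wiring, with a purely hypothetical install prefix (the real properties are generated by the kokkos_compilation() function shown further below):
# hypothetical locations -- in practice these come from the Kokkos install
set(_launcher "/opt/kokkos/bin/kokkos_launch_compiler")
set(_kokkos_cxx "/opt/kokkos/bin/nvcc_wrapper")
# every compile and link line is prefixed, so the launcher sees:
#   kokkos_launch_compiler <kokkos compiler> <expected C++ compiler> <original command ...>
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${_launcher} ${_kokkos_cxx} ${CMAKE_CXX_COMPILER}")
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK    "${_launcher} ${_kokkos_cxx} ${CMAKE_CXX_COMPILER}")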

View File

@ -191,11 +191,11 @@ do
shift
;;
#Handle known nvcc args
--dryrun|--verbose|--keep|--keep-dir*|-G|--relocatable-device-code*|-lineinfo|-expt-extended-lambda|-expt-relaxed-constexpr|--resource-usage|-Xptxas*|--fmad*|--Wext-lambda-captures-this|-Wext-lambda-captures-this)
--dryrun|--verbose|--keep|--keep-dir*|-G|--relocatable-device-code*|-lineinfo|-expt-extended-lambda|-expt-relaxed-constexpr|--resource-usage|-Xptxas*|--fmad*|--use_fast_math|--Wext-lambda-captures-this|-Wext-lambda-captures-this)
cuda_args="$cuda_args $1"
;;
#Handle more known nvcc args
--expt-extended-lambda|--expt-relaxed-constexpr)
--expt-extended-lambda|--expt-relaxed-constexpr|--Wno-deprecated-gpu-targets|-Wno-deprecated-gpu-targets)
cuda_args="$cuda_args $1"
;;
#Handle known nvcc args that have an argument

View File

@ -0,0 +1,91 @@
#----------------------------------------------------------------------------------------#
#
# CTestConfig.cmake template for Kokkos
#
#----------------------------------------------------------------------------------------#
#
# dash-board related
#
set(CTEST_PROJECT_NAME "Kokkos")
set(CTEST_NIGHTLY_START_TIME "01:00:00 UTC")
set(CTEST_DROP_METHOD "https")
set(CTEST_DROP_SITE "cdash.nersc.gov")
set(CTEST_DROP_LOCATION "/submit.php?project=${CTEST_PROJECT_NAME}")
set(CTEST_CDASH_VERSION "1.6")
set(CTEST_CDASH_QUERY_VERSION TRUE)
set(CTEST_SUBMIT_RETRY_COUNT "1")
set(CTEST_SUBMIT_RETRY_DELAY "30")
#
# configure/build related
#
set(CTEST_BUILD_NAME "@BUILD_NAME@")
set(CTEST_MODEL "@MODEL@")
set(CTEST_SITE "@SITE@")
set(CTEST_CONFIGURATION_TYPE "@BUILD_TYPE@")
set(CTEST_SOURCE_DIRECTORY "@SOURCE_REALDIR@")
set(CTEST_BINARY_DIRECTORY "@BINARY_REALDIR@")
#
# configure/build related
#
set(CTEST_UPDATE_TYPE "git")
set(CTEST_UPDATE_VERSION_ONLY ON)
# set(CTEST_GENERATOR "")
# set(CTEST_GENERATOR_PLATFORM "")
#
# testing related
#
set(CTEST_TIMEOUT "7200")
set(CTEST_TEST_TIMEOUT "7200")
set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "100")
set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "100")
set(CTEST_CUSTOM_MAXIMUM_PASSED_TEST_OUTPUT_SIZE "1048576")
#
# coverage related
#
set(CTEST_CUSTOM_COVERAGE_EXCLUDE ".*tpls/.*;/usr/.*;.*unit_test/.*;.*unit_tests/.*;.*perf_test/.*")
#
# commands
#
if(NOT "@CHECKOUT_COMMAND@" STREQUAL "")
set(CTEST_CHECKOUT_COMMAND "@CHECKOUT_COMMAND@")
endif()
set(CTEST_UPDATE_COMMAND "@GIT_EXECUTABLE@")
set(CTEST_CONFIGURE_COMMAND "@CMAKE_COMMAND@ -DCMAKE_BUILD_TYPE=@BUILD_TYPE@ -DKokkos_ENABLE_TESTS=ON @CONFIG_ARGS@ @SOURCE_REALDIR@")
set(CTEST_BUILD_COMMAND "@CMAKE_COMMAND@ --build @BINARY_REALDIR@ --target @TARGET@")
if(NOT WIN32)
set(CTEST_BUILD_COMMAND "${CTEST_BUILD_COMMAND} -- -j@BUILD_JOBS@")
endif()
set(CTEST_COVERAGE_COMMAND "gcov")
set(CTEST_MEMORYCHECK_COMMAND "valgrind")
set(CTEST_GIT_COMMAND "@GIT_EXECUTABLE@")
#
# various configs
#
set(APPEND_VALUE @APPEND@)
if(APPEND_VALUE)
set(APPEND_CTEST APPEND)
endif()
macro(SET_TEST_PROP VAR)
if(NOT "${ARGS}" STREQUAL "")
set(${VAR}_CTEST ${VAR} ${ARGN})
endif()
endmacro()
set_test_prop(START @START@)
set_test_prop(END @END@)
set_test_prop(STRIDE @STRIDE@)
set_test_prop(INCLUDE @INCLUDE@)
set_test_prop(EXCLUDE @EXCLUDE@)
set_test_prop(INCLUDE_LABEL @INCLUDE_LABEL@)
set_test_prop(EXCLUDE_LABEL @EXCLUDE_LABEL@)
set_test_prop(PARALLEL_LEVEL @PARALLEL_LEVEL@)
set_test_prop(STOP_TIME @STOP_TIME@)
set_test_prop(COVERAGE_LABELS @LABELS@)

View File

@ -0,0 +1,350 @@
cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
message(STATUS "")
get_cmake_property(_cached_vars CACHE_VARIABLES)
set(KOKKOS_CMAKE_ARGS)
set(EXCLUDED_VARIABLES "CMAKE_COMMAND" "CMAKE_CPACK_COMMAND" "CMAKE_CTEST_COMMAND" "CMAKE_ROOT"
"CTEST_ARGS" "BUILD_NAME" "CMAKE_CXX_FLAGS" "CMAKE_BUILD_TYPE")
list(SORT _cached_vars)
foreach(_var ${_cached_vars})
if(NOT "${_var}" IN_LIST EXCLUDED_VARIABLES)
list(APPEND KOKKOS_CMAKE_ARGS ${_var})
if("${_var}" STREQUAL "CMAKE_BUILD_TYPE")
set(BUILD_TYPE "${CMAKE_BUILD_TYPE}")
endif()
endif()
endforeach()
#----------------------------------------------------------------------------------------#
#
# Macros and variables
#
#----------------------------------------------------------------------------------------#
macro(CHECK_REQUIRED VAR)
if(NOT DEFINED ${VAR})
message(FATAL_ERROR "Error! Variable '${VAR}' must be defined")
endif()
endmacro()
# require the build name variable
CHECK_REQUIRED(BUILD_NAME)
# uses all args
macro(SET_DEFAULT VAR)
if(NOT DEFINED ${VAR})
set(${VAR} ${ARGN})
endif()
# remove these ctest configuration variables from the defines
# passed to the Kokkos configuration
if("${VAR}" IN_LIST KOKKOS_CMAKE_ARGS)
list(REMOVE_ITEM KOKKOS_CMAKE_ARGS "${VAR}")
endif()
endmacro()
# uses first arg -- useful for selecting via priority from multiple
# potentially defined variables, e.g.:
#
# set_default_arg1(BUILD_NAME ${TRAVIS_BUILD_NAME} ${BUILD_NAME})
#
macro(SET_DEFAULT_ARG1 VAR)
if(NOT DEFINED ${VAR})
foreach(_ARG ${ARGN})
if(NOT "${_ARG}" STREQUAL "")
set(${VAR} ${_ARG})
break()
endif()
endforeach()
endif()
# remove these ctest configuration variables from the defines
# passed to the Kokkos configuration
if("${VAR}" IN_LIST KOKKOS_CMAKE_ARGS)
list(REMOVE_ITEM KOKKOS_CMAKE_ARGS "${VAR}")
endif()
endmacro()
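Taken together, the two macros let any of the settings below be overridden on the command line while everything else is forwarded to the Kokkos configure step. A small sketch of the intended behavior, with hypothetical values:
# hypothetical invocation:  cmake -DBUILD_NAME=gcc-10-cuda -DMODEL=Nightly -P cmake/KokkosCI.cmake
SET_DEFAULT(MODEL "Continuous")        # MODEL stays "Nightly" because it was already defined via -D
SET_DEFAULT(TARGET "all")              # TARGET was not defined, so the default "all" is used
SET_DEFAULT_ARG1(BRANCH "$ENV{BRANCH}" "${GIT_BRANCH}")   # first non-empty candidate wins
# in both cases the variable is removed from KOKKOS_CMAKE_ARGS so it is not passed to the Kokkos configure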
# determine the default working directory
if(NOT "$ENV{WORKSPACE}" STREQUAL "")
set(WORKING_DIR "$ENV{WORKSPACE}")
else()
get_filename_component(WORKING_DIR ${CMAKE_CURRENT_LIST_DIR} DIRECTORY)
endif()
# determine the hostname
execute_process(COMMAND hostname
OUTPUT_VARIABLE HOSTNAME
OUTPUT_STRIP_TRAILING_WHITESPACE)
SET_DEFAULT(HOSTNAME "$ENV{HOSTNAME}")
# get the number of processors
include(ProcessorCount)
ProcessorCount(NUM_PROCESSORS)
# find git
find_package(Git QUIET)
if(NOT GIT_EXECUTABLE)
unset(GIT_EXECUTABLE CACHE)
unset(GIT_EXECUTABLE)
endif()
function(EXECUTE_GIT_COMMAND VAR)
set(${VAR} "" PARENT_SCOPE)
execute_process(COMMAND ${GIT_EXECUTABLE} ${ARGN}
OUTPUT_VARIABLE VAL
RESULT_VARIABLE RET
OUTPUT_STRIP_TRAILING_WHITESPACE
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
ERROR_QUIET)
string(REPLACE ";" " " _CMD "${GIT_EXECUTABLE} ${ARGN}")
set(LAST_GIT_COMMAND "${_CMD}" PARENT_SCOPE)
if(RET EQUAL 0)
set(${VAR} "${VAL}" PARENT_SCOPE)
endif()
endfunction()
# just gets the git branch name if available
function(GET_GIT_BRANCH_NAME VAR)
execute_git_command(GIT_BRANCH branch --show-current)
set(_INVALID "%D" "HEAD")
if(NOT GIT_BRANCH OR "${GIT_BRANCH}" IN_LIST _INVALID)
execute_git_command(GIT_BRANCH show -s --format=%D)
if(NOT GIT_BRANCH OR "${GIT_BRANCH}" IN_LIST _INVALID)
execute_git_command(GIT_BRANCH --describe all)
endif()
endif()
#
if(GIT_BRANCH)
string(REPLACE " " ";" _DESC "${GIT_BRANCH}")
# just set it to last one via loop instead of wonky cmake index manip
foreach(_ITR ${_DESC})
set(GIT_BRANCH "${_ITR}")
endforeach()
set(${VAR} "${GIT_BRANCH}" PARENT_SCOPE)
message(STATUS "GIT BRANCH via '${LAST_GIT_COMMAND}': ${GIT_BRANCH}")
endif()
endfunction()
# just gets the git author name if available
function(GET_GIT_AUTHOR_NAME VAR)
execute_git_command(GIT_AUTHOR show -s --format=%an)
if(GIT_AUTHOR)
string(LENGTH "${GIT_AUTHOR}" STRLEN)
# if the build name gets too long, this can cause submission errors
if(STRLEN GREATER 24)
# remove middle initial
string(REGEX REPLACE " [A-Z]\. " " " GIT_AUTHOR "${GIT_AUTHOR}")
# get first name and surname
string(REGEX REPLACE "([A-Za-z]+) ([A-Za-z]+)" "\\1" F_NAME "${GIT_AUTHOR}")
string(REGEX REPLACE "([A-Za-z]+) ([A-Za-z]+)" "\\2" S_NAME "${GIT_AUTHOR}")
if(S_NAME)
set(GIT_AUTHOR "${S_NAME}")
elseif(F_NAME)
set(GIT_AUTHOR "${F_NAME}")
endif()
endif()
# remove any spaces, quotes, periods, etc.
string(REGEX REPLACE "[ ',;_\.\"]+" "" GIT_AUTHOR "${GIT_AUTHOR}")
set(${VAR} "${GIT_AUTHOR}" PARENT_SCOPE)
message(STATUS "GIT AUTHOR via '${LAST_GIT_COMMAND}': ${GIT_AUTHOR}")
endif()
endfunction()
# get the name of the branch
GET_GIT_BRANCH_NAME(GIT_BRANCH)
# get the name of the author
GET_GIT_AUTHOR_NAME(GIT_AUTHOR)
# author, prefer git method for consistency
SET_DEFAULT_ARG1(AUTHOR ${GIT_AUTHOR} $ENV{GIT_AUTHOR} $ENV{AUTHOR})
# SLUG == owner_name/repo_name
SET_DEFAULT_ARG1(SLUG $ENV{TRAVIS_PULL_REQUEST_SLUG} $ENV{TRAVIS_REPO_SLUG} $ENV{APPVEYOR_REPO_NAME} $ENV{PULL_REQUEST_SLUG} $ENV{REPO_SLUG})
# branch name
SET_DEFAULT_ARG1(BRANCH $ENV{TRAVIS_PULL_REQUEST_BRANCH} $ENV{TRAVIS_BRANCH} $ENV{APPVEYOR_PULL_REQUEST_HEAD_REPO_BRANCH} $ENV{APPVEYOR_REPO_BRANCH} $ENV{GIT_BRANCH} $ENV{BRANCH_NAME} $ENV{BRANCH} ${GIT_BRANCH})
# pull request number
SET_DEFAULT_ARG1(PULL_REQUEST_NUM $ENV{TRAVIS_PULL_REQUEST} $ENV{CHANGE_ID} $ENV{APPVEYOR_PULL_REQUEST_NUMBER} $ENV{PULL_REQUEST_NUM})
# get the event type, e.g. push, pull_request, api, cron, etc.
SET_DEFAULT_ARG1(EVENT_TYPE $ENV{TRAVIS_EVENT_TYPE} ${EVENT_TYPE})
if("${BRANCH}" STREQUAL "")
message(STATUS "Checked: environment variables for Travis, Appveyor, Jenkins (git plugin), BRANCH_NAME, BRANCH and 'git branch --show-current'")
message(FATAL_ERROR "Error! Git branch could not be determined. Please provide -DBRANCH=<name>")
endif()
#----------------------------------------------------------------------------------------#
#
# Set default values if not provided on command-line
#
#----------------------------------------------------------------------------------------#
SET_DEFAULT(SOURCE_DIR "${WORKING_DIR}") # source directory
SET_DEFAULT(BINARY_DIR "${WORKING_DIR}/build") # build directory
SET_DEFAULT(BUILD_TYPE "${CMAKE_BUILD_TYPE}") # Release, Debug, etc.
SET_DEFAULT(MODEL "Continuous") # Continuous, Nightly, or Experimental
SET_DEFAULT(JOBS 1) # number of parallel ctests
SET_DEFAULT(CTEST_COMMAND "${CMAKE_CTEST_COMMAND}") # just in case
SET_DEFAULT(CTEST_ARGS "-V --output-on-failure") # extra arguments when ctest is called
SET_DEFAULT(GIT_EXECUTABLE "git") # ctest_update
SET_DEFAULT(TARGET "all") # build target
SET_DEFAULT_ARG1(SITE "$ENV{SITE}"
"${HOSTNAME}") # update site
SET_DEFAULT_ARG1(BUILD_JOBS "$ENV{BUILD_JOBS}"
"${NUM_PROCESSORS}") # number of parallel compile jobs
#
# The variables below correspond to ctest arguments, i.e. START,END,STRIDE are
# '-I START,END,STRIDE'
#
SET_DEFAULT(START "")
SET_DEFAULT(END "")
SET_DEFAULT(STRIDE "")
SET_DEFAULT(INCLUDE "")
SET_DEFAULT(EXCLUDE "")
SET_DEFAULT(INCLUDE_LABEL "")
SET_DEFAULT(EXCLUDE_LABEL "")
SET_DEFAULT(PARALLEL_LEVEL "")
SET_DEFAULT(STOP_TIME "")
SET_DEFAULT(LABELS "")
SET_DEFAULT(NOTES "")
# default static build tag for Nightly
set(BUILD_TAG "${BRANCH}")
if(NOT BUILD_TYPE)
# default for kokkos if not specified
set(BUILD_TYPE "RelWithDebInfo")
endif()
# generate dynamic name if continuous or experimental model
if(NOT "${MODEL}" STREQUAL "Nightly")
if(EVENT_TYPE AND PULL_REQUEST_NUM)
# e.g. pull_request/123
if(AUTHOR)
set(BUILD_TAG "${AUTHOR}/${EVENT_TYPE}/${PULL_REQUEST_NUM}")
else()
set(BUILD_TAG "${EVENT_TYPE}/${PULL_REQUEST_NUM}")
endif()
elseif(SLUG)
# e.g. owner_name/repo_name
set(BUILD_TAG "${SLUG}")
elseif(AUTHOR)
set(BUILD_TAG "${AUTHOR}/${BRANCH}")
endif()
if(EVENT_TYPE AND NOT PULL_REQUEST_NUM)
set(BUILD_TAG "${BUILD_TAG}-${EVENT_TYPE}")
endif()
endif()
# unnecessary
string(REPLACE "/remotes/" "/" BUILD_TAG "${BUILD_TAG}")
string(REPLACE "/origin/" "/" BUILD_TAG "${BUILD_TAG}")
message(STATUS "BUILD_TAG: ${BUILD_TAG}")
set(BUILD_NAME "[${BUILD_TAG}] [${BUILD_NAME}-${BUILD_TYPE}]")
# colons in build name create extra (empty) entries in CDash
string(REPLACE ":" "-" BUILD_NAME "${BUILD_NAME}")
# unnecessary info
string(REPLACE "/merge]" "]" BUILD_NAME "${BUILD_NAME}")
# consistency
string(REPLACE "/pr/" "/pull/" BUILD_NAME "${BUILD_NAME}")
string(REPLACE "pull_request/" "pull/" BUILD_NAME "${BUILD_NAME}")
# miscellaneous from missing fields
string(REPLACE "--" "-" BUILD_NAME "${BUILD_NAME}")
string(REPLACE "-]" "]" BUILD_NAME "${BUILD_NAME}")
# check binary directory
if(EXISTS ${BINARY_DIR})
if(NOT IS_DIRECTORY "${BINARY_DIR}")
message(FATAL_ERROR "Error! '${BINARY_DIR}' already exists and is not a directory!")
endif()
file(GLOB BINARY_DIR_FILES "${BINARY_DIR}/*")
if(NOT "${BINARY_DIR_FILES}" STREQUAL "")
message(FATAL_ERROR "Error! '${BINARY_DIR}' already exists and is not empty!")
endif()
endif()
get_filename_component(SOURCE_REALDIR ${SOURCE_DIR} REALPATH)
get_filename_component(BINARY_REALDIR ${BINARY_DIR} REALPATH)
#----------------------------------------------------------------------------------------#
#
# Generate the CTestConfig.cmake
#
#----------------------------------------------------------------------------------------#
set(CONFIG_ARGS)
foreach(_ARG ${KOKKOS_CMAKE_ARGS})
if(NOT "${${_ARG}}" STREQUAL "")
get_property(_ARG_TYPE CACHE ${_ARG} PROPERTY TYPE)
if("${_ARG_TYPE}" STREQUAL "UNINITIALIZED")
if("${${_ARG}}" STREQUAL "ON" OR "${${_ARG}}" STREQUAL "OFF")
set(_ARG_TYPE "BOOL")
elseif(EXISTS "${${_ARG}}" AND NOT IS_DIRECTORY "${${_ARG}}")
set(_ARG_TYPE "FILEPATH")
elseif(EXISTS "${${_ARG}}" AND IS_DIRECTORY "${${_ARG}}")
set(_ARG_TYPE "PATH")
elseif(NOT "${${_ARG}}" STREQUAL "")
set(_ARG_TYPE "STRING")
endif()
endif()
set(CONFIG_ARGS "${CONFIG_ARGS}set(${_ARG} \"${${_ARG}}\" CACHE ${_ARG_TYPE} \"\")\n")
endif()
endforeach()
file(WRITE ${BINARY_REALDIR}/initial-cache.cmake
"
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\" CACHE STRING \"\")
${CONFIG_ARGS}
")
file(READ ${BINARY_REALDIR}/initial-cache.cmake _CACHE_INFO)
message(STATUS "Initial cache:\n${_CACHE_INFO}")
# initialize the cache
set(CONFIG_ARGS "-C ${BINARY_REALDIR}/initial-cache.cmake")
# generate the CTestConfig.cmake
configure_file(
${CMAKE_CURRENT_LIST_DIR}/CTestConfig.cmake.in
${BINARY_REALDIR}/CTestConfig.cmake
@ONLY)
# copy/generate the dashboard script
configure_file(
${CMAKE_CURRENT_LIST_DIR}/KokkosCTest.cmake.in
${BINARY_REALDIR}/KokkosCTest.cmake
@ONLY)
# custom CTest settings go in ${BINARY_DIR}/CTestCustom.cmake
execute_process(
COMMAND ${CMAKE_COMMAND} -E touch CTestCustom.cmake
WORKING_DIRECTORY ${BINARY_REALDIR}
)
#----------------------------------------------------------------------------------------#
#
# Execute CTest
#
#----------------------------------------------------------------------------------------#
message(STATUS "")
message(STATUS "BUILD_NAME: ${BUILD_NAME}")
message(STATUS "Executing '${CTEST_COMMAND} -S KokkosCTest.cmake ${CTEST_ARGS}'...")
message(STATUS "")
# e.g. -DCTEST_ARGS="--output-on-failure -VV" should really be -DCTEST_ARGS="--output-on-failure;-VV"
string(REPLACE " " ";" CTEST_ARGS "${CTEST_ARGS}")
execute_process(
COMMAND ${CTEST_COMMAND} -S KokkosCTest.cmake ${CTEST_ARGS}
RESULT_VARIABLE RET
WORKING_DIRECTORY ${BINARY_REALDIR}
)
# ensure that any non-zero result variable gets propagated
if(NOT RET EQUAL 0)
message(FATAL_ERROR "CTest return non-zero exit code: ${RET}")
endif()

View File

@ -0,0 +1,261 @@
cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/CTestConfig.cmake")
include("${CMAKE_CURRENT_LIST_DIR}/CTestConfig.cmake")
endif()
include(ProcessorCount)
ProcessorCount(CTEST_PROCESSOR_COUNT)
cmake_policy(SET CMP0009 NEW)
cmake_policy(SET CMP0011 NEW)
# ---------------------------------------------------------------------------- #
# -- Commands
# ---------------------------------------------------------------------------- #
find_program(CTEST_CMAKE_COMMAND NAMES cmake)
find_program(CTEST_UNAME_COMMAND NAMES uname)
find_program(CTEST_BZR_COMMAND NAMES bzr)
find_program(CTEST_CVS_COMMAND NAMES cvs)
find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_HG_COMMAND NAMES hg)
find_program(CTEST_P4_COMMAND NAMES p4)
find_program(CTEST_SVN_COMMAND NAMES svn)
find_program(VALGRIND_COMMAND NAMES valgrind)
find_program(GCOV_COMMAND NAMES gcov)
find_program(LCOV_COMMAND NAMES llvm-cov)
find_program(MEMORYCHECK_COMMAND NAMES valgrind )
set(MEMORYCHECK_TYPE Valgrind)
# set(MEMORYCHECK_TYPE Purify)
# set(MEMORYCHECK_TYPE BoundsChecker)
# set(MEMORYCHECK_TYPE ThreadSanitizer)
# set(MEMORYCHECK_TYPE AddressSanitizer)
# set(MEMORYCHECK_TYPE LeakSanitizer)
# set(MEMORYCHECK_TYPE MemorySanitizer)
# set(MEMORYCHECK_TYPE UndefinedBehaviorSanitizer)
set(MEMORYCHECK_COMMAND_OPTIONS "--trace-children=yes --leak-check=full")
# ---------------------------------------------------------------------------- #
# -- Settings
# ---------------------------------------------------------------------------- #
## -- Process timeout in seconds
set(CTEST_TIMEOUT "7200")
## -- Set output to English
set(ENV{LC_MESSAGES} "en_EN" )
# ---------------------------------------------------------------------------- #
# -- Copy ctest configuration file
# ---------------------------------------------------------------------------- #
macro(COPY_CTEST_CONFIG_FILES)
foreach(_FILE CTestConfig.cmake CTestCustom.cmake)
# if current directory is not binary or source directory
if(NOT "${CMAKE_CURRENT_LIST_DIR}" STREQUAL "${CTEST_BINARY_DIRECTORY}" AND
NOT "${CTEST_SOURCE_DIRECTORY}" STREQUAL "${CTEST_BINARY_DIRECTORY}")
# if file exists in current directory
if(EXISTS ${CMAKE_CURRENT_LIST_DIR}/${_FILE})
configure_file(${CMAKE_CURRENT_LIST_DIR}/${_FILE}
${CTEST_BINARY_DIRECTORY}/${_FILE} COPYONLY)
endif()
# if source and binary differ
elseif(NOT "${CTEST_SOURCE_DIRECTORY}" STREQUAL "${CTEST_BINARY_DIRECTORY}")
# if file exists in source directory but not in binary directory
if(EXISTS ${CTEST_SOURCE_DIRECTORY}/${_FILE} AND
NOT EXISTS ${CTEST_BINARY_DIRECTORY}/${_FILE})
configure_file(${CTEST_SOURCE_DIRECTORY}/${_FILE}
${CTEST_BINARY_DIRECTORY}/${_FILE} COPYONLY)
endif()
endif()
endforeach()
endmacro()
ctest_read_custom_files("${CMAKE_CURRENT_LIST_DIR}")
message(STATUS "CTEST_MODEL: ${CTEST_MODEL}")
#-------------------------------------------------------------------------#
# Start
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running START_CTEST stage...")
message(STATUS "")
ctest_start(${CTEST_MODEL} TRACK ${CTEST_MODEL} ${APPEND_CTEST}
${CTEST_SOURCE_DIRECTORY} ${CTEST_BINARY_DIRECTORY})
#-------------------------------------------------------------------------#
# Config
#
copy_ctest_config_files()
ctest_read_custom_files("${CTEST_BINARY_DIRECTORY}")
#-------------------------------------------------------------------------#
# Update
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_UPDATE stage...")
message(STATUS "")
ctest_update(SOURCE "${CTEST_SOURCE_DIRECTORY}"
RETURN_VALUE up_ret)
#-------------------------------------------------------------------------#
# Configure
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_CONFIGURE stage...")
message(STATUS "")
ctest_configure(BUILD "${CTEST_BINARY_DIRECTORY}"
SOURCE ${CTEST_SOURCE_DIRECTORY}
${APPEND_CTEST}
OPTIONS "${CTEST_CONFIGURE_OPTIONS}"
RETURN_VALUE config_ret)
#-------------------------------------------------------------------------#
# Echo configure log bc Damien wants to delay merging this PR for eternity
#
file(GLOB _configure_log "${CTEST_BINARY_DIRECTORY}/Testing/Temporary/LastConfigure*.log")
# should only have one but loop just for safety
foreach(_LOG ${_configure_log})
file(READ ${_LOG} _LOG_MESSAGE)
message(STATUS "Configure Log: ${_LOG}")
message(STATUS "\n${_LOG_MESSAGE}\n")
endforeach()
#-------------------------------------------------------------------------#
# Build
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_BUILD stage...")
message(STATUS "")
ctest_build(BUILD "${CTEST_BINARY_DIRECTORY}"
${APPEND_CTEST}
RETURN_VALUE build_ret)
#-------------------------------------------------------------------------#
# Echo build log bc Damien wants to delay merging this PR for eternity
#
file(GLOB _build_log "${CTEST_BINARY_DIRECTORY}/Testing/Temporary/LastBuild*.log")
# should only have one but loop just for safety
foreach(_LOG ${_build_log})
file(READ ${_LOG} _LOG_MESSAGE)
message(STATUS "Build Log: ${_LOG}")
message(STATUS "\n${_LOG_MESSAGE}\n")
endforeach()
#-------------------------------------------------------------------------#
# Test
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_TEST stage...")
message(STATUS "")
ctest_test(RETURN_VALUE test_ret
${APPEND_CTEST}
${START_CTEST}
${END_CTEST}
${STRIDE_CTEST}
${INCLUDE_CTEST}
${EXCLUDE_CTEST}
${INCLUDE_LABEL_CTEST}
${EXCLUDE_LABEL_CTEST}
${PARALLEL_LEVEL_CTEST}
${STOP_TIME_CTEST}
SCHEDULE_RANDOM OFF)
#-------------------------------------------------------------------------#
# Coverage
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_COVERAGE stage...")
message(STATUS "")
execute_process(COMMAND ${CTEST_COVERAGE_COMMAND} ${CTEST_COVERAGE_EXTRA_FLAGS}
WORKING_DIRECTORY ${CTEST_BINARY_DIRECTORY}
ERROR_QUIET)
ctest_coverage(${APPEND_CTEST}
${CTEST_COVERAGE_LABELS}
RETURN_VALUE cov_ret)
#-------------------------------------------------------------------------#
# MemCheck
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_MEMCHECK stage...")
message(STATUS "")
ctest_memcheck(RETURN_VALUE mem_ret
${APPEND_CTEST}
${START_CTEST}
${END_CTEST}
${STRIDE_CTEST}
${INCLUDE_CTEST}
${EXCLUDE_CTEST}
${INCLUDE_LABEL_CTEST}
${EXCLUDE_LABEL_CTEST}
${PARALLEL_LEVEL_CTEST})
#-------------------------------------------------------------------------#
# Submit
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Running CTEST_SUBMIT stage...")
message(STATUS "")
file(GLOB_RECURSE NOTE_FILES "${CTEST_BINARY_DIRECTORY}/*CTestNotes.cmake")
foreach(_FILE ${NOTE_FILES})
message(STATUS "Including CTest notes files: \"${_FILE}\"...")
include("${_FILE}")
endforeach()
# capture submit error so it doesn't fail because of a submission error
ctest_submit(RETURN_VALUE submit_ret
RETRY_COUNT 2
RETRY_DELAY 10
CAPTURE_CMAKE_ERROR submit_err)
#-------------------------------------------------------------------------#
# Finish
#
message(STATUS "")
message(STATUS "[${CTEST_BUILD_NAME}] Finished ${CTEST_MODEL} Stages (${STAGES})")
message(STATUS "")
#-------------------------------------------------------------------------#
# Non-zero exit codes for important errors
#
if(NOT config_ret EQUAL 0)
message(FATAL_ERROR "Error during configuration! Exit code: ${config_ret}")
endif()
if(NOT build_ret EQUAL 0)
message(FATAL_ERROR "Error during build! Exit code: ${build_ret}")
endif()
if(NOT test_ret EQUAL 0)
message(FATAL_ERROR "Error during testing! Exit code: ${test_ret}")
endif()

View File

@ -19,17 +19,44 @@ INCLUDE("${Kokkos_CMAKE_DIR}/KokkosTargets.cmake")
INCLUDE("${Kokkos_CMAKE_DIR}/KokkosConfigCommon.cmake")
UNSET(Kokkos_CMAKE_DIR)
# if CUDA was enabled and separable compilation was specified, e.g.
# find_package(Kokkos COMPONENTS separable_compilation)
# then we set the RULE_LAUNCH_COMPILE and RULE_LAUNCH_LINK
IF(@Kokkos_ENABLE_CUDA@ AND NOT "separable_compilation" IN_LIST Kokkos_FIND_COMPONENTS)
# check for conflicts
IF("launch_compiler" IN_LIST Kokkos_FIND_COMPONENTS AND
"separable_compilation" IN_LIST Kokkos_FIND_COMPONENTS)
MESSAGE(STATUS "'launch_compiler' implies global redirection of targets depending on Kokkos to appropriate compiler.")
MESSAGE(STATUS "'separable_compilation' implies explicitly defining where redirection occurs via 'kokkos_compilation(PROJECT|TARGET|SOURCE|DIRECTORY ...)'")
MESSAGE(FATAL_ERROR "Conflicting COMPONENTS: 'launch_compiler' and 'separable_compilation'")
ENDIF()
IF("launch_compiler" IN_LIST Kokkos_FIND_COMPONENTS)
#
# if find_package(Kokkos COMPONENTS launch_compiler) then rely on the
# RULE_LAUNCH_COMPILE and RULE_LAUNCH_LINK to always redirect to the
# appropriate compiler for Kokkos
#
MESSAGE(STATUS "kokkos_launch_compiler is enabled globally. C++ compiler commands with -DKOKKOS_DEPENDENCE will be redirected to the appropriate compiler for Kokkos")
kokkos_compilation(
GLOBAL
CHECK_CUDA_COMPILES)
ELSEIF(@Kokkos_ENABLE_CUDA@ AND NOT "separable_compilation" IN_LIST Kokkos_FIND_COMPONENTS)
#
# if CUDA was enabled, separable compilation was not specified, and current compiler
# cannot compile CUDA, then set the RULE_LAUNCH_COMPILE and RULE_LAUNCH_LINK globally and
# kokkos_launch_compiler will re-direct to the compiler used to compile CUDA code during installation.
# kokkos_launch_compiler will re-direct if ${CMAKE_CXX_COMPILER} and -DKOKKOS_DEPENDENCE are present,
# otherwise, the original command will be executed
#
# run test to see if CMAKE_CXX_COMPILER=nvcc_wrapper
kokkos_compiler_is_nvcc(IS_NVCC ${CMAKE_CXX_COMPILER})
# if not nvcc_wrapper, use RULE_LAUNCH_COMPILE and RULE_LAUNCH_LINK
IF(NOT IS_NVCC AND NOT CMAKE_CXX_COMPILER_ID STREQUAL Clang AND
(NOT DEFINED Kokkos_LAUNCH_COMPILER OR Kokkos_LAUNCH_COMPILER))
MESSAGE(STATUS "kokkos_launch_compiler is enabled globally. C++ compiler commands with -DKOKKOS_DEPENDENCE will be redirected to nvcc_wrapper")
# if not nvcc_wrapper and Kokkos_LAUNCH_COMPILER was not set to OFF
IF(NOT IS_NVCC AND (NOT DEFINED Kokkos_LAUNCH_COMPILER OR Kokkos_LAUNCH_COMPILER))
MESSAGE(STATUS "kokkos_launch_compiler is enabled globally. C++ compiler commands with -DKOKKOS_DEPENDENCE will be redirected to the appropriate compiler for Kokkos")
kokkos_compilation(GLOBAL)
ENDIF()
UNSET(IS_NVCC) # be mindful of the environment, pollution is bad
# be mindful of the environment, pollution is bad
UNSET(IS_NVCC)
ENDIF()
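A brief downstream sketch of the two components discussed above (project and target names are hypothetical); launch_compiler redirects every command carrying -DKOKKOS_DEPENDENCE globally, whereas separable_compilation leaves the redirection to explicit kokkos_compilation() calls:
# option 1: global redirection for all Kokkos-dependent compile/link commands
find_package(Kokkos REQUIRED COMPONENTS launch_compiler)
add_executable(my_app main.cpp)
target_link_libraries(my_app Kokkos::kokkos)
# option 2: request explicit control instead (the two components cannot be combined)
# find_package(Kokkos REQUIRED COMPONENTS separable_compilation)
# kokkos_compilation(TARGET my_app)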

View File

@ -3,6 +3,7 @@ SET(Kokkos_OPTIONS @KOKKOS_ENABLED_OPTIONS@)
SET(Kokkos_TPLS @KOKKOS_ENABLED_TPLS@)
SET(Kokkos_ARCH @KOKKOS_ENABLED_ARCH_LIST@)
SET(Kokkos_CXX_COMPILER "@CMAKE_CXX_COMPILER@")
SET(Kokkos_CXX_COMPILER_ID "@KOKKOS_CXX_COMPILER_ID@")
# These are needed by KokkosKernels
FOREACH(DEV ${Kokkos_DEVICES})
@ -13,7 +14,7 @@ IF(NOT Kokkos_FIND_QUIETLY)
MESSAGE(STATUS "Enabled Kokkos devices: ${Kokkos_DEVICES}")
ENDIF()
IF (Kokkos_ENABLE_CUDA AND ${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14.0")
IF (Kokkos_ENABLE_CUDA)
# If we are building CUDA, we have tricked CMake because we declare a CXX project
# If the default C++ standard for a given compiler matches the requested
# standard, then CMake just omits the -std flag in later versions of CMake
@ -90,52 +91,6 @@ function(kokkos_check)
endif()
endfunction()
# this function is provided to easily select which files use nvcc_wrapper:
#
# GLOBAL --> all files
# TARGET --> all files in a target
# SOURCE --> specific source files
# DIRECTORY --> all files in directory
# PROJECT --> all files/targets in a project/subproject
#
FUNCTION(kokkos_compilation)
CMAKE_PARSE_ARGUMENTS(COMP "GLOBAL;PROJECT" "" "DIRECTORY;TARGET;SOURCE" ${ARGN})
# search relative first and then absolute
SET(_HINTS "${CMAKE_CURRENT_LIST_DIR}/../.." "@CMAKE_INSTALL_PREFIX@")
# find kokkos_launch_compiler
FIND_PROGRAM(Kokkos_COMPILE_LAUNCHER
NAMES kokkos_launch_compiler
HINTS ${_HINTS}
PATHS ${_HINTS}
PATH_SUFFIXES bin)
IF(NOT Kokkos_COMPILE_LAUNCHER)
MESSAGE(FATAL_ERROR "Kokkos could not find 'kokkos_launch_compiler'. Please set '-DKokkos_COMPILE_LAUNCHER=/path/to/launcher'")
ENDIF()
IF(COMP_GLOBAL)
# if global, don't bother setting others
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_LINK "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
ELSE()
FOREACH(_TYPE PROJECT DIRECTORY TARGET SOURCE)
# make project/subproject scoping easy, e.g. KokkosCompilation(PROJECT) after project(...)
IF("${_TYPE}" STREQUAL "PROJECT" AND COMP_${_TYPE})
LIST(APPEND COMP_DIRECTORY ${PROJECT_SOURCE_DIR})
UNSET(COMP_${_TYPE})
ENDIF()
# set the properties if defined
IF(COMP_${_TYPE})
# MESSAGE(STATUS "Using nvcc_wrapper :: ${_TYPE} :: ${COMP_${_TYPE}}")
SET_PROPERTY(${_TYPE} ${COMP_${_TYPE}} PROPERTY RULE_LAUNCH_COMPILE "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
SET_PROPERTY(${_TYPE} ${COMP_${_TYPE}} PROPERTY RULE_LAUNCH_LINK "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
ENDIF()
ENDFOREACH()
ENDIF()
ENDFUNCTION()
# A test to check whether a downstream project set the C++ compiler to NVCC or not
# this is called only when Kokkos was installed with Kokkos_ENABLE_CUDA=ON
FUNCTION(kokkos_compiler_is_nvcc VAR COMPILER)
@ -159,3 +114,161 @@ FUNCTION(kokkos_compiler_is_nvcc VAR COMPILER)
ENDIF()
ENDFUNCTION()
# this function checks whether the current CXX compiler supports building CUDA
FUNCTION(kokkos_cxx_compiler_cuda_test _VAR _COMPILER)
FILE(WRITE ${PROJECT_BINARY_DIR}/compile_tests/compiles_cuda.cu
"
#include <cuda.h>
#include <cstdlib>
__global__
void kernel(int sz, double* data)
{
int _beg = blockIdx.x * blockDim.x + threadIdx.x;
for(int i = _beg; i < sz; ++i)
data[i] += static_cast<double>(i);
}
int main()
{
double* data = NULL;
int blocks = 64;
int grids = 64;
int ret = cudaMalloc(&data, blocks * grids * sizeof(double));
if(ret != cudaSuccess)
return EXIT_FAILURE;
kernel<<<grids, blocks>>>(blocks * grids, data);
cudaDeviceSynchronize();
return EXIT_SUCCESS;
}
")
# save the command for debugging
SET(_COMMANDS "${_COMPILER} ${ARGN} -c ${PROJECT_BINARY_DIR}/compile_tests/compiles_cuda.cu")
# use execute_process instead of try compile because we want to set custom compiler
EXECUTE_PROCESS(COMMAND ${_COMPILER} ${ARGN} -c ${PROJECT_BINARY_DIR}/compile_tests/compiles_cuda.cu
RESULT_VARIABLE _RET
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/compile_tests
TIMEOUT 15
OUTPUT_QUIET
ERROR_QUIET)
IF(NOT _RET EQUAL 0)
# save the command for debugging
SET(_COMMANDS "${_COMMAND}\n${_COMPILER} --cuda-gpu-arch=sm_35 ${ARGN} -c ${PROJECT_BINARY_DIR}/compile_tests/compiles_cuda.cu")
# try the compile test again with clang arguments
EXECUTE_PROCESS(COMMAND ${_COMPILER} --cuda-gpu-arch=sm_35 -c ${PROJECT_BINARY_DIR}/compile_tests/compiles_cuda.cu
RESULT_VARIABLE _RET
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/compile_tests
TIMEOUT 15
OUTPUT_QUIET
ERROR_QUIET)
ENDIF()
SET(${_VAR}_COMMANDS "${_COMMANDS}" PARENT_SCOPE)
IF(_RET EQUAL 0)
SET(${_VAR} ON PARENT_SCOPE)
ELSE()
SET(${_VAR} OFF PARENT_SCOPE)
ENDIF()
ENDFUNCTION()
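A hedged example of calling this check directly; the variable name is illustrative, and inside this file the check is normally run through the launcher as shown below:
kokkos_cxx_compiler_cuda_test(_can_compile_cuda ${CMAKE_CXX_COMPILER})
IF(NOT _can_compile_cuda)
MESSAGE(STATUS "CUDA compile test failed, commands tried:\n${_can_compile_cuda_COMMANDS}")
ENDIF()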
# this function is provided to easily select which files use the same compiler as Kokkos
# when it was installed (or nvcc_wrapper):
#
# GLOBAL --> all files
# TARGET --> all files in a target
# SOURCE --> specific source files
# DIRECTORY --> all files in directory
# PROJECT --> all files/targets in a project/subproject
#
# Use the COMPILER argument to specify a compiler, if needed. By default, it will
# set the values to ${Kokkos_CXX_COMPILER} unless Kokkos_ENABLE_CUDA=ON and
# Kokkos_CXX_COMPILER_ID is NVIDIA, then it will set it to nvcc_wrapper
#
# Use CHECK_CUDA_COMPILES to run a check when CUDA is enabled
#
FUNCTION(kokkos_compilation)
CMAKE_PARSE_ARGUMENTS(COMP
"GLOBAL;PROJECT;CHECK_CUDA_COMPILES"
"COMPILER"
"DIRECTORY;TARGET;SOURCE;COMMAND_PREFIX"
${ARGN})
# if built w/o CUDA support, we want to basically make this a no-op
SET(_Kokkos_ENABLE_CUDA @Kokkos_ENABLE_CUDA@)
# search relative first and then absolute
SET(_HINTS "${CMAKE_CURRENT_LIST_DIR}/../.." "@CMAKE_INSTALL_PREFIX@")
# find kokkos_launch_compiler
FIND_PROGRAM(Kokkos_COMPILE_LAUNCHER
NAMES kokkos_launch_compiler
HINTS ${_HINTS}
PATHS ${_HINTS}
PATH_SUFFIXES bin)
IF(NOT Kokkos_COMPILE_LAUNCHER)
MESSAGE(FATAL_ERROR "Kokkos could not find 'kokkos_launch_compiler'. Please set '-DKokkos_COMPILE_LAUNCHER=/path/to/launcher'")
ENDIF()
# if COMPILER was not specified, assume Kokkos_CXX_COMPILER
IF(NOT COMP_COMPILER)
SET(COMP_COMPILER ${Kokkos_CXX_COMPILER})
IF(_Kokkos_ENABLE_CUDA AND Kokkos_CXX_COMPILER_ID STREQUAL NVIDIA)
# find nvcc_wrapper
FIND_PROGRAM(Kokkos_NVCC_WRAPPER
NAMES nvcc_wrapper
HINTS ${_HINTS}
PATHS ${_HINTS}
PATH_SUFFIXES bin)
# fatal if we can't find nvcc_wrapper
IF(NOT Kokkos_NVCC_WRAPPER)
MESSAGE(FATAL_ERROR "Kokkos could not find nvcc_wrapper. Please set '-DKokkos_NVCC_WRAPPER=/path/to/nvcc_wrapper'")
ENDIF()
SET(COMP_COMPILER ${Kokkos_NVCC_WRAPPER})
ENDIF()
ENDIF()
# check that the original compiler still exists!
IF(NOT EXISTS ${COMP_COMPILER})
MESSAGE(FATAL_ERROR "Kokkos could not find original compiler: '${COMP_COMPILER}'")
ENDIF()
# try to ensure that compiling cuda code works!
IF(_Kokkos_ENABLE_CUDA AND COMP_CHECK_CUDA_COMPILES)
# this may fail if kokkos_compiler launcher was used during install
kokkos_cxx_compiler_cuda_test(_COMPILES_CUDA
${Kokkos_COMPILE_LAUNCHER} ${COMP_COMPILER} ${CMAKE_CXX_COMPILER})
# if above failed, throw an error
IF(NOT _COMPILES_CUDA)
MESSAGE(FATAL_ERROR "kokkos_cxx_compiler_cuda_test failed! Test commands:\n${_COMPILES_CUDA_COMMANDS}")
ENDIF()
ENDIF()
IF(COMP_COMMAND_PREFIX)
SET(_PREFIX "${COMP_COMMAND_PREFIX}")
STRING(REPLACE ";" " " _PREFIX "${COMP_COMMAND_PREFIX}")
SET(Kokkos_COMPILER_LAUNCHER "${_PREFIX} ${Kokkos_COMPILE_LAUNCHER}")
ENDIF()
IF(COMP_GLOBAL)
# if global, don't bother setting others
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${Kokkos_COMPILE_LAUNCHER} ${COMP_COMPILER} ${CMAKE_CXX_COMPILER}")
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_LINK "${Kokkos_COMPILE_LAUNCHER} ${COMP_COMPILER} ${CMAKE_CXX_COMPILER}")
ELSE()
FOREACH(_TYPE PROJECT DIRECTORY TARGET SOURCE)
# make project/subproject scoping easy, e.g. KokkosCompilation(PROJECT) after project(...)
IF("${_TYPE}" STREQUAL "PROJECT" AND COMP_${_TYPE})
LIST(APPEND COMP_DIRECTORY ${PROJECT_SOURCE_DIR})
UNSET(COMP_${_TYPE})
ENDIF()
# set the properties if defined
IF(COMP_${_TYPE})
# MESSAGE(STATUS "Using ${COMP_COMPILER} :: ${_TYPE} :: ${COMP_${_TYPE}}")
SET_PROPERTY(${_TYPE} ${COMP_${_TYPE}} PROPERTY RULE_LAUNCH_COMPILE "${Kokkos_COMPILE_LAUNCHER} ${COMP_COMPILER} ${CMAKE_CXX_COMPILER}")
SET_PROPERTY(${_TYPE} ${COMP_${_TYPE}} PROPERTY RULE_LAUNCH_LINK "${Kokkos_COMPILE_LAUNCHER} ${COMP_COMPILER} ${CMAKE_CXX_COMPILER}")
ENDIF()
ENDFOREACH()
ENDIF()
ENDFUNCTION()
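A short usage sketch of the different granularities (target and file names are hypothetical):
# redirect every Kokkos-dependent compile/link command in the whole build
kokkos_compilation(GLOBAL)
# or restrict the redirection to a single target or source file,
# optionally forcing a specific compiler
kokkos_compilation(TARGET my_app)
kokkos_compilation(SOURCE kernels.cpp COMPILER ${Kokkos_CXX_COMPILER})
# or cover everything declared in the current project()/subproject
kokkos_compilation(PROJECT)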

Some files were not shown because too many files have changed in this diff.