Merge branch 'develop' into new-computes

Axel Kohlmeyer
2023-08-24 09:38:05 -04:00
104 changed files with 3859 additions and 3735 deletions

View File

@ -145,6 +145,7 @@ if(MSVC)
add_compile_options(/Zc:__cplusplus)
add_compile_options(/wd4244)
add_compile_options(/wd4267)
add_compile_options(/wd4250)
add_compile_options(/EHsc)
endif()
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
@ -268,7 +269,6 @@ set(STANDARD_PACKAGES
MOFFF
MOLECULE
MOLFILE
MPIIO
NETCDF
ORIENT
PERI
@ -377,7 +377,6 @@ endif()
# "hard" dependencies between packages resulting
# in an error instead of skipping over files
pkg_depends(ML-IAP ML-SNAP)
pkg_depends(MPIIO MPI)
pkg_depends(ATC MANYBODY)
pkg_depends(LATBOLTZ MPI)
pkg_depends(SCAFACOS MPI)
@ -388,6 +387,7 @@ pkg_depends(CG-DNA MOLECULE)
pkg_depends(CG-DNA ASPHERE)
pkg_depends(ELECTRODE KSPACE)
pkg_depends(EXTRA-MOLECULE MOLECULE)
pkg_depends(MESONT MOLECULE)
# detect if we may enable OpenMP support by default
set(BUILD_OMP_DEFAULT OFF)
@ -579,13 +579,6 @@ foreach(PKG ${STANDARD_PACKAGES})
RegisterPackages(${${PKG}_SOURCES_DIR})
endforeach()
# packages that need defines set
foreach(PKG MPIIO)
if(PKG_${PKG})
target_compile_definitions(lammps PRIVATE -DLMP_${PKG})
endif()
endforeach()
# dedicated check for entire contents of accelerator packages
foreach(PKG ${SUFFIX_PACKAGES})
set(${PKG}_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/${PKG})

View File

@ -28,6 +28,7 @@ if(MSVC)
add_compile_options(/Zc:__cplusplus)
add_compile_options(/wd4244)
add_compile_options(/wd4267)
add_compile_options(/wd4250)
add_compile_options(/EHsc)
endif()
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)

View File

@ -63,7 +63,6 @@ set(ALL_PACKAGES
MOFFF
MOLECULE
MOLFILE
MPIIO
NETCDF
OPENMP
OPT

View File

@ -65,7 +65,6 @@ set(ALL_PACKAGES
MOFFF
MOLECULE
MOLFILE
MPIIO
NETCDF
OPENMP
OPT

View File

@ -83,7 +83,6 @@ endforeach()
# these two packages require a full MPI implementation
if(BUILD_MPI)
set(PKG_MPIIO ON CACHE BOOL "" FORCE)
set(PKG_LATBOLTZ ON CACHE BOOL "" FORCE)
endif()

View File

@ -19,7 +19,6 @@ set(PACKAGES_WITH_LIB
ML-PACE
ML-QUIP
MOLFILE
MPIIO
NETCDF
PLUMED
PYTHON

View File

@ -90,7 +90,7 @@ standard. A more detailed discussion of that is below.
directory, or ``make`` from the ``src/STUBS`` dir. If the build
fails, you may need to edit the ``STUBS/Makefile`` for your
platform. The stubs library does not provide MPI/IO functions
required by some LAMMPS packages, e.g. ``MPIIO`` or ``LATBOLTZ``,
required by some LAMMPS packages, e.g. ``LATBOLTZ``,
and thus is not compatible with those packages.
.. note::
@ -128,14 +128,13 @@ and adds vectorization support when compiled with compatible compilers,
in particular the Intel compilers on top of OpenMP. Also, the ``KOKKOS``
package can be compiled to include OpenMP threading.
In addition, there are a few commands in LAMMPS that have native
OpenMP support included as well. These are commands in the ``MPIIO``,
``ML-SNAP``, ``DIFFRACTION``, and ``DPD-REACT`` packages.
Furthermore, some packages support OpenMP threading indirectly through
the libraries they interface to: e.g. ``KSPACE``, and ``COLVARS``.
See the :doc:`Packages details <Packages_details>` page for more info
on these packages, and the pages for their respective commands for
OpenMP threading info.
In addition, there are a few commands in LAMMPS that have native OpenMP
support included as well. These are commands in the ``ML-SNAP``,
``DIFFRACTION``, and ``DPD-REACT`` packages. Furthermore, some packages
support OpenMP threading indirectly through the libraries they interface
to: e.g. ``KSPACE``, and ``COLVARS``. See the :doc:`Packages details
<Packages_details>` page for more info on these packages, and the pages
for their respective commands for OpenMP threading info.
For CMake, if you use ``BUILD_OMP=yes``, you can use these packages
and turn on their native OpenMP support and turn on their native OpenMP

View File

@ -91,7 +91,7 @@ KOKKOS, o = OPENMP, t = OPT.
* :doc:`ke/atom/eff <compute_ke_atom_eff>`
* :doc:`ke/eff <compute_ke_eff>`
* :doc:`ke/rigid <compute_ke_rigid>`
* :doc:`local/comp/atom (k) <compute_local_comp_atom>`
* :doc:`composition/atom (k) <compute_composition_atom>`
* :doc:`mliap <compute_mliap>`
* :doc:`momentum <compute_momentum>`
* :doc:`msd <compute_msd>`

View File

@ -23,17 +23,14 @@ An alphabetic list of all LAMMPS :doc:`dump <dump>` commands.
* :doc:`atom <dump>`
* :doc:`atom/adios <dump_adios>`
* :doc:`atom/gz <dump>`
* :doc:`atom/mpiio <dump>`
* :doc:`atom/zstd <dump>`
* :doc:`cfg <dump>`
* :doc:`cfg/gz <dump>`
* :doc:`cfg/mpiio <dump>`
* :doc:`cfg/uef <dump_cfg_uef>`
* :doc:`cfg/zstd <dump>`
* :doc:`custom <dump>`
* :doc:`custom/adios <dump_adios>`
* :doc:`custom/gz <dump>`
* :doc:`custom/mpiio <dump>`
* :doc:`custom/zstd <dump>`
* :doc:`dcd <dump>`
* :doc:`grid <dump>`
@ -51,7 +48,6 @@ An alphabetic list of all LAMMPS :doc:`dump <dump>` commands.
* :doc:`xtc <dump>`
* :doc:`xyz <dump>`
* :doc:`xyz/gz <dump>`
* :doc:`xyz/mpiio <dump>`
* :doc:`xyz/zstd <dump>`
* :doc:`yaml <dump>`

View File

@ -85,6 +85,25 @@ The same functionality is available through
:doc:`bond style mesocnt <bond_mesocnt>` and
:doc:`angle style mesocnt <angle_mesocnt>`.
MPIIO package
-------------
.. deprecated:: TBD
The MPIIO package has been removed from LAMMPS since it was unmaintained
for many years and thus not updated to incorporate required changes that
had been applied to the corresponding non-MPIIO commands. As a
consequence, the MPIIO commands had become unreliable, sometimes
crashing LAMMPS or corrupting data. Similar functionality is available
through the :ref:`ADIOS package <PKG-ADIOS>` and the :ref:`NETCDF
package <PKG-NETCDF>`. Also, the :doc:`dump_modify nfile or dump_modify
fileper <dump_modify>` keywords may be used for an efficient way of
writing out dump files when running on large numbers of processors.
Similarly, the "nfile" and "fileper" keywords exist for restarts:
see :doc:`restart <restart>`, :doc:`read_restart <read_restart>`,
:doc:`write_restart <write_restart>`.
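As a hedged illustration of the restart alternative (hypothetical file
names, interval, and file count), the *nfile* keyword distributes the
restart data over several files written in parallel:

.. code-block:: LAMMPS

   # write restart data as 16 files every 10000 steps; the "%" in the
   # file name is replaced by the ID of the writing processor
   restart       10000 poly.%.restart nfile 16

   # the same keyword is available for write_restart
   write_restart final.%.restart nfile 16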
MSCG package
------------

View File

@ -7148,9 +7148,6 @@ keyword to allow for additional bonds to be formed
*Read_dump xyz fields do not have consistent scaling/wrapping*
Self-explanatory.
*Reading from MPI-IO filename when MPIIO package is not installed*
Self-explanatory.
*Reax_defs.h setting for NATDEF is too small*
Edit the setting in the ReaxFF library and re-compile the
library and re-build LAMMPS.
@ -8489,9 +8486,6 @@ keyword to allow for additional bonds to be formed
The write_restart command cannot be used before a read_data,
read_restart, or create_box command.
*Writing to MPI-IO filename when MPIIO package is not installed*
Self-explanatory.
*Zero length rotation vector with displace_atoms*
Self-explanatory.

View File

@ -65,12 +65,13 @@ Running LAMMPS
^^^^^^^^^^^^^^
From within the LAMMPS GUI main window LAMMPS can be started either from
the ``Run`` menu or by the hotkey `Ctrl-Enter` (`Command-Enter` on
macOS). LAMMPS is running in a separate thread, so the GUI will stay
responsive and thus is capable to interact with the calculation and
access its data. It is important to note, that LAMMPS is using the
contents of the input buffer for the run, **not** the file it was read
from. If there are unsaved changes in the buffer, they *will* be used.
the ``Run`` menu, by the hotkey `Ctrl-Enter` (`Command-Enter` on macOS),
or by clicking on the green button in the status bar. LAMMPS runs in a
separate thread, so the GUI stays responsive and able to interact with
the calculation and access its data. It is important to note that
LAMMPS uses the contents of the input buffer for the run, **not** the
file it was read from. If there are unsaved changes in the buffer,
they *will* be used.
.. image:: JPG/lammps-gui-running.png
:align: center
@ -82,48 +83,59 @@ contain the selected number of threads, if thread-parallel acceleration
was selected in the ``Preferences`` dialog. On the right side, a
progress bar is shown that displays the estimated progress on the
current :doc:`run command <run>`. Additionally, two windows will open:
the log window with the captured screen output and the chart window
with a line graph created from the thermodynamic output of the run.
the log window with the captured screen output and the chart window with
a line graph created from the thermodynamic output of the run.
The run can be stopped cleanly by using either the ``Stop LAMMPS`` entry
in the ``Run`` menu or with the hotkey `Ctrl-/` (`Command-/` on macOS).
This will cause that the running LAMMPS process will complete the
current iteration and then stop. This is equivalent to the command
`timer timeout 0 <timer>` and implemented by calling the
:cpp:func:`lammps_force_timeout()` function of the LAMMPS C-library
interface.
in the ``Run`` menu, the hotkey `Ctrl-/` (`Command-/` on macOS), or by
clicking on the red button in the status bar. This will cause the
running LAMMPS process to complete the current iteration and then
stop. This is equivalent to the command :doc:`timer timeout 0 <timer>`
and is implemented by calling the :cpp:func:`lammps_force_timeout()`
function of the LAMMPS C-library interface.
Viewing Snapshot Images
^^^^^^^^^^^^^^^^^^^^^^^
By selecting the ``View Image`` entry in the ``Run`` menu or by hitting
the `Ctrl-I` (`Command-I` on macOS) hotkey, LAMMPS gui will issue a
By selecting the ``View Image`` entry in the ``Run`` menu, by hitting
the `Ctrl-I` (`Command-I` on macOS) hotkey or by clicking on the
"palette" button in the status bar, LAMMPS GUI will issue a
:doc:`write_dump image <dump_image>` command and read the resulting
snapshot image into an image viewer window.
snapshot image into an image viewer window. When possible, LAMMPS GUI
will try to detect which elements the atoms correspond to (via their
mass) and colorize them accordingly. Otherwise, a predefined sequence
of colors is assigned to the different atom types.
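For reference, the command issued by LAMMPS GUI has the following
general form; this is a minimal hedged sketch with a hypothetical file
name and settings, the GUI fills in its own values:

.. code-block:: LAMMPS

   # render the current snapshot, coloring and sizing atoms by type
   write_dump all image snapshot.ppm type type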
.. image:: JPG/lammps-gui-image.png
:align: center
:scale: 50%
The image size, some default image quality settings, and some colors
can be changed in the ``Preferences`` dialog window. From the image
viewer window further adjustments can be made: high-quality rendering,
anti-aliasing, display of box or axes, zoom factor. The the image can
be rotated horizontally and vertically and it is possible to only
display the atoms within a predefined group (default is "all").
After each change, the image is rendered again and the display updated.
The default image size, some default image quality settings, the view
style, and some colors can be changed in the ``Preferences`` dialog
window. From the image viewer window, further adjustments can be made:
actual image size, high-quality rendering, anti-aliasing, view style,
display of box or axes, and zoom factor. The image can be rotated
horizontally and vertically, and it is possible to display only the
atoms within a predefined group (default is "all"). After each change,
the image is rendered again and the display updated. The small palette
icon on the top left is colored while LAMMPS is rendering the image and
is grayed out again when it is done. When there are many items to show
and high-quality images with anti-aliasing are requested, re-rendering
can take several seconds. From the ``File`` menu, the shown image can
be saved to a file or copied to the clipboard for pasting into another
application.
Editor Functions
^^^^^^^^^^^^^^^^
The editor has the usual functionality that similar programs have: text
selection via mouse or with cursor moves while holding the Shift key,
Cut, Copy, Paste, Undo, Redo. All of these editing functions are available
via hotkeys. When trying to exit the editor with a modified buffer, a
dialog will pop up asking whether to cancel the quit, or don't save or
save the buffer's contents to a file.
The editor has most of the usual functionality of similar programs:
text selection via mouse or with cursor moves while holding the Shift
key, Cut, Copy, Paste, Undo, and Redo. All of these editing functions
are available via hotkeys. When trying to exit the editor with a
modified buffer, a dialog will pop up asking whether to cancel the
quit, discard the changes, or save the buffer's contents to a file.
Context Specific Help
^^^^^^^^^^^^^^^^^^^^^
@ -285,8 +297,15 @@ General Settings:
- *Replace image window on new render:* when checked, an existing
image window will be replaced when a new snapshot image is requested,
otherwise each command will create a new image window.
- *Path to LAMMPS Shared Library File:* this option is only available
when LAMMPS GUI was compiled to load the LAMMPS library at run time
instead of being linked to it directly. With the ``Browse..`` button
or by editing the text, a different shared library file, e.g. from a
different LAMMPS version or compiled with different settings, can be
loaded. After changing this setting, LAMMPS GUI needs to be
re-launched.
- *Select Default Font:* Opens a font selection dialog where the type
and size for the default font (used for everthing but the editor and
and size for the default font (used for everything but the editor and
log) of the application can be set.
- *Select Text Font:* Opens a font selection dialog where the type and
size for the text editor and log font of the application can be set.
@ -294,10 +313,10 @@ General Settings:
Accelerators:
^^^^^^^^^^^^^
This tab enables to select accelerator settings and is equivalent to
using the `-suffix` and `-package` flags on the command line. Only
settings supported by the LAMMPS library and local hardware are
available. The `Number of threads` field allows to set the maximum
This tab allows selecting which accelerator package is used and is
equivalent to using the `-suffix` and `-package` flags on the command
line. Only settings supported by the LAMMPS library and local hardware
are available. The `Number of threads` field sets the maximum
number of threads for the accelerator packages that use threads.
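For reference, the same effect can also be achieved from within an
input file; a minimal hedged sketch (assuming the OPENMP package and 4
threads), equivalent to ``-package omp 4 -suffix omp`` on the command
line:

.. code-block:: LAMMPS

   # activate the OPENMP package with 4 threads and the /omp style suffix
   package omp 4
   suffix  omp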
Snapshot Image:
@ -306,14 +325,16 @@ Snapshot Image:
This tab allows to set some defaults for the snapshot images displayed
in the ``Image Viewer`` window, like its dimensions and the zoom factor
applied. The *Antialias* switch requests to render images with twice
the number of pixels for width and height and then uses a bi-cubic
scaling algorithm to rescale them back to the requested size. This
produces higher quality images with smoother edges at the expense of
requiring more CPU time to render the image. The *HQ Image mode* option
turns on using a screen space ambient occlusion mode (SSAO) when
rendering images. This is also more time consuming, but produces a more
'spatial' representation of the system. Finally there are a couple of
drop down lists to select the background and box color.
the number of pixels for width and height and then smoothly scales the
image back to the requested size. This produces higher quality images
with smoother edges at the expense of requiring more CPU time to render
the image. The *HQ Image mode* option enables a screen space ambient
occlusion (SSAO) mode when rendering images. This is also more time
consuming, but produces a more 'spatial' representation of the system.
The *VDW Style* checkbox selects whether atoms are represented by
space-filling spheres (when checked) or by smaller spheres and sticks.
Finally, there are a couple of drop-down lists to select the background
and box color.
Hotkeys

View File

@ -5,7 +5,7 @@ LAMMPS can be downloaded, built, and configured for macOS with `Homebrew
<homebrew_>`_. (Alternatively, see the installation instructions for
:doc:`downloading an executable via Conda <Install_conda>`.) The
following LAMMPS packages are unavailable at this time because of
additional requirements not yet met: GPU, KOKKOS, MSCG, MPIIO, POEMS,
additional requirements not yet met: GPU, KOKKOS, MSCG, POEMS,
VORONOI.
After installing Homebrew, you can install LAMMPS on your system with

View File

@ -18,11 +18,10 @@ needed to run in parallel with MPI.
The LAMMPS binaries contain *all* :doc:`optional packages <Packages>`
included in the source distribution except: ADIOS, H5MD, KIM, ML-PACE,
ML-QUIP, MSCG, NETCDF, PLUMED, QMMM, SCAFACOS, and VTK. The serial
version also does not include the MPIIO and LATBOLTZ packages. The
PYTHON package is only available in the Python installers that bundle a
Python runtime. The GPU package is compiled for OpenCL with mixed
precision kernels.
ML-QUIP, MSCG, NETCDF, QMMM, SCAFACOS, and VTK. The serial version also
does not include the LATBOLTZ package. The PYTHON package is only
available in the Python installers that bundle a Python runtime. The
GPU package is compiled for OpenCL with mixed precision kernels.
The LAMMPS library is compiled as a shared library and the
:doc:`LAMMPS Python module <Python_module>` is installed, so that

BIN  doc/src/JPG/image.both.png  (new file, 23 KiB)

BIN  (new image file, name not shown, 22 KiB)

BIN  doc/src/JPG/image.fsaa.png  (new file, 23 KiB)

BIN  doc/src/JPG/image.ssao.png  (new file, 22 KiB)

BIN  (modified image, name not shown, 81 KiB before, 123 KiB after)

BIN  (modified image, name not shown, 89 KiB before, 91 KiB after)

BIN  (modified image, name not shown, 52 KiB before, 66 KiB after)

BIN  (modified image, name not shown, 32 KiB before, 21 KiB after)

View File

@ -87,7 +87,6 @@ page gives those details.
* :ref:`MOFFF <PKG-MOFFF>`
* :ref:`MOLECULE <PKG-MOLECULE>`
* :ref:`MOLFILE <PKG-MOLFILE>`
* :ref:`MPIIO <PKG-MPIIO>`
* :ref:`NETCDF <PKG-NETCDF>`
* :ref:`OPENMP <PKG-OPENMP>`
* :ref:`OPT <PKG-OPT>`
@ -2033,38 +2032,6 @@ This package has :ref:`specific installation instructions <molfile>` on the :doc
----------
.. _PKG-MPIIO:
MPIIO package
-------------
**Contents:**
Support for parallel output/input of dump and restart files via the
MPIIO library. It adds :doc:`dump styles <dump>` with a "mpiio" in
their style name. Restart files with an ".mpiio" suffix are also
written and read in parallel.
.. warning::
The MPIIO package is currently unmaintained and has become
unreliable. Use with caution.
**Install:**
The MPIIO package requires that LAMMPS is build in :ref:`MPI parallel mode <serial>`.
**Supporting info:**
* src/MPIIO: filenames -> commands
* :doc:`dump <dump>`
* :doc:`restart <restart>`
* :doc:`write_restart <write_restart>`
* :doc:`read_restart <read_restart>`
----------
.. _PKG-NETCDF:
NETCDF package

View File

@ -333,11 +333,6 @@ whether an extra library is needed to build and use the package:
- :doc:`dump molfile <dump_molfile>`
- n/a
- ext
* - :ref:`MPIIO <PKG-MPIIO>`
- MPI parallel I/O dump and restart
- :doc:`dump <dump>`
- n/a
- no
* - :ref:`NETCDF <PKG-NETCDF>`
- dump output via NetCDF
- :doc:`dump netcdf <dump_netcdf>`

View File

@ -245,7 +245,7 @@ The individual style names on the :doc:`Commands compute <Commands_compute>` pag
* :doc:`ke/atom/eff <compute_ke_atom_eff>` - per-atom translational and radial kinetic energy in the electron force field model
* :doc:`ke/eff <compute_ke_eff>` - kinetic energy of a group of nuclei and electrons in the electron force field model
* :doc:`ke/rigid <compute_ke_rigid>` - translational kinetic energy of rigid bodies
* :doc:`local/comp/atom <compute_local_comp_atom>` - local composition for each atom
* :doc:`composition/atom <compute_composition_atom>` - local composition for each atom
* :doc:`mliap <compute_mliap>` - gradients of energy and forces with respect to model parameters and related quantities for training machine learning interatomic potentials
* :doc:`momentum <compute_momentum>` - translational momentum
* :doc:`msd <compute_msd>` - mean-squared displacement of group of atoms

View File

@ -1,20 +1,20 @@
.. index:: compute local/comp/atom
.. index:: compute local/comp/atom/kk
.. index:: compute composition/atom
.. index:: compute composition/atom/kk
compute local/comp/atom command
===============================
compute composition/atom command
================================
Accelerator Variants: *local/comp/atom/kk*
Accelerator Variants: *composition/atom/kk*
Syntax
""""""
.. code-block:: LAMMPS
compute ID group-ID local/comp/atom keyword values ...
compute ID group-ID composition/atom keyword values ...
* ID, group-ID are documented in :doc:`compute <compute>` command
* local/comp/atom = style name of this compute command
* composition/atom = style name of this compute command
* one or more keyword/value pairs may be appended
.. parsed-literal::
@ -27,9 +27,9 @@ Examples
.. code-block:: LAMMPS
compute 1 all local/comp/atom
compute 1 all composition/atom
compute 1 all local/comp/atom cutoff 9.0
compute 1 all composition/atom cutoff 9.0
comm_modify cutoff 9.0

View File

@ -14,10 +14,6 @@
.. index:: dump custom/gz
.. index:: dump local/gz
.. index:: dump xyz/gz
.. index:: dump atom/mpiio
.. index:: dump cfg/mpiio
.. index:: dump custom/mpiio
.. index:: dump xyz/mpiio
.. index:: dump atom/zstd
.. index:: dump cfg/zstd
.. index:: dump custom/zstd
@ -63,7 +59,7 @@ Syntax
* ID = user-assigned name for the dump
* group-ID = ID of the group of atoms to be dumped
* style = *atom* or *atom/adios* or *atom/gz* or *atom/zstd* or *atom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *cfg/uef* or *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *custom/adios* or *dcd* or *grid* or *grid/vtk* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *xyz/mpiio* or *yaml*
* style = *atom* or *atom/adios* or *atom/gz* or *atom/zstd* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/uef* or *custom* or *custom/gz* or *custom/zstd* or *custom/adios* or *dcd* or *grid* or *grid/vtk* or *h5md* or *image* or *local* or *local/gz* or *local/zstd* or *molfile* or *movie* or *netcdf* or *netcdf/mpiio* or *vtk* or *xtc* or *xyz* or *xyz/gz* or *xyz/zstd* or *yaml*
* N = dump on timesteps which are multiples of N
* file = name of file to write dump info to
* attribute1,attribute2,... = list of attributes for a particular style
@ -74,13 +70,11 @@ Syntax
*atom/adios* attributes = none, discussed on :doc:`dump atom/adios <dump_adios>` page
*atom/gz* attributes = none
*atom/zstd* attributes = none
*atom/mpiio* attributes = none
*cfg* attributes = same as *custom* attributes, see below
*cfg/gz* attributes = same as *custom* attributes, see below
*cfg/zstd* attributes = same as *custom* attributes, see below
*cfg/mpiio* attributes = same as *custom* attributes, see below
*cfg/uef* attributes = same as *custom* attributes, discussed on :doc:`dump cfg/uef <dump_cfg_uef>` page
*custom*, *custom/gz*, *custom/zstd*, *custom/mpiio* attributes = see below
*custom*, *custom/gz*, *custom/zstd* attributes = see below
*custom/adios* attributes = same as *custom* attributes, discussed on :doc:`dump custom/adios <dump_adios>` page
*dcd* attributes = none
*h5md* attributes = discussed on :doc:`dump h5md <dump_h5md>` page
@ -97,10 +91,9 @@ Syntax
*xyz* attributes = none
*xyz/gz* attributes = none
*xyz/zstd* attributes = none
*xyz/mpiio* attributes = none
*yaml* attributes = same as *custom* attributes, see below
* *custom* or *custom/gz* or *custom/zstd* or *custom/mpiio* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/mpiio* or *cfg/uef* or *netcdf* or *netcdf/mpiio* or *yaml* attributes:
* *custom* or *custom/gz* or *custom/zstd* or *cfg* or *cfg/gz* or *cfg/zstd* or *cfg/uef* or *netcdf* or *netcdf/mpiio* or *yaml* attributes:
.. parsed-literal::
@ -179,11 +172,9 @@ Examples
.. code-block:: LAMMPS
dump myDump all atom 100 dump.lammpstrj
dump myDump all atom/mpiio 100 dump.atom.mpiio
dump myDump all atom/gz 100 dump.atom.gz
dump myDump all atom/zstd 100 dump.atom.zst
dump 2 subgroup atom 50 dump.run.bin
dump 2 subgroup atom/mpiio 50 dump.run.mpiio.bin
dump 4a all custom 100 dump.myforce.* id type x y vx fx
dump 4a all custom 100 dump.myvel.lammpsbin id type x y z vx vy vz
dump 4b flow custom 100 dump.%.myforce id type c_myF[3] v_ke
@ -622,27 +613,10 @@ when running on large numbers of processors.
Note that using the "\*" and "%" characters together can produce a
large number of small dump files!
For styles that end with *mpiio* an ".mpiio" must appear somewhere in
the specified filename. These styles write their dump file in
parallel via the MPI-IO library, which is part of the MPI standard for
versions 2.0 and above. Note these styles are identical in command
syntax to the corresponding styles without "mpiio". Likewise, the
dump files produced by these MPI-IO styles are identical in format to
the files produced by their non-MPI-IO style counterparts. This means
you can write a dump file using MPI-IO and use the :doc:`read_dump
<read_dump>` command or perform other post-processing, just as if the
dump file was not written using MPI-IO.
.. deprecated:: TBD
Because MPI-IO dump files are one large file which all processors
write to, you cannot use the "%" wildcard character described above in
the filename. However you can use the ".bin" or ".lammpsbin" suffix
described below. Again, this file will be written in parallel and
have the same binary format as if it were written without MPI-IO.
.. warning::
The MPIIO package within LAMMPS is currently unmaintained and has
become unreliable. Use with caution.
The MPIIO package and the corresponding "/mpiio" dump styles, except
for the unrelated "netcdf/mpiio" style, were removed from LAMMPS.
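As a hedged migration sketch (hypothetical dump ID, interval, and file
names), a removed "/mpiio" dump can be replaced by a regular dump that
writes multiple files in parallel:

.. code-block:: LAMMPS

   # previously: dump 1 all atom/mpiio 100 dump.atom.mpiio
   # now: write one file per 8 processors using the "%" wildcard
   dump          1 all atom 100 dump.%.atom
   dump_modify   1 fileper 8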
----------
@ -956,11 +930,6 @@ the COMPRESS package. They are only enabled if LAMMPS was built with
that package. See the :doc:`Build package <Build_package>` page for
more info.
The *atom/mpiio*, *cfg/mpiio*, *custom/mpiio*, and *xyz/mpiio* styles
are part of the MPIIO package. They are only enabled if LAMMPS was
built with that package. See the :doc:`Build package <Build_package>`
page for more info.
The *xtc*, *dcd*, and *yaml* styles are part of the EXTRA-DUMP package.
They are only enabled if LAMMPS was built with that package. See the
:doc:`Build package <Build_package>` page for more info.
@ -971,6 +940,7 @@ Related commands
:doc:`dump atom/adios <dump_adios>`, :doc:`dump custom/adios <dump_adios>`,
:doc:`dump cfg/uef <dump_cfg_uef>`, :doc:`dump h5md <dump_h5md>`,
:doc:`dump image <dump_image>`, :doc:`dump molfile <dump_molfile>`,
:doc:`dump netcdf <dump_netcdf>`, :doc:`dump netcdf/mpiio <dump_netcdf>`,
:doc:`dump_modify <dump_modify>`, :doc:`undump <undump>`,
:doc:`write_dump <write_dump>`

View File

@ -24,7 +24,7 @@ Syntax
* color = atom attribute that determines color of each atom
* diameter = atom attribute that determines size of each atom
* zero or more keyword/value pairs may be appended
* keyword = *atom* or *adiam* or *bond* or *grid* or *line* or *tri* or *body* or *fix* or *size* or *view* or *center* or *up* or *zoom* or *box* or *axes* or *subbox* or *shiny* or *ssao*
* keyword = *atom* or *adiam* or *bond* or *grid* or *line* or *tri* or *body* or *fix* or *size* or *view* or *center* or *up* or *zoom* or *box* or *axes* or *subbox* or *shiny* or *fsaa* or *ssao*
.. parsed-literal::
@ -85,6 +85,8 @@ Syntax
diam = diameter of subdomain lines as fraction of shortest box length
*shiny* value = sfactor = shinyness of spheres and cylinders
sfactor = shinyness of spheres and cylinders from 0.0 to 1.0
*fsaa* arg = yes/no
yes/no = do or do not apply anti-aliasing
*ssao* value = shading seed dfactor = SSAO depth shading
shading = *yes* or *no* = turn depth shading on/off
seed = random # seed (positive integer)
@ -597,13 +599,47 @@ image will appear. The *sfactor* value must be a value 0.0 <=
*sfactor* <= 1.0, where *sfactor* = 1 is a highly reflective surface
and *sfactor* = 0 is a rough non-shiny surface.
The *ssao* keyword turns on/off a screen space ambient occlusion
(SSAO) model for depth shading. If *yes* is set, then atoms further
away from the viewer are darkened via a randomized process, which is
perceived as depth. The calculation of this effect can increase the
cost of computing the image by roughly 2x. The strength of the effect
can be scaled by the *dfactor* parameter. If *no* is set, no depth
shading is performed.
.. versionadded:: TBD
The *fsaa* keyword can be used with the dump image command to improve
the image quality by enabling full scene anti-aliasing. Internally the
image is rendered at twice the width and height and then scaled down by
computing the average of each 2x2 block of pixels to produce a single
pixel in the final image at the original size. This produces images with
smoother, less ragged edges. The application of this algorithm can
increase the cost of computing the image by about 3x or more.
The *ssao* keyword turns on/off a screen space ambient occlusion (SSAO)
model for depth shading. If *yes* is set, then atoms further away from
the viewer are darkened via a randomized process, which is perceived as
depth. The strength of the effect can be scaled by the *dfactor*
parameter. If *no* is set, no depth shading is performed. The
calculation of this effect can increase the cost of computing the image
substantially, by 5x or more, especially with larger images. When used
in combination with the *fsaa* keyword, the computational cost of depth
shading is particularly large.
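To make the quality/cost trade-off concrete, a short hedged example
(hypothetical dump ID, interval, image size, and SSAO parameters)
enabling both options:

.. code-block:: LAMMPS

   # anti-aliased 1024x768 images with SSAO depth shading every 250 steps
   dump 3 all image 250 img.*.jpg type type size 1024 768 fsaa yes ssao yes 4539 0.6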
----------
Image Quality Settings
""""""""""""""""""""""
The two keywords *fsaa* and *ssao* can be used to improve the image
quality at the expense of additional computational cost to render the
images. The images below show, from left to right, the same render
with default settings, with *fsaa* added, with *ssao* added, and with
both keywords added.
.. |imagequality1| image:: JPG/image.default.png
:width: 24%
.. |imagequality2| image:: JPG/image.fsaa.png
:width: 24%
.. |imagequality3| image:: JPG/image.ssao.png
:width: 24%
.. |imagequality4| image:: JPG/image.both.png
:width: 24%
|imagequality1| |imagequality2| |imagequality3| |imagequality4|
----------
@ -1051,6 +1087,7 @@ The defaults for the dump_modify keywords specific to dump image and dump movie
* boxcolor = yellow
* color = 140 color names are pre-defined as listed below
* framerate = 24
* fsaa = no
* gmap = min max cf 0.0 2 min blue max red
----------

View File

@ -124,17 +124,6 @@ Description
Modify the parameters of a previously defined dump command. Not all
parameters are relevant to all dump styles.
As explained on the :doc:`dump <dump>` doc page, the *atom/mpiio*,
*custom/mpiio*, and *xyz/mpiio* dump styles are identical in command
syntax and in the format of the dump files they create, to the
corresponding styles without "mpiio", except the single dump file they
produce is written in parallel via the MPI-IO library. Thus if a
dump_modify option below is valid for the *atom* style, it is also
valid for the *atom/mpiio* style, and similarly for the other styles
which allow for use of MPI-IO.
----------
Unless otherwise noted, the following keywords apply to all the
various dump styles, including the :doc:`dump image <dump_image>` and
:doc:`dump movie <dump_image>` styles.
@ -181,19 +170,20 @@ extra buffering.
.. versionadded:: 4May2022
The *colname* keyword can be used to change the default header keyword
for dump styles: *atom*, *custom*, *cfg*, and *local* and their compressed,
ADIOS, and MPIIO variants. The setting for *ID string* replaces the default
text with the provided string. *ID* can be a positive integer when it
represents the column number counting from the left, a negative integer
when it represents the column number from the right (i.e. -1 is the last
column/keyword), or a custom dump keyword (or compute, fix, property, or
variable reference) and then it replaces the string for that specific
keyword. For *atom* dump styles only the keywords "id", "type", "x",
"y", "z", "ix", "iy", "iz" can be accessed via string regardless of
whether scaled or unwrapped coordinates were enabled or disabled, and
it always assumes 8 columns for indexing regardless of whether image
flags are enabled or not. For dump style *cfg* only changes to the
"auxiliary" keywords (6th or later keyword) will become visible.
for dump styles: *atom*, *custom*, *cfg*, and *local* and their
compressed and ADIOS variants. The setting for *ID string* replaces the
default text with the provided string. *ID* can be a positive integer
when it represents the column number counting from the left, a negative
integer when it represents the column number from the right (i.e. -1 is
the last column/keyword), or a custom dump keyword (or compute, fix,
property, or variable reference) and then it replaces the string for
that specific keyword. For *atom* dump styles only the keywords "id",
"type", "x", "y", "z", "ix", "iy", "iz" can be accessed via string
regardless of whether scaled or unwrapped coordinates were enabled or
disabled, and it always assumes 8 columns for indexing regardless of
whether image flags are enabled or not. For dump style *cfg* only
changes to the "auxiliary" keywords (6th or later keyword) will become
visible.
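A brief hedged illustration of this syntax (hypothetical dump ID and
header strings):

.. code-block:: LAMMPS

   # rename the first column, the last column, and the column for "vx"
   dump_modify 1 colname 1 atom-id colname -1 last-col colname vx vel-x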
The *colname* keyword can be used multiple times. If multiple *colname*
settings refer to the same keyword, the last setting has precedence. A
@ -417,10 +407,10 @@ performed with dump style *xtc*\ .
----------
The *format* keyword can be used to change the default numeric format output
by the text-based dump styles: *atom*, *local*, *custom*, *cfg*, and
*xyz* styles, and their MPIIO variants. Only the *line* or *none*
options can be used with the *atom* and *xyz* styles.
The *format* keyword can be used to change the default numeric format
output by the text-based dump styles *atom*, *local*, *custom*, *cfg*,
and *xyz*. Only the *line* or *none* options can be used with the
*atom* and *xyz* styles.
All the specified format strings are C-style formats, such as used by
the C/C++ printf() command. The *line* keyword takes a single

View File

@ -19,7 +19,6 @@ Examples
read_restart save.10000
read_restart restart.*
read_restart restart.*.mpiio
Description
"""""""""""
@ -120,22 +119,6 @@ different the number of processors in the current LAMMPS simulation.
This can be a fast mode of input on parallel machines that support
parallel I/O.
A restart file can also be read in parallel as one large binary file
via the MPI-IO library, assuming it was also written with MPI-IO.
MPI-IO is part of the MPI standard for versions 2.0 and above. Using
MPI-IO requires two steps. First, build LAMMPS with its MPIIO package
installed, e.g.
.. code-block:: bash
make yes-mpiio # installs the MPIIO package
make mpi # build LAMMPS for your platform
Second, use a restart filename which contains ".mpiio". Note that it
does not have to end in ".mpiio", just contain those characters.
Unlike MPI-IO dump files, a particular restart file must be both
written and read using MPI-IO.
----------
Here is the list of information included in a restart file, which
@ -268,8 +251,7 @@ information about these bonds is written to the restart file.
Restrictions
""""""""""""
To write and read restart files in parallel with MPI-IO, the MPIIO
package must be installed.
none
Related commands
""""""""""""""""

View File

@ -33,7 +33,6 @@ Examples
restart 0
restart 1000 poly.restart
restart 1000 poly.restart.mpiio
restart 1000 restart.*.equil
restart 10000 poly.%.1 poly.%.2 nfile 10
restart v_mystep poly.restart
@ -81,21 +80,6 @@ of output and subsequent input on parallel machines that support
parallel I/O. The optional *fileper* and *nfile* keywords discussed
below can alter the number of files written.
The restart file can also be written in parallel as one large binary
file via the MPI-IO library, which is part of the MPI standard for
versions 2.0 and above. Using MPI-IO requires two steps. First,
build LAMMPS with its MPIIO package installed, e.g.
.. code-block:: bash
make yes-mpiio # installs the MPIIO package
make mpi # build LAMMPS for your platform
Second, use a restart filename which contains ".mpiio". Note that it
does not have to end in ".mpiio", just contain those characters.
Unlike MPI-IO dump files, a particular restart file must be both
written and read using MPI-IO.
Restart files are written on timesteps that are a multiple of N but
not on the first timestep of a run or minimization. You can use the
:doc:`write_restart <write_restart>` command to write a restart file
@ -104,15 +88,17 @@ timestep of a run unless it is a multiple of N. A restart file is
written on the last timestep of a minimization if N > 0 and the
minimization converges.
Instead of a numeric value, N can be specified as an :doc:`equal-style variable <variable>`, which should be specified as v_name, where
name is the variable name. In this case, the variable is evaluated at
the beginning of a run to determine the next timestep at which a
restart file will be written out. On that timestep, the variable will
be evaluated again to determine the next timestep, etc. Thus the
variable should return timestep values. See the stagger() and
logfreq() and stride() math functions for :doc:`equal-style variables <variable>`, as examples of useful functions to use in
this context. Other similar math functions could easily be added as
options for :doc:`equal-style variables <variable>`.
Instead of a numeric value, N can be specified as an :doc:`equal-style
variable <variable>`, which should be specified as v_name, where name is
the variable name. In this case, the variable is evaluated at the
beginning of a run to determine the next timestep at which a restart
file will be written out. On that timestep, the variable will be
evaluated again to determine the next timestep, etc. Thus the variable
should return timestep values. See the stagger() and logfreq() and
stride() math functions for :doc:`equal-style variables <variable>`, as
examples of useful functions to use in this context. Other similar math
functions could easily be added as options for :doc:`equal-style
variables <variable>`.
For example, the following commands will write restart files
every step from 1100 to 1200, and could be useful for debugging
@ -170,8 +156,7 @@ next 3 processors and write it to a restart file.
Restrictions
""""""""""""
To write and read restart files in parallel with MPI-IO, the MPIIO
package must be installed.
none
Related commands
""""""""""""""""

View File

@ -27,7 +27,6 @@ Examples
.. code-block:: LAMMPS
write_restart restart.equil
write_restart restart.equil.mpiio
write_restart poly.%.* nfile 10
Description
@ -53,32 +52,6 @@ output and subsequent input on parallel machines that support parallel
I/O. The optional *fileper* and *nfile* keywords discussed below can
alter the number of files written.
The restart file can also be written in parallel as one large binary
file via the MPI-IO library, which is part of the MPI standard for
versions 2.0 and above. Using MPI-IO requires two steps. First,
build LAMMPS with its MPIIO package installed, e.g.
.. tabs::
.. tab:: CMake build
.. code-block:: bash
cmake . -DPKG_MPIIO=on # enables the MPIIO package in the build folder
cmake --build . # recompiles LAMMPS with the package code included
.. tab:: Traditional make
.. code-block:: bash
make yes-mpiio # installs the MPIIO package
make mpi # build LAMMPS for your platform
Second, use a restart filename which contains ".mpiio". Note that it
does not have to end in ".mpiio", just contain those characters.
Unlike MPI-IO dump files, a particular restart file must be both
written and read using MPI-IO.
Restart files can be read by a :doc:`read_restart <read_restart>`
command to restart a simulation from a particular state. Because the
file is binary (to enable exact restarts), it may not be readable on
@ -128,9 +101,6 @@ before the restart file is written. This means that your system must
be ready to perform a simulation before using this command (force
fields setup, atom masses initialized, etc).
To write and read restart files in parallel with MPI-IO, the MPIIO
package must be installed.
Related commands
""""""""""""""""

View File

@ -1196,6 +1196,7 @@ Freitas
Frenkel
Friedrichs
fs
fsaa
fsh
fstyle
fsw

View File

@ -39,7 +39,7 @@ fix INT all nve
###############################################################################
# OUTPUT
###############################################################################
dump 1 all atom 1 dump.hdnnp
# dump 1 all atom 1 dump.hdnnp
###############################################################################
# SIMULATION

View File

@ -0,0 +1,53 @@
###############################################################################
# MD simulation for HDNNP water
###############################################################################
###############################################################################
# VARIABLES
###############################################################################
clear
# Configuration files
variable cfgFile string "data.H2O-360mol"
# Timesteps
variable numSteps equal 10
variable dt equal 0.0005
# HDNNP
variable hdnnpCutoff equal 6.36
variable hdnnpDir string "hdnnp-data"
###############################################################################
# GENERAL SETUP
###############################################################################
units metal
boundary p p p
atom_style atomic
region box block 0.0 2.2695686722465727E+01 0.0 2.3586033624598713E+01 0.0 2.2237130028217017E+01
create_box 3 box
mass 1 1.0
read_data ${cfgFile} add append offset 1 0 0 0 0
timestep ${dt}
thermo 1
###############################################################################
# HDNNP
###############################################################################
pair_style hybrid lj/cut 6.0 hdnnp ${hdnnpCutoff} dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_coeff * * hdnnp NULL H O
pair_coeff 1 * lj/cut 0.1 3.0
###############################################################################
# INTEGRATOR
###############################################################################
fix INT all nve
###############################################################################
# OUTPUT
###############################################################################
#dump 1 all atom 1 dump.hdnnp
###############################################################################
# SIMULATION
###############################################################################
run ${numSteps}

View File

@ -0,0 +1,667 @@
LAMMPS (2 Aug 2023 - Development - patch_2Aug2023-264-g174825fe8c)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
###############################################################################
# MD simulation for HDNNP water
###############################################################################
###############################################################################
# VARIABLES
###############################################################################
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# Configuration files
variable cfgFile string "data.H2O-360mol"
# Timesteps
variable numSteps equal 10
variable dt equal 0.0005
# HDNNP
variable hdnnpCutoff equal 6.36
variable hdnnpDir string "hdnnp-data"
###############################################################################
# GENERAL SETUP
###############################################################################
units metal
boundary p p p
atom_style atomic
read_data ${cfgFile}
read_data data.H2O-360mol
Reading data file ...
orthogonal box = (0 0 0) to (22.695687 23.586034 22.23713)
1 by 1 by 1 MPI processor grid
reading atoms ...
1080 atoms
read_data CPU = 0.004 seconds
timestep ${dt}
timestep 0.0005
thermo 1
###############################################################################
# HDNNP
###############################################################################
pair_style hdnnp ${hdnnpCutoff} dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hdnnp 6.36 dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hdnnp 6.36 dir hdnnp-data showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_coeff * * H O
###############################################################################
# INTEGRATOR
###############################################################################
fix INT all nve
###############################################################################
# OUTPUT
###############################################################################
# dump 1 all atom 1 dump.hdnnp
###############################################################################
# SIMULATION
###############################################################################
run ${numSteps}
run 10
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- ML-HDNNP package: doi:10.1021/acs.jctc.8b00770
@Article{Singraber19,
author = {Singraber, Andreas and Behler, J{\"o}rg and Dellago, Christoph},
title = {Library-Based {LAMMPS} Implementation of High-Dimensional
Neural Network Potentials},
year = {2019},
month = mar,
volume = {15},
pages = {1827--1840},
doi = {10.1021/acs.jctc.8b00770},
journal = {J.~Chem.\ Theory Comput.},
number = {3}
}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
*******************************************************************************
WELCOME TO n²p², A SOFTWARE PACKAGE FOR NEURAL NETWORK POTENTIALS!
------------------------------------------------------------------
n²p² version (from git): patch_2Aug2023-264-g174825fe8c
(version.h): v2.2.0
------------------------------------------------------------
Git branch : collected-small-changes
Git revision : 174825fe8c9493cb252d7b9e8dafdcc5d74be96d
Compile date/time : Aug 23 2023 08:43:11
------------------------------------------------------------
Features/Flags:
------------------------------------------------------------
Symmetry function groups : enabled
Symmetry function cache : enabled
Timing function available : available
Asymmetric polynomial SFs : available
SF low neighbor number check : enabled
SF derivative memory layout : reduced
MPI explicitly disabled : no
------------------------------------------------------------
Please cite the following papers when publishing results obtained with n²p²:
-------------------------------------------------------------------------------
* General citation for n²p² and the LAMMPS interface:
Singraber, A.; Behler, J.; Dellago, C.
Library-Based LAMMPS Implementation of High-Dimensional
Neural Network Potentials.
J. Chem. Theory Comput. 2019 15 (3), 1827-1840.
https://doi.org/10.1021/acs.jctc.8b00770
-------------------------------------------------------------------------------
* Additionally, if you use the NNP training features of n²p²:
Singraber, A.; Morawietz, T.; Behler, J.; Dellago, C.
Parallel Multistream Training of High-Dimensional Neural
Network Potentials.
J. Chem. Theory Comput. 2019, 15 (5), 3075-3092.
https://doi.org/10.1021/acs.jctc.8b01092
-------------------------------------------------------------------------------
* Additionally, if polynomial symmetry functions are used:
Bircher, M. P.; Singraber, A.; Dellago, C.
Improved Description of Atomic Environments Using Low-Cost
Polynomial Functions with Compact Support.
arXiv:2010.14414 [cond-mat, physics:physics] 2020.
https://arxiv.org/abs/2010.14414
*******************************************************************************
*** SETUP: SETTINGS FILE ******************************************************
Settings file name: hdnnp-data/input.nn
Read 120 lines.
Found 70 lines with keywords.
This settings file defines a short-range only NNP.
*******************************************************************************
*** SETUP: NORMALIZATION ******************************************************
Data set normalization is used.
Mean energy per atom : -2.5521343547039809E+01
Conversion factor energy : 2.4265748255366972E+02
Conversion factor length : 5.8038448995319847E+00
*******************************************************************************
*** SETUP: ELEMENT MAP ********************************************************
Number of element strings found: 2
Element 0: H ( 1)
Element 1: O ( 8)
*******************************************************************************
*** SETUP: ELEMENTS ***********************************************************
Number of elements is consistent: 2
Atomic energy offsets per element:
Element 0: 0.00000000E+00
Element 1: 0.00000000E+00
Energy offsets are automatically subtracted from reference energies.
*******************************************************************************
*** SETUP: CUTOFF FUNCTIONS ***************************************************
Parameter alpha for inner cutoff: 0.000000
Inner cutoff = Symmetry function cutoff * alpha
Equal cutoff function type for all symmetry functions:
CutoffFunction::CT_TANHU (2)
f(r) = tanh^3(1 - r/rc)
*******************************************************************************
*** SETUP: SYMMETRY FUNCTIONS *************************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
Short range atomic symmetry functions element H :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 H 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 51
2 H 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 61
3 H 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 52
4 H 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 62
5 H 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 53
6 H 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 63
7 H 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 54
8 H 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 64
9 H 2 ct2 O 1.500E-01 9.000E-01 1.200E+01 0.00 65
10 H 2 ct2 H 1.500E-01 1.900E+00 1.200E+01 0.00 55
11 H 2 ct2 O 3.000E-01 9.000E-01 1.200E+01 0.00 66
12 H 2 ct2 H 3.000E-01 1.900E+00 1.200E+01 0.00 56
13 H 2 ct2 O 6.000E-01 9.000E-01 1.200E+01 0.00 67
14 H 2 ct2 H 6.000E-01 1.900E+00 1.200E+01 0.00 57
15 H 2 ct2 O 1.500E+00 9.000E-01 1.200E+01 0.00 68
16 H 2 ct2 H 1.500E+00 1.900E+00 1.200E+01 0.00 58
17 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 115
18 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 114
19 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 105
20 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 103
21 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 100
22 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 113
23 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 98
24 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 112
25 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 95
26 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 93
27 H 3 ct2 H O 2.000E-01 0.000E+00 1.200E+01 1 1.0 0.00 90
-------------------------------------------------------------------------------------------------
Short range atomic symmetry functions element O :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 O 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 70
2 O 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 80
3 O 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 71
4 O 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 81
5 O 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 72
6 O 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 82
7 O 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 73
8 O 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 83
9 O 2 ct2 H 1.500E-01 9.000E-01 1.200E+01 0.00 74
10 O 2 ct2 O 1.500E-01 4.000E+00 1.200E+01 0.00 84
11 O 2 ct2 H 3.000E-01 9.000E-01 1.200E+01 0.00 75
12 O 2 ct2 O 3.000E-01 4.000E+00 1.200E+01 0.00 85
13 O 2 ct2 H 6.000E-01 9.000E-01 1.200E+01 0.00 76
14 O 2 ct2 O 6.000E-01 4.000E+00 1.200E+01 0.00 86
15 O 2 ct2 H 1.500E+00 9.000E-01 1.200E+01 0.00 77
16 O 2 ct2 O 1.500E+00 4.000E+00 1.200E+01 0.00 87
17 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 110
18 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 120
19 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 109
20 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 119
21 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 104
22 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 102
23 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 99
24 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 108
25 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 118
26 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 97
27 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 107
28 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 117
29 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 94
30 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 92
-------------------------------------------------------------------------------------------------
Minimum cutoff radius for element H: 12.000000
Minimum cutoff radius for element O: 12.000000
Maximum cutoff radius (global) : 12.000000
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION MEMORY *******************************************
Symmetry function derivatives memory table for element H :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 15 of 27 ( 55.6 %)
- O: 19 of 27 ( 70.4 %)
-------------------------------------------------------------------------------
Symmetry function derivatives memory table for element O :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 18 of 30 ( 60.0 %)
- O: 16 of 30 ( 53.3 %)
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION CACHE ********************************************
Element H: in total 4 caches, used 17.00 times on average.
Element O: in total 4 caches, used 17.00 times on average.
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION GROUPS *******************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
mi ..... Member index.
sfi .... Symmetry function index.
e ...... Recalculate exponential term.
Short range atomic symmetry function groups element H :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 H 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 51 1 1
- - - - - 1.000E-02 0.000E+00 - - 52 2 3
- - - - - 3.000E-02 0.000E+00 - - 53 3 5
- - - - - 6.000E-02 0.000E+00 - - 54 4 7
- - - - - 1.500E-01 1.900E+00 - - 55 5 10
- - - - - 3.000E-01 1.900E+00 - - 56 6 12
- - - - - 6.000E-01 1.900E+00 - - 57 7 14
- - - - - 1.500E+00 1.900E+00 - - 58 8 16
2 H 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 61 1 2
- - - - - 1.000E-02 0.000E+00 - - 62 2 4
- - - - - 3.000E-02 0.000E+00 - - 63 3 6
- - - - - 6.000E-02 0.000E+00 - - 64 4 8
- - - - - 1.500E-01 9.000E-01 - - 65 5 9
- - - - - 3.000E-01 9.000E-01 - - 66 6 11
- - - - - 6.000E-01 9.000E-01 - - 67 7 13
- - - - - 1.500E+00 9.000E-01 - - 68 8 15
3 H 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 105 1 19 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 103 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 100 3 21 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 98 4 23 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 95 5 25 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 93 6 26 0
- - - - - - 2.000E-01 0.000E+00 - 1 1.0 - 90 7 27 1
4 H 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 115 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 114 2 18 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 113 3 22 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 112 4 24 0
----------------------------------------------------------------------------------------------------------
Short range atomic symmetry function groups element O :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 O 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 70 1 1
- - - - - 1.000E-02 0.000E+00 - - 71 2 3
- - - - - 3.000E-02 0.000E+00 - - 72 3 5
- - - - - 6.000E-02 0.000E+00 - - 73 4 7
- - - - - 1.500E-01 9.000E-01 - - 74 5 9
- - - - - 3.000E-01 9.000E-01 - - 75 6 11
- - - - - 6.000E-01 9.000E-01 - - 76 7 13
- - - - - 1.500E+00 9.000E-01 - - 77 8 15
2 O 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 80 1 2
- - - - - 1.000E-02 0.000E+00 - - 81 2 4
- - - - - 3.000E-02 0.000E+00 - - 82 3 6
- - - - - 6.000E-02 0.000E+00 - - 83 4 8
- - - - - 1.500E-01 4.000E+00 - - 84 5 10
- - - - - 3.000E-01 4.000E+00 - - 85 6 12
- - - - - 6.000E-01 4.000E+00 - - 86 7 14
- - - - - 1.500E+00 4.000E+00 - - 87 8 16
3 O 3 ct2 H H * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 104 1 21 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 102 2 22 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 99 3 23 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 97 4 26 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 94 5 29 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 92 6 30 0
4 O 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 110 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 109 2 19 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 108 3 24 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 107 4 27 0
5 O 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 120 1 18 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 119 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 118 3 25 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 117 4 28 0
----------------------------------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: NEURAL NETWORKS ****************************************************
Normalize neurons (all elements): 0
-------------------------------------------------------------------------------
Atomic short range NN for element H :
Number of weights : 1325
Number of biases : 51
Number of connections: 1376
Architecture 27 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
-------------------------------------------------------------------------------
Atomic short range NN for element O :
Number of weights : 1400
Number of biases : 51
Number of connections: 1451
Architecture 30 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
28 G
29 G
30 G
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION SCALING ******************************************
Equal scaling type for all symmetry functions:
Scaling type::ST_SCALECENTER (3)
Gs = Smin + (Smax - Smin) * (G - Gmean) / (Gmax - Gmin)
Smin = 0.000000
Smax = 1.000000
Symmetry function scaling statistics from file: hdnnp-data/scaling.data
-------------------------------------------------------------------------------
Abbreviations:
--------------
ind ..... Symmetry function index.
min ..... Minimum symmetry function value.
max ..... Maximum symmetry function value.
mean .... Mean symmetry function value.
sigma ... Standard deviation of symmetry function values.
sf ...... Scaling factor for derivatives.
Smin .... Desired minimum scaled symmetry function value.
Smax .... Desired maximum scaled symmetry function value.
t ....... Scaling type.
Scaling data for symmetry functions element H :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.09E+00 9.62E+00 2.27E+00 6.79E-01 1.17E-01 0.00 1.00 3
2 7.33E-01 5.00E+00 1.33E+00 3.39E-01 2.34E-01 0.00 1.00 3
3 7.60E-01 7.14E+00 1.65E+00 5.08E-01 1.57E-01 0.00 1.00 3
4 5.48E-01 3.77E+00 1.02E+00 2.54E-01 3.11E-01 0.00 1.00 3
5 4.01E-01 4.15E+00 9.09E-01 2.98E-01 2.67E-01 0.00 1.00 3
6 3.62E-01 2.27E+00 6.49E-01 1.48E-01 5.25E-01 0.00 1.00 3
7 1.89E-01 2.23E+00 4.57E-01 1.60E-01 4.90E-01 0.00 1.00 3
8 2.67E-01 1.32E+00 4.24E-01 8.05E-02 9.49E-01 0.00 1.00 3
9 2.45E-01 9.48E-01 3.62E-01 5.30E-02 1.42E+00 0.00 1.00 3
10 2.22E-01 2.76E+00 5.39E-01 2.01E-01 3.94E-01 0.00 1.00 3
11 1.47E-01 5.56E-01 2.68E-01 2.62E-02 2.45E+00 0.00 1.00 3
12 9.91E-02 1.73E+00 2.96E-01 1.16E-01 6.14E-01 0.00 1.00 3
13 6.51E-02 3.45E-01 1.85E-01 1.97E-02 3.57E+00 0.00 1.00 3
14 3.17E-02 9.13E-01 1.50E-01 5.35E-02 1.13E+00 0.00 1.00 3
15 2.92E-03 2.65E-01 7.65E-02 1.88E-02 3.82E+00 0.00 1.00 3
16 3.21E-04 2.87E-01 4.58E-02 2.33E-02 3.49E+00 0.00 1.00 3
17 2.47E-04 1.38E-01 1.77E-02 9.75E-03 7.23E+00 0.00 1.00 3
18 5.10E-03 5.83E-01 2.39E-02 3.78E-02 1.73E+00 0.00 1.00 3
19 3.23E-04 2.16E-01 1.71E-02 1.40E-02 4.63E+00 0.00 1.00 3
20 4.96E-02 1.69E+00 1.45E-01 1.10E-01 6.11E-01 0.00 1.00 3
21 3.41E-03 3.16E-01 1.84E-02 2.01E-02 3.20E+00 0.00 1.00 3
22 1.31E-04 1.03E-01 6.37E-03 6.61E-03 9.76E+00 0.00 1.00 3
23 3.38E-02 9.16E-01 8.13E-02 5.79E-02 1.13E+00 0.00 1.00 3
24 4.17E-04 1.58E-01 4.66E-03 9.86E-03 6.35E+00 0.00 1.00 3
25 7.35E-04 5.92E-02 3.70E-03 3.31E-03 1.71E+01 0.00 1.00 3
26 8.98E-03 1.94E-01 2.41E-02 1.10E-02 5.40E+00 0.00 1.00 3
27 2.12E-04 8.78E-03 2.06E-03 5.88E-04 1.17E+02 0.00 1.00 3
-------------------------------------------------------------------------------
Scaling data for symmetry functions element O :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.51E+00 1.00E+01 2.65E+00 6.78E-01 1.18E-01 0.00 1.00 3
2 4.44E-01 4.62E+00 9.66E-01 3.37E-01 2.39E-01 0.00 1.00 3
3 1.19E+00 7.53E+00 2.03E+00 5.06E-01 1.58E-01 0.00 1.00 3
4 2.76E-01 3.39E+00 6.59E-01 2.50E-01 3.21E-01 0.00 1.00 3
5 8.06E-01 4.54E+00 1.30E+00 2.94E-01 2.68E-01 0.00 1.00 3
6 1.05E-01 1.89E+00 3.07E-01 1.42E-01 5.60E-01 0.00 1.00 3
7 5.69E-01 2.62E+00 8.48E-01 1.57E-01 4.89E-01 0.00 1.00 3
8 2.33E-02 9.36E-01 1.11E-01 6.98E-02 1.10E+00 0.00 1.00 3
9 5.14E-01 1.85E+00 7.25E-01 9.80E-02 7.46E-01 0.00 1.00 3
10 1.11E-01 2.91E+00 4.75E-01 2.34E-01 3.57E-01 0.00 1.00 3
11 3.53E-01 1.07E+00 5.35E-01 4.52E-02 1.39E+00 0.00 1.00 3
12 3.04E-02 2.53E+00 3.17E-01 2.10E-01 4.00E-01 0.00 1.00 3
13 1.60E-01 6.63E-01 3.70E-01 3.08E-02 1.99E+00 0.00 1.00 3
14 2.78E-03 2.30E+00 1.77E-01 1.86E-01 4.35E-01 0.00 1.00 3
15 9.56E-03 3.91E-01 1.53E-01 2.79E-02 2.62E+00 0.00 1.00 3
16 3.75E-06 2.04E+00 5.41E-02 1.43E-01 4.91E-01 0.00 1.00 3
17 2.47E-03 3.43E-01 1.67E-02 2.19E-02 2.93E+00 0.00 1.00 3
18 1.74E-05 5.63E-02 9.55E-04 3.36E-03 1.78E+01 0.00 1.00 3
19 5.48E-02 3.02E+00 2.04E-01 2.01E-01 3.37E-01 0.00 1.00 3
20 1.38E-03 4.99E-01 1.28E-02 3.18E-02 2.01E+00 0.00 1.00 3
21 6.69E-03 2.67E-01 3.09E-02 1.71E-02 3.84E+00 0.00 1.00 3
22 1.70E-02 1.42E+00 7.63E-02 9.29E-02 7.14E-01 0.00 1.00 3
23 1.98E-02 4.08E-01 4.88E-02 2.55E-02 2.58E+00 0.00 1.00 3
24 5.28E-04 2.33E-01 7.21E-03 1.45E-02 4.30E+00 0.00 1.00 3
25 1.11E-05 3.53E-02 4.25E-04 2.05E-03 2.83E+01 0.00 1.00 3
26 1.60E-02 8.22E-01 5.08E-02 5.28E-02 1.24E+00 0.00 1.00 3
27 3.99E-03 7.86E-01 3.69E-02 5.05E-02 1.28E+00 0.00 1.00 3
28 4.05E-05 9.84E-02 1.21E-03 5.79E-03 1.02E+01 0.00 1.00 3
29 6.04E-03 9.93E-02 1.62E-02 5.52E-03 1.07E+01 0.00 1.00 3
30 2.96E-03 1.55E-01 1.16E-02 8.94E-03 6.59E+00 0.00 1.00 3
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION STATISTICS ***************************************
Equal symmetry function statistics for all elements.
Collect min/max/mean/sigma : 0
Collect extrapolation warnings : 1
Write extrapolation warnings immediately to stderr: 0
Halt on any extrapolation warning : 0
*******************************************************************************
*** SETUP: NEURAL NETWORK WEIGHTS *********************************************
Short NN weight file name format: hdnnp-data/weights.%03d.data
Setting short NN weights for element H from file: hdnnp-data/weights.001.data
Setting short NN weights for element O from file: hdnnp-data/weights.008.data
*******************************************************************************
*** SETUP: LAMMPS INTERFACE ***************************************************
Individual extrapolation warnings will not be shown.
Extrapolation warning summary will be shown every 5 timesteps.
The simulation will be stopped when 100 extrapolation warnings are exceeded.
Extrapolation warnings are accumulated over all time steps.
-------------------------------------------------------------------------------
CAUTION: If the LAMMPS unit system differs from the one used
during NN training, appropriate conversion factors
must be provided (see keywords cflength and cfenergy).
Length unit conversion factor: 1.8897261327999999E+00
Energy unit conversion factor: 3.6749325399999998E-02
Checking consistency of cutoff radii (in LAMMPS units):
LAMMPS Cutoff (via pair_coeff) : 6.360E+00
Maximum symmetry function cutoff: 6.350E+00
Cutoff radii are consistent.
-------------------------------------------------------------------------------
Element mapping string from LAMMPS to n2p2: "1:H,2:O"
CAUTION: Please ensure that this mapping between LAMMPS
atom types and NNP elements is consistent:
---------------------------
LAMMPS type | NNP element
---------------------------
1 <-> H ( 1)
2 <-> O ( 8)
---------------------------
NNP setup for LAMMPS completed.
*******************************************************************************
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.36
ghost atom cutoff = 8.36
binsize = 4.18, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair hdnnp, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
### NNP EW SUMMARY ### TS: 0 EW 0 EWPERSTEP 0.000e+00
Per MPI rank memory allocation (min/avg/max) = 4.021 | 4.021 | 4.021 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -750069.48 0 -750069.48 -5297.5537
1 8.5815594 -750070.71 0 -750069.51 -5249.2914
2 30.988787 -750073.91 0 -750069.59 -5023.6945
3 58.859866 -750077.88 0 -750069.67 -4427.8346
4 82.576399 -750081.26 0 -750069.74 -3275.4378
### NNP EW SUMMARY ### TS: 5 EW 0 EWPERSTEP 0.000e+00
5 94.968097 -750083.01 0 -750069.76 -1511.6733
6 93.724286 -750082.8 0 -750069.73 709.20465
7 82.243957 -750081.13 0 -750069.66 3020.5084
8 68.611429 -750079.14 0 -750069.57 4922.5176
9 62.314385 -750078.21 0 -750069.51 5933.1543
### NNP EW SUMMARY ### TS: 10 EW 0 EWPERSTEP 0.000e+00
10 69.501045 -750079.21 0 -750069.52 5761.8646
Loop time of 3.2801 on 1 procs for 10 steps with 1080 atoms
Performance: 0.132 ns/day, 182.228 hours/ns, 3.049 timesteps/s, 3.293 katom-step/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.2794 | 3.2794 | 3.2794 | 0.0 | 99.98
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.00030785 | 0.00030785 | 0.00030785 | 0.0 | 0.01
Output | 0.00018531 | 0.00018531 | 0.00018531 | 0.0 | 0.01
Modify | 0.00013118 | 0.00013118 | 0.00013118 | 0.0 | 0.00
Other | | 9.142e-05 | | | 0.00
Nlocal: 1080 ave 1080 max 1080 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4536 ave 4536 max 4536 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 239270 ave 239270 max 239270 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 239270
Ave neighs/atom = 221.5463
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:03

View File

@ -0,0 +1,667 @@
LAMMPS (2 Aug 2023 - Development - patch_2Aug2023-264-g174825fe8c)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
###############################################################################
# MD simulation for HDNNP water
###############################################################################
###############################################################################
# VARIABLES
###############################################################################
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# Configuration files
variable cfgFile string "data.H2O-360mol"
# Timesteps
variable numSteps equal 10
variable dt equal 0.0005
# HDNNP
variable hdnnpCutoff equal 6.36
variable hdnnpDir string "hdnnp-data"
###############################################################################
# GENERAL SETUP
###############################################################################
units metal
boundary p p p
atom_style atomic
read_data ${cfgFile}
read_data data.H2O-360mol
Reading data file ...
orthogonal box = (0 0 0) to (22.695687 23.586034 22.23713)
2 by 2 by 1 MPI processor grid
reading atoms ...
1080 atoms
read_data CPU = 0.007 seconds
timestep ${dt}
timestep 0.0005
thermo 1
###############################################################################
# HDNNP
###############################################################################
pair_style hdnnp ${hdnnpCutoff} dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hdnnp 6.36 dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hdnnp 6.36 dir hdnnp-data showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_coeff * * H O
###############################################################################
# INTEGRATOR
###############################################################################
fix INT all nve
###############################################################################
# OUTPUT
###############################################################################
# dump 1 all atom 1 dump.hdnnp
###############################################################################
# SIMULATION
###############################################################################
run ${numSteps}
run 10
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- ML-HDNNP package: doi:10.1021/acs.jctc.8b00770
@Article{Singraber19,
author = {Singraber, Andreas and Behler, J{\"o}rg and Dellago, Christoph},
title = {Library-Based {LAMMPS} Implementation of High-Dimensional
Neural Network Potentials},
year = {2019},
month = mar,
volume = {15},
pages = {1827--1840},
doi = {10.1021/acs.jctc.8b00770},
journal = {J.~Chem.\ Theory Comput.},
number = {3}
}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
*******************************************************************************
WELCOME TO n²p², A SOFTWARE PACKAGE FOR NEURAL NETWORK POTENTIALS!
------------------------------------------------------------------
n²p² version (from git): patch_2Aug2023-264-g174825fe8c
(version.h): v2.2.0
------------------------------------------------------------
Git branch : collected-small-changes
Git revision : 174825fe8c9493cb252d7b9e8dafdcc5d74be96d
Compile date/time : Aug 23 2023 08:43:11
------------------------------------------------------------
Features/Flags:
------------------------------------------------------------
Symmetry function groups : enabled
Symmetry function cache : enabled
Timing function available : available
Asymmetric polynomial SFs : available
SF low neighbor number check : enabled
SF derivative memory layout : reduced
MPI explicitly disabled : no
------------------------------------------------------------
Please cite the following papers when publishing results obtained with n²p²:
-------------------------------------------------------------------------------
* General citation for n²p² and the LAMMPS interface:
Singraber, A.; Behler, J.; Dellago, C.
Library-Based LAMMPS Implementation of High-Dimensional
Neural Network Potentials.
J. Chem. Theory Comput. 2019 15 (3), 1827–1840.
https://doi.org/10.1021/acs.jctc.8b00770
-------------------------------------------------------------------------------
* Additionally, if you use the NNP training features of n²p²:
Singraber, A.; Morawietz, T.; Behler, J.; Dellago, C.
Parallel Multistream Training of High-Dimensional Neural
Network Potentials.
J. Chem. Theory Comput. 2019, 15 (5), 3075–3092.
https://doi.org/10.1021/acs.jctc.8b01092
-------------------------------------------------------------------------------
* Additionally, if polynomial symmetry functions are used:
Bircher, M. P.; Singraber, A.; Dellago, C.
Improved Description of Atomic Environments Using Low-Cost
Polynomial Functions with Compact Support.
arXiv:2010.14414 [cond-mat, physics:physics] 2020.
https://arxiv.org/abs/2010.14414
*******************************************************************************
*** SETUP: SETTINGS FILE ******************************************************
Settings file name: hdnnp-data/input.nn
Read 120 lines.
Found 70 lines with keywords.
This settings file defines a short-range only NNP.
*******************************************************************************
*** SETUP: NORMALIZATION ******************************************************
Data set normalization is used.
Mean energy per atom : -2.5521343547039809E+01
Conversion factor energy : 2.4265748255366972E+02
Conversion factor length : 5.8038448995319847E+00
*******************************************************************************
*** SETUP: ELEMENT MAP ********************************************************
Number of element strings found: 2
Element 0: H ( 1)
Element 1: O ( 8)
*******************************************************************************
*** SETUP: ELEMENTS ***********************************************************
Number of elements is consistent: 2
Atomic energy offsets per element:
Element 0: 0.00000000E+00
Element 1: 0.00000000E+00
Energy offsets are automatically subtracted from reference energies.
*******************************************************************************
*** SETUP: CUTOFF FUNCTIONS ***************************************************
Parameter alpha for inner cutoff: 0.000000
Inner cutoff = Symmetry function cutoff * alpha
Equal cutoff function type for all symmetry functions:
CutoffFunction::CT_TANHU (2)
f(r) = tanh^3(1 - r/rc)
*******************************************************************************
*** SETUP: SYMMETRY FUNCTIONS *************************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
Short range atomic symmetry functions element H :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 H 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 51
2 H 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 61
3 H 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 52
4 H 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 62
5 H 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 53
6 H 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 63
7 H 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 54
8 H 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 64
9 H 2 ct2 O 1.500E-01 9.000E-01 1.200E+01 0.00 65
10 H 2 ct2 H 1.500E-01 1.900E+00 1.200E+01 0.00 55
11 H 2 ct2 O 3.000E-01 9.000E-01 1.200E+01 0.00 66
12 H 2 ct2 H 3.000E-01 1.900E+00 1.200E+01 0.00 56
13 H 2 ct2 O 6.000E-01 9.000E-01 1.200E+01 0.00 67
14 H 2 ct2 H 6.000E-01 1.900E+00 1.200E+01 0.00 57
15 H 2 ct2 O 1.500E+00 9.000E-01 1.200E+01 0.00 68
16 H 2 ct2 H 1.500E+00 1.900E+00 1.200E+01 0.00 58
17 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 115
18 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 114
19 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 105
20 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 103
21 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 100
22 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 113
23 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 98
24 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 112
25 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 95
26 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 93
27 H 3 ct2 H O 2.000E-01 0.000E+00 1.200E+01 1 1.0 0.00 90
-------------------------------------------------------------------------------------------------
Short range atomic symmetry functions element O :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 O 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 70
2 O 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 80
3 O 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 71
4 O 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 81
5 O 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 72
6 O 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 82
7 O 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 73
8 O 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 83
9 O 2 ct2 H 1.500E-01 9.000E-01 1.200E+01 0.00 74
10 O 2 ct2 O 1.500E-01 4.000E+00 1.200E+01 0.00 84
11 O 2 ct2 H 3.000E-01 9.000E-01 1.200E+01 0.00 75
12 O 2 ct2 O 3.000E-01 4.000E+00 1.200E+01 0.00 85
13 O 2 ct2 H 6.000E-01 9.000E-01 1.200E+01 0.00 76
14 O 2 ct2 O 6.000E-01 4.000E+00 1.200E+01 0.00 86
15 O 2 ct2 H 1.500E+00 9.000E-01 1.200E+01 0.00 77
16 O 2 ct2 O 1.500E+00 4.000E+00 1.200E+01 0.00 87
17 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 110
18 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 120
19 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 109
20 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 119
21 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 104
22 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 102
23 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 99
24 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 108
25 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 118
26 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 97
27 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 107
28 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 117
29 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 94
30 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 92
-------------------------------------------------------------------------------------------------
Minimum cutoff radius for element H: 12.000000
Minimum cutoff radius for element O: 12.000000
Maximum cutoff radius (global) : 12.000000
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION MEMORY *******************************************
Symmetry function derivatives memory table for element H :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 15 of 27 ( 55.6 %)
- O: 19 of 27 ( 70.4 %)
-------------------------------------------------------------------------------
Symmetry function derivatives memory table for element O :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 18 of 30 ( 60.0 %)
- O: 16 of 30 ( 53.3 %)
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION CACHE ********************************************
Element H: in total 4 caches, used 17.00 times on average.
Element O: in total 4 caches, used 17.00 times on average.
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION GROUPS *******************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
mi ..... Member index.
sfi .... Symmetry function index.
e ...... Recalculate exponential term.
Short range atomic symmetry function groups element H :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 H 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 51 1 1
- - - - - 1.000E-02 0.000E+00 - - 52 2 3
- - - - - 3.000E-02 0.000E+00 - - 53 3 5
- - - - - 6.000E-02 0.000E+00 - - 54 4 7
- - - - - 1.500E-01 1.900E+00 - - 55 5 10
- - - - - 3.000E-01 1.900E+00 - - 56 6 12
- - - - - 6.000E-01 1.900E+00 - - 57 7 14
- - - - - 1.500E+00 1.900E+00 - - 58 8 16
2 H 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 61 1 2
- - - - - 1.000E-02 0.000E+00 - - 62 2 4
- - - - - 3.000E-02 0.000E+00 - - 63 3 6
- - - - - 6.000E-02 0.000E+00 - - 64 4 8
- - - - - 1.500E-01 9.000E-01 - - 65 5 9
- - - - - 3.000E-01 9.000E-01 - - 66 6 11
- - - - - 6.000E-01 9.000E-01 - - 67 7 13
- - - - - 1.500E+00 9.000E-01 - - 68 8 15
3 H 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 105 1 19 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 103 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 100 3 21 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 98 4 23 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 95 5 25 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 93 6 26 0
- - - - - - 2.000E-01 0.000E+00 - 1 1.0 - 90 7 27 1
4 H 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 115 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 114 2 18 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 113 3 22 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 112 4 24 0
----------------------------------------------------------------------------------------------------------
Short range atomic symmetry function groups element O :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 O 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 70 1 1
- - - - - 1.000E-02 0.000E+00 - - 71 2 3
- - - - - 3.000E-02 0.000E+00 - - 72 3 5
- - - - - 6.000E-02 0.000E+00 - - 73 4 7
- - - - - 1.500E-01 9.000E-01 - - 74 5 9
- - - - - 3.000E-01 9.000E-01 - - 75 6 11
- - - - - 6.000E-01 9.000E-01 - - 76 7 13
- - - - - 1.500E+00 9.000E-01 - - 77 8 15
2 O 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 80 1 2
- - - - - 1.000E-02 0.000E+00 - - 81 2 4
- - - - - 3.000E-02 0.000E+00 - - 82 3 6
- - - - - 6.000E-02 0.000E+00 - - 83 4 8
- - - - - 1.500E-01 4.000E+00 - - 84 5 10
- - - - - 3.000E-01 4.000E+00 - - 85 6 12
- - - - - 6.000E-01 4.000E+00 - - 86 7 14
- - - - - 1.500E+00 4.000E+00 - - 87 8 16
3 O 3 ct2 H H * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 104 1 21 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 102 2 22 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 99 3 23 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 97 4 26 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 94 5 29 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 92 6 30 0
4 O 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 110 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 109 2 19 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 108 3 24 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 107 4 27 0
5 O 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 120 1 18 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 119 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 118 3 25 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 117 4 28 0
----------------------------------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: NEURAL NETWORKS ****************************************************
Normalize neurons (all elements): 0
-------------------------------------------------------------------------------
Atomic short range NN for element H :
Number of weights : 1325
Number of biases : 51
Number of connections: 1376
Architecture 27 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
-------------------------------------------------------------------------------
Atomic short range NN for element O :
Number of weights : 1400
Number of biases : 51
Number of connections: 1451
Architecture 30 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
28 G
29 G
30 G
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION SCALING ******************************************
Equal scaling type for all symmetry functions:
Scaling type::ST_SCALECENTER (3)
Gs = Smin + (Smax - Smin) * (G - Gmean) / (Gmax - Gmin)
Smin = 0.000000
Smax = 1.000000
Symmetry function scaling statistics from file: hdnnp-data/scaling.data
-------------------------------------------------------------------------------
Abbreviations:
--------------
ind ..... Symmetry function index.
min ..... Minimum symmetry function value.
max ..... Maximum symmetry function value.
mean .... Mean symmetry function value.
sigma ... Standard deviation of symmetry function values.
sf ...... Scaling factor for derivatives.
Smin .... Desired minimum scaled symmetry function value.
Smax .... Desired maximum scaled symmetry function value.
t ....... Scaling type.
Scaling data for symmetry functions element H :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.09E+00 9.62E+00 2.27E+00 6.79E-01 1.17E-01 0.00 1.00 3
2 7.33E-01 5.00E+00 1.33E+00 3.39E-01 2.34E-01 0.00 1.00 3
3 7.60E-01 7.14E+00 1.65E+00 5.08E-01 1.57E-01 0.00 1.00 3
4 5.48E-01 3.77E+00 1.02E+00 2.54E-01 3.11E-01 0.00 1.00 3
5 4.01E-01 4.15E+00 9.09E-01 2.98E-01 2.67E-01 0.00 1.00 3
6 3.62E-01 2.27E+00 6.49E-01 1.48E-01 5.25E-01 0.00 1.00 3
7 1.89E-01 2.23E+00 4.57E-01 1.60E-01 4.90E-01 0.00 1.00 3
8 2.67E-01 1.32E+00 4.24E-01 8.05E-02 9.49E-01 0.00 1.00 3
9 2.45E-01 9.48E-01 3.62E-01 5.30E-02 1.42E+00 0.00 1.00 3
10 2.22E-01 2.76E+00 5.39E-01 2.01E-01 3.94E-01 0.00 1.00 3
11 1.47E-01 5.56E-01 2.68E-01 2.62E-02 2.45E+00 0.00 1.00 3
12 9.91E-02 1.73E+00 2.96E-01 1.16E-01 6.14E-01 0.00 1.00 3
13 6.51E-02 3.45E-01 1.85E-01 1.97E-02 3.57E+00 0.00 1.00 3
14 3.17E-02 9.13E-01 1.50E-01 5.35E-02 1.13E+00 0.00 1.00 3
15 2.92E-03 2.65E-01 7.65E-02 1.88E-02 3.82E+00 0.00 1.00 3
16 3.21E-04 2.87E-01 4.58E-02 2.33E-02 3.49E+00 0.00 1.00 3
17 2.47E-04 1.38E-01 1.77E-02 9.75E-03 7.23E+00 0.00 1.00 3
18 5.10E-03 5.83E-01 2.39E-02 3.78E-02 1.73E+00 0.00 1.00 3
19 3.23E-04 2.16E-01 1.71E-02 1.40E-02 4.63E+00 0.00 1.00 3
20 4.96E-02 1.69E+00 1.45E-01 1.10E-01 6.11E-01 0.00 1.00 3
21 3.41E-03 3.16E-01 1.84E-02 2.01E-02 3.20E+00 0.00 1.00 3
22 1.31E-04 1.03E-01 6.37E-03 6.61E-03 9.76E+00 0.00 1.00 3
23 3.38E-02 9.16E-01 8.13E-02 5.79E-02 1.13E+00 0.00 1.00 3
24 4.17E-04 1.58E-01 4.66E-03 9.86E-03 6.35E+00 0.00 1.00 3
25 7.35E-04 5.92E-02 3.70E-03 3.31E-03 1.71E+01 0.00 1.00 3
26 8.98E-03 1.94E-01 2.41E-02 1.10E-02 5.40E+00 0.00 1.00 3
27 2.12E-04 8.78E-03 2.06E-03 5.88E-04 1.17E+02 0.00 1.00 3
-------------------------------------------------------------------------------
Scaling data for symmetry functions element O :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.51E+00 1.00E+01 2.65E+00 6.78E-01 1.18E-01 0.00 1.00 3
2 4.44E-01 4.62E+00 9.66E-01 3.37E-01 2.39E-01 0.00 1.00 3
3 1.19E+00 7.53E+00 2.03E+00 5.06E-01 1.58E-01 0.00 1.00 3
4 2.76E-01 3.39E+00 6.59E-01 2.50E-01 3.21E-01 0.00 1.00 3
5 8.06E-01 4.54E+00 1.30E+00 2.94E-01 2.68E-01 0.00 1.00 3
6 1.05E-01 1.89E+00 3.07E-01 1.42E-01 5.60E-01 0.00 1.00 3
7 5.69E-01 2.62E+00 8.48E-01 1.57E-01 4.89E-01 0.00 1.00 3
8 2.33E-02 9.36E-01 1.11E-01 6.98E-02 1.10E+00 0.00 1.00 3
9 5.14E-01 1.85E+00 7.25E-01 9.80E-02 7.46E-01 0.00 1.00 3
10 1.11E-01 2.91E+00 4.75E-01 2.34E-01 3.57E-01 0.00 1.00 3
11 3.53E-01 1.07E+00 5.35E-01 4.52E-02 1.39E+00 0.00 1.00 3
12 3.04E-02 2.53E+00 3.17E-01 2.10E-01 4.00E-01 0.00 1.00 3
13 1.60E-01 6.63E-01 3.70E-01 3.08E-02 1.99E+00 0.00 1.00 3
14 2.78E-03 2.30E+00 1.77E-01 1.86E-01 4.35E-01 0.00 1.00 3
15 9.56E-03 3.91E-01 1.53E-01 2.79E-02 2.62E+00 0.00 1.00 3
16 3.75E-06 2.04E+00 5.41E-02 1.43E-01 4.91E-01 0.00 1.00 3
17 2.47E-03 3.43E-01 1.67E-02 2.19E-02 2.93E+00 0.00 1.00 3
18 1.74E-05 5.63E-02 9.55E-04 3.36E-03 1.78E+01 0.00 1.00 3
19 5.48E-02 3.02E+00 2.04E-01 2.01E-01 3.37E-01 0.00 1.00 3
20 1.38E-03 4.99E-01 1.28E-02 3.18E-02 2.01E+00 0.00 1.00 3
21 6.69E-03 2.67E-01 3.09E-02 1.71E-02 3.84E+00 0.00 1.00 3
22 1.70E-02 1.42E+00 7.63E-02 9.29E-02 7.14E-01 0.00 1.00 3
23 1.98E-02 4.08E-01 4.88E-02 2.55E-02 2.58E+00 0.00 1.00 3
24 5.28E-04 2.33E-01 7.21E-03 1.45E-02 4.30E+00 0.00 1.00 3
25 1.11E-05 3.53E-02 4.25E-04 2.05E-03 2.83E+01 0.00 1.00 3
26 1.60E-02 8.22E-01 5.08E-02 5.28E-02 1.24E+00 0.00 1.00 3
27 3.99E-03 7.86E-01 3.69E-02 5.05E-02 1.28E+00 0.00 1.00 3
28 4.05E-05 9.84E-02 1.21E-03 5.79E-03 1.02E+01 0.00 1.00 3
29 6.04E-03 9.93E-02 1.62E-02 5.52E-03 1.07E+01 0.00 1.00 3
30 2.96E-03 1.55E-01 1.16E-02 8.94E-03 6.59E+00 0.00 1.00 3
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION STATISTICS ***************************************
Equal symmetry function statistics for all elements.
Collect min/max/mean/sigma : 0
Collect extrapolation warnings : 1
Write extrapolation warnings immediately to stderr: 0
Halt on any extrapolation warning : 0
*******************************************************************************
*** SETUP: NEURAL NETWORK WEIGHTS *********************************************
Short NN weight file name format: hdnnp-data/weights.%03d.data
Setting short NN weights for element H from file: hdnnp-data/weights.001.data
Setting short NN weights for element O from file: hdnnp-data/weights.008.data
*******************************************************************************
*** SETUP: LAMMPS INTERFACE ***************************************************
Individual extrapolation warnings will not be shown.
Extrapolation warning summary will be shown every 5 timesteps.
The simulation will be stopped when 100 extrapolation warnings are exceeded.
Extrapolation warnings are accumulated over all time steps.
-------------------------------------------------------------------------------
CAUTION: If the LAMMPS unit system differs from the one used
during NN training, appropriate conversion factors
must be provided (see keywords cflength and cfenergy).
Length unit conversion factor: 1.8897261327999999E+00
Energy unit conversion factor: 3.6749325399999998E-02
Checking consistency of cutoff radii (in LAMMPS units):
LAMMPS Cutoff (via pair_coeff) : 6.360E+00
Maximum symmetry function cutoff: 6.350E+00
Cutoff radii are consistent.
-------------------------------------------------------------------------------
Element mapping string from LAMMPS to n2p2: "1:H,2:O"
CAUTION: Please ensure that this mapping between LAMMPS
atom types and NNP elements is consistent:
---------------------------
LAMMPS type | NNP element
---------------------------
1 <-> H ( 1)
2 <-> O ( 8)
---------------------------
NNP setup for LAMMPS completed.
*******************************************************************************
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.36
ghost atom cutoff = 8.36
binsize = 4.18, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair hdnnp, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
### NNP EW SUMMARY ### TS: 0 EW 0 EWPERSTEP 0.000e+00
Per MPI rank memory allocation (min/avg/max) = 3.13 | 3.13 | 3.13 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -750069.48 0 -750069.48 -5297.5537
1 8.5815594 -750070.71 0 -750069.51 -5249.2914
2 30.988787 -750073.91 0 -750069.59 -5023.6945
3 58.859866 -750077.88 0 -750069.67 -4427.8346
4 82.576399 -750081.26 0 -750069.74 -3275.4378
### NNP EW SUMMARY ### TS: 5 EW 0 EWPERSTEP 0.000e+00
5 94.968097 -750083.01 0 -750069.76 -1511.6733
6 93.724286 -750082.8 0 -750069.73 709.20465
7 82.243957 -750081.13 0 -750069.66 3020.5084
8 68.611429 -750079.14 0 -750069.57 4922.5176
9 62.314385 -750078.21 0 -750069.51 5933.1543
### NNP EW SUMMARY ### TS: 10 EW 0 EWPERSTEP 0.000e+00
10 69.501045 -750079.21 0 -750069.52 5761.8646
Loop time of 0.930358 on 4 procs for 10 steps with 1080 atoms
Performance: 0.464 ns/day, 51.687 hours/ns, 10.749 timesteps/s, 11.608 katom-step/s
99.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.85419 | 0.89495 | 0.92919 | 3.5 | 96.19
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.00075831 | 0.035035 | 0.075822 | 17.5 | 3.77
Output | 0.00018471 | 0.00023973 | 0.00031043 | 0.0 | 0.03
Modify | 4.0258e-05 | 4.2308e-05 | 4.4218e-05 | 0.0 | 0.00
Other | | 9.199e-05 | | | 0.01
Nlocal: 270 ave 278 max 262 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 2552 ave 2564 max 2541 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 59817.5 ave 61917 max 57577 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Total # of neighbors = 239270
Ave neighs/atom = 221.5463
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -0,0 +1,689 @@
LAMMPS (2 Aug 2023 - Development - patch_2Aug2023-264-g174825fe8c)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
###############################################################################
# MD simulation for HDNNP water
###############################################################################
###############################################################################
# VARIABLES
###############################################################################
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# Configuration files
variable cfgFile string "data.H2O-360mol"
# Timesteps
variable numSteps equal 10
variable dt equal 0.0005
# HDNNP
variable hdnnpCutoff equal 6.36
variable hdnnpDir string "hdnnp-data"
###############################################################################
# GENERAL SETUP
###############################################################################
units metal
boundary p p p
atom_style atomic
region box block 0.0 2.2695686722465727E+01 0.0 2.3586033624598713E+01 0.0 2.2237130028217017E+01
create_box 3 box
Created orthogonal box = (0 0 0) to (22.695687 23.586034 22.23713)
1 by 1 by 1 MPI processor grid
mass 1 1.0
read_data ${cfgFile} add append offset 1 0 0 0 0
read_data data.H2O-360mol add append offset 1 0 0 0 0
Reading data file ...
orthogonal box = (0 0 0) to (22.695687 23.586034 22.23713)
1 by 1 by 1 MPI processor grid
reading atoms ...
1080 atoms
read_data CPU = 0.004 seconds
timestep ${dt}
timestep 0.0005
thermo 1
###############################################################################
# HDNNP
###############################################################################
pair_style hybrid lj/cut 6.0 hdnnp ${hdnnpCutoff} dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hybrid lj/cut 6.0 hdnnp 6.36 dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hybrid lj/cut 6.0 hdnnp 6.36 dir hdnnp-data showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_coeff * * hdnnp NULL H O
pair_coeff 1 * lj/cut 0.1 3.0
###############################################################################
# INTEGRATOR
###############################################################################
fix INT all nve
###############################################################################
# OUTPUT
###############################################################################
#dump 1 all atom 1 dump.hdnnp
###############################################################################
# SIMULATION
###############################################################################
run ${numSteps}
run 10
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- ML-HDNNP package: doi:10.1021/acs.jctc.8b00770
@Article{Singraber19,
author = {Singraber, Andreas and Behler, J{\"o}rg and Dellago, Christoph},
title = {Library-Based {LAMMPS} Implementation of High-Dimensional
Neural Network Potentials},
year = {2019},
month = mar,
volume = {15},
pages = {1827--1840},
doi = {10.1021/acs.jctc.8b00770},
journal = {J.~Chem.\ Theory Comput.},
number = {3}
}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
*******************************************************************************
WELCOME TO n²p², A SOFTWARE PACKAGE FOR NEURAL NETWORK POTENTIALS!
------------------------------------------------------------------
n²p² version (from git): patch_2Aug2023-264-g174825fe8c
(version.h): v2.2.0
------------------------------------------------------------
Git branch : collected-small-changes
Git revision : 174825fe8c9493cb252d7b9e8dafdcc5d74be96d
Compile date/time : Aug 23 2023 08:43:11
------------------------------------------------------------
Features/Flags:
------------------------------------------------------------
Symmetry function groups : enabled
Symmetry function cache : enabled
Timing function available : available
Asymmetric polynomial SFs : available
SF low neighbor number check : enabled
SF derivative memory layout : reduced
MPI explicitly disabled : no
------------------------------------------------------------
Please cite the following papers when publishing results obtained with n²p²:
-------------------------------------------------------------------------------
* General citation for n²p² and the LAMMPS interface:
Singraber, A.; Behler, J.; Dellago, C.
Library-Based LAMMPS Implementation of High-Dimensional
Neural Network Potentials.
J. Chem. Theory Comput. 2019 15 (3), 1827–1840.
https://doi.org/10.1021/acs.jctc.8b00770
-------------------------------------------------------------------------------
* Additionally, if you use the NNP training features of n²p²:
Singraber, A.; Morawietz, T.; Behler, J.; Dellago, C.
Parallel Multistream Training of High-Dimensional Neural
Network Potentials.
J. Chem. Theory Comput. 2019, 15 (5), 3075–3092.
https://doi.org/10.1021/acs.jctc.8b01092
-------------------------------------------------------------------------------
* Additionally, if polynomial symmetry functions are used:
Bircher, M. P.; Singraber, A.; Dellago, C.
Improved Description of Atomic Environments Using Low-Cost
Polynomial Functions with Compact Support.
arXiv:2010.14414 [cond-mat, physics:physics] 2020.
https://arxiv.org/abs/2010.14414
*******************************************************************************
*** SETUP: SETTINGS FILE ******************************************************
Settings file name: hdnnp-data/input.nn
Read 120 lines.
Found 70 lines with keywords.
This settings file defines a short-range only NNP.
*******************************************************************************
*** SETUP: NORMALIZATION ******************************************************
Data set normalization is used.
Mean energy per atom : -2.5521343547039809E+01
Conversion factor energy : 2.4265748255366972E+02
Conversion factor length : 5.8038448995319847E+00
*******************************************************************************
*** SETUP: ELEMENT MAP ********************************************************
Number of element strings found: 2
Element 0: H ( 1)
Element 1: O ( 8)
*******************************************************************************
*** SETUP: ELEMENTS ***********************************************************
Number of elements is consistent: 2
Atomic energy offsets per element:
Element 0: 0.00000000E+00
Element 1: 0.00000000E+00
Energy offsets are automatically subtracted from reference energies.
*******************************************************************************
*** SETUP: CUTOFF FUNCTIONS ***************************************************
Parameter alpha for inner cutoff: 0.000000
Inner cutoff = Symmetry function cutoff * alpha
Equal cutoff function type for all symmetry functions:
CutoffFunction::CT_TANHU (2)
f(r) = tanh^3(1 - r/rc)
*******************************************************************************
*** SETUP: SYMMETRY FUNCTIONS *************************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
Short range atomic symmetry functions element H :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 H 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 51
2 H 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 61
3 H 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 52
4 H 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 62
5 H 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 53
6 H 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 63
7 H 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 54
8 H 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 64
9 H 2 ct2 O 1.500E-01 9.000E-01 1.200E+01 0.00 65
10 H 2 ct2 H 1.500E-01 1.900E+00 1.200E+01 0.00 55
11 H 2 ct2 O 3.000E-01 9.000E-01 1.200E+01 0.00 66
12 H 2 ct2 H 3.000E-01 1.900E+00 1.200E+01 0.00 56
13 H 2 ct2 O 6.000E-01 9.000E-01 1.200E+01 0.00 67
14 H 2 ct2 H 6.000E-01 1.900E+00 1.200E+01 0.00 57
15 H 2 ct2 O 1.500E+00 9.000E-01 1.200E+01 0.00 68
16 H 2 ct2 H 1.500E+00 1.900E+00 1.200E+01 0.00 58
17 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 115
18 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 114
19 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 105
20 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 103
21 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 100
22 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 113
23 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 98
24 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 112
25 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 95
26 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 93
27 H 3 ct2 H O 2.000E-01 0.000E+00 1.200E+01 1 1.0 0.00 90
-------------------------------------------------------------------------------------------------
Short range atomic symmetry functions element O :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 O 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 70
2 O 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 80
3 O 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 71
4 O 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 81
5 O 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 72
6 O 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 82
7 O 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 73
8 O 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 83
9 O 2 ct2 H 1.500E-01 9.000E-01 1.200E+01 0.00 74
10 O 2 ct2 O 1.500E-01 4.000E+00 1.200E+01 0.00 84
11 O 2 ct2 H 3.000E-01 9.000E-01 1.200E+01 0.00 75
12 O 2 ct2 O 3.000E-01 4.000E+00 1.200E+01 0.00 85
13 O 2 ct2 H 6.000E-01 9.000E-01 1.200E+01 0.00 76
14 O 2 ct2 O 6.000E-01 4.000E+00 1.200E+01 0.00 86
15 O 2 ct2 H 1.500E+00 9.000E-01 1.200E+01 0.00 77
16 O 2 ct2 O 1.500E+00 4.000E+00 1.200E+01 0.00 87
17 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 110
18 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 120
19 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 109
20 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 119
21 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 104
22 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 102
23 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 99
24 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 108
25 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 118
26 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 97
27 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 107
28 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 117
29 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 94
30 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 92
-------------------------------------------------------------------------------------------------
Minimum cutoff radius for element H: 12.000000
Minimum cutoff radius for element O: 12.000000
Maximum cutoff radius (global) : 12.000000
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION MEMORY *******************************************
Symmetry function derivatives memory table for element H :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 15 of 27 ( 55.6 %)
- O: 19 of 27 ( 70.4 %)
-------------------------------------------------------------------------------
Symmetry function derivatives memory table for element O :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 18 of 30 ( 60.0 %)
- O: 16 of 30 ( 53.3 %)
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION CACHE ********************************************
Element H: in total 4 caches, used 17.00 times on average.
Element O: in total 4 caches, used 17.00 times on average.
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION GROUPS *******************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
mi ..... Member index.
sfi .... Symmetry function index.
e ...... Recalculate exponential term.
Short range atomic symmetry function groups element H :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 H 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 51 1 1
- - - - - 1.000E-02 0.000E+00 - - 52 2 3
- - - - - 3.000E-02 0.000E+00 - - 53 3 5
- - - - - 6.000E-02 0.000E+00 - - 54 4 7
- - - - - 1.500E-01 1.900E+00 - - 55 5 10
- - - - - 3.000E-01 1.900E+00 - - 56 6 12
- - - - - 6.000E-01 1.900E+00 - - 57 7 14
- - - - - 1.500E+00 1.900E+00 - - 58 8 16
2 H 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 61 1 2
- - - - - 1.000E-02 0.000E+00 - - 62 2 4
- - - - - 3.000E-02 0.000E+00 - - 63 3 6
- - - - - 6.000E-02 0.000E+00 - - 64 4 8
- - - - - 1.500E-01 9.000E-01 - - 65 5 9
- - - - - 3.000E-01 9.000E-01 - - 66 6 11
- - - - - 6.000E-01 9.000E-01 - - 67 7 13
- - - - - 1.500E+00 9.000E-01 - - 68 8 15
3 H 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 105 1 19 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 103 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 100 3 21 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 98 4 23 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 95 5 25 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 93 6 26 0
- - - - - - 2.000E-01 0.000E+00 - 1 1.0 - 90 7 27 1
4 H 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 115 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 114 2 18 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 113 3 22 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 112 4 24 0
----------------------------------------------------------------------------------------------------------
Short range atomic symmetry function groups element O :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 O 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 70 1 1
- - - - - 1.000E-02 0.000E+00 - - 71 2 3
- - - - - 3.000E-02 0.000E+00 - - 72 3 5
- - - - - 6.000E-02 0.000E+00 - - 73 4 7
- - - - - 1.500E-01 9.000E-01 - - 74 5 9
- - - - - 3.000E-01 9.000E-01 - - 75 6 11
- - - - - 6.000E-01 9.000E-01 - - 76 7 13
- - - - - 1.500E+00 9.000E-01 - - 77 8 15
2 O 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 80 1 2
- - - - - 1.000E-02 0.000E+00 - - 81 2 4
- - - - - 3.000E-02 0.000E+00 - - 82 3 6
- - - - - 6.000E-02 0.000E+00 - - 83 4 8
- - - - - 1.500E-01 4.000E+00 - - 84 5 10
- - - - - 3.000E-01 4.000E+00 - - 85 6 12
- - - - - 6.000E-01 4.000E+00 - - 86 7 14
- - - - - 1.500E+00 4.000E+00 - - 87 8 16
3 O 3 ct2 H H * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 104 1 21 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 102 2 22 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 99 3 23 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 97 4 26 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 94 5 29 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 92 6 30 0
4 O 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 110 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 109 2 19 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 108 3 24 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 107 4 27 0
5 O 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 120 1 18 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 119 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 118 3 25 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 117 4 28 0
----------------------------------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: NEURAL NETWORKS ****************************************************
Normalize neurons (all elements): 0
-------------------------------------------------------------------------------
Atomic short range NN for element H :
Number of weights : 1325
Number of biases : 51
Number of connections: 1376
Architecture 27 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
-------------------------------------------------------------------------------
Atomic short range NN for element O :
Number of weights : 1400
Number of biases : 51
Number of connections: 1451
Architecture 30 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
28 G
29 G
30 G
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION SCALING ******************************************
Equal scaling type for all symmetry functions:
Scaling type::ST_SCALECENTER (3)
Gs = Smin + (Smax - Smin) * (G - Gmean) / (Gmax - Gmin)
Smin = 0.000000
Smax = 1.000000
Symmetry function scaling statistics from file: hdnnp-data/scaling.data
-------------------------------------------------------------------------------
Abbreviations:
--------------
ind ..... Symmetry function index.
min ..... Minimum symmetry function value.
max ..... Maximum symmetry function value.
mean .... Mean symmetry function value.
sigma ... Standard deviation of symmetry function values.
sf ...... Scaling factor for derivatives.
Smin .... Desired minimum scaled symmetry function value.
Smax .... Desired maximum scaled symmetry function value.
t ....... Scaling type.
Scaling data for symmetry functions element H :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.09E+00 9.62E+00 2.27E+00 6.79E-01 1.17E-01 0.00 1.00 3
2 7.33E-01 5.00E+00 1.33E+00 3.39E-01 2.34E-01 0.00 1.00 3
3 7.60E-01 7.14E+00 1.65E+00 5.08E-01 1.57E-01 0.00 1.00 3
4 5.48E-01 3.77E+00 1.02E+00 2.54E-01 3.11E-01 0.00 1.00 3
5 4.01E-01 4.15E+00 9.09E-01 2.98E-01 2.67E-01 0.00 1.00 3
6 3.62E-01 2.27E+00 6.49E-01 1.48E-01 5.25E-01 0.00 1.00 3
7 1.89E-01 2.23E+00 4.57E-01 1.60E-01 4.90E-01 0.00 1.00 3
8 2.67E-01 1.32E+00 4.24E-01 8.05E-02 9.49E-01 0.00 1.00 3
9 2.45E-01 9.48E-01 3.62E-01 5.30E-02 1.42E+00 0.00 1.00 3
10 2.22E-01 2.76E+00 5.39E-01 2.01E-01 3.94E-01 0.00 1.00 3
11 1.47E-01 5.56E-01 2.68E-01 2.62E-02 2.45E+00 0.00 1.00 3
12 9.91E-02 1.73E+00 2.96E-01 1.16E-01 6.14E-01 0.00 1.00 3
13 6.51E-02 3.45E-01 1.85E-01 1.97E-02 3.57E+00 0.00 1.00 3
14 3.17E-02 9.13E-01 1.50E-01 5.35E-02 1.13E+00 0.00 1.00 3
15 2.92E-03 2.65E-01 7.65E-02 1.88E-02 3.82E+00 0.00 1.00 3
16 3.21E-04 2.87E-01 4.58E-02 2.33E-02 3.49E+00 0.00 1.00 3
17 2.47E-04 1.38E-01 1.77E-02 9.75E-03 7.23E+00 0.00 1.00 3
18 5.10E-03 5.83E-01 2.39E-02 3.78E-02 1.73E+00 0.00 1.00 3
19 3.23E-04 2.16E-01 1.71E-02 1.40E-02 4.63E+00 0.00 1.00 3
20 4.96E-02 1.69E+00 1.45E-01 1.10E-01 6.11E-01 0.00 1.00 3
21 3.41E-03 3.16E-01 1.84E-02 2.01E-02 3.20E+00 0.00 1.00 3
22 1.31E-04 1.03E-01 6.37E-03 6.61E-03 9.76E+00 0.00 1.00 3
23 3.38E-02 9.16E-01 8.13E-02 5.79E-02 1.13E+00 0.00 1.00 3
24 4.17E-04 1.58E-01 4.66E-03 9.86E-03 6.35E+00 0.00 1.00 3
25 7.35E-04 5.92E-02 3.70E-03 3.31E-03 1.71E+01 0.00 1.00 3
26 8.98E-03 1.94E-01 2.41E-02 1.10E-02 5.40E+00 0.00 1.00 3
27 2.12E-04 8.78E-03 2.06E-03 5.88E-04 1.17E+02 0.00 1.00 3
-------------------------------------------------------------------------------
Scaling data for symmetry functions element O :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.51E+00 1.00E+01 2.65E+00 6.78E-01 1.18E-01 0.00 1.00 3
2 4.44E-01 4.62E+00 9.66E-01 3.37E-01 2.39E-01 0.00 1.00 3
3 1.19E+00 7.53E+00 2.03E+00 5.06E-01 1.58E-01 0.00 1.00 3
4 2.76E-01 3.39E+00 6.59E-01 2.50E-01 3.21E-01 0.00 1.00 3
5 8.06E-01 4.54E+00 1.30E+00 2.94E-01 2.68E-01 0.00 1.00 3
6 1.05E-01 1.89E+00 3.07E-01 1.42E-01 5.60E-01 0.00 1.00 3
7 5.69E-01 2.62E+00 8.48E-01 1.57E-01 4.89E-01 0.00 1.00 3
8 2.33E-02 9.36E-01 1.11E-01 6.98E-02 1.10E+00 0.00 1.00 3
9 5.14E-01 1.85E+00 7.25E-01 9.80E-02 7.46E-01 0.00 1.00 3
10 1.11E-01 2.91E+00 4.75E-01 2.34E-01 3.57E-01 0.00 1.00 3
11 3.53E-01 1.07E+00 5.35E-01 4.52E-02 1.39E+00 0.00 1.00 3
12 3.04E-02 2.53E+00 3.17E-01 2.10E-01 4.00E-01 0.00 1.00 3
13 1.60E-01 6.63E-01 3.70E-01 3.08E-02 1.99E+00 0.00 1.00 3
14 2.78E-03 2.30E+00 1.77E-01 1.86E-01 4.35E-01 0.00 1.00 3
15 9.56E-03 3.91E-01 1.53E-01 2.79E-02 2.62E+00 0.00 1.00 3
16 3.75E-06 2.04E+00 5.41E-02 1.43E-01 4.91E-01 0.00 1.00 3
17 2.47E-03 3.43E-01 1.67E-02 2.19E-02 2.93E+00 0.00 1.00 3
18 1.74E-05 5.63E-02 9.55E-04 3.36E-03 1.78E+01 0.00 1.00 3
19 5.48E-02 3.02E+00 2.04E-01 2.01E-01 3.37E-01 0.00 1.00 3
20 1.38E-03 4.99E-01 1.28E-02 3.18E-02 2.01E+00 0.00 1.00 3
21 6.69E-03 2.67E-01 3.09E-02 1.71E-02 3.84E+00 0.00 1.00 3
22 1.70E-02 1.42E+00 7.63E-02 9.29E-02 7.14E-01 0.00 1.00 3
23 1.98E-02 4.08E-01 4.88E-02 2.55E-02 2.58E+00 0.00 1.00 3
24 5.28E-04 2.33E-01 7.21E-03 1.45E-02 4.30E+00 0.00 1.00 3
25 1.11E-05 3.53E-02 4.25E-04 2.05E-03 2.83E+01 0.00 1.00 3
26 1.60E-02 8.22E-01 5.08E-02 5.28E-02 1.24E+00 0.00 1.00 3
27 3.99E-03 7.86E-01 3.69E-02 5.05E-02 1.28E+00 0.00 1.00 3
28 4.05E-05 9.84E-02 1.21E-03 5.79E-03 1.02E+01 0.00 1.00 3
29 6.04E-03 9.93E-02 1.62E-02 5.52E-03 1.07E+01 0.00 1.00 3
30 2.96E-03 1.55E-01 1.16E-02 8.94E-03 6.59E+00 0.00 1.00 3
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION STATISTICS ***************************************
Equal symmetry function statistics for all elements.
Collect min/max/mean/sigma : 0
Collect extrapolation warnings : 1
Write extrapolation warnings immediately to stderr: 0
Halt on any extrapolation warning : 0
*******************************************************************************
*** SETUP: NEURAL NETWORK WEIGHTS *********************************************
Short NN weight file name format: hdnnp-data/weights.%03d.data
Setting short NN weights for element H from file: hdnnp-data/weights.001.data
Setting short NN weights for element O from file: hdnnp-data/weights.008.data
*******************************************************************************
*** SETUP: LAMMPS INTERFACE ***************************************************
Individual extrapolation warnings will not be shown.
Extrapolation warning summary will be shown every 5 timesteps.
The simulation will be stopped when 100 extrapolation warnings are exceeded.
Extrapolation warnings are accumulated over all time steps.
-------------------------------------------------------------------------------
CAUTION: If the LAMMPS unit system differs from the one used
during NN training, appropriate conversion factors
must be provided (see keywords cflength and cfenergy).
Length unit conversion factor: 1.8897261327999999E+00
Energy unit conversion factor: 3.6749325399999998E-02
Checking consistency of cutoff radii (in LAMMPS units):
LAMMPS Cutoff (via pair_coeff) : 6.360E+00
Maximum symmetry function cutoff: 6.350E+00
Cutoff radii are consistent.
-------------------------------------------------------------------------------
Element mapping string from LAMMPS to n2p2: "2:H,3:O"
CAUTION: Please ensure that this mapping between LAMMPS
atom types and NNP elements is consistent:
---------------------------
LAMMPS type | NNP element
---------------------------
1 <-> --
2 <-> H ( 1)
3 <-> O ( 8)
---------------------------
NNP setup for LAMMPS completed.
*******************************************************************************
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.36
ghost atom cutoff = 8.36
binsize = 4.18, bins = 6 6 6
4 neighbor lists, perpetual/occasional/extra = 4 0 0
(1) pair lj/cut, perpetual, skip from (3)
attributes: half, newton on, cut 8
pair build: skip
stencil: none
bin: none
(2) pair hdnnp, perpetual, skip from (4)
attributes: full, newton on
pair build: skip
stencil: none
bin: none
(3) neighbor class addition, perpetual, half/full trim from (4)
attributes: half, newton on, cut 8
pair build: halffull/newton/trim
stencil: none
bin: none
(4) neighbor class addition, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
### NNP EW SUMMARY ### TS: 0 EW 0 EWPERSTEP 0.000e+00
Per MPI rank memory allocation (min/avg/max) = 7.06 | 7.06 | 7.06 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -750069.48 0 -750069.48 -5297.5537
1 8.5815594 -750070.71 0 -750069.51 -5249.2914
2 30.988787 -750073.91 0 -750069.59 -5023.6945
3 58.859866 -750077.88 0 -750069.67 -4427.8346
4 82.576399 -750081.26 0 -750069.74 -3275.4378
### NNP EW SUMMARY ### TS: 5 EW 0 EWPERSTEP 0.000e+00
5 94.968097 -750083.01 0 -750069.76 -1511.6733
6 93.724286 -750082.8 0 -750069.73 709.20465
7 82.243957 -750081.13 0 -750069.66 3020.5084
8 68.611429 -750079.14 0 -750069.57 4922.5176
9 62.314385 -750078.21 0 -750069.51 5933.1543
### NNP EW SUMMARY ### TS: 10 EW 0 EWPERSTEP 0.000e+00
10 69.501045 -750079.21 0 -750069.52 5761.8646
Loop time of 3.32416 on 1 procs for 10 steps with 1080 atoms
Performance: 0.130 ns/day, 184.675 hours/ns, 3.008 timesteps/s, 3.249 katom-step/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3234 | 3.3234 | 3.3234 | 0.0 | 99.98
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.00032742 | 0.00032742 | 0.00032742 | 0.0 | 0.01
Output | 0.00019506 | 0.00019506 | 0.00019506 | 0.0 | 0.01
Modify | 0.00011454 | 0.00011454 | 0.00011454 | 0.0 | 0.00
Other | | 0.0001165 | | | 0.00
Nlocal: 1080 ave 1080 max 1080 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4536 ave 4536 max 4536 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:03

View File

@ -0,0 +1,689 @@
LAMMPS (2 Aug 2023 - Development - patch_2Aug2023-264-g174825fe8c)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
###############################################################################
# MD simulation for HDNNP water
###############################################################################
###############################################################################
# VARIABLES
###############################################################################
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# Configuration files
variable cfgFile string "data.H2O-360mol"
# Timesteps
variable numSteps equal 10
variable dt equal 0.0005
# HDNNP
variable hdnnpCutoff equal 6.36
variable hdnnpDir string "hdnnp-data"
###############################################################################
# GENERAL SETUP
###############################################################################
units metal
boundary p p p
atom_style atomic
region box block 0.0 2.2695686722465727E+01 0.0 2.3586033624598713E+01 0.0 2.2237130028217017E+01
create_box 3 box
Created orthogonal box = (0 0 0) to (22.695687 23.586034 22.23713)
2 by 2 by 1 MPI processor grid
mass 1 1.0
read_data ${cfgFile} add append offset 1 0 0 0 0
read_data data.H2O-360mol add append offset 1 0 0 0 0
Reading data file ...
orthogonal box = (0 0 0) to (22.695687 23.586034 22.23713)
2 by 2 by 1 MPI processor grid
reading atoms ...
1080 atoms
read_data CPU = 0.007 seconds
timestep ${dt}
timestep 0.0005
thermo 1
###############################################################################
# HDNNP
###############################################################################
pair_style hybrid lj/cut 6.0 hdnnp ${hdnnpCutoff} dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hybrid lj/cut 6.0 hdnnp 6.36 dir ${hdnnpDir} showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_style hybrid lj/cut 6.0 hdnnp 6.36 dir hdnnp-data showew no showewsum 5 resetew no maxew 100 cflength 1.8897261328 cfenergy 0.0367493254
pair_coeff * * hdnnp NULL H O
pair_coeff 1 * lj/cut 0.1 3.0
###############################################################################
# INTEGRATOR
###############################################################################
fix INT all nve
###############################################################################
# OUTPUT
###############################################################################
#dump 1 all atom 1 dump.hdnnp
###############################################################################
# SIMULATION
###############################################################################
run ${numSteps}
run 10
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- ML-HDNNP package: doi:10.1021/acs.jctc.8b00770
@Article{Singraber19,
author = {Singraber, Andreas and Behler, J{\"o}rg and Dellago, Christoph},
title = {Library-Based {LAMMPS} Implementation of High-Dimensional
Neural Network Potentials},
year = {2019},
month = mar,
volume = {15},
pages = {1827--1840},
doi = {10.1021/acs.jctc.8b00770},
journal = {J.~Chem.\ Theory Comput.},
number = {3}
}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
*******************************************************************************
WELCOME TO n²p², A SOFTWARE PACKAGE FOR NEURAL NETWORK POTENTIALS!
------------------------------------------------------------------
n²p² version (from git): patch_2Aug2023-264-g174825fe8c
(version.h): v2.2.0
------------------------------------------------------------
Git branch : collected-small-changes
Git revision : 174825fe8c9493cb252d7b9e8dafdcc5d74be96d
Compile date/time : Aug 23 2023 08:43:11
------------------------------------------------------------
Features/Flags:
------------------------------------------------------------
Symmetry function groups : enabled
Symmetry function cache : enabled
Timing function available : available
Asymmetric polynomial SFs : available
SF low neighbor number check : enabled
SF derivative memory layout : reduced
MPI explicitly disabled : no
------------------------------------------------------------
Please cite the following papers when publishing results obtained with n²p²:
-------------------------------------------------------------------------------
* General citation for n²p² and the LAMMPS interface:
Singraber, A.; Behler, J.; Dellago, C.
Library-Based LAMMPS Implementation of High-Dimensional
Neural Network Potentials.
J. Chem. Theory Comput. 2019, 15 (3), 1827–1840.
https://doi.org/10.1021/acs.jctc.8b00770
-------------------------------------------------------------------------------
* Additionally, if you use the NNP training features of n²p²:
Singraber, A.; Morawietz, T.; Behler, J.; Dellago, C.
Parallel Multistream Training of High-Dimensional Neural
Network Potentials.
J. Chem. Theory Comput. 2019, 15 (5), 3075–3092.
https://doi.org/10.1021/acs.jctc.8b01092
-------------------------------------------------------------------------------
* Additionally, if polynomial symmetry functions are used:
Bircher, M. P.; Singraber, A.; Dellago, C.
Improved Description of Atomic Environments Using Low-Cost
Polynomial Functions with Compact Support.
arXiv:2010.14414 [cond-mat, physics:physics] 2020.
https://arxiv.org/abs/2010.14414
*******************************************************************************
*** SETUP: SETTINGS FILE ******************************************************
Settings file name: hdnnp-data/input.nn
Read 120 lines.
Found 70 lines with keywords.
This settings file defines a short-range only NNP.
*******************************************************************************
*** SETUP: NORMALIZATION ******************************************************
Data set normalization is used.
Mean energy per atom : -2.5521343547039809E+01
Conversion factor energy : 2.4265748255366972E+02
Conversion factor length : 5.8038448995319847E+00
*******************************************************************************
*** SETUP: ELEMENT MAP ********************************************************
Number of element strings found: 2
Element 0: H ( 1)
Element 1: O ( 8)
*******************************************************************************
*** SETUP: ELEMENTS ***********************************************************
Number of elements is consistent: 2
Atomic energy offsets per element:
Element 0: 0.00000000E+00
Element 1: 0.00000000E+00
Energy offsets are automatically subtracted from reference energies.
*******************************************************************************
*** SETUP: CUTOFF FUNCTIONS ***************************************************
Parameter alpha for inner cutoff: 0.000000
Inner cutoff = Symmetry function cutoff * alpha
Equal cutoff function type for all symmetry functions:
CutoffFunction::CT_TANHU (2)
f(r) = tanh^3(1 - r/rc)
*******************************************************************************
*** SETUP: SYMMETRY FUNCTIONS *************************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
Short range atomic symmetry functions element H :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 H 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 51
2 H 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 61
3 H 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 52
4 H 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 62
5 H 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 53
6 H 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 63
7 H 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 54
8 H 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 64
9 H 2 ct2 O 1.500E-01 9.000E-01 1.200E+01 0.00 65
10 H 2 ct2 H 1.500E-01 1.900E+00 1.200E+01 0.00 55
11 H 2 ct2 O 3.000E-01 9.000E-01 1.200E+01 0.00 66
12 H 2 ct2 H 3.000E-01 1.900E+00 1.200E+01 0.00 56
13 H 2 ct2 O 6.000E-01 9.000E-01 1.200E+01 0.00 67
14 H 2 ct2 H 6.000E-01 1.900E+00 1.200E+01 0.00 57
15 H 2 ct2 O 1.500E+00 9.000E-01 1.200E+01 0.00 68
16 H 2 ct2 H 1.500E+00 1.900E+00 1.200E+01 0.00 58
17 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 115
18 H 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 114
19 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 105
20 H 3 ct2 H O 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 103
21 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 100
22 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 113
23 H 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 98
24 H 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 112
25 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 95
26 H 3 ct2 H O 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 93
27 H 3 ct2 H O 2.000E-01 0.000E+00 1.200E+01 1 1.0 0.00 90
-------------------------------------------------------------------------------------------------
Short range atomic symmetry functions element O :
-------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln
-------------------------------------------------------------------------------------------------
1 O 2 ct2 H 1.000E-03 0.000E+00 1.200E+01 0.00 70
2 O 2 ct2 O 1.000E-03 0.000E+00 1.200E+01 0.00 80
3 O 2 ct2 H 1.000E-02 0.000E+00 1.200E+01 0.00 71
4 O 2 ct2 O 1.000E-02 0.000E+00 1.200E+01 0.00 81
5 O 2 ct2 H 3.000E-02 0.000E+00 1.200E+01 0.00 72
6 O 2 ct2 O 3.000E-02 0.000E+00 1.200E+01 0.00 82
7 O 2 ct2 H 6.000E-02 0.000E+00 1.200E+01 0.00 73
8 O 2 ct2 O 6.000E-02 0.000E+00 1.200E+01 0.00 83
9 O 2 ct2 H 1.500E-01 9.000E-01 1.200E+01 0.00 74
10 O 2 ct2 O 1.500E-01 4.000E+00 1.200E+01 0.00 84
11 O 2 ct2 H 3.000E-01 9.000E-01 1.200E+01 0.00 75
12 O 2 ct2 O 3.000E-01 4.000E+00 1.200E+01 0.00 85
13 O 2 ct2 H 6.000E-01 9.000E-01 1.200E+01 0.00 76
14 O 2 ct2 O 6.000E-01 4.000E+00 1.200E+01 0.00 86
15 O 2 ct2 H 1.500E+00 9.000E-01 1.200E+01 0.00 77
16 O 2 ct2 O 1.500E+00 4.000E+00 1.200E+01 0.00 87
17 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 110
18 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 -1 4.0 0.00 120
19 O 3 ct2 H O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 109
20 O 3 ct2 O O 1.000E-03 0.000E+00 1.200E+01 1 4.0 0.00 119
21 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 -1 4.0 0.00 104
22 O 3 ct2 H H 1.000E-02 0.000E+00 1.200E+01 1 4.0 0.00 102
23 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 99
24 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 108
25 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 118
26 O 3 ct2 H H 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 97
27 O 3 ct2 H O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 107
28 O 3 ct2 O O 3.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 117
29 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 -1 1.0 0.00 94
30 O 3 ct2 H H 7.000E-02 0.000E+00 1.200E+01 1 1.0 0.00 92
-------------------------------------------------------------------------------------------------
Minimum cutoff radius for element H: 12.000000
Minimum cutoff radius for element O: 12.000000
Maximum cutoff radius (global) : 12.000000
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION MEMORY *******************************************
Symmetry function derivatives memory table for element H :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 15 of 27 ( 55.6 %)
- O: 19 of 27 ( 70.4 %)
-------------------------------------------------------------------------------
Symmetry function derivatives memory table for element O :
-------------------------------------------------------------------------------
Relevant symmetry functions for neighbors with element:
- H: 18 of 30 ( 60.0 %)
- O: 16 of 30 ( 53.3 %)
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION CACHE ********************************************
Element H: in total 4 caches, used 17.00 times on average.
Element O: in total 4 caches, used 17.00 times on average.
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION GROUPS *******************************************
Abbreviations:
--------------
ind .... Symmetry function index.
ec ..... Central atom element.
tp ..... Symmetry function type.
sbtp ... Symmetry function subtype (e.g. cutoff type).
e1 ..... Neighbor 1 element.
e2 ..... Neighbor 2 element.
eta .... Gaussian width eta.
rs/rl... Shift distance of Gaussian or left cutoff radius for polynomial.
angl.... Left cutoff angle for polynomial.
angr.... Right cutoff angle for polynomial.
la ..... Angle prefactor lambda.
zeta ... Angle term exponent zeta.
rc ..... Cutoff radius / right cutoff radius for polynomial.
a ...... Free parameter alpha (e.g. cutoff alpha).
ln ..... Line number in settings file.
mi ..... Member index.
sfi .... Symmetry function index.
e ...... Recalculate exponential term.
Short range atomic symmetry function groups element H :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 H 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 51 1 1
- - - - - 1.000E-02 0.000E+00 - - 52 2 3
- - - - - 3.000E-02 0.000E+00 - - 53 3 5
- - - - - 6.000E-02 0.000E+00 - - 54 4 7
- - - - - 1.500E-01 1.900E+00 - - 55 5 10
- - - - - 3.000E-01 1.900E+00 - - 56 6 12
- - - - - 6.000E-01 1.900E+00 - - 57 7 14
- - - - - 1.500E+00 1.900E+00 - - 58 8 16
2 H 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 61 1 2
- - - - - 1.000E-02 0.000E+00 - - 62 2 4
- - - - - 3.000E-02 0.000E+00 - - 63 3 6
- - - - - 6.000E-02 0.000E+00 - - 64 4 8
- - - - - 1.500E-01 9.000E-01 - - 65 5 9
- - - - - 3.000E-01 9.000E-01 - - 66 6 11
- - - - - 6.000E-01 9.000E-01 - - 67 7 13
- - - - - 1.500E+00 9.000E-01 - - 68 8 15
3 H 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 105 1 19 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 103 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 100 3 21 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 98 4 23 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 95 5 25 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 93 6 26 0
- - - - - - 2.000E-01 0.000E+00 - 1 1.0 - 90 7 27 1
4 H 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 115 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 114 2 18 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 113 3 22 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 112 4 24 0
----------------------------------------------------------------------------------------------------------
Short range atomic symmetry function groups element O :
----------------------------------------------------------------------------------------------------------
ind ec tp sbtp e1 e2 eta rs/rl rc angl angr la zeta a ln mi sfi e
----------------------------------------------------------------------------------------------------------
1 O 2 ct2 H * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 70 1 1
- - - - - 1.000E-02 0.000E+00 - - 71 2 3
- - - - - 3.000E-02 0.000E+00 - - 72 3 5
- - - - - 6.000E-02 0.000E+00 - - 73 4 7
- - - - - 1.500E-01 9.000E-01 - - 74 5 9
- - - - - 3.000E-01 9.000E-01 - - 75 6 11
- - - - - 6.000E-01 9.000E-01 - - 76 7 13
- - - - - 1.500E+00 9.000E-01 - - 77 8 15
2 O 2 ct2 O * * 1.200E+01 0.00 * * *
- - - - - 1.000E-03 0.000E+00 - - 80 1 2
- - - - - 1.000E-02 0.000E+00 - - 81 2 4
- - - - - 3.000E-02 0.000E+00 - - 82 3 6
- - - - - 6.000E-02 0.000E+00 - - 83 4 8
- - - - - 1.500E-01 4.000E+00 - - 84 5 10
- - - - - 3.000E-01 4.000E+00 - - 85 6 12
- - - - - 6.000E-01 4.000E+00 - - 86 7 14
- - - - - 1.500E+00 4.000E+00 - - 87 8 16
3 O 3 ct2 H H * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-02 0.000E+00 - -1 4.0 - 104 1 21 1
- - - - - - 1.000E-02 0.000E+00 - 1 4.0 - 102 2 22 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 99 3 23 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 97 4 26 0
- - - - - - 7.000E-02 0.000E+00 - -1 1.0 - 94 5 29 1
- - - - - - 7.000E-02 0.000E+00 - 1 1.0 - 92 6 30 0
4 O 3 ct2 H O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 110 1 17 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 109 2 19 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 108 3 24 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 107 4 27 0
5 O 3 ct2 O O * * 1.200E+01 * * 0.00 * * * *
- - - - - - 1.000E-03 0.000E+00 - -1 4.0 - 120 1 18 1
- - - - - - 1.000E-03 0.000E+00 - 1 4.0 - 119 2 20 0
- - - - - - 3.000E-02 0.000E+00 - -1 1.0 - 118 3 25 1
- - - - - - 3.000E-02 0.000E+00 - 1 1.0 - 117 4 28 0
----------------------------------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: NEURAL NETWORKS ****************************************************
Normalize neurons (all elements): 0
-------------------------------------------------------------------------------
Atomic short range NN for element H :
Number of weights : 1325
Number of biases : 51
Number of connections: 1376
Architecture 27 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
-------------------------------------------------------------------------------
Atomic short range NN for element O :
Number of weights : 1400
Number of biases : 51
Number of connections: 1451
Architecture 30 25 25 1
-------------------------------------------------------------------------------
1 G t t l
2 G t t
3 G t t
4 G t t
5 G t t
6 G t t
7 G t t
8 G t t
9 G t t
10 G t t
11 G t t
12 G t t
13 G t t
14 G t t
15 G t t
16 G t t
17 G t t
18 G t t
19 G t t
20 G t t
21 G t t
22 G t t
23 G t t
24 G t t
25 G t t
26 G
27 G
28 G
29 G
30 G
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION SCALING ******************************************
Equal scaling type for all symmetry functions:
Scaling type::ST_SCALECENTER (3)
Gs = Smin + (Smax - Smin) * (G - Gmean) / (Gmax - Gmin)
Smin = 0.000000
Smax = 1.000000
Symmetry function scaling statistics from file: hdnnp-data/scaling.data
-------------------------------------------------------------------------------
Abbreviations:
--------------
ind ..... Symmetry function index.
min ..... Minimum symmetry function value.
max ..... Maximum symmetry function value.
mean .... Mean symmetry function value.
sigma ... Standard deviation of symmetry function values.
sf ...... Scaling factor for derivatives.
Smin .... Desired minimum scaled symmetry function value.
Smax .... Desired maximum scaled symmetry function value.
t ....... Scaling type.
Scaling data for symmetry functions element H :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.09E+00 9.62E+00 2.27E+00 6.79E-01 1.17E-01 0.00 1.00 3
2 7.33E-01 5.00E+00 1.33E+00 3.39E-01 2.34E-01 0.00 1.00 3
3 7.60E-01 7.14E+00 1.65E+00 5.08E-01 1.57E-01 0.00 1.00 3
4 5.48E-01 3.77E+00 1.02E+00 2.54E-01 3.11E-01 0.00 1.00 3
5 4.01E-01 4.15E+00 9.09E-01 2.98E-01 2.67E-01 0.00 1.00 3
6 3.62E-01 2.27E+00 6.49E-01 1.48E-01 5.25E-01 0.00 1.00 3
7 1.89E-01 2.23E+00 4.57E-01 1.60E-01 4.90E-01 0.00 1.00 3
8 2.67E-01 1.32E+00 4.24E-01 8.05E-02 9.49E-01 0.00 1.00 3
9 2.45E-01 9.48E-01 3.62E-01 5.30E-02 1.42E+00 0.00 1.00 3
10 2.22E-01 2.76E+00 5.39E-01 2.01E-01 3.94E-01 0.00 1.00 3
11 1.47E-01 5.56E-01 2.68E-01 2.62E-02 2.45E+00 0.00 1.00 3
12 9.91E-02 1.73E+00 2.96E-01 1.16E-01 6.14E-01 0.00 1.00 3
13 6.51E-02 3.45E-01 1.85E-01 1.97E-02 3.57E+00 0.00 1.00 3
14 3.17E-02 9.13E-01 1.50E-01 5.35E-02 1.13E+00 0.00 1.00 3
15 2.92E-03 2.65E-01 7.65E-02 1.88E-02 3.82E+00 0.00 1.00 3
16 3.21E-04 2.87E-01 4.58E-02 2.33E-02 3.49E+00 0.00 1.00 3
17 2.47E-04 1.38E-01 1.77E-02 9.75E-03 7.23E+00 0.00 1.00 3
18 5.10E-03 5.83E-01 2.39E-02 3.78E-02 1.73E+00 0.00 1.00 3
19 3.23E-04 2.16E-01 1.71E-02 1.40E-02 4.63E+00 0.00 1.00 3
20 4.96E-02 1.69E+00 1.45E-01 1.10E-01 6.11E-01 0.00 1.00 3
21 3.41E-03 3.16E-01 1.84E-02 2.01E-02 3.20E+00 0.00 1.00 3
22 1.31E-04 1.03E-01 6.37E-03 6.61E-03 9.76E+00 0.00 1.00 3
23 3.38E-02 9.16E-01 8.13E-02 5.79E-02 1.13E+00 0.00 1.00 3
24 4.17E-04 1.58E-01 4.66E-03 9.86E-03 6.35E+00 0.00 1.00 3
25 7.35E-04 5.92E-02 3.70E-03 3.31E-03 1.71E+01 0.00 1.00 3
26 8.98E-03 1.94E-01 2.41E-02 1.10E-02 5.40E+00 0.00 1.00 3
27 2.12E-04 8.78E-03 2.06E-03 5.88E-04 1.17E+02 0.00 1.00 3
-------------------------------------------------------------------------------
Scaling data for symmetry functions element O :
-------------------------------------------------------------------------------
ind min max mean sigma sf Smin Smax t
-------------------------------------------------------------------------------
1 1.51E+00 1.00E+01 2.65E+00 6.78E-01 1.18E-01 0.00 1.00 3
2 4.44E-01 4.62E+00 9.66E-01 3.37E-01 2.39E-01 0.00 1.00 3
3 1.19E+00 7.53E+00 2.03E+00 5.06E-01 1.58E-01 0.00 1.00 3
4 2.76E-01 3.39E+00 6.59E-01 2.50E-01 3.21E-01 0.00 1.00 3
5 8.06E-01 4.54E+00 1.30E+00 2.94E-01 2.68E-01 0.00 1.00 3
6 1.05E-01 1.89E+00 3.07E-01 1.42E-01 5.60E-01 0.00 1.00 3
7 5.69E-01 2.62E+00 8.48E-01 1.57E-01 4.89E-01 0.00 1.00 3
8 2.33E-02 9.36E-01 1.11E-01 6.98E-02 1.10E+00 0.00 1.00 3
9 5.14E-01 1.85E+00 7.25E-01 9.80E-02 7.46E-01 0.00 1.00 3
10 1.11E-01 2.91E+00 4.75E-01 2.34E-01 3.57E-01 0.00 1.00 3
11 3.53E-01 1.07E+00 5.35E-01 4.52E-02 1.39E+00 0.00 1.00 3
12 3.04E-02 2.53E+00 3.17E-01 2.10E-01 4.00E-01 0.00 1.00 3
13 1.60E-01 6.63E-01 3.70E-01 3.08E-02 1.99E+00 0.00 1.00 3
14 2.78E-03 2.30E+00 1.77E-01 1.86E-01 4.35E-01 0.00 1.00 3
15 9.56E-03 3.91E-01 1.53E-01 2.79E-02 2.62E+00 0.00 1.00 3
16 3.75E-06 2.04E+00 5.41E-02 1.43E-01 4.91E-01 0.00 1.00 3
17 2.47E-03 3.43E-01 1.67E-02 2.19E-02 2.93E+00 0.00 1.00 3
18 1.74E-05 5.63E-02 9.55E-04 3.36E-03 1.78E+01 0.00 1.00 3
19 5.48E-02 3.02E+00 2.04E-01 2.01E-01 3.37E-01 0.00 1.00 3
20 1.38E-03 4.99E-01 1.28E-02 3.18E-02 2.01E+00 0.00 1.00 3
21 6.69E-03 2.67E-01 3.09E-02 1.71E-02 3.84E+00 0.00 1.00 3
22 1.70E-02 1.42E+00 7.63E-02 9.29E-02 7.14E-01 0.00 1.00 3
23 1.98E-02 4.08E-01 4.88E-02 2.55E-02 2.58E+00 0.00 1.00 3
24 5.28E-04 2.33E-01 7.21E-03 1.45E-02 4.30E+00 0.00 1.00 3
25 1.11E-05 3.53E-02 4.25E-04 2.05E-03 2.83E+01 0.00 1.00 3
26 1.60E-02 8.22E-01 5.08E-02 5.28E-02 1.24E+00 0.00 1.00 3
27 3.99E-03 7.86E-01 3.69E-02 5.05E-02 1.28E+00 0.00 1.00 3
28 4.05E-05 9.84E-02 1.21E-03 5.79E-03 1.02E+01 0.00 1.00 3
29 6.04E-03 9.93E-02 1.62E-02 5.52E-03 1.07E+01 0.00 1.00 3
30 2.96E-03 1.55E-01 1.16E-02 8.94E-03 6.59E+00 0.00 1.00 3
-------------------------------------------------------------------------------
*******************************************************************************
*** SETUP: SYMMETRY FUNCTION STATISTICS ***************************************
Equal symmetry function statistics for all elements.
Collect min/max/mean/sigma : 0
Collect extrapolation warnings : 1
Write extrapolation warnings immediately to stderr: 0
Halt on any extrapolation warning : 0
*******************************************************************************
*** SETUP: NEURAL NETWORK WEIGHTS *********************************************
Short NN weight file name format: hdnnp-data/weights.%03d.data
Setting short NN weights for element H from file: hdnnp-data/weights.001.data
Setting short NN weights for element O from file: hdnnp-data/weights.008.data
*******************************************************************************
*** SETUP: LAMMPS INTERFACE ***************************************************
Individual extrapolation warnings will not be shown.
Extrapolation warning summary will be shown every 5 timesteps.
The simulation will be stopped when 100 extrapolation warnings are exceeded.
Extrapolation warnings are accumulated over all time steps.
-------------------------------------------------------------------------------
CAUTION: If the LAMMPS unit system differs from the one used
during NN training, appropriate conversion factors
must be provided (see keywords cflength and cfenergy).
Length unit conversion factor: 1.8897261327999999E+00
Energy unit conversion factor: 3.6749325399999998E-02
Checking consistency of cutoff radii (in LAMMPS units):
LAMMPS Cutoff (via pair_coeff) : 6.360E+00
Maximum symmetry function cutoff: 6.350E+00
Cutoff radii are consistent.
-------------------------------------------------------------------------------
Element mapping string from LAMMPS to n2p2: "2:H,3:O"
CAUTION: Please ensure that this mapping between LAMMPS
atom types and NNP elements is consistent:
---------------------------
LAMMPS type | NNP element
---------------------------
1 <-> --
2 <-> H ( 1)
3 <-> O ( 8)
---------------------------
NNP setup for LAMMPS completed.
*******************************************************************************
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.36
ghost atom cutoff = 8.36
binsize = 4.18, bins = 6 6 6
4 neighbor lists, perpetual/occasional/extra = 4 0 0
(1) pair lj/cut, perpetual, skip from (3)
attributes: half, newton on, cut 8
pair build: skip
stencil: none
bin: none
(2) pair hdnnp, perpetual, skip from (4)
attributes: full, newton on
pair build: skip
stencil: none
bin: none
(3) neighbor class addition, perpetual, half/full trim from (4)
attributes: half, newton on, cut 8
pair build: halffull/newton/trim
stencil: none
bin: none
(4) neighbor class addition, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
### NNP EW SUMMARY ### TS: 0 EW 0 EWPERSTEP 0.000e+00
Per MPI rank memory allocation (min/avg/max) = 5.024 | 5.024 | 5.024 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -750069.48 0 -750069.48 -5297.5537
1 8.5815594 -750070.71 0 -750069.51 -5249.2914
2 30.988787 -750073.91 0 -750069.59 -5023.6945
3 58.859866 -750077.88 0 -750069.67 -4427.8346
4 82.576399 -750081.26 0 -750069.74 -3275.4378
### NNP EW SUMMARY ### TS: 5 EW 0 EWPERSTEP 0.000e+00
5 94.968097 -750083.01 0 -750069.76 -1511.6733
6 93.724286 -750082.8 0 -750069.73 709.20465
7 82.243957 -750081.13 0 -750069.66 3020.5084
8 68.611429 -750079.14 0 -750069.57 4922.5176
9 62.314385 -750078.21 0 -750069.51 5933.1543
### NNP EW SUMMARY ### TS: 10 EW 0 EWPERSTEP 0.000e+00
10 69.501045 -750079.21 0 -750069.52 5761.8646
Loop time of 0.936871 on 4 procs for 10 steps with 1080 atoms
Performance: 0.461 ns/day, 52.048 hours/ns, 10.674 timesteps/s, 11.528 katom-step/s
99.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.86567 | 0.89891 | 0.93611 | 3.1 | 95.95
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.00035446 | 0.037585 | 0.070835 | 15.3 | 4.01
Output | 0.00018528 | 0.00023648 | 0.00030553 | 0.0 | 0.03
Modify | 4.2224e-05 | 4.4078e-05 | 4.6256e-05 | 0.0 | 0.00
Other | | 9.307e-05 | | | 0.01
Nlocal: 270 ave 278 max 262 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 2552 ave 2564 max 2541 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 0
Ave neighs/atom = 0
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01

View File

@ -35,6 +35,7 @@ else()
add_compile_options(/Zc:__cplusplus)
add_compile_options(/wd4244)
add_compile_options(/wd4267)
add_compile_options(/wd4250)
add_compile_options(/EHsc)
endif()
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)

View File

@ -132,22 +132,26 @@ class TorchWrapper(torch.nn.Module):
descriptors = torch.as_tensor(descriptors,dtype=self.dtype, device=self.device).requires_grad_(True)
elems = torch.as_tensor(elems,dtype=torch.int32, device=self.device)
elems=elems-1
device = self.device
if (use_gpu_data and (device is None) and (str(beta.device).find('CUDA') == 1)):
device = 'cuda' #Override device as it wasn't defined in the model
with torch.autograd.enable_grad():
if (use_gpu_data):
energy_nn = torch.as_tensor(energy,dtype=self.dtype, device=self.device)
energy_nn = torch.as_tensor(energy,dtype=self.dtype, device=device)
energy_nn[:] = self.model(descriptors, elems).flatten()
else:
energy_nn = self.model(descriptors, elems).flatten()
energy[:] = energy_nn.detach().cpu().numpy().astype(np.float64)
if (use_gpu_data):
beta_nn = torch.as_tensor(beta,dtype=self.dtype, device=self.device)
beta_nn = torch.as_tensor(beta,dtype=self.dtype, device=device)
beta_nn[:] = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
else:
beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)
class IgnoreElems(torch.nn.Module):
"""
A class to represent a NN model agnostic of element typing.

View File

@ -63,12 +63,16 @@ class numpy_wrapper:
.. note::
While the returned arrays of per-atom data are dimensioned
for the range [0:nmax] - as is the underlying storage -
the data is usually only valid for the range of [0:nlocal],
unless the property of interest is also updated for ghost
atoms. In some cases, this depends on a LAMMPS setting, see
for example :doc:`comm_modify vel yes <comm_modify>`.
The returned arrays of per-atom data are by default dimensioned
for the range [0:nlocal] since that data is *always* valid. The
underlying storage for the data, however, is typically allocated
for the range of [0:nmax]. Whether there is valid data in the range
[nlocal:nlocal+nghost] depends on whether the property of interest
is also updated for ghost atoms. This is not often the case. In
some cases, it depends on a LAMMPS setting, see for example
:doc:`comm_modify vel yes <comm_modify>`. By using the optional
*nelem* parameter the size of the returned NumPy array can be overridden.
There is no check whether the number of elements chosen is valid.
:param name: name of the property
:type name: string
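As a point of reference for the revised note above, here is a minimal, hypothetical sketch of how the optional *nelem* override could be used from the LAMMPS Python module; the input script name "in.melt" is a placeholder and not part of this change:

from lammps import lammps

lmp = lammps()
lmp.file("in.melt")              # hypothetical input script that sets up a system

# Default: the returned array is dimensioned for [0:nlocal]; that data is always valid.
x_local = lmp.numpy.extract_atom("x")

# Override the size to also expose ghost-atom entries; whether the range
# [nlocal:nlocal+nghost] holds valid data depends on the property and on
# settings such as "comm_modify vel yes".
nall = lmp.extract_global("nlocal") + lmp.extract_global("nghost")
x_all = lmp.numpy.extract_atom("x", nelem=nall)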

src/.gitignore vendored
View File

@ -580,8 +580,8 @@
/compute_ke_eff.h
/compute_ke_rigid.cpp
/compute_ke_rigid.h
/compute_local_comp_atom.cpp
/compute_local_comp_atom.h
/compute_composition_atom.cpp
/compute_composition_atom.h
/compute_meso_e_atom.cpp
/compute_meso_e_atom.h
/compute_meso_rho_atom.cpp
@ -673,20 +673,14 @@
/dump_atom_gz.h
/dump_atom_zstd.cpp
/dump_atom_zstd.h
/dump_atom_mpiio.cpp
/dump_atom_mpiio.h
/dump_cfg_gz.cpp
/dump_cfg_gz.h
/dump_cfg_mpiio.cpp
/dump_cfg_mpiio.h
/dump_cfg_zstd.cpp
/dump_cfg_zstd.h
/dump_custom_adios.cpp
/dump_custom_adios.h
/dump_custom_gz.cpp
/dump_custom_gz.h
/dump_custom_mpiio.cpp
/dump_custom_mpiio.h
/dump_custom_zstd.cpp
/dump_custom_zstd.h
/dump_h5md.cpp
@ -705,8 +699,6 @@
/dump_xtc.h
/dump_xyz_gz.cpp
/dump_xyz_gz.h
/dump_xyz_mpiio.cpp
/dump_xyz_mpiio.h
/dump_xyz_zstd.cpp
/dump_xyz_zstd.h
/dump_yaml.cpp
@ -1459,8 +1451,6 @@
/remap.h
/remap_wrap.cpp
/remap_wrap.h
/restart_mpiio.cpp
/restart_mpiio.h
/rigid_const.h
/scafacos.cpp
/scafacos.h

View File

@ -150,6 +150,7 @@ if (test $1 = "MOLECULE") then
depend GPU
depend KOKKOS
depend FEP
depend MESONT
depend OPENMP
depend INTEL
fi

View File

@ -27,7 +27,7 @@ namespace LAMMPS_NS {
// error arrays
double E1[] = {1.483110564084803581889448079057,
static constexpr double E1[] = {1.483110564084803581889448079057,
-3.01071073386594942470731046311E-1,
6.8994830689831566246603180718E-2,
-1.3916271264722187682546525687E-2,
@ -56,7 +56,7 @@ double E1[] = {1.483110564084803581889448079057,
1.6E-29,
-1.0E-30};
double E2[] = {1.077977852072383151168335910348,
static constexpr double E2[] = {1.077977852072383151168335910348,
-2.6559890409148673372146500904E-2,
-1.487073146698099509605046333E-3,
-1.38040145414143859607708920E-4,
@ -101,7 +101,7 @@ double E2[] = {1.077977852072383151168335910348,
2.0E-30,
-1.0E-30};
double DE1[] = {-0.689379974848418501361491576718,
static constexpr double DE1[] = {-0.689379974848418501361491576718,
0.295939056851161774752959335568,
-0.087237828075228616420029484096,
0.019959734091835509766546612696,
@ -129,7 +129,7 @@ double DE1[] = {-0.689379974848418501361491576718,
8.32E-28,
-5.4E-29};
double DE2[] = {0.717710208167480928473053690384,
static constexpr double DE2[] = {0.717710208167480928473053690384,
-0.379868973985143305103199928808,
0.125832094465157378967135019248,
-0.030917661684228839423081992424,
@ -160,7 +160,7 @@ double DE2[] = {0.717710208167480928473053690384,
/* ---------------------------------------------------------------------- */
inline double ipoly02(double x)
static inline double ipoly02(double x)
{
/* P(x) in the range x > 2 */
int i;
@ -178,7 +178,7 @@ inline double ipoly02(double x)
/* ---------------------------------------------------------------------- */
inline double ipoly1(double x)
static inline double ipoly1(double x)
{
/* First derivative P'(x) in the range x < 2 */
int i;
@ -197,7 +197,7 @@ inline double ipoly1(double x)
/* ---------------------------------------------------------------------- */
inline double ipoly01(double x)
static inline double ipoly01(double x)
{
// P(x) in the range x < 2
@ -216,7 +216,7 @@ inline double ipoly01(double x)
/* ---------------------------------------------------------------------- */
inline double ierfoverx1(double x, double *df)
static inline double ierfoverx1(double x, double *df)
{
// Computes Erf(x)/x and its first derivative
@ -245,7 +245,7 @@ inline double ierfoverx1(double x, double *df)
/* ---------------------------------------------------------------------- */
inline void KinElec(double radius, double *eke, double *frc)
static inline void KinElec(double radius, double *eke, double *frc)
{
*eke += 1.5 / (radius * radius);
*frc += 3.0 / (radius * radius * radius);
@ -253,7 +253,7 @@ inline void KinElec(double radius, double *eke, double *frc)
/* ---------------------------------------------------------------------- */
inline void ElecNucNuc(double q, double rc, double *ecoul, double *frc)
static inline void ElecNucNuc(double q, double rc, double *ecoul, double *frc)
{
*ecoul += q / rc;
*frc += q / (rc * rc);
@ -261,7 +261,8 @@ inline void ElecNucNuc(double q, double rc, double *ecoul, double *frc)
/* ---------------------------------------------------------------------- */
inline void ElecNucElec(double q, double rc, double re1, double *ecoul, double *frc, double *fre1)
static inline void ElecNucElec(double q, double rc, double re1, double *ecoul, double *frc,
double *fre1)
{
double a, arc;
double coeff_a;
@ -289,7 +290,7 @@ inline void ElecNucElec(double q, double rc, double re1, double *ecoul, double *
/* ---------------------------------------------------------------------- */
inline void ElecElecElec(double rc, double re1, double re2, double *ecoul, double *frc,
static inline void ElecElecElec(double rc, double re1, double re2, double *ecoul, double *frc,
double *fre1, double *fre2)
{
double a, arc, re, fre;
@ -327,7 +328,7 @@ inline void ElecElecElec(double rc, double re1, double re2, double *ecoul, doubl
/* ---------------------------------------------------------------------- */
inline void ElecCoreNuc(double q, double rc, double re1, double *ecoul, double *frc)
static inline void ElecCoreNuc(double q, double rc, double re1, double *ecoul, double *frc)
{
double a, arc;
double coeff_a;
@ -347,7 +348,8 @@ inline void ElecCoreNuc(double q, double rc, double re1, double *ecoul, double *
/* ---------------------------------------------------------------------- */
inline void ElecCoreCore(double q, double rc, double re1, double re2, double *ecoul, double *frc)
static inline void ElecCoreCore(double q, double rc, double re1, double re2, double *ecoul,
double *frc)
{
double a, arc, re;
double coeff_a;
@ -369,8 +371,8 @@ inline void ElecCoreCore(double q, double rc, double re1, double re2, double *ec
/* ---------------------------------------------------------------------- */
inline void ElecCoreElec(double q, double rc, double re1, double re2, double *ecoul, double *frc,
double *fre2)
static inline void ElecCoreElec(double q, double rc, double re1, double re2, double *ecoul,
double *frc, double *fre2)
{
double a, arc, re;
double coeff_a;
@ -404,7 +406,7 @@ inline void ElecCoreElec(double q, double rc, double re1, double re2, double *ec
/* ---------------------------------------------------------------------- */
inline void PauliElecElec(int samespin, double rc, double re1, double re2, double *epauli,
static inline void PauliElecElec(int samespin, double rc, double re1, double re2, double *epauli,
double *frc, double *fre1, double *fre2)
{
double ree, rem;
@ -452,7 +454,7 @@ inline void PauliElecElec(int samespin, double rc, double re1, double re2, doubl
/* ---------------------------------------------------------------------- */
inline void PauliCoreElec(double rc, double re2, double *epauli, double *frc, double *fre2,
static inline void PauliCoreElec(double rc, double re2, double *epauli, double *frc, double *fre2,
double PAULI_CORE_A, double PAULI_CORE_B, double PAULI_CORE_C)
{
double E, dEdrc, dEdre2, rcsq, ssq;
@ -476,9 +478,10 @@ inline void PauliCoreElec(double rc, double re2, double *epauli, double *frc, do
/* ---------------------------------------------------------------------- */
inline void PauliCorePElec(double rc, double re2, double *epauli, double *frc, double *fre2,
double PAULI_CORE_P_A, double PAULI_CORE_P_B, double PAULI_CORE_P_C,
double PAULI_CORE_P_D, double PAULI_CORE_P_E)
static inline void PauliCorePElec(double rc, double re2, double *epauli, double *frc, double *fre2,
double PAULI_CORE_P_A, double PAULI_CORE_P_B,
double PAULI_CORE_P_C, double PAULI_CORE_P_D,
double PAULI_CORE_P_E)
{
double E, dEdrc, dEdre2;
@ -510,8 +513,8 @@ inline void PauliCorePElec(double rc, double re2, double *epauli, double *frc, d
/* ---------------------------------------------------------------------- */
inline void RForce(double dx, double dy, double dz, double rc, double force, double *fx, double *fy,
double *fz)
static inline void RForce(double dx, double dy, double dz, double rc, double force, double *fx,
double *fy, double *fz)
{
force /= rc;
*fx = force * dx;
@ -521,7 +524,7 @@ inline void RForce(double dx, double dy, double dz, double rc, double force, dou
/* ---------------------------------------------------------------------- */
inline void SmallRForce(double dx, double dy, double dz, double rc, double force, double *fx,
static inline void SmallRForce(double dx, double dy, double dz, double rc, double force, double *fx,
double *fy, double *fz)
{
/* Handles case where rc is small to avoid division by zero */
@ -552,7 +555,7 @@ inline void SmallRForce(double dx, double dy, double dz, double rc, double force
/* ---------------------------------------------------------------------- */
inline double cutoff(double x)
static inline double cutoff(double x)
{
/* cubic: return x * x * (2.0 * x - 3.0) + 1.0; */
/* quintic: return -6 * pow(x, 5) + 15 * pow(x, 4) - 10 * pow(x, 3) + 1; */
@ -564,7 +567,7 @@ inline double cutoff(double x)
/* ---------------------------------------------------------------------- */
inline double dcutoff(double x)
static inline double dcutoff(double x)
{
/* cubic: return (6.0 * x * x - 6.0 * x); */
/* quintic: return -30 * pow(x, 4) + 60 * pow(x, 3) - 30 * pow(x, 2); */

View File

@ -15,7 +15,7 @@
Contributing author: Megan McCarthy (SNL)
------------------------------------------------------------------------- */
#include "compute_local_comp_atom.h"
#include "compute_composition_atom.h"
#include "atom.h"
#include "comm.h"
@ -37,22 +37,22 @@ using namespace MathConst;
/* ---------------------------------------------------------------------- */
ComputeLocalCompAtom::ComputeLocalCompAtom(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg), result(nullptr)
ComputeCompositionAtom::ComputeCompositionAtom(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg), list(nullptr), result(nullptr)
{
if (narg < 3 || narg > 5) error->all(FLERR, "Illegal compute local/comp/atom command");
if (narg < 3 || narg > 5) error->all(FLERR, "Illegal compute composition/atom command");
cutoff = 0.0;
cutsq = cutoff = 0.0;
int iarg = 3;
while (iarg < narg) {
if (strcmp(arg[iarg], "cutoff") == 0) {
if (iarg + 2 > narg) error->all(FLERR, "Illegal compute local/comp/atom command");
if (iarg + 2 > narg) error->all(FLERR, "Illegal compute composition/atom command");
cutoff = utils::numeric(FLERR, arg[iarg + 1], false, lmp);
if (cutoff <= 0.0) error->all(FLERR, "Illegal compute local/comp/atom command");
if (cutoff <= 0.0) error->all(FLERR, "Illegal compute composition/atom command");
iarg += 2;
} else
error->all(FLERR, "Illegal compute local/comp/atom command");
error->all(FLERR, "Illegal compute composition/atom command");
}
peratom_flag = 1;
@ -65,7 +65,7 @@ ComputeLocalCompAtom::ComputeLocalCompAtom(LAMMPS *lmp, int narg, char **arg) :
/* ---------------------------------------------------------------------- */
ComputeLocalCompAtom::~ComputeLocalCompAtom()
ComputeCompositionAtom::~ComputeCompositionAtom()
{
if (copymode) return;
@ -74,11 +74,11 @@ ComputeLocalCompAtom::~ComputeLocalCompAtom()
/* ---------------------------------------------------------------------- */
void ComputeLocalCompAtom::init()
void ComputeCompositionAtom::init()
{
if (!force->pair && cutoff == 0.0)
error->all(FLERR,
"Compute local/comp/atom requires a cutoff be specified "
"Compute composition/atom requires a cutoff be specified "
"or a pair style be defined");
double skin = neighbor->skin;
@ -91,7 +91,7 @@ void ComputeLocalCompAtom::init()
if (cutoff > cutghost)
error->all(FLERR,
"Compute local/comp/atom cutoff exceeds ghost atom range - "
"Compute composition/atom cutoff exceeds ghost atom range - "
"use comm_modify cutoff command");
}
@ -111,14 +111,14 @@ void ComputeLocalCompAtom::init()
/* ---------------------------------------------------------------------- */
void ComputeLocalCompAtom::init_list(int /*id*/, NeighList *ptr)
void ComputeCompositionAtom::init_list(int /*id*/, NeighList *ptr)
{
list = ptr;
}
/* ---------------------------------------------------------------------- */
void ComputeLocalCompAtom::compute_peratom()
void ComputeCompositionAtom::compute_peratom()
{
int i, j, ii, jj, inum, jnum;
double xtmp, ytmp, ztmp, delx, dely, delz, rsq;
@ -132,7 +132,7 @@ void ComputeLocalCompAtom::compute_peratom()
if (atom->nmax > nmax) {
memory->destroy(result);
nmax = atom->nmax;
memory->create(result, nmax, size_peratom_cols, "local/comp/atom:result");
memory->create(result, nmax, size_peratom_cols, "composition/atom:result");
array_atom = result;
}
@ -196,9 +196,7 @@ void ComputeLocalCompAtom::compute_peratom()
// local comp fractions per element
double lfac = 1.0 / count;
for (int n = 1; n < size_peratom_cols; n++)
result[i][n+1] *= lfac;
for (int n = 1; n < size_peratom_cols; n++) result[i][n + 1] *= lfac;
}
}
}
@ -207,7 +205,7 @@ void ComputeLocalCompAtom::compute_peratom()
memory usage of local atom-based array
------------------------------------------------------------------------- */
double ComputeLocalCompAtom::memory_usage()
double ComputeCompositionAtom::memory_usage()
{
double bytes = (double) 2 * nmax * sizeof(double);
return bytes;
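
For orientation: the renamed compute tallies, for each owned atom, how many neighbors of each type lie within the cutoff and then multiplies the tallies by lfac = 1.0 / count, so each per-atom output row holds local composition fractions; from the registered style name it would presumably be invoked from an input script as something like "compute 1 all composition/atom cutoff 5.0". A stand-alone sketch of just the normalization step, with hypothetical counts instead of the LAMMPS neighbor-list data:

    #include <vector>

    // Turn per-type neighbor tallies into composition fractions that sum to 1.
    std::vector<double> composition_fractions(const std::vector<int> &counts)
    {
      int total = 0;
      for (int c : counts) total += c;
      std::vector<double> frac(counts.size(), 0.0);
      if (total == 0) return frac;          // no neighbors: leave all fractions at zero
      const double lfac = 1.0 / total;      // same role as 'lfac' in compute_peratom()
      for (std::size_t t = 0; t < counts.size(); ++t) frac[t] = counts[t] * lfac;
      return frac;
    }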


@ -13,21 +13,21 @@
#ifdef COMPUTE_CLASS
// clang-format off
ComputeStyle(local/comp/atom,ComputeLocalCompAtom);
ComputeStyle(composition/atom,ComputeCompositionAtom);
// clang-format on
#else
#ifndef LMP_COMPUTE_LOCAL_COMP_ATOM_H
#define LMP_COMPUTE_LOCAL_COMP_ATOM_H
#ifndef LMP_COMPUTE_COMPOSITION_ATOM_H
#define LMP_COMPUTE_COMPOSITION_ATOM_H
#include "compute.h"
namespace LAMMPS_NS {
class ComputeLocalCompAtom : public Compute {
class ComputeCompositionAtom : public Compute {
public:
ComputeLocalCompAtom(class LAMMPS *, int, char **);
~ComputeLocalCompAtom() override;
ComputeCompositionAtom(class LAMMPS *, int, char **);
~ComputeCompositionAtom() override;
void init() override;
void init_list(int, class NeighList *) override;
void compute_peratom() override;


@ -980,9 +980,12 @@ void CommKokkos::borders()
} else {
atomKK->sync(Host,ALL_MASK);
k_sendlist.sync<LMPHostType>();
k_sendlist.modify<LMPHostType>();
atomKK->modified(Host,ALL_MASK); // needed here for atom map
int prev_auto_sync = lmp->kokkos->auto_sync;
lmp->kokkos->auto_sync = 1;
CommBrick::borders();
lmp->kokkos->auto_sync = prev_auto_sync;
k_sendlist.modify<LMPHostType>();
atomKK->modified(Host,ALL_MASK);
}
if (comm->nprocs == 1 && !ghost_velocity && !forward_comm_classic)
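
The added lines wrap the host-side fallback CommBrick::borders() in a save/set/restore of lmp->kokkos->auto_sync, so automatic host/device synchronization is forced on only for the duration of that base-class call. The same idiom, sketched as a small RAII guard around a hypothetical flag:

    // Hypothetical sketch of the save/set/restore idiom used above.
    struct FlagGuard {
      int &flag;
      int saved;
      FlagGuard(int &f, int value) : flag(f), saved(f) { flag = value; }
      ~FlagGuard() { flag = saved; }        // previous value restored on scope exit
    };

    void host_fallback(int &auto_sync_flag)
    {
      FlagGuard guard(auto_sync_flag, 1);   // force synchronization on
      // ... call the host-side base-class implementation here ...
    }                                       // flag restored here, matching the explicit
                                            // prev_auto_sync bookkeeping above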


@ -15,7 +15,7 @@
Contributing author: Megan McCarthy (SNL)
------------------------------------------------------------------------- */
#include "compute_local_comp_atom_kokkos.h"
#include "compute_composition_atom_kokkos.h"
#include "atom_kokkos.h"
#include "atom_masks.h"
@ -39,8 +39,8 @@ using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
template<class DeviceType>
ComputeLocalCompAtomKokkos<DeviceType>::ComputeLocalCompAtomKokkos(LAMMPS *lmp, int narg, char **arg) :
ComputeLocalCompAtom(lmp, narg, arg)
ComputeCompositionAtomKokkos<DeviceType>::ComputeCompositionAtomKokkos(LAMMPS *lmp, int narg, char **arg) :
ComputeCompositionAtom(lmp, narg, arg)
{
kokkosable = 1;
atomKK = (AtomKokkos *) atom;
@ -52,7 +52,7 @@ ComputeLocalCompAtomKokkos<DeviceType>::ComputeLocalCompAtomKokkos(LAMMPS *lmp,
/* ---------------------------------------------------------------------- */
template<class DeviceType>
ComputeLocalCompAtomKokkos<DeviceType>::~ComputeLocalCompAtomKokkos()
ComputeCompositionAtomKokkos<DeviceType>::~ComputeCompositionAtomKokkos()
{
if (copymode) return;
@ -62,9 +62,9 @@ ComputeLocalCompAtomKokkos<DeviceType>::~ComputeLocalCompAtomKokkos()
/* ---------------------------------------------------------------------- */
template<class DeviceType>
void ComputeLocalCompAtomKokkos<DeviceType>::init()
void ComputeCompositionAtomKokkos<DeviceType>::init()
{
ComputeLocalCompAtom::init();
ComputeCompositionAtom::init();
// adjust neighbor list request for KOKKOS
@ -77,7 +77,7 @@ void ComputeLocalCompAtomKokkos<DeviceType>::init()
/* ---------------------------------------------------------------------- */
template<class DeviceType>
void ComputeLocalCompAtomKokkos<DeviceType>::compute_peratom()
void ComputeCompositionAtomKokkos<DeviceType>::compute_peratom()
{
invoked_peratom = update->ntimestep;
@ -87,7 +87,7 @@ void ComputeLocalCompAtomKokkos<DeviceType>::compute_peratom()
if (atom->nmax > nmax) {
memoryKK->destroy_kokkos(k_result,result);
nmax = atom->nmax;
memoryKK->create_kokkos(k_result,result,nmax,size_peratom_cols,"local/comp/atom:result");
memoryKK->create_kokkos(k_result,result,nmax,size_peratom_cols,"composition/atom:result");
d_result = k_result.view<DeviceType>();
array_atom = result;
}
@ -114,7 +114,7 @@ void ComputeLocalCompAtomKokkos<DeviceType>::compute_peratom()
Kokkos::deep_copy(d_result,0.0);
copymode = 1;
typename Kokkos::RangePolicy<DeviceType, TagComputeLocalCompAtom> policy(0,inum);
typename Kokkos::RangePolicy<DeviceType, TagComputeCompositionAtom> policy(0,inum);
Kokkos::parallel_for("ComputeLocalComp",policy,*this);
copymode = 0;
@ -124,7 +124,7 @@ void ComputeLocalCompAtomKokkos<DeviceType>::compute_peratom()
template<class DeviceType>
KOKKOS_INLINE_FUNCTION
void ComputeLocalCompAtomKokkos<DeviceType>::operator()(TagComputeLocalCompAtom, const int &ii) const
void ComputeCompositionAtomKokkos<DeviceType>::operator()(TagComputeCompositionAtom, const int &ii) const
{
const int i = d_ilist[ii];
@ -173,8 +173,8 @@ void ComputeLocalCompAtomKokkos<DeviceType>::operator()(TagComputeLocalCompAtom,
}
namespace LAMMPS_NS {
template class ComputeLocalCompAtomKokkos<LMPDeviceType>;
template class ComputeCompositionAtomKokkos<LMPDeviceType>;
#ifdef LMP_KOKKOS_GPU
template class ComputeLocalCompAtomKokkos<LMPHostType>;
template class ComputeCompositionAtomKokkos<LMPHostType>;
#endif
}
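
As with the other /kk styles, the device kernel is selected through an empty work-tag struct: the RangePolicy carries TagComputeCompositionAtom as its tag, and Kokkos::parallel_for invokes the matching operator()(Tag, int) overload. A self-contained sketch of that tag-dispatch pattern with a hypothetical functor, assuming a working Kokkos installation:

    #include <Kokkos_Core.hpp>

    struct TagScale {};                      // empty work tag

    struct ScaleFunctor {
      Kokkos::View<double *> data;
      KOKKOS_INLINE_FUNCTION
      void operator()(TagScale, const int i) const { data(i) *= 2.0; }
    };

    int main(int argc, char **argv)
    {
      Kokkos::initialize(argc, argv);
      {
        ScaleFunctor f{Kokkos::View<double *>("data", 128)};
        // the tag is part of the policy type and selects the overload above
        Kokkos::RangePolicy<Kokkos::DefaultExecutionSpace, TagScale> policy(0, 128);
        Kokkos::parallel_for("scale", policy, f);
        Kokkos::fence();
      }
      Kokkos::finalize();
      return 0;
    }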


@ -13,37 +13,37 @@
#ifdef COMPUTE_CLASS
// clang-format off
ComputeStyle(local/comp/atom/kk,ComputeLocalCompAtomKokkos<LMPDeviceType>);
ComputeStyle(local/comp/atom/kk/device,ComputeLocalCompAtomKokkos<LMPDeviceType>);
ComputeStyle(local/comp/atom/kk/host,ComputeLocalCompAtomKokkos<LMPHostType>);
ComputeStyle(composition/atom/kk,ComputeCompositionAtomKokkos<LMPDeviceType>);
ComputeStyle(composition/atom/kk/device,ComputeCompositionAtomKokkos<LMPDeviceType>);
ComputeStyle(composition/atom/kk/host,ComputeCompositionAtomKokkos<LMPHostType>);
// clang-format on
#else
#ifndef LMP_COMPUTE_LOCAL_COMP_ATOM_KOKKOS_H
#define LMP_COMPUTE_LOCAL_COMP_ATOM_KOKKOS_H
#ifndef LMP_COMPUTE_COMPOSITION_ATOM_KOKKOS_H
#define LMP_COMPUTE_COMPOSITION_ATOM_KOKKOS_H
#include "compute_local_comp_atom.h"
#include "compute_composition_atom.h"
#include "kokkos_type.h"
namespace LAMMPS_NS {
// clang-format off
struct TagComputeLocalCompAtom {};
struct TagComputeCompositionAtom {};
// clang-format on
template <class DeviceType> class ComputeLocalCompAtomKokkos : public ComputeLocalCompAtom {
template <class DeviceType> class ComputeCompositionAtomKokkos : public ComputeCompositionAtom {
public:
typedef DeviceType device_type;
typedef ArrayTypes<DeviceType> AT;
ComputeLocalCompAtomKokkos(class LAMMPS *, int, char **);
~ComputeLocalCompAtomKokkos() override;
ComputeCompositionAtomKokkos(class LAMMPS *, int, char **);
~ComputeCompositionAtomKokkos() override;
void init() override;
void compute_peratom() override;
KOKKOS_INLINE_FUNCTION
void operator()(TagComputeLocalCompAtom, const int &) const;
void operator()(TagComputeCompositionAtom, const int &) const;
private:

src/MESONT/Install.sh Executable file

@ -0,0 +1,40 @@
# Install/unInstall package files in LAMMPS
# mode = 0/1/2 for uninstall/install/update
mode=$1
# enforce using portable C locale
LC_ALL=C
export LC_ALL
# arg1 = file, arg2 = file it depends on
action () {
if (test $mode = 0) then
rm -f ../$1
elif (! cmp -s $1 ../$1) then
if (test -z "$2" || test -e ../$2) then
cp $1 ..
if (test $mode = 2) then
echo " updating src/$1"
fi
fi
elif (test -n "$2") then
if (test ! -e ../$2) then
rm -f ../$1
fi
fi
}
# some styles in MESONT have base classes in MOLECULE
if (test $1 = 1) then
if (test ! -e ../bond_harmonic.cpp) then
echo "Must install MOLECULE package with MESONT"
exit 1
fi
fi
for file in *.cpp *.h; do
action ${file}
done


@ -200,7 +200,7 @@ void PairHDNNP::coeff(int narg, char **arg)
emap = "";
for (int i = 2; i < narg; i++) {
if (strcmp(arg[i], "NULL") != 0) {
if (i != 2) emap += ",";
if (!emap.empty()) emap += ",";
emap += std::to_string(i - 1) + ":" + arg[i];
map[i - 1] = 1;
}
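
The fix above matters because arguments equal to NULL are skipped: keying the separator on the loop index (the old i != 2 test) prepends a stray comma whenever the first mapped element is NULL, while testing !emap.empty() adds a comma only after something was actually appended. A small stand-alone sketch of the corrected join, using a hypothetical helper rather than the pair style's real argument handling:

    #include <cstring>
    #include <string>
    #include <vector>

    // Join non-"NULL" element names as "index:name" pairs, comma separated.
    std::string build_emap(const std::vector<const char *> &args)
    {
      std::string emap;
      for (std::size_t i = 0; i < args.size(); ++i) {
        if (std::strcmp(args[i], "NULL") != 0) {
          if (!emap.empty()) emap += ",";    // separator only after a real entry
          emap += std::to_string(i + 1) + ":" + args[i];
        }
      }
      return emap;                           // e.g. {"NULL","H","O"} -> "2:H,3:O"
    }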


@ -1,58 +0,0 @@
# Install/unInstall package files in LAMMPS
# mode = 0/1/2 for uninstall/install/update
mode=$1
# enforce using portable C locale
LC_ALL=C
export LC_ALL
# arg1 = file, arg2 = file it depends on
action () {
if (test $mode = 0) then
rm -f ../$1
elif (! cmp -s $1 ../$1) then
if (test -z "$2" || test -e ../$2) then
cp $1 ..
if (test $mode = 2) then
echo " updating src/$1"
fi
fi
elif (test -n "$2") then
if (test ! -e ../$2) then
rm -f ../$1
fi
fi
}
# force rebuild of files with LMP_MPIIO switch
# also read/write restart so their dependence on changed mpiio.h is rebuilt
touch ../mpiio.h
touch ../read_restart.cpp
touch ../write_restart.cpp
# all package files with no dependencies
for file in *.cpp *.h; do
test -f ${file} && action $file
done
# edit 2 Makefile.package to include/exclude LMP_MPIIO setting
if (test $1 = 1) then
if (test -e ../Makefile.package) then
sed -i -e 's/[^ \t]*MPIIO[^ \t]* //' ../Makefile.package
sed -i -e 's|^PKG_INC =[ \t]*|&-DLMP_MPIIO |' ../Makefile.package
fi
elif (test $1 = 0) then
if (test -e ../Makefile.package) then
sed -i -e 's/[^ \t]*MPIIO[^ \t]* //' ../Makefile.package
fi
fi


@ -1,728 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "dump_atom_mpiio.h"
#include "domain.h"
#include "error.h"
#include "memory.h"
#include "update.h"
#include <cmath>
#include <cstring>
#include "omp_compat.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
/* ---------------------------------------------------------------------- */
DumpAtomMPIIO::DumpAtomMPIIO(LAMMPS *lmp, int narg, char **arg) : DumpAtom(lmp, narg, arg)
{
if (me == 0)
error->warning(FLERR, "MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpAtomMPIIO::~DumpAtomMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
int myrank;
MPI_Comm_rank(world, &myrank);
if (myrank == 0) MPI_File_get_size(mpifh, &mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
} else { // replace open
int err =
MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
mpifo = 0;
MPI_File_set_size(mpifh, (MPI_Offset) (headerSize + sumFileSize));
currentFileSize = (headerSize + sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write()
{
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme, &ntotal, 1, MPI_LMP_BIGINT, MPI_SUM, world);
int nmax;
MPI_Allreduce(&nme, &nmax, 1, MPI_INT, MPI_MAX, world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR, "Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf, (maxbuf * size_one), "dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids, maxids, "dump:ids");
}
if (sort_flag && sortcol == 0)
pack(ids);
else
pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme, buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs - 1), world);
openfile();
performEstimate = 0;
write_header(nheader); // mpifo now points to end of header info
// now actually write the data
performEstimate = 0;
write_data(nme, buf);
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete[] filecurrent;
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::init_style()
{
if (image_flag == 0)
size_one = 5;
else
size_one = 8;
// format = copy of default or user-specified line format
// default depends on image flags
delete[] format;
if (format_line_user) {
format = utils::strdup(std::string(format_line_user) + "\n");
} else {
if (image_flag == 0)
format = utils::strdup(TAGINT_FORMAT " %d %g %g %g\n");
else
format = utils::strdup(TAGINT_FORMAT " %d %g %g %g %d %d %d\n");
}
// setup boundary string
domain->boundary_string(boundstr);
// setup column string
std::string default_columns;
if (scale_flag == 0 && image_flag == 0)
default_columns = "id type x y z";
else if (scale_flag == 0 && image_flag == 1)
default_columns = "id type x y z ix iy iz";
else if (scale_flag == 1 && image_flag == 0)
default_columns = "id type xs ys zs";
else if (scale_flag == 1 && image_flag == 1)
default_columns = "id type xs ys zs ix iy iz";
int icol = 0;
columns.clear();
for (const auto &item : utils::split_words(default_columns)) {
if (columns.size()) columns += " ";
if (keyword_user[icol].size())
columns += keyword_user[icol];
else
columns += item;
++icol;
}
// setup function ptrs
if (binary && domain->triclinic == 0)
header_choice = &DumpAtomMPIIO::header_binary;
else if (binary && domain->triclinic == 1)
header_choice = &DumpAtomMPIIO::header_binary_triclinic;
else if (!binary && domain->triclinic == 0)
header_choice = &DumpAtomMPIIO::header_item;
else if (!binary && domain->triclinic == 1)
header_choice = &DumpAtomMPIIO::header_item_triclinic;
if (scale_flag == 1 && image_flag == 0 && domain->triclinic == 0)
pack_choice = &DumpAtomMPIIO::pack_scale_noimage;
else if (scale_flag == 1 && image_flag == 1 && domain->triclinic == 0)
pack_choice = &DumpAtomMPIIO::pack_scale_image;
else if (scale_flag == 1 && image_flag == 0 && domain->triclinic == 1)
pack_choice = &DumpAtomMPIIO::pack_scale_noimage_triclinic;
else if (scale_flag == 1 && image_flag == 1 && domain->triclinic == 1)
pack_choice = &DumpAtomMPIIO::pack_scale_image_triclinic;
else if (scale_flag == 0 && image_flag == 0)
pack_choice = &DumpAtomMPIIO::pack_noscale_noimage;
else if (scale_flag == 0 && image_flag == 1)
pack_choice = &DumpAtomMPIIO::pack_noscale_image;
if (binary)
write_choice = &DumpAtomMPIIO::write_binary;
else if (buffer_flag == 1)
write_choice = &DumpAtomMPIIO::write_string;
else if (image_flag == 0)
write_choice = &DumpAtomMPIIO::write_lines_noimage;
else if (image_flag == 1)
write_choice = &DumpAtomMPIIO::write_lines_image;
}
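
init_style() wires up header_choice, pack_choice and write_choice once per run, so the hot path in write() just calls through member-function pointers instead of re-testing the binary/triclinic/image flags for every snapshot. The dispatch idiom, reduced to a hypothetical class:

    #include <cstdio>

    class Writer {
     public:
      explicit Writer(bool binary) { choice = binary ? &Writer::write_binary : &Writer::write_text; }
      void write(int n) { (this->*choice)(n); }     // one indirect call per snapshot

     private:
      typedef void (Writer::*WriteFn)(int);
      WriteFn choice;                               // selected once, like write_choice above
      void write_text(int n) { std::printf("text   %d\n", n); }
      void write_binary(int n) { std::printf("binary %d\n", n); }
    };

    int main()
    {
      Writer w(false);
      w.write(42);                                  // prints "text   42"
      return 0;
    }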
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_header(bigint ndump)
{
if (!header_choice) error->all(FLERR, "Must not use 'run pre no' after creating a new dump");
(this->*header_choice)(ndump);
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_binary(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (6 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_binary_triclinic(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (9 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxy, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_item(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS {}\n", boundstr);
itemtxt += fmt::format("{} {}\n{} {}\n{} {}\n", boxxlo, boxxhi, boxylo, boxyhi, boxzlo, boxzhi);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_item_triclinic(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS xy xz yz {}\n", boundstr);
itemtxt += fmt::format("{} {} {}\n{} {} {}\n{} {} {}\n", boxxlo, boxxhi, boxxy, boxylo, boxyhi,
boxxz, boxzlo, boxzhi, boxyz);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n, mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_binary(int n, double *mybuf)
{
n *= size_one;
if (performEstimate) {
bigint incPrefix = 0;
bigint bigintNme = (bigint) nme;
MPI_Scan(&bigintNme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * size_one * sizeof(double)) + (nprocs * sizeof(int));
offsetFromHeader = ((incPrefix - bigintNme) * size_one * sizeof(double)) + (me * sizeof(int));
} else {
int byteBufSize = (n * sizeof(double)) + sizeof(int);
char *bufWithSize;
memory->create(bufWithSize, byteBufSize, "dump:bufWithSize");
memcpy(bufWithSize, (char *) (&n), sizeof(int));
memcpy(&((char *) bufWithSize)[sizeof(int)], mybuf, (n * sizeof(double)));
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, bufWithSize, byteBufSize, MPI_BYTE,
MPI_STATUS_IGNORE);
memory->destroy(bufWithSize);
if (flush_flag) MPI_File_sync(mpifh);
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if (nthreads > 1)
nsme = convert_string_omp(n, mybuf);
else {
nsme = convert_string(n, mybuf);
}
#else
nsme = convert_string(n, mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * sizeof(char));
offsetFromHeader = ((incPrefix - bigintNsme) * sizeof(char));
} else {
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, sbuf, nsme, MPI_CHAR, MPI_STATUS_IGNORE);
if (flush_flag) MPI_File_sync(mpifh);
}
}
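
Both write paths above follow the same two-pass scheme: during the performEstimate pass every rank computes its byte count, MPI_Scan turns the counts into an inclusive prefix sum, the rank's start offset is that prefix minus its own count (plus the shared header size), and the second pass writes the payload with a single collective MPI_File_write_at_all. A stripped-down, stand-alone sketch of that offset calculation (hypothetical output file, header offset omitted):

    #include <mpi.h>
    #include <string>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // each rank owns a variable-length chunk of formatted text
      std::string chunk = "rank " + std::to_string(rank) + "\n";
      long long nbytes = (long long) chunk.size(), prefix = 0;

      // inclusive prefix sum of byte counts -> end offset of this rank's chunk
      MPI_Scan(&nbytes, &prefix, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
      MPI_Offset offset = (MPI_Offset) (prefix - nbytes);   // start offset of this rank

      char fname[] = "sketch.txt";
      MPI_File fh;
      MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_CREATE | MPI_MODE_WRONLY,
                    MPI_INFO_NULL, &fh);
      MPI_File_write_at_all(fh, offset, &chunk[0], (int) nbytes, MPI_CHAR,
                            MPI_STATUS_IGNORE);
      MPI_File_close(&fh);

      MPI_Finalize();
      return 0;
    }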
/* ---------------------------------------------------------------------- */
int DumpAtomMPIIO::convert_string(int n, double *mybuf)
{
if (image_flag == 0)
return convert_noimage(n, mybuf);
else
return convert_image(n, mybuf);
}
/* ---------------------------------------------------------------------- */
#if defined(_OPENMP)
int DumpAtomMPIIO::convert_string_omp(int n, double *mybuf)
{
if (image_flag == 0)
return convert_noimage_omp(n, mybuf);
else
return convert_image_omp(n, mybuf);
}
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpAtomMPIIO::convert_image_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n, mybuf);
} else {
memory->create(mpifhStringCountPerThread, nthreads, "dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads * sizeof(char *));
memory->create(bufOffset, nthreads, "dump:bufOffset");
memory->create(bufRange, nthreads, "dump:bufRange");
memory->create(bufLength, nthreads, "dump:bufLength");
int i = 0;
for (i = 0; i < (nthreads - 1); i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = (int) (floor((double) n / (double) nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = n - (i * (int) (floor((double) n / (double) nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, \
mpifhStringCountPerThread, \
mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m = 0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(
mpifh_buffer_line_per_thread[tid],
(mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]), format,
static_cast<int>(mybuf[bufOffset[tid] + m]),
static_cast<int>(mybuf[bufOffset[tid] + m + 1]), mybuf[bufOffset[tid] + m + 2],
mybuf[bufOffset[tid] + m + 3], mybuf[bufOffset[tid] + m + 4],
static_cast<int>(mybuf[bufOffset[tid] + m + 5]),
static_cast<int>(mybuf[bufOffset[tid] + m + 6]),
static_cast<int>(mybuf[bufOffset[tid] + m + 7]));
m += size_one;
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i = 0; i < nthreads; i++) { mpifhStringCount += mpifhStringCountPerThread[i]; }
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount + 1;
memory->grow(sbuf, maxsbuf, "dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i = 0; i < nthreads; i++) {
strcat(sbuf, mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpAtomMPIIO::convert_noimage_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n, mybuf);
} else {
memory->create(mpifhStringCountPerThread, nthreads, "dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads * sizeof(char *));
memory->create(bufOffset, nthreads, "dump:bufOffset");
memory->create(bufRange, nthreads, "dump:bufRange");
memory->create(bufLength, nthreads, "dump:bufLength");
int i = 0;
for (i = 0; i < (nthreads - 1); i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = (int) (floor((double) n / (double) nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = n - (i * (int) (floor((double) n / (double) nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, \
mpifhStringCountPerThread, \
mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m = 0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(
mpifh_buffer_line_per_thread[tid],
(mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]), format,
static_cast<int>(mybuf[bufOffset[tid] + m]),
static_cast<int>(mybuf[bufOffset[tid] + m + 1]), mybuf[bufOffset[tid] + m + 2],
mybuf[bufOffset[tid] + m + 3], mybuf[bufOffset[tid] + m + 4]);
m += size_one;
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i = 0; i < nthreads; i++) { mpifhStringCount += mpifhStringCountPerThread[i]; }
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount + 1;
memory->grow(sbuf, maxsbuf, "dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i = 0; i < nthreads; i++) {
strcat(sbuf, mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
#endif
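
Both convert_*_omp variants implement the same recipe: split the n dump lines into per-thread index ranges, let each thread format its range into a private, growable character buffer, then concatenate the buffers in thread order so the result matches the serial convert_string() output. A compact sketch of that pattern with hypothetical record formatting, assuming OpenMP is enabled at compile time:

    #include <omp.h>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Format n records in parallel while preserving record order in the result.
    std::string format_parallel(int n)
    {
      std::vector<std::string> part(omp_get_max_threads());

      #pragma omp parallel
      {
        const int tid = omp_get_thread_num();
        const int nt  = omp_get_num_threads();            // actual team size
        const int lo  = (int) ((long long) n * tid / nt);
        const int hi  = (int) ((long long) n * (tid + 1) / nt);
        char line[64];
        for (int i = lo; i < hi; i++) {
          std::snprintf(line, sizeof(line), "%d %g\n", i, 0.5 * i);
          part[tid] += line;                               // thread-private buffer, no races
        }
      }                                                    // implicit barrier

      std::string out;
      for (const auto &p : part) out += p;                 // stitch together in thread order
      return out;
    }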


@ -1,72 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(atom/mpiio,DumpAtomMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_ATOM_MPIIO_H
#define LMP_DUMP_ATOM_MPIIO_H
#include "dump_atom.h"
namespace LAMMPS_NS {
class DumpAtomMPIIO : public DumpAtom {
public:
DumpAtomMPIIO(class LAMMPS *, int, char **);
~DumpAtomMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the file up through this rank offset from the end of the header data
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
void openfile() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
void init_style() override;
typedef void (DumpAtomMPIIO::*FnPtrHeader)(bigint);
FnPtrHeader header_choice; // ptr to write header functions
void header_binary(bigint);
void header_binary_triclinic(bigint);
void header_item(bigint);
void header_item_triclinic(bigint);
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
int convert_image_omp(int, double *); // multithreaded version of convert_image
int convert_noimage_omp(int, double *); // multithreaded version of convert_noimage
#endif
int convert_string(int, double *) override;
typedef void (DumpAtomMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_binary(int, double *);
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif


@ -1,488 +0,0 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "omp_compat.h"
#include "dump_cfg_mpiio.h"
#include "atom.h"
#include "domain.h"
#include "update.h"
#include "memory.h"
#include "error.h"
#include <cmath>
#include <cstring>
#ifdef LMP_USER_IO_TIMER
#include <sys/times.h>
#include <hwi/include/bqc/A2_inlines.h>
long dumpCFGTimestamps[10];
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
#define UNWRAPEXPAND 10.0
#define ONEFIELD 32
#define DELTA 1048576
/* ---------------------------------------------------------------------- */
DumpCFGMPIIO::DumpCFGMPIIO(LAMMPS *lmp, int narg, char **arg) :
DumpCFG(lmp, narg, arg)
{
if (me == 0)
error->warning(FLERR,"MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpCFGMPIIO::~DumpCFGMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND |
MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}", filecurrent);
int myrank;
MPI_Comm_rank(world,&myrank);
if (myrank == 0)
MPI_File_get_size(mpifh,&mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
} else { // replace open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}", filecurrent);
mpifo = 0;
MPI_File_set_size(mpifh,(MPI_Offset) (headerSize+sumFileSize));
currentFileSize = (headerSize+sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write()
{
#ifdef LMP_USER_IO_TIMER
long startTimeBase, endTimeBase;
MPI_Barrier(world); // timestamp barrier
if (me == 0)
startTimeBase = GetTimeBase();
#endif
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme,&ntotal,1,MPI_LMP_BIGINT,MPI_SUM,world);
int nmax;
MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR,"Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf,(maxbuf*size_one),"dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids,maxids,"dump:ids");
}
if (sort_flag && sortcol == 0) pack(ids);
else pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme,buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs-1), world);
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[0] = GetTimeBase();
#endif
openfile();
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[1] = GetTimeBase();
#endif
performEstimate = 0;
write_header(nheader); // mpifo now points to end of header info
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[2] = GetTimeBase();
#endif
// now actually write the data
performEstimate = 0;
write_data(nme,buf);
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[3] = GetTimeBase();
#endif
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete [] filecurrent;
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[4] = GetTimeBase();
if (me == 0) {
endTimeBase = GetTimeBase();
printf("total dump cycles: %ld - estimates and setup: %ld openfile: %ld write header: %ld write data: %ld close file: %ld\n",(long) (endTimeBase-startTimeBase),(long) (dumpCFGTimestamps[0]-startTimeBase),(long) (dumpCFGTimestamps[1]-dumpCFGTimestamps[0]),(long) (dumpCFGTimestamps[2]-dumpCFGTimestamps[1]),(long) (dumpCFGTimestamps[3]-dumpCFGTimestamps[2]),(long) (dumpCFGTimestamps[4]-dumpCFGTimestamps[3]));
}
#endif
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::init_style()
{
if (multifile == 0 && !multifile_override)
error->all(FLERR,"Dump cfg requires one snapshot per file");
DumpCFG::init_style();
// setup function ptrs
write_choice = &DumpCFGMPIIO::write_string;
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write_header(bigint n)
{
// set scale factor used by AtomEye for CFG viz
// default = 1.0
// for peridynamics, set to pre-computed PD scale factor
// so PD particles mimic C atoms
// for unwrapped coords, set to UNWRAPEXPAND (10.0)
// so molecules are not split across periodic box boundaries
double scale = 1.0;
if (atom->peri_flag) scale = atom->pdscale;
else if (unwrapflag == 1) scale = UNWRAPEXPAND;
auto header = fmt::format("Number of particles = {}\n",n);
header += fmt::format("A = {} Angstrom (basic length-scale)\n",scale);
header += fmt::format("H0(1,1) = {} A\n",domain->xprd);
header += fmt::format("H0(1,2) = 0 A\n");
header += fmt::format("H0(1,3) = 0 A\n");
header += fmt::format("H0(2,1) = {} A\n",domain->xy);
header += fmt::format("H0(2,2) = {} A\n",domain->yprd);
header += fmt::format("H0(2,3) = 0 A\n");
header += fmt::format("H0(3,1) = {} A\n",domain->xz);
header += fmt::format("H0(3,2) = {} A\n",domain->yz);
header += fmt::format("H0(3,3) = {} A\n",domain->zprd);
header += fmt::format(".NO_VELOCITY.\n");
header += fmt::format("entry_count = {}\n",nfield-2);
for (int i = 0; i < nfield-5; i++)
header += fmt::format("auxiliary[{}] = {}\n",i,auxname[i]);
if (performEstimate) {
headerSize = header.size();
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh,mpifo,(void *)header.c_str(),header.size(),MPI_CHAR,MPI_STATUS_IGNORE);
mpifo += header.size();
}
}
#if defined(_OPENMP)
/* ----------------------------------------------------------------------
convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpCFGMPIIO::convert_string_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n,mybuf);
}
else {
memory->create(mpifhStringCountPerThread,nthreads,"dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads*sizeof(char*));
memory->create(bufOffset,nthreads,"dump:bufOffset");
memory->create(bufRange,nthreads,"dump:bufRange");
memory->create(bufLength,nthreads,"dump:bufLength");
int i=0;
for (i=0;i<(nthreads-1);i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = (int)(floor((double)n/(double)nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = n-(i*(int)(floor((double)n/(double)nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, mpifhStringCountPerThread, mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m=0;
if (unwrapflag == 0) {
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
for (int j = 0; j < size_one; j++) {
if (j == 0) {
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%f \n",(mybuf[bufOffset[tid]+m]));
} else if (j == 1) {
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%s \n",typenames[(int) mybuf[bufOffset[tid]+m]]);
} else if (j >= 2) {
if (vtype[j] == Dump::INT)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<int> (mybuf[bufOffset[tid]+m]));
else if (vtype[j] == Dump::DOUBLE)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],mybuf[bufOffset[tid]+m]);
else if (vtype[j] == Dump::STRING)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],typenames[(int) mybuf[bufOffset[tid]+m]]);
else if (vtype[j] == Dump::BIGINT)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<bigint> (mybuf[bufOffset[tid]+m]));
}
m++;
} // for j
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"\n");
} // for i
} // wrap flag
else if (unwrapflag == 1) {
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
for (int j = 0; j < size_one; j++) {
double unwrap_coord;
if (j == 0) {
//offset += sprintf(&sbuf[offset],"%f \n",mybuf[m]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%f \n",mybuf[bufOffset[tid]+m]);
} else if (j == 1) {
// offset += sprintf(&sbuf[offset],"%s \n",typenames[(int) mybuf[m]]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%s \n",typenames[(int) mybuf[bufOffset[tid]+m]]);
} else if (j >= 2 && j <= 4) {
unwrap_coord = (mybuf[bufOffset[tid]+m] - 0.5)/UNWRAPEXPAND + 0.5;
//offset += sprintf(&sbuf[offset],vformat[j],unwrap_coord);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],unwrap_coord);
} else if (j >= 5) {
if (vtype[j] == Dump::INT)
//offset +=
// sprintf(&sbuf[offset],vformat[j],static_cast<int> (mybuf[m]));
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<int> (mybuf[bufOffset[tid]+m]));
else if (vtype[j] == Dump::DOUBLE)
// offset += sprintf(&sbuf[offset],vformat[j],mybuf[m]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],mybuf[bufOffset[tid]+m]);
else if (vtype[j] == Dump::STRING)
// offset +=
// sprintf(&sbuf[offset],vformat[j],typenames[(int) mybuf[m]]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],typenames[(int) mybuf[bufOffset[tid]+m]]);
else if (vtype[j] == Dump::BIGINT)
// offset +=
// sprintf(&sbuf[offset],vformat[j],static_cast<bigint> (mybuf[m]));
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<bigint> (mybuf[bufOffset[tid]+m]));
}
m++;
} // for j
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"\n");
} // for i
} // unwrap flag
} // pragma omp parallel
#pragma omp barrier
mpifhStringCount = 0;
for (i=0;i<nthreads;i++) {
mpifhStringCount += mpifhStringCountPerThread[i];
}
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount+1;
memory->grow(sbuf,maxsbuf,"dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i=0;i<nthreads;i++) {
strcat(sbuf,mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
} // else omp
return mpifhStringCount;
}
#endif
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n,mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if (nthreads > 1)
nsme = convert_string_omp(n,mybuf);
else
nsme = convert_string(n,mybuf);
#else
nsme = convert_string(n,mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
sumFileSize = (incPrefix*sizeof(char));
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
}
else {
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
if (flush_flag)
MPI_File_sync(mpifh);
}
}


@ -1,60 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(cfg/mpiio,DumpCFGMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_CFG_MPIIO_H
#define LMP_DUMP_CFG_MPIIO_H
#include "dump_cfg.h"
namespace LAMMPS_NS {
class DumpCFGMPIIO : public DumpCFG {
public:
DumpCFGMPIIO(class LAMMPS *, int, char **);
~DumpCFGMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the file up through this rank offset from the end of the header data
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
#endif
void openfile() override;
void init_style() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
typedef void (DumpCFGMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif


@ -1,648 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "dump_custom_mpiio.h"
#include "domain.h"
#include "error.h"
#include "fix.h"
#include "input.h"
#include "memory.h"
#include "modify.h"
#include "update.h"
#include "variable.h"
#include <cmath>
#include <cstring>
#include "omp_compat.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
/* ---------------------------------------------------------------------- */
DumpCustomMPIIO::DumpCustomMPIIO(LAMMPS *lmp, int narg, char **arg) : DumpCustom(lmp, narg, arg)
{
if (me == 0)
error->warning(FLERR, "MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpCustomMPIIO::~DumpCustomMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
int myrank;
MPI_Comm_rank(world, &myrank);
if (myrank == 0) MPI_File_get_size(mpifh, &mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
} else { // replace open
int err =
MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
mpifo = 0;
MPI_File_set_size(mpifh, (MPI_Offset) (headerSize + sumFileSize));
currentFileSize = (headerSize + sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write()
{
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme, &ntotal, 1, MPI_LMP_BIGINT, MPI_SUM, world);
int nmax;
MPI_Allreduce(&nme, &nmax, 1, MPI_INT, MPI_MAX, world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR, "Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf, (maxbuf * size_one), "dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids, maxids, "dump:ids");
}
if (sort_flag && sortcol == 0)
pack(ids);
else
pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme, buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs - 1), world);
openfile();
performEstimate = 0;
write_header(nheader); // mpifo now points to end of header info
// now actually write the data
performEstimate = 0;
write_data(nme, buf);
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete[] filecurrent;
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::init_style()
{
// assemble ITEMS: column string from defaults and user values
delete[] columns;
std::string combined;
int icol = 0;
for (const auto &item : utils::split_words(columns_default)) {
if (combined.size()) combined += " ";
if (keyword_user[icol].size())
combined += keyword_user[icol];
else
combined += item;
++icol;
}
columns = utils::strdup(combined);
// format = copy of default or user-specified line format
delete[] format;
if (format_line_user)
format = utils::strdup(format_line_user);
else
format = utils::strdup(format_default);
// tokenize the format string and add space at end of each format element
// if user-specified int/float format exists, use it instead
// if user-specified column format exists, use it instead
// lo priority = line, medium priority = int/float, hi priority = column
auto words = utils::split_words(format);
if ((int) words.size() < nfield)
error->all(FLERR, "Dump_modify format line is too short: {}", format);
int i = 0;
for (const auto &word : words) {
if (i >= nfield) break;
delete[] vformat[i];
if (format_column_user[i])
vformat[i] = utils::strdup(std::string(format_column_user[i]) + " ");
else if (vtype[i] == Dump::INT && format_int_user)
vformat[i] = utils::strdup(std::string(format_int_user) + " ");
else if (vtype[i] == Dump::DOUBLE && format_float_user)
vformat[i] = utils::strdup(std::string(format_float_user) + " ");
else if (vtype[i] == Dump::BIGINT && format_bigint_user)
vformat[i] = utils::strdup(std::string(format_bigint_user) + " ");
else
vformat[i] = utils::strdup(word + " ");
// remove trailing blank on last column's format
if (i == nfield - 1) vformat[i][strlen(vformat[i]) - 1] = '\0';
++i;
}
// setup boundary string
domain->boundary_string(boundstr);
// setup function ptrs
if (binary && domain->triclinic == 0)
header_choice = &DumpCustomMPIIO::header_binary;
else if (binary && domain->triclinic == 1)
header_choice = &DumpCustomMPIIO::header_binary_triclinic;
else if (!binary && domain->triclinic == 0)
header_choice = &DumpCustomMPIIO::header_item;
else if (!binary && domain->triclinic == 1)
header_choice = &DumpCustomMPIIO::header_item_triclinic;
if (binary)
write_choice = &DumpCustomMPIIO::write_binary;
else
write_choice = &DumpCustomMPIIO::write_string;
// find current ptr for each compute,fix,variable
// check that fix frequency is acceptable
for (i = 0; i < ncompute; i++) {
compute[i] = modify->get_compute_by_id(id_compute[i]);
if (!compute[i])
error->all(FLERR, "Could not find dump custom/mpiio compute ID {}", id_compute[i]);
}
for (i = 0; i < nfix; i++) {
fix[i] = modify->get_fix_by_id(id_fix[i]);
if (!fix[i]) error->all(FLERR, "Could not find dump custom/mpiio fix ID {}", id_fix[i]);
if (nevery % fix[i]->peratom_freq)
error->all(FLERR, "dump custom/mpiio and fix not computed at compatible times");
}
for (i = 0; i < nvariable; i++) {
int ivariable = input->variable->find(id_variable[i]);
if (ivariable < 0)
error->all(FLERR, "Could not find dump custom/mpiio variable name {}", id_variable[i]);
variable[i] = ivariable;
}
// set index and check validity of region
if (idregion && !domain->get_region_by_id(idregion))
error->all(FLERR, "Region {} for dump custom/mpiio does not exist", idregion);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_header(bigint ndump)
{
if (!header_choice) error->all(FLERR, "Must not use 'run pre no' after creating a new dump");
(this->*header_choice)(ndump);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_binary(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (6 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_binary_triclinic(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (9 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxy, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_item(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS {}\n", boundstr);
itemtxt += fmt::format("{} {}\n{} {}\n{} {}\n", boxxlo, boxxhi, boxylo, boxyhi, boxzlo, boxzhi);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_item_triclinic(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS xy xz yz {}\n", boundstr);
itemtxt += fmt::format("{} {} {}\n{} {} {}\n{} {} {}\n", boxxlo, boxxhi, boxxy, boxylo, boxyhi,
boxxz, boxzlo, boxzhi, boxyz);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n, mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_binary(int n, double *mybuf)
{
n *= size_one;
if (performEstimate) {
bigint incPrefix = 0;
bigint bigintNme = (bigint) nme;
MPI_Scan(&bigintNme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * size_one * sizeof(double)) + (nprocs * sizeof(int));
offsetFromHeader = ((incPrefix - bigintNme) * size_one * sizeof(double)) + (me * sizeof(int));
} else {
int byteBufSize = (n * sizeof(double)) + sizeof(int);
char *bufWithSize;
memory->create(bufWithSize, byteBufSize, "dump:bufWithSize");
memcpy(bufWithSize, (char *) (&n), sizeof(int));
memcpy(&((char *) bufWithSize)[sizeof(int)], mybuf, (n * sizeof(double)));
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, bufWithSize, byteBufSize, MPI_BYTE,
MPI_STATUS_IGNORE);
memory->destroy(bufWithSize);
if (flush_flag) MPI_File_sync(mpifh);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if ((nthreads > 1) && !(lmp->kokkos))
nsme = convert_string_omp(n, mybuf); // not (yet) compatible with Kokkos
else
nsme = convert_string(n, mybuf);
#else
nsme = convert_string(n, mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * sizeof(char));
offsetFromHeader = ((incPrefix - bigintNsme) * sizeof(char));
} else {
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, sbuf, nsme, MPI_CHAR, MPI_STATUS_IGNORE);
if (flush_flag) MPI_File_sync(mpifh);
}
}
#if defined(_OPENMP)
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpCustomMPIIO::convert_string_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n, mybuf);
} else {
memory->create(mpifhStringCountPerThread, nthreads, "dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads * sizeof(char *));
memory->create(bufOffset, nthreads, "dump:bufOffset");
memory->create(bufRange, nthreads, "dump:bufRange");
memory->create(bufLength, nthreads, "dump:bufLength");
int i = 0;
for (i = 0; i < (nthreads - 1); i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = (int) (floor((double) n / (double) nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = n - (i * (int) (floor((double) n / (double) nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, \
mpifhStringCountPerThread, \
mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m = 0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(
mpifh_buffer_line_per_thread[tid],
(mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
for (int j = 0; j < size_one; j++) {
if (vtype[j] == Dump::INT)
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),
vformat[j], static_cast<int>(mybuf[bufOffset[tid] + m]));
else if (vtype[j] == Dump::DOUBLE)
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),
vformat[j], mybuf[bufOffset[tid] + m]);
else if (vtype[j] == Dump::STRING)
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),
vformat[j], typenames[(int) mybuf[bufOffset[tid] + m]]);
m++;
}
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]), "\n");
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i = 0; i < nthreads; i++) { mpifhStringCount += mpifhStringCountPerThread[i]; }
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount + 1;
memory->grow(sbuf, maxsbuf, "dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i = 0; i < nthreads; i++) {
strcat(sbuf, mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
#endif
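For readers unfamiliar with the collective I/O pattern removed here, the following is a minimal standalone sketch (not LAMMPS code; all names are illustrative) of the offset idiom that write_binary() and write_string() above rely on: an MPI_Scan prefix sum of per-rank byte counts yields each rank's file offset and the total file size, after which every rank writes its chunk with a single MPI_File_write_at_all.

#include <mpi.h>
#include <string>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  // each rank formats its own chunk of text (stand-in for convert_string())
  std::string chunk = "rank " + std::to_string(me) + " data\n";
  long long mybytes = (long long) chunk.size();

  // inclusive prefix sum of byte counts; subtracting my own count gives my offset
  long long prefix = 0;
  MPI_Scan(&mybytes, &prefix, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
  MPI_Offset myoffset = (MPI_Offset) (prefix - mybytes);

  // the last rank holds the total size; preallocate the file, then write collectively
  MPI_File fh;
  MPI_File_open(MPI_COMM_WORLD, "sketch.txt",
                MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  long long total = prefix;
  MPI_Bcast(&total, 1, MPI_LONG_LONG, nprocs - 1, MPI_COMM_WORLD);
  MPI_File_set_size(fh, (MPI_Offset) total);
  MPI_File_write_at_all(fh, myoffset, chunk.c_str(), (int) chunk.size(),
                        MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}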

View File

@ -1,68 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(custom/mpiio,DumpCustomMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_CUSTOM_MPIIO_H
#define LMP_DUMP_CUSTOM_MPIIO_H
#include "dump_custom.h"
namespace LAMMPS_NS {
class DumpCustomMPIIO : public DumpCustom {
public:
DumpCustomMPIIO(class LAMMPS *, int, char **);
~DumpCustomMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the data written up through this rank, measured as an offset from the end of the header
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
#endif
void openfile() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
void init_style() override;
typedef void (DumpCustomMPIIO::*FnPtrHeader)(bigint);
FnPtrHeader header_choice; // ptr to write header functions
void header_binary(bigint);
void header_binary_triclinic(bigint);
void header_item(bigint);
void header_item_triclinic(bigint);
typedef void (DumpCustomMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_binary(int, double *);
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -1,369 +0,0 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "dump_xyz_mpiio.h"
#include "compute.h"
#include "domain.h"
#include "error.h"
#include "memory.h"
#include "update.h"
#include <cmath>
#include <cstring>
#include "omp_compat.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
/* ---------------------------------------------------------------------- */
DumpXYZMPIIO::DumpXYZMPIIO(LAMMPS *lmp, int narg, char **arg) :
DumpXYZ(lmp, narg, arg) {
if (me == 0)
error->warning(FLERR,"MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpXYZMPIIO::~DumpXYZMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND |
MPI_MODE_WRONLY , MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}", filecurrent);
int myrank;
MPI_Comm_rank(world,&myrank);
if (myrank == 0) MPI_File_get_size(mpifh,&mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
} else { // replace open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}",filecurrent);
mpifo = 0;
MPI_File_set_size(mpifh,(MPI_Offset) (headerSize+sumFileSize));
currentFileSize = (headerSize+sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write()
{
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme,&ntotal,1,MPI_LMP_BIGINT,MPI_SUM,world);
int nmax;
MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR,"Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf,(maxbuf*size_one),"dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids,maxids,"dump:ids");
}
if (sort_flag && sortcol == 0) pack(ids);
else pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme,buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs-1), world);
openfile();
performEstimate = 0;
write_header(nheader); // actually write the header - mpifo now points to end of header info
// now actually write the data
performEstimate = 0;
write_data(nme,buf);
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete[] filecurrent;
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::init_style()
{
// format = copy of default or user-specified line format
delete[] format;
char *str;
if (format_line_user) str = format_line_user;
else str = format_default;
int n = strlen(str) + 2;
format = new char[n];
strcpy(format,str);
strcat(format,"\n");
// initialize typenames array to be backward compatible by default
// a 32-bit int can be maximally 10 digits plus sign
if (typenames == nullptr) {
typenames = new char*[ntypes+1];
for (int itype = 1; itype <= ntypes; itype++)
typenames[itype] = utils::strdup(std::to_string(itype));
}
// setup function ptr
write_choice = &DumpXYZMPIIO::write_string;
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write_header(bigint n)
{
auto header = fmt::format("{}\n Atoms. Timestep: {}", n, update->ntimestep);
if (time_flag) header += fmt::format(" Time: {:.6f}", compute_time());
header += "\n";
if (performEstimate) {
headerSize = header.size();
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh,mpifo,(void *)header.c_str(),header.size(),MPI_CHAR,MPI_STATUS_IGNORE);
mpifo += header.size();
}
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n,mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if (nthreads > 1)
nsme = convert_string_omp(n,mybuf);
else
nsme = convert_string(n,mybuf);
#else
nsme = convert_string(n,mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
sumFileSize = (incPrefix*sizeof(char));
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
}
else { // write data
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
if (flush_flag)
MPI_File_sync(mpifh);
}
}
#if defined(_OPENMP)
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpXYZMPIIO::convert_string_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n,mybuf);
}
else {
memory->create(mpifhStringCountPerThread,nthreads,"dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads*sizeof(char*));
memory->create(bufOffset,nthreads,"dump:bufOffset");
memory->create(bufRange,nthreads,"dump:bufRange");
memory->create(bufLength,nthreads,"dump:bufLength");
int i=0;
for (i=0;i<(nthreads-1);i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = (int)(floor((double)n/(double)nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = n-(i*(int)(floor((double)n/(double)nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, mpifhStringCountPerThread, mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m=0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),format,typenames[static_cast<int> (mybuf[bufOffset[tid]+m+1])],mybuf[bufOffset[tid]+m+2],mybuf[bufOffset[tid]+m+3],mybuf[bufOffset[tid]+m+4]);
m += size_one;
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i=0;i<nthreads;i++) {
mpifhStringCount += mpifhStringCountPerThread[i];
}
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount+1;
memory->grow(sbuf,maxsbuf,"dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i=0;i<nthreads;i++) {
strcat(sbuf,mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
#endif
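The per-thread buffering used by convert_string_omp() above can be illustrated with a short standalone sketch, assuming only standard OpenMP and the C++ library (none of the names below are LAMMPS identifiers): every thread formats a contiguous slice of the data into its own private string, and the strings are concatenated in thread order so the result matches the serial conversion.

#include <omp.h>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
  std::vector<double> data(1000);
  for (int i = 0; i < (int) data.size(); ++i) data[i] = 0.5 * i;

  int nthreads = omp_get_max_threads();
  std::vector<std::string> perthread(nthreads);   // one private buffer per thread

#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    char line[64];
    // static schedule without a chunk size assigns one contiguous block per thread,
    // in thread-id order, so concatenating per-thread buffers preserves the data order
#pragma omp for schedule(static)
    for (int i = 0; i < (int) data.size(); ++i) {
      snprintf(line, sizeof(line), "%g\n", data[i]);
      perthread[tid] += line;
    }
  }

  // concatenate in thread order to reproduce the serial ordering
  std::string out;
  for (int t = 0; t < nthreads; ++t) out += perthread[t];
  std::fputs(out.c_str(), stdout);
  return 0;
}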

View File

@ -1,61 +0,0 @@
/* -*- c++ -*- ---------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(xyz/mpiio,DumpXYZMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_XYZ_MPIIO_H
#define LMP_DUMP_XYZ_MPIIO_H
#include "dump_xyz.h"
namespace LAMMPS_NS {
class DumpXYZMPIIO : public DumpXYZ {
public:
DumpXYZMPIIO(class LAMMPS *, int, char **);
~DumpXYZMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the data written up through this rank, measured as an offset from the end of the header
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
#endif
void openfile() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
void init_style() override;
typedef void (DumpXYZMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -1,199 +0,0 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "restart_mpiio.h"
#include "error.h"
using namespace LAMMPS_NS;
// the (rather old) version of ROMIO in MPICH for Windows
// uses "char *" instead of "const char *". This works around it.
#if defined(_WIN32)
#define ROMIO_COMPAT_CAST (char *)
#else
#define ROMIO_COMPAT_CAST
#endif
/* ---------------------------------------------------------------------- */
RestartMPIIO::RestartMPIIO(LAMMPS *lmp) : Pointers(lmp)
{
mpiio_exists = 1;
MPI_Comm_size(world,&nprocs);
MPI_Comm_rank(world,&myrank);
}
/* ----------------------------------------------------------------------
calls MPI_File_open in read-only mode, read_restart should call this
for some file servers it is most efficient to only read or only write
------------------------------------------------------------------------- */
void RestartMPIIO::openForRead(const char *filename)
{
int err = MPI_File_open(world, ROMIO_COMPAT_CAST filename, MPI_MODE_RDONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot open restart file for reading - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
}
/* ----------------------------------------------------------------------
calls MPI_File_open in write-only mode, write_restart should call this
for some file servers it is most efficient to only read or only write
------------------------------------------------------------------------- */
void RestartMPIIO::openForWrite(const char *filename)
{
int err = MPI_File_open(world, ROMIO_COMPAT_CAST filename, MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot open restart file for writing - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
}
/* ----------------------------------------------------------------------
determine the absolute offset for the data to be written with
MPI_Scan of the send sizes
compute the file size based off the MPI_Scan send size value on the last rank
set the filesize with ftruncate via MPI_File_set_size
write the data via collective MPI-IO by calling MPI_File_write_at_all
------------------------------------------------------------------------- */
void RestartMPIIO::write(MPI_Offset headerOffset, int send_size, double *buf)
{
bigint incPrefix = 0;
bigint bigintSendSize = (bigint) send_size;
MPI_Scan(&bigintSendSize,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
bigint largestIncPrefix = incPrefix;
MPI_Bcast(&largestIncPrefix, 1, MPI_LMP_BIGINT, (nprocs-1), world);
int err = MPI_File_set_size(mpifh,
(headerOffset+(largestIncPrefix*sizeof(double))));
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot set restart file size - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
err = MPI_File_write_at_all(mpifh,headerOffset +
((incPrefix-bigintSendSize)*sizeof(double)),
buf,send_size,MPI_DOUBLE,MPI_STATUS_IGNORE);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot write to restart file - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
}
/* ----------------------------------------------------------------------
read the data into buf via collective MPI-IO by calling MPI_File_read_at_all
with the chunkOffset and chunkSize provided
if the consolidated chunksize is greater than INT_MAX
can only happen in extreme situation of reading restart file on
much fewer ranks than written and with relatively large data sizes
follow the collective IO call with rank independent IO to read remaining data
------------------------------------------------------------------------- */
void RestartMPIIO::read(MPI_Offset chunkOffset, bigint chunkSize, double *buf)
{
int intChunkSize;
bigint remainingSize = 0;
if (chunkSize > INT_MAX) {
intChunkSize = INT_MAX;
remainingSize = chunkSize - INT_MAX;
}
else intChunkSize = (int) chunkSize;
int err = MPI_File_read_at_all(mpifh,chunkOffset,buf,intChunkSize,
MPI_DOUBLE,MPI_STATUS_IGNORE);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot read from restart file - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
MPI_Offset currentOffset = chunkOffset+intChunkSize;
MPI_Offset bufOffset = intChunkSize;
while (remainingSize > 0) {
int currentChunkSize;
if (remainingSize > INT_MAX) {
currentChunkSize = INT_MAX;
remainingSize -= INT_MAX;
}
else {
currentChunkSize = remainingSize;
remainingSize = 0;
}
int err = MPI_File_read_at(mpifh,currentOffset,&buf[bufOffset],
currentChunkSize,MPI_DOUBLE,MPI_STATUS_IGNORE);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot read from restart file - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
currentOffset += currentChunkSize;
bufOffset += currentChunkSize;
}
}
/* ----------------------------------------------------------------------
calls MPI_File_close
------------------------------------------------------------------------- */
void RestartMPIIO::close()
{
int err = MPI_File_close(&mpifh);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot close restart file - MPI error: %s",mpiErrorString);
error->one(FLERR,str);
}
}
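The chunking described in the comment above RestartMPIIO::read() can be reduced to the following hedged sketch (not the LAMMPS implementation; the file name and helper function are made up): MPI count arguments are plain int, so a request larger than INT_MAX is served by one collective read capped at INT_MAX elements, followed by independent reads for the remainder.

#include <mpi.h>
#include <climits>
#include <vector>

// read n doubles starting at byte offset 'offset', splitting counts larger than INT_MAX
static void read_doubles(MPI_File fh, MPI_Offset offset, long long n, double *buf)
{
  // every rank must take part in the collective call exactly once,
  // even when its request fits into a single int
  int first = (n > INT_MAX) ? INT_MAX : (int) n;
  MPI_File_read_at_all(fh, offset, buf, first, MPI_DOUBLE, MPI_STATUS_IGNORE);

  long long done = first;
  while (done < n) {                       // only reached for n > INT_MAX
    int chunk = (n - done > INT_MAX) ? INT_MAX : (int) (n - done);
    // independent reads: ranks may loop a different number of times
    MPI_File_read_at(fh, offset + done * (MPI_Offset) sizeof(double),
                     &buf[done], chunk, MPI_DOUBLE, MPI_STATUS_IGNORE);
    done += chunk;
  }
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  MPI_File fh;
  MPI_File_open(MPI_COMM_WORLD, "restart.sketch",
                MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
  std::vector<double> out(16, 1.0), in(16, 0.0);
  MPI_File_write_at_all(fh, 0, out.data(), 16, MPI_DOUBLE, MPI_STATUS_IGNORE);
  read_doubles(fh, 0, 16, in.data());
  MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}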

View File

@ -1,40 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef LMP_RESTART_MPIIO_H
#define LMP_RESTART_MPIIO_H
#include "pointers.h"
namespace LAMMPS_NS {
class RestartMPIIO : protected Pointers {
private:
MPI_File mpifh;
int nprocs, myrank;
public:
int mpiio_exists;
RestartMPIIO(class LAMMPS *);
void openForRead(const char *);
void openForWrite(const char *);
void write(MPI_Offset, int, double *);
void read(MPI_Offset, bigint, double *);
void close();
};
} // namespace LAMMPS_NS
#endif

View File

@ -44,7 +44,7 @@ endif
# PACKLIB = all packages that require an additional lib
# should be PACKSYS + PACKINT + PACKEXT
# PACKSYS = subset that require a common system library
# include MPIIO and LB b/c require full MPI, not just STUBS
# include LATBOLTZ b/c it requires a full MPI, not just STUBS
# PACKINT = subset that require an internal (provided) library
# PACKEXT = subset that require an external (downloaded) library
@ -106,7 +106,6 @@ PACKAGE = \
mofff \
molecule \
molfile \
mpiio \
netcdf \
openmp \
opt \
@ -212,7 +211,6 @@ PACKLIB = \
kim \
kokkos \
lepton \
mpiio \
poems \
python \
voronoi \
@ -237,7 +235,7 @@ PACKLIB = \
vtk \
electrode
PACKSYS = compress latboltz mpiio python
PACKSYS = compress latboltz python
PACKINT = atc awpmd colvars electrode gpu kokkos lepton ml-pod poems

View File

@ -46,7 +46,7 @@ class ThrOMP {
public:
ThrOMP(LAMMPS *, int);
virtual ~ThrOMP() = default;
virtual ~ThrOMP() noexcept(false) {}
double memory_usage_thr();

View File

@ -52,6 +52,17 @@ lmpgitversion.h
mliap_model_python_couple.cpp
mliap_model_python_couple.h
# removed in August 2023
dump_atom_mpiio.cpp
dump_atom_mpiio.h
dump_cfg_mpiio.cpp
dump_cfg_mpiio.h
dump_custom_mpiio.cpp
dump_custom_mpiio.h
dump_xyz_mpiio.cpp
dump_xyz_mpiio.h
restart_mpiio.cpp
restart_mpiio.h
# removed on 3 August 2023
fix_mscg.cpp
fix_mscg.h
# removed on 29 March 2023

View File

@ -120,8 +120,6 @@ Dump::Dump(LAMMPS *lmp, int /*narg*/, char **arg) :
char *ptr;
if ((ptr = strchr(filename,'%'))) {
if (strstr(style,"mpiio"))
error->all(FLERR,"Dump file MPI-IO output not allowed with % in filename");
multiproc = 1;
nclusterprocs = 1;
filewriter = 1;

View File

@ -27,6 +27,9 @@ DumpDeprecated::DumpDeprecated(LAMMPS *lmp, int narg, char **arg) : Dump(lmp, na
if (my_style == "DEPRECATED") {
if (lmp->comm->me == 0) utils::logmesg(lmp, "\nDump style 'DEPRECATED' is a dummy style\n\n");
return;
} else if (utils::strmatch(my_style, "/mpiio$")) {
utils::logmesg(lmp, "\nThe MPIIO and thus dump style {} have been removed from LAMMPS.\n\n",
my_style);
}
error->all(FLERR, "This dump style is no longer available");
}

View File

@ -15,6 +15,10 @@
// clang-format off
// list all deprecated and removed dump styles here
DumpStyle(DEPRECATED,DumpDeprecated);
DumpStyle(atom/mpiio,DumpDeprecated);
DumpStyle(cfg/mpiio,DumpDeprecated);
DumpStyle(custom/mpiio,DumpDeprecated);
DumpStyle(xyz/mpiio,DumpDeprecated);
// clang-format on
#else

View File

@ -248,10 +248,14 @@ DumpImage::DumpImage(LAMMPS *lmp, int narg, char **arg) :
if (iarg+3 > narg) error->all(FLERR,"Illegal dump image command");
int width = utils::inumeric(FLERR,arg[iarg+1],false,lmp);
int height = utils::inumeric(FLERR,arg[iarg+2],false,lmp);
if (width <= 0 || height <= 0)
error->all(FLERR,"Illegal dump image command");
if (width <= 0 || height <= 0) error->all(FLERR,"Illegal dump image command");
if (image->fsaa) {
image->width = width*2;
image->height = height*2;
} else {
image->width = width;
image->height = height;
}
iarg += 3;
} else if (strcmp(arg[iarg],"view") == 0) {
@ -345,6 +349,23 @@ DumpImage::DumpImage(LAMMPS *lmp, int narg, char **arg) :
image->shiny = shiny;
iarg += 2;
} else if (strcmp(arg[iarg],"fsaa") == 0) {
if (iarg+2 > narg) error->all(FLERR,"Illegal dump_modify command");
int aa = utils::logical(FLERR, arg[iarg+1], false, lmp);
if (aa) {
if (!image->fsaa) {
image->width = image->width*2;
image->height = image->height*2;
}
} else {
if (image->fsaa) {
image->width = image->width/2;
image->height = image->height/2;
}
}
image->fsaa = aa;
iarg += 2;
} else if (strcmp(arg[iarg],"ssao") == 0) {
if (iarg+4 > narg) error->all(FLERR,"Illegal dump image command");
image->ssao = utils::logical(FLERR,arg[iarg+1],false,lmp);

View File

@ -55,7 +55,9 @@ enum{NO,YES};
/* ---------------------------------------------------------------------- */
Image::Image(LAMMPS *lmp, int nmap_caller) : Pointers(lmp)
Image::Image(LAMMPS *lmp, int nmap_caller) :
Pointers(lmp), depthBuffer(nullptr), surfaceBuffer(nullptr), depthcopy(nullptr),
surfacecopy(nullptr), imageBuffer(nullptr), rgbcopy(nullptr), writeBuffer(nullptr)
{
MPI_Comm_rank(world,&me);
MPI_Comm_size(world,&nprocs);
@ -69,6 +71,7 @@ Image::Image(LAMMPS *lmp, int nmap_caller) : Pointers(lmp)
persp = 0.0;
shiny = 1.0;
ssao = NO;
fsaa = NO;
up[0] = 0.0;
up[1] = 0.0;
@ -154,6 +157,13 @@ Image::~Image()
void Image::buffers()
{
memory->destroy(depthBuffer);
memory->destroy(surfaceBuffer);
memory->destroy(imageBuffer);
memory->destroy(depthcopy);
memory->destroy(surfacecopy);
memory->destroy(rgbcopy);
npixels = width * height;
memory->create(depthBuffer,npixels,"image:depthBuffer");
memory->create(surfaceBuffer,2*npixels,"image:surfaceBuffer");
@ -380,6 +390,26 @@ void Image::merge()
} else {
writeBuffer = imageBuffer;
}
// scale down image for antialiasing. can be done in place with simple averaging
if (fsaa) {
for (int h=0; h < height; h += 2) {
for (int w=0; w < width; w +=2) {
int idx1 = 3*height*h + 3*w;
int idx2 = 3*height*h + 3*(w+1);
int idx3 = 3*height*(h+1) + 3*w;
int idx4 = 3*height*(h+1) + 3*(w+1);
int out = 3*(height/2)*(h/2) + 3*(w/2);
for (int i=0; i < 3; ++i) {
writeBuffer[out+i] = (unsigned char) (0.25*((int)writeBuffer[idx1+i]
+(int)writeBuffer[idx2+i]
+(int)writeBuffer[idx3+i]
+(int)writeBuffer[idx4+i]));
}
}
}
}
}
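As a side note on the new fsaa option, the in-place averaging above is a 2x2 box filter: the image is rendered at twice the requested resolution and each 2x2 block of pixels is collapsed to one output pixel. A minimal standalone sketch (not LAMMPS code; names are illustrative), assuming a row-major RGB buffer with a row stride of 3*width bytes:

#include <vector>

void downscale2x2(const unsigned char *src, unsigned char *dst, int width, int height)
{
  // width and height are the oversampled dimensions and must be even
  for (int h = 0; h < height; h += 2) {
    for (int w = 0; w < width; w += 2) {
      for (int c = 0; c < 3; ++c) {        // R, G, B channels
        int sum = src[3 * (h * width + w) + c]
                + src[3 * (h * width + w + 1) + c]
                + src[3 * ((h + 1) * width + w) + c]
                + src[3 * ((h + 1) * width + w + 1) + c];
        dst[3 * ((h / 2) * (width / 2) + w / 2) + c] = (unsigned char) (sum / 4);
      }
    }
  }
}

int main()
{
  const int W = 4, H = 4;                  // oversampled size
  std::vector<unsigned char> src(3 * W * H, 200), dst(3 * (W / 2) * (H / 2), 0);
  downscale2x2(src.data(), dst.data(), W, H);
  return 0;
}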
/* ----------------------------------------------------------------------
@ -929,6 +959,9 @@ void Image::compute_SSAO()
int pixelstart = static_cast<int> (1.0*me/nprocs * npixels);
int pixelstop = static_cast<int> (1.0*(me+1)/nprocs * npixels);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int index = pixelstart; index < pixelstop; index++) {
int x = index % width;
int y = index / width;
@ -1037,6 +1070,7 @@ void Image::compute_SSAO()
void Image::write_JPG(FILE *fp)
{
#ifdef LAMMPS_JPEG
int aafactor = fsaa ? 2 : 1;
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPROW row_pointer;
@ -1044,8 +1078,8 @@ void Image::write_JPG(FILE *fp)
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo);
jpeg_stdio_dest(&cinfo,fp);
cinfo.image_width = width;
cinfo.image_height = height;
cinfo.image_width = width/aafactor;
cinfo.image_height = height/aafactor;
cinfo.input_components = 3;
cinfo.in_color_space = JCS_RGB;
@ -1055,7 +1089,7 @@ void Image::write_JPG(FILE *fp)
while (cinfo.next_scanline < cinfo.image_height) {
row_pointer = (JSAMPROW)
&writeBuffer[(cinfo.image_height - 1 - cinfo.next_scanline) * 3 * width];
&writeBuffer[(cinfo.image_height - 1 - cinfo.next_scanline) * 3 * (width/aafactor)];
jpeg_write_scanlines(&cinfo,&row_pointer,1);
}
@ -1071,6 +1105,7 @@ void Image::write_JPG(FILE *fp)
void Image::write_PNG(FILE *fp)
{
#ifdef LAMMPS_PNG
int aafactor = fsaa ? 2 : 1;
png_structp png_ptr;
png_infop info_ptr;
@ -1090,8 +1125,8 @@ void Image::write_PNG(FILE *fp)
}
png_init_io(png_ptr, fp);
png_set_compression_level(png_ptr,Z_BEST_COMPRESSION);
png_set_IHDR(png_ptr,info_ptr,width,height,8,PNG_COLOR_TYPE_RGB,
png_set_compression_level(png_ptr,Z_BEST_SPEED);
png_set_IHDR(png_ptr,info_ptr,width/aafactor,height/aafactor,8,PNG_COLOR_TYPE_RGB,
PNG_INTERLACE_NONE,PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
png_text text_ptr[2];
@ -1111,9 +1146,9 @@ void Image::write_PNG(FILE *fp)
png_set_text(png_ptr,info_ptr,text_ptr,1);
png_write_info(png_ptr,info_ptr);
auto row_pointers = new png_bytep[height];
for (int i=0; i < height; ++i)
row_pointers[i] = (png_bytep) &writeBuffer[(height-i-1)*3*width];
auto row_pointers = new png_bytep[height/aafactor];
for (int i=0; i < height/aafactor; ++i)
row_pointers[i] = (png_bytep) &writeBuffer[((height/aafactor)-i-1)*3*(width/aafactor)];
png_write_image(png_ptr, row_pointers);
png_write_end(png_ptr, info_ptr);
@ -1129,11 +1164,12 @@ void Image::write_PNG(FILE *fp)
void Image::write_PPM(FILE *fp)
{
fprintf(fp,"P6\n%d %d\n255\n",width,height);
int aafactor = fsaa ? 2 : 1;
fprintf(fp,"P6\n%d %d\n255\n",width/aafactor,height/aafactor);
int y;
for (y = height-1; y >= 0; y--)
fwrite(&writeBuffer[y*width*3],3,width,fp);
for (y = (height/aafactor)-1; y >= 0; y--)
fwrite(&writeBuffer[y*(width/aafactor)*3],3,width/aafactor,fp);
}
/* ----------------------------------------------------------------------

View File

@ -28,6 +28,7 @@ class Image : protected Pointers {
double zoom; // zoom factor
double persp; // perspective factor
double shiny; // shininess of objects
int fsaa; // antialiasing on or off
int ssao; // SSAO on or off
int seed; // RN seed for SSAO
double ssaoint; // strength of shading from 0 to 1

View File

@ -628,16 +628,9 @@ executing.
void lammps_commands_string(void *handle, const char *str)
{
auto lmp = (LAMMPS *) handle;
// copy str and convert from CR-LF (DOS-style) to LF (Unix style) line
int n = strlen(str);
char *ptr, *copy = new char[n+1];
for (ptr = copy; *str != '\0'; ++str) {
if ((str[0] == '\r') && (str[1] == '\n')) continue;
*ptr++ = *str;
}
*ptr = '\0';
std::string cmd;
bool append = false;
bool triple = false;
BEGIN_CAPTURE
{
@ -645,27 +638,30 @@ void lammps_commands_string(void *handle, const char *str)
lmp->error->all(FLERR,"Library error: issuing LAMMPS command during run");
}
n = strlen(copy);
ptr = copy;
for (int i=0; i < n; ++i) {
// process continuation characters and here docs
for (const auto &line : utils::split_lines(str)) {
if (append || triple)
cmd += line;
else
cmd = line;
// handle continuation character as last character in line or string
if ((copy[i] == '&') && (copy[i+1] == '\n'))
copy[i+1] = copy[i] = ' ';
else if ((copy[i] == '&') && (copy[i+1] == '\0'))
copy[i] = ' ';
if (utils::strmatch(line, "\"\"\".*\"\"\"")) {
triple = false;
} else if (utils::strmatch(line, "\"\"\"")) {
triple = !triple;
}
if (triple) cmd += '\n';
if (copy[i] == '\n') {
copy[i] = '\0';
lmp->input->one(ptr);
ptr = copy + i+1;
} else if (copy[i+1] == '\0')
lmp->input->one(ptr);
if (!triple && utils::strmatch(cmd, "&$")) {
append = true;
cmd.back() = ' ';
} else append = false;
if (!append && !triple)
lmp->input->one(cmd.c_str());
}
}
END_CAPTURE
delete[] copy;
}
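The rewritten command processing above can be summarized by a simplified standalone sketch (not the LAMMPS implementation; run_command() stands in for lmp->input->one()): lines are joined when they end in a '&' continuation character, and text between triple-quote markers is collected into a single multi-line command before execution.

#include <cstdio>
#include <sstream>
#include <string>

static void run_command(const std::string &cmd) { std::printf("COMMAND: %s\n", cmd.c_str()); }

// does this line contain an odd number of """ markers, i.e. open or close a here-doc?
static bool has_open_triple(const std::string &line)
{
  int count = 0;
  for (std::string::size_type p = line.find("\"\"\""); p != std::string::npos;
       p = line.find("\"\"\"", p + 3))
    ++count;
  return (count % 2) == 1;
}

void commands_string(const std::string &str)
{
  std::istringstream stream(str);
  std::string line, cmd;
  bool append = false, triple = false;
  while (std::getline(stream, line)) {
    if (!line.empty() && line.back() == '\r') line.pop_back();   // tolerate CR-LF input
    if (append || triple) cmd += line; else cmd = line;
    if (has_open_triple(line)) triple = !triple;                 // toggle on unbalanced """
    if (triple) { cmd += '\n'; continue; }                       // keep collecting the here-doc
    if (!cmd.empty() && cmd.back() == '&') {                     // continuation character
      cmd.back() = ' ';
      append = true;
      continue;
    }
    append = false;
    run_command(cmd);
  }
}

int main()
{
  commands_string("print &\n\"hello\"\nvariable s string \"\"\"\nmulti line\n\"\"\"\n");
  return 0;
}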
// -----------------------------------------------------------------------

View File

@ -20,7 +20,7 @@ namespace LAMMPS_NS {
class PythonInterface {
public:
virtual ~PythonInterface() = default;
virtual ~PythonInterface() noexcept(false) {}
virtual void command(int, char **) = 0;
virtual void invoke_function(int, char *) = 0;
virtual int find(const char *) = 0;

View File

@ -1,51 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef LMP_MPIIO_H
#define LMP_MPIIO_H
// true interface to MPIIO package
// used when MPIIO package is installed
#ifdef LMP_MPIIO
#if defined(MPI_STUBS)
#error "The MPIIO package cannot be compiled in serial with MPI STUBS"
#endif
#include "restart_mpiio.h" // IWYU pragma: export
#else
// dummy interface to MPIIO package
// needed for compiling when MPIIO package is not installed
namespace LAMMPS_NS {
class RestartMPIIO {
public:
int mpiio_exists;
RestartMPIIO(class LAMMPS *) { mpiio_exists = 0; }
~RestartMPIIO() {}
void openForRead(const char *) {}
void openForWrite(const char *) {}
void write(MPI_Offset, int, double *) {}
void read(MPI_Offset, long, double *) {}
void close() {}
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -994,22 +994,12 @@ void Output::create_restart(int narg, char **arg)
error->all(FLERR,"Both restart files must use % or neither");
}
int mpiioflag;
if (utils::strmatch(arg[1],"\\.mpiio$")) mpiioflag = 1;
else mpiioflag = 0;
if (nfile == 2) {
if (mpiioflag && !utils::strmatch(arg[2],"\\.mpiio$"))
error->all(FLERR,"Both restart files must use MPI-IO or neither");
if (!mpiioflag && utils::strmatch(arg[2],"\\.mpiio$"))
error->all(FLERR,"Both restart files must use MPI-IO or neither");
}
// setup output style and process optional args
delete restart;
restart = new WriteRestart(lmp);
int iarg = nfile+1;
restart->multiproc_options(multiproc,mpiioflag,narg-iarg,&arg[iarg]);
restart->multiproc_options(multiproc,narg-iarg,&arg[iarg]);
}
/* ----------------------------------------------------------------------

View File

@ -92,7 +92,7 @@ class Pointers {
atomKK(ptr->atomKK),
memoryKK(ptr->memoryKK),
python(ptr->python) {}
virtual ~Pointers() = default;
virtual ~Pointers() noexcept(false) {}
// remove other default members

View File

@ -30,7 +30,6 @@
#include "label_map.h"
#include "memory.h"
#include "modify.h"
#include "mpiio.h"
#include "pair.h"
#include "special.h"
#include "update.h"
@ -43,7 +42,7 @@ using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
ReadRestart::ReadRestart(LAMMPS *lmp) : Command(lmp), mpiio(nullptr) {}
ReadRestart::ReadRestart(LAMMPS *lmp) : Command(lmp) {}
/* ---------------------------------------------------------------------- */
@ -88,17 +87,8 @@ void ReadRestart::command(int narg, char **arg)
if (strchr(arg[0],'%')) multiproc = 1;
else multiproc = 0;
if (strstr(arg[0],".mpiio")) mpiioflag = 1;
else mpiioflag = 0;
if (multiproc && mpiioflag)
error->all(FLERR,"Read restart MPI-IO input not allowed with % in filename");
if (mpiioflag) {
mpiio = new RestartMPIIO(lmp);
if (!mpiio->mpiio_exists)
error->all(FLERR,"Reading from MPI-IO filename when MPIIO package is not installed");
}
if (utils::strmatch(arg[0],"\\.mpiio"))
error->all(FLERR,"MPI-IO files are no longer supported by LAMMPS");
// open single restart file or base file for multiproc case
@ -182,28 +172,6 @@ void ReadRestart::command(int narg, char **arg)
double *buf = nullptr;
int m,flag;
// MPI-IO input from single file
if (mpiioflag) {
mpiio->openForRead(file);
memory->create(buf,assignedChunkSize,"read_restart:buf");
mpiio->read((headerOffset+assignedChunkOffset),assignedChunkSize,buf);
mpiio->close();
// can calculate number of atoms from assignedChunkSize
if (!nextra) {
atom->nlocal = 1; // temporarily claim there is one atom...
int perAtomSize = avec->size_restart(); // ...so we can get its size
atom->nlocal = 0; // restore nlocal to zero atoms
int atomCt = (int) (assignedChunkSize / perAtomSize);
if (atomCt > atom->nmax) avec->grow(atomCt);
}
m = 0;
while (m < assignedChunkSize) m += avec->unpack_restart(&buf[m]);
}
// input of single native file
// nprocs_file = # of chunks in file
// proc 0 reads a chunk and bcasts it to other procs
@ -211,7 +179,7 @@ void ReadRestart::command(int narg, char **arg)
// if remapflag set, remap the atom to box before checking sub-domain
// check for atom in sub-domain differs for orthogonal vs triclinic box
else if (multiproc == 0) {
if (multiproc == 0) {
int triclinic = domain->triclinic;
imageint *iptr;
@ -410,7 +378,7 @@ void ReadRestart::command(int narg, char **arg)
// for multiproc or MPI-IO files:
// perform irregular comm to migrate atoms to correct procs
if (multiproc || mpiioflag) {
if (multiproc) {
// if remapflag set, remap all atoms I read back to box before migrating
@ -419,8 +387,7 @@ void ReadRestart::command(int narg, char **arg)
imageint *image = atom->image;
int nlocal = atom->nlocal;
for (int i = 0; i < nlocal; i++)
domain->remap(x[i],image[i]);
for (int i = 0; i < nlocal; i++) domain->remap(x[i],image[i]);
}
// create a temporary fix to hold and migrate extra atom info
@ -528,8 +495,6 @@ void ReadRestart::command(int narg, char **arg)
if (comm->me == 0)
utils::logmesg(lmp," read_restart CPU = {:.3f} seconds\n",platform::walltime()-time1);
delete mpiio;
}
/* ----------------------------------------------------------------------
@ -989,120 +954,9 @@ void ReadRestart::file_layout()
error->all(FLERR,"Restart file is not a multi-proc file");
if (multiproc && multiproc_file == 0)
error->all(FLERR,"Restart file is a multi-proc file");
} else if (flag == MPIIO) {
int mpiioflag_file = read_int();
if (mpiioflag == 0 && mpiioflag_file)
error->all(FLERR,"Restart file is a MPI-IO file");
if (mpiioflag && mpiioflag_file == 0)
error->all(FLERR,"Restart file is not a MPI-IO file");
if (mpiioflag) {
bigint *nproc_chunk_offsets;
memory->create(nproc_chunk_offsets,nprocs,
"write_restart:nproc_chunk_offsets");
bigint *nproc_chunk_sizes;
memory->create(nproc_chunk_sizes,nprocs,
"write_restart:nproc_chunk_sizes");
// on rank 0 read in the chunk sizes that were written out
// then consolidate them and compute offsets relative to the
// end of the header info to fit the current partition size
// if the number of ranks that did the writing is different
if (me == 0) {
int ndx;
int *all_written_send_sizes;
memory->create(all_written_send_sizes,nprocs_file,
"write_restart:all_written_send_sizes");
int *nproc_chunk_number;
memory->create(nproc_chunk_number,nprocs,
"write_restart:nproc_chunk_number");
utils::sfread(FLERR,all_written_send_sizes,sizeof(int),nprocs_file,fp,nullptr,error);
if ((nprocs != nprocs_file) && !(atom->nextra_store)) {
// nprocs differ, but atom sizes are fixed length, yeah!
atom->nlocal = 1; // temporarily claim there is one atom...
int perAtomSize = atom->avec->size_restart(); // ...so we can get its size
atom->nlocal = 0; // restore nlocal to zero atoms
bigint total_size = 0;
for (int i = 0; i < nprocs_file; ++i) {
total_size += all_written_send_sizes[i];
}
bigint total_ct = total_size / perAtomSize;
bigint base_ct = total_ct / nprocs;
bigint leftover_ct = total_ct - (base_ct * nprocs);
bigint current_ByteOffset = 0;
base_ct += 1;
bigint base_ByteOffset = base_ct * (perAtomSize * sizeof(double));
for (ndx = 0; ndx < leftover_ct; ++ndx) {
nproc_chunk_offsets[ndx] = current_ByteOffset;
nproc_chunk_sizes[ndx] = base_ct * perAtomSize;
current_ByteOffset += base_ByteOffset;
}
base_ct -= 1;
base_ByteOffset -= (perAtomSize * sizeof(double));
for (; ndx < nprocs; ++ndx) {
nproc_chunk_offsets[ndx] = current_ByteOffset;
nproc_chunk_sizes[ndx] = base_ct * perAtomSize;
current_ByteOffset += base_ByteOffset;
}
} else { // we have to read in based on how it was written
int init_chunk_number = nprocs_file/nprocs;
int num_extra_chunks = nprocs_file - (nprocs*init_chunk_number);
for (int i = 0; i < nprocs; i++) {
if (i < num_extra_chunks)
nproc_chunk_number[i] = init_chunk_number+1;
else
nproc_chunk_number[i] = init_chunk_number;
}
int all_written_send_sizes_index = 0;
bigint current_offset = 0;
for (int i=0;i<nprocs;i++) {
nproc_chunk_offsets[i] = current_offset;
nproc_chunk_sizes[i] = 0;
for (int j=0;j<nproc_chunk_number[i];j++) {
nproc_chunk_sizes[i] +=
all_written_send_sizes[all_written_send_sizes_index];
current_offset +=
(all_written_send_sizes[all_written_send_sizes_index] *
sizeof(double));
all_written_send_sizes_index++;
}
}
}
memory->destroy(all_written_send_sizes);
memory->destroy(nproc_chunk_number);
}
// scatter chunk sizes and offsets to all procs
MPI_Scatter(nproc_chunk_sizes, 1, MPI_LMP_BIGINT,
&assignedChunkSize , 1, MPI_LMP_BIGINT, 0,world);
MPI_Scatter(nproc_chunk_offsets, 1, MPI_LMP_BIGINT,
&assignedChunkOffset , 1, MPI_LMP_BIGINT, 0,world);
memory->destroy(nproc_chunk_sizes);
memory->destroy(nproc_chunk_offsets);
}
}
flag = read_int();
}
// if MPI-IO file, broadcast the end of the header offset
// this allows all ranks to compute offset to their data
if (mpiioflag) {
if (me == 0) headerOffset = platform::ftell(fp);
MPI_Bcast(&headerOffset,1,MPI_LMP_BIGINT,0,world);
}
}
// ----------------------------------------------------------------------

View File

@ -39,13 +39,6 @@ class ReadRestart : public Command {
int nprocs_file; // total # of procs that wrote restart file
int revision; // revision number of the restart file format
// MPI-IO values
int mpiioflag; // 1 for MPIIO output, else 0
class RestartMPIIO *mpiio; // MPIIO for restart file input
bigint assignedChunkSize;
MPI_Offset assignedChunkOffset, headerOffset;
std::string file_search(const std::string &);
void header();
void type_arrays();

View File

@ -29,7 +29,6 @@
#include "label_map.h"
#include "memory.h"
#include "modify.h"
#include "mpiio.h"
#include "neighbor.h"
#include "output.h"
#include "pair.h"
@ -74,16 +73,13 @@ void WriteRestart::command(int narg, char **arg)
if (strchr(arg[0],'%')) multiproc = nprocs;
else multiproc = 0;
if (utils::strmatch(arg[0],"\\.mpiio$")) mpiioflag = 1;
else mpiioflag = 0;
if ((comm->me == 0) && mpiioflag)
error->warning(FLERR,"MPI-IO output is unmaintained and unreliable. Use with caution.");
if (utils::strmatch(arg[0],"\\.mpiio$"))
error->all(FLERR,"MPI-IO files are no longer supported by LAMMPS");
// setup output style and process optional args
// also called by Output class for periodic restart files
multiproc_options(multiproc,mpiioflag,narg-1,&arg[1]);
multiproc_options(multiproc,narg-1,&arg[1]);
// init entire system since comm->exchange is done
// comm::init needs neighbor::init needs pair::init needs kspace::init, etc
@ -119,21 +115,9 @@ void WriteRestart::command(int narg, char **arg)
/* ---------------------------------------------------------------------- */
void WriteRestart::multiproc_options(int multiproc_caller, int mpiioflag_caller, int narg, char **arg)
void WriteRestart::multiproc_options(int multiproc_caller, int narg, char **arg)
{
multiproc = multiproc_caller;
mpiioflag = mpiioflag_caller;
// error checks
if (multiproc && mpiioflag)
error->all(FLERR,"Restart file MPI-IO output not allowed with % in filename");
if (mpiioflag) {
mpiio = new RestartMPIIO(lmp);
if (!mpiio->mpiio_exists)
error->all(FLERR,"Writing to MPI-IO filename when MPIIO package is not installed");
}
// defaults for multiproc file writing
@ -354,20 +338,6 @@ void WriteRestart::write(const std::string &file)
}
}
// MPI-IO output to single file
if (mpiioflag) {
if (me == 0 && fp) {
magic_string();
if (ferror(fp)) io_error = 1;
fclose(fp);
fp = nullptr;
}
mpiio->openForWrite(file.c_str());
mpiio->write(headerOffset,send_size,buf);
mpiio->close();
} else {
// output of one or more native files
// filewriter = 1 = this proc writes to file
// ping each proc in my cluster, receive its data, write data to file
@ -397,7 +367,6 @@ void WriteRestart::write(const std::string &file)
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,send_size,MPI_DOUBLE,fileproc,0,world);
}
}
// check for I/O error status
@ -578,18 +547,7 @@ void WriteRestart::force_fields()
void WriteRestart::file_layout(int send_size)
{
if (me == 0) {
write_int(MULTIPROC,multiproc);
write_int(MPIIO,mpiioflag);
}
if (mpiioflag) {
int *all_send_sizes;
memory->create(all_send_sizes,nprocs,"write_restart:all_send_sizes");
MPI_Gather(&send_size, 1, MPI_INT, all_send_sizes, 1, MPI_INT, 0,world);
if (me == 0) fwrite(all_send_sizes,sizeof(int),nprocs,fp);
memory->destroy(all_send_sizes);
}
if (me == 0) write_int(MULTIPROC,multiproc);
// -1 flag signals end of file layout info
@ -597,14 +555,6 @@ void WriteRestart::file_layout(int send_size)
int flag = -1;
fwrite(&flag,sizeof(int),1,fp);
}
// if MPI-IO file, broadcast the end of the header offset
// this allows all ranks to compute offset to their data
if (mpiioflag) {
if (me == 0) headerOffset = platform::ftell(fp);
MPI_Bcast(&headerOffset,1,MPI_LMP_BIGINT,0,world);
}
}
// ----------------------------------------------------------------------
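With the MPIIO records gone, the layout section of a restart file reduces to the single MULTIPROC record plus the -1 terminator written above. The following standalone sketch is an editorial illustration, not part of this diff; the tag value and the plain two-int record format are assumptions based on the write_int() usage shown above. It shows the resulting write/read protocol:

#include <cstdio>

enum { MULTIPROC = 0 };   // illustrative tag value; the real constant lives in the restart sources

int main()
{
  // writer side: one tag/value record plus the -1 end-of-layout marker,
  // mirroring the structure of WriteRestart::file_layout() after this change
  std::FILE *fp = std::fopen("layout.bin", "wb");
  int record[3] = {MULTIPROC, /* multiproc = */ 0, -1};
  std::fwrite(record, sizeof(int), 3, fp);
  std::fclose(fp);

  // reader side: loop over tags until the -1 terminator,
  // mirroring the structure of ReadRestart::file_layout()
  fp = std::fopen("layout.bin", "rb");
  int flag = 0;
  std::fread(&flag, sizeof(int), 1, fp);
  while (flag >= 0) {
    if (flag == MULTIPROC) {
      int multiproc_file = 0;
      std::fread(&multiproc_file, sizeof(int), 1, fp);
      std::printf("multiproc = %d\n", multiproc_file);
    }
    std::fread(&flag, sizeof(int), 1, fp);
  }
  std::fclose(fp);
  return 0;
}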

View File

@ -28,7 +28,7 @@ class WriteRestart : public Command {
public:
WriteRestart(class LAMMPS *);
void command(int, char **) override;
void multiproc_options(int, int, int, char **);
void multiproc_options(int, int, char **);
void write(const std::string &);
private:
@ -44,12 +44,6 @@ class WriteRestart : public Command {
int fileproc; // ID of proc in my cluster who writes to file
int icluster; // which cluster I am in
// MPI-IO values
int mpiioflag; // 1 for MPIIO output, else 0
class RestartMPIIO *mpiio; // MPIIO for restart file output
MPI_Offset headerOffset;
void header();
void type_arrays();
void force_fields();
@ -66,8 +60,6 @@ class WriteRestart : public Command {
void write_int_vec(int, int, int *);
void write_double_vec(int, int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.16)
project(lammps-gui VERSION 1.2.0 LANGUAGES CXX)
project(lammps-gui VERSION 1.2.2 LANGUAGES CXX)
set(CMAKE_AUTOUIC ON)
set(CMAKE_AUTOMOC ON)
@ -43,6 +43,7 @@ else()
add_compile_options(/Zc:__cplusplus)
add_compile_options(/wd4244)
add_compile_options(/wd4267)
add_compile_options(/wd4250)
if(LAMMPS_EXCEPTIONS)
add_compile_options(/EHsc)
endif()

View File

@ -5,23 +5,17 @@ LAMMPS-GUI TODO list:
- rewrite syntax highlighting to be line oriented instead of word oriented.
handle first part of line based on regular expressions, then advance and only highlight strings and numbers.
handle "&" continuation and multiline strings with """ like C style comments in Qt docs example
- add CTRL-q hotkey to log windows so you can exit the entire application (add do you really want to? dialog to this)
- add "syntax check" with enabled "-skiprun" flag
- need to handle "label" and "jump" commands from within ?
- switch processing of input to line based commands or?
- switch input file editor to read-only while loop is running
- triple quoted heredocs don't work with lammps_commands_string()
# Long term ideas
- add feature to LAMMPS (to the LAMMPS class) to store current file name and line number, update while reading/parsing
use in error messages
add API to library interface to query this info and use it for highlighting in text editor
- rewrite entire application to either use QtCreator for everything or just build the App and its layout manually
- rewrite entire application to build the App and its layout manually
- port to Qt6
- also a rewrite should establish consistent naming conventions. now we have a mix of LAMMPS style, Qt style, and others.
- add option to attach a debugger to the running program (highly non-portable, need customization support in preferences)
- write a "wizard" dialog that can be used for beginners to create an input file template for a few typical use scenarios
- support single stepping, i.e. process input line by line (need to detect continuation chars!) with highlighting active line(s)
- have command text input file in/above status bar where individual commands can be tested. have insert button to copy line into file at the current point
- have command text input line in/above status bar where individual commands can be tested. have insert button to copy line into file at the current point
- support text completion as done with lammps-shell
- add a "python" mode, where instead of launching LAMMPS, python is loaded where the LAMMPS python module is made available.
- support multiple tabs and multiple LAMMPS instances?

Binary file not shown (before: 3.5 KiB)

Binary file not shown (before: 4.8 KiB, after: 5.3 KiB)

View File

@ -183,7 +183,7 @@ Highlighter::Highlighter(QTextDocument *parent) : QSyntaxHighlighter(parent)
}
const QString number_keywords[] = {
QStringLiteral("(^|\\s+)[0-9]+"), // integer
QStringLiteral("(^|\\s+)[0-9:*]+"), // integer and integer ranges
QStringLiteral("(^|\\s+)[0-9]+\\.[0-9]*[edED]?[-+]?[0-9]*"), // floating point 1
QStringLiteral("(^|\\s+)[0-9]*\\.[0-9]+[edED]?[-+]?[0-9]*"), // floating point 2
QStringLiteral("(^|\\s+)[0-9]+([edED][-+]?[0-9]+)?") // floating point 3
@ -207,10 +207,12 @@ Highlighter::Highlighter(QTextDocument *parent) : QSyntaxHighlighter(parent)
void Highlighter::highlightBlock(const QString &text)
{
// clang-format off
auto style = QRegularExpression("^(fix|compute|dump|set)\\s+(\\w+)\\s+(\\S+)\\s+(\\S+)").match(text);
auto force = QRegularExpression("^(atom_style|pair_style|bond_style|angle_style|dihedral_style|improper_style|kspace_style)\\s+(\\S+)").match(text);
auto defs = QRegularExpression("^(group|variable)\\s+(\\S+)\\s+(\\S+)").match(text);
auto undo = QRegularExpression("^(unfix|uncompute|undump)\\s+(\\w+)").match(text);
// clang-format on
bool do_style = true;
bool do_force = true;
bool do_defs = true;
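The widened number pattern in the first hunk above ("[0-9:*]+" instead of "[0-9]+") makes the highlighter treat tokens containing '*' or ':' (as used in LAMMPS wildcard-style ranges) as numbers, not just plain integers. A short standalone check, as an editorial illustration rather than part of this diff, assuming a Qt development environment:

#include <QRegularExpression>
#include <QtDebug>

int main()
{
  // the new "integer and integer ranges" pattern from highlighter.cpp
  QRegularExpression numrange(QStringLiteral("(^|\\s+)[0-9:*]+"));
  // plain integers as well as tokens with '*' or ':' now match
  for (const QString &token : {QStringLiteral("10"), QStringLiteral("2*5"), QStringLiteral("1:3")})
    qDebug() << token << numrange.match(QStringLiteral(" ") + token).hasMatch();   // true, true, true
  return 0;
}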

View File

@ -22,6 +22,7 @@
#include <QImage>
#include <QImageReader>
#include <QLabel>
#include <QLineEdit>
#include <QMenuBar>
#include <QMessageBox>
#include <QPalette>
@ -31,11 +32,28 @@
#include <QScrollArea>
#include <QScrollBar>
#include <QSettings>
#include <QSpinBox>
#include <QStatusBar>
#include <QVBoxLayout>
#include <QWheelEvent>
#include <QWidgetAction>
#include <cmath>
extern "C" {
#include "periodic_table.h"
static int get_pte_from_mass(double mass)
{
int idx = 0;
for (int i = 0; i < nr_pte_entries; ++i)
if (fabs(mass - pte_mass[i]) < 0.65) idx = i;
if ((mass > 0.0) && (mass < 2.2)) idx = 1;
// discriminate between Cobalt and Nickel. The loop will detect Nickel
if ((mass < 61.24) && (mass > 58.8133)) idx = 27;
return idx;
}
}
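The nearest-mass lookup above assigns the last table entry within 0.65 u of the given mass, then special-cases very light masses as hydrogen and the Co/Ni mass overlap in favor of cobalt. A standalone sketch follows as an editorial illustration, not part of this diff; it duplicates the heuristic so it can be compiled on its own against the new periodic_table.h:

#include <cmath>
#include <cstdio>

extern "C" {
#include "periodic_table.h"   // pte_label[], pte_mass[], nr_pte_entries (added by this commit)
}

// same logic as get_pte_from_mass() above, duplicated so this sketch is self-contained
static int mass_to_index(double mass)
{
  int idx = 0;
  for (int i = 0; i < nr_pte_entries; ++i)
    if (std::fabs(mass - pte_mass[i]) < 0.65) idx = i;   // last match within 0.65 u wins
  if ((mass > 0.0) && (mass < 2.2)) idx = 1;             // anything very light counts as H
  if ((mass < 61.24) && (mass > 58.8133)) idx = 27;      // a Co mass would otherwise match Ni
  return idx;
}

int main()
{
  std::printf("%s\n", pte_label[mass_to_index(12.011)]);   // "C"
  std::printf("%s\n", pte_label[mass_to_index(1.008)]);    // "H"
  std::printf("%s\n", pte_label[mass_to_index(58.933)]);   // "Co" (not "Ni")
  std::printf("%s\n", pte_label[mass_to_index(58.693)]);   // "Ni"
  return 0;
}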
static const QString blank(" ");
ImageViewer::ImageViewer(const QString &fileName, LammpsWrapper *_lammps, QWidget *parent) :
@ -58,23 +76,63 @@ ImageViewer::ImageViewer(const QString &fileName, LammpsWrapper *_lammps, QWidge
QVBoxLayout *mainLayout = new QVBoxLayout;
QSettings settings;
vdwfactor = 0.4;
auto *renderstatus = new QLabel(QString());
auto pix = QPixmap(":/emblem-photos.png");
renderstatus->setPixmap(pix.scaled(22, 22, Qt::KeepAspectRatio));
renderstatus->setEnabled(false);
renderstatus->setToolTip("Render status");
settings.beginGroup("snapshot");
auto *xval = new QSpinBox;
xval->setRange(100, 10000);
xval->setStepType(QAbstractSpinBox::AdaptiveDecimalStepType);
xval->setValue(settings.value("xsize", "800").toInt());
xval->setObjectName("xsize");
xval->setToolTip("Set rendered image width");
auto *yval = new QSpinBox;
yval->setRange(100, 10000);
yval->setStepType(QAbstractSpinBox::AdaptiveDecimalStepType);
yval->setValue(settings.value("ysize", "600").toInt());
yval->setObjectName("ysize");
yval->setToolTip("Set rendered image height");
settings.endGroup();
connect(xval, &QAbstractSpinBox::editingFinished, this, &ImageViewer::edit_size);
connect(yval, &QAbstractSpinBox::editingFinished, this, &ImageViewer::edit_size);
auto *dossao = new QPushButton(QIcon(":/hd-img.png"), "");
dossao->setCheckable(true);
dossao->setToolTip("Toggle SSAO rendering");
auto *doanti = new QPushButton(QIcon(":/antialias.png"), "");
doanti->setCheckable(true);
doanti->setToolTip("Toggle anti-aliasing");
auto *dovdw = new QPushButton(QIcon(":/vdw-style.png"), "");
dovdw->setCheckable(true);
dovdw->setToolTip("Toggle VDW style representation");
auto *dobox = new QPushButton(QIcon(":/system-box.png"), "");
dobox->setCheckable(true);
dobox->setToolTip("Toggle displaying box");
auto *doaxes = new QPushButton(QIcon(":/axes-img.png"), "");
doaxes->setCheckable(true);
doaxes->setToolTip("Toggle displaying axes");
auto *zoomin = new QPushButton(QIcon(":/gtk-zoom-in.png"), "");
zoomin->setToolTip("Zoom in by 10 percent");
auto *zoomout = new QPushButton(QIcon(":/gtk-zoom-out.png"), "");
zoomout->setToolTip("Zoom out by 10 percent");
auto *rotleft = new QPushButton(QIcon(":/object-rotate-left.png"), "");
rotleft->setToolTip("Rotate left by 15 degrees");
auto *rotright = new QPushButton(QIcon(":/object-rotate-right.png"), "");
rotright->setToolTip("Rotate right by 15 degrees");
auto *rotup = new QPushButton(QIcon(":/gtk-go-up.png"), "");
rotup->setToolTip("Rotate up by 15 degrees");
auto *rotdown = new QPushButton(QIcon(":/gtk-go-down.png"), "");
rotdown->setToolTip("Rotate down by 15 degrees");
auto *reset = new QPushButton(QIcon(":/gtk-zoom-fit.png"), "");
reset->setToolTip("Reset view to defaults");
auto *combo = new QComboBox;
combo->setObjectName("group");
combo->setToolTip("Select group to display");
int ngroup = lammps->id_count("group");
char gname[64];
for (int i = 0; i < ngroup; ++i) {
@ -84,8 +142,14 @@ ImageViewer::ImageViewer(const QString &fileName, LammpsWrapper *_lammps, QWidge
QHBoxLayout *menuLayout = new QHBoxLayout;
menuLayout->addWidget(menuBar);
menuLayout->addWidget(renderstatus);
menuLayout->addWidget(new QLabel(" Width: "));
menuLayout->addWidget(xval);
menuLayout->addWidget(new QLabel(" Height: "));
menuLayout->addWidget(yval);
menuLayout->addWidget(dossao);
menuLayout->addWidget(doanti);
menuLayout->addWidget(dovdw);
menuLayout->addWidget(dobox);
menuLayout->addWidget(doaxes);
menuLayout->addWidget(zoomin);
@ -100,6 +164,7 @@ ImageViewer::ImageViewer(const QString &fileName, LammpsWrapper *_lammps, QWidge
connect(dossao, &QPushButton::released, this, &ImageViewer::toggle_ssao);
connect(doanti, &QPushButton::released, this, &ImageViewer::toggle_anti);
connect(dovdw, &QPushButton::released, this, &ImageViewer::toggle_vdw);
connect(dobox, &QPushButton::released, this, &ImageViewer::toggle_box);
connect(doaxes, &QPushButton::released, this, &ImageViewer::toggle_axes);
connect(zoomin, &QPushButton::released, this, &ImageViewer::do_zoom_in);
@ -119,6 +184,7 @@ ImageViewer::ImageViewer(const QString &fileName, LammpsWrapper *_lammps, QWidge
reset_view();
dobox->setChecked(showbox);
dovdw->setChecked(vdwfactor > 1.0);
doaxes->setChecked(showaxes);
dossao->setChecked(usessao);
doanti->setChecked(antialias);
@ -137,9 +203,12 @@ void ImageViewer::reset_view()
{
QSettings settings;
settings.beginGroup("snapshot");
xsize = settings.value("xsize", "800").toInt();
ysize = settings.value("ysize", "600").toInt();
zoom = settings.value("zoom", 1.0).toDouble();
hrot = settings.value("hrot", 60).toInt();
vrot = settings.value("vrot", 30).toInt();
vdwfactor = settings.value("vdwstyle", false).toBool() ? 1.6 : 0.5;
showbox = settings.value("box", true).toBool();
showaxes = settings.value("axes", false).toBool();
usessao = settings.value("ssao", false).toBool();
@ -151,18 +220,37 @@ void ImageViewer::reset_view()
if (lo) {
// grab layout manager for the top bar
lo = lo->itemAt(0)->layout();
// grab the first 4 buttons after the menu bar
auto *button = qobject_cast<QPushButton *>(lo->itemAt(1)->widget());
auto *field = qobject_cast<QSpinBox *>(lo->itemAt(3)->widget());
field->setValue(xsize);
field = qobject_cast<QSpinBox *>(lo->itemAt(5)->widget());
field->setValue(ysize);
auto *button = qobject_cast<QPushButton *>(lo->itemAt(6)->widget());
button->setChecked(usessao);
button = qobject_cast<QPushButton *>(lo->itemAt(2)->widget());
button = qobject_cast<QPushButton *>(lo->itemAt(7)->widget());
button->setChecked(antialias);
button = qobject_cast<QPushButton *>(lo->itemAt(3)->widget());
button = qobject_cast<QPushButton *>(lo->itemAt(8)->widget());
button->setChecked(vdwfactor > 1.0);
button = qobject_cast<QPushButton *>(lo->itemAt(9)->widget());
button->setChecked(showbox);
button = qobject_cast<QPushButton *>(lo->itemAt(4)->widget());
button = qobject_cast<QPushButton *>(lo->itemAt(10)->widget());
button->setChecked(showaxes);
// grab the last entry -> group selector
auto *cb = qobject_cast<QComboBox *>(lo->itemAt(lo->count() - 1)->widget());
cb->setCurrentText("all");
this->repaint();
}
createImage();
}
void ImageViewer::edit_size()
{
QSpinBox *field = qobject_cast<QSpinBox *>(sender());
if (field->objectName() == "xsize") {
xsize = field->value();
} else if (field->objectName() == "ysize") {
ysize = field->value();
}
createImage();
}
@ -183,6 +271,17 @@ void ImageViewer::toggle_anti()
createImage();
}
void ImageViewer::toggle_vdw()
{
QPushButton *button = qobject_cast<QPushButton *>(sender());
if (vdwfactor > 1.0)
vdwfactor = 0.4;
else
vdwfactor = 1.6;
button->setChecked(vdwfactor > 1.0);
createImage();
}
void ImageViewer::toggle_box()
{
QPushButton *button = qobject_cast<QPushButton *>(sender());
@ -215,28 +314,28 @@ void ImageViewer::do_zoom_out()
void ImageViewer::do_rot_left()
{
vrot -= 15;
vrot -= 10;
if (vrot < -180) vrot += 360;
createImage();
}
void ImageViewer::do_rot_right()
{
vrot += 15;
vrot += 10;
if (vrot > 180) vrot -= 360;
createImage();
}
void ImageViewer::do_rot_down()
{
hrot -= 15;
hrot -= 10;
if (hrot < 0) hrot += 360;
createImage();
}
void ImageViewer::do_rot_up()
{
hrot += 15;
hrot += 10;
if (hrot > 360) hrot -= 360;
createImage();
}
@ -250,6 +349,11 @@ void ImageViewer::change_group(int idx)
void ImageViewer::createImage()
{
auto *lo = layout();
if (lo) lo = lo->itemAt(0)->layout();
if (lo) qobject_cast<QLabel *>(lo->itemAt(1)->widget())->setEnabled(true);
this->repaint();
QSettings settings;
QString dumpcmd = QString("write_dump ") + group + " image ";
QDir dumpdir(QDir::tempPath());
@ -258,15 +362,35 @@ void ImageViewer::createImage()
settings.beginGroup("snapshot");
int aa = antialias ? 2 : 1;
int xsize = settings.value("xsize", 800).toInt() * aa;
int ysize = settings.value("ysize", 600).toInt() * aa;
int tmpxsize = xsize * aa;
int tmpysize = ysize * aa;
int hhrot = (hrot > 180) ? 360 - hrot : hrot;
// determine elements from masses and set their display diameters from the VDW radii
int ntypes = lammps->extract_setting("ntypes");
int nbondtypes = lammps->extract_setting("nbondtypes");
double *masses = (double *)lammps->extract_atom("mass");
QString units = (const char *)lammps->extract_global("units");
QString elements = "element ";
QString adiams;
if ((units == "real") || (units == "metal")) {
for (int i = 1; i <= ntypes; ++i) {
int idx = get_pte_from_mass(masses[i]);
elements += QString(pte_label[idx]) + blank;
adiams += QString("adiam %1 %2 ").arg(i).arg(vdwfactor * pte_vdw_radius[idx]);
}
}
if (!adiams.isEmpty())
dumpcmd += blank + "element";
else
dumpcmd += blank + settings.value("color", "type").toString();
dumpcmd += blank + settings.value("diameter", "type").toString();
dumpcmd += QString(" size ") + QString::number(xsize) + blank + QString::number(ysize);
dumpcmd += QString(" size ") + QString::number(tmpxsize) + blank + QString::number(tmpysize);
dumpcmd += QString(" zoom ") + QString::number(zoom);
lammps->command(dumpcmd.toLocal8Bit());
dumpcmd += " shiny 0.5 ";
if (nbondtypes > 0) dumpcmd += " bond atom 0.4 ";
if (lammps->extract_setting("dimension") == 3) {
dumpcmd += QString(" view ") + QString::number(hhrot) + blank + QString::number(vrot);
}
@ -277,12 +401,13 @@ void ImageViewer::createImage()
dumpcmd += QString(" box no 0.0");
if (showaxes)
dumpcmd += QString(" axes yes 0.2 0.025");
dumpcmd += QString(" axes yes 0.5 0.025");
else
dumpcmd += QString(" axes no 0.0 0.0");
dumpcmd += " modify boxcolor " + settings.value("boxcolor", "yellow").toString();
dumpcmd += " backcolor " + settings.value("background", "black").toString();
if (!adiams.isEmpty()) dumpcmd += blank + elements + blank + adiams + blank;
settings.endGroup();
lammps->command(dumpcmd.toLocal8Bit());
@ -299,13 +424,12 @@ void ImageViewer::createImage()
}
dumpfile.remove();
settings.beginGroup("snapshot");
xsize = settings.value("xsize", 800).toInt();
ysize = settings.value("ysize", 600).toInt();
settings.endGroup();
// scale back to achieve antialiasing
image = newImage.scaled(xsize, ysize, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
imageLabel->setPixmap(QPixmap::fromImage(image));
imageLabel->adjustSize();
if (lo) qobject_cast<QLabel *>(lo->itemAt(1)->widget())->setEnabled(false);
this->repaint();
}
void ImageViewer::saveAs()

View File

@ -45,9 +45,11 @@ private slots:
void normalSize();
void fitToWindow();
void edit_size();
void reset_view();
void toggle_ssao();
void toggle_anti();
void toggle_vdw();
void toggle_box();
void toggle_axes();
void do_zoom_in();
@ -86,8 +88,9 @@ private:
LammpsWrapper *lammps;
QString group;
QString filename;
int xsize, ysize;
int hrot, vrot;
double zoom;
double zoom, vdwfactor;
bool showbox, showaxes, antialias, usessao;
};
#endif

Binary file not shown (before: 67 KiB)

View File

@ -34,6 +34,7 @@
#include <QPlainTextEdit>
#include <QProcess>
#include <QProgressBar>
#include <QPushButton>
#include <QSettings>
#include <QShortcut>
#include <QStatusBar>
@ -241,7 +242,22 @@ LammpsGui::LammpsGui(QWidget *parent, const char *filename) :
auto pix = QPixmap(":/lammps-icon-128x128.png");
lammpsstatus->setPixmap(pix.scaled(22, 22, Qt::KeepAspectRatio));
ui->statusbar->addWidget(lammpsstatus);
lammpsstatus->setToolTip("LAMMPS instance is active");
lammpsstatus->hide();
auto *lammpsrun = new QPushButton(QIcon(":/system-run.png"), "");
auto *lammpsstop = new QPushButton(QIcon(":/process-stop.png"), "");
auto *lammpsimage = new QPushButton(QIcon(":/emblem-photos.png"), "");
lammpsrun->setToolTip("Run LAMMPS on input");
lammpsstop->setToolTip("Stop LAMMPS");
lammpsimage->setToolTip("Create snapshot image");
ui->statusbar->addWidget(lammpsrun);
ui->statusbar->addWidget(lammpsstop);
ui->statusbar->addWidget(lammpsimage);
connect(lammpsrun, &QPushButton::released, this, &LammpsGui::run_buffer);
connect(lammpsstop, &QPushButton::released, this, &LammpsGui::stop_run);
connect(lammpsimage, &QPushButton::released, this, &LammpsGui::render_image);
status = new QLabel("Ready.");
status->setFixedWidth(300);
ui->statusbar->addWidget(status);
@ -280,6 +296,10 @@ void LammpsGui::new_document()
current_file.clear();
ui->textEdit->document()->setPlainText(QString());
if (lammps.is_running()) {
stop_run();
runner->wait();
}
lammps.close();
lammpsstatus->hide();
setWindowTitle(QString("LAMMPS-GUI - *unknown*"));
@ -549,6 +569,10 @@ void LammpsGui::save_as()
void LammpsGui::quit()
{
if (lammps.is_running()) {
stop_run();
runner->wait();
}
lammps.close();
lammpsstatus->hide();
lammps.finalize();
@ -806,7 +830,7 @@ void LammpsGui::run_buffer()
char *input = mystrdup(ui->textEdit->toPlainText().toStdString() + "\n");
is_running = true;
LammpsRunner *runner = new LammpsRunner(this);
runner = new LammpsRunner(this);
runner->setup_run(&lammps, input);
connect(runner, &LammpsRunner::resultReady, this, &LammpsGui::run_done);
connect(runner, &LammpsRunner::finished, runner, &QObject::deleteLater);
@ -1035,6 +1059,11 @@ void LammpsGui::edit_variables()
SetVariables vars(newvars);
if (vars.exec() == QDialog::Accepted) {
variables = newvars;
if (lammps.is_running()) {
stop_run();
runner->wait();
delete runner;
}
lammps.close();
lammpsstatus->hide();
}
@ -1057,6 +1086,11 @@ void LammpsGui::preferences()
(oldthreads != settings.value("nthreads", 1).toInt()) ||
(oldecho != settings.value("echo", 0).toInt()) ||
(oldcite != settings.value("cite", 0).toInt())) {
if (lammps.is_running()) {
stop_run();
runner->wait();
delete runner;
}
lammps.close();
lammpsstatus->hide();
}
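Keeping the LammpsRunner thread in the new runner member (instead of a local variable in run_buffer()) is what enables the pattern repeated above in new_document(), quit(), edit_variables(), and preferences(): stop a running calculation and wait for the worker thread before closing or re-creating the LAMMPS instance. As a sketch only, the repeated block could be factored into a helper like the following; this is an editorial illustration, not actual GUI code, although the names follow the existing members:

// hypothetical helper, not part of this diff
void LammpsGui::finish_active_run()
{
  if (lammps.is_running()) {
    stop_run();       // request the running LAMMPS calculation to stop
    runner->wait();   // block until the runner thread has finished (QThread-style wait())
  }
  lammps.close();
  lammpsstatus->hide();
}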

View File

@ -26,6 +26,7 @@
// forward declarations
class GeneralTab;
class LammpsRunner;
QT_BEGIN_NAMESPACE
namespace Ui {
@ -114,6 +115,7 @@ private:
QList<QPair<QString, QString>> variables;
LammpsWrapper lammps;
LammpsRunner *runner;
std::string plugin_path;
bool is_running;
std::vector<char *> lammps_args;

View File

@ -33,7 +33,7 @@
<file>help-about.png</file>
<file>emblem-photos.png</file>
<file>process-stop.png</file>
<file>emblem-default.png</file>
<file>system-run.png</file>
<file>window-close.png</file>
<file>application-plot.png</file>
<file>application-calc.png</file>
@ -43,5 +43,6 @@
<file>antialias.png</file>
<file>ovito.png</file>
<file>vmd.png</file>
<file>vdw-style.png</file>
</qresource>
</RCC>

View File

@ -212,7 +212,7 @@
</action>
<action name="actionRun_Buffer">
<property name="icon">
<iconset theme=":/emblem-default.png"/>
<iconset theme=":/system-run.png"/>
</property>
<property name="text">
<string>&amp;Run LAMMPS</string>

View File

@ -45,6 +45,32 @@ int LammpsWrapper::extract_setting(const char *keyword)
return val;
}
void *LammpsWrapper::extract_global(const char *keyword)
{
void *val = nullptr;
if (lammps_handle) {
#if defined(LAMMPS_GUI_USE_PLUGIN)
val = ((liblammpsplugin_t *)plugin_handle)->extract_global(lammps_handle, keyword);
#else
val = lammps_extract_global(lammps_handle, keyword);
#endif
}
return val;
}
void *LammpsWrapper::extract_atom(const char *keyword)
{
void *val = nullptr;
if (lammps_handle) {
#if defined(LAMMPS_GUI_USE_PLUGIN)
val = ((liblammpsplugin_t *)plugin_handle)->extract_atom(lammps_handle, keyword);
#else
val = lammps_extract_atom(lammps_handle, keyword);
#endif
}
return val;
}
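The two new accessors wrap lammps_extract_global() and lammps_extract_atom() from the library interface (or their plugin equivalents) and are what the image viewer uses to map per-type masses to element names. A usage sketch follows as an editorial illustration, not part of this diff; the header name and the assumption of an instance with a system already defined are mine:

#include <cstdio>
#include "lammpswrapper.h"   // assumed header name for the LammpsWrapper class

// mirrors the element-detection loop added to ImageViewer::createImage() in this commit
void print_per_type_masses(LammpsWrapper &lammps)
{
  const int ntypes = lammps.extract_setting("ntypes");
  auto *mass  = static_cast<double *>(lammps.extract_atom("mass"));       // 1-indexed by atom type
  auto *units = static_cast<const char *>(lammps.extract_global("units"));
  if (!mass || !units) return;                                            // no system defined yet
  for (int i = 1; i <= ntypes; ++i)
    std::printf("type %d: mass %g (%s units)\n", i, mass[i], units);
}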
int LammpsWrapper::id_count(const char *keyword)
{
int val = 0;

View File

@ -29,15 +29,21 @@ public:
void force_timeout();
int extract_setting(const char *keyword);
void *extract_global(const char *keyword);
void *extract_atom(const char *keyword);
int id_count(const char *idtype);
int id_name(const char *idtype, int idx, char *buf, int buflen);
double get_thermo(const char *keyword);
void *last_thermo(const char *keyword, int idx);
bool is_open() const { return lammps_handle != nullptr; }
bool is_running();
bool has_error() const;
int get_last_error_message(char *errorbuf, int buflen);
bool config_accelerator(const char *package, const char *category, const char *setting) const;
bool config_has_package(const char *pkg) const;
bool has_gpu_device() const;

View File

@ -0,0 +1,205 @@
/***************************************************************************
* RCS INFORMATION:
*
* $RCSfile: periodic_table.h,v $
* $Author: johns $ $Locker: $ $State: Exp $
* $Revision: 1.12 $ $Date: 2009/01/21 17:45:41 $
*
***************************************************************************/
/*
* periodic table of elements and helper functions to convert
* ordinal numbers to labels and back.
* all tables and functions are declared static, so that it
* can be safely included by all plugins that may need it.
*
* 2002-2009 akohlmey@cmm.chem.upenn.edu, vmd@ks.uiuc.edu
*/
#include <string.h>
#include <ctype.h>
/* periodic table of elements for translation of ordinal to atom type */
static const char *pte_label[] = {
"X", "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne",
"Na", "Mg", "Al", "Si", "P" , "S", "Cl", "Ar", "K", "Ca", "Sc",
"Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge",
"As", "Se", "Br", "Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc",
"Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te", "I", "Xe",
"Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb",
"Dy", "Ho", "Er", "Tm", "Yb", "Lu", "Hf", "Ta", "W", "Re", "Os",
"Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr",
"Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf",
"Es", "Fm", "Md", "No", "Lr", "Rf", "Db", "Sg", "Bh", "Hs", "Mt",
"Ds", "Rg"
};
static const int nr_pte_entries = sizeof(pte_label) / sizeof(char *);
/* corresponding table of masses. */
static const double pte_mass[] = {
/* X */ 0.00000, 1.00794, 4.00260, 6.941, 9.012182, 10.811,
/* C */ 12.0107, 14.0067, 15.9994, 18.9984032, 20.1797,
/* Na */ 22.989770, 24.3050, 26.981538, 28.0855, 30.973761,
/* S */ 32.065, 35.453, 39.948, 39.0983, 40.078, 44.955910,
/* Ti */ 47.867, 50.9415, 51.9961, 54.938049, 55.845, 58.9332,
/* Ni */ 58.6934, 63.546, 65.409, 69.723, 72.64, 74.92160,
/* Se */ 78.96, 79.904, 83.798, 85.4678, 87.62, 88.90585,
/* Zr */ 91.224, 92.90638, 95.94, 98.0, 101.07, 102.90550,
/* Pd */ 106.42, 107.8682, 112.411, 114.818, 118.710, 121.760,
/* Te */ 127.60, 126.90447, 131.293, 132.90545, 137.327,
/* La */ 138.9055, 140.116, 140.90765, 144.24, 145.0, 150.36,
/* Eu */ 151.964, 157.25, 158.92534, 162.500, 164.93032,
/* Er */ 167.259, 168.93421, 173.04, 174.967, 178.49, 180.9479,
/* W */ 183.84, 186.207, 190.23, 192.217, 195.078, 196.96655,
/* Hg */ 200.59, 204.3833, 207.2, 208.98038, 209.0, 210.0, 222.0,
/* Fr */ 223.0, 226.0, 227.0, 232.0381, 231.03588, 238.02891,
/* Np */ 237.0, 244.0, 243.0, 247.0, 247.0, 251.0, 252.0, 257.0,
/* Md */ 258.0, 259.0, 262.0, 261.0, 262.0, 266.0, 264.0, 269.0,
/* Mt */ 268.0, 271.0, 272.0
};
/*
* corresponding table of VDW radii.
* van der Waals radii are taken from A. Bondi,
* J. Phys. Chem., 68, 441 - 452, 1964,
* except the value for H, which is taken from R.S. Rowland & R. Taylor,
* J.Phys.Chem., 100, 7384 - 7391, 1996. Radii that are not available in
* either of these publications have RvdW = 2.00 \AA
* The radii for ions (Na, K, Cl, Ca, Mg, and Cs) are based on the CHARMM27
* Rmin/2 parameters for (SOD, POT, CLA, CAL, MG, CES) by default.
*/
static const double pte_vdw_radius[] = {
/* X */ 1.5, 1.2, 1.4, 1.82, 2.0, 2.0,
/* C */ 1.7, 1.55, 1.52, 1.47, 1.54,
/* Na */ 1.36, 1.18, 2.0, 2.1, 1.8,
/* S */ 1.8, 2.27, 1.88, 1.76, 1.37, 2.0,
/* Ti */ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
/* Ni */ 1.63, 1.4, 1.39, 1.07, 2.0, 1.85,
/* Se */ 1.9, 1.85, 2.02, 2.0, 2.0, 2.0,
/* Zr */ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
/* Pd */ 1.63, 1.72, 1.58, 1.93, 2.17, 2.0,
/* Te */ 2.06, 1.98, 2.16, 2.1, 2.0,
/* La */ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
/* Eu */ 2.0, 2.0, 2.0, 2.0, 2.0,
/* Er */ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
/* W */ 2.0, 2.0, 2.0, 2.0, 1.72, 1.66,
/* Hg */ 1.55, 1.96, 2.02, 2.0, 2.0, 2.0, 2.0,
/* Fr */ 2.0, 2.0, 2.0, 2.0, 2.0, 1.86,
/* Np */ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
/* Md */ 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
/* Mt */ 2.0, 2.0, 2.0
};
/* lookup functions */
static const char *get_pte_label(const int idx)
{
if ((idx < 1) || (idx >= nr_pte_entries)) return pte_label[0];
return pte_label[idx];
}
static double get_pte_mass(const int idx)
{
if ((idx < 1) || (idx >= nr_pte_entries)) return pte_mass[0];
return pte_mass[idx];
}
static double get_pte_vdw_radius(const int idx)
{
if ((idx < 1) || (idx >= nr_pte_entries)) return pte_vdw_radius[0];
#if 1
/* Replace the Hydrogen radius with an "all-atom" radius */
if (idx == 1)
return 1.0; /* H */
#else
/* Replace with old VMD atom radii values */
switch (idx) {
case 1: return 1.0; /* H */
case 6: return 1.5; /* C */
case 7: return 1.4; /* N */
case 8: return 1.3; /* O */
case 9: return 1.2; /* F */
case 15: return 1.5; /* P */
case 16: return 1.9; /* S */
}
#endif
return pte_vdw_radius[idx];
}
static int get_pte_idx(const char *label)
{
int i;
char atom[3];
/* zap string */
atom[0] = (char) 0;
atom[1] = (char) 0;
atom[2] = (char) 0;
/* if we don't have a null-pointer, there must be at least two
* chars, which is all we need. we convert to the capitalization
* convention of the table above during assignment. */
if (label != NULL) {
atom[0] = (char) toupper((int) label[0]);
atom[1] = (char) tolower((int) label[1]);
}
/* discard numbers in atom label */
if (isdigit(atom[1])) atom[1] = (char) 0;
for (i=0; i < nr_pte_entries; ++i) {
if ( (pte_label[i][0] == atom[0])
&& (pte_label[i][1] == atom[1]) ) return i;
}
return 0;
}
static int get_pte_idx_from_string(const char *label) {
int i, ind;
char atom[3];
if (label != NULL) {
/* zap string */
atom[0] = atom[1] = atom[2] = '\0';
for (ind=0,i=0; (ind<2) && (label[i]!='\0'); i++) {
if (label[i] != ' ') {
atom[ind] = toupper(label[i]);
ind++;
}
}
if (ind < 1)
return 0; /* no non-whitespace characters */
for (i=0; i < nr_pte_entries; ++i) {
if ((toupper(pte_label[i][0]) == atom[0]) && (toupper(pte_label[i][1]) == atom[1]))
return i;
}
}
return 0;
}
#if 0
#include <stdio.h>
int main() {
int i;
printf("Periodic table check/dump\n");
printf(" Table contains data for %d elements\n", nr_pte_entries);
printf(" Mass table size check: %d\n", sizeof(pte_mass) / sizeof(double));
printf(" VDW table size check: %d\n", sizeof(pte_vdw_radius) / sizeof(double));
printf("\n");
printf("Symbol Num Mass rVDW\n");
for (i=0; i<nr_pte_entries; i++) {
printf(" %-2s %3d %6.2f %4.2f\n",
get_pte_label(i), i, get_pte_mass(i), get_pte_vdw_radius(i));
}
return 0;
}
#endif

View File

@ -138,6 +138,8 @@ void Preferences::accept()
if (box) settings->setValue("box", box->isChecked());
box = tabWidget->findChild<QCheckBox *>("axes");
if (box) settings->setValue("axes", box->isChecked());
box = tabWidget->findChild<QCheckBox *>("vdwstyle");
if (box) settings->setValue("vdwstyle", box->isChecked());
QComboBox *combo = tabWidget->findChild<QComboBox *>("background");
if (combo) settings->setValue("background", combo->currentText());
combo = tabWidget->findChild<QComboBox *>("boxcolor");
@ -395,6 +397,7 @@ SnapshotTab::SnapshotTab(QSettings *_settings, QWidget *parent) :
auto *ssao = new QLabel("HQ Image mode:");
auto *bbox = new QLabel("Show Box:");
auto *axes = new QLabel("Show Axes:");
auto *vdw = new QLabel("VDW Style:");
auto *cback = new QLabel("Background Color:");
auto *cbox = new QLabel("Box Color:");
settings->beginGroup("snapshot");
@ -405,6 +408,7 @@ SnapshotTab::SnapshotTab(QSettings *_settings, QWidget *parent) :
auto *sval = new QCheckBox;
auto *bval = new QCheckBox;
auto *eval = new QCheckBox;
auto *vval = new QCheckBox;
sval->setCheckState(settings->value("ssao", false).toBool() ? Qt::Checked : Qt::Unchecked);
sval->setObjectName("ssao");
aval->setCheckState(settings->value("antialias", false).toBool() ? Qt::Checked : Qt::Unchecked);
@ -413,6 +417,8 @@ SnapshotTab::SnapshotTab(QSettings *_settings, QWidget *parent) :
bval->setObjectName("box");
eval->setCheckState(settings->value("axes", false).toBool() ? Qt::Checked : Qt::Unchecked);
eval->setObjectName("axes");
vval->setCheckState(settings->value("vdwstyle", false).toBool() ? Qt::Checked : Qt::Unchecked);
vval->setObjectName("vdwstyle");
auto *intval = new QIntValidator(100, 100000, this);
xval->setValidator(intval);
@ -442,28 +448,31 @@ SnapshotTab::SnapshotTab(QSettings *_settings, QWidget *parent) :
boxcolor->setCurrentText(settings->value("boxcolor", "yellow").toString());
settings->endGroup();
grid->addWidget(xsize, 0, 0, Qt::AlignTop);
grid->addWidget(ysize, 1, 0, Qt::AlignTop);
grid->addWidget(zoom, 2, 0, Qt::AlignTop);
grid->addWidget(anti, 3, 0, Qt::AlignTop);
grid->addWidget(ssao, 4, 0, Qt::AlignTop);
grid->addWidget(bbox, 5, 0, Qt::AlignTop);
grid->addWidget(axes, 6, 0, Qt::AlignTop);
grid->addWidget(cback, 7, 0, Qt::AlignTop);
grid->addWidget(cbox, 8, 0, Qt::AlignTop);
grid->addWidget(xval, 0, 1, Qt::AlignTop);
grid->addWidget(yval, 1, 1, Qt::AlignTop);
grid->addWidget(zval, 2, 1, Qt::AlignTop);
grid->addWidget(aval, 3, 1, Qt::AlignTop);
grid->addWidget(sval, 4, 1, Qt::AlignVCenter);
grid->addWidget(bval, 5, 1, Qt::AlignVCenter);
grid->addWidget(eval, 6, 1, Qt::AlignVCenter);
grid->addWidget(background, 7, 1, Qt::AlignVCenter);
grid->addWidget(boxcolor, 8, 1, Qt::AlignVCenter);
int i = 0;
grid->addWidget(xsize, i, 0, Qt::AlignTop);
grid->addWidget(xval, i++, 1, Qt::AlignTop);
grid->addWidget(ysize, i, 0, Qt::AlignTop);
grid->addWidget(yval, i++, 1, Qt::AlignTop);
grid->addWidget(zoom, i, 0, Qt::AlignTop);
grid->addWidget(zval, i++, 1, Qt::AlignTop);
grid->addWidget(anti, i, 0, Qt::AlignTop);
grid->addWidget(aval, i++, 1, Qt::AlignTop);
grid->addWidget(ssao, i, 0, Qt::AlignTop);
grid->addWidget(sval, i++, 1, Qt::AlignVCenter);
grid->addWidget(bbox, i, 0, Qt::AlignTop);
grid->addWidget(bval, i++, 1, Qt::AlignVCenter);
grid->addWidget(axes, i, 0, Qt::AlignTop);
grid->addWidget(eval, i++, 1, Qt::AlignVCenter);
grid->addWidget(vdw, i, 0, Qt::AlignTop);
grid->addWidget(vval, i++, 1, Qt::AlignVCenter);
grid->addWidget(cback, i, 0, Qt::AlignTop);
grid->addWidget(background, i++, 1, Qt::AlignVCenter);
grid->addWidget(cbox, i, 0, Qt::AlignTop);
grid->addWidget(boxcolor, i++, 1, Qt::AlignVCenter);
grid->addItem(new QSpacerItem(100, 100, QSizePolicy::Minimum, QSizePolicy::Expanding), 9, 0);
grid->addItem(new QSpacerItem(100, 100, QSizePolicy::Minimum, QSizePolicy::Expanding), 9, 1);
grid->addItem(new QSpacerItem(100, 100, QSizePolicy::Expanding, QSizePolicy::Expanding), 9, 2);
grid->addItem(new QSpacerItem(100, 100, QSizePolicy::Minimum, QSizePolicy::Expanding), i, 0);
grid->addItem(new QSpacerItem(100, 100, QSizePolicy::Minimum, QSizePolicy::Expanding), i, 1);
grid->addItem(new QSpacerItem(100, 100, QSizePolicy::Expanding, QSizePolicy::Expanding), i, 2);
setLayout(grid);
}

View File

@ -34,6 +34,7 @@ private slots:
public:
bool need_relaunch;
private:
QTabWidget *tabWidget;
QDialogButtonBox *buttonBox;

Binary file not shown (before: 2.2 KiB, after: 3.9 KiB)

Binary file not shown (after: 4.7 KiB)

Some files were not shown because too many files have changed in this diff.