Compare commits

...

61 Commits

Author SHA1 Message Date
7586adbb6a Merge pull request #3029 from akohlmey/maintenance-2021-09-29
Second round of maintenance fixes for the stable release
2022-01-06 19:58:51 -05:00
69d6ddccc5 create missing de,df table elements from linear extrapolation 2022-01-05 15:34:30 -05:00
5ae496dcef backport array dimension bugfix for NETCDF package in simplified form 2022-01-03 19:55:23 -05:00
bc5d742623 explain that the computed force in python pair is force/r same as in Pair:single() 2022-01-03 10:12:38 -05:00
882e699163 Incorporate bugfixes from issue #3074, a few additional cleanups 2022-01-03 10:11:18 -05:00
9c725d79d6 correct code example for current code 2022-01-01 16:42:28 -05:00
79fbf437a3 correct format string for Error::one() 2021-12-29 16:19:10 -05:00
d130aa4289 address segfault issue with fix nve/gpu when group is not "all" 2021-12-29 14:06:52 -05:00
5d8b83a251 backport GPU package build system updates from upstream 2021-12-27 20:30:43 -05:00
5a2548a83d have internal fix/compute ids include the fix id for fix reaxff/species
this allows using the fix multiple times
also remove code and warning that checks for multiple fix instances

# Conflicts:
#	src/REAXFF/fix_reaxff_species.cpp
2021-12-23 11:36:28 -05:00
a85b310e1f add missing fclose() 2021-12-23 11:28:24 -05:00
e51fd40547 correct names of the pack/unpack routines for forward communication 2021-12-09 18:33:13 -05:00
62f271658b correct setting forward/reverse buffer size info 2021-12-08 13:58:12 -05:00
0aa742934f correct docs for pair style local/density 2021-12-08 00:51:52 -05:00
a26a709a7b correct handling of data packing for forward and reverse communication 2021-12-08 00:51:52 -05:00
027293d285 whitespace 2021-11-24 15:47:05 -05:00
f7d049ac2d generate atom tags for newly created atoms, if tags are enabled. triclinic support. 2021-11-24 15:36:16 -05:00
ea0ff1c8f7 Update CMake utility function get_lammps_version()
With the introduction of LAMMPS_UPDATE, version.h is no longer a single line
file. With this change the CMake utility will only process the LAMMPS_VERSION
line. Fixes issue #3038
2021-11-23 10:44:40 -05:00
5c1bb5f13a Write dump header after sort to fix incorrect atom count for multiproc 2021-11-22 15:52:27 -05:00
24d9b4b611 Update lebedeva potential file and docs based on email on mailing list
https://matsci.org/t/lammps-users-webpage-and-parameter-file-for-the-lebedeva-potential/39059
2021-11-17 08:45:55 -05:00
a0e75c9006 correct unit description of eta_n0 parameters. fixes #3016 2021-11-17 08:38:09 -05:00
2435b953e1 increment update counter 2021-11-17 07:04:44 -05:00
c042e12323 clarifications and corrections for the discussion of the main git branches 2021-11-17 07:04:13 -05:00
e9efe46db9 update branch names 2021-11-17 07:03:56 -05:00
ecc14b7308 update documentation to refer to the new branch names (develop, release) 2021-11-17 07:03:27 -05:00
0152fe5cdf fix segfault when using atom style smd as part of a hybrid style
also remove redundant code for clearing
2021-11-16 21:49:56 -05:00
892d17af22 plug memory leaks 2021-11-16 21:49:41 -05:00
2cca00203e Avoid file name collisions in dump unit tests
# Conflicts:
#	unittest/formats/test_dump_atom.cpp
2021-11-16 15:08:27 -05:00
9f4626a62a correct uninitialized data access bug due to shadowing of a base class member 2021-11-16 10:51:46 -05:00
e890a0b45e Merge pull request #2999 from akohlmey/maintenance-2021-09-29
Maintenance fixes for the stable release
2021-11-09 15:11:19 -05:00
68223f0385 mention that dump sorting is limited to less than 2 billion atoms 2021-11-07 08:31:15 -05:00
1291a88bff skip MPI tests if they would be oversubscribing the available processors 2021-11-07 08:30:19 -05:00
d9b687450a account for increased floating point errors when summing numbers to zero 2021-11-07 08:30:04 -05:00
bd950b37d7 change git:// protocol for accessing github to https:// protocol
https://github.blog/2021-09-01-improving-git-protocol-security-github/
2021-11-02 15:30:27 -04:00
21fcdf8c56 Fix bug in Kokkos neighborlist where stencil wasn't updated for occasional list 2021-11-02 13:17:28 -04:00
6b400fb4bf fix indexing bug 2021-10-31 16:19:17 -04:00
d982298ab2 update new LAMMPS paper citation info 2021-10-28 10:09:01 -04:00
765fd7f763 Use correct sizeof in memset 2021-10-27 17:46:37 -04:00
0325047c01 update a few GPU kernels so they can be compiled on GPUs without double precisions support 2021-10-21 07:34:05 -04:00
2dce8923ee more direct version of clearing out loaded plugins 2021-10-19 08:28:19 -04:00
8d1ba074be wipe out all loaded plugins before destroying the LAMMPS instance 2021-10-18 18:06:09 -04:00
4675a3b560 Only check for GPU double precision support if a GPU is present 2021-10-18 13:44:37 -04:00
8999b1f69f add a LAMMPS_UPDATE string define to signal updates to stable releases 2021-10-17 18:06:04 -04:00
6c2b19c11b Add support for an "Update #" appendix to the version string
This is for informative output only, so that any code depending
on the LAMMPS_VERSION define will not have to be changed and no
warnings will be printed etc.
2021-10-17 18:05:29 -04:00
a425334928 port dump vtk to correctly support custom per-atom arrays and fix some bugs 2021-10-17 11:00:33 -04:00
db2faf2789 fix bugs related to custom per-atom properties in dump style custom 2021-10-17 11:00:21 -04:00
fdbb7d0da4 Report only compatible GPU, i.e. no GPU if mixed/double precision is requested but the hardware does not support it 2021-10-15 20:26:47 -04:00
52cd99918f pppm kspace styles also require -DFFT_SINGLE when using GPUs in single precision 2021-10-15 20:24:47 -04:00
a3e6a95ffb allow single precision FFT introspection 2021-10-15 20:24:47 -04:00
5b65169997 correct expansion of fix/compute/variable arguments to avoid bogus thermo output 2021-10-15 20:23:57 -04:00
5f3bf69e30 plug memory leaks 2021-10-15 17:00:46 -04:00
507c02b9af must set define to "see" the lammps_open() library function 2021-10-09 10:21:31 -04:00
b7fe47ba48 Fix bugs and compilation issues in KOKKOS 2021-10-08 09:39:53 -04:00
7dfd11da4b re-freeze Sphinx and other pip installed packages for doc build
The change relative to the stable release fixes a bug with python 3.10 support
2021-10-05 10:52:34 -04:00
97ba95f30e fix a couple more bugs like in 5246cedda6 2021-10-05 10:39:03 -04:00
c1945b4ec9 Fix misplaced MPI calls bug in pair style drip 2021-10-04 07:12:50 -04:00
c4291a4b8e unfreeze versions of python packages used to build the documentation 2021-10-02 23:57:23 -04:00
5b5dfa86c5 also update eigen download for traditional build 2021-10-02 23:56:28 -04:00
3ca3f6959f update eigen3 to the latest release and move download to our own server 2021-10-02 22:55:06 -04:00
f7b7bfa406 Avoid assertions in PythonCapabilities check when using external KOKKOS 2021-10-01 12:05:59 -04:00
3d2f29c92d fix memory allocation bug causing memory corruption on 32-bit arches 2021-10-01 01:16:45 -04:00
95 changed files with 2291 additions and 1307 deletions

View File

@ -25,7 +25,7 @@ function(validate_option name values)
endfunction(validate_option)
function(get_lammps_version version_header variable)
file(READ ${version_header} line)
file(STRINGS ${version_header} line REGEX LAMMPS_VERSION)
set(MONTHS x Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec)
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\1" day "${line}")
string(REGEX REPLACE "#define LAMMPS_VERSION \"([0-9]+) ([A-Za-z]+) ([0-9]+)\"" "\\2" month "${line}")

View File

@ -306,12 +306,12 @@ elseif(GPU_API STREQUAL "HIP")
if(HIP_COMPILER STREQUAL "clang")
add_custom_command(OUTPUT ${CUBIN_FILE}
VERBATIM COMMAND ${HIP_HIPCC_EXECUTABLE} --genco --offload-arch=${HIP_ARCH} -O3 -ffast-math -DUSE_HIP -D_${GPU_PREC_SETTING} -DLAMMPS_${LAMMPS_SIZES} -I${LAMMPS_LIB_SOURCE_DIR}/gpu -o ${CUBIN_FILE} ${CU_CPP_FILE}
VERBATIM COMMAND ${HIP_HIPCC_EXECUTABLE} --genco --offload-arch=${HIP_ARCH} -O3 -DUSE_HIP -D_${GPU_PREC_SETTING} -DLAMMPS_${LAMMPS_SIZES} -I${LAMMPS_LIB_SOURCE_DIR}/gpu -o ${CUBIN_FILE} ${CU_CPP_FILE}
DEPENDS ${CU_CPP_FILE}
COMMENT "Generating ${CU_NAME}.cubin")
else()
add_custom_command(OUTPUT ${CUBIN_FILE}
VERBATIM COMMAND ${HIP_HIPCC_EXECUTABLE} --genco -t="${HIP_ARCH}" -f=\"-O3 -ffast-math -DUSE_HIP -D_${GPU_PREC_SETTING} -DLAMMPS_${LAMMPS_SIZES} -I${LAMMPS_LIB_SOURCE_DIR}/gpu\" -o ${CUBIN_FILE} ${CU_CPP_FILE}
VERBATIM COMMAND ${HIP_HIPCC_EXECUTABLE} --genco -t="${HIP_ARCH}" -f=\"-O3 -DUSE_HIP -D_${GPU_PREC_SETTING} -DLAMMPS_${LAMMPS_SIZES} -I${LAMMPS_LIB_SOURCE_DIR}/gpu\" -o ${CUBIN_FILE} ${CU_CPP_FILE}
DEPENDS ${CU_CPP_FILE}
COMMENT "Generating ${CU_NAME}.cubin")
endif()

View File

@ -7,8 +7,9 @@ endif()
option(DOWNLOAD_EIGEN3 "Download Eigen3 instead of using an already installed one" ${DOWNLOAD_EIGEN3_DEFAULT})
if(DOWNLOAD_EIGEN3)
message(STATUS "Eigen3 download requested - we will build our own")
set(EIGEN3_URL "https://gitlab.com/libeigen/eigen/-/archive/3.3.9/eigen-3.3.9.tar.gz" CACHE STRING "URL for Eigen3 tarball")
set(EIGEN3_MD5 "609286804b0f79be622ccf7f9ff2b660" CACHE STRING "MD5 checksum of Eigen3 tarball")
set(EIGEN3_URL "https://download.lammps.org/thirdparty/eigen-3.4.0.tar.gz" CACHE STRING "URL for Eigen3 tarball")
set(EIGEN3_MD5 "4c527a9171d71a72a9d4186e65bea559" CACHE STRING "MD5 checksum of Eigen3 tarball")
mark_as_advanced(EIGEN3_URL)
mark_as_advanced(EIGEN3_MD5)
include(ExternalProject)

View File

@ -230,7 +230,7 @@ $(VENV):
)
$(MATHJAX):
@git clone -b 3.2.0 -c advice.detachedHead=0 --depth 1 git://github.com/mathjax/MathJax.git $@
@git clone -b 3.2.0 -c advice.detachedHead=0 --depth 1 https://github.com/mathjax/MathJax.git $@
$(ANCHORCHECK): $(VENV)
@( \

View File

@ -33,9 +33,9 @@ when necessary.
## Pull Requests
ALL changes to the LAMMPS code and documentation, however trivial, MUST
be submitted as a pull request to GitHub. All changes to the "master"
be submitted as a pull request to GitHub. All changes to the "develop"
branch must be made exclusively through merging pull requests. The
"unstable" and "stable" branches, respectively are only to be updated
"release" and "stable" branches, respectively are only to be updated
upon patch or stable releases with fast-forward merges based on the
associated tags. Pull requests may also be submitted to (long-running)
feature branches created by LAMMPS developers inside the LAMMPS project,
@ -123,16 +123,16 @@ and thus where this comment should be placed.
LAMMPS uses a continuous release development model with incremental
changes, i.e. significant effort is made - including automated pre-merge
testing - that the code in the branch "master" does not get easily
testing - that the code in the branch "develop" does not get easily
broken. These tests are run after every update to a pull request. More
extensive and time consuming tests (including regression testing) are
performed after code is merged to the "master" branch. There are patch
performed after code is merged to the "develop" branch. There are patch
releases of LAMMPS every 3-5 weeks at a point, when the LAMMPS
developers feel, that a sufficient amount of changes have happened, and
the post-merge testing has been successful. These patch releases are
marked with a `patch_<version date>` tag and the "unstable" branch
marked with a `patch_<version date>` tag and the "release" branch
follows only these versions (and thus is always supposed to be of
production quality, unlike "master", which may be temporarily broken, in
production quality, unlike "develop", which may be temporarily broken, in
the case of larger change sets or unexpected incompatibilities or side
effects.

View File

@ -14,7 +14,7 @@ environments with restricted disk space capacity it may be needed to
reduce the storage requirements. Here are some suggestions:
- Create a so-called shallow repository by cloning only the last commit
instead of the full project history by using ``git clone git@github.com:lammps/lammps --depth=1 --branch=master``.
instead of the full project history by using ``git clone git@github.com:lammps/lammps --depth=1 --branch=develop``.
This reduces the downloaded size to about half. With ``--depth=1`` it is not possible to check out different
versions/branches of LAMMPS, using ``--depth=1000`` will make multiple recent versions available at little
extra storage needs (the entire git history had nearly 30,000 commits in fall 2021).

View File

@ -33,12 +33,15 @@ various tools and files. Some of them have to be installed (see below). For
the rest the build process will attempt to download and install them into
a python virtual environment and local folders.
A current version of the manual (latest patch release, aka unstable
branch) is available online at:
`https://docs.lammps.org/Manual.html <https://docs.lammps.org/Manual.html>`_.
A version of the manual corresponding to the ongoing development (aka master branch)
is available online at: `https://docs.lammps.org/latest/
<https://docs.lammps.org/latest/>`_
A current version of the manual (latest patch release, that is the state
of the *release* branch) is available online at:
`https://docs.lammps.org/ <https://docs.lammps.org/>`_.
A version of the manual corresponding to the ongoing development (that is
the state of the *develop* branch) is available online at:
`https://docs.lammps.org/latest/ <https://docs.lammps.org/latest/>`_
A version of the manual corresponding to the latest stable LAMMPS release
(that is the state of the *stable* branch) is available online at:
`https://docs.lammps.org/stable/ <https://docs.lammps.org/stable/>`_
Build using GNU make
--------------------

View File

@ -53,7 +53,7 @@ of each timestep. First of all, implement a constructor:
if (narg < 4)
error->all(FLERR,"Illegal fix print/vel command");
nevery = force->inumeric(FLERR,arg[3]);
nevery = utils::inumeric(FLERR,arg[3],false,lmp);
if (nevery <= 0)
error->all(FLERR,"Illegal fix print/vel command");
}

View File

@ -7,11 +7,11 @@ LAMMPS GitHub tutorial
This document describes the process of how to use GitHub to integrate
changes or additions you have made to LAMMPS into the official LAMMPS
distribution. It uses the process of updating this very tutorial as
an example to describe the individual steps and options. You need to
be familiar with git and you may want to have a look at the
`git book <http://git-scm.com/book/>`_ to reacquaint yourself with some
of the more advanced git features used below.
distribution. It uses the process of updating this very tutorial as an
example to describe the individual steps and options. You need to be
familiar with git and you may want to have a look at the `git book
<http://git-scm.com/book/>`_ to familiarize yourself with some of the
more advanced git features used below.
As of fall 2016, submitting contributions to LAMMPS via pull requests
on GitHub is the preferred option for integrating contributed features
@ -37,15 +37,15 @@ username or e-mail address and password.
**Forking the repository**
To get changes into LAMMPS, you need to first fork the `lammps/lammps`
repository on GitHub. At the time of writing, *master* is the preferred
repository on GitHub. At the time of writing, *develop* is the preferred
target branch. Thus go to `LAMMPS on GitHub <https://github.com/lammps/lammps>`_
and make sure branch is set to "master", as shown in the figure below.
and make sure branch is set to "develop", as shown in the figure below.
.. image:: JPG/tutorial_branch.png
:align: center
If it is not, use the button to change it to *master*\ . Once it is, use the
fork button to create a fork.
If it is not, use the button to change it to *develop*. Once it is, use
the fork button to create a fork.
.. image:: JPG/tutorial_fork.png
:align: center
@ -64,11 +64,12 @@ LAMMPS development.
**Adding changes to your own fork**
Additions to the upstream version of LAMMPS are handled using *feature
branches*\ . For every new feature, a so-called feature branch is
branches*. For every new feature, a so-called feature branch is
created, which contains only those modifications relevant to one specific
feature. For example, adding a single fix would consist of creating a
branch with only the fix header and source file and nothing else. It is
explained in more detail here: `feature branch workflow <https://www.atlassian.com/git/tutorials/comparing-workflows/feature-branch-workflow>`_.
explained in more detail here: `feature branch workflow
<https://www.atlassian.com/git/tutorials/comparing-workflows/feature-branch-workflow>`_.
**Feature branches**
@ -94,8 +95,8 @@ The above command copies ("clones") the git repository to your local
machine to a directory with the name you chose. If none is given, it will
default to "lammps". Typical names are "mylammps" or something similar.
You can use this local clone to make changes and
test them without interfering with the repository on GitHub.
You can use this local clone to make changes and test them without
interfering with the repository on GitHub.
To pull changes from upstream into this copy, you can go to the directory
and use git pull:
@ -103,28 +104,46 @@ and use git pull:
.. code-block:: bash
$ cd mylammps
$ git checkout master
$ git pull https://github.com/lammps/lammps
$ git checkout develop
$ git pull https://github.com/lammps/lammps develop
You can also add this URL as a remote:
.. code-block:: bash
$ git remote add lammps_upstream https://www.github.com/lammps/lammps
$ git remote add upstream https://www.github.com/lammps/lammps
At this point, you typically make a feature branch from the updated master
From then on you can update your upstream branches with:
.. code-block:: bash
$ git fetch upstream
and then refer to the upstream repository branches with
`upstream/develop` or `upstream/release` and so on.
At this point, you typically make a feature branch from the updated
branch for the feature you want to work on. This tutorial contains the
workflow that updated this tutorial, and hence we will call the branch
"github-tutorial-update":
.. code-block:: bash
$ git checkout -b github-tutorial-update master
$ git fetch upstream
$ git checkout -b github-tutorial-update upstream/develop
Now that we have changed branches, we can make our changes to our local
repository. Just remember that if you want to start working on another,
unrelated feature, you should switch branches!
.. note::
Committing changes to the *develop*, *release*, or *stable* branches
is strongly discouraged. While it may be convenient initially, it
will create more work in the long run. Various texts and tutorials
on using git effectively discuss the motivation for using feature
branches instead.
**After changes are made**
After everything is done, add the files to the branch and commit them:
@ -287,28 +306,32 @@ After each push, the automated checks are run again.
LAMMPS developers may add labels to your pull request to assign it to
categories (mostly for bookkeeping purposes), but a few of them are
important: needs_work, work_in_progress, test-for-regression, and
full-regression-test. The first two indicate, that your pull request
is not considered to be complete. With "needs_work" the burden is
exclusively on you; while "work_in_progress" can also mean, that a
LAMMPS developer may want to add changes. Please watch the comments
to the pull requests. The two "test" labels are used to trigger
extended tests before the code is merged. This is sometimes done by
LAMMPS developers, if they suspect that there may be some subtle
side effects from your changes. It is not done by default, because
those tests are very time consuming.
important: *needs_work*, *work_in_progress*, *run_tests*,
*test_for_regression*, and *ready_for_merge*. The first two indicate,
that your pull request is not considered to be complete. With
"needs_work" the burden is exclusively on you; while
"work_in_progress" can also mean, that a LAMMPS developer may want to
add changes. Please watch the comments to the pull requests. The two
"test" labels are used to trigger extended tests before the code is
merged. This is sometimes done by LAMMPS developers, if they suspect
that there may be some subtle side effects from your changes. It is not
done by default, because those tests are very time consuming. The
*ready_for_merge* label is usually attached when the LAMMPS developer
assigned to the pull request considers this request complete and to
trigger a final full test evaluation.
**Reviews**
As of Summer 2018, a pull request needs at least 1 approving review
from a LAMMPS developer with write access to the repository.
In case your changes touch code that certain developers are associated
with, they are auto-requested by the GitHub software. Those associations
are set in the file
`.github/CODEOWNERS <https://github.com/lammps/lammps/blob/master/.github/CODEOWNERS>`_
Thus if you want to be automatically notified to review when anybody
changes files or packages, that you have contributed to LAMMPS, you can
add suitable patterns to that file, or a LAMMPS developer may add you.
As of Fall 2021, a pull request needs to pass all automatic tests and at
least 1 approving review from a LAMMPS developer with write access to
the repository before it is eligible for merging. In case your changes
touch code that certain developers are associated with, they are
auto-requested by the GitHub software. Those associations are set in
the file `.github/CODEOWNERS
<https://github.com/lammps/lammps/blob/develop/.github/CODEOWNERS>`_ Thus
if you want to be automatically notified to review when anybody changes
files or packages, that **you** have contributed to LAMMPS, you can add
suitable patterns to that file, or a LAMMPS developer may add you.
Otherwise, you can also manually request reviews from specific developers,
or LAMMPS developers - in their assessment of your pull request - may
@ -329,7 +352,7 @@ LAMMPS developer (including him/herself) or c) Axel Kohlmeyer (akohlmey).
After the review, the developer can choose to implement changes directly
or suggest them to you.
* Case c) means that the pull request has been assigned to the developer
overseeing the merging of pull requests into the master branch.
overseeing the merging of pull requests into the *develop* branch.
In this case, Axel assigned the tutorial to Steve:
@ -351,11 +374,11 @@ Sometimes, however, you might not feel comfortable having other people
push changes into your own branch, or maybe the maintainers are not sure
their idea was the right one. In such a case, they can make changes,
reassign you as the assignee, and file a "reverse pull request", i.e.
file a pull request in your GitHub repository to include changes in the
branch, that you have submitted as a pull request yourself. In that
case, you can choose to merge their changes back into your branch,
possibly make additional changes or corrections and proceed from there.
It looks something like this:
file a pull request in **your** forked GitHub repository to include
changes in the branch, that you have submitted as a pull request
yourself. In that case, you can choose to merge their changes back into
your branch, possibly make additional changes or corrections and proceed
from there. It looks something like this:
.. image:: JPG/tutorial_reverse_pull_request.png
:align: center
@ -419,7 +442,7 @@ This merge also shows up on the lammps GitHub page:
**After a merge**
When everything is fine, the feature branch is merged into the master branch:
When everything is fine, the feature branch is merged into the *develop* branch:
.. image:: JPG/tutorial_merged.png
:align: center
@ -433,8 +456,8 @@ branch!
.. code-block:: bash
$ git checkout master
$ git pull master
$ git checkout develop
$ git pull https://github.com/lammps/lammps develop
$ git branch -d github-tutorial-update
If you do not pull first, it is not really a problem but git will warn
@ -442,6 +465,7 @@ you at the next statement that you are deleting a local branch that
was not yet fully merged into HEAD. This is because git does not yet
know your branch just got merged into LAMMPS upstream. If you
first delete and then pull, everything should still be fine.
You can display all branches that are fully merged by:
Finally, if you delete the branch locally, you might want to push this
to your remote(s) as well:
@ -453,14 +477,14 @@ to your remote(s) as well:
**Recent changes in the workflow**
Some changes to the workflow are not captured in this tutorial. For
example, in addition to the master branch, to which all new features
should be submitted, there is now also an "unstable" and a "stable"
branch; these have the same content as "master", but are only updated
after a patch release or stable release was made.
Furthermore, the naming of the patches now follows the pattern
"patch_<Day><Month><Year>" to simplify comparisons between releases.
Finally, all patches and submissions are subject to automatic testing
and code checks to make sure they at the very least compile.
example, in addition to the *develop* branch, to which all new features
should be submitted, there is also a *release* and a *stable* branch;
these have the same content as *develop*, but are only updated after a
patch release or stable release was made. Furthermore, the naming of
the patches now follows the pattern "patch_<Day><Month><Year>" to
simplify comparisons between releases. Finally, all patches and
submissions are subject to automatic testing and code checks to make
sure they at the very least compile.
A discussion of the LAMMPS developer GitHub workflow can be found in the file
`doc/github-development-workflow.md <https://github.com/lammps/lammps/blob/master/doc/github-development-workflow.md>`_
`doc/github-development-workflow.md <https://github.com/lammps/lammps/blob/develop/doc/github-development-workflow.md>`_

View File

@ -9,7 +9,8 @@ has several advantages:
command.
* You can create your own development branches to add code to LAMMPS.
* You can submit your new features back to GitHub for inclusion in
LAMMPS.
LAMMPS. For that you should first create your own :doc:`fork on
GitHub <Howto_github>`.
You must have `git <git_>`_ installed on your system to use the
commands explained below to communicate with the git servers on
@ -20,35 +21,56 @@ provides `limited support for subversion clients <svn_>`_.
As of October 2016, the official home of public LAMMPS development is
on GitHub. The previously advertised LAMMPS git repositories on
git.lammps.org and bitbucket.org are now deprecated or offline.
git.lammps.org and bitbucket.org are now offline or deprecated.
.. _git: https://git-scm.com
.. _svn: https://help.github.com/en/github/importing-your-projects-to-github/working-with-subversion-on-github
You can follow LAMMPS development on 3 different git branches:
You can follow the LAMMPS development on 3 different git branches:
* **stable** : this branch is updated with every stable release
* **unstable** : this branch is updated with every patch release
* **master** : this branch continuously follows ongoing development
* **stable** : this branch is updated from the *release* branch with
every stable release version and also has selected bug fixes and updates
back-ported from the *develop* branch
* **release** : this branch is updated with every patch release;
updates are always "fast forward" merges from *develop*
* **develop** : this branch follows the ongoing development and
is updated with every merge commit of a pull request
To access the git repositories on your box, use the clone command to
create a local copy of the LAMMPS repository with a command like:
.. code-block:: bash
$ git clone -b unstable https://github.com/lammps/lammps.git mylammps
$ git clone -b release https://github.com/lammps/lammps.git mylammps
where "mylammps" is the name of the directory you wish to create on
your machine and "unstable" is one of the 3 branches listed above.
your machine and "release" is one of the 3 branches listed above.
(Note that you actually download all 3 branches; you can switch
between them at any time using "git checkout <branch name>".)
.. admonition:: Saving time and disk space when using ``git clone``
The complete git history of the LAMMPS project is quite large because
it contains the entire commit history of the project since fall 2006,
which includes the time when LAMMPS was managed with subversion.
This includes a few commits that have added and removed some large
files (mostly by accident). If you do not need access to the entire
commit history (most people don't), you can speed up the "cloning"
process and reduce local disk space requirements by using the
*--depth* git command line flag. That will create a "shallow clone"
of the repository containing only a subset of the git history. Using
a depth of 1000 is usually sufficient to include the head commits of
the *develop* and the *release* branches. To include the head commit
of the *stable* branch you may need a depth of up to 10000. If you
later need more of the git history, you can always convert the
shallow clone into a "full clone".
Once the command completes, your directory will contain the same files
as if you unpacked a current LAMMPS tarball, with the exception, that
the HTML documentation files are not included. They can be fetched
from the LAMMPS website by typing ``make fetch`` in the doc directory.
Or they can be generated from the content provided in doc/src by
typing ``make html`` from the doc directory.
Or they can be generated from the content provided in ``doc/src`` by
typing ``make html`` from the ``doc`` directory.
After initial cloning, as bug fixes and new features are added to
LAMMPS you can stay up-to-date by typing the following git commands
@ -56,9 +78,9 @@ from within the "mylammps" directory:
.. code-block:: bash
$ git checkout unstable # not needed if you always stay in this branch
$ git checkout stable # use one of these 3 checkout commands
$ git checkout master # to choose the branch to follow
$ git checkout release # not needed if you always stay in this branch
$ git checkout stable # use one of these 3 checkout commands
$ git checkout develop # to choose the branch to follow
$ git pull
Doing a "pull" will not change any files you have added to the LAMMPS
@ -81,7 +103,7 @@ Stable versions and what tagID to use for a particular stable version
are discussed on `this page <https://www.lammps.org/bug.html#version>`_.
Note that this command will print some warnings, because in order to get
back to the latest revision and to be able to update with ``git pull``
again, you will need to do ``git checkout unstable`` (or
again, you will need to do ``git checkout release`` (or
check out any other desired branch) first.
Once you have updated your local files with a ``git pull`` (or ``git
@ -137,9 +159,9 @@ changed. How to do this depends on the build system you are using.
.. admonition:: Git protocols
:class: note
The servers at github.com support the "git://" and "https://" access
protocols for anonymous, read-only access. If you have a suitably
configured GitHub account, you may also use SSH protocol with the
The servers at github.com support the "https://" access protocol for
anonymous, read-only access. If you have a suitably configured GitHub
account, you may also use SSH protocol with the
URL "git@github.com:lammps/lammps.git".
The LAMMPS GitHub project is currently managed by Axel Kohlmeyer

View File

@ -16,7 +16,7 @@ source code design, the program structure, the spatial decomposition
approach, the neighbor finding, basic communications algorithms, and how
users and developers have contributed to LAMMPS is:
`LAMMPS - A flexible simulation tool for particle-based materials modeling at the atomic, meso, and continuum scales, Comp. Phys. Comm. (accepted 09/2021), DOI:10.1016/j.cpc.2021.108171 <https://doi.org/10.1016/j.cpc.2021.108171>`_
`LAMMPS - A flexible simulation tool for particle-based materials modeling at the atomic, meso, and continuum scales, Comp. Phys. Comm. 271, 108171 (2022) <https://doi.org/10.1016/j.cpc.2021.108171>`_
So a project using LAMMPS or a derivative application that uses LAMMPS
as a simulation engine should cite this paper. The paper is expected to

View File

@ -19,7 +19,7 @@ software and open-source distribution, see `www.gnu.org <gnuorg_>`_
or `www.opensource.org <opensource_>`_. The legal text of the GPL as it
applies to LAMMPS is in the LICENSE file included in the LAMMPS distribution.
.. _gpl: https://github.com/lammps/lammps/blob/master/LICENSE
.. _gpl: https://github.com/lammps/lammps/blob/develop/LICENSE
.. _lgpl: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html

View File

@ -7,26 +7,34 @@ correctly and reliably at all times. You can follow its development
in a public `git repository on GitHub <https://github.com/lammps/lammps>`_.
Whenever we fix a bug or update or add a feature, it will be merged into
the `master` branch of the git repository. When a sufficient number of
the *develop* branch of the git repository. When a sufficient number of
changes have accumulated *and* the software passes a set of automated
tests, we release it in the next *patch* release, which are made every
few weeks. Info on patch releases are on `this website page
few weeks. The *release* branch of the git repository is updated with
every such release. Info on patch releases are on `this website page
<https://www.lammps.org/bug.html>`_.
Once or twice a year, only bug fixes and small, non-intrusive changes are
included for a period of time, and the code is subjected to more detailed
Once or twice a year, we apply only bug fixes and small, non-intrusive
changes to the *develop* branch and the code is subjected to more detailed
and thorough testing than the default automated testing. The latest
patch release after such a period is then labeled as a *stable* version.
patch release after such a period is then also labeled as a *stable* version
and the *stable* branch is updated with it. Between stable releases
we occasionally release some updates to the stable release containing
only bug fixes and updates back-ported from *develop* but no new features
and update the *stable* branch accordingly.
Each version of LAMMPS contains all the features and bug-fixes up to
and including its version date.
Each version of LAMMPS contains all the documented features up to and
including its version date.
The version date is printed to the screen and logfile every time you
run LAMMPS. It is also in the file src/version.h and in the LAMMPS
directory name created when you unpack a tarball. And it is on the
first page of the :doc:`manual <Manual>`.
* If you browse the HTML pages on the LAMMPS WWW site, they always
describe the most current patch release of LAMMPS.
* If you browse the HTML pages on the LAMMPS WWW site, they will by
default describe the most current patch release version of LAMMPS.
In the navigation bar on the bottom left, there is the option to
view instead the documentation for the most recent *stable* version
or the latest version from the current development branch.
* If you browse the HTML pages included in your tarball, they
describe the version you have, which may be older.

View File

@ -12,24 +12,24 @@ includes some optional methods to enable its use with rRESPA.
Here is a brief description of the class methods in pair.h:
+---------------------------------+-------------------------------------------------------------------+
| compute | workhorse routine that computes pairwise interactions |
+---------------------------------+-------------------------------------------------------------------+
| settings | reads the input script line with arguments you define |
+---------------------------------+-------------------------------------------------------------------+
| coeff | set coefficients for one i,j type pair |
+---------------------------------+-------------------------------------------------------------------+
| init_one | perform initialization for one i,j type pair |
+---------------------------------+-------------------------------------------------------------------+
| init_style | initialization specific to this pair style |
+---------------------------------+-------------------------------------------------------------------+
| write & read_restart | write/read i,j pair coeffs to restart files |
+---------------------------------+-------------------------------------------------------------------+
| write & read_restart_settings | write/read global settings to restart files |
+---------------------------------+-------------------------------------------------------------------+
| single | force and energy of a single pairwise interaction between 2 atoms |
+---------------------------------+-------------------------------------------------------------------+
| compute_inner/middle/outer | versions of compute used by rRESPA |
+---------------------------------+-------------------------------------------------------------------+
+---------------------------------+---------------------------------------------------------------------+
| compute | workhorse routine that computes pairwise interactions |
+---------------------------------+---------------------------------------------------------------------+
| settings | reads the input script line with arguments you define |
+---------------------------------+---------------------------------------------------------------------+
| coeff | set coefficients for one i,j type pair |
+---------------------------------+---------------------------------------------------------------------+
| init_one | perform initialization for one i,j type pair |
+---------------------------------+---------------------------------------------------------------------+
| init_style | initialization specific to this pair style |
+---------------------------------+---------------------------------------------------------------------+
| write & read_restart | write/read i,j pair coeffs to restart files |
+---------------------------------+---------------------------------------------------------------------+
| write & read_restart_settings | write/read global settings to restart files |
+---------------------------------+---------------------------------------------------------------------+
| single | force/r and energy of a single pairwise interaction between 2 atoms |
+---------------------------------+---------------------------------------------------------------------+
| compute_inner/middle/outer | versions of compute used by rRESPA |
+---------------------------------+---------------------------------------------------------------------+
The inner/middle/outer routines are optional.

View File

@ -1011,7 +1011,9 @@ can be disabled with the :code:`checksum` parameter.
Restrictions
""""""""""""
none
*dump_modify sort* is not supported for dumps of groups containing
more than 2 billion atoms.
Related commands
""""""""""""""""

View File

@ -310,7 +310,7 @@ Forschungszentrum Juelich.
The library is available for download at "http://scafacos.de" or can
be cloned from the git-repository
"git://github.com/scafacos/scafacos.git".
"https://github.com/scafacos/scafacos.git".
In order to use this KSpace style, you must download and build the
ScaFaCoS library, then build LAMMPS with the SCAFACOS package

View File

@ -205,7 +205,7 @@ For *damping mass_velocity*, the normal damping is given by:
\eta_n = \eta_{n0} m_{eff}
Here, :math:`\eta_{n0}` is the damping coefficient specified for the normal
contact model, in units of *mass*\ /\ *time* and
contact model, in units of 1/\ *time* and
:math:`m_{eff} = m_i m_j/(m_i + m_j)` is the effective mass.
Use *damping mass_velocity* to reproduce the damping behavior of
*pair gran/hooke/\**.

View File

@ -26,15 +26,29 @@ Examples
Description
"""""""""""
The *lebedeva/z* style computes the Lebedeva interaction
potential as described in :ref:`(Lebedeva et al.) <Leb01>`. An important simplification is made,
which is to take all normals along the z-axis.
The *lebedeva/z* pair style computes the Lebedeva interaction potential
as described in :ref:`(Lebedeva1) <Leb01>` and :ref:`(Lebedeva2)
<Leb02>`. An important simplification is made, which is to take all
normals along the z-axis.
The Lebedeva potential is intended for the description of the interlayer
interaction between graphene layers. To perform a realistic simulation,
this potential must be used in combination with an intralayer potential
such as :doc:`AIREBO <pair_airebo>` or :doc:`Tersoff <pair_tersoff>`
facilitated by using pair style :doc:`hybrid/overlay <pair_hybrid>`. To
keep the intralayer properties unaffected, the interlayer interaction
within the same layers should be avoided. This can be achieved by
assigning different atom types to atoms of different layers (e.g. 1 and
2 in the examples above).
Other interactions can be set to zero using pair_style *none*\ .
.. math::
E = & \frac{1}{2} \sum_i \sum_{i \neq j} V_{ij}\\
E = & \frac{1}{2} \sum_i \sum_{j \neq i} V_{ij}\\
V_{ij} = & B e^{-\alpha(r_{ij} - z_0)} \\
& + C(1 + D_1\rho^2_{ij} + D_2\rho^4_{ij} e^{-\lambda_1\rho^2_{ij}} e^{-\lambda_2 (z^2_{ij} - z^2_0)} \\
& + C(1 + D_1\rho^2_{ij} + D_2\rho^4_{ij}) e^{-\lambda_1\rho^2_{ij}} e^{-\lambda_2 (z^2_{ij} - z^2_0)} \\
& - A \left(\frac{z_0}{r_{ij}}\right)^6 + A \left( \frac{z_0}{r_c} \right)^6 \\
\rho^2_{ij} = & x^2_{ij} + y^2_{ij} \qquad (\mathbf{n_i} \equiv \mathbf{\hat{z}})
@ -43,12 +57,15 @@ Energies are shifted so that they go continuously to zero at the cutoff assuming
that the exponential part of :math:`V_{ij}` (first term) decays sufficiently fast.
This shift is achieved by the last term in the equation for :math:`V_{ij}` above.
The parameter file (e.g. CC.Lebedeva), is intended for use with metal
:doc:`units <units>`, with energies in meV. An additional parameter, *S*,
is available to facilitate scaling of energies.
The provided parameter file (CC.Lebedeva) contains two sets of parameters.
This potential must be used in combination with hybrid/overlay.
Other interactions can be set to zero using pair_style *none*\ .
- The first set (element name "C") is suitable for normal conditions and
is taken from :ref:`(Popov1) <Popov>`
- The second set (element name "C1") is suitable for high-pressure
conditions and is taken from :ref:`(Koziol1) <Koziol>`
Both sets contain an additional parameter, *S*, that can be used to
facilitate scaling of energies and is set to 1.0 by default.
Restrictions
""""""""""""
@ -77,4 +94,16 @@ none
.. _Leb01:
**(Lebedeva et al.)** I. V. Lebedeva, A. A. Knizhnik, A. M. Popov, Y. E. Lozovik, B. V. Potapkin, Phys. Rev. B, 84, 245437 (2011)
**(Lebedeva1)** I. V. Lebedeva, A. A. Knizhnik, A. M. Popov, Y. E. Lozovik, B. V. Potapkin, Phys. Rev. B, 84, 245437 (2011)
.. _Leb02:
**(Lebedeva2)** I. V. Lebedeva, A. A. Knizhnik, A. M. Popov, Y. E. Lozovik, B. V. Potapkin, Physica E: 44, 949-954 (2012)
.. _Popov:
**(Popov1)** A.M. Popov, I. V. Lebedeva, A. A. Knizhnik, Y. E. Lozovik and B. V. Potapkin, Chem. Phys. Lett. 536, 82-86 (2012).
.. _Koziol:
**(Koziol1)** Z. Koziol, G. Gawlik and J. Jagielski, Chinese Phys. B 28, 096101 (2019).

View File

@ -26,23 +26,25 @@ Examples
Description
"""""""""""
The local density (LD) potential is a mean-field manybody potential, and, in some
sense, a generalization of embedded atom models (EAM). The name "local density
potential" arises from the fact that it assigns an energy to an atom depending
on the number of neighboring atoms of given type around it within a predefined
spherical volume (i.e., within a cutoff). The bottom-up coarse-graining (CG)
literature suggests that such potentials can be widely useful in capturing
effective multibody forces in a computationally efficient manner so as to
improve the quality of CG models of implicit solvation:ref:`(Sanyal1) <Sanyal1>` and
phase-segregation in liquid mixtures:ref:`(Sanyal2) <Sanyal2>`, and provide guidelines
to determine the extent of manybody correlations present in a CG
model.:ref:`(Rosenberger) <Rosenberger>` The LD potential in LAMMPS is primarily
intended to be used as a corrective potential over traditional pair potentials
in bottom-up CG models, i.e., as a hybrid pair style with
other explicit pair interaction terms (e.g., table spline, Lennard Jones, etc.).
Because the LD potential is not a pair potential per se, it is implemented
simply as a single auxiliary file with all specifications that will be read
upon initialization.
The local density (LD) potential is a mean-field manybody potential,
and, in some way, a generalization of embedded atom models (EAM). The
name "local density potential" arises from the fact that it assigns an
energy to an atom depending on the number of neighboring atoms of a
given type around it within a predefined spherical volume (i.e., within
the cutoff). The bottom-up coarse-graining (CG) literature suggests
that such potentials can be widely useful in capturing effective
multibody forces in a computationally efficient manner and thus improve
the quality of CG models of implicit solvation :ref:`(Sanyal1)
<Sanyal1>` and phase-segregation in liquid mixtures :ref:`(Sanyal2)
<Sanyal2>`, and provide guidelines to determine the extent of manybody
correlations present in a CG model :ref:`(Rosenberger) <Rosenberger>`.
The LD potential in LAMMPS is primarily intended to be used as a
corrective potential over traditional pair potentials in bottom-up CG
models via :doc:`hybrid/overlay pair style <pair_hybrid>` with other
explicit pair interaction terms (e.g., tabulated, Lennard-Jones, Morse
etc.). Because the LD potential is not a pair potential per se, it is
implemented simply as a single auxiliary file with all specifications
that will be read upon initialization.
.. note::

View File

@ -126,11 +126,11 @@ and *compute_energy*, which both take 3 numerical arguments:
* itype = the (numerical) type of the first atom
* jtype = the (numerical) type of the second atom
These functions need to compute the force and the energy, respectively,
and use the result as return value. The functions need to use the
*pmap* dictionary to convert the LAMMPS atom type number to the symbolic
value of the internal potential parameter data structure. Following
the *LJCutMelt* example, here are the two functions:
These functions need to compute the (scaled) force and the energy,
respectively, and use the result as return value. The functions need
to use the *pmap* dictionary to convert the LAMMPS atom type number
to the symbolic value of the internal potential parameter data structure.
Following the *LJCutMelt* example, here are the two functions:
.. code-block:: python
@ -154,10 +154,10 @@ the *LJCutMelt* example, here are the two functions:
for consistency with the C++ pair styles in LAMMPS, the
*compute_force* function follows the conventions of the Pair::single()
methods and does not return the full force, but the force scaled by
the distance between the two atoms, so this value only needs to be
multiplied by delta x, delta y, and delta z to conveniently obtain the
three components of the force vector between these two atoms.
methods and does not return the pairwise force directly, but the force
divided by the distance between the two atoms, so this value only needs
to be multiplied by delta x, delta y, and delta z to conveniently obtain
the three components of the force vector between these two atoms.
----------

View File

@ -1,6 +1,6 @@
Sphinx==4.0.3
Sphinx==4.2.0
sphinxcontrib-spelling==7.2.1
git+git://github.com/akohlmey/sphinx-fortran@parallel-read
git+https://github.com/akohlmey/sphinx-fortran@parallel-read
sphinx_tabs==3.2.0
breathe==4.31.0
Pygments==2.10.0

View File

@ -1122,6 +1122,7 @@ gaussian
gaussians
Gaussians
Gavhane
Gawlik
gayberne
gcc
gcmc
@ -1476,6 +1477,7 @@ Izz
Jacobsen
Jadhao
Jadhav
Jagielski
jagreat
Jahn
Jalalvand
@ -1602,6 +1604,7 @@ Koslowski
Kosovan
Koster
Kosztin
Koziol
Kp
kradius
Kraker

View File

@ -1,267 +0,0 @@
LAMMPS (7 Aug 2019)
# LAMMPS input file for 26.5% benzene mole fraction solution
# with 380 benzene and 1000 water molecules,
# using all possible local density potentials
# between benzene and water
#
# Author: Tanmoy Sanyal, Shell Group, UC Santa Barbara
#
# Refer: Sanyal and Shell, JPC-B, 2018, 122 (21), 5678-5693
# Initialize simulation box
dimension 3
boundary p p p
units real
atom_style molecular
# Set potential styles
pair_style hybrid/overlay table spline 500 local/density
# Read molecule data and set initial velocities
read_data benzene_water.data
orthogonal box = (-12.865 -12.865 -64.829) to (12.865 12.865 64.829)
1 by 1 by 8 MPI processor grid
reading atoms ...
1380 atoms
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
special bonds CPU = 0.000566959 secs
read_data CPU = 0.00661397 secs
velocity all create 3.0000e+02 16611 rot yes dist gaussian
# Assign potentials
pair_coeff 1 1 table benzene_water.pair.table PairBB
WARNING: 33 of 500 force values in table are inconsistent with -dE/dr.
Should only be flagged at inflection points (../pair_table.cpp:483)
WARNING: 150 of 500 distance values in table with relative error
over 1e-06 to re-computed values (../pair_table.cpp:492)
pair_coeff 1 2 table benzene_water.pair.table PairWW
WARNING: 61 of 500 force values in table are inconsistent with -dE/dr.
Should only be flagged at inflection points (../pair_table.cpp:483)
WARNING: 90 of 500 distance values in table with relative error
over 1e-06 to re-computed values (../pair_table.cpp:492)
pair_coeff 2 2 table benzene_water.pair.table PairBW
WARNING: 108 of 500 force values in table are inconsistent with -dE/dr.
Should only be flagged at inflection points (../pair_table.cpp:483)
WARNING: 135 of 500 distance values in table with relative error
over 1e-06 to re-computed values (../pair_table.cpp:492)
pair_coeff * * local/density benzene_water.localdensity.table
# Recentering during minimization and equilibration
fix recentering all recenter 0.0 0.0 0.0 units box
# Thermostat & time integration
timestep 2.0
thermo 100
thermo_style custom temp ke pe etotal ebond eangle edihed evdwl
# Minimization
minimize 1.e-4 0.0 10000 10000
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (../min.cpp:168)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 15.25
ghost atom cutoff = 15.25
binsize = 7.625, bins = 4 4 18
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair table, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
(2) pair local/density, perpetual, copy from (1)
attributes: half, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 8.061 | 8.32 | 8.674 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
300 1233.1611 4162.3053 5395.4665 0 0 0 4162.3053
300 1233.1611 2275.526 3508.6871 0 0 0 2275.526
Loop time of 0.352822 on 8 procs for 40 steps with 1380 atoms
71.3% CPU use with 8 MPI tasks x no OpenMP threads
Minimization stats:
Stopping criterion = linesearch alpha is zero
Energy initial, next-to-last, final =
4162.30533361 2208.86525108 2275.52597861
Force two-norm initial, final = 259.364 69.3915
Force max component initial, final = 22.2077 8.31436
Final line search alpha, max atom move = 2.90022e-12 2.41135e-11
Iterations, force evaluations = 40 110
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.053192 | 0.23903 | 0.32779 | 17.2 | 67.75
Bond | 9.0599e-06 | 1.6302e-05 | 2.5272e-05 | 0.0 | 0.00
Neigh | 0.00044513 | 0.0023614 | 0.0063851 | 5.1 | 0.67
Comm | 0.015469 | 0.090432 | 0.20295 | 20.0 | 25.63
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.02098 | | | 5.95
Nlocal: 172.5 ave 348 max 72 min
Histogram: 5 0 0 0 0 0 0 0 1 2
Nghost: 2193.62 ave 4352 max 932 min
Histogram: 3 0 0 2 0 0 2 0 0 1
Neighs: 9700.5 ave 20535 max 3685 min
Histogram: 5 0 0 0 0 0 0 1 0 2
Total # of neighbors = 77604
Ave neighs/atom = 56.2348
Ave special neighs/atom = 0
Neighbor list builds = 2
Dangerous builds = 0
# Set up integration parameters
fix timeintegration all nve
fix thermostat all langevin 3.0000e+02 3.0000e+02 1.0000e+02 81890
# Equilibration (for realistic results, run for 5000000 steps)
reset_timestep 0
run 5000
WARNING: Fix recenter should come after all other integration fixes (../fix_recenter.cpp:131)
Per MPI rank memory allocation (min/avg/max) = 6.936 | 7.195 | 7.552 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
300 1233.1611 2866.9109 4100.0721 0 0 0 2866.9109
273.33541 1123.5553 3983.2007 5106.756 0 0 0 3983.2007
293.68078 1207.1857 3319.6601 4526.8458 0 0 0 3319.6601
314.21462 1291.5908 3389.2178 4680.8086 0 0 0 3389.2178
323.77563 1330.8917 3332.9828 4663.8745 0 0 0 3332.9828
302.5902 1243.8082 3461.7692 4705.5774 0 0 0 3461.7692
295.39324 1214.2249 3411.5727 4625.7976 0 0 0 3411.5727
320.52341 1317.5234 3453.1931 4770.7164 0 0 0 3453.1931
312.00777 1282.5195 3403.3443 4685.8638 0 0 0 3403.3443
307.96774 1265.9128 3429.7809 4695.6937 0 0 0 3429.7809
294.75922 1211.6187 3388.8404 4600.4591 0 0 0 3388.8404
311.24567 1279.3869 3514.9603 4794.3472 0 0 0 3514.9603
306.6152 1260.3531 3447.2011 4707.5542 0 0 0 3447.2011
305.23306 1254.6718 3375.5092 4630.181 0 0 0 3375.5092
321.62889 1322.0675 3460.2581 4782.3256 0 0 0 3460.2581
316.37725 1300.4804 3437.0312 4737.5116 0 0 0 3437.0312
322.90522 1327.3139 3389.1262 4716.44 0 0 0 3389.1262
307.57893 1264.3146 3359.8491 4624.1637 0 0 0 3359.8491
302.22607 1242.3115 3406.1711 4648.4826 0 0 0 3406.1711
302.73997 1244.4239 3220.2582 4464.6821 0 0 0 3220.2582
303.66194 1248.2137 3318.4629 4566.6765 0 0 0 3318.4629
308.73862 1269.0815 3369.5894 4638.671 0 0 0 3369.5894
315.60294 1297.2976 3411.2405 4708.5381 0 0 0 3411.2405
310.0113 1274.3129 3360.1054 4634.4183 0 0 0 3360.1054
302.36229 1242.8714 3326.9845 4569.8559 0 0 0 3326.9845
317.78659 1306.2735 3355.4976 4661.7711 0 0 0 3355.4976
302.50479 1243.4571 3317.6846 4561.1417 0 0 0 3317.6846
304.29249 1250.8056 3423.5068 4674.3124 0 0 0 3423.5068
305.99948 1257.8222 3432.9395 4690.7617 0 0 0 3432.9395
309.93363 1273.9937 3393.657 4667.6506 0 0 0 3393.657
316.14884 1299.5415 3463.0636 4762.6051 0 0 0 3463.0636
300.38817 1234.7567 3309.2495 4544.0062 0 0 0 3309.2495
311.05735 1278.6128 3304.4418 4583.0546 0 0 0 3304.4418
311.11872 1278.865 3291.1891 4570.0542 0 0 0 3291.1891
315.74338 1297.8749 3341.3063 4639.1812 0 0 0 3341.3063
297.5658 1223.1552 3316.3862 4539.5414 0 0 0 3316.3862
311.79033 1281.6257 3357.4556 4639.0813 0 0 0 3357.4556
310.93666 1278.1167 3414.7694 4692.8861 0 0 0 3414.7694
307.37298 1263.468 3337.3889 4600.8569 0 0 0 3337.3889
298.84185 1228.4005 3329.6173 4558.0178 0 0 0 3329.6173
310.54684 1276.5143 3351.0852 4627.5995 0 0 0 3351.0852
300.0871 1233.5191 3302.2315 4535.7506 0 0 0 3302.2315
304.69078 1252.4427 3324.2508 4576.6935 0 0 0 3324.2508
313.50714 1288.6827 3330.4088 4619.0915 0 0 0 3330.4088
329.80018 1355.6559 3301.86 4657.5159 0 0 0 3301.86
304.57609 1251.9713 3365.2938 4617.2652 0 0 0 3365.2938
308.73584 1269.0701 3344.4155 4613.4856 0 0 0 3344.4155
306.90951 1261.5629 3304.4698 4566.0327 0 0 0 3304.4698
308.85761 1269.5707 3392.1511 4661.7218 0 0 0 3392.1511
302.78788 1244.6208 3317.0849 4561.7057 0 0 0 3317.0849
321.68092 1322.2813 3321.5755 4643.8568 0 0 0 3321.5755
Loop time of 16.3061 on 8 procs for 5000 steps with 1380 atoms
Performance: 52.986 ns/day, 0.453 hours/ns, 306.634 timesteps/s
69.6% CPU use with 8 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.1872 | 10.542 | 14.607 | 116.7 | 64.65
Bond | 0.00044084 | 0.00069669 | 0.00095081 | 0.0 | 0.00
Neigh | 0.026948 | 0.15225 | 0.44344 | 42.0 | 0.93
Comm | 0.63452 | 4.2953 | 9.49 | 133.9 | 26.34
Output | 0.0016391 | 0.012378 | 0.050919 | 13.9 | 0.08
Modify | 0.45894 | 1.2107 | 4.4629 | 116.4 | 7.42
Other | | 0.09292 | | | 0.57
Nlocal: 172.5 ave 380 max 70 min
Histogram: 5 0 0 0 0 0 0 1 1 1
Nghost: 2213 ave 4440 max 903 min
Histogram: 3 0 0 2 0 0 2 0 0 1
Neighs: 10042.5 ave 24051 max 3500 min
Histogram: 5 0 0 0 0 0 0 1 1 1
Total # of neighbors = 80340
Ave neighs/atom = 58.2174
Ave special neighs/atom = 0
Neighbor list builds = 123
Dangerous builds = 1
# Turn off recentering during production phase
unfix recentering
# Setup trajectory output
dump myDump all custom 100 benzene_water.lammpstrj.gz id type x y z element
dump_modify myDump element B W
dump_modify myDump sort id
# Production (for realistic results, run for 10000000 steps)
reset_timestep 0
run 1000
Per MPI rank memory allocation (min/avg/max) = 8.232 | 8.492 | 8.851 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
321.68092 1322.2813 3784.0834 5106.3647 0 0 0 3784.0834
310.59763 1276.7231 3318.3283 4595.0513 0 0 0 3318.3283
303.39445 1247.1141 3324.1191 4571.2332 0 0 0 3324.1191
311.37275 1279.9092 3305.0901 4584.9993 0 0 0 3305.0901
311.29071 1279.572 3248.216 4527.788 0 0 0 3248.216
314.53456 1292.906 3283.4563 4576.3623 0 0 0 3283.4563
316.52595 1301.0916 3258.9171 4560.0087 0 0 0 3258.9171
318.92447 1310.9509 3235.6256 4546.5765 0 0 0 3235.6256
311.79212 1281.6331 3308.099 4589.7321 0 0 0 3308.099
305.52477 1255.8709 3267.6907 4523.5616 0 0 0 3267.6907
301.07457 1237.5782 3206.3997 4443.9779 0 0 0 3206.3997
Loop time of 4.44139 on 8 procs for 1000 steps with 1380 atoms
Performance: 38.907 ns/day, 0.617 hours/ns, 225.155 timesteps/s
60.8% CPU use with 8 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.656 | 2.5078 | 3.5775 | 57.7 | 56.46
Bond | 0.00013375 | 0.0001854 | 0.0002377 | 0.0 | 0.00
Neigh | 0.0048757 | 0.029188 | 0.090432 | 18.9 | 0.66
Comm | 0.51836 | 1.4427 | 2.6285 | 56.9 | 32.48
Output | 0.083084 | 0.089199 | 0.10333 | 2.3 | 2.01
Modify | 0.0087376 | 0.019705 | 0.038437 | 8.4 | 0.44
Other | | 0.3526 | | | 7.94
Nlocal: 172.5 ave 388 max 69 min
Histogram: 5 0 0 0 0 0 0 2 0 1
Nghost: 2207.88 ave 4429 max 896 min
Histogram: 3 0 0 2 0 0 2 0 0 1
Neighs: 10094.1 ave 24847 max 3403 min
Histogram: 5 0 0 0 0 0 1 1 0 1
Total # of neighbors = 80753
Ave neighs/atom = 58.5167
Ave special neighs/atom = 0
Neighbor list builds = 23
Dangerous builds = 0
Total wall time: 0:00:21

View File

@ -0,0 +1,300 @@
LAMMPS (27 Oct 2021)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# LAMMPS input file for 26.5% benzene mole fraction solution
# with 380 benzene and 1000 water molecules,
# using all possible local density potentials
# between benzene and water
#
# Author: Tanmoy Sanyal, Shell Group, UC Santa Barbara
#
# Refer: Sanyal and Shell, JPC-B, 2018, 122 (21), 5678-5693
# Initialize simulation box
dimension 3
boundary p p p
units real
atom_style molecular
# Set potential styles
pair_style hybrid/overlay table spline 500 local/density
# Read molecule data and set initial velocities
read_data benzene_water.data
Reading data file ...
orthogonal box = (-12.865000 -12.865000 -64.829000) to (12.865000 12.865000 64.829000)
1 by 1 by 1 MPI processor grid
reading atoms ...
1380 atoms
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0 0 0
special bond factors coul: 0 0 0
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
special bonds CPU = 0.000 seconds
read_data CPU = 0.006 seconds
velocity all create 3.0000e+02 16611 rot yes dist gaussian
# Assign potentials
pair_coeff 1 1 table benzene_water.pair.table PairBB
WARNING: 33 of 500 force values in table PairBB are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 150 of 500 distance values in table 1e-06 with relative error
WARNING: over PairBB to re-computed values (src/pair_table.cpp:473)
pair_coeff 1 2 table benzene_water.pair.table PairWW
WARNING: 61 of 500 force values in table PairWW are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 90 of 500 distance values in table 1e-06 with relative error
WARNING: over PairWW to re-computed values (src/pair_table.cpp:473)
pair_coeff 2 2 table benzene_water.pair.table PairBW
WARNING: 108 of 500 force values in table PairBW are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 135 of 500 distance values in table 1e-06 with relative error
WARNING: over PairBW to re-computed values (src/pair_table.cpp:473)
pair_coeff * * local/density benzene_water.localdensity.table
# Recentering during minimization and equilibration
fix recentering all recenter 0.0 0.0 0.0 units box
# Thermostat & time integration
timestep 2.0
thermo 100
thermo_style custom temp ke pe etotal ebond eangle edihed evdwl
# Minimization
minimize 1.e-4 0.0 10000 10000
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- pair_style local/density command:
@Article{Sanyal16,
author = {T.Sanyal and M.Scott Shell},
title = {Coarse-grained models using local-density potentials optimized with the relative entropy: Application to implicit solvation},
journal = {J.~Chem.~Phys.},
year = 2016,
DOI = doi.org/10.1063/1.4958629}
@Article{Sanyal18,
author = {T.Sanyal and M.Scott Shell},
title = {Transferable coarse-grained models of liquid-liquid equilibrium using local density potentials optimized with the relative entropy},
journal = {J.~Phys.~Chem. B},
year = 2018,
DOI = doi.org/10.1021/acs.jpcb.7b12446}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:187)
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 15.25
ghost atom cutoff = 15.25
binsize = 7.625, bins = 4 4 18
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair table, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d
bin: standard
(2) pair local/density, perpetual, copy from (1)
attributes: half, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 8.754 | 8.754 | 8.754 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
300 1233.1611 2374.6749 3607.836 0 0 0 2374.6749
300 1233.1611 985.54829 2218.7094 0 0 0 985.54829
300 1233.1611 962.66036 2195.8215 0 0 0 962.66036
Loop time of 0.812343 on 1 procs for 134 steps with 1380 atoms
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
2374.67491482358 962.664796664787 962.660357218268
Force two-norm initial, final = 263.77519 15.741017
Force max component initial, final = 22.412654 7.9360139
Final line search alpha, max atom move = 0.014975513 0.11884588
Iterations, force evaluations = 134 240
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.78539 | 0.78539 | 0.78539 | 0.0 | 96.68
Bond | 2.0149e-05 | 2.0149e-05 | 2.0149e-05 | 0.0 | 0.00
Neigh | 0.016759 | 0.016759 | 0.016759 | 0.0 | 2.06
Comm | 0.0045 | 0.0045 | 0.0045 | 0.0 | 0.55
Output | 2.9402e-05 | 2.9402e-05 | 2.9402e-05 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.005647 | | | 0.70
Nlocal: 1380.00 ave 1380 max 1380 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 5832.00 ave 5832 max 5832 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 78165.0 ave 78165 max 78165 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 78165
Ave neighs/atom = 56.641304
Ave special neighs/atom = 0.0000000
Neighbor list builds = 5
Dangerous builds = 0
# Set up integration parameters
fix timeintegration all nve
fix thermostat all langevin 3.0000e+02 3.0000e+02 1.0000e+02 81890
# Equilibration (for realistic results, run for 5000000 steps)
reset_timestep 0
run 5000
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
WARNING: Fix recenter should come after all other integration fixes (src/fix_recenter.cpp:133)
Per MPI rank memory allocation (min/avg/max) = 7.629 | 7.629 | 7.629 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
300 1233.1611 962.66036 2195.8215 0 0 0 962.66036
253.1913 1040.7522 1803.711 2844.4633 0 0 0 1803.711
290.31049 1193.332 2059.0637 3252.3958 0 0 0 2059.0637
299.30778 1230.3157 2140.226 3370.5417 0 0 0 2140.226
309.81524 1273.507 2178.3782 3451.8853 0 0 0 2178.3782
299.79526 1232.3195 2229.9248 3462.2444 0 0 0 2229.9248
299.24909 1230.0745 2260.7129 3490.7874 0 0 0 2260.7129
299.5898 1231.475 2244.2384 3475.7134 0 0 0 2244.2384
297.81223 1224.1682 2320.27 3544.4382 0 0 0 2320.27
301.53975 1239.4903 2277.0431 3516.5334 0 0 0 2277.0431
292.00572 1200.3003 2292.3073 3492.6076 0 0 0 2292.3073
309.19709 1270.9661 2303.6055 3574.5716 0 0 0 2303.6055
297.54933 1223.0876 2304.127 3527.2146 0 0 0 2304.127
303.48106 1247.4702 2303.5673 3551.0375 0 0 0 2303.5673
296.46047 1218.6118 2256.1591 3474.7709 0 0 0 2256.1591
299.4835 1231.038 2280.0452 3511.0832 0 0 0 2280.0452
306.25958 1258.8914 2307.9795 3566.8709 0 0 0 2307.9795
304.67335 1252.3711 2284.8252 3537.1963 0 0 0 2284.8252
298.33637 1226.3227 2289.8499 3516.1726 0 0 0 2289.8499
303.1338 1246.0427 2342.2148 3588.2575 0 0 0 2342.2148
305.86051 1257.251 2341.0106 3598.2616 0 0 0 2341.0106
297.75418 1223.9296 2303.5613 3527.4909 0 0 0 2303.5613
296.79348 1219.9806 2327.5207 3547.5013 0 0 0 2327.5207
307.25403 1262.9791 2288.4219 3551.401 0 0 0 2288.4219
301.26976 1238.3805 2291.2465 3529.627 0 0 0 2291.2465
297.17249 1221.5385 2283.3926 3504.9311 0 0 0 2283.3926
313.99072 1290.6705 2293.9661 3584.6366 0 0 0 2293.9661
301.70804 1240.1821 2331.1694 3571.3515 0 0 0 2331.1694
300.62599 1235.7343 2325.4367 3561.171 0 0 0 2325.4367
292.13495 1200.8316 2315.631 3516.4626 0 0 0 2315.631
313.9981 1290.7008 2286.0536 3576.7545 0 0 0 2286.0536
300.25311 1234.2015 2324.2379 3558.4394 0 0 0 2324.2379
309.3746 1271.6958 2322.2298 3593.9256 0 0 0 2322.2298
300.23041 1234.1082 2332.7521 3566.8603 0 0 0 2332.7521
302.97054 1245.3716 2303.1689 3548.5405 0 0 0 2303.1689
294.77155 1211.6694 2334.5087 3546.1781 0 0 0 2334.5087
296.81476 1220.0681 2322.5932 3542.6613 0 0 0 2322.5932
301.83238 1240.6932 2345.4841 3586.1773 0 0 0 2345.4841
295.0399 1212.7724 2312.3889 3525.1614 0 0 0 2312.3889
300.73565 1236.185 2338.8384 3575.0235 0 0 0 2338.8384
303.02264 1245.5858 2310.0868 3555.6726 0 0 0 2310.0868
302.86404 1244.9339 2332.2001 3577.134 0 0 0 2332.2001
293.77916 1207.5901 2293.2799 3500.8701 0 0 0 2293.2799
299.30072 1230.2867 2317.5065 3547.7933 0 0 0 2317.5065
311.05029 1278.5837 2311.0476 3589.6313 0 0 0 2311.0476
293.25646 1205.4416 2314.7398 3520.1814 0 0 0 2314.7398
310.49018 1276.2814 2337.4909 3613.7723 0 0 0 2337.4909
302.37336 1242.9169 2340.3197 3583.2366 0 0 0 2340.3197
297.06862 1221.1116 2323.9136 3545.0252 0 0 0 2323.9136
300.54817 1235.4144 2315.2405 3550.6549 0 0 0 2315.2405
309.10643 1270.5934 2333.1848 3603.7783 0 0 0 2333.1848
Loop time of 15.2696 on 1 procs for 5000 steps with 1380 atoms
Performance: 56.583 ns/day, 0.424 hours/ns, 327.447 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 14.432 | 14.432 | 14.432 | 0.0 | 94.51
Bond | 0.00032375 | 0.00032375 | 0.00032375 | 0.0 | 0.00
Neigh | 0.41541 | 0.41541 | 0.41541 | 0.0 | 2.72
Comm | 0.0975 | 0.0975 | 0.0975 | 0.0 | 0.64
Output | 0.0013044 | 0.0013044 | 0.0013044 | 0.0 | 0.01
Modify | 0.30336 | 0.30336 | 0.30336 | 0.0 | 1.99
Other | | 0.01973 | | | 0.13
Nlocal: 1380.00 ave 1380 max 1380 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 5843.00 ave 5843 max 5843 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 76949.0 ave 76949 max 76949 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 76949
Ave neighs/atom = 55.760145
Ave special neighs/atom = 0.0000000
Neighbor list builds = 121
Dangerous builds = 1
# Turn off recentering during production phase
unfix recentering
# Setup trajectory output
dump myDump all custom 100 benzene_water.lammpstrj.gz id type x y z element
dump_modify myDump element B W
dump_modify myDump sort id
# Production (for realistic results, run for 10000000 steps)
reset_timestep 0
run 1000
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 9.022 | 9.022 | 9.022 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
309.10643 1270.5934 2333.1848 3603.7783 0 0 0 2333.1848
300.84572 1236.6375 2331.3493 3567.9868 0 0 0 2331.3493
300.90599 1236.8852 2337.6775 3574.5627 0 0 0 2337.6775
302.77895 1244.5841 2341.7778 3586.362 0 0 0 2341.7778
291.66639 1198.9055 2320.3512 3519.2567 0 0 0 2320.3512
298.7003 1227.8187 2292.8195 3520.6382 0 0 0 2292.8195
301.11163 1237.7305 2310.017 3547.7475 0 0 0 2310.017
305.22515 1254.6393 2315.1355 3569.7748 0 0 0 2315.1355
295.15921 1213.2629 2310.184 3523.4468 0 0 0 2310.184
299.2024 1229.8826 2332.2118 3562.0943 0 0 0 2332.2118
302.80078 1244.6738 2320.3763 3565.0502 0 0 0 2320.3763
Loop time of 3.07208 on 1 procs for 1000 steps with 1380 atoms
Performance: 56.249 ns/day, 0.427 hours/ns, 325.512 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.8993 | 2.8993 | 2.8993 | 0.0 | 94.37
Bond | 6.5327e-05 | 6.5327e-05 | 6.5327e-05 | 0.0 | 0.00
Neigh | 0.083502 | 0.083502 | 0.083502 | 0.0 | 2.72
Comm | 0.019967 | 0.019967 | 0.019967 | 0.0 | 0.65
Output | 0.012268 | 0.012268 | 0.012268 | 0.0 | 0.40
Modify | 0.052801 | 0.052801 | 0.052801 | 0.0 | 1.72
Other | | 0.004203 | | | 0.14
Nlocal: 1380.00 ave 1380 max 1380 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 5860.00 ave 5860 max 5860 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 77055.0 ave 77055 max 77055 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 77055
Ave neighs/atom = 55.836957
Ave special neighs/atom = 0.0000000
Neighbor list builds = 24
Dangerous builds = 0
Total wall time: 0:00:19

View File

@ -0,0 +1,299 @@
LAMMPS (27 Oct 2021)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# LAMMPS input file for 26.5% benzene mole fraction solution
# with 380 benzene and 1000 water molecules,
# using all possible local density potentials
# between benzene and water
#
# Author: Tanmoy Sanyal, Shell Group, UC Santa Barbara
#
# Refer: Sanyal and Shell, JPC-B, 2018, 122 (21), 5678-5693
# Initialize simulation box
dimension 3
boundary p p p
units real
atom_style molecular
# Set potential styles
pair_style hybrid/overlay table spline 500 local/density
# Read molecule data and set initial velocities
read_data benzene_water.data
Reading data file ...
orthogonal box = (-12.865000 -12.865000 -64.829000) to (12.865000 12.865000 64.829000)
1 by 1 by 4 MPI processor grid
reading atoms ...
1380 atoms
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0 0 0
special bond factors coul: 0 0 0
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
special bonds CPU = 0.000 seconds
read_data CPU = 0.007 seconds
velocity all create 3.0000e+02 16611 rot yes dist gaussian
# Assign potentials
pair_coeff 1 1 table benzene_water.pair.table PairBB
WARNING: 33 of 500 force values in table PairBB are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 150 of 500 distance values in table PairBB with relative error
WARNING: over 1e-06 to re-computed values (src/pair_table.cpp:473)
pair_coeff 1 2 table benzene_water.pair.table PairWW
WARNING: 61 of 500 force values in table PairWW are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 90 of 500 distance values in table PairWW with relative error
WARNING: over 1e-06 to re-computed values (src/pair_table.cpp:473)
pair_coeff 2 2 table benzene_water.pair.table PairBW
WARNING: 108 of 500 force values in table PairBW are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 135 of 500 distance values in table PairBW with relative error
WARNING: over 1e-06 to re-computed values (src/pair_table.cpp:473)
pair_coeff * * local/density benzene_water.localdensity.table
# Recentering during minimization and equilibration
fix recentering all recenter 0.0 0.0 0.0 units box
# Thermostat & time integration
timestep 2.0
thermo 100
thermo_style custom temp ke pe etotal ebond eangle edihed evdwl
# Minimization
minimize 1.e-4 0.0 10000 10000
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- pair_style local/density command:
@Article{Sanyal16,
author = {T.Sanyal and M.Scott Shell},
title = {Coarse-grained models using local-density potentials optimized with the relative entropy: Application to implicit solvation},
journal = {J.~Chem.~Phys.},
year = 2016,
DOI = doi.org/10.1063/1.4958629}
@Article{Sanyal18,
author = {T.Sanyal and M.Scott Shell},
title = {Transferable coarse-grained models of liquid-liquid equilibrium using local density potentials optimized with the relative entropy},
journal = {J.~Phys.~Chem. B},
year = 2018,
DOI = doi.org/10.1021/acs.jpcb.7b12446}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:187)
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 15.25
ghost atom cutoff = 15.25
binsize = 7.625, bins = 4 4 18
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair table, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d
bin: standard
(2) pair local/density, perpetual, copy from (1)
attributes: half, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 8.441 | 8.589 | 8.688 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
300 1233.1611 2374.6749 3607.836 0 0 0 2374.6749
300 1233.1611 1024.8113 2257.9724 0 0 0 1024.8113
Loop time of 0.240559 on 4 procs for 74 steps with 1380 atoms
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
2374.67491482358 1024.89407898645 1024.81130011575
Force two-norm initial, final = 263.77519 20.459697
Force max component initial, final = 22.412654 8.6082349
Final line search alpha, max atom move = 0.027790997 0.23923143
Iterations, force evaluations = 74 118
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.15928 | 0.1873 | 0.22814 | 6.5 | 77.86
Bond | 3.857e-06 | 4.4012e-06 | 5.496e-06 | 0.0 | 0.00
Neigh | 0.00064142 | 0.0028761 | 0.0058864 | 4.2 | 1.20
Comm | 0.0040776 | 0.039595 | 0.074187 | 12.6 | 16.46
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.01078 | | | 4.48
Nlocal: 345.000 ave 664 max 147 min
Histogram: 2 0 0 0 0 1 0 0 0 1
Nghost: 2850.50 ave 4438 max 1208 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Neighs: 19377.5 ave 37718 max 7456 min
Histogram: 2 0 0 0 0 1 0 0 0 1
Total # of neighbors = 77510
Ave neighs/atom = 56.166667
Ave special neighs/atom = 0.0000000
Neighbor list builds = 3
Dangerous builds = 0
# Set up integration parameters
fix timeintegration all nve
fix thermostat all langevin 3.0000e+02 3.0000e+02 1.0000e+02 81890
# Equilibration (for realistic results, run for 5000000 steps)
reset_timestep 0
run 5000
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
WARNING: Fix recenter should come after all other integration fixes (src/fix_recenter.cpp:133)
Per MPI rank memory allocation (min/avg/max) = 7.316 | 7.465 | 7.563 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
300 1233.1611 1024.8113 2257.9724 0 0 0 1024.8113
263.61917 1083.6164 1866.745 2950.3614 0 0 0 1866.745
296.0253 1216.823 2122.8463 3339.6692 0 0 0 2122.8463
301.93846 1241.1292 2172.9802 3414.1095 0 0 0 2172.9802
293.9491 1208.2887 2205.4892 3413.7779 0 0 0 2205.4892
286.33795 1177.0027 2204.8908 3381.8935 0 0 0 2204.8908
295.48217 1214.5904 2230.8849 3445.4753 0 0 0 2230.8849
293.88908 1208.0419 2218.7563 3426.7982 0 0 0 2218.7563
295.13798 1213.1756 2277.4515 3490.6271 0 0 0 2277.4515
290.39538 1193.681 2273.4385 3467.1195 0 0 0 2273.4385
297.56782 1223.1635 2268.7182 3491.8817 0 0 0 2268.7182
306.45578 1259.6978 2289.1507 3548.8486 0 0 0 2289.1507
308.54582 1268.289 2284.8514 3553.1404 0 0 0 2284.8514
302.17353 1242.0955 2262.5577 3504.6532 0 0 0 2262.5577
295.30087 1213.8452 2315.8853 3529.7305 0 0 0 2315.8853
308.59197 1268.4787 2291.8314 3560.3101 0 0 0 2291.8314
297.75618 1223.9378 2287.2003 3511.1381 0 0 0 2287.2003
303.43395 1247.2765 2297.7158 3544.9923 0 0 0 2297.7158
307.16233 1262.6021 2255.9769 3518.5791 0 0 0 2255.9769
301.34428 1238.6868 2284.416 3523.1028 0 0 0 2284.416
295.43209 1214.3846 2294.1043 3508.4889 0 0 0 2294.1043
287.86904 1183.2963 2257.0204 3440.3168 0 0 0 2257.0204
297.2661 1221.9233 2251.4194 3473.3428 0 0 0 2251.4194
298.90221 1228.6486 2261.834 3490.4826 0 0 0 2261.834
288.07202 1184.1307 2284.1918 3468.3225 0 0 0 2284.1918
300.41201 1234.8547 2303.9573 3538.812 0 0 0 2303.9573
283.91279 1167.034 2329.7936 3496.8277 0 0 0 2329.7936
297.27507 1221.9602 2337.0516 3559.0118 0 0 0 2337.0516
296.22263 1217.6341 2335.6424 3553.2765 0 0 0 2335.6424
296.13784 1217.2856 2364.7034 3581.989 0 0 0 2364.7034
308.17642 1266.7706 2320.2753 3587.0459 0 0 0 2320.2753
310.26592 1275.3596 2301.9318 3577.2914 0 0 0 2301.9318
292.97391 1204.2801 2289.8116 3494.0917 0 0 0 2289.8116
294.81231 1211.8369 2315.0388 3526.8757 0 0 0 2315.0388
298.66155 1227.6594 2317.2844 3544.9437 0 0 0 2317.2844
302.77939 1244.5859 2301.2063 3545.7922 0 0 0 2301.2063
291.47597 1198.1228 2285.1757 3483.2985 0 0 0 2285.1757
286.19045 1176.3964 2265.2665 3441.6629 0 0 0 2265.2665
295.58144 1214.9984 2272.3165 3487.315 0 0 0 2272.3165
283.86988 1166.8577 2320.6142 3487.4719 0 0 0 2320.6142
300.0576 1233.3979 2330.8962 3564.2941 0 0 0 2330.8962
299.86413 1232.6026 2321.2281 3553.8308 0 0 0 2321.2281
292.79017 1203.5248 2334.2308 3537.7557 0 0 0 2334.2308
291.5027 1198.2327 2335.2119 3533.4446 0 0 0 2335.2119
299.55471 1231.3307 2332.5216 3563.8524 0 0 0 2332.5216
293.29613 1205.6046 2295.3263 3500.9309 0 0 0 2295.3263
303.13151 1246.0333 2310.0548 3556.0881 0 0 0 2310.0548
298.83954 1228.391 2297.3117 3525.7027 0 0 0 2297.3117
297.44775 1222.67 2307.2483 3529.9183 0 0 0 2307.2483
309.59874 1272.6171 2309.2439 3581.861 0 0 0 2309.2439
307.47844 1263.9015 2274.998 3538.8995 0 0 0 2274.998
Loop time of 11.2235 on 4 procs for 5000 steps with 1380 atoms
Performance: 76.982 ns/day, 0.312 hours/ns, 445.495 timesteps/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 7.1444 | 8.5074 | 10.534 | 44.9 | 75.80
Bond | 0.00017048 | 0.00020672 | 0.00030488 | 0.0 | 0.00
Neigh | 0.026174 | 0.12108 | 0.26052 | 28.2 | 1.08
Comm | 0.21788 | 1.8597 | 3.3375 | 81.2 | 16.57
Output | 0.0008989 | 0.0069895 | 0.021647 | 10.2 | 0.06
Modify | 0.19418 | 0.7044 | 2.1378 | 98.6 | 6.28
Other | | 0.02368 | | | 0.21
Nlocal: 345.000 ave 678 max 148 min
Histogram: 2 0 0 0 1 0 0 0 0 1
Nghost: 2854.25 ave 4464 max 1181 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Neighs: 19366.8 ave 38533 max 7481 min
Histogram: 2 0 0 0 0 1 0 0 0 1
Total # of neighbors = 77467
Ave neighs/atom = 56.135507
Ave special neighs/atom = 0.0000000
Neighbor list builds = 121
Dangerous builds = 1
# Turn off recentering during production phase
unfix recentering
# Setup trajectory output
dump myDump all custom 100 benzene_water.lammpstrj.gz id type x y z element
dump_modify myDump element B W
dump_modify myDump sort id
# Production (for realistic results, run for 10000000 steps)
reset_timestep 0
run 1000
generated 0 of 1 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 8.640 | 8.791 | 8.894 Mbytes
Temp KinEng PotEng TotEng E_bond E_angle E_dihed E_vdwl
307.47844 1263.9015 2274.998 3538.8995 0 0 0 2274.998
309.46142 1272.0526 2274.8499 3546.9026 0 0 0 2274.8499
300.70977 1236.0787 2301.0588 3537.1374 0 0 0 2301.0588
300.53659 1235.3668 2316.1008 3551.4675 0 0 0 2316.1008
300.48582 1235.1581 2296.3009 3531.459 0 0 0 2296.3009
299.2618 1230.1267 2325.7501 3555.8768 0 0 0 2325.7501
303.00905 1245.5299 2321.8238 3567.3537 0 0 0 2321.8238
300.07018 1233.4496 2339.2833 3572.7329 0 0 0 2339.2833
304.20292 1250.4374 2353.1018 3603.5392 0 0 0 2353.1018
304.19487 1250.4043 2334.5087 3584.913 0 0 0 2334.5087
294.24283 1209.4961 2335.0535 3544.5496 0 0 0 2335.0535
Loop time of 2.90512 on 4 procs for 1000 steps with 1380 atoms
Performance: 59.481 ns/day, 0.403 hours/ns, 344.220 timesteps/s
98.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.8627 | 2.2082 | 2.7289 | 22.6 | 76.01
Bond | 4.042e-05 | 5.3677e-05 | 8.4044e-05 | 0.0 | 0.00
Neigh | 0.0066184 | 0.030172 | 0.064523 | 13.9 | 1.04
Comm | 0.05914 | 0.51145 | 0.86887 | 40.7 | 17.61
Output | 0.0057814 | 0.0073478 | 0.011158 | 2.6 | 0.25
Modify | 0.0085337 | 0.020869 | 0.042248 | 9.4 | 0.72
Other | | 0.127 | | | 4.37
Nlocal: 345.000 ave 682 max 147 min
Histogram: 2 0 0 0 1 0 0 0 0 1
Nghost: 2836.25 ave 4427 max 1175 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Neighs: 19249.8 ave 38683 max 7433 min
Histogram: 2 0 0 0 1 0 0 0 0 1
Total # of neighbors = 76999
Ave neighs/atom = 55.796377
Ave special neighs/atom = 0.0000000
Neighbor list builds = 23
Dangerous builds = 0
Total wall time: 0:00:14

View File

@ -1,226 +0,0 @@
LAMMPS (7 Aug 2019)
# LAMMPS input file for 50.0% methanol mole fraction solution
# with 2500 methanol molecules in implicit water.
#
#
# Author: David Rosenberger, van der Vegt Group, TU Darmstadt
#
# Refer: Rosenberger, Sanyal, Shell, van der Vegt, J. Chem. Theory Comput. 15, 2881-2895 (2019)
# Initialize simulation box
dimension 3
boundary p p p
units real
atom_style molecular
# Set potential styles
pair_style hybrid/overlay table spline 500 local/density
# Read molecule data and set initial velocities
read_data methanol_implicit_water.data
orthogonal box = (-31.123 -31.123 -31.123) to (31.123 31.123 31.123)
2 by 2 by 2 MPI processor grid
reading atoms ...
2500 atoms
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
special bonds CPU = 0.00063014 secs
read_data CPU = 0.00599909 secs
velocity all create 3.0000e+02 12142 rot yes dist gaussian
# Assign potentials
pair_coeff 1 1 table methanol_implicit_water.pair.table PairMM
WARNING: 93 of 500 force values in table are inconsistent with -dE/dr.
Should only be flagged at inflection points (../pair_table.cpp:483)
WARNING: 254 of 500 distance values in table with relative error
over 1e-06 to re-computed values (../pair_table.cpp:492)
pair_coeff * * local/density methanol_implicit_water.localdensity.table
#Recentering during minimization and equilibration
fix recentering all recenter 0.0 0.0 0.0 units box
#Thermostat & time integration
timestep 1.0
thermo 100
thermo_style custom etotal ke pe temp evdwl
#minimization
minimize 1.e-4 0.0 1000 1000
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (../min.cpp:168)
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 17
ghost atom cutoff = 17
binsize = 8.5, bins = 8 8 8
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair table, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
(2) pair local/density, perpetual, copy from (1)
attributes: half, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 7.411 | 7.411 | 7.412 Mbytes
TotEng KinEng PotEng Temp E_vdwl
1470.3564 2234.7133 -764.35689 300 -764.35689
46.496766 2234.7133 -2188.2165 300 -2188.2165
7.9030246 2234.7133 -2226.8103 300 -2226.8103
Loop time of 0.463996 on 8 procs for 121 steps with 2500 atoms
91.4% CPU use with 8 MPI tasks x no OpenMP threads
Minimization stats:
Stopping criterion = linesearch alpha is zero
Energy initial, next-to-last, final =
-764.356892369 -2227.85589084 -2226.81026984
Force two-norm initial, final = 134.911 3.83896
Force max component initial, final = 14.1117 1.07422
Final line search alpha, max atom move = 5.06747e-10 5.44356e-10
Iterations, force evaluations = 121 154
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.41442 | 0.41976 | 0.42434 | 0.5 | 90.47
Bond | 1.1683e-05 | 2.0713e-05 | 3.5048e-05 | 0.0 | 0.00
Neigh | 0.0084722 | 0.0090862 | 0.010038 | 0.5 | 1.96
Comm | 0.022712 | 0.028157 | 0.034072 | 1.9 | 6.07
Output | 3.1948e-05 | 3.6925e-05 | 6.6996e-05 | 0.0 | 0.01
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.006937 | | | 1.50
Nlocal: 312.5 ave 333 max 299 min
Histogram: 2 2 0 0 1 0 2 0 0 1
Nghost: 2546 ave 2580 max 2517 min
Histogram: 1 1 0 3 0 1 0 0 0 2
Neighs: 33215.4 ave 37251 max 29183 min
Histogram: 1 0 0 1 2 2 0 1 0 1
Total # of neighbors = 265723
Ave neighs/atom = 106.289
Ave special neighs/atom = 0
Neighbor list builds = 6
Dangerous builds = 0
#set up integration parameters
fix timeintegration all nve
fix thermostat all langevin 3.0000e+02 3.0000e+02 1.0000e+02 59915
#Equilibration (for realistic results, run for 2000000 steps)
reset_timestep 0
thermo 200
thermo_style custom etotal ke pe temp evdwl
#run equilibration
run 2000
WARNING: Fix recenter should come after all other integration fixes (../fix_recenter.cpp:131)
Per MPI rank memory allocation (min/avg/max) = 6.286 | 6.286 | 6.287 Mbytes
TotEng KinEng PotEng Temp E_vdwl
177.26822 2234.7133 -2057.4451 300 -2057.4451
736.24287 2151.2608 -1415.0179 288.79688 -1415.0179
963.07617 2090.6433 -1127.5671 280.65926 -1127.5671
1148.9049 2173.1327 -1024.2279 291.73309 -1024.2279
1303.6409 2279.8586 -976.21767 306.06055 -976.21767
1355.42 2281.0383 -925.61826 306.21892 -925.61826
1394.5206 2276.2093 -881.68863 305.57064 -881.68863
1346.9764 2215.2973 -868.32091 297.3935 -868.32091
1381.3654 2248.8061 -867.44063 301.89189 -867.44063
1315.8059 2189.3193 -873.51332 293.90606 -873.51332
1314.4456 2209.7431 -895.29752 296.64787 -895.29752
Loop time of 6.38989 on 8 procs for 2000 steps with 2500 atoms
Performance: 27.043 ns/day, 0.887 hours/ns, 312.994 timesteps/s
80.5% CPU use with 8 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.2693 | 5.3572 | 5.457 | 2.1 | 83.84
Bond | 0.00028825 | 0.00033835 | 0.00039148 | 0.0 | 0.01
Neigh | 0.0296 | 0.032337 | 0.035071 | 0.9 | 0.51
Comm | 0.64679 | 0.73397 | 0.80847 | 5.2 | 11.49
Output | 0.00033498 | 0.00051582 | 0.0015228 | 0.0 | 0.01
Modify | 0.16395 | 0.18919 | 0.21056 | 3.9 | 2.96
Other | | 0.07636 | | | 1.19
Nlocal: 312.5 ave 337 max 295 min
Histogram: 2 2 0 1 0 0 0 1 1 1
Nghost: 2551.62 ave 2582 max 2525 min
Histogram: 2 1 0 0 1 1 1 0 1 1
Neighs: 33241.8 ave 37659 max 29705 min
Histogram: 2 0 0 2 2 0 0 0 1 1
Total # of neighbors = 265934
Ave neighs/atom = 106.374
Ave special neighs/atom = 0
Neighbor list builds = 21
Dangerous builds = 0
#turn off recentering during production run
unfix recentering
#setup trajectory output
dump myDump all custom 100 methanol_implicit_water.lammpstrj.gz id type x y z element
dump_modify myDump element M
dump_modify myDump sort id
#run production (for realistic results, run for 10000000 steps)
reset_timestep 0
thermo 1000
thermo_style custom etotal ke pe temp evdwl
run 10000
Per MPI rank memory allocation (min/avg/max) = 7.588 | 7.589 | 7.589 Mbytes
TotEng KinEng PotEng Temp E_vdwl
1442.5428 2209.7431 -767.20027 296.64787 -767.20027
1391.8624 2262.6889 -870.82656 303.7556 -870.82656
1375.914 2244.6176 -868.7036 301.3296 -868.7036
1345.9064 2227.2324 -881.32599 298.99573 -881.32599
1379.2334 2278.1156 -898.88222 305.82657 -898.88222
1389.7928 2255.8062 -866.01341 302.83163 -866.01341
1380.4549 2258.2108 -877.75582 303.15443 -877.75582
1380.8489 2256.9432 -876.09428 302.98426 -876.09428
1326.5151 2225.7408 -899.22577 298.79549 -899.22577
1376.6025 2253.0128 -876.41028 302.45662 -876.41028
1331.0008 2218.1033 -887.10258 297.77019 -887.10258
Loop time of 25.4591 on 8 procs for 10000 steps with 2500 atoms
Performance: 33.937 ns/day, 0.707 hours/ns, 392.787 timesteps/s
89.3% CPU use with 8 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 21.635 | 21.916 | 22.237 | 3.9 | 86.08
Bond | 0.0011308 | 0.0013149 | 0.0016932 | 0.5 | 0.01
Neigh | 0.14593 | 0.15675 | 0.16667 | 1.9 | 0.62
Comm | 1.3789 | 1.7502 | 1.9558 | 13.7 | 6.87
Output | 0.34664 | 0.82927 | 1.2013 | 32.8 | 3.26
Modify | 0.24904 | 0.25842 | 0.26907 | 1.2 | 1.02
Other | | 0.5475 | | | 2.15
Nlocal: 312.5 ave 327 max 298 min
Histogram: 2 0 0 1 1 0 1 1 1 1
Nghost: 2575 ave 2601 max 2559 min
Histogram: 2 0 3 1 0 0 0 0 1 1
Neighs: 33223.2 ave 35920 max 30303 min
Histogram: 1 1 1 1 0 1 0 0 0 3
Total # of neighbors = 265786
Ave neighs/atom = 106.314
Ave special neighs/atom = 0
Neighbor list builds = 103
Dangerous builds = 0
Total wall time: 0:00:32

View File

@ -0,0 +1,259 @@
LAMMPS (27 Oct 2021)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# LAMMPS input file for 50.0% methanol mole fraction solution
# with 2500 methanol molecules in implicit water.
#
#
# Author: David Rosenberger, van der Vegt Group, TU Darmstadt
#
# Refer: Rosenberger, Sanyal, Shell, van der Vegt, J. Chem. Theory Comput. 15, 2881-2895 (2019)
# Initialize simulation box
dimension 3
boundary p p p
units real
atom_style molecular
# Set potential styles
pair_style hybrid/overlay table spline 500 local/density
# Read molecule data and set initial velocities
read_data methanol_implicit_water.data
Reading data file ...
orthogonal box = (-31.123000 -31.123000 -31.123000) to (31.123000 31.123000 31.123000)
1 by 1 by 1 MPI processor grid
reading atoms ...
2500 atoms
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0 0 0
special bond factors coul: 0 0 0
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
special bonds CPU = 0.001 seconds
read_data CPU = 0.016 seconds
velocity all create 3.0000e+02 12142 rot yes dist gaussian
# Assign potentials
pair_coeff 1 1 table methanol_implicit_water.pair.table PairMM
WARNING: 93 of 500 force values in table PairMM are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 254 of 500 distance values in table PairMM with relative error
WARNING: over 1e-06 to re-computed values (src/pair_table.cpp:473)
pair_coeff * * local/density methanol_implicit_water.localdensity.table
#Recentering during minimization and equilibration
fix recentering all recenter 0.0 0.0 0.0 units box
#Thermostat & time integration
timestep 1.0
thermo 100
thermo_style custom etotal ke pe temp evdwl
#minimization
minimize 1.e-4 0.0 1000 1000
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- pair_style local/density command:
@Article{Sanyal16,
author = {T.Sanyal and M.Scott Shell},
title = {Coarse-grained models using local-density potentials optimized with the relative entropy: Application to implicit solvation},
journal = {J.~Chem.~Phys.},
year = 2016,
DOI = doi.org/10.1063/1.4958629}
@Article{Sanyal18,
author = {T.Sanyal and M.Scott Shell},
title = {Transferable coarse-grained models of liquid-liquid equilibrium using local density potentials optimized with the relative entropy},
journal = {J.~Phys.~Chem. B},
year = 2018,
DOI = doi.org/10.1021/acs.jpcb.7b12446}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:187)
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 17
ghost atom cutoff = 17
binsize = 8.5, bins = 8 8 8
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair table, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d
bin: standard
(2) pair local/density, perpetual, copy from (1)
attributes: half, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 9.535 | 9.535 | 9.535 Mbytes
TotEng KinEng PotEng Temp E_vdwl
1283.8556 2234.7133 -950.85771 300 -950.85771
-10.187232 2234.7133 -2244.9005 300 -2244.9005
-124.79406 2234.7133 -2359.5074 300 -2359.5074
-126.7619 2234.7133 -2361.4752 300 -2361.4752
Loop time of 3.74581 on 1 procs for 205 steps with 2500 atoms
99.5% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-950.857712502514 -2361.24417962983 -2361.47519428972
Force two-norm initial, final = 135.25170 2.8038329
Force max component initial, final = 14.083102 1.1154133
Final line search alpha, max atom move = 0.16981022 0.18940857
Iterations, force evaluations = 205 223
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.5678 | 3.5678 | 3.5678 | 0.0 | 95.25
Bond | 7.5831e-05 | 7.5831e-05 | 7.5831e-05 | 0.0 | 0.00
Neigh | 0.12962 | 0.12962 | 0.12962 | 0.0 | 3.46
Comm | 0.019204 | 0.019204 | 0.019204 | 0.0 | 0.51
Output | 0.00023948 | 0.00023948 | 0.00023948 | 0.0 | 0.01
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.02886 | | | 0.77
Nlocal: 2500.00 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6729.00 ave 6729 max 6729 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 265637.0 ave 265637 max 265637 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 265637
Ave neighs/atom = 106.25480
Ave special neighs/atom = 0.0000000
Neighbor list builds = 11
Dangerous builds = 0
#set up integration parameters
fix timeintegration all nve
fix thermostat all langevin 3.0000e+02 3.0000e+02 1.0000e+02 59915
#Equilibration (for realistic results, run for 2000000 steps)
reset_timestep 0
thermo 200
thermo_style custom etotal ke pe temp evdwl
#run equilibration
run 2000
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
WARNING: Fix recenter should come after all other integration fixes (src/fix_recenter.cpp:133)
Per MPI rank memory allocation (min/avg/max) = 8.410 | 8.410 | 8.410 Mbytes
TotEng KinEng PotEng Temp E_vdwl
-126.7619 2234.7133 -2361.4752 300 -2361.4752
517.05047 2015.8636 -1498.8131 270.62043 -1498.8131
931.78263 2135.4332 -1203.6506 286.6721 -1203.6506
1162.6209 2242.1662 -1079.5453 301.00051 -1079.5453
1164.2129 2211.6204 -1047.4075 296.89989 -1047.4075
1258.0085 2286.5942 -1028.5857 306.96477 -1028.5857
1231.1937 2200.814 -969.62032 295.44917 -969.62032
1251.2144 2245.0533 -993.83885 301.3881 -993.83885
1237.2495 2239.8802 -1002.6307 300.69363 -1002.6307
1232.3342 2224.3415 -992.00722 298.60763 -992.00722
1235.3228 2197.191 -961.86817 294.9628 -961.86817
Loop time of 23.6478 on 1 procs for 2000 steps with 2500 atoms
Performance: 7.307 ns/day, 3.284 hours/ns, 84.575 timesteps/s
99.5% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 22.797 | 22.797 | 22.797 | 0.0 | 96.40
Bond | 0.00070412 | 0.00070412 | 0.00070412 | 0.0 | 0.00
Neigh | 0.2249 | 0.2249 | 0.2249 | 0.0 | 0.95
Comm | 0.12259 | 0.12259 | 0.12259 | 0.0 | 0.52
Output | 0.00088925 | 0.00088925 | 0.00088925 | 0.0 | 0.00
Modify | 0.46447 | 0.46447 | 0.46447 | 0.0 | 1.96
Other | | 0.03711 | | | 0.16
Nlocal: 2500.00 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6752.00 ave 6752 max 6752 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 265940.0 ave 265940 max 265940 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 265940
Ave neighs/atom = 106.37600
Ave special neighs/atom = 0.0000000
Neighbor list builds = 20
Dangerous builds = 0
#turn off recentering during production run
unfix recentering
#setup trajectory output
dump myDump all custom 100 methanol_implicit_water.lammpstrj.gz id type x y z element
dump_modify myDump element M
dump_modify myDump sort id
#run production (for realistic results, run for 10000000 steps)
reset_timestep 0
thermo 1000
thermo_style custom etotal ke pe temp evdwl
run 10000
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 9.918 | 9.918 | 9.918 Mbytes
TotEng KinEng PotEng Temp E_vdwl
1235.3228 2197.191 -961.86817 294.9628 -961.86817
1289.8463 2236.1425 -946.29622 300.19186 -946.29622
1348.0825 2305.0295 -956.94703 309.43963 -956.94703
1279.5478 2241.1582 -961.61041 300.86521 -961.61041
1231.8597 2201.9591 -970.09949 295.60291 -970.09949
1277.3424 2221.3696 -944.02725 298.20867 -944.02725
1296.0116 2222.0998 -926.08818 298.3067 -926.08818
1266.2849 2206.3727 -940.08782 296.1954 -940.08782
1313.2808 2260.5077 -947.22683 303.46278 -947.22683
1309.3076 2234.3895 -925.08198 299.95654 -925.08198
1275.9792 2221.3037 -945.32449 298.19982 -945.32449
Loop time of 67.3224 on 1 procs for 10000 steps with 2500 atoms
Performance: 12.834 ns/day, 1.870 hours/ns, 148.539 timesteps/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 64.476 | 64.476 | 64.476 | 0.0 | 95.77
Bond | 0.0014504 | 0.0014504 | 0.0014504 | 0.0 | 0.00
Neigh | 0.71333 | 0.71333 | 0.71333 | 0.0 | 1.06
Comm | 0.32846 | 0.32846 | 0.32846 | 0.0 | 0.49
Output | 0.46997 | 0.46997 | 0.46997 | 0.0 | 0.70
Modify | 1.2336 | 1.2336 | 1.2336 | 0.0 | 1.83
Other | | 0.09996 | | | 0.15
Nlocal: 2500.00 ave 2500 max 2500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6662.00 ave 6662 max 6662 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 265774.0 ave 265774 max 265774 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 265774
Ave neighs/atom = 106.30960
Ave special neighs/atom = 0.0000000
Neighbor list builds = 104
Dangerous builds = 0
Total wall time: 0:01:34

View File

@ -0,0 +1,259 @@
LAMMPS (27 Oct 2021)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:98)
using 1 OpenMP thread(s) per MPI task
# LAMMPS input file for 50.0% methanol mole fraction solution
# with 2500 methanol molecules in implicit water.
#
#
# Author: David Rosenberger, van der Vegt Group, TU Darmstadt
#
# Refer: Rosenberger, Sanyal, Shell, van der Vegt, J. Chem. Theory Comput. 15, 2881-2895 (2019)
# Initialize simulation box
dimension 3
boundary p p p
units real
atom_style molecular
# Set potential styles
pair_style hybrid/overlay table spline 500 local/density
# Read molecule data and set initial velocities
read_data methanol_implicit_water.data
Reading data file ...
orthogonal box = (-31.123000 -31.123000 -31.123000) to (31.123000 31.123000 31.123000)
1 by 2 by 2 MPI processor grid
reading atoms ...
2500 atoms
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0 0 0
special bond factors coul: 0 0 0
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
special bonds CPU = 0.000 seconds
read_data CPU = 0.005 seconds
velocity all create 3.0000e+02 12142 rot yes dist gaussian
# Assign potentials
pair_coeff 1 1 table methanol_implicit_water.pair.table PairMM
WARNING: 93 of 500 force values in table PairMM are inconsistent with -dE/dr.
WARNING: Should only be flagged at inflection points (src/pair_table.cpp:465)
WARNING: 254 of 500 distance values in table 1e-06 with relative error
WARNING: over PairMM to re-computed values (src/pair_table.cpp:473)
pair_coeff * * local/density methanol_implicit_water.localdensity.table
#Recentering during minimization and equilibration
fix recentering all recenter 0.0 0.0 0.0 units box
#Thermostat & time integration
timestep 1.0
thermo 100
thermo_style custom etotal ke pe temp evdwl
#minimization
minimize 1.e-4 0.0 1000 1000
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
Your simulation uses code contributions which should be cited:
- pair_style local/density command:
@Article{Sanyal16,
author = {T.Sanyal and M.Scott Shell},
title = {Coarse-grained models using local-density potentials optimized with the relative entropy: Application to implicit solvation},
journal = {J.~Chem.~Phys.},
year = 2016,
DOI = doi.org/10.1063/1.4958629}
@Article{Sanyal18,
author = {T.Sanyal and M.Scott Shell},
title = {Transferable coarse-grained models of liquid-liquid equilibrium using local density potentials optimized with the relative entropy},
journal = {J.~Phys.~Chem. B},
year = 2018,
DOI = doi.org/10.1021/acs.jpcb.7b12446}
CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE-CITE
WARNING: Using 'neigh_modify every 1 delay 0 check yes' setting during minimization (src/min.cpp:187)
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 17
ghost atom cutoff = 17
binsize = 8.5, bins = 8 8 8
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair table, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d
bin: standard
(2) pair local/density, perpetual, copy from (1)
attributes: half, newton on
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 7.855 | 7.855 | 7.855 Mbytes
TotEng KinEng PotEng Temp E_vdwl
1283.8556 2234.7133 -950.85771 300 -950.85771
-10.187232 2234.7133 -2244.9005 300 -2244.9005
-124.3661 2234.7133 -2359.0794 300 -2359.0794
-146.7158 2234.7133 -2381.4291 300 -2381.4291
Loop time of 0.528503 on 4 procs for 244 steps with 2500 atoms
99.7% CPU use with 4 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = energy tolerance
Energy initial, next-to-last, final =
-950.857712502527 -2381.2294195605 -2381.42909821383
Force two-norm initial, final = 135.25170 2.3117934
Force max component initial, final = 14.083102 0.60833889
Final line search alpha, max atom move = 0.18347073 0.11161238
Iterations, force evaluations = 244 278
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.48518 | 0.48843 | 0.49223 | 0.4 | 92.42
Bond | 1.0084e-05 | 1.0861e-05 | 1.1483e-05 | 0.0 | 0.00
Neigh | 0.018199 | 0.019153 | 0.020036 | 0.5 | 3.62
Comm | 0.010229 | 0.014832 | 0.018994 | 2.6 | 2.81
Output | 3.7985e-05 | 4.2069e-05 | 5.3874e-05 | 0.0 | 0.01
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.006032 | | | 1.14
Nlocal: 625.000 ave 638 max 618 min
Histogram: 2 0 0 0 1 0 0 0 0 1
Nghost: 3613.75 ave 3640 max 3580 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Neighs: 66411.2 ave 70713 max 62416 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Total # of neighbors = 265645
Ave neighs/atom = 106.25800
Ave special neighs/atom = 0.0000000
Neighbor list builds = 13
Dangerous builds = 0
#set up integration parameters
fix timeintegration all nve
fix thermostat all langevin 3.0000e+02 3.0000e+02 1.0000e+02 59915
#Equilibration (for realistic results, run for 2000000 steps)
reset_timestep 0
thermo 200
thermo_style custom etotal ke pe temp evdwl
#run equilibration
run 2000
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
WARNING: Fix recenter should come after all other integration fixes (src/fix_recenter.cpp:133)
Per MPI rank memory allocation (min/avg/max) = 6.730 | 6.730 | 6.731 Mbytes
TotEng KinEng PotEng Temp E_vdwl
-146.7158 2234.7133 -2381.4291 300 -2381.4291
540.68168 2041.44 -1500.7584 274.05395 -1500.7584
945.4949 2163.7509 -1218.256 290.47363 -1218.256
1118.7729 2195.7579 -1076.985 294.77042 -1076.985
1215.0058 2233.2445 -1018.2387 299.80282 -1018.2387
1251.8045 2240.8439 -989.03944 300.823 -989.03944
1206.649 2149.5807 -942.93169 288.57134 -942.93169
1290.6111 2248.3623 -957.75117 301.83231 -957.75117
1312.8944 2219.147 -906.25264 297.9103 -906.25264
1260.002 2211.4176 -951.41561 296.87266 -951.41561
1335.0956 2270.1367 -935.04108 304.75543 -935.04108
Loop time of 3.56721 on 4 procs for 2000 steps with 2500 atoms
Performance: 48.441 ns/day, 0.495 hours/ns, 560.663 timesteps/s
99.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3122 | 3.3399 | 3.3633 | 1.0 | 93.63
Bond | 7.5941e-05 | 8.062e-05 | 8.7627e-05 | 0.0 | 0.00
Neigh | 0.03524 | 0.036666 | 0.037864 | 0.6 | 1.03
Comm | 0.080116 | 0.10444 | 0.13373 | 6.1 | 2.93
Output | 0.00019977 | 0.00022502 | 0.00029007 | 0.0 | 0.01
Modify | 0.077781 | 0.078206 | 0.078752 | 0.1 | 2.19
Other | | 0.007641 | | | 0.21
Nlocal: 625.000 ave 637 max 616 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Nghost: 3597.25 ave 3610 max 3586 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 66468.2 ave 69230 max 62721 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 265873
Ave neighs/atom = 106.34920
Ave special neighs/atom = 0.0000000
Neighbor list builds = 20
Dangerous builds = 0
#turn off recentering during production run
unfix recentering
#setup trajectory output
dump myDump all custom 100 methanol_implicit_water.lammpstrj.gz id type x y z element
dump_modify myDump element M
dump_modify myDump sort id
#run production (for realistic results, run for 10000000 steps)
reset_timestep 0
thermo 1000
thermo_style custom etotal ke pe temp evdwl
run 10000
generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 8.071 | 8.071 | 8.071 Mbytes
TotEng KinEng PotEng Temp E_vdwl
1335.0956 2270.1367 -935.04108 304.75543 -935.04108
1266.2305 2227.2123 -960.98186 298.99303 -960.98186
1304.2289 2238.1343 -933.90544 300.45925 -933.90544
1311.3201 2232.0862 -920.7661 299.64733 -920.7661
1289.9028 2241.3533 -951.45049 300.89139 -951.45049
1314.2234 2244.8514 -930.62797 301.361 -930.62797
1282.2744 2240.6716 -958.39719 300.79987 -958.39719
1239.302 2181.5711 -942.2691 292.86591 -942.2691
1327.0954 2242.6441 -915.54875 301.06468 -915.54875
1334.9799 2239.6841 -904.70423 300.66731 -904.70423
1320.6105 2263.4912 -942.88066 303.8633 -942.88066
Loop time of 23.3399 on 4 procs for 10000 steps with 2500 atoms
Performance: 37.018 ns/day, 0.648 hours/ns, 428.451 timesteps/s
99.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 21.343 | 21.606 | 21.766 | 3.7 | 92.57
Bond | 0.00045963 | 0.0004817 | 0.0005083 | 0.0 | 0.00
Neigh | 0.20708 | 0.22081 | 0.22733 | 1.7 | 0.95
Comm | 0.63014 | 0.80326 | 1.0801 | 19.8 | 3.44
Output | 0.11791 | 0.14443 | 0.22211 | 11.8 | 0.62
Modify | 0.37291 | 0.389 | 0.41719 | 2.7 | 1.67
Other | | 0.1761 | | | 0.75
Nlocal: 625.000 ave 636 max 613 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 3597.00 ave 3613 max 3580 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Neighs: 66408.5 ave 69186 max 61728 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Total # of neighbors = 265634
Ave neighs/atom = 106.25360
Ave special neighs/atom = 0.0000000
Neighbor list builds = 102
Dangerous builds = 0
Total wall time: 0:00:27

View File

@ -1,5 +1,5 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# Generic Linux Makefile for CUDA with the Multi-Process Service (MPS)
# - change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */

View File

@ -39,11 +39,9 @@ HIP_PLATFORM=$(shell $(HIP_PATH)/bin/hipconfig --platform)
HIP_COMPILER=$(shell $(HIP_PATH)/bin/hipconfig --compiler)
ifeq (hcc,$(HIP_PLATFORM))
HIP_OPTS += -ffast-math
# possible values: gfx803,gfx900,gfx906
HIP_ARCH = gfx906
else ifeq (amd,$(HIP_PLATFORM))
HIP_OPTS += -ffast-math
# possible values: gfx803,gfx900,gfx906
HIP_ARCH = gfx906
else ifeq (nvcc,$(HIP_PLATFORM))

View File

@ -1,5 +1,5 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
@ -13,7 +13,7 @@ endif
NVCC = nvcc
# obsolete hardware. not supported by current drivers anymore.
# obsolete hardware. not supported by current drivers and toolkits anymore.
#CUDA_ARCH = -arch=sm_13
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
@ -28,11 +28,11 @@ NVCC = nvcc
#CUDA_ARCH = -arch=sm_37
# Maxwell hardware
CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_52
# Pascal hardware
#CUDA_ARCH = -arch=sm_60
CUDA_ARCH = -arch=sm_60
#CUDA_ARCH = -arch=sm_61
# Volta hardware
@ -70,7 +70,7 @@ LIB_DIR = ./
AR = ar
BSH = /bin/sh
# GPU binning not recommended with modern GPUs
# GPU binning not recommended for most modern GPUs
CUDPP_OPT = #-DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile

View File

@ -1,6 +1,6 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# Generic Linux Makefile for CUDA complied for multiple compute capabilities
# - Add your GPU to CUDA_CODE
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps

1
lib/gpu/Makefile.mpi Symbolic link
View File

@ -0,0 +1 @@
Makefile.linux

View File

@ -1,5 +1,5 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# Generic Linux Makefile for CUDA without MPI libraries
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
@ -28,11 +28,11 @@ NVCC = nvcc
#CUDA_ARCH = -arch=sm_37
# Maxwell hardware
CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_52
# Pascal hardware
#CUDA_ARCH = -arch=sm_60
CUDA_ARCH = -arch=sm_60
#CUDA_ARCH = -arch=sm_61
# Volta hardware
@ -41,6 +41,10 @@ CUDA_ARCH = -arch=sm_50
# Turing hardware
#CUDA_ARCH = -arch=sm_75
# Ampere hardware
#CUDA_ARCH = -arch=sm_80
#CUDA_ARCH = -arch=sm_86
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL

View File

@ -1,23 +0,0 @@
NVCC = $(CUDA_HOME)/bin/nvcc
EXTRAMAKE = Makefile.lammps.standard
CUDA_ARCH = -arch=sm_75
CUDA_PRECISION = -D_SINGLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -Xlinker -rpath -Xlinker $(CUDA_HOME)/lib64 -lcudart
CUDA_OPTS = -DUNIX -O3 --use_fast_math --ftz=true
CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -I$(CUDA_HOME)/include
CUDR_OPTS = -O3 -ffast-math -funroll-loops -DMPI_GERYON -DLAMMPS_SMALLBIG
BIN_DIR = .
OBJ_DIR = obj
LIB_DIR = .
AR = ar
BSH = /bin/sh
# GPU binning not recommended with most modern GPUs
CUDPP_OPT = #-DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile

View File

@ -18,7 +18,7 @@
#endif
__kernel void kernel_cast_x(__global numtyp4 *restrict x_type,
const __global double *restrict x,
const __global numtyp *restrict x,
const __global int *restrict type,
const int nall) {
int ii=GLOBAL_ID_X;

View File

@ -475,7 +475,7 @@ class Atom {
UCL_Vector<numtyp,numtyp> v;
#ifdef GPU_CAST
UCL_Vector<double,double> x_cast;
UCL_Vector<numtyp,numtyp> x_cast;
UCL_Vector<int,int> type_cast;
#endif

View File

@ -1039,10 +1039,18 @@ Device<PRECISION,ACC_PRECISION> global_device;
using namespace LAMMPS_AL;
bool lmp_has_gpu_device()
// check if a suitable GPU is present.
// for mixed and double precision GPU library compilation
// also the GPU needs to support double precision.
bool lmp_has_compatible_gpu_device()
{
UCL_Device gpu;
return (gpu.num_platforms() > 0);
bool compatible_gpu = gpu.num_platforms() > 0;
#if defined(_SINGLE_DOUBLE) || defined(_DOUBLE_DOUBLE)
if (compatible_gpu && !gpu.double_precision(0))
compatible_gpu = false;
#endif
return compatible_gpu;
}
std::string lmp_gpu_device_info()

View File

@ -82,9 +82,9 @@ __kernel void k_zbl(const __global numtyp4 *restrict x_,
const __global numtyp4 *restrict coeff1,
const __global numtyp4 *restrict coeff2,
const __global numtyp4 *restrict coeff3,
const double cut_globalsq,
const double cut_innersq,
const double cut_inner,
const numtyp cut_globalsq,
const numtyp cut_innersq,
const numtyp cut_inner,
const int lj_types,
const __global int *dev_nbor,
const __global int *dev_packed,
@ -174,9 +174,9 @@ __kernel void k_zbl_fast(const __global numtyp4 *restrict x_,
const __global numtyp4 *restrict coeff1_in,
const __global numtyp4 *restrict coeff2_in,
const __global numtyp4 *restrict coeff3_in,
const double cut_globalsq,
const double cut_innersq,
const double cut_inner,
const numtyp cut_globalsq,
const numtyp cut_innersq,
const numtyp cut_inner,
const __global int *dev_nbor,
const __global int *dev_packed,
__global acctyp4 *restrict ans,

View File

@ -67,9 +67,9 @@ class ZBL : public BaseAtomic<numtyp, acctyp> {
/// If atom type constants fit in shared memory, use fast kernels
bool shared_types;
double _cut_globalsq;
double _cut_innersq;
double _cut_inner;
numtyp _cut_globalsq;
numtyp _cut_innersq;
numtyp _cut_inner;
/// Number of atom types
int _lj_types;

View File

@ -17,11 +17,12 @@ parser = ArgumentParser(prog='Install.py',
# settings
version = '3.3.9'
version = '3.4.0'
tarball = "eigen.tar.gz"
# known checksums for different Eigen versions. used to validate the download.
checksums = { \
'3.4.0' : '4c527a9171d71a72a9d4186e65bea559', \
'3.3.9' : '609286804b0f79be622ccf7f9ff2b660', \
'3.3.7' : '9e30f67e8531477de4117506fe44669b' \
}
@ -35,7 +36,7 @@ Syntax from src dir: make lib-smd args="-b"
Syntax from lib dir: python Install.py -b
or: python Install.py -p /usr/include/eigen3"
or: python Install.py -v 3.3.7 -b
or: python Install.py -v 3.4.0 -b
Example:
@ -77,7 +78,7 @@ if pathflag:
if buildflag:
print("Downloading Eigen ...")
eigentar = os.path.join(homepath, tarball)
url = "https://gitlab.com/libeigen/eigen/-/archive/%s/eigen-%s.tar.gz" % (version,version)
url = "https://download.lammps.org/thirdparty/eigen-%s.tar.gz" % version
geturl(url, eigentar)
# verify downloaded archive integrity via md5 checksum, if known.

View File

@ -26,6 +26,8 @@
#error "Unsupported QE coupling API. Want API version 1."
#endif
// we need to pass an MPI communicator to the LAMMPS library interface
#define LAMMPS_LIB_MPI
#include "library.h"
static const char delim[] = " \t\n\r";
@ -67,8 +69,8 @@ int main(int argc, char **argv)
#if 1 // AK: temporary hack
if ( qmmmcfg.nmm != 2 ) {
if (me == 0) {
fprintf( stderr, "\n Error in the number of processors for MM code"
"\n for the time being only two processor are allowed\n");
fprintf( stderr, "\n Error in the number of processors for the MM code.\n"
" Currently only requesting 2 MM processors is allowed.\n");
}
MPI_Finalize();
return -1;

View File

@ -1,12 +1,13 @@
# DATE: 2018-11-28 UNITS: metal CONTRIBUTOR: Zbigniew Koziol softquake@gmail.com CITATION: Z. Koziol et al.: https://arxiv.org/abs/1803.05162
# DATE: 2021-11-04 UNITS: metal CONTRIBUTOR: Zbigniew Koziol softquake@gmail.com CITATION: Z. Koziol et al.: https://arxiv.org/abs/1803.05162
#
# Lebedeva Potential. https://doi.org/10.1016/j.physe.2011.07.018
# Lebedeva potential: https://doi.org/10.1039/C0CP02614J and https://doi.org/10.1016/j.physe.2011.07.018
# Parameters must be in this order as here, otherwise their values may be changed.
# Energies here are given in meV.
# The last one, S, is convenient for scaling the potential amplitude. S is a multiplication factor for A, B, C
# A B C z0 alpha D1 D2 lambda1 lambda2 S
# These are values according to Levedeva et al
#C C 10.510 11.6523.34 35.883 3.34 4.16 -0.86232 0.10049 0.48703 0.46445 1.0
# These are values according to Lebedeva et al.: https://doi.org/10.1016/j.cplett.2012.03.082
C C 10.510 11.652 29.5 3.34 4.16 -0.86232 0.10049 0.48703 0.46445 1.0
#
# These are values by Z. Koziol et al.: https://arxiv.org/abs/1803.05162
C C 14.558 21.204 1.8 3.198 4.16 -0.862 0.10049 0.6 0.4 1.0
C1 C1 14.558 21.204 1.8 3.198 4.16 -0.862 0.10049 0.6 0.4 1.0

View File

@ -43,7 +43,6 @@ class ComputeTempRotate : public Compute {
double memory_usage();
private:
int fix_dof;
double tfactor, masstotal;
double **vbiasall; // stored velocity bias for all atoms
int maxbias; // size of vbiasall array

View File

@ -176,20 +176,20 @@ void AngleGaussian::compute(int eflag, int vflag)
void AngleGaussian::allocate()
{
allocated = 1;
int n = atom->nangletypes;
int n = atom->nangletypes+1;
memory->create(nterms,n+1,"angle:nterms");
memory->create(angle_temperature,n+1,"angle:angle_temperature");
memory->create(nterms,n,"angle:nterms");
memory->create(angle_temperature,n,"angle:angle_temperature");
alpha = new double *[n+1];
width = new double *[n+1];
theta0 = new double *[n+1];
memset(alpha,0,sizeof(double)*(n+1));
memset(width,0,sizeof(double)*(n+1));
memset(theta0,0,sizeof(double)*(n+1));
alpha = new double*[n];
width = new double*[n];
theta0 = new double*[n];
memset(alpha,0,sizeof(double *)*n);
memset(width,0,sizeof(double *)*n);
memset(theta0,0,sizeof(double *)*n);
memory->create(setflag,n+1,"angle:setflag");
for (int i = 1; i <= n; i++) setflag[i] = 0;
memory->create(setflag,n,"angle:setflag");
memset(setflag,0,sizeof(int)*n);
}
/* ----------------------------------------------------------------------

View File

@ -14,8 +14,6 @@
#include "bond_gaussian.h"
#include <cmath>
#include <cstring>
#include "atom.h"
#include "neighbor.h"
#include "comm.h"
@ -24,6 +22,8 @@
#include "memory.h"
#include "error.h"
#include <cmath>
#include <cstring>
using namespace LAMMPS_NS;
using namespace MathConst;
@ -131,20 +131,20 @@ void BondGaussian::compute(int eflag, int vflag)
void BondGaussian::allocate()
{
allocated = 1;
int n = atom->nbondtypes;
int n = atom->nbondtypes+1;
memory->create(nterms,n+1,"bond:nterms");
memory->create(bond_temperature,n+1,"bond:bond_temperature");
memory->create(nterms,n,"bond:nterms");
memory->create(bond_temperature,n,"bond:bond_temperature");
alpha = new double *[n+1];
width = new double *[n+1];
r0 = new double *[n+1];
memset(alpha,0,sizeof(double)*(n+1));
memset(width,0,sizeof(double)*(n+1));
memset(r0,0,sizeof(double)*(n+1));
alpha = new double *[n];
width = new double *[n];
r0 = new double *[n];
memset(alpha,0,sizeof(double *)*n);
memset(width,0,sizeof(double *)*n);
memset(r0,0,sizeof(double *)*n);
memory->create(setflag,n+1,"bond:setflag");
for (int i = 1; i <= n; i++) setflag[i] = 0;
memory->create(setflag,n,"bond:setflag");
memset(setflag,0,sizeof(int)*n);
}
/* ----------------------------------------------------------------------

View File

@ -37,7 +37,7 @@ using namespace FixConst;
FixNVEGPU::FixNVEGPU(LAMMPS *lmp, int narg, char **arg) :
FixNVE(lmp, narg, arg)
{
_dtfm = 0;
_dtfm = nullptr;
_nlocal_max = 0;
}
@ -57,7 +57,11 @@ void FixNVEGPU::setup(int vflag)
_respa_on = 1;
else
_respa_on = 0;
if (atom->ntypes > 1) reset_dt();
// ensure that _dtfm array is initialized if the group is not "all"
// or there is more than one atom type as that re-ordeted array is used for
// per-type/per-atom masses and group membership detection.
if ((igroup != 0) || (atom->ntypes > 1)) reset_dt();
}
/* ----------------------------------------------------------------------

View File

@ -31,7 +31,6 @@
#include "neigh_request.h"
#include "neighbor.h"
#include "potential_file_reader.h"
#include "tokenizer.h"
#include <cmath>
#include <cstring>
@ -59,6 +58,7 @@ PairDRIP::PairDRIP(LAMMPS *lmp) : Pair(lmp)
{
single_enable = 0;
restartinfo = 0;
one_coeff = 1;
manybody_flag = 1;
centroidstressflag = CENTROID_NOTAVAIL;
unit_convert_flag = utils::get_supported_conversions(utils::ENERGY);
@ -241,17 +241,17 @@ void PairDRIP::read_file(char *filename)
nparams++;
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
if (comm->me != 0) {
params = (Param *) memory->srealloc(params, maxparam * sizeof(Param), "pair:params");
}
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
if (comm->me != 0) {
params = (Param *) memory->srealloc(params, maxparam * sizeof(Param), "pair:params");
}
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
memory->destroy(elem2param);
memory->create(elem2param, nelements, nelements, "pair:elem2param");
for (int i = 0; i < nelements; i++) {
@ -259,7 +259,7 @@ void PairDRIP::read_file(char *filename)
int n = -1;
for (int m = 0; m < nparams; m++) {
if (i == params[m].ielement && j == params[m].jelement) {
if (n >= 0) error->all(FLERR, "Potential file has duplicate entry");
if (n >= 0) error->all(FLERR, "DRIP potential file has duplicate entry");
n = m;
}
}

View File

@ -142,8 +142,8 @@ void PairILPGrapheneHBN::allocate()
void PairILPGrapheneHBN::settings(int narg, char **arg)
{
if (narg < 1 || narg > 2) error->all(FLERR, "Illegal pair_style command");
if (strcmp(force->pair_style, "hybrid/overlay") != 0)
error->all(FLERR, "ERROR: requires hybrid/overlay pair_style");
if (!utils::strmatch(force->pair_style, "^hybrid/overlay"))
error->all(FLERR, "Pair style ilp/graphene/hbn must be used as sub-style with hybrid/overlay");
cut_global = utils::numeric(FLERR, arg[0], false, lmp);
if (narg == 2) tap_flag = utils::numeric(FLERR, arg[1], false, lmp);
@ -273,16 +273,17 @@ void PairILPGrapheneHBN::read_file(char *filename)
nparams++;
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
if (comm->me != 0) {
params = (Param *) memory->srealloc(params, maxparam * sizeof(Param), "pair:params");
}
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
if (comm->me != 0) {
params = (Param *) memory->srealloc(params, maxparam * sizeof(Param), "pair:params");
}
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
memory->destroy(elem2param);
memory->destroy(cutILPsq);
memory->create(elem2param, nelements, nelements, "pair:elem2param");
@ -292,7 +293,7 @@ void PairILPGrapheneHBN::read_file(char *filename)
int n = -1;
for (int m = 0; m < nparams; m++) {
if (i == params[m].ielement && j == params[m].jelement) {
if (n >= 0) error->all(FLERR, "ILP Potential file has duplicate entry");
if (n >= 0) error->all(FLERR, "ILP potential file has duplicate entry");
n = m;
}
}

View File

@ -34,7 +34,6 @@
#include "neigh_request.h"
#include "neighbor.h"
#include "potential_file_reader.h"
#include "tokenizer.h"
#include <cmath>
#include <cstring>
@ -142,8 +141,8 @@ void PairKolmogorovCrespiFull::allocate()
void PairKolmogorovCrespiFull::settings(int narg, char **arg)
{
if (narg < 1 || narg > 2) error->all(FLERR, "Illegal pair_style command");
if (strcmp(force->pair_style, "hybrid/overlay") != 0)
error->all(FLERR, "ERROR: requires hybrid/overlay pair_style");
if (!utils::strmatch(force->pair_style, "^hybrid/overlay"))
error->all(FLERR, "Pair style kolmogorov/crespi/full must be used as sub-style with hybrid/overlay");
cut_global = utils::numeric(FLERR, arg[0], false, lmp);
if (narg == 2) tap_flag = utils::numeric(FLERR, arg[1], false, lmp);
@ -270,17 +269,17 @@ void PairKolmogorovCrespiFull::read_file(char *filename)
nparams++;
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
if (comm->me != 0) {
params = (Param *) memory->srealloc(params, maxparam * sizeof(Param), "pair:params");
}
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
if (comm->me != 0) {
params = (Param *) memory->srealloc(params, maxparam * sizeof(Param), "pair:params");
}
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
memory->destroy(elem2param);
memory->destroy(cutKCsq);
memory->create(elem2param, nelements, nelements, "pair:elem2param");
@ -290,7 +289,7 @@ void PairKolmogorovCrespiFull::read_file(char *filename)
int n = -1;
for (int m = 0; m < nparams; m++) {
if (i == params[m].ielement && j == params[m].jelement) {
if (n >= 0) error->all(FLERR, "KC Potential file has duplicate entry");
if (n >= 0) error->all(FLERR, "KC potential file has duplicate entry");
n = m;
}
}

View File

@ -68,14 +68,14 @@ void BondClass2Kokkos<DeviceType>::compute(int eflag_in, int vflag_in)
//if(k_eatom.extent(0)<maxeatom) { // won't work without adding zero functor
memoryKK->destroy_kokkos(k_eatom,eatom);
memoryKK->create_kokkos(k_eatom,eatom,maxeatom,"improper:eatom");
d_eatom = k_eatom.template view<DeviceType>();
d_eatom = k_eatom.template view<KKDeviceType>();
//}
}
if (vflag_atom) {
//if(k_vatom.extent(0)<maxvatom) { // won't work without adding zero functor
memoryKK->destroy_kokkos(k_vatom,vatom);
memoryKK->create_kokkos(k_vatom,vatom,maxvatom,"improper:vatom");
d_vatom = k_vatom.template view<DeviceType>();
d_vatom = k_vatom.template view<KKDeviceType>();
//}
}
@ -210,10 +210,10 @@ void BondClass2Kokkos<DeviceType>::coeff(int narg, char **arg)
BondClass2::coeff(narg, arg);
int n = atom->nbondtypes;
Kokkos::DualView<F_FLOAT*,DeviceType> k_k2("BondClass2::k2",n+1);
Kokkos::DualView<F_FLOAT*,DeviceType> k_k3("BondClass2::k3",n+1);
Kokkos::DualView<F_FLOAT*,DeviceType> k_k4("BondClass2::k4",n+1);
Kokkos::DualView<F_FLOAT*,DeviceType> k_r0("BondClass2::r0",n+1);
typename AT::tdual_ffloat_1d k_k2("BondClass2::k2",n+1);
typename AT::tdual_ffloat_1d k_k3("BondClass2::k3",n+1);
typename AT::tdual_ffloat_1d k_k4("BondClass2::k4",n+1);
typename AT::tdual_ffloat_1d k_r0("BondClass2::r0",n+1);
d_k2 = k_k2.template view<DeviceType>();
d_k3 = k_k3.template view<DeviceType>();
@ -247,10 +247,10 @@ void BondClass2Kokkos<DeviceType>::read_restart(FILE *fp)
BondClass2::read_restart(fp);
int n = atom->nbondtypes;
Kokkos::DualView<F_FLOAT*,DeviceType> k_k2("BondClass2::k2",n+1);
Kokkos::DualView<F_FLOAT*,DeviceType> k_k3("BondClass2::k3",n+1);
Kokkos::DualView<F_FLOAT*,DeviceType> k_k4("BondClass2::k4",n+1);
Kokkos::DualView<F_FLOAT*,DeviceType> k_r0("BondClass2::r0",n+1);
typename AT::tdual_ffloat_1d k_k2("BondClass2::k2",n+1);
typename AT::tdual_ffloat_1d k_k3("BondClass2::k3",n+1);
typename AT::tdual_ffloat_1d k_k4("BondClass2::k4",n+1);
typename AT::tdual_ffloat_1d k_r0("BondClass2::r0",n+1);
d_k2 = k_k2.template view<DeviceType>();
d_k3 = k_k3.template view<DeviceType>();

View File

@ -67,10 +67,11 @@ class BondClass2Kokkos : public BondClass2 {
typename Kokkos::View<double*[3],typename AT::t_f_array::array_layout,typename KKDevice<DeviceType>::value,Kokkos::MemoryTraits<Kokkos::Atomic> > f;
typename AT::t_int_2d bondlist;
Kokkos::DualView<E_FLOAT*,Kokkos::LayoutRight,DeviceType> k_eatom;
Kokkos::DualView<F_FLOAT*[6],Kokkos::LayoutRight,DeviceType> k_vatom;
Kokkos::View<E_FLOAT*,Kokkos::LayoutRight,typename KKDevice<DeviceType>::value,Kokkos::MemoryTraits<Kokkos::Atomic> > d_eatom;
Kokkos::View<F_FLOAT*[6],Kokkos::LayoutRight,typename KKDevice<DeviceType>::value,Kokkos::MemoryTraits<Kokkos::Atomic> > d_vatom;
typedef typename KKDevice<DeviceType>::value KKDeviceType;
Kokkos::DualView<E_FLOAT*,Kokkos::LayoutRight,KKDeviceType> k_eatom;
Kokkos::DualView<F_FLOAT*[6],Kokkos::LayoutRight,KKDeviceType> k_vatom;
Kokkos::View<E_FLOAT*,Kokkos::LayoutRight,KKDeviceType,Kokkos::MemoryTraits<Kokkos::Atomic> > d_eatom;
Kokkos::View<F_FLOAT*[6],Kokkos::LayoutRight,KKDeviceType,Kokkos::MemoryTraits<Kokkos::Atomic> > d_vatom;
int nlocal,newton_bond;
int eflag,vflag;

View File

@ -173,8 +173,6 @@ void ComputeOrientOrderAtomKokkos<DeviceType>::compute_peratom()
x = atomKK->k_x.view<DeviceType>();
mask = atomKK->k_mask.view<DeviceType>();
Kokkos::deep_copy(d_qnm,{0.0,0.0});
int vector_length_default = 1;
int team_size_default = 1;
if (!host_flag)
@ -185,6 +183,8 @@ void ComputeOrientOrderAtomKokkos<DeviceType>::compute_peratom()
if (chunk_size > inum - chunk_offset)
chunk_size = inum - chunk_offset;
Kokkos::deep_copy(d_qnm,{0.0,0.0});
//Neigh
{
int vector_length = vector_length_default;
@ -286,7 +286,7 @@ void ComputeOrientOrderAtomKokkos<DeviceType>::operator() (TagComputeOrientOrder
const int i = d_ilist[ii + chunk_offset];
const int ncount = d_ncount(ii);
// if not nnn neighbors, order parameter = 0;
// if not nnn neighbors, order parameter = 0
if ((ncount == 0) || (ncount < nnn)) {
for (int jj = 0; jj < ncol; jj++)
@ -316,7 +316,7 @@ void ComputeOrientOrderAtomKokkos<DeviceType>::operator() (TagComputeOrientOrder
const int ncount = d_ncount(ii);
if (jj >= ncount) return;
// if not nnn neighbors, order parameter = 0;
// if not nnn neighbors, order parameter = 0
if ((ncount == 0) || (ncount < nnn))
return;
@ -328,6 +328,12 @@ template<class DeviceType>
KOKKOS_INLINE_FUNCTION
void ComputeOrientOrderAtomKokkos<DeviceType>::operator() (TagComputeOrientOrderAtomBOOP2,const int& ii) const {
const int ncount = d_ncount(ii);
// if not nnn neighbors, order parameter = 0
if ((ncount == 0) || (ncount < nnn))
return;
calc_boop2(ncount, ii);
}

View File

@ -38,7 +38,20 @@ ModifyKokkos::ModifyKokkos(LAMMPS *lmp) : Modify(lmp)
void ModifyKokkos::setup(int vflag)
{
// compute setup needs to come before fix setup
// b/c NH fixes need use DOF of temperature computes
// b/c NH fixes need DOF of temperature computes
// fix group setup() is special case since populates a dynamic group
// needs to be done before temperature compute setup
for (int i = 0; i < nfix; i++) {
if (strcmp(fix[i]->style,"GROUP") == 0) {
atomKK->sync(fix[i]->execution_space,fix[i]->datamask_read);
int prev_auto_sync = lmp->kokkos->auto_sync;
if (!fix[i]->kokkosable) lmp->kokkos->auto_sync = 1;
fix[i]->setup(vflag);
lmp->kokkos->auto_sync = prev_auto_sync;
atomKK->modified(fix[i]->execution_space,fix[i]->datamask_modify);
}
}
for (int i = 0; i < ncompute; i++) compute[i]->setup();
@ -124,6 +137,37 @@ void ModifyKokkos::setup_pre_neighbor()
}
}
/* ----------------------------------------------------------------------
setup post_neighbor call, only for fixes that define post_neighbor
called from Verlet, RESPA
------------------------------------------------------------------------- */
void ModifyKokkos::setup_post_neighbor()
{
if (update->whichflag == 1)
for (int i = 0; i < n_post_neighbor; i++) {
atomKK->sync(fix[list_post_neighbor[i]]->execution_space,
fix[list_post_neighbor[i]]->datamask_read);
int prev_auto_sync = lmp->kokkos->auto_sync;
if (!fix[list_post_neighbor[i]]->kokkosable) lmp->kokkos->auto_sync = 1;
fix[list_post_neighbor[i]]->setup_post_neighbor();
lmp->kokkos->auto_sync = prev_auto_sync;
atomKK->modified(fix[list_post_neighbor[i]]->execution_space,
fix[list_post_neighbor[i]]->datamask_modify);
}
else if (update->whichflag == 2)
for (int i = 0; i < n_min_post_neighbor; i++) {
atomKK->sync(fix[list_min_post_neighbor[i]]->execution_space,
fix[list_min_post_neighbor[i]]->datamask_read);
int prev_auto_sync = lmp->kokkos->auto_sync;
if (!fix[list_min_post_neighbor[i]]->kokkosable) lmp->kokkos->auto_sync = 1;
fix[list_min_post_neighbor[i]]->setup_post_neighbor();
lmp->kokkos->auto_sync = prev_auto_sync;
atomKK->modified(fix[list_min_post_neighbor[i]]->execution_space,
fix[list_min_post_neighbor[i]]->datamask_modify);
}
}
/* ----------------------------------------------------------------------
setup pre_force call, only for fixes that define pre_force
called from Verlet, RESPA, Min
@ -258,6 +302,24 @@ void ModifyKokkos::pre_neighbor()
}
}
/* ----------------------------------------------------------------------
post_neighbor call, only for relevant fixes
------------------------------------------------------------------------- */
void ModifyKokkos::post_neighbor()
{
for (int i = 0; i < n_post_neighbor; i++) {
atomKK->sync(fix[list_post_neighbor[i]]->execution_space,
fix[list_post_neighbor[i]]->datamask_read);
int prev_auto_sync = lmp->kokkos->auto_sync;
if (!fix[list_post_neighbor[i]]->kokkosable) lmp->kokkos->auto_sync = 1;
fix[list_post_neighbor[i]]->post_neighbor();
lmp->kokkos->auto_sync = prev_auto_sync;
atomKK->modified(fix[list_post_neighbor[i]]->execution_space,
fix[list_post_neighbor[i]]->datamask_modify);
}
}
/* ----------------------------------------------------------------------
pre_force call, only for relevant fixes
------------------------------------------------------------------------- */
@ -420,6 +482,12 @@ void ModifyKokkos::post_run()
atomKK->modified(fix[i]->execution_space,
fix[i]->datamask_modify);
}
// must reset this to its default value, since computes may be added
// or removed between runs and with this change we will redirect any
// calls to addstep_compute() to addstep_compute_all() instead.
n_timeflag = -1;
}
/* ----------------------------------------------------------------------
@ -567,6 +635,24 @@ void ModifyKokkos::min_pre_neighbor()
}
}
/* ----------------------------------------------------------------------
minimizer post-neighbor call, only for relevant fixes
------------------------------------------------------------------------- */
void ModifyKokkos::min_post_neighbor()
{
for (int i = 0; i < n_min_post_neighbor; i++) {
atomKK->sync(fix[list_min_post_neighbor[i]]->execution_space,
fix[list_min_post_neighbor[i]]->datamask_read);
int prev_auto_sync = lmp->kokkos->auto_sync;
if (!fix[list_min_post_neighbor[i]]->kokkosable) lmp->kokkos->auto_sync = 1;
fix[list_min_post_neighbor[i]]->min_post_neighbor();
lmp->kokkos->auto_sync = prev_auto_sync;
atomKK->modified(fix[list_min_post_neighbor[i]]->execution_space,
fix[list_min_post_neighbor[i]]->datamask_modify);
}
}
/* ----------------------------------------------------------------------
minimizer pre-force call, only for relevant fixes
------------------------------------------------------------------------- */
@ -646,7 +732,7 @@ double ModifyKokkos::min_energy(double *fextra)
}
/* ----------------------------------------------------------------------
store current state of extra dof, only for relevant fixes
store current state of extra minimizer dof, only for relevant fixes
------------------------------------------------------------------------- */
void ModifyKokkos::min_store()
@ -664,7 +750,7 @@ void ModifyKokkos::min_store()
}
/* ----------------------------------------------------------------------
mange state of extra dof on a stack, only for relevant fixes
manage state of extra minimizer dof on a stack, only for relevant fixes
------------------------------------------------------------------------- */
void ModifyKokkos::min_clearstore()
@ -710,7 +796,7 @@ void ModifyKokkos::min_popstore()
}
/* ----------------------------------------------------------------------
displace extra dof along vector hextra, only for relevant fixes
displace extra minimizer dof along vector hextra, only for relevant fixes
------------------------------------------------------------------------- */
void ModifyKokkos::min_step(double alpha, double *hextra)
@ -755,7 +841,7 @@ double ModifyKokkos::max_alpha(double *hextra)
}
/* ----------------------------------------------------------------------
extract extra dof for minimization, only for relevant fixes
extract extra minimizer dof, only for relevant fixes
------------------------------------------------------------------------- */
int ModifyKokkos::min_dof()
@ -775,7 +861,7 @@ int ModifyKokkos::min_dof()
}
/* ----------------------------------------------------------------------
reset reference state of fix, only for relevant fixes
reset minimizer reference state of fix, only for relevant fixes
------------------------------------------------------------------------- */
int ModifyKokkos::min_reset_ref()
@ -788,8 +874,8 @@ int ModifyKokkos::min_reset_ref()
int prev_auto_sync = lmp->kokkos->auto_sync;
if (!fix[list_min_energy[i]]->kokkosable) lmp->kokkos->auto_sync = 1;
itmp = fix[list_min_energy[i]]->min_reset_ref();
lmp->kokkos->auto_sync = prev_auto_sync;
if (itmp) itmpall = 1;
lmp->kokkos->auto_sync = prev_auto_sync;
atomKK->modified(fix[list_min_energy[i]]->execution_space,
fix[list_min_energy[i]]->datamask_modify);
}

View File

@ -26,6 +26,7 @@ class ModifyKokkos : public Modify {
void setup(int);
void setup_pre_exchange();
void setup_pre_neighbor();
void setup_post_neighbor();
void setup_pre_force(int);
void setup_pre_reverse(int, int);
void initial_integrate(int);
@ -33,6 +34,7 @@ class ModifyKokkos : public Modify {
void pre_decide();
void pre_exchange();
void pre_neighbor();
void post_neighbor();
void pre_force(int);
void pre_reverse(int,int);
void post_force(int);
@ -52,6 +54,7 @@ class ModifyKokkos : public Modify {
void min_pre_exchange();
void min_pre_neighbor();
void min_post_neighbor();
void min_pre_force(int);
void min_pre_reverse(int,int);
void min_post_force(int);

View File

@ -29,6 +29,8 @@ namespace LAMMPS_NS {
template<class DeviceType, int HALF_NEIGH, int GHOST, int TRI, int SIZE>
NPairKokkos<DeviceType,HALF_NEIGH,GHOST,TRI,SIZE>::NPairKokkos(LAMMPS *lmp) : NPair(lmp) {
last_stencil_old = -1;
// use 1D view for scalars to reduce GPU memory operations
d_scalars = typename AT::t_int_1d("neighbor:scalars",2);
@ -112,9 +114,11 @@ void NPairKokkos<DeviceType,HALF_NEIGH,GHOST,TRI,SIZE>::copy_stencil_info()
NPair::copy_stencil_info();
nstencil = ns->nstencil;
if (ns->last_stencil == update->ntimestep) {
if (ns->last_stencil != last_stencil_old) {
// copy stencil to device as it may have changed
last_stencil_old = ns->last_stencil;
int maxstencil = ns->get_maxstencil();
if (maxstencil > (int)k_stencil.extent(0))

View File

@ -138,7 +138,7 @@ class NPairKokkos : public NPair {
// data from NStencil class
int nstencil;
int nstencil,last_stencil_old;
DAT::tdual_int_1d k_stencil; // # of J neighs for each I
DAT::tdual_int_1d_3 k_stencilxyz;
};

View File

@ -115,6 +115,7 @@ void AtomVecSMD::grow_pointers()
vfrac = atom->vfrac;
rmass = atom->rmass;
x0 = atom->x0;
x = atom->x;
radius = atom->radius;
contact_radius = atom->contact_radius;
molecule = atom->molecule;
@ -129,13 +130,11 @@ void AtomVecSMD::grow_pointers()
/* ----------------------------------------------------------------------
clear extra forces starting at atom N
nbytes = # of bytes to clear for a per-atom vector
NOTE: does f need to be re-cleared?
------------------------------------------------------------------------- */
void AtomVecSMD::force_clear(int n, size_t nbytes)
{
memset(&desph[n],0,nbytes);
memset(&f[n][0],0,3*nbytes);
}
/* ----------------------------------------------------------------------

View File

@ -53,7 +53,7 @@ ComputeSMDTriangleVertices::ComputeSMDTriangleVertices(LAMMPS *lmp, int narg, ch
/* ---------------------------------------------------------------------- */
ComputeSMDTriangleVertices::~ComputeSMDTriangleVertices() {
memory->sfree(outputVector);
memory->destroy(outputVector);
}
/* ---------------------------------------------------------------------- */

View File

@ -85,7 +85,8 @@ PairULSPH::PairULSPH(LAMMPS *lmp) :
PairULSPH::~PairULSPH() {
if (allocated) {
//printf("... deallocating\n");
memory->destroy(setflag);
memory->destroy(cutsq);
memory->destroy(Q1);
memory->destroy(rho0);
memory->destroy(eos);

View File

@ -20,9 +20,6 @@
#include "pair_eam_cd.h"
#include <cmath>
#include <cstring>
#include "atom.h"
#include "force.h"
#include "comm.h"
@ -31,11 +28,11 @@
#include "error.h"
#include "tokenizer.h"
#include <cmath>
#include <cstring>
using namespace LAMMPS_NS;
#define ASSERT(cond)
#define MAXLINE 1024 // This sets the maximum line length in EAM input files.
PairEAMCD::PairEAMCD(LAMMPS *lmp, int _cdeamVersion)
@ -298,7 +295,7 @@ void PairEAMCD::compute(int eflag, int vflag)
// It will be replaced by the concentration at site i if atom i is either A or B.
double x_i = -1.0;
double D_i, h_prime_i;
double D_i = 0.0, h_prime_i;
// This if-clause is only required for ternary alloys.
@ -307,7 +304,6 @@ void PairEAMCD::compute(int eflag, int vflag)
// Compute local concentration at site i.
x_i = rhoB[i]/rho[i];
ASSERT(x_i >= 0 && x_i<=1.0);
if (cdeamVersion == 1) {
@ -317,8 +313,6 @@ void PairEAMCD::compute(int eflag, int vflag)
D_i = D_values[i] * h_prime_i / (2.0 * rho[i] * rho[i]);
} else if (cdeamVersion == 2) {
D_i = D_values[i];
} else {
ASSERT(false);
}
}
@ -354,14 +348,11 @@ void PairEAMCD::compute(int eflag, int vflag)
// This code line is required for ternary alloy.
if (jtype == speciesA || jtype == speciesB) {
ASSERT(rho[i] != 0.0);
ASSERT(rho[j] != 0.0);
if ((jtype == speciesA || jtype == speciesB) && rho[j] != 0.0) {
// Compute local concentration at site j.
x_j = rhoB[j]/rho[j];
ASSERT(x_j >= 0 && x_j<=1.0);
double D_j=0.0;
if (cdeamVersion == 1) {
@ -372,8 +363,6 @@ void PairEAMCD::compute(int eflag, int vflag)
D_j = D_values[j] * h_prime_j / (2.0 * rho[j] * rho[j]);
} else if (cdeamVersion == 2) {
D_j = D_values[j];
} else {
ASSERT(false);
}
double t2 = -rhoB[j];
if (itype == speciesB) t2 += rho[j];
@ -422,8 +411,6 @@ void PairEAMCD::compute(int eflag, int vflag)
// Calculate h(x_ij) polynomial function.
h = evalH(x_ij);
} else {
ASSERT(false);
}
fpair += h * phip;
phi *= h;
@ -460,7 +447,8 @@ void PairEAMCD::coeff(int narg, char **arg)
// Make sure the EAM file is a CD-EAM binary alloy.
if (setfl->nelements < 2)
error->all(FLERR,"The EAM file must contain at least 2 elements to be used with the eam/cd pair style.");
error->all(FLERR,"The EAM file must contain at least 2 elements to be "
"used with the eam/cd pair style.");
// Read in the coefficients of the h polynomial from the end of the EAM file.
@ -502,22 +490,19 @@ void PairEAMCD::read_h_coeff(char *filename)
// Open potential file
FILE *fptr;
char line[MAXLINE];
char nextline[MAXLINE];
int convert_flag = unit_convert_flag;
char line[2][MAXLINE];
int convert_flag = unit_convert_flag, toggle = 0;
fptr = utils::open_potential(filename, lmp, &convert_flag);
if (fptr == nullptr)
error->one(FLERR,"Cannot open EAMCD potential file {}",
filename);
error->one(FLERR,"Cannot open EAMCD potential file {}", filename);
// h coefficients are stored at the end of the file.
// Skip to last line of file.
while (fgets(nextline, MAXLINE, fptr) != nullptr) {
strcpy(line, nextline);
}
while (fgets(line[toggle], MAXLINE, fptr) != nullptr)
toggle = !toggle;
ValueTokenizer values(line);
ValueTokenizer values(line[!toggle]);
int degree = values.next_int();
nhcoeff = degree+1;
@ -527,10 +512,8 @@ void PairEAMCD::read_h_coeff(char *filename)
delete[] hcoeff;
hcoeff = new double[nhcoeff];
int i = 0;
while (values.has_next()) {
hcoeff[i++] = values.next_double();
}
for (int i = 0; i < nhcoeff; ++i)
hcoeff[i] = values.next_double();
// Close the potential file.
@ -545,7 +528,6 @@ void PairEAMCD::read_h_coeff(char *filename)
MPI_Bcast(hcoeff, nhcoeff, MPI_DOUBLE, 0, world);
}
/* ---------------------------------------------------------------------- */
int PairEAMCD::pack_forward_comm(int n, int *list, double *buf,
@ -572,7 +554,7 @@ int PairEAMCD::pack_forward_comm(int n, int *list, double *buf,
buf[m++] = rhoB[j];
}
return m;
} else { ASSERT(false); return 0; }
} else return 0;
} else if (communicationStage == 4) {
for (i = 0; i < n; i++) {
j = list[i];
@ -604,8 +586,6 @@ void PairEAMCD::unpack_forward_comm(int n, int first, double *buf)
rho[i] = buf[m++];
rhoB[i] = buf[m++];
}
} else {
ASSERT(false);
}
} else if (communicationStage == 4) {
for (i = first; i < last; i++) {
@ -636,7 +616,7 @@ int PairEAMCD::pack_reverse_comm(int n, int first, double *buf)
buf[m++] = rhoB[i];
}
return m;
} else { ASSERT(false); return 0; }
} else return 0;
} else if (communicationStage == 3) {
for (i = first; i < last; i++) {
buf[m++] = D_values[i];
@ -666,8 +646,6 @@ void PairEAMCD::unpack_reverse_comm(int n, int *list, double *buf)
rho[j] += buf[m++];
rhoB[j] += buf[m++];
}
} else {
ASSERT(false);
}
} else if (communicationStage == 3) {
for (i = 0; i < n; i++) {

View File

@ -120,6 +120,7 @@ class PairEAMCD : public PairEAMAlloy {
index.p = r * rdr + 1.0;
index.m = static_cast<int>(index.p);
index.m = index.m <= (nr - 1) ? index.m : (nr - 1);
index.m = index.m > 1 ? index.m : 1;
index.p -= index.m;
index.p = index.p <= 1.0 ? index.p : 1.0;
return index;
@ -132,6 +133,7 @@ class PairEAMCD : public PairEAMAlloy {
index.p = rho * rdrho + 1.0;
index.m = static_cast<int>(index.p);
index.m = index.m <= (nrho - 1) ? index.m : (nrho - 1);
index.m = index.m > 1 ? index.m : 1;
index.p -= index.m;
index.p = index.p <= 1.0 ? index.p : 1.0;
return index;

View File

@ -86,9 +86,8 @@ PairLocalDensity::PairLocalDensity(LAMMPS *lmp) : Pair(lmp)
fp = nullptr;
localrho = nullptr;
// set comm size needed by this pair
comm_forward = 1;
comm_reverse = 1;
// comm sizes needed by this pair style will be set when reading the potential file
comm_forward = comm_reverse = 0;
// cite publication
if (lmp->citeme) lmp->citeme->add(cite_pair_local_density);
@ -686,6 +685,11 @@ void PairLocalDensity::parse_file(char *filename) {
MPI_Bcast(&nLD,1,MPI_INT,0,world);
MPI_Bcast(&nrho,1,MPI_INT,0,world);
comm_forward = comm_reverse = nLD;
if ((me == 0) && (nLD != atom->ntypes*atom->ntypes))
error->warning(FLERR, "Expected {} local density potentials but got {}",
atom->ntypes*atom->ntypes, nLD);
// setting up all arrays to be read from files and broadcasted
memory->create(uppercut, nLD, "pairLD:uppercut");
@ -807,8 +811,8 @@ void PairLocalDensity::parse_file(char *filename) {
communication routines
------------------------------------------------------------------------- */
int PairLocalDensity::pack_comm(int n, int *list, double *buf,
int /* pbc_flag */, int * /* pbc */) {
int PairLocalDensity::pack_forward_comm(int n, int *list, double *buf,
int /* pbc_flag */, int * /* pbc */) {
int i,j,k;
int m;
@ -820,12 +824,12 @@ int PairLocalDensity::pack_comm(int n, int *list, double *buf,
}
}
return nLD;
return m;
}
/* ---------------------------------------------------------------------- */
void PairLocalDensity::unpack_comm(int n, int first, double *buf) {
void PairLocalDensity::unpack_forward_comm(int n, int first, double *buf) {
int i,k,m,last;
@ -851,7 +855,7 @@ int PairLocalDensity::pack_reverse_comm(int n, int first, double *buf) {
buf[m++] = localrho[k][i];
}
}
return nLD;
return m;
}
/* ---------------------------------------------------------------------- */

View File

@ -39,8 +39,8 @@ class PairLocalDensity : public Pair {
double init_one(int, int);
double single(int, int, int, int, double, double, double, double &);
virtual int pack_comm(int, int *, double *, int, int *);
virtual void unpack_comm(int, int, double *);
virtual int pack_forward_comm(int, int *, double *, int, int *);
virtual void unpack_forward_comm(int, int, double *);
int pack_reverse_comm(int, int, double *);
void unpack_reverse_comm(int, int *, double *);
double memory_usage();

View File

@ -75,7 +75,7 @@ FixChargeRegulation::FixChargeRegulation(LAMMPS *lmp, int narg, char **arg) :
size_vector = 8;
global_freq = 1;
extvector = 0;
restart_global = 1;
restart_global = 0;
time_depend = 1;
cr_nmax = 0;
overlap_flag = 0;
@ -985,9 +985,17 @@ int FixChargeRegulation::insert_particle(int ptype, double charge, double rd, do
modify->create_attribute(m);
}
atom->nghost = 0;
comm->borders();
atom->natoms++;
atom->nghost = 0;
if (atom->tag_enable) {
if (atom->tag_enable) {
atom->tag_extend();
if (atom->map_style != Atom::MAP_NONE) atom->map_init();
}
}
if (triclinic) domain->x2lamda(atom->nlocal);
comm->borders();
if (triclinic) domain->lamda2x(atom->nlocal+atom->nghost);
return m;
}

View File

@ -470,9 +470,9 @@ void AngleTable::compute_table(Table *tb)
memory->create(tb->ang,tablength,"angle:ang");
memory->create(tb->e,tablength,"angle:e");
memory->create(tb->de,tlm1,"angle:de");
memory->create(tb->de,tablength,"angle:de");
memory->create(tb->f,tablength,"angle:f");
memory->create(tb->df,tlm1,"angle:df");
memory->create(tb->df,tablength,"angle:df");
memory->create(tb->e2,tablength,"angle:e2");
memory->create(tb->f2,tablength,"angle:f2");
@ -488,6 +488,9 @@ void AngleTable::compute_table(Table *tb)
tb->de[i] = tb->e[i+1] - tb->e[i];
tb->df[i] = tb->f[i+1] - tb->f[i];
}
// get final elements from linear extrapolation
tb->de[tlm1] = 2.0*tb->de[tlm1-1] - tb->de[tlm1-2];
tb->df[tlm1] = 2.0*tb->df[tlm1-1] - tb->df[tlm1-2];
double ep0 = - tb->f[0];
double epn = - tb->f[tlm1];

View File

@ -435,9 +435,9 @@ void BondTable::compute_table(Table *tb)
memory->create(tb->r,tablength,"bond:r");
memory->create(tb->e,tablength,"bond:e");
memory->create(tb->de,tlm1,"bond:de");
memory->create(tb->de,tablength,"bond:de");
memory->create(tb->f,tablength,"bond:f");
memory->create(tb->df,tlm1,"bond:df");
memory->create(tb->df,tablength,"bond:df");
memory->create(tb->e2,tablength,"bond:e2");
memory->create(tb->f2,tablength,"bond:f2");
@ -453,6 +453,9 @@ void BondTable::compute_table(Table *tb)
tb->de[i] = tb->e[i+1] - tb->e[i];
tb->df[i] = tb->f[i+1] - tb->f[i];
}
// get final elements from linear extrapolation
tb->de[tlm1] = 2.0*tb->de[tlm1-1] - tb->de[tlm1-2];
tb->df[tlm1] = 2.0*tb->df[tlm1-1] - tb->df[tlm1-2];
double ep0 = - tb->f[0];
double epn = - tb->f[tlm1];

View File

@ -65,6 +65,10 @@ static constexpr int THIS_IS_A_COMPUTE = -2;
static constexpr int THIS_IS_A_VARIABLE = -3;
static constexpr int THIS_IS_A_BIGINT = -4;
// to be used instead of NC_MAX_VAR_DIMS which was expected
// to be no larger than 1024 but that is not guaranteed anymore.
static constexpr int LMP_MAX_VAR_DIMS = 1024;
/* ---------------------------------------------------------------------- */
#define NCERR(x) ncerr(x, nullptr, __LINE__)
@ -339,8 +343,8 @@ void DumpNetCDF::openfile()
if (framei != 0 && !multifile)
error->all(FLERR,"at keyword requires use of 'append yes'");
int dims[NC_MAX_VAR_DIMS];
size_t index[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
int dims[LMP_MAX_VAR_DIMS];
size_t index[LMP_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
double d[1];
if (singlefile_opened) return;
@ -735,8 +739,8 @@ void DumpNetCDF::write_header(bigint n)
void DumpNetCDF::write_data(int n, double *mybuf)
{
size_t start[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
ptrdiff_t stride[NC_MAX_VAR_DIMS];
size_t start[LMP_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
ptrdiff_t stride[LMP_MAX_VAR_DIMS];
if (!int_buffer) {
n_buffer = n;

View File

@ -65,6 +65,10 @@ static constexpr int THIS_IS_A_COMPUTE = -2;
static constexpr int THIS_IS_A_VARIABLE = -3;
static constexpr int THIS_IS_A_BIGINT = -4;
// to be used instead of NC_MAX_VAR_DIMS which was expected
// to be no larger than 1024 but that is not guaranteed anymore.
static constexpr int LMP_MAX_VAR_DIMS = 1024;
/* ---------------------------------------------------------------------- */
#define NCERR(x) ncerr(x, nullptr, __LINE__)
@ -275,7 +279,7 @@ void DumpNetCDFMPIIO::openfile()
if (not utils::file_is_readable(filecurrent))
error->all(FLERR, "cannot append to non-existent file {}", filecurrent);
MPI_Offset index[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
MPI_Offset index[LMP_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
double d[1];
if (singlefile_opened) return;
@ -340,8 +344,8 @@ void DumpNetCDFMPIIO::openfile()
if (framei != 0 && !multifile)
error->all(FLERR,"at keyword requires use of 'append yes'");
int dims[NC_MAX_VAR_DIMS];
MPI_Offset index[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
int dims[LMP_MAX_VAR_DIMS];
MPI_Offset index[LMP_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
double d[1];
if (singlefile_opened) return;
@ -753,8 +757,8 @@ void DumpNetCDFMPIIO::write_time_and_cell()
void DumpNetCDFMPIIO::write_data(int n, double *mybuf)
{
MPI_Offset start[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
MPI_Offset stride[NC_MAX_VAR_DIMS];
MPI_Offset start[LMP_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
MPI_Offset stride[LMP_MAX_VAR_DIMS];
if (!int_buffer) {
n_buffer = std::max(1, n);

View File

@ -1,4 +1,3 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
@ -62,9 +61,9 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
MPI_Comm_size(world,&nprocs);
ntypes = atom->ntypes;
nevery = atoi(arg[3]);
nrepeat = atoi(arg[4]);
global_freq = nfreq = atoi(arg[5]);
nevery = utils::inumeric(FLERR,arg[3],false,lmp);
nrepeat = utils::inumeric(FLERR,arg[4],false,lmp);
global_freq = nfreq = utils::inumeric(FLERR,arg[5],false,lmp);
comm_forward = 4;
@ -95,15 +94,9 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
rene_flag = 1;
}
if (me == 0 && rene_flag) {
error->warning(FLERR,"Resetting reneighboring criteria for fix reaxff/species");
}
tmparg = nullptr;
memory->create(tmparg,4,4,"reaxff/species:tmparg");
strcpy(tmparg[0],arg[3]);
strcpy(tmparg[1],arg[4]);
strcpy(tmparg[2],arg[5]);
if (me == 0 && rene_flag)
error->warning(FLERR,"Resetting reneighboring criteria to 'delay {} every {} check no' "
"for fix reaxff/species",neighbor->delay, neighbor->every);
if (me == 0) {
char *suffix = strrchr(arg[6],'.');
@ -121,8 +114,7 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
} else fp = fopen(arg[6],"w");
if (!fp)
error->one(FLERR,fmt::format("Cannot open fix reaxff/species file {}: "
"{}",arg[6],utils::getsyserror()));
error->one(FLERR,"Cannot open fix reaxff/species file {}: {}",arg[6],utils::getsyserror());
}
x0 = nullptr;
@ -169,9 +161,9 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
// set BO cutoff
if (strcmp(arg[iarg],"cutoff") == 0) {
if (iarg+4 > narg) error->all(FLERR,"Illegal fix reaxff/species command");
itype = atoi(arg[iarg+1]);
jtype = atoi(arg[iarg+2]);
bo_cut = atof(arg[iarg+3]);
itype = utils::inumeric(FLERR,arg[iarg+1],false,lmp);
jtype = utils::inumeric(FLERR,arg[iarg+2],false,lmp);
bo_cut = utils::numeric(FLERR,arg[iarg+3],false,lmp);
if (itype > ntypes || jtype > ntypes)
error->all(FLERR,"Illegal fix reaxff/species command");
if (itype <= 0 || jtype <= 0)
@ -201,7 +193,7 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
} else if (strcmp(arg[iarg],"position") == 0) {
if (iarg+3 > narg) error->all(FLERR,"Illegal fix reaxff/species command");
posflag = 1;
posfreq = atoi(arg[iarg+1]);
posfreq = utils::inumeric(FLERR,arg[iarg+1],false,lmp);
if (posfreq < nfreq || (posfreq%nfreq != 0))
error->all(FLERR,"Illegal fix reaxff/species command");
@ -259,8 +251,8 @@ FixReaxFFSpecies::~FixReaxFFSpecies()
if (me == 0) fclose(fp);
if (me == 0 && posflag && multipos_opened) fclose(pos);
modify->delete_compute("SPECATOM");
modify->delete_fix("SPECBOND");
modify->delete_compute(fmt::format("SPECATOM_{}",id));
modify->delete_fix(fmt::format("SPECBOND_{}",id));
}
/* ---------------------------------------------------------------------- */
@ -300,22 +292,16 @@ void FixReaxFFSpecies::init()
if (nvalid != update->ntimestep)
nvalid = update->ntimestep+nfreq;
// check if this fix has been called twice
int count = 0;
for (int i = 0; i < modify->nfix; i++)
if (strcmp(modify->fix[i]->style,"reaxff/species") == 0) count++;
if (count > 1 && comm->me == 0)
error->warning(FLERR,"More than one fix reaxff/species");
if (!setupflag) {
// create a compute to store properties
modify->add_compute("SPECATOM all SPEC/ATOM q x y z vx vy vz abo01 abo02 abo03 abo04 "
"abo05 abo06 abo07 abo08 abo09 abo10 abo11 abo12 abo13 abo14 "
"abo15 abo16 abo17 abo18 abo19 abo20 abo21 abo22 abo23 abo24");
modify->add_compute(fmt::format("SPECATOM_{} all SPEC/ATOM q x y z vx vy vz abo01 abo02 "
"abo03 abo04 abo05 abo06 abo07 abo08 abo09 abo10 abo11 "
"abo12 abo13 abo14 abo15 abo16 abo17 abo18 abo19 abo20 "
"abo21 abo22 abo23 abo24",id));
// create a fix to point to fix_ave_atom for averaging stored properties
auto fixcmd = fmt::format("SPECBOND all ave/atom {} {} {}",tmparg[0],tmparg[1],tmparg[2]);
for (int i = 1; i < 32; ++i) fixcmd += " c_SPECATOM[" + std::to_string(i) + "]";
auto fixcmd = fmt::format("SPECBOND_{} all ave/atom {} {} {}",id,nevery,nrepeat,nfreq);
for (int i = 1; i < 32; ++i) fixcmd += fmt::format(" c_SPECATOM_{}[{}]",id,i);
f_SPECBOND = (FixAveAtom *) modify->add_fix(fixcmd);
setupflag = 1;
}
@ -683,8 +669,7 @@ void FixReaxFFSpecies::OpenPos()
char *ptr = strchr(filepos,'*');
*ptr = '\0';
if (padflag == 0)
sprintf(filecurrent,"%s" BIGINT_FORMAT "%s",
filepos,ntimestep,ptr+1);
sprintf(filecurrent,"%s" BIGINT_FORMAT "%s",filepos,ntimestep,ptr+1);
else {
char bif[8],pad[16];
strcpy(bif,BIGINT_FORMAT);

View File

@ -583,6 +583,7 @@ namespace ReaxFF {
} catch (std::exception &e) {
error->one(FLERR,e.what());
}
fclose(fp);
}
// broadcast global parameters and allocate list on ranks != 0

View File

@ -31,6 +31,7 @@
#include "domain.h"
#include "error.h"
#include "fix.h"
#include "fix_store.h"
#include "force.h"
#include "group.h"
#include "input.h"
@ -87,9 +88,9 @@ enum{X,Y,Z, // required for vtk, must come first
Q,MUX,MUY,MUZ,MU,RADIUS,DIAMETER,
OMEGAX,OMEGAY,OMEGAZ,ANGMOMX,ANGMOMY,ANGMOMZ,
TQX,TQY,TQZ,
VARIABLE,COMPUTE,FIX,INAME,DNAME,
COMPUTE,FIX,VARIABLE,IVEC,DVEC,IARRAY,DARRAY,
ATTRIBUTES}; // must come last
enum{LT,LE,GT,GE,EQ,NEQ};
enum{LT,LE,GT,GE,EQ,NEQ,XOR};
enum{VTK,VTP,VTU,PVTP,PVTU}; // file formats
#define ONEFIELD 32
@ -119,11 +120,10 @@ DumpVTK::DumpVTK(LAMMPS *lmp, int narg, char **arg) :
// ioptional = start of additional optional args
// only dump image and dump movie styles process optional args
ioptional = parse_fields(narg,arg);
ioptional = parse_fields(nargnew,earg);
if (ioptional < narg &&
strcmp(style,"image") != 0 && strcmp(style,"movie") != 0)
error->all(FLERR,"Invalid attribute in dump vtk command");
if (ioptional < nargnew)
error->all(FLERR,"Invalid attribute {} in dump vtk command", earg[ioptional]);
size_one = pack_choice.size();
current_pack_choice_key = -1;
@ -210,38 +210,40 @@ void DumpVTK::init_style()
else
write_choice = &DumpVTK::write_vtk;
// find current ptr for each compute,fix,variable
// find current ptr for each compute,fix,variable and custom atom property
// check that fix frequency is acceptable
int icompute;
for (int i = 0; i < ncompute; i++) {
icompute = modify->find_compute(id_compute[i]);
int icompute = modify->find_compute(id_compute[i]);
if (icompute < 0) error->all(FLERR,"Could not find dump vtk compute ID");
compute[i] = modify->compute[icompute];
}
int ifix;
for (int i = 0; i < nfix; i++) {
ifix = modify->find_fix(id_fix[i]);
int ifix = modify->find_fix(id_fix[i]);
if (ifix < 0) error->all(FLERR,"Could not find dump vtk fix ID");
fix[i] = modify->fix[ifix];
if (nevery % modify->fix[ifix]->peratom_freq)
error->all(FLERR,"Dump vtk and fix not computed at compatible times");
}
int ivariable;
for (int i = 0; i < nvariable; i++) {
ivariable = input->variable->find(id_variable[i]);
int ivariable = input->variable->find(id_variable[i]);
if (ivariable < 0)
error->all(FLERR,"Could not find dump vtk variable name");
variable[i] = ivariable;
}
int icustom;
int icustom,flag,cols;
for (int i = 0; i < ncustom; i++) {
icustom = atom->find_custom(id_custom[i],flag_custom[i]);
icustom = atom->find_custom(id_custom[i],flag,cols);
if (icustom < 0)
error->all(FLERR,"Could not find custom per-atom property ID");
custom[i] = icustom;
if (!flag && !cols) custom_flag[i] = IVEC;
else if (flag && !cols) custom_flag[i] = DVEC;
else if (!flag && cols) custom_flag[i] = IARRAY;
else if (flag && cols) custom_flag[i] = DARRAY;
}
// set index and check validity of region
@ -275,7 +277,7 @@ int DumpVTK::count()
// grow choose and variable vbuf arrays if needed
int nlocal = atom->nlocal;
const int nlocal = atom->nlocal;
if (atom->nmax > maxlocal) {
maxlocal = atom->nmax;
@ -345,10 +347,10 @@ int DumpVTK::count()
// un-choose if any threshold criterion isn't met
if (nthresh) {
double *ptr;
double *ptr,*ptrhold;
double *values;
double value;
int nstride;
int nlocal = atom->nlocal;
int nstride,lastflag;
for (int ithresh = 0; ithresh < nthresh; ithresh++) {
@ -635,26 +637,22 @@ int DumpVTK::count()
nstride = 1;
} else if (thresh_array[ithresh] == MUX) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][0];
nstride = 4;
} else if (thresh_array[ithresh] == MUY) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][1];
nstride = 4;
} else if (thresh_array[ithresh] == MUZ) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][2];
nstride = 4;
} else if (thresh_array[ithresh] == MU) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][3];
nstride = 4;
@ -753,9 +751,8 @@ int DumpVTK::count()
nstride = 1;
} else if (thresh_array[ithresh] == IVEC) {
int iwhich,flag,cols
i = ATTRIBUTES + nfield + ithresh;
iwhich = atom->find_custom(id_custom[field2index[i]],flag,cols);
int iwhich = custom[field2index[i]];
int *ivector = atom->ivector[iwhich];
for (i = 0; i < nlocal; i++)
dchoose[i] = ivector[i];
@ -763,16 +760,14 @@ int DumpVTK::count()
nstride = 1;
} else if (thresh_array[ithresh] == DVEC) {
int iwhich,flag,cols;
i = ATTRIBUTES + nfield + ithresh;
iwhich = atom->find_custom(id_custom[field2index[i]],flag,cols);
int iwhich = custom[field2index[i]];
ptr = atom->dvector[iwhich];
nstride = 1;
} else if (thresh_array[ithresh] == IARRAY) {
int iwhich,flag,cols;
i = ATTRIBUTES + nfield + ithresh;
iwhich = atom->find_custom(id_custom[field2index[i]],flag,cols);
int iwhich = custom[field2index[i]];
int **iarray = atom->iarray[iwhich];
int icol = argindex[i] - 1;
for (i = 0; i < nlocal; i++)
@ -781,43 +776,99 @@ int DumpVTK::count()
nstride = 1;
} else if (thresh_array[ithresh] == DARRAY) {
int iwhich,flag,cols;
i = ATTRIBUTES + nfield + ithresh;
iwhich = atom->find_custom(id_custom[field2index[i]],flag,cols)
int iwhich = custom[field2index[i]];
double **darray = atom->darray[iwhich];
ptr = &darray[0][argindex[i]-1];
nstride = atom->dcols[iwhich];
}
// unselect atoms that don't meet threshold criterion
// compare to single value or values stored in threshfix
// copy ptr attribute into thresh_fix if this is first comparison
value = thresh_value[ithresh];
if (thresh_last[ithresh] < 0) {
lastflag = 0;
value = thresh_value[ithresh];
} else {
lastflag = 1;
int ilast = thresh_last[ithresh];
values = thresh_fix[ilast]->vstore;
ptrhold = ptr;
if (thresh_first[ilast]) {
thresh_first[ilast] = 0;
for (i = 0; i < nlocal; i++, ptr += nstride) values[i] = *ptr;
ptr = ptrhold;
}
}
switch (thresh_op[ithresh]) {
case LT:
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr >= value) choose[i] = 0;
break;
case LE:
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr > value) choose[i] = 0;
break;
case GT:
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr <= value) choose[i] = 0;
break;
case GE:
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr < value) choose[i] = 0;
break;
case EQ:
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr != value) choose[i] = 0;
break;
case NEQ:
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr == value) choose[i] = 0;
break;
if (thresh_op[ithresh] == LT) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr >= values[i]) choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr >= value) choose[i] = 0;
}
} else if (thresh_op[ithresh] == LE) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr > values[i]) choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr > value) choose[i] = 0;
}
} else if (thresh_op[ithresh] == GT) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr <= values[i]) choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr <= value) choose[i] = 0;
}
} else if (thresh_op[ithresh] == GE) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr < values[i]) choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr < value) choose[i] = 0;
}
} else if (thresh_op[ithresh] == EQ) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr != values[i]) choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr != value) choose[i] = 0;
}
} else if (thresh_op[ithresh] == NEQ) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr == values[i]) choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if (choose[i] && *ptr == value) choose[i] = 0;
}
} else if (thresh_op[ithresh] == XOR) {
if (lastflag) {
for (i = 0; i < nlocal; i++, ptr += nstride)
if ((choose[i] && *ptr == 0.0 && values[i] == 0.0) ||
(*ptr != 0.0 && values[i] != 0.0))
choose[i] = 0;
} else {
for (i = 0; i < nlocal; i++, ptr += nstride)
if ((choose[i] && *ptr == 0.0 && value == 0.0) ||
(*ptr != 0.0 && value != 0.0))
choose[i] = 0;
}
}
// update values stored in threshfix
if (lastflag) {
ptr = ptrhold;
for (i = 0; i < nlocal; i++, ptr += nstride) values[i] = *ptr;
}
}
}
@ -1754,15 +1805,16 @@ int DumpVTK::parse_fields(int narg, char **arg)
} else {
int n,tmp;
int n,flag,cols;
ArgInfo argi(arg[iarg],ArgInfo::COMPUTE|ArgInfo::FIX|ArgInfo::VARIABLE
|ArgInfo::DVEC|ArgInfo::IVEC);
|ArgInfo::DNAME|ArgInfo::INAME);
argindex[ATTRIBUTES+i] = argi.get_index1();
auto aname = argi.get_name();
switch (argi.get_type()) {
case ArgInfo::UNKNOWN:
error->all(FLERR,"Invalid attribute in dump vtk command");
error->all(FLERR,"Invalid attribute in dump vtk command: {}",arg[iarg]);
break;
// compute value = c_ID
@ -1772,21 +1824,19 @@ int DumpVTK::parse_fields(int narg, char **arg)
pack_choice[ATTRIBUTES+i] = &DumpVTK::pack_compute;
vtype[ATTRIBUTES+i] = Dump::DOUBLE;
n = modify->find_compute(argi.get_name());
if (n < 0) error->all(FLERR,"Could not find dump vtk compute ID");
n = modify->find_compute(aname);
if (n < 0) error->all(FLERR,"Could not find dump vtk compute ID: {}",aname);
if (modify->compute[n]->peratom_flag == 0)
error->all(FLERR,"Dump vtk compute does not compute per-atom info");
error->all(FLERR,"Dump vtk compute {} does not compute per-atom info",aname);
if (argi.get_dim() == 0 && modify->compute[n]->size_peratom_cols > 0)
error->all(FLERR,
"Dump vtk compute does not calculate per-atom vector");
error->all(FLERR,"Dump vtk compute {} does not calculate per-atom vector",aname);
if (argi.get_dim() > 0 && modify->compute[n]->size_peratom_cols == 0)
error->all(FLERR,
"Dump vtk compute does not calculate per-atom array");
error->all(FLERR,"Dump vtk compute {} does not calculate per-atom array",aname);
if (argi.get_dim() > 0 &&
argi.get_index1() > modify->compute[n]->size_peratom_cols)
error->all(FLERR,"Dump vtk compute vector is accessed out-of-range");
error->all(FLERR,"Dump vtk compute {} vector is accessed out-of-range",aname);
field2index[ATTRIBUTES+i] = add_compute(argi.get_name());
field2index[ATTRIBUTES+i] = add_compute(aname);
name[ATTRIBUTES+i] = arg[iarg];
break;
@ -1797,19 +1847,19 @@ int DumpVTK::parse_fields(int narg, char **arg)
pack_choice[ATTRIBUTES+i] = &DumpVTK::pack_fix;
vtype[ATTRIBUTES+i] = Dump::DOUBLE;
n = modify->find_fix(argi.get_name());
if (n < 0) error->all(FLERR,"Could not find dump vtk fix ID");
n = modify->find_fix(aname);
if (n < 0) error->all(FLERR,"Could not find dump vtk fix ID: {}",aname);
if (modify->fix[n]->peratom_flag == 0)
error->all(FLERR,"Dump vtk fix does not compute per-atom info");
error->all(FLERR,"Dump vtk fix {} does not compute per-atom info",aname);
if (argi.get_dim() == 0 && modify->fix[n]->size_peratom_cols > 0)
error->all(FLERR,"Dump vtk fix does not compute per-atom vector");
error->all(FLERR,"Dump vtk fix {} does not compute per-atom vector",aname);
if (argi.get_dim() > 0 && modify->fix[n]->size_peratom_cols == 0)
error->all(FLERR,"Dump vtk fix does not compute per-atom array");
error->all(FLERR,"Dump vtk fix {} does not compute per-atom array",aname);
if (argi.get_dim() > 0 &&
argi.get_index1() > modify->fix[n]->size_peratom_cols)
error->all(FLERR,"Dump vtk fix vector is accessed out-of-range");
error->all(FLERR,"Dump vtk fix {} vector is accessed out-of-range",aname);
field2index[ATTRIBUTES+i] = add_fix(argi.get_name());
field2index[ATTRIBUTES+i] = add_fix(aname);
name[ATTRIBUTES+i] = arg[iarg];
break;
@ -1819,61 +1869,62 @@ int DumpVTK::parse_fields(int narg, char **arg)
pack_choice[ATTRIBUTES+i] = &DumpVTK::pack_variable;
vtype[ATTRIBUTES+i] = Dump::DOUBLE;
n = input->variable->find(argi.get_name());
if (n < 0) error->all(FLERR,"Could not find dump vtk variable name");
n = input->variable->find(aname);
if (n < 0) error->all(FLERR,"Could not find dump vtk variable name {}",aname);
if (input->variable->atomstyle(n) == 0)
error->all(FLERR,"Dump vtk variable is not atom-style variable");
error->all(FLERR,"Dump vtk variable {} is not atom-style variable",aname);
field2index[ATTRIBUTES+i] = add_variable(argi.get_name());
field2index[ATTRIBUTES+i] = add_variable(aname);
name[ATTRIBUTES+i] = arg[iarg];
break;
// custom per-atom integer vector = i_ID
case ArgInfo::INAME:
pack_choice[ATTRIBUTES+i] = &DumpVTK::pack_custom;
vtype[ATTRIBUTES+i] = Dump::INT;
tmp = -1;
n = atom->find_custom(argi.get_name(),tmp);
if (n < 0)
error->all(FLERR,"Could not find custom per-atom property ID");
if (tmp != 0)
error->all(FLERR,"Custom per-atom property ID is not integer");
field2index[ATTRIBUTES+i] = add_custom(argi.get_name(),0);
name[ATTRIBUTES+i] = arg[iarg];
break;
// custom per-atom floating point vector = d_ID
// custom per-atom floating point vector or array = d_ID d2_ID
case ArgInfo::DNAME:
pack_choice[ATTRIBUTES+i] = &DumpVTK::pack_custom;
vtype[ATTRIBUTES+i] = Dump::DOUBLE;
tmp = -1;
n = atom->find_custom(argi.get_name(),tmp);
n = atom->find_custom(aname,flag,cols);
if (n < 0)
error->all(FLERR,"Could not find custom per-atom property ID");
if (tmp != 1)
error->all(FLERR,"Custom per-atom property ID is not floating point");
field2index[ATTRIBUTES+i] = add_custom(argi.get_name(),1);
error->all(FLERR,"Could not find custom per-atom property ID: {}", aname);
if (argindex[ATTRIBUTES+i] == 0) {
if (!flag || cols)
error->all(FLERR,"Property double vector {} for dump vtk does not exist",aname);
} else {
if (!flag || !cols)
error->all(FLERR,"Property double array {} for dump vtk does not exist",aname);
if (argindex[ATTRIBUTES+i] > atom->dcols[n])
error->all(FLERR,"Dump vtk property array {} is accessed out-of-range",aname);
}
field2index[ATTRIBUTES+i] = add_custom(aname,1);
name[ATTRIBUTES+i] = arg[iarg];
break;
// NEWSTYLE
// custom per-atom integer array = i2_ID
// custom per-atom integer vector or array = i_ID or i2_ID
case ArgInfo::IARRAY:
return iarg;
case ArgInfo::INAME:
pack_choice[ATTRIBUTES+i] = &DumpVTK::pack_custom;
vtype[ATTRIBUTES+i] = Dump::INT;
// custom per-atom floating point array = d2_ID
n = atom->find_custom(aname,flag,cols);
case ArgInfo::DARRAY:
return iarg;
if (n < 0)
error->all(FLERR,"Could not find custom per-atom property ID: {}", aname);
if (argindex[ATTRIBUTES+i] == 0) {
if (flag || cols)
error->all(FLERR,"Property integer vector {} for dump vtk does not exist",aname);
} else {
if (flag || !cols)
error->all(FLERR,"Property integer array {} for dump vtk does not exist",aname);
if (argindex[ATTRIBUTES+i] > atom->icols[n])
error->all(FLERR,"Dump vtk property array {} is accessed out-of-range",aname);
}
field2index[ATTRIBUTES+i] = add_custom(aname,0);
name[ATTRIBUTES+i] = arg[iarg];
break;
// no match
default:
return iarg;
@ -1948,12 +1999,10 @@ int DumpVTK::add_compute(const char *id)
id_compute = (char **)
memory->srealloc(id_compute,(ncompute+1)*sizeof(char *),"dump:id_compute");
delete [] compute;
delete[] compute;
compute = new Compute*[ncompute+1];
int n = strlen(id) + 1;
id_compute[ncompute] = new char[n];
strcpy(id_compute[ncompute],id);
id_compute[ncompute] = utils::strdup(id);
ncompute++;
return ncompute-1;
}
@ -1973,12 +2022,10 @@ int DumpVTK::add_fix(const char *id)
id_fix = (char **)
memory->srealloc(id_fix,(nfix+1)*sizeof(char *),"dump:id_fix");
delete [] fix;
delete[] fix;
fix = new Fix*[nfix+1];
int n = strlen(id) + 1;
id_fix[nfix] = new char[n];
strcpy(id_fix[nfix],id);
id_fix[nfix] = utils::strdup(id);
nfix++;
return nfix-1;
}
@ -1999,22 +2046,20 @@ int DumpVTK::add_variable(const char *id)
id_variable = (char **)
memory->srealloc(id_variable,(nvariable+1)*sizeof(char *),
"dump:id_variable");
delete [] variable;
delete[] variable;
variable = new int[nvariable+1];
delete [] vbuf;
delete[] vbuf;
vbuf = new double*[nvariable+1];
for (int i = 0; i <= nvariable; i++) vbuf[i] = nullptr;
int n = strlen(id) + 1;
id_variable[nvariable] = new char[n];
strcpy(id_variable[nvariable],id);
id_variable[nvariable] = utils::strdup(id);
nvariable++;
return nvariable-1;
}
/* ----------------------------------------------------------------------
add custom atom property to list used by dump
return index of where this property is in list
return index of where this property is in Atom class custom lists
if already in list, do not add, just return index, else add to list
------------------------------------------------------------------------- */
@ -2022,21 +2067,17 @@ int DumpVTK::add_custom(const char *id, int flag)
{
int icustom;
for (icustom = 0; icustom < ncustom; icustom++)
if ((strcmp(id,id_custom[icustom]) == 0)
&& (flag == flag_custom[icustom])) break;
if (strcmp(id,id_custom[icustom]) == 0) break;
if (icustom < ncustom) return icustom;
id_custom = (char **)
memory->srealloc(id_custom,(ncustom+1)*sizeof(char *),"dump:id_custom");
flag_custom = (int *)
memory->srealloc(flag_custom,(ncustom+1)*sizeof(int),"dump:flag_custom");
int n = strlen(id) + 1;
id_custom[ncustom] = new char[n];
strcpy(id_custom[ncustom],id);
flag_custom[ncustom] = flag;
id_custom = (char **) memory->srealloc(id_custom,(ncustom+1)*sizeof(char *),"dump:id_custom");
custom = (int *) memory->srealloc(custom,(ncustom+1)*sizeof(int),"dump:custom");
custom_flag = (int *) memory->srealloc(custom_flag,(ncustom+1)*sizeof(int),"dump:custom_flag");
id_custom[ncustom] = utils::strdup(id);
custom_flag[ncustom] = flag;
ncustom++;
return ncustom-1;
}
@ -2050,21 +2091,17 @@ int DumpVTK::modify_param(int narg, char **arg)
else {
iregion = domain->find_region(arg[1]);
if (iregion == -1)
error->all(FLERR,"Dump_modify region ID does not exist");
delete [] idregion;
int n = strlen(arg[1]) + 1;
idregion = new char[n];
strcpy(idregion,arg[1]);
error->all(FLERR,"Dump_modify region ID {} does not exist",arg[1]);
delete[] idregion;
idregion = utils::strdup(arg[1]);
}
return 2;
}
if (strcmp(arg[0],"label") == 0) {
if (narg < 2) error->all(FLERR,"Illegal dump_modify command [label]");
delete [] label;
int n = strlen(arg[1]) + 1;
label = new char[n];
strcpy(label,arg[1]);
delete[] label;
label = utils::strdup(arg[1]);
return 2;
}
@ -2078,23 +2115,29 @@ int DumpVTK::modify_param(int narg, char **arg)
if (strcmp(arg[0],"element") == 0) {
if (narg < ntypes+1)
error->all(FLERR,"Dump modify: number of element names do not match atom types");
if (typenames) {
for (int i = 1; i <= ntypes; i++) delete [] typenames[i];
delete [] typenames;
typenames = nullptr;
}
error->all(FLERR,"Number of dump_modify element names does not match number of atom types");
for (int i = 1; i <= ntypes; i++) delete[] typenames[i];
delete[] typenames;
typenames = new char*[ntypes+1];
for (int itype = 1; itype <= ntypes; itype++) {
int n = strlen(arg[itype]) + 1;
typenames[itype] = new char[n];
strcpy(typenames[itype],arg[itype]);
typenames[itype] = utils::strdup(arg[itype]);
}
return ntypes+1;
}
if (strcmp(arg[0],"refresh") == 0) {
if (narg < 2) error->all(FLERR,"Illegal dump_modify command");
ArgInfo argi(arg[1],ArgInfo::COMPUTE);
if ((argi.get_type() != ArgInfo::COMPUTE) || (argi.get_dim() != 0))
error->all(FLERR,"Illegal dump_modify command");
if (refreshflag) error->all(FLERR,"Dump_modify can only have one refresh");
refreshflag = 1;
refresh = argi.copy_name();
return 2;
}
if (strcmp(arg[0],"thresh") == 0) {
if (narg < 2) error->all(FLERR,"Illegal dump_modify command");
if (strcmp(arg[1],"none") == 0) {
@ -2105,8 +2148,16 @@ int DumpVTK::modify_param(int narg, char **arg)
thresh_array = nullptr;
thresh_op = nullptr;
thresh_value = nullptr;
thresh_last = nullptr;
for (int i = 0; i < nthreshlast; i++) {
modify->delete_fix(thresh_fixID[i]);
delete[] thresh_fixID[i];
}
thresh_fix = nullptr;
thresh_fixID = nullptr;
thresh_first = nullptr;
}
nthresh = 0;
nthresh = nthreshlast = 0;
return 2;
}
@ -2117,6 +2168,7 @@ int DumpVTK::modify_param(int narg, char **arg)
memory->grow(thresh_array,nthresh+1,"dump:thresh_array");
memory->grow(thresh_op,(nthresh+1),"dump:thresh_op");
memory->grow(thresh_value,(nthresh+1),"dump:thresh_value");
memory->grow(thresh_last,(nthresh+1),"dump:thresh_last");
// set attribute type of threshold
// customize by adding to if statement
@ -2199,98 +2251,125 @@ int DumpVTK::modify_param(int narg, char **arg)
else if (strcmp(arg[1],"tqy") == 0) thresh_array[nthresh] = TQY;
else if (strcmp(arg[1],"tqz") == 0) thresh_array[nthresh] = TQZ;
// compute value = c_ID
// if no trailing [], then arg is set to 0, else arg is between []
// compute or fix or variable or custom vector/array
else if (strncmp(arg[1],"c_",2) == 0) {
thresh_array[nthresh] = COMPUTE;
int n = strlen(arg[1]);
char *suffix = new char[n];
strcpy(suffix,&arg[1][2]);
else {
int n,flag,cols;
ArgInfo argi(arg[1],ArgInfo::COMPUTE|ArgInfo::FIX|ArgInfo::VARIABLE
|ArgInfo::DNAME|ArgInfo::INAME);
argindex[ATTRIBUTES+nfield+nthresh] = argi.get_index1();
auto aname = argi.get_name();
char *ptr = strchr(suffix,'[');
if (ptr) {
if (suffix[strlen(suffix)-1] != ']')
error->all(FLERR,"Invalid attribute in dump modify command");
argindex[ATTRIBUTES+nfield+nthresh] = atoi(ptr+1);
*ptr = '\0';
} else argindex[ATTRIBUTES+nfield+nthresh] = 0;
switch (argi.get_type()) {
n = modify->find_compute(suffix);
if (n < 0) error->all(FLERR,"Could not find dump modify compute ID");
case ArgInfo::UNKNOWN:
error->all(FLERR,"Invalid attribute in dump modify command");
break;
if (modify->compute[n]->peratom_flag == 0)
error->all(FLERR,
"Dump modify compute ID does not compute per-atom info");
if (argindex[ATTRIBUTES+nfield+nthresh] == 0 &&
modify->compute[n]->size_peratom_cols > 0)
error->all(FLERR,
"Dump modify compute ID does not compute per-atom vector");
if (argindex[ATTRIBUTES+nfield+nthresh] > 0 &&
modify->compute[n]->size_peratom_cols == 0)
error->all(FLERR,
"Dump modify compute ID does not compute per-atom array");
if (argindex[ATTRIBUTES+nfield+nthresh] > 0 &&
argindex[ATTRIBUTES+nfield+nthresh] > modify->compute[n]->size_peratom_cols)
error->all(FLERR,"Dump modify compute ID vector is not large enough");
// compute value = c_ID
// if no trailing [], then arg is set to 0, else arg is between []
field2index[ATTRIBUTES+nfield+nthresh] = add_compute(suffix);
delete [] suffix;
case ArgInfo::COMPUTE:
thresh_array[nthresh] = COMPUTE;
n = modify->find_compute(aname);
if (n < 0) error->all(FLERR,"Could not find dump modify compute ID: {}",aname);
// fix value = f_ID
// if no trailing [], then arg is set to 0, else arg is between []
if (modify->compute[n]->peratom_flag == 0)
error->all(FLERR,"Dump modify compute ID {} does not compute per-atom info",aname);
if (argi.get_dim() == 0 && modify->compute[n]->size_peratom_cols > 0)
error->all(FLERR,"Dump modify compute ID {} does not compute per-atom vector",aname);
if (argi.get_index1() > 0 && modify->compute[n]->size_peratom_cols == 0)
error->all(FLERR,"Dump modify compute ID {} does not compute per-atom array",aname);
if (argi.get_index1() > 0 &&
argi.get_index1() > modify->compute[n]->size_peratom_cols)
error->all(FLERR,"Dump modify compute ID {} vector is not large enough",aname);
} else if (strncmp(arg[1],"f_",2) == 0) {
thresh_array[nthresh] = FIX;
int n = strlen(arg[1]);
char *suffix = new char[n];
strcpy(suffix,&arg[1][2]);
field2index[ATTRIBUTES+nfield+nthresh] = add_compute(aname);
break;
char *ptr = strchr(suffix,'[');
if (ptr) {
if (suffix[strlen(suffix)-1] != ']')
error->all(FLERR,"Invalid attribute in dump modify command");
argindex[ATTRIBUTES+nfield+nthresh] = atoi(ptr+1);
*ptr = '\0';
} else argindex[ATTRIBUTES+nfield+nthresh] = 0;
// fix value = f_ID
// if no trailing [], then arg is set to 0, else arg is between []
n = modify->find_fix(suffix);
if (n < 0) error->all(FLERR,"Could not find dump modify fix ID");
case ArgInfo::FIX:
thresh_array[nthresh] = FIX;
n = modify->find_fix(aname);
if (n < 0) error->all(FLERR,"Could not find dump modify fix ID: {}",aname);
if (modify->fix[n]->peratom_flag == 0)
error->all(FLERR,"Dump modify fix ID does not compute per-atom info");
if (argindex[ATTRIBUTES+nfield+nthresh] == 0 &&
modify->fix[n]->size_peratom_cols > 0)
error->all(FLERR,"Dump modify fix ID does not compute per-atom vector");
if (argindex[ATTRIBUTES+nfield+nthresh] > 0 &&
modify->fix[n]->size_peratom_cols == 0)
error->all(FLERR,"Dump modify fix ID does not compute per-atom array");
if (argindex[ATTRIBUTES+nfield+nthresh] > 0 &&
argindex[ATTRIBUTES+nfield+nthresh] > modify->fix[n]->size_peratom_cols)
error->all(FLERR,"Dump modify fix ID vector is not large enough");
if (modify->fix[n]->peratom_flag == 0)
error->all(FLERR,"Dump modify fix ID {} does not compute per-atom info",aname);
if (argi.get_dim() == 0 && modify->fix[n]->size_peratom_cols > 0)
error->all(FLERR,"Dump modify fix ID {} does not compute per-atom vector",aname);
if (argi.get_index1() > 0 && modify->fix[n]->size_peratom_cols == 0)
error->all(FLERR,"Dump modify fix ID {} does not compute per-atom array",aname);
if (argi.get_index1() > 0 && argi.get_index1() > modify->fix[n]->size_peratom_cols)
error->all(FLERR,"Dump modify fix ID {} vector is not large enough",aname);
field2index[ATTRIBUTES+nfield+nthresh] = add_fix(suffix);
delete [] suffix;
field2index[ATTRIBUTES+nfield+nthresh] = add_fix(aname);
break;
// variable value = v_ID
// variable value = v_ID
} else if (strncmp(arg[1],"v_",2) == 0) {
thresh_array[nthresh] = VARIABLE;
int n = strlen(arg[1]);
char *suffix = new char[n];
strcpy(suffix,&arg[1][2]);
case ArgInfo::VARIABLE:
thresh_array[nthresh] = VARIABLE;
n = input->variable->find(aname);
if (n < 0) error->all(FLERR,"Could not find dump modify variable name: {}",aname);
if (input->variable->atomstyle(n) == 0)
error->all(FLERR,"Dump modify variable {} is not atom-style variable",aname);
argindex[ATTRIBUTES+nfield+nthresh] = 0;
field2index[ATTRIBUTES+nfield+nthresh] = add_variable(aname);
break;
n = input->variable->find(suffix);
if (n < 0) error->all(FLERR,"Could not find dump modify variable name");
if (input->variable->atomstyle(n) == 0)
error->all(FLERR,"Dump modify variable is not atom-style variable");
// custom per atom floating point vector or array
field2index[ATTRIBUTES+nfield+nthresh] = add_variable(suffix);
delete [] suffix;
case ArgInfo::DNAME:
n = atom->find_custom(aname,flag,cols);
} else error->all(FLERR,"Invalid dump_modify threshold operator");
if (n < 0)
error->all(FLERR,"Could not find custom per-atom property ID: {}", aname);
if (argindex[ATTRIBUTES+nfield+nthresh] == 0) {
if (!flag || cols)
error->all(FLERR,"Property double vector for dump custom does not exist");
thresh_array[nthresh] = DVEC;
} else {
if (!flag || !cols)
error->all(FLERR,"Property double array for dump custom does not exist");
if (argindex[ATTRIBUTES+nfield+nthresh] > atom->dcols[n])
error->all(FLERR,"Dump custom property array is accessed out-of-range");
thresh_array[nthresh] = DARRAY;
}
field2index[ATTRIBUTES+nfield+nthresh] = add_custom(aname,thresh_array[nthresh]);
break;
// custom per atom integer vector or array
case ArgInfo::INAME:
n = atom->find_custom(aname,flag,cols);
if (n < 0)
error->all(FLERR,"Could not find custom per-atom property ID: {}", aname);
if (argindex[ATTRIBUTES+nfield+nthresh] == 0) {
if (flag || cols)
error->all(FLERR,"Property integer vector for dump custom does not exist");
thresh_array[nthresh] = IVEC;
} else {
if (flag || !cols)
error->all(FLERR,"Property integer array for dump custom does not exist");
if (argindex[ATTRIBUTES+nfield+nthresh] > atom->icols[n])
error->all(FLERR,"Dump custom property array is accessed out-of-range");
thresh_array[nthresh] = IARRAY;
}
field2index[ATTRIBUTES+nfield+nthresh] = add_custom(aname,thresh_array[nthresh]);
break;
// no match
default:
error->all(FLERR,"Invalid dump_modify thresh attribute: {}",aname);
break;
}
}
// set operation type of threshold
@ -2300,11 +2379,32 @@ int DumpVTK::modify_param(int narg, char **arg)
else if (strcmp(arg[2],">=") == 0) thresh_op[nthresh] = GE;
else if (strcmp(arg[2],"==") == 0) thresh_op[nthresh] = EQ;
else if (strcmp(arg[2],"!=") == 0) thresh_op[nthresh] = NEQ;
else error->all(FLERR,"Invalid dump_modify threshold operator");
else if (strcmp(arg[2],"|^") == 0) thresh_op[nthresh] = XOR;
else error->all(FLERR,"Invalid dump_modify thresh operator");
// set threshold value
// set threshold value as number or special LAST keyword
// create FixStore to hold LAST values, should work with restart
// id = dump-ID + nthreshlast + DUMP_STORE, fix group = dump group
thresh_value[nthresh] = utils::numeric(FLERR,arg[3],false,lmp);
if (strcmp(arg[3],"LAST") != 0) {
thresh_value[nthresh] = utils::numeric(FLERR,arg[3],false,lmp);
thresh_last[nthresh] = -1;
} else {
thresh_fix = (FixStore **)
memory->srealloc(thresh_fix,(nthreshlast+1)*sizeof(FixStore *),"dump:thresh_fix");
thresh_fixID = (char **)
memory->srealloc(thresh_fixID,(nthreshlast+1)*sizeof(char *),"dump:thresh_fixID");
memory->grow(thresh_first,(nthreshlast+1),"dump:thresh_first");
std::string threshid = fmt::format("{}{}_DUMP_STORE",id,nthreshlast);
thresh_fixID[nthreshlast] = utils::strdup(threshid);
threshid += fmt::format(" {} STORE peratom 1 1", group->names[igroup]);
thresh_fix[nthreshlast] = (FixStore *) modify->add_fix(threshid);
thresh_last[nthreshlast] = nthreshlast;
thresh_first[nthreshlast] = 1;
nthreshlast++;
}
nthresh++;
return 4;
@ -2389,25 +2489,35 @@ void DumpVTK::pack_variable(int n)
void DumpVTK::pack_custom(int n)
{
int index = field2index[n];
if (flag_custom[index] == 0) { // integer
int iwhich,tmp;
iwhich = atom->find_custom(id_custom[index],tmp);
int flag = custom_flag[field2index[current_pack_choice_key]];
int iwhich = custom[field2index[current_pack_choice_key]];
int index = argindex[current_pack_choice_key];
if (flag == IVEC) {
int *ivector = atom->ivector[iwhich];
for (int i = 0; i < nchoose; i++) {
buf[n] = ivector[clist[i]];
n += size_one;
}
} else if (flag_custom[index] == 1) { // double
int iwhich,tmp;
iwhich = atom->find_custom(id_custom[index],tmp);
} else if (flag == DVEC) {
double *dvector = atom->dvector[iwhich];
for (int i = 0; i < nchoose; i++) {
buf[n] = dvector[clist[i]];
n += size_one;
}
} else if (flag == IARRAY) {
index--;
int **iarray = atom->iarray[iwhich];
for (int i = 0; i < nchoose; i++) {
buf[n] = iarray[clist[i]][index];
n += size_one;
}
} else if (flag == DARRAY) {
index--;
double **darray = atom->darray[iwhich];
for (int i = 0; i < nchoose; i++) {
buf[n] = darray[clist[i]][index];
n += size_one;
}
}
}

View File

@ -198,6 +198,8 @@ void ComputeOrientOrderAtom::init()
error->all(FLERR,"Compute orientorder/atom cutoff is "
"longer than pairwise cutoff");
memory->destroy(qnm_r);
memory->destroy(qnm_i);
memory->create(qnm_r,nqlist,2*qmax+1,"orientorder/atom:qnm_r");
memory->create(qnm_i,nqlist,2*qmax+1,"orientorder/atom:qnm_i");
@ -669,6 +671,7 @@ void ComputeOrientOrderAtom::init_clebsch_gordan()
idxcg_count++;
}
idxcg_max = idxcg_count;
memory->destroy(cglist);
memory->create(cglist, idxcg_max, "computeorientorderatom:cglist");
idxcg_count = 0;

View File

@ -371,16 +371,6 @@ void Dump::write()
if (multiproc != nprocs) MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,world);
else nmax = nme;
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
if (multiproc)
MPI_Allreduce(&bnme,&nheader,1,MPI_LMP_BIGINT,MPI_SUM,clustercomm);
if (filewriter && write_header_flag) write_header(nheader);
// insure buf is sized for packing and communicating
// use nmax to insure filewriter proc can receive info from others
// limit nmax*size_one to int since used as arg in MPI calls
@ -433,6 +423,19 @@ void Dump::write()
else pack(nullptr);
if (sort_flag) sort();
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
// must come after sort, which can change nme
bigint nheader = ntotal;
if (multiproc) {
bnme = nme;
MPI_Allreduce(&bnme,&nheader,1,MPI_LMP_BIGINT,MPI_SUM,clustercomm);
}
if (filewriter && write_header_flag) write_header(nheader);
// if buffering, convert doubles into strings
// insure sbuf is sized for communicating
// cannot buffer if output is to binary file

View File

@ -131,7 +131,7 @@ DumpCustom::DumpCustom(LAMMPS *lmp, int narg, char **arg) :
if (ioptional < nfield &&
strcmp(style,"image") != 0 && strcmp(style,"movie") != 0)
error->all(FLERR,"Invalid attribute in dump custom command");
error->all(FLERR,"Invalid attribute {} in dump {} command",earg[ioptional],style);
// noptional = # of optional args
// reset nfield to subtract off optional args
@ -238,9 +238,8 @@ DumpCustom::~DumpCustom()
for (int i = 0; i < ncustom; i++) delete[] id_custom[i];
memory->sfree(id_custom);
delete [] custom;
delete [] custom_flag;
memory->sfree(custom);
memory->sfree(custom_flag);
memory->destroy(choose);
memory->destroy(dchoose);
memory->destroy(clist);
@ -909,32 +908,27 @@ int DumpCustom::count()
} else if (thresh_array[ithresh] == Q) {
if (!atom->q_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = atom->q;
nstride = 1;
} else if (thresh_array[ithresh] == MUX) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][0];
nstride = 4;
} else if (thresh_array[ithresh] == MUY) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][1];
nstride = 4;
} else if (thresh_array[ithresh] == MUZ) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][2];
nstride = 4;
} else if (thresh_array[ithresh] == MU) {
if (!atom->mu_flag)
error->all(FLERR,
"Threshold for an atom property that isn't allocated");
error->all(FLERR,"Threshold for an atom property that isn't allocated");
ptr = &atom->mu[0][3];
nstride = 4;
@ -1521,7 +1515,7 @@ int DumpCustom::parse_fields(int narg, char **arg)
field2index[iarg] = add_variable(name);
break;
// custom per-atom floating point vector or array
// custom per-atom floating point vector or array = d_ID d2_ID
case ArgInfo::DNAME:
pack_choice[iarg] = &DumpCustom::pack_custom;
@ -1533,18 +1527,18 @@ int DumpCustom::parse_fields(int narg, char **arg)
error->all(FLERR,"Could not find custom per-atom property ID: {}", name);
if (argindex[iarg] == 0) {
if (!flag || cols)
error->all(FLERR,"Property double vector for dump custom does not exist");
error->all(FLERR,"Property double vector {} for dump custom does not exist",name);
} else {
if (!flag || !cols)
error->all(FLERR,"Property double array for dump custom does not exist");
error->all(FLERR,"Property double array {} for dump custom does not exist",name);
if (argindex[iarg] > atom->dcols[n])
error->all(FLERR,"Dump custom property array is accessed out-of-range");
error->all(FLERR,"Dump custom property array {} is accessed out-of-range",name);
}
field2index[iarg] = add_custom(name,1);
break;
// custom per-atom integer vector or array
// custom per-atom integer vector or array = i_ID or i2_ID
case ArgInfo::INAME:
pack_choice[iarg] = &DumpCustom::pack_custom;
@ -1556,15 +1550,12 @@ int DumpCustom::parse_fields(int narg, char **arg)
error->all(FLERR,"Could not find custom per-atom property ID: {}", name);
if (argindex[iarg] == 0) {
if (flag || cols)
error->all(FLERR,
"Property integer vector for dump custom does not exist");
error->all(FLERR,"Property integer vector {} for dump custom does not exist",name);
} else {
if (flag || !cols)
error->all(FLERR,
"Property integer array for dump custom does not exist");
error->all(FLERR,"Property integer array {} for dump custom does not exist",name);
if (argindex[iarg] > atom->icols[n])
error->all(FLERR,
"Dump custom property array is accessed out-of-range");
error->all(FLERR,"Dump custom property array {} is accessed out-of-range",name);
}
field2index[iarg] = add_custom(name,0);
@ -1574,6 +1565,7 @@ int DumpCustom::parse_fields(int narg, char **arg)
default:
return iarg;
break;
}
}
}
@ -1667,13 +1659,9 @@ int DumpCustom::add_custom(const char *id, int flag)
if (strcmp(id,id_custom[icustom]) == 0) break;
if (icustom < ncustom) return icustom;
id_custom = (char **)
memory->srealloc(id_custom,(ncustom+1)*sizeof(char *),"dump:id_custom");
delete [] custom;
custom = new int[ncustom+1];
delete [] custom_flag;
custom_flag = new int[ncustom+1];
id_custom = (char **) memory->srealloc(id_custom,(ncustom+1)*sizeof(char *),"dump:id_custom");
custom = (int *) memory->srealloc(custom,(ncustom+1)*sizeof(int),"dump:custom");
custom_flag = (int *) memory->srealloc(custom_flag,(ncustom+1)*sizeof(int),"dump:custom_flag");
id_custom[ncustom] = utils::strdup(id);
custom_flag[ncustom] = flag;
@ -1962,21 +1950,18 @@ int DumpCustom::modify_param(int narg, char **arg)
if (n < 0)
error->all(FLERR,"Could not find custom per-atom property ID: {}", name);
if (argindex[nfield+nthresh] == 0) {
if (flag || cols)
error->all(FLERR,
"Property double vector for dump custom does not exist");
if (!flag || cols)
error->all(FLERR,"Property double vector for dump custom does not exist");
thresh_array[nthresh] = DVEC;
} else {
if (flag || !cols)
error->all(FLERR,
"Property double array for dump custom does not exist");
if (!flag || !cols)
error->all(FLERR,"Property double array for dump custom does not exist");
if (argindex[nfield+nthresh] > atom->dcols[n])
error->all(FLERR,
"Dump custom property array is accessed out-of-range");
error->all(FLERR,"Dump custom property array is accessed out-of-range");
thresh_array[nthresh] = DARRAY;
}
field2index[nfield+nthresh] = add_custom(name,0);
field2index[nfield+nthresh] = add_custom(name,thresh_array[nthresh]);
break;
// custom per atom integer vector or array
@ -1988,20 +1973,17 @@ int DumpCustom::modify_param(int narg, char **arg)
error->all(FLERR,"Could not find custom per-atom property ID: {}", name);
if (argindex[nfield+nthresh] == 0) {
if (flag || cols)
error->all(FLERR,
"Property integer vector for dump custom does not exist");
error->all(FLERR,"Property integer vector for dump custom does not exist");
thresh_array[nthresh] = IVEC;
} else {
if (flag || !cols)
error->all(FLERR,
"Property integer array for dump custom does not exist");
error->all(FLERR,"Property integer array for dump custom does not exist");
if (argindex[nfield+nthresh] > atom->icols[n])
error->all(FLERR,
"Dump custom property array is accessed out-of-range");
error->all(FLERR,"Dump custom property array is accessed out-of-range");
thresh_array[nthresh] = IARRAY;
}
field2index[nfield+nthresh] = add_custom(name,0);
field2index[nfield+nthresh] = add_custom(name,thresh_array[nthresh]);
break;
// no match

View File

@ -195,7 +195,7 @@ void Error::one(const std::string &file, int line, const std::string &str)
MPI_Comm_rank(world,&me);
if (input && input->line) lastcmd = input->line;
std::string mesg = fmt::format("ERROR on proc {}: {} ({}:{})\n",
std::string mesg = fmt::format("ERROR on proc {}: {} ({}:{})\nLast command: {}\n",
me,str,truncpath(file),line,lastcmd);
utils::logmesg(lmp,mesg);

View File

@ -111,9 +111,9 @@ FixVector::FixVector(LAMMPS *lmp, int narg, char **arg) :
for (int i = 0; i < nvalues; i++) {
if (which[i] == ArgInfo::COMPUTE) {
Compute *compute = modify->compute[modify->find_compute(ids[i])];
if (argindex[0] == 0) value = compute->extscalar;
if (argindex[i] == 0) value = compute->extscalar;
else if (compute->extvector >= 0) value = compute->extvector;
else value = compute->extlist[argindex[0]-1];
else value = compute->extlist[argindex[i]-1];
} else if (which[i] == ArgInfo::FIX) {
Fix *fix = modify->fix[modify->find_fix(ids[i])];
if (argindex[i] == 0) value = fix->extvector;

View File

@ -299,6 +299,7 @@ void Info::command(int narg, char **arg)
if (has_png_support()) fputs("-DLAMMPS_PNG\n",out);
if (has_jpeg_support()) fputs("-DLAMMPS_JPEG\n",out);
if (has_ffmpeg_support()) fputs("-DLAMMPS_FFMPEG\n",out);
if (has_fft_single_support()) fputs("-DFFT_SINGLE\n",out);
if (has_exceptions()) fputs("-DLAMMPS_EXCEPTIONS\n",out);
#if defined(LAMMPS_BIGBIG)
@ -895,6 +896,8 @@ bool Info::is_available(const char *category, const char *name)
return has_jpeg_support();
} else if (strcmp(name,"ffmpeg") == 0) {
return has_ffmpeg_support();
} else if (strcmp(name,"fft_single") == 0) {
return has_fft_single_support();
} else if (strcmp(name,"exceptions") == 0) {
return has_exceptions();
}
@ -1143,6 +1146,14 @@ bool Info::has_ffmpeg_support() {
#endif
}
bool Info::has_fft_single_support() {
#ifdef FFT_SINGLE
return true;
#else
return false;
#endif
}
bool Info::has_exceptions() {
#ifdef LAMMPS_EXCEPTIONS
return true;
@ -1162,12 +1173,14 @@ bool Info::has_package(const std::string &package_name) {
#if defined(LMP_GPU)
extern bool lmp_gpu_config(const std::string &, const std::string &);
extern bool lmp_has_gpu_device();
extern bool lmp_has_compatible_gpu_device();
extern std::string lmp_gpu_device_info();
// we will only report compatible GPUs, i.e. when a GPU device is
// available *and* supports the required floating point precision
bool Info::has_gpu_device()
{
return lmp_has_gpu_device();
return lmp_has_compatible_gpu_device();
}
std::string Info::get_gpu_device_info()

View File

@ -42,6 +42,7 @@ class Info : public Command {
static bool has_png_support();
static bool has_jpeg_support();
static bool has_ffmpeg_support();
static bool has_fft_single_support();
static bool has_exceptions();
static bool has_package(const std::string &);
static bool has_accelerator_feature(const std::string &, const std::string &,

View File

@ -51,6 +51,10 @@
#include "update.h"
#include "version.h"
#if defined(LMP_PLUGIN)
#include "plugin.h"
#endif
#include <cctype>
#include <cmath>
#include <cstring>
@ -65,6 +69,12 @@
#include "lmpinstalledpkgs.h"
#include "lmpgitversion.h"
#if defined(LAMMPS_UPDATE)
#define UPDATE_STRING " - " LAMMPS_UPDATE
#else
#define UPDATE_STRING ""
#endif
static void print_style(FILE *fp, const char *str, int &pos);
struct LAMMPS_NS::package_styles_lists {
@ -511,7 +521,7 @@ LAMMPS::LAMMPS(int narg, char **arg, MPI_Comm communicator) :
}
if ((universe->me == 0) && !helpflag)
utils::logmesg(this,fmt::format("LAMMPS ({})\n",version));
utils::logmesg(this,fmt::format("LAMMPS ({}{})\n",version,UPDATE_STRING));
// universe is one or more worlds, as setup by partition switch
// split universe communicator into separate world communicators
@ -905,6 +915,10 @@ void LAMMPS::init()
void LAMMPS::destroy()
{
// must wipe out all plugins first, if configured
#if defined(LMP_PLUGIN)
plugin_clear(this);
#endif
delete update;
update = nullptr;
@ -1147,10 +1161,10 @@ void _noopt LAMMPS::help()
if (has_git_info) {
fprintf(fp,"\nLarge-scale Atomic/Molecular Massively Parallel Simulator - "
LAMMPS_VERSION "\nGit info (%s / %s)\n\n",git_branch, git_descriptor);
LAMMPS_VERSION UPDATE_STRING "\nGit info (%s / %s)\n\n",git_branch, git_descriptor);
} else {
fprintf(fp,"\nLarge-scale Atomic/Molecular Massively Parallel Simulator - "
LAMMPS_VERSION "\n\n");
LAMMPS_VERSION UPDATE_STRING "\n\n");
}
fprintf(fp,
"Usage example: %s -var t 300 -echo screen -in in.alloy\n\n"
@ -1350,7 +1364,7 @@ void LAMMPS::print_config(FILE *fp)
fmt::print(fp,"Accelerator configuration:\n\n{}\n",
Info::get_accelerator_info());
#if defined(LMP_GPU)
fmt::print(fp,"GPU present: {}\n\n",Info::has_gpu_device() ? "yes" : "no");
fmt::print(fp,"Compatible GPU present: {}\n\n",Info::has_gpu_device() ? "yes" : "no");
#endif
fputs("Active compile time flags:\n\n",fp);
@ -1358,6 +1372,7 @@ void LAMMPS::print_config(FILE *fp)
if (Info::has_png_support()) fputs("-DLAMMPS_PNG\n",fp);
if (Info::has_jpeg_support()) fputs("-DLAMMPS_JPEG\n",fp);
if (Info::has_ffmpeg_support()) fputs("-DLAMMPS_FFMPEG\n",fp);
if (Info::has_fft_single_support()) fputs("-DFFT_SINGLE\n",fp);
if (Info::has_exceptions()) fputs("-DLAMMPS_EXCEPTIONS\n",fp);
#if defined(LAMMPS_BIGBIG)
fputs("-DLAMMPS_BIGBIG\n",fp);

View File

@ -676,7 +676,10 @@ int utils::expand_args(const char *file, int line, int narg, char **arg, int mod
}
for (int index = nlo; index <= nhi; index++) {
earg[newarg] = utils::strdup(fmt::format("{}2_{}[{}]{}", word[0], id, index, tail));
if (word[1] == '2')
earg[newarg] = utils::strdup(fmt::format("{}2_{}[{}]{}", word[0], id, index, tail));
else
earg[newarg] = utils::strdup(fmt::format("{}_{}[{}]{}", word[0], id, index, tail));
newarg++;
}
} else {

View File

@ -1 +1,2 @@
#define LAMMPS_VERSION "29 Sep 2021"
#define LAMMPS_UPDATE "Update 2"

View File

@ -269,7 +269,7 @@ void WriteRestart::write(const std::string &file)
double *buf;
memory->create(buf,max_size,"write_restart:buf");
memset(buf,0,max_size*sizeof(buf));
memset(buf,0,max_size*sizeof(double));
// all procs write file layout info which may include per-proc sizes

View File

@ -8,12 +8,12 @@ fi
export GIT_CONFIG_COUNT=1
export GIT_CONFIG_KEY_0="url.$GITHUB_PROXY_DIR/.insteadOf"
export GIT_CONFIG_VALUE_0=git://github.com/
export GIT_CONFIG_VALUE_0=https://github.com/
echo "Redirecting git://github.com urls to local cache..."
echo "Redirecting https://github.com urls to local cache..."
function deactivate_git_cache {
echo "Removing git://github.com redirect..."
echo "Removing https://github.com redirect..."
unset GIT_CONFIG_COUNT
unset GIT_CONFIG_KEY_0
unset GIT_CONFIG_VALUE_0

View File

@ -63,7 +63,7 @@ echo "or"
echo
echo "-D LAMMPS_DOWNLOADS_URL=${HTTP_CACHE_URL} -C \"${LAMMPS_HTTP_CACHE_CONFIG}\""
echo
echo "pip installations and git clones (from git://) are automatically redirected"
echo "pip installations and git clones (from https://) are automatically redirected"
echo
echo Use 'deactivate_caches' to revert changes
echo

View File

@ -21,7 +21,7 @@ built CentOS 7.x singularity container.
```
cd some/work/directory
git clone --depth 500 git://github.com/lammps/lammps.git lammps
git clone --depth 500 https://github.com/lammps/lammps.git lammps
mkdir build-centos7
cd build-centos7
sudo singularity build centos7.sif ../tools/singularity/centos7.def

View File

@ -8,7 +8,7 @@ add_test(NAME RunLammps
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
set_tests_properties(RunLammps PROPERTIES
ENVIRONMENT "TSAN_OPTIONS=ignore_noninstrumented_modules=1;HWLOC_HIDE_ERRORS=1"
PASS_REGULAR_EXPRESSION "^LAMMPS \\([0-9]+ [A-Za-z]+ 2[0-9][0-9][0-9]\\)")
PASS_REGULAR_EXPRESSION "LAMMPS \\([0-9]+ [A-Za-z]+ 2[0-9][0-9][0-9]( - Update [0-9]+)?\\)")
# check if the compiled executable will print the help message
add_test(NAME HelpMessage
@ -31,6 +31,10 @@ if(BUILD_MPI)
set(MPI_TEST_NUM_PROCS 1)
set(MPI_TEST_WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
cmake_parse_arguments(MPI_TEST "" "NAME;NUM_PROCS;WORKING_DIRECTORY" "COMMAND" ${ARGN})
# Do not add test when oversubscribing
if(MPI_TEST_NUMPROCS GREATER MPIEXEC_MAX_NUMPROCS)
return()
endif()
list(GET MPI_TEST_COMMAND 0 EXECUTABLE)
list(REMOVE_AT MPI_TEST_COMMAND 0)
set(ARGS ${MPI_TEST_COMMAND})

View File

@ -246,7 +246,7 @@ TEST_F(GroupTest, Molecular)
ASSERT_DOUBLE_EQ(group->mass(group->find("half")), 40);
ASSERT_DOUBLE_EQ(group->mass(group->find("half"), domain->find_region("top")), 10);
ASSERT_NEAR(group->charge(group->find("top")), 0, 1.0e-14);
ASSERT_DOUBLE_EQ(group->charge(group->find("right"), domain->find_region("top")), 0);
ASSERT_NEAR(group->charge(group->find("right"), domain->find_region("top")), 0, 1.0e-14);
TEST_FAILURE(".*ERROR: Illegal group command.*", command("group three include xxx"););
}

View File

@ -858,6 +858,13 @@ TEST(PairStyle, gpu)
if (!Info::has_gpu_device()) GTEST_SKIP();
if (test_config.skip_tests.count(test_info_->name())) GTEST_SKIP();
// when testing PPPM styles with GPUs and GPU support is compiled with single precision
// we also must have single precision FFTs; otherwise skip since the test would abort
if (utils::strmatch(test_config.basename, ".*pppm.*") &&
(Info::has_accelerator_feature("GPU", "precision", "single")) &&
(!Info::has_fft_single_support()))
GTEST_SKIP();
const char *args_neigh[] = {"PairStyle", "-log", "none", "-echo",
"screen", "-nocite", "-sf", "gpu"};
const char *args_noneigh[] = {"PairStyle", "-log", "none", "-echo", "screen", "-nocite", "-sf",

View File

@ -14,7 +14,7 @@ post_commands: ! ""
input_file: in.bilayer
pair_style: hybrid/overlay lebedeva/z 16.0
pair_coeff: ! |
* * lebedeva/z CC.Lebedeva C C C
* * lebedeva/z CC.Lebedeva C1 C1 C1
extract: ! ""
natoms: 48
init_vdwl: 2360.887727742073

View File

@ -39,6 +39,21 @@ public:
END_HIDE_OUTPUT();
}
std::string dump_filename(std::string ident)
{
return fmt::format("dump_{}_{}.melt", dump_style, ident);
}
std::string text_dump_filename(std::string ident)
{
return fmt::format("dump_{}_text_{}.melt", dump_style, ident);
}
std::string binary_dump_filename(std::string ident)
{
return fmt::format("dump_{}_binary_{}.melt.bin", dump_style, ident);
}
void generate_dump(std::string dump_file, std::string dump_modify_options, int ntimesteps)
{
BEGIN_HIDE_OUTPUT();
@ -87,7 +102,7 @@ public:
TEST_F(DumpAtomTest, run0)
{
auto dump_file = "dump_run0.melt";
auto dump_file = dump_filename("run0");
generate_dump(dump_file, "scale yes image no", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -103,7 +118,7 @@ TEST_F(DumpAtomTest, run0)
TEST_F(DumpAtomTest, format_line_run0)
{
auto dump_file = "dump_format_line_run0.melt";
auto dump_file = dump_filename("format_line_run0");
generate_dump(dump_file, "format line \"%d %d %20.15g %g %g\" scale yes image no", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -119,7 +134,7 @@ TEST_F(DumpAtomTest, format_line_run0)
TEST_F(DumpAtomTest, no_scale_run0)
{
auto dump_file = "dump_no_scale_run0.melt";
auto dump_file = dump_filename("no_scale_run0");
generate_dump(dump_file, "scale no", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -134,7 +149,7 @@ TEST_F(DumpAtomTest, no_scale_run0)
TEST_F(DumpAtomTest, no_buffer_no_scale_run0)
{
auto dump_file = "dump_no_buffer_no_scale_run0.melt";
auto dump_file = dump_filename("no_buffer_no_scale_run0");
generate_dump(dump_file, "buffer no scale no", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -149,7 +164,7 @@ TEST_F(DumpAtomTest, no_buffer_no_scale_run0)
TEST_F(DumpAtomTest, no_buffer_with_scale_run0)
{
auto dump_file = "dump_no_buffer_with_scale_run0.melt";
auto dump_file = dump_filename("no_buffer_with_scale_run0");
generate_dump(dump_file, "buffer no scale yes", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -164,7 +179,7 @@ TEST_F(DumpAtomTest, no_buffer_with_scale_run0)
TEST_F(DumpAtomTest, with_image_run0)
{
auto dump_file = "dump_with_image_run0.melt";
auto dump_file = dump_filename("with_image_run0");
generate_dump(dump_file, "scale no image yes", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -177,7 +192,7 @@ TEST_F(DumpAtomTest, with_image_run0)
TEST_F(DumpAtomTest, with_units_run0)
{
auto dump_file = "dump_with_units_run0.melt";
auto dump_file = dump_filename("with_units_run0");
generate_dump(dump_file, "scale no units yes", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -192,7 +207,7 @@ TEST_F(DumpAtomTest, with_units_run0)
TEST_F(DumpAtomTest, with_time_run0)
{
auto dump_file = "dump_with_time_run0.melt";
auto dump_file = dump_filename("with_time_run0");
generate_dump(dump_file, "scale no time yes", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -206,7 +221,7 @@ TEST_F(DumpAtomTest, with_time_run0)
TEST_F(DumpAtomTest, with_units_run1)
{
auto dump_file = "dump_with_units_run1.melt";
auto dump_file = dump_filename("with_units_run1");
generate_dump(dump_file, "scale no units yes", 1);
ASSERT_FILE_EXISTS(dump_file);
@ -221,7 +236,7 @@ TEST_F(DumpAtomTest, with_units_run1)
TEST_F(DumpAtomTest, no_buffer_with_scale_and_image_run0)
{
auto dump_file = "dump_no_buffer_with_scale_and_image_run0.melt";
auto dump_file = dump_filename("no_buffer_with_scale_and_image_run0");
generate_dump(dump_file, "buffer no scale yes image yes", 0);
ASSERT_FILE_EXISTS(dump_file);
@ -235,7 +250,7 @@ TEST_F(DumpAtomTest, no_buffer_with_scale_and_image_run0)
}
TEST_F(DumpAtomTest, triclinic_run0)
{
auto dump_file = "dump_triclinic_run0.melt";
auto dump_file = dump_filename("triclinic_run0");
enable_triclinic();
generate_dump(dump_file, "", 0);
@ -251,7 +266,7 @@ TEST_F(DumpAtomTest, triclinic_run0)
TEST_F(DumpAtomTest, triclinic_with_units_run0)
{
auto dump_file = "dump_triclinic_with_units_run0.melt";
auto dump_file = dump_filename("triclinic_with_units_run0");
enable_triclinic();
generate_dump(dump_file, "units yes", 0);
@ -269,7 +284,7 @@ TEST_F(DumpAtomTest, triclinic_with_units_run0)
TEST_F(DumpAtomTest, triclinic_with_time_run0)
{
auto dump_file = "dump_triclinic_with_time_run0.melt";
auto dump_file = dump_filename("triclinic_with_time_run0");
enable_triclinic();
generate_dump(dump_file, "time yes", 0);
@ -286,7 +301,7 @@ TEST_F(DumpAtomTest, triclinic_with_time_run0)
TEST_F(DumpAtomTest, triclinic_with_image_run0)
{
auto dump_file = "dump_triclinic_with_image_run0.melt";
auto dump_file = dump_filename("triclinic_with_image_run0");
enable_triclinic();
generate_dump(dump_file, "image yes", 0);
@ -308,8 +323,8 @@ TEST_F(DumpAtomTest, binary_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_run0.melt";
auto binary_file = "dump_binary_run0.melt.bin";
auto text_file = text_dump_filename("run0");
auto binary_file = binary_dump_filename("run0");
generate_text_and_binary_dump(text_file, binary_file, "", 0);
@ -329,8 +344,8 @@ TEST_F(DumpAtomTest, binary_with_units_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_with_units_run0.melt";
auto binary_file = "dump_binary_with_units_run0.melt.bin";
auto text_file = text_dump_filename("with_units_run0");
auto binary_file = binary_dump_filename("with_units_run0");
generate_text_and_binary_dump(text_file, binary_file, "scale no units yes", 0);
@ -350,8 +365,8 @@ TEST_F(DumpAtomTest, binary_with_time_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_with_time_run0.melt";
auto binary_file = "dump_binary_with_time_run0.melt.bin";
auto text_file = text_dump_filename("with_time_run0");
auto binary_file = binary_dump_filename("with_time_run0");
generate_text_and_binary_dump(text_file, binary_file, "scale no time yes", 0);
@ -371,8 +386,8 @@ TEST_F(DumpAtomTest, binary_triclinic_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_run0.melt";
auto binary_file = "dump_binary_tri_run0.melt.bin";
auto text_file = text_dump_filename("tri_run0");
auto binary_file = binary_dump_filename("tri_run0");
enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "", 0);
@ -393,8 +408,8 @@ TEST_F(DumpAtomTest, binary_triclinic_with_units_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_with_units_run0.melt";
auto binary_file = "dump_binary_tri_with_units_run0.melt.bin";
auto text_file = text_dump_filename("tri_with_units_run0");
auto binary_file = binary_dump_filename("tri_with_units_run0");
enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "scale no units yes", 0);
@ -415,8 +430,8 @@ TEST_F(DumpAtomTest, binary_triclinic_with_time_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_with_time_run0.melt";
auto binary_file = "dump_binary_tri_with_time_run0.melt.bin";
auto text_file = text_dump_filename("tri_with_time_run0");
auto binary_file = binary_dump_filename("tri_with_time_run0");
enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "scale no time yes", 0);
@ -437,8 +452,8 @@ TEST_F(DumpAtomTest, binary_triclinic_with_image_run0)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_with_image_run0.melt";
auto binary_file = "dump_binary_tri_with_image_run0.melt.bin";
auto text_file = text_dump_filename("tri_with_image_run0");
auto binary_file = binary_dump_filename("tri_with_image_run0");
enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "image yes", 0);
@ -457,7 +472,7 @@ TEST_F(DumpAtomTest, binary_triclinic_with_image_run0)
TEST_F(DumpAtomTest, run1plus1)
{
auto dump_file = "dump_run1plus1.melt";
auto dump_file = dump_filename("run1plus1");
generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS(dump_file);
@ -470,7 +485,7 @@ TEST_F(DumpAtomTest, run1plus1)
TEST_F(DumpAtomTest, run2)
{
auto dump_file = "dump_run2.melt";
auto dump_file = dump_filename("run2");
generate_dump(dump_file, "", 2);
ASSERT_FILE_EXISTS(dump_file);
@ -480,7 +495,7 @@ TEST_F(DumpAtomTest, run2)
TEST_F(DumpAtomTest, rerun)
{
auto dump_file = "dump_rerun.melt";
auto dump_file = dump_filename("rerun");
HIDE_OUTPUT([&] {
command("fix 1 all nve");
});
@ -508,38 +523,43 @@ TEST_F(DumpAtomTest, rerun)
TEST_F(DumpAtomTest, multi_file_run1)
{
auto dump_file = "dump_run1_*.melt";
auto dump_file = dump_filename("run1_*");
generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS("dump_run1_0.melt");
ASSERT_FILE_EXISTS("dump_run1_1.melt");
ASSERT_EQ(count_lines("dump_run1_0.melt"), 41);
ASSERT_EQ(count_lines("dump_run1_1.melt"), 41);
delete_file("dump_run1_0.melt");
delete_file("dump_run1_1.melt");
auto run1_0 = dump_filename("run1_0");
auto run1_1 = dump_filename("run1_1");
ASSERT_FILE_EXISTS(run1_0);
ASSERT_FILE_EXISTS(run1_1);
ASSERT_EQ(count_lines(run1_0), 41);
ASSERT_EQ(count_lines(run1_1), 41);
delete_file(run1_0);
delete_file(run1_1);
}
TEST_F(DumpAtomTest, per_processor_file_run1)
{
auto dump_file = "dump_run1_p%.melt";
auto dump_file = dump_filename("run1_p%");
generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS("dump_run1_p0.melt");
ASSERT_EQ(count_lines("dump_run1_p0.melt"), 82);
delete_file("dump_run1_p0.melt");
auto run1_p0 = dump_filename("run1_p0");
ASSERT_FILE_EXISTS(run1_p0);
ASSERT_EQ(count_lines(run1_p0), 82);
delete_file(run1_p0);
}
TEST_F(DumpAtomTest, per_processor_multi_file_run1)
{
auto dump_file = "dump_run1_p%_*.melt";
auto dump_file = dump_filename("run1_p%_*");
generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS("dump_run1_p0_0.melt");
ASSERT_FILE_EXISTS("dump_run1_p0_1.melt");
ASSERT_EQ(count_lines("dump_run1_p0_0.melt"), 41);
ASSERT_EQ(count_lines("dump_run1_p0_1.melt"), 41);
delete_file("dump_run1_p0_0.melt");
delete_file("dump_run1_p0_1.melt");
auto run1_p0_0 = dump_filename("run1_p0_0");
auto run1_p0_1 = dump_filename("run1_p0_1");
ASSERT_FILE_EXISTS(run1_p0_0);
ASSERT_FILE_EXISTS(run1_p0_1);
ASSERT_EQ(count_lines(run1_p0_0), 41);
ASSERT_EQ(count_lines(run1_p0_1), 41);
delete_file(run1_p0_0);
delete_file(run1_p0_1);
}
TEST_F(DumpAtomTest, dump_modify_scale_invalid)
@ -571,16 +591,17 @@ TEST_F(DumpAtomTest, dump_modify_invalid)
TEST_F(DumpAtomTest, write_dump)
{
auto reference = "dump_ref_run0.melt";
auto dump_file = "write_dump_atom_run0.melt";
auto reference = dump_filename("run0_ref");
auto dump_file = fmt::format("write_{}", dump_filename("run*"));
BEGIN_HIDE_OUTPUT();
command(fmt::format("dump id all atom 1 {}", reference));
command("dump_modify id scale no units yes");
command("run 0");
command("write_dump all atom write_dump_atom_run*.melt modify scale no units yes");
command(fmt::format("write_dump all atom {} modify scale no units yes", dump_file));
END_HIDE_OUTPUT();
dump_file = fmt::format("write_{}", dump_filename("run0"));
ASSERT_FILE_EXISTS(reference);
ASSERT_FILE_EXISTS(dump_file);
@ -593,16 +614,17 @@ TEST_F(DumpAtomTest, binary_write_dump)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto reference = "dump_run0.melt.bin";
auto dump_file = "write_dump_atom_run0_p0.melt.bin";
auto reference = binary_dump_filename("write_run0_ref");
auto dump_file = fmt::format("write_{}", binary_dump_filename("write_dump_atom_run*_p%"));
BEGIN_HIDE_OUTPUT();
command(fmt::format("dump id all atom 1 {}", reference));
command("dump_modify id scale no units yes");
command("run 0");
command("write_dump all atom write_dump_atom_run*_p%.melt.bin modify scale no units yes");
command(fmt::format("write_dump all atom {} modify scale no units yes", dump_file));
END_HIDE_OUTPUT();
dump_file = fmt::format("write_{}", binary_dump_filename("write_dump_atom_run0_p0"));
ASSERT_FILE_EXISTS(reference);
ASSERT_FILE_EXISTS(dump_file);

View File

@ -37,6 +37,21 @@ public:
END_HIDE_OUTPUT();
}
std::string dump_filename(std::string ident)
{
return fmt::format("dump_{}_{}.melt", dump_style, ident);
}
std::string text_dump_filename(std::string ident)
{
return fmt::format("dump_{}_text_{}.melt", dump_style, ident);
}
std::string binary_dump_filename(std::string ident)
{
return fmt::format("dump_{}_binary_{}.melt.bin", dump_style, ident);
}
void generate_dump(std::string dump_file, std::string fields, std::string dump_modify_options,
int ntimesteps)
{
@ -87,7 +102,7 @@ public:
TEST_F(DumpCustomTest, run1)
{
auto dump_file = "dump_custom_run1.melt";
auto dump_file = dump_filename("run1");
auto fields =
"id type proc procp1 mass x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz";
@ -105,7 +120,7 @@ TEST_F(DumpCustomTest, run1)
TEST_F(DumpCustomTest, thresh_run0)
{
auto dump_file = "dump_custom_thresh_run0.melt";
auto dump_file = dump_filename("thresh_run0");
auto fields = "id type x y z";
generate_dump(dump_file, fields, "units yes thresh x < 1 thresh y < 1 thresh z < 1", 0);
@ -126,7 +141,7 @@ TEST_F(DumpCustomTest, compute_run0)
command("compute comp all property/atom x y z");
END_HIDE_OUTPUT();
auto dump_file = "dump_custom_compute_run0.melt";
auto dump_file = dump_filename("compute_run0");
auto fields = "id type x y z c_comp[1] c_comp[2] c_comp[3]";
generate_dump(dump_file, fields, "units yes", 0);
@ -149,7 +164,7 @@ TEST_F(DumpCustomTest, fix_run0)
command("fix numdiff all numdiff 1 0.0001");
END_HIDE_OUTPUT();
auto dump_file = "dump_custom_compute_run0.melt";
auto dump_file = dump_filename("fix_run0");
auto fields = "id x y z f_numdiff[1] f_numdiff[2] f_numdiff[3]";
generate_dump(dump_file, fields, "units yes", 0);
@ -171,7 +186,7 @@ TEST_F(DumpCustomTest, custom_run0)
command("compute 1 all property/atom i_flag1 d_flag2");
END_HIDE_OUTPUT();
auto dump_file = "dump_custom_custom_run0.melt";
auto dump_file = dump_filename("custom_run0");
auto fields = "id x y z i_flag1 d_flag2";
generate_dump(dump_file, fields, "units yes", 0);
@ -190,8 +205,8 @@ TEST_F(DumpCustomTest, binary_run1)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_custom_text_run1.melt";
auto binary_file = "dump_custom_binary_run1.melt.bin";
auto text_file = text_dump_filename("run1");
auto binary_file = binary_dump_filename("run1");
auto fields = "id type proc x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz";
generate_text_and_binary_dump(text_file, binary_file, fields, "units yes", 1);
@ -210,7 +225,7 @@ TEST_F(DumpCustomTest, binary_run1)
TEST_F(DumpCustomTest, triclinic_run1)
{
auto dump_file = "dump_custom_tri_run1.melt";
auto dump_file = dump_filename("tri_run1");
auto fields = "id type proc x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz";
enable_triclinic();
@ -231,8 +246,8 @@ TEST_F(DumpCustomTest, binary_triclinic_run1)
{
if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_custom_tri_text_run1.melt";
auto binary_file = "dump_custom_tri_binary_run1.melt.bin";
auto text_file = text_dump_filename("tri_run1");
auto binary_file = binary_dump_filename("tri_run1");
auto fields = "id type proc x y z xs ys zs xsu ysu zsu vx vy vz fx fy fz";
enable_triclinic();
@ -258,7 +273,7 @@ TEST_F(DumpCustomTest, with_variable_run1)
command("variable p atom (c_1%10)+1");
END_HIDE_OUTPUT();
auto dump_file = "dump_custom_with_variable_run1.melt";
auto dump_file = dump_filename("with_variable_run1");
auto fields = "id type x y z v_p";
generate_dump(dump_file, fields, "units yes", 1);
@ -275,7 +290,7 @@ TEST_F(DumpCustomTest, with_variable_run1)
TEST_F(DumpCustomTest, run1plus1)
{
auto dump_file = "dump_custom_run1plus1.melt";
auto dump_file = dump_filename("run1plus1");
auto fields = "id type x y z";
generate_dump(dump_file, fields, "units yes", 1);
@ -292,7 +307,7 @@ TEST_F(DumpCustomTest, run1plus1)
TEST_F(DumpCustomTest, run2)
{
auto dump_file = "dump_custom_run2.melt";
auto dump_file = dump_filename("run2");
auto fields = "id type x y z";
generate_dump(dump_file, fields, "", 2);
@ -303,7 +318,7 @@ TEST_F(DumpCustomTest, run2)
TEST_F(DumpCustomTest, rerun)
{
auto dump_file = "dump_rerun.melt";
auto dump_file = dump_filename("rerun");
auto fields = "id type xs ys zs";
HIDE_OUTPUT([&] {

View File

@ -166,9 +166,9 @@ class PythonCapabilities(unittest.TestCase):
self.assertIn('single',settings['GPU']['precision'])
if self.cmake_cache['PKG_KOKKOS']:
if self.cmake_cache['Kokkos_ENABLE_OPENMP']:
if 'Kokkos_ENABLE_OPENMP' in self.cmake_cache and self.cmake_cache['Kokkos_ENABLE_OPENMP']:
self.assertIn('openmp',settings['KOKKOS']['api'])
if self.cmake_cache['Kokkos_ENABLE_SERIAL']:
if 'Kokkos_ENABLE_SERIAL' in self.cmake_cache and self.cmake_cache['Kokkos_ENABLE_SERIAL']:
self.assertIn('serial',settings['KOKKOS']['api'])
self.assertIn('double',settings['KOKKOS']['precision'])