Merge branch 'master' of github.com:lammps/lammps into acks2_release

This commit is contained in:
Stan Gerald Moore
2021-10-13 13:37:04 -06:00
36 changed files with 354 additions and 390 deletions

2
.github/CODEOWNERS vendored
View File

@ -83,7 +83,7 @@ src/library.* @sjplimp
src/main.cpp @sjplimp src/main.cpp @sjplimp
src/min_*.* @sjplimp src/min_*.* @sjplimp
src/memory.* @sjplimp src/memory.* @sjplimp
src/modify.* @sjplimp src/modify.* @sjplimp @stanmoore1
src/molecule.* @sjplimp src/molecule.* @sjplimp
src/my_page.h @sjplimp src/my_page.h @sjplimp
src/my_pool_chunk.h @sjplimp src/my_pool_chunk.h @sjplimp

View File

@ -3,7 +3,7 @@ name: "CodeQL Code Analysis"
on: on:
push: push:
branches: [master] branches: [develop]
jobs: jobs:
analyze: analyze:

View File

@ -3,7 +3,7 @@ name: "Native Windows Compilation"
on: on:
push: push:
branches: [master] branches: [develop]
jobs: jobs:
build: build:

View File

@ -3,7 +3,7 @@ name: "Unittest for MacOS"
on: on:
push: push:
branches: [master] branches: [develop]
jobs: jobs:
build: build:

View File

@ -33,9 +33,9 @@ when necessary.
## Pull Requests ## Pull Requests
ALL changes to the LAMMPS code and documentation, however trivial, MUST ALL changes to the LAMMPS code and documentation, however trivial, MUST
be submitted as a pull request to GitHub. All changes to the "master" be submitted as a pull request to GitHub. All changes to the "develop"
branch must be made exclusively through merging pull requests. The branch must be made exclusively through merging pull requests. The
"unstable" and "stable" branches, respectively are only to be updated "release" and "stable" branches, respectively are only to be updated
upon patch or stable releases with fast-forward merges based on the upon patch or stable releases with fast-forward merges based on the
associated tags. Pull requests may also be submitted to (long-running) associated tags. Pull requests may also be submitted to (long-running)
feature branches created by LAMMPS developers inside the LAMMPS project, feature branches created by LAMMPS developers inside the LAMMPS project,
@ -123,16 +123,16 @@ and thus where this comment should be placed.
LAMMPS uses a continuous release development model with incremental LAMMPS uses a continuous release development model with incremental
changes, i.e. significant effort is made - including automated pre-merge changes, i.e. significant effort is made - including automated pre-merge
testing - that the code in the branch "master" does not get easily testing - that the code in the branch "develop" does not get easily
broken. These tests are run after every update to a pull request. More broken. These tests are run after every update to a pull request. More
extensive and time consuming tests (including regression testing) are extensive and time consuming tests (including regression testing) are
performed after code is merged to the "master" branch. There are patch performed after code is merged to the "develop" branch. There are patch
releases of LAMMPS every 3-5 weeks at a point, when the LAMMPS releases of LAMMPS every 3-5 weeks at a point, when the LAMMPS
developers feel, that a sufficient amount of changes have happened, and developers feel, that a sufficient amount of changes have happened, and
the post-merge testing has been successful. These patch releases are the post-merge testing has been successful. These patch releases are
marked with a `patch_<version date>` tag and the "unstable" branch marked with a `patch_<version date>` tag and the "release" branch
follows only these versions (and thus is always supposed to be of follows only these versions (and thus is always supposed to be of
production quality, unlike "master", which may be temporarily broken, in production quality, unlike "develop", which may be temporarily broken, in
the case of larger change sets or unexpected incompatibilities or side the case of larger change sets or unexpected incompatibilities or side
effects. effects.

View File

@ -1,4 +1,4 @@
.TH LAMMPS "29 September 2021" "2021-09-29" .TH LAMMPS "1" "29 September 2021" "2021-09-29"
.SH NAME .SH NAME
.B LAMMPS .B LAMMPS
\- Molecular Dynamics Simulator. \- Molecular Dynamics Simulator.

View File

@ -1,4 +1,4 @@
.TH MSI2LMP "v3.9.9" "2018-11-05" .TH MSI2LMP "1" "v3.9.9" "2018-11-05"
.SH NAME .SH NAME
.B MSI2LMP .B MSI2LMP
\- Converter for Materials Studio files to LAMMPS \- Converter for Materials Studio files to LAMMPS

View File

@ -14,7 +14,7 @@ environments with restricted disk space capacity it may be needed to
reduce the storage requirements. Here are some suggestions: reduce the storage requirements. Here are some suggestions:
- Create a so-called shallow repository by cloning only the last commit - Create a so-called shallow repository by cloning only the last commit
instead of the full project history by using ``git clone git@github.com:lammps/lammps --depth=1 --branch=master``. instead of the full project history by using ``git clone git@github.com:lammps/lammps --depth=1 --branch=develop``.
This reduces the downloaded size to about half. With ``--depth=1`` it is not possible to check out different This reduces the downloaded size to about half. With ``--depth=1`` it is not possible to check out different
versions/branches of LAMMPS, using ``--depth=1000`` will make multiple recent versions available at little versions/branches of LAMMPS, using ``--depth=1000`` will make multiple recent versions available at little
extra storage needs (the entire git history had nearly 30,000 commits in fall 2021). extra storage needs (the entire git history had nearly 30,000 commits in fall 2021).

View File

@ -33,12 +33,15 @@ various tools and files. Some of them have to be installed (see below). For
the rest the build process will attempt to download and install them into the rest the build process will attempt to download and install them into
a python virtual environment and local folders. a python virtual environment and local folders.
A current version of the manual (latest patch release, aka unstable A current version of the manual (latest patch release, that is the state
branch) is available online at: of the *release* branch) is available online at:
`https://docs.lammps.org/Manual.html <https://docs.lammps.org/Manual.html>`_. `https://docs.lammps.org/ <https://docs.lammps.org/>`_.
A version of the manual corresponding to the ongoing development (aka master branch) A version of the manual corresponding to the ongoing development (that is
is available online at: `https://docs.lammps.org/latest/ the state of the *develop* branch) is available online at:
<https://docs.lammps.org/latest/>`_ `https://docs.lammps.org/latest/ <https://docs.lammps.org/latest/>`_
A version of the manual corresponding to the latest stable LAMMPS release
(that is the state of the *stable* branch) is available online at:
`https://docs.lammps.org/stable/ <https://docs.lammps.org/stable/>`_
Build using GNU make Build using GNU make
-------------------- --------------------

View File

@ -7,11 +7,11 @@ LAMMPS GitHub tutorial
This document describes the process of how to use GitHub to integrate This document describes the process of how to use GitHub to integrate
changes or additions you have made to LAMMPS into the official LAMMPS changes or additions you have made to LAMMPS into the official LAMMPS
distribution. It uses the process of updating this very tutorial as distribution. It uses the process of updating this very tutorial as an
an example to describe the individual steps and options. You need to example to describe the individual steps and options. You need to be
be familiar with git and you may want to have a look at the familiar with git and you may want to have a look at the `git book
`git book <http://git-scm.com/book/>`_ to reacquaint yourself with some <http://git-scm.com/book/>`_ to familiarize yourself with some of the
of the more advanced git features used below. more advanced git features used below.
As of fall 2016, submitting contributions to LAMMPS via pull requests As of fall 2016, submitting contributions to LAMMPS via pull requests
on GitHub is the preferred option for integrating contributed features on GitHub is the preferred option for integrating contributed features
@ -37,15 +37,15 @@ username or e-mail address and password.
**Forking the repository** **Forking the repository**
To get changes into LAMMPS, you need to first fork the `lammps/lammps` To get changes into LAMMPS, you need to first fork the `lammps/lammps`
repository on GitHub. At the time of writing, *master* is the preferred repository on GitHub. At the time of writing, *develop* is the preferred
target branch. Thus go to `LAMMPS on GitHub <https://github.com/lammps/lammps>`_ target branch. Thus go to `LAMMPS on GitHub <https://github.com/lammps/lammps>`_
and make sure branch is set to "master", as shown in the figure below. and make sure branch is set to "develop", as shown in the figure below.
.. image:: JPG/tutorial_branch.png .. image:: JPG/tutorial_branch.png
:align: center :align: center
If it is not, use the button to change it to *master*\ . Once it is, use the If it is not, use the button to change it to *develop*. Once it is, use
fork button to create a fork. the fork button to create a fork.
.. image:: JPG/tutorial_fork.png .. image:: JPG/tutorial_fork.png
:align: center :align: center
@ -64,11 +64,12 @@ LAMMPS development.
**Adding changes to your own fork** **Adding changes to your own fork**
Additions to the upstream version of LAMMPS are handled using *feature Additions to the upstream version of LAMMPS are handled using *feature
branches*\ . For every new feature, a so-called feature branch is branches*. For every new feature, a so-called feature branch is
created, which contains only those modifications relevant to one specific created, which contains only those modifications relevant to one specific
feature. For example, adding a single fix would consist of creating a feature. For example, adding a single fix would consist of creating a
branch with only the fix header and source file and nothing else. It is branch with only the fix header and source file and nothing else. It is
explained in more detail here: `feature branch workflow <https://www.atlassian.com/git/tutorials/comparing-workflows/feature-branch-workflow>`_. explained in more detail here: `feature branch workflow
<https://www.atlassian.com/git/tutorials/comparing-workflows/feature-branch-workflow>`_.
**Feature branches** **Feature branches**
@ -94,8 +95,8 @@ The above command copies ("clones") the git repository to your local
machine to a directory with the name you chose. If none is given, it will machine to a directory with the name you chose. If none is given, it will
default to "lammps". Typical names are "mylammps" or something similar. default to "lammps". Typical names are "mylammps" or something similar.
You can use this local clone to make changes and You can use this local clone to make changes and test them without
test them without interfering with the repository on GitHub. interfering with the repository on GitHub.
To pull changes from upstream into this copy, you can go to the directory To pull changes from upstream into this copy, you can go to the directory
and use git pull: and use git pull:
@ -103,28 +104,45 @@ and use git pull:
.. code-block:: bash .. code-block:: bash
$ cd mylammps $ cd mylammps
$ git checkout master $ git checkout develop
$ git pull https://github.com/lammps/lammps $ git pull https://github.com/lammps/lammps develop
You can also add this URL as a remote: You can also add this URL as a remote:
.. code-block:: bash .. code-block:: bash
$ git remote add lammps_upstream https://www.github.com/lammps/lammps $ git remote add upstream https://www.github.com/lammps/lammps
At this point, you typically make a feature branch from the updated master From then on you can update your upstream branches with:
.. code-block:: bash
$ git fetch upstream
and then refer to the upstream repository branches with
`upstream/develop` or `upstream/release` and so on.
At this point, you typically make a feature branch from the updated
branch for the feature you want to work on. This tutorial contains the branch for the feature you want to work on. This tutorial contains the
workflow that updated this tutorial, and hence we will call the branch workflow that updated this tutorial, and hence we will call the branch
"github-tutorial-update": "github-tutorial-update":
.. code-block:: bash .. code-block:: bash
$ git checkout -b github-tutorial-update master $ git fetch upstream
$ git checkout -b github-tutorial-update upstream/develop
Now that we have changed branches, we can make our changes to our local Now that we have changed branches, we can make our changes to our local
repository. Just remember that if you want to start working on another, repository. Just remember that if you want to start working on another,
unrelated feature, you should switch branches! unrelated feature, you should switch branches!
.. note::
Committing changes to the *develop*, *release*, or *stable* branches
is strongly discouraged. While it may be convenient initially, it
will create more work in the long run. Various texts and tutorials
on using git effectively discuss the motivation for this.
**After changes are made** **After changes are made**
After everything is done, add the files to the branch and commit them: After everything is done, add the files to the branch and commit them:
@ -287,28 +305,32 @@ After each push, the automated checks are run again.
LAMMPS developers may add labels to your pull request to assign it to LAMMPS developers may add labels to your pull request to assign it to
categories (mostly for bookkeeping purposes), but a few of them are categories (mostly for bookkeeping purposes), but a few of them are
important: needs_work, work_in_progress, test-for-regression, and important: *needs_work*, *work_in_progress*, *run_tests*,
full-regression-test. The first two indicate, that your pull request *test_for_regression*, and *ready_for_merge*. The first two indicate,
is not considered to be complete. With "needs_work" the burden is that your pull request is not considered to be complete. With
exclusively on you; while "work_in_progress" can also mean, that a "needs_work" the burden is exclusively on you; while
LAMMPS developer may want to add changes. Please watch the comments "work_in_progress" can also mean, that a LAMMPS developer may want to
to the pull requests. The two "test" labels are used to trigger add changes. Please watch the comments to the pull requests. The two
extended tests before the code is merged. This is sometimes done by "test" labels are used to trigger extended tests before the code is
LAMMPS developers, if they suspect that there may be some subtle merged. This is sometimes done by LAMMPS developers, if they suspect
side effects from your changes. It is not done by default, because that there may be some subtle side effects from your changes. It is not
those tests are very time consuming. done by default, because those tests are very time consuming. The
*ready_for_merge* label is usually attached when the LAMMPS developer
assigned to the pull request considers this request complete and to
trigger a final full test evaluation.
**Reviews** **Reviews**
As of Summer 2018, a pull request needs at least 1 approving review As of Fall 2021, a pull request needs to pass all automatic tests and at
from a LAMMPS developer with write access to the repository. least 1 approving review from a LAMMPS developer with write access to
In case your changes touch code that certain developers are associated the repository before it is eligible for merging. In case your changes
with, they are auto-requested by the GitHub software. Those associations touch code that certain developers are associated with, they are
are set in the file auto-requested by the GitHub software. Those associations are set in
`.github/CODEOWNERS <https://github.com/lammps/lammps/blob/master/.github/CODEOWNERS>`_ the file `.github/CODEOWNERS
Thus if you want to be automatically notified to review when anybody <https://github.com/lammps/lammps/blob/develop/.github/CODEOWNERS>`_ Thus
changes files or packages, that you have contributed to LAMMPS, you can if you want to be automatically notified to review when anybody changes
add suitable patterns to that file, or a LAMMPS developer may add you. files or packages, that **you** have contributed to LAMMPS, you can add
suitable patterns to that file, or a LAMMPS developer may add you.
Otherwise, you can also manually request reviews from specific developers, Otherwise, you can also manually request reviews from specific developers,
or LAMMPS developers - in their assessment of your pull request - may or LAMMPS developers - in their assessment of your pull request - may
@ -329,7 +351,7 @@ LAMMPS developer (including him/herself) or c) Axel Kohlmeyer (akohlmey).
After the review, the developer can choose to implement changes directly After the review, the developer can choose to implement changes directly
or suggest them to you. or suggest them to you.
* Case c) means that the pull request has been assigned to the developer * Case c) means that the pull request has been assigned to the developer
overseeing the merging of pull requests into the master branch. overseeing the merging of pull requests into the *develop* branch.
In this case, Axel assigned the tutorial to Steve: In this case, Axel assigned the tutorial to Steve:
@ -351,11 +373,11 @@ Sometimes, however, you might not feel comfortable having other people
push changes into your own branch, or maybe the maintainers are not sure push changes into your own branch, or maybe the maintainers are not sure
their idea was the right one. In such a case, they can make changes, their idea was the right one. In such a case, they can make changes,
reassign you as the assignee, and file a "reverse pull request", i.e. reassign you as the assignee, and file a "reverse pull request", i.e.
file a pull request in your GitHub repository to include changes in the file a pull request in **your** forked GitHub repository to include
branch, that you have submitted as a pull request yourself. In that changes in the branch, that you have submitted as a pull request
case, you can choose to merge their changes back into your branch, yourself. In that case, you can choose to merge their changes back into
possibly make additional changes or corrections and proceed from there. your branch, possibly make additional changes or corrections and proceed
It looks something like this: from there. It looks something like this:
.. image:: JPG/tutorial_reverse_pull_request.png .. image:: JPG/tutorial_reverse_pull_request.png
:align: center :align: center
@ -419,7 +441,7 @@ This merge also shows up on the lammps GitHub page:
**After a merge** **After a merge**
When everything is fine, the feature branch is merged into the master branch: When everything is fine, the feature branch is merged into the *develop* branch:
.. image:: JPG/tutorial_merged.png .. image:: JPG/tutorial_merged.png
:align: center :align: center
@ -433,8 +455,8 @@ branch!
.. code-block:: bash .. code-block:: bash
$ git checkout master $ git checkout develop
$ git pull master $ git pull https://github.com/lammps/lammps develop
$ git branch -d github-tutorial-update $ git branch -d github-tutorial-update
If you do not pull first, it is not really a problem but git will warn If you do not pull first, it is not really a problem but git will warn
@ -442,6 +464,7 @@ you at the next statement that you are deleting a local branch that
was not yet fully merged into HEAD. This is because git does not yet was not yet fully merged into HEAD. This is because git does not yet
know your branch just got merged into LAMMPS upstream. If you know your branch just got merged into LAMMPS upstream. If you
first delete and then pull, everything should still be fine. first delete and then pull, everything should still be fine.
You can display all branches that are fully merged by:
Finally, if you delete the branch locally, you might want to push this Finally, if you delete the branch locally, you might want to push this
to your remote(s) as well: to your remote(s) as well:
@ -453,14 +476,14 @@ to your remote(s) as well:
**Recent changes in the workflow** **Recent changes in the workflow**
Some changes to the workflow are not captured in this tutorial. For Some changes to the workflow are not captured in this tutorial. For
example, in addition to the master branch, to which all new features example, in addition to the *develop* branch, to which all new features
should be submitted, there is now also an "unstable" and a "stable" should be submitted, there is also a *release* and a *stable* branch;
branch; these have the same content as "master", but are only updated these have the same content as *develop*, but are only updated after a
after a patch release or stable release was made. patch release or stable release was made. Furthermore, the naming of
Furthermore, the naming of the patches now follow the pattern the patches now follow the pattern "patch_<Day><Month><Year>" to
"patch_<Day><Month><Year>" to simplify comparisons between releases. simplify comparisons between releases. Finally, all patches and
Finally, all patches and submissions are subject to automatic testing submissions are subject to automatic testing and code checks to make
and code checks to make sure they at the very least compile. sure they at the very least compile.
A discussion of the LAMMPS developer GitHub workflow can be found in the file A discussion of the LAMMPS developer GitHub workflow can be found in the file
`doc/github-development-workflow.md <https://github.com/lammps/lammps/blob/master/doc/github-development-workflow.md>`_ `doc/github-development-workflow.md <https://github.com/lammps/lammps/blob/develop/doc/github-development-workflow.md>`_

View File

@ -9,7 +9,8 @@ has several advantages:
command. command.
* You can create your own development branches to add code to LAMMPS. * You can create your own development branches to add code to LAMMPS.
* You can submit your new features back to GitHub for inclusion in * You can submit your new features back to GitHub for inclusion in
LAMMPS. LAMMPS. For that you should first create your own :doc:`fork on
GitHub <Howto_github>`.
You must have `git <git_>`_ installed on your system to use the You must have `git <git_>`_ installed on your system to use the
commands explained below to communicate with the git servers on commands explained below to communicate with the git servers on
@ -20,35 +21,53 @@ provides `limited support for subversion clients <svn_>`_.
As of October 2016, the official home of public LAMMPS development is As of October 2016, the official home of public LAMMPS development is
on GitHub. The previously advertised LAMMPS git repositories on on GitHub. The previously advertised LAMMPS git repositories on
git.lammps.org and bitbucket.org are now deprecated or offline. git.lammps.org and bitbucket.org are now offline or deprecated.
.. _git: https://git-scm.com .. _git: https://git-scm.com
.. _svn: https://help.github.com/en/github/importing-your-projects-to-github/working-with-subversion-on-github .. _svn: https://help.github.com/en/github/importing-your-projects-to-github/working-with-subversion-on-github
You can follow LAMMPS development on 3 different git branches: You can follow the LAMMPS development on 3 different git branches:
* **stable** : this branch is updated with every stable release * **stable** : this branch is updated with every stable release;
* **unstable** : this branch is updated with every patch release updates are always "fast forward" merges from *develop*
* **master** : this branch continuously follows ongoing development * **release** : this branch is updated with every patch release;
updates are always "fast forward" merges from *develop*
* **develop** : this branch follows the ongoing development and
is updated with every merge commit of a pull request
To access the git repositories on your box, use the clone command to To access the git repositories on your box, use the clone command to
create a local copy of the LAMMPS repository with a command like: create a local copy of the LAMMPS repository with a command like:
.. code-block:: bash .. code-block:: bash
$ git clone -b unstable https://github.com/lammps/lammps.git mylammps $ git clone -b release https://github.com/lammps/lammps.git mylammps
where "mylammps" is the name of the directory you wish to create on where "mylammps" is the name of the directory you wish to create on
your machine and "unstable" is one of the 3 branches listed above. your machine and "release" is one of the 3 branches listed above.
(Note that you actually download all 3 branches; you can switch (Note that you actually download all 3 branches; you can switch
between them at any time using "git checkout <branch name>".) between them at any time using "git checkout <branch name>".)
.. note::
The complete git history of the LAMMPS project is quite large because
it contains the entire commit history of the project since fall 2006,
which includes the time when LAMMPS was managed with subversion. This
also includes commits that have added and removed some large files
(mostly by accident). If you do not need access to the entire commit
history, you can speed up the "cloning" process and reduce local disk
space requirements by using the *--depth* git command line flag and thus
create a "shallow clone" of the repository that contains only a
subset of the git history. Using a depth of 1000 is usually sufficient
to include the head commits of the *develop* and the *release* branches.
To include the head commit of the *stable* branch you may need a depth
of up to 10000.
Once the command completes, your directory will contain the same files Once the command completes, your directory will contain the same files
as if you unpacked a current LAMMPS tarball, with the exception, that as if you unpacked a current LAMMPS tarball, with the exception, that
the HTML documentation files are not included. They can be fetched the HTML documentation files are not included. They can be fetched
from the LAMMPS website by typing ``make fetch`` in the doc directory. from the LAMMPS website by typing ``make fetch`` in the doc directory.
Or they can be generated from the content provided in doc/src by Or they can be generated from the content provided in ``doc/src`` by
typing ``make html`` from the doc directory. typing ``make html`` from the ``doc`` directory.
After initial cloning, as bug fixes and new features are added to After initial cloning, as bug fixes and new features are added to
LAMMPS you can stay up-to-date by typing the following git commands LAMMPS you can stay up-to-date by typing the following git commands
@ -56,9 +75,9 @@ from within the "mylammps" directory:
.. code-block:: bash .. code-block:: bash
$ git checkout unstable # not needed if you always stay in this branch $ git checkout release # not needed if you always stay in this branch
$ git checkout stable # use one of these 3 checkout commands $ git checkout stable # use one of these 3 checkout commands
$ git checkout master # to choose the branch to follow $ git checkout develop # to choose the branch to follow
$ git pull $ git pull
Doing a "pull" will not change any files you have added to the LAMMPS Doing a "pull" will not change any files you have added to the LAMMPS
@ -81,7 +100,7 @@ Stable versions and what tagID to use for a particular stable version
are discussed on `this page <https://www.lammps.org/bug.html#version>`_. are discussed on `this page <https://www.lammps.org/bug.html#version>`_.
Note that this command will print some warnings, because in order to get Note that this command will print some warnings, because in order to get
back to the latest revision and to be able to update with ``git pull`` back to the latest revision and to be able to update with ``git pull``
again, you will need to do ``git checkout unstable`` (or again, you will need to do ``git checkout release`` (or
check out any other desired branch) first. check out any other desired branch) first.
Once you have updated your local files with a ``git pull`` (or ``git Once you have updated your local files with a ``git pull`` (or ``git

View File

@ -19,7 +19,7 @@ software and open-source distribution, see `www.gnu.org <gnuorg_>`_
or `www.opensource.org <opensource_>`_. The legal text of the GPL as it or `www.opensource.org <opensource_>`_. The legal text of the GPL as it
applies to LAMMPS is in the LICENSE file included in the LAMMPS distribution. applies to LAMMPS is in the LICENSE file included in the LAMMPS distribution.
.. _gpl: https://github.com/lammps/lammps/blob/master/LICENSE .. _gpl: https://github.com/lammps/lammps/blob/develop/LICENSE
.. _lgpl: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html .. _lgpl: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html

View File

@ -7,7 +7,7 @@ correctly and reliably at all times. You can follow its development
in a public `git repository on GitHub <https://github.com/lammps/lammps>`_. in a public `git repository on GitHub <https://github.com/lammps/lammps>`_.
Whenever we fix a bug or update or add a feature, it will be merged into Whenever we fix a bug or update or add a feature, it will be merged into
the `master` branch of the git repository. When a sufficient number of the *develop* branch of the git repository. When a sufficient number of
changes have accumulated *and* the software passes a set of automated changes have accumulated *and* the software passes a set of automated
tests, we release it in the next *patch* release, which are made every tests, we release it in the next *patch* release, which are made every
few weeks. Info on patch releases is on `this website page few weeks. Info on patch releases is on `this website page

View File

@ -14,7 +14,7 @@ Intel Xeon Phi co-processors.
The `Benchmark page <https://www.lammps.org/bench.html>`_ of the LAMMPS The `Benchmark page <https://www.lammps.org/bench.html>`_ of the LAMMPS
website gives performance results for the various accelerator website gives performance results for the various accelerator
packages discussed on the :doc:`Speed packages <Speed_packages>` doc packages discussed on the :doc:`Accelerator packages <Speed_packages>`
page, for several of the standard LAMMPS benchmark problems, as a page, for several of the standard LAMMPS benchmark problems, as a
function of problem size and number of compute nodes, on different function of problem size and number of compute nodes, on different
hardware platforms. hardware platforms.

View File

@ -1,7 +1,7 @@
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are
functionally the same as the corresponding style without the suffix. functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc hardware, as discussed on the :doc:`Accelerator packages <Speed_packages>`
page. The accelerated styles take the same arguments and should page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues. produce the same results, except for round-off and precision issues.
@ -13,5 +13,5 @@ You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script. :doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more See the :doc:`Accelerator packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively. instructions on how to use the accelerated styles effectively.

View File

@ -56,23 +56,7 @@ radian\^2.
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues.
These accelerated styles are part of the GPU, INTEL, KOKKOS,
OPENMP and OPT packages, respectively. They are only enabled if
LAMMPS was built with those packages. See the :doc:`Build package <Build_package>` page for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script.
See :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------

View File

@ -319,28 +319,9 @@ styles; see the :doc:`Modify <Modify>` doc page.
---------- ----------
Styles with a *kk* suffix are functionally the same as the .. include:: accel_styles.rst
corresponding style without the suffix. They have been optimized to
run faster, depending on your available hardware, as discussed in on
the :doc:`Speed packages <Speed_packages>` doc page. The accelerated
styles take the same arguments and should produce the same results,
except for round-off and precision issues.
Note that other acceleration packages in LAMMPS, specifically the GPU, ----------
INTEL, OPENMP, and OPT packages do not use accelerated atom
styles.
The accelerated styles are part of the KOKKOS package. They are only
enabled if LAMMPS was built with those packages. See the :doc:`Build
package <Build_package>` page for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line
switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
Restrictions Restrictions
"""""""""""" """"""""""""

View File

@ -56,6 +56,17 @@ number of molecules of each species. In this context, "species" means
a unique molecule. The chemical formula of each species is given in a unique molecule. The chemical formula of each species is given in
the first line. the first line.
.. warning::
In order to compute averaged data, it is required that there are no
neighbor list rebuilds between the *Nfreq* steps. For that reason, fix
*reaxff/species* may change your neighbor list settings. There will
be a warning message showing the new settings. Having an *Nfreq*
setting that is larger than what is required for correct computation
of the ReaxFF force field interactions can thus lead to incorrect
results. For typical ReaxFF calculations a value of 100 is already
quite large.
If the filename ends with ".gz", the output file is written in gzipped If the filename ends with ".gz", the output file is written in gzipped
format. A gzipped dump file will be about 3x smaller than the text version, format. A gzipped dump file will be about 3x smaller than the text version,
but will also take longer to write. but will also take longer to write.

View File

@ -89,27 +89,14 @@ precession vectors instead of the forces.
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues.
The region keyword is also supported by Kokkos, but a Kokkos-enabled .. note::
The region keyword is supported by Kokkos, but a Kokkos-enabled
region must be used. See the region :doc:`region <region>` command for region must be used. See the region :doc:`region <region>` command for
more information. more information.
These accelerated styles are part of the r Kokkos package. They are
only enabled if LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------
Restart, fix_modify, output, run start/stop, minimize info Restart, fix_modify, output, run start/stop, minimize info

View File

@ -64,25 +64,7 @@ radian\^2.
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues.
These accelerated styles are part of the GPU, INTEL, KOKKOS,
OPENMP and OPT packages, respectively. They are only enabled if
LAMMPS was built with those packages. See the :doc:`Build package
<Build_package>` page for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix
command-line switch <Run_options>` when you invoke LAMMPS, or you can
use the :doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------

View File

@ -414,33 +414,26 @@ relative RMS error.
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues.
More specifically, the *pppm/gpu* style performs charge assignment and .. note::
force interpolation calculations on the GPU. These processes are
performed either in single or double precision, depending on whether
the -DFFT_SINGLE setting was specified in your low-level Makefile, as
discussed above. The FFTs themselves are still calculated on the CPU.
If *pppm/gpu* is used with a GPU-enabled pair style, part of the PPPM
calculation can be performed concurrently on the GPU while other
calculations for non-bonded and bonded force calculation are performed
on the CPU.
The *pppm/kk* style performs charge assignment and force interpolation For the GPU package, the *pppm/gpu* style performs charge assignment
calculations, along with the FFTs themselves, on the GPU or (optionally) threaded and force interpolation calculations on the GPU. These processes
on the CPU when using OpenMP and FFTW3. are performed either in single or double precision, depending on
whether the -DFFT_SINGLE setting was specified in your low-level
Makefile, as discussed above. The FFTs themselves are still
calculated on the CPU. If *pppm/gpu* is used with a GPU-enabled
pair style, part of the PPPM calculation can be performed
concurrently on the GPU while other calculations for non-bonded and
bonded force calculation are performed on the CPU.
These accelerated styles are part of the GPU, INTEL, KOKKOS, .. note::
OPENMP, and OPT packages respectively. They are only enabled if
LAMMPS was built with those packages. See the :doc:`Build package <Build_package>` page for more info.
See the :doc:`Speed packages <Speed_packages>` page for more For the KOKKOS package, the *pppm/kk* style performs charge
instructions on how to use the accelerated styles effectively. assignment and force interpolation calculations, along with the FFTs
themselves, on the GPU or (optionally) threaded on the CPU when
using OpenMP and FFTW3.
---------- ----------

View File

@ -166,7 +166,7 @@ intel", or "package omp" command with default settings.
set, either to default values or to specified settings. I.e. settings set, either to default values or to specified settings. I.e. settings
from previous invocations do not persist across multiple invocations. from previous invocations do not persist across multiple invocations.
See the :doc:`Speed packages <Speed_packages>` page for more details See the :doc:`Accelerator packages <Speed_packages>` page for more details
about using the various accelerator packages for speeding up LAMMPS about using the various accelerator packages for speeding up LAMMPS
simulations. simulations.

View File

@ -67,21 +67,7 @@ and input files are provided in the examples/PACKAGES/agni directory.
---------- ----------
Styles with *omp* suffix is functionally the same as the corresponding .. include:: accel_styles.rst
style without the suffix. They have been optimized to run faster,
depending on your available hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc page. The accelerated style takes
the same arguments and should produce the same results, except for
round-off and precision issues.
The accelerated style is part of the OPENMP. They are only enabled
if LAMMPS was built with those packages. See the :doc:`Build package <Build_package>` page for more info.
You can specify the accelerated style explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------

View File

@ -383,30 +383,19 @@ coefficients to 0.0.
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. Pair style *hybrid/scaled* does (currently) not support the
*gpu*, *omp*, *kk*, or *intel* suffix.
Since the *hybrid*, *hybrid/overlay*, *hybrid/scaled* styles delegate .. note::
computation to the individual sub-styles, the suffix versions of the
*hybrid* and *hybrid/overlay* styles are used to propagate the
corresponding suffix to all sub-styles, if those versions
exist. Otherwise the non-accelerated version will be used.
Since the *hybrid*, *hybrid/overlay*, *hybrid/scaled* styles
delegate computation to the individual sub-styles, the suffix
versions of the *hybrid* and *hybrid/overlay* styles are used to
propagate the corresponding suffix to all sub-styles, if those
versions exist. Otherwise the non-accelerated version will be used.
The individual accelerated sub-styles are part of the GPU, KOKKOS, The individual accelerated sub-styles are part of the GPU, KOKKOS,
INTEL, OPENMP, and OPT packages, respectively. They are only INTEL, OPENMP, and OPT packages, respectively. They are only
enabled if LAMMPS was built with those packages. See the :doc:`Build enabled if LAMMPS was built with those packages. See the
package <Build_package>` page for more info. :doc:`Build package <Build_package>` page for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------

View File

@ -159,28 +159,14 @@ taken from the ij and ik pairs (:math:`\sigma`, *a*, :math:`\gamma`)
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues.
These accelerated styles are part of the GPU, INTEL, KOKKOS, .. note::
OPENMP and OPT packages, respectively. They are only enabled if
LAMMPS was built with those packages. See the :doc:`Build package <Build_package>` page for more info.
You can specify the accelerated styles explicitly in your input script When using the INTEL package with this style, there is an additional
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the 5 to 10 percent performance improvement when the Stillinger-Weber
:doc:`suffix <suffix>` command in your input script. parameters p and q are set to 4 and 0 respectively. These
parameters are common for modeling silicon and water.
When using the INTEL package with this style, there is an
additional 5 to 10 percent performance improvement when the
Stillinger-Weber parameters p and q are set to 4 and 0 respectively.
These parameters are common for modeling silicon and water.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------

View File

@ -371,26 +371,13 @@ sub-regions can be defined with the *open* keyword.
---------- ----------
Styles with a *gpu*, *intel*, *kk*, *omp*, or *opt* suffix are .. include:: accel_styles.rst
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed on the :doc:`Speed packages <Speed_packages>` doc
page. The accelerated styles take the same arguments and should
produce the same results, except for round-off and precision issues.
The code using the region (such as a fix or compute) must also be supported .. note::
by Kokkos or no acceleration will occur. Currently, only *block* style
regions are supported by Kokkos.
These accelerated styles are part of the Kokkos package. They are Currently, only *block* style regions are supported by Kokkos. The
only enabled if LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info. code using the region (such as a fix or compute) must also be
supported by Kokkos or no acceleration will occur.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the :doc:`-suffix command-line switch <Run_options>` when you invoke LAMMPS, or you can use the
:doc:`suffix <suffix>` command in your input script.
See the :doc:`Speed packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively.
---------- ----------

View File

@ -125,12 +125,13 @@ screen.0 by default; see the :doc:`-plog and -pscreen command-line switches <Run
for the second partition will not contain thermodynamic output beyond the for the second partition will not contain thermodynamic output beyond the
first timestep of the run. first timestep of the run.
See the :doc:`Speed packages <Speed_packages>` page for performance See the :doc:`Accelerator packages <Speed_packages>` page for
details of the speed-up offered by the *verlet/split* style. One performance details of the speed-up offered by the *verlet/split*
important performance consideration is the assignment of logical style. One important performance consideration is the assignment of
processors in the 2 partitions to the physical cores of a parallel logical processors in the 2 partitions to the physical cores of a
machine. The :doc:`processors <processors>` command has options to parallel machine. The :doc:`processors <processors>` command has
support this, and strategies are discussed in :doc:`Section 5 <Speed>` of the manual. options to support this, and strategies are discussed in :doc:`Section
5 <Speed>` of the manual.
---------- ----------
@ -295,10 +296,10 @@ except for round-off and precision issues.
You can specify *respa/omp* explicitly in your input script, or you You can specify *respa/omp* explicitly in your input script, or you
can use the :doc:`-suffix command-line switch <Run_options>` when you can use the :doc:`-suffix command-line switch <Run_options>` when you
invoke LAMMPS, or you can use the :doc:`suffix <suffix>` command in your invoke LAMMPS, or you can use the :doc:`suffix <suffix>` command in
input script. your input script.
See the :doc:`Speed packages <Speed_packages>` page for more See the :doc:`Accelerator packages <Speed_packages>` page for more
instructions on how to use the accelerated styles effectively. instructions on how to use the accelerated styles effectively.
---------- ----------
@ -308,7 +309,8 @@ Restrictions
The *verlet/split* style can only be used if LAMMPS was built with the The *verlet/split* style can only be used if LAMMPS was built with the
REPLICA package. Correspondingly the *respa/omp* style is available REPLICA package. Correspondingly the *respa/omp* style is available
only if the OPENMP package was included. See the :doc:`Build package <Build_package>` page for more info. only if the OPENMP package was included. See the :doc:`Build package
<Build_package>` page for more info.
Whenever using rRESPA, the user should experiment with trade-offs in Whenever using rRESPA, the user should experiment with trade-offs in
speed and accuracy for their system, and verify that they are speed and accuracy for their system, and verify that they are

View File

@ -26,6 +26,8 @@
#error "Unsupported QE coupling API. Want API version 1." #error "Unsupported QE coupling API. Want API version 1."
#endif #endif
// we need to pass an MPI communicator to the LAMMPS library interface
#define LAMMPS_LIB_MPI
#include "library.h" #include "library.h"
static const char delim[] = " \t\n\r"; static const char delim[] = " \t\n\r";
@ -67,8 +69,8 @@ int main(int argc, char **argv)
#if 1 // AK: temporary hack #if 1 // AK: temporary hack
if ( qmmmcfg.nmm != 2 ) { if ( qmmmcfg.nmm != 2 ) {
if (me == 0) { if (me == 0) {
fprintf( stderr, "\n Error in the number of processors for MM code" fprintf( stderr, "\n Error in the number of processors for the MM code.\n"
"\n for the time being only two processor are allowed\n"); " Currently only requesting 2 MM processors is allowed.\n");
} }
MPI_Finalize(); MPI_Finalize();
return -1; return -1;

View File

@ -58,6 +58,7 @@ PairDRIP::PairDRIP(LAMMPS *lmp) : Pair(lmp)
{ {
single_enable = 0; single_enable = 0;
restartinfo = 0; restartinfo = 0;
one_coeff = 1;
manybody_flag = 1; manybody_flag = 1;
centroidstressflag = CENTROID_NOTAVAIL; centroidstressflag = CENTROID_NOTAVAIL;
unit_convert_flag = utils::get_supported_conversions(utils::ENERGY); unit_convert_flag = utils::get_supported_conversions(utils::ENERGY);
@ -258,7 +259,7 @@ void PairDRIP::read_file(char *filename)
int n = -1; int n = -1;
for (int m = 0; m < nparams; m++) { for (int m = 0; m < nparams; m++) {
if (i == params[m].ielement && j == params[m].jelement) { if (i == params[m].ielement && j == params[m].jelement) {
if (n >= 0) error->all(FLERR, "Potential file has duplicate entry"); if (n >= 0) error->all(FLERR, "DRIP potential file has duplicate entry");
n = m; n = m;
} }
} }

View File

@ -142,8 +142,8 @@ void PairILPGrapheneHBN::allocate()
void PairILPGrapheneHBN::settings(int narg, char **arg) void PairILPGrapheneHBN::settings(int narg, char **arg)
{ {
if (narg < 1 || narg > 2) error->all(FLERR, "Illegal pair_style command"); if (narg < 1 || narg > 2) error->all(FLERR, "Illegal pair_style command");
if (strcmp(force->pair_style, "hybrid/overlay") != 0) if (!utils::strmatch(force->pair_style, "^hybrid/overlay"))
error->all(FLERR, "ERROR: requires hybrid/overlay pair_style"); error->all(FLERR, "Pair style ilp/graphene/hbn must be used as sub-style with hybrid/overlay");
cut_global = utils::numeric(FLERR, arg[0], false, lmp); cut_global = utils::numeric(FLERR, arg[0], false, lmp);
if (narg == 2) tap_flag = utils::numeric(FLERR, arg[1], false, lmp); if (narg == 2) tap_flag = utils::numeric(FLERR, arg[1], false, lmp);
@ -273,6 +273,7 @@ void PairILPGrapheneHBN::read_file(char *filename)
nparams++; nparams++;
} }
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world); MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world); MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
@ -282,7 +283,7 @@ void PairILPGrapheneHBN::read_file(char *filename)
} }
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world); MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
}
memory->destroy(elem2param); memory->destroy(elem2param);
memory->destroy(cutILPsq); memory->destroy(cutILPsq);
memory->create(elem2param, nelements, nelements, "pair:elem2param"); memory->create(elem2param, nelements, nelements, "pair:elem2param");
@ -292,7 +293,7 @@ void PairILPGrapheneHBN::read_file(char *filename)
int n = -1; int n = -1;
for (int m = 0; m < nparams; m++) { for (int m = 0; m < nparams; m++) {
if (i == params[m].ielement && j == params[m].jelement) { if (i == params[m].ielement && j == params[m].jelement) {
if (n >= 0) error->all(FLERR, "ILP Potential file has duplicate entry"); if (n >= 0) error->all(FLERR, "ILP potential file has duplicate entry");
n = m; n = m;
} }
} }

View File

@ -34,7 +34,6 @@
#include "neigh_request.h" #include "neigh_request.h"
#include "neighbor.h" #include "neighbor.h"
#include "potential_file_reader.h" #include "potential_file_reader.h"
#include "tokenizer.h"
#include <cmath> #include <cmath>
#include <cstring> #include <cstring>
@ -142,8 +141,8 @@ void PairKolmogorovCrespiFull::allocate()
void PairKolmogorovCrespiFull::settings(int narg, char **arg) void PairKolmogorovCrespiFull::settings(int narg, char **arg)
{ {
if (narg < 1 || narg > 2) error->all(FLERR, "Illegal pair_style command"); if (narg < 1 || narg > 2) error->all(FLERR, "Illegal pair_style command");
if (strcmp(force->pair_style, "hybrid/overlay") != 0) if (!utils::strmatch(force->pair_style, "^hybrid/overlay"))
error->all(FLERR, "ERROR: requires hybrid/overlay pair_style"); error->all(FLERR, "Pair style kolmogorov/crespi/full must be used as sub-style with hybrid/overlay");
cut_global = utils::numeric(FLERR, arg[0], false, lmp); cut_global = utils::numeric(FLERR, arg[0], false, lmp);
if (narg == 2) tap_flag = utils::numeric(FLERR, arg[1], false, lmp); if (narg == 2) tap_flag = utils::numeric(FLERR, arg[1], false, lmp);
@ -270,6 +269,7 @@ void PairKolmogorovCrespiFull::read_file(char *filename)
nparams++; nparams++;
} }
}
MPI_Bcast(&nparams, 1, MPI_INT, 0, world); MPI_Bcast(&nparams, 1, MPI_INT, 0, world);
MPI_Bcast(&maxparam, 1, MPI_INT, 0, world); MPI_Bcast(&maxparam, 1, MPI_INT, 0, world);
@ -279,7 +279,6 @@ void PairKolmogorovCrespiFull::read_file(char *filename)
} }
MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world); MPI_Bcast(params, maxparam * sizeof(Param), MPI_BYTE, 0, world);
}
memory->destroy(elem2param); memory->destroy(elem2param);
memory->destroy(cutKCsq); memory->destroy(cutKCsq);
@ -290,7 +289,7 @@ void PairKolmogorovCrespiFull::read_file(char *filename)
int n = -1; int n = -1;
for (int m = 0; m < nparams; m++) { for (int m = 0; m < nparams; m++) {
if (i == params[m].ielement && j == params[m].jelement) { if (i == params[m].ielement && j == params[m].jelement) {
if (n >= 0) error->all(FLERR, "KC Potential file has duplicate entry"); if (n >= 0) error->all(FLERR, "KC potential file has duplicate entry");
n = m; n = m;
} }
} }

View File

@ -96,15 +96,9 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
rene_flag = 1; rene_flag = 1;
} }
if (me == 0 && rene_flag) { if (me == 0 && rene_flag)
error->warning(FLERR,"Resetting reneighboring criteria for fix reaxff/species"); error->warning(FLERR,"Resetting reneighboring criteria to 'delay {} every {} check no' "
} "for fix reaxff/species",neighbor->delay, neighbor->every);
tmparg = nullptr;
memory->create(tmparg,4,4,"reaxff/species:tmparg");
strcpy(tmparg[0],arg[3]);
strcpy(tmparg[1],arg[4]);
strcpy(tmparg[2],arg[5]);
if (me == 0) { if (me == 0) {
if (platform::has_compress_extension(arg[6])) { if (platform::has_compress_extension(arg[6])) {
@ -113,8 +107,8 @@ FixReaxFFSpecies::FixReaxFFSpecies(LAMMPS *lmp, int narg, char **arg) :
if (!fp) error->one(FLERR,"Cannot open compressed file"); if (!fp) error->one(FLERR,"Cannot open compressed file");
} else fp = fopen(arg[6],"w"); } else fp = fopen(arg[6],"w");
if (!fp) error->one(FLERR,fmt::format("Cannot open fix reaxff/species file {}: " if (!fp)
"{}",arg[6],utils::getsyserror())); error->one(FLERR,"Cannot open fix reaxff/species file {}: {}",arg[6],utils::getsyserror());
} }
x0 = nullptr; x0 = nullptr;
@ -243,7 +237,6 @@ FixReaxFFSpecies::~FixReaxFFSpecies()
memory->destroy(NMol); memory->destroy(NMol);
memory->destroy(MolType); memory->destroy(MolType);
memory->destroy(MolName); memory->destroy(MolName);
memory->destroy(tmparg);
if (filepos) if (filepos)
delete [] filepos; delete [] filepos;
@ -309,7 +302,7 @@ void FixReaxFFSpecies::init()
"abo15 abo16 abo17 abo18 abo19 abo20 abo21 abo22 abo23 abo24"); "abo15 abo16 abo17 abo18 abo19 abo20 abo21 abo22 abo23 abo24");
// create a fix to point to fix_ave_atom for averaging stored properties // create a fix to point to fix_ave_atom for averaging stored properties
auto fixcmd = fmt::format("SPECBOND all ave/atom {} {} {}",tmparg[0],tmparg[1],tmparg[2]); auto fixcmd = fmt::format("SPECBOND all ave/atom {} {} {}",nevery,nrepeat,nfreq);
for (int i = 1; i < 32; ++i) fixcmd += " c_SPECATOM[" + std::to_string(i) + "]"; for (int i = 1; i < 32; ++i) fixcmd += " c_SPECATOM[" + std::to_string(i) + "]";
f_SPECBOND = (FixAveAtom *) modify->add_fix(fixcmd); f_SPECBOND = (FixAveAtom *) modify->add_fix(fixcmd);
setupflag = 1; setupflag = 1;

View File

@ -53,7 +53,6 @@ class FixReaxFFSpecies : public Fix {
double bg_cut; double bg_cut;
double **BOCut; double **BOCut;
char **tmparg;
FILE *fp, *pos; FILE *fp, *pos;
int eleflag, posflag, multipos, padflag, setupflag; int eleflag, posflag, multipos, padflag, setupflag;

View File

@ -2563,8 +2563,7 @@ void FixRigidSmall::write_restart_file(const char *file)
auto outfile = std::string(file) + ".rigid"; auto outfile = std::string(file) + ".rigid";
fp = fopen(outfile.c_str(),"w"); fp = fopen(outfile.c_str(),"w");
if (fp == nullptr) if (fp == nullptr)
error->one(FLERR,"Cannot open fix rigid restart file {}: {}", error->one(FLERR,"Cannot open fix rigid restart file {}: {}",outfile,utils::getsyserror());
outfile,utils::getsyserror());
fmt::print(fp,"# fix rigid mass, COM, inertia tensor info for " fmt::print(fp,"# fix rigid mass, COM, inertia tensor info for "
"{} bodies on timestep {}\n\n",nbody,update->ntimestep); "{} bodies on timestep {}\n\n",nbody,update->ntimestep);

View File

@ -39,6 +39,21 @@ public:
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
} }
std::string dump_filename(std::string ident)
{
return fmt::format("dump_{}_{}.melt", dump_style, ident);
}
std::string text_dump_filename(std::string ident)
{
return fmt::format("dump_{}_text_{}.melt", dump_style, ident);
}
std::string binary_dump_filename(std::string ident)
{
return fmt::format("dump_{}_binary_{}.melt.bin", dump_style, ident);
}
void generate_dump(std::string dump_file, std::string dump_modify_options, int ntimesteps) void generate_dump(std::string dump_file, std::string dump_modify_options, int ntimesteps)
{ {
BEGIN_HIDE_OUTPUT(); BEGIN_HIDE_OUTPUT();
@ -87,7 +102,7 @@ public:
TEST_F(DumpAtomTest, run0) TEST_F(DumpAtomTest, run0)
{ {
auto dump_file = "dump_run0.melt"; auto dump_file = dump_filename("run0");
generate_dump(dump_file, "scale yes image no", 0); generate_dump(dump_file, "scale yes image no", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -103,7 +118,7 @@ TEST_F(DumpAtomTest, run0)
TEST_F(DumpAtomTest, format_line_run0) TEST_F(DumpAtomTest, format_line_run0)
{ {
auto dump_file = "dump_format_line_run0.melt"; auto dump_file = dump_filename("format_line_run0");
generate_dump(dump_file, "format line \"%d %d %20.15g %g %g\" scale yes image no", 0); generate_dump(dump_file, "format line \"%d %d %20.15g %g %g\" scale yes image no", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -119,7 +134,7 @@ TEST_F(DumpAtomTest, format_line_run0)
TEST_F(DumpAtomTest, no_scale_run0) TEST_F(DumpAtomTest, no_scale_run0)
{ {
auto dump_file = "dump_no_scale_run0.melt"; auto dump_file = dump_filename("no_scale_run0");
generate_dump(dump_file, "scale off", 0); generate_dump(dump_file, "scale off", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -134,7 +149,7 @@ TEST_F(DumpAtomTest, no_scale_run0)
TEST_F(DumpAtomTest, no_buffer_no_scale_run0) TEST_F(DumpAtomTest, no_buffer_no_scale_run0)
{ {
auto dump_file = "dump_no_buffer_no_scale_run0.melt"; auto dump_file = dump_filename("no_buffer_no_scale_run0");
generate_dump(dump_file, "buffer false scale false", 0); generate_dump(dump_file, "buffer false scale false", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -149,7 +164,7 @@ TEST_F(DumpAtomTest, no_buffer_no_scale_run0)
TEST_F(DumpAtomTest, no_buffer_with_scale_run0) TEST_F(DumpAtomTest, no_buffer_with_scale_run0)
{ {
auto dump_file = "dump_no_buffer_with_scale_run0.melt"; auto dump_file = dump_filename("no_buffer_with_scale_run0");
generate_dump(dump_file, "buffer 0 scale 1", 0); generate_dump(dump_file, "buffer 0 scale 1", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -164,7 +179,7 @@ TEST_F(DumpAtomTest, no_buffer_with_scale_run0)
TEST_F(DumpAtomTest, with_image_run0) TEST_F(DumpAtomTest, with_image_run0)
{ {
auto dump_file = "dump_with_image_run0.melt"; auto dump_file = dump_filename("with_image_run0");
generate_dump(dump_file, "scale no image on", 0); generate_dump(dump_file, "scale no image on", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -177,7 +192,7 @@ TEST_F(DumpAtomTest, with_image_run0)
TEST_F(DumpAtomTest, with_units_run0) TEST_F(DumpAtomTest, with_units_run0)
{ {
auto dump_file = "dump_with_units_run0.melt"; auto dump_file = dump_filename("with_units_run0");
generate_dump(dump_file, "scale false units 1", 0); generate_dump(dump_file, "scale false units 1", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -192,7 +207,7 @@ TEST_F(DumpAtomTest, with_units_run0)
TEST_F(DumpAtomTest, with_time_run0) TEST_F(DumpAtomTest, with_time_run0)
{ {
auto dump_file = "dump_with_time_run0.melt"; auto dump_file = dump_filename("with_time_run0");
generate_dump(dump_file, "scale off time true", 0); generate_dump(dump_file, "scale off time true", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -206,7 +221,7 @@ TEST_F(DumpAtomTest, with_time_run0)
TEST_F(DumpAtomTest, with_units_run1) TEST_F(DumpAtomTest, with_units_run1)
{ {
auto dump_file = "dump_with_units_run1.melt"; auto dump_file = dump_filename("with_units_run1");
generate_dump(dump_file, "scale 0 units on", 1); generate_dump(dump_file, "scale 0 units on", 1);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -221,7 +236,7 @@ TEST_F(DumpAtomTest, with_units_run1)
TEST_F(DumpAtomTest, no_buffer_with_scale_and_image_run0) TEST_F(DumpAtomTest, no_buffer_with_scale_and_image_run0)
{ {
auto dump_file = "dump_no_buffer_with_scale_and_image_run0.melt"; auto dump_file = dump_filename("no_buffer_with_scale_and_image_run0");
generate_dump(dump_file, "buffer 0 scale 1 image true", 0); generate_dump(dump_file, "buffer 0 scale 1 image true", 0);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -235,7 +250,7 @@ TEST_F(DumpAtomTest, no_buffer_with_scale_and_image_run0)
} }
TEST_F(DumpAtomTest, triclinic_run0) TEST_F(DumpAtomTest, triclinic_run0)
{ {
auto dump_file = "dump_triclinic_run0.melt"; auto dump_file = dump_filename("triclinic_run0");
enable_triclinic(); enable_triclinic();
generate_dump(dump_file, "", 0); generate_dump(dump_file, "", 0);
@ -251,7 +266,7 @@ TEST_F(DumpAtomTest, triclinic_run0)
TEST_F(DumpAtomTest, triclinic_with_units_run0) TEST_F(DumpAtomTest, triclinic_with_units_run0)
{ {
auto dump_file = "dump_triclinic_with_units_run0.melt"; auto dump_file = dump_filename("triclinic_with_units_run0");
enable_triclinic(); enable_triclinic();
generate_dump(dump_file, "units on", 0); generate_dump(dump_file, "units on", 0);
@ -269,7 +284,7 @@ TEST_F(DumpAtomTest, triclinic_with_units_run0)
TEST_F(DumpAtomTest, triclinic_with_time_run0) TEST_F(DumpAtomTest, triclinic_with_time_run0)
{ {
auto dump_file = "dump_triclinic_with_time_run0.melt"; auto dump_file = dump_filename("triclinic_with_time_run0");
enable_triclinic(); enable_triclinic();
generate_dump(dump_file, "time on", 0); generate_dump(dump_file, "time on", 0);
@ -286,7 +301,7 @@ TEST_F(DumpAtomTest, triclinic_with_time_run0)
TEST_F(DumpAtomTest, triclinic_with_image_run0) TEST_F(DumpAtomTest, triclinic_with_image_run0)
{ {
auto dump_file = "dump_triclinic_with_image_run0.melt"; auto dump_file = dump_filename("triclinic_with_image_run0");
enable_triclinic(); enable_triclinic();
generate_dump(dump_file, "image yes", 0); generate_dump(dump_file, "image yes", 0);
@ -308,8 +323,8 @@ TEST_F(DumpAtomTest, binary_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_run0.melt"; auto text_file = text_dump_filename("run0");
auto binary_file = "dump_binary_run0.melt.bin"; auto binary_file = binary_dump_filename("run0");
generate_text_and_binary_dump(text_file, binary_file, "", 0); generate_text_and_binary_dump(text_file, binary_file, "", 0);
@ -329,8 +344,8 @@ TEST_F(DumpAtomTest, binary_with_units_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_with_units_run0.melt"; auto text_file = text_dump_filename("with_units_run0");
auto binary_file = "dump_binary_with_units_run0.melt.bin"; auto binary_file = binary_dump_filename("with_units_run0");
generate_text_and_binary_dump(text_file, binary_file, "scale no units yes", 0); generate_text_and_binary_dump(text_file, binary_file, "scale no units yes", 0);
@ -350,8 +365,8 @@ TEST_F(DumpAtomTest, binary_with_time_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_with_time_run0.melt"; auto text_file = text_dump_filename("with_time_run0");
auto binary_file = "dump_binary_with_time_run0.melt.bin"; auto binary_file = binary_dump_filename("with_time_run0");
generate_text_and_binary_dump(text_file, binary_file, "scale no time yes", 0); generate_text_and_binary_dump(text_file, binary_file, "scale no time yes", 0);
@ -371,8 +386,8 @@ TEST_F(DumpAtomTest, binary_triclinic_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_run0.melt"; auto text_file = text_dump_filename("tri_run0");
auto binary_file = "dump_binary_tri_run0.melt.bin"; auto binary_file = binary_dump_filename("tri_run0");
enable_triclinic(); enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "", 0); generate_text_and_binary_dump(text_file, binary_file, "", 0);
@ -393,8 +408,8 @@ TEST_F(DumpAtomTest, binary_triclinic_with_units_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_with_units_run0.melt"; auto text_file = text_dump_filename("tri_with_units_run0");
auto binary_file = "dump_binary_tri_with_units_run0.melt.bin"; auto binary_file = binary_dump_filename("tri_with_units_run0");
enable_triclinic(); enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "scale no units yes", 0); generate_text_and_binary_dump(text_file, binary_file, "scale no units yes", 0);
@ -415,8 +430,8 @@ TEST_F(DumpAtomTest, binary_triclinic_with_time_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_with_time_run0.melt"; auto text_file = text_dump_filename("tri_with_time_run0");
auto binary_file = "dump_binary_tri_with_time_run0.melt.bin"; auto binary_file = binary_dump_filename("tri_with_time_run0");
enable_triclinic(); enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "scale no time yes", 0); generate_text_and_binary_dump(text_file, binary_file, "scale no time yes", 0);
@ -437,8 +452,8 @@ TEST_F(DumpAtomTest, binary_triclinic_with_image_run0)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_text_tri_with_image_run0.melt"; auto text_file = text_dump_filename("tri_with_image_run0");
auto binary_file = "dump_binary_tri_with_image_run0.melt.bin"; auto binary_file = binary_dump_filename("tri_with_image_run0");
enable_triclinic(); enable_triclinic();
generate_text_and_binary_dump(text_file, binary_file, "image yes", 0); generate_text_and_binary_dump(text_file, binary_file, "image yes", 0);
@ -457,7 +472,7 @@ TEST_F(DumpAtomTest, binary_triclinic_with_image_run0)
TEST_F(DumpAtomTest, run1plus1) TEST_F(DumpAtomTest, run1plus1)
{ {
auto dump_file = "dump_run1plus1.melt"; auto dump_file = dump_filename("run1plus1");
generate_dump(dump_file, "", 1); generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -470,7 +485,7 @@ TEST_F(DumpAtomTest, run1plus1)
TEST_F(DumpAtomTest, run2) TEST_F(DumpAtomTest, run2)
{ {
auto dump_file = "dump_run2.melt"; auto dump_file = dump_filename("run2");
generate_dump(dump_file, "", 2); generate_dump(dump_file, "", 2);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -480,7 +495,7 @@ TEST_F(DumpAtomTest, run2)
TEST_F(DumpAtomTest, rerun) TEST_F(DumpAtomTest, rerun)
{ {
auto dump_file = "dump_rerun.melt"; auto dump_file = dump_filename("rerun");
HIDE_OUTPUT([&] { HIDE_OUTPUT([&] {
command("fix 1 all nve"); command("fix 1 all nve");
}); });
@ -508,38 +523,43 @@ TEST_F(DumpAtomTest, rerun)
TEST_F(DumpAtomTest, multi_file_run1) TEST_F(DumpAtomTest, multi_file_run1)
{ {
auto dump_file = "dump_run1_*.melt"; auto dump_file = dump_filename("run1_*");
generate_dump(dump_file, "", 1); generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS("dump_run1_0.melt"); auto run1_0 = dump_filename("run1_0");
ASSERT_FILE_EXISTS("dump_run1_1.melt"); auto run1_1 = dump_filename("run1_1");
ASSERT_EQ(count_lines("dump_run1_0.melt"), 41); ASSERT_FILE_EXISTS(run1_0);
ASSERT_EQ(count_lines("dump_run1_1.melt"), 41); ASSERT_FILE_EXISTS(run1_1);
delete_file("dump_run1_0.melt"); ASSERT_EQ(count_lines(run1_0), 41);
delete_file("dump_run1_1.melt"); ASSERT_EQ(count_lines(run1_1), 41);
delete_file(run1_0);
delete_file(run1_1);
} }
TEST_F(DumpAtomTest, per_processor_file_run1) TEST_F(DumpAtomTest, per_processor_file_run1)
{ {
auto dump_file = "dump_run1_p%.melt"; auto dump_file = dump_filename("run1_p%");
generate_dump(dump_file, "", 1); generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS("dump_run1_p0.melt"); auto run1_p0 = dump_filename("run1_p0");
ASSERT_EQ(count_lines("dump_run1_p0.melt"), 82); ASSERT_FILE_EXISTS(run1_p0);
delete_file("dump_run1_p0.melt"); ASSERT_EQ(count_lines(run1_p0), 82);
delete_file(run1_p0);
} }
TEST_F(DumpAtomTest, per_processor_multi_file_run1) TEST_F(DumpAtomTest, per_processor_multi_file_run1)
{ {
auto dump_file = "dump_run1_p%_*.melt"; auto dump_file = dump_filename("run1_p%_*");
generate_dump(dump_file, "", 1); generate_dump(dump_file, "", 1);
ASSERT_FILE_EXISTS("dump_run1_p0_0.melt"); auto run1_p0_0 = dump_filename("run1_p0_0");
ASSERT_FILE_EXISTS("dump_run1_p0_1.melt"); auto run1_p0_1 = dump_filename("run1_p0_1");
ASSERT_EQ(count_lines("dump_run1_p0_0.melt"), 41); ASSERT_FILE_EXISTS(run1_p0_0);
ASSERT_EQ(count_lines("dump_run1_p0_1.melt"), 41); ASSERT_FILE_EXISTS(run1_p0_1);
delete_file("dump_run1_p0_0.melt"); ASSERT_EQ(count_lines(run1_p0_0), 41);
delete_file("dump_run1_p0_1.melt"); ASSERT_EQ(count_lines(run1_p0_1), 41);
delete_file(run1_p0_0);
delete_file(run1_p0_1);
} }
TEST_F(DumpAtomTest, dump_modify_scale_invalid) TEST_F(DumpAtomTest, dump_modify_scale_invalid)
@ -573,16 +593,17 @@ TEST_F(DumpAtomTest, dump_modify_invalid)
TEST_F(DumpAtomTest, write_dump) TEST_F(DumpAtomTest, write_dump)
{ {
auto reference = "dump_ref_run0.melt"; auto reference = dump_filename("run0_ref");
auto dump_file = "write_dump_atom_run0.melt"; auto dump_file = fmt::format("write_{}", dump_filename("run*"));
BEGIN_HIDE_OUTPUT(); BEGIN_HIDE_OUTPUT();
command(fmt::format("dump id all atom 1 {}", reference)); command(fmt::format("dump id all atom 1 {}", reference));
command("dump_modify id scale no units yes"); command("dump_modify id scale no units yes");
command("run 0"); command("run 0");
command("write_dump all atom write_dump_atom_run*.melt modify scale no units yes"); command(fmt::format("write_dump all atom {} modify scale no units yes", dump_file));
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
dump_file = fmt::format("write_{}", dump_filename("run0"));
ASSERT_FILE_EXISTS(reference); ASSERT_FILE_EXISTS(reference);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);
@ -595,16 +616,17 @@ TEST_F(DumpAtomTest, binary_write_dump)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto reference = "dump_run0.melt.bin"; auto reference = binary_dump_filename("write_run0_ref");
auto dump_file = "write_dump_atom_run0_p0.melt.bin"; auto dump_file = fmt::format("write_{}", binary_dump_filename("write_dump_atom_run*_p%"));
BEGIN_HIDE_OUTPUT(); BEGIN_HIDE_OUTPUT();
command(fmt::format("dump id all atom 1 {}", reference)); command(fmt::format("dump id all atom 1 {}", reference));
command("dump_modify id scale no units yes"); command("dump_modify id scale no units yes");
command("run 0"); command("run 0");
command("write_dump all atom write_dump_atom_run*_p%.melt.bin modify scale no units yes"); command(fmt::format("write_dump all atom {} modify scale no units yes", dump_file));
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
dump_file = fmt::format("write_{}", binary_dump_filename("write_dump_atom_run0_p0"));
ASSERT_FILE_EXISTS(reference); ASSERT_FILE_EXISTS(reference);
ASSERT_FILE_EXISTS(dump_file); ASSERT_FILE_EXISTS(dump_file);

View File

@ -37,6 +37,21 @@ public:
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
} }
std::string dump_filename(std::string ident)
{
return fmt::format("dump_{}_{}.melt", dump_style, ident);
}
std::string text_dump_filename(std::string ident)
{
return fmt::format("dump_{}_text_{}.melt", dump_style, ident);
}
std::string binary_dump_filename(std::string ident)
{
return fmt::format("dump_{}_binary_{}.melt.bin", dump_style, ident);
}
void generate_dump(std::string dump_file, std::string fields, std::string dump_modify_options, void generate_dump(std::string dump_file, std::string fields, std::string dump_modify_options,
int ntimesteps) int ntimesteps)
{ {
@ -87,7 +102,7 @@ public:
TEST_F(DumpCustomTest, run1) TEST_F(DumpCustomTest, run1)
{ {
auto dump_file = "dump_custom_run1.melt"; auto dump_file = dump_filename("run1");
auto fields = auto fields =
"id type proc procp1 mass x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz"; "id type proc procp1 mass x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz";
@ -105,7 +120,7 @@ TEST_F(DumpCustomTest, run1)
TEST_F(DumpCustomTest, thresh_run0) TEST_F(DumpCustomTest, thresh_run0)
{ {
auto dump_file = "dump_custom_thresh_run0.melt"; auto dump_file = dump_filename("thresh_run0");
auto fields = "id type x y z"; auto fields = "id type x y z";
generate_dump(dump_file, fields, "units yes thresh x < 1 thresh y < 1 thresh z < 1", 0); generate_dump(dump_file, fields, "units yes thresh x < 1 thresh y < 1 thresh z < 1", 0);
@ -126,7 +141,7 @@ TEST_F(DumpCustomTest, compute_run0)
command("compute comp all property/atom x y z"); command("compute comp all property/atom x y z");
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
auto dump_file = "dump_custom_compute_run0.melt"; auto dump_file = dump_filename("compute_run0");
auto fields = "id type x y z c_comp[1] c_comp[2] c_comp[3]"; auto fields = "id type x y z c_comp[1] c_comp[2] c_comp[3]";
generate_dump(dump_file, fields, "units yes", 0); generate_dump(dump_file, fields, "units yes", 0);
@ -149,7 +164,7 @@ TEST_F(DumpCustomTest, fix_run0)
command("fix numdiff all numdiff 1 0.0001"); command("fix numdiff all numdiff 1 0.0001");
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
auto dump_file = "dump_custom_compute_run0.melt"; auto dump_file = dump_filename("fix_run0");
auto fields = "id x y z f_numdiff[1] f_numdiff[2] f_numdiff[3]"; auto fields = "id x y z f_numdiff[1] f_numdiff[2] f_numdiff[3]";
generate_dump(dump_file, fields, "units yes", 0); generate_dump(dump_file, fields, "units yes", 0);
@ -171,7 +186,7 @@ TEST_F(DumpCustomTest, custom_run0)
command("compute 1 all property/atom i_flag1 d_flag2"); command("compute 1 all property/atom i_flag1 d_flag2");
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
auto dump_file = "dump_custom_custom_run0.melt"; auto dump_file = dump_filename("custom_run0");
auto fields = "id x y z i_flag1 d_flag2"; auto fields = "id x y z i_flag1 d_flag2";
generate_dump(dump_file, fields, "units yes", 0); generate_dump(dump_file, fields, "units yes", 0);
@ -190,8 +205,8 @@ TEST_F(DumpCustomTest, binary_run1)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_custom_text_run1.melt"; auto text_file = text_dump_filename("run1");
auto binary_file = "dump_custom_binary_run1.melt.bin"; auto binary_file = binary_dump_filename("run1");
auto fields = "id type proc x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz"; auto fields = "id type proc x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz";
generate_text_and_binary_dump(text_file, binary_file, fields, "units yes", 1); generate_text_and_binary_dump(text_file, binary_file, fields, "units yes", 1);
@ -210,7 +225,7 @@ TEST_F(DumpCustomTest, binary_run1)
TEST_F(DumpCustomTest, triclinic_run1) TEST_F(DumpCustomTest, triclinic_run1)
{ {
auto dump_file = "dump_custom_tri_run1.melt"; auto dump_file = dump_filename("tri_run1");
auto fields = "id type proc x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz"; auto fields = "id type proc x y z ix iy iz xs ys zs xu yu zu xsu ysu zsu vx vy vz fx fy fz";
enable_triclinic(); enable_triclinic();
@ -231,8 +246,8 @@ TEST_F(DumpCustomTest, binary_triclinic_run1)
{ {
if (!BINARY2TXT_BINARY) GTEST_SKIP(); if (!BINARY2TXT_BINARY) GTEST_SKIP();
auto text_file = "dump_custom_tri_text_run1.melt"; auto text_file = text_dump_filename("tri_run1");
auto binary_file = "dump_custom_tri_binary_run1.melt.bin"; auto binary_file = binary_dump_filename("tri_run1");
auto fields = "id type proc x y z xs ys zs xsu ysu zsu vx vy vz fx fy fz"; auto fields = "id type proc x y z xs ys zs xsu ysu zsu vx vy vz fx fy fz";
enable_triclinic(); enable_triclinic();
@ -258,7 +273,7 @@ TEST_F(DumpCustomTest, with_variable_run1)
command("variable p atom (c_1%10)+1"); command("variable p atom (c_1%10)+1");
END_HIDE_OUTPUT(); END_HIDE_OUTPUT();
auto dump_file = "dump_custom_with_variable_run1.melt"; auto dump_file = dump_filename("with_variable_run1");
auto fields = "id type x y z v_p"; auto fields = "id type x y z v_p";
generate_dump(dump_file, fields, "units yes", 1); generate_dump(dump_file, fields, "units yes", 1);
@ -275,7 +290,7 @@ TEST_F(DumpCustomTest, with_variable_run1)
TEST_F(DumpCustomTest, run1plus1) TEST_F(DumpCustomTest, run1plus1)
{ {
auto dump_file = "dump_custom_run1plus1.melt"; auto dump_file = dump_filename("run1plus1");
auto fields = "id type x y z"; auto fields = "id type x y z";
generate_dump(dump_file, fields, "units yes", 1); generate_dump(dump_file, fields, "units yes", 1);
@ -292,7 +307,7 @@ TEST_F(DumpCustomTest, run1plus1)
TEST_F(DumpCustomTest, run2) TEST_F(DumpCustomTest, run2)
{ {
auto dump_file = "dump_custom_run2.melt"; auto dump_file = dump_filename("run2");
auto fields = "id type x y z"; auto fields = "id type x y z";
generate_dump(dump_file, fields, "", 2); generate_dump(dump_file, fields, "", 2);
@ -303,7 +318,7 @@ TEST_F(DumpCustomTest, run2)
TEST_F(DumpCustomTest, rerun) TEST_F(DumpCustomTest, rerun)
{ {
auto dump_file = "dump_rerun.melt"; auto dump_file = dump_filename("rerun");
auto fields = "id type xs ys zs"; auto fields = "id type xs ys zs";
HIDE_OUTPUT([&] { HIDE_OUTPUT([&] {