Merge pull request #2848 from akohlmey/collected-small-changes

Collected small changes for the next patch release
Axel Kohlmeyer
2021-07-27 14:24:12 -04:00
committed by GitHub
40 changed files with 294 additions and 313 deletions

View File

@ -244,15 +244,16 @@ if(PKG_ADIOS)
endif() endif()
if(NOT CMAKE_CROSSCOMPILING) if(NOT CMAKE_CROSSCOMPILING)
set(MPI_CXX_SKIP_MPICXX TRUE)
find_package(MPI QUIET) find_package(MPI QUIET)
option(BUILD_MPI "Build MPI version" ${MPI_FOUND}) option(BUILD_MPI "Build MPI version" ${MPI_FOUND})
else() else()
set(MPI_CXX_SKIP_MPICXX TRUE)
option(BUILD_MPI "Build MPI version" OFF) option(BUILD_MPI "Build MPI version" OFF)
endif() endif()
if(BUILD_MPI) if(BUILD_MPI)
# do not include the (obsolete) MPI C++ bindings which makes
# for leaner object files and avoids namespace conflicts
set(MPI_CXX_SKIP_MPICXX TRUE)
# We use a non-standard procedure to cross-compile with MPI on Windows # We use a non-standard procedure to cross-compile with MPI on Windows
if((CMAKE_SYSTEM_NAME STREQUAL "Windows") AND CMAKE_CROSSCOMPILING) if((CMAKE_SYSTEM_NAME STREQUAL "Windows") AND CMAKE_CROSSCOMPILING)
include(MPI4WIN) include(MPI4WIN)
@ -368,6 +369,8 @@ if(PKG_MSCG OR PKG_ATC OR PKG_AWPMD OR PKG_ML-QUIP OR PKG_LATTE)
endif() endif()
endif() endif()
# tweak jpeg library names to avoid linker errors with MinGW cross-compilation
set(JPEG_NAMES libjpeg libjpeg-62)
find_package(JPEG QUIET) find_package(JPEG QUIET)
option(WITH_JPEG "Enable JPEG support" ${JPEG_FOUND}) option(WITH_JPEG "Enable JPEG support" ${JPEG_FOUND})
if(WITH_JPEG) if(WITH_JPEG)

View File

@ -54,8 +54,8 @@ if(DOWNLOAD_PLUMED)
set(PLUMED_BUILD_BYPRODUCTS "<INSTALL_DIR>/lib/libplumedWrapper.a") set(PLUMED_BUILD_BYPRODUCTS "<INSTALL_DIR>/lib/libplumedWrapper.a")
endif() endif()
set(PLUMED_URL "https://github.com/plumed/plumed2/releases/download/v2.7.1/plumed-src-2.7.1.tgz" CACHE STRING "URL for PLUMED tarball") set(PLUMED_URL "https://github.com/plumed/plumed2/releases/download/v2.7.2/plumed-src-2.7.2.tgz" CACHE STRING "URL for PLUMED tarball")
set(PLUMED_MD5 "4eac6a462ec84dfe0cec96c82421b8e8" CACHE STRING "MD5 checksum of PLUMED tarball") set(PLUMED_MD5 "cfa0b4dd90a81c25d3302e8d97bfeaea" CACHE STRING "MD5 checksum of PLUMED tarball")
mark_as_advanced(PLUMED_URL) mark_as_advanced(PLUMED_URL)
mark_as_advanced(PLUMED_MD5) mark_as_advanced(PLUMED_MD5)
@ -72,7 +72,6 @@ if(DOWNLOAD_PLUMED)
${PLUMED_CONFIG_OMP} ${PLUMED_CONFIG_OMP}
CXX=${PLUMED_CONFIG_CXX} CXX=${PLUMED_CONFIG_CXX}
CC=${PLUMED_CONFIG_CC} CC=${PLUMED_CONFIG_CC}
PATCH_COMMAND sed -i "/^#include <algorithm>/a #include <limits>" <SOURCE_DIR>/src/lepton/Operation.h
BUILD_BYPRODUCTS ${PLUMED_BUILD_BYPRODUCTS} BUILD_BYPRODUCTS ${PLUMED_BUILD_BYPRODUCTS}
) )
ExternalProject_get_property(plumed_build INSTALL_DIR) ExternalProject_get_property(plumed_build INSTALL_DIR)
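Side note on the dropped PATCH_COMMAND above: the sed call inserted "#include <limits>" into PLUMED's lepton Operation.h, which is needed with newer standard libraries (e.g. GCC 11) where <limits> is no longer pulled in transitively by headers such as <algorithm>; with the 2.7.2 tarball that fix is presumably upstream, so the patch can be removed. A minimal illustration (not PLUMED code) of the failure mode the extra include prevents:

#include <algorithm>
#include <limits>   // without this line, std::numeric_limits is undeclared on e.g. GCC 11
#include <cstdio>

int main()
{
  // lepton's Operation.h relies on std::numeric_limits; depending on
  // <algorithm> to provide it transitively stopped working with newer libstdc++
  double lowest = std::numeric_limits<double>::lowest();
  std::printf("lowest double: %g\n", lowest);
  return 0;
}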

View File

@ -25,7 +25,7 @@ RasMol visualization programs. Pizza.py has tools that do interactive
3d OpenGL visualization and one that creates SVG images of dump file 3d OpenGL visualization and one that creates SVG images of dump file
snapshots. snapshots.
.. _pizza: https://pizza.sandia.gov .. _pizza: https://lammps.github.io/pizza
.. _ensight: https://www.ansys.com/products/fluids/ansys-ensight .. _ensight: https://www.ansys.com/products/fluids/ansys-ensight

View File

@ -24,11 +24,15 @@ General features
^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^
* runs on a single processor or in parallel * runs on a single processor or in parallel
* distributed-memory message-passing parallelism (MPI) * distributed memory message-passing parallelism (MPI)
* spatial-decomposition of simulation domain for parallelism * shared memory multi-threading parallelism (OpenMP)
* open-source distribution * spatial decomposition of simulation domain for MPI parallelism
* highly portable C++ * particle decomposition inside of spatial decomposition for OpenMP parallelism
* optional libraries used: MPI and single-processor FFT * GPLv2 licensed open-source distribution
* highly portable C++11
* modular code with most functionality in optional packages
* only depends on MPI library for basic parallel functionality
* other libraries are optional and only required for specific packages
* GPU (CUDA and OpenCL), Intel Xeon Phi, and OpenMP support for many code features * GPU (CUDA and OpenCL), Intel Xeon Phi, and OpenMP support for many code features
* easy to extend with new features and functionality * easy to extend with new features and functionality
* runs from an input script * runs from an input script
@ -68,9 +72,9 @@ Interatomic potentials (force fields)
:doc:`improper style <improper_style>`, :doc:`kspace style <kspace_style>` :doc:`improper style <improper_style>`, :doc:`kspace style <kspace_style>`
commands) commands)
* pairwise potentials: Lennard-Jones, Buckingham, Morse, Born-Mayer-Huggins, Yukawa, soft, class 2 (COMPASS), hydrogen bond, tabulated * pairwise potentials: Lennard-Jones, Buckingham, Morse, Born-Mayer-Huggins, Yukawa, soft, class 2 (COMPASS), hydrogen bond, tabulated
* charged pairwise potentials: Coulombic, point-dipole * charged pairwise potentials: Coulombic, point-dipole
* many-body potentials: EAM, Finnis/Sinclair EAM, modified EAM (MEAM), embedded ion method (EIM), EDIP, ADP, Stillinger-Weber, Tersoff, REBO, AIREBO, ReaxFF, COMB, SNAP, Streitz-Mintmire, 3-body polymorphic * many-body potentials: EAM, Finnis/Sinclair EAM, modified EAM (MEAM), embedded ion method (EIM), EDIP, ADP, Stillinger-Weber, Tersoff, REBO, AIREBO, ReaxFF, COMB, SNAP, Streitz-Mintmire, 3-body polymorphic
* long-range interactions for charge, point-dipoles, and LJ dispersion: Ewald, Wolf, PPPM (similar to particle-mesh Ewald) * long-range interactions for charge, point-dipoles, and LJ dispersion: Ewald, Wolf, PPPM (similar to particle-mesh Ewald)
* polarization models: :doc:`QEq <fix_qeq>`, :doc:`core/shell model <Howto_coreshell>`, :doc:`Drude dipole model <Howto_drude>` * polarization models: :doc:`QEq <fix_qeq>`, :doc:`core/shell model <Howto_coreshell>`, :doc:`Drude dipole model <Howto_drude>`
* charge equilibration (QEq via dynamic, point, shielded, Slater methods) * charge equilibration (QEq via dynamic, point, shielded, Slater methods)
@ -170,9 +174,12 @@ Multi-replica models
^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
* :doc:`nudged elastic band <neb>` * :doc:`nudged elastic band <neb>`
* :doc:`hyperdynamics <hyper>`
* :doc:`parallel replica dynamics <prd>` * :doc:`parallel replica dynamics <prd>`
* :doc:`temperature accelerated dynamics <tad>` * :doc:`temperature accelerated dynamics <tad>`
* :doc:`parallel tempering <temper>` * :doc:`parallel tempering <temper>`
* :doc:`path-integral MD <fix_pimd>`
* multi-walker collective variables with :doc:`Colvars <fix_colvars>` and :doc:`Plumed <fix_plumed>`
.. _prepost: .. _prepost:
@ -187,7 +194,7 @@ Pre- and post-processing
plotting, and visualization for LAMMPS simulations. Pizza.py is plotting, and visualization for LAMMPS simulations. Pizza.py is
written in `Python <python_>`_ and is available for download from `the Pizza.py WWW site <pizza_>`_. written in `Python <python_>`_ and is available for download from `the Pizza.py WWW site <pizza_>`_.
.. _pizza: https://pizza.sandia.gov .. _pizza: https://lammps.github.io/pizza
.. _python: http://www.python.org .. _python: http://www.python.org

View File

@ -77,7 +77,7 @@ Here are suggestions on how to perform these tasks:
it easier to analyze and plot. See the :doc:`Tools <Tools>` doc page it easier to analyze and plot. See the :doc:`Tools <Tools>` doc page
for more discussion of the various tools. for more discussion of the various tools.
* **Pizza.py:** Our group has also written a separate toolkit called * **Pizza.py:** Our group has also written a separate toolkit called
`Pizza.py <https://pizza.sandia.gov>`_ which can do certain kinds of `Pizza.py <https://lammps.github.io/pizza>`_ which can do certain kinds of
setup, analysis, plotting, and visualization (via OpenGL) for LAMMPS setup, analysis, plotting, and visualization (via OpenGL) for LAMMPS
simulations. It thus provides some functionality for several of the simulations. It thus provides some functionality for several of the
above bullets. Pizza.py is written in `Python <http://www.python.org>`_ above bullets. Pizza.py is written in `Python <http://www.python.org>`_

View File

@ -18,10 +18,11 @@ supercomputers.
.. _mpi: https://en.wikipedia.org/wiki/Message_Passing_Interface .. _mpi: https://en.wikipedia.org/wiki/Message_Passing_Interface
.. _lws: https://www.lammps.org .. _lws: https://www.lammps.org
LAMMPS is written in C++. Earlier versions were written in F77 and LAMMPS is written in C++ and requires a compiler that is at least
F90. See the `History page <https://www.lammps.org/history.html>`_ of compatible with the C++11 standard.
the website for details. All versions can be downloaded from the Earlier versions were written in F77 and F90. See the `History page
`LAMMPS website <lws_>`_. <https://www.lammps.org/history.html>`_ of the website for details. All
versions can be downloaded from the `LAMMPS website <lws_>`_.
LAMMPS is designed to be easy to modify or extend with new LAMMPS is designed to be easy to modify or extend with new
capabilities, such as new force fields, atom types, boundary capabilities, such as new force fields, atom types, boundary
@ -41,8 +42,9 @@ short distances, so that the local density of particles never becomes
too large. This is in contrast to methods used for modeling plasma too large. This is in contrast to methods used for modeling plasma
or gravitational bodies (e.g. galaxy formation). or gravitational bodies (e.g. galaxy formation).
On parallel machines, LAMMPS uses spatial-decomposition techniques to On parallel machines, LAMMPS uses spatial-decomposition techniques with
partition the simulation domain into small sub-domains of equal MPI parallelization to partition the simulation domain into small
computational cost, one of which is assigned to each processor. sub-domains of equal computational cost, one of which is assigned to
Processors communicate and store "ghost" atom information for atoms each processor. Processors communicate and store "ghost" atom
that border their sub-domain. information for atoms that border their sub-domain. Multi-threading
parallelization with particle decomposition can be used in addition.
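To make the two-level parallelization described above concrete, here is a minimal sketch (plain MPI + OpenMP, not LAMMPS code; all names are made up): each MPI rank owns the particles of one spatial sub-domain plus ghost copies from its neighbors, and the local force loop is split over OpenMP threads.

// build with e.g.:  mpicxx -fopenmp -o hybrid hybrid.cpp
#include <mpi.h>
#include <omp.h>
#include <cstdio>
#include <vector>

struct Particle { double x[3], f[3]; };

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0, nprocs = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  // level 1: spatial decomposition -- this rank owns the particles that
  // lie inside its sub-domain (placeholder data here)
  std::vector<Particle> owned(1000), ghost;

  // in a real code, ghost copies of atoms near the sub-domain faces would
  // be exchanged with the neighboring ranks (e.g. via MPI_Sendrecv) here

  // level 2: particle decomposition -- OpenMP threads split the loop over
  // the locally owned particles
  #pragma omp parallel for
  for (int i = 0; i < (int) owned.size(); ++i) {
    owned[i].f[0] = owned[i].f[1] = owned[i].f[2] = 0.0;
    // ... accumulate forces from owned + ghost neighbors of particle i ...
  }

  if (rank == 0)
    std::printf("%d MPI ranks x up to %d OpenMP threads each\n",
                nprocs, omp_get_max_threads());
  MPI_Finalize();
  return 0;
}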

View File

@ -35,9 +35,9 @@ visualization package you have installed.
Note that for GL, you need to be able to run the Pizza.py GL tool, Note that for GL, you need to be able to run the Pizza.py GL tool,
which is included in the pizza sub-directory. See the Pizza.py doc pages for more info: which is included in the pizza sub-directory. See the Pizza.py doc pages for more info:
* `https://pizza.sandia.gov <pizza_>`_ * `https://lammps.github.io/pizza <pizza_>`_
.. _pizza: https://pizza.sandia.gov .. _pizza: https://lammps.github.io/pizza
Note that for AtomEye, you need version 3, and there is a line in the Note that for AtomEye, you need version 3, and there is a line in the
scripts that specifies the path and name of the executable. See the scripts that specifies the path and name of the executable. See the

View File

@ -15,7 +15,7 @@ Sandia which provides tools for doing setup, analysis, plotting, and
visualization for LAMMPS simulations. visualization for LAMMPS simulations.
.. _lws: https://www.lammps.org .. _lws: https://www.lammps.org
.. _pizza: https://pizza.sandia.gov .. _pizza: https://lammps.github.io/pizza
.. _python: https://www.python.org .. _python: https://www.python.org
Additional tools included in the LAMMPS distribution are described on Additional tools included in the LAMMPS distribution are described on

View File

@ -558,7 +558,7 @@ Related commands
:doc:`group <group>`, :doc:`processors <processors>`, :doc:`group <group>`, :doc:`processors <processors>`,
:doc:`fix balance <fix_balance>`, :doc:`comm_style <comm_style>` :doc:`fix balance <fix_balance>`, :doc:`comm_style <comm_style>`
.. _pizza: https://pizza.sandia.gov .. _pizza: https://lammps.github.io/pizza
Default Default
""""""" """""""

View File

@ -119,8 +119,7 @@ The per-atom vector values will be an ID > 0, as explained above.
Restrictions Restrictions
"""""""""""" """"""""""""
These computes are part of the EXTRA-COMPUTE package. They are only enabled if none
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands Related commands
"""""""""""""""" """"""""""""""""

View File

@ -182,8 +182,7 @@ page for an overview of LAMMPS output options.
Restrictions Restrictions
"""""""""""" """"""""""""
This compute is part of the EXTRA-COMPUTE package. It is only enabled if none
LAMMPS was built with that package. See the :doc:`Build package <Build_package>` page for more info.
Related commands Related commands
"""""""""""""""" """"""""""""""""

View File

@ -230,7 +230,7 @@ individual values and the file itself.
The *atom*, *local*, and *custom* styles create files in a simple text The *atom*, *local*, and *custom* styles create files in a simple text
format that is self-explanatory when viewing a dump file. Some of the format that is self-explanatory when viewing a dump file. Some of the
LAMMPS post-processing tools described on the :doc:`Tools <Tools>` doc LAMMPS post-processing tools described on the :doc:`Tools <Tools>` doc
page, including `Pizza.py <https://pizza.sandia.gov>`_, page, including `Pizza.py <https://lammps.github.io/pizza>`_,
work with this format, as does the :doc:`rerun <rerun>` command. work with this format, as does the :doc:`rerun <rerun>` command.
For post-processing purposes the *atom*, *local*, and *custom* text For post-processing purposes the *atom*, *local*, and *custom* text

View File

@ -590,8 +590,8 @@ Play the movie:
% mplayer foo.mpg % mplayer foo.mpg
% ffplay bar.avi % ffplay bar.avi
* c) Use the `Pizza.py <https://pizza.sandia.gov>`_ * c) Use the `Pizza.py <https://lammps.github.io/pizza>`_
`animate tool <https://pizza.sandia.gov/doc/animate.html>`_, `animate tool <https://lammps.github.io/pizza/doc/animate.html>`_,
which works directly on a series of image files. which works directly on a series of image files.
.. code-block:: python .. code-block:: python

View File

@ -403,7 +403,7 @@ Related commands
:doc:`group <group>`, :doc:`processors <processors>`, :doc:`balance <balance>`, :doc:`group <group>`, :doc:`processors <processors>`, :doc:`balance <balance>`,
:doc:`comm_style <comm_style>` :doc:`comm_style <comm_style>`
.. _pizza: https://pizza.sandia.gov .. _pizza: https://lammps.github.io/pizza
Default Default
""""""" """""""

View File

@ -89,7 +89,7 @@ first stage) is changed to:
.. parsed-literal:: .. parsed-literal::
Fi = -Grad(V) + 2 (Grad(V) dot T') T' Fi = -Grad(V) + 2 (Grad(V) dot T') T' + Fnudge_perp
and the relaxation procedure is continued to a new converged MEP. and the relaxation procedure is continued to a new converged MEP.
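For readability, the updated force expression from the parsed-literal block above can be written in LaTeX as follows, where T' is here taken to be the unit tangent \hat{\tau}' along the path and Fnudge_perp is the perpendicular nudging term added by this change:

F_i \;=\; -\nabla V \;+\; 2\,\bigl(\nabla V \cdot \hat{\tau}'\bigr)\,\hat{\tau}' \;+\; F^{\perp}_{\mathrm{nudge}}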

View File

@ -53,6 +53,7 @@ checksums = { \
'2.6.3' : 'a9f8028fd74528c2024781ea1fdefeee', \ '2.6.3' : 'a9f8028fd74528c2024781ea1fdefeee', \
'2.7.0' : '95f29dd0c067577f11972ff90dfc7d12', \ '2.7.0' : '95f29dd0c067577f11972ff90dfc7d12', \
'2.7.1' : '4eac6a462ec84dfe0cec96c82421b8e8', \ '2.7.1' : '4eac6a462ec84dfe0cec96c82421b8e8', \
'2.7.2' : 'cfa0b4dd90a81c25d3302e8d97bfeaea', \
} }
# parse and process arguments # parse and process arguments

View File

@ -1823,7 +1823,6 @@ class lammps(object):
with ExceptionCheck(self): with ExceptionCheck(self):
return self.lib.lammps_fix_external_get_force(self.lmp, fix_id.encode()) return self.lib.lammps_fix_external_get_force(self.lmp, fix_id.encode())
return None
# ------------------------------------------------------------------------- # -------------------------------------------------------------------------

View File

@ -647,7 +647,6 @@ void PPPMDispDielectric::fieldforce_c_ad()
// convert E-field to force and substract self forces // convert E-field to force and substract self forces
const double qfactor = qqrd2e * scale; const double qfactor = qqrd2e * scale;
double qtmp = eps[i]*q[i];
s1 = x[i][0]*hx_inv; s1 = x[i][0]*hx_inv;
s2 = x[i][1]*hy_inv; s2 = x[i][1]*hy_inv;
@ -751,7 +750,7 @@ void PPPMDispDielectric::fieldforce_c_peratom()
extended to non-neutral systems (J. Chem. Phys. 131, 094107). extended to non-neutral systems (J. Chem. Phys. 131, 094107).
------------------------------------------------------------------------- */ ------------------------------------------------------------------------- */
void PPPMDispDielectric::slabcorr(int eflag) void PPPMDispDielectric::slabcorr(int /*eflag*/)
{ {
// compute local contribution to global dipole moment // compute local contribution to global dipole moment

View File

@ -116,7 +116,7 @@ ComputeHMA::ComputeHMA(LAMMPS *lmp, int narg, char **arg) :
computeU = computeP = computeCv = -1; computeU = computeP = computeCv = -1;
returnAnharmonic = 0; returnAnharmonic = 0;
size_vector = 0; size_vector = 0;
memory->create(extlist, 3, "hma:extlist"); extlist = new int[3];
for (int iarg=4; iarg<narg; iarg++) { for (int iarg=4; iarg<narg; iarg++) {
if (!strcmp(arg[iarg], "u")) { if (!strcmp(arg[iarg], "u")) {
if (computeU>-1) continue; if (computeU>-1) continue;
@ -145,20 +145,11 @@ ComputeHMA::ComputeHMA(LAMMPS *lmp, int narg, char **arg) :
} }
} }
if (size_vector == 0) { if (size_vector == 0) error->all(FLERR,"Illegal compute hma command");
error->all(FLERR,"Illegal compute hma command"); vector = new double[size_vector];
}
if (size_vector<3) {
memory->grow(extlist, size_vector, "hma:extlist");
}
memory->create(vector, size_vector, "hma:vector");
if (computeU>-1 || computeCv>-1) { if (computeU>-1 || computeCv>-1) peflag = 1;
peflag = 1; if (computeP>-1) pressflag = 1;
}
if (computeP>-1) {
pressflag = 1;
}
nmax = 0; nmax = 0;
} }
@ -170,10 +161,11 @@ ComputeHMA::~ComputeHMA()
// check nfix in case all fixes have already been deleted // check nfix in case all fixes have already been deleted
if (modify->nfix) modify->delete_fix(id_fix); if (modify->nfix) modify->delete_fix(id_fix);
delete [] id_fix; delete[] id_fix;
delete [] id_temp; delete[] id_temp;
memory->destroy(extlist); delete[] extlist;
memory->destroy(vector); delete[] vector;
memory->destroy(deltaR); memory->destroy(deltaR);
} }

View File

@ -1,4 +1,3 @@
// clang-format off
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories https://www.lammps.org/, Sandia National Laboratories
@ -14,28 +13,26 @@
#include "compute_force_tally.h" #include "compute_force_tally.h"
#include <cmath>
#include "atom.h" #include "atom.h"
#include "group.h" #include "comm.h"
#include "pair.h"
#include "update.h"
#include "memory.h"
#include "error.h" #include "error.h"
#include "force.h" #include "force.h"
#include "comm.h" #include "group.h"
#include "memory.h"
#include "pair.h"
#include "update.h"
#include <cmath>
using namespace LAMMPS_NS; using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
ComputeForceTally::ComputeForceTally(LAMMPS *lmp, int narg, char **arg) : ComputeForceTally::ComputeForceTally(LAMMPS *lmp, int narg, char **arg) : Compute(lmp, narg, arg)
Compute(lmp, narg, arg)
{ {
if (narg < 4) error->all(FLERR,"Illegal compute force/tally command"); if (narg < 4) error->all(FLERR, "Illegal compute force/tally command");
igroup2 = group->find(arg[3]); igroup2 = group->find(arg[3]);
if (igroup2 == -1) if (igroup2 == -1) error->all(FLERR, "Could not find compute force/tally second group ID");
error->all(FLERR,"Could not find compute force/tally second group ID");
groupbit2 = group->bitmask[igroup2]; groupbit2 = group->bitmask[igroup2];
scalar_flag = 1; scalar_flag = 1;
@ -46,7 +43,7 @@ ComputeForceTally::ComputeForceTally(LAMMPS *lmp, int narg, char **arg) :
comm_reverse = size_peratom_cols = 3; comm_reverse = size_peratom_cols = 3;
extscalar = 1; extscalar = 1;
peflag = 1; // we need Pair::ev_tally() to be run peflag = 1; // we need Pair::ev_tally() to be run
did_setup = invoked_peratom = invoked_scalar = -1; did_setup = invoked_peratom = invoked_scalar = -1;
nmax = -1; nmax = -1;
@ -68,17 +65,16 @@ ComputeForceTally::~ComputeForceTally()
void ComputeForceTally::init() void ComputeForceTally::init()
{ {
if (force->pair == nullptr) if (force->pair == nullptr)
error->all(FLERR,"Trying to use compute force/tally without pair style"); error->all(FLERR, "Trying to use compute force/tally without pair style");
else else
force->pair->add_tally_callback(this); force->pair->add_tally_callback(this);
if (comm->me == 0) { if (comm->me == 0) {
if (force->pair->single_enable == 0 || force->pair->manybody_flag) if (force->pair->single_enable == 0 || force->pair->manybody_flag)
error->warning(FLERR,"Compute force/tally used with incompatible pair style"); error->warning(FLERR, "Compute force/tally used with incompatible pair style");
if (force->bond || force->angle || force->dihedral if (force->bond || force->angle || force->dihedral || force->improper || force->kspace)
|| force->improper || force->kspace) error->warning(FLERR, "Compute force/tally only called from pair style");
error->warning(FLERR,"Compute force/tally only called from pair style");
} }
did_setup = -1; did_setup = -1;
} }
@ -99,51 +95,48 @@ void ComputeForceTally::pair_setup_callback(int, int)
if (atom->nmax > nmax) { if (atom->nmax > nmax) {
memory->destroy(fatom); memory->destroy(fatom);
nmax = atom->nmax; nmax = atom->nmax;
memory->create(fatom,nmax,size_peratom_cols,"force/tally:fatom"); memory->create(fatom, nmax, size_peratom_cols, "force/tally:fatom");
array_atom = fatom; array_atom = fatom;
} }
// clear storage // clear storage
for (int i=0; i < ntotal; ++i) for (int i = 0; i < ntotal; ++i)
for (int j=0; j < size_peratom_cols; ++j) for (int j = 0; j < size_peratom_cols; ++j) fatom[i][j] = 0.0;
fatom[i][j] = 0.0;
for (int i=0; i < size_peratom_cols; ++i) for (int i = 0; i < size_peratom_cols; ++i) vector[i] = ftotal[i] = 0.0;
vector[i] = ftotal[i] = 0.0;
did_setup = update->ntimestep; did_setup = update->ntimestep;
} }
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
void ComputeForceTally::pair_tally_callback(int i, int j, int nlocal, int newton, void ComputeForceTally::pair_tally_callback(int i, int j, int nlocal, int newton, double, double,
double, double, double fpair, double fpair, double dx, double dy, double dz)
double dx, double dy, double dz)
{ {
const int * const mask = atom->mask; const int *const mask = atom->mask;
if ( ((mask[i] & groupbit) && (mask[j] & groupbit2)) if (((mask[i] & groupbit) && (mask[j] & groupbit2)) ||
|| ((mask[i] & groupbit2) && (mask[j] & groupbit))) { ((mask[i] & groupbit2) && (mask[j] & groupbit))) {
if (newton || i < nlocal) { if (newton || i < nlocal) {
if (mask[i] & groupbit) { if (mask[i] & groupbit) {
ftotal[0] += fpair*dx; ftotal[0] += fpair * dx;
ftotal[1] += fpair*dy; ftotal[1] += fpair * dy;
ftotal[2] += fpair*dz; ftotal[2] += fpair * dz;
} }
fatom[i][0] += fpair*dx; fatom[i][0] += fpair * dx;
fatom[i][1] += fpair*dy; fatom[i][1] += fpair * dy;
fatom[i][2] += fpair*dz; fatom[i][2] += fpair * dz;
} }
if (newton || j < nlocal) { if (newton || j < nlocal) {
if (mask[j] & groupbit) { if (mask[j] & groupbit) {
ftotal[0] -= fpair*dx; ftotal[0] -= fpair * dx;
ftotal[1] -= fpair*dy; ftotal[1] -= fpair * dy;
ftotal[2] -= fpair*dz; ftotal[2] -= fpair * dz;
} }
fatom[j][0] -= fpair*dx; fatom[j][0] -= fpair * dx;
fatom[j][1] -= fpair*dy; fatom[j][1] -= fpair * dy;
fatom[j][2] -= fpair*dz; fatom[j][2] -= fpair * dz;
} }
} }
} }
@ -152,7 +145,7 @@ void ComputeForceTally::pair_tally_callback(int i, int j, int nlocal, int newton
int ComputeForceTally::pack_reverse_comm(int n, int first, double *buf) int ComputeForceTally::pack_reverse_comm(int n, int first, double *buf)
{ {
int i,m,last; int i, m, last;
m = 0; m = 0;
last = first + n; last = first + n;
@ -168,7 +161,7 @@ int ComputeForceTally::pack_reverse_comm(int n, int first, double *buf)
void ComputeForceTally::unpack_reverse_comm(int n, int *list, double *buf) void ComputeForceTally::unpack_reverse_comm(int n, int *list, double *buf)
{ {
int i,j,m; int i, j, m;
m = 0; m = 0;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
@ -184,15 +177,14 @@ void ComputeForceTally::unpack_reverse_comm(int n, int *list, double *buf)
double ComputeForceTally::compute_scalar() double ComputeForceTally::compute_scalar()
{ {
invoked_scalar = update->ntimestep; invoked_scalar = update->ntimestep;
if ((did_setup != invoked_scalar) if ((did_setup != invoked_scalar) || (update->eflag_global != invoked_scalar))
|| (update->eflag_global != invoked_scalar)) error->all(FLERR, "Energy was not tallied on needed timestep");
error->all(FLERR,"Energy was not tallied on needed timestep");
// sum accumulated forces across procs // sum accumulated forces across procs
MPI_Allreduce(ftotal,vector,size_peratom_cols,MPI_DOUBLE,MPI_SUM,world); MPI_Allreduce(ftotal, vector, size_peratom_cols, MPI_DOUBLE, MPI_SUM, world);
scalar = sqrt(vector[0]*vector[0]+vector[1]*vector[1]+vector[2]*vector[2]); scalar = sqrt(vector[0] * vector[0] + vector[1] * vector[1] + vector[2] * vector[2]);
return scalar; return scalar;
} }
@ -201,9 +193,8 @@ double ComputeForceTally::compute_scalar()
void ComputeForceTally::compute_peratom() void ComputeForceTally::compute_peratom()
{ {
invoked_peratom = update->ntimestep; invoked_peratom = update->ntimestep;
if ((did_setup != invoked_peratom) if ((did_setup != invoked_peratom) || (update->eflag_global != invoked_peratom))
|| (update->eflag_global != invoked_peratom)) error->all(FLERR, "Energy was not tallied on needed timestep");
error->all(FLERR,"Energy was not tallied on needed timestep");
// collect contributions from ghost atoms // collect contributions from ghost atoms
@ -213,8 +204,7 @@ void ComputeForceTally::compute_peratom()
// clear out ghost atom data after it has been collected to local atoms // clear out ghost atom data after it has been collected to local atoms
const int nall = atom->nlocal + atom->nghost; const int nall = atom->nlocal + atom->nghost;
for (int i = atom->nlocal; i < nall; ++i) for (int i = atom->nlocal; i < nall; ++i)
for (int j = 0; j < size_peratom_cols; ++j) for (int j = 0; j < size_peratom_cols; ++j) fatom[i][j] = 0.0;
fatom[i][j] = 0.0;
} }
} }
@ -224,7 +214,6 @@ void ComputeForceTally::compute_peratom()
double ComputeForceTally::memory_usage() double ComputeForceTally::memory_usage()
{ {
double bytes = (nmax < 0) ? 0 : nmax*size_peratom_cols * sizeof(double); double bytes = (nmax < 0) ? 0 : nmax * (double)size_peratom_cols * sizeof(double);
return bytes; return bytes;
} }
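The memory_usage() changes in this and the following tally computes all add the same (double) cast; without it the product nmax * size_peratom_cols is evaluated in int and can overflow for very large per-rank atom counts. A minimal sketch (not LAMMPS code) of the difference:

#include <climits>
#include <cstdio>

// fixed form, as in the patch: widen to double before multiplying
static double bytes_used(int nmax, int cols)
{
  return nmax * (double) cols * sizeof(double);
}

int main()
{
  int nmax = 500000000;  // hypothetical count, chosen so that nmax*cols > INT_MAX
  int cols = 6;

  std::printf("INT_MAX      = %d\n", INT_MAX);
  std::printf("nmax * cols  = %.0f  (does not fit in a 32-bit int)\n",
              (double) nmax * cols);
  std::printf("memory usage = %.0f bytes\n", bytes_used(nmax, cols));
  return 0;
}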

View File

@ -1,4 +1,3 @@
// clang-format off
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories https://www.lammps.org/, Sandia National Laboratories
@ -15,26 +14,25 @@
#include "compute_heat_flux_tally.h" #include "compute_heat_flux_tally.h"
#include "atom.h" #include "atom.h"
#include "group.h" #include "comm.h"
#include "pair.h"
#include "update.h"
#include "memory.h"
#include "error.h" #include "error.h"
#include "force.h" #include "force.h"
#include "comm.h" #include "group.h"
#include "memory.h"
#include "pair.h"
#include "update.h"
using namespace LAMMPS_NS; using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
ComputeHeatFluxTally::ComputeHeatFluxTally(LAMMPS *lmp, int narg, char **arg) : ComputeHeatFluxTally::ComputeHeatFluxTally(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg) Compute(lmp, narg, arg)
{ {
if (narg < 4) error->all(FLERR,"Illegal compute heat/flux/tally command"); if (narg < 4) error->all(FLERR, "Illegal compute heat/flux/tally command");
igroup2 = group->find(arg[3]); igroup2 = group->find(arg[3]);
if (igroup2 == -1) if (igroup2 == -1) error->all(FLERR, "Could not find compute heat/flux/tally second group ID");
error->all(FLERR,"Could not find compute heat/flux/tally second group ID");
groupbit2 = group->bitmask[igroup2]; groupbit2 = group->bitmask[igroup2];
vector_flag = 1; vector_flag = 1;
@ -44,7 +42,7 @@ ComputeHeatFluxTally::ComputeHeatFluxTally(LAMMPS *lmp, int narg, char **arg) :
comm_reverse = 7; comm_reverse = 7;
extvector = 1; extvector = 1;
size_vector = 6; size_vector = 6;
peflag = 1; // we need Pair::ev_tally() to be run peflag = 1; // we need Pair::ev_tally() to be run
did_setup = 0; did_setup = 0;
invoked_peratom = invoked_scalar = -1; invoked_peratom = invoked_scalar = -1;
@ -71,17 +69,16 @@ ComputeHeatFluxTally::~ComputeHeatFluxTally()
void ComputeHeatFluxTally::init() void ComputeHeatFluxTally::init()
{ {
if (force->pair == nullptr) if (force->pair == nullptr)
error->all(FLERR,"Trying to use compute heat/flux/tally without pair style"); error->all(FLERR, "Trying to use compute heat/flux/tally without pair style");
else else
force->pair->add_tally_callback(this); force->pair->add_tally_callback(this);
if (comm->me == 0) { if (comm->me == 0) {
if (force->pair->single_enable == 0 || force->pair->manybody_flag) if (force->pair->single_enable == 0 || force->pair->manybody_flag)
error->warning(FLERR,"Compute heat/flux/tally used with incompatible pair style"); error->warning(FLERR, "Compute heat/flux/tally used with incompatible pair style");
if (force->bond || force->angle || force->dihedral if (force->bond || force->angle || force->dihedral || force->improper || force->kspace)
|| force->improper || force->kspace) error->warning(FLERR, "Compute heat/flux/tally only called from pair style");
error->warning(FLERR,"Compute heat/flux/tally only called from pair style");
} }
did_setup = -1; did_setup = -1;
} }
@ -102,13 +99,13 @@ void ComputeHeatFluxTally::pair_setup_callback(int, int)
memory->destroy(stress); memory->destroy(stress);
memory->destroy(eatom); memory->destroy(eatom);
nmax = atom->nmax; nmax = atom->nmax;
memory->create(stress,nmax,6,"heat/flux/tally:stress"); memory->create(stress, nmax, 6, "heat/flux/tally:stress");
memory->create(eatom,nmax,"heat/flux/tally:eatom"); memory->create(eatom, nmax, "heat/flux/tally:eatom");
} }
// clear storage // clear storage
for (int i=0; i < ntotal; ++i) { for (int i = 0; i < ntotal; ++i) {
eatom[i] = 0.0; eatom[i] = 0.0;
stress[i][0] = 0.0; stress[i][0] = 0.0;
stress[i][1] = 0.0; stress[i][1] = 0.0;
@ -118,30 +115,29 @@ void ComputeHeatFluxTally::pair_setup_callback(int, int)
stress[i][5] = 0.0; stress[i][5] = 0.0;
} }
for (int i=0; i < size_vector; ++i) for (int i = 0; i < size_vector; ++i) vector[i] = heatj[i] = 0.0;
vector[i] = heatj[i] = 0.0;
did_setup = update->ntimestep; did_setup = update->ntimestep;
} }
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
void ComputeHeatFluxTally::pair_tally_callback(int i, int j, int nlocal, int newton, void ComputeHeatFluxTally::pair_tally_callback(int i, int j, int nlocal, int newton, double evdwl,
double evdwl, double ecoul, double fpair, double ecoul, double fpair, double dx, double dy,
double dx, double dy, double dz) double dz)
{ {
const int * const mask = atom->mask; const int *const mask = atom->mask;
if ( ((mask[i] & groupbit) && (mask[j] & groupbit2)) if (((mask[i] & groupbit) && (mask[j] & groupbit2)) ||
|| ((mask[i] & groupbit2) && (mask[j] & groupbit))) { ((mask[i] & groupbit2) && (mask[j] & groupbit))) {
const double epairhalf = 0.5 * (evdwl + ecoul); const double epairhalf = 0.5 * (evdwl + ecoul);
fpair *= 0.5; fpair *= 0.5;
const double v0 = dx*dx*fpair; // dx*fpair = Fij_x const double v0 = dx * dx * fpair; // dx*fpair = Fij_x
const double v1 = dy*dy*fpair; const double v1 = dy * dy * fpair;
const double v2 = dz*dz*fpair; const double v2 = dz * dz * fpair;
const double v3 = dx*dy*fpair; const double v3 = dx * dy * fpair;
const double v4 = dx*dz*fpair; const double v4 = dx * dz * fpair;
const double v5 = dy*dz*fpair; const double v5 = dy * dz * fpair;
if (newton || i < nlocal) { if (newton || i < nlocal) {
eatom[i] += epairhalf; eatom[i] += epairhalf;
@ -168,7 +164,7 @@ void ComputeHeatFluxTally::pair_tally_callback(int i, int j, int nlocal, int new
int ComputeHeatFluxTally::pack_reverse_comm(int n, int first, double *buf) int ComputeHeatFluxTally::pack_reverse_comm(int n, int first, double *buf)
{ {
int i,m,last; int i, m, last;
m = 0; m = 0;
last = first + n; last = first + n;
@ -188,7 +184,7 @@ int ComputeHeatFluxTally::pack_reverse_comm(int n, int first, double *buf)
void ComputeHeatFluxTally::unpack_reverse_comm(int n, int *list, double *buf) void ComputeHeatFluxTally::unpack_reverse_comm(int n, int *list, double *buf)
{ {
int i,j,m; int i, j, m;
m = 0; m = 0;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
@ -209,7 +205,7 @@ void ComputeHeatFluxTally::compute_vector()
{ {
invoked_vector = update->ntimestep; invoked_vector = update->ntimestep;
if ((did_setup != invoked_vector) || (update->eflag_global != invoked_vector)) if ((did_setup != invoked_vector) || (update->eflag_global != invoked_vector))
error->all(FLERR,"Energy was not tallied on needed timestep"); error->all(FLERR, "Energy was not tallied on needed timestep");
// collect contributions from ghost atoms // collect contributions from ghost atoms
@ -244,26 +240,28 @@ void ComputeHeatFluxTally::compute_vector()
double *rmass = atom->rmass; double *rmass = atom->rmass;
int *type = atom->type; int *type = atom->type;
double jc[3] = {0.0,0.0,0.0}; double jc[3] = {0.0, 0.0, 0.0};
double jv[3] = {0.0,0.0,0.0}; double jv[3] = {0.0, 0.0, 0.0};
for (int i = 0; i < nlocal; i++) { for (int i = 0; i < nlocal; i++) {
if (mask[i] & groupbit) { if (mask[i] & groupbit) {
const double * const vi = v[i]; const double *const vi = v[i];
const double * const si = stress[i]; const double *const si = stress[i];
double ke_i; double ke_i;
if (rmass) ke_i = pfactor * rmass[i]; if (rmass)
else ke_i = pfactor * mass[type[i]]; ke_i = pfactor * rmass[i];
ke_i *= (vi[0]*vi[0] + vi[1]*vi[1] + vi[2]*vi[2]); else
ke_i = pfactor * mass[type[i]];
ke_i *= (vi[0] * vi[0] + vi[1] * vi[1] + vi[2] * vi[2]);
ke_i += eatom[i]; ke_i += eatom[i];
jc[0] += ke_i*vi[0]; jc[0] += ke_i * vi[0];
jc[1] += ke_i*vi[1]; jc[1] += ke_i * vi[1];
jc[2] += ke_i*vi[2]; jc[2] += ke_i * vi[2];
jv[0] += si[0]*vi[0] + si[3]*vi[1] + si[4]*vi[2]; jv[0] += si[0] * vi[0] + si[3] * vi[1] + si[4] * vi[2];
jv[1] += si[3]*vi[0] + si[1]*vi[1] + si[5]*vi[2]; jv[1] += si[3] * vi[0] + si[1] * vi[1] + si[5] * vi[2];
jv[2] += si[4]*vi[0] + si[5]*vi[1] + si[2]*vi[2]; jv[2] += si[4] * vi[0] + si[5] * vi[1] + si[2] * vi[2];
} }
} }
@ -274,7 +272,7 @@ void ComputeHeatFluxTally::compute_vector()
heatj[3] = jc[0]; heatj[3] = jc[0];
heatj[4] = jc[1]; heatj[4] = jc[1];
heatj[5] = jc[2]; heatj[5] = jc[2];
MPI_Allreduce(heatj,vector,size_vector,MPI_DOUBLE,MPI_SUM,world); MPI_Allreduce(heatj, vector, size_vector, MPI_DOUBLE, MPI_SUM, world);
} }
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
@ -283,7 +281,6 @@ void ComputeHeatFluxTally::compute_vector()
double ComputeHeatFluxTally::memory_usage() double ComputeHeatFluxTally::memory_usage()
{ {
double bytes = (nmax < 0) ? 0 : nmax*comm_reverse * sizeof(double); double bytes = (nmax < 0) ? 0 : nmax * (double)comm_reverse * sizeof(double);
return bytes; return bytes;
} }

View File

@ -233,6 +233,6 @@ void ComputeHeatFluxVirialTally::compute_peratom()
double ComputeHeatFluxVirialTally::memory_usage() double ComputeHeatFluxVirialTally::memory_usage()
{ {
double bytes = (nmax < 0) ? 0 : nmax * size_peratom_cols * sizeof(double); double bytes = (nmax < 0) ? 0 : nmax * (double)size_peratom_cols * sizeof(double);
return bytes; return bytes;
} }

View File

@ -1,4 +1,3 @@
// clang-format off
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories https://www.lammps.org/, Sandia National Laboratories
@ -15,25 +14,23 @@
#include "compute_pe_mol_tally.h" #include "compute_pe_mol_tally.h"
#include "atom.h" #include "atom.h"
#include "comm.h"
#include "error.h"
#include "force.h"
#include "group.h" #include "group.h"
#include "pair.h" #include "pair.h"
#include "update.h" #include "update.h"
#include "error.h"
#include "force.h"
#include "comm.h"
using namespace LAMMPS_NS; using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
ComputePEMolTally::ComputePEMolTally(LAMMPS *lmp, int narg, char **arg) : ComputePEMolTally::ComputePEMolTally(LAMMPS *lmp, int narg, char **arg) : Compute(lmp, narg, arg)
Compute(lmp, narg, arg)
{ {
if (narg < 4) error->all(FLERR,"Illegal compute pe/mol/tally command"); if (narg < 4) error->all(FLERR, "Illegal compute pe/mol/tally command");
igroup2 = group->find(arg[3]); igroup2 = group->find(arg[3]);
if (igroup2 == -1) if (igroup2 == -1) error->all(FLERR, "Could not find compute pe/mol/tally second group ID");
error->all(FLERR,"Could not find compute pe/mol/tally second group ID");
groupbit2 = group->bitmask[igroup2]; groupbit2 = group->bitmask[igroup2];
vector_flag = 1; vector_flag = 1;
@ -42,7 +39,7 @@ ComputePEMolTally::ComputePEMolTally(LAMMPS *lmp, int narg, char **arg) :
dynamic_group_allow = 0; dynamic_group_allow = 0;
extvector = 1; extvector = 1;
peflag = 1; // we need Pair::ev_tally() to be run peflag = 1; // we need Pair::ev_tally() to be run
did_setup = invoked_vector = -1; did_setup = invoked_vector = -1;
vector = new double[size_vector]; vector = new double[size_vector];
@ -61,20 +58,18 @@ ComputePEMolTally::~ComputePEMolTally()
void ComputePEMolTally::init() void ComputePEMolTally::init()
{ {
if (force->pair == nullptr) if (force->pair == nullptr)
error->all(FLERR,"Trying to use compute pe/mol/tally without pair style"); error->all(FLERR, "Trying to use compute pe/mol/tally without pair style");
else else
force->pair->add_tally_callback(this); force->pair->add_tally_callback(this);
if (atom->molecule_flag == 0) if (atom->molecule_flag == 0) error->all(FLERR, "Compute pe/mol/tally requires molecule IDs");
error->all(FLERR,"Compute pe/mol/tally requires molecule IDs");
if (comm->me == 0) { if (comm->me == 0) {
if (force->pair->single_enable == 0 || force->pair->manybody_flag) if (force->pair->single_enable == 0 || force->pair->manybody_flag)
error->warning(FLERR,"Compute pe/mol/tally used with incompatible pair style"); error->warning(FLERR, "Compute pe/mol/tally used with incompatible pair style");
if (force->bond || force->angle || force->dihedral if (force->bond || force->angle || force->dihedral || force->improper || force->kspace)
|| force->improper || force->kspace) error->warning(FLERR, "Compute pe/mol/tally only called from pair style");
error->warning(FLERR,"Compute pe/mol/tally only called from pair style");
} }
did_setup = -1; did_setup = -1;
} }
@ -93,29 +88,33 @@ void ComputePEMolTally::pair_setup_callback(int, int)
} }
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
void ComputePEMolTally::pair_tally_callback(int i, int j, int nlocal, int newton, void ComputePEMolTally::pair_tally_callback(int i, int j, int nlocal, int newton, double evdwl,
double evdwl, double ecoul, double, double ecoul, double, double, double, double)
double, double, double)
{ {
const int * const mask = atom->mask; const int *const mask = atom->mask;
const tagint * const molid = atom->molecule; const tagint *const molid = atom->molecule;
if ( ((mask[i] & groupbit) && (mask[j] & groupbit2)) if (((mask[i] & groupbit) && (mask[j] & groupbit2)) ||
|| ((mask[i] & groupbit2) && (mask[j] & groupbit))) { ((mask[i] & groupbit2) && (mask[j] & groupbit))) {
evdwl *= 0.5; ecoul *= 0.5; evdwl *= 0.5;
ecoul *= 0.5;
if (newton || i < nlocal) { if (newton || i < nlocal) {
if (molid[i] == molid[j]) { if (molid[i] == molid[j]) {
etotal[0] += evdwl; etotal[1] += ecoul; etotal[0] += evdwl;
etotal[1] += ecoul;
} else { } else {
etotal[2] += evdwl; etotal[3] += ecoul; etotal[2] += evdwl;
etotal[3] += ecoul;
} }
} }
if (newton || j < nlocal) { if (newton || j < nlocal) {
if (molid[i] == molid[j]) { if (molid[i] == molid[j]) {
etotal[0] += evdwl; etotal[1] += ecoul; etotal[0] += evdwl;
etotal[1] += ecoul;
} else { } else {
etotal[2] += evdwl; etotal[3] += ecoul; etotal[2] += evdwl;
etotal[3] += ecoul;
} }
} }
} }
@ -127,10 +126,9 @@ void ComputePEMolTally::compute_vector()
{ {
invoked_vector = update->ntimestep; invoked_vector = update->ntimestep;
if ((did_setup != invoked_vector) || (update->eflag_global != invoked_vector)) if ((did_setup != invoked_vector) || (update->eflag_global != invoked_vector))
error->all(FLERR,"Energy was not tallied on needed timestep"); error->all(FLERR, "Energy was not tallied on needed timestep");
// sum accumulated energies across procs // sum accumulated energies across procs
MPI_Allreduce(etotal,vector,size_vector,MPI_DOUBLE,MPI_SUM,world); MPI_Allreduce(etotal, vector, size_vector, MPI_DOUBLE, MPI_SUM, world);
} }

View File

@ -1,4 +1,3 @@
// clang-format off
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories https://www.lammps.org/, Sandia National Laboratories
@ -15,26 +14,24 @@
#include "compute_pe_tally.h" #include "compute_pe_tally.h"
#include "atom.h" #include "atom.h"
#include "group.h" #include "comm.h"
#include "pair.h"
#include "update.h"
#include "memory.h"
#include "error.h" #include "error.h"
#include "force.h" #include "force.h"
#include "comm.h" #include "group.h"
#include "memory.h"
#include "pair.h"
#include "update.h"
using namespace LAMMPS_NS; using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
ComputePETally::ComputePETally(LAMMPS *lmp, int narg, char **arg) : ComputePETally::ComputePETally(LAMMPS *lmp, int narg, char **arg) : Compute(lmp, narg, arg)
Compute(lmp, narg, arg)
{ {
if (narg < 4) error->all(FLERR,"Illegal compute pe/tally command"); if (narg < 4) error->all(FLERR, "Illegal compute pe/tally command");
igroup2 = group->find(arg[3]); igroup2 = group->find(arg[3]);
if (igroup2 == -1) if (igroup2 == -1) error->all(FLERR, "Could not find compute pe/tally second group ID");
error->all(FLERR,"Could not find compute pe/tally second group ID");
groupbit2 = group->bitmask[igroup2]; groupbit2 = group->bitmask[igroup2];
scalar_flag = 1; scalar_flag = 1;
@ -45,7 +42,7 @@ ComputePETally::ComputePETally(LAMMPS *lmp, int narg, char **arg) :
comm_reverse = size_peratom_cols = 2; comm_reverse = size_peratom_cols = 2;
extscalar = 1; extscalar = 1;
peflag = 1; // we need Pair::ev_tally() to be run peflag = 1; // we need Pair::ev_tally() to be run
did_setup = invoked_peratom = invoked_scalar = -1; did_setup = invoked_peratom = invoked_scalar = -1;
nmax = -1; nmax = -1;
@ -67,17 +64,16 @@ ComputePETally::~ComputePETally()
void ComputePETally::init() void ComputePETally::init()
{ {
if (force->pair == nullptr) if (force->pair == nullptr)
error->all(FLERR,"Trying to use compute pe/tally without a pair style"); error->all(FLERR, "Trying to use compute pe/tally without a pair style");
else else
force->pair->add_tally_callback(this); force->pair->add_tally_callback(this);
if (comm->me == 0) { if (comm->me == 0) {
if (force->pair->single_enable == 0 || force->pair->manybody_flag) if (force->pair->single_enable == 0 || force->pair->manybody_flag)
error->warning(FLERR,"Compute pe/tally used with incompatible pair style"); error->warning(FLERR, "Compute pe/tally used with incompatible pair style");
if (force->bond || force->angle || force->dihedral if (force->bond || force->angle || force->dihedral || force->improper || force->kspace)
|| force->improper || force->kspace) error->warning(FLERR, "Compute pe/tally only called from pair style");
error->warning(FLERR,"Compute pe/tally only called from pair style");
} }
did_setup = -1; did_setup = -1;
} }
@ -98,14 +94,13 @@ void ComputePETally::pair_setup_callback(int, int)
if (atom->nmax > nmax) { if (atom->nmax > nmax) {
memory->destroy(eatom); memory->destroy(eatom);
nmax = atom->nmax; nmax = atom->nmax;
memory->create(eatom,nmax,size_peratom_cols,"pe/tally:eatom"); memory->create(eatom, nmax, size_peratom_cols, "pe/tally:eatom");
array_atom = eatom; array_atom = eatom;
} }
// clear storage // clear storage
for (int i=0; i < ntotal; ++i) for (int i = 0; i < ntotal; ++i) eatom[i][0] = eatom[i][1] = 0.0;
eatom[i][0] = eatom[i][1] = 0.0;
vector[0] = etotal[0] = vector[1] = etotal[1] = 0.0; vector[0] = etotal[0] = vector[1] = etotal[1] = 0.0;
@ -113,23 +108,27 @@ void ComputePETally::pair_setup_callback(int, int)
} }
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
void ComputePETally::pair_tally_callback(int i, int j, int nlocal, int newton, void ComputePETally::pair_tally_callback(int i, int j, int nlocal, int newton, double evdwl,
double evdwl, double ecoul, double, double ecoul, double, double, double, double)
double, double, double)
{ {
const int * const mask = atom->mask; const int *const mask = atom->mask;
if ( ((mask[i] & groupbit) && (mask[j] & groupbit2)) if (((mask[i] & groupbit) && (mask[j] & groupbit2)) ||
|| ((mask[i] & groupbit2) && (mask[j] & groupbit))) { ((mask[i] & groupbit2) && (mask[j] & groupbit))) {
evdwl *= 0.5; ecoul *= 0.5; evdwl *= 0.5;
ecoul *= 0.5;
if (newton || i < nlocal) { if (newton || i < nlocal) {
etotal[0] += evdwl; eatom[i][0] += evdwl; etotal[0] += evdwl;
etotal[1] += ecoul; eatom[i][1] += ecoul; eatom[i][0] += evdwl;
etotal[1] += ecoul;
eatom[i][1] += ecoul;
} }
if (newton || j < nlocal) { if (newton || j < nlocal) {
etotal[0] += evdwl; eatom[j][0] += evdwl; etotal[0] += evdwl;
etotal[1] += ecoul; eatom[j][1] += ecoul; eatom[j][0] += evdwl;
etotal[1] += ecoul;
eatom[j][1] += ecoul;
} }
} }
} }
@ -138,7 +137,7 @@ void ComputePETally::pair_tally_callback(int i, int j, int nlocal, int newton,
int ComputePETally::pack_reverse_comm(int n, int first, double *buf) int ComputePETally::pack_reverse_comm(int n, int first, double *buf)
{ {
int i,m,last; int i, m, last;
m = 0; m = 0;
last = first + n; last = first + n;
@ -153,7 +152,7 @@ int ComputePETally::pack_reverse_comm(int n, int first, double *buf)
void ComputePETally::unpack_reverse_comm(int n, int *list, double *buf) void ComputePETally::unpack_reverse_comm(int n, int *list, double *buf)
{ {
int i,j,m; int i, j, m;
m = 0; m = 0;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
@ -168,15 +167,14 @@ void ComputePETally::unpack_reverse_comm(int n, int *list, double *buf)
double ComputePETally::compute_scalar() double ComputePETally::compute_scalar()
{ {
invoked_scalar = update->ntimestep; invoked_scalar = update->ntimestep;
if ((did_setup != invoked_scalar) if ((did_setup != invoked_scalar) || (update->eflag_global != invoked_scalar))
|| (update->eflag_global != invoked_scalar)) error->all(FLERR, "Energy was not tallied on needed timestep");
error->all(FLERR,"Energy was not tallied on needed timestep");
// sum accumulated energies across procs // sum accumulated energies across procs
MPI_Allreduce(etotal,vector,size_peratom_cols,MPI_DOUBLE,MPI_SUM,world); MPI_Allreduce(etotal, vector, size_peratom_cols, MPI_DOUBLE, MPI_SUM, world);
scalar = vector[0]+vector[1]; scalar = vector[0] + vector[1];
return scalar; return scalar;
} }
@ -185,9 +183,8 @@ double ComputePETally::compute_scalar()
void ComputePETally::compute_peratom() void ComputePETally::compute_peratom()
{ {
invoked_peratom = update->ntimestep; invoked_peratom = update->ntimestep;
if ((did_setup != invoked_peratom) if ((did_setup != invoked_peratom) || (update->eflag_global != invoked_peratom))
|| (update->eflag_global != invoked_peratom)) error->all(FLERR, "Energy was not tallied on needed timestep");
error->all(FLERR,"Energy was not tallied on needed timestep");
// collect contributions from ghost atoms // collect contributions from ghost atoms
@ -196,8 +193,7 @@ void ComputePETally::compute_peratom()
// clear out ghost atom data after it has been collected to local atoms // clear out ghost atom data after it has been collected to local atoms
const int nall = atom->nlocal + atom->nghost; const int nall = atom->nlocal + atom->nghost;
for (int i = atom->nlocal; i < nall; ++i) for (int i = atom->nlocal; i < nall; ++i) eatom[i][0] = eatom[i][1] = 0.0;
eatom[i][0] = eatom[i][1] = 0.0;
} }
} }
@ -207,7 +203,6 @@ void ComputePETally::compute_peratom()
double ComputePETally::memory_usage() double ComputePETally::memory_usage()
{ {
double bytes = (nmax < 0) ? 0 : nmax*size_peratom_cols * sizeof(double); double bytes = (nmax < 0) ? 0 : nmax * (double)size_peratom_cols * sizeof(double);
return bytes; return bytes;
} }

View File

@ -1,4 +1,3 @@
// clang-format off
/* ---------------------------------------------------------------------- /* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories https://www.lammps.org/, Sandia National Laboratories
@ -15,27 +14,25 @@
#include "compute_stress_tally.h" #include "compute_stress_tally.h"
#include "atom.h" #include "atom.h"
#include "group.h"
#include "pair.h"
#include "update.h"
#include "memory.h"
#include "error.h"
#include "force.h"
#include "comm.h" #include "comm.h"
#include "domain.h" #include "domain.h"
#include "error.h"
#include "force.h"
#include "group.h"
#include "memory.h"
#include "pair.h"
#include "update.h"
using namespace LAMMPS_NS; using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
ComputeStressTally::ComputeStressTally(LAMMPS *lmp, int narg, char **arg) : ComputeStressTally::ComputeStressTally(LAMMPS *lmp, int narg, char **arg) : Compute(lmp, narg, arg)
Compute(lmp, narg, arg)
{ {
if (narg < 4) error->all(FLERR,"Illegal compute stress/tally command"); if (narg < 4) error->all(FLERR, "Illegal compute stress/tally command");
igroup2 = group->find(arg[3]); igroup2 = group->find(arg[3]);
if (igroup2 == -1) if (igroup2 == -1) error->all(FLERR, "Could not find compute stress/tally second group ID");
error->all(FLERR,"Could not find compute stress/tally second group ID");
groupbit2 = group->bitmask[igroup2]; groupbit2 = group->bitmask[igroup2];
scalar_flag = 1; scalar_flag = 1;
@ -46,7 +43,7 @@ ComputeStressTally::ComputeStressTally(LAMMPS *lmp, int narg, char **arg) :
comm_reverse = size_peratom_cols = 6; comm_reverse = size_peratom_cols = 6;
extscalar = 0; extscalar = 0;
peflag = 1; // we need Pair::ev_tally() to be run peflag = 1; // we need Pair::ev_tally() to be run
did_setup = invoked_peratom = invoked_scalar = -1; did_setup = invoked_peratom = invoked_scalar = -1;
nmax = -1; nmax = -1;
@ -70,17 +67,16 @@ ComputeStressTally::~ComputeStressTally()
void ComputeStressTally::init() void ComputeStressTally::init()
{ {
if (force->pair == nullptr) if (force->pair == nullptr)
error->all(FLERR,"Trying to use compute stress/tally without pair style"); error->all(FLERR, "Trying to use compute stress/tally without pair style");
else else
force->pair->add_tally_callback(this); force->pair->add_tally_callback(this);
if (comm->me == 0) { if (comm->me == 0) {
if (force->pair->single_enable == 0 || force->pair->manybody_flag) if (force->pair->single_enable == 0 || force->pair->manybody_flag)
error->warning(FLERR,"Compute stress/tally used with incompatible pair style"); error->warning(FLERR, "Compute stress/tally used with incompatible pair style");
if (force->bond || force->angle || force->dihedral if (force->bond || force->angle || force->dihedral || force->improper || force->kspace)
|| force->improper || force->kspace) error->warning(FLERR, "Compute stress/tally only called from pair style");
error->warning(FLERR,"Compute stress/tally only called from pair style");
} }
did_setup = -1; did_setup = -1;
} }
@ -101,55 +97,64 @@ void ComputeStressTally::pair_setup_callback(int, int)
if (atom->nmax > nmax) { if (atom->nmax > nmax) {
memory->destroy(stress); memory->destroy(stress);
nmax = atom->nmax; nmax = atom->nmax;
memory->create(stress,nmax,size_peratom_cols,"stress/tally:stress"); memory->create(stress, nmax, size_peratom_cols, "stress/tally:stress");
array_atom = stress; array_atom = stress;
} }
// clear storage // clear storage
for (int i=0; i < ntotal; ++i) for (int i = 0; i < ntotal; ++i)
for (int j=0; j < size_peratom_cols; ++j) for (int j = 0; j < size_peratom_cols; ++j) stress[i][j] = 0.0;
stress[i][j] = 0.0;
for (int i=0; i < size_peratom_cols; ++i) for (int i = 0; i < size_peratom_cols; ++i) vector[i] = virial[i] = 0.0;
vector[i] = virial[i] = 0.0;
did_setup = update->ntimestep; did_setup = update->ntimestep;
} }
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
void ComputeStressTally::pair_tally_callback(int i, int j, int nlocal, int newton, void ComputeStressTally::pair_tally_callback(int i, int j, int nlocal, int newton, double, double,
double, double, double fpair, double fpair, double dx, double dy, double dz)
double dx, double dy, double dz)
{ {
const int * const mask = atom->mask; const int *const mask = atom->mask;
if ( ((mask[i] & groupbit) && (mask[j] & groupbit2)) if (((mask[i] & groupbit) && (mask[j] & groupbit2)) ||
|| ((mask[i] & groupbit2) && (mask[j] & groupbit))) { ((mask[i] & groupbit2) && (mask[j] & groupbit))) {
fpair *= 0.5; fpair *= 0.5;
const double v0 = dx*dx*fpair; const double v0 = dx * dx * fpair;
const double v1 = dy*dy*fpair; const double v1 = dy * dy * fpair;
const double v2 = dz*dz*fpair; const double v2 = dz * dz * fpair;
const double v3 = dx*dy*fpair; const double v3 = dx * dy * fpair;
const double v4 = dx*dz*fpair; const double v4 = dx * dz * fpair;
const double v5 = dy*dz*fpair; const double v5 = dy * dz * fpair;
if (newton || i < nlocal) { if (newton || i < nlocal) {
virial[0] += v0; stress[i][0] += v0; virial[0] += v0;
virial[1] += v1; stress[i][1] += v1; stress[i][0] += v0;
virial[2] += v2; stress[i][2] += v2; virial[1] += v1;
virial[3] += v3; stress[i][3] += v3; stress[i][1] += v1;
virial[4] += v4; stress[i][4] += v4; virial[2] += v2;
virial[5] += v5; stress[i][5] += v5; stress[i][2] += v2;
virial[3] += v3;
stress[i][3] += v3;
virial[4] += v4;
stress[i][4] += v4;
virial[5] += v5;
stress[i][5] += v5;
} }
if (newton || j < nlocal) { if (newton || j < nlocal) {
virial[0] += v0; stress[j][0] += v0; virial[0] += v0;
virial[1] += v1; stress[j][1] += v1; stress[j][0] += v0;
virial[2] += v2; stress[j][2] += v2; virial[1] += v1;
virial[3] += v3; stress[j][3] += v3; stress[j][1] += v1;
virial[4] += v4; stress[j][4] += v4; virial[2] += v2;
virial[5] += v5; stress[j][5] += v5; stress[j][2] += v2;
virial[3] += v3;
stress[j][3] += v3;
virial[4] += v4;
stress[j][4] += v4;
virial[5] += v5;
stress[j][5] += v5;
} }
} }
} }
@ -158,7 +163,7 @@ void ComputeStressTally::pair_tally_callback(int i, int j, int nlocal, int newto
int ComputeStressTally::pack_reverse_comm(int n, int first, double *buf) int ComputeStressTally::pack_reverse_comm(int n, int first, double *buf)
{ {
int i,m,last; int i, m, last;
m = 0; m = 0;
last = first + n; last = first + n;
@ -177,7 +182,7 @@ int ComputeStressTally::pack_reverse_comm(int n, int first, double *buf)
void ComputeStressTally::unpack_reverse_comm(int n, int *list, double *buf) void ComputeStressTally::unpack_reverse_comm(int n, int *list, double *buf)
{ {
int i,j,m; int i, j, m;
m = 0; m = 0;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
@ -196,18 +201,17 @@ void ComputeStressTally::unpack_reverse_comm(int n, int *list, double *buf)
double ComputeStressTally::compute_scalar() double ComputeStressTally::compute_scalar()
{ {
invoked_scalar = update->ntimestep; invoked_scalar = update->ntimestep;
if ((did_setup != invoked_scalar) if ((did_setup != invoked_scalar) || (update->eflag_global != invoked_scalar))
|| (update->eflag_global != invoked_scalar)) error->all(FLERR, "Energy was not tallied on needed timestep");
error->all(FLERR,"Energy was not tallied on needed timestep");
// sum accumulated forces across procs // sum accumulated forces across procs
MPI_Allreduce(virial,vector,size_peratom_cols,MPI_DOUBLE,MPI_SUM,world); MPI_Allreduce(virial, vector, size_peratom_cols, MPI_DOUBLE, MPI_SUM, world);
if (domain->dimension == 3) if (domain->dimension == 3)
scalar = (vector[0]+vector[1]+vector[2])/3.0; scalar = (vector[0] + vector[1] + vector[2]) / 3.0;
else else
scalar = (vector[0]+vector[1])/2.0; scalar = (vector[0] + vector[1]) / 2.0;
return scalar; return scalar;
} }
@ -217,9 +221,8 @@ double ComputeStressTally::compute_scalar()
void ComputeStressTally::compute_peratom() void ComputeStressTally::compute_peratom()
{ {
invoked_peratom = update->ntimestep; invoked_peratom = update->ntimestep;
if ((did_setup != invoked_peratom) if ((did_setup != invoked_peratom) || (update->eflag_global != invoked_peratom))
|| (update->eflag_global != invoked_peratom)) error->all(FLERR, "Energy was not tallied on needed timestep");
error->all(FLERR,"Energy was not tallied on needed timestep");
// collect contributions from ghost atoms // collect contributions from ghost atoms
@ -228,8 +231,7 @@ void ComputeStressTally::compute_peratom()
const int nall = atom->nlocal + atom->nghost; const int nall = atom->nlocal + atom->nghost;
for (int i = atom->nlocal; i < nall; ++i) for (int i = atom->nlocal; i < nall; ++i)
for (int j = 0; j < size_peratom_cols; ++j) for (int j = 0; j < size_peratom_cols; ++j) stress[i][j] = 0.0;
stress[i][j] = 0.0;
} }
// convert to stress*volume units = -pressure*volume // convert to stress*volume units = -pressure*volume
@ -251,7 +253,6 @@ void ComputeStressTally::compute_peratom()
double ComputeStressTally::memory_usage() double ComputeStressTally::memory_usage()
{ {
double bytes = (nmax < 0) ? 0 : nmax*size_peratom_cols * sizeof(double); double bytes = (nmax < 0) ? 0 : nmax * (double)size_peratom_cols * sizeof(double);
return bytes; return bytes;
} }

View File

@ -50,7 +50,7 @@ CUB_URL="https://github.com/NVlabs/cub/archive/1.12.0.tar.gz"
KOKKOS_URL="https://github.com/kokkos/kokkos/archive/3.4.01.tar.gz" KOKKOS_URL="https://github.com/kokkos/kokkos/archive/3.4.01.tar.gz"
KIM_URL="https://s3.openkim.org/kim-api/kim-api-2.2.1.txz" KIM_URL="https://s3.openkim.org/kim-api/kim-api-2.2.1.txz"
MSCG_URL="https://github.com/uchicago-voth/MSCG-release/archive/1.7.3.1.tar.gz" MSCG_URL="https://github.com/uchicago-voth/MSCG-release/archive/1.7.3.1.tar.gz"
PLUMED_URL="https://github.com/plumed/plumed2/releases/download/v2.7.1/plumed-src-2.7.1.tgz" PLUMED_URL="https://github.com/plumed/plumed2/releases/download/v2.7.2/plumed-src-2.7.2.tgz"
PACELIB_URL="https://github.com/ICAMS/lammps-user-pace/archive/refs/tags/v.2021.4.9.tar.gz" PACELIB_URL="https://github.com/ICAMS/lammps-user-pace/archive/refs/tags/v.2021.4.9.tar.gz"
LATTE_URL="https://github.com/lanl/LATTE/archive/v1.2.2.tar.gz" LATTE_URL="https://github.com/lanl/LATTE/archive/v1.2.2.tar.gz"
SCAFACOS_URL="https://github.com/scafacos/scafacos/releases/download/v1.0.1/scafacos-1.0.1.tar.gz" SCAFACOS_URL="https://github.com/scafacos/scafacos/releases/download/v1.0.1/scafacos-1.0.1.tar.gz"

View File

@ -1,7 +1,7 @@
--- ---
lammps_version: 2 Jul 2021 lammps_version: 2 Jul 2021
date_generated: Wed Jul 21 15:49:45 2021 date_generated: Wed Jul 21 15:49:45 2021
epsilon: 1e-11 epsilon: 2e-11
prerequisites: ! | prerequisites: ! |
pair reaxff pair reaxff
fix qeq/reaxff fix qeq/reaxff

View File

@ -1,7 +1,7 @@
--- ---
lammps_version: 2 Jul 2021 lammps_version: 2 Jul 2021
date_generated: Wed Jul 21 15:49:47 2021 date_generated: Wed Jul 21 15:49:47 2021
epsilon: 1e-12 epsilon: 3e-12
prerequisites: ! | prerequisites: ! |
pair reaxff pair reaxff
fix qeq/reaxff fix qeq/reaxff

View File

@ -281,7 +281,6 @@ TEST_F(FileOperationsTest, error_message_warn)
TEST_F(FileOperationsTest, error_all_one) TEST_F(FileOperationsTest, error_all_one)
{ {
char buf[64];
BEGIN_HIDE_OUTPUT(); BEGIN_HIDE_OUTPUT();
command("echo none"); command("echo none");
command("log none"); command("log none");

View File

@ -94,7 +94,8 @@ TEST(Tokenizer, copy_constructor)
TEST(Tokenizer, move_constructor) TEST(Tokenizer, move_constructor)
{ {
Tokenizer u = std::move(Tokenizer("test new word ", " ")); Tokenizer t("test new word ", " ");
Tokenizer u = std::move(t);
ASSERT_THAT(u.next(), Eq("test")); ASSERT_THAT(u.next(), Eq("test"));
ASSERT_THAT(u.next(), Eq("new")); ASSERT_THAT(u.next(), Eq("new"));
ASSERT_THAT(u.next(), Eq("word")); ASSERT_THAT(u.next(), Eq("word"));
@ -248,7 +249,8 @@ TEST(ValueTokenizer, copy_constructor)
TEST(ValueTokenizer, move_constructor) TEST(ValueTokenizer, move_constructor)
{ {
ValueTokenizer u = std::move(ValueTokenizer(" test new word ", " ")); ValueTokenizer t(" test new word ", " ");
ValueTokenizer u = std::move(t);
ASSERT_THAT(u.next_string(), Eq("test")); ASSERT_THAT(u.next_string(), Eq("test"));
ASSERT_THAT(u.next_string(), Eq("new")); ASSERT_THAT(u.next_string(), Eq("new"));
ASSERT_THAT(u.next_string(), Eq("word")); ASSERT_THAT(u.next_string(), Eq("word"));
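A note on the pattern change in these two tests: applying std::move() to a temporary is redundant, defeats copy elision, and is flagged by newer compilers (-Wpessimizing-move), while moving from a named object still exercises the move constructor, which is what the tests are about. A minimal sketch with a hypothetical Token type (not the LAMMPS Tokenizer):

#include <cstdio>
#include <string>
#include <utility>

struct Token {
  std::string text;
  explicit Token(std::string t) : text(std::move(t)) {}
};

int main()
{
  // discouraged (old test pattern): moving a temporary prevents copy
  // elision and triggers -Wpessimizing-move on recent GCC/Clang
  // Token a = std::move(Token("word"));

  // preferred (new test pattern): construct a named object, then move it
  Token t("word");
  Token u = std::move(t);

  std::printf("%s\n", u.text.c_str());
  return 0;
}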