Merge pull request #4599 from akohlmey/collected-small-fixes

Collected small fixes and updates
This commit is contained in:
Axel Kohlmeyer
2025-06-04 14:42:06 -04:00
committed by GitHub
46 changed files with 1400 additions and 334 deletions

View File

@ -104,8 +104,8 @@ with a future release) from the `lammps-static` folder.
rm -rf release-packages
mkdir release-packages
cd release-packages
wget https://download.lammps.org/static/fedora41_musl.sif
apptainer shell fedora41_musl.sif
wget https://download.lammps.org/static/fedora41_musl_mingw.sif
apptainer shell fedora41_musl_mingw.sif
git clone -b release --depth 10 https://github.com/lammps/lammps.git lammps-release
cmake -S lammps-release/cmake -B build-release -G Ninja -D CMAKE_INSTALL_PREFIX=$PWD/lammps-static -D CMAKE_TOOLCHAIN_FILE=/usr/musl/share/cmake/linux-musl.cmake -C lammps-release/cmake/presets/most.cmake -C lammps-release/cmake/presets/kokkos-openmp.cmake -D DOWNLOAD_POTENTIALS=OFF -D BUILD_MPI=OFF -D BUILD_TESTING=OFF -D CMAKE_BUILD_TYPE=Release -D PKG_ATC=ON -D PKG_AWPMD=ON -D PKG_MANIFOLD=ON -D PKG_MESONT=ON -D PKG_MGPT=ON -D PKG_ML-PACE=ON -D PKG_ML-RANN=ON -D PKG_MOLFILE=ON -D PKG_PTM=ON -D PKG_QTB=ON -D PKG_SMTBQ=ON
cmake --build build-release --target all
@ -204,7 +204,7 @@ cd ..
rm -r release-packages
```
#### Build Multi-arch App-bundle for macOS
#### Build Multi-arch App-bundle with GUI for macOS
Building app-bundles for macOS is not as easily automated and portable
as some of the other steps. It requires a machine actually running
@ -251,7 +251,7 @@ attached to the GitHub release page.
We are currently building the application images on macOS 12 (aka Monterey).
#### Build Linux x86_64 binary tarball on Ubuntu 20.04LTS
#### Build Linux x86_64 binary tarball with GUI on Ubuntu 20.04LTS
While the flatpak Linux version uses portable runtime libraries provided
by the flatpak environment, we also build regular Linux executables that

View File

@ -1,4 +1,4 @@
# GitHub action to build LAMMPS on Linux with gcc and C++23
# GitHub action to build LAMMPS on Linux with gcc or clang and C++23
name: "Check for C++23 Compatibility"
on:
@ -11,11 +11,19 @@ on:
workflow_dispatch:
concurrency:
group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{github.event_name == 'pull_request'}}
jobs:
build:
name: Build with C++23 support enabled
if: ${{ github.repository == 'lammps/lammps' }}
runs-on: ubuntu-latest
strategy:
max-parallel: 2
matrix:
idx: [ gcc, clang ]
env:
CCACHE_DIR: ${{ github.workspace }}/.ccache
@ -29,8 +37,11 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y ccache \
libeigen3-dev \
clang \
libcurl4-openssl-dev \
libeigen3-dev \
libfftw3-dev \
libomp-dev \
mold \
mpi-default-bin \
mpi-default-dev \
@ -58,14 +69,14 @@ jobs:
cmake -S cmake -B build \
-C cmake/presets/most.cmake \
-C cmake/presets/kokkos-openmp.cmake \
-C cmake/presets/${{ matrix.idx }}.cmake \
-D CMAKE_CXX_STANDARD=23 \
-D CMAKE_CXX_COMPILER=g++ \
-D CMAKE_C_COMPILER=gcc \
-D CMAKE_CXX_COMPILER_LAUNCHER=ccache \
-D CMAKE_C_COMPILER_LAUNCHER=ccache \
-D CMAKE_BUILD_TYPE=Debug \
-D CMAKE_CXX_FLAGS_DEBUG="-Og -g" \
-D DOWNLOAD_POTENTIALS=off \
-D FFT=KISS \
-D BUILD_MPI=on \
-D BUILD_SHARED_LIBS=on \
-D BUILD_TOOLS=off \

View File

@ -189,7 +189,7 @@ if(GPU_API STREQUAL "CUDA")
endif()
add_executable(nvc_get_devices ${LAMMPS_LIB_SOURCE_DIR}/gpu/geryon/ucl_get_devices.cpp)
target_compile_definitions(nvc_get_devices PRIVATE -DUCL_CUDADR)
target_compile_definitions(nvc_get_devices PRIVATE -DUCL_CUDADR -DLAMMPS_${LAMMPS_SIZES})
target_link_libraries(nvc_get_devices PRIVATE ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY})
target_include_directories(nvc_get_devices PRIVATE ${CUDA_INCLUDE_DIRS})
@ -254,7 +254,7 @@ elseif(GPU_API STREQUAL "OPENCL")
add_library(gpu STATIC ${GPU_LIB_SOURCES})
target_link_libraries(gpu PRIVATE OpenCL::OpenCL)
target_include_directories(gpu PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/gpu)
target_compile_definitions(gpu PRIVATE -DUSE_OPENCL -D_${GPU_PREC_SETTING} -DLAMMPS_${LAMMPS_SIZES})
target_compile_definitions(gpu PRIVATE -DUSE_OPENCL -D_${GPU_PREC_SETTING})
if(GPU_DEBUG)
target_compile_definitions(gpu PRIVATE -DUCL_DEBUG -DGERYON_KERNEL_DUMP)
else()
@ -490,5 +490,6 @@ else()
endif()
set_target_properties(gpu PROPERTIES OUTPUT_NAME lammps_gpu${LAMMPS_MACHINE})
target_compile_definitions(gpu PRIVATE -DLAMMPS_${LAMMPS_SIZES})
target_sources(lammps PRIVATE ${GPU_SOURCES})
target_include_directories(lammps PRIVATE ${GPU_SOURCES_DIR})

View File

@ -75,15 +75,34 @@ section below for examples where this has been done.
**Differences between the GPU and KOKKOS packages:**
* The GPU package accelerates only pair force, neighbor list, and (parts
of) PPPM calculations. The KOKKOS package attempts to run most of the
of) PPPM calculations (and runs the remaining force computations on
the CPU concurrently). The KOKKOS package attempts to run most of the
calculation on the GPU, but can transparently support non-accelerated
code (with a performance penalty due to having data transfers between
host and GPU).
* The list of which styles are accelerated by the GPU or KOKKOS package
differs with some overlap.
* The GPU package requires neighbor lists to be built on the CPU when using
hybrid pair styles, exclusion lists, or a triclinic simulation box.
* The GPU package can be compiled for CUDA, HIP, or OpenCL and thus supports
NVIDIA, AMD, and Intel GPUs well. On NVIDIA hardware, using CUDA is
typically resulting in equal or better performance over OpenCL.
* OpenCL in the GPU package does theoretically also support Intel CPUs or
Intel Xeon Phi, but the native support for those in KOKKOS (or INTEL)
is superior.
* The GPU package benefits from running multiple MPI processes (2-8) per
GPU to parallelize the non-GPU accelerated styles. The KOKKOS package
usually does not, especially when all parts of the calculation have KOKKOS
support.
* The GPU package can be compiled for CUDA, HIP, or OpenCL and thus
supports NVIDIA, AMD, and Intel GPUs well. On NVIDIA or AMD hardware,
using native CUDA or HIP compilation, respectively, with either GPU or
KOKKOS results in equal or better performance over OpenCL.
* OpenCL in the GPU package supports NVIDIA, AMD, and Intel GPUs at the
*same time* and with the *same executable*. KOKKOS currently does not
support OpenCL.
* The GPU package supports single precision floating point, mixed
precision floating point, and double precision floating point math on
the GPU. This must be chosen at compile time. KOKKOS currently only
supports double precision floating point math. Using single or mixed
precision (recommended) results in significantly improved performance
on consumer GPUs for some loss in accuracy (which is rather small with
mixed precision). Single and mixed precision support for KOKKOS is in
development (no ETA yet).
* Some pair styles (for example :doc:`snap <pair_snap>`, :doc:`mliap
<pair_mliap>` or :doc:`reaxff <pair_reaxff>`) in the KOKKOS package have
seen extensive optimizations and specializations for GPUs and CPUs.

View File

@ -1,16 +1,218 @@
Measuring performance
=====================
Before trying to make your simulation run faster, you should
understand how it currently performs and where the bottlenecks are.
Factors that influence performance
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The best way to do this is run the your system (actual number of
atoms) for a modest number of timesteps (say 100 steps) on several
different processor counts, including a single processor if possible.
Do this for an equilibrium version of your system, so that the
100-step timings are representative of a much longer run. There is
typically no need to run for 1000s of timesteps to get accurate
timings; you can simply extrapolate from short runs.
Before trying to make your simulation run faster, you should understand
how it currently performs and where the bottlenecks are. We generally
distinguish between serial performance (how fast can a single process do
the calculations?) and parallel efficiency (how much faster does a
calculation get by using more processes?). There are many factors
affecting either and below are some lists discussing some commonly
known but also some less known factors.
Factors affecting serial performance (in no specific order):
* CPU hardware: clock rate, cache sizes, CPU architecture (instructions
per clock, vectorization support, fused multiply-add support and more)
* RAM speed and number of channels that the CPU can use to access RAM
* Cooling: CPUs can change the CPU clock based on thermal load, thus the
degree of cooling can affect the speed of a CPU. Sometimes even the
temperature of neighboring compute nodes in a cluster can make a
difference.
* Compiler optimization: most of LAMMPS is written to be easy to modify
and thus compiler optimization can speed up calculations. However, too
aggressive compiler optimization can produce incorrect results or
crashes (during compilation or at runtime).
* Source code improvements: styles in the OPT, OPENMP, and INTEL package
can be faster than their base implementation due to improved data
access patterns, cache efficiency, or vectorization. Compiler optimization
is required to take full advantage of these.
* Number and kind of fixes, computes, or variables used during a simulation,
especially if they result in collective communication operations
* Pair style cutoffs and system density: calculations get slower the more
neighbors are in the neighbor list and thus for which interactions need
to be computed. Force fields with pair styles that compute interactions
between triples or quadruples of atoms or that use embedding energies or
charge equilibration will need to walk the neighbor lists multiple times.
* Neighbor list settings: tradeoff between neighbor list skin (larger
skin = more neighbors, more distances to compute before applying the
cutoff) and frequency of neighbor list builds (larger skin = fewer
neighbor list builds).
* Proximity in physical memory of the per-atom data for atoms that are
close in space improves cache efficiency (thus LAMMPS will by default
sort atoms in local storage accordingly)
* Using r-RESPA multi-timestepping or a SHAKE or RATTLE fix to constrain
bonds with higher-frequency vibrations may allow a larger (outer) timestep
and thus fewer force evaluations (usually the most time consuming step in
MD) for the same simulated time (with some tradeoff in accuracy).
Factors affecting parallel efficiency (in no specific order):
* Bandwidth and latency of communication between processes. This can vary a
lot between processes on the same CPU or physical node and processes
on different physical nodes and also varies between different
communication technologies (like Ethernet or InfiniBand or other
high-speed interconnects)
* Frequency and complexity of communication patterns required
* Number of "work units" (usually correlated with the number of atoms
and choice of force field) per MPI-process required for one time step
(if this number becomes too small, the cost of communication becomes
dominant).
* Choice of parallelization method (MPI-only, OpenMP-only, MPI+OpenMP,
MPI+GPU, MPI+GPU+OpenMP)
* Algorithmic complexity of the chosen force field (pair-wise vs. many-body
potential, Ewald vs. PPPM vs. (compensated or smoothed) cutoff-Coulomb)
* Communication cutoff: a larger cutoff results in more ghost atoms and
thus more data that needs to be communicated
* Frequency of neighbor list builds: during a neighbor list build the
domain decomposition is updated and the list of ghost atoms rebuilt
which requires multiple global communication steps
* FFT-grid settings and number of MPI processes for kspace style PPPM:
PPPM uses parallel 3d FFTs which will drop much faster in parallel
efficiency with respect to the number of MPI processes than other
parts of the force computation. Thus using MPI+OpenMP parallelization
or :doc:`run style verlet/split <run_style>` can improve parallel
efficiency by limiting the number of MPI processes used for the FFTs.
* Load (im-)balance: LAMMPS' domain decomposition assumes that atoms are
evenly distributed across the entire simulation box. If there are
areas of vacuum, this may lead to different amounts of work for
different MPI processes. Using the :doc:`processors command
<processors>` to change the spatial decomposition, or MPI+OpenMP
parallelization instead of only-MPI to have larger sub-domains, or the
(fix) balance command (without or with switching to communication style
tiled) to change the sub-domain volumes are all methods that
can help to avoid load imbalances.
Examples comparing serial performance
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Before looking at your own input deck(s), you should get some reference
data from a known input so that you know what kind of performance you
should expect from your input. For the following we therefore use the
``in.rhodo.scaled`` input file and ``data.rhodo`` data file from the
``bench`` folder. This is a system of 32000 atoms using the CHARMM force
field and long-range electrostatics running for 100 MD steps. The
performance data is printed at the end of a run and only measures the
performance during propagation and excludes the setup phase.
Running with a single MPI process on an AMD Ryzen Threadripper PRO
9985WX CPU (64 cores, 128 threads, base clock: 3.2GHz, max. clock
5.4GHz, L1/L2/L3 cache 5MB/64MB/256MB, 8 DDR5-6400 memory channels) one
gets the following performance report:
.. code-block::
Performance: 1.232 ns/day, 19.476 hours/ns, 7.131 timesteps/s, 228.197 katom-step/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
The %CPU value should be at 100% or very close. Lower values would
be an indication that there are *other* processes also using the same
CPU core and thus invalidating the performance data. The katom-step/s
value is best suited for comparisons, since it is fairly independent
from the system size. The `in.rhodo.scaled` input can be easily made
larger through replication in the three dimensions by settings variables
"x", "y", "z" to values other than 1 from the command line with the
"-var" flag. Example:
- 32000 atoms: 228.8 katom-step/s
- 64000 atoms: 231.6 katom-step/s
- 128000 atoms: 231.1 katom-step/s
- 256000 atoms: 226.4 katom-step/s
- 864000 atoms: 229.6 katom-step/s
Comparing to an AMD Ryzen 7 7840HS CPU (8 cores, 16 threads, base clock
3.8GHz, max. clock 5.1GHz, L1/L2/L3 cache 512kB/8MB/16MB, 2 DDR5-5600
memory channels), we get similar single core performance (~220
katom-step/s vs. ~230 katom-step/s) due to the similar clock and
architecture:
- 32000 atoms: 219.8 katom-step/s
- 64000 atoms: 222.5 katom-step/s
- 128000 atoms: 216.8 katom-step/s
- 256000 atoms: 221.0 katom-step/s
- 864000 atoms: 221.1 katom-step/s
Switching to an older Intel Xeon E5-2650 v4 CPU (12 cores, 12 threads,
base clock 2.2GHz, max. clock 2.9GHz, L1/L2/L3 cache 64kB/256kB/30MB, 4
DDR4-2400 memory channels) leads to a lower performance of approximately
109 katom-step/s due to differences in architecture and clock. In all
cases, when looking at multiple runs, the katom-step/s property
fluctuates by approximately 1% around the average.
From here on we are looking at the performance for the 256000 atom system only
and change several settings incrementally:
#. No compiler optimization GCC (-Og -g): 183.8 katom-step/s
#. Moderate optimization with debug info GCC (-O2 -g): 231.1 katom-step/s
#. Full compiler optimization GCC (-DNDEBUG -O3): 236.0 katom-step/s
#. Aggressive compiler optimization GCC (-O3 -ffast-math -march=native): 239.9 katom-step/s
#. Source code optimization in OPENMP package (1 thread): 266.7 katom-step/s
#. Use *fix nvt* instead of *fix npt* (compute virial only every 50 steps): 272.9 katom-step/s
#. Increase pair style cutoff by 2 :math:`\AA`: 181.2 katom-step/s
#. Use tight PPPM convergence (1.0e-6 instead of 1.0e-4): 161.9 katom-step/s
#. Use Ewald summation instead of PPPM (at 1.0e-4 convergence): 19.9 katom-step/s
The numbers show that gains from aggressive compiler optimizations are
rather small in LAMMPS, the data access optimizations in the OPENMP (and
OPT) packages are more prominent. On the other side, using more
accurate force field settings causes, not unexpectedly, a significant
slowdown (to about half the speed). Finally, using regular Ewald
summation causes a massive slowdown due to the bad algorithmic scaling
with system size.
Examples comparing parallel performance
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The parallel performance usually goes on top of the serial performance.
Using twice as many processors should increase the performance metric
by up to a factor of two. With the number of processors *N* and the
serial performance :math:`p_1` and the performance for *N* processors
:math:`p_N` we can define a *parallel efficiency* in percent as follows:
.. math::
P_{eff} = \frac{p_N}{p_1 \cdot N} \cdot 100\%
For the AMD Ryzen Threadripper PRO 9985WX CPU and the serial
simulation settings of point 6. from above, we get the following
parallel efficiency data for the 256000 atom system:
- 1 MPI task: 273.6 katom-step/s, :math:`P_{eff} = 100\%`
- 2 MPI tasks: 530.6 katom-step/s, :math:`P_{eff} = 97\%`
- 4 MPI tasks: 1.021 Matom-step/s, :math:`P_{eff} = 93\%`
- 8 MPI tasks: 1.837 Matom-step/s, :math:`P_{eff} = 84\%`
- 16 MPI tasks: 3.574 Matom-step/s, :math:`P_{eff} = 82\%`
- 32 MPI tasks: 6.479 Matom-step/s, :math:`P_{eff} = 74\%`
- 64 MPI tasks: 9.032 Matom-step/s, :math:`P_{eff} = 52\%`
- 128 MPI tasks: 12.03 Matom-step/s, :math:`P_{eff} = 34\%`
The 128 MPI tasks run uses CPU cores from hyper-threading.
For a small system with only 32000 atoms the parallel efficiency
drops off earlier when the number of work units is too small relative
to the communication overhead:
- 1 MPI task: 270.8 katom-step/s, :math:`P_{eff} = 100\%`
- 2 MPI tasks: 529.3 katom-step/s, :math:`P_{eff} = 98\%`
- 4 MPI tasks: 989.8 katom-step/s, :math:`P_{eff} = 91\%`
- 8 MPI tasks: 1.832 Matom-step/s, :math:`P_{eff} = 85\%`
- 16 MPI tasks: 3.463 Matom-step/s, :math:`P_{eff} = 80\%`
- 32 MPI tasks: 5.970 Matom-step/s, :math:`P_{eff} = 69\%`
- 64 MPI tasks: 7.477 Matom-step/s, :math:`P_{eff} = 42\%`
- 128 MPI tasks: 8.069 Matom-step/s, :math:`P_{eff} = 23\%`
Measuring performance of your input deck
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The best way to do this is to run your system (actual number of atoms)
for a modest number of timesteps (say 100 steps) on several different
processor counts, including a single processor if possible. Do this for
an equilibrium version of your system, so that the 100-step timings are
representative of a much longer run. There is typically no need to run
for 1000s of timesteps to get accurate timings; you can simply
extrapolate from short runs.
For the set of runs, look at the timing data printed to the screen and
log file at the end of each LAMMPS run. The
@ -28,12 +230,15 @@ breakdown and relative percentages. For example, trying different
options for speeding up the long-range solvers will have little impact
if they only consume 10% of the run time. If the pairwise time is
dominating, you may want to look at GPU or OMP versions of the pair
style, as discussed below. Comparing how the percentages change as
you increase the processor count gives you a sense of how different
operations within the timestep are scaling. Note that if you are
running with a Kspace solver, there is additional output on the
breakdown of the Kspace time. For PPPM, this includes the fraction
spent on FFTs, which can be communication intensive.
style, as discussed below. Comparing how the percentages change as you
increase the processor count gives you a sense of how different
operations within the timestep are scaling. If you are using PPPM as
Kspace solver, you can turn on an additional output with
:doc:`kspace_modify fftbench yes <kspace_modify>` which measures the
time spent during PPPM on the 3d FFTs, which can be communication
intensive for larger processor counts. This provides an indication
whether it is worth trying out alternatives to the default FFT settings
for additional performance.
Another important detail in the timing info are the histograms of
atoms counts and neighbor counts. If these vary widely across

View File

@ -82,10 +82,9 @@ specified values may represent calculations performed by computes and
fixes which store their own "group" definitions.
Each listed value can be the result of a compute or fix or the
evaluation of an equal-style or vector-style variable. For
vector-style variables, the specified indices can include a wildcard
character. See the :doc:`fix ave/correlate <fix_ave_correlate>` page
for details.
evaluation of an equal-style or vector-style variable. The specified
indices can include a wildcard string. See the
:doc:`fix ave/correlate <fix_ave_correlate>` page for details on that.
The *Nevery* and *Nfreq* arguments specify on what time steps the input
values will be used to calculate correlation data and the frequency

View File

@ -100,6 +100,56 @@ first is assigned to intra-molecular interactions (i.e. both atoms
have the same molecule ID), the second to inter-molecular interactions
(i.e. interacting atoms have different molecule IDs).
.. admonition:: When **NOT** to use a hybrid pair style
:class: warning
Using pair style *hybrid* can be very tempting if you need a
**many-body potential** supporting a mix of elements for which you
cannot find a potential file that covers *all* of them. Regardless
of how this is set up, there will be *errors*. The major use case
where the error is *small*, is when the many-body sub-styles are used
on different objects (for example a slab and a liquid, a metal and a
nano-machining work piece). In that case the *mixed* terms
**should** be provided by a pair-wise additive potential (like
Lennard-Jones or Morse) to avoid unexpected behavior and reduce
errors. LAMMPS cannot easily check for this condition and thus will
accept good and bad choices alike.
Outside of this, we *strongly* recommend *against* using pair style
hybrid with many-body potentials for the following reasons:
1. When trying to combine EAM or MEAM potentials, there is a *large*
error in the embedding term, since it is computed separately for
each sub-style only.
2. When trying to combine many-body potentials like Stillinger-Weber,
Tersoff, AIREBO, Vashishta, or similar, you have to understand
that the potential of a sub-style cannot be applied in a pair-wise
fashion but will need to be applied to multiples of atoms
(e.g. a Tersoff potential of elements A and B includes the
interactions A-A, B-B, A-B, A-A-A, A-A-B, A-B-B, A-B-A, B-A-A,
B-A-B, B-B-A, B-B-B; AIREBO also considers all quadruples of
atom elements).
3. When one of the sub-styles uses charge-equilibration (= QEq; like
in ReaxFF or COMB) you have inconsistent QEq behavior because
either you try to apply QEq to *all* atoms but then you are
missing the QEq parameters for the non-QEq pair style (and it
would be inconsistent to apply QEq for pair styles that are not
parameterized for QEq) or else you would have either no charges or
fixed charges interacting with the QEq which also leads to
inconsistent behavior between two sub-styles. When attempting to
use multiple ReaxFF instances to combine different potential
files, you might be able to work around the QEq limitations, but
point 2. still applies.
We understand that it is frustrating to not be able to run simulations
due to lack of available potential files, but that does not justify
combining potentials in a broken way via pair style hybrid. This is
not what the hybrid pair styles are designed for.
----------
Here are two examples of hybrid simulations. The *hybrid* style could
be used for a simulation of a metal droplet on a LJ surface. The metal
atoms interact with each other via an *eam* potential, the surface atoms
@ -374,12 +424,11 @@ selected sub-style.
----------
.. note::
Several of the potentials defined via the pair_style command in
LAMMPS are really many-body potentials, such as Tersoff, AIREBO, MEAM,
ReaxFF, etc. The way to think about using these potentials in a
hybrid setting is as follows.
Even though the command name "pair_style" would suggest that these are
pair-wise interactions, several of the potentials defined via the
pair_style command in LAMMPS are really many-body potentials, such as
Tersoff, AIREBO, MEAM, ReaxFF, etc. The way to think about using these
potentials in a hybrid setting is as follows.
A subset of atom types is assigned to the many-body potential with a
single :doc:`pair_coeff <pair_coeff>` command, using "\* \*" to include

View File

@ -82,6 +82,7 @@ Alessandro
Alexey
ali
aliceblue
aliphatic
Allera
Allinger
allocatable
@ -1176,6 +1177,7 @@ Fermionic
Ferrand
fexternal
Fexternal
ffast
ffield
ffl
fflush
@ -1817,6 +1819,7 @@ Karniadakis
Karplus
Karttunen
kate
katom
Katsnelson
Katsura
Kaufmann
@ -2195,6 +2198,7 @@ Materias
mathbf
mathjax
matlab
Matom
Matous
matplotlib
Matsubara
@ -2704,6 +2708,7 @@ Nprocs
npt
nr
Nr
Nrecent
Nrecompute
Nrepeat
nreset
@ -2782,6 +2787,7 @@ ocl
octahedral
octants
Odegard
Og
Ohara
O'Hearn
ohenrich
@ -3418,6 +3424,7 @@ ry
Ryckaert
Rycroft
Rydbergs
Ryzen
rz
Rz
Sabry
@ -3837,6 +3844,7 @@ Thiaville
Thibaudeau
Thijsse
Thirumalai
Threadripper
threebody
thrid
ThunderX

View File

@ -0,0 +1 @@
../../../potentials/MoCoNiVFeAlCr_2nn.meam

View File

@ -0,0 +1,46 @@
# May 2025
# Test script for MD-KMC accelerated diffusion testing in LAMMPS
# Created by Jacob Tavenner, Baylor University
# Initiation -------------------------------------
units metal
dimension 3
boundary p p p
atom_style atomic
# Atom Definition --------------------------------
# Build one FCC unit cell (a = 3.762), then replicate it into a
# 6x16x6 supercell elongated along y.
lattice fcc 3.762
region whole block 0 1 0 1 0 1
create_box 2 whole
create_atoms 1 region whole
replicate 6 16 6
# Convert a slab ("puck") in the middle of the box (lattice units 7-9
# along y) to type-2 atoms; everything else stays type 1.
region puck block INF INF 7 9 INF INF
set region puck type 2
# Force Fields -----------------------------------
# 2NN MEAM potential; of the seven library elements only Ni (type 1)
# and Cr (type 2) are mapped to atom types here.
pair_style meam
pair_coeff * * library_2nn.meam Mo Co Ni V Fe Al Cr MoCoNiVFeAlCr_2nn.meam Ni Cr
# Settings ---------------------------------------
timestep 0.002
thermo 100
# Computations -----------------------------------
# Voronoi-based neighbor lists; referenced by fix neighbor/swap below.
compute voroN all voronoi/atom neighbors yes
run 0
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe
# Execution --------------------------------------
velocity all create 2400 908124 loop geom
fix temp all npt temp 1000 1000 1000 aniso 0 0 1
# MD-KMC swap moves combined with the NPT time integration above.
# NOTE(review): argument meaning (interval 50, 12 attempts, RNG seed,
# T = 1000, type 3?) assumed — confirm against the fix neighbor/swap docs.
fix mc all neighbor/swap 50 12 1340723 1000 3 voroN diff 2
# Re-declare thermo output to also report the fix's swap counters.
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe f_mc[*]
#dump dump2 all custom 5000 dump.edge-3_Ni-Cr.* id type x y z c_eng c_csym
run 1000
#write_data pulse_center.data

View File

@ -0,0 +1,47 @@
# May 2025
# Test script for MD-KMC accelerated diffusion testing in LAMMPS
# Created by Jacob Tavenner, Baylor University
# Initiation -------------------------------------
units metal
dimension 3
boundary p p p
atom_style atomic
# Atom Definition --------------------------------
# Build one FCC unit cell (a = 3.762), then replicate it into a
# 6x16x6 supercell elongated along y.
lattice fcc 3.762
region whole block 0 1 0 1 0 1
create_box 2 whole
create_atoms 1 region whole
replicate 6 16 6
# Unlike the companion "center" script, the type-2 slab sits at the
# low-y edge of the box (up to lattice unit 2 along y).
region puck block INF INF INF 2 INF INF
set region puck type 2
# Force Fields -----------------------------------
# 2NN MEAM potential; of the seven library elements only Ni (type 1)
# and Cr (type 2) are mapped to atom types here.
pair_style meam
pair_coeff * * library_2nn.meam Mo Co Ni V Fe Al Cr MoCoNiVFeAlCr_2nn.meam Ni Cr
# Settings ---------------------------------------
timestep 0.002
thermo 100
# Computations -----------------------------------
# Voronoi-based neighbor lists; referenced by fix neighbor/swap below.
compute voroN all voronoi/atom neighbors yes
run 0
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe
# Execution --------------------------------------
velocity all create 2400 908124 loop geom
fix temp all npt temp 1000 1000 1000 aniso 0 0 1
# MD-KMC swap moves combined with the NPT time integration above.
# NOTE(review): argument meaning (interval 50, 12 attempts, RNG seed,
# T = 1000, type 3?) assumed — confirm against the fix neighbor/swap docs.
fix mc all neighbor/swap 50 12 1340723 1000 3 voroN diff 2
# Re-declare thermo output to also report the fix's swap counters.
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe f_mc[*]
#dump dump2 all custom 5000 dump.edge-3_Ni-Cr.* id type x y z c_eng c_csym
run 1000
#write_data pulse_end.data

View File

@ -0,0 +1 @@
../../../potentials/library_2nn.meam

View File

@ -0,0 +1,154 @@
LAMMPS (2 Apr 2025 - Development - patch_2Apr2025-384-g88bc7dc720-modified)
using 1 OpenMP thread(s) per MPI task
# May 2025
# Test script for MD-KMC accelerated diffusion testing in LAMMPS
# Created by Jacob Tavenner, Baylor University
# Initiation -------------------------------------
units metal
dimension 3
boundary p p p
atom_style atomic
# Atom Definition --------------------------------
lattice fcc 3.762
Lattice spacing in x,y,z = 3.762 3.762 3.762
region whole block 0 1 0 1 0 1
create_box 2 whole
Created orthogonal box = (0 0 0) to (3.762 3.762 3.762)
1 by 1 by 1 MPI processor grid
create_atoms 1 region whole
Created 4 atoms
using lattice units in orthogonal box = (0 0 0) to (3.762 3.762 3.762)
create_atoms CPU = 0.000 seconds
replicate 6 16 6
Replication is creating a 6x16x6 = 576 times larger system...
orthogonal box = (0 0 0) to (22.572 60.192 22.572)
1 by 1 by 1 MPI processor grid
2304 atoms
replicate CPU = 0.000 seconds
region puck block INF INF 7 9 INF INF
set region puck type 2
Setting atom values ...
360 settings made for type
# Force Fields -----------------------------------
pair_style meam
pair_coeff * * library_2nn.meam Mo Co Ni V Fe Al Cr MoCoNiVFeAlCr_2nn.meam Ni Cr
Reading MEAM library file library_2nn.meam with DATE: 2024-08-08
Reading MEAM potential file MoCoNiVFeAlCr_2nn.meam with DATE: 2024-08-08
# Settings ---------------------------------------
timestep 0.002
thermo 100
# Computations -----------------------------------
compute voroN all voronoi/atom neighbors yes
run 0
WARNING: No fixes with time integration, atoms won't move
For more information see https://docs.lammps.org/err0028 (src/verlet.cpp:60)
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.8
ghost atom cutoff = 6.8
binsize = 3.4, bins = 7 18 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 13.32 | 13.32 | 13.32 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -9674.3728 0 -9674.3728 -212400.94
Loop time of 1.202e-06 on 1 procs for 0 steps with 2304 atoms
0.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.202e-06 | | |100.00
Nlocal: 2304 ave 2304 max 2304 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4735 ave 4735 max 4735 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 99072 ave 99072 max 99072 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 198144 ave 198144 max 198144 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 198144
Ave neighs/atom = 86
Neighbor list builds = 0
Dangerous builds = 0
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe
# Execution --------------------------------------
velocity all create 2400 908124
fix temp all npt temp 1000 1000 1000 aniso 0 0 1
fix mc all neighbor/swap 50 12 1340723 1000 3 voroN diff 2
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe f_mc[*]
#dump dump2 all custom 5000 dump.edge-3_Ni-Cr.* id type x y z c_eng c_csym
run 1000
Per MPI rank memory allocation (min/avg/max) = 13.32 | 13.32 | 13.32 Mbytes
Step Temp Press Pxx Pyy Pzz Lx Ly Lz Volume PotEng f_mc[1] f_mc[2]
0 2400 -187517.52 -187403.07 -187750.14 -187399.35 22.572 60.192 22.572 30667.534 -9674.3728 0 0
100 1664.9956 14000 14280.682 15095.077 12624.241 21.635315 57.726568 21.64791 27036.778 -9592.8978 24 22
200 1560.0093 -5452.2434 -5749.5816 -2957.4228 -7649.7258 21.734212 58.085959 21.724853 27426.596 -9562.8822 48 45
300 1586.4553 2030.9253 2776.4677 775.50538 2540.803 21.678654 58.101753 21.654423 27275.215 -9571.1308 72 66
400 1603.6896 -223.16773 156.17673 -478.47929 -347.20061 21.701021 58.098904 21.657752 27306.213 -9576.4456 96 90
500 1618.236 -925.51874 -1640.9078 451.6228 -1587.2713 21.718334 58.042685 21.666081 27312.054 -9581.2045 120 110
600 1581.9995 290.10126 1359.1314 1407.5434 -1896.371 21.679813 58.086147 21.692118 27316.815 -9570.4803 144 132
700 1568.3261 1387.3472 938.81523 2159.3686 1063.8577 21.685928 58.075626 21.67273 27295.153 -9566.2914 168 155
800 1607.1531 46.792964 -453.90265 -1533.3908 2127.6723 21.685188 58.202356 21.628338 27297.753 -9577.7848 192 177
900 1573.4747 -84.225488 548.90935 -1356.7479 555.16208 21.69634 58.150052 21.651847 27316.908 -9567.7039 216 196
1000 1609.2136 1215.0833 764.08936 3301.0811 -419.92053 21.683731 58.000401 21.68726 27275.31 -9578.2843 240 219
Loop time of 31.6263 on 1 procs for 1000 steps with 2304 atoms
Performance: 5.464 ns/day, 4.393 hours/ns, 31.619 timesteps/s, 72.851 katom-step/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 28.487 | 28.487 | 28.487 | 0.0 | 90.07
Neigh | 0.22789 | 0.22789 | 0.22789 | 0.0 | 0.72
Comm | 0.010808 | 0.010808 | 0.010808 | 0.0 | 0.03
Output | 0.00033526 | 0.00033526 | 0.00033526 | 0.0 | 0.00
Modify | 2.8963 | 2.8963 | 2.8963 | 0.0 | 9.16
Other | | 0.003905 | | | 0.01
Nlocal: 2304 ave 2304 max 2304 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4750 ave 4750 max 4750 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 130023 ave 130023 max 130023 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 260046 ave 260046 max 260046 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 260046
Ave neighs/atom = 112.86719
Neighbor list builds = 65
Dangerous builds = 0
#write_data pulse_center.data
Total wall time: 0:00:31

View File

@ -0,0 +1,154 @@
LAMMPS (2 Apr 2025 - Development - patch_2Apr2025-384-g88bc7dc720-modified)
using 1 OpenMP thread(s) per MPI task
# May 2025
# Test script for MD-KMC accelerated diffusion testing in LAMMPS
# Created by Jacob Tavenner, Baylor University
# Initiation -------------------------------------
units metal
dimension 3
boundary p p p
atom_style atomic
# Atom Definition --------------------------------
lattice fcc 3.762
Lattice spacing in x,y,z = 3.762 3.762 3.762
region whole block 0 1 0 1 0 1
create_box 2 whole
Created orthogonal box = (0 0 0) to (3.762 3.762 3.762)
1 by 2 by 2 MPI processor grid
create_atoms 1 region whole
Created 4 atoms
using lattice units in orthogonal box = (0 0 0) to (3.762 3.762 3.762)
create_atoms CPU = 0.000 seconds
replicate 6 16 6
Replication is creating a 6x16x6 = 576 times larger system...
orthogonal box = (0 0 0) to (22.572 60.192 22.572)
1 by 4 by 1 MPI processor grid
2304 atoms
replicate CPU = 0.000 seconds
region puck block INF INF 7 9 INF INF
set region puck type 2
Setting atom values ...
360 settings made for type
# Force Fields -----------------------------------
pair_style meam
pair_coeff * * library_2nn.meam Mo Co Ni V Fe Al Cr MoCoNiVFeAlCr_2nn.meam Ni Cr
Reading MEAM library file library_2nn.meam with DATE: 2024-08-08
Reading MEAM potential file MoCoNiVFeAlCr_2nn.meam with DATE: 2024-08-08
# Settings ---------------------------------------
timestep 0.002
thermo 100
# Computations -----------------------------------
compute voroN all voronoi/atom neighbors yes
run 0
WARNING: No fixes with time integration, atoms won't move
For more information see https://docs.lammps.org/err0028 (src/verlet.cpp:60)
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.8
ghost atom cutoff = 6.8
binsize = 3.4, bins = 7 18 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 9.636 | 9.636 | 9.636 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -9674.3728 0 -9674.3728 -212400.94
Loop time of 1.422e-06 on 4 procs for 0 steps with 2304 atoms
35.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.422e-06 | | |100.00
Nlocal: 576 ave 576 max 576 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 2131 ave 2131 max 2131 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 24768 ave 24768 max 24768 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 49536 ave 49536 max 49536 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 198144
Ave neighs/atom = 86
Neighbor list builds = 0
Dangerous builds = 0
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe
# Execution --------------------------------------
velocity all create 2400 908124
fix temp all npt temp 1000 1000 1000 aniso 0 0 1
fix mc all neighbor/swap 50 12 1340723 1000 3 voroN diff 2
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe f_mc[*]
#dump dump2 all custom 5000 dump.edge-3_Ni-Cr.* id type x y z c_eng c_csym
run 1000
Per MPI rank memory allocation (min/avg/max) = 9.636 | 9.636 | 9.636 Mbytes
Step Temp Press Pxx Pyy Pzz Lx Ly Lz Volume PotEng f_mc[1] f_mc[2]
0 2400 -187517.52 -187403.09 -187750.05 -187399.42 22.572 60.192 22.572 30667.534 -9674.3728 0 0
100 1668.8754 13300.763 12419.304 15568.772 11914.212 21.636248 57.724775 21.647685 27036.823 -9594.7526 24 23
200 1584.9699 -5686.0414 -4741.8496 -5914.7681 -6401.5064 21.729384 58.060532 21.730736 27415.923 -9571.0639 48 46
300 1582.0473 2806.2983 3413.4122 2716.0124 2289.4702 21.6679 58.033587 21.694744 27280.402 -9570.5549 72 69
400 1582.5825 845.29268 -849.61221 2123.5339 1261.9563 21.676298 58.14253 21.656418 27293.905 -9570.7948 96 93
500 1591.7285 -501.17955 1151.9743 -1719.3712 -936.14174 21.696367 58.157211 21.648308 27315.839 -9573.5089 120 116
600 1610.708 -821.74669 -1002.4957 291.88502 -1754.6294 21.730338 58.008213 21.661226 27304.8 -9579.5573 144 138
700 1598.5176 -590.00633 -1844.42 408.97706 -334.57602 21.712908 57.96131 21.698129 27307.281 -9575.8973 168 162
800 1584.3478 330.16711 666.88818 74.698331 248.91482 21.650908 58.045055 21.719838 27295.933 -9571.9268 192 186
900 1557.9946 1471.1207 2124.6512 1526.9937 761.71731 21.645578 58.156083 21.681637 27293.323 -9564.4385 216 207
1000 1582.5312 379.57005 -602.96446 2696.737 -955.06238 21.655418 58.231248 21.649581 27300.598 -9571.9879 240 227
Loop time of 9.1632 on 4 procs for 1000 steps with 2304 atoms
Performance: 18.858 ns/day, 1.273 hours/ns, 109.132 timesteps/s, 251.440 katom-step/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 7.867 | 7.9923 | 8.1311 | 4.3 | 87.22
Neigh | 0.054997 | 0.057518 | 0.060145 | 1.0 | 0.63
Comm | 0.017529 | 0.14801 | 0.27408 | 29.5 | 1.62
Output | 0.00015963 | 0.00017216 | 0.00020869 | 0.0 | 0.00
Modify | 0.95227 | 0.96325 | 0.9917 | 1.7 | 10.51
Other | | 0.001983 | | | 0.02
Nlocal: 576 ave 609 max 540 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 2161.5 ave 2173 max 2151 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 32450.2 ave 35422 max 29271 min
Histogram: 2 0 0 0 0 0 0 0 0 2
FullNghs: 64900.5 ave 70800 max 58684 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Total # of neighbors = 259602
Ave neighs/atom = 112.67448
Neighbor list builds = 62
Dangerous builds = 0
#write_data pulse_center.data
Total wall time: 0:00:09

View File

@ -0,0 +1,155 @@
LAMMPS (2 Apr 2025 - Development - patch_2Apr2025-384-g88bc7dc720-modified)
using 1 OpenMP thread(s) per MPI task
# May 2025
# Test script for MD-KMC accelerated diffusion testing in LAMMPS
# Created by Jacob Tavenner, Baylor University
# Initiation -------------------------------------
units metal
dimension 3
boundary p p p
atom_style atomic
# Atom Definition --------------------------------
lattice fcc 3.762
Lattice spacing in x,y,z = 3.762 3.762 3.762
region whole block 0 1 0 1 0 1
create_box 2 whole
Created orthogonal box = (0 0 0) to (3.762 3.762 3.762)
1 by 1 by 1 MPI processor grid
create_atoms 1 region whole
Created 4 atoms
using lattice units in orthogonal box = (0 0 0) to (3.762 3.762 3.762)
create_atoms CPU = 0.000 seconds
replicate 6 16 6
Replication is creating a 6x16x6 = 576 times larger system...
orthogonal box = (0 0 0) to (22.572 60.192 22.572)
1 by 1 by 1 MPI processor grid
2304 atoms
replicate CPU = 0.000 seconds
region puck block INF INF INF 2 INF INF
set region puck type 2
Setting atom values ...
360 settings made for type
# Force Fields -----------------------------------
pair_style meam
pair_coeff * * library_2nn.meam Mo Co Ni V Fe Al Cr MoCoNiVFeAlCr_2nn.meam Ni Cr
Reading MEAM library file library_2nn.meam with DATE: 2024-08-08
Reading MEAM potential file MoCoNiVFeAlCr_2nn.meam with DATE: 2024-08-08
# Settings ---------------------------------------
timestep 0.002
thermo 100
# Computations -----------------------------------
compute voroN all voronoi/atom neighbors yes
run 0
WARNING: No fixes with time integration, atoms won't move
For more information see https://docs.lammps.org/err0028 (src/verlet.cpp:60)
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.8
ghost atom cutoff = 6.8
binsize = 3.4, bins = 7 18 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 13.32 | 13.32 | 13.32 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -9674.3728 0 -9674.3728 -212400.94
Loop time of 1.232e-06 on 1 procs for 0 steps with 2304 atoms
81.2% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.232e-06 | | |100.00
Nlocal: 2304 ave 2304 max 2304 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4735 ave 4735 max 4735 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 99072 ave 99072 max 99072 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 198144 ave 198144 max 198144 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 198144
Ave neighs/atom = 86
Neighbor list builds = 0
Dangerous builds = 0
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe
# Execution --------------------------------------
velocity all create 2400 908124 loop geom
fix temp all npt temp 1000 1000 1000 aniso 0 0 1
fix mc all neighbor/swap 50 12 1340723 1000 3 voroN diff 2
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe f_mc[*]
#dump dump2 all custom 5000 dump.edge-3_Ni-Cr.* id type x y z c_eng c_csym
run 1000
Per MPI rank memory allocation (min/avg/max) = 13.32 | 13.32 | 13.32 Mbytes
Step Temp Press Pxx Pyy Pzz Lx Ly Lz Volume PotEng f_mc[1] f_mc[2]
0 2400 -187517.52 -187464.47 -188202.62 -186885.48 22.572 60.192 22.572 30667.534 -9674.3728 0 0
100 1665.6154 14281.316 14426.547 14555.867 13861.534 21.637238 57.719793 21.637281 27022.733 -9594.4303 24 24
200 1603.3309 -7325.7341 -8878.1524 -5333.0485 -7766.0015 21.710246 58.122827 21.725933 27415.106 -9577.4545 48 48
300 1603.2974 207.19165 1983.4565 -1841.9518 480.07024 21.678227 58.079126 21.674033 27288.745 -9577.6391 72 69
400 1600.1515 810.95054 1087.969 802.04946 542.83316 21.683731 58.045848 21.678505 27285.662 -9576.6508 96 92
500 1629.8313 -2808.1005 -3197.9357 310.89931 -5537.265 21.683924 58.090375 21.697076 27330.229 -9585.5435 120 113
600 1598.8232 -67.845623 -1573.0718 -1526.7607 2896.2957 21.70213 58.12191 21.653853 27313.504 -9576.4147 144 137
700 1607.2185 154.66718 -1777.2469 2566.4705 -325.22208 21.712408 57.971553 21.678708 27287.033 -9579.1772 168 158
800 1582.559 -891.23631 -632.46037 -636.88203 -1404.3665 21.671936 58.127004 21.678224 27308.594 -9571.6663 192 180
900 1586.7172 -617.17083 -2495.5378 -2302.8766 2946.9018 21.658489 58.181921 21.668968 27305.771 -9572.9641 216 204
1000 1607.563 -389.8113 810.4908 298.84287 -2278.7676 21.624573 58.076745 21.724272 27283.183 -9579.5034 240 227
Loop time of 31.7733 on 1 procs for 1000 steps with 2304 atoms
Performance: 5.439 ns/day, 4.413 hours/ns, 31.473 timesteps/s, 72.514 katom-step/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 28.604 | 28.604 | 28.604 | 0.0 | 90.02
Neigh | 0.21293 | 0.21293 | 0.21293 | 0.0 | 0.67
Comm | 0.010645 | 0.010645 | 0.010645 | 0.0 | 0.03
Output | 0.00033194 | 0.00033194 | 0.00033194 | 0.0 | 0.00
Modify | 2.9411 | 2.9411 | 2.9411 | 0.0 | 9.26
Other | | 0.00448 | | | 0.01
Nlocal: 2304 ave 2304 max 2304 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4748 ave 4748 max 4748 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 130301 ave 130301 max 130301 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 260602 ave 260602 max 260602 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 260602
Ave neighs/atom = 113.10851
Neighbor list builds = 62
Dangerous builds = 0
#write_data pulse_end.data
Total wall time: 0:00:31

View File

@ -0,0 +1,155 @@
LAMMPS (2 Apr 2025 - Development - patch_2Apr2025-384-g88bc7dc720-modified)
using 1 OpenMP thread(s) per MPI task
# May 2025
# Test script for MD-KMC accelerated diffusion testing in LAMMPS
# Created by Jacob Tavenner, Baylor University
# Initiation -------------------------------------
units metal
dimension 3
boundary p p p
atom_style atomic
# Atom Definition --------------------------------
lattice fcc 3.762
Lattice spacing in x,y,z = 3.762 3.762 3.762
region whole block 0 1 0 1 0 1
create_box 2 whole
Created orthogonal box = (0 0 0) to (3.762 3.762 3.762)
1 by 2 by 2 MPI processor grid
create_atoms 1 region whole
Created 4 atoms
using lattice units in orthogonal box = (0 0 0) to (3.762 3.762 3.762)
create_atoms CPU = 0.000 seconds
replicate 6 16 6
Replication is creating a 6x16x6 = 576 times larger system...
orthogonal box = (0 0 0) to (22.572 60.192 22.572)
1 by 4 by 1 MPI processor grid
2304 atoms
replicate CPU = 0.000 seconds
region puck block INF INF INF 2 INF INF
set region puck type 2
Setting atom values ...
360 settings made for type
# Force Fields -----------------------------------
pair_style meam
pair_coeff * * library_2nn.meam Mo Co Ni V Fe Al Cr MoCoNiVFeAlCr_2nn.meam Ni Cr
Reading MEAM library file library_2nn.meam with DATE: 2024-08-08
Reading MEAM potential file MoCoNiVFeAlCr_2nn.meam with DATE: 2024-08-08
# Settings ---------------------------------------
timestep 0.002
thermo 100
# Computations -----------------------------------
compute voroN all voronoi/atom neighbors yes
run 0
WARNING: No fixes with time integration, atoms won't move
For more information see https://docs.lammps.org/err0028 (src/verlet.cpp:60)
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.8
ghost atom cutoff = 6.8
binsize = 3.4, bins = 7 18 7
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 9.636 | 9.636 | 9.636 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -9674.3728 0 -9674.3728 -212400.94
Loop time of 1.53e-06 on 4 procs for 0 steps with 2304 atoms
65.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 1.53e-06 | | |100.00
Nlocal: 576 ave 576 max 576 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 2131 ave 2131 max 2131 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 24768 ave 24768 max 24768 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 49536 ave 49536 max 49536 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 198144
Ave neighs/atom = 86
Neighbor list builds = 0
Dangerous builds = 0
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe
# Execution --------------------------------------
velocity all create 2400 908124 loop geom
fix temp all npt temp 1000 1000 1000 aniso 0 0 1
fix mc all neighbor/swap 50 12 1340723 1000 3 voroN diff 2
thermo_style custom step temp press pxx pyy pzz lx ly lz vol pe f_mc[*]
#dump dump2 all custom 5000 dump.edge-3_Ni-Cr.* id type x y z c_eng c_csym
run 1000
Per MPI rank memory allocation (min/avg/max) = 9.636 | 9.636 | 9.636 Mbytes
Step Temp Press Pxx Pyy Pzz Lx Ly Lz Volume PotEng f_mc[1] f_mc[2]
0 2400 -187517.52 -187464.47 -188202.62 -186885.48 22.572 60.192 22.572 30667.534 -9674.3728 0 0
100 1665.569 14271.813 14638.855 14316.569 13860.016 21.63675 57.721065 21.637799 27023.366 -9594.291 24 24
200 1598.6479 -6990.8349 -8574.1986 -5033.6147 -7364.6916 21.708963 58.123129 21.724821 27412.223 -9575.7322 48 47
300 1604.388 456.43285 1926.408 -1214.1721 657.0626 21.673369 58.090421 21.671716 27285.018 -9577.698 72 70
400 1601.1591 1303.6721 703.88473 1137.6607 2069.471 21.684004 58.049595 21.671161 27278.522 -9576.4811 96 94
500 1623.6044 -2243.2478 -2084.532 320.87709 -4966.0885 21.686171 58.097101 21.695911 27334.758 -9583.1878 120 118
600 1587.2041 421.60034 190.88741 -328.76599 1402.6796 21.712439 58.086039 21.655927 27312.229 -9572.559 144 141
700 1591.2923 32.327829 -2893.2353 1839.7574 1150.4614 21.719102 57.999862 21.666164 27292.974 -9573.9009 168 165
800 1580.8587 -105.51079 654.26389 -160.04168 -810.75457 21.670225 58.109245 21.684683 27306.229 -9570.6482 192 186
900 1570.7648 1290.088 1252.3689 255.62548 2362.2695 21.68101 58.100507 21.658755 27283.051 -9567.9864 216 209
1000 1598.1483 -125.35291 -3626.5479 3404.789 -154.29983 21.720146 57.952942 21.686111 27297.313 -9576.2975 240 231
Loop time of 9.17241 on 4 procs for 1000 steps with 2304 atoms
Performance: 18.839 ns/day, 1.274 hours/ns, 109.023 timesteps/s, 251.188 katom-step/s
98.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 7.7477 | 8.0143 | 8.1344 | 5.5 | 87.37
Neigh | 0.050543 | 0.056882 | 0.05986 | 1.6 | 0.62
Comm | 0.069784 | 0.16898 | 0.40996 | 34.2 | 1.84
Output | 0.00015612 | 0.0001707 | 0.00021249 | 0.0 | 0.00
Modify | 0.90628 | 0.93003 | 0.96157 | 2.2 | 10.14
Other | | 0.002053 | | | 0.02
Nlocal: 576 ave 614 max 505 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 2165.75 ave 2204 max 2132 min
Histogram: 1 0 0 0 2 0 0 0 0 1
Neighs: 32430.8 ave 35552 max 26564 min
Histogram: 1 0 0 0 0 0 1 0 0 2
FullNghs: 64861.5 ave 71111 max 53164 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Total # of neighbors = 259446
Ave neighs/atom = 112.60677
Neighbor list builds = 62
Dangerous builds = 0
#write_data pulse_end.data
Total wall time: 0:00:09

View File

@ -61,7 +61,7 @@ int EAMT::init(const int ntypes, double host_cutforcesq, int **host_type2rhor,
if (onetype>0)
onetype=-1;
else if (onetype==0)
onetype=i*max_shared_types+i;
onetype=i;
}
if (onetype<0) onetype=0;
#endif
@ -109,7 +109,7 @@ int EAMT::init(const int ntypes, double host_cutforcesq, int **host_type2rhor,
int lj_types=ntypes;
shared_types=false;
if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
if (lj_types<=max_shared_types && this->_block_size>=max_shared_types*max_shared_types) {
lj_types=max_shared_types;
shared_types=true;
}

View File

@ -1,5 +1,35 @@
--- src/Makefile.orig 2020-05-03 03:50:23.501557199 -0400
+++ src/Makefile 2020-05-03 03:53:32.147681674 -0400
--- Makefile.orig 2025-06-04 12:16:01.056286325 -0400
+++ Makefile 2025-06-04 12:18:47.454879006 -0400
@@ -11,8 +11,7 @@
# Build all of the executable files
all:
- $(MAKE) -C src
- $(MAKE) -C examples
+ $(MAKE) -C src depend libvoro++.a
# Build the help files (with Doxygen)
help:
@@ -24,16 +23,12 @@
$(MAKE) -C examples clean
# Install the executable, man page, and shared library
-install:
- $(MAKE) -C src
- $(INSTALL) -d $(IFLAGS_EXEC) $(PREFIX)/bin
+install: all
$(INSTALL) -d $(IFLAGS_EXEC) $(PREFIX)/lib
$(INSTALL) -d $(IFLAGS_EXEC) $(PREFIX)/man
$(INSTALL) -d $(IFLAGS_EXEC) $(PREFIX)/man/man1
$(INSTALL) -d $(IFLAGS_EXEC) $(PREFIX)/include
$(INSTALL) -d $(IFLAGS_EXEC) $(PREFIX)/include/voro++
- $(INSTALL) $(IFLAGS_EXEC) src/voro++ $(PREFIX)/bin
- $(INSTALL) $(IFLAGS) man/voro++.1 $(PREFIX)/man/man1
$(INSTALL) $(IFLAGS) src/libvoro++.a $(PREFIX)/lib
$(INSTALL) $(IFLAGS) src/voro++.hh $(PREFIX)/include/voro++
$(INSTALL) $(IFLAGS) src/c_loops.hh $(PREFIX)/include/voro++
--- src/Makefile.orig 2013-10-17 13:54:13.000000000 -0400
+++ src/Makefile 2025-06-04 12:16:47.293104880 -0400
@@ -10,10 +10,10 @@
# List of the common source files
objs=cell.o common.o container.o unitcell.o v_compute.o c_loops.o \

View File

@ -29,6 +29,7 @@
#include "variable.h"
#include <algorithm>
#include <utility>
using namespace LAMMPS_NS;
using namespace FixConst;
@ -113,7 +114,7 @@ FixAveMoments::FixAveMoments(LAMMPS *lmp, int narg, char **arg) :
if ((val.which == ArgInfo::NONE) || (val.which == ArgInfo::UNKNOWN) || (argi.get_dim() > 1))
error->all(FLERR, val.iarg, "Invalid fix ave/moments argument: {}", arg[i]);
values.push_back(val);
values.push_back(std::move(val));
}
if (nvalues != (int)values.size())
error->all(FLERR, Error::NOPOINTER,

View File

@ -29,7 +29,7 @@ class MLIAPModelPythonKokkos : public MLIAPModelPython, public MLIAPModelKokkos<
public:
MLIAPModelPythonKokkos(LAMMPS *, char * = nullptr);
~MLIAPModelPythonKokkos();
void read_coeffs(char *fname);
void read_coeffs(char *fname) override;
void compute_gradients(class MLIAPData *) override;
void compute_gradgrads(class MLIAPData *) override;

View File

@ -37,15 +37,9 @@
#include <cstring>
// prototype repeated from base class implementation
namespace LAMMPS_NS {
struct ACEALImpl {
ACEALImpl() : basis_set(nullptr), ace(nullptr) {}
~ACEALImpl() {
delete basis_set;
delete ace;
}
ACEBBasisSet *basis_set;
ACEBEvaluator *ace;
};

View File

@ -38,14 +38,9 @@
#include <cstring>
// prototype repeated from base class implementation
namespace LAMMPS_NS {
struct ACEImpl {
ACEImpl() : basis_set(nullptr), ace(nullptr) {}
~ACEImpl()
{
delete basis_set;
delete ace;
}
ACECTildeBasisSet *basis_set;
ACERecursiveEvaluator *ace;
};

View File

@ -20,6 +20,7 @@
#include "angle.h"
#include "atom.h"
#include "bond.h"
#include "citeme.h"
#include "comm.h"
#include "compute.h"
#include "compute_voronoi_atom.h"
@ -31,6 +32,7 @@
#include "group.h"
#include "improper.h"
#include "kspace.h"
#include "math_extra.h"
#include "math_special.h"
#include "memory.h"
#include "modify.h"
@ -40,15 +42,17 @@
#include "region.h"
#include "update.h"
#include <cctype>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <unordered_set>
using namespace LAMMPS_NS;
using namespace FixConst;
using MathExtra::distsq3;
using MathSpecial::square;
static const char cite_fix_neighbor_swap_c[] =
static const char cite_fix_neighbor_swap[] =
"fix neighbor/swap command: doi:10.1016/j.commatsci.2022.111929\n\n"
"@Article{Tavenner2023111929,\n"
" author = {Jacob P. Tavenner and Mikhail I. Mendelev and John W. Lawson},\n"
@ -58,15 +62,17 @@ static const char cite_fix_neighbor_swap_c[] =
" year = {2023},\n"
" volume = {218},\n"
" pages = {111929}\n"
" url = {https://www.sciencedirect.com/science/article/pii/S0927025622006401}\n"
" url = {https://dx.doi.org/10.1016/j.commatsci.2022.111929}\n"
"}\n\n";
/* ---------------------------------------------------------------------- */
FixNeighborSwap::FixNeighborSwap(LAMMPS *lmp, int narg, char **arg) :
Fix(lmp, narg, arg), region(nullptr), idregion(nullptr), type_list(nullptr), qtype(nullptr),
c_voro(nullptr), voro_neighbor_list(nullptr), sqrt_mass_ratio(nullptr),
local_swap_iatom_list(nullptr), random_equal(nullptr), c_pe(nullptr)
Fix(lmp, narg, arg), region(nullptr), idregion(nullptr), type_list(nullptr), rate_list(nullptr),
qtype(nullptr), sqrt_mass_ratio(nullptr), voro_neighbor_list(nullptr),
local_swap_iatom_list(nullptr), local_swap_neighbor_list(nullptr),
local_swap_type_list(nullptr), local_swap_probability(nullptr), random_equal(nullptr),
id_voro(nullptr), c_voro(nullptr), c_pe(nullptr)
{
if (narg < 10) utils::missing_cmd_args(FLERR, "fix neighbor/swap", error);
@ -79,37 +85,49 @@ FixNeighborSwap::FixNeighborSwap(LAMMPS *lmp, int narg, char **arg) :
restart_global = 1;
time_depend = 1;
ke_flag = 1;
diff_flag = 0;
rates_flag = 0;
nswaptypes = 0;
if (lmp->citeme) lmp->citeme->add(cite_fix_neighbor_swap);
// required args
nevery = utils::inumeric(FLERR, arg[3], false, lmp);
ncycles = utils::inumeric(FLERR, arg[4], false, lmp);
seed = utils::inumeric(FLERR, arg[5], false, lmp);
double temperature = utils::numeric(FLERR, arg[6], false, lmp);
r_0 = utils::inumeric(FLERR, arg[7], false, lmp);
double r_0 = utils::inumeric(FLERR, arg[7], false, lmp);
if (nevery <= 0)
error->all(FLERR, 3, "Illegal fix neighbor/swap command nevery value: {}", nevery);
if (ncycles < 0)
error->all(FLERR, 4, "Illegal fix neighbor/swap command ncycles value: {}", ncycles);
if (seed <= 0) error->all(FLERR, 5, "Illegal fix neighbor/swap command seed value: {}", seed);
if (temperature <= 0.0)
error->all(FLERR, 6, "Illegal fix neighbor/swap command temperature value: {}", temperature);
if (r_0 <= 0.0) error->all(FLERR, 7, "Illegal fix neighbor/swap command R0 value: {}", r_0);
// Voro compute check
int icompute = modify->find_compute(utils::strdup(arg[8]));
if (icompute < 0) error->all(FLERR, "Could not find neighbor compute ID");
c_voro = modify->compute[icompute];
id_voro = utils::strdup(arg[8]);
c_voro = modify->get_compute_by_id(id_voro);
if (!c_voro) error->all(FLERR, 8, "Could not find compute voronoi ID {}", id_voro);
if (c_voro->local_flag == 0)
error->all(FLERR, "Neighbor compute does not compute local info");
error->all(FLERR, 8, "Voronoi compute {} does not compute local info", id_voro);
if (c_voro->size_local_cols != 3)
error->all(FLERR, "Neighbor compute does not give i, j, size as expected");
if (nevery <= 0) error->all(FLERR, "Illegal fix neighbor/swap command nevery value");
if (ncycles < 0) error->all(FLERR, "Illegal fix neighbor/swap command ncycles value");
if (seed <= 0) error->all(FLERR, "Illegal fix neighbor/swap command seed value");
if (temperature <= 0.0) error->all(FLERR, "Illegal fix neighbor/swap command temperature value");
error->all(FLERR, 8, "Voronoi compute {} does not compute i, j, sizes as expected", id_voro);
beta = 1.0 / (force->boltz * temperature);
inv_r_0 = 1.0 / r_0;
memory->create(type_list, atom->ntypes, "neighbor/swap:type_list");
memory->create(rate_list, atom->ntypes, "neighbor/swap:rate_list");
// read options from end of input line
options(narg - 8, &arg[8]);
options(narg - 9, &arg[9]);
// random number generator, same for all procs
@ -126,11 +144,6 @@ FixNeighborSwap::FixNeighborSwap(LAMMPS *lmp, int narg, char **arg) :
nswap_successes = 0.0;
atom_swap_nmax = 0;
voro_neighbor_list = nullptr;
local_swap_iatom_list = nullptr;
local_swap_neighbor_list = nullptr;
local_swap_probability = nullptr;
local_swap_type_list = nullptr;
// set comm size needed by this Fix
@ -153,6 +166,7 @@ FixNeighborSwap::~FixNeighborSwap()
memory->destroy(local_swap_probability);
memory->destroy(local_swap_type_list);
delete[] idregion;
delete[] id_voro;
delete random_equal;
}
@ -160,62 +174,85 @@ FixNeighborSwap::~FixNeighborSwap()
parse optional parameters at end of input line
------------------------------------------------------------------------- */
static const std::unordered_set<std::string> known_keywords = {"region", "ke", "types", "diff",
"rates"};
static bool is_keyword(const std::string &arg)
{
return known_keywords.find(arg) != known_keywords.end();
}
void FixNeighborSwap::options(int narg, char **arg)
{
if (narg < 0) error->all(FLERR, "Illegal fix neighbor/swap command\n");
ke_flag = 1;
diff_flag = 0;
rates_flag = 0;
nswaptypes = 0;
if (narg < 0) utils::missing_cmd_args(FLERR, "fix neighbor/swap", error);
int ioffset = 9; // first 9 arguments are fixed and handled in constructor
int iarg = 0;
while (iarg < narg) {
if (strcmp(arg[iarg], "region") == 0) {
if (iarg + 2 > narg) error->all(FLERR, "Illegal fix neighbor/swap command");
region = domain->get_region_by_id(arg[iarg + 1]);
if (!region) error->all(FLERR, "Region ID for fix neighbor/swap does not exist");
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "fix neighbor/swap region", error);
delete[] idregion;
idregion = utils::strdup(arg[iarg + 1]);
region = domain->get_region_by_id(idregion);
if (!region)
error->all(FLERR, iarg + 1 + ioffset, "Region ID {} for fix neighbor/swap does not exist",
idregion);
iarg += 2;
} else if (strcmp(arg[iarg], "ke") == 0) {
if (iarg + 2 > narg) error->all(FLERR, "Illegal fix neighbor/swap command");
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "fix neighbor/swap ke", error);
ke_flag = utils::logical(FLERR, arg[iarg + 1], false, lmp);
iarg += 2;
} else if (strcmp(arg[iarg], "types") == 0) {
if (iarg + 3 > narg) error->all(FLERR, "Illegal fix neighbor/swap command");
if (diff_flag != 0) error->all(FLERR, "Illegal fix neighbor/swap command");
if (iarg + 3 > narg) utils::missing_cmd_args(FLERR, "fix neighbor/swap types", error);
if (diff_flag)
error->all(FLERR, iarg + ioffset, "Cannot use 'diff' and 'types' keywords together");
iarg++;
nswaptypes = 0;
while (iarg < narg) {
if (isalpha(arg[iarg][0])) break;
if (nswaptypes >= atom->ntypes) error->all(FLERR, "Illegal fix neighbor/swap command");
type_list[nswaptypes] = utils::numeric(FLERR, arg[iarg], false, lmp);
if (is_keyword(arg[iarg])) break;
if (nswaptypes >= atom->ntypes)
error->all(FLERR, iarg + ioffset, "Too many arguments to fix neighbor/swap types");
type_list[nswaptypes] = utils::expand_type_int(FLERR, arg[iarg], Atom::ATOM, lmp);
nswaptypes++;
iarg++;
}
} else if (strcmp(arg[iarg], "diff") == 0) {
if (iarg + 2 > narg) error->all(FLERR, "Illegal fix neighbor/swap command");
if (nswaptypes != 0) error->all(FLERR, "Illegal fix neighbor/swap command");
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "fix neighbor/swap diff", error);
if (diff_flag) error->all(FLERR, iarg + ioffset, "Cannot use 'diff' keyword multiple times");
if (nswaptypes != 0)
error->all(FLERR, iarg + ioffset, "Cannot use 'diff' and 'types' keywords together");
type_list[nswaptypes] = utils::numeric(FLERR, arg[iarg + 1], false, lmp);
diff_flag = 1;
nswaptypes++;
iarg += 2;
} else if (strcmp(arg[iarg], "rates") == 0) {
if (iarg + atom->ntypes >= narg) error->all(FLERR, "Illegal fix neighbor/swap command");
if (iarg + atom->ntypes >= narg)
utils::missing_cmd_args(FLERR, "fix neighbor/swap rates", error);
iarg++;
int i = 0;
while (iarg < narg) {
if (isalpha(arg[iarg][0])) break;
if (i >= atom->ntypes) error->all(FLERR, "Illegal fix neighbor/swap command");
if (is_keyword(arg[iarg])) break;
if (i >= atom->ntypes) error->all(FLERR, "Too many values for fix neighbor/swap rates");
rate_list[i] = utils::numeric(FLERR, arg[iarg], false, lmp);
i++;
iarg++;
}
rates_flag = 1;
if (i != atom->ntypes) error->all(FLERR, "Illegal fix neighbor/swap command");
} else
error->all(FLERR, "Illegal fix neighbor/swap command");
if (i != atom->ntypes)
error->all(FLERR, "Fix neighbor/swap rates keyword must have exactly {} arguments",
atom->ntypes);
} else {
error->all(FLERR, "Unknown fix neighbor/swap keyword: {}", arg[iarg]);
}
}
// checks
if (!nswaptypes && !diff_flag)
error->all(FLERR, Error::NOLASTLINE,
"Must specify at either 'types' or 'diff' keyword with fix neighbor/swap");
if (nswaptypes < 2 && !diff_flag)
error->all(FLERR, Error::NOLASTLINE,
"Must specify at least 2 atom types in fix neighbor/swap 'types' keyword");
}
/* ---------------------------------------------------------------------- */
@ -232,23 +269,26 @@ int FixNeighborSwap::setmask()
void FixNeighborSwap::init()
{
c_pe = modify->get_compute_by_id("thermo_pe");
if (!c_pe) error->all(FLERR, Error::NOLASTLINE, "Could not find 'thermo_pe' compute");
int *type = atom->type;
if (nswaptypes < 2 && !diff_flag)
error->all(FLERR, "Must specify at least 2 types in fix neighbor/swap command");
c_voro = modify->get_compute_by_id(id_voro);
if (!c_voro)
error->all(FLERR, Error::NOLASTLINE, "Could not find compute voronoi ID {}", id_voro);
// set index and check validity of region
if (idregion) {
region = domain->get_region_by_id(idregion);
if (!region) error->all(FLERR, "Region {} for fix setforce does not exist", idregion);
if (!region)
error->all(FLERR, Error::NOLASTLINE, "Region {} for fix neighbor/swap does not exist",
idregion);
}
for (int iswaptype = 0; iswaptype < nswaptypes; iswaptype++)
if (type_list[iswaptype] <= 0 || type_list[iswaptype] > atom->ntypes)
error->all(FLERR, "Invalid atom type in fix neighbor/swap command");
error->all(FLERR, Error::NOLASTLINE, "Invalid atom type in fix neighbor/swap command");
int *type = atom->type;
if (atom->q_flag) {
double qmax, qmin;
int firstall, first;
@ -258,23 +298,27 @@ void FixNeighborSwap::init()
for (int i = 0; i < atom->nlocal; i++) {
if (atom->mask[i] & groupbit) {
if (type[i] == type_list[iswaptype]) {
if (first) {
if (first > 0) {
qtype[iswaptype] = atom->q[i];
first = 0;
} else if (qtype[iswaptype] != atom->q[i])
error->one(FLERR, "All atoms of a swapped type must have the same charge.");
first = -1;
}
}
}
MPI_Allreduce(&first, &firstall, 1, MPI_INT, MPI_MIN, world);
if (firstall)
error->all(FLERR,
"At least one atom of each swapped type must be present to define charges.");
if (firstall < 0)
error->all(FLERR, Error::NOLASTLINE,
"All atoms of a swapped type must have the same charge");
if (firstall > 0)
error->all(FLERR, Error::NOLASTLINE,
"At least one atom of each swapped type must be present to define charges");
if (first) qtype[iswaptype] = -DBL_MAX;
MPI_Allreduce(&qtype[iswaptype], &qmax, 1, MPI_DOUBLE, MPI_MAX, world);
if (first) qtype[iswaptype] = DBL_MAX;
MPI_Allreduce(&qtype[iswaptype], &qmin, 1, MPI_DOUBLE, MPI_MIN, world);
if (qmax != qmin) error->all(FLERR, "All atoms of a swapped type must have same charge.");
if (qmax != qmin)
error->all(FLERR, Error::NOLASTLINE, "All atoms of a swapped type must have same charge.");
}
}
@ -308,8 +352,9 @@ void FixNeighborSwap::init()
int flagall;
MPI_Allreduce(&flag, &flagall, 1, MPI_INT, MPI_SUM, world);
if (flagall) error->all(FLERR, "Cannot do neighbor/swap on atoms in atom_modify first group");
if (flagall)
error->all(FLERR, Error::NOLASTLINE,
"Cannot use fix neighbor/swap on atoms in atom_modify first group");
}
}
@ -359,9 +404,6 @@ void FixNeighborSwap::pre_exchange()
int FixNeighborSwap::attempt_swap()
{
// int nlocal = atom->nlocal;
tagint *id = atom->tag;
if (niswap == 0) return 0;
// pre-swap energy
@ -372,9 +414,6 @@ int FixNeighborSwap::attempt_swap()
int i = pick_i_swap_atom();
// get global id and position of atom i
// get_global_i(i);
// build nearest-neighbor list based on atom i
build_i_neighbor_list(i);
@ -382,7 +421,7 @@ int FixNeighborSwap::attempt_swap()
// pick a neighbor atom j based on i neighbor list
jtype_selected = -1;
int j = pick_j_swap_neighbor(i);
int j = pick_j_swap_neighbor();
int itype = type_list[0];
int jtype = jtype_selected;
@ -494,26 +533,23 @@ double FixNeighborSwap::energy_full()
int FixNeighborSwap::pick_i_swap_atom()
{
tagint *id = atom->tag;
int id_center_local = -1;
int i = -1;
int iwhichglobal = static_cast<int>(niswap * random_equal->uniform());
if ((iwhichglobal >= niswap_before) && (iwhichglobal < niswap_before + niswap_local)) {
int iwhichlocal = iwhichglobal - niswap_before;
i = local_swap_iatom_list[iwhichlocal];
id_center_local = id[i];
MPI_Allreduce(&id[i], &id_center, 1, MPI_INT, MPI_MAX, world);
} else {
MPI_Allreduce(&id[i], &id_center, 1, MPI_INT, MPI_MAX, world);
id_center = -1;
}
return i;
}
/* ----------------------------------------------------------------------
------------------------------------------------------------------------- */
int FixNeighborSwap::pick_j_swap_neighbor(int i)
int FixNeighborSwap::pick_j_swap_neighbor()
{
int j = -1;
int jtype_selected_local = -1;
@ -535,7 +571,7 @@ int FixNeighborSwap::pick_j_swap_neighbor(int i)
return j;
}
}
error->all(FLERR, "Did not select local neighbor swap atom");
error->all(FLERR, Error::NOLASTLINE, "Did not select local neighbor swap atom");
}
MPI_Allreduce(&jtype_selected_local, &jtype_selected, 1, MPI_INT, MPI_MAX, world);
@ -545,16 +581,6 @@ int FixNeighborSwap::pick_j_swap_neighbor(int i)
/* ----------------------------------------------------------------------
------------------------------------------------------------------------- */
double FixNeighborSwap::get_distance(double *i, double *j)
{
double r = sqrt(MathSpecial::square((i[0] - j[0])) + MathSpecial::square((i[1] - j[1])) +
MathSpecial::square((i[2] - j[2])));
return r;
}
/* ----------------------------------------------------------------------
------------------------------------------------------------------------- */
void FixNeighborSwap::build_i_neighbor_list(int i_center)
{
int nghost = atom->nghost;
@ -621,20 +647,18 @@ void FixNeighborSwap::build_i_neighbor_list(int i_center)
// Get distance if own center atom
double r = INFINITY;
if (i_center >= 0) { double r = get_distance(x[temp_j], x[i_center]); }
// Get local id of ghost center atom when ghost
for (int i = nlocal; i < nlocal + nghost; i++) {
if ((id[i] == id_center) && (get_distance(x[temp_j], x[i]) < r)) {
r = get_distance(x[temp_j], x[i]);
}
double rtmp = sqrt(distsq3(x[temp_j], x[i]));
if ((id[i] == id_center) && (rtmp < r)) r = rtmp;
}
if (rates_flag) {
local_swap_probability[njswap_local] =
rate_list[type[temp_j] - 1] * exp(-MathSpecial::square(r / r_0));
rate_list[type[temp_j] - 1] * exp(-square(r * inv_r_0));
} else {
local_swap_probability[njswap_local] = exp(-MathSpecial::square(r / r_0));
local_swap_probability[njswap_local] = exp(-square(r * inv_r_0));
}
local_probability += local_swap_probability[njswap_local];
local_swap_type_list[njswap_local] = type[temp_j];
@ -646,20 +670,18 @@ void FixNeighborSwap::build_i_neighbor_list(int i_center)
// Calculate distance from i to each j, adjust probability of selection
// Get distance if own center atom
double r = INFINITY;
if (i_center >= 0) { double r = get_distance(x[temp_j], x[i_center]); }
// Get local id of ghost center atom when ghost
for (int i = nlocal; i < nlocal + nghost; i++) {
if ((id[i] == id_center) && (get_distance(x[temp_j], x[i]) < r)) {
r = get_distance(x[temp_j], x[i]);
}
double rtmp = sqrt(distsq3(x[temp_j], x[i]));
if ((id[i] == id_center) && (rtmp < r)) r = rtmp;
}
if (rates_flag) {
local_swap_probability[njswap_local] =
rate_list[type[temp_j] - 1] * exp(-MathSpecial::square(r / r_0));
rate_list[type[temp_j] - 1] * exp(-square(r * inv_r_0));
} else {
local_swap_probability[njswap_local] = exp(-MathSpecial::square(r / r_0));
local_swap_probability[njswap_local] = exp(-square(r * inv_r_0));
}
local_probability += local_swap_probability[njswap_local];
@ -677,19 +699,18 @@ void FixNeighborSwap::build_i_neighbor_list(int i_center)
// Calculate distance from i to each j, adjust probability of selection
// Get distance if own center atom
double r = INFINITY;
if (i_center >= 0) { r = get_distance(x[temp_j], x[i_center]); }
// Get local id of ghost center atoms
for (int i = nlocal; i < nlocal + nghost; i++) {
if ((id[i] == id_center) && (get_distance(x[temp_j], x[i]) < r))
r = get_distance(x[temp_j], x[i]);
double rtmp = sqrt(distsq3(x[temp_j], x[i]));
if ((id[i] == id_center) && (rtmp < r)) r = rtmp;
}
if (rates_flag) {
local_swap_probability[njswap_local] =
rate_list[type[temp_j] - 1] * exp(-MathSpecial::square(r / r_0));
rate_list[type[temp_j] - 1] * exp(-square(r * inv_r_0));
} else {
local_swap_probability[njswap_local] = exp(-MathSpecial::square(r / r_0));
local_swap_probability[njswap_local] = exp(-square(r * inv_r_0));
}
local_probability += local_swap_probability[njswap_local];
@ -702,20 +723,18 @@ void FixNeighborSwap::build_i_neighbor_list(int i_center)
// Calculate distance from i to each j, adjust probability of selection
// Get distance if own center atom
double r = INFINITY;
if (i_center >= 0) { double r = get_distance(x[temp_j], x[i_center]); }
// Get local id of ghost center atom when ghost
for (int i = nlocal; i < nlocal + nghost; i++) {
if ((id[i] == id_center) && (get_distance(x[temp_j], x[i]) < r)) {
r = get_distance(x[temp_j], x[i]);
}
double rtmp = sqrt(distsq3(x[temp_j], x[i]));
if ((id[i] == id_center) && (rtmp < r)) r = rtmp;
}
if (rates_flag) {
local_swap_probability[njswap_local] =
rate_list[type[temp_j] - 1] * exp(-MathSpecial::square(r / r_0));
rate_list[type[temp_j] - 1] * exp(-square(r * inv_r_0));
} else {
local_swap_probability[njswap_local] = exp(-MathSpecial::square(r / r_0));
local_swap_probability[njswap_local] = exp(-square(r * inv_r_0));
}
local_probability += local_swap_probability[njswap_local];
@ -897,5 +916,6 @@ void FixNeighborSwap::restart(char *buf)
bigint ntimestep_restart = (bigint) ubuf(list[n++]).i;
if (ntimestep_restart != update->ntimestep)
error->all(FLERR, "Must not reset timestep when restarting fix neighbor/swap");
error->all(FLERR, Error::NOLASTLINE,
"Must not reset timestep when restarting fix neighbor/swap");
}

View File

@ -47,7 +47,6 @@ class FixNeighborSwap : public Fix {
int niswap, njswap; // # of i,j swap atoms on all procs
int niswap_local, njswap_local; // # of swap atoms on this proc
int niswap_before, njswap_before; // # of swap atoms on procs < this proc
// int global_i_ID; // global id of selected i atom
class Region *region; // swap region
char *idregion; // swap region id
@ -66,7 +65,7 @@ class FixNeighborSwap : public Fix {
bool unequal_cutoffs;
int atom_swap_nmax;
double beta, r_0;
double beta, inv_r_0;
double local_probability; // Total swap probability stored on this proc
double global_probability; // Total swap probability across all proc
double prev_probability; // Swap probability on proc < this proc
@ -81,15 +80,14 @@ class FixNeighborSwap : public Fix {
class RanPark *random_equal;
class Compute *c_voro;
class Compute *c_pe;
char *id_voro;
class Compute *c_voro, *c_pe;
void options(int, char **);
int attempt_swap();
double energy_full();
int pick_i_swap_atom();
int pick_j_swap_neighbor(int);
double get_distance(double[3], double[3]);
int pick_j_swap_neighbor();
void build_i_neighbor_list(int);
void update_iswap_atoms_list();
};

View File

@ -227,7 +227,7 @@ double ComputeRHEOKernel::calc_dw(int i, int j, double delx, double dely, double
int corrections_i = check_corrections(i);
int corrections_j = check_corrections(j);
wp = calc_dw_scalar_quintic(delx, dely, delz, r);
wp = calc_dw_scalar_quintic(r);
// Overwrite if there are corrections
double dxij[3] = {delx, dely, delz};
@ -279,7 +279,7 @@ double ComputeRHEOKernel::calc_w_quintic(double r)
/* ---------------------------------------------------------------------- */
double ComputeRHEOKernel::calc_dw_scalar_quintic(double delx, double dely, double delz, double r)
double ComputeRHEOKernel::calc_dw_scalar_quintic(double r)
{
double wp, tmp1, tmp2, tmp3, tmp1sq, tmp2sq, tmp3sq, s;
@ -312,7 +312,7 @@ double ComputeRHEOKernel::calc_dw_scalar_quintic(double delx, double dely, doubl
double ComputeRHEOKernel::calc_dw_quintic(double delx, double dely, double delz, double r,
double *dW1, double *dW2)
{
double wp = calc_dw_scalar_quintic(delx, dely, delz, r);
double wp = calc_dw_scalar_quintic(r);
double wprinv = wp / r;
dW1[0] = delx * wprinv;

View File

@ -40,7 +40,7 @@ class ComputeRHEOKernel : public Compute {
double calc_w(int, int, double, double, double, double);
double calc_dw(int, int, double, double, double, double);
double calc_w_quintic(double);
double calc_dw_scalar_quintic(double, double, double, double);
double calc_dw_scalar_quintic(double);
double calc_dw_quintic(double, double, double, double, double *, double *);
double calc_w_wendlandc4(double);
double calc_dw_wendlandc4(double, double, double, double, double *, double *);

View File

@ -289,7 +289,7 @@ double FixRHEOPressure::calc_rho(double p, int i)
/* ---------------------------------------------------------------------- */
double FixRHEOPressure::calc_csq(double p, int i)
double FixRHEOPressure::calc_csq(double /*p*/, int i)
{
int type = atom->type[i];
double csq2 = csq[type];

View File

@ -40,10 +40,9 @@ static constexpr int FACESDELTA = 10000;
/* ---------------------------------------------------------------------- */
ComputeVoronoi::ComputeVoronoi(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg), con_mono(nullptr), con_poly(nullptr),
radstr(nullptr), voro(nullptr), edge(nullptr), sendvector(nullptr),
rfield(nullptr), tags(nullptr), occvec(nullptr), sendocc(nullptr),
lroot(nullptr), lnext(nullptr), faces(nullptr)
Compute(lmp, narg, arg), con_mono(nullptr), con_poly(nullptr), radstr(nullptr),
voro(nullptr), edge(nullptr), sendvector(nullptr), rfield(nullptr), tags(nullptr),
occvec(nullptr), lroot(nullptr), lnext(nullptr), faces(nullptr)
{
int sgroup;
@ -156,9 +155,6 @@ ComputeVoronoi::~ComputeVoronoi()
memory->destroy(lroot);
memory->destroy(lnext);
memory->destroy(occvec);
#ifdef NOTINPLACE
memory->destroy(sendocc);
#endif
memory->destroy(tags);
memory->destroy(faces);
}
@ -211,9 +207,6 @@ void ComputeVoronoi::compute_peratom()
oldnatoms = atom->natoms;
oldmaxtag = atom->map_tag_max;
memory->create(occvec,oldmaxtag,"voronoi/atom:occvec");
#ifdef NOTINPLACE
memory->create(sendocc,oldmaxtag,"voronoi/atom:sendocc");
#endif
}
// get the occupation of each original voronoi cell
@ -429,12 +422,7 @@ void ComputeVoronoi::checkOccupation()
// MPI sum occupation
#ifdef NOTINPLACE
memcpy(sendocc, occvec, oldnatoms*sizeof(*occvec));
MPI_Allreduce(sendocc, occvec, oldnatoms, MPI_INT, MPI_SUM, world);
#else
MPI_Allreduce(MPI_IN_PLACE, occvec, oldnatoms, MPI_INT, MPI_SUM, world);
#endif
// determine the total number of atoms in this atom's currently occupied cell

View File

@ -61,7 +61,7 @@ class ComputeVoronoi : public Compute {
bool onlyGroup, occupation;
tagint *tags, oldmaxtag;
int *occvec, *sendocc, *lroot, *lnext, lmax, oldnatoms, oldnall;
int *occvec, *lroot, *lnext, lmax, oldnatoms, oldnall;
int faces_flag, nfaces, nfacesmax;
double **faces;
};

View File

@ -219,6 +219,9 @@
# define FMT_UNICODE !FMT_MSC_VERSION
#endif
// LAMMPS customization: avoid problems with consteval altogether
#define FMT_CONSTEVAL
#ifndef FMT_CONSTEVAL
# if ((FMT_GCC_VERSION >= 1000 || FMT_CLANG_VERSION >= 1101) && \
(!defined(__apple_build_version__) || \

View File

@ -585,7 +585,6 @@ void Input::substitute(char *&str, char *&str2, int &max, int &max2, int flag)
int i,n,paren_count,nchars;
char immediate[256];
char *var,*value,*beyond;
int quoteflag = 0;
char *ptrmatch;
char *ptr = str;
@ -599,7 +598,7 @@ void Input::substitute(char *&str, char *&str2, int &max, int &max2, int flag)
// variable substitution
if (*ptr == '$' && !quoteflag) {
if (*ptr == '$') {
// value = ptr to expanded variable
// variable name between curly braces, e.g. ${a}

View File

@ -174,13 +174,13 @@ void ReadData::command(int narg, char **arg)
addflag = VALUE;
bigint offset = utils::bnumeric(FLERR, arg[iarg + 1], false, lmp);
if (offset > MAXTAGINT)
error->all(FLERR, "Read data add IDoffset {} is too big", offset);
error->all(FLERR, iarg, "Read data add IDoffset {} is too big", offset);
id_offset = offset;
if (atom->molecule_flag) {
offset = utils::bnumeric(FLERR, arg[iarg + 2], false, lmp);
if (offset > MAXTAGINT)
error->all(FLERR, "Read data add MOLoffset {} is too big", offset);
error->all(FLERR, iarg, "Read data add MOLoffset {} is too big", offset);
mol_offset = offset;
iarg++;
}
@ -195,7 +195,7 @@ void ReadData::command(int narg, char **arg)
doffset = utils::inumeric(FLERR, arg[iarg + 4], false, lmp);
ioffset = utils::inumeric(FLERR, arg[iarg + 5], false, lmp);
if (toffset < 0 || boffset < 0 || aoffset < 0 || doffset < 0 || ioffset < 0)
error->all(FLERR, "Illegal read_data offset value(s)");
error->all(FLERR, iarg, "Illegal read_data offset value(s)");
iarg += 6;
} else if (strcmp(arg[iarg], "shift") == 0) {
if (iarg + 4 > narg) utils::missing_cmd_args(FLERR, "read_data shift", error);
@ -204,7 +204,8 @@ void ReadData::command(int narg, char **arg)
shift[1] = utils::numeric(FLERR, arg[iarg + 2], false, lmp);
shift[2] = utils::numeric(FLERR, arg[iarg + 3], false, lmp);
if (domain->dimension == 2 && shift[2] != 0.0)
error->all(FLERR, "Non-zero read_data shift z value for 2d simulation not allowed");
error->all(FLERR, iarg + 2,
"Non-zero read_data shift z value for 2d simulation not allowed");
iarg += 4;
} else if (strcmp(arg[iarg], "nocoeff") == 0) {
coeffflag = 0;
@ -213,86 +214,92 @@ void ReadData::command(int narg, char **arg)
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/atom/types", error);
extra_atom_types = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (extra_atom_types < 0)
error->all(FLERR, "Illegal read_data extra/atom/types value {}", extra_atom_types);
error->all(FLERR, iarg + 1, "Illegal read_data extra/atom/types value {}",
extra_atom_types);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/bond/types") == 0) {
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/bond/types", error);
if (!atom->avec->bonds_allow)
error->all(FLERR, "No bonds allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No bonds allowed with atom style {}", atom->get_style());
extra_bond_types = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (extra_bond_types < 0)
error->all(FLERR, "Illegal read_data extra/bond/types value {}", extra_bond_types);
error->all(FLERR, iarg + 1, "Illegal read_data extra/bond/types value {}",
extra_bond_types);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/angle/types") == 0) {
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/angle/types", error);
if (!atom->avec->angles_allow)
error->all(FLERR, "No angles allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No angles allowed with atom style {}", atom->get_style());
extra_angle_types = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (extra_angle_types < 0)
error->all(FLERR, "Illegal read_data extra/angle/types value {}", extra_angle_types);
error->all(FLERR, iarg + 1, "Illegal read_data extra/angle/types value {}",
extra_angle_types);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/dihedral/types") == 0) {
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/dihedral/types", error);
if (!atom->avec->dihedrals_allow)
error->all(FLERR, "No dihedrals allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No dihedrals allowed with atom style {}", atom->get_style());
extra_dihedral_types = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (extra_dihedral_types < 0)
error->all(FLERR, "Illegal read_data extra/dihedral/types value {}", extra_dihedral_types);
error->all(FLERR, iarg + 1, "Illegal read_data extra/dihedral/types value {}",
extra_dihedral_types);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/improper/types") == 0) {
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/improper/types", error);
if (!atom->avec->impropers_allow)
error->all(FLERR, "No impropers allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No impropers allowed with atom style {}", atom->get_style());
extra_improper_types = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (extra_improper_types < 0)
error->all(FLERR, "Illegal read_data extra/improper/types value {}", extra_improper_types);
error->all(FLERR, iarg + 1, "Illegal read_data extra/improper/types value {}",
extra_improper_types);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/bond/per/atom") == 0) {
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/bond/per/atom", error);
if (atom->molecular == Atom::ATOMIC)
error->all(FLERR, "No bonds allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No bonds allowed with atom style {}", atom->get_style());
atom->extra_bond_per_atom = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (atom->extra_bond_per_atom < 0)
error->all(FLERR, "Illegal read_data extra/bond/per/atom value {}",
error->all(FLERR, iarg + 1, "Illegal read_data extra/bond/per/atom value {}",
atom->extra_bond_per_atom);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/angle/per/atom") == 0) {
if (iarg + 2 > narg) utils::missing_cmd_args(FLERR, "read_data extra/angle/per/atom", error);
if (atom->molecular == Atom::ATOMIC)
error->all(FLERR, "No angles allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No angles allowed with atom style {}", atom->get_style());
atom->extra_angle_per_atom = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (atom->extra_angle_per_atom < 0)
error->all(FLERR, "Illegal read_data extra/angle/per/atom value {}",
error->all(FLERR, iarg + 1, "Illegal read_data extra/angle/per/atom value {}",
atom->extra_angle_per_atom);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/dihedral/per/atom") == 0) {
if (iarg + 2 > narg)
utils::missing_cmd_args(FLERR, "read_data extra/dihedral/per/atom", error);
if (atom->molecular == Atom::ATOMIC)
error->all(FLERR, "No dihedrals allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No dihedrals allowed with atom style {}", atom->get_style());
atom->extra_dihedral_per_atom = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (atom->extra_dihedral_per_atom < 0)
error->all(FLERR, "Illegal read_data extra/dihedral/per/atom value {}",
error->all(FLERR, iarg + 1, "Illegal read_data extra/dihedral/per/atom value {}",
atom->extra_dihedral_per_atom);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/improper/per/atom") == 0) {
if (iarg + 2 > narg)
utils::missing_cmd_args(FLERR, "read_data extra/improper/per/atom", error);
if (atom->molecular == Atom::ATOMIC)
error->all(FLERR, "No impropers allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No impropers allowed with atom style {}", atom->get_style());
atom->extra_improper_per_atom = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (atom->extra_improper_per_atom < 0)
error->all(FLERR, "Illegal read_data extra/improper/per/atom value {}",
error->all(FLERR, iarg + 1, "Illegal read_data extra/improper/per/atom value {}",
atom->extra_improper_per_atom);
iarg += 2;
} else if (strcmp(arg[iarg], "extra/special/per/atom") == 0) {
if (iarg + 2 > narg)
utils::missing_cmd_args(FLERR, "read_data extra/special/per/atom", error);
if (atom->molecular == Atom::ATOMIC)
error->all(FLERR, "No bonded interactions allowed with atom style {}", atom->get_style());
error->all(FLERR, iarg + 1, "No bonded interactions allowed with atom style {}",
atom->get_style());
force->special_extra = utils::inumeric(FLERR, arg[iarg + 1], false, lmp);
if (force->special_extra < 0)
error->all(FLERR, "Illegal read_data extra/special/per/atom value {}",
error->all(FLERR, iarg + 1, "Illegal read_data extra/special/per/atom value {}",
force->special_extra);
iarg += 2;
} else if (strcmp(arg[iarg], "group") == 0) {
@ -309,12 +316,12 @@ void ReadData::command(int narg, char **arg)
fix_section = (char **) memory->srealloc(fix_section, (nfix + 1) * sizeof(char *),
"read_data:fix_section");
if (is_data_section(arg[iarg + 3]))
error->all(FLERR,
error->all(FLERR, iarg + 3,
"Custom data section name {} for fix {} collides with existing data section",
arg[iarg + 3], arg[iarg + 1]);
fix_index[nfix] = modify->get_fix_by_id(arg[iarg + 1]);
if (!fix_index[nfix])
error->all(FLERR, "Fix ID {} for read_data does not exist", arg[iarg + 1]);
error->all(FLERR, iarg + 1, "Fix ID {} for read_data does not exist", arg[iarg + 1]);
if (strcmp(arg[iarg + 2], "NULL") == 0)
fix_header[nfix] = nullptr;
else
@ -327,7 +334,7 @@ void ReadData::command(int narg, char **arg)
iarg += 4;
} else
error->all(FLERR, "Unknown read_data keyword {}", arg[iarg]);
error->all(FLERR, iarg, "Unknown read_data keyword {}", arg[iarg]);
}
// error checks
@ -339,11 +346,12 @@ void ReadData::command(int narg, char **arg)
"Reading a data file with shrinkwrap boundaries is not "
"compatible with a MSM KSpace style");
if (domain->box_exist && !addflag)
error->all(FLERR, "Cannot use read_data without add keyword after simulation box is defined"
+ utils::errorurl(34));
error->all(FLERR,
"Cannot use read_data without add keyword after simulation box is defined" +
utils::errorurl(34));
if (!domain->box_exist && addflag)
error->all(FLERR, "Cannot use read_data add before simulation box is defined"
+ utils::errorurl(33));
error->all(FLERR,
"Cannot use read_data add before simulation box is defined" + utils::errorurl(33));
if (offsetflag) {
if (addflag == NONE) {
error->all(FLERR, "Cannot use read_data offset without add keyword");
@ -366,7 +374,7 @@ void ReadData::command(int narg, char **arg)
// check if data file is available and readable
if (!platform::file_is_readable(arg[0]))
error->all(FLERR, "Cannot open file {}: {}", arg[0], utils::getsyserror());
error->all(FLERR, Error::ARGZERO, "Cannot open file {}: {}", arg[0], utils::getsyserror());
// reset so we can warn about reset image flags exactly once per data file
@ -528,7 +536,7 @@ void ReadData::command(int narg, char **arg)
if (tilt_flag) triclinic = 1;
} else {
if (xloxhi_flag || yloyhi_flag || zlozhi_flag || tilt_flag)
error->all(FLERR,
error->all(FLERR, Error::ARGZERO,
"Read_data header cannot specify simulation box lo/hi/tilt and ABC vectors");
triclinic = triclinic_general = 1;
}
@ -538,10 +546,11 @@ void ReadData::command(int narg, char **arg)
if (domain->dimension == 2) {
if (triclinic_general == 0) {
if (boxlo[2] >= 0.0 || boxhi[2] <= 0.0)
error->all(FLERR, "Read_data zlo/zhi for 2d simulation must straddle 0.0");
error->all(FLERR, Error::ARGZERO,
"Read_data zlo/zhi for 2d simulation must straddle 0.0");
} else if (triclinic_general == 1) {
if (cvec[0] != 0.0 || cvec[1] != 0.0 || cvec[2] != 1.0 || abc_origin[2] != -0.5)
error->all(FLERR,
error->all(FLERR, Error::ARGZERO,
"Read_data cvec and/or abc_origin is invalid for "
"2d simulation with general triclinic box");
}
@ -619,7 +628,8 @@ void ReadData::command(int narg, char **arg)
if (triclinic_general) {
if (!domain->triclinic_general)
error->all(FLERR, "Read_data subsequent file cannot switch to general triclinic");
error->all(FLERR, Error::ARGZERO,
"Read_data subsequent file cannot switch to general triclinic");
int errflag = 0;
if (avec[0] != domain->avec[0] || avec[1] != domain->avec[1] || avec[2] != domain->avec[2])
errflag = 1;
@ -631,7 +641,8 @@ void ReadData::command(int narg, char **arg)
abc_origin[2] != domain->boxlo[2])
errflag = 1;
if (errflag)
error->all(FLERR, "Read_data subsequent file ABC vectors must be same as first file");
error->all(FLERR, Error::ARGZERO,
"Read_data subsequent file ABC vectors must be same as first file");
if (shift[0] != 0.0 || shift[1] != 0.0 || shift[2] != 0.0)
error->all(FLERR, "Read_data subsequent file with ABC vectors cannot define shift");
@ -640,13 +651,15 @@ void ReadData::command(int narg, char **arg)
} else if (triclinic) {
if (!domain->triclinic || domain->triclinic_general)
error->all(FLERR, "Read_data subsequent file cannot switch to restricted triclinic");
error->all(FLERR, Error::ARGZERO,
"Read_data subsequent file cannot switch to restricted triclinic");
if (xy != domain->xy || xz != domain->xz || yz != domain->yz)
error->all(FLERR, "Read_data subsequent file tilt factors must be same as first file");
} else {
if (domain->triclinic)
error->all(FLERR, "Read_data subsequent file cannot switch to orthogonal");
error->all(FLERR, Error::ARGZERO,
"Read_data subsequent file cannot switch to orthogonal");
}
double oldboxlo[3] = {domain->boxlo[0], domain->boxlo[1], domain->boxlo[2]};
@ -715,7 +728,7 @@ void ReadData::command(int narg, char **arg)
skip_lines(natoms);
} else if (strcmp(keyword, "Velocities") == 0) {
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Velocities");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Velocities");
if (firstpass)
velocities();
else
@ -723,32 +736,35 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Bonds") == 0) {
topoflag = bondflag = 1;
if (nbonds == 0) error->all(FLERR, "Invalid data file section: Bonds");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Bonds");
if (nbonds == 0) error->all(FLERR, Error::ARGZERO, "Invalid data file section: Bonds");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Bonds");
bonds(firstpass);
} else if (strcmp(keyword, "Angles") == 0) {
topoflag = angleflag = 1;
if (nangles == 0) error->all(FLERR, "Invalid data file section: Angles");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Angles");
if (nangles == 0) error->all(FLERR, Error::ARGZERO, "Invalid data file section: Angles");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Angles");
angles(firstpass);
} else if (strcmp(keyword, "Dihedrals") == 0) {
topoflag = dihedralflag = 1;
if (ndihedrals == 0) error->all(FLERR, "Invalid data file section: Dihedrals");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Dihedrals");
if (ndihedrals == 0)
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Dihedrals");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Dihedrals");
dihedrals(firstpass);
} else if (strcmp(keyword, "Impropers") == 0) {
topoflag = improperflag = 1;
if (nimpropers == 0) error->all(FLERR, "Invalid data file section: Impropers");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Impropers");
if (nimpropers == 0)
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Impropers");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Impropers");
impropers(firstpass);
} else if (strcmp(keyword, "Ellipsoids") == 0) {
ellipsoidflag = 1;
if (!avec_ellipsoid) error->all(FLERR, "Invalid data file section: Ellipsoids");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Ellipsoids");
if (!avec_ellipsoid)
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Ellipsoids");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Ellipsoids");
if (firstpass)
bonus(nellipsoids, (AtomVec *) avec_ellipsoid, "ellipsoids");
else
@ -756,8 +772,8 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Lines") == 0) {
lineflag = 1;
if (!avec_line) error->all(FLERR, "Invalid data file section: Lines");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Lines");
if (!avec_line) error->all(FLERR, Error::ARGZERO, "Invalid data file section: Lines");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Lines");
if (firstpass)
bonus(nlines, (AtomVec *) avec_line, "lines");
else
@ -765,8 +781,8 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Triangles") == 0) {
triflag = 1;
if (!avec_tri) error->all(FLERR, "Invalid data file section: Triangles");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Triangles");
if (!avec_tri) error->all(FLERR, Error::ARGZERO, "Invalid data file section: Triangles");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Triangles");
if (firstpass)
bonus(ntris, (AtomVec *) avec_tri, "triangles");
else
@ -774,8 +790,8 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Bodies") == 0) {
bodyflag = 1;
if (!avec_body) error->all(FLERR, "Invalid data file section: Bodies");
if (atomflag == 0) error->all(FLERR, "Must read Atoms before Bodies");
if (!avec_body) error->all(FLERR, Error::ARGZERO, "Invalid data file section: Bodies");
if (atomflag == 0) error->all(FLERR, Error::ARGZERO, "Must read Atoms before Bodies");
bodies(firstpass, (AtomVec *) avec_body);
} else if (strcmp(keyword, "Masses") == 0) {
@ -784,7 +800,8 @@ void ReadData::command(int narg, char **arg)
else
skip_lines(ntypes);
} else if (strcmp(keyword, "Pair Coeffs") == 0) {
if (force->pair == nullptr) error->all(FLERR, "Must define pair_style before Pair Coeffs");
if (force->pair == nullptr)
error->all(FLERR, Error::ARGZERO, "Must define pair_style before Pair Coeffs");
if (firstpass) {
if (me == 0 && !style_match(style, force->pair_style))
error->warning(
@ -795,7 +812,7 @@ void ReadData::command(int narg, char **arg)
skip_lines(ntypes);
} else if (strcmp(keyword, "PairIJ Coeffs") == 0) {
if (force->pair == nullptr)
error->all(FLERR, "Must define pair_style before PairIJ Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define pair_style before PairIJ Coeffs");
if (firstpass) {
if (me == 0 && !style_match(style, force->pair_style))
error->warning(
@ -806,8 +823,9 @@ void ReadData::command(int narg, char **arg)
skip_lines(ntypes * (ntypes + 1) / 2);
} else if (strcmp(keyword, "Bond Coeffs") == 0) {
if (atom->avec->bonds_allow == 0)
error->all(FLERR, "Invalid data file section: Bond Coeffs");
if (force->bond == nullptr) error->all(FLERR, "Must define bond_style before Bond Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Bond Coeffs");
if (force->bond == nullptr)
error->all(FLERR, Error::ARGZERO, "Must define bond_style before Bond Coeffs");
if (firstpass) {
if (me == 0 && !style_match(style, force->bond_style))
error->warning(
@ -818,9 +836,9 @@ void ReadData::command(int narg, char **arg)
skip_lines(nbondtypes);
} else if (strcmp(keyword, "Angle Coeffs") == 0) {
if (atom->avec->angles_allow == 0)
error->all(FLERR, "Invalid data file section: Angle Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Angle Coeffs");
if (force->angle == nullptr)
error->all(FLERR, "Must define angle_style before Angle Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define angle_style before Angle Coeffs");
if (firstpass) {
if (me == 0 && !style_match(style, force->angle_style))
error->warning(
@ -831,9 +849,9 @@ void ReadData::command(int narg, char **arg)
skip_lines(nangletypes);
} else if (strcmp(keyword, "Dihedral Coeffs") == 0) {
if (atom->avec->dihedrals_allow == 0)
error->all(FLERR, "Invalid data file section: Dihedral Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Dihedral Coeffs");
if (force->dihedral == nullptr)
error->all(FLERR, "Must define dihedral_style before Dihedral Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define dihedral_style before Dihedral Coeffs");
if (firstpass) {
if (me == 0 && !style_match(style, force->dihedral_style))
error->warning(
@ -845,9 +863,9 @@ void ReadData::command(int narg, char **arg)
skip_lines(ndihedraltypes);
} else if (strcmp(keyword, "Improper Coeffs") == 0) {
if (atom->avec->impropers_allow == 0)
error->all(FLERR, "Invalid data file section: Improper Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: Improper Coeffs");
if (force->improper == nullptr)
error->all(FLERR, "Must define improper_style before Improper Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define improper_style before Improper Coeffs");
if (firstpass) {
if (me == 0 && !style_match(style, force->improper_style))
error->warning(
@ -860,9 +878,9 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "BondBond Coeffs") == 0) {
if (atom->avec->angles_allow == 0)
error->all(FLERR, "Invalid data file section: BondBond Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: BondBond Coeffs");
if (force->angle == nullptr)
error->all(FLERR, "Must define angle_style before BondBond Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define angle_style before BondBond Coeffs");
if (firstpass)
anglecoeffs(1);
else
@ -870,18 +888,18 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "BondAngle Coeffs") == 0) {
if (atom->avec->angles_allow == 0)
error->all(FLERR, "Invalid data file section: BondAngle Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: BondAngle Coeffs");
if (force->angle == nullptr)
error->all(FLERR, "Must define angle_style before BondAngle Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define angle_style before BondAngle Coeffs");
if (firstpass)
anglecoeffs(2);
else
skip_lines(nangletypes);
} else if (strcmp(keyword, "UreyBradley Coeffs") == 0) {
if (atom->avec->angles_allow == 0)
error->all(FLERR, "Invalid data file section: UreyBradley Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: UreyBradley Coeffs");
if (force->angle == nullptr)
error->all(FLERR, "Must define angle_style before UreyBradley Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define angle_style before UreyBradley Coeffs");
if (firstpass)
anglecoeffs(3);
else
@ -889,9 +907,10 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "MiddleBondTorsion Coeffs") == 0) {
if (atom->avec->dihedrals_allow == 0)
error->all(FLERR, "Invalid data file section: MiddleBondTorsion Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: MiddleBondTorsion Coeffs");
if (force->dihedral == nullptr)
error->all(FLERR, "Must define dihedral_style before MiddleBondTorsion Coeffs");
error->all(FLERR, Error::ARGZERO,
"Must define dihedral_style before MiddleBondTorsion Coeffs");
if (firstpass)
dihedralcoeffs(1);
else
@ -899,9 +918,10 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "EndBondTorsion Coeffs") == 0) {
if (atom->avec->dihedrals_allow == 0)
error->all(FLERR, "Invalid data file section: EndBondTorsion Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: EndBondTorsion Coeffs");
if (force->dihedral == nullptr)
error->all(FLERR, "Must define dihedral_style before EndBondTorsion Coeffs");
error->all(FLERR, Error::ARGZERO,
"Must define dihedral_style before EndBondTorsion Coeffs");
if (firstpass)
dihedralcoeffs(2);
else
@ -909,9 +929,10 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "AngleTorsion Coeffs") == 0) {
if (atom->avec->dihedrals_allow == 0)
error->all(FLERR, "Invalid data file section: AngleTorsion Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: AngleTorsion Coeffs");
if (force->dihedral == nullptr)
error->all(FLERR, "Must define dihedral_style before AngleTorsion Coeffs");
error->all(FLERR, Error::ARGZERO,
"Must define dihedral_style before AngleTorsion Coeffs");
if (firstpass)
dihedralcoeffs(3);
else
@ -919,9 +940,10 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "AngleAngleTorsion Coeffs") == 0) {
if (atom->avec->dihedrals_allow == 0)
error->all(FLERR, "Invalid data file section: AngleAngleTorsion Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: AngleAngleTorsion Coeffs");
if (force->dihedral == nullptr)
error->all(FLERR, "Must define dihedral_style before AngleAngleTorsion Coeffs");
error->all(FLERR, Error::ARGZERO,
"Must define dihedral_style before AngleAngleTorsion Coeffs");
if (firstpass)
dihedralcoeffs(4);
else
@ -929,9 +951,9 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "BondBond13 Coeffs") == 0) {
if (atom->avec->dihedrals_allow == 0)
error->all(FLERR, "Invalid data file section: BondBond13 Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: BondBond13 Coeffs");
if (force->dihedral == nullptr)
error->all(FLERR, "Must define dihedral_style before BondBond13 Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define dihedral_style before BondBond13 Coeffs");
if (firstpass)
dihedralcoeffs(5);
else
@ -939,9 +961,9 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "AngleAngle Coeffs") == 0) {
if (atom->avec->impropers_allow == 0)
error->all(FLERR, "Invalid data file section: AngleAngle Coeffs");
error->all(FLERR, Error::ARGZERO, "Invalid data file section: AngleAngle Coeffs");
if (force->improper == nullptr)
error->all(FLERR, "Must define improper_style before AngleAngle Coeffs");
error->all(FLERR, Error::ARGZERO, "Must define improper_style before AngleAngle Coeffs");
if (firstpass)
impropercoeffs(1);
else
@ -949,7 +971,8 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Atom Type Labels") == 0) {
if (firstpass) {
if (atomflag == 1) error->all(FLERR, "Must read Atom Type Labels before Atoms");
if (atomflag == 1)
error->all(FLERR, Error::ARGZERO, "Must read Atom Type Labels before Atoms");
tlabelflag = 1;
typelabels(Atom::ATOM);
} else
@ -958,7 +981,8 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Bond Type Labels") == 0) {
if (nbondtypes) {
if (firstpass) {
if (bondflag == 1) error->all(FLERR, "Must read Bond Type Labels before Bonds");
if (bondflag == 1)
error->all(FLERR, Error::ARGZERO, "Must read Bond Type Labels before Bonds");
blabelflag = 1;
typelabels(Atom::BOND);
} else
@ -968,7 +992,8 @@ void ReadData::command(int narg, char **arg)
} else if (strcmp(keyword, "Angle Type Labels") == 0) {
if (nangletypes) {
if (firstpass) {
if (angleflag == 1) error->all(FLERR, "Must read Angle Type Labels before Angles");
if (angleflag == 1)
error->all(FLERR, Error::ARGZERO, "Must read Angle Type Labels before Angles");
alabelflag = 1;
typelabels(Atom::ANGLE);
} else
@ -979,7 +1004,7 @@ void ReadData::command(int narg, char **arg)
if (ndihedraltypes) {
if (firstpass) {
if (dihedralflag == 1)
error->all(FLERR, "Must read Dihedral Type Labels before Dihedrals");
error->all(FLERR, Error::ARGZERO, "Must read Dihedral Type Labels before Dihedrals");
dlabelflag = 1;
typelabels(Atom::DIHEDRAL);
} else
@ -990,7 +1015,7 @@ void ReadData::command(int narg, char **arg)
if (nimpropertypes) {
if (firstpass) {
if (improperflag == 1)
error->all(FLERR, "Must read Improper Type Labels before Impropers");
error->all(FLERR, Error::ARGZERO, "Must read Improper Type Labels before Impropers");
ilabelflag = 1;
typelabels(Atom::IMPROPER);
} else
@ -1013,17 +1038,20 @@ void ReadData::command(int narg, char **arg)
break;
}
if (i == nfix)
error->all(FLERR, "Unknown identifier in data file: {}{}", keyword, utils::errorurl(1));
error->all(FLERR, Error::ARGZERO, "Unknown identifier in data file: {}{}", keyword,
utils::errorurl(1));
} else
error->all(FLERR, "Unknown identifier in data file: {}{}", keyword, utils::errorurl(1));
error->all(FLERR, Error::ARGZERO, "Unknown identifier in data file: {}{}", keyword,
utils::errorurl(1));
parse_keyword(0);
}
// error if natoms > 0 yet no atoms were read
if (natoms > 0 && atomflag == 0) error->all(FLERR, "No valid atoms found in data file");
if (natoms > 0 && atomflag == 0)
error->all(FLERR, Error::ARGZERO, "No valid atoms found in data file");
// close file

View File

@ -902,7 +902,7 @@ int utils::expand_args(const char *file, int line, int narg, char **arg, int mod
// match grids
if (strmatch(word, "^[cf]_\\w+:\\w+:\\w+\\[\\d*\\*\\d*\\]")) {
auto gridid = utils::parse_grid_id(FLERR, word, lmp->error);
auto gridid = utils::parse_grid_id(file, line, word, lmp->error);
size_t first = gridid[2].find('[');
size_t second = gridid[2].find(']', first + 1);
@ -1046,6 +1046,9 @@ int utils::expand_args(const char *file, int line, int narg, char **arg, int mod
if (nhi < MAXSMALLINT) {
nmax = nhi;
expandflag = 1;
} else {
lmp->error->all(file, line, ioffset + iarg,
"Upper bound required to expand vector style variable {}", id);
}
}
}

View File

@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.16)
project(lammps-gui VERSION 1.6.13 LANGUAGES CXX)
project(lammps-gui VERSION 1.6.14 LANGUAGES CXX)
set(CMAKE_AUTOUIC ON)
set(CMAKE_AUTOMOC ON)

View File

@ -55,6 +55,11 @@
</provides>
<releases>
<release version="1.6.14" timestamp="1747828753">
<description>
...
</description>
</release>
<release version="1.6.13" timestamp="1743734509">
<description>
Change working directory to user's home dir if initial directory is "/" or the Application folder
@ -68,7 +73,7 @@
Double-click on highlighted URL opens it in web browser. Also available via context menu.
</description>
</release>
<release version="1.6.12" timestamp="1734890080">
<release version="1.6.12" timestamp="1734890080">
<description>
Update Tutorial menu entries to cover all 8 tutorials
Highlight warnings and error messages in Output window

View File

@ -1,30 +1,23 @@
# Add the contents of this file to either $HOME/.magic
# or /etc/magic so that the file(1) command can recognize
# These definitions are included in file since version 5.41.
# https://www.darwinsys.com/file/ https://github.com/file/file
# If you have an older version of the software, add the
# contents of this file to either $HOME/.magic or
# /etc/magic so that the file(1) command can recognize
# your LAMMPS files and print some useful information.
# Last change: 2021-03-15 by akohlmey@gmail.com
#
# Last change: 2025-05-28 by akohlmey@gmail.com
# Binary restart file for the LAMMPS MD code, https://www.lammps.org
# written on a little endian machine
# Binary restart file for the LAMMPS MD code
0 string LammpS\ RestartT LAMMPS binary restart file
>16 lelong 0x0001
>>20 lelong x (rev %d),
>>>32 string x Version %s,
>>>>&41 string x Units %s,
>>>>>&5 lequad x Timestep %lld,
>>>>>>0x10 lelong 0x0001 Little Endian
# written on a big endian machine
0 string LammpS\ RestartT LAMMPS binary restart file
>16 lelong 0x1000 Big Endian
>>20 belong x (rev %d),
>>>32 string x Version %s,
>>>>&41 string x Units %s,
>>>>>&5 bequad x Timestep %lld,
>>>>>>0x10 lelong 0x1000 Big Endian
>0x14 long x (rev %d),
>>0x20 string x Version %s,
>>>&41 string x Units %s,
>>>>&5 quad x Timestep %lld,
>>>>>0x10 lelong 0x0001 Little Endian
>>>>>0x10 lelong 0x1000 Big Endian
# Atom style binary dump file for the LAMMPS MD code, https://www.lammps.org
# written on a little endian machine
0 lequad -8
>0x08 string DUMPATOM LAMMPS atom style binary dump
@ -39,7 +32,7 @@
>>>0x10 lelong 0x1000 Big Endian,
>>>>0x18 bequad x First time step: %lld
# Atom style binary dump file for the LAMMPS MD code, https://www.lammps.org
# Atom style binary dump file for the LAMMPS MD code
# written on a little endian machine
0 lequad -10
>0x08 string DUMPCUSTOM LAMMPS custom style binary dump
@ -64,7 +57,7 @@
>0x12 string msi2lmp written by msi2lmp
>0x11 string via\ write_data written by LAMMPS
# Data file written by OVITO
# LAMMPS data file written by OVITO
0 string #\ LAMMPS\ data\ file LAMMPS data file
>0x13 string written\ by\ OVITO written by OVITO

View File

@ -2,8 +2,8 @@ BootStrap: docker
From: fedora:41
%setup
curl -L -o musl-gcc-f37.tar.gz https://download.lammps.org/static/musl-gcc-f37.tar.gz
cp musl-gcc-f37.tar.gz ${APPTAINER_ROOTFS}
curl -L -o musl-gcc-f41.tar.gz https://download.lammps.org/static/musl-gcc-f41.tar.gz
cp musl-gcc-f41.tar.gz ${APPTAINER_ROOTFS}
%post
dnf -y update
@ -52,8 +52,8 @@ From: fedora:41
dnf clean all
# install musl-libc Linux-2-Linux cross-compiler
tar -C /usr/ -xvf /musl-gcc-f37.tar.gz
rm -f /musl-gcc-f37.tar.gz
tar -C /usr/ -xvf /musl-gcc-f41.tar.gz
rm -f /musl-gcc-f41.tar.gz
# install NSIS EnVar plugin
curl -L -o EnVar_plugin.zip https://nsis.sourceforge.io/mediawiki/images/7/7f/EnVar_plugin.zip

View File

@ -153,7 +153,7 @@ TEST_F(Advanced_utils, expand_args)
// disable use of input->command and input->arg which point to the last run command right now
lmp->input->command = nullptr;
lmp->input->arg = nullptr;
lmp->input->arg = nullptr;
auto narg = utils::expand_args(FLERR, oarg, args, 0, earg, lmp);
EXPECT_EQ(narg, 16);
@ -214,8 +214,13 @@ TEST_F(Advanced_utils, expand_args)
args[5][7] = '3';
delete[] args[4];
args[4] = utils::strdup("v_temp[2*]");
TEST_FAILURE("ERROR: Upper bound required to expand vector style variable temp.*",
utils::expand_args(FLERR, oarg, args, 0, earg, lmp););
delete[] args[4];
args[4] = utils::strdup("v_temp[*2]");
narg = utils::expand_args(FLERR, oarg, args, 0, earg, lmp);
EXPECT_EQ(narg, 13);
EXPECT_EQ(narg, 14);
EXPECT_STREQ(earg[0], "v_step");
EXPECT_STREQ(earg[1], "c_temp");
EXPECT_STREQ(earg[2], "f_1[1]");
@ -224,11 +229,12 @@ TEST_F(Advanced_utils, expand_args)
EXPECT_STREQ(earg[5], "c_temp[2]");
EXPECT_STREQ(earg[6], "c_temp[3]");
EXPECT_STREQ(earg[7], "c_temp[4]");
EXPECT_STREQ(earg[8], "v_temp[2*]");
EXPECT_STREQ(earg[9], "c_gofr[3*]");
EXPECT_STREQ(earg[10], "c_gofr[1][*]");
EXPECT_STREQ(earg[11], "c_gofr[*2][2]");
EXPECT_STREQ(earg[12], "c_gofr[*][*]");
EXPECT_STREQ(earg[8], "v_temp[1]");
EXPECT_STREQ(earg[9], "v_temp[2]");
EXPECT_STREQ(earg[10], "c_gofr[3*]");
EXPECT_STREQ(earg[11], "c_gofr[1][*]");
EXPECT_STREQ(earg[12], "c_gofr[*2][2]");
EXPECT_STREQ(earg[13], "c_gofr[*][*]");
for (int i = 0; i < narg; ++i)
delete[] earg[i];

View File

@ -1,6 +1,5 @@
---
lammps_version: 4 Feb 2025
tags: generated
date_generated: Fri Feb 21 16:17:55 2025
epsilon: 5e-13
skip_tests: numdiff

View File

@ -1,6 +1,5 @@
---
lammps_version: 19 Nov 2024
tags: generated
date_generated: Fri Jan 10 13:54:41 2025
epsilon: 2.5e-13
skip_tests:

View File

@ -7,7 +7,8 @@ prerequisites: ! |
atom full
bond fene
pre_commands: ! ""
post_commands: ! ""
post_commands: ! |
special_bonds lj/coul 0.0 1.0 1.0
input_file: in.fourmol
bond_style: fene
bond_coeff: ! |

View File

@ -7,7 +7,8 @@ prerequisites: ! |
atom full
bond fene/expand
pre_commands: ! ""
post_commands: ! ""
post_commands: ! |
special_bonds lj/coul 0.0 1.0 1.0
input_file: in.fourmol
bond_style: fene/expand
bond_coeff: ! |

View File

@ -7,7 +7,8 @@ prerequisites: ! |
atom full
bond fene/nm
pre_commands: ! ""
post_commands: ! ""
post_commands: ! |
special_bonds lj/coul 0.0 1.0 1.0
input_file: in.fourmol
bond_style: fene/nm
bond_coeff: ! |

View File

@ -1,6 +1,5 @@
---
lammps_version: 29 Aug 2024
tags: generated
date_generated: Tue Nov 12 08:53:18 2024
epsilon: 2e-13
skip_tests:

View File

@ -1,6 +1,5 @@
---
lammps_version: 29 Aug 2024
tags: generated
date_generated: Tue Nov 12 08:53:32 2024
epsilon: 2e-13
skip_tests:

View File

@ -1,6 +1,5 @@
---
lammps_version: 2 Apr 2025
tags: generated
date_generated: Fri Apr 18 00:29:06 2025
epsilon: 5e-14
skip_tests: