Merge branch 'develop' into pair_d3

This commit is contained in:
Axel Kohlmeyer
2025-01-14 16:05:29 -05:00
1000 changed files with 35345 additions and 27449 deletions

View File

@ -67,7 +67,6 @@ jobs:
-D PKG_MANIFOLD=on \
-D PKG_MDI=on \
-D PKG_MGPT=on \
-D PKG_ML-PACE=on \
-D PKG_ML-RANN=on \
-D PKG_MOLFILE=on \
-D PKG_NETCDF=on \

View File

@ -0,0 +1,53 @@
# GitHub action to build LAMMPS-GUI as a flatpak bundle
name: "Build LAMMPS-GUI as flatpak bundle"
on:
push:
branches:
- develop
workflow_dispatch:
concurrency:
group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{github.event_name == 'pull_request'}}
jobs:
build:
name: LAMMPS-GUI flatpak build
if: ${{ github.repository == 'lammps/lammps' }}
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Install extra packages
run: |
sudo apt-get update
sudo apt-get install -y ccache \
libeigen3-dev \
libcurl4-openssl-dev \
mold \
ninja-build \
python3-dev \
flatpak \
flatpak-builder
- name: Set up access to flatpak repo
run: flatpak --user remote-add --if-not-exists flathub https://dl.flathub.org/repo/flathub.flatpakrepo
- name: Build flatpak
run: |
mkdir flatpak-state
sed -i -e 's/branch:.*/branch: develop/' tools/lammps-gui/org.lammps.lammps-gui.yml
flatpak-builder --force-clean --verbose --repo=flatpak-repo \
--install-deps-from=flathub --state-dir=flatpak-state \
--user --ccache --default-branch=${{ github.ref_name }} \
flatpak-build tools/lammps-gui/org.lammps.lammps-gui.yml
flatpak build-bundle --runtime-repo=https://flathub.org/repo/flathub.flatpakrepo \
--verbose flatpak-repo LAMMPS-Linux-x86_64-GUI.flatpak \
org.lammps.lammps-gui ${{ github.ref_name }}
flatpak install -y -v --user LAMMPS-Linux-x86_64-GUI.flatpak
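# Not part of the workflow above: a hedged sketch of how the resulting bundle
# could be tested locally (file name and application id taken from the steps above).
flatpak install --user LAMMPS-Linux-x86_64-GUI.flatpak
flatpak run org.lammps.lammps-gui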

View File

@ -3,6 +3,9 @@
# CMake build system
# This file is part of LAMMPS
cmake_minimum_required(VERSION 3.16)
if(CMAKE_VERSION VERSION_LESS 3.20)
message(WARNING "LAMMPS is planning to require at least CMake version 3.20 by Summer 2025. Please upgrade!")
endif()
########################################
# set policy to silence warnings about ignoring <PackageName>_ROOT but use it
if(POLICY CMP0074)
@ -95,26 +98,23 @@ check_for_autogen_files(${LAMMPS_SOURCE_DIR})
#####################################################################
include(CheckIncludeFileCXX)
# set required compiler flags and compiler/CPU arch specific optimizations
# set required compiler flags, apply checks, and compiler/CPU arch specific optimizations
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# Intel classic compilers version 19 are broken and fail to compile the embedded fmtlib
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 20.0)
message(ERROR "Intel classic compiler version ${CMAKE_CXX_COMPILER_VERSION} is too old")
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Qrestrict")
endif()
if(CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 17.3 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 17.4)
set(CMAKE_TUNE_DEFAULT "/QxCOMMON-AVX512")
else()
set(CMAKE_TUNE_DEFAULT "/QxHost")
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -restrict")
if(CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 17.3 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 17.4)
set(CMAKE_TUNE_DEFAULT "-xCOMMON-AVX512")
else()
set(CMAKE_TUNE_DEFAULT "-xHost -fp-model fast=2 -no-prec-div -qoverride-limits -diag-disable=10441 -diag-disable=11074 -diag-disable=11076 -diag-disable=2196")
endif()
endif()
endif()
# silence excessive warnings for new Intel Compilers
if(CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM")
@ -144,16 +144,28 @@ if((PKG_KOKKOS) AND (Kokkos_ENABLE_CUDA) AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL
set(CMAKE_TUNE_DEFAULT "${CMAKE_TUNE_DEFAULT}" "-Xcudafe --diag_suppress=unrecognized_pragma,--diag_suppress=128")
endif()
# we require C++11 without extensions. Kokkos requires at least C++17 (currently)
# we *require* C++11 without extensions but prefer C++17.
# Kokkos requires at least C++17 (currently)
if(NOT CMAKE_CXX_STANDARD)
if(cxx_std_17 IN_LIST CMAKE_CXX_COMPILE_FEATURES)
set(CMAKE_CXX_STANDARD 17)
else()
set(CMAKE_CXX_STANDARD 11)
endif()
endif()
if(CMAKE_CXX_STANDARD LESS 11)
message(FATAL_ERROR "C++ standard must be set to at least 11")
endif()
if(CMAKE_CXX_STANDARD LESS 17)
message(WARNING "Selecting C++17 standard is preferred over C++${CMAKE_CXX_STANDARD}")
endif()
if(PKG_KOKKOS AND (CMAKE_CXX_STANDARD LESS 17))
set(CMAKE_CXX_STANDARD 17)
endif()
# turn off C++17 check in lmptype.h
if(LAMMPS_CXX11)
add_compile_definitions(LAMMPS_CXX11)
endif()
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF CACHE BOOL "Use compiler extensions")
# ugly hacks for MSVC which by default always reports an old C++ standard in the __cplusplus macro
@ -347,6 +359,17 @@ foreach(PKG ${STANDARD_PACKAGES} ${SUFFIX_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
set(DEPRECATED_PACKAGES AWPMD ATC POEMS)
foreach(PKG ${DEPRECATED_PACKAGES})
if(PKG_${PKG})
message(WARNING
"The ${PKG} package will be removed from LAMMPS in Summer 2025 due to lack of "
"maintenance and use of code constructs that conflict with modern C++ compilers "
"and standards. Please contact developers@lammps.org if you have any concerns "
"about this step.")
endif()
endforeach()
######################################################
# packages with special compiler needs or external libs
######################################################
@ -579,6 +602,16 @@ foreach(PKG_WITH_INCL KSPACE PYTHON ML-IAP VORONOI COLVARS ML-HDNNP MDI MOLFILE
endif()
endforeach()
# settings for misc packages and styles
if(PKG_MISC)
option(LAMMPS_ASYNC_IMD "Asynchronous IMD processing" OFF)
mark_as_advanced(LAMMPS_ASYNC_IMD)
if(LAMMPS_ASYNC_IMD)
target_compile_definitions(lammps PRIVATE -DLAMMPS_ASYNC_IMD)
message(STATUS "Using IMD in asynchronous mode")
endif()
endif()
# optionally enable building script wrappers using swig
option(WITH_SWIG "Build scripting language wrappers with SWIG" OFF)
if(WITH_SWIG)
@ -1078,12 +1111,15 @@ if(BUILD_TOOLS)
message(STATUS "<<< Building Tools >>>")
endif()
if(BUILD_LAMMPS_GUI)
message(STATUS "<<< Building LAMMPS GUI >>>")
message(STATUS "<<< Building LAMMPS-GUI >>>")
if(LAMMPS_GUI_USE_PLUGIN)
message(STATUS "Loading LAMMPS library as plugin at run time")
else()
message(STATUS "Linking LAMMPS library at compile time")
endif()
if(BUILD_WHAM)
message(STATUS "<<< Building WHAM >>>")
endif()
endif()
if(ENABLE_TESTING)
message(STATUS "<<< Building Unit Tests >>>")

View File

@ -7,26 +7,13 @@ endif()
########################################################################
# consistency checks and Kokkos options/settings required by LAMMPS
if(Kokkos_ENABLE_CUDA)
option(Kokkos_ENABLE_IMPL_CUDA_MALLOC_ASYNC "CUDA asynchronous malloc support" OFF)
mark_as_advanced(Kokkos_ENABLE_IMPL_CUDA_MALLOC_ASYNC)
if(Kokkos_ENABLE_IMPL_CUDA_MALLOC_ASYNC)
message(STATUS "KOKKOS: CUDA malloc async support enabled")
else()
message(STATUS "KOKKOS: CUDA malloc async support disabled")
endif()
endif()
if(Kokkos_ENABLE_HIP)
option(Kokkos_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS "Enable multiple kernel instantiations with HIP" ON)
mark_as_advanced(Kokkos_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS)
option(Kokkos_ENABLE_ROCTHRUST "Use RoCThrust library" ON)
mark_as_advanced(Kokkos_ENABLE_ROCTHRUST)
endif()
if(Kokkos_ARCH_AMD_GFX942 OR Kokkos_ARCH_AMD_GFX940)
option(Kokkos_ENABLE_IMPL_HIP_UNIFIED_MEMORY "Enable unified memory with HIP" ON)
mark_as_advanced(Kokkos_ENABLE_IMPL_HIP_UNIFIED_MEMORY)
endif()
endif()
# Adding OpenMP compiler flags without the checks done for
# BUILD_OMP can result in compile failures. Enforce consistency.
if(Kokkos_ENABLE_OPENMP)
@ -70,8 +57,8 @@ if(DOWNLOAD_KOKKOS)
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_CXX_EXTENSIONS=${CMAKE_CXX_EXTENSIONS}")
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}")
include(ExternalProject)
set(KOKKOS_URL "https://github.com/kokkos/kokkos/archive/4.4.01.tar.gz" CACHE STRING "URL for KOKKOS tarball")
set(KOKKOS_MD5 "de6ee80d00b6212b02bfb7f1e71a8392" CACHE STRING "MD5 checksum of KOKKOS tarball")
set(KOKKOS_URL "https://github.com/kokkos/kokkos/archive/4.5.01.tar.gz" CACHE STRING "URL for KOKKOS tarball")
set(KOKKOS_MD5 "4d832aa0284169d9e3fbae3165286bc6" CACHE STRING "MD5 checksum of KOKKOS tarball")
mark_as_advanced(KOKKOS_URL)
mark_as_advanced(KOKKOS_MD5)
GetFallbackURL(KOKKOS_URL KOKKOS_FALLBACK)
@ -96,7 +83,7 @@ if(DOWNLOAD_KOKKOS)
add_dependencies(LAMMPS::KOKKOSCORE kokkos_build)
add_dependencies(LAMMPS::KOKKOSCONTAINERS kokkos_build)
elseif(EXTERNAL_KOKKOS)
find_package(Kokkos 4.4.01 REQUIRED CONFIG)
find_package(Kokkos 4.5.01 REQUIRED CONFIG)
target_link_libraries(lammps PRIVATE Kokkos::kokkos)
else()
set(LAMMPS_LIB_KOKKOS_SRC_DIR ${LAMMPS_LIB_SOURCE_DIR}/kokkos)

View File

@ -1,12 +1,17 @@
# PACE library support for ML-PACE package
find_package(pace QUIET)
if(pace_FOUND)
find_package(pace)
target_link_libraries(lammps PRIVATE pace::pace)
else()
# set policy to silence warnings about timestamps of downloaded files. review occasionally if it may be set to NEW
if(POLICY CMP0135)
cmake_policy(SET CMP0135 OLD)
endif()
set(PACELIB_URL "https://github.com/ICAMS/lammps-user-pace/archive/refs/tags/v.2023.11.25.fix.tar.gz" CACHE STRING "URL for PACE evaluator library sources")
set(PACELIB_MD5 "b45de9a633f42ed65422567e3ce56f9f" CACHE STRING "MD5 checksum of PACE evaluator library tarball")
set(PACELIB_URL "https://github.com/ICAMS/lammps-user-pace/archive/refs/tags/v.2023.11.25.fix2.tar.gz" CACHE STRING "URL for PACE evaluator library sources")
set(PACELIB_MD5 "a53bd87cfee8b07d9f44bc17aad69c3f" CACHE STRING "MD5 checksum of PACE evaluator library tarball")
mark_as_advanced(PACELIB_URL)
mark_as_advanced(PACELIB_MD5)
GetFallbackURL(PACELIB_URL PACELIB_FALLBACK)
@ -42,9 +47,16 @@ else()
get_newest_file(${CMAKE_BINARY_DIR}/lammps-user-pace-* lib-pace)
endif()
# some preinstalled yaml-cpp versions don't provide a namespaced target
find_package(yaml-cpp QUIET)
if(TARGET yaml-cpp AND NOT TARGET yaml-cpp::yaml-cpp)
add_library(yaml-cpp::yaml-cpp ALIAS yaml-cpp)
endif()
add_subdirectory(${lib-pace} build-pace)
set_target_properties(pace PROPERTIES CXX_EXTENSIONS ON OUTPUT_NAME lammps_pace${LAMMPS_MACHINE})
if(CMAKE_PROJECT_NAME STREQUAL "lammps")
target_link_libraries(lammps PRIVATE pace)
endif()
endif()

View File

@ -1,3 +1,5 @@
# FindVTK requires that C support is enabled when looking for MPI support
enable_language(C)
find_package(VTK REQUIRED NO_MODULE)
target_compile_definitions(lammps PRIVATE -DLAMMPS_VTK)
if (VTK_MAJOR_VERSION VERSION_LESS 9.0)

View File

@ -1,10 +1,14 @@
Build LAMMPS
============
LAMMPS is built as a library and an executable from source code using
either traditional makefiles for use with GNU make (which may require
manual editing), or using a build environment generated by CMake (Unix
Makefiles, Ninja, Xcode, Visual Studio, KDevelop, CodeBlocks and more).
LAMMPS is built as a library and an executable from source code using a
build environment generated by CMake (Unix Makefiles, Ninja, Xcode,
Visual Studio, KDevelop, CodeBlocks and more depending on the platform).
Using CMake is the preferred way to build LAMMPS. In addition, LAMMPS
can be compiled using the legacy build system based on traditional
makefiles for use with GNU make (which may require manual editing).
Support for the legacy build system is slowly being phased out and may
not be available for all optional features.
As an alternative, you can download a package with pre-built executables
or automated build trees, as described in the :doc:`Install <Install>`

View File

@ -160,7 +160,7 @@ with the OpenMP 3.1 semantics used in LAMMPS for maximal compatibility
with compiler versions in use. If compilation with OpenMP enabled fails
because of your compiler requiring strict OpenMP 4.0 semantics, you can
change the behavior by adding ``-D LAMMPS_OMP_COMPAT=4`` to the
``LMP_INC`` variable in your makefile, or add it to the command line
``LMP_INC`` variable in your makefile, or add it to the command-line flags
while configuring with CMake. LAMMPS will auto-detect a suitable setting
for most GNU, Clang, and Intel compilers.
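As a hedged illustration (the makefile name and the build directory are placeholders, not taken from this page), the two ways of applying this setting could look like:
.. code-block:: bash
# traditional make: append the define to LMP_INC in your Makefile.machine
# LMP_INC = -DLAMMPS_OMP_COMPAT=4 ...
# CMake: add the define to the configuration command line
cmake -D LAMMPS_OMP_COMPAT=4 ../cmake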
@ -502,6 +502,8 @@ using CMake or Make.
# chain.x, micelle2d.x, msi2lmp, phana,
# stl_bin2txt
-D BUILD_LAMMPS_GUI=value # yes or no (default). Build LAMMPS-GUI
-D BUILD_WHAM=value # yes (default). Download and build WHAM;
# only available for BUILD_LAMMPS_GUI=yes
The generated binaries will also become part of the LAMMPS installation
(see below).
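For instance, a hedged configuration sketch (the source/build directory layout is assumed; only the two options are taken from the listing above):
.. code-block:: bash
# enable building LAMMPS-GUI; WHAM is downloaded and built by default alongside it
cmake -D BUILD_LAMMPS_GUI=yes ../cmake
cmake --build .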

View File

@ -8,7 +8,7 @@ packages. Links to those pages on the :doc:`Build overview <Build>`
page.
The following text assumes some familiarity with CMake and focuses on
using the command line tool ``cmake`` and what settings are supported
using the command-line tool ``cmake`` and what settings are supported
for building LAMMPS. A more detailed tutorial on how to use CMake
itself, the text mode or graphical user interface, to change the
generated output files for different build tools and development
@ -16,7 +16,7 @@ environments is on a :doc:`separate page <Howto_cmake>`.
.. note::
LAMMPS currently requires that CMake version 3.16 or later is available.
LAMMPS currently requires that CMake version 3.20 or later is available.
.. warning::
@ -32,19 +32,19 @@ environments is on a :doc:`separate page <Howto_cmake>`.
Advantages of using CMake
^^^^^^^^^^^^^^^^^^^^^^^^^
CMake is an alternative to compiling LAMMPS in the traditional way
through :doc:`(manually customized) makefiles <Build_make>`. Using
CMake has multiple advantages that are specifically helpful for
people with limited experience in compiling software or for people
that want to modify or extend LAMMPS.
CMake is the preferred way of compiling LAMMPS, as opposed to the legacy
build system based on GNU make and :doc:`(manually customized)
makefiles <Build_make>`. Using CMake has multiple advantages that are
specifically helpful for people with limited experience in compiling
software or for people that want to modify or extend LAMMPS.
- CMake can detect available hardware, tools, features, and libraries
and adapt the LAMMPS default build configuration accordingly.
- CMake can generate files for different build tools and integrated
development environments (IDE).
- CMake supports customization of settings with a command line, text
- CMake supports customization of settings with a command-line, text
mode, or graphical user interface. No manual editing of files,
knowledge of file formats or complex command line syntax is required.
knowledge of file formats or complex command-line syntax is required.
- All enabled components are compiled in a single build operation.
- Automated dependency tracking for all files and configuration options.
- Support for true out-of-source compilation. Multiple configurations
@ -68,7 +68,7 @@ that purpose you can use either the command-line utility ``cmake`` (or
graphical utility ``cmake-gui``, or use them interchangeably. The
second step is then the compilation and linking of all objects,
libraries, and executables using the selected build tool. Here is a
minimal example using the command line version of CMake to build LAMMPS
minimal example using the command-line version of CMake to build LAMMPS
with no add-on packages enabled and no customization:
.. code-block:: bash
@ -131,7 +131,7 @@ file called ``CMakeLists.txt`` (for LAMMPS it is located in the
configuration step. The cache file contains all current CMake settings.
To modify settings, enable or disable features, you need to set
*variables* with either the ``-D`` command line flag (``-D
*variables* with either the ``-D`` command-line flag (``-D
VARIABLE1_NAME=value``) or change them in the text mode of the graphical
user interface. The ``-D`` flag can be used several times in one command.
@ -141,11 +141,11 @@ a different compiler tool chain. Those are loaded with the ``-C`` flag
(``-C ../cmake/presets/basic.cmake``). This step would only be needed
once, as the settings from the preset files are stored in the
``CMakeCache.txt`` file. It is also possible to customize the build
by adding one or more ``-D`` flags to the CMake command line.
by adding one or more ``-D`` flags to the CMake command.
Generating files for alternate build tools (e.g. Ninja) and project files
for IDEs like Eclipse, CodeBlocks, or Kate can be selected using the ``-G``
command line flag. A list of available generator settings for your
command-line flag. A list of available generator settings for your
specific CMake version is given when running ``cmake --help``.
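As an illustration (a sketch only; the preset file and the BUILD_MPI variable are mentioned elsewhere in this manual, the generator choice is arbitrary):
.. code-block:: bash
# load a preset, override one variable, and generate Ninja build files
cmake -C ../cmake/presets/basic.cmake -D BUILD_MPI=on -G Ninja ../cmake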
.. _cmake_multiconfig:

View File

@ -263,9 +263,9 @@ will be skipped if prerequisite features are not available in LAMMPS.
time. Preference is given to parts of the code base that are easy to
test or commonly used.
Tests as shown by the ``ctest`` program are command lines defined in the
Tests as shown by the ``ctest`` program are commands defined in the
``CMakeLists.txt`` files in the ``unittest`` directory tree. A few
tests simply execute LAMMPS with specific command line flags and check
tests simply execute LAMMPS with specific command-line flags and check
the output to the screen for expected content. A large number of unit
tests are special tests programs using the `GoogleTest framework
<https://github.com/google/googletest/>`_ and linked to the LAMMPS
@ -420,7 +420,7 @@ during MD timestepping and manipulate per-atom properties like
positions, velocities, and forces. For those fix styles, testing can be
done in a very similar fashion as for force fields and thus there is a
test program `test_fix_timestep` that shares a lot of code, properties,
and command line flags with the force field style testers described in
and command-line flags with the force field style testers described in
the previous section.
This tester will set up a small molecular system run with verlet run
@ -642,10 +642,10 @@ The following target are available for both, GNU make and CMake:
.. _gh-cli:
GitHub command line interface
GitHub command-line interface
-----------------------------
GitHub has developed a `command line tool <https://cli.github.com>`_
GitHub has developed a `command-line tool <https://cli.github.com>`_
to interact with the GitHub website via a command called ``gh``.
This is extremely convenient when working with a Git repository hosted
on GitHub (like LAMMPS). It is thus highly recommended to install it

View File

@ -48,6 +48,7 @@ This is the list of packages that may require additional steps.
* :ref:`LEPTON <lepton>`
* :ref:`MACHDYN <machdyn>`
* :ref:`MDI <mdi>`
* :ref:`MISC <misc>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-IAP <mliap>`
* :ref:`ML-PACE <ml-pace>`
@ -209,7 +210,7 @@ necessary for ``hipcc`` and the linker to work correctly.
Using the CHIP-SPV implementation of HIP is supported. It allows one to
run HIP code on Intel GPUs via the OpenCL or Level Zero back ends. To use
CHIP-SPV, you must set ``-DHIP_USE_DEVICE_SORT=OFF`` in your CMake
command line as CHIP-SPV does not yet support hipCUB. As of Summer 2022,
command-line as CHIP-SPV does not yet support hipCUB. As of Summer 2022,
the use of HIP for Intel GPUs is experimental. You should only use this
option in preparations to run on Aurora system at Argonne.
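A hedged configuration sketch (only ``-DHIP_USE_DEVICE_SORT=OFF`` is taken from the text above; the package and backend selections are illustrative):
.. code-block:: bash
# build the GPU package for HIP with CHIP-SPV; disable the hipCUB-based device sort
cmake -D PKG_GPU=on -D GPU_API=hip -D HIP_USE_DEVICE_SORT=OFF ../cmake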
@ -232,7 +233,7 @@ option in preparations to run on Aurora system at Argonne.
.. code:: bash
# CUDA target (not recommended, use GPU_ARCH=cuda)
# CUDA target (not recommended, use GPU_API=cuda)
# !!! DO NOT set CMAKE_CXX_COMPILER !!!
export HIP_PLATFORM=nvcc
export HIP_PATH=/path/to/HIP/install
@ -421,9 +422,10 @@ minutes to hours) to build. Of course you only need to do that once.)
cmake build system. The ``lib/kim/Install.py`` script supports a
``CMAKE`` environment variable if the cmake executable is named other
than ``cmake`` on your system. Additional environment variables may be
provided on the command line for use by cmake. For example, to use the
``cmake3`` executable and tell it to use the gnu version 11 compilers
to build KIM, one could use the following command line.
set with the ``make`` command for use by cmake. For example, to use the
``cmake3`` executable and tell it to use the GNU version 11 compilers
called ``g++-11``, ``gcc-11`` and ``gfortran-11`` to build KIM, one
could use the following command.
.. code-block:: bash
@ -546,16 +548,7 @@ They must be specified in uppercase.
- Local machine
* - AMDAVX
- HOST
- AMD 64-bit x86 CPU (AVX 1)
* - ZEN
- HOST
- AMD Zen class CPU (AVX 2)
* - ZEN2
- HOST
- AMD Zen2 class CPU (AVX 2)
* - ZEN3
- HOST
- AMD Zen3 class CPU (AVX 2)
- AMD chip
* - ARMV80
- HOST
- ARMv8.0 Compatible CPU
@ -571,105 +564,126 @@ They must be specified in uppercase.
* - A64FX
- HOST
- ARMv8.2 with SVE Support
* - ARMV9_GRACE
- HOST
- ARMv9 NVIDIA Grace CPU
* - SNB
- HOST
- Intel Sandy/Ivy Bridge CPU (AVX 1)
- Intel Sandy/Ivy Bridge CPUs
* - HSW
- HOST
- Intel Haswell CPU (AVX 2)
- Intel Haswell CPUs
* - BDW
- HOST
- Intel Broadwell Xeon E-class CPU (AVX 2 + transactional mem)
* - SKL
- HOST
- Intel Skylake Client CPU
* - SKX
- HOST
- Intel Skylake Xeon Server CPU (AVX512)
- Intel Broadwell Xeon E-class CPUs
* - ICL
- HOST
- Intel Ice Lake Client CPU (AVX512)
- Intel Ice Lake Client CPUs (AVX512)
* - ICX
- HOST
- Intel Ice Lake Xeon Server CPU (AVX512)
* - SPR
- Intel Ice Lake Xeon Server CPUs (AVX512)
* - SKL
- HOST
- Intel Sapphire Rapids Xeon Server CPU (AVX512)
- Intel Skylake Client CPUs
* - SKX
- HOST
- Intel Skylake Xeon Server CPUs (AVX512)
* - KNC
- HOST
- Intel Knights Corner Xeon Phi
* - KNL
- HOST
- Intel Knights Landing Xeon Phi
* - SPR
- HOST
- Intel Sapphire Rapids Xeon Server CPUs (AVX512)
* - POWER8
- HOST
- IBM POWER8 CPU
- IBM POWER8 CPUs
* - POWER9
- HOST
- IBM POWER9 CPU
- IBM POWER9 CPUs
* - ZEN
- HOST
- AMD Zen architecture
* - ZEN2
- HOST
- AMD Zen2 architecture
* - ZEN3
- HOST
- AMD Zen3 architecture
* - RISCV_SG2042
- HOST
- SG2042 (RISC-V) CPU
- SG2042 (RISC-V) CPUs
* - RISCV_RVA22V
- HOST
- RVA22V (RISC-V) CPUs
* - KEPLER30
- GPU
- NVIDIA Kepler generation CC 3.0 GPU
- NVIDIA Kepler generation CC 3.0
* - KEPLER32
- GPU
- NVIDIA Kepler generation CC 3.2 GPU
- NVIDIA Kepler generation CC 3.2
* - KEPLER35
- GPU
- NVIDIA Kepler generation CC 3.5 GPU
- NVIDIA Kepler generation CC 3.5
* - KEPLER37
- GPU
- NVIDIA Kepler generation CC 3.7 GPU
- NVIDIA Kepler generation CC 3.7
* - MAXWELL50
- GPU
- NVIDIA Maxwell generation CC 5.0 GPU
- NVIDIA Maxwell generation CC 5.0
* - MAXWELL52
- GPU
- NVIDIA Maxwell generation CC 5.2 GPU
- NVIDIA Maxwell generation CC 5.2
* - MAXWELL53
- GPU
- NVIDIA Maxwell generation CC 5.3 GPU
- NVIDIA Maxwell generation CC 5.3
* - PASCAL60
- GPU
- NVIDIA Pascal generation CC 6.0 GPU
- NVIDIA Pascal generation CC 6.0
* - PASCAL61
- GPU
- NVIDIA Pascal generation CC 6.1 GPU
- NVIDIA Pascal generation CC 6.1
* - VOLTA70
- GPU
- NVIDIA Volta generation CC 7.0 GPU
- NVIDIA Volta generation CC 7.0
* - VOLTA72
- GPU
- NVIDIA Volta generation CC 7.2 GPU
- NVIDIA Volta generation CC 7.2
* - TURING75
- GPU
- NVIDIA Turing generation CC 7.5 GPU
- NVIDIA Turing generation CC 7.5
* - AMPERE80
- GPU
- NVIDIA Ampere generation CC 8.0 GPU
- NVIDIA Ampere generation CC 8.0
* - AMPERE86
- GPU
- NVIDIA Ampere generation CC 8.6 GPU
- NVIDIA Ampere generation CC 8.6
* - ADA89
- GPU
- NVIDIA Ada Lovelace generation CC 8.9 GPU
- NVIDIA Ada generation CC 8.9
* - HOPPER90
- GPU
- NVIDIA Hopper generation CC 9.0 GPU
- NVIDIA Hopper generation CC 9.0
* - AMD_GFX906
- GPU
- AMD GPU MI50/MI60
- AMD GPU MI50/60
* - AMD_GFX908
- GPU
- AMD GPU MI100
* - AMD_GFX90A
- GPU
- AMD GPU MI200
* - AMD_GFX940
- GPU
- AMD GPU MI300
* - AMD_GFX942
- GPU
- AMD GPU MI300
* - AMD_GFX942_APU
- GPU
- AMD APU MI300A
* - AMD_GFX1030
- GPU
- AMD GPU V620/W6800
@ -678,7 +692,7 @@ They must be specified in uppercase.
- AMD GPU RX7900XTX
* - AMD_GFX1103
- GPU
- AMD Phoenix APU with Radeon 740M/760M/780M/880M/890M
- AMD APU Phoenix
* - INTEL_GEN
- GPU
- SPIR64-based devices, e.g. Intel GPUs, using JIT
@ -701,7 +715,7 @@ They must be specified in uppercase.
- GPU
- Intel GPU Ponte Vecchio
This list was last updated for version 4.3.0 of the Kokkos library.
This list was last updated for version 4.5.01 of the Kokkos library.
.. tabs::
@ -2018,7 +2032,7 @@ TBB and MKL.
.. _mdi:
MDI package
-----------------------------
-----------
.. tabs::
@ -2045,6 +2059,37 @@ MDI package
----------
.. _misc:
MISC package
------------
The :doc:`fix imd <fix_imd>` style in this package can be run either
synchronously (communication with IMD clients is done in the main
process) or asynchronously (the fix spawns a separate thread that can
communicate with IMD clients concurrently with the LAMMPS execution).
.. tabs::
.. tab:: CMake build
.. code-block:: bash
-D LAMMPS_ASYNC_IMD=value # Run IMD server asynchronously
# value = no (default) or yes
.. tab:: Traditional make
To enable asynchronous mode the ``-DLAMMPS_ASYNC_IMD`` define
needs to be added to the ``LMP_INC`` variable in the
``Makefile.machine`` you are using. For example:
.. code-block:: make
LMP_INC = -DLAMMPS_ASYNC_IMD -DLAMMPS_MEMALIGN=64
----------
.. _molfile:
MOLFILE package
@ -2191,7 +2236,7 @@ verified to work in February 2020 with Quantum Espresso versions 6.3 to
from the sources in the *lib* folder (including the essential
libqmmm.a) are not included in the static LAMMPS library and
(currently) not installed, while their code is included in the
shared LAMMPS library. Thus a typical command line to configure
shared LAMMPS library. Thus a typical command to configure
building LAMMPS for QMMM would be:
.. code-block:: bash

View File

@ -8,6 +8,10 @@ Building LAMMPS with traditional makefiles requires that you have a
for customizing your LAMMPS build with a number of global compilation
options and features.
This build system is slowly being phased out and may not support all
optional features and packages in LAMMPS. It is recommended to switch
to the :doc:`CMake based build system <Build_cmake>`.
Requirements
^^^^^^^^^^^^

View File

@ -49,6 +49,7 @@ packages:
* :ref:`LEPTON <lepton>`
* :ref:`MACHDYN <machdyn>`
* :ref:`MDI <mdi>`
* :ref:`MISC <misc>`
* :ref:`ML-HDNNP <ml-hdnnp>`
* :ref:`ML-IAP <mliap>`
* :ref:`ML-PACE <ml-pace>`

View File

@ -100,9 +100,9 @@ procedure.
It is possible to use either the integrated CMake support of the Visual
Studio IDE or an external CMake installation (e.g. downloaded from
cmake.org) to create build files and compile LAMMPS from the command line.
cmake.org) to create build files and compile LAMMPS from the command-line.
Compilation via command line and unit tests are checked automatically
Compilation via command-line and unit tests are checked automatically
for the LAMMPS development branch through
`GitHub Actions <https://github.com/lammps/lammps/actions/workflows/compile-msvc.yml>`_.
@ -115,7 +115,7 @@ for the LAMMPS development branch through
Please note that for either approach CMake will create a so-called
:ref:`"multi-configuration" build environment <cmake_multiconfig>`, and
the command lines for building and testing LAMMPS must be adjusted
the commands for building and testing LAMMPS must be adjusted
accordingly.
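For example, with a multi-configuration generator the desired configuration has to be selected explicitly when building and testing (a sketch; the configuration name is a placeholder):
.. code-block:: bash
# run from within the build folder
cmake --build . --config Release
ctest -C Release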
The LAMMPS cmake folder contains a ``CMakeSettings.json`` file with

View File

@ -4,7 +4,7 @@ LAMMPS Class
The LAMMPS class encapsulates an MD simulation state and thus it is
the class that needs to be created when starting a new simulation system
state. The LAMMPS executable essentially creates one instance of this
class and passes the command line flags and tells it to process the
class and passes the command-line flags and tells it to process the
provided input (a file or ``stdin``). It shuts the class down when
control is returned to it and then exits. When using LAMMPS as a
library from another code it is required to create an instance of this

View File

@ -90,6 +90,7 @@ OPT.
* :doc:`lepton (o) <angle_lepton>`
* :doc:`mesocnt <angle_mesocnt>`
* :doc:`mm3 <angle_mm3>`
* :doc:`mwlc <angle_mwlc>`
* :doc:`quartic (o) <angle_quartic>`
* :doc:`spica (ko) <angle_spica>`
* :doc:`table (o) <angle_table>`

View File

@ -69,7 +69,7 @@ WARNING message is printed. The :doc:`Errors <Errors>` page gives
more information on what errors mean. The documentation for each
command lists restrictions on how the command can be used.
You can use the :ref:`-skiprun <skiprun>` command line flag
You can use the :ref:`-skiprun <skiprun>` command-line flag
to have LAMMPS skip the execution of any ``run``, ``minimize``, or similar
commands to check the entire input for correct syntax to avoid crashes
on typos or syntax errors in long runs.
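For example (the executable and input file names are placeholders):
.. code-block:: bash
# parse the whole input, but skip run, minimize, and similar commands
lmp -in in.lammps -skiprun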

View File

@ -1,6 +1,10 @@
Removed commands and packages
=============================
.. contents::
------
This page lists LAMMPS commands and packages that have been removed from
the distribution and provides suggestions for alternatives or
replacements. LAMMPS has special dummy styles implemented, that will
@ -8,47 +12,60 @@ stop LAMMPS and print a suitable error message in most cases, when a
style/command is used that has been removed or will replace the command
with the direct alternative (if available) and print a warning.
restart2data tool
-----------------
LAMMPS shell
------------
.. versionchanged:: 23Nov2013
.. versionchanged:: 29Aug2024
The functionality of the restart2data tool has been folded into the
LAMMPS executable directly instead of having a separate tool. A
combination of the commands :doc:`read_restart <read_restart>` and
:doc:`write_data <write_data>` can be used to the same effect. For
added convenience this conversion can also be triggered by
:doc:`command line flags <Run_options>`
The LAMMPS shell has been removed from the LAMMPS distribution. Users
are encouraged to use the :ref:`LAMMPS-GUI <lammps_gui>` tool instead.
Fix ave/spatial and fix ave/spatial/sphere
------------------------------------------
i-PI tool
---------
.. deprecated:: 11Dec2015
.. versionchanged:: 27Jun2024
The fixes ave/spatial and ave/spatial/sphere have been removed from LAMMPS
since they were superseded by the more general and extensible "chunk
infrastructure". Here the system is partitioned in one of many possible
ways through the :doc:`compute chunk/atom <compute_chunk_atom>` command
and then averaging is done using :doc:`fix ave/chunk <fix_ave_chunk>`.
Please refer to the :doc:`chunk HOWTO <Howto_chunk>` section for an overview.
The i-PI tool has been removed from the LAMMPS distribution. Instead,
instructions to install i-PI from PyPI via pip are provided.
Box command
-----------
USER-REAXC package
------------------
.. deprecated:: 22Dec2022
.. deprecated:: 7Feb2024
The *box* command has been removed and the LAMMPS code changed so it won't
be needed. If present, LAMMPS will ignore the command and print a warning.
The USER-REAXC package has been renamed to :ref:`REAXFF <PKG-REAXFF>`.
In the process also the pair style and related fixes were renamed to use
the "reaxff" string instead of "reax/c". For a while LAMMPS was maintaining
backward compatibility by providing aliases for the styles. These have
been removed, so using "reaxff" is now *required*.
Reset_ids, reset_atom_ids, reset_mol_ids commands
-------------------------------------------------
MPIIO package
-------------
.. deprecated:: 22Dec2022
.. deprecated:: 21Nov2023
The *reset_ids*, *reset_atom_ids*, and *reset_mol_ids* commands have
been folded into the :doc:`reset_atoms <reset_atoms>` command. If
present, LAMMPS will replace the commands accordingly and print a
warning.
The MPIIO package has been removed from LAMMPS since it was unmaintained
for many years and thus not updated to incorporate required changes that
had been applied to the corresponding non-MPIIO commands. As a
consequence, the MPIIO commands had become unreliable, sometimes
crashing LAMMPS or corrupting data. Similar functionality is available
through the :ref:`ADIOS package <PKG-ADIOS>` and the :ref:`NETCDF
package <PKG-NETCDF>`. Also, the :doc:`dump_modify nfile or dump_modify
fileper <dump_modify>` keywords may be used for an efficient way of
writing out dump files when running on large numbers of processors.
Similarly, the "nfile" and "fileper" keywords exist for restarts:
see :doc:`restart <restart>`, :doc:`read_restart <read_restart>`,
:doc:`write_restart <write_restart>`.
MSCG package
------------
.. deprecated:: 21Nov2023
The MSCG package has been removed from LAMMPS since it was unmaintained
for many years and instead superseded by the `OpenMSCG software
<https://software.rcc.uchicago.edu/mscg/>`_ of the Voth group at the
University of Chicago, which can be used independently of LAMMPS.
LATTE package
-------------
@ -64,18 +81,6 @@ packages, including LATTE. See the ``examples/QUANTUM`` dir and the
with LATTE as a plugin library (similar to the way fix latte worked), as
well as on a different set of MPI processors.
MEAM package
------------
The MEAM package in Fortran has been replaced by a C++ implementation.
The code in the :ref:`MEAM package <PKG-MEAM>` is a translation of the
Fortran code of MEAM into C++, which removes several restrictions
(e.g. there can be multiple instances in hybrid pair styles) and allows
for some optimizations leading to better performance. The pair style
:doc:`meam <pair_meam>` has the exact same syntax. For a transition
period the C++ version of MEAM was called USER-MEAMC so it could
coexist with the Fortran version.
Minimize style fire/old
-----------------------
@ -97,38 +102,38 @@ The same functionality is available through
:doc:`bond style mesocnt <bond_mesocnt>` and
:doc:`angle style mesocnt <angle_mesocnt>`.
MPIIO package
-------------
Box command
-----------
.. deprecated:: 21Nov2023
.. deprecated:: 22Dec2022
The MPIIO package has been removed from LAMMPS since it was unmaintained
for many years and thus not updated to incorporate required changes that
had been applied to the corresponding non-MPIIO commands. As a
consequence the MPIIO commands had become unreliable and sometimes
crashing LAMMPS or corrupting data. Similar functionality is available
through the :ref:`ADIOS package <PKG-ADIOS>` and the :ref:`NETCDF
package <PKG-NETCDF>`. Also, the :doc:`dump_modify nfile or dump_modify
fileper <dump_modify>` keywords may be used for an efficient way of
writing out dump files when running on large numbers of processors.
Similarly, the "nfile" and "fileper" keywords exist for restarts:
see :doc:`restart <restart>`, :doc:`read_restart <read_restart>`,
:doc:`write_restart <write_restart>`.
The *box* command has been removed and the LAMMPS code changed so it won't
be needed. If present, LAMMPS will ignore the command and print a warning.
Reset_ids, reset_atom_ids, reset_mol_ids commands
-------------------------------------------------
MSCG package
------------
.. deprecated:: 22Dec2022
.. deprecated:: 21Nov2023
The *reset_ids*, *reset_atom_ids*, and *reset_mol_ids* commands have
been folded into the :doc:`reset_atoms <reset_atoms>` command. If
present, LAMMPS will replace the commands accordingly and print a
warning.
The MSCG package has been removed from LAMMPS since it was unmaintained
for many years and instead superseded by the `OpenMSCG software
<https://software.rcc.uchicago.edu/mscg/>`_ of the Voth group at the
University of Chicago, which can be used independent from LAMMPS.
MESSAGE package
---------------
.. deprecated:: 4May2022
The MESSAGE package has been removed since it was superseded by the
:ref:`MDI package <PKG-MDI>`. MDI implements the same functionality
and in a more general way with direct support for more applications.
REAX package
------------
.. deprecated:: 4Jan2019
The REAX package has been removed since it was superseded by the
:ref:`REAXFF package <PKG-REAXFF>`. The REAXFF package has been tested
to yield equivalent results to the REAX package, offers better
@ -138,20 +143,25 @@ syntax compatible with the removed reax pair style, so input files will
have to be adapted. The REAXFF package was originally called
USER-REAXC.
USER-REAXC package
------------------
MEAM package
------------
.. deprecated:: 7Feb2024
.. deprecated:: 4Jan2019
The USER-REAXC package has been renamed to :ref:`REAXFF <PKG-REAXFF>`.
In the process also the pair style and related fixes were renamed to use
the "reaxff" string instead of "reax/c". For a while LAMMPS was maintaining
backward compatibility by providing aliases for the styles. These have
been removed, so using "reaxff" is now *required*.
The MEAM package in Fortran has been replaced by a C++ implementation.
The code in the :ref:`MEAM package <PKG-MEAM>` is a translation of the
Fortran code of MEAM into C++, which removes several restrictions
(e.g. there can be multiple instances in hybrid pair styles) and allows
for some optimizations leading to better performance. The pair style
:doc:`meam <pair_meam>` has the exact same syntax. For a transition
period the C++ version of MEAM was called USER-MEAMC so it could
coexist with the Fortran version.
USER-CUDA package
-----------------
.. deprecated:: 31May2016
The USER-CUDA package had been removed, since it had been unmaintained
for a long time and had known bugs and problems. Significant parts of
the design were transferred to the
@ -160,19 +170,27 @@ performance characteristics on NVIDIA GPUs. Both, the KOKKOS
and the :ref:`GPU package <PKG-GPU>` are maintained
and allow running LAMMPS with GPU acceleration.
i-PI tool
---------
Fix ave/spatial and fix ave/spatial/sphere
------------------------------------------
.. versionchanged:: 27Jun2024
.. deprecated:: 11Dec2015
The i-PI tool has been removed from the LAMMPS distribution. Instead,
instructions to install i-PI from PyPI via pip are provided.
The fixes ave/spatial and ave/spatial/sphere have been removed from LAMMPS
since they were superseded by the more general and extensible "chunk
infrastructure". Here the system is partitioned in one of many possible
ways through the :doc:`compute chunk/atom <compute_chunk_atom>` command
and then averaging is done using :doc:`fix ave/chunk <fix_ave_chunk>`.
Please refer to the :doc:`chunk HOWTO <Howto_chunk>` section for an overview.
LAMMPS shell
------------
restart2data tool
-----------------
.. versionchanged:: 29Aug2024
.. deprecated:: 23Nov2013
The LAMMPS shell has been removed from the LAMMPS distribution. Users
are encouraged to use the :ref:`LAMMPS-GUI <lammps_gui>` tool instead.
The functionality of the restart2data tool has been folded into the
LAMMPS executable directly instead of having a separate tool. A
combination of the commands :doc:`read_restart <read_restart>` and
:doc:`write_data <write_data>` can be used to the same effect. For
added convenience this conversion can also be triggered by
:doc:`command-line flags <Run_options>`

View File

@ -94,12 +94,12 @@ represents what is generally referred to as an "instance of LAMMPS". It
is a composite holding pointers to instances of other core classes
providing the core functionality of the MD engine in LAMMPS and through
them abstractions of the required operations. The constructor of the
LAMMPS class will instantiate those instances, process the command line
LAMMPS class will instantiate those instances, process the command-line
flags, initialize MPI (if not already done) and set up file pointers for
input and output. The destructor will shut everything down and free all
associated memory. Thus code for the standalone LAMMPS executable in
``main.cpp`` simply initializes MPI, instantiates a single instance of
LAMMPS while passing it the command line flags and input script. It
LAMMPS while passing it the command-line flags and input script. It
deletes the LAMMPS instance after the method reading the input returns
and shuts down the MPI environment before it exits the executable.

View File

@ -227,12 +227,12 @@ Tests for the C-style library interface
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Tests for validating the LAMMPS C-style library interface are in the
``unittest/c-library`` folder. They are implemented either to be used
for utility functions or for LAMMPS commands, but use the functions
implemented in the ``src/library.cpp`` file as much as possible. There
may be some overlap with other tests, but only in as much as is required
to test the C-style library API. The tests are distributed over
multiple test programs which try to match the grouping of the
``unittest/c-library`` folder. They test either utility functions or
LAMMPS commands, but use the functions implemented in
``src/library.cpp`` as much as possible. There may be some overlap with
other tests as far as the LAMMPS functionality is concerned, but the
focus is on testing the C-style library API. The tests are distributed
over multiple test programs which try to match the grouping of the
functions in the source code and :ref:`in the manual <lammps_c_api>`.
This group of tests also includes tests invoking LAMMPS in parallel
@ -258,7 +258,7 @@ Tests for the Python module and package
The ``unittest/python`` folder contains primarily tests for classes and
functions in the LAMMPS python module but also for commands in the
PYTHON package. These tests are only enabled if the necessary
PYTHON package. These tests are only enabled, if the necessary
prerequisites are detected or enabled during configuration and
compilation of LAMMPS (shared library build enabled, Python interpreter
found, Python development files found).
@ -272,29 +272,30 @@ Tests for the Fortran interface
Tests for using the Fortran module are in the ``unittest/fortran``
folder. Since they are also using the GoogleTest library, they require
implementing test wrappers in C++ that will call fortran functions
which provide a C function interface through ISO_C_BINDINGS that will in
turn call the functions in the LAMMPS Fortran module.
test wrappers written in C++ that will call fortran functions with a C
function interface through ISO_C_BINDINGS which will in turn call the
functions in the LAMMPS Fortran module.
Tests for the C++-style library interface
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The tests in the ``unittest/cplusplus`` folder are somewhat similar to
the tests for the C-style library interface, but do not need to test the
several convenience and utility functions that are only available through
the C-style interface. Instead it can focus on the more generic features
that are used internally. This part of the unit tests is currently still
mostly in the planning stage.
convenience and utility functions that are only available through the
C-style library interface. Instead they focus on the more generic
features that are used in LAMMPS internally. This part of the unit
tests is currently still mostly in the planning stage.
Tests for reading and writing file formats
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``unittest/formats`` folder contains test programs for reading and
writing files like data files, restart files, potential files or dump files.
This covers simple things like the file i/o convenience functions in the
``utils::`` namespace to complex tests of atom styles where creating and
deleting atoms with different properties is tested in different ways
and through script commands or reading and writing of data or restart files.
writing files like data files, restart files, potential files or dump
files. This ranges from simple things like the file i/o convenience
functions in the ``utils::`` namespace to complex tests of atom styles,
where creating and deleting atoms with different properties is tested
in different ways and through script commands or reading and writing of
data or restart files.
Tests for styles computing or modifying forces
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -443,7 +444,7 @@ file for a style that is similar to one to be tested. The file name should
follow the naming conventions described above and after copying the file,
the first step is to replace the style names where needed. The coefficient
values do not have to be meaningful, just in a reasonable range for the
given system. It does not matter if some forces are large, as long as
given system. It does not matter if some forces are large, for as long as
they do not diverge.
The template input files define a large number of index variables at the top
@ -476,7 +477,7 @@ the tabulated coulomb, to test both code paths. The reference results in the YA
files then should be compared manually, if they agree well enough within the limits
of those two approximations.
The ``test_pair_style`` and equivalent programs have special command line options
The ``test_pair_style`` and equivalent programs have special command-line options
to update the YAML files. Running a command like
.. code-block:: bash
@ -531,19 +532,20 @@ Python module.
Troubleshooting failed unit tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The are by default no unit tests for newly added features (e.g. pair, fix,
or compute styles) unless your pull request also includes tests for the
added features. If you are modifying some features, you may see failures
for existing tests, if your modifications have some unexpected side effects
or your changes render the existing test invalid. If you are adding an
accelerated version of an existing style, then only tests for INTEL,
KOKKOS (with OpenMP only), OPENMP, and OPT will be run automatically.
Tests for the GPU package are time consuming and thus are only run
*after* a merge, or when a special label, ``gpu_unit_tests`` is added
to the pull request. After the test has started, it is often best to
remove the label since every PR activity will re-trigger the test (that
is a limitation of triggering a test with a label). Support for unit
tests when using KOKKOS with GPU acceleration is currently not supported.
There are by default no unit tests for newly added features (e.g. pair,
fix, or compute styles) unless your pull request also includes tests for
these added features. If you are modifying some existing LAMMPS
features, you may see failures for existing tests, if your modifications
have some unexpected side effects or your changes render the existing
test invalid. If you are adding an accelerated version of an existing
style, then only tests for INTEL, KOKKOS (with OpenMP only), OPENMP, and
OPT will be run automatically. Tests for the GPU package are time
consuming and thus are only run *after* a merge, or when a special
label, ``gpu_unit_tests`` is added to the pull request. After the test
has started, it is often best to remove the label since every PR
activity will re-trigger the test (that is a limitation of triggering a
test with a label). Unit tests using KOKKOS with GPU
acceleration are currently not supported.
When you see a failed build on GitHub, click on ``Details`` to be taken
to the corresponding LAMMPS Jenkins CI web page. Click on the "Exit"
@ -588,7 +590,7 @@ While the epsilon (relative precision) for a single, `IEEE 754 compliant
<https://en.wikipedia.org/wiki/IEEE_754>`_, double precision floating
point operation is at about 2.2e-16, the achievable precision for the
tests is lower due to most numbers being sums over intermediate results
and the non-associativity of floating point math leading to larger
for which the non-associativity of floating point math leads to larger
errors. As a rule of thumb, the test epsilon can often be in the range
5.0e-14 to 1.0e-13. But for "noisy" force kernels, e.g. those with a larger
amount of arithmetic operations involving `exp()`, `log()` or `sin()`
@ -602,14 +604,14 @@ of floating point operations or that some or most intermediate operations
may be done using approximations or with single precision floating point
math.
To rerun the failed unit test individually, change to the ``build`` directory
To rerun a failed unit test individually, change to the ``build`` directory
and run the test with verbose output. For example,
.. code-block:: bash
env TEST_ARGS=-v ctest -R ^MolPairStyle:lj_cut_coul_long -V
``ctest`` with the ``-V`` flag also shows the exact command line
``ctest`` with the ``-V`` flag also shows the exact command
of the test. One can then use ``gdb --args`` to further debug and
catch exceptions with the test command, for example,

View File

@ -310,7 +310,7 @@ the constructor and the destructor.
Pair styles are different from most classes in LAMMPS that define a
"style", as their constructor only uses the LAMMPS class instance
pointer as an argument, but **not** the command line arguments of the
pointer as an argument, but **not** the arguments of the
:doc:`pair_style command <pair_style>`. Instead, those arguments are
processed in the ``Pair::settings()`` function (or rather the version in
the derived class). The constructor is the place where global defaults
@ -891,7 +891,7 @@ originally created from mixing or not).
These data file output functions are only useful for true pair-wise
additive potentials, where the potential parameters can be entered
through *multiple* :doc:`pair_coeff commands <pair_coeff>`. Pair styles
that require a single "pair_coeff \* \*" command line are not compatible
that require a single "pair_coeff \* \*" command are not compatible
with reading their parameters from data files. For pair styles like
*born/gauss* that do support writing to data files, the potential
parameters will be read from the data file, if present, and
@ -1122,7 +1122,7 @@ once. Thus, the ``coeff()`` function has to do three tasks, each of
which is delegated to a function in the ``PairTersoff`` class:
#. map elements to atom types. Those follow the potential file name in the
command line arguments and are processed by the ``map_element2type()`` function.
command arguments and are processed by the ``map_element2type()`` function.
#. read and parse the potential parameter file in the ``read_file()`` function.
#. Build data structures where the original and derived parameters are
indexed by all possible triples of atom types and thus can be looked
@ -1356,8 +1356,8 @@ either 0 or 1.
The ``morseflag`` variable defaults to 0 and is set to 1 in the
``PairAIREBOMorse::settings()`` function which is called by the
:doc:`pair_style <pair_style>` command. This function delegates
all command line processing and setting of other parameters to the
:doc:`pair_style <pair_style>` command. This function delegates all
command argument processing and setting of other parameters to the
``PairAIREBO::settings()`` function of the base class.
.. code-block:: c++

View File

@ -83,7 +83,7 @@ Run LAMMPS from within the debugger
Running LAMMPS under the control of the debugger as shown below only
works for a single MPI rank (for debugging a program running in parallel
you usually need a parallel debugger program). A simple way to launch
GDB is to prefix the LAMMPS command line with ``gdb --args`` and then
GDB is to prefix the LAMMPS command-line with ``gdb --args`` and then
type the command "run" at the GDB prompt. This will launch the
debugger, load the LAMMPS executable and its debug info, and then run
it. When it reaches the code causing the segmentation fault, it will
@ -180,7 +180,7 @@ inspect the behavior of a compiled program by essentially emulating a
CPU and instrumenting the program while running. This slows down
execution quite significantly, but can also report issues that are not
resulting in a crash. The default valgrind tool is a memory checker and
you can use it by prefixing the normal command line with ``valgrind``.
you can use it by prefixing the normal command-line with ``valgrind``.
Unlike GDB, this will also work for parallel execution, but it is
recommended to redirect the valgrind output to a file (e.g. with
``--log-file=crash-%p.txt``, the %p will be substituted with the
@ -235,3 +235,53 @@ from GDB. In addition you get a more specific hint about what cause the
segmentation fault, i.e. that it is a NULL pointer dereference. To find
out which pointer exactly was NULL, you need to use the debugger, though.
Debugging when LAMMPS appears to be stuck
=========================================
Sometimes a LAMMPS calculation appears to be stuck: the LAMMPS
process or processes are active, but there is no visible progress. This
can have multiple causes:
- The selected styles are slow and require a lot of CPU time and the
system is large. When extrapolating the expected speed from smaller
systems, one has to factor in that not all models scale linearly with
system size, e.g. :doc:`kspace styles like ewald or pppm
<kspace_style>`. There is very little that can be done in this case.
- The output interval is not set or is set to a large value with the
:doc:`thermo <thermo>` command. In the first case, there will be output
only at the first and last step.
- The output is block-buffered instead of line-buffered. The output
will only be written to the screen after 4096 or 8192 characters of
output have accumulated. This most often happens for files but also
with MPI parallel executables for output to the screen, since the
output to the screen is handled by the MPI library so that output from
all processes can be shown. This can be suppressed by using the
``-nonblock`` or ``-nb`` command-line flag, which turns off buffering
for screen and logfile output.
- An MPI parallel calculation has a bug where a collective MPI function
is called (e.g. ``MPI_Barrier()``, ``MPI_Bcast()``,
``MPI_Allreduce()`` and so on) before pending point-to-point
communications are completed or when the collective function is only
called from a subset of the MPI processes. This also applies to some
internal LAMMPS functions like ``Error::all()``, which uses
``MPI_Barrier()``; thus ``Error::one()`` must be called if the
error condition does not happen on all MPI processes simultaneously.
- Some function in LAMMPS has a bug where a ``for`` or ``while`` loop
does not trigger the exit condition and thus will loop forever. This
can happen when the wrong variable is incremented or when one value in
a comparison becomes ``NaN`` due to an overflow.
In the latter two cases, further information and stack traces (see above)
can be obtained by attaching a debugger to a running process. For that, the
process ID (PID) is needed; this can be found on Linux machines with the
``top``, ``htop``, ``ps``, or ``pstree`` commands.
Then running the (GNU) debugger ``gdb`` with the ``-p`` flag followed by
the process id will attach the process to the debugger and stop
execution of that specific process. From there on it is possible to
issue all debugger commands in the same way as when LAMMPS was started
from the debugger (see above). Most importantly it is possible to
obtain a stack trace with the ``where`` command and thus determine where
in the execution of a timestep this process is. Internal data can also
be printed and execution single-stepped or continued. When the debugger
is exited, the calculation will resume normally.
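A minimal sketch of this procedure (the process ID shown is of course a placeholder):
.. code-block:: bash
# find the process ID of the stuck LAMMPS process, e.g. with ps or top
ps aux | grep lmp
# attach GDB to that process; this stops its execution
gdb -p 12345
# at the (gdb) prompt: "where" prints a stack trace, "continue" resumes,
# and quitting the debugger lets the calculation proceed normally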

View File

@ -7774,7 +7774,7 @@ Doc page with :doc:`WARNING messages <Errors_warnings>`
*Too few values in body section of molecule file*
Self-explanatory.
*Too many -pk arguments in command line*
*Too many -pk arguments in command-line*
The string formed by concatenating the arguments is too long. Use a
package command in the input script instead.

View File

@ -146,6 +146,8 @@ Lowercase directories
+-------------+------------------------------------------------------------------+
| streitz | use of Streitz/Mintmire potential with charge equilibration |
+-------------+------------------------------------------------------------------+
| stress_vcm | removing binned rigid body motion from binned stress profile |
+-------------+------------------------------------------------------------------+
| tad | temperature-accelerated dynamics of vacancy diffusion in bulk Si |
+-------------+------------------------------------------------------------------+
| threebody | regression test input for a variety of manybody potentials |

View File

@ -16,7 +16,7 @@ compiled alongside the code using it from the source code in
``fortran/lammps.f90`` *and* with the same compiler used to build the
rest of the Fortran code that interfaces to LAMMPS. When linking, you
also need to :doc:`link to the LAMMPS library <Build_link>`. A typical
command line for a simple program using the Fortran interface would be:
command for a simple program using the Fortran interface would be:
.. code-block:: bash
@ -91,12 +91,12 @@ function and triggered with the optional logical argument set to
CALL lmp%close(.TRUE.)
END PROGRAM testlib
It is also possible to pass command line flags from Fortran to C/C++ and
It is also possible to pass command-line flags from Fortran to C/C++ and
thus make the resulting executable behave similarly to the standalone
executable (it will ignore the `-in/-i` flag, though). This allows
using the command line to configure accelerator and suffix settings,
using the command-line to configure accelerator and suffix settings,
configure screen and logfile output, or to set index style variables
from the command line and more. Here is a correspondingly adapted
from the command-line and more. Here is a correspondingly adapted
version of the previous example:
.. code-block:: fortran
@ -108,7 +108,7 @@ version of the previous example:
CHARACTER(LEN=128), ALLOCATABLE :: command_args(:)
INTEGER :: i, argc
! copy command line flags to `command_args()`
! copy command-line flags to `command_args()`
argc = COMMAND_ARGUMENT_COUNT()
ALLOCATE(command_args(0:argc))
DO i=0, argc
@ -321,6 +321,8 @@ of the contents of the :f:mod:`LIBLAMMPS` Fortran interface to LAMMPS.
:ftype set_string_variable: subroutine
:f set_internal_variable: :f:subr:`set_internal_variable`
:ftype set_internal_variable: subroutine
:f eval: :f:func:`eval`
:ftype eval: function
:f gather_atoms: :f:subr:`gather_atoms`
:ftype gather_atoms: subroutine
:f gather_atoms_concat: :f:subr:`gather_atoms_concat`
@ -448,7 +450,7 @@ of the contents of the :f:mod:`LIBLAMMPS` Fortran interface to LAMMPS.
compiled with MPI support, it will also initialize MPI, if it has
not already been initialized before.
The *args* argument with the list of command line parameters is
The *args* argument with the list of command-line parameters is
optional, and so is the *comm* argument with the MPI communicator.
If *comm* is not provided, ``MPI_COMM_WORLD`` is assumed. For
more details please see the documentation of :cpp:func:`lammps_open`.

View File

@ -103,6 +103,7 @@ Tutorials howto
Howto_github
Howto_lammps_gui
Howto_moltemplate
Howto_python
Howto_pylammps
Howto_wsl

View File

@ -56,7 +56,7 @@ using a shell like Bash or Zsh.
Visual Studio IDE with the bundled CMake or from the Windows command prompt using
a separately installed CMake package, both using the native Microsoft Visual C++
compilers and (optionally) the Microsoft MPI SDK. This tutorial, however, only
covers unix-like command line interfaces.
covers unix-like command-line interfaces.
We also assume that you have downloaded and unpacked a recent LAMMPS source code package
or used Git to create a clone of the LAMMPS sources on your compilation machine.
@ -277,7 +277,7 @@ Setting options
---------------
Options that enable, disable or modify settings are modified by setting
the value of CMake variables. This is done on the command line with the
the value of CMake variables. This is done on the command-line with the
*-D* flag in the format ``-D VARIABLE=value``, e.g. ``-D
CMAKE_BUILD_TYPE=Release`` or ``-D BUILD_MPI=on``. There is one quirk:
when used before the CMake directory, there may be a space between the
@ -376,7 +376,7 @@ Using presets
-------------
Since LAMMPS has a lot of optional features and packages, specifying
them all on the command line can be tedious. Or when selecting a
them all on the command-line can be tedious. Or when selecting a
different compiler toolchain, multiple options have to be changed
consistently and that is rather error prone. Or when enabling certain
packages, they require consistent settings to be operated in a
@ -384,7 +384,7 @@ particular mode. For this purpose, we are providing a selection of
"preset files" for CMake in the folder ``cmake/presets``. They
represent a way to pre-load or override the CMake configuration cache by
setting or changing CMake variables. Preset files are loaded using the
*-C* command line flag. You can combine loading multiple preset files or
*-C* command-line flag. You can combine loading multiple preset files or
change some variables later with additional *-D* flags. A few examples:
.. code-block:: bash

View File

@ -163,7 +163,7 @@ After everything is done, add the files to the branch and commit them:
*git rm*, *git mv* for adding, removing, renaming individual files,
respectively, and then *git commit* to finalize the commit.
Carefully check all pending changes with *git status* before
committing them. If you find doing this on the command line too
committing them. If you find doing this on the command-line too
tedious, consider using a GUI, for example the one included in git
distributions written in Tk, i.e. use *git gui* (on some Linux
distributions it may be required to install an additional package to

View File

@ -20,8 +20,11 @@ to the online LAMMPS documentation for known LAMMPS commands and styles.
(Ubuntu 20.04LTS or later and compatible), macOS (version 11 aka Big
Sur or later), and Windows (version 10 or later) :ref:`are available
<lammps_gui_install>` for download. Non-MPI LAMMPS executables (as
``lmp``) for running LAMMPS from the command line and :doc:`some
``lmp``) for running LAMMPS from the command-line and :doc:`some
LAMMPS tools <Tools>` compiled executables are also included.
Also, the pre-compiled LAMMPS-GUI packages include the WHAM executables
from http://membrane.urmc.rochester.edu/content/wham/ for use with
LAMMPS tutorials.
The source code for LAMMPS-GUI is included in the LAMMPS source code
distribution and can be found in the ``tools/lammps-gui`` folder. It
@ -29,16 +32,16 @@ to the online LAMMPS documentation for known LAMMPS commands and styles.
<Build_cmake>`.
LAMMPS-GUI tries to provide an experience similar to what people
traditionally would have running LAMMPS using a command line window and
traditionally would have running LAMMPS using a command-line window and
the console LAMMPS executable but just rolled into a single executable:
- writing & editing LAMMPS input files with a text editor
- run LAMMPS on those input file with selected command line flags
- run LAMMPS on those input files with selected command-line flags
- extract data from the created files and visualize it with external software
That procedure is quite effective for people proficient in using the
command line, as that allows them to use tools for the individual steps
command-line, as that allows them to use tools for the individual steps
that they are most comfortable with. In fact, it is often *required* to
adopt this workflow when running LAMMPS simulations on high-performance
computing facilities.
@ -61,13 +64,18 @@ simple LAMMPS simulations. It is very suitable for tutorials on LAMMPS
since you only need to learn how to use a single program for most tasks
and thus time can be saved and people can focus on learning LAMMPS.
The tutorials at https://lammpstutorials.github.io/ are specifically
updated for use with LAMMPS-GUI.
updated for use with LAMMPS-GUI, and their tutorial materials can
be downloaded and loaded directly from the GUI.
Another design goal is to keep the barrier low when replacing part of
the functionality of LAMMPS-GUI with external tools. That said, LAMMPS-GUI
has some unique functionality that is not found elsewhere:
- auto-adapting to features available in the integrated LAMMPS library
- auto-completion for LAMMPS commands and options
- context-sensitive online help
- start and stop of simulations via mouse or keyboard
- monitoring of simulation progress
- interactive visualization using the :doc:`dump image <dump_image>`
command with the option to copy-paste the resulting settings
- automatic slide show generation from dump image output at runtime
@ -100,10 +108,11 @@ MacOS 11 and later
^^^^^^^^^^^^^^^^^^
After downloading the ``LAMMPS-macOS-multiarch-GUI-<version>.dmg``
installer package, you need to double-click it and then, in the window
that opens, drag the app bundle as indicated into the "Applications"
folder. The follow the instructions in the "README.txt" file to
get access to the other included executables.
application bundle disk image, you need to double-click it and then, in
the window that opens, drag the app bundle as indicated into the
"Applications" folder. Afterwards, the disk image can be unmounted.
Then follow the instructions in the "README.txt" file to get access to
the other included command-line executables.
Linux on x86\_64
^^^^^^^^^^^^^^^^
@ -117,15 +126,25 @@ into the "LAMMPS_GUI" folder and execute "./lammps-gui" directly.
The second variant uses `flatpak <https://www.flatpak.org>`_ and
requires the flatpak management and runtime software to be installed.
After downloading the ``LAMMPS-GUI-Linux-x86_64-GUI-<version>.tar.gz``
After downloading the ``LAMMPS-GUI-Linux-x86_64-GUI-<version>.flatpak``
flatpak bundle, you can install it with ``flatpak install --user
LAMMPS-GUI-Linux-x86_64-GUI-<version>.tar.gz``. After installation,
LAMMPS-GUI-Linux-x86_64-GUI-<version>.flatpak``. After installation,
LAMMPS-GUI should be integrated into your desktop environment under
"Applications > Science" but also can be launched from the console with
``flatpak run org.lammps.lammps-gui``. The flatpak bundle also includes
the console LAMMPS executable ``lmp`` which can be launched to run
simulations with, for example: ``flatpak run --command=lmp
org.lammps.lammps-gui -in in.melt``.
simulations, for example with:
.. code-block:: sh
flatpak run --command=lmp org.lammps.lammps-gui -in in.melt
Other bundled command-line executables are run the same way and can be
listed with:
.. code-block:: sh
ls $(flatpak info --show-location org.lammps.lammps-gui )/files/bin
Compiling from Source
@ -165,9 +184,9 @@ window is stored when exiting and restored when starting again.
Opening Files
^^^^^^^^^^^^^
The LAMMPS-GUI application can be launched without command line arguments
The LAMMPS-GUI application can be launched without command-line arguments
and then starts with an empty buffer in the *Editor* window. If arguments
are given LAMMPS will use first command line argument as the file name for
are given, LAMMPS will use the first command-line argument as the file name for
the *Editor* buffer and reads its contents into the buffer, if the file
exists. All further arguments are ignored. Files can also be opened via
the *File* menu, the `Ctrl-O` (`Command-O` on macOS) keyboard shortcut
@ -261,14 +280,21 @@ Output Window
By default, when starting a run, an *Output* window opens that displays
the screen output of the running LAMMPS calculation, as shown below.
This text would normally be seen in the command line window.
This text would normally be seen in the command-line window.
.. image:: JPG/lammps-gui-log.png
:align: center
:scale: 50%
LAMMPS-GUI captures the screen output from LAMMPS as it is generated and
updates the *Output* window regularly during a run.
updates the *Output* window regularly during a run. If there are any
warnings or errors in the LAMMPS output, they are highlighted by using
bold text colored in red. There is a small panel at the bottom center
of the *Output* window showing how many warnings and errors were
detected and how many lines the entire output has. By clicking on the
button on the right with the warning symbol or by using the keyboard
shortcut `Ctrl-N` (`Command-N` on macOS), you can jump to the next
line with a warning or error.
By default, the *Output* window is replaced each time a run is started.
The runs are counted and the run number for the current run is displayed
@ -398,7 +424,7 @@ below.
Like for the *Output* and *Charts* windows, its content is continuously
updated during a run. It will show "(none)" if there are no variables
defined. Note that it is also possible to *set* :doc:`index style
variables <variable>`, that would normally be set via command line
variables <variable>`, that would normally be set via command-line
flags, via the "Set Variables..." dialog from the *Run* menu.
LAMMPS-GUI automatically defines the variable "gui_run" to the current
value of the run counter. That way it is possible to automatically
@ -775,11 +801,11 @@ General Settings:
- *Echo input to log:* when checked, all input commands, including
variable expansions, are echoed to the *Output* window. This is
equivalent to using `-echo screen` at the command line. There is no
equivalent to using `-echo screen` at the command-line. There is no
log *file* produced by default, since LAMMPS-GUI uses `-log none`.
- *Include citation details:* when checked, full citation info will be
included in the log window. This is equivalent to using `-cite
screen` on the command line.
screen` on the command-line.
- *Show log window by default:* when checked, the screen output of a
LAMMPS run will be collected in a log window during the run
- *Show chart window by default:* when checked, the thermodynamic
@ -828,7 +854,7 @@ Accelerators:
This tab enables selection of an accelerator package for LAMMPS to use
and is equivalent to using the `-suffix` and `-package` flags on the
command line. Only settings supported by the LAMMPS library and local
command-line. Only settings supported by the LAMMPS library and local
hardware are available. The `Number of threads` field allows setting
the maximum number of threads for the accelerator packages that use
threads.

View File

@ -738,8 +738,8 @@ command.
This can be done, for example, by using the built-in visualizer of the
:doc:`dump image or dump movie <dump_image>` command to create snapshot
images or a movie. Below are example command lines for using dump image
with the :ref:`example listed below <periexample>` and a set of images
images or a movie. Below are example commands for using dump image with
the :ref:`example listed below <periexample>` and a set of images
created for steps 300, 600, and 2000 this way.
.. code-block:: LAMMPS

View File

@ -1,564 +1,6 @@
PyLammps Tutorial
=================
.. contents::
Overview
--------
:py:class:`PyLammps <lammps.PyLammps>` is a Python wrapper class for
LAMMPS which can be created on its own or use an existing
:py:class:`lammps Python <lammps.lammps>` object. It creates a simpler,
more "pythonic" interface to common LAMMPS functionality, in contrast to
the :py:class:`lammps <lammps.lammps>` wrapper for the LAMMPS :ref:`C
language library interface API <lammps_c_api>` which is written using
`Python ctypes <ctypes_>`_. The :py:class:`lammps <lammps.lammps>`
wrapper is discussed on the :doc:`Python_head` doc page.
Unlike the flat `ctypes <ctypes_>`_ interface, PyLammps exposes a
discoverable API. It no longer requires knowledge of the underlying C++
code implementation. Finally, the :py:class:`IPyLammps
<lammps.IPyLammps>` wrapper builds on top of :py:class:`PyLammps
<lammps.PyLammps>` and adds some additional features for `IPython
integration <ipython_>`_ into `Jupyter notebooks <jupyter_>`_, e.g. for
embedded visualization output from :doc:`dump style image <dump_image>`.
.. _ctypes: https://docs.python.org/3/library/ctypes.html
.. _ipython: https://ipython.org/
.. _jupyter: https://jupyter.org/
Comparison of lammps and PyLammps interfaces
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
lammps.lammps
"""""""""""""
* uses `ctypes <ctypes_>`_
* direct memory access to native C++ data with optional support for NumPy arrays
* provides functions to send and receive data to LAMMPS
* interface modeled after the LAMMPS :ref:`C language library interface API <lammps_c_api>`
* requires knowledge of how LAMMPS internally works (C pointers, etc)
* full support for running Python with MPI using `mpi4py <https://mpi4py.readthedocs.io>`_
* no overhead from creating a more Python-like interface
lammps.PyLammps
"""""""""""""""
* higher-level abstraction built on *top* of the original :py:class:`ctypes based interface <lammps.lammps>`
* manipulation of Python objects
* communication with LAMMPS is hidden from API user
* shorter, more concise Python
* better IPython integration, designed for quick prototyping
* designed for serial execution
* additional overhead from capturing and parsing the LAMMPS screen output
Quick Start
-----------
System-wide Installation
^^^^^^^^^^^^^^^^^^^^^^^^
Step 1: Building LAMMPS as a shared library
"""""""""""""""""""""""""""""""""""""""""""
To use LAMMPS inside of Python it has to be compiled as shared
library. This library is then loaded by the Python interface. In this
example we enable the MOLECULE package and compile LAMMPS with PNG, JPEG
and FFMPEG output support enabled.
Step 1a: For the CMake based build system, the steps are:
.. code-block:: bash
mkdir $LAMMPS_DIR/build-shared
cd $LAMMPS_DIR/build-shared
# MPI, PNG, Jpeg, FFMPEG are auto-detected
cmake ../cmake -DPKG_MOLECULE=yes -DBUILD_LIB=yes -DBUILD_SHARED_LIBS=yes
make
Step 1b: For the legacy, make based build system, the steps are:
.. code-block:: bash
cd $LAMMPS_DIR/src
# add packages if necessary
make yes-MOLECULE
# compile shared library using Makefile
make mpi mode=shlib LMP_INC="-DLAMMPS_PNG -DLAMMPS_JPEG -DLAMMPS_FFMPEG" JPG_LIB="-lpng -ljpeg"
Step 2: Installing the LAMMPS Python package
""""""""""""""""""""""""""""""""""""""""""""
PyLammps is part of the lammps Python package. To install it simply install
that package into your current Python installation with:
.. code-block:: bash
make install-python
.. note::
Recompiling the shared library requires re-installing the Python package
Installation inside of a virtualenv
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can use virtualenv to create a custom Python environment specifically tuned
for your workflow.
Benefits of using a virtualenv
""""""""""""""""""""""""""""""
* isolation of your system Python installation from your development installation
* installation can happen in your user directory without root access (useful for HPC clusters)
* installing packages through pip allows you to get newer versions of packages than e.g., through apt-get or yum package managers (and without root access)
* you can even install specific old versions of a package if necessary
**Prerequisite (e.g. on Ubuntu)**
.. code-block:: bash
apt-get install python-virtualenv
Creating a virtualenv with lammps installed
"""""""""""""""""""""""""""""""""""""""""""
.. code-block:: bash
# create virtualenv named 'testing'
virtualenv $HOME/python/testing
# activate 'testing' environment
source $HOME/python/testing/bin/activate
Now configure and compile the LAMMPS shared library as outlined above.
When using CMake and the shared library has already been build, you
need to re-run CMake to update the location of the python executable
to the location in the virtual environment with:
.. code-block:: bash
cmake . -DPython_EXECUTABLE=$(which python)
# install LAMMPS package in virtualenv
(testing) make install-python
# install other useful packages
(testing) pip install matplotlib jupyter mpi4py
...
# return to original shell
(testing) deactivate
Creating a new instance of PyLammps
-----------------------------------
To create a PyLammps object you need to first import the class from the lammps
module. By using the default constructor, a new *lammps* instance is created.
.. code-block:: python
from lammps import PyLammps
L = PyLammps()
You can also initialize PyLammps on top of this existing *lammps* object:
.. code-block:: python
from lammps import lammps, PyLammps
lmp = lammps()
L = PyLammps(ptr=lmp)
Commands
--------
Sending a LAMMPS command with the existing library interfaces is done using
the command method of the lammps object instance.
For instance, let's take the following LAMMPS command:
.. code-block:: LAMMPS
region box block 0 10 0 5 -0.5 0.5
In the original interface this command can be executed with the following
Python code if *L* was a lammps instance:
.. code-block:: python
L.command("region box block 0 10 0 5 -0.5 0.5")
With the PyLammps interface, any command can be split up into arbitrary parts
separated by white-space, passed as individual arguments to a region method.
.. code-block:: python
L.region("box block", 0, 10, 0, 5, -0.5, 0.5)
Note that each parameter is set as Python literal floating-point number. In the
PyLammps interface, each command takes an arbitrary parameter list and transparently
merges it to a single command string, separating individual parameters by white-space.
The benefit of this approach is avoiding redundant command calls and easier
parameterization. In the original interface parameterization needed to be done
manually by creating formatted strings.
.. code-block:: python
L.command("region box block %f %f %f %f %f %f" % (xlo, xhi, ylo, yhi, zlo, zhi))
In contrast, methods of PyLammps accept parameters directly and will convert
them automatically to a final command string.
.. code-block:: python
L.region("box block", xlo, xhi, ylo, yhi, zlo, zhi)
System state
------------
In addition to dispatching commands directly through the PyLammps object, it
also provides several properties which allow you to query the system state.
L.system
Is a dictionary describing the system such as the bounding box or number of atoms
L.system.xlo, L.system.xhi
bounding box limits along x-axis
L.system.ylo, L.system.yhi
bounding box limits along y-axis
L.system.zlo, L.system.zhi
bounding box limits along z-axis
L.communication
configuration of communication subsystem, such as the number of threads or processors
L.communication.nthreads
number of threads used by each LAMMPS process
L.communication.nprocs
number of MPI processes used by LAMMPS
L.fixes
List of fixes in the current system
L.computes
List of active computes in the current system
L.dump
List of active dumps in the current system
L.groups
List of groups present in the current system
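As a minimal sketch (assuming an input such as ``in.melt`` from the
``examples/melt`` folder has been read first), a few of these properties
can be queried like this:

.. code-block:: python

   from lammps import PyLammps

   L = PyLammps()
   L.file("in.melt")   # read an input script that sets up a system

   # query system state and communication settings
   print("Number of atoms:", L.system.natoms)
   print("Box along x:", L.system.xlo, "to", L.system.xhi)
   print("MPI processes:", L.communication.nprocs)
   print("Fixes:", L.fixes)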
Working with LAMMPS variables
-----------------------------
LAMMPS variables can be both defined and accessed via the PyLammps interface.
To define a variable you can use the :doc:`variable <variable>` command:
.. code-block:: python
L.variable("a index 2")
A dictionary of all variables is returned by L.variables
you can access an individual variable by retrieving a variable object from the
L.variables dictionary by name
.. code-block:: python
a = L.variables['a']
The variable value can then be easily read and written by accessing the value
property of this object.
.. code-block:: python
print(a.value)
a.value = 4
Retrieving the value of an arbitrary LAMMPS expressions
-------------------------------------------------------
LAMMPS expressions can be immediately evaluated by using the eval method. The
passed string parameter can be any expression containing global thermo values,
variables, compute or fix data.
.. code-block:: python
result = L.eval("ke") # kinetic energy
result = L.eval("pe") # potential energy
result = L.eval("v_t/2.0")
Accessing atom data
-------------------
All atoms in the current simulation can be accessed by using the L.atoms list.
Each element of this list is an object which exposes its properties (id, type,
position, velocity, force, etc.).
.. code-block:: python
# access first atom
L.atoms[0].id
L.atoms[0].type
# access second atom
L.atoms[1].position
L.atoms[1].velocity
L.atoms[1].force
Some properties can also be used to set:
.. code-block:: python
# set position in 2D simulation
L.atoms[0].position = (1.0, 0.0)
# set position in 3D simulation
L.atoms[0].position = (1.0, 0.0, 1.)
Evaluating thermo data
----------------------
Each simulation run usually produces thermo output based on system state,
computes, fixes or variables. The trajectories of these values can be queried
after a run via the L.runs list. This list contains a growing list of run data.
The first element is the output of the first run, the second element that of
the second run.
.. code-block:: python
L.run(1000)
L.runs[0] # data of first 1000 time steps
L.run(1000)
L.runs[1] # data of second 1000 time steps
Each run contains a dictionary of all trajectories. Each trajectory is
accessible through its thermo name:
.. code-block:: python
L.runs[0].thermo.Step # list of time steps in first run
L.runs[0].thermo.Ke # list of kinetic energy values in first run
Together with matplotlib plotting data out of LAMMPS becomes simple:
.. code-block:: python
import matplotlib.pyplot as plt
steps = L.runs[0].thermo.Step
ke = L.runs[0].thermo.Ke
plt.plot(steps, ke)
Error handling with PyLammps
----------------------------
Using C++ exceptions in LAMMPS for errors allows capturing them on the
C++ side and rethrowing them on the Python side. This way you can handle
LAMMPS errors through the Python exception handling mechanism.
.. warning::
Capturing a LAMMPS exception in Python can still mean that the
current LAMMPS process is in an illegal state and must be
terminated. It is advised to save your data and terminate the Python
instance as quickly as possible.
Using PyLammps in IPython notebooks and Jupyter
-----------------------------------------------
If the LAMMPS Python package is installed for the same Python interpreter as
IPython, you can use PyLammps directly inside of an IPython notebook inside of
Jupyter. Jupyter is a powerful integrated development environment (IDE) for
many dynamic languages like Python, Julia and others, which operates inside of
any web browser. Besides auto-completion and syntax highlighting it allows you
to create formatted documents using Markup, mathematical formulas, graphics and
animations intermixed with executable Python code. It is a great format for
tutorials and showcasing your latest research.
To launch an instance of Jupyter simply run the following command inside your
Python environment (this assumes you followed the Quick Start instructions):
.. code-block:: bash
jupyter notebook
IPyLammps Examples
------------------
Examples of IPython notebooks can be found in the python/examples/pylammps
subdirectory. To open these notebooks launch *jupyter notebook* inside this
directory and navigate to one of them. If you compiled and installed
a LAMMPS shared library with exceptions, PNG, JPEG and FFMPEG support
you should be able to rerun all of these notebooks.
Validating a dihedral potential
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example showcases how an IPython Notebook can be used to compare a simple
LAMMPS simulation of a harmonic dihedral potential to its analytical solution.
Four atoms are placed in the simulation and the dihedral potential is applied on
them using a datafile. Then one of the atoms is rotated along the central axis by
setting its position from Python, which changes the dihedral angle.
.. code-block:: python
phi = [d * math.pi / 180 for d in range(360)]
pos = [(1.0, math.cos(p), math.sin(p)) for p in phi]
pe = []
for p in pos:
L.atoms[3].position = p
L.run(0)
pe.append(L.eval("pe"))
By evaluating the potential energy for each position we can verify that
trajectory with the analytical formula. To compare both solutions, we plot
both trajectories over each other using matplotlib, which embeds the generated
plot inside the IPython notebook.
.. image:: JPG/pylammps_dihedral.jpg
:align: center
Running a Monte Carlo relaxation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This second example shows how to use PyLammps to create a 2D Monte Carlo Relaxation
simulation, computing and plotting energy terms and even embedding video output.
Initially, a 2D system is created in a state with minimal energy.
.. image:: JPG/pylammps_mc_minimum.jpg
:align: center
It is then disordered by moving each atom by a random delta.
.. code-block:: python
random.seed(27848)
deltaperturb = 0.2
for i in range(L.system.natoms):
x, y = L.atoms[i].position
dx = deltaperturb * random.uniform(-1, 1)
dy = deltaperturb * random.uniform(-1, 1)
L.atoms[i].position = (x+dx, y+dy)
L.run(0)
.. image:: JPG/pylammps_mc_disordered.jpg
:align: center
Finally, the Monte Carlo algorithm is implemented in Python. It continuously
moves random atoms by a random delta and only accepts certain moves.
.. code-block:: python
estart = L.eval("pe")
elast = estart
naccept = 0
energies = [estart]
niterations = 3000
deltamove = 0.1
kT = 0.05
natoms = L.system.natoms
for i in range(niterations):
iatom = random.randrange(0, natoms)
current_atom = L.atoms[iatom]
x0, y0 = current_atom.position
dx = deltamove * random.uniform(-1, 1)
dy = deltamove * random.uniform(-1, 1)
current_atom.position = (x0+dx, y0+dy)
L.run(1, "pre no post no")
e = L.eval("pe")
energies.append(e)
if e <= elast:
naccept += 1
elast = e
elif random.random() <= math.exp(natoms*(elast-e)/kT):
naccept += 1
elast = e
else:
current_atom.position = (x0, y0)
The energies of each iteration are collected in a Python list and finally plotted using matplotlib.
.. image:: JPG/pylammps_mc_energies_plot.jpg
:align: center
The IPython notebook also shows how to use dump commands and embed video files
inside of the IPython notebook.
Using PyLammps and mpi4py (Experimental)
----------------------------------------
PyLammps can be run in parallel using `mpi4py
<https://mpi4py.readthedocs.io>`_. This python package can be installed
using
.. code-block:: bash
pip install mpi4py
.. warning::
Usually, any :py:class:`PyLammps <lammps.PyLammps>` command must be
executed by *all* MPI processes. However, evaluations and querying
the system state is only available on MPI rank 0. Using these
functions from other MPI ranks will raise an exception.
The following is a short example which reads in an existing LAMMPS input
file and executes it in parallel. You can find in.melt in the
examples/melt folder. Please take note that the
:py:meth:`PyLammps.eval() <lammps.PyLammps.eval>` is called only from
MPI rank 0.
.. code-block:: python
from mpi4py import MPI
from lammps import PyLammps
L = PyLammps()
L.file("in.melt")
if MPI.COMM_WORLD.rank == 0:
print("Potential energy: ", L.eval("pe"))
MPI.Finalize()
To run this script (melt.py) in parallel using 4 MPI processes we invoke the
following mpirun command:
.. code-block:: bash
mpirun -np 4 python melt.py
Feedback and Contributing
-------------------------
If you find this Python interface useful, please feel free to provide feedback
and ideas on how to improve it to Richard Berger (richard.berger@outlook.com). We also
want to encourage people to write tutorial style IPython notebooks showcasing LAMMPS usage
and maybe their latest research results.
The PyLammps interface is deprecated and will be removed in a future release of
LAMMPS. As such, the PyLammps version of this tutorial has been removed and is
replaced by the :doc:`Python_head`.

441
doc/src/Howto_python.rst Normal file
View File

@ -0,0 +1,441 @@
LAMMPS Python Tutorial
======================
.. contents::
-----
Overview
--------
The :py:class:`lammps <lammps.lammps>` Python module is a wrapper class for the
LAMMPS :ref:`C language library interface API <lammps_c_api>` which is written using
`Python ctypes <ctypes_>`_. The design choice of this wrapper class is to
follow the C language API closely with only small changes related to Python
specific requirements and to better accommodate object oriented programming.
In addition to this flat `ctypes <ctypes_>`_ interface, the
:py:class:`lammps <lammps.lammps>` wrapper class exposes a discoverable
API that doesn't require as much knowledge of the underlying C language
library interface or LAMMPS C++ code implementation.
Finally, the API exposes some additional features for `IPython integration
<ipython_>`_ into `Jupyter notebooks <jupyter_>`_, e.g. for embedded
visualization output from :doc:`dump style image <dump_image>`.
.. _ctypes: https://docs.python.org/3/library/ctypes.html
.. _ipython: https://ipython.org/
.. _jupyter: https://jupyter.org/
-----
Quick Start
-----------
System-wide or User Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Step 1: Building LAMMPS as a shared library
"""""""""""""""""""""""""""""""""""""""""""
To use LAMMPS inside of Python it has to be compiled as a shared library.
This library is then loaded by the Python interface. In this example we
enable the :ref:`MOLECULE package <PKG-MOLECULE>` and compile LAMMPS
with :ref:`PNG, JPEG and FFMPEG output support <graphics>` enabled.
.. tabs::
.. tab:: CMake build
.. code-block:: bash
mkdir $LAMMPS_DIR/build-shared
cd $LAMMPS_DIR/build-shared
# MPI, PNG, Jpeg, FFMPEG are auto-detected
cmake ../cmake -DPKG_MOLECULE=yes -DPKG_PYTHON=on -DBUILD_SHARED_LIBS=yes
make
.. tab:: Traditional make
.. code-block:: bash
cd $LAMMPS_DIR/src
# add packages if necessary
make yes-MOLECULE
make yes-PYTHON
# compile shared library using Makefile
make mpi mode=shlib LMP_INC="-DLAMMPS_PNG -DLAMMPS_JPEG -DLAMMPS_FFMPEG" JPG_LIB="-lpng -ljpeg"
Step 2: Installing the LAMMPS Python package
""""""""""""""""""""""""""""""""""""""""""""
Next install the LAMMPS Python package into your current Python installation with:
.. code-block:: bash
make install-python
This will create a so-called `"wheel"
<https://packaging.python.org/en/latest/discussions/package-formats/#what-is-a-wheel>`_
and then install the LAMMPS Python module from that "wheel" either into
a system folder (provided the command is executed with root
privileges) or into your personal Python module folder.
.. note::
Recompiling the shared library requires re-installing the Python
package.
Installation inside of a virtual environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can use virtual environments to create a custom Python environment
specifically tuned for your workflow.
Benefits of using a virtualenv
""""""""""""""""""""""""""""""
* isolation of your system Python installation from your development installation
* installation can happen in your user directory without root access (useful for HPC clusters)
* installing packages through pip allows you to get newer versions of packages than e.g., through apt-get or yum package managers (and without root access)
* you can even install specific old versions of a package if necessary
**Prerequisite (e.g. on Ubuntu)**
.. code-block:: bash
apt-get install python3-venv
Creating a virtualenv with lammps installed
"""""""""""""""""""""""""""""""""""""""""""
.. code-block:: bash
# create virtual environment named 'testing'
python3 -m venv $HOME/python/testing
# activate 'testing' environment
source $HOME/python/testing/bin/activate
Now configure and compile the LAMMPS shared library as outlined above.
When using CMake and the shared library has already been built, you
need to re-run CMake to update the location of the Python executable
to the location in the virtual environment with:
.. code-block:: bash
cmake . -DPython_EXECUTABLE=$(which python)
# install LAMMPS package in virtualenv
(testing) make install-python
# install other useful packages
(testing) pip install matplotlib jupyter mpi4py pandas
...
# return to original shell
(testing) deactivate
-------
Creating a new lammps instance
------------------------------
To create a lammps object you need to first import the class from the lammps
module. By using the default constructor, a new :py:class:`lammps
<lammps.lammps>` instance is created.
.. code-block:: python
from lammps import lammps
L = lammps()
See the :doc:`LAMMPS Python documentation <Python_create>` for how to customize
the instance creation with optional arguments.
-----
Commands
--------
Sending a LAMMPS command with the library interface is done using
the ``command`` method of the lammps object.
For instance, let's take the following LAMMPS command:
.. code-block:: LAMMPS
region box block 0 10 0 5 -0.5 0.5
This command can be executed with the following Python code if ``L`` is a ``lammps``
instance:
.. code-block:: python
L.command("region box block 0 10 0 5 -0.5 0.5")
For convenience, the ``lammps`` class also provides a command wrapper ``cmd``
that turns any LAMMPS command into a regular function call:
.. code-block:: python
L.cmd.region("box block", 0, 10, 0, 5, -0.5, 0.5)
Note that each parameter is set as a Python number literal. With
the wrapper each command takes an arbitrary parameter list and transparently
merges it to a single command string, separating individual parameters by
white-space.
The benefit of this approach is avoiding redundant command calls and easier
parameterization. With the ``command`` function each call needs to be assembled
manually using formatted strings.
.. code-block:: python
L.command(f"region box block {xlo} {xhi} {ylo} {yhi} {zlo} {zhi}")
The wrapper accepts parameters directly and will convert
them automatically to a final command string.
.. code-block:: python
L.cmd.region("box block", xlo, xhi, ylo, yhi, zlo, zhi)
.. note::
When running in IPython you can use Tab-completion after ``L.cmd.`` to see
all available LAMMPS commands.
-----
Accessing atom data
-------------------
All per-atom properties that are part of the :doc:`atom style
<atom_style>` in the current simulation can be accessed using the
:py:meth:`extract_atom() <lammps.lammps.extract_atom()>` method. The
data can be retrieved as ctypes objects or as NumPy arrays through the
lammps.numpy module. Both represent the *local* atoms of the
individual sub-domain for the current MPI process and, depending on the
property, may or may not include the local ghost atoms.
Both can be accessed as lists, but for the ctypes object the size is
not known and has to be retrieved first to avoid out-of-bounds
accesses.
.. code-block:: python
nlocal = L.extract_setting("nlocal")
nall = L.extract_setting("nall")
print("Number of local atoms ", nlocal, " Number of local and ghost atoms ", nall);
# access via ctypes directly
atom_id = L.extract_atom("id")
print("Atom IDs", atom_id[0:nlocal])
# access through numpy wrapper
atom_type = L.numpy.extract_atom("type")
print("Atom types", atom_type)
x = L.numpy.extract_atom("x")
v = L.numpy.extract_atom("v")
print("positions array shape", x.shape)
print("velocity array shape", v.shape)
# turn on communicating velocities to ghost atoms
L.cmd.comm_modify("vel", "yes")
v = L.numpy.extract_atom('v')
print("velocity array shape", v.shape)
Some properties can also be set from Python since internally the
data of the C++ code is accessed directly:
.. code-block:: python
# set position in 2D simulation
x[0] = (1.0, 0.0)
# set position in 3D simulation
x[0] = (1.0, 0.0, 1.)
------
Retrieving the values of thermodynamic data and variables
---------------------------------------------------------
To access thermodynamic data from the last completed timestep,
you can use the :py:meth:`get_thermo() <lammps.lammps.get_thermo>`
method, and to extract the value of (compatible) variables, you
can use the :py:meth:`extract_variable() <lammps.lammps.extract_variable>`
method.
.. code-block:: python
result = L.get_thermo("ke") # kinetic energy
result = L.get_thermo("pe") # potential energy
result = L.extract_variable("t") / 2.0
Error handling
--------------
We are using C++ exceptions in LAMMPS for errors and the C language
library interface captures and records them. This allows checking
whether errors have happened in Python during a call into LAMMPS and
then re-throw the error as a Python exception. This way you can handle
LAMMPS errors in the conventional way through the Python exception
handling mechanism.
.. warning::
Capturing a LAMMPS exception in Python can still mean that the
current LAMMPS process is in an illegal state and must be
terminated. It is advised to save your data and terminate the Python
instance as quickly as possible.
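As a minimal sketch (assuming LAMMPS was built with the default
exception support; the invalid command below is only there to force an
error), such an error can be caught like this:

.. code-block:: python

   from lammps import lammps

   lmp = lammps()
   try:
       # deliberately invalid command to force a LAMMPS error
       lmp.command("this_is_not_a_valid_command")
   except Exception as err:
       print("LAMMPS reported an error:", err)
       # the instance may be in an undefined state, so save data and shut down
       lmp.close()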
Using LAMMPS in IPython notebooks and Jupyter
---------------------------------------------
If the LAMMPS Python package is installed for the same Python
interpreter as IPython, you can use LAMMPS directly inside of an IPython
notebook inside of Jupyter. Jupyter is a powerful integrated development
environment (IDE) for many dynamic languages like Python, Julia and
others, which operates inside of any web browser. Besides
auto-completion and syntax highlighting it allows you to create
formatted documents using Markup, mathematical formulas, graphics and
animations intermixed with executable Python code. It is a great format
for tutorials and showcasing your latest research.
To launch an instance of Jupyter simply run the following command inside your
Python environment (this assumes you followed the Quick Start instructions):
.. code-block:: bash
jupyter notebook
Interactive Python Examples
---------------------------
Examples of IPython notebooks can be found in the ``python/examples/ipython``
subdirectory. To open these notebooks launch ``jupyter notebook`` inside this
directory and navigate to one of them. If you compiled and installed
a LAMMPS shared library with PNG, JPEG and FFMPEG support
you should be able to rerun all of these notebooks.
Validating a dihedral potential
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example showcases how an IPython Notebook can be used to compare a simple
LAMMPS simulation of a harmonic dihedral potential to its analytical solution.
Four atoms are placed in the simulation and the dihedral potential is applied on
them using a datafile. Then one of the atoms is rotated along the central axis by
setting its position from Python, which changes the dihedral angle.
.. code-block:: python
phi = [d * math.pi / 180 for d in range(360)]
pos = [(1.0, math.cos(p), math.sin(p)) for p in phi]
x = L.numpy.extract_atom("x")
pe = []
for p in pos:
x[3] = p
L.cmd.run(0, "post", "no")
pe.append(L.get_thermo("pe"))
By evaluating the potential energy for each position we can check that
trajectory against the analytical formula. To compare both solutions, we plot
both trajectories over each other using matplotlib, which embeds the generated
plot inside the IPython notebook.
.. image:: JPG/pylammps_dihedral.jpg
:align: center
Running a Monte Carlo relaxation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This second example shows how to use the ``lammps`` Python interface to create a
2D Monte Carlo relaxation simulation, computing and plotting energy terms and
even embedding video output.
Initially, a 2D system is created in a state with minimal energy.
.. image:: JPG/pylammps_mc_minimum.jpg
:align: center
It is then disordered by moving each atom by a random delta.
.. code-block:: python
random.seed(27848)
deltaperturb = 0.2
x = L.numpy.extract_atom("x")
natoms = x.shape[0]
for i in range(natoms):
dx = deltaperturb * random.uniform(-1, 1)
dy = deltaperturb * random.uniform(-1, 1)
x[i][0] += dx
x[i][1] += dy
L.cmd.run(0, "post", "no")
.. image:: JPG/pylammps_mc_disordered.jpg
:align: center
Finally, the Monte Carlo algorithm is implemented in Python. It continuously
moves random atoms by a random delta and only accepts certain moves.
.. code-block:: python
estart = L.get_thermo("pe")
elast = estart
naccept = 0
energies = [estart]
niterations = 3000
deltamove = 0.1
kT = 0.05
for i in range(niterations):
x = L.numpy.extract_atom("x")
natoms = x.shape[0]
iatom = random.randrange(0, natoms)
current_atom = x[iatom]
x0 = current_atom[0]
y0 = current_atom[1]
dx = deltamove * random.uniform(-1, 1)
dy = deltamove * random.uniform(-1, 1)
current_atom[0] = x0 + dx
current_atom[1] = y0 + dy
L.cmd.run(1, "pre no post no")
e = L.get_thermo("pe")
energies.append(e)
if e <= elast:
naccept += 1
elast = e
elif random.random() <= math.exp(natoms*(elast-e)/kT):
naccept += 1
elast = e
else:
current_atom[0] = x0
current_atom[1] = y0
The energies of each iteration are collected in a Python list and finally plotted using matplotlib.
.. image:: JPG/pylammps_mc_energies_plot.jpg
:align: center
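A minimal plotting sketch (assuming the ``energies`` list from the loop
above and an installed matplotlib) could look like this:

.. code-block:: python

   import matplotlib.pyplot as plt

   # "energies" is the list filled in the Monte Carlo loop above
   plt.plot(range(len(energies)), energies)
   plt.xlabel("Monte Carlo iteration")
   plt.ylabel("Potential energy")
   plt.show()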
The IPython notebook also shows how to use dump commands and embed video files
inside of the IPython notebook.

View File

@ -260,7 +260,7 @@ Switch into the :code:`examples/melt` folder:
cd ../examples/melt
To run this example in serial, use the following command line:
To run this example in serial, use the following command:
.. code-block::

View File

@ -60,7 +60,7 @@ between them at any time using "git checkout <branch name>".)
files (mostly by accident). If you do not need access to the entire
commit history (most people don't), you can speed up the "cloning"
process and reduce local disk space requirements by using the
``--depth`` git command line flag. That will create a "shallow clone"
``--depth`` git command-line flag. That will create a "shallow clone"
of the repository, which contains only a subset of the git history.
Using a depth of 1000 is usually sufficient to include the head
commits of the *develop*, the *release*, and the *maintenance*

Binary file not shown.

Before

Width:  |  Height:  |  Size: 103 KiB

After

Width:  |  Height:  |  Size: 78 KiB

View File

@ -131,16 +131,15 @@ run LAMMPS in serial mode.
.. _lammps_python_api:
LAMMPS Python APIs
==================
LAMMPS Python API
=================
The LAMMPS Python module enables calling the LAMMPS C library API from
Python by dynamically loading functions in the LAMMPS shared library through
the `Python ctypes module <https://docs.python.org/3/library/ctypes.html>`_.
Because of the dynamic loading, it is **required** that LAMMPS is compiled
in :ref:`"shared" mode <exe>`. The Python interface is object-oriented, but
otherwise tries to be very similar to the C library API. Three different
Python classes to run LAMMPS are available and they build on each other.
otherwise tries to be very similar to the C library API.
More information on this is in the :doc:`Python_head`
section of the manual. Use of the LAMMPS Python module is described in
:doc:`Python_module`.
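As a minimal sketch (assuming the shared library and the LAMMPS Python
module are installed and importable), creating and closing an instance
looks like this:

.. code-block:: python

   from lammps import lammps

   # loads the LAMMPS shared library via ctypes and creates an instance
   lmp = lammps()
   print("LAMMPS version:", lmp.version())
   lmp.close()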

View File

@ -7,6 +7,7 @@ This section documents the following functions:
- :cpp:func:`lammps_command`
- :cpp:func:`lammps_commands_list`
- :cpp:func:`lammps_commands_string`
- :cpp:func:`lammps_expand`
--------------------
@ -79,3 +80,8 @@ Below is a short example using some of these functions.
.. doxygenfunction:: lammps_commands_string
:project: progguide
-----------------------
.. doxygenfunction:: lammps_expand
:project: progguide

View File

@ -1,5 +1,5 @@
Compute, fixes, variables
=========================
Computes, fixes, variables
==========================
This section documents accessing or modifying data stored by computes,
fixes, or variables in LAMMPS using the following functions:
@ -12,6 +12,7 @@ fixes, or variables in LAMMPS using the following functions:
- :cpp:func:`lammps_set_string_variable`
- :cpp:func:`lammps_set_internal_variable`
- :cpp:func:`lammps_variable_info`
- :cpp:func:`lammps_eval`
-----------------------
@ -55,6 +56,11 @@ fixes, or variables in LAMMPS using the following functions:
-----------------------
.. doxygenfunction:: lammps_eval
:project: progguide
-----------------------
.. doxygenenum:: _LMP_DATATYPE_CONST
.. doxygenenum:: _LMP_STYLE_CONST

View File

@ -208,20 +208,21 @@ Build system (strict)
LAMMPS currently supports two build systems: one that is based on
:doc:`traditional Makefiles <Build_make>` and one that is based on
:doc:`CMake <Build_cmake>`. Therefore, your contribution must be
compatible with and support both build systems.
:doc:`CMake <Build_cmake>`. As of fall 2024, it is no longer required
to support the traditional make build system. New packages may choose
to only support building with CMake. Additions to existing packages
must follow the requirements set by that package.
For a single pair of header and implementation files that are an
independent feature, it is usually only required to add them to
``src/.gitignore``.
For traditional make, if your contributed files or package depend on
other LAMMPS style files or packages also being installed
(e.g. because your file is a derived class from the other LAMMPS
class), then an ``Install.sh`` file is also needed to check for those
dependencies and modifications to ``src/Depend.sh`` to trigger the checks.
See other README and Install.sh files in other directories as
examples.
other LAMMPS style files or packages also being installed (e.g. because
your file is a derived class from the other LAMMPS class), then an
``Install.sh`` file is also needed to check for those dependencies and
modifications to ``src/Depend.sh`` to trigger the checks. See other
README and Install.sh files in other directories as examples.
Similarly, for CMake support, changes may need to be made to
``cmake/CMakeLists.txt``, some of the files in ``cmake/presets``, and

View File

@ -46,7 +46,7 @@ Include files (varied)
but instead should be initialized either in the initializer list of
the constructor or explicitly assigned in the body of the constructor.
If the member variable is relevant to the functionality of a class
(for example when it stores a value from a command line argument), the
(for example when it stores a value from a command-line argument), the
member variable declaration is followed by a brief comment explaining
its purpose and what its values can be. Class members that are
pointers should always be initialized to ``nullptr`` in the

View File

@ -2172,8 +2172,8 @@ the :doc:`Build extras <Build_extras>` page.
* ``src/OPENMP/README``
* :doc:`Accelerator packages <Speed_packages>`
* :doc:`OPENMP package <Speed_omp>`
* :doc:`Command line option -suffix/-sf omp <Run_options>`
* :doc:`Command line option -package/-pk omp <Run_options>`
* :doc:`Command-line option -suffix/-sf omp <Run_options>`
* :doc:`Command-line option -package/-pk omp <Run_options>`
* :doc:`package omp <package>`
* Search the :doc:`commands <Commands_all>` pages (:doc:`fix <Commands_fix>`, :doc:`compute <Commands_compute>`,
:doc:`pair <Commands_pair>`, :doc:`bond, angle, dihedral, improper <Commands_bond>`,

View File

@ -2,14 +2,8 @@ Per-atom properties
===================
Similar to what is described in :doc:`Library_atoms`, the instances of
:py:class:`lammps <lammps.lammps>`, :py:class:`PyLammps <lammps.PyLammps>`, or
:py:class:`IPyLammps <lammps.IPyLammps>` can be used to extract atom quantities
and modify some of them. The main difference between the interfaces is how the information
is exposed.
While the :py:class:`lammps <lammps.lammps>` is just a thin layer that wraps C API calls,
:py:class:`PyLammps <lammps.PyLammps>` and :py:class:`IPyLammps <lammps.IPyLammps>` expose
information as objects and properties.
:py:class:`lammps <lammps.lammps>` can be used to extract atom quantities
and modify some of them.
In some cases the data returned is a direct reference to the original data
inside LAMMPS cast to ``ctypes`` pointers. Where possible, the wrappers will
@ -25,10 +19,6 @@ against invalid accesses.
accordingly. These arrays can change sizes and order at every neighbor list
rebuild and atom sort event as atoms are migrating between subdomains.
.. tabs::
.. tab:: lammps API
.. code-block:: python
from lammps import lammps
@ -36,14 +26,30 @@ against invalid accesses.
lmp = lammps()
lmp.file("in.sysinit")
# Read/Write access via ctypes
nlocal = lmp.extract_global("nlocal")
x = lmp.extract_atom("x")
for i in range(nlocal):
print("(x,y,z) = (", x[i][0], x[i][1], x[i][2], ")")
# Read/Write access via NumPy arrays
atom_id = lmp.numpy.extract_atom("id")
atom_type = lmp.numpy.extract_atom("type")
x = lmp.numpy.extract_atom("x")
v = lmp.numpy.extract_atom("v")
f = lmp.numpy.extract_atom("f")
# set position in 2D simulation
x[0] = (1.0, 0.0)
# set position in 3D simulation
x[0] = (1.0, 0.0, 1.)
lmp.close()
**Methods**:
* :py:meth:`extract_atom() <lammps.lammps.extract_atom()>`: extract a per-atom quantity
@ -51,31 +57,3 @@ against invalid accesses.
**Numpy Methods**:
* :py:meth:`numpy.extract_atom() <lammps.numpy_wrapper.numpy_wrapper.extract_atom()>`: extract a per-atom quantity as numpy array
.. tab:: PyLammps/IPyLammps API
All atoms in the current simulation can be accessed by using the :py:attr:`PyLammps.atoms <lammps.PyLammps.atoms>` property.
Each element of this list is a :py:class:`Atom <lammps.Atom>` or :py:class:`Atom2D <lammps.Atom2D>` object. The attributes of
these objects provide access to their data (id, type, position, velocity, force, etc.):
.. code-block:: python
# access first atom
L.atoms[0].id
L.atoms[0].type
# access second atom
L.atoms[1].position
L.atoms[1].velocity
L.atoms[1].force
Some attributes can be changed:
.. code-block:: python
# set position in 2D simulation
L.atoms[0].position = (1.0, 0.0)
# set position in 3D simulation
L.atoms[0].position = (1.0, 0.0, 1.0)

View File

@ -6,11 +6,10 @@ Creating or deleting a LAMMPS object
====================================
With the Python interface the creation of a :cpp:class:`LAMMPS
<LAMMPS_NS::LAMMPS>` instance is included in the constructors for the
:py:class:`lammps <lammps.lammps>`, :py:class:`PyLammps <lammps.PyLammps>`,
and :py:class:`IPyLammps <lammps.IPyLammps>` classes.
Internally it will call either :cpp:func:`lammps_open` or :cpp:func:`lammps_open_no_mpi` from the C
library API to create the class instance.
<LAMMPS_NS::LAMMPS>` instance is included in the constructor for the
:py:class:`lammps <lammps.lammps>` class. Internally it will call either
:cpp:func:`lammps_open` or :cpp:func:`lammps_open_no_mpi` from the C library
API to create the class instance.
All arguments are optional. The *name* argument allows loading a
LAMMPS shared library that is named ``liblammps_machine.so`` instead of
@ -26,11 +25,7 @@ to run the Python module like the library interface on a subset of the
MPI ranks after splitting the communicator.
Here are simple examples using all three Python interfaces:
.. tabs::
.. tab:: lammps API
Here is a simple example using the LAMMPS Python interface:
.. code-block:: python
@ -48,86 +43,7 @@ Here are simple examples using all three Python interfaces:
# explicitly close and delete LAMMPS instance (optional)
lmp.close()
.. tab:: PyLammps API
The :py:class:`PyLammps <lammps.PyLammps>` class is a wrapper around the
:py:class:`lammps <lammps.lammps>` class and all of its lower level functions.
By default, it will create a new instance of :py:class:`lammps <lammps.lammps>` passing
along all arguments to the constructor of :py:class:`lammps <lammps.lammps>`.
.. code-block:: python
from lammps import PyLammps
# NOTE: argv[0] is set by the lammps class constructor
args = ["-log", "none"]
# create LAMMPS instance
L = PyLammps(cmdargs=args)
# get and print numerical version code
print("LAMMPS Version: ", L.version())
# explicitly close and delete LAMMPS instance (optional)
L.close()
:py:class:`PyLammps <lammps.PyLammps>` objects can also be created on top of an existing
:py:class:`lammps <lammps.lammps>` object:
.. code-block:: python
from lammps import lammps, PyLammps
...
# create LAMMPS instance
lmp = lammps(cmdargs=args)
# create PyLammps instance using previously created LAMMPS instance
L = PyLammps(ptr=lmp)
This is useful if you have to create the :py:class:`lammps <lammps.lammps>`
instance is a specific way, but want to take advantage of the
:py:class:`PyLammps <lammps.PyLammps>` interface.
.. tab:: IPyLammps API
The :py:class:`IPyLammps <lammps.IPyLammps>` class is an extension of the
:py:class:`PyLammps <lammps.PyLammps>` class. It has the same construction behavior. By
default, it will create a new instance of :py:class:`lammps` passing
along all arguments to the constructor of :py:class:`lammps`.
.. code-block:: python
from lammps import IPyLammps
# NOTE: argv[0] is set by the lammps class constructor
args = ["-log", "none"]
# create LAMMPS instance
L = IPyLammps(cmdargs=args)
# get and print numerical version code
print("LAMMPS Version: ", L.version())
# explicitly close and delete LAMMPS instance (optional)
L.close()
You can also initialize IPyLammps on top of an existing :py:class:`lammps` or :py:class:`PyLammps` object:
.. code-block:: python
from lammps import lammps, IPyLammps
...
# create LAMMPS instance
lmp = lammps(cmdargs=args)
# create PyLammps instance using previously created LAMMPS instance
L = PyLammps(ptr=lmp)
This is useful if you have to create the :py:class:`lammps <lammps.lammps>`
instance is a specific way, but want to take advantage of the
:py:class:`IPyLammps <lammps.IPyLammps>` interface.
In all of the above cases, same as with the :ref:`C library API <lammps_c_api>`, this will use the
Same as with the :ref:`C library API <lammps_c_api>`, this will use the
``MPI_COMM_WORLD`` communicator for the MPI library that LAMMPS was
compiled with.

View File

@ -1,24 +1,17 @@
Executing commands
==================
Once an instance of the :py:class:`lammps <lammps.lammps>`,
:py:class:`PyLammps <lammps.PyLammps>`, or
:py:class:`IPyLammps <lammps.IPyLammps>` class is created, there are
Once an instance of the :py:class:`lammps <lammps.lammps>` class is created, there are
multiple ways to "feed" it commands. In a way that is not very different from
running a LAMMPS input script, except that Python has many more facilities
for structured programming than the LAMMPS input script syntax. Furthermore
it is possible to "compute" what the next LAMMPS command should be.
.. tabs::
.. tab:: lammps API
Same as in the equivalent
:doc:`C library functions <Library_execute>`, commands can be read from a file, a
single string, a list of strings and a block of commands in a single
multi-line string. They are processed under the same boundary conditions
as the C library counterparts. The example below demonstrates the use
of :py:func:`lammps.file()`, :py:func:`lammps.command()`,
Same as in the equivalent :doc:`C library functions <Library_execute>`,
commands can be read from a file, a single string, a list of strings and a
block of commands in a single multi-line string. They are processed under the
same boundary conditions as the C library counterparts. The example below
demonstrates the use of :py:func:`lammps.file()`, :py:func:`lammps.command()`,
:py:func:`lammps.commands_list()`, and :py:func:`lammps.commands_string()`:
.. code-block:: python
@ -33,7 +26,7 @@ it is possible to "compute" what the next LAMMPS command should be.
lmp.command('variable zpos index 1.0')
# create 10 groups with 10 atoms each
cmds = ["group g{} id {}:{}".format(i,10*i+1,10*(i+1)) for i in range(10)]
cmds = [f"group g{i} id {10*i+1}:{10*(i+1)}" for i in range(10)]
lmp.commands_list(cmds)
# run commands from a multi-line string
@ -45,11 +38,9 @@ it is possible to "compute" what the next LAMMPS command should be.
"""
lmp.commands_string(block)
.. tab:: PyLammps/IPyLammps API
Unlike the lammps API, the PyLammps/IPyLammps APIs allow running LAMMPS
commands by calling equivalent member functions of :py:class:`PyLammps <lammps.PyLammps>`
and :py:class:`IPyLammps <lammps.IPyLammps>` instances.
For convenience, the :py:class:`lammps <lammps.lammps>` class also provides a
command wrapper ``cmd`` that turns any LAMMPS command into a regular function
call.
For instance, the following LAMMPS command
@ -57,8 +48,7 @@ it is possible to "compute" what the next LAMMPS command should be.
region box block 0 10 0 5 -0.5 0.5
can be executed with the lammps API using the following Python code if ``lmp`` is an
instance of :py:class:`lammps <lammps.lammps>`:
would normally be executed with the following Python code:
.. code-block:: python
@ -67,7 +57,7 @@ it is possible to "compute" what the next LAMMPS command should be.
lmp = lammps()
lmp.command("region box block 0 10 0 5 -0.5 0.5")
With the PyLammps interface, any LAMMPS command can be split up into arbitrary parts.
With the ``cmd`` wrapper, any LAMMPS command can be split up into arbitrary parts.
These parts are then passed to a member function with the name of the :doc:`command <Commands_all>`.
For the :doc:`region <region>` command that means the :code:`region()` method can be called.
The arguments of the command can be passed as one string, or
@ -75,53 +65,59 @@ it is possible to "compute" what the next LAMMPS command should be.
.. code-block:: python
from lammps import PyLammps
from lammps import lammps
L = PyLammps()
L = lammps()
# pass command parameters as one string
L.region("box block 0 10 0 5 -0.5 0.5")
L.cmd.region("box block 0 10 0 5 -0.5 0.5")
# OR pass them individually
L.region("box block", 0, 10, 0, 5, -0.5, 0.5)
L.cmd.region("box block", 0, 10, 0, 5, -0.5, 0.5)
In the latter example, all parameters except the first are Python numeric literals. The
member function takes the entire parameter list and transparently merges it into a single
command string.
The benefit of this approach is avoiding redundant command calls and easier
parameterization. In the lammps API parameterization needed to be done
manually by creating formatted command strings.
parameterization. With ``command``, ``commands_list``, and ``commands_string`` the
parameterization needs to be done manually by creating formatted command
strings.
.. code-block:: python
lmp.command("region box block %f %f %f %f %f %f" % (xlo, xhi, ylo, yhi, zlo, zhi))
In contrast, methods of PyLammps accept parameters directly and will convert
In contrast, methods of the ``cmd`` wrapper accept parameters directly and will convert
them automatically to a final command string.
.. code-block:: python
L.region("box block", xlo, xhi, ylo, yhi, zlo, zhi)
L.cmd.region("box block", xlo, xhi, ylo, yhi, zlo, zhi)
Using these facilities, the example shown for the lammps API can be rewritten as follows:
.. note::
When running in IPython you can use Tab-completion after ``L.cmd.`` to see
all available LAMMPS commands.
Using these facilities, the example shown above can be rewritten as follows:
.. code-block:: python
from lammps import PyLammps
L = PyLammps()
from lammps import lammps
L = lammps()
# read commands from file 'in.melt'
L.file('in.melt')
# issue a single command
L.variable('zpos', 'index', 1.0)
L.cmd.variable('zpos', 'index', 1.0)
# create 10 groups with 10 atoms each
for i in range(10):
L.group(f"g{i}", "id", f"{10*i+1}:{10*(i+1)}")
L.cmd.group(f"g{i}", "id", f"{10*i+1}:{10*(i+1)}")
L.clear()
L.region("box block", 0, 2, 0, 2, 0, 2)
L.create_box(1, "box")
L.create_atoms(1, "single", 1.0, 1.0, "${zpos}")
L.cmd.clear()
L.cmd.region("box block", 0, 2, 0, 2, 0, 2)
L.cmd.create_box(1, "box")
L.cmd.create_atoms(1, "single", 1.0, 1.0, "${zpos}")

View File

@ -15,6 +15,7 @@ together.
Python_call
Python_formats
Python_examples
Python_jupyter
Python_error
Python_trouble

View File

@ -0,0 +1,45 @@
Using LAMMPS in IPython notebooks and Jupyter
=============================================
If the LAMMPS Python package is installed for the same Python interpreter as
`IPython <ipython>`_, you can use LAMMPS directly inside an IPython notebook
running in Jupyter. `Jupyter <jupyter>`_ is a powerful integrated development
environment (IDE) for many dynamic languages like Python, Julia, and others,
which runs inside any web browser. Besides auto-completion and syntax
highlighting, it allows you to create formatted documents using Markdown,
mathematical formulas, graphics, and animations intermixed with executable
Python code. It is a great format for tutorials and for showcasing your latest
research.
The easiest way to install it is via ``pip``:
.. code-block:: bash
pip install --user jupyter
To launch an instance of Jupyter simply run the following command inside your
Python environment:
.. code-block:: bash
jupyter notebook
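Once the notebook server is running, a minimal sketch to verify that the
:py:mod:`lammps` module is usable from a notebook cell (assuming the module and
the LAMMPS shared library are installed for the same Python interpreter that
Jupyter uses) is:
.. code-block:: python
from lammps import lammps
# create a LAMMPS instance inside the notebook cell and print its version
lmp = lammps(cmdargs=["-log", "none"])
print("LAMMPS version:", lmp.version())
lmp.close()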
Interactive Python Examples
---------------------------
Examples of IPython notebooks can be found in the ``python/examples/ipython``
subdirectory. They require LAMMPS to be compiled as a shared library with PYTHON,
PNG, JPEG and FFMPEG support.
To open these notebooks launch ``jupyter notebook index.ipynb`` inside this
directory. The opened file provides an overview of the available examples.
- Example 1: Using LAMMPS with Python (``simple.ipynb``)
- Example 2: Analyzing LAMMPS thermodynamic data (``thermo.ipynb``)
- Example 3: Working with Per-Atom Data (``atoms.ipynb``)
- Example 4: Working with LAMMPS variables (``variables.ipynb``)
- Example 5: Validating a dihedral potential (``dihedrals/dihedral.ipynb``)
- Example 6: Running a Monte Carlo relaxation (``montecarlo/mc.ipynb``)
.. note::
Typically clicking a link in Jupyter will open a new tab, which might be blocked by your pop-up blocker.

View File

@ -10,19 +10,11 @@ be installed into a Python system folder or a user folder with ``make
install-python``. Components of the module can then be loaded into a Python
session with the ``import`` command.
There are multiple Python interface classes in the :py:mod:`lammps` module:
.. warning::
- the :py:class:`lammps <lammps.lammps>` class. This is a wrapper around
the C-library interface and its member functions try to replicate the
:ref:`C-library API <lammps_c_api>` closely. This is the most
feature-complete Python API.
- the :py:class:`PyLammps <lammps.PyLammps>` class. This is a more high-level
and more Python style class implemented on top of the
:py:class:`lammps <lammps.lammps>` class.
- the :py:class:`IPyLammps <lammps.IPyLammps>` class is derived from
:py:class:`PyLammps <lammps.PyLammps>` and adds embedded graphics
features to conveniently include LAMMPS into `Jupyter
<https://jupyter.org/>`_ notebooks.
Alternative interfaces such as the :py:class:`PyLammps <lammps.PyLammps>` and
:py:class:`IPyLammps <lammps.IPyLammps>` classes have been deprecated and
will be removed in a future version of LAMMPS.
.. _mpi4py_url: https://mpi4py.readthedocs.io
@ -49,7 +41,7 @@ The ``lammps`` class API
========================
The :py:class:`lammps <lammps.lammps>` class is the core of the LAMMPS
Python interfaces. It is a wrapper around the :ref:`LAMMPS C library
Python interface. It is a wrapper around the :ref:`LAMMPS C library
API <lammps_c_api>` using the `Python ctypes module
<https://docs.python.org/3/library/ctypes.html>`_ and a shared library
compiled from the LAMMPS source code. The individual methods in this
@ -64,40 +56,7 @@ functions. Below is a detailed documentation of the API.
.. autoclass:: lammps.numpy_wrapper::numpy_wrapper
:members:
----------
The ``PyLammps`` class API
==========================
The :py:class:`PyLammps <lammps.PyLammps>` class is a wrapper that creates a
simpler, more "Pythonic" interface to common LAMMPS functionality. LAMMPS
data structures are exposed through objects and properties. This makes Python
scripts shorter and more concise. See the :doc:`PyLammps Tutorial
<Howto_pylammps>` for an introduction on how to use this interface.
.. autoclass:: lammps.PyLammps
:members:
.. autoclass:: lammps.AtomList
:members:
.. autoclass:: lammps.Atom
:members:
.. autoclass:: lammps.Atom2D
:members:
----------
The ``IPyLammps`` class API
===========================
The :py:class:`IPyLammps <lammps.PyLammps>` class is an extension of
:py:class:`PyLammps <lammps.PyLammps>`, adding additional functions to
quickly display visualizations such as images and videos inside of IPython.
See the :doc:`PyLammps Tutorial <Howto_pylammps>` for examples.
.. autoclass:: lammps.IPyLammps
.. autoclass:: lammps.ipython::wrapper
:members:
----------

View File

@ -4,10 +4,6 @@ Compute, fixes, variables
This section documents accessing or modifying data from objects like
computes, fixes, or variables in LAMMPS using the :py:mod:`lammps` module.
.. tabs::
.. tab:: lammps API
For :py:meth:`lammps.extract_compute() <lammps.lammps.extract_compute()>` and
:py:meth:`lammps.extract_fix() <lammps.lammps.extract_fix()>`, the global, per-atom,
or local data calculated by the compute or fix can be accessed. What is returned
@ -57,42 +53,3 @@ computes, fixes, or variables in LAMMPS using the :py:mod:`lammps` module.
* :py:meth:`lammps.numpy.extract_compute() <lammps.numpy_wrapper.numpy_wrapper.extract_compute()>`: extract value(s) from a compute, return arrays as numpy arrays
* :py:meth:`lammps.numpy.extract_fix() <lammps.numpy_wrapper.numpy_wrapper.extract_fix()>`: extract value(s) from a fix, return arrays as numpy arrays
* :py:meth:`lammps.numpy.extract_variable() <lammps.numpy_wrapper.numpy_wrapper.extract_variable()>`: extract value(s) from a variable, return arrays as numpy arrays
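As a minimal, self-contained sketch of how the numpy variants can be used (the
small LJ setup and the compute ID ``ke`` below are only illustrative
assumptions, not part of the original examples):
.. code-block:: python
from lammps import lammps, LMP_STYLE_ATOM, LMP_TYPE_VECTOR
lmp = lammps(cmdargs=["-log", "none"])
# set up a small LJ system and define a per-atom kinetic energy compute
lmp.commands_string("""
units lj
lattice fcc 0.8442
region box block 0 4 0 4 0 4
create_box 1 box
create_atoms 1 box
mass 1 1.0
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0
velocity all create 1.44 87287
compute ke all ke/atom
run 0 post no
""")
# per-atom kinetic energies returned as a numpy array
ke = lmp.numpy.extract_compute("ke", LMP_STYLE_ATOM, LMP_TYPE_VECTOR)
print(ke.shape, ke.mean())
lmp.close()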
.. tab:: PyLammps/IPyLammps API
PyLammps and IPyLammps classes currently do not add any additional ways of
retrieving information out of computes and fixes. This information can still be accessed by using the lammps API:
.. code-block:: python
L.lmp.extract_compute(...)
L.lmp.extract_fix(...)
# OR
L.lmp.numpy.extract_compute(...)
L.lmp.numpy.extract_fix(...)
LAMMPS variables can be both defined and accessed via the :py:class:`PyLammps <lammps.PyLammps>` interface.
To define a variable you can use the :doc:`variable <variable>` command:
.. code-block:: python
L.variable("a index 2")
A dictionary of all variables is returned by the :py:attr:`PyLammps.variables <lammps.PyLammps.variables>` property:
you can access an individual variable by retrieving a variable object from the
``L.variables`` dictionary by name
.. code-block:: python
a = L.variables['a']
The variable value can then be easily read and written by accessing the value
property of this object.
.. code-block:: python
print(a.value)
a.value = 4

View File

@ -56,7 +56,7 @@ Below is an example output for Python version 3.8.5.
---------
LAMMPS can work together with Python in three ways. First, Python can
LAMMPS can work together with Python in two ways. First, Python can
wrap LAMMPS through its :doc:`library interface <Library>`, so
that a Python script can create one or more instances of LAMMPS and
launch one or more simulations. In Python terms, this is referred to as
@ -67,22 +67,7 @@ launch one or more simulations. In Python terms, this is referred to as
Launching LAMMPS via Python
Second, the lower-level Python interface in the :py:class:`lammps Python
class <lammps.lammps>` can be used indirectly through the provided
:py:class:`PyLammps <lammps.PyLammps>` and :py:class:`IPyLammps
<lammps.IPyLammps>` wrapper classes, also written in Python. These
wrappers try to simplify the usage of LAMMPS in Python by providing a
more object-based interface to common LAMMPS functionality. They also
reduce the amount of code necessary to parameterize LAMMPS scripts
through Python and make variables and computes directly accessible.
.. figure:: JPG/pylammps-invoke-lammps.png
:figclass: align-center
Using the PyLammps / IPyLammps wrappers
Third, LAMMPS can use the Python interpreter, so that a LAMMPS input
Second, LAMMPS can use the Python interpreter, so that a LAMMPS input
script or styles can invoke Python code directly, and pass information
back-and-forth between the input script and Python functions you write.
This Python code can also call back to LAMMPS to query or change its

View File

@ -2,14 +2,8 @@ System properties
=================
Similar to what is described in :doc:`Library_properties`, the instances of
:py:class:`lammps <lammps.lammps>`, :py:class:`PyLammps <lammps.PyLammps>`, or
:py:class:`IPyLammps <lammps.IPyLammps>` can be used to extract different kinds
of information about the active LAMMPS instance and also to modify some of it. The
main difference between the interfaces is how the information is exposed.
While the :py:class:`lammps <lammps.lammps>` is just a thin layer that wraps C API calls,
:py:class:`PyLammps <lammps.PyLammps>` and :py:class:`IPyLammps <lammps.IPyLammps>` expose
information as objects and properties.
:py:class:`lammps <lammps.lammps>` can be used to extract different kinds
of information about the active LAMMPS instance and also to modify some of it.
In some cases the data returned is a direct reference to the original data
inside LAMMPS cast to ``ctypes`` pointers. Where possible, the wrappers will
@ -25,10 +19,6 @@ against invalid accesses.
accordingly. These arrays can change sizes and order at every neighbor list
rebuild and atom sort event as atoms are migrating between subdomains.
.. tabs::
.. tab:: lammps API
.. code-block:: python
from lammps import lammps
@ -64,74 +54,3 @@ against invalid accesses.
**Properties**:
* :py:attr:`last_thermo_step <lammps.lammps.last_thermo_step>`: the last timestep thermodynamic output was computed
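For illustration, a brief sketch of a few of these query functions and the
property listed above (assuming ``lmp`` is an already initialized
:py:class:`lammps <lammps.lammps>` instance with a system defined):
.. code-block:: python
# 'lmp' is assumed to be a lammps instance with a system already defined
natoms = lmp.get_natoms()              # total number of atoms
boxlo, boxhi = lmp.extract_box()[:2]   # lower and upper box bounds
dt = lmp.extract_global("dt")          # current timestep size
step = lmp.last_thermo_step            # last step with thermo output
print(natoms, boxlo, boxhi, dt, step)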
.. tab:: PyLammps/IPyLammps API
In addition to the functions provided by :py:class:`lammps <lammps.lammps>`, :py:class:`PyLammps <lammps.PyLammps>` objects
have several properties which allow you to query the system state:
L.system
Is a dictionary describing the system such as the bounding box or number of atoms
L.system.xlo, L.system.xhi
bounding box limits along x-axis
L.system.ylo, L.system.yhi
bounding box limits along y-axis
L.system.zlo, L.system.zhi
bounding box limits along z-axis
L.communication
configuration of communication subsystem, such as the number of threads or processors
L.communication.nthreads
number of threads used by each LAMMPS process
L.communication.nprocs
number of MPI processes used by LAMMPS
L.fixes
List of fixes in the current system
L.computes
List of active computes in the current system
L.dump
List of active dumps in the current system
L.groups
List of groups present in the current system
**Retrieving the value of an arbitrary LAMMPS expressions**
LAMMPS expressions can be immediately evaluated by using the ``eval`` method. The
passed string parameter can be any expression containing global :doc:`thermo` values,
variables, compute or fix data (see :doc:`Howto_output`):
.. code-block:: python
result = L.eval("ke") # kinetic energy
result = L.eval("pe") # potential energy
result = L.eval("v_t/2.0")
**Example**
.. code-block:: python
from lammps import PyLammps
L = PyLammps()
L.file("in.sysinit")
print(f"running simulation with {L.system.natoms} atoms")
L.run(1000, "post no");
for i in range(10):
L.run(100, "pre no post no")
pe = L.eval("pe")
ke = L.eval("ke")
print(f"PE = {pe}\nKE = {ke}")

View File

@ -1,8 +1,8 @@
Basics of running LAMMPS
========================
LAMMPS is run from the command line, reading commands from a file via
the ``-in`` command line flag, or from standard input. Using the ``-in
LAMMPS is run from the command-line, reading commands from a file via
the ``-in`` command-line flag, or from standard input. Using the ``-in
in.file`` variant is recommended (see note below). The name of the
LAMMPS executable is either ``lmp`` or ``lmp_<machine>`` with
`<machine>` being the machine string used when compiling LAMMPS. This
@ -25,7 +25,7 @@ build LAMMPS:
You normally run the LAMMPS command in the directory where your input
script is located. That is also where output files are produced by
default, unless you provide specific other paths in your input script or
on the command line. As in some of the examples above, the LAMMPS
on the command-line. As in some of the examples above, the LAMMPS
executable itself can be placed elsewhere.
.. note::

View File

@ -632,7 +632,7 @@ the ``-package omp`` command-line switch or the :doc:`package omp <package>` com
The :doc:`suffix <suffix>` command can also be used within an input
script to set a suffix, or to turn off or back on any suffix setting
made via the command line.
made via the command-line.
----------

View File

@ -20,7 +20,7 @@ To run with 4 threads, you can type this:
lmp -in in.lj.lmp -k on t 4 -sf kk
Alternately, you can also install a package with LAMMPS-GUI included and
open the LAMMPS-GUI app (the package includes the command line version
open the LAMMPS-GUI app (the package includes the command-line version
of LAMMPS as well) and open the input file in the GUI and run it from
there. For details on LAMMPS-GUI, see :doc:`Howto_lammps_gui`.

View File

@ -31,7 +31,8 @@ Coulombics. It has the following general features:
(for Nvidia GPUs, AMD GPUs, Intel GPUs, and multicore CPUs).
so that the same functionality is supported on a variety of hardware.
**Required hardware/software:**
Required hardware/software
""""""""""""""""""""""""""
To compile and use this package in CUDA mode, you currently need
to have an NVIDIA GPU and install the corresponding NVIDIA CUDA
@ -69,12 +70,14 @@ To compile and use this package in HIP mode, you have to have the AMD ROCm
software installed. Versions of ROCm older than 3.5 are currently deprecated
by AMD.
**Building LAMMPS with the GPU package:**
Building LAMMPS with the GPU package
""""""""""""""""""""""""""""""""""""
See the :ref:`Build extras <gpu>` page for
instructions.
**Run with the GPU package from the command line:**
Run with the GPU package from the command-line
""""""""""""""""""""""""""""""""""""""""""""""
The ``mpirun`` or ``mpiexec`` command sets the total number of MPI tasks
used by LAMMPS (one or multiple per compute node) and the number of MPI
@ -133,7 +136,8 @@ affect the setting for bonded interactions (LAMMPS default is "on").
The "off" setting for pairwise interaction is currently required for
GPU package pair styles.
**Or run with the GPU package by editing an input script:**
Run with the GPU package by editing an input script
"""""""""""""""""""""""""""""""""""""""""""""""""""
The discussion above for the ``mpirun`` or ``mpiexec`` command, MPI
tasks/node, and use of multiple MPI tasks/GPU is the same.
@ -149,7 +153,8 @@ You must also use the :doc:`package gpu <package>` command to enable the
GPU package, unless the ``-sf gpu`` or ``-pk gpu`` :doc:`command-line switches <Run_options>` were used. It specifies the number of
GPUs/node to use, as well as other options.
**Speed-ups to expect:**
Speed-up to expect
""""""""""""""""""
The performance of a GPU versus a multicore CPU is a function of your
hardware, which pair style is used, the number of atoms/GPU, and the
@ -176,10 +181,13 @@ better with multiple OMP threads because the inter-process communication
is higher for these styles with the GPU package in order to allow
deterministic results.
**Guidelines for best performance:**
Guidelines for best performance
"""""""""""""""""""""""""""""""
* Using multiple MPI tasks per GPU will often give the best performance,
as allowed by most multicore CPU/GPU configurations.
* Using multiple MPI tasks (2-10) per GPU will often give the best
performance, as allowed by most multicore CPU/GPU configurations.
Using too many MPI tasks will result in worse performance due to
the overhead growing with the number of MPI tasks.
* If the number of particles per MPI task is small (e.g. 100s of
particles), it can be more efficient to run with fewer MPI tasks per
GPU, even if you do not use all the cores on the compute node.
@ -199,12 +207,13 @@ deterministic results.
:doc:`angle <angle_style>`, :doc:`dihedral <dihedral_style>`,
:doc:`improper <improper_style>`, and :doc:`long-range <kspace_style>`
calculations will not be included in the "Pair" time.
* Since only part of the pppm kspace style is GPU accelerated, it
may be faster to only use GPU acceleration for Pair styles with
long-range electrostatics. See the "pair/only" keyword of the
package command for a shortcut to do that. The work between kspace
on the CPU and non-bonded interactions on the GPU can be balanced
through adjusting the coulomb cutoff without loss of accuracy.
* Since only part of the pppm kspace style is GPU accelerated, it may be
faster to only use GPU acceleration for Pair styles with long-range
electrostatics. See the "pair/only" keyword of the :doc:`package
command <package>` for a shortcut to do that. The distribution of
work between kspace on the CPU and non-bonded interactions on the GPU
can be balanced through adjusting the coulomb cutoff without loss of
accuracy.
* When the *mode* setting for the package gpu command is force/neigh,
the time for neighbor list calculations on the GPU will be added into
the "Pair" time, not the "Neigh" time. An additional breakdown of the
@ -220,4 +229,6 @@ deterministic results.
Restrictions
""""""""""""
None.
When using :doc:`hybrid pair styles <pair_hybrid>`, the neighbor list
must be generated on the host instead of the GPU and thus the potential
GPU acceleration is reduced.

View File

@ -1,5 +1,5 @@
INTEL package
==================
=============
The INTEL package is maintained by Mike Brown at Intel
Corporation. It provides two methods for accelerating simulations,
@ -13,18 +13,18 @@ twice, once on the CPU and once with an offload flag. This allows
LAMMPS to run on the CPU cores and co-processor cores simultaneously.
Currently Available INTEL Styles
"""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""
* Angle Styles: charmm, harmonic
* Bond Styles: fene, fourier, harmonic
* Bond Styles: fene, harmonic
* Dihedral Styles: charmm, fourier, harmonic, opls
* Fixes: nve, npt, nvt, nvt/sllod, nve/asphere
* Fixes: nve, npt, nvt, nvt/sllod, nve/asphere, electrode/conp, electrode/conq, electrode/thermo
* Improper Styles: cvff, harmonic
* Pair Styles: airebo, airebo/morse, buck/coul/cut, buck/coul/long,
buck, dpd, eam, eam/alloy, eam/fs, gayberne, lj/charmm/coul/charmm,
lj/charmm/coul/long, lj/cut, lj/cut/coul/long, lj/long/coul/long,
rebo, sw, tersoff
* K-Space Styles: pppm, pppm/disp
rebo, snap, sw, tersoff
* K-Space Styles: pppm, pppm/disp, pppm/electrode
.. warning::
@ -33,7 +33,7 @@ Currently Available INTEL Styles
input requires it, LAMMPS will abort with an error message.
Speed-up to expect
"""""""""""""""""""
""""""""""""""""""
The speedup will depend on your simulation, the hardware, which
styles are used, the number of atoms, and the floating-point
@ -312,21 +312,21 @@ almost all cases.
recommended, especially when running on a machine with Intel
Hyper-Threading technology disabled.
Run with the INTEL package from the command line
"""""""""""""""""""""""""""""""""""""""""""""""""""""
Run with the INTEL package from the command-line
""""""""""""""""""""""""""""""""""""""""""""""""
To enable INTEL optimizations for all available styles used in
the input script, the ``-sf intel`` :doc:`command-line switch <Run_options>` can be used without any requirement for
editing the input script. This switch will automatically append
"intel" to styles that support it. It also invokes a default command:
:doc:`package intel 1 <package>`. This package command is used to set
options for the INTEL package. The default package command will
specify that INTEL calculations are performed in mixed precision,
that the number of OpenMP threads is specified by the OMP_NUM_THREADS
environment variable, and that if co-processors are present and the
binary was built with offload support, that 1 co-processor per node
will be used with automatic balancing of work between the CPU and the
co-processor.
To enable INTEL optimizations for all available styles used in the input
script, the ``-sf intel`` :doc:`command-line switch <Run_options>` can
be used without any requirement for editing the input script. This
switch will automatically append "intel" to styles that support it. It
also invokes a default command: :doc:`package intel 1 <package>`. This
package command is used to set options for the INTEL package. The
default package command will specify that INTEL calculations are
performed in mixed precision, that the number of OpenMP threads is
specified by the OMP_NUM_THREADS environment variable, and that if
co-processors are present and the binary was built with offload support,
that 1 co-processor per node will be used with automatic balancing of
work between the CPU and the co-processor.
You can specify different options for the INTEL package by using
the ``-pk intel Nphi`` :doc:`command-line switch <Run_options>` with

View File

@ -77,7 +77,7 @@ version 23 November 2023 and Kokkos version 4.2.
rank. When running with multiple MPI ranks, you may see segmentation
faults without GPU-aware MPI support. These can be avoided by adding
the flags :doc:`-pk kokkos gpu/aware off <Run_options>` to the
LAMMPS command line or by using the command :doc:`package kokkos
LAMMPS command-line or by using the command :doc:`package kokkos
gpu/aware off <package>` in the input file.
.. admonition:: Intel Data Center GPU support
@ -423,7 +423,7 @@ in the ``kokkos-cuda.cmake`` CMake preset file.
cmake -DKokkos_ENABLE_CUDA=yes -DKokkos_ENABLE_OPENMP=yes ../cmake
The suffix "/kk" is equivalent to "/kk/device", and for Kokkos CUDA,
using the ``-sf kk`` in the command line gives the default CUDA version
using the ``-sf kk`` in the command-line gives the default CUDA version
everywhere. However, if the "/kk/host" suffix is added to a specific
style in the input script, the Kokkos OpenMP (CPU) version of that
specific style will be used instead. Set the number of OpenMP threads
@ -439,7 +439,7 @@ For example, the command to run with 1 GPU and 8 OpenMP threads is then:
mpiexec -np 1 lmp_kokkos_cuda_openmpi -in in.lj -k on g 1 t 8 -sf kk
Conversely, if the ``-sf kk/host`` is used in the command line and then
Conversely, if the ``-sf kk/host`` is used in the command-line and then
the "/kk" or "/kk/device" suffix is added to a specific style in your
input script, then only that specific style will run on the GPU while
everything else will run on the CPU in OpenMP mode. Note that the
@ -451,7 +451,7 @@ on the host CPU can overlap with a pair style running on the
GPU. First compile with ``--default-stream per-thread`` added to ``CCFLAGS``
in the Kokkos CUDA Makefile. Then explicitly use the "/kk/host"
suffix for kspace and bonds, angles, etc. in the input file and the
"kk" suffix (equal to "kk/device") on the command line. Also make
"kk" suffix (equal to "kk/device") on the command-line. Also make
sure the environment variable ``CUDA_LAUNCH_BLOCKING`` is not set to "1"
so CPU/GPU overlap can occur.

View File

@ -21,7 +21,7 @@ Building LAMMPS with the OPENMP package
See the :ref:`Build extras <openmp>` page for
instructions.
Run with the OPENMP package from the command line
Run with the OPENMP package from the command-line
"""""""""""""""""""""""""""""""""""""""""""""""""""
These examples assume one or more 16-core nodes.

View File

@ -17,7 +17,7 @@ Building LAMMPS with the OPT package
See the :ref:`Build extras <opt>` page for instructions.
Run with the OPT package from the command line
Run with the OPT package from the command-line
""""""""""""""""""""""""""""""""""""""""""""""
.. code-block:: bash

View File

@ -501,7 +501,7 @@ Here are a few highlights of LAMMPS-GUI
- Indicator for line that caused an error
- Visualization of current state in Image Viewer (via calling :doc:`write_dump image <dump_image>`)
- Capture of images created via :doc:`dump image <dump_image>` in Slide show window
- Dialog to set variables, similar to the LAMMPS command line flag '-v' / '-var'
- Dialog to set variables, similar to the LAMMPS command-line flag '-v' / '-var'
- Support for the GPU, INTEL, KOKKOS/OpenMP, OPENMP, and OPT accelerator packages
Parallelization
@ -550,7 +550,7 @@ will be found automatically. 2) you can download the `Flatpak file
*flatpak* command: ``flatpak install --user
LAMMPS-Linux-x86_64-GUI-<version>.flatpak`` and run it with ``flatpak
run org.lammps.lammps-gui``. The flatpak bundle also includes the
command line version of LAMMPS and some LAMMPS tools like msi2lmp. The
command-line version of LAMMPS and some LAMMPS tools like msi2lmp. These
can be launched by using the ``--command`` flag. For example, to run
LAMMPS directly on the ``in.lj`` benchmark input you would type in the
``bench`` folder: ``flatpak run --command=lmp -in in.lj``. The flatpak
@ -608,10 +608,10 @@ would be the ``examples/COUPLE/plugin`` folder of the LAMMPS
distribution.
When compiling LAMMPS-GUI with plugin support, there is an additional
command line flag (``-p <path>`` or ``--pluginpath <path>``) which
command-line flag (``-p <path>`` or ``--pluginpath <path>``) which
allows overriding the path to the LAMMPS shared library used by the GUI.
This is usually auto-detected on the first run and can be changed in the
LAMMPS-GUI *Preferences* dialog. The command line flag allows to reset
LAMMPS-GUI *Preferences* dialog. The command-line flag allows resetting
this path to a valid value in case the original setting has become
invalid. An empty path ("") as argument restores the default setting.
@ -656,7 +656,7 @@ it will create a compressed ``LAMMPS-Win10-amd64.zip`` zip file with the
executables and required dependent .dll files. This zip file can be
uncompressed and ``lammps-gui.exe`` run directly from there. The
uncompressed folder can be added to the ``PATH`` environment and LAMMPS
and LAMMPS-GUI can be launched from anywhere from the command line.
and LAMMPS-GUI can be launched from anywhere from the command-line.
**MinGW64 Cross-compiler**
@ -876,7 +876,7 @@ the same ``LAMMPS_CACHING_DIR``. This script does the following:
#. Start a simple local HTTP server using Python to host files for CMake
Afterwards, it will print out instructions on how to modify the CMake
command line to make sure it uses the local HTTP server.
commands to make sure it uses the local HTTP server.
To undo the environment changes and shutdown the local HTTP server,
run the ``deactivate_caches`` command.
@ -1025,7 +1025,7 @@ with those in the provided log file with the same number of processors
in the same subdirectory. If the differences between the actual and
reference values are within specified tolerances, the test is considered
passed. For each test batch, that is, a set of example input scripts,
the mpirun command, the LAMMPS command line arguments, and the
the mpirun command, the LAMMPS command-line arguments, and the
tolerances for individual thermo quantities can be specified in a
configuration file in YAML format.

94
doc/src/angle_mwlc.rst Normal file
View File

@ -0,0 +1,94 @@
.. index:: angle_style mwlc
angle_style mwlc command
==========================
Syntax
""""""
.. code-block:: LAMMPS
angle_style mwlc
Examples
""""""""
.. code-block:: LAMMPS
angle_style mwlc
angle_coeff * 25 1 10 1
Description
"""""""""""
.. versionadded:: TBD
The *mwlc* angle style models a meltable wormlike chain and can be used
to model non-linear bending elasticity of polymers, e.g. DNA. *mwlc*
uses a potential that is a canonical-ensemble superposition of a
non-melted and a melted state :ref:`(Farrell) <Farrell>`. The potential
is
.. math::
E = -k_{B}T\,\log [q + q^{m}] + E_{0},
where the non-melted and melted partition functions are
.. math::
q = \exp [-k_{1}(1+\cos{\theta})/k_{B}T]; \\
q^{m} = \exp [-(\mu+k_{2}(1+\cos{\theta}))/k_{B}T].
:math:`k_1` is the bending elastic constant of the non-melted state,
:math:`k_2` is the bending elastic constant of the melted state,
:math:`\mu` is the melting energy, and
:math:`T` is the reference temperature.
The reference energy,
.. math::
E_{0} = -k_{B}T\,\log [1 + \exp[-\mu/k_{B}T]],
ensures that :math:`E` is zero for a fully extended chain.
This potential is a continuous version of the two-state potential
introduced by :ref:`(Yan) <Yan>`.
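As a consistency check (an observation added here, not taken from the cited
references), in the limit of a large melting energy the melted state becomes
inaccessible and the standard wormlike-chain bending energy is recovered:
.. math::
\lim_{\mu \to \infty} E = -k_{B}T\,\log q = k_{1}(1+\cos{\theta}),
since both :math:`q^{m}` and :math:`E_{0}` vanish in this limit.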
The following coefficients must be defined for each angle type via the
:doc:`angle_coeff <angle_coeff>` command as in the example above, or in
the data file or restart files read by the :doc:`read_data <read_data>`
or :doc:`read_restart <read_restart>` commands:
* :math:`k_1` (energy)
* :math:`k_2` (energy)
* :math:`\mu` (energy)
* :math:`T` (temperature)
----------
Restrictions
""""""""""""
This angle style can only be used if LAMMPS was built with the
EXTRA-MOLECULE package. See the :doc:`Build package <Build_package>`
doc page for more info.
Related commands
""""""""""""""""
:doc:`angle_coeff <angle_coeff>`
Default
"""""""
none
----------
.. _Farrell:
**(Farrell)** `Farrell, Dobnikar, Podgornik, Curk, Phys Rev Lett, 133, 148101 (2024). <https://doi.org/10.1103/PhysRevLett.133.148101>`_
.. _Yan:
**(Yan)** `Yan, Marko, Phys Rev Lett, 93, 108108 (2004). <https://doi.org/10.1103/PhysRevLett.93.108108>`_

View File

@ -94,6 +94,7 @@ of (g,i,k,o,t) to indicate which accelerated styles exist.
* :doc:`lepton <angle_lepton>` - angle potential from evaluating a string
* :doc:`mesocnt <angle_mesocnt>` - piecewise harmonic and linear angle for bending-buckling of nanotubes
* :doc:`mm3 <angle_mm3>` - anharmonic angle
* :doc:`mwlc <angle_mwlc>` - meltable wormlike chain
* :doc:`quartic <angle_quartic>` - angle with cubic and quartic terms
* :doc:`spica <angle_spica>` - harmonic angle with repulsive SPICA pair style between 1-3 atoms
* :doc:`table <angle_table>` - tabulated by angle

View File

@ -184,11 +184,24 @@ temp/chunk calculation to a file is to use the
The keyword/value option pairs are used in the following ways.
The *com* keyword can be used with a value of *yes* to subtract the
velocity of the center-of-mass for each chunk from the velocity of the
atoms in that chunk, before calculating either the global or per-chunk
velocity of the center-of-mass (VCM) for each chunk from the velocity of
the atoms in that chunk, before calculating either the global or per-chunk
temperature. This can be useful if the atoms are streaming or
otherwise moving collectively, and you wish to calculate only the
thermal temperature.
thermal temperature. This per-chunk VCM bias can be used in other fixes and
computes that can incorporate a temperature bias. If this compute is used
as a temperature bias in other commands then this bias is subtracted from
each atom, the command runs with the remaining thermal velocities, and
then the bias is added back in. This includes thermostatting
fixes like :doc:`fix nvt <fix_nh>`,
:doc:`fix temp/rescale <fix_temp_rescale>`,
:doc:`fix temp/berendsen <fix_temp_berendsen>`, and
:doc:`fix langevin <fix_langevin>`, and computes like
:doc:`compute stress/atom <compute_stress_atom>` and
:doc:`compute pressure <compute_pressure>`. See the input script in
examples/stress_vcm for an example of how to use the *com* keyword in
conjunction with compute stress/atom to create a stress profile of a rigid
body while removing the overall motion of the rigid body.
For the *bias* keyword, *bias-ID* refers to the ID of a temperature
compute that removes a "bias" velocity from each atom. This also

View File

@ -681,7 +681,7 @@ MPEG or other movie file you can use:
* c) Use FFmpeg
FFmpeg is a command line tool that is available on many platforms and
FFmpeg is a command-line tool that is available on many platforms and
allows extremely flexible encoding and decoding of movies.
.. code-block:: bash

View File

@ -406,6 +406,8 @@ sub-style name. The angle styles that currently work with fix adapt are:
+--------------------------------------------------------------------+--------------------+-------------+
| :doc:`mm3 <angle_mm3>` | k,theta0 | type angles |
+--------------------------------------------------------------------+--------------------+-------------+
| :doc:`mwlc <angle_mwlc>` | k1,k2,mu,T | type angles |
+--------------------------------------------------------------------+--------------------+-------------+
| :doc:`quartic <angle_quartic>` | k2,k3,k4,theta0 | type angles |
+--------------------------------------------------------------------+--------------------+-------------+
| :doc:`spica <angle_spica>` | k,theta0 | type angles |

View File

@ -26,6 +26,29 @@ Syntax
*nowait* arg = *on* or *off*
off = LAMMPS waits to be connected to an IMD client before continuing (default)
on = LAMMPS listens for an IMD client, but continues with the run
*version* arg = *2* or *3*
2 = use IMD protocol version 2 (default)
3 = use IMD protocol version 3.
The following keywords are only supported for IMD protocol version 3.
.. parsed-literal::
*time* arg = *on* or *off*
off = simulation time is not transmitted (default)
on = simulation time is transmitted.
*box* arg = *on* or *off*
off = simulation box data is not transmitted (default)
on = simulation box data is transmitted.
*coordinates* arg = *on* or *off*
off = atomic coordinates are not transmitted (default)
on = atomic coordinates are transmitted.
*velocities* arg = *on* or *off*
off = atomic velocities are not transmitted (default)
on = atomic velocities are transmitted.
*forces* arg = *on* or *off*
off = atomic forces are not transmitted (default)
on = atomic forces are transmitted.
Examples
""""""""
@ -40,16 +63,19 @@ Description
This fix implements the "Interactive MD" (IMD) protocol which allows
realtime visualization and manipulation of MD simulations through the
IMD protocol, as initially implemented in VMD and NAMD. Specifically
it allows LAMMPS to connect an IMD client, for example the `VMD visualization program <VMD_>`_, so that it can monitor the progress of the
simulation and interactively apply forces to selected atoms.
IMD protocol, as initially implemented in VMD and NAMD. Specifically, it
allows LAMMPS to be connected to an IMD client, for example the `VMD
visualization program <VMD_>`_ (currently only supports IMDv2) or the
`Python IMDClient <IMDClient_>`_ (supports both IMDv2 and IMDv3), so
that it can monitor the progress of the simulation and interactively
apply forces to selected atoms.
If LAMMPS is compiled with the pre-processor flag -DLAMMPS_ASYNC_IMD
then fix imd will use POSIX threads to spawn a IMD communication
thread on MPI rank 0 in order to offload data reading and writing
from the main execution thread and potentially lower the inferred
latencies for slow communication links. This feature has only been
tested under linux.
If LAMMPS is compiled with the pre-processor flag
:ref:`-DLAMMPS_ASYNC_IMD <misc>` then fix imd will use POSIX threads to
spawn an IMD communication thread on MPI rank 0 in order to offload data
exchange with the IMD client from the main execution thread and
potentially lower the inferred latencies for slow communication
links. This feature has only been tested under Linux.
The source code for this fix includes code developed by the Theoretical
and Computational Biophysics Group in the Beckman Institute for Advanced
@ -94,10 +120,19 @@ with different units or as a measure to tweak the forces generated by
the manipulation of the IMD client, this option allows you to make
adjustments.
.. versionadded:: TBD
In `IMDv3 <IMDv3_>`_, the IMD protocol has been extended to allow for
the transmission of simulation time, box dimensions, atomic coordinates,
velocities, and forces. The *version* keyword selects which
version of the protocol is used. The *time*, *box*, *coordinates*,
*velocities*, and *forces* keywords select which data is
transmitted to the IMD client. The default is to transmit all data.
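As a rough client-side sketch along the lines of the example bundled with this
fix (assuming the ``imdclient`` and ``MDAnalysis`` Python packages are
installed, a matching data file is available, and LAMMPS is listening on port
5678 with ``version 3``):
.. code-block:: python
# importing IMDReader makes the imd:// trajectory stream readable by MDAnalysis
from imdclient.IMD import IMDReader
import MDAnalysis as mda
u = mda.Universe("data.deca-ala-solv", "imd://localhost:5678", topology_format="DATA")
for ts in u.trajectory:
    # only the data enabled via the time/box/coordinates/velocities/forces keywords is available
    print(ts.time, ts.dimensions)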
To connect VMD to a listening LAMMPS simulation on the same machine
with fix imd enabled, one needs to start VMD and load a coordinate or
topology file that matches the fix group. When the VMD command
prompts appears, one types the command line:
prompt appears, one types the command:
.. parsed-literal::
@ -129,6 +164,10 @@ screen output is active.
.. _imdvmd: https://www.ks.uiuc.edu/Research/vmd/imd/
.. _IMDClient: https://github.com/Becksteinlab/imdclient/tree/main/imdclient
.. _IMDv3: https://imdclient.readthedocs.io/en/latest/protocol_v3.html
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
@ -147,14 +186,14 @@ This fix is part of the MISC package. It is only enabled if LAMMPS was
built with that package. See the :doc:`Build package <Build_package>`
page for more info.
When used in combination with VMD, a topology or coordinate file has
to be loaded, which matches (in number and ordering of atoms) the
group the fix is applied to. The fix internally sorts atom IDs by
ascending integer value; in VMD (and thus the IMD protocol) those will
be assigned 0-based consecutive index numbers.
When used in combination with VMD, a topology or coordinate file has to
be loaded, which matches (in number and ordering of atoms) the group the
fix is applied to. The fix internally sorts atom IDs by ascending
integer value; in VMD (and thus the IMD protocol) those will be assigned
0-based consecutive index numbers.
When using multiple active IMD connections at the same time, each
needs to use a different port number.
fix instance needs to use a different port number.
Related commands
""""""""""""""""

View File

@ -135,7 +135,7 @@ directions for the forces. Only the direction of the vector is
important; its length is ignored (the entered vectors are
normalized).
Those styles can be combined within one single command line.
Those styles can be combined within a single command.
.. note::

View File

@ -1084,10 +1084,11 @@ the form of *key_name_key*-*key_name_value* pairs). For example,
kim property modify 1 key mass source-value 26.98154
kim property modify 1 key mass source-unit amu
where the special keyword "key" is followed by a *key_name* ("species" or
"mass" in the above) and one or more key-value pairs. These key-value pairs
may continue until either another "key" keyword is given or the end of the
command line is reached. Thus, the above could equivalently be written as
where the special keyword "key" is followed by a *key_name* ("species"
or "mass" in the above) and one or more key-value pairs. These
key-value pairs may continue until either another "key" keyword is given
or the end of the line is reached. Thus, the above could equivalently
be written as
.. code-block:: LAMMPS

View File

@ -24,12 +24,12 @@ Description
"""""""""""
Label this line of the input script with the chosen ID. Unless a jump
command was used previously, this does nothing. But if a
:doc:`jump <jump>` command was used with a label argument to begin
invoking this script file, then all command lines in the script prior
to this line will be ignored. I.e. execution of the script will begin
at this line. This is useful for looping over a section of the input
script as discussed in the :doc:`jump <jump>` command.
command was used previously, this does nothing. But if a :doc:`jump
<jump>` command was used with a label argument to begin invoking this
script file, then all commands in the script prior to this line will be
ignored. I.e. execution of the script will begin at this line. This is
useful for looping over a section of the input script as discussed in
the :doc:`jump <jump>` command.
Restrictions
""""""""""""

View File

@ -504,7 +504,7 @@ as it is for non-accelerated pair styles
The *binsize* keyword sets the size of bins used to bin atoms during
neighbor list builds. The same value can be set by the
:doc:`neigh_modify binsize <neigh_modify>` command. Making it an option
in the package kokkos command allows it to be set from the command line.
in the package kokkos command allows it to be set from the command-line.
The default value for CPUs is 0.0, which means the LAMMPS default will be
used, which is bins = 1/2 the size of the pairwise cutoff + neighbor skin
distance. This is fine when neighbor lists are built on the CPU. For GPU
@ -664,7 +664,7 @@ too.
Also note that if the :doc:`-sf hybrid intel omp command-line switch <Run_options>` is used, it invokes a "package intel" command, followed by a
"package omp" command, both with a setting of *Nthreads* = 0. Likewise
for a hybrid suffix for gpu and omp. Note that KOKKOS also supports
setting the number of OpenMP threads from the command line using the
setting the number of OpenMP threads from the command-line using the
"-k on" :doc:`command-line switch <Run_options>`. The default for
KOKKOS is 1 thread per MPI task, so any other number of threads should
be explicitly set using the "-k on" command-line switch (and this

View File

@ -111,8 +111,8 @@ routines. For x-86 machines, there is a provided Makefile.mgptfast
which enables the fast algebra routines, i.e. build LAMMPS with "make
mgptfast". The user will be informed in the output files of the
matrix kernels in use. To further improve speed, on x86 the option
precision single can be added to the :doc:`pair_coeff <pair_coeff>`
command line, which improves speed (up to a factor of two) at the cost
*precision single* can be added to the :doc:`pair_coeff <pair_coeff>`
command, which improves speed (up to a factor of two) at the cost
of doing matrix calculations with 7 digit precision instead of the
default 16. For consistency the default option can be specified
explicitly by the option precision double.

View File

@ -131,7 +131,7 @@ command.
* LJ cutoff (distance units)
The last two coefficients are optional and default to the global values from
the *pair_style* command line.
the *pair_style* command.
----------

View File

@ -48,9 +48,9 @@ meaning that the trajectories of a restarted run will precisely match
those produced by the original run had it continued on.
Some information about a restart file can be gathered directly from the
command line when using LAMMPS with the :ref:`-restart2info
<restart2info>` command line flag. On Unix-like operating systems (like
Linux or macOS), one can also :ref:`configure the "file" command line
command-line when using LAMMPS with the :ref:`-restart2info
<restart2info>` command-line flag. On Unix-like operating systems (like
Linux or macOS), one can also :ref:`configure the "file" command-line
program <magic>` to display basic information about a restart file.
The binary restart file format was not designed with backward, forward,
@ -60,9 +60,9 @@ Changes to the architecture, compilation settings, or LAMMPS version can
render a restart file unreadable or it may read the data incorrectly.
If you want a more portable format, you can use the data file format as
created by the :doc:`write_data <write_data>` command. Binary restart
files can also be converted into a data file from the command line by
files can also be converted into a data file from the command-line by
the LAMMPS executable that wrote them using the :ref:`-restart2data
<restart2data>` command line flag.
<restart2data>` command-line flag.
Several things can prevent exact restarts due to round-off effects, in
which case the trajectories in the 2 runs will slowly diverge. These

View File

@ -30,7 +30,7 @@ Description
This command allows you to use variants of various styles if they
exist. In that respect it operates the same as the :doc:`-suffix command-line switch <Run_options>`. It also has options to turn
off or back on any suffix setting made via the command line.
off or back on any suffix setting made via the command-line.
The specified style can be *gpu*, *intel*, *kk*, *omp*, *opt* or
*hybrid*\ . These refer to optional packages that LAMMPS can be built

View File

@ -71,9 +71,9 @@ Syntax
feature functions = is_available(category,feature), is_active(category,feature), is_defined(category,id)
atom value = id[i], mass[i], type[i], mol[i], x[i], y[i], z[i], vx[i], vy[i], vz[i], fx[i], fy[i], fz[i], q[i]
atom vector = id, mass, type, mol, radius, q, x, y, z, vx, vy, vz, fx, fy, fz
custom atom property = i_name, d_name, i_name[i], d_name[i], i2_name[i], d2_name[i], i2_name[i][j], d_name[i][j]
compute references = c_ID, c_ID[i], c_ID[i][j], C_ID, C_ID[i]
fix references = f_ID, f_ID[i], f_ID[i][j], F_ID, F_ID[i]
custom atom property = i_name, d_name, i_name[i], d_name[i], i2_name[i], d2_name[i], i2_name[i][j], d2_name[i][j]
compute references = c_ID, c_ID[i], c_ID[i][j], C_ID, C_ID[i], C_ID[i][j]
fix references = f_ID, f_ID[i], f_ID[i][j], F_ID, F_ID[i], F_ID[i][j]
variable references = v_name, v_name[i]
vector initialization = [1,3,7,10] (for *vector* variables only)

View File

@ -9,3 +9,4 @@ Pygments
six
pyyaml
linkchecker
ipython

View File

@ -2126,6 +2126,7 @@ Marchi
Mariella
Marinica
Markland
Marko
Marrink
Marroquin
Marsaglia
@ -2208,6 +2209,7 @@ Meissner
Melchor
Meloni
Melrose
meltable
mem
Mem
memalign
@ -2418,6 +2420,7 @@ mV
Mvapich
mvh
mvv
mwlc
MxN
myCompute
myIndex
@ -2942,6 +2945,7 @@ Pmoltrans
pN
png
podd
Podgornik
Podhorszki
Poiseuille
poisson
@ -4128,6 +4132,7 @@ workflow
workflows
Workum
Worley
wormlike
wpbe
Wriggers
writedata
@ -4197,6 +4202,7 @@ yaff
YAFF
Yamada
yaml
Yan
Yanxon
Yaser
Yazdani

View File

@ -117,6 +117,7 @@ liblammpsplugin_t *liblammpsplugin_load(const char *lib)
ADDSYM(set_string_variable);
ADDSYM(set_internal_variable);
ADDSYM(variable_info);
ADDSYM(eval);
ADDSYM(gather_atoms);
ADDSYM(gather_atoms_concat);

View File

@ -163,6 +163,7 @@ struct _liblammpsplugin {
int (*set_string_variable)(void *, const char *, const char *);
int (*set_internal_variable)(void *, const char *, double);
int (*variable_info)(void *, int, char *, int);
double (*eval)(void *, const char *);
void (*gather_atoms)(void *, const char *, int, int, void *);
void (*gather_atoms_concat)(void *, const char *, int, int, void *);

View File

@ -0,0 +1,13 @@
"""
For use with 'in.deca-ala-solv_imd_v3'.
Tested with imdclient v0.1.4 and MDAnalysis v2.8.0
"""
# IMDReader provides the MDAnalysis reader for the imd:// stream used below
from imdclient.IMD import IMDReader
import MDAnalysis as mda
u = mda.Universe('data.deca-ala-solv', "imd://localhost:5678", topology_format='DATA')
for ts in u.trajectory:
    print(ts.time)
    print(ts.velocities)

View File

@ -0,0 +1,31 @@
#
units real
neighbor 2.5 bin
neigh_modify delay 1 every 1
atom_style full
bond_style harmonic
angle_style charmm
dihedral_style charmm
improper_style harmonic
pair_style lj/charmm/coul/long 8 10
pair_modify mix arithmetic
special_bonds charmm
read_data data.deca-ala-solv
group peptide id <= 103
fix rigidh all shake 1e-6 100 1000 t 1 2 3 4 5 a 23
thermo 100
thermo_style multi
timestep 2.0
kspace_style pppm 1e-5
fix ensemble all npt temp 300.0 300.0 100.0 iso 1.0 1.0 1000.0 drag 0.2
# IMD setup. Client code available in 'deca-ala-solv_imd_v3.py'
fix comm all imd 5678 unwrap on trate 10 version 3 time on box on coordinates on velocities on forces off
run 5000000

View File

@ -112,6 +112,7 @@ snap: examples for using several bundled SNAP potentials
srd: stochastic rotation dynamics (SRD) particles as solvent
steinhardt: Steinhardt-Nelson Q_l and W_l parameters usng orientorder/atom
streitz: Streitz-Mintmire potential for Al2O3
stress_vcm: removing binned rigid body motion from binned stress profile
tad: temperature-accelerated dynamics of vacancy diffusion in bulk Si
template: examples for using atom_style template and comparing to atom style molecular
tersoff: regression test input for Tersoff variants

View File

@ -0,0 +1,32 @@
README stress_vcm
=================
Contents:
- in.stress_vcm: Example script showing how to remove binned
velocities of center of mass (VCM) from stress calculations.
- stress_comparison.19Nov24.png: Plot shows the stress
calculated in bars on the y axis for each positional bin on
the x axis. Plotted are three different time steps from
stress profiles with and without the VCM removed. Plot
generated using Python.
- stress_xx.19Nov24.out: Output file generated by fix ave/time.
- log.19Nov24.stress_vcm.g++.1: LAMMPS log file with 1 proc.
- log.19Nov24.stress_vcm.g++.4: LAMMPS log file with 4 procs.
Notes:
- Running this script as-is will generate two files: a log
file with thermodynamic data and a stress_xx.out file
containing the binned stress profile with the VCM removed.
- To generate the binned stress profile without removing the
VCM, the compute stress/atom command at step three
needs the last keyword "ch_temp_vcm" to be replaced with
"NULL".
- Uncommenting the line under "Atom dump" will generate an
all atom dump file every 50 time steps containing atom ID,
type, and xyz coordinates.
- Uncommenting the lines under "Image dumps" will generate
.jpg image files every 250 timesteps.
- Uncommenting lines under "Movie dump" will generate a .avi
movie file showing timesteps every 125 timesteps.

View File

@ -0,0 +1,113 @@
# Removing Binned Velocities of Center of Mass (VCM) from Stress
# This example shows how to remove rigid body motion from
# binned stress calculations. This uses a combination of commands
# from compute chunk/atom, compute temp/chunk, compute
# stress/atom and fix ave/time. We'll show how these commands
# work in the context of a shockwave experiment on a cube of
# atoms. To shock the cube, a rectangular region of atoms is
# frozen, moved into the cube with a constant velocity along the
# x direction, and then unfrozen. As the shockwave begins
# propagating, the body of the cube also moves along the x
# direction. To better understand the stress dynamics of the
# cube we remove the velocity component belonging to the overall
# motion of each bin.
units metal
boundary p p p
atom_style atomic
lattice fcc 5.3589
processors 1 * *
# Defining regions for box and atoms.
# In this experiment an elongated simulation cell is
# defined in the x direction to allow for non-periodic
# motion of the atoms.
region box1 block -3 24 0 12 0 12 units lattice
region box2 block 0 12 0 12 0 12 units lattice
# Creating box and atoms
create_box 1 box1
create_atoms 1 region box2
mass 1 40.00
# Adding energy to the system
velocity all create 600.0 9999
pair_style lj/cut 10
pair_coeff 1 1 0.04 3.405
# Begin time integration
timestep 2e-3
fix fix_nve all nve
thermo 100
run 500
#--------------------------------------#
# Chunk, Stress, and VCM removal steps #
#--------------------------------------#
# 1. Create 20 equispaced bins sliced along the x direction.
# -"units reduced" normalizes the distance from 0.0 to 1.0
variable nbins index 20
variable fraction equal 1.0/v_nbins
variable volfrac equal 1/(vol*${fraction})
compute ch_id all chunk/atom bin/1d x lower ${fraction} units reduced
# 2. Calculate temperature bins with VCM aka COM velocities removed.
compute ch_temp_vcm all temp/chunk ch_id com yes
# 3. Compute per atom stress with VCM removed via temp-ID.
# -The velocities from specified temp-ID are used to compute stress.
# -Stress/atom units are pressure*volume! Optionally handled next step.
compute atom_stress_vcm all stress/atom ch_temp_vcm
# 4. Divide out bin volume from xx stress component.
variable stress atom -(c_atom_stress_vcm[1])/(vol*${fraction})
# 5. Sum the per atom stresses in each bin.
compute ch_stress_vcm all reduce/chunk ch_id sum v_stress
# 6. Average and output to file.
# -The average output is every 100 steps with samples collected 20 times with 5 step intervals.
fix ave_stress_vcm all ave/time 5 20 100 c_ch_stress_vcm mode vector file stress_xx.out
#--------------------------------------#
# Piston compressing along x direction
region piston block -1 1 INF INF INF INF units lattice
group piston region piston
fix fix_piston piston move linear 5 0 0 units box # strain rate ~ 8e10 1/s
thermo_style custom step temp ke pe lx ly lz pxx pyy pzz econserve
# Atom dump
# dump atom_dump all atom 50 dump.vcm
# # Image dumps
# dump 2 all image 250 image.*.jpg type type &
# axes yes 0.8 0.02 view 60 -30
# dump_modify 2 pad 1
# # Movie dump
# dump 3 all movie 125 movie.avi type type &
# axes yes 0.8 0.02 view 60 -30
# dump_modify 3 pad 1
run 500
unfix fix_piston
run 1500

View File

@ -0,0 +1,253 @@
LAMMPS (19 Nov 2024)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:99)
using 1 OpenMP thread(s) per MPI task
# Removing Binned Velocities of Center of Mass (VCM) from Stress
# This example shows how to remove rigid body motion from
# binned stress calculations. This uses a combination of commands
# from compute chunk/atom, compute temp/chunk, compute
# stress/atom and fix ave/time. We'll show how these commands
# work in the context of a shockwave experiment on a cube of
# atoms. To shock the cube, a rectangular region of atoms is
# frozen, moved into the cube with a constant velocity along the
# x direction, and then unfrozen. As the shockwave begins
# propagating, the body of the cube also moves along the x
# direction. To better understand the stress dynamics of the
# cube we remove the velocity component belonging to the overall
# motion of each bin.
units metal
boundary p p p
atom_style atomic
lattice fcc 5.3589
Lattice spacing in x,y,z = 5.3589 5.3589 5.3589
processors 1 * *
# Defining regions for box and atoms.
# In this experiment an elongated simulation cell is
# defined in the x direction to allow for non-periodic
# motion of the atoms.
region box1 block -3 24 0 12 0 12 units lattice
region box2 block 0 12 0 12 0 12 units lattice
# Creating box and atoms
create_box 1 box1
Created orthogonal box = (-16.0767 0 0) to (128.6136 64.3068 64.3068)
1 by 1 by 1 MPI processor grid
create_atoms 1 region box2
Created 7200 atoms
using lattice units in orthogonal box = (-16.0767 0 0) to (128.6136 64.3068 64.3068)
create_atoms CPU = 0.002 seconds
mass 1 40.00
# Adding energy to the system
velocity all create 600.0 9999
pair_style lj/cut 10
pair_coeff 1 1 0.04 3.405
# Begin time integration
timestep 2e-3
fix fix_nve all nve
thermo 100
run 500
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 25 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.721 | 5.721 | 5.721 Mbytes
Step Temp E_pair E_mol TotEng Press
0 600 -2252.7567 0 -1694.4304 -974.62456
100 284.72172 -1977.4291 0 -1712.483 2453.7429
200 304.44519 -1994.7937 0 -1711.4941 1822.2699
300 304.28012 -1993.2958 0 -1710.1498 1498.3794
400 296.76492 -1985.1364 0 -1708.9836 1259.9474
500 295.00895 -1982.4224 0 -1707.9036 964.9526
Loop time of 3.01696 on 1 procs for 500 steps with 7200 atoms
Performance: 28.638 ns/day, 0.838 hours/ns, 165.730 timesteps/s, 1.193 Matom-step/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.8439 | 2.8439 | 2.8439 | 0.0 | 94.26
Neigh | 0.11212 | 0.11212 | 0.11212 | 0.0 | 3.72
Comm | 0.015585 | 0.015585 | 0.015585 | 0.0 | 0.52
Output | 0.003747 | 0.003747 | 0.003747 | 0.0 | 0.12
Modify | 0.026097 | 0.026097 | 0.026097 | 0.0 | 0.87
Other | | 0.01551 | | | 0.51
Nlocal: 7200 ave 7200 max 7200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6410 ave 6410 max 6410 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 615095 ave 615095 max 615095 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 615095
Ave neighs/atom = 85.429861
Neighbor list builds = 9
Dangerous builds = 0
#------------------------------------#
# Chunk, Stress, and VCM removal steps
#------------------------------------#
# 1. Create 20 equispaced bins sliced along the x direction.
# "units reduced" normalizes the distance from 0 to 1
variable nbins index 20
variable fraction equal 1.0/v_nbins
variable volfrac equal 1/(vol*${fraction})
variable volfrac equal 1/(vol*0.05)
compute ch_id all chunk/atom bin/1d x lower ${fraction} units reduced
compute ch_id all chunk/atom bin/1d x lower 0.05 units reduced
# 2. Calculate temperature bins with VCM aka COM velocities removed.
compute ch_temp_vcm all temp/chunk ch_id com yes
# 3. Compute per atom stress with VCM removed via temp-ID.
# The velocities from specified temp-ID are used to compute stress
# Stress/atom units are pressure*volume! Optionally handled next step.
compute atom_stress_vcm all stress/atom ch_temp_vcm
# 4. Divide out bin volume from xx stress component.
variable stress atom -(c_atom_stress_vcm[1])/(vol*${fraction})
variable stress atom -(c_atom_stress_vcm[1])/(vol*0.05)
# 5. Sum the per atom stresses in each bin.
compute ch_stress_vcm all reduce/chunk ch_id sum v_stress
# 6. Average and output to file.
# The average output is every 100 steps with samples collected 20 times with 5 step intervals
# fix ave_stress_vcm all ave/time 5 20 100 c_ch_stress_vcm mode vector file stress_xx.out
#------------------------------------#
# Piston compressing along x direction
region piston block -1 1 INF INF INF INF units lattice
group piston region piston
863 atoms in group piston
fix fix_piston piston move linear 5 0 0 units box # strain rate ~ 8e10 1/s
thermo_style custom step temp ke pe lx ly lz pxx pyy pzz econserve
# Atom dump
# dump atom_dump all atom 50 dump.vcm
# # Image dumps
# dump 2 all image 250 image.*.jpg type type # axes yes 0.8 0.02 view 60 -30
# dump_modify 2 pad 1
# # Movie dump
# dump 3 all movie 125 movie.avi type type # axes yes 0.8 0.02 view 60 -30
# dump_modify 3 pad 1
run 500
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
WARNING: One or more atoms are time integrated more than once (src/modify.cpp:296)
Per MPI rank memory allocation (min/avg/max) = 6.975 | 6.975 | 6.975 Mbytes
Step Temp KinEng PotEng Lx Ly Lz Pxx Pyy Pzz Econserve
500 295.00895 274.51875 -1982.4224 144.6903 64.3068 64.3068 631.89976 1127.2965 1135.6616 -1707.9036
600 357.38902 332.56613 -1951.3422 144.6903 64.3068 64.3068 2236.6706 2003.2726 1943.6815 -1618.7761
700 420.30268 391.11005 -1911.8178 144.6903 64.3068 64.3068 3761.5011 3065.4699 3140.3169 -1520.7077
800 484.96279 451.27911 -1875.379 144.6903 64.3068 64.3068 5362.254 4174.4201 4166.0818 -1424.0999
900 587.78954 546.96391 -1871.217 144.6903 64.3068 64.3068 6481.4714 4875.705 4676.6083 -1324.2531
1000 684.07997 636.56636 -1868.1639 144.6903 64.3068 64.3068 7734.6158 5271.3524 5272.1276 -1231.5975
Loop time of 3.09383 on 1 procs for 500 steps with 7200 atoms
Performance: 27.927 ns/day, 0.859 hours/ns, 161.612 timesteps/s, 1.164 Matom-step/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.8485 | 2.8485 | 2.8485 | 0.0 | 92.07
Neigh | 0.18767 | 0.18767 | 0.18767 | 0.0 | 6.07
Comm | 0.011533 | 0.011533 | 0.011533 | 0.0 | 0.37
Output | 0.003323 | 0.003323 | 0.003323 | 0.0 | 0.11
Modify | 0.031777 | 0.031777 | 0.031777 | 0.0 | 1.03
Other | | 0.01107 | | | 0.36
Nlocal: 7200 ave 7200 max 7200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6409 ave 6409 max 6409 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 646408 ave 646408 max 646408 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 646408
Ave neighs/atom = 89.778889
Neighbor list builds = 15
Dangerous builds = 0
unfix fix_piston
run 1500
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 6.6 | 6.6 | 6.6 Mbytes
Step Temp KinEng PotEng Lx Ly Lz Pxx Pyy Pzz Econserve
1000 684.07997 636.56636 -1868.1639 144.6903 64.3068 64.3068 7734.6158 5271.3524 5272.1276 -1231.5975
1100 710.19886 660.87113 -1894.0485 144.6903 64.3068 64.3068 8048.3485 5396.6668 5376.5956 -1233.1774
1200 717.16487 667.35331 -1901.3849 144.6903 64.3068 64.3068 8009.7984 5634.5121 5349.4113 -1234.0316
1300 710.26037 660.92837 -1894.9802 144.6903 64.3068 64.3068 8063.4125 5572.1245 5530.174 -1234.0519
1400 715.93921 666.21278 -1898.8885 144.6903 64.3068 64.3068 7752.0927 5293.5463 5322.2312 -1232.6757
1500 748.85411 696.84154 -1926.4891 144.6903 64.3068 64.3068 6030.5428 4076.8886 4012.7653 -1229.6475
1600 767.98982 714.64815 -1939.8556 144.6903 64.3068 64.3068 4200.3475 2532.5711 2530.5518 -1225.2075
1700 757.22042 704.62675 -1925.553 144.6903 64.3068 64.3068 2686.7843 1482.2796 1505.8073 -1220.9262
1800 727.30327 676.78754 -1894.6635 144.6903 64.3068 64.3068 1764.2793 781.37451 801.18668 -1217.8759
1900 688.82146 640.97853 -1856.5007 144.6903 64.3068 64.3068 1022.805 417.32394 359.74951 -1215.5221
2000 655.91228 610.35509 -1823.954 144.6903 64.3068 64.3068 551.98825 -20.148643 -56.976652 -1213.5989
2100 620.22468 577.14622 -1789.1761 144.6903 64.3068 64.3068 264.05975 -266.8323 -314.45533 -1212.0299
2200 589.13325 548.21428 -1758.9252 144.6903 64.3068 64.3068 41.369707 -533.503 -525.69401 -1210.7109
2300 563.20394 524.08593 -1733.6036 144.6903 64.3068 64.3068 -220.99189 -810.90513 -774.65084 -1209.5176
2400 540.44236 502.90528 -1711.3384 144.6903 64.3068 64.3068 -358.01508 -962.31635 -977.3253 -1208.4332
2500 523.5718 487.20648 -1694.7088 144.6903 64.3068 64.3068 -521.87444 -1152.8386 -1231.7615 -1207.5023
Loop time of 9.34327 on 1 procs for 1500 steps with 7200 atoms
Performance: 27.742 ns/day, 0.865 hours/ns, 160.543 timesteps/s, 1.156 Matom-step/s
98.5% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 8.4692 | 8.4692 | 8.4692 | 0.0 | 90.65
Neigh | 0.7512 | 0.7512 | 0.7512 | 0.0 | 8.04
Comm | 0.031189 | 0.031189 | 0.031189 | 0.0 | 0.33
Output | 0.010584 | 0.010584 | 0.010584 | 0.0 | 0.11
Modify | 0.053052 | 0.053052 | 0.053052 | 0.0 | 0.57
Other | | 0.02803 | | | 0.30
Nlocal: 7200 ave 7200 max 7200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 6380 ave 6380 max 6380 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 515773 ave 515773 max 515773 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 515773
Ave neighs/atom = 71.635139
Neighbor list builds = 57
Dangerous builds = 0
Total wall time: 0:00:15

View File

@ -0,0 +1,253 @@
LAMMPS (19 Nov 2024)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:99)
using 1 OpenMP thread(s) per MPI task
# Removing Binned Velocities of Center of Mass (VCM) from Stress
# This example shows how to remove rigid body motion from
# binned stress calculations. This uses a combination of commands
# from compute chunk/atom, compute temp/chunk, compute
# stress/atom and fix ave/time. We'll show how these commands
# work in the context of a shockwave experiment on a cube of
# atoms. To shock the cube, a rectangular region of atoms is
# frozen, moved into the cube with a constant velocity along the
# x direction, and then unfrozen. As the shockwave begins
# propagating, the body of the cube also moves along the x
# direction. To better understand the stress dynamics of the
# cube we remove the velocity component belonging to the overall
# motion of each bin.
units metal
boundary p p p
atom_style atomic
lattice fcc 5.3589
Lattice spacing in x,y,z = 5.3589 5.3589 5.3589
processors 1 * *
# Defining regions for box and atoms.
# In this experiment an elongated simulation cell is
# defined in the x direction to allow for non-periodic
# motion of the atoms.
region box1 block -3 24 0 12 0 12 units lattice
region box2 block 0 12 0 12 0 12 units lattice
# Creating box and atoms
create_box 1 box1
Created orthogonal box = (-16.0767 0 0) to (128.6136 64.3068 64.3068)
1 by 2 by 2 MPI processor grid
create_atoms 1 region box2
Created 7200 atoms
using lattice units in orthogonal box = (-16.0767 0 0) to (128.6136 64.3068 64.3068)
create_atoms CPU = 0.001 seconds
mass 1 40.00
# Adding energy to the system
velocity all create 600.0 9999
pair_style lj/cut 10
pair_coeff 1 1 0.04 3.405
# Begin time integration
timestep 2e-3
fix fix_nve all nve
thermo 100
run 500
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Neighbor list info ...
update: every = 1 steps, delay = 0 steps, check = yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 25 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut/opt, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.662 | 3.662 | 3.662 Mbytes
Step Temp E_pair E_mol TotEng Press
0 600 -2252.7567 0 -1694.4304 -974.62456
100 284.1896 -1976.961 0 -1712.5101 2462.6396
200 308.58965 -1998.6349 0 -1711.4787 1789.0033
300 300.55093 -1989.9838 0 -1710.308 1545.8576
400 297.91491 -1986.2519 0 -1709.029 1247.7121
500 294.66041 -1982.1097 0 -1707.9153 961.03073
Loop time of 0.942408 on 4 procs for 500 steps with 7200 atoms
Performance: 91.680 ns/day, 0.262 hours/ns, 530.556 timesteps/s, 3.820 Matom-step/s
82.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.61287 | 0.63781 | 0.65858 | 2.1 | 67.68
Neigh | 0.030246 | 0.031529 | 0.034546 | 1.0 | 3.35
Comm | 0.23074 | 0.25145 | 0.27819 | 3.7 | 26.68
Output | 0.000282 | 0.0003735 | 0.000463 | 0.0 | 0.04
Modify | 0.005566 | 0.0057635 | 0.005989 | 0.2 | 0.61
Other | | 0.01548 | | | 1.64
Nlocal: 1800 ave 1814 max 1787 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 3713.5 ave 3727 max 3699 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Neighs: 153532 ave 154995 max 152312 min
Histogram: 1 0 1 0 0 1 0 0 0 1
Total # of neighbors = 614128
Ave neighs/atom = 85.295556
Neighbor list builds = 9
Dangerous builds = 0
#------------------------------------#
# Chunk, Stress, and VCM removal steps
#------------------------------------#
# 1. Create 20 equispaced bins sliced along the x direction.
# "units reduced" normalizes the distance from 0 to 1
variable nbins index 20
variable fraction equal 1.0/v_nbins
variable volfrac equal 1/(vol*${fraction})
variable volfrac equal 1/(vol*0.05)
compute ch_id all chunk/atom bin/1d x lower ${fraction} units reduced
compute ch_id all chunk/atom bin/1d x lower 0.05 units reduced
# 2. Calculate temperature bins with VCM aka COM velocities removed.
compute ch_temp_vcm all temp/chunk ch_id com yes
# 3. Compute per atom stress with VCM removed via temp-ID.
# The velocities from specified temp-ID are used to compute stress
# Stress/atom units are pressure*volume! Optionally handled next step.
compute atom_stress_vcm all stress/atom ch_temp_vcm
# 4. Divide out bin volume from xx stress component.
variable stress atom -(c_atom_stress_vcm[1])/(vol*${fraction})
variable stress atom -(c_atom_stress_vcm[1])/(vol*0.05)
# 5. Sum the per atom stresses in each bin.
compute ch_stress_vcm all reduce/chunk ch_id sum v_stress
# 6. Average and output to file.
# The average output is every 100 steps with samples collected 20 times with 5 step intervals
# fix ave_stress_vcm all ave/time 5 20 100 c_ch_stress_vcm mode vector file stress_xx.out
#------------------------------------#
# Piston compressing along x direction
region piston block -1 1 INF INF INF INF units lattice
group piston region piston
864 atoms in group piston
fix fix_piston piston move linear 5 0 0 units box # strain rate ~ 8e10 1/s
thermo_style custom step temp ke pe lx ly lz pxx pyy pzz econserve
# Atom dump
# dump atom_dump all atom 50 dump.vcm
# # Image dumps
# dump 2 all image 250 image.*.jpg type type # axes yes 0.8 0.02 view 60 -30
# dump_modify 2 pad 1
# # Movie dump
# dump 3 all movie 125 movie.avi type type # axes yes 0.8 0.02 view 60 -30
# dump_modify 3 pad 1
run 500
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
WARNING: One or more atoms are time integrated more than once (src/modify.cpp:296)
Per MPI rank memory allocation (min/avg/max) = 4.916 | 4.916 | 4.916 Mbytes
Step Temp KinEng PotEng Lx Ly Lz Pxx Pyy Pzz Econserve
500 294.66041 274.19441 -1982.1097 144.6903 64.3068 64.3068 645.25795 1119.5337 1118.3006 -1707.9153
600 357.88641 333.02897 -1951.8158 144.6903 64.3068 64.3068 2176.0343 1929.2787 1981.8479 -1618.7869
700 418.41159 389.3503 -1912.8337 144.6903 64.3068 64.3068 3702.2875 3043.7607 3081.1607 -1523.4834
800 483.71102 450.11428 -1875.7955 144.6903 64.3068 64.3068 5254.3875 4190.9789 4158.3561 -1425.6813
900 586.0893 545.38176 -1870.9313 144.6903 64.3068 64.3068 6509.1439 4756.2216 4724.7086 -1325.5495
1000 686.32946 638.65962 -1874.811 144.6903 64.3068 64.3068 7515.1606 5193.049 5261.8688 -1236.1514
Loop time of 0.656417 on 4 procs for 500 steps with 7200 atoms
Performance: 131.624 ns/day, 0.182 hours/ns, 761.711 timesteps/s, 5.484 Matom-step/s
92.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.51672 | 0.52334 | 0.53259 | 0.8 | 79.73
Neigh | 0.045091 | 0.045915 | 0.047402 | 0.4 | 6.99
Comm | 0.060735 | 0.071794 | 0.079302 | 2.6 | 10.94
Output | 0.000208 | 0.000389 | 0.000926 | 0.0 | 0.06
Modify | 0.006007 | 0.0061595 | 0.00626 | 0.1 | 0.94
Other | | 0.008815 | | | 1.34
Nlocal: 1800 ave 1811 max 1785 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Nghost: 3713.25 ave 3727 max 3702 min
Histogram: 2 0 0 0 0 0 0 1 0 1
Neighs: 161477 ave 162958 max 159732 min
Histogram: 1 0 0 0 1 0 0 1 0 1
Total # of neighbors = 645909
Ave neighs/atom = 89.709583
Neighbor list builds = 15
Dangerous builds = 0
unfix fix_piston
run 1500
Generated 0 of 0 mixed pair_coeff terms from geometric mixing rule
Per MPI rank memory allocation (min/avg/max) = 4.541 | 4.541 | 4.541 Mbytes
Step Temp KinEng PotEng Lx Ly Lz Pxx Pyy Pzz Econserve
1000 686.32946 638.65962 -1874.811 144.6903 64.3068 64.3068 7515.1606 5193.049 5261.8688 -1236.1514
1100 709.7333 660.43791 -1898.2844 144.6903 64.3068 64.3068 7932.8638 5334.6171 5364.5335 -1237.8465
1200 713.27253 663.73132 -1902.4588 144.6903 64.3068 64.3068 7957.2574 5500.6231 5538.0516 -1238.7275
1300 705.44796 656.45022 -1895.1575 144.6903 64.3068 64.3068 7996.7265 5584.6233 5538.2494 -1238.7072
1400 711.86463 662.42121 -1899.8416 144.6903 64.3068 64.3068 7674.2462 5292.4915 5294.5366 -1237.4204
1500 742.18946 690.63979 -1924.9562 144.6903 64.3068 64.3068 6047.915 4056.6156 4014.4446 -1234.3164
1600 762.81764 709.83522 -1939.8563 144.6903 64.3068 64.3068 4185.5873 2530.0572 2576.1943 -1230.0211
1700 754.40428 702.00621 -1927.7337 144.6903 64.3068 64.3068 2662.7604 1509.1985 1484.7252 -1225.7275
1800 721.03504 670.95468 -1893.5556 144.6903 64.3068 64.3068 1765.8783 835.89765 861.9432 -1222.6009
1900 689.64162 641.74172 -1861.8886 144.6903 64.3068 64.3068 941.58148 312.93205 409.79901 -1220.1469
2000 650.79664 605.59477 -1823.9889 144.6903 64.3068 64.3068 543.39234 28.48735 80.396505 -1218.3941
2100 616.04072 573.25286 -1790.1764 144.6903 64.3068 64.3068 308.16444 -235.20997 -248.22531 -1216.9235
2200 587.18712 546.40333 -1761.8878 144.6903 64.3068 64.3068 37.044801 -476.50396 -470.83059 -1215.4845
2300 562.84178 523.74892 -1738.2239 144.6903 64.3068 64.3068 -139.28348 -711.17273 -730.80877 -1214.475
2400 540.48362 502.94367 -1716.3529 144.6903 64.3068 64.3068 -320.98222 -951.2066 -943.93966 -1213.4093
2500 519.80431 483.70067 -1696.1896 144.6903 64.3068 64.3068 -471.61317 -1088.8457 -1131.5396 -1212.4889
Loop time of 1.97213 on 4 procs for 1500 steps with 7200 atoms
Performance: 131.431 ns/day, 0.183 hours/ns, 760.598 timesteps/s, 5.476 Matom-step/s
95.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.5455 | 1.5599 | 1.5723 | 0.8 | 79.10
Neigh | 0.16844 | 0.1704 | 0.17237 | 0.4 | 8.64
Comm | 0.19002 | 0.2047 | 0.22068 | 2.4 | 10.38
Output | 0.000525 | 0.0006785 | 0.001077 | 0.0 | 0.03
Modify | 0.012434 | 0.012601 | 0.012777 | 0.1 | 0.64
Other | | 0.02388 | | | 1.21
Nlocal: 1800 ave 1833 max 1776 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Nghost: 3702 ave 3732 max 3674 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Neighs: 129380 ave 132578 max 127003 min
Histogram: 1 0 0 2 0 0 0 0 0 1
Total # of neighbors = 517520
Ave neighs/atom = 71.877778
Neighbor list builds = 54
Dangerous builds = 0
Total wall time: 0:00:03

Binary file not shown (image added, 89 KiB)

View File

@ -0,0 +1,423 @@
# Time-averaged data for fix ave_stress_vcm
# TimeStep Number-of-rows
# Row c_ch_stress_vcm
600 20
1 0
2 -142.965
3 2142.79
4 12968.3
5 -336.7
6 2638.09
7 4214.83
8 3187.61
9 -488.891
10 -49.3553
11 151.373
12 -317.663
13 0
14 0
15 0
16 0
17 0
18 0
19 0
20 0
700 20
1 0
2 -14.3195
3 -1238.9
4 30664.3
5 18805.2
6 498.562
7 930.874
8 660.655
9 -266.903
10 -317.877
11 -386.989
12 -304.697
13 0
14 0
15 0
16 0
17 0
18 0
19 0
20 0
800 20
1 0
2 0
3 -1656.7
4 30424.3
5 37003.5
6 15562.5
7 -2441.9
8 -1766.09
9 272.718
10 -664.774
11 -72.6933
12 -469.765
13 0
14 0
15 0
16 0
17 0
18 0
19 0
20 0
900 20
1 0
2 0
3 -1567.21
4 24987.6
5 38068.9
6 31595
7 8864.94
8 -3423.99
9 -753.063
10 125.21
11 -50.4895
12 -172.14
13 0
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1000 20
1 0
2 0
3 -893.168
4 15591.6
5 32690.6
6 30183
7 27172
8 9459.75
9 -1416.35
10 -432.731
11 444.323
12 -424.357
13 0
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1100 20
1 0
2 0
3 -601.805
4 8890.79
5 23345.1
6 28529.2
7 29111.9
8 25846.2
9 7451.83
10 -1624.2
11 320.704
12 -50.9865
13 -5.50481
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1200 20
1 0
2 0
3 1435.39
4 8818.29
5 7129.61
6 20281.7
7 28026.1
8 28327.7
9 26918.6
10 8277.12
11 -249.644
12 -171.806
13 -7.19065
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1300 20
1 0
2 0
3 -718.118
4 3021.9
5 9010.51
6 9500.87
7 19432.8
8 27254.3
9 28638.5
10 25568.5
11 8094.66
12 -368.293
13 -2.20997
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1400 20
1 0
2 0
3 -650.581
4 190.19
5 5465.38
6 7489.23
7 7575.16
8 18433.5
9 26975.3
10 28981.5
11 26987.9
12 7502.07
13 0.117312
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1500 20
1 0
2 0
3 -619.311
4 561.257
5 461.5
6 4105.68
7 9272.68
8 10445.6
9 18826.1
10 25434.8
11 25653.8
12 10981.2
13 33.682
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1600 20
1 0
2 0
3 -349.345
4 513.579
5 -471.384
6 1257.81
7 7122.9
8 8659.35
9 8452.08
10 16013.5
11 17091
12 5476.24
13 -136.183
14 0
15 0
16 0
17 0
18 0
19 0
20 0
1700 20
1 0
2 0
3 -273.839
4 -907.407
5 -272.136
6 594.363
7 3302.77
8 5564.07
9 8689.92
10 6446.06
11 1779.37
12 338.998
13 -171.408
14 -1.21548
15 0
16 0
17 0
18 0
19 0
20 0
1800 20
1 0
2 0
3 -164.819
4 383.877
5 -140.681
6 -10.0153
7 907.937
8 3269.05
9 5325.22
10 395.73
11 -4103.73
12 -2787.16
13 -1357.04
14 -35.2044
15 0
16 0
17 0
18 0
19 0
20 0
1900 20
1 0
2 0
3 -80.813
4 334.225
5 248.55
6 82.0566
7 207.763
8 185.714
9 -55.8635
10 -2758.51
11 -4619.33
12 -5521.92
13 -2346.36
14 -415.324
15 0
16 0
17 0
18 0
19 0
20 0
2000 20
1 0
2 0
3 -83.1832
4 264.023
5 596.087
6 40.8157
7 -267.093
8 -2288.15
9 -3387.64
10 -5566.79
11 -5640.76
12 -4925.74
13 -3096.01
14 -757.817
15 -1.13042
16 0
17 0
18 0
19 0
20 0
2100 20
1 0
2 0
3 -17.4378
4 62.1251
5 740.988
6 357.467
7 -1137.61
8 -4266.83
9 -4962.9
10 -5322.45
11 -5437.58
12 -4846.56
13 -3651.28
14 -1151.01
15 -28.3074
16 0
17 0
18 0
19 0
20 0
2200 20
1 0
2 0
3 -10.8779
4 -56.7926
5 400.261
6 -568.63
7 -2193.36
8 -3856.71
9 -6603
10 -5717.11
11 -4868.64
12 -4173.5
13 -3402.64
14 -1712.44
15 -80.6771
16 -0.123189
17 0
18 0
19 0
20 0
2300 20
1 0
2 0
3 -22.8402
4 -44.5496
5 -365.476
6 -1285.6
7 -2887.76
8 -4022.77
9 -6280.86
10 -6055.26
11 -4921.51
12 -4445.37
13 -3531.69
14 -1360.49
15 -258.99
16 0.196931
17 0
18 0
19 0
20 0
2400 20
1 0
2 0
3 -0.594396
4 -148.921
5 -1118.18
6 -2071.85
7 -3989.41
8 -4567.01
9 -4939.36
10 -5170.94
11 -4922.25
12 -4587.5
13 -3748.19
14 -1785.46
15 -460.491
16 2.54038
17 0
18 0
19 0
20 0
2500 20
1 0
2 0
3 5.64755
4 -485.854
5 -2525.68
6 -2642.35
7 -5066.15
8 -4546.03
9 -4429.45
10 -4579.15
11 -4829.56
12 -4384.77
13 -3525.99
14 -1708.9
15 -627.176
16 -23.5581
17 0
18 0
19 0
20 0

View File

@ -126,6 +126,7 @@ MODULE LIBLAMMPS
PROCEDURE :: set_variable => lmp_set_variable
PROCEDURE :: set_string_variable => lmp_set_string_variable
PROCEDURE :: set_internal_variable => lmp_set_internal_variable
PROCEDURE :: eval => lmp_eval
PROCEDURE, PRIVATE :: lmp_gather_atoms_int
PROCEDURE, PRIVATE :: lmp_gather_atoms_double
GENERIC :: gather_atoms => lmp_gather_atoms_int, &
@ -618,7 +619,14 @@ MODULE LIBLAMMPS
INTEGER(c_int) :: lammps_set_internal_variable
END FUNCTION lammps_set_internal_variable
SUBROUTINE lammps_gather_atoms(handle, name, type, count, data) BIND(C)
FUNCTION lammps_eval(handle, expr) BIND(C)
IMPORT :: c_ptr, c_double
IMPLICIT NONE
TYPE(c_ptr), VALUE :: handle, expr
REAL(c_double) :: lammps_eval
END FUNCTION lammps_eval
SUBROUTINE lammps_gather_atoms(handle, name, TYPE, count, DATA) BIND(C)
IMPORT :: c_int, c_ptr
IMPLICIT NONE
TYPE(c_ptr), VALUE :: handle, name, data
@ -1812,7 +1820,7 @@ CONTAINS
SUBROUTINE lmp_set_internal_variable(self, name, val)
CLASS(lammps), INTENT(IN) :: self
CHARACTER(LEN=*), INTENT(IN) :: name
REAL(KIND=c_double), INTENT(IN) :: val
REAL(c_double), INTENT(IN) :: val
INTEGER :: err
TYPE(c_ptr) :: Cname
@ -1826,6 +1834,18 @@ CONTAINS
END IF
END SUBROUTINE lmp_set_internal_variable
! equivalent function to lammps_eval
FUNCTION lmp_eval(self, expr)
CLASS(lammps), INTENT(IN) :: self
CHARACTER(LEN=*), INTENT(IN) :: expr
REAL(c_double) :: lmp_eval
TYPE(c_ptr) :: Cexpr
Cexpr = f2c_string(expr)
lmp_eval = lammps_eval(self%handle, Cexpr)
CALL lammps_free(Cexpr)
END FUNCTION lmp_eval
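! (Illustrative usage sketch, not part of this patch: assuming "lmp" is an
!  initialized LAMMPS instance and that the expression syntax follows
!  equal-style variables, the new method returns the result as a REAL(c_double):
!      press = lmp%eval('press')
!      nall  = lmp%eval('count(all)')
!  names here are for illustration only.)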
! equivalent function to lammps_gather_atoms (for integers)
SUBROUTINE lmp_gather_atoms_int(self, name, count, data)
CLASS(lammps), INTENT(IN) :: self

View File

@ -1,7 +1,112 @@
# CHANGELOG
## 4.5.01
[Full Changelog](https://github.com/kokkos/kokkos/compare/4.5.00...4.5.01)
### Bug Fixes
* Fix re-builds after cleaning the binary tree when doing `add_subdirectory` on the Kokkos source [\#7557](https://github.com/kokkos/kokkos/pull/7557)
* Update mdspan to include fix for submdspan and bracket operator with clang 15&16 [\#7559](https://github.com/kokkos/kokkos/pull/7559)
* Fix DynRankView performance regression by re-introducing shortcut operator() impls [\#7606](https://github.com/kokkos/kokkos/pull/7606)
* Add missing MI300A (`GFX942_APU`) option to Makefile build-system
## 4.5.00
[Full Changelog](https://github.com/kokkos/kokkos/compare/4.4.01...4.5.00)
### Features
* SYCL backend graduated to production ready
* Introduce new `SequentialHostInit` view allocation property [\#7229](https://github.com/kokkos/kokkos/pull/7229) (backported in 4.4.01)
* Support building with Run-Time Type Information (RTTI) disabled
* Add new `KOKKOS_RELOCATABLE_FUNCTION` function annotation macro [\#5993](https://github.com/kokkos/kokkos/pull/5993)
### Backend and Architecture Enhancements
#### CUDA
* Adding occupancy tuning for CUDA architectures [\#6788](https://github.com/kokkos/kokkos/pull/6788)
* By default disable `cudaMallocAsync` (i.e., revert the change made in version 4.2) [\#7353](https://github.com/kokkos/kokkos/pull/7353)
#### HIP
* Add support for AMD Phoenix APUs with Radeon 740M/760M/780M/880M/890M [\#7162](https://github.com/kokkos/kokkos/pull/7162)
* Update maximum waves per CU values for consumer card [\#7347](https://github.com/kokkos/kokkos/pull/7347)
* Check that Kokkos is running on the architecture it was compiled for [\#7379](https://github.com/kokkos/kokkos/pull/7379)
* Add opt-in option to use `hipMallocAsync` instead of `hipMalloc` [\#7324](https://github.com/kokkos/kokkos/pull/7324)
* Introduce new architecture option `AMD_GFX942_APU` for MI300A [\#7462](https://github.com/kokkos/kokkos/pull/7462)
#### SYCL
* Move the `SYCL` backend out of the `Experimental` namespace [\#7171](https://github.com/kokkos/kokkos/pull/7171)
* Introduce `KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE` as CMake option [\#5993](https://github.com/kokkos/kokkos/pull/5993)
#### OpenACC
* Add support for building with the Clacc compiler [\#7198](https://github.com/kokkos/kokkos/pull/7198)
* Workaround NVHPC collapse clause bug for `MDRangePolicy` [\#7425](https://github.com/kokkos/kokkos/pull/7425)
#### HPX
* Implement `Experimental::partition_space` to produce truly independent execution spaces [\#7287](https://github.com/kokkos/kokkos/pull/7287)
#### Threads
* Fix compilation for `parallel_reduce` `MDRange` with `Dynamic` scheduling [\#7478](https://github.com/kokkos/kokkos/pull/7478)
* Fix race conditions on ARM architectures [\#7498](https://github.com/kokkos/kokkos/pull/7498)
#### OpenMP
* Fix run time behavior when compiling with `-fvisibility-hidden` [\#7284](https://github.com/kokkos/kokkos/pull/7284) (backported in 4.4.01)
* Fix linking with Cray Clang compiler [\#7341](https://github.com/kokkos/kokkos/pull/7341)
#### Serial
* Allow `Kokkos_ENABLE_ATOMICS_BYPASS` to skip mutexes to remediate performance regression in 4.4 [\#7369](https://github.com/kokkos/kokkos/pull/7369)
### General Enhancements
* Improve `View` initialization/destruction for non-scalar trivial and trivially-destructible types [\#7219](https://github.com/kokkos/kokkos/pull/7219) [\#7225](https://github.com/kokkos/kokkos/pull/7225)
* Add getters for default tile sizes used in `MDRangePolicy` [\#6839](https://github.com/kokkos/kokkos/pull/6839)
* Improve performance of `Kokkos::sort` when `std::sort` is used [\#7264](https://github.com/kokkos/kokkos/pull/7264)
* Add range-based for loop support for `Array<T, N>` [\#7293](https://github.com/kokkos/kokkos/pull/7293)
* Allow functors as reducers for nested team parallel reduce [\#6921](https://github.com/kokkos/kokkos/pull/6921)
* Avoid making copies of string rvalue reference arguments to `view_alloc()` [\#7364](https://github.com/kokkos/kokkos/pull/7364)
* Add `atomic_{mod,xor,nand,lshift,rshift}` [\#7458](https://github.com/kokkos/kokkos/pull/7458)
* Allow using `SequentialHostInit` with `Kokkos::DualView` [\#7456](https://github.com/kokkos/kokkos/pull/7456)
* Add `Graph::instantiate()` [\#7240](https://github.com/kokkos/kokkos/pull/7240)
* Allow an arbitrary execution space instance to be used in `Kokkos::Graph::submit()` [\#7249](https://github.com/kokkos/kokkos/pull/7249)
* Enable compile-time diagnostic of illegal reduction target for graphs [\#7460](https://github.com/kokkos/kokkos/pull/7460)
### Build System Changes
* Make sure backend-specific options such as `IMPL_CUDA_MALLOC_ASYNC` only show when that backend is actually enabled [\#7228](https://github.com/kokkos/kokkos/pull/7228)
* Major refactoring removing `TriBITS` paths [\#6164](https://github.com/kokkos/kokkos/pull/6164)
* Add support for SpacemiT K60 (RISC-V) [\#7160](https://github.com/kokkos/kokkos/pull/7160)
### Deprecations
* Deprecate Tasking interface [\#7393](https://github.com/kokkos/kokkos/pull/7393)
* Deprecate `atomic_query_version`, `atomic_assign`, `atomic_compare_exchange_strong`, `atomic_{inc, dec}rement` [\#7458](https://github.com/kokkos/kokkos/pull/7458)
* Deprecate `{OpenMP,HPX}::is_asynchronous()` [\#7322](https://github.com/kokkos/kokkos/pull/7322)
### Bug Fixes
* Fix undefined behavior in `BinSort` when sorting within bins on host [\#7223](https://github.com/kokkos/kokkos/pull/7223)
* Using CUDA limits to set extents for blocks, grids [\#7235](https://github.com/kokkos/kokkos/pull/7235)
* Fix `deep_copy (serial_exec, dst, src)` with multiple host backends [\#7245](https://github.com/kokkos/kokkos/pull/7245)
* Skip `RangePolicy` bounds conversion checks if roundtrip convertibility is not provided [\#7172](https://github.com/kokkos/kokkos/pull/7172)
* Allow extracting host and device views from `DualView` with `const` value type [\#7242](https://github.com/kokkos/kokkos/pull/7242)
* Fix `TeamPolicy` array reduction for CUDA and HIP [\#6296](https://github.com/kokkos/kokkos/pull/6296)
* Fix implicit copy assignment operators in few AVX2 masks being deleted [\#7296](https://github.com/kokkos/kokkos/pull/7296)
* Fix configuring without architecture flags for SYCL [\#7303](https://github.com/kokkos/kokkos/pull/7303)
* Set an initial value index during join of `MinLoc`, `MaxLoc` or `MinMaxLoc` [\#7330](https://github.com/kokkos/kokkos/pull/7330)
* Fix storage lifetime of driver for global launch of graph nodes for CUDA and HIP [\#7365](https://github.com/kokkos/kokkos/pull/7365)
* Make `value_type` for `RandomAccessIterator` non-`const` [\#7485](https://github.com/kokkos/kokkos/pull/7485)
## [4.4.01](https://github.com/kokkos/kokkos/tree/4.4.01)
[Full Changelog](https://github.com/kokkos/kokkos/compare/4.0.00...4.4.01)
[Full Changelog](https://github.com/kokkos/kokkos/compare/4.4.00...4.4.01)
### Features:
* Introduce new SequentialHostInit view allocation property [\#7229](https://github.com/kokkos/kokkos/pull/7229)
@ -13,7 +118,7 @@
### Bug Fixes
* OpenMP: Fix issue related to the visibility of an internal symbol with shared libraries that affected `ScatterView` in particular [\#7284](https://github.com/kokkos/kokkos/pull/7284)
* Fix implicit copy assignment operators in few AVX2 masks being deleted [#7296](https://github.com/kokkos/kokkos/pull/7296)
* Fix implicit copy assignment operators in few AVX2 masks being deleted [\#7296](https://github.com/kokkos/kokkos/pull/7296)
## [4.4.00](https://github.com/kokkos/kokkos/tree/4.4.00)
[Full Changelog](https://github.com/kokkos/kokkos/compare/4.3.01...4.4.00)
@ -57,6 +162,7 @@
* SIMD: Allow flexible vector width for 32 bit types [\#6802](https://github.com/kokkos/kokkos/pull/6802)
* Updates for `Kokkos::Array`: add `kokkos_swap(Array<T, N>)` specialization [\#6943](https://github.com/kokkos/kokkos/pull/6943), add `Kokkos::to_array` [\#6375](https://github.com/kokkos/kokkos/pull/6375), make `Kokkos::Array` equality-comparable [\#7148](https://github.com/kokkos/kokkos/pull/7148)
* Structured binding support for `Kokkos::complex` [\#7040](https://github.com/kokkos/kokkos/pull/7040)
* Introduce `KOKKOS_DEDUCTION_GUIDE` macro to allow for portable user-defined deduction guides [\#6954](https://github.com/kokkos/kokkos/pull/6954)
### Build System Changes
* Do not require OpenMP support for languages other than CXX [\#6965](https://github.com/kokkos/kokkos/pull/6965)
@ -1388,7 +1494,7 @@
**Closed issues:**
- Silent error (Validate storage level arg to set_scratch_size) [\#3097](https://github.com/kokkos/kokkos/issues/3097)
- Remove KOKKKOS\_ENABLE\_PROFILING Option [\#3095](https://github.com/kokkos/kokkos/issues/3095)
- Remove KOKKOS\_ENABLE\_PROFILING Option [\#3095](https://github.com/kokkos/kokkos/issues/3095)
- Cuda 11 -\> allow C++17 [\#3083](https://github.com/kokkos/kokkos/issues/3083)
- In source build failure not explained [\#3081](https://github.com/kokkos/kokkos/issues/3081)
- Allow naming of Views for initialization kernel [\#3070](https://github.com/kokkos/kokkos/issues/3070)

View File

@ -2,11 +2,10 @@ cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
# Disable in-source builds to prevent source tree corruption.
if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
message( FATAL_ERROR "FATAL: In-source builds are not allowed. You should create a separate directory for build files and delete CMakeCache.txt." )
endif()
if (COMMAND TRIBITS_PACKAGE)
TRIBITS_PACKAGE(Kokkos)
message(
FATAL_ERROR
"FATAL: In-source builds are not allowed. You should create a separate directory for build files and delete CMakeCache.txt."
)
endif()
# We want to determine if options are given with the wrong case
@ -15,72 +14,66 @@ endif()
# form a list of all the given variables. If it begins with any
# case of KoKkOS, we add it to the list.
GET_CMAKE_PROPERTY(_variableNames VARIABLES)
SET(KOKKOS_GIVEN_VARIABLES)
FOREACH (var ${_variableNames})
STRING(TOUPPER ${var} UC_VAR)
STRING(FIND ${UC_VAR} KOKKOS IDX)
IF (${IDX} EQUAL 0)
LIST(APPEND KOKKOS_GIVEN_VARIABLES ${var})
ENDIF()
ENDFOREACH()
get_cmake_property(_variableNames VARIABLES)
set(KOKKOS_GIVEN_VARIABLES)
foreach(var ${_variableNames})
string(TOUPPER ${var} UC_VAR)
string(FIND ${UC_VAR} KOKKOS IDX)
if(${IDX} EQUAL 0)
list(APPEND KOKKOS_GIVEN_VARIABLES ${var})
endif()
endforeach()
# Basic initialization (Used in KOKKOS_SETTINGS)
SET(Kokkos_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
SET(KOKKOS_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
SET(KOKKOS_SRC_PATH ${Kokkos_SOURCE_DIR})
SET(KOKKOS_PATH ${Kokkos_SOURCE_DIR})
SET(KOKKOS_TOP_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(Kokkos_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(KOKKOS_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(KOKKOS_SRC_PATH ${Kokkos_SOURCE_DIR})
set(KOKKOS_PATH ${Kokkos_SOURCE_DIR})
set(KOKKOS_TOP_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(PACKAGE_NAME Kokkos)
set(PACKAGE_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
# Is this a build as part of Trilinos?
IF(COMMAND TRIBITS_PACKAGE_DECL)
SET(KOKKOS_HAS_TRILINOS ON)
ELSE()
SET(KOKKOS_HAS_TRILINOS OFF)
SET(PACKAGE_NAME Kokkos)
SET(PACKAGE_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
ENDIF()
# Is this build a subdirectory of another project
GET_DIRECTORY_PROPERTY(HAS_PARENT PARENT_DIRECTORY)
get_directory_property(HAS_PARENT PARENT_DIRECTORY)
include(${KOKKOS_SRC_PATH}/cmake/kokkos_functions.cmake)
include(${KOKKOS_SRC_PATH}/cmake/kokkos_pick_cxx_std.cmake)
INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_functions.cmake)
INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_pick_cxx_std.cmake)
SET(KOKKOS_ENABLED_OPTIONS) #exported in config file
SET(KOKKOS_ENABLED_DEVICES) #exported in config file
SET(KOKKOS_ENABLED_TPLS) #exported in config file
SET(KOKKOS_ENABLED_ARCH_LIST) #exported in config file
set(KOKKOS_ENABLED_OPTIONS) #exported in config file
set(KOKKOS_ENABLED_DEVICES) #exported in config file
set(KOKKOS_ENABLED_TPLS) #exported in config file
set(KOKKOS_ENABLED_ARCH_LIST) #exported in config file
#These are helper flags used for sanity checks during config
#Certain features should depend on other features being configured first
SET(KOKKOS_CFG_DAG_NONE On) #sentinel to indicate no dependencies
SET(KOKKOS_CFG_DAG_DEVICES_DONE Off)
SET(KOKKOS_CFG_DAG_OPTIONS_DONE Off)
SET(KOKKOS_CFG_DAG_ARCH_DONE Off)
SET(KOKKOS_CFG_DAG_CXX_STD_DONE Off)
SET(KOKKOS_CFG_DAG_COMPILER_ID_DONE Off)
FUNCTION(KOKKOS_CFG_DEPENDS SUCCESSOR PRECURSOR)
SET(PRE_FLAG KOKKOS_CFG_DAG_${PRECURSOR})
SET(POST_FLAG KOKKOS_CFG_DAG_${SUCCESSOR})
IF (NOT ${PRE_FLAG})
MESSAGE(FATAL_ERROR "Bad CMake refactor: feature ${SUCCESSOR} cannot be configured until ${PRECURSOR} is configured")
ENDIF()
GLOBAL_SET(${POST_FLAG} On)
ENDFUNCTION()
set(KOKKOS_CFG_DAG_NONE On) #sentinel to indicate no dependencies
set(KOKKOS_CFG_DAG_DEVICES_DONE Off)
set(KOKKOS_CFG_DAG_OPTIONS_DONE Off)
set(KOKKOS_CFG_DAG_ARCH_DONE Off)
set(KOKKOS_CFG_DAG_CXX_STD_DONE Off)
set(KOKKOS_CFG_DAG_COMPILER_ID_DONE Off)
function(KOKKOS_CFG_DEPENDS SUCCESSOR PRECURSOR)
set(PRE_FLAG KOKKOS_CFG_DAG_${PRECURSOR})
set(POST_FLAG KOKKOS_CFG_DAG_${SUCCESSOR})
if(NOT ${PRE_FLAG})
message(
FATAL_ERROR "Bad CMake refactor: feature ${SUCCESSOR} cannot be configured until ${PRECURSOR} is configured"
)
endif()
global_set(${POST_FLAG} On)
endfunction()
list(APPEND CMAKE_MODULE_PATH cmake/Modules)
LIST(APPEND CMAKE_MODULE_PATH cmake/Modules)
IF(NOT KOKKOS_HAS_TRILINOS)
set(CMAKE_DISABLE_SOURCE_CHANGES ON)
set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
# What language are we compiling Kokkos as
# downstream dependencies need to match this!
SET(KOKKOS_COMPILE_LANGUAGE CXX)
set(KOKKOS_COMPILE_LANGUAGE CXX)
# use lower case here since we didn't parse options yet
IF (Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE AND Kokkos_ENABLE_CUDA)
if(Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE AND Kokkos_ENABLE_CUDA)
# Without this as a language for the package we would get a C++ compiler enabled.
# but we still need a C++ compiler even if we build all our cpp files as CUDA only
@ -88,69 +81,74 @@ IF(NOT KOKKOS_HAS_TRILINOS)
# This is just the rather odd way CMake does this, since CUDA doesn't imply C++ even
# though it is a C++ extension ... (but I guess it didn't use to be back in CUDA 4 or 5
# days.
SET(KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE CXX)
set(KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE CXX)
SET(KOKKOS_COMPILE_LANGUAGE CUDA)
ENDIF()
set(KOKKOS_COMPILE_LANGUAGE CUDA)
endif()
# use lower case here since we haven't parsed options yet
IF (Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE AND Kokkos_ENABLE_HIP)
if(Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE AND Kokkos_ENABLE_HIP)
# Without this as a language for the package we would get a C++ compiler enabled.
# but we still need a C++ compiler even if we build all our cpp files as HIP only
# because otherwise the C++ features don't work etc.
SET(KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE CXX)
set(KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE CXX)
SET(KOKKOS_COMPILE_LANGUAGE HIP)
ENDIF()
set(KOKKOS_COMPILE_LANGUAGE HIP)
endif()
IF (Spack_WORKAROUND)
IF (Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE)
MESSAGE(FATAL_ERROR "Can't currently use Kokkos_ENABLE_COMPILER_AS_CMAKE_LANGUAGE in a spack installation!")
ENDIF()
if(Spack_WORKAROUND)
if(Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE)
message(FATAL_ERROR "Can't currently use Kokkos_ENABLE_COMPILER_AS_CMAKE_LANGUAGE in a spack installation!")
endif()
#if we are explicitly using Spack for development,
#nuke the Spack compiler
SET(SPACK_CXX $ENV{SPACK_CXX})
IF(SPACK_CXX)
SET(CMAKE_CXX_COMPILER ${SPACK_CXX} CACHE STRING "the C++ compiler" FORCE)
SET(ENV{CXX} ${SPACK_CXX})
ENDIF()
ENDIF()
set(SPACK_CXX $ENV{SPACK_CXX})
if(SPACK_CXX)
set(CMAKE_CXX_COMPILER ${SPACK_CXX} CACHE STRING "the C++ compiler" FORCE)
set(ENV{CXX} ${SPACK_CXX})
endif()
endif()
# Always call the project command to define Kokkos_ variables
# and to make sure that C++ is an enabled language
PROJECT(Kokkos ${KOKKOS_COMPILE_LANGUAGE} ${KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE})
IF(NOT HAS_PARENT)
IF (NOT CMAKE_BUILD_TYPE)
SET(DEFAULT_BUILD_TYPE "RelWithDebInfo")
MESSAGE(STATUS "Setting build type to '${DEFAULT_BUILD_TYPE}' as none was specified.")
SET(CMAKE_BUILD_TYPE "${DEFAULT_BUILD_TYPE}" CACHE STRING
"Choose the type of build, options are: Debug, Release, RelWithDebInfo and MinSizeRel."
FORCE)
ENDIF()
ENDIF()
ELSE()
SET(KOKKOS_COMPILE_LANGUAGE CXX)
ENDIF()
IF (NOT CMAKE_SIZEOF_VOID_P)
STRING(FIND ${CMAKE_CXX_COMPILER} nvcc_wrapper FIND_IDX)
IF (NOT FIND_IDX STREQUAL -1)
MESSAGE(FATAL_ERROR "Kokkos did not configure correctly and failed to validate compiler. The most likely cause is CUDA linkage using nvcc_wrapper. Please ensure your CUDA environment is correctly configured.")
ELSE()
MESSAGE(FATAL_ERROR "Kokkos did not configure correctly and failed to validate compiler. The most likely cause is linkage errors during CMake compiler validation. Please consult the CMake error log shown below for the exact error during compiler validation")
ENDIF()
ELSEIF (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
IF(CMAKE_SIZEOF_VOID_P EQUAL 4)
MESSAGE(WARNING "32-bit builds are experimental and not officially supported.")
SET(KOKKOS_IMPL_32BIT ON)
ELSE()
MESSAGE(FATAL_ERROR "Kokkos assumes a 64-bit build, i.e., 8-byte pointers, but found ${CMAKE_SIZEOF_VOID_P}-byte pointers instead;")
ENDIF()
ENDIF()
project(Kokkos ${KOKKOS_COMPILE_LANGUAGE} ${KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE})
if(NOT HAS_PARENT)
if(NOT CMAKE_BUILD_TYPE)
set(DEFAULT_BUILD_TYPE "RelWithDebInfo")
message(STATUS "Setting build type to '${DEFAULT_BUILD_TYPE}' as none was specified.")
set(CMAKE_BUILD_TYPE "${DEFAULT_BUILD_TYPE}"
CACHE STRING "Choose the type of build, options are: Debug, Release, RelWithDebInfo and MinSizeRel." FORCE
)
endif()
endif()
if(NOT CMAKE_SIZEOF_VOID_P)
string(FIND ${CMAKE_CXX_COMPILER} nvcc_wrapper FIND_IDX)
if(NOT FIND_IDX STREQUAL -1)
message(
FATAL_ERROR
"Kokkos did not configure correctly and failed to validate compiler. The most likely cause is CUDA linkage using nvcc_wrapper. Please ensure your CUDA environment is correctly configured."
)
else()
message(
FATAL_ERROR
"Kokkos did not configure correctly and failed to validate compiler. The most likely cause is linkage errors during CMake compiler validation. Please consult the CMake error log shown below for the exact error during compiler validation"
)
endif()
elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
if(CMAKE_SIZEOF_VOID_P EQUAL 4)
message(WARNING "32-bit builds are experimental and not officially supported.")
set(KOKKOS_IMPL_32BIT ON)
else()
message(
FATAL_ERROR
"Kokkos assumes a 64-bit build, i.e., 8-byte pointers, but found ${CMAKE_SIZEOF_VOID_P}-byte pointers instead;"
)
endif()
endif()
set(Kokkos_VERSION_MAJOR 4)
set(Kokkos_VERSION_MINOR 4)
set(Kokkos_VERSION_MINOR 5)
set(Kokkos_VERSION_PATCH 1)
set(Kokkos_VERSION "${Kokkos_VERSION_MAJOR}.${Kokkos_VERSION_MINOR}.${Kokkos_VERSION_PATCH}")
message(STATUS "Kokkos version: ${Kokkos_VERSION}")
@ -164,58 +162,54 @@ math(EXPR KOKKOS_VERSION_PATCH "${KOKKOS_VERSION} % 100")
# Load either the real TriBITS or a TriBITS wrapper
# for certain utility functions that are universal (like GLOBAL_SET)
INCLUDE(${KOKKOS_SRC_PATH}/cmake/fake_tribits.cmake)
include(${KOKKOS_SRC_PATH}/cmake/fake_tribits.cmake)
IF (Kokkos_ENABLE_CUDA)
if(Kokkos_ENABLE_CUDA)
# If we are building CUDA, we have tricked CMake because we declare a CXX project
# If the default C++ standard for a given compiler matches the requested
# standard, then CMake just omits the -std flag in later versions of CMake
# This breaks CUDA compilation (CUDA compiler can have a different default
# -std then the underlying host compiler by itself). Setting this variable
# forces CMake to always add the -std flag even if it thinks it doesn't need it
GLOBAL_SET(CMAKE_CXX_STANDARD_DEFAULT 98)
ENDIF()
global_set(CMAKE_CXX_STANDARD_DEFAULT 98)
endif()
# These are the variables we will append to as we go
# I really wish these were regular variables
# but scoping issues can make it difficult
GLOBAL_SET(KOKKOS_COMPILE_OPTIONS)
GLOBAL_SET(KOKKOS_LINK_OPTIONS)
GLOBAL_SET(KOKKOS_AMDGPU_OPTIONS)
GLOBAL_SET(KOKKOS_CUDA_OPTIONS)
GLOBAL_SET(KOKKOS_CUDAFE_OPTIONS)
GLOBAL_SET(KOKKOS_XCOMPILER_OPTIONS)
global_set(KOKKOS_COMPILE_OPTIONS)
global_set(KOKKOS_LINK_OPTIONS)
global_set(KOKKOS_AMDGPU_OPTIONS)
global_set(KOKKOS_CUDA_OPTIONS)
global_set(KOKKOS_CUDAFE_OPTIONS)
global_set(KOKKOS_XCOMPILER_OPTIONS)
# We need to append text here for making sure TPLs
# we import are available for an installed Kokkos
GLOBAL_SET(KOKKOS_TPL_EXPORTS)
global_set(KOKKOS_TPL_EXPORTS)
# KOKKOS_DEPENDENCE is used by kokkos_launch_compiler
GLOBAL_SET(KOKKOS_COMPILE_DEFINITIONS KOKKOS_DEPENDENCE)
global_set(KOKKOS_COMPILE_DEFINITIONS KOKKOS_DEPENDENCE)
# MSVC never goes through kokkos_launch_compiler
IF(NOT MSVC)
GLOBAL_APPEND(KOKKOS_LINK_OPTIONS -DKOKKOS_DEPENDENCE)
ENDIF()
if(NOT MSVC)
global_append(KOKKOS_LINK_OPTIONS -DKOKKOS_DEPENDENCE)
endif()
IF(Kokkos_ENABLE_TESTS AND NOT KOKKOS_HAS_TRILINOS)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/kokkos_configure_trilinos.cmake)
if(Kokkos_ENABLE_TESTS)
find_package(GTest QUIET)
ENDIF()
endif()
# Include a set of Kokkos-specific wrapper functions that
# will either call raw CMake or TriBITS
# These are functions like KOKKOS_INCLUDE_DIRECTORIES
INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_tribits.cmake)
include(${KOKKOS_SRC_PATH}/cmake/kokkos_tribits.cmake)
# Check the environment and set certain variables
# to allow platform-specific checks
INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_check_env.cmake)
include(${KOKKOS_SRC_PATH}/cmake/kokkos_check_env.cmake)
IF(NOT KOKKOS_HAS_TRILINOS)
# This does not work in Trilinos and we simply don't care
# to fix it for Trilinos
# Gather information about the runtime environment
INCLUDE(${KOKKOS_SRC_PATH}/cmake/build_env_info.cmake)
include(${KOKKOS_SRC_PATH}/cmake/build_env_info.cmake)
check_git_setup()
ENDIF()
# The build environment setup goes in the following steps
# 1) Check all the enable options. This includes checking Kokkos_DEVICES
@ -223,102 +217,54 @@ ENDIF()
# 3) Check the CXX standard and select important CXX flags
# 4) Check for any third-party libraries (TPLs) like hwloc
# 5) Check if optimizing for a particular architecture and add arch-specific flags
KOKKOS_SETUP_BUILD_ENVIRONMENT()
kokkos_setup_build_environment()
# Finish off the build
# 6) Recurse into subdirectories and configure individual libraries
# 7) Export and install targets
OPTION(BUILD_SHARED_LIBS "Build shared libraries" OFF)
option(BUILD_SHARED_LIBS "Build shared libraries" OFF)
SET(KOKKOS_COMPONENT_LIBRARIES kokkoscore kokkoscontainers kokkosalgorithms kokkossimd)
SET_PROPERTY(GLOBAL PROPERTY KOKKOS_INT_LIBRARIES kokkos ${KOKKOS_COMPONENT_LIBRARIES})
set(KOKKOS_COMPONENT_LIBRARIES kokkoscore kokkoscontainers kokkosalgorithms kokkossimd)
set_property(GLOBAL PROPERTY KOKKOS_INT_LIBRARIES kokkos ${KOKKOS_COMPONENT_LIBRARIES})
IF (KOKKOS_HAS_TRILINOS)
SET(TRILINOS_INCDIR ${${PROJECT_NAME}_INSTALL_INCLUDE_DIR})
SET(KOKKOS_HEADER_DIR ${TRILINOS_INCDIR})
SET(KOKKOS_IS_SUBDIRECTORY TRUE)
ELSEIF(HAS_PARENT)
SET(KOKKOS_HEADER_DIR "include/kokkos")
SET(KOKKOS_IS_SUBDIRECTORY TRUE)
ELSE()
SET(KOKKOS_HEADER_DIR "${CMAKE_INSTALL_INCLUDEDIR}")
SET(KOKKOS_IS_SUBDIRECTORY FALSE)
ENDIF()
if(HAS_PARENT)
set(KOKKOS_HEADER_DIR "include/kokkos")
set(KOKKOS_IS_SUBDIRECTORY TRUE)
else()
set(KOKKOS_HEADER_DIR "${CMAKE_INSTALL_INCLUDEDIR}")
set(KOKKOS_IS_SUBDIRECTORY FALSE)
endif()
#------------------------------------------------------------------------------
#
# A) Forward declare the package so that certain options are also defined for
# subpackages
## This restores the old behavior of ProjectCompilerPostConfig.cmake
# We must do this before KOKKOS_PACKAGE_DECL
IF (KOKKOS_HAS_TRILINOS)
# Overwrite the old flags at the top-level
# Because Tribits doesn't use lists, it uses spaces for the list of CXX flags
# we have to match the annoying behavior, also we have to preserve quotes
# which needs another workaround.
SET(KOKKOS_COMPILE_OPTIONS_TMP)
IF (KOKKOS_ENABLE_HIP)
LIST(APPEND KOKKOS_COMPILE_OPTIONS ${KOKKOS_AMDGPU_OPTIONS})
ENDIF()
FOREACH(OPTION ${KOKKOS_COMPILE_OPTIONS})
STRING(FIND "${OPTION}" " " OPTION_HAS_WHITESPACE)
IF(OPTION_HAS_WHITESPACE EQUAL -1)
LIST(APPEND KOKKOS_COMPILE_OPTIONS_TMP "${OPTION}")
ELSE()
LIST(APPEND KOKKOS_COMPILE_OPTIONS_TMP "\"${OPTION}\"")
ENDIF()
ENDFOREACH()
STRING(REPLACE ";" " " KOKKOSCORE_COMPILE_OPTIONS "${KOKKOS_COMPILE_OPTIONS_TMP}")
LIST(APPEND KOKKOS_ALL_COMPILE_OPTIONS ${KOKKOS_COMPILE_OPTIONS})
IF (KOKKOS_ENABLE_CUDA)
LIST(APPEND KOKKOS_ALL_COMPILE_OPTIONS ${KOKKOS_CUDA_OPTIONS})
ENDIF()
FOREACH(XCOMP_FLAG ${KOKKOS_XCOMPILER_OPTIONS})
SET(KOKKOSCORE_XCOMPILER_OPTIONS "${KOKKOSCORE_XCOMPILER_OPTIONS} -Xcompiler ${XCOMP_FLAG}")
LIST(APPEND KOKKOS_ALL_COMPILE_OPTIONS -Xcompiler ${XCOMP_FLAG})
ENDFOREACH()
IF (KOKKOS_ENABLE_CUDA)
STRING(REPLACE ";" " " KOKKOSCORE_CUDA_OPTIONS "${KOKKOS_CUDA_OPTIONS}")
FOREACH(CUDAFE_FLAG ${KOKKOS_CUDAFE_OPTIONS})
SET(KOKKOSCORE_CUDAFE_OPTIONS "${KOKKOSCORE_CUDAFE_OPTIONS} -Xcudafe ${CUDAFE_FLAG}")
LIST(APPEND KOKKOS_ALL_COMPILE_OPTIONS -Xcudafe ${CUDAFE_FLAG})
ENDFOREACH()
ENDIF()
#These flags get set up in KOKKOS_PACKAGE_DECL, which means they
#must be configured before KOKKOS_PACKAGE_DECL
SET(KOKKOS_ALL_COMPILE_OPTIONS
$<$<COMPILE_LANGUAGE:CXX>:${KOKKOS_ALL_COMPILE_OPTIONS}>)
ENDIF()
#------------------------------------------------------------------------------
#
# D) Process the subpackages (subdirectories) for Kokkos
#
KOKKOS_PROCESS_SUBPACKAGES()
kokkos_process_subpackages()
#------------------------------------------------------------------------------
#
# E) If Kokkos itself is enabled, process the Kokkos package
#
KOKKOS_PACKAGE_POSTPROCESS()
KOKKOS_CONFIGURE_CORE()
kokkos_configure_core()
IF (NOT KOKKOS_HAS_TRILINOS AND NOT Kokkos_INSTALL_TESTING)
ADD_LIBRARY(kokkos INTERFACE)
if(NOT Kokkos_INSTALL_TESTING)
add_library(kokkos INTERFACE)
#Make sure in-tree projects can reference this as Kokkos::
#to match the installed target names
ADD_LIBRARY(Kokkos::kokkos ALIAS kokkos)
add_library(Kokkos::kokkos ALIAS kokkos)
# all_libs target is required for TriBITS-compliance
ADD_LIBRARY(Kokkos::all_libs ALIAS kokkos)
TARGET_LINK_LIBRARIES(kokkos INTERFACE ${KOKKOS_COMPONENT_LIBRARIES})
KOKKOS_INTERNAL_ADD_LIBRARY_INSTALL(kokkos)
ENDIF()
INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_install.cmake)
add_library(Kokkos::all_libs ALIAS kokkos)
target_link_libraries(kokkos INTERFACE ${KOKKOS_COMPONENT_LIBRARIES})
kokkos_internal_add_library_install(kokkos)
endif()
include(${KOKKOS_SRC_PATH}/cmake/kokkos_install.cmake)
# nvcc_wrapper is Kokkos' wrapper for NVIDIA's NVCC CUDA compiler.
# Kokkos needs nvcc_wrapper in order to build. Other libraries and
@ -327,16 +273,15 @@ INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_install.cmake)
# as relative to ${CMAKE_INSTALL_PATH}.
# KOKKOS_INSTALL_ADDITIONAL_FILES will install nvcc wrapper and other generated
# files
KOKKOS_INSTALL_ADDITIONAL_FILES()
kokkos_install_additional_files()
# Finally - if we are a subproject - make sure the enabled devices are visible
IF (HAS_PARENT)
FOREACH(DEV Kokkos_ENABLED_DEVICES)
if(HAS_PARENT)
foreach(DEV Kokkos_ENABLED_DEVICES)
#I would much rather not make these cache variables or global properties, but I can't
#make any guarantees on whether PARENT_SCOPE is good enough to make
#these variables visible where I need them
SET(Kokkos_ENABLE_${DEV} ON PARENT_SCOPE)
SET_PROPERTY(GLOBAL PROPERTY Kokkos_ENABLE_${DEV} ON)
ENDFOREACH()
ENDIF()
set(Kokkos_ENABLE_${DEV} ON PARENT_SCOPE)
set_property(GLOBAL PROPERTY Kokkos_ENABLE_${DEV} ON)
endforeach()
endif()

View File

@ -7,6 +7,8 @@ We actively welcome pull requests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
Before sending your patch for review, please try to ensure that it is formatted properly. We use clang-format version 16 for this.
## Issues
We use GitHub issues to track public bugs. Please ensure your description is clear and has sufficient instructions to be able to reproduce the issue.

View File

@ -1,73 +0,0 @@
Developers of Kokkos (those who commit modifications to Kokkos)
must maintain the snapshot of Kokkos in the Trilinos repository.
This file contains instructions for how to
snapshot Kokkos from github.com/kokkos to Trilinos.
------------------------------------------------------------------------
*** EVERYTHING GOES RIGHT WORKFLOW ***
1) Given a 'git clone' of Kokkos and of Trilinos repositories.
1.1) Let ${KOKKOS} be the absolute path to the Kokkos clone.
This path *must* terminate with the directory name 'kokkos';
e.g., ${HOME}/kokkos .
1.2) Let ${TRILINOS} be the absolute path to the Trilinos directory.
2) Given that the Kokkos build & test is clean and
changes are committed to the Kokkos clone.
3) Snapshot the current commit in the Kokkos clone into the Trilinos clone.
This overwrites ${TRILINOS}/packages/kokkos with the content of ${KOKKOS}:
${KOKKOS}/scripts/snapshot.py --verbose ${KOKKOS} ${TRILINOS}/packages
4) Verify the snapshot commit happened as expected
cd ${TRILINOS}/packages/kokkos
git log -1 --name-only
5) Modify, build, and test Trilinos with the Kokkos snapshot.
6) Given that that the Trilinos build & test is clean and
changes are committed to the Trilinos clone.
7) Attempt push to the Kokkos repository.
If push fails then you must 'remove the Kokkos snapshot'
from your Trilinos clone.
See below.
8) Attempt to push to the Trilinos repository.
If updating for a failed push requires you to change Kokkos you must
'remove the Kokkos snapshot' from your Trilinos clone.
See below.
------------------------------------------------------------------------
*** WHEN SOMETHING GOES WRONG AND YOU MUST ***
*** REMOVE THE KOKKOS SNAPSHOT FROM YOUR TRILINOS CLONE ***
1) Query the Trilinos clone commit log.
git log --oneline
2) Note the <SHA1> of the commit to the Trillinos clone
immediately BEFORE the Kokkos snapshot commit.
Copy this <SHA1> for use in the next command.
3) IF more than one outstanding commit then you can remove just the
Kokkos snapshot commit with 'git rebase -i'. Edit the rebase file.
Remove or comment out the Kokkos snapshot commit entry.
git rebase -i <SHA1>
4) IF the Kokkos snapshot commit is the one and only
outstanding commit then remove just than commit.
git reset --hard HEAD~1
------------------------------------------------------------------------
*** REGARDING 'snapshot.py' TOOL ***
The 'snapshot.py' tool is developed and maintained by the
Center for Computing Research (CCR)
Software Engineering, Maintenance, and Support (SEMS) team.
Contact Brent Perschbacher <bmpersc@sandia.gov> for questions>
------------------------------------------------------------------------

View File

@ -1,6 +1,6 @@
# Default settings common options.
#LAMMPS specific settings:
#SPARTA specific settings:
ifndef KOKKOS_PATH
KOKKOS_PATH=../../lib/kokkos
endif
@ -11,7 +11,7 @@ CXXFLAGS += $(SHFLAGS)
endif
KOKKOS_VERSION_MAJOR = 4
KOKKOS_VERSION_MINOR = 4
KOKKOS_VERSION_MINOR = 5
KOKKOS_VERSION_PATCH = 1
KOKKOS_VERSION = $(shell echo $(KOKKOS_VERSION_MAJOR)*10000+$(KOKKOS_VERSION_MINOR)*100+$(KOKKOS_VERSION_PATCH) | bc)
@ -23,7 +23,7 @@ KOKKOS_DEVICES ?= "OpenMP"
# NVIDIA: Kepler,Kepler30,Kepler32,Kepler35,Kepler37,Maxwell,Maxwell50,Maxwell52,Maxwell53,Pascal60,Pascal61,Volta70,Volta72,Turing75,Ampere80,Ampere86,Ada89,Hopper90
# ARM: ARMv80,ARMv81,ARMv8-ThunderX,ARMv8-TX2,A64FX,ARMv9-Grace
# IBM: Power8,Power9
# AMD-GPUS: AMD_GFX906,AMD_GFX908,AMD_GFX90A,AMD_GFX940,AMD_GFX942,AMD_GFX1030,AMD_GFX1100,AMD_GFX1103
# AMD-GPUS: AMD_GFX906,AMD_GFX908,AMD_GFX90A,AMD_GFX940,AMD_GFX942,AMD_GFX942_APU,AMD_GFX1030,AMD_GFX1100,AMD_GFX1103
# AMD-CPUS: AMDAVX,Zen,Zen2,Zen3
# Intel-GPUs: Intel_Gen,Intel_Gen9,Intel_Gen11,Intel_Gen12LP,Intel_DG1,Intel_XeHP,Intel_PVC
KOKKOS_ARCH ?= ""
@ -40,16 +40,19 @@ KOKKOS_TRIBITS ?= "no"
KOKKOS_STANDALONE_CMAKE ?= "no"
# Default settings specific options.
# Options: force_uvm,use_ldg,rdc,enable_lambda,enable_constexpr,disable_malloc_async
KOKKOS_CUDA_OPTIONS ?= "disable_malloc_async"
# Options: force_uvm,use_ldg,rdc,enable_lambda,enable_constexpr,enable_malloc_async
KOKKOS_CUDA_OPTIONS ?= ""
# Options: rdc
# Options: rdc,enable_malloc_async
KOKKOS_HIP_OPTIONS ?= ""
# Default settings specific options.
# Options: enable_async_dispatch
KOKKOS_HPX_OPTIONS ?= ""
#Options : force_host_as_device
KOKKOS_OPENACC_OPTIONS ?= ""
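Because these option lists use `?=` defaults, they can be overridden on the make command line. A hypothetical invocation (target and values chosen only for illustration; the exact application Makefile wiring varies):
```bash
# Hypothetical example: request CUDA stream-ordered allocations and relocatable
# device code through the comma-separated options list parsed above.
make KOKKOS_DEVICES=Cuda KOKKOS_ARCH=Ampere80 \
     KOKKOS_CUDA_OPTIONS="enable_malloc_async,rdc"
```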
# Helper functions for conversion to upper case
uppercase_TABLE:=a,A b,B c,C d,D e,E f,F g,G h,H i,I j,J k,K l,L m,M n,N o,O p,P q,Q r,R s,S t,T u,U v,V w,W x,X y,Y z,Z
uppercase_internal=$(if $1,$$(subst $(firstword $1),$(call uppercase_internal,$(wordlist 2,$(words $1),$1),$2)),$2)
@ -92,7 +95,7 @@ KOKKOS_INTERNAL_CUDA_USE_UVM := $(call kokkos_has_string,$(KOKKOS_CUDA_OPTIONS),
KOKKOS_INTERNAL_CUDA_USE_RELOC := $(call kokkos_has_string,$(KOKKOS_CUDA_OPTIONS),rdc)
KOKKOS_INTERNAL_CUDA_USE_LAMBDA := $(call kokkos_has_string,$(KOKKOS_CUDA_OPTIONS),enable_lambda)
KOKKOS_INTERNAL_CUDA_USE_CONSTEXPR := $(call kokkos_has_string,$(KOKKOS_CUDA_OPTIONS),enable_constexpr)
KOKKOS_INTERNAL_CUDA_DISABLE_MALLOC_ASYNC := $(call kokkos_has_string,$(KOKKOS_CUDA_OPTIONS),disable_malloc_async)
KOKKOS_INTERNAL_CUDA_ENABLE_MALLOC_ASYNC := $(call kokkos_has_string,$(KOKKOS_CUDA_OPTIONS),enable_malloc_async)
KOKKOS_INTERNAL_HPX_ENABLE_ASYNC_DISPATCH := $(call kokkos_has_string,$(KOKKOS_HPX_OPTIONS),enable_async_dispatch)
# deprecated
KOKKOS_INTERNAL_ENABLE_DESUL_ATOMICS := $(call kokkos_has_string,$(KOKKOS_OPTIONS),enable_desul_atomics)
@ -103,6 +106,8 @@ KOKKOS_INTERNAL_DISABLE_DEPRECATED_CODE := $(call kokkos_has_string,$(KOKKOS_OPT
KOKKOS_INTERNAL_ENABLE_DEPRECATION_WARNINGS := $(call kokkos_has_string,$(KOKKOS_OPTIONS),enable_deprecation_warnings)
KOKKOS_INTERNAL_HIP_USE_RELOC := $(call kokkos_has_string,$(KOKKOS_HIP_OPTIONS),rdc)
KOKKOS_INTERNAL_HIP_ENABLE_MALLOC_ASYNC := $(call kokkos_has_string,$(KOKKOS_HIP_OPTIONS),enable_malloc_async)
KOKKOS_INTERNAL_OPENACC_FORCE_HOST_AS_DEVICE := $(call kokkos_has_string,$(KOKKOS_OPENACC_OPTIONS),force_host_as_device)
# Check for Kokkos Host Execution Spaces one of which must be on.
KOKKOS_INTERNAL_USE_OPENMP := $(call kokkos_has_string,$(subst OpenMPTarget,,$(KOKKOS_DEVICES)),OpenMP)
@ -178,7 +183,7 @@ KOKKOS_INTERNAL_COMPILER_CRAY := $(strip $(shell $(CXX) -craype-verbose 2
KOKKOS_INTERNAL_COMPILER_NVCC := $(strip $(shell echo "$(shell export OMPI_CXX=$(OMPI_CXX); export MPICH_CXX=$(MPICH_CXX); $(CXX) --version 2>&1 | grep -c nvcc)>0" | bc))
KOKKOS_INTERNAL_COMPILER_NVHPC := $(strip $(shell $(CXX) --version 2>&1 | grep -c "nvc++"))
KOKKOS_INTERNAL_COMPILER_CLANG := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),clang)
KOKKOS_INTERNAL_COMPILER_CRAY_CLANG := $(strip $(shell $(CXX) -craype-verbose 2>&1 | grep -c "clang++"))
KOKKOS_INTERNAL_COMPILER_CRAY_CLANG := $(strip $(shell $(CXX) -craype-verbose 2>&1 | grep -v "error:" | grep -c "clang++"))
KOKKOS_INTERNAL_COMPILER_INTEL_CLANG := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),oneAPI)
KOKKOS_INTERNAL_COMPILER_APPLE_CLANG := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),Apple clang)
KOKKOS_INTERNAL_COMPILER_HCC := $(call kokkos_has_string,$(KOKKOS_CXX_VERSION),HCC)
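The added `grep -v "error:"` in the Cray clang check matters because a plain clang++ rejects `-craype-verbose` with a diagnostic that itself contains the string "clang++"; without the filter that error line would be counted as a Cray clang match. A quick way to see this (exact wording varies by clang version):
```bash
# A non-Cray clang prints something like:
#   clang++: error: unknown argument: '-craype-verbose'
clang++ -craype-verbose 2>&1

# Dropping error lines before counting avoids the false positive:
clang++ -craype-verbose 2>&1 | grep -v "error:" | grep -c "clang++"
```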
@ -292,6 +297,8 @@ ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
# Set OpenACC flags.
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 1)
KOKKOS_INTERNAL_OPENACC_FLAG := -acc
else ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
KOKKOS_INTERNAL_OPENACC_FLAG := -fopenacc -fopenacc-fake-async-wait -fopenacc-implicit-worker=vector -Wno-openacc-and-cxx -Wno-openmp-mapping -Wno-unknown-cuda-version -Wno-pass-failed
else
$(error Makefile.kokkos: OpenACC is enabled but the compiler must be NVHPC (got version string $(KOKKOS_CXX_VERSION)))
endif
@ -411,8 +418,8 @@ ifeq ($(KOKKOS_INTERNAL_USE_ARCH_NVIDIA), 0)
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_NVIDIA), 1)
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
KOKKOS_INTERNAL_NVCC_PATH := $(shell which nvcc)
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
CUDA_PATH ?= $(KOKKOS_INTERNAL_NVCC_PATH:/bin/nvcc=)
ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
KOKKOS_INTERNAL_OPENMPTARGET_FLAG := $(KOKKOS_INTERNAL_OPENMPTARGET_FLAG) --cuda-path=$(CUDA_PATH)
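The suffix substitution above simply strips `/bin/nvcc` from the detected compiler path to recover the CUDA toolkit root; the equivalent shell steps (paths illustrative) are:
```bash
# Illustrative only: derive CUDA_PATH from the nvcc location, as the Makefile does.
NVCC_PATH=$(which nvcc)            # e.g. /usr/local/cuda/bin/nvcc
CUDA_PATH=${NVCC_PATH%/bin/nvcc}   # -> /usr/local/cuda
echo "--cuda-path=${CUDA_PATH}"
```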
@ -457,6 +464,7 @@ ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX90A), 0)
endif
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX940 := $(call kokkos_has_string,$(KOKKOS_ARCH),AMD_GFX940)
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX942 := $(call kokkos_has_string,$(KOKKOS_ARCH),AMD_GFX942)
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX942_APU := $(call kokkos_has_string,$(KOKKOS_ARCH),AMD_GFX942_APU)
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1030 := $(call kokkos_has_string,$(KOKKOS_ARCH),AMD_GFX1030)
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1030), 0)
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1030 := $(call kokkos_has_string,$(KOKKOS_ARCH),NAVI1030)
@ -466,6 +474,15 @@ ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1100), 0)
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1100 := $(call kokkos_has_string,$(KOKKOS_ARCH),NAVI1100)
endif
KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1103 := $(call kokkos_has_string,$(KOKKOS_ARCH),AMD_GFX1103)
KOKKOS_INTERNAL_USE_ARCH_AMD := $(shell expr $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX906) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX908) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX90A) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX940) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX942) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX942_APU) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1030) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1100) \
+ $(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1103))
# Any AVX?
KOKKOS_INTERNAL_USE_ARCH_AVX := $(shell expr $(KOKKOS_INTERNAL_USE_ARCH_SNB) + $(KOKKOS_INTERNAL_USE_ARCH_AMDAVX))
@ -561,6 +578,9 @@ endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_OPENACC")
ifeq ($(KOKKOS_INTERNAL_OPENACC_FORCE_HOST_AS_DEVICE), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE")
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMP), 1)
@ -733,7 +753,7 @@ ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
endif
endif
ifeq ($(KOKKOS_INTERNAL_CUDA_DISABLE_MALLOC_ASYNC), 0)
ifeq ($(KOKKOS_INTERNAL_CUDA_ENABLE_MALLOC_ASYNC), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC")
else
tmp := $(call kokkos_append_header,"/* $H""undef KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC */")
@ -1024,86 +1044,122 @@ ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG=--offload-arch
endif
endif
# Do not add this flag if it's the cray compiler or the nvhpc compiler.
ifeq ($(KOKKOS_INTERNAL_COMPILER_CRAY_CLANG), 0)
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
# Let's start with adding architecture defines
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_KEPLER30), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER30")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_30
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_KEPLER32), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER32")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_32
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_KEPLER35), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER35")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_35
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_KEPLER37), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_KEPLER37")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_37
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_MAXWELL50), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_MAXWELL")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_MAXWELL50")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_50
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_MAXWELL52), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_MAXWELL")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_MAXWELL52")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_52
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_MAXWELL53), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_MAXWELL")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_MAXWELL53")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_53
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_PASCAL60), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_PASCAL")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_PASCAL60")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_60
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_PASCAL61), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_PASCAL")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_PASCAL61")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_61
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_VOLTA70), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_VOLTA")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_VOLTA70")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_70
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_VOLTA72), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_VOLTA")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_VOLTA72")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_72
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_TURING75), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_TURING75")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_75
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMPERE80), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE80")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_80
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMPERE86), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMPERE86")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_86
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_ADA89), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_ADA89")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_89
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_HOPPER90), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_HOPPER")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_HOPPER90")
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 0)
KOKKOS_INTERNAL_CUDA_ARCH_FLAG := $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)=sm_90
endif
endif
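Each NVIDIA block follows the same pattern: emit a `KOKKOS_ARCH_*` define into the generated config header and, for compilers other than NVHPC, append `=sm_XX` to the architecture flag chosen earlier. For example, with `KOKKOS_ARCH=Ampere80` and nvcc as the device compiler, the resulting compile line carries an architecture option along these lines (file names illustrative):
```bash
# Sketch: nvcc's base arch flag plus the sm_80 suffix appended above.
nvcc -arch=sm_80 -c my_kernel.cu -o my_kernel.o
```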
@ -1119,6 +1175,9 @@ ifneq ($(KOKKOS_INTERNAL_USE_ARCH_NVIDIA), 0)
ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
KOKKOS_LDFLAGS += $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
KOKKOS_LDFLAGS += $(KOKKOS_INTERNAL_CUDA_ARCH_FLAG)
endif
endif
endif
@ -1126,43 +1185,48 @@ endif
# Figure out the architecture flag for ROCm.
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX906), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX906")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx906
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx906\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx906
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX908), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX908")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx908
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx908\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx908
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX90A), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX90A")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx90a
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx90A\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx90a
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX940), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX940")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx940
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx940\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx940
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX942), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX942")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx942
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx942\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx942
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX942_APU), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX942_APU")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx942\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx942
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1030), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX1030")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx1030
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx1030\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx1030
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1100), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX1100")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx1100
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx1100\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx1100
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD_GFX1103), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GFX1103")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --offload-arch=gfx1103
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_GPU \"gfx1103\"")
KOKKOS_INTERNAL_AMD_ARCH_FLAG := --offload-arch=gfx1103
endif
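The AMD branches mirror this: each defines `KOKKOS_ARCH_AMD_GFX*`, records the gfx name in `KOKKOS_ARCH_AMD_GPU`, and sets `--offload-arch=gfx<id>` in `KOKKOS_INTERNAL_AMD_ARCH_FLAG`. A sketch of what that flag looks like on a HIP compile line for gfx90a hardware (compiler and file names illustrative):
```bash
# Sketch: the gfx90a branch adds --offload-arch=gfx90a to KOKKOS_CXXFLAGS/KOKKOS_LDFLAGS.
hipcc --offload-arch=gfx90a -c my_kernel.cpp -o my_kernel.o
```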
@ -1171,8 +1235,8 @@ ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
KOKKOS_SRC += $(KOKKOS_PATH)/tpls/desul/src/Lock_Array_HIP.cpp
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/HIP/*.hpp)
KOKKOS_CXXFLAGS+=$(KOKKOS_INTERNAL_HIP_ARCH_FLAG)
KOKKOS_LDFLAGS+=$(KOKKOS_INTERNAL_HIP_ARCH_FLAG)
KOKKOS_CXXFLAGS+=$(KOKKOS_INTERNAL_AMD_ARCH_FLAG)
KOKKOS_LDFLAGS+=$(KOKKOS_INTERNAL_AMD_ARCH_FLAG)
ifeq ($(KOKKOS_INTERNAL_HIP_USE_RELOC), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE")
@ -1182,6 +1246,21 @@ ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
KOKKOS_CXXFLAGS+=-fno-gpu-rdc
KOKKOS_LDFLAGS+=-fno-gpu-rdc
endif
ifeq ($(KOKKOS_INTERNAL_HIP_ENABLE_MALLOC_ASYNC), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_IMPL_HIP_MALLOC_ASYNC")
else
tmp := $(call kokkos_append_header,"/* $H""undef KOKKOS_ENABLE_IMPL_HIP_MALLOC_ASYNC */")
endif
endif
ifneq ($(KOKKOS_INTERNAL_USE_ARCH_AMD), 0)
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
KOKKOS_CXXFLAGS += $(KOKKOS_INTERNAL_AMD_ARCH_FLAG)
KOKKOS_LDFLAGS += $(KOKKOS_INTERNAL_AMD_ARCH_FLAG)
endif
endif
endif
# Figure out Intel architecture flags.
@ -1235,6 +1314,8 @@ ifeq ($(KOKKOS_INTERNAL_USE_SYCL), 1)
KOKKOS_CXXFLAGS+=$(KOKKOS_INTERNAL_INTEL_ARCH_FLAG)
KOKKOS_LDFLAGS+=-fsycl
KOKKOS_LDFLAGS+=$(KOKKOS_INTERNAL_INTEL_ARCH_FLAG)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE")
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
@ -1322,6 +1403,8 @@ ifneq ($(KOKKOS_INTERNAL_NEW_CONFIG), 0)
endif
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/View/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/View/MDSpan/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/impl/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/containers/src/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/containers/src/impl/*.hpp)
@ -1374,6 +1457,48 @@ ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
KOKKOS_CXXFLAGS += $(KOKKOS_INTERNAL_OPENACC_FLAG)
KOKKOS_LDFLAGS += $(KOKKOS_INTERNAL_OPENACC_FLAG)
KOKKOS_LIBS += $(KOKKOS_INTERNAL_OPENACC_LIB)
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_NVIDIA), 1)
ifneq ($(CUDA_PATH),)
ifeq ($(call kokkos_path_exists,$(CUDA_PATH)/lib), 1)
CUDA_PATH := $(CUDA_PATH:/compilers=/cuda)
endif
endif
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
ifneq ($(CUDA_PATH),)
KOKKOS_LDFLAGS += -L$(CUDA_PATH)/lib64
endif
KOKKOS_LIBS += -lcudart
endif
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 1)
KOKKOS_LIBS += -cuda
endif
ifeq ($(KOKKOS_INTERNAL_OPENACC_FORCE_HOST_AS_DEVICE), 1)
$(error If a GPU architecture is specified, KOKKOS_OPENACC_OPTIONS = force_host_as_device cannot be used. Disable the force_host_as_device option)
endif
else ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AMD), 1)
ifeq ($(KOKKOS_INTERNAL_COMPILER_CLANG), 1)
ifneq ($(ROCM_PATH),)
KOKKOS_CPPFLAGS += -I$(ROCM_PATH)/include
KOKKOS_LDFLAGS += -L$(ROCM_PATH)/lib
endif
KOKKOS_LIBS += -lamdhip64
endif
ifeq ($(KOKKOS_INTERNAL_OPENACC_FORCE_HOST_AS_DEVICE), 1)
$(error If a GPU architecture is specified, KOKKOS_OPENACC_OPTIONS = force_host_as_device cannot be used. Disable the force_host_as_device option)
endif
else ifeq ($(KOKKOS_INTERNAL_OPENACC_FORCE_HOST_AS_DEVICE), 1)
# Compile for kernel execution on the host. In that case,
# memory is shared between the OpenACC space and the host space.
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 1)
KOKKOS_CXXFLAGS += -acc=multicore
endif
else
# Automatic fallback mode; try to offload to any available GPU, and fall back
# to the host CPU if no available GPU is found.
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 1)
KOKKOS_CXXFLAGS += -acc=gpu,multicore
endif
endif
endif
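When no GPU architecture is selected, the NVHPC branches above choose between host-only multicore execution (force_host_as_device) and an automatic GPU-with-host-fallback default. A sketch of a compile line for the fallback case (source file name illustrative):
```bash
# Sketch: NVHPC OpenACC build that offloads to a GPU when one is found and
# otherwise falls back to multicore host execution.
nvc++ -acc=gpu,multicore -c my_kernels.cpp -o my_kernels.o
```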
ifeq ($(KOKKOS_INTERNAL_USE_OPENMP), 1)
@ -1484,10 +1609,14 @@ else
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)
ifeq ($(KOKKOS_INTERNAL_COMPILER_NVHPC), 1)
tmp := $(call desul_append_header,"$H""define DESUL_ATOMICS_ENABLE_OPENACC")
else
tmp := $(call desul_append_header,"/* $H""undef DESUL_ATOMICS_ENABLE_OPENACC */")
endif
else
tmp := $(call desul_append_header,"/* $H""undef DESUL_ATOMICS_ENABLE_OPENACC */")
endif
tmp := $(call desul_append_header, "")
tmp := $(call desul_append_header, "$H""endif")
@ -1512,6 +1641,12 @@ $(DESUL_CONFIG_HEADER):
KOKKOS_CPP_DEPENDS := $(DESUL_CONFIG_HEADER) KokkosCore_config.h $(KOKKOS_HEADERS)
# Tasking is deprecated
ifeq ($(KOKKOS_INTERNAL_DISABLE_DEPRECATED_CODE), 1)
TMP_KOKKOS_SRC := $(KOKKOS_SRC)
KOKKOS_SRC = $(patsubst %Task.cpp,, $(TMP_KOKKOS_SRC))
endif
KOKKOS_OBJ = $(KOKKOS_SRC:.cpp=.o)
KOKKOS_OBJ_LINK = $(notdir $(KOKKOS_OBJ))

View File

@ -16,8 +16,6 @@ Kokkos_HostSpace.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_Ho
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_HostSpace.cpp
Kokkos_hwloc.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_hwloc.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_hwloc.cpp
Kokkos_TaskQueue.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_TaskQueue.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_TaskQueue.cpp
Kokkos_HostThreadTeam.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_HostThreadTeam.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_HostThreadTeam.cpp
Kokkos_HostBarrier.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_HostBarrier.cpp
@ -38,17 +36,21 @@ Kokkos_Abort.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_Abort.
ifeq ($(KOKKOS_INTERNAL_USE_SERIAL), 1)
Kokkos_Serial.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Serial/Kokkos_Serial.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/Serial/Kokkos_Serial.cpp
ifneq ($(KOKKOS_INTERNAL_DISABLE_DEPRECATED_CODE), 1)
Kokkos_Serial_Task.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Serial/Kokkos_Serial_Task.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/Serial/Kokkos_Serial_Task.cpp
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
Kokkos_Cuda_Instance.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Cuda/Kokkos_Cuda_Instance.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/Cuda/Kokkos_Cuda_Instance.cpp
Kokkos_CudaSpace.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Cuda/Kokkos_CudaSpace.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/Cuda/Kokkos_CudaSpace.cpp
ifneq ($(KOKKOS_INTERNAL_DISABLE_DEPRECATED_CODE), 1)
Kokkos_Cuda_Task.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Cuda/Kokkos_Cuda_Task.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/Cuda/Kokkos_Cuda_Task.cpp
endif
Lock_Array_CUDA.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/tpls/desul/src/Lock_Array_CUDA.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/tpls/desul/src/Lock_Array_CUDA.cpp
endif
@ -73,6 +75,8 @@ Kokkos_HIP_Space.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Space.cpp
Kokkos_HIP_Instance.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Instance.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Instance.cpp
Kokkos_HIP_ZeroMemset.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_ZeroMemset.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_ZeroMemset.cpp
Lock_Array_HIP.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/tpls/desul/src/Lock_Array_HIP.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/tpls/desul/src/Lock_Array_HIP.cpp
endif
@ -89,26 +93,26 @@ Kokkos_OpenMP.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_Ope
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_OpenMP.cpp
Kokkos_OpenMP_Instance.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_OpenMP_Instance.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_OpenMP_Instance.cpp
ifneq ($(KOKKOS_INTERNAL_DISABLE_DEPRECATED_CODE), 1)
Kokkos_OpenMP_Task.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_OpenMP_Task.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_OpenMP_Task.cpp
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_HPX), 1)
Kokkos_HPX.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HPX/Kokkos_HPX.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HPX/Kokkos_HPX.cpp
ifneq ($(KOKKOS_INTERNAL_DISABLE_DEPRECATED_CODE), 1)
Kokkos_HPX_Task.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HPX/Kokkos_HPX_Task.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HPX/Kokkos_HPX_Task.cpp
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
Kokkos_OpenMPTarget_Exec.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp
Kokkos_OpenMPTarget_Instance.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp
Kokkos_OpenMPTargetSpace.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp
Kokkos_OpenMPTarget_Task.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENACC), 1)

View File

@ -30,12 +30,12 @@ To start learning about Kokkos:
The latest release of Kokkos can be obtained from the [GitHub releases page](https://github.com/kokkos/kokkos/releases/latest).
The current release is [4.3.01](https://github.com/kokkos/kokkos/releases/tag/4.3.01).
The current release is [4.5.01](https://github.com/kokkos/kokkos/releases/tag/4.5.01).
```bash
curl -OJ -L https://github.com/kokkos/kokkos/archive/refs/tags/4.3.01.tar.gz
curl -OJ -L https://github.com/kokkos/kokkos/releases/download/4.5.01/kokkos-4.5.01.tar.gz
# Or with wget
wget https://github.com/kokkos/kokkos/archive/refs/tags/4.3.01.tar.gz
wget https://github.com/kokkos/kokkos/releases/download/4.5.01/kokkos-4.5.01.tar.gz
```
To clone the latest development version of Kokkos from GitHub:

View File

@ -1,7 +1,7 @@
IF (NOT Kokkos_INSTALL_TESTING)
ADD_SUBDIRECTORY(src)
ENDIF()
if(NOT Kokkos_INSTALL_TESTING)
add_subdirectory(src)
endif()
# FIXME_OPENACC: temporarily disabled due to unimplemented features
IF(NOT ((KOKKOS_ENABLE_OPENMPTARGET AND KOKKOS_CXX_COMPILER_ID STREQUAL NVHPC) OR KOKKOS_ENABLE_OPENACC))
KOKKOS_ADD_TEST_DIRECTORIES(unit_tests)
ENDIF()
if(NOT ((KOKKOS_ENABLE_OPENMPTARGET AND KOKKOS_CXX_COMPILER_ID STREQUAL NVHPC) OR KOKKOS_ENABLE_OPENACC))
kokkos_add_test_directories(unit_tests)
endif()

View File

@ -1,34 +1,29 @@
#I have to leave these here for tribits
KOKKOS_INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
KOKKOS_INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
kokkos_include_directories(${CMAKE_CURRENT_BINARY_DIR})
kokkos_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
#-----------------------------------------------------------------------------
FILE(GLOB ALGO_HEADERS *.hpp)
FILE(GLOB ALGO_SOURCES *.cpp)
APPEND_GLOB(ALGO_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/std_algorithms/*.hpp)
APPEND_GLOB(ALGO_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/std_algorithms/impl/*.hpp)
file(GLOB ALGO_HEADERS *.hpp)
file(GLOB ALGO_SOURCES *.cpp)
append_glob(ALGO_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/std_algorithms/*.hpp)
append_glob(ALGO_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/std_algorithms/impl/*.hpp)
INSTALL (
install(
DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/"
DESTINATION ${KOKKOS_HEADER_DIR}
FILES_MATCHING PATTERN "*.hpp"
FILES_MATCHING
PATTERN "*.hpp"
)
#-----------------------------------------------------------------------------
# We have to pass the sources in here for Tribits
# These will get ignored for standalone CMake and a true interface library made
KOKKOS_ADD_INTERFACE_LIBRARY(
kokkosalgorithms
NOINSTALLHEADERS ${ALGO_HEADERS}
SOURCES ${ALGO_SOURCES}
)
KOKKOS_LIB_INCLUDE_DIRECTORIES(kokkosalgorithms
${KOKKOS_TOP_BUILD_DIR}
${CMAKE_CURRENT_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}
kokkos_add_interface_library(kokkosalgorithms NOINSTALLHEADERS ${ALGO_HEADERS} SOURCES ${ALGO_SOURCES})
kokkos_lib_include_directories(
kokkosalgorithms ${KOKKOS_TOP_BUILD_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
)
KOKKOS_LINK_TPL(kokkoscontainers PUBLIC ROCTHRUST)
KOKKOS_LINK_TPL(kokkoscore PUBLIC ONEDPL)
kokkos_link_tpl(kokkoscontainers PUBLIC ROCTHRUST)
kokkos_link_tpl(kokkoscore PUBLIC ONEDPL)

View File

@ -615,7 +615,7 @@ template <class DeviceType>
struct Random_UniqueIndex {
using locks_view_type = View<int**, DeviceType>;
KOKKOS_FUNCTION
static int get_state_idx(const locks_view_type) {
static int get_state_idx(const locks_view_type&) {
KOKKOS_IF_ON_HOST(
(return DeviceType::execution_space::impl_hardware_thread_id();))
@ -665,10 +665,9 @@ struct Random_UniqueIndex<
#ifdef KOKKOS_ENABLE_SYCL
template <class MemorySpace>
struct Random_UniqueIndex<
Kokkos::Device<Kokkos::Experimental::SYCL, MemorySpace>> {
struct Random_UniqueIndex<Kokkos::Device<Kokkos::SYCL, MemorySpace>> {
using locks_view_type =
View<int**, Kokkos::Device<Kokkos::Experimental::SYCL, MemorySpace>>;
View<int**, Kokkos::Device<Kokkos::SYCL, MemorySpace>>;
KOKKOS_FUNCTION
static int get_state_idx(const locks_view_type& locks_) {
auto item = sycl::ext::oneapi::experimental::this_nd_item<3>();

View File

@ -35,11 +35,11 @@ struct BinOp1D {
#endif
// Construct BinOp with number of bins, minimum value and maximum value
BinOp1D(int max_bins__, typename KeyViewType::const_value_type min,
BinOp1D(int max_bins, typename KeyViewType::const_value_type min,
typename KeyViewType::const_value_type max)
: max_bins_(max_bins__ + 1),
: max_bins_(max_bins + 1),
// Cast to double to avoid possible overflow when using integer
mul_(static_cast<double>(max_bins__) /
mul_(static_cast<double>(max_bins) /
(static_cast<double>(max) - static_cast<double>(min))),
min_(static_cast<double>(min)) {
// For integral types the number of bins may be larger than the range
@ -47,7 +47,7 @@ struct BinOp1D {
// and then don't need to sort bins.
if (std::is_integral<typename KeyViewType::const_value_type>::value &&
(static_cast<double>(max) - static_cast<double>(min)) <=
static_cast<double>(max_bins__)) {
static_cast<double>(max_bins)) {
mul_ = 1.;
}
}
@ -82,16 +82,16 @@ struct BinOp3D {
BinOp3D() = delete;
#endif
BinOp3D(int max_bins__[], typename KeyViewType::const_value_type min[],
BinOp3D(int max_bins[], typename KeyViewType::const_value_type min[],
typename KeyViewType::const_value_type max[]) {
max_bins_[0] = max_bins__[0];
max_bins_[1] = max_bins__[1];
max_bins_[2] = max_bins__[2];
mul_[0] = static_cast<double>(max_bins__[0]) /
max_bins_[0] = max_bins[0];
max_bins_[1] = max_bins[1];
max_bins_[2] = max_bins[2];
mul_[0] = static_cast<double>(max_bins[0]) /
(static_cast<double>(max[0]) - static_cast<double>(min[0]));
mul_[1] = static_cast<double>(max_bins__[1]) /
mul_[1] = static_cast<double>(max_bins[1]) /
(static_cast<double>(max[1]) - static_cast<double>(min[1]));
mul_[2] = static_cast<double>(max_bins__[2]) /
mul_[2] = static_cast<double>(max_bins[2]) /
(static_cast<double>(max[2]) - static_cast<double>(min[2]));
min_[0] = static_cast<double>(min[0]);
min_[1] = static_cast<double>(min[1]);

View File

@ -388,7 +388,8 @@ class BinSort {
// reasonable experimentally.
if (use_std_sort && bin_size > 10) {
KOKKOS_IF_ON_HOST(
(std::sort(&sort_order(lower_bound), &sort_order(upper_bound),
(std::sort(sort_order.data() + lower_bound,
sort_order.data() + upper_bound,
[this](int p, int q) { return bin_op(keys_rnd, p, q); });))
} else {
for (int k = lower_bound + 1; k < upper_bound; ++k) {

Some files were not shown because too many files have changed in this diff.