Merge branch 'collected-small-fixes' into lammps-gui

# Conflicts:
#	cmake/CMakeLists.txt
Axel Kohlmeyer
2023-08-01 03:07:56 -04:00
17 changed files with 170 additions and 126 deletions


@@ -1,6 +1,6 @@
 # Contributing to LAMMPS via GitHub
-Thank your for considering to contribute to the LAMMPS software project.
+Thank you for considering to contribute to the LAMMPS software project.
 The following is a set of guidelines as well as explanations of policies and work flows for contributing to the LAMMPS molecular dynamics software project. These guidelines focus on submitting issues or pull requests on the LAMMPS GitHub project.


@@ -796,9 +796,11 @@ include(Tools)
 include(Documentation)
 ###############################################################################
-# Install potential and force field files in data directory
+# Install bench, potential and force field files in data directory
 ###############################################################################
 set(LAMMPS_INSTALL_DATADIR ${CMAKE_INSTALL_DATADIR}/lammps)
+install(DIRECTORY ${LAMMPS_DIR}/bench DESTINATION ${LAMMPS_INSTALL_DATADIR})
 install(DIRECTORY ${LAMMPS_POTENTIALS_DIR} DESTINATION ${LAMMPS_INSTALL_DATADIR})
 if(BUILD_TOOLS)
   install(DIRECTORY ${LAMMPS_TOOLS_DIR}/msi2lmp/frc_files DESTINATION ${LAMMPS_INSTALL_DATADIR})
@@ -866,22 +868,6 @@ if(ClangFormat_FOUND)
     WORKING_DIRECTORY ${LAMMPS_SOURCE_DIR})
 endif()
-# Packaging
-set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "LAMMPS")
-set(CPACK_PACKAGE_VENDOR "The LAMMPS Developers")
-set(CPACK_PACKAGE_NAME "LAMMPS")
-set(CPACK_PACKAGE_VERSION "${PROJECT_VERSION}")
-if(CPACK_GENERATOR STREQUAL "DragNDrop")
-  set(CPACK_DMG_VOLUME_NAME "LAMMPS-macOS-universal")
-  set(CPACK_DMG_BACKGROUND_IMAGE "${LAMMPS_DIR}/cmake/packaging/LAMMPS_DMG_BACKGROUND")
-  set(CPACK_DS_STORE_SETUP_SCRIPT "${LAMMPS_DIR}/cmake/packaging/LAMMPS_DMG_Setup.scpt")
-  set(CPACK_COMPONENTS_GROUPING ALL_COMPONENTS_IN_ONE)
-  install(FILES ${LAMMPS_DIR}/cmake/packaging/README.macos DESTINATION . RENAME README.txt)
-endif()
-include(CPack)
 # extract Kokkos compilation settings
 get_cmake_property(_allvars VARIABLES)
 foreach(_var ${_allvars})


@@ -64,6 +64,8 @@ if(GPU_API STREQUAL "CUDA")
   endif()
   set(GPU_CUDA_MPS_FLAGS "-DCUDA_MPS_SUPPORT")
 endif()
+option(CUDA_BUILD_MULTIARCH "Enable building CUDA kernels for all supported GPU architectures" ON)
+mark_as_advanced(CUDA_BUILD_MULTIARCH)
 set(GPU_ARCH "sm_50" CACHE STRING "LAMMPS GPU CUDA SM primary architecture (e.g. sm_60)")
@@ -93,6 +95,7 @@ if(GPU_API STREQUAL "CUDA")
   # --arch translates directly instead of JIT, so this should be for the preferred or most common architecture
   set(GPU_CUDA_GENCODE "-arch=${GPU_ARCH}")
+  if(CUDA_BUILD_MULTIARCH)
   # apply the following to build "fat" CUDA binaries only for known CUDA toolkits since version 8.0
   # only the Kepler architecture and beyond is supported
   # comparison chart according to: https://en.wikipedia.org/wiki/CUDA#GPUs_supported
@@ -145,6 +148,7 @@ if(GPU_API STREQUAL "CUDA")
       string(APPEND GPU_CUDA_GENCODE " -gencode arch=compute_90,code=[sm_90,compute_90]")
     endif()
   endif()
+  endif()
   cuda_compile_fatbin(GPU_GEN_OBJS ${GPU_LIB_CU} OPTIONS ${CUDA_REQUEST_PIC}
     -DUNIX -O3 --use_fast_math -Wno-deprecated-gpu-targets -DNV_KERNEL -DUCL_CUDADR ${GPU_CUDA_GENCODE} -D_${GPU_PREC_SETTING} -DLAMMPS_${LAMMPS_SIZES})


@@ -37,7 +37,11 @@ if(BUILD_TOOLS)
   add_subdirectory(${LAMMPS_TOOLS_DIR}/phonon ${CMAKE_BINARY_DIR}/phana_build)
 endif()
+find_package(PkgConfig QUIET)
 if(BUILD_LAMMPS_SHELL)
+  if(NOT PkgConfig_FOUND)
+    message(FATAL_ERROR "Must have pkg-config installed for building LAMMPS shell")
+  endif()
   find_package(PkgConfig REQUIRED)
   pkg_check_modules(READLINE IMPORTED_TARGET REQUIRED readline)
   if(NOT LAMMPS_EXCEPTIONS)


@@ -0,0 +1,14 @@
+# preset that will build portable multi-arch binaries on macOS without MPI and OpenMP
+set(CMAKE_OSX_ARCHITECTURES "arm64;x86_64" CACHE STRING "" FORCE)
+set(CMAKE_OSX_DEPLOYMENT_TARGET 11.0 CACHE STRING "" FORCE)
+set(CMAKE_BUILD_TYPE Release CACHE STRING "" FORCE)
+set(CMAKE_CXX_COMPILER "clang++" CACHE STRING "" FORCE)
+set(CMAKE_C_COMPILER "clang" CACHE STRING "" FORCE)
+set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG" CACHE STRING "" FORCE)
+set(CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG" CACHE STRING "" FORCE)
+set(BUILD_MPI FALSE CACHE BOOL "" FORCE)
+set(BUILD_OMP FALSE CACHE BOOL "" FORCE)
+set(BUILD_SHARED_LIBS FALSE CACHE BOOL "" FORCE)


@@ -140,6 +140,8 @@ CMake build
 # value = yes or no (default)
 -D CUDA_MPS_SUPPORT=value # enables some tweaks required to run with active nvidia-cuda-mps daemon
 # value = yes or no (default)
+-D CUDA_BUILD_MULTIARCH=value # enables building CUDA kernels for all supported GPU architectures
+# value = yes (default) or no
 -D USE_STATIC_OPENCL_LOADER=value # downloads/includes OpenCL ICD loader library, no local OpenCL headers/libs needed
 # value = yes (default) or no
@@ -158,41 +160,49 @@ CMake build
A more detailed list can be found, for example, at `Wikipedia's CUDA article <https://en.wikipedia.org/wiki/CUDA#GPUs_supported>`_

CMake can detect which version of the CUDA toolkit is used and thus will try to include support for **all** major GPU architectures supported by this toolkit. Thus the GPU_ARCH setting is merely an optimization, to have code for the preferred GPU architecture directly included rather than having to wait for the JIT compiler of the CUDA driver to translate it. This behavior can be turned off (e.g. to speed up compilation) by setting :code:`CUDA_BUILD_MULTIARCH` to :code:`no`.

When compiling for CUDA or HIP with CUDA, version 8.0 or later of the CUDA toolkit is required and a GPU architecture of Kepler or later, which must *also* be supported by the CUDA toolkit in use **and** the CUDA driver in use. When compiling for OpenCL, OpenCL version 1.2 or later is required and the GPU must be supported by the GPU driver and OpenCL runtime bundled with the driver.

When building with CMake, you **must NOT** build the GPU library in ``lib/gpu`` using the traditional build procedure. CMake will detect files generated by that process and will terminate with an error and a suggestion for how to remove them.

If you are compiling for OpenCL, the default setting is to download, build, and link with a static OpenCL ICD loader library and standard OpenCL headers. This way no local OpenCL development headers or library needs to be present and only OpenCL compatible drivers need to be installed to use OpenCL. If this is not desired, you can set :code:`USE_STATIC_OPENCL_LOADER` to :code:`no`.

The GPU library has some multi-thread support using OpenMP. If LAMMPS is built with ``-D BUILD_OMP=on`` this will also be enabled.

If you are compiling with HIP, note that before running CMake you will have to set appropriate environment variables. Some variables such as :code:`HCC_AMDGPU_TARGET` (for ROCm <= 4.0) or :code:`CUDA_PATH` are necessary for :code:`hipcc` and the linker to work correctly.

.. versionadded:: 3Aug2022

Using the CHIP-SPV implementation of HIP is supported. It allows one to run HIP code on Intel GPUs via the OpenCL or Level Zero backends. To use CHIP-SPV, you must set :code:`-DHIP_USE_DEVICE_SORT=OFF` in your CMake command line as CHIP-SPV does not yet support hipCUB. As of Summer 2022, the use of HIP for Intel GPUs is experimental. You should only use this option in preparations to run on the Aurora system at Argonne.

.. code:: bash


@@ -28,20 +28,34 @@ Include files (varied)
  packages and hard-to-find bugs have regularly manifested in the past.

- Header files, especially those defining a "style", should only use the absolute minimum number of include files and **must not** contain any ``using`` statements. Typically, that would only be the header for the base class. Instead, any include statements should be put in the corresponding implementation files and forward declarations be used. For implementation files, the "include what you use" principle should be employed. However, there is the notable exception that when the ``pointers.h`` header is included (or the header of one of the classes derived from it), certain headers will *always* be included and thus do not need to be explicitly specified. These are: `mpi.h`, `cstddef`, `cstdio`, `cstdlib`, `string`, `utils.h`, `vector`, `fmt/format.h`, `climits`, `cinttypes`. This also means any such file can assume that `FILE`, `NULL`, and `INT_MAX` are defined.

- Class member variables should not be initialized in the header file, but instead should be initialized either in the initializer list of the constructor or explicitly assigned in the body of the constructor. If the member variable is relevant to the functionality of a class (for example when it stores a value from a command line argument), the member variable declaration is followed by a brief comment explaining its purpose and what its values can be. Class members that are pointers should always be initialized to ``nullptr`` in the initializer list of the constructor. This reduces clutter in the header and avoids accessing uninitialized pointers, which leads to hard-to-debug issues, since class members are often implicitly initialized to ``NULL`` on the first use (but *not* after a :doc:`clear command <clear>`). Please see the files ``reset_atoms_mol.h`` and ``reset_atoms_mol.cpp`` as an example.

- System headers or headers from installed libraries are included with angular brackets (example: ``#include <vector>``), while local
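The two style-guide bullets above can be illustrated with a short sketch. The class ``FixExample`` below is hypothetical and not part of this changeset or of the LAMMPS sources; it only demonstrates the stated pattern: the header includes nothing beyond the base class header, uses a forward declaration instead of an extra include, contains no ``using`` statement, documents its member variables with brief comments, and sets all pointer members to ``nullptr`` in the constructor's initializer list.

.. code:: c++

   // fix_example.h -- hypothetical "style" header following the guidelines above
   #ifndef LMP_FIX_EXAMPLE_H
   #define LMP_FIX_EXAMPLE_H

   #include "fix.h"    // only the header of the base class is included

   namespace LAMMPS_NS {
   class Compute;      // forward declaration instead of #include "compute.h"

   class FixExample : public Fix {
    public:
     FixExample(class LAMMPS *, int, char **);
     ~FixExample() override;
     int setmask() override;

    private:
     double cutoff;         // cutoff distance taken from the fix command (> 0.0)
     int nchunk;            // number of chunks processed per step (>= 1)
     Compute *temperature;  // optional temperature compute, may remain nullptr
     double *buffer;        // per-atom work buffer, allocated on demand
   };
   }    // namespace LAMMPS_NS
   #endif

   // fix_example.cpp -- hypothetical implementation file
   #include "fix_example.h"

   #include "compute.h"    // "include what you use": the forward-declared class is included here

   using namespace LAMMPS_NS;    // allowed in the implementation file, never in a header

   // pointers go into the initializer list as nullptr; plain members are assigned in the body
   FixExample::FixExample(LAMMPS *lmp, int narg, char **arg) :
       Fix(lmp, narg, arg), temperature(nullptr), buffer(nullptr)
   {
     cutoff = 0.0;
     nchunk = 1;
   }

   FixExample::~FixExample()
   {
     delete[] buffer;
   }

   int FixExample::setmask()
   {
     return 0;
   }

With the pointers set to ``nullptr``, a use-before-allocation fails the same way every time instead of depending on whatever the memory happens to contain, which is the hard-to-debug situation the guideline warns about.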


@@ -1,4 +1,4 @@
-Sphinx >= 5.3.0, <7.1.0
+Sphinx >= 5.3.0, <7.2.0
 sphinxcontrib-spelling
 sphinxcontrib-jquery
 git+https://github.com/akohlmey/sphinx-fortran@parallel-read


@@ -1528,6 +1528,7 @@ inhomogeneous
 init
 initialdelay
 initialisms
+initializer
 initializations
 InitiatorIDs
 initio

src/.gitignore

@@ -60,6 +60,8 @@
 /pair_lepton.h
 /pair_lepton_coul.cpp
 /pair_lepton_coul.h
+/pair_lepton_sphere.cpp
+/pair_lepton_sphere.h
 /bond_lepton.cpp
 /bond_lepton.h
 /angle_lepton.cpp


@@ -417,7 +417,7 @@ void FixElectrodeConp::post_constructor()
   input->variable->set(fmt::format("{} equal f_{}[{}]", var_vtop, fixname, 1 + top_group));
   input->variable->set(fmt::format("{} equal (v_{}-v_{})/lz", var_efield, var_vbot, var_vtop));
   // check for other efields and warn if found
-  if (modify->get_fix_by_style("efield").size() > 0 && comm->me == 0)
+  if (modify->get_fix_by_style("^efield").size() > 0 && comm->me == 0)
     error->warning(FLERR, "Other efield fixes found -- please make sure this is intended!");
   // call fix command:
   // fix [varstem]_efield all efield 0.0 0.0 [var_vdiff]/lz


@@ -749,7 +749,7 @@ struct intr_types<float,double> {
 #include <cassert>
 #include <immintrin.h>
-#include <stdint.h> // <cstdint> requires C++-11
+#include <cstdint>
 #define VEC_INLINE __attribute__((always_inline))


@@ -44,7 +44,6 @@
 #include <cinttypes> // IWYU pragma: export
 #include <climits>   // IWYU pragma: export
-#include <cstdint>   // IWYU pragma: export
 #include <cstdlib>   // IWYU pragma: export
 // grrr - IBM Power6 does not provide this def in their system header files


@@ -1228,13 +1228,13 @@ int Thermo::evaluate_keyword(const std::string &word, double *answer)
   } else if (word == "elapsed") {
     if (update->whichflag == 0)
-      error->all(FLERR, "This variable thermo keyword cannot be used between runs");
+      error->all(FLERR, "This variable thermo keyword elapsed cannot be used between runs");
     compute_elapsed();
     dvalue = bivalue;
   } else if (word == "elaplong") {
     if (update->whichflag == 0)
-      error->all(FLERR, "This variable thermo keyword cannot be used between runs");
+      error->all(FLERR, "This variable thermo keyword elaplong cannot be used between runs");
     compute_elapsed_long();
     dvalue = bivalue;
@@ -1246,22 +1246,22 @@ int Thermo::evaluate_keyword(const std::string &word, double *answer)
   } else if (word == "cpu") {
     if (update->whichflag == 0)
-      error->all(FLERR, "This variable thermo keyword cannot be used between runs");
+      error->all(FLERR, "This variable thermo keyword cpu cannot be used between runs");
     compute_cpu();
   } else if (word == "tpcpu") {
     if (update->whichflag == 0)
-      error->all(FLERR, "This variable thermo keyword cannot be used between runs");
+      error->all(FLERR, "This variable thermo keyword tpcpu cannot be used between runs");
     compute_tpcpu();
   } else if (word == "spcpu") {
     if (update->whichflag == 0)
-      error->all(FLERR, "This variable thermo keyword cannot be used between runs");
+      error->all(FLERR, "This variable thermo keyword spcpu cannot be used between runs");
     compute_spcpu();
   } else if (word == "cpuremain") {
     if (update->whichflag == 0)
-      error->all(FLERR, "This variable thermo keyword cannot be used between runs");
+      error->all(FLERR, "This variable thermo keyword cpuremain cannot be used between runs");
     compute_cpuremain();
   } else if (word == "part") {
@@ -1350,7 +1350,7 @@ int Thermo::evaluate_keyword(const std::string &word, double *answer)
   } else if (word == "etail") {
     if (update->eflag_global != update->ntimestep)
-      error->all(FLERR, "Energy was not tallied on needed timestep");
+      error->all(FLERR, "Energy was not tallied on needed timestep for thermo keyword etail");
     compute_etail();
   } else if (word == "enthalpy") {


@@ -17,19 +17,19 @@ syn keyword lammpsLattice delete_atoms displace_atoms change_box dimension
 syn keyword lammpsParticle pair_coeff pair_style pair_modify pair_write mass velocity angle_coeff angle_style angle_write
 syn keyword lammpsParticle atom_modify atom_style bond_coeff bond_style bond_write create_bonds delete_bonds kspace_style
 syn keyword lammpsParticle kspace_modify dihedral_style dihedral_coeff dihedral_write improper_style improper_coeff labelmap
-syn keyword lammpsSetup min_style min_modify fix_modify run_style timestep neighbor neigh_modify fix unfix suffix special_bonds
+syn keyword lammpsSetup min_style min_modify fix_modify run_style timestep neighbor neigh_modify fix unfix suffix special_bonds dump_modify
 syn keyword lammpsSetup balance box clear comm_modify comm_style newton package processors reset_atoms reset_ids reset_timestep
 syn keyword lammpsRun minimize minimize/kk run rerun tad neb neb/spin prd quit server temper/npt temper/grem temper
 syn keyword lammpsRun message hyper dynamical_matrix dynamical_matrix/kk third_order third_order/kk fitpod
 syn keyword lammpsDefine variable group compute python set uncompute kim_query kim group2ndx ndx2group mdi
-syn keyword lammpsRepeat jump next loop
+syn keyword lammpsRepeat jump next loop label
 syn keyword lammpsOperator equal add sub mult div
 syn keyword lammpsConditional if then elif else
-syn keyword lammpsSpecial EDGE NULL &
+syn keyword lammpsSpecial EDGE NULL INF &
 syn region lammpsString start=+'+ end=+'+ oneline
 syn region lammpsString start=+"+ end=+"+ oneline


@@ -199,6 +199,9 @@ add_executable(test_fix_timestep test_fix_timestep.cpp)
 if(NOT BUILD_SHARED_LIBS)
   target_compile_definitions(test_fix_timestep PRIVATE USING_STATIC_LIBS=1)
 endif()
+if(FFT_SINGLE)
+  target_compile_definitions(test_fix_timestep PRIVATE -DFFT_SINGLE)
+endif()
 target_link_libraries(test_fix_timestep PRIVATE lammps style_tests)
 # tests for timestep related fixes (time integration, thermostat, force manipulation, constraints/restraints)


@@ -279,6 +279,10 @@ TEST(FixTimestep, plain)
     ASSERT_EQ(lmp->atom->natoms, nlocal);
     double epsilon = test_config.epsilon;
+    // relax test precision when using pppm and single precision FFTs
+#if defined(FFT_SINGLE)
+    if (lmp->force->kspace && utils::strmatch(lmp->force->kspace_style, "^pppm")) epsilon *= 2.0e8;
+#endif
     ErrorStats stats;
@@ -411,7 +415,6 @@ TEST(FixTimestep, plain)
     ifix = lmp->modify->find_fix("test");
     if (!utils::strmatch(lmp->modify->fix[ifix]->style, "^rigid") &&
         !utils::strmatch(lmp->modify->fix[ifix]->style, "^nve/limit")) {
-
       if (!verbose) ::testing::internal::CaptureStdout();
       cleanup_lammps(lmp, test_config);
       if (!verbose) ::testing::internal::GetCapturedStdout();
@@ -579,6 +582,10 @@ TEST(FixTimestep, omp)
     ASSERT_EQ(lmp->atom->natoms, nlocal);
     double epsilon = test_config.epsilon;
+    // relax test precision when using pppm and single precision FFTs
+#if defined(FFT_SINGLE)
+    if (lmp->force->kspace && utils::strmatch(lmp->force->kspace_style, "^pppm")) epsilon *= 2.0e8;
+#endif
     ErrorStats stats;