Merge branch 'master' of github.com:lammps/lammps into gran_mods

This commit is contained in:
Dan S. Bolintineanu
2019-04-05 07:53:51 -06:00
91 changed files with 2287 additions and 2249 deletions

View File

@ -322,7 +322,7 @@ pkg_depends(USER-SCAFACOS MPI)
find_package(OpenMP QUIET)
option(BUILD_OMP "Build with OpenMP support" ${OpenMP_FOUND})
if(BUILD_OMP OR PKG_KOKKOS OR PKG_USER-INTEL)
if(BUILD_OMP)
find_package(OpenMP REQUIRED)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
@ -560,6 +560,20 @@ if(PKG_USER-PLUMED)
option(DOWNLOAD_PLUMED "Download Plumed package instead of using an already installed one" OFF)
if(DOWNLOAD_PLUMED)
if(BUILD_MPI)
set(PLUMED_CONFIG_MPI "--enable-mpi")
set(PLUMED_CONFIG_CC ${CMAKE_MPI_C_COMPILER})
set(PLUMED_CONFIG_CXX ${CMAKE_MPI_CXX_COMPILER})
else()
set(PLUMED_CONFIG_MPI "--disable-mpi")
set(PLUMED_CONFIG_CC ${CMAKE_C_COMPILER})
set(PLUMED_CONFIG_CXX ${CMAKE_CXX_COMPILER})
endif()
if(BUILD_OMP)
set(PLUMED_CONFIG_OMP "--enable-openmp")
else()
set(PLUMED_CONFIG_OMP "--disable-openmp")
endif()
message(STATUS "PLUMED download requested - we will build our own")
include(ExternalProject)
ExternalProject_Add(plumed_build
@ -568,9 +582,11 @@ if(PKG_USER-PLUMED)
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR>
${CONFIGURE_REQUEST_PIC}
--enable-modules=all
CXX=${CMAKE_MPI_CXX_COMPILER}
CC=${CMAKE_MPI_C_COMPILER}
--enable-modules=all
${PLUMED_CONFIG_MPI}
${PLUMED_CONFIG_OMP}
CXX=${PLUMED_CONFIG_CXX}
CC=${PLUMED_CONFIG_CC}
)
ExternalProject_get_property(plumed_build INSTALL_DIR)
set(PLUMED_INSTALL_DIR ${INSTALL_DIR})
@ -607,7 +623,10 @@ if(PKG_USER-MOLFILE)
set(MOLFILE_INCLUDE_DIRS "${LAMMPS_LIB_SOURCE_DIR}/molfile" CACHE STRING "Path to VMD molfile plugin headers")
add_library(molfile INTERFACE)
target_include_directories(molfile INTERFACE ${MOLFILE_INCLUDE_DIRS})
target_link_libraries(molfile INTERFACE ${CMAKE_DL_LIBS})
# no need to link with -ldl on windows
if(NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
target_link_libraries(molfile INTERFACE ${CMAKE_DL_LIBS})
endif()
list(APPEND LAMMPS_LINK_LIBS molfile)
endif()
@ -709,6 +728,7 @@ if(PKG_MESSAGE)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csmpi")
else()
target_compile_definitions(cslib PRIVATE -DMPI_NO)
target_include_directories(cslib PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/message/cslib/src/STUBS_MPI)
set_target_properties(cslib PROPERTIES OUTPUT_NAME "csnompi")
endif()
@ -944,7 +964,7 @@ if(PKG_USER-OMP)
# detects styles which have USER-OMP version
RegisterStylesExt(${USER-OMP_SOURCES_DIR} omp OMP_SOURCES)
RegisterFixStyle("${USER-OMP_SOURCES_DIR}/fix_omp.h")
RegisterFixStyle(${USER-OMP_SOURCES_DIR}/fix_omp.h)
get_property(USER-OMP_SOURCES GLOBAL PROPERTY OMP_SOURCES)
@ -1057,37 +1077,79 @@ if(PKG_OPT)
endif()
if(PKG_USER-INTEL)
find_package(TBB REQUIRED)
find_package(MKL REQUIRED)
if(LAMMPS_SIZES STREQUAL BIGBIG)
message(FATAL_ERROR "The USER-INTEL Package is not compatible with -DLAMMPS_BIGBIG")
endif()
add_definitions(-DLMP_USER_INTEL)
if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
message(FATAL_ERROR "USER-INTEL is only useful together with intel compiler")
endif()
set(INTEL_ARCH "cpu" CACHE STRING "Architectures used by USER-INTEL (cpu or knl)")
set(INTEL_ARCH_VALUES cpu knl)
set_property(CACHE INTEL_ARCH PROPERTY STRINGS ${INTEL_ARCH_VALUES})
validate_option(INTEL_ARCH INTEL_ARCH_VALUES)
string(TOUPPER ${INTEL_ARCH} INTEL_ARCH)
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
message(FATAL_ERROR "USER-INTEL needs at least a 2016 intel compiler, found ${CMAKE_CXX_COMPILER_VERSION}")
endif()
if(NOT BUILD_OMP)
message(FATAL_ERROR "USER-INTEL requires OpenMP")
endif()
if(NOT ${LAMMPS_MEMALIGN} STREQUAL "64")
message(FATAL_ERROR "USER-INTEL is only useful with LAMMPS_MEMALIGN=64")
endif()
set(INTEL_ARCH "cpu" CACHE STRING "Architectures used by USER-INTEL (cpu or knl)")
set(INTEL_ARCH_VALUES cpu knl)
set_property(CACHE INTEL_ARCH PROPERTY STRINGS ${INTEL_ARCH_VALUES})
validate_option(INTEL_ARCH INTEL_ARCH_VALUES)
string(TOUPPER ${INTEL_ARCH} INTEL_ARCH)
if(INTEL_ARCH STREQUAL "KNL")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -xHost -qopenmp -qoffload")
set(MIC_OPTIONS "-qoffload-option,mic,compiler,\"-fp-model fast=2 -mGLOB_default_function_attrs=\\\"gather_scatter_loop_unroll=4\\\"\"")
add_compile_options(-xMIC-AVX512 -qoffload -fno-alias -ansi-alias -restrict -qoverride-limits ${MIC_OPTIONS})
add_definitions(-DLMP_INTEL_OFFLOAD)
find_package(Threads QUIET)
if(Threads_FOUND)
set(INTEL_LRT_MODE "threads" CACHE STRING "Long-range threads mode (none, threads, or c++11)")
else()
set(INTEL_LRT_MODE "none" CACHE STRING "Long-range threads mode (none, threads, or c++11)")
endif()
set(INTEL_LRT_VALUES none threads c++11)
set_property(CACHE INTEL_LRT_MODE PROPERTY STRINGS ${INTEL_LRT_VALUES})
validate_option(INTEL_LRT_MODE INTEL_LRT_VALUES)
string(TOUPPER ${INTEL_LRT_MODE} INTEL_LRT_MODE)
if(INTEL_LRT_MODE STREQUAL "THREADS")
if(Threads_FOUND)
add_definitions(-DLMP_INTEL_USELRT)
list(APPEND LAMMPS_LINK_LIBS ${CMAKE_THREAD_LIBS_INIT})
else()
message(FATAL_ERROR "Must have working threads library for Long-range thread support")
endif()
endif()
if(INTEL_LRT_MODE STREQUAL "C++11")
add_definitions(-DLMP_INTEL_USERLRT -DLMP_INTEL_LRT11)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16)
message(FATAL_ERROR "USER-INTEL needs at least a 2016 Intel compiler, found ${CMAKE_CXX_COMPILER_VERSION}")
endif()
else()
message(WARNING "USER-INTEL gives best performance with Intel compilers")
endif()
find_package(TBB QUIET)
if(TBB_FOUND)
list(APPEND LAMMPS_LINK_LIBS ${TBB_MALLOC_LIBRARIES})
else()
add_definitions(-DLMP_INTEL_NO_TBB)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
message(WARNING "USER-INTEL with Intel compilers should use TBB malloc libraries")
endif()
endif()
find_package(MKL QUIET)
if(MKL_FOUND)
add_definitions(-DLMP_USE_MKL_RNG)
list(APPEND LAMMPS_LINK_LIBS ${MKL_LIBRARIES})
else()
message(STATUS "Pair style dpd/intel will be faster with MKL libraries")
endif()
if((NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") AND (NOT ${LAMMPS_MEMALIGN} STREQUAL "64") AND (NOT ${LAMMPS_MEMALIGN} STREQUAL "128") AND (NOT ${LAMMPS_MEMALIGN} STREQUAL "256"))
message(FATAL_ERROR "USER-INTEL only supports memory alignment of 64, 128 or 256 on this platform")
endif()
if(INTEL_ARCH STREQUAL "KNL")
if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
message(FATAL_ERROR "Must use Intel compiler with USER-INTEL for KNL architecture")
endif()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -xHost -qopenmp -qoffload")
set(MIC_OPTIONS "-qoffload-option,mic,compiler,\"-fp-model fast=2 -mGLOB_default_function_attrs=\\\"gather_scatter_loop_unroll=4\\\"\"")
add_compile_options(-xMIC-AVX512 -qoffload -fno-alias -ansi-alias -restrict -qoverride-limits ${MIC_OPTIONS})
add_definitions(-DLMP_INTEL_OFFLOAD)
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
if(CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 17.3 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 17.4)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -xCOMMON-AVX512")
else()
@ -1100,33 +1162,33 @@ if(PKG_USER-INTEL)
add_compile_options(${_FLAG})
endif()
endforeach()
else()
add_compile_options(-O3 -ffast-math)
endif()
endif()
add_definitions(-DLMP_INTEL_USELRT -DLMP_USE_MKL_RNG)
# collect sources
set(USER-INTEL_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/USER-INTEL)
set(USER-INTEL_SOURCES ${USER-INTEL_SOURCES_DIR}/fix_intel.cpp
${USER-INTEL_SOURCES_DIR}/fix_nh_intel.cpp
${USER-INTEL_SOURCES_DIR}/intel_buffers.cpp
${USER-INTEL_SOURCES_DIR}/nbin_intel.cpp
${USER-INTEL_SOURCES_DIR}/npair_intel.cpp
${USER-INTEL_SOURCES_DIR}/verlet_lrt_intel.cpp)
list(APPEND LAMMPS_LINK_LIBS ${TBB_MALLOC_LIBRARIES} ${MKL_LIBRARIES})
set_property(GLOBAL PROPERTY "USER-INTEL_SOURCES" "${USER-INTEL_SOURCES}")
set(USER-INTEL_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/USER-INTEL)
set(USER-INTEL_SOURCES ${USER-INTEL_SOURCES_DIR}/intel_preprocess.h
${USER-INTEL_SOURCES_DIR}/intel_buffers.h
${USER-INTEL_SOURCES_DIR}/intel_buffers.cpp
${USER-INTEL_SOURCES_DIR}/math_extra_intel.h
${USER-INTEL_SOURCES_DIR}/nbin_intel.h
${USER-INTEL_SOURCES_DIR}/nbin_intel.cpp
${USER-INTEL_SOURCES_DIR}/npair_intel.h
${USER-INTEL_SOURCES_DIR}/npair_intel.cpp
${USER-INTEL_SOURCES_DIR}/intel_simd.h
${USER-INTEL_SOURCES_DIR}/intel_intrinsics.h)
# detect styles which have a USER-INTEL version
RegisterStylesExt(${USER-INTEL_SOURCES_DIR} intel USER-INTEL_SOURCES)
RegisterNBinStyle(${USER-INTEL_SOURCES_DIR}/nbin_intel.h)
RegisterNPairStyle(${USER-INTEL_SOURCES_DIR}/npair_intel.h)
RegisterFixStyle(${USER-INTEL_SOURCES_DIR}/fix_intel.h)
RegisterIntegrateStyle(${USER-INTEL_SOURCES_DIR}/verlet_lrt_intel.h)
set_property(GLOBAL PROPERTY "USER-INTEL_SOURCES" "${USER-INTEL_SOURCES}")
get_property(USER-INTEL_SOURCES GLOBAL PROPERTY USER-INTEL_SOURCES)
# detects styles which have USER-INTEL version
RegisterStylesExt(${USER-INTEL_SOURCES_DIR} opt USER-INTEL_SOURCES)
get_property(USER-INTEL_SOURCES GLOBAL PROPERTY USER-INTEL_SOURCES)
list(APPEND LIB_SOURCES ${USER-INTEL_SOURCES})
include_directories(${USER-INTEL_SOURCES_DIR})
list(APPEND LIB_SOURCES ${USER-INTEL_SOURCES})
include_directories(${USER-INTEL_SOURCES_DIR})
endif()
if(PKG_GPU)

View File

@ -91,6 +91,10 @@ function(RegisterFixStyle path)
AddStyleHeader(${path} FIX)
endfunction(RegisterFixStyle)
function(RegisterIntegrateStyle path)
AddStyleHeader(${path} INTEGRATE)
endfunction(RegisterIntegrateStyle)
function(RegisterStyles search_path)
FindStyleHeaders(${search_path} ANGLE_CLASS angle_ ANGLE ) # angle ) # force
FindStyleHeaders(${search_path} ATOM_CLASS atom_vec_ ATOM_VEC ) # atom ) # atom atom_vec_hybrid

View File

@ -155,11 +155,13 @@ make
The CMake build exposes a lot of different options. In the old build system
some of the package selections were possible by using special make target like
`make yes-std` or `make no-lib`. Achieving the same result with cmake requires
`make yes-std` or `make no-lib`. Achieving a similar result with cmake requires
specifying all options manually. This can quickly become a very long command
line that is hard to handle. While these could be stored in a simple script
file, there is another way of defining "presets" to compile LAMMPS in a certain
way.
way. Since the cmake build process - contrary to the conventional build system -
includes the compilation of the bundled libraries into the standard build process,
the grouping of those presets is somewhat different.
A preset is a regular CMake script file that can use constructs such as
variables, lists and for-loops to manipulate configuration options and create
@ -171,10 +173,10 @@ Such a file can then be passed to cmake via the `-C` flag. Several examples of
presets can be found in the `cmake/presets` folder.
```bash
# build LAMMPS with all "standard" packages which don't use libraries and enable GPU package
# build LAMMPS with all packages enabled which don't use external libraries and enable GPU package
mkdir build
cd build
cmake -C ../cmake/presets/std_nolib.cmake -D PKG_GPU=on ../cmake
cmake -C ../cmake/presets/all_on.cmake -C ../cmake/presets/nolib.cmake -D PKG_GPU=on ../cmake
```
# Reference
@ -1429,6 +1431,17 @@ TODO
</dl>
</td>
</tr>
<tr>
<td><code>INTEL_LRT_MODE</code></td>
<td>How to support Long-range thread mode in Verlet integration</td>
<td>
<dl>
<dt><code>threads</code> (default, if pthreads available)</dt>
<dt><code>none</code> (default, if pthreads not available)</dt>
<dt><code>c++11</code></dt>
</dl>
</td>
</tr>
</tbody>
</table>

View File

@ -1,21 +1,17 @@
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MEAM MISC
MOLECULE MPIIO MSCG OPT PERI POEMS
PYTHON QEQ REAX REPLICA RIGID SHOCK SNAP SRD VORONOI)
# preset that turns off all existing packages. Can be used to reset
# an existing package selection without losing any other settings
set(USER_PACKAGES USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-OMP USER-PHONON USER-QMMM USER-QTB
USER-QUIP USER-REAXC USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK)
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MEAM MPIIO MSCG POEMS PYTHON REAX VORONOI
USER-ATC USER-AWPMD USER-COLVARS USER-H5MD USER-LB USER-MOLFILE
USER-NETCDF USER-PLUMED USER-QMMM USER-QUIP USER-SMD USER-VTK)
set(ALL_PACKAGES ${STANDARD_PACKAGES} ${USER_PACKAGES})
set(ALL_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MISC MESSAGE MOLECULE
MPIIO MSCG OPT PERI POEMS PYTHON QEQ REPLICA RIGID SHOCK SNAP SPIN
SRD VORONOI
USER-ADIOS USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK
USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP
USER-H5MD USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE USER-NETCDF USER-OMP
USER-PHONON USER-PLUMED USER-PTM USER-QMMM USER-QTB USER-QUIP
USER-REAXC USER-SCAFACOS USER-SDPD USER-SMD USER-SMTBQ USER-SPH
USER-TALLY USER-UEF USER-VTK USER-YAFF)
foreach(PKG ${ALL_PACKAGES})
set(PKG_${PKG} OFF CACHE BOOL "" FORCE)

View File

@ -1,21 +1,19 @@
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MEAM MISC
MOLECULE MPIIO MSCG OPT PERI POEMS
PYTHON QEQ REAX REPLICA RIGID SHOCK SNAP SRD VORONOI)
# preset that turns on all existing packages. Using this preset
# followed by the nolib.cmake preset should configure a LAMMPS
# binary with as many packages included as can be compiled with
# just a working C++ compiler and an MPI library.
set(USER_PACKAGES USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-OMP USER-PHONON USER-QMMM USER-QTB
USER-QUIP USER-REAXC USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK)
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MEAM MPIIO MSCG POEMS PYTHON REAX VORONOI
USER-ATC USER-AWPMD USER-COLVARS USER-H5MD USER-LB USER-MOLFILE
USER-NETCDF USER-PLUMED USER-QMMM USER-QUIP USER-SMD USER-VTK)
set(ALL_PACKAGES ${STANDARD_PACKAGES} ${USER_PACKAGES})
set(ALL_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MISC MESSAGE MOLECULE
MPIIO MSCG OPT PERI POEMS PYTHON QEQ REPLICA RIGID SHOCK SNAP SPIN
SRD VORONOI
USER-ADIOS USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK
USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP
USER-H5MD USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE USER-NETCDF USER-OMP
USER-PHONON USER-PLUMED USER-PTM USER-QMMM USER-QTB USER-QUIP
USER-REAXC USER-SCAFACOS USER-SDPD USER-SMD USER-SMTBQ USER-SPH
USER-TALLY USER-UEF USER-VTK USER-YAFF)
foreach(PKG ${ALL_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)

View File

@ -1,71 +0,0 @@
set(PKG_ASPHERE OFF CACHE BOOL "" FORCE)
set(PKG_BODY OFF CACHE BOOL "" FORCE)
set(PKG_CLASS2 OFF CACHE BOOL "" FORCE)
set(PKG_COLLOID OFF CACHE BOOL "" FORCE)
set(PKG_COMPRESS OFF CACHE BOOL "" FORCE)
set(PKG_CORESHELL OFF CACHE BOOL "" FORCE)
set(PKG_DIPOLE OFF CACHE BOOL "" FORCE)
set(PKG_GPU OFF CACHE BOOL "" FORCE)
set(PKG_GRANULAR OFF CACHE BOOL "" FORCE)
set(PKG_KIM OFF CACHE BOOL "" FORCE)
set(PKG_KOKKOS OFF CACHE BOOL "" FORCE)
set(PKG_KSPACE OFF CACHE BOOL "" FORCE)
set(PKG_LATTE OFF CACHE BOOL "" FORCE)
set(PKG_LIB OFF CACHE BOOL "" FORCE)
set(PKG_MANYBODY OFF CACHE BOOL "" FORCE)
set(PKG_MC OFF CACHE BOOL "" FORCE)
set(PKG_MEAM OFF CACHE BOOL "" FORCE)
set(PKG_MISC OFF CACHE BOOL "" FORCE)
set(PKG_MOLECULE OFF CACHE BOOL "" FORCE)
set(PKG_MPIIO OFF CACHE BOOL "" FORCE)
set(PKG_MSCG OFF CACHE BOOL "" FORCE)
set(PKG_OPT OFF CACHE BOOL "" FORCE)
set(PKG_PERI OFF CACHE BOOL "" FORCE)
set(PKG_POEMS OFF CACHE BOOL "" FORCE)
set(PKG_PYTHOFF OFF CACHE BOOL "" FORCE)
set(PKG_QEQ OFF CACHE BOOL "" FORCE)
set(PKG_REAX OFF CACHE BOOL "" FORCE)
set(PKG_REPLICA OFF CACHE BOOL "" FORCE)
set(PKG_RIGID OFF CACHE BOOL "" FORCE)
set(PKG_SHOCK OFF CACHE BOOL "" FORCE)
set(PKG_SNAP OFF CACHE BOOL "" FORCE)
set(PKG_SRD OFF CACHE BOOL "" FORCE)
set(PKG_VOROFFOI OFF CACHE BOOL "" FORCE)
set(PKG_USER OFF CACHE BOOL "" FORCE)
set(PKG_USER-ATC OFF CACHE BOOL "" FORCE)
set(PKG_USER-AWPMD OFF CACHE BOOL "" FORCE)
set(PKG_USER-BOCS OFF CACHE BOOL "" FORCE)
set(PKG_USER-CGDNA OFF CACHE BOOL "" FORCE)
set(PKG_USER-CGSDK OFF CACHE BOOL "" FORCE)
set(PKG_USER-COLVARS OFF CACHE BOOL "" FORCE)
set(PKG_USER-DIFFRACTIOFF OFF CACHE BOOL "" FORCE)
set(PKG_USER-DPD OFF CACHE BOOL "" FORCE)
set(PKG_USER-DRUDE OFF CACHE BOOL "" FORCE)
set(PKG_USER-EFF OFF CACHE BOOL "" FORCE)
set(PKG_USER-FEP OFF CACHE BOOL "" FORCE)
set(PKG_USER-H5MD OFF CACHE BOOL "" FORCE)
set(PKG_USER-INTEL OFF CACHE BOOL "" FORCE)
set(PKG_USER-LB OFF CACHE BOOL "" FORCE)
set(PKG_USER-MANIFOLD OFF CACHE BOOL "" FORCE)
set(PKG_USER-MEAMC OFF CACHE BOOL "" FORCE)
set(PKG_USER-MESO OFF CACHE BOOL "" FORCE)
set(PKG_USER-MGPT OFF CACHE BOOL "" FORCE)
set(PKG_USER-MISC OFF CACHE BOOL "" FORCE)
set(PKG_USER-MOFFF OFF CACHE BOOL "" FORCE)
set(PKG_USER-MOLFILE OFF CACHE BOOL "" FORCE)
set(PKG_USER-NETCDF OFF CACHE BOOL "" FORCE)
set(PKG_USER-OMP OFF CACHE BOOL "" FORCE)
set(PKG_USER-PHONON OFF CACHE BOOL "" FORCE)
set(PKG_USER-PLUMED OFF CACHE BOOL "" FORCE)
set(PKG_USER-QMMM OFF CACHE BOOL "" FORCE)
set(PKG_USER-QTB OFF CACHE BOOL "" FORCE)
set(PKG_USER-QUIP OFF CACHE BOOL "" FORCE)
set(PKG_USER-REAXC OFF CACHE BOOL "" FORCE)
set(PKG_USER-SDPD OFF CACHE BOOL "" FORCE)
set(PKG_USER-SMD OFF CACHE BOOL "" FORCE)
set(PKG_USER-SMTBQ OFF CACHE BOOL "" FORCE)
set(PKG_USER-SPH OFF CACHE BOOL "" FORCE)
set(PKG_USER-TALLY OFF CACHE BOOL "" FORCE)
set(PKG_USER-UEF OFF CACHE BOOL "" FORCE)
set(PKG_USER-VTK OFF CACHE BOOL "" FORCE)

View File

@ -0,0 +1,17 @@
set(WIN_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KSPACE MANYBODY MC MISC MOLECULE OPT PERI POEMS QEQ
REPLICA RIGID SHOCK SNAP SPIN SRD VORONOI USER-ATC USER-AWPMD
USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS USER-DIFFRACTION
USER-DPD USER-DRUDE USER-EFF USER-FEP USER-INTEL USER-MANIFOLD
USER-MEAMC USER-MESO USER-MISC USER-MOFFF USER-MOLFILE USER-OMP
USER-PHONON USER-PTM USER-QTB USER-REAXC USER-SDPD USER-SMD
USER-SMTBQ USER-SPH USER-TALLY USER-UEF USER-YAFF)
foreach(PKG ${WIN_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()
set(DOWNLOAD_VORO ON CACHE BOOL "" FORCE)
set(DOWNLOAD_EIGEN3 ON CACHE BOOL "" FORCE)
set(LAMMPS_MEMALIGN "0" CACHE STRING "" FORCE)
set(INTEL_LRT_MODE "none" CACHE STRING "" FORCE)

View File

@ -0,0 +1,8 @@
# preset that turns on just a few, frequently used packages
# this will be compiled quickly and handle a lot of common inputs.
set(ALL_PACKAGES KSPACE MANYBODY MOLECULE RIGID)
foreach(PKG ${ALL_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()

15
cmake/presets/most.cmake Normal file
View File

@ -0,0 +1,15 @@
# preset that turns on a wide range of packages, some of which require
# external libraries. Compared to all_on.cmake some more unusual packages
# are removed. The resulting binary should be able to run most inputs.
set(ALL_PACKAGES ASPHERE CLASS2 COLLOID CORESHELL DIPOLE
GRANULAR KSPACE MANYBODY MC MISC MOLECULE OPT PERI
PYTHON QEQ REPLICA RIGID SHOCK SRD VORONOI
USER-CGDNA USER-CGSDK USER-COLVARS USER-DIFFRACTION USER-DPD
USER-DRUDE USER-FEP USER-MEAMC USER-MESO
USER-MISC USER-MOFFF USER-OMP USER-PLUMED USER-PHONON USER-REAXC
USER-SPH USER-SMD USER-UEF USER-YAFF)
foreach(PKG ${ALL_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()

View File

@ -1,21 +1,10 @@
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MEAM MISC
MOLECULE MPIIO MSCG OPT PERI POEMS
PYTHON QEQ REAX REPLICA RIGID SHOCK SNAP SRD VORONOI)
# preset that turns off all packages that require some form of external
# library or special compiler (fortran or cuda) or equivalent.
set(USER_PACKAGES USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-OMP USER-PHONON USER-QMMM USER-QTB
USER-QUIP USER-REAXC USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK)
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MEAM MPIIO MSCG POEMS PYTHON REAX VORONOI
USER-ATC USER-AWPMD USER-COLVARS USER-H5MD USER-LB USER-MOLFILE
USER-NETCDF USER-PLUMED USER-QMMM USER-QUIP USER-SMD USER-VTK)
set(ALL_PACKAGES ${STANDARD_PACKAGES} ${USER_PACKAGES})
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MPIIO MSCG PYTHON
VORONOI USER-ADIOS USER-ATC USER-AWPMD USER-H5MD USER-LB
USER-MOLFILE USER-NETCDF USER-PLUMED USER-QMMM USER-QUIP
USER-SCAFACOS USER-SMD USER-VTK)
foreach(PKG ${PACKAGES_WITH_LIB})
set(PKG_${PKG} OFF CACHE BOOL "" FORCE)

View File

@ -1,22 +0,0 @@
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MEAM MISC
MOLECULE MPIIO MSCG OPT PERI POEMS
PYTHON QEQ REAX REPLICA RIGID SHOCK SNAP SRD VORONOI)
set(USER_PACKAGES USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-OMP USER-PHONON USER-QMMM USER-QTB
USER-QUIP USER-REAXC USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK)
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MEAM MPIIO MSCG POEMS PYTHON REAX VORONOI
USER-ATC USER-AWPMD USER-COLVARS USER-H5MD USER-LB USER-MOLFILE
USER-NETCDF USER-QMMM USER-QUIP USER-SMD USER-VTK)
set(ALL_PACKAGES ${STANDARD_PACKAGES} ${USER_PACKAGES})
foreach(PKG ${STANDARD_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()

View File

@ -1,26 +0,0 @@
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MEAM MISC
MOLECULE MPIIO MSCG OPT PERI POEMS
PYTHON QEQ REAX REPLICA RIGID SHOCK SNAP SRD VORONOI)
set(USER_PACKAGES USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-OMP USER-PHONON USER-QMMM USER-QTB
USER-QUIP USER-REAXC USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK)
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MEAM MPIIO MSCG POEMS PYTHON REAX VORONOI
USER-ATC USER-AWPMD USER-COLVARS USER-H5MD USER-LB USER-MOLFILE
USER-NETCDF USER-QMMM USER-QUIP USER-SMD USER-VTK)
set(ALL_PACKAGES ${STANDARD_PACKAGES} ${USER_PACKAGES})
foreach(PKG ${STANDARD_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()
foreach(PKG ${PACKAGES_WITH_LIB})
set(PKG_${PKG} OFF CACHE BOOL "" FORCE)
endforeach()

View File

@ -1,22 +0,0 @@
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS CORESHELL DIPOLE GPU
GRANULAR KIM KOKKOS KSPACE LATTE MANYBODY MC MEAM MISC
MOLECULE MPIIO MSCG OPT PERI POEMS
PYTHON QEQ REAX REPLICA RIGID SHOCK SNAP SRD VORONOI)
set(USER_PACKAGES USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-INTEL USER-LB USER-MANIFOLD USER-MEAMC USER-MESO
USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-OMP USER-PHONON USER-QMMM USER-QTB
USER-QUIP USER-REAXC USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK)
set(PACKAGES_WITH_LIB COMPRESS GPU KIM KOKKOS LATTE MEAM MPIIO MSCG POEMS PYTHON REAX VORONOI
USER-ATC USER-AWPMD USER-COLVARS USER-H5MD USER-LB USER-MOLFILE
USER-NETCDF USER-PLUMED USER-QMMM USER-QUIP USER-SMD USER-VTK)
set(ALL_PACKAGES ${STANDARD_PACKAGES} ${USER_PACKAGES})
foreach(PKG ${USER_PACKAGES})
set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()

View File

@ -28,7 +28,7 @@ Makefile(s). Example:
cd lammps # change to the LAMMPS distribution directory
mkdir build; cd build # create a new directory (folder) for build
cmake ../cmake \[options ...\] # configuration with (command-line) cmake
cmake \[options ...\] ../cmake # configuration with (command-line) cmake
make # compilation :pre
The cmake command will detect available features, enable selected
@ -41,7 +41,8 @@ If your machine has multiple CPU cores (most do these days), using a
command like "make -jN" (with N being the number of available local
CPU cores) can be much faster. If you plan to do development on
LAMMPS or need to re-compile LAMMPS repeatedly, installation of the
ccache (= Compiler Cache) software may speed up compilation even more.
ccache (= Compiler Cache) software may speed up repeated compilation
even more.
After compilation, you can optionally copy the LAMMPS executable and
library into your system folders (by default under $HOME/.local) with:
@ -108,7 +109,8 @@ command-line options. Several useful ones are:
-D CMAKE_BUILD_TYPE=type # type = Release or Debug
-G output # style of output CMake generates
-DVARIABLE=value # setting for a LAMMPS feature to enable
-D VARIABLE=value # ditto, but cannot come after CMakeLists.txt dir :pre
-D VARIABLE=value # ditto, but cannot come after CMakeLists.txt dir
-C path/to/preset/file # load some CMake settings before configuring :pre
All the LAMMPS-specific -D variables that a LAMMPS build supports are
described on the pages linked to from the "Build"_Build.html doc page.

View File

@ -859,23 +859,34 @@ file.
USER-INTEL package :h4,link(user-intel)
To build with this package, you must choose which hardware you want to
build for, either Intel CPUs or Intel KNLs. You should also typically
"install the USER-OMP package"_#user-omp, as it can be used in tandem
with the USER-INTEL package to good effect, as explained on the "Speed
intel"_Speed_intel.html doc page.
build for, either x86 CPUs or Intel KNLs in offload mode. You should
also typically "install the USER-OMP package"_#user-omp, as it can be
used in tandem with the USER-INTEL package to good effect, as explained
on the "Speed intel"_Speed_intel.html doc page.
[CMake build]:
-D INTEL_ARCH=value # value = cpu (default) or knl
-D BUILD_OMP=yes # also required to build with the USER-INTEL package :pre
-D INTEL_LRT_MODE=value # value = threads, none, or c++11 :pre
Requires an Intel compiler as well as the Intel TBB and MKL libraries.
In Long-range thread mode (LRT) a modified verlet style is used, that
operates the Kspace calculation in a separate thread concurrently to
other calculations. This has to be enabled in the "package intel"_package.html
command at runtime. With the setting "threads" it uses the pthreads
library, while c++11 will use the built-in thread support of C++11
compilers. The option "none" skips compilation of this feature. The
default is to use "threads" if pthreads is available and otherwise "none".
Best performance is achieved with Intel hardware, Intel compilers, as well as
the Intel TBB and MKL libraries. However, the code also compiles, links, and
runs with other compilers and without TBB and MKL.
[Traditional make]:
Choose which hardware to compile for in Makefile.machine via the
following settings. See src/MAKE/OPTIONS/Makefile.intel_cpu* and
Makefile.knl files for examples.
Makefile.knl files for examples, and src/USER-INTEL/README for
additional information.
For CPUs:

View File

@ -149,26 +149,39 @@ system. Using these files you can enable/disable portions of the
available packages in LAMMPS. If you need a custom preset you can take
one of them as a starting point and customize it to your needs.
cmake -C ../cmake/presets/all_on.cmake \[OPTIONS\] ../cmake | enable all packages
cmake -C ../cmake/presets/all_off.cmake \[OPTIONS\] ../cmake | disable all packages
cmake -C ../cmake/presets/std.cmake \[OPTIONS\] ../cmake | enable standard packages
cmake -C ../cmake/presets/user.cmake \[OPTIONS\] ../cmake | enable user packages
cmake -C ../cmake/presets/std_nolib.cmake \[OPTIONS\] ../cmake | enable standard packages that do not require extra libraries
cmake -C ../cmake/presets/nolib.cmake \[OPTIONS\] ../cmake | disable all packages that do not require extra libraries
cmake -C ../cmake/presets/manual_selection.cmake \[OPTIONS\] ../cmake | example of how to create a manual selection of packages :tb(s=|,a=l)
cmake -C ../cmake/presets/all_on.cmake \[OPTIONS\] ../cmake |
enable all packages |
cmake -C ../cmake/presets/all_off.cmake \[OPTIONS\] ../cmake |
disable all packages |
cmake -C ../cmake/presets/minimal.cmake \[OPTIONS\] ../cmake |
enable just a few core packages |
cmake -C ../cmake/presets/most.cmake \[OPTIONS\] ../cmake |
enable most common packages |
cmake -C ../cmake/presets/nolib.cmake \[OPTIONS\] ../cmake |
disable packages that do require extra libraries or tools |
cmake -C ../cmake/presets/mingw.cmake \[OPTIONS\] ../cmake |
enable all packages compatible with MinGW compilers :tb(c=2,s=|,a=l)
NOTE: Running cmake this way manipulates the variable cache in your
current build directory. You can combine presets and options with
multiple cmake runs.
current build directory. You can combine multiple presets and options
in a single cmake run, or change settings incrementally by running
cmake with new flags.
[Example:]
# build LAMMPS with all "standard" packages which don't
# use libraries and enable GPU package
# build LAMMPS with most commonly used packages, but then remove
# those requiring additional libraries or tools, but still enable the
# GPU package and configure it for using CUDA. You can run:
mkdir build
cd build
cmake -C ../cmake/presets/std_nolib.cmake -D PKG_GPU=on ../cmake :pre
cmake -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake -D PKG_GPU=on -D GPU_API=cuda ../cmake :pre
# to add another package, say BODY to the previous configuration you can run:
cmake -D PKG_BODY=on . :pre
# to reset the package selection from above to the default of no packages,
# while leaving all other settings untouched, you can run:
cmake -C ../cmake/presets/no_all.cmake . :pre
:line
[Make shortcuts for installing many packages]:

View File

@ -57,10 +57,10 @@ FFT_INC = -DFFT_SINGLE # do not specify for double precision
FFT_INC = -DFFT_PACK_ARRAY # or -DFFT_PACK_POINTER or -DFFT_PACK_MEMCPY :pre
# default is FFT_PACK_ARRAY if not specified
FFT_INC = -I/usr/local/include
FFT_INC = -I/usr/local/include
FFT_PATH = -L/usr/local/lib
FFT_LIB = -lfftw3 # FFTW3 double precision
FFT_LIB = -lfftw3 -lfftw3f # FFTW3 single precision
FFT_LIB = -lfftw3 # FFTW3 double precision
FFT_LIB = -lfftw3 -lfftw3f # FFTW3 single precision
FFT_LIB = -lmkl_intel_lp64 -lmkl_sequential -lmkl_core # MKL with Intel compiler
FFT_LIB = -lmkl_gf_lp64 -lmkl_sequential -lmkl_core # MKL with GNU compiler :pre
@ -179,8 +179,11 @@ e.g. from 511 to -512, which can cause diagnostics like the
mean-squared displacement, as calculated by the "compute
msd"_compute_msd.html command, to be faulty.
Note that the USER-ATC package is not currently compatible with the
"bigbig" setting.
Note that the USER-ATC package and the USER-INTEL package are currently
not compatible with the "bigbig" setting. Also, there are limitations
when using the library interface. Some functions with known issues
have been replaced by dummy calls printing a corresponding error rather
than crashing randomly or corrupting data.
Also note that the GPU package requires its lib/gpu library to be
compiled with the same size setting, or the link will fail. A CMake

View File

@ -51,11 +51,10 @@ provides a unix/linux interface to low-level Windows functions, so LAMMPS
can be compiled on Windows. The necessary (minor) modifications to LAMMPS
are included, but may not always be up-to-date for recently added functionality
and the corresponding new code. A machine makefile for using cygwin for
the old build system is provided. The CMake build system is untested
for this; you will have to request that makefiles are generated and
manually set the compiler.
the old build system is provided. Using CMake for this mode of compilation
is untested and not likely to work.
When compiling for Windows [not] set the -DLAMMPS_MEMALIGN define
When compiling for Windows do [not] set the -DLAMMPS_MEMALIGN define
in the LMP_INC makefile variable and add -lwsock32 -lpsapi to the linker
flags in LIB makefile variable. Try adding -static-libgcc or -static or
both to the linker flags when your resulting LAMMPS Windows executable
@ -79,7 +78,13 @@ probably the currently best tested and supported way to build LAMMPS
executables for Windows. There are makefiles provided for the
traditional build system, but CMake has also been successfully tested
using the mingw32-cmake and mingw64-cmake wrappers that are bundled
with the cross-compiler environment on Fedora machines.
with the cross-compiler environment on Fedora machines. A CMake preset
selecting all packages compatible with this cross-compilation build
is provided. You likely need to disable the GPU package unless you
download and install the contents of the pre-compiled "OpenCL ICD loader
library"_https://download.lammps.org/thirdparty/opencl-win-devel.tar.gz
into your MinGW64 cross-compiler environment. The cross-compilation
currently will only produce non-MPI serial binaries.
Please keep in mind, though, that this only applies to compiling LAMMPS.
Whether the resulting binaries do work correctly is not tested by the

View File

@ -98,6 +98,7 @@ OPT.
"gran/hertz/history (o)"_pair_gran.html,
"gran/hooke (o)"_pair_gran.html,
"gran/hooke/history (ko)"_pair_gran.html,
"granular"_pair_granular.html,
"gw"_pair_gw.html,
"gw/zbl"_pair_gw.html,
"hbond/dreiding/lj (o)"_pair_hbond_dreiding.html,

View File

@ -166,9 +166,6 @@ void lammps_gather_atoms_subset(void *, char *, int, int, int, int *, void *)
void lammps_scatter_atoms(void *, char *, int, int, void *)
void lammps_scatter_atoms_subset(void *, char *, int, int, int, int *, void *) :pre
void lammps_create_atoms(void *, int, tagint *, int *, double *, double *,
imageint *, int) :pre
The gather functions collect peratom info of the requested type (atom
coords, atom types, forces, etc) from all processors, and return the
same vector of values to each calling processor. The scatter
@ -176,6 +173,11 @@ functions do the inverse. They distribute a vector of peratom values,
passed by all calling processors, to individual atoms, which may be
owned by different processors.
IMPORTANT NOTE: These functions are not compatible with the
-DLAMMPS_BIGBIG setting when compiling LAMMPS. Dummy functions
that result in an error message and abort will be substituted
instead of resulting in random crashes and memory corruption.
The lammps_gather_atoms() function does this for all N atoms in the
system, ordered by atom ID, from 1 to N. The
lammps_gather_atoms_concat() function does it for all N atoms, but
@ -196,6 +198,9 @@ those values to each atom in the system. The
lammps_scatter_atoms_subset() function takes a subset of IDs as an
argument and only scatters those values to the owning atoms.
void lammps_create_atoms(void *, int, tagint *, int *, double *, double *,
imageint *, int) :pre
The lammps_create_atoms() function takes a list of N atoms as input
with atom types and coords (required), and optionally atom IDs and
velocities and image flags. It uses the coords of each atom to assign

View File

@ -24,7 +24,7 @@ LAMMPS to run on the CPU cores and co-processor cores simultaneously.
Angle Styles: charmm, harmonic :ulb,l
Bond Styles: fene, fourier, harmonic :l
Dihedral Styles: charmm, harmonic, opls :l
Dihedral Styles: charmm, fourier, harmonic, opls :l
Fixes: nve, npt, nvt, nvt/sllod, nve/asphere :l
Improper Styles: cvff, harmonic :l
Pair Styles: airebo, airebo/morse, buck/coul/cut, buck/coul/long,

View File

@ -26,12 +26,13 @@ kim_query latconst get_test_result test=TE_156715955670 model=MO_800509458712 &
The kim_query command allows one to retrieve properties from the OpenKIM
repository through a web query. The result is stored in a string style
"variable"_variable.html, the name of which must be given as the first
argument of the kim_query command. The second required argument is the
name of the actual query function (e.g. {get_test_result}). All following
argument of the kim_query command. The second required argument is the
name of the actual query function (e.g. {get_test_result}). All following
arguments are parameters handed over to the web query in the format
{keyword=value}. This list of supported keywords and the type of how
the value has to be encoded depends on the query function used.
For more details on this, please refer to the OpenKIM homepage.
{keyword=value}. The list of supported keywords and the type of how
the value has to be encoded depends on the query function used. This
mirrors the functionality available on the OpenKIM webpage at
"https://query.openkim.org"_https://query.openkim.org/
[Restrictions:]

View File

@ -19,7 +19,7 @@ pair_style granular command :h3
pair_style granular cutoff :pre
cutoff = global cutoff (optional). See discussion below. :l
cutoff = global cutoff (optional). See discussion below. :ul
[Examples:]

View File

@ -134,7 +134,7 @@ The {mom} and {rot} keywords are used by {create}. If mom = yes, the
linear momentum of the newly created ensemble of velocities is zeroed;
if rot = yes, the angular momentum is zeroed.
*line
:line
If specified, the {temp} keyword is used by {create} and {scale} to
specify a "compute"_compute.html that calculates temperature in a

View File

@ -58,7 +58,7 @@ pair_coeff 1 4 1.0 1.0 1.12246
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 1000
thermo 50
#dump 1 all atom 2000 dump.micelle
@ -69,4 +69,4 @@ thermo 1000
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
run 60000
run 1000

View File

@ -0,0 +1,87 @@
# 2d micelle simulation
# Bead-spring chains in implicit solvent, read from data.micelle.
# Stage 1: soft push-off to remove overlaps; Stage 2: LJ main run with
# the solute (micelle chains) treated as rigid bodies.

dimension 2
neighbor 0.3 bin
neigh_modify delay 5

atom_style bond

# Soft potential push-off
# The soft-pair prefactor is ramped from 1 to 20 over the run via fix adapt,
# gently pushing overlapping beads apart.
read_data data.micelle
special_bonds fene

pair_style soft 1.12246
pair_coeff * * 0.0 1.12246

bond_style harmonic
bond_coeff 1 50.0 0.75

velocity all create 0.45 2349852

variable prefactor equal ramp(1.0,20.0)

fix 1 all nve
# rescale temperature to 0.45 every 100 steps (20% max change per rescale)
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
# keep the simulation strictly 2d (zero z-forces/velocities)
fix 4 all enforce2d

thermo 50
run 500

# push-off done; stop ramping the soft prefactor
unfix 3

# Main run

pair_style lj/cut 2.5

# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5

# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5

# solvent/tail - full-size and repulsive
# cutoff 1.12246 ~ 2^(1/6)*sigma, i.e. purely repulsive (WCA-like)
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246

# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246

thermo 50

#dump		1 all atom 2000 dump.micelle

#dump		2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify	2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75

#dump		3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify	3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75

reset_timestep 0

# split the system: molecule-ID 0 is solvent; everything else is solute
group solvent molecule 0
group solute subtract all solvent

# rebuild the integration fixes: only solvent is time-integrated with nve;
# solute molecules become rigid bodies with a Langevin thermostat
unfix 1
unfix 2
unfix 4
fix 1 solvent nve
fix 2 solvent temp/rescale 100 0.45 0.45 0.02 1.0
fix 5 solute rigid molecule langevin 0.45 0.45 0.5 112211
fix 4 all enforce2d
run 500

# switch the rigid treatment to fix rigid/small (no thermostat) and continue
unfix 2
unfix 4
unfix 5
fix 5 solute rigid/small molecule
fix 4 all enforce2d
run 500

View File

@ -1,259 +0,0 @@
LAMMPS (28 Feb 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:88)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
1 by 1 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000394821 secs
read_data CPU = 0.00212336 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.00018549 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.799 | 3.799 | 3.799 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.47411013 0.67721272 0.057404514 1.2083323 1.3375852
100 0.45 0.73046745 0.054836584 1.234929 2.3196516
150 0.67521742 0.72402001 0.043490075 1.4421648 2.8744416
200 0.45 0.78481891 0.076931503 1.3113754 3.0412388
250 0.66479018 0.69790602 0.081075564 1.4432178 3.6917024
300 0.45 0.76820218 0.066727591 1.2845548 3.7861054
350 0.67619136 0.625715 0.072722727 1.3740656 4.2861621
400 0.45 0.68527759 0.090724527 1.2256271 4.4725214
450 0.56702844 0.64402767 0.080555563 1.2911391 4.7402211
500 0.45 0.64883009 0.078376672 1.1768318 4.7919294
550 0.564664 0.58260368 0.080779475 1.2275766 4.9855705
600 0.45 0.58193041 0.088386617 1.119942 5.131481
650 0.52110993 0.5415273 0.097683746 1.1598867 5.2500294
700 0.45 0.50856787 0.088471208 1.0466641 5.2550165
750 0.51510855 0.47441291 0.089429375 1.0785216 5.375763
800 0.45 0.49926696 0.085958476 1.0348504 5.4665914
850 0.50688494 0.46614429 0.088962292 1.0615691 5.556932
900 0.45 0.47785593 0.10150857 1.0289895 5.7765975
950 0.49590559 0.46050477 0.096404887 1.052402 5.8649245
1000 0.45 0.47691182 0.08808163 1.0146185 6.0177568
Loop time of 0.113919 on 1 procs for 1000 steps with 1200 atoms
Performance: 3792167.464 tau/day, 8778.165 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.076825 | 0.076825 | 0.076825 | 0.0 | 67.44
Bond | 0.0041864 | 0.0041864 | 0.0041864 | 0.0 | 3.67
Neigh | 0.017061 | 0.017061 | 0.017061 | 0.0 | 14.98
Comm | 0.0019042 | 0.0019042 | 0.0019042 | 0.0 | 1.67
Output | 0.00017285 | 0.00017285 | 0.00017285 | 0.0 | 0.15
Modify | 0.011218 | 0.011218 | 0.011218 | 0.0 | 9.85
Other | | 0.002551 | | | 2.24
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 195 ave 195 max 195 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 3136 ave 3136 max 3136 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 3136
Ave neighs/atom = 2.61333
Ave special neighs/atom = 0.5
Neighbor list builds = 92
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 1000
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
run 60000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.024 | 4.024 | 4.024 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 -1.7056163 0.08808163 -1.1679097 3.9431686
1000 0.45 -1.9727644 0.05860769 -1.4645317 1.9982326
2000 0.46143408 -1.9889684 0.058103225 -1.4698156 1.7806269
3000 0.44459291 -1.9997961 0.068724164 -1.4868496 1.4369618
4000 0.46939549 -2.0330437 0.073499424 -1.4905399 1.3780016
5000 0.44411088 -2.0339936 0.05862049 -1.5316323 1.2544164
6000 0.44034597 -2.0265475 0.066481992 -1.5200864 1.2362891
7000 0.45097378 -2.0331083 0.058467565 -1.5240428 1.2762333
8000 0.45797632 -2.0330255 0.060048036 -1.5153828 1.3862396
9000 0.45297811 -2.0383417 0.067056519 -1.5186845 1.2762554
10000 0.45 -2.0628269 0.065650067 -1.5475518 1.0566213
11000 0.44466757 -2.0593977 0.06190999 -1.5531907 1.1452469
12000 0.46743534 -2.0684295 0.061056278 -1.5403274 1.0824225
13000 0.45601091 -2.0689708 0.054868536 -1.5584713 0.96703283
14000 0.44111882 -2.0553174 0.058249816 -1.5563164 1.0986427
15000 0.43894405 -2.0866829 0.064117804 -1.5839869 0.90031836
16000 0.43856814 -2.0879319 0.056024166 -1.593705 0.96387323
17000 0.45977841 -2.103188 0.058097306 -1.5856955 0.83352919
18000 0.43423341 -2.0813151 0.066623991 -1.5808196 0.98157638
19000 0.44245939 -2.0851261 0.057637655 -1.5853978 0.84228341
20000 0.43144678 -2.0895403 0.06536727 -1.5930858 0.88177768
21000 0.45014968 -2.106686 0.059137572 -1.5977739 0.89408935
22000 0.4575126 -2.1024115 0.063013023 -1.5822672 0.84886734
23000 0.45 -2.10897 0.06724784 -1.5920971 0.66205013
24000 0.43055602 -2.0894725 0.061566464 -1.5977089 0.81764789
25000 0.4366384 -2.0926743 0.059609321 -1.5967905 0.85549875
26000 0.4521714 -2.0963996 0.062031863 -1.5825731 0.80137118
27000 0.45734834 -2.1060987 0.061712636 -1.5874188 0.82899415
28000 0.44803467 -2.0859226 0.061871856 -1.5763894 0.97007526
29000 0.45 -2.1106243 0.063825481 -1.5971738 0.63798376
30000 0.44932806 -2.1006036 0.053053934 -1.598596 0.63907113
31000 0.44713779 -2.1096164 0.066470416 -1.5963808 0.66832708
32000 0.4373357 -2.0941237 0.058871613 -1.5982808 0.78176106
33000 0.44030485 -2.105644 0.058804306 -1.6069017 0.66286458
34000 0.43781175 -2.1233209 0.064611206 -1.6212628 0.56342584
35000 0.45670132 -2.1059408 0.053049584 -1.5965705 0.73992396
36000 0.45555427 -2.1149877 0.057627709 -1.6021854 0.85854939
37000 0.44134236 -2.1106202 0.064444306 -1.6052013 0.74674603
38000 0.44812623 -2.1003681 0.057266258 -1.5953491 0.78239359
39000 0.44167062 -2.11141 0.055354 -1.6147534 0.7066385
40000 0.46103176 -2.1166687 0.062155412 -1.5938657 0.73620955
41000 0.44537102 -2.0993898 0.05631213 -1.5980778 0.87348756
42000 0.44752506 -2.1115212 0.057506521 -1.6068625 0.72999561
43000 0.4483886 -2.1184719 0.066943915 -1.6035131 0.78112063
44000 0.45944897 -2.0916657 0.055242781 -1.5773568 0.98660473
45000 0.46238513 -2.1163075 0.0530031 -1.6013046 0.74416054
46000 0.45979064 -2.1165545 0.060657581 -1.5964895 0.63516974
47000 0.45936546 -2.1140678 0.049931919 -1.6051532 0.76425182
48000 0.45424613 -2.1122681 0.061885599 -1.5965149 0.71981142
49000 0.44449524 -2.1147361 0.06626748 -1.6043438 0.78720467
50000 0.4641185 -2.1114668 0.055104874 -1.5926302 0.70195865
51000 0.44220655 -2.1075773 0.0589109 -1.6068283 0.73806859
52000 0.43097906 -2.1189493 0.061502241 -1.6268271 0.69622593
53000 0.45 -2.137688 0.053631829 -1.6344311 0.48269158
54000 0.43777118 -2.1089246 0.047098534 -1.6244197 0.70423814
55000 0.46061985 -2.1129502 0.062520353 -1.5901938 0.72492307
56000 0.4524841 -2.1195648 0.06580089 -1.6016569 0.52709892
57000 0.44914574 -2.1041993 0.061040876 -1.594387 0.7979988
58000 0.46446286 -2.1181238 0.055741995 -1.598306 0.51009146
59000 0.4632674 -2.1169321 0.050672678 -1.6033781 0.83110911
60000 0.46340478 -2.122846 0.058485209 -1.6013422 0.69966471
Loop time of 8.01683 on 1 procs for 60000 steps with 1200 atoms
Performance: 3233199.903 tau/day, 7484.259 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.4027 | 5.4027 | 5.4027 | 0.0 | 67.39
Bond | 0.23585 | 0.23585 | 0.23585 | 0.0 | 2.94
Neigh | 1.5188 | 1.5188 | 1.5188 | 0.0 | 18.95
Comm | 0.14452 | 0.14452 | 0.14452 | 0.0 | 1.80
Output | 0.00060487 | 0.00060487 | 0.00060487 | 0.0 | 0.01
Modify | 0.56352 | 0.56352 | 0.56352 | 0.0 | 7.03
Other | | 0.1508 | | | 1.88
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 395 ave 395 max 395 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 9652 ave 9652 max 9652 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9652
Ave neighs/atom = 8.04333
Ave special neighs/atom = 0.5
Neighbor list builds = 4886
Dangerous builds = 0
Total wall time: 0:00:08

View File

@ -1,259 +0,0 @@
LAMMPS (28 Feb 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:88)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
2 by 2 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000130415 secs
read_data CPU = 0.00132132 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 4.76837e-05 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.758 | 3.85 | 4.126 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.47411013 0.67721272 0.057404514 1.2083323 1.3375852
100 0.45 0.73046745 0.054836584 1.234929 2.3196516
150 0.67521742 0.72402001 0.043490075 1.4421648 2.8744416
200 0.45 0.78481891 0.076931503 1.3113754 3.0412388
250 0.66479018 0.69790602 0.081075564 1.4432178 3.6917024
300 0.45 0.76820218 0.066727591 1.2845548 3.7861054
350 0.67619136 0.625715 0.072722727 1.3740656 4.2861621
400 0.45 0.68527759 0.090724527 1.2256271 4.4725214
450 0.56702844 0.64402767 0.080555563 1.2911391 4.7402211
500 0.45 0.64883009 0.078376672 1.1768318 4.7919294
550 0.564664 0.58260368 0.080779475 1.2275766 4.9855705
600 0.45 0.58193041 0.088386617 1.119942 5.131481
650 0.52110993 0.5415273 0.097683746 1.1598867 5.2500294
700 0.45 0.50856787 0.088471208 1.0466641 5.2550165
750 0.51510855 0.47441291 0.089429375 1.0785216 5.375763
800 0.45 0.49926696 0.085958476 1.0348504 5.4665914
850 0.50688494 0.46614429 0.088962292 1.0615691 5.556932
900 0.45 0.47785593 0.10150857 1.0289895 5.7765975
950 0.49590559 0.46050477 0.096404887 1.052402 5.8649245
1000 0.45 0.47691182 0.08808163 1.0146185 6.0177568
Loop time of 0.0377742 on 4 procs for 1000 steps with 1200 atoms
Performance: 11436375.633 tau/day, 26473.092 timesteps/s
96.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.016871 | 0.017299 | 0.018185 | 0.4 | 45.80
Bond | 0.0010128 | 0.0010633 | 0.001116 | 0.1 | 2.81
Neigh | 0.004832 | 0.0048565 | 0.0048807 | 0.0 | 12.86
Comm | 0.0066509 | 0.0077528 | 0.0084352 | 0.8 | 20.52
Output | 0.00022054 | 0.00028259 | 0.00046587 | 0.0 | 0.75
Modify | 0.0035386 | 0.0036086 | 0.0036943 | 0.1 | 9.55
Other | | 0.002912 | | | 7.71
Nlocal: 300 ave 305 max 292 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Nghost: 100.25 ave 108 max 93 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 784 ave 815 max 739 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Total # of neighbors = 3136
Ave neighs/atom = 2.61333
Ave special neighs/atom = 0.5
Neighbor list builds = 92
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 1000
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
run 60000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.001 | 4.032 | 4.124 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 -1.7056163 0.08808163 -1.1679097 3.9431686
1000 0.45 -1.9727652 0.058608073 -1.4645321 1.9982444
2000 0.44428815 -1.9902282 0.064240544 -1.4820698 1.7051263
3000 0.46641766 -1.9856844 0.065017468 -1.4546379 1.6939772
4000 0.45734058 -2.0242583 0.070494626 -1.4968042 1.3474276
5000 0.44904747 -2.0086954 0.058801142 -1.501221 1.4632351
6000 0.44961405 -2.0334509 0.05721299 -1.5269985 1.3093586
7000 0.45474928 -2.0453645 0.064725006 -1.5262692 1.1581035
8000 0.44274767 -2.0375379 0.062216035 -1.5329431 1.312914
9000 0.46176571 -2.0473031 0.065581966 -1.5203402 1.2013868
10000 0.45046977 -2.0540466 0.065402724 -1.5385495 0.95819581
11000 0.45016671 -2.0610028 0.056993955 -1.5542172 1.0433435
12000 0.43823039 -2.073155 0.065171939 -1.5701178 1.1400059
13000 0.44482161 -2.0678338 0.063901045 -1.5594819 0.97993813
14000 0.45 -2.0892562 0.061753632 -1.5778776 0.89841778
15000 0.44328626 -2.0859346 0.059956258 -1.5830615 0.90664821
16000 0.45666508 -2.0859262 0.059582346 -1.5700593 0.9702235
17000 0.44832038 -2.0762124 0.059153394 -1.5691122 0.93020504
18000 0.4555831 -2.0844959 0.057986324 -1.5713062 0.87398232
19000 0.45257867 -2.0671736 0.062190389 -1.5527816 0.89208496
20000 0.44010419 -2.1020944 0.062053708 -1.6003033 0.84140973
21000 0.45239369 -2.0820308 0.060981799 -1.5690323 0.98502522
22000 0.44607468 -2.0820602 0.051731316 -1.5846259 0.86120529
23000 0.45088473 -2.0865286 0.05727778 -1.5787418 1.1479844
24000 0.45526919 -2.1086678 0.057378327 -1.5963997 0.86944138
25000 0.46536624 -2.1055425 0.05665328 -1.5839108 0.72895438
26000 0.46716668 -2.1035267 0.057498747 -1.5792505 0.85105386
27000 0.44374699 -2.0932213 0.060937242 -1.5889069 0.93200759
28000 0.45944001 -2.0968869 0.053052954 -1.5847768 0.78909249
29000 0.4543632 -2.10493 0.061511018 -1.5894345 0.85862527
30000 0.44987776 -2.0942536 0.062431086 -1.5823197 0.7349894
31000 0.43829016 -2.0951259 0.060245682 -1.5969553 0.86702973
32000 0.45416601 -2.0991679 0.055978905 -1.5894015 0.75777153
33000 0.4605079 -2.1118364 0.058205688 -1.5935066 0.86041104
34000 0.43638213 -2.0925345 0.067533519 -1.5889825 0.85100425
35000 0.46912252 -2.1082718 0.051646432 -1.5878938 0.73613751
36000 0.45 -2.0966442 0.052507159 -1.5945121 0.88722487
37000 0.44970507 -2.1029685 0.065454263 -1.588184 0.76033821
38000 0.44910233 -2.097751 0.05767009 -1.5913528 0.95830923
39000 0.4322161 -2.1060426 0.062453704 -1.611733 0.74681695
40000 0.46143858 -2.1328575 0.057333011 -1.6144704 0.58326322
41000 0.43180549 -2.1070656 0.064150563 -1.6114694 0.82842684
42000 0.46738909 -2.1067947 0.058017036 -1.5817781 0.73292362
43000 0.43699124 -2.1171964 0.062817262 -1.6177521 0.73354741
44000 0.45262916 -2.1281307 0.055228619 -1.6206502 0.64167946
45000 0.43905419 -2.088789 0.055597999 -1.5945027 0.8002542
46000 0.44485569 -2.1035061 0.067828181 -1.5911929 0.71861494
47000 0.44496824 -2.0968296 0.0632326 -1.5889996 0.75202899
48000 0.46567244 -2.1235948 0.061032118 -1.5972783 0.64094556
49000 0.43202506 -2.0986097 0.053464022 -1.6134806 0.83857984
50000 0.45454698 -2.1263344 0.058119708 -1.6140465 0.67030037
51000 0.43702766 -2.1292347 0.074047424 -1.6185238 0.52896462
52000 0.46367081 -2.1177288 0.06726625 -1.5871781 0.74343227
53000 0.45 -2.1341074 0.062769314 -1.6217131 0.51130365
54000 0.44862492 -2.1272108 0.057723381 -1.6212364 0.54735429
55000 0.44926027 -2.1350444 0.066186625 -1.6199719 0.66821299
56000 0.4544227 -2.1325537 0.065298628 -1.6132111 0.63597556
57000 0.45697003 -2.1323238 0.053312855 -1.6224218 0.55572633
58000 0.45698902 -2.1043208 0.055835989 -1.5918766 0.63502658
59000 0.4425306 -2.1120353 0.056617261 -1.6132563 0.65681272
60000 0.44319296 -2.1171981 0.058330294 -1.6160442 0.63602511
Loop time of 2.63918 on 4 procs for 60000 steps with 1200 atoms
Performance: 9821248.084 tau/day, 22734.371 timesteps/s
97.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.1742 | 1.278 | 1.3471 | 5.9 | 48.43
Bond | 0.046621 | 0.06565 | 0.081322 | 5.1 | 2.49
Neigh | 0.46642 | 0.46917 | 0.47105 | 0.3 | 17.78
Comm | 0.47295 | 0.55928 | 0.67758 | 10.5 | 21.19
Output | 0.00073624 | 0.00173 | 0.0047016 | 4.1 | 0.07
Modify | 0.14511 | 0.15226 | 0.15887 | 1.5 | 5.77
Other | | 0.1131 | | | 4.28
Nlocal: 300 ave 309 max 281 min
Histogram: 1 0 0 0 0 0 0 1 0 2
Nghost: 232.75 ave 234 max 231 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Neighs: 2450.25 ave 2576 max 2179 min
Histogram: 1 0 0 0 0 0 0 0 1 2
Total # of neighbors = 9801
Ave neighs/atom = 8.1675
Ave special neighs/atom = 0.5
Neighbor list builds = 4887
Dangerous builds = 0
Total wall time: 0:00:02

View File

@ -1,290 +0,0 @@
LAMMPS (28 Feb 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:88)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
1 by 1 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000271559 secs
read_data CPU = 0.00115585 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 8.39233e-05 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.799 | 3.799 | 3.799 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.47411013 0.67721272 0.057404514 1.2083323 1.3375852
100 0.45 0.73046745 0.054836584 1.234929 2.3196516
150 0.67521742 0.72402001 0.043490075 1.4421648 2.8744416
200 0.45 0.78481891 0.076931503 1.3113754 3.0412388
250 0.66479018 0.69790602 0.081075564 1.4432178 3.6917024
300 0.45 0.76820218 0.066727591 1.2845548 3.7861054
350 0.67619136 0.625715 0.072722727 1.3740656 4.2861621
400 0.45 0.68527759 0.090724527 1.2256271 4.4725214
450 0.56702844 0.64402767 0.080555563 1.2911391 4.7402211
500 0.45 0.64883009 0.078376672 1.1768318 4.7919294
550 0.564664 0.58260368 0.080779475 1.2275766 4.9855705
600 0.45 0.58193041 0.088386617 1.119942 5.131481
650 0.52110993 0.5415273 0.097683746 1.1598867 5.2500294
700 0.45 0.50856787 0.088471208 1.0466641 5.2550165
750 0.51510855 0.47441291 0.089429375 1.0785216 5.375763
800 0.45 0.49926696 0.085958476 1.0348504 5.4665914
850 0.50688494 0.46614429 0.088962292 1.0615691 5.556932
900 0.45 0.47785593 0.10150857 1.0289895 5.7765975
950 0.49590559 0.46050477 0.096404887 1.052402 5.8649245
1000 0.45 0.47691182 0.08808163 1.0146185 6.0177568
Loop time of 0.107201 on 1 procs for 1000 steps with 1200 atoms
Performance: 4029800.456 tau/day, 9328.242 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.072035 | 0.072035 | 0.072035 | 0.0 | 67.20
Bond | 0.0039918 | 0.0039918 | 0.0039918 | 0.0 | 3.72
Neigh | 0.016078 | 0.016078 | 0.016078 | 0.0 | 15.00
Comm | 0.0018375 | 0.0018375 | 0.0018375 | 0.0 | 1.71
Output | 0.00016379 | 0.00016379 | 0.00016379 | 0.0 | 0.15
Modify | 0.010665 | 0.010665 | 0.010665 | 0.0 | 9.95
Other | | 0.002429 | | | 2.27
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 195 ave 195 max 195 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 3136 ave 3136 max 3136 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 3136
Ave neighs/atom = 2.61333
Ave special neighs/atom = 0.5
Neighbor list builds = 92
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 1000
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
group solvent molecule 0
750 atoms in group solvent
group solute subtract all solvent
450 atoms in group solute
unfix 1
unfix 2
unfix 4
fix 1 solvent nve
fix 2 solvent temp/rescale 100 0.45 0.45 0.02 1.0
fix 5 solute rigid molecule langevin 0.45 0.45 0.5 112211
150 rigid bodies with 450 atoms
fix 4 all enforce2d
run 20000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.274 | 5.274 | 5.274 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.44603578 -1.7056163 0.08808163 -1.2555023 3.4039736
1000 0.46008168 -1.9040837 0.08808163 -1.4425691 0.93225457
2000 0.44520658 -1.9317253 0.08808163 -1.4822843 3.8192896
3000 0.43988556 -1.945898 0.08808163 -1.5007759 3.0371634
4000 0.4646519 -1.9753553 0.08808163 -1.5101312 -1.8041178
5000 0.4362993 -1.9763715 0.08808163 -1.5341603 1.5037284
6000 0.47007384 -1.9833154 0.08808163 -1.5136905 2.1227653
7000 0.44854623 -1.9914288 0.08808163 -1.5392772 3.9458099
8000 0.43841372 -1.9779603 0.08808163 -1.5340328 -4.5429769
9000 0.4518303 -1.9834387 0.08808163 -1.5286215 4.4230447
10000 0.43562904 -2.001471 0.08808163 -1.5598038 1.8919582
11000 0.44014575 -1.9820611 0.08808163 -1.5367278 -2.1189418
12000 0.44466956 -2.0134014 0.08808163 -1.5643963 -2.5218497
13000 0.45274369 -2.021443 0.08808163 -1.5658844 2.4795173
14000 0.44742645 -2.011108 0.08808163 -1.5598653 -0.74697767
15000 0.4674843 -2.024737 0.08808163 -1.5572139 -1.9539999
16000 0.45610154 -2.0401029 0.08808163 -1.5818189 -0.53082066
17000 0.44679292 -2.0365577 0.08808163 -1.5858291 -6.5040295
18000 0.44279107 -2.0500326 0.08808163 -1.6025522 -0.051597102
19000 0.45603993 -2.0306289 0.08808163 -1.5723948 1.0986608
20000 0.44519606 -2.0412229 0.08808163 -1.5917904 -1.0406746
Loop time of 3.68102 on 1 procs for 20000 steps with 1200 atoms
Performance: 2347175.802 tau/day, 5433.277 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.7349 | 1.7349 | 1.7349 | 0.0 | 47.13
Bond | 0.079483 | 0.079483 | 0.079483 | 0.0 | 2.16
Neigh | 0.49063 | 0.49063 | 0.49063 | 0.0 | 13.33
Comm | 0.049093 | 0.049093 | 0.049093 | 0.0 | 1.33
Output | 0.00022578 | 0.00022578 | 0.00022578 | 0.0 | 0.01
Modify | 1.273 | 1.273 | 1.273 | 0.0 | 34.58
Other | | 0.05369 | | | 1.46
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 395 ave 395 max 395 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 8915 ave 8915 max 8915 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8915
Ave neighs/atom = 7.42917
Ave special neighs/atom = 0.5
Neighbor list builds = 1580
Dangerous builds = 0
unfix 5
unfix 4
fix 5 solute rigid/small molecule langevin 0.45 0.45 0.5 112211
create bodies CPU = 0.00012517 secs
150 rigid bodies with 450 atoms
1.04536 = max distance from body owner to body atom
fix 4 all enforce2d
run 20000
Per MPI rank memory allocation (min/avg/max) = 8.633 | 8.633 | 8.633 Mbytes
Step Temp E_pair E_mol TotEng Press
20000 0.44519606 -2.0412229 0.08808163 -1.5917904 1.3058893
21000 0.4353376 -2.0483342 0.08808163 -1.6069035 0.53023317
22000 0.44034324 -2.0416876 0.08808163 -1.5961941 4.0327077
23000 0.4685403 -2.05295 0.08808163 -1.5845698 3.6792349
24000 0.44872075 -2.0320623 0.08808163 -1.579769 -2.0476923
25000 0.46829594 -2.0671408 0.08808163 -1.5989589 2.180811
26000 0.45257544 -2.0418792 0.08808163 -1.5864572 3.3924018
27000 0.44269664 -2.0409905 0.08808163 -1.5935868 -0.17012673
28000 0.46961216 -2.0552479 0.08808163 -1.5859978 -7.2870888
29000 0.46683129 -2.0438334 0.08808163 -1.5768404 3.0583141
30000 0.44262228 -2.036737 0.08808163 -1.5893937 0.087520915
31000 0.43517227 -2.0479672 0.08808163 -1.6066708 -0.3426009
32000 0.44543779 -2.0538031 0.08808163 -1.6041744 -0.2093148
33000 0.44629079 -2.0409901 0.08808163 -1.5906691 3.310113
34000 0.43058831 -2.0713827 0.08808163 -1.6338069 0.14128843
35000 0.44546512 -2.0427068 0.08808163 -1.593056 -3.1386697
36000 0.42971129 -2.0527435 0.08808163 -1.6158795 -2.7334963
37000 0.44707969 -2.0461803 0.08808163 -1.595219 -3.8777678
38000 0.43150818 -2.0435276 0.08808163 -1.6052052 0.2905487
39000 0.44463343 -2.0522113 0.08808163 -1.6032355 3.543123
40000 0.44582593 -2.052213 0.08808163 -1.6022693 1.1486536
Loop time of 3.69012 on 1 procs for 20000 steps with 1200 atoms
Performance: 2341388.948 tau/day, 5419.882 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.756 | 1.756 | 1.756 | 0.0 | 47.59
Bond | 0.079221 | 0.079221 | 0.079221 | 0.0 | 2.15
Neigh | 0.49085 | 0.49085 | 0.49085 | 0.0 | 13.30
Comm | 0.048317 | 0.048317 | 0.048317 | 0.0 | 1.31
Output | 0.0002315 | 0.0002315 | 0.0002315 | 0.0 | 0.01
Modify | 1.2616 | 1.2616 | 1.2616 | 0.0 | 34.19
Other | | 0.05386 | | | 1.46
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 393 ave 393 max 393 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 9091 ave 9091 max 9091 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9091
Ave neighs/atom = 7.57583
Ave special neighs/atom = 0.5
Neighbor list builds = 1582
Dangerous builds = 0
Total wall time: 0:00:07

View File

@ -1,290 +0,0 @@
LAMMPS (28 Feb 2019)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:88)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
2 by 2 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000175714 secs
read_data CPU = 0.00145626 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 7.22408e-05 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.758 | 3.85 | 4.126 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.47411013 0.67721272 0.057404514 1.2083323 1.3375852
100 0.45 0.73046745 0.054836584 1.234929 2.3196516
150 0.67521742 0.72402001 0.043490075 1.4421648 2.8744416
200 0.45 0.78481891 0.076931503 1.3113754 3.0412388
250 0.66479018 0.69790602 0.081075564 1.4432178 3.6917024
300 0.45 0.76820218 0.066727591 1.2845548 3.7861054
350 0.67619136 0.625715 0.072722727 1.3740656 4.2861621
400 0.45 0.68527759 0.090724527 1.2256271 4.4725214
450 0.56702844 0.64402767 0.080555563 1.2911391 4.7402211
500 0.45 0.64883009 0.078376672 1.1768318 4.7919294
550 0.564664 0.58260368 0.080779475 1.2275766 4.9855705
600 0.45 0.58193041 0.088386617 1.119942 5.131481
650 0.52110993 0.5415273 0.097683746 1.1598867 5.2500294
700 0.45 0.50856787 0.088471208 1.0466641 5.2550165
750 0.51510855 0.47441291 0.089429375 1.0785216 5.375763
800 0.45 0.49926696 0.085958476 1.0348504 5.4665914
850 0.50688494 0.46614429 0.088962292 1.0615691 5.556932
900 0.45 0.47785593 0.10150857 1.0289895 5.7765975
950 0.49590559 0.46050477 0.096404887 1.052402 5.8649245
1000 0.45 0.47691182 0.08808163 1.0146185 6.0177568
Loop time of 0.0389124 on 4 procs for 1000 steps with 1200 atoms
Performance: 11101855.138 tau/day, 25698.739 timesteps/s
95.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.016776 | 0.017405 | 0.018435 | 0.5 | 44.73
Bond | 0.0010033 | 0.0011995 | 0.0015519 | 0.6 | 3.08
Neigh | 0.0044944 | 0.0045093 | 0.0045218 | 0.0 | 11.59
Comm | 0.0080328 | 0.0093863 | 0.010242 | 0.9 | 24.12
Output | 0.00021577 | 0.00027579 | 0.00045323 | 0.0 | 0.71
Modify | 0.0034575 | 0.0036355 | 0.0040002 | 0.4 | 9.34
Other | | 0.002501 | | | 6.43
Nlocal: 300 ave 305 max 292 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Nghost: 100.25 ave 108 max 93 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 784 ave 815 max 739 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Total # of neighbors = 3136
Ave neighs/atom = 2.61333
Ave special neighs/atom = 0.5
Neighbor list builds = 92
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 1000
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
group solvent molecule 0
750 atoms in group solvent
group solute subtract all solvent
450 atoms in group solute
unfix 1
unfix 2
unfix 4
fix 1 solvent nve
fix 2 solvent temp/rescale 100 0.45 0.45 0.02 1.0
fix 5 solute rigid molecule langevin 0.45 0.45 0.5 112211
150 rigid bodies with 450 atoms
fix 4 all enforce2d
run 20000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.251 | 5.282 | 5.374 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.44603578 -1.7056163 0.08808163 -1.2555023 3.4039736
1000 0.46008163 -1.9040835 0.08808163 -1.4425689 0.93225869
2000 0.44943348 -1.9355135 0.08808163 -1.4826417 3.8399671
3000 0.4448437 -1.9480307 0.08808163 -1.4988842 2.5506553
4000 0.46013872 -1.9783821 0.08808163 -1.5168212 -1.8963215
5000 0.45520233 -1.9659462 0.08808163 -1.5083921 1.9238897
6000 0.44942049 -1.9663403 0.08808163 -1.513479 3.0633512
7000 0.45975758 -1.988462 0.08808163 -1.5272105 4.8267309
8000 0.45125238 -1.9913522 0.08808163 -1.5370041 -4.6644852
9000 0.45863606 -1.9792375 0.08808163 -1.5188962 4.3655071
10000 0.46264541 -1.9864611 0.08808163 -1.5228656 2.2176464
11000 0.45048361 -1.9907235 0.08808163 -1.5369994 -0.055360699
12000 0.44536719 -2.012875 0.08808163 -1.5633037 -0.2583823
13000 0.44212663 -2.0060111 0.08808163 -1.55907 3.3616171
14000 0.44984353 -2.0335408 0.08808163 -1.5803361 -0.21585645
15000 0.44896672 -2.0385265 0.08808163 -1.5860335 -4.6186206
16000 0.46694997 -2.032795 0.08808163 -1.5657056 0.53443281
17000 0.43208201 -2.0272255 0.08808163 -1.5884373 -6.5239975
18000 0.43281873 -2.0331268 0.08808163 -1.5937406 -0.048319943
19000 0.44704527 -2.0286742 0.08808163 -1.5777408 1.6356417
20000 0.44279735 -2.0443561 0.08808163 -1.5968706 -3.8337952
Loop time of 1.71924 on 4 procs for 20000 steps with 1200 atoms
Performance: 5025468.853 tau/day, 11633.030 timesteps/s
98.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.39864 | 0.40873 | 0.42192 | 1.6 | 23.77
Bond | 0.02118 | 0.021816 | 0.022785 | 0.4 | 1.27
Neigh | 0.13931 | 0.14031 | 0.14117 | 0.2 | 8.16
Comm | 0.13974 | 0.15328 | 0.16884 | 3.3 | 8.92
Output | 0.00026131 | 0.00044435 | 0.00099206 | 0.0 | 0.03
Modify | 0.93275 | 0.94138 | 0.95072 | 0.7 | 54.76
Other | | 0.05327 | | | 3.10
Nlocal: 300 ave 303 max 298 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Nghost: 218.5 ave 226 max 215 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Neighs: 2258.75 ave 2283 max 2216 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Total # of neighbors = 9035
Ave neighs/atom = 7.52917
Ave special neighs/atom = 0.5
Neighbor list builds = 1580
Dangerous builds = 0
unfix 5
unfix 4
fix 5 solute rigid/small molecule langevin 0.45 0.45 0.5 112211
create bodies CPU = 5.43594e-05 secs
150 rigid bodies with 450 atoms
0.916597 = max distance from body owner to body atom
fix 4 all enforce2d
run 20000
Per MPI rank memory allocation (min/avg/max) = 8.568 | 8.6 | 8.691 Mbytes
Step Temp E_pair E_mol TotEng Press
20000 0.44279735 -2.0443561 0.08808163 -1.5968706 -1.033643
21000 0.4529129 -2.049461 0.08808163 -1.5937651 0.93160285
22000 0.45039188 -2.0530092 0.08808163 -1.5993595 -0.10608965
23000 0.45261583 -2.0336042 0.08808163 -1.5781494 -2.5769871
24000 0.4608331 -2.0404645 0.08808163 -1.57834 3.1931675
25000 0.43479001 -2.0617104 0.08808163 -1.6207242 2.8190122
26000 0.47009651 -2.0754873 0.08808163 -1.605844 -0.9158501
27000 0.45002704 -2.0782104 0.08808163 -1.6248568 0.98629661
28000 0.45126136 -2.0592619 0.08808163 -1.6049065 0.03305448
29000 0.44355328 -2.0572858 0.08808163 -1.6091868 -6.0797989
30000 0.45053899 -2.0530953 0.08808163 -1.5993261 0.38382951
31000 0.46931923 -2.0718827 0.08808163 -1.6028703 2.2346891
32000 0.45348857 -2.0744024 0.08808163 -1.6182393 4.5028966
33000 0.44767742 -2.0597127 0.08808163 -1.6082662 -2.8021641
34000 0.45287544 -2.0857303 0.08808163 -1.6300648 -5.384091
35000 0.44743898 -2.0927246 0.08808163 -1.6414717 1.4800508
36000 0.45627028 -2.0720546 0.08808163 -1.6136336 -2.9961696
37000 0.4641334 -2.0701098 0.08808163 -1.6053065 8.4186854
38000 0.45922901 -2.0962331 0.08808163 -1.6354106 0.38361763
39000 0.4692834 -2.0573815 0.08808163 -1.5883982 -2.2177345
40000 0.46206931 -2.057851 0.08808163 -1.5947231 -1.0405727
Loop time of 1.25476 on 4 procs for 20000 steps with 1200 atoms
Performance: 6885775.862 tau/day, 15939.296 timesteps/s
98.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.40627 | 0.43037 | 0.45515 | 2.6 | 34.30
Bond | 0.020504 | 0.021573 | 0.022739 | 0.5 | 1.72
Neigh | 0.14337 | 0.14438 | 0.1453 | 0.2 | 11.51
Comm | 0.13776 | 0.16647 | 0.19351 | 5.0 | 13.27
Output | 0.00025082 | 0.00052994 | 0.0013635 | 0.0 | 0.04
Modify | 0.45467 | 0.45822 | 0.46259 | 0.5 | 36.52
Other | | 0.03321 | | | 2.65
Nlocal: 300 ave 304 max 293 min
Histogram: 1 0 0 0 0 1 0 0 0 2
Nghost: 215.25 ave 217 max 213 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Neighs: 2340 ave 2378 max 2290 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 9360
Ave neighs/atom = 7.8
Ave special neighs/atom = 0.5
Neighbor list builds = 1579
Dangerous builds = 0
Total wall time: 0:00:03

View File

@ -0,0 +1,260 @@
LAMMPS (29 Mar 2019)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
1 by 1 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000473022 secs
read_data CPU = 0.0024147 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.00022316 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 500
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.799 | 3.799 | 3.799 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.54981866 0.93548899 0.068440043 1.5532895 1.9232786
100 0.45 0.99659327 0.079228519 1.5254468 3.2135679
150 0.86965411 0.90456016 0.07493355 1.8484231 4.3821925
200 0.45 1.01454 0.10663502 1.5708 4.7598476
250 0.79636561 0.82567712 0.12105337 1.7424325 5.4983899
300 0.45 0.86475538 0.11819875 1.4325791 5.8554758
350 0.72135464 0.70693069 0.10912636 1.5368106 6.0388247
400 0.45 0.75067331 0.14165013 1.3419484 6.3840708
450 0.64839221 0.62402486 0.14173679 1.4136135 6.4791009
500 0.45 0.66669513 0.13695201 1.2532721 6.807146
Loop time of 0.103162 on 1 procs for 500 steps with 1200 atoms
Performance: 2093802.885 tau/day, 4846.766 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.068308 | 0.068308 | 0.068308 | 0.0 | 66.21
Bond | 0.004235 | 0.004235 | 0.004235 | 0.0 | 4.11
Neigh | 0.014069 | 0.014069 | 0.014069 | 0.0 | 13.64
Comm | 0.0019219 | 0.0019219 | 0.0019219 | 0.0 | 1.86
Output | 0.00017262 | 0.00017262 | 0.00017262 | 0.0 | 0.17
Modify | 0.011728 | 0.011728 | 0.011728 | 0.0 | 11.37
Other | | 0.002726 | | | 2.64
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 197 ave 197 max 197 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 3094 ave 3094 max 3094 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 3094
Ave neighs/atom = 2.57833
Ave special neighs/atom = 0.5
Neighbor list builds = 52
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 50
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
group solvent molecule 0
750 atoms in group solvent
group solute subtract all solvent
450 atoms in group solute
unfix 1
unfix 2
unfix 4
fix 1 solvent nve
fix 2 solvent temp/rescale 100 0.45 0.45 0.02 1.0
fix 5 solute rigid molecule langevin 0.45 0.45 0.5 112211
150 rigid bodies with 450 atoms
fix 4 all enforce2d
run 500
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.274 | 5.274 | 5.274 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45318168 -1.3753652 0.13695201 -0.8705807 1.975423
50 0.77871641 -1.6955252 0.13695201 -0.92651507 0.64222539
100 0.5336062 -1.7124572 0.13695201 -1.1423948 -0.11959696
150 0.58789067 -1.7926109 0.13695201 -1.1784877 1.2592743
200 0.47864796 -1.8040298 0.13695201 -1.2785752 3.6739793
250 0.51124651 -1.8614797 0.13695201 -1.309566 2.5817722
300 0.45695639 -1.8708384 0.13695201 -1.3629901 3.0833794
350 0.477504 -1.8924359 0.13695201 -1.3679098 -5.1605926
400 0.45328205 -1.87754 0.13695201 -1.372674 -4.0355858
450 0.47465031 -1.9071924 0.13695201 -1.3849826 3.1949617
500 0.45533691 -1.9072316 0.13695201 -1.4006978 0.48079061
Loop time of 0.178806 on 1 procs for 500 steps with 1200 atoms
Performance: 1208012.705 tau/day, 2796.326 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.086131 | 0.086131 | 0.086131 | 0.0 | 48.17
Bond | 0.0042472 | 0.0042472 | 0.0042472 | 0.0 | 2.38
Neigh | 0.021317 | 0.021317 | 0.021317 | 0.0 | 11.92
Comm | 0.0025985 | 0.0025985 | 0.0025985 | 0.0 | 1.45
Output | 0.000175 | 0.000175 | 0.000175 | 0.0 | 0.10
Modify | 0.061408 | 0.061408 | 0.061408 | 0.0 | 34.34
Other | | 0.00293 | | | 1.64
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 416 ave 416 max 416 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 8769 ave 8769 max 8769 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8769
Ave neighs/atom = 7.3075
Ave special neighs/atom = 0.5
Neighbor list builds = 47
Dangerous builds = 2
unfix 2
unfix 4
unfix 5
fix 5 solute rigid/small molecule
create bodies CPU = 0.00015378 secs
150 rigid bodies with 450 atoms
1.30435 = max distance from body owner to body atom
fix 4 all enforce2d
run 500
Per MPI rank memory allocation (min/avg/max) = 8.64 | 8.64 | 8.64 Mbytes
Step Temp E_pair E_mol TotEng Press
500 0.45533691 -1.9072316 0.13695201 -1.4006978 2.4545793
550 0.45627282 -1.912409 0.13695201 -1.4051155 2.1845065
600 0.44734553 -1.8890695 0.13695201 -1.389022 2.3458965
650 0.46444648 -1.9042462 0.13695201 -1.3903185 2.1609319
700 0.47113236 -1.8977576 0.13695201 -1.3784032 2.2420351
750 0.48554548 -1.9253545 0.13695201 -1.3943015 2.143907
800 0.46350091 -1.8865749 0.13695201 -1.3734146 2.294431
850 0.4766104 -1.9094039 0.13695201 -1.3856031 2.2077157
900 0.48988467 -1.9051538 0.13695201 -1.3705787 2.0107056
950 0.48351943 -1.9162485 0.13695201 -1.3868399 2.1891332
1000 0.49033701 -1.9115165 0.13695201 -1.3765742 2.1508141
Loop time of 0.166502 on 1 procs for 500 steps with 1200 atoms
Performance: 1297278.008 tau/day, 3002.958 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.085767 | 0.085767 | 0.085767 | 0.0 | 51.51
Bond | 0.0042562 | 0.0042562 | 0.0042562 | 0.0 | 2.56
Neigh | 0.018039 | 0.018039 | 0.018039 | 0.0 | 10.83
Comm | 0.0024002 | 0.0024002 | 0.0024002 | 0.0 | 1.44
Output | 0.00018239 | 0.00018239 | 0.00018239 | 0.0 | 0.11
Modify | 0.052717 | 0.052717 | 0.052717 | 0.0 | 31.66
Other | | 0.003141 | | | 1.89
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 415 ave 415 max 415 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 8743 ave 8743 max 8743 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8743
Ave neighs/atom = 7.28583
Ave special neighs/atom = 0.5
Neighbor list builds = 40
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,260 @@
LAMMPS (29 Mar 2019)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
2 by 2 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000422001 secs
read_data CPU = 0.00473404 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000183344 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 500
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.758 | 3.85 | 4.126 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.54981866 0.93548899 0.068440043 1.5532895 1.9232786
100 0.45 0.99659327 0.079228519 1.5254468 3.2135679
150 0.86965411 0.90456016 0.07493355 1.8484231 4.3821925
200 0.45 1.01454 0.10663502 1.5708 4.7598476
250 0.79636561 0.82567712 0.12105337 1.7424325 5.4983899
300 0.45 0.86475538 0.11819875 1.4325791 5.8554758
350 0.72135464 0.70693069 0.10912636 1.5368106 6.0388247
400 0.45 0.75067331 0.14165013 1.3419484 6.3840708
450 0.64839221 0.62402486 0.14173679 1.4136135 6.4791009
500 0.45 0.66669513 0.13695201 1.2532721 6.807146
Loop time of 0.0426326 on 4 procs for 500 steps with 1200 atoms
Performance: 5066547.720 tau/day, 11728.120 timesteps/s
98.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.016784 | 0.019254 | 0.022154 | 1.5 | 45.16
Bond | 0.0010612 | 0.0012558 | 0.0014153 | 0.4 | 2.95
Neigh | 0.0046048 | 0.0046697 | 0.0047245 | 0.1 | 10.95
Comm | 0.0064592 | 0.0097114 | 0.012527 | 2.4 | 22.78
Output | 0.00022507 | 0.00026393 | 0.00033951 | 0.0 | 0.62
Modify | 0.0041659 | 0.0048084 | 0.0053945 | 0.8 | 11.28
Other | | 0.002669 | | | 6.26
Nlocal: 300 ave 304 max 292 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Nghost: 103.5 ave 108 max 98 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Neighs: 773.5 ave 792 max 735 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Total # of neighbors = 3094
Ave neighs/atom = 2.57833
Ave special neighs/atom = 0.5
Neighbor list builds = 52
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 50
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
group solvent molecule 0
750 atoms in group solvent
group solute subtract all solvent
450 atoms in group solute
unfix 1
unfix 2
unfix 4
fix 1 solvent nve
fix 2 solvent temp/rescale 100 0.45 0.45 0.02 1.0
fix 5 solute rigid molecule langevin 0.45 0.45 0.5 112211
150 rigid bodies with 450 atoms
fix 4 all enforce2d
run 500
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 5.251 | 5.282 | 5.374 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45318168 -1.3753652 0.13695201 -0.8705807 1.975423
50 0.77871641 -1.6955252 0.13695201 -0.92651507 0.64222539
100 0.5336062 -1.7124572 0.13695201 -1.1423948 -0.11959696
150 0.58789067 -1.7926109 0.13695201 -1.1784877 1.2592743
200 0.47864796 -1.8040298 0.13695201 -1.2785752 3.6739793
250 0.51124651 -1.8614797 0.13695201 -1.309566 2.5817722
300 0.45695639 -1.8708384 0.13695201 -1.3629901 3.0833794
350 0.477504 -1.8924359 0.13695201 -1.3679098 -5.1605926
400 0.45328205 -1.87754 0.13695201 -1.372674 -4.0355858
450 0.47465031 -1.9071924 0.13695201 -1.3849826 3.1949617
500 0.45533691 -1.9072316 0.13695201 -1.4006978 0.48079061
Loop time of 0.0887392 on 4 procs for 500 steps with 1200 atoms
Performance: 2434100.210 tau/day, 5634.491 timesteps/s
98.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.022611 | 0.022839 | 0.023082 | 0.1 | 25.74
Bond | 0.0010793 | 0.0011569 | 0.0012515 | 0.2 | 1.30
Neigh | 0.0064609 | 0.0064996 | 0.0065265 | 0.0 | 7.32
Comm | 0.0071712 | 0.0073687 | 0.0077734 | 0.3 | 8.30
Output | 0.00023389 | 0.00025356 | 0.00030327 | 0.0 | 0.29
Modify | 0.047258 | 0.047683 | 0.048503 | 0.2 | 53.73
Other | | 0.002938 | | | 3.31
Nlocal: 300 ave 309 max 291 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Nghost: 218.75 ave 223 max 216 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Neighs: 2192.25 ave 2251 max 2113 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 8769
Ave neighs/atom = 7.3075
Ave special neighs/atom = 0.5
Neighbor list builds = 47
Dangerous builds = 2
unfix 2
unfix 4
unfix 5
fix 5 solute rigid/small molecule
create bodies CPU = 7.70092e-05 secs
150 rigid bodies with 450 atoms
1.30435 = max distance from body owner to body atom
fix 4 all enforce2d
run 500
Per MPI rank memory allocation (min/avg/max) = 8.565 | 8.597 | 8.69 Mbytes
Step Temp E_pair E_mol TotEng Press
500 0.45533691 -1.9072316 0.13695201 -1.4006978 2.4545793
550 0.45627282 -1.912409 0.13695201 -1.4051155 2.1845065
600 0.44734553 -1.8890695 0.13695201 -1.389022 2.3458965
650 0.46444648 -1.9042462 0.13695201 -1.3903185 2.1609319
700 0.47113236 -1.8977576 0.13695201 -1.3784032 2.2420351
750 0.48554548 -1.9253545 0.13695201 -1.3943015 2.143907
800 0.46350091 -1.8865749 0.13695201 -1.3734146 2.294431
850 0.4766104 -1.9094039 0.13695201 -1.3856031 2.2077157
900 0.48988467 -1.9051538 0.13695201 -1.3705787 2.0107056
950 0.48351942 -1.9162485 0.13695201 -1.3868399 2.1891332
1000 0.490337 -1.9115164 0.13695201 -1.3765742 2.1508141
Loop time of 0.0588261 on 4 procs for 500 steps with 1200 atoms
Performance: 3671840.233 tau/day, 8499.630 timesteps/s
98.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.022407 | 0.022631 | 0.0229 | 0.1 | 38.47
Bond | 0.0010669 | 0.0011355 | 0.0012124 | 0.2 | 1.93
Neigh | 0.0052333 | 0.00528 | 0.0053182 | 0.0 | 8.98
Comm | 0.0063677 | 0.0066406 | 0.0068488 | 0.2 | 11.29
Output | 0.00023055 | 0.00024778 | 0.00028086 | 0.0 | 0.42
Modify | 0.020577 | 0.020651 | 0.020834 | 0.1 | 35.11
Other | | 0.00224 | | | 3.81
Nlocal: 300 ave 303 max 295 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Nghost: 219 ave 224 max 215 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 2185.75 ave 2244 max 2143 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Total # of neighbors = 8743
Ave neighs/atom = 7.28583
Ave special neighs/atom = 0.5
Neighbor list builds = 40
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,218 @@
LAMMPS (29 Mar 2019)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
1 by 1 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.00037837 secs
read_data CPU = 0.00206876 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000177383 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.799 | 3.799 | 3.799 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.47411013 0.67721272 0.057404514 1.2083323 1.3375852
100 0.45 0.73046745 0.054836584 1.234929 2.3196516
150 0.67521742 0.72402001 0.043490075 1.4421648 2.8744416
200 0.45 0.78481891 0.076931503 1.3113754 3.0412388
250 0.66479018 0.69790602 0.081075564 1.4432178 3.6917024
300 0.45 0.76820218 0.066727591 1.2845548 3.7861054
350 0.67619136 0.625715 0.072722727 1.3740656 4.2861621
400 0.45 0.68527759 0.090724527 1.2256271 4.4725214
450 0.56702844 0.64402767 0.080555563 1.2911391 4.7402211
500 0.45 0.64883009 0.078376672 1.1768318 4.7919294
550 0.564664 0.58260368 0.080779475 1.2275766 4.9855705
600 0.45 0.58193041 0.088386617 1.119942 5.131481
650 0.52110993 0.5415273 0.097683746 1.1598867 5.2500294
700 0.45 0.50856787 0.088471208 1.0466641 5.2550165
750 0.51510855 0.47441291 0.089429375 1.0785216 5.375763
800 0.45 0.49926696 0.085958476 1.0348504 5.4665914
850 0.50688494 0.46614429 0.088962292 1.0615691 5.556932
900 0.45 0.47785593 0.10150857 1.0289895 5.7765975
950 0.49590559 0.46050477 0.096404887 1.052402 5.8649245
1000 0.45 0.47691182 0.08808163 1.0146185 6.0177568
Loop time of 0.208895 on 1 procs for 1000 steps with 1200 atoms
Performance: 2068027.282 tau/day, 4787.100 timesteps/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.14142 | 0.14142 | 0.14142 | 0.0 | 67.70
Bond | 0.008441 | 0.008441 | 0.008441 | 0.0 | 4.04
Neigh | 0.025716 | 0.025716 | 0.025716 | 0.0 | 12.31
Comm | 0.0036864 | 0.0036864 | 0.0036864 | 0.0 | 1.76
Output | 0.0003562 | 0.0003562 | 0.0003562 | 0.0 | 0.17
Modify | 0.023699 | 0.023699 | 0.023699 | 0.0 | 11.35
Other | | 0.00558 | | | 2.67
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 195 ave 195 max 195 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 3136 ave 3136 max 3136 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 3136
Ave neighs/atom = 2.61333
Ave special neighs/atom = 0.5
Neighbor list builds = 92
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 50
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.024 | 4.024 | 4.024 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 -1.7056163 0.08808163 -1.1679097 3.9431686
50 0.59734982 -1.8103783 0.076066922 -1.1374593 3.2770557
100 0.45 -1.8347112 0.093132329 -1.2919539 3.024661
150 0.51924311 -1.8943977 0.076004124 -1.2995832 2.5570373
200 0.45 -1.8918672 0.082422107 -1.3598201 2.5629655
250 0.50281134 -1.920406 0.074011331 -1.3440023 2.3518682
300 0.45 -1.9351047 0.075337265 -1.4101424 2.3249947
350 0.47650026 -1.9313687 0.072115117 -1.3831504 2.1987532
400 0.45 -1.9554318 0.081603939 -1.4242028 2.0787066
450 0.47220236 -1.9468502 0.065625624 -1.4094157 2.0984288
500 0.4684673 -1.9444333 0.076696283 -1.3996601 2.0528682
550 0.47683128 -1.958676 0.070589719 -1.4116523 2.0856022
600 0.46851243 -1.9338267 0.07060548 -1.3950992 2.26405
650 0.46874142 -1.9462493 0.069134685 -1.4087638 2.1070263
700 0.46437384 -1.9309953 0.071977522 -1.3950309 2.2256923
750 0.47326225 -1.9484255 0.075435845 -1.4001218 2.0880254
800 0.45 -1.9646005 0.064159585 -1.4508159 2.0612696
850 0.46748307 -1.970559 0.060384874 -1.4430806 1.9472879
900 0.46909484 -1.953723 0.062470295 -1.4225488 2.0222909
950 0.45631531 -1.9387753 0.067536568 -1.4153037 2.0638421
1000 0.45 -1.9727646 0.058607721 -1.4645318 1.9982315
Loop time of 0.252254 on 1 procs for 1000 steps with 1200 atoms
Performance: 1712557.882 tau/day, 3964.254 timesteps/s
99.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.17177 | 0.17177 | 0.17177 | 0.0 | 68.09
Bond | 0.0084555 | 0.0084555 | 0.0084555 | 0.0 | 3.35
Neigh | 0.03991 | 0.03991 | 0.03991 | 0.0 | 15.82
Comm | 0.0049119 | 0.0049119 | 0.0049119 | 0.0 | 1.95
Output | 0.00039077 | 0.00039077 | 0.00039077 | 0.0 | 0.15
Modify | 0.021131 | 0.021131 | 0.021131 | 0.0 | 8.38
Other | | 0.005685 | | | 2.25
Nlocal: 1200 ave 1200 max 1200 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 415 ave 415 max 415 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 8586 ave 8586 max 8586 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8586
Ave neighs/atom = 7.155
Ave special neighs/atom = 0.5
Neighbor list builds = 86
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -0,0 +1,218 @@
LAMMPS (29 Mar 2019)
using 1 OpenMP thread(s) per MPI task
# 2d micelle simulation
dimension 2
neighbor 0.3 bin
neigh_modify delay 5
atom_style bond
# Soft potential push-off
read_data data.micelle
orthogonal box = (0 0 -0.1) to (35.8569 35.8569 0.1)
2 by 2 by 1 MPI processor grid
reading atoms ...
1200 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
300 bonds
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000413656 secs
read_data CPU = 0.00487924 secs
special_bonds fene
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.000178576 secs
pair_style soft 1.12246
pair_coeff * * 0.0 1.12246
bond_style harmonic
bond_coeff 1 50.0 0.75
velocity all create 0.45 2349852
variable prefactor equal ramp(1.0,20.0)
fix 1 all nve
fix 2 all temp/rescale 100 0.45 0.45 0.02 1.0
fix 3 all adapt 1 pair soft a * * v_prefactor
fix 4 all enforce2d
thermo 50
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.42246
ghost atom cutoff = 1.42246
binsize = 0.71123, bins = 51 51 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair soft, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.758 | 3.85 | 4.126 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 0.40003481 2.2200223e-06 0.84966203 0.78952518
50 0.47411013 0.67721272 0.057404514 1.2083323 1.3375852
100 0.45 0.73046745 0.054836584 1.234929 2.3196516
150 0.67521742 0.72402001 0.043490075 1.4421648 2.8744416
200 0.45 0.78481891 0.076931503 1.3113754 3.0412388
250 0.66479018 0.69790602 0.081075564 1.4432178 3.6917024
300 0.45 0.76820218 0.066727591 1.2845548 3.7861054
350 0.67619136 0.625715 0.072722727 1.3740656 4.2861621
400 0.45 0.68527759 0.090724527 1.2256271 4.4725214
450 0.56702844 0.64402767 0.080555563 1.2911391 4.7402211
500 0.45 0.64883009 0.078376672 1.1768318 4.7919294
550 0.564664 0.58260368 0.080779475 1.2275766 4.9855705
600 0.45 0.58193041 0.088386617 1.119942 5.131481
650 0.52110993 0.5415273 0.097683746 1.1598867 5.2500294
700 0.45 0.50856787 0.088471208 1.0466641 5.2550165
750 0.51510855 0.47441291 0.089429375 1.0785216 5.375763
800 0.45 0.49926696 0.085958476 1.0348504 5.4665914
850 0.50688494 0.46614429 0.088962292 1.0615691 5.556932
900 0.45 0.47785593 0.10150857 1.0289895 5.7765975
950 0.49590559 0.46050477 0.096404887 1.052402 5.8649245
1000 0.45 0.47691182 0.08808163 1.0146185 6.0177568
Loop time of 0.0906248 on 4 procs for 1000 steps with 1200 atoms
Performance: 4766906.584 tau/day, 11034.506 timesteps/s
98.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.036572 | 0.039266 | 0.041216 | 1.0 | 43.33
Bond | 0.0023205 | 0.0024512 | 0.0025697 | 0.2 | 2.70
Neigh | 0.0088909 | 0.0089301 | 0.0089679 | 0.0 | 9.85
Comm | 0.022308 | 0.024047 | 0.027175 | 1.3 | 26.53
Output | 0.00057411 | 0.00061274 | 0.00071025 | 0.0 | 0.68
Modify | 0.0083182 | 0.0092374 | 0.0098341 | 0.6 | 10.19
Other | | 0.006081 | | | 6.71
Nlocal: 300 ave 305 max 292 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Nghost: 100.25 ave 108 max 93 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 784 ave 815 max 739 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Total # of neighbors = 3136
Ave neighs/atom = 2.61333
Ave special neighs/atom = 0.5
Neighbor list builds = 92
Dangerous builds = 0
unfix 3
# Main run
pair_style lj/cut 2.5
# solvent/head - full-size and long-range
pair_coeff 1 1 1.0 1.0 2.5
pair_coeff 2 2 1.0 1.0 2.5
pair_coeff 1 2 1.0 1.0 2.5
# tail/tail - size-averaged and long-range
pair_coeff 3 3 1.0 0.75 2.5
pair_coeff 4 4 1.0 0.50 2.5
pair_coeff 3 4 1.0 0.67 2.5
# solvent/tail - full-size and repulsive
pair_coeff 1 3 1.0 1.0 1.12246
pair_coeff 1 4 1.0 1.0 1.12246
# head/tail - size-averaged and repulsive
pair_coeff 2 3 1.0 0.88 1.12246
pair_coeff 2 4 1.0 0.75 1.12246
thermo 50
#dump 1 all atom 2000 dump.micelle
#dump 2 all image 2000 image.*.jpg type type zoom 1.6
#dump_modify 2 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
#dump 3 all movie 2000 movie.mpg type type zoom 1.6
#dump_modify 3 pad 5 adiam 1 0.5 adiam 2 1.5 adiam 3 1.0 adiam 4 0.75
reset_timestep 0
run 1000
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 26 26 1
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/2d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.001 | 4.032 | 4.124 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.45 -1.7056163 0.08808163 -1.1679097 3.9431686
50 0.59734982 -1.8103783 0.076066922 -1.1374593 3.2770557
100 0.45 -1.8347112 0.093132329 -1.2919539 3.024661
150 0.51924311 -1.8943977 0.076004124 -1.2995832 2.5570373
200 0.45 -1.8918672 0.082422107 -1.3598201 2.5629655
250 0.50281134 -1.920406 0.074011331 -1.3440023 2.3518682
300 0.45 -1.9351047 0.075337265 -1.4101424 2.3249947
350 0.47650026 -1.9313687 0.072115117 -1.3831504 2.1987532
400 0.45 -1.9554318 0.081603939 -1.4242028 2.0787066
450 0.47220236 -1.9468502 0.065625625 -1.4094157 2.0984288
500 0.4684673 -1.9444333 0.076696285 -1.3996601 2.0528682
550 0.47683128 -1.958676 0.070589721 -1.4116523 2.0856023
600 0.46851245 -1.9338267 0.070605469 -1.3950992 2.26405
650 0.46874143 -1.9462493 0.069134686 -1.4087638 2.1070262
700 0.4643739 -1.9309953 0.071977511 -1.3950309 2.225692
750 0.47326259 -1.9484258 0.075435808 -1.4001218 2.0880235
800 0.45 -1.9646003 0.06415956 -1.4508158 2.0612703
850 0.46748278 -1.9705588 0.06038513 -1.4430804 1.9472884
900 0.46909438 -1.9537221 0.062470305 -1.4225483 2.0223008
950 0.45631508 -1.9387742 0.067536066 -1.4153033 2.063854
1000 0.45 -1.9727651 0.058608085 -1.464532 1.9982447
Loop time of 0.0878521 on 4 procs for 1000 steps with 1200 atoms
Performance: 4917357.613 tau/day, 11382.772 timesteps/s
99.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.043517 | 0.044455 | 0.046903 | 0.7 | 50.60
Bond | 0.0020199 | 0.0022303 | 0.0024347 | 0.4 | 2.54
Neigh | 0.012207 | 0.012335 | 0.012512 | 0.1 | 14.04
Comm | 0.014938 | 0.018265 | 0.020068 | 1.5 | 20.79
Output | 0.00061369 | 0.00064814 | 0.00073504 | 0.0 | 0.74
Modify | 0.0052264 | 0.0053691 | 0.0055039 | 0.2 | 6.11
Other | | 0.00455 | | | 5.18
Nlocal: 300 ave 305 max 296 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Nghost: 219.5 ave 228 max 214 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Neighs: 2146.5 ave 2201 max 2114 min
Histogram: 1 1 0 1 0 0 0 0 0 1
Total # of neighbors = 8586
Ave neighs/atom = 7.155
Ave special neighs/atom = 0.5
Neighbor list builds = 86
Dangerous builds = 0
Total wall time: 0:00:00

View File

@ -12,7 +12,11 @@
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

View File

@ -14,7 +14,11 @@
// C style library interface to CSlib class
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

View File

@ -12,7 +12,11 @@
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <cstdio>
#include <cstring>
#include <cstdlib>

View File

@ -15,7 +15,11 @@
#ifndef MSG_H
#define MSG_H
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
namespace CSLIB_NS {
@ -37,7 +41,7 @@ class Msg {
int nfield;
int *fieldID,*fieldtype,*fieldlen;
int lengths[2];
void init(int);
void allocate(int, int &, int *&, int, int &, char *&);
void *smalloc(int);

View File

@ -12,7 +12,11 @@
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

View File

@ -12,7 +12,11 @@
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <string.h>
#include <stdlib.h>
#include <stdint.h>

View File

@ -12,7 +12,11 @@
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <string.h>
#include <stdlib.h>
#include <stdint.h>

View File

@ -12,7 +12,11 @@
See the README file in the top-level CSlib directory.
------------------------------------------------------------------------- */
#ifdef MPI_YES
#include <mpi.h>
#else
#include <mpi_dummy.h>
#endif
#include <zmq.h>
#include <string.h>
#include <stdlib.h>

View File

@ -872,16 +872,16 @@ void PairGranular::coeff(int narg, char **arg)
if (iarg + 4 >= narg)
error->all(FLERR,"Illegal pair_coeff command, "
"not enough parameters provided for twist model");
twist_model_one = TWIST_SDS;
twist_history = 1;
// kt and gammat and friction coeff
twist_coeffs_one[0] = force->numeric(FLERR,arg[iarg+2]);
twist_coeffs_one[1] = force->numeric(FLERR,arg[iarg+3]);
twist_coeffs_one[2] = force->numeric(FLERR,arg[iarg+4]);
iarg += 5;
twist_model_one = TWIST_SDS;
twist_history = 1;
// kt and gammat and friction coeff
twist_coeffs_one[0] = force->numeric(FLERR,arg[iarg+2]);
twist_coeffs_one[1] = force->numeric(FLERR,arg[iarg+3]);
twist_coeffs_one[2] = force->numeric(FLERR,arg[iarg+4]);
iarg += 5;
} else {
error->all(FLERR, "Illegal pair_coeff command, "
"twisting friction model not recognized");
error->all(FLERR, "Illegal pair_coeff command, "
"twisting friction model not recognized");
}
} else if (strcmp(arg[iarg], "cutoff") == 0) {
if (iarg + 1 >= narg)

View File

@ -36,10 +36,12 @@ class DomainKokkos : public Domain {
void image_flip(int, int, int);
void x2lamda(int);
void lamda2x(int);
// these lines bring in the x2lamda signatures from Domain
// that are not overloaded here
using Domain::x2lamda;
using Domain::lamda2x;
// forward remaining x2lamda() and lambda2x() variants to parent class
void x2lamda(double *a, double *b) { Domain::x2lamda(a,b); }
void lamda2x(double *a, double *b) { Domain::lamda2x(a,b); }
void x2lamda(double *a, double *b, double *c, double *d) {
Domain::x2lamda(a,b,c,d);
}
int closest_image(const int, int) const;

View File

@ -36,6 +36,10 @@
#include "atom_kokkos.h"
#include "kokkos.h"
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace LAMMPS_NS;
using namespace MathConst;
using namespace MathSpecialKokkos;

View File

@ -447,23 +447,23 @@ int PairReaxCKokkos<DeviceType>::Init_Lookup_Tables()
num_atom_types = atom->ntypes;
dr = control->nonb_cut / control->tabulate;
h = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:h", world );
smalloc( control->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:h");
fh = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fh", world );
smalloc( control->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fh");
fvdw = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fvdw", world );
smalloc( control->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fvdw");
fCEvd = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fCEvd", world );
smalloc( control->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fCEvd");
fele = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fele", world );
smalloc( control->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fele");
fCEclmb = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fCEclmb", world );
smalloc( control->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fCEclmb");
LR = (LR_lookup_table**)
scalloc( num_atom_types+1, sizeof(LR_lookup_table*), "lookup:LR", world );
scalloc( control->error_ptr, num_atom_types+1, sizeof(LR_lookup_table*), "lookup:LR");
for( i = 0; i < num_atom_types+1; ++i )
LR[i] = (LR_lookup_table*)
scalloc( num_atom_types+1, sizeof(LR_lookup_table), "lookup:LR[i]", world );
scalloc( control->error_ptr, num_atom_types+1, sizeof(LR_lookup_table), "lookup:LR[i]");
for( i = 1; i <= num_atom_types; ++i ) {
for( j = i; j <= num_atom_types; ++j ) {
@ -473,22 +473,18 @@ int PairReaxCKokkos<DeviceType>::Init_Lookup_Tables()
LR[i][j].dx = dr;
LR[i][j].inv_dx = control->tabulate / control->nonb_cut;
LR[i][j].y = (LR_data*)
smalloc( LR[i][j].n * sizeof(LR_data), "lookup:LR[i,j].y", world );
smalloc( control->error_ptr, LR[i][j].n * sizeof(LR_data), "lookup:LR[i,j].y");
LR[i][j].H = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].H" ,
world );
smalloc( control->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].H");
LR[i][j].vdW = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].vdW",
world);
smalloc( control->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].vdW");
LR[i][j].CEvd = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].CEvd",
world);
smalloc( control->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].CEvd");
LR[i][j].ele = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].ele",
world );
smalloc( control->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].ele");
LR[i][j].CEclmb = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),
"lookup:LR[i,j].CEclmb", world );
smalloc( control->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),
"lookup:LR[i,j].CEclmb");
for( r = 1; r <= control->tabulate; ++r ) {
LR_vdW_Coulomb(i, j, r * dr, &(LR[i][j].y[r]) );
@ -512,24 +508,20 @@ int PairReaxCKokkos<DeviceType>::Init_Lookup_Tables()
vlast_vdw = fCEvd[r-1];
vlast_ele = fele[r-1];
Natural_Cubic_Spline( &h[1], &fh[1],
&(LR[i][j].H[1]), control->tabulate+1, world );
Natural_Cubic_Spline( control->error_ptr, &h[1], &fh[1],
&(LR[i][j].H[1]), control->tabulate+1 );
Complete_Cubic_Spline( &h[1], &fvdw[1], v0_vdw, vlast_vdw,
&(LR[i][j].vdW[1]), control->tabulate+1,
world );
Complete_Cubic_Spline( control->error_ptr, &h[1], &fvdw[1], v0_vdw, vlast_vdw,
&(LR[i][j].vdW[1]), control->tabulate+1 );
Natural_Cubic_Spline( &h[1], &fCEvd[1],
&(LR[i][j].CEvd[1]), control->tabulate+1,
world );
Natural_Cubic_Spline( control->error_ptr, &h[1], &fCEvd[1],
&(LR[i][j].CEvd[1]), control->tabulate+1 );
Complete_Cubic_Spline( &h[1], &fele[1], v0_ele, vlast_ele,
&(LR[i][j].ele[1]), control->tabulate+1,
world );
Complete_Cubic_Spline( control->error_ptr, &h[1], &fele[1], v0_ele, vlast_ele,
&(LR[i][j].ele[1]), control->tabulate+1 );
Natural_Cubic_Spline( &h[1], &fCEclmb[1],
&(LR[i][j].CEclmb[1]), control->tabulate+1,
world );
Natural_Cubic_Spline( control->error_ptr, &h[1], &fCEclmb[1],
&(LR[i][j].CEclmb[1]), control->tabulate+1 );
}
}
free(h);
@ -555,16 +547,16 @@ void PairReaxCKokkos<DeviceType>::Deallocate_Lookup_Tables()
for( i = 0; i <= ntypes; ++i ) {
for( j = i; j <= ntypes; ++j )
if (LR[i][j].n) {
sfree( LR[i][j].y, "LR[i,j].y" );
sfree( LR[i][j].H, "LR[i,j].H" );
sfree( LR[i][j].vdW, "LR[i,j].vdW" );
sfree( LR[i][j].CEvd, "LR[i,j].CEvd" );
sfree( LR[i][j].ele, "LR[i,j].ele" );
sfree( LR[i][j].CEclmb, "LR[i,j].CEclmb" );
sfree( control->error_ptr, LR[i][j].y, "LR[i,j].y" );
sfree( control->error_ptr, LR[i][j].H, "LR[i,j].H" );
sfree( control->error_ptr, LR[i][j].vdW, "LR[i,j].vdW" );
sfree( control->error_ptr, LR[i][j].CEvd, "LR[i,j].CEvd" );
sfree( control->error_ptr, LR[i][j].ele, "LR[i,j].ele" );
sfree( control->error_ptr, LR[i][j].CEclmb, "LR[i,j].CEclmb" );
}
sfree( LR[i], "LR[i]" );
sfree( control->error_ptr, LR[i], "LR[i]" );
}
sfree( LR, "LR" );
sfree( control->error_ptr, LR, "LR" );
}
/* ---------------------------------------------------------------------- */

View File

@ -32,7 +32,7 @@
#define MAXLINE 1024
#define MAXWORD 3
using namespace LAMMPS_NS;
namespace LAMMPS_NS {
// Outstanding issues with quadratic term
// 1. there seems to a problem with compute_optimized energy calc
@ -674,3 +674,4 @@ double PairSNAPKokkos<DeviceType>::memory_usage()
bytes += snaKK.memory_usage();
return bytes;
}
}

View File

@ -20,7 +20,7 @@
#include <cstring>
#include <cstdlib>
using namespace LAMMPS_NS;
namespace LAMMPS_NS {
static const double MY_PI = 3.14159265358979323846; // pi
@ -1300,3 +1300,5 @@ double SNAKokkos<DeviceType>::memory_usage()
bytes += jdim * jdim * jdim * jdim * jdim * sizeof(std::complex<double>);
return bytes;
}
} // namespace LAMMPS_NS

View File

@ -89,7 +89,7 @@ FixClientMD::~FixClientMD()
int nfield;
int *fieldID,*fieldtype,*fieldlen;
int msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
cs->recv(nfield,fieldID,fieldtype,fieldlen);
// clean-up
@ -173,8 +173,6 @@ void FixClientMD::min_setup(int vflag)
void FixClientMD::post_force(int vflag)
{
int i,j,m;
// energy and virial setup
if (vflag) v_setup(vflag);
@ -286,7 +284,7 @@ void FixClientMD::receive_fev(int vflag)
int nfield;
int *fieldID,*fieldtype,*fieldlen;
int msgID = cs->recv(nfield,fieldID,fieldtype,fieldlen);
cs->recv(nfield,fieldID,fieldtype,fieldlen);
double *forces = (double *) cs->unpack(FORCES);
double **f = atom->f;

View File

@ -22,10 +22,6 @@
using namespace LAMMPS_NS;
using namespace CSLIB_NS;
// customize by adding a new server protocol enum
enum{MD,MC};
/* ---------------------------------------------------------------------- */
void Message::command(int narg, char **arg)
@ -38,12 +34,10 @@ void Message::command(int narg, char **arg)
else error->all(FLERR,"Illegal message command");
lmp->clientserver = clientserver;
// customize by adding a new server protocol
// validate supported protocols
int protocol;
if (strcmp(arg[1],"md") == 0) protocol = MD;
else if (strcmp(arg[1],"mc") == 0) protocol = MC;
else error->all(FLERR,"Unknown message protocol");
if ((strcmp(arg[1],"md") != 0) && (strcmp(arg[1],"mc") != 0))
error->all(FLERR,"Unknown message protocol");
// instantiate CSlib with chosen communication mode

View File

@ -37,8 +37,8 @@ ServerMC::ServerMC(LAMMPS *lmp) : Pointers(lmp) {}
void ServerMC::loop()
{
int i,j,m;
double xold[3],xnew[3];
int m;
double xold[3];
tagint atomid;
CSlib *cs = (CSlib *) lmp->cslib;

View File

@ -78,7 +78,7 @@ ServerMD::~ServerMD()
void ServerMD::loop()
{
int i,j,m;
int j,m;
// cs = instance of CSlib

View File

@ -200,7 +200,11 @@ int MPI_Request_free(MPI_Request *request)
int MPI_Send(const void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm)
{
printf("MPI Stub WARNING: Should not send message to self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not send message to self\n");
++callcount;
}
return 0;
}
@ -209,7 +213,11 @@ int MPI_Send(const void *buf, int count, MPI_Datatype datatype,
int MPI_Isend(const void *buf, int count, MPI_Datatype datatype,
int source, int tag, MPI_Comm comm, MPI_Request *request)
{
printf("MPI Stub WARNING: Should not send message to self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not send message to self\n");
++callcount;
}
return 0;
}
@ -218,7 +226,11 @@ int MPI_Isend(const void *buf, int count, MPI_Datatype datatype,
int MPI_Rsend(const void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm)
{
printf("MPI Stub WARNING: Should not rsend message to self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not rsend message to self\n");
++callcount;
}
return 0;
}
@ -227,7 +239,11 @@ int MPI_Rsend(const void *buf, int count, MPI_Datatype datatype,
int MPI_Recv(void *buf, int count, MPI_Datatype datatype,
int source, int tag, MPI_Comm comm, MPI_Status *status)
{
printf("MPI Stub WARNING: Should not recv message from self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not recv message from self\n");
++callcount;
}
return 0;
}
@ -236,7 +252,11 @@ int MPI_Recv(void *buf, int count, MPI_Datatype datatype,
int MPI_Irecv(void *buf, int count, MPI_Datatype datatype,
int source, int tag, MPI_Comm comm, MPI_Request *request)
{
printf("MPI Stub WARNING: Should not recv message from self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not recv message from self\n");
++callcount;
}
return 0;
}
@ -244,7 +264,11 @@ int MPI_Irecv(void *buf, int count, MPI_Datatype datatype,
int MPI_Wait(MPI_Request *request, MPI_Status *status)
{
printf("MPI Stub WARNING: Should not wait on message from self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not wait on message from self\n");
++callcount;
}
return 0;
}
@ -252,7 +276,11 @@ int MPI_Wait(MPI_Request *request, MPI_Status *status)
int MPI_Waitall(int n, MPI_Request *request, MPI_Status *status)
{
printf("MPI Stub WARNING: Should not wait on message from self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not wait on message from self\n");
++callcount;
}
return 0;
}
@ -261,7 +289,11 @@ int MPI_Waitall(int n, MPI_Request *request, MPI_Status *status)
int MPI_Waitany(int count, MPI_Request *request, int *index,
MPI_Status *status)
{
printf("MPI Stub WARNING: Should not wait on message from self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not wait on message from self\n");
++callcount;
}
return 0;
}
@ -272,7 +304,11 @@ int MPI_Sendrecv(const void *sbuf, int scount, MPI_Datatype sdatatype,
MPI_Datatype rdatatype, int source, int rtag,
MPI_Comm comm, MPI_Status *status)
{
printf("MPI Stub WARNING: Should not send message to self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not send message to self\n");
++callcount;
}
return 0;
}
@ -280,7 +316,11 @@ int MPI_Sendrecv(const void *sbuf, int scount, MPI_Datatype sdatatype,
int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count)
{
printf("MPI Stub WARNING: Should not get count of message to self\n");
static int callcount=0;
if (callcount == 0) {
printf("MPI Stub WARNING: Should not get count of message to self\n");
++callcount;
}
return 0;
}

View File

@ -67,14 +67,17 @@ improper_style distance, Paolo Raiteri, p.raiteri at curtin.edu.au, 2 Dec 15
pair_style agni, Axel Kohlmeyer, akohlmey at gmail.com, 9 Nov 16
pair_style buck/mdf, Paolo Raiteri, p.raiteri at curtin.edu.au, 2 Dec 15
pair_style coul/diel, Axel Kohlmeyer, akohlmey at gmail.com, 1 Dec 11
pair_style coul/shield, Wengen Ouyang (Tel Aviv University), w.g.ouyang at gmail dot com, 30 Mar 18
pair_style dipole/sf, Mario Orsi, orsimario at gmail.com, 8 Aug 11
pair_style edip, Luca Ferraro, luca.ferraro at caspur.it, 15 Sep 11
pair_style extep, Jaap Kroes (Radboud U), jaapkroes at gmail dot com, 28 Nov 17
pair_style gauss/cut, Axel Kohlmeyer, akohlmey at gmail.com, 1 Dec 11
pair_style ilp/graphene/hbn, Wengen Ouyang (Tel Aviv University), w.g.ouyang at gmail dot com, 30 Mar 18
pair_style lebedeva/z, Zbigniew Koziol (National Center for Nuclear Research), softquake at gmail dot com, 4 Jan 19
pair_style lennard/mdf, Paolo Raiteri, p.raiteri at curtin.edu.au, 2 Dec 15
pair_style list, Axel Kohlmeyer (Temple U), akohlmey at gmail.com, 1 Jun 13
pair_style lj/mdf, Paolo Raiteri, p.raiteri at curtin.edu.au, 2 Dec 15
pair_style kolmogorov/crespi/full, Wengen Ouyang (Tel Aviv University), w.g.ouyang at gmail dot com, 30 Mar 18
pair_style kolmogorov/crespi/z, Jaap Kroes (Radboud U), jaapkroes at gmail dot com, 28 Feb 17
pair_style meam/spline, Alexander Stukowski (LLNL), alex at stukowski.com, 1 Feb 12
pair_style meam/sw/spline, Robert Rudd (LLNL), robert.rudd at llnl.gov, 1 Oct 12

View File

@ -637,14 +637,14 @@ void FixPIMD::comm_exec(double **ptr)
if(nsend > max_nsend)
{
max_nsend = nsend+200;
tag_send = (int*) memory->srealloc(tag_send, sizeof(int)*max_nsend, "FixPIMD:tag_send");
tag_send = (tagint*) memory->srealloc(tag_send, sizeof(tagint)*max_nsend, "FixPIMD:tag_send");
buf_send = (double*) memory->srealloc(buf_send, sizeof(double)*max_nsend*3, "FixPIMD:x_send");
}
// send tags
MPI_Sendrecv( atom->tag, nlocal, MPI_INT, plan_send[iplan], 0,
tag_send, nsend, MPI_INT, plan_recv[iplan], 0, universe->uworld, MPI_STATUS_IGNORE);
MPI_Sendrecv( atom->tag, nlocal, MPI_LMP_TAGINT, plan_send[iplan], 0,
tag_send, nsend, MPI_LMP_TAGINT, plan_recv[iplan], 0, universe->uworld, MPI_STATUS_IGNORE);
// wrap positions
@ -661,7 +661,7 @@ void FixPIMD::comm_exec(double **ptr)
sprintf(error_line, "Atom " TAGINT_FORMAT " is missing at world [%d] "
"rank [%d] required by rank [%d] (" TAGINT_FORMAT ", "
TAGINT_FORMAT ", " TAGINT_FORMAT ").\n",tag_send[i],
TAGINT_FORMAT ", " TAGINT_FORMAT ").\n", tag_send[i],
universe->iworld, comm->me, plan_recv[iplan],
atom->tag[0], atom->tag[1], atom->tag[2]);

View File

@ -68,7 +68,7 @@ class FixPIMD : public Fix {
/* inter-partition communication */
int max_nsend;
int* tag_send;
tagint* tag_send;
double *buf_send;
int max_nlocal;

View File

@ -113,7 +113,7 @@ PairReaxCOMP::~PairReaxCOMP()
if (setup_flag) {
reax_list * bonds = lists+BONDS;
for (int i=0; i<bonds->num_intrs; ++i)
sfree(bonds->select.bond_list[i].bo_data.CdboReduction, "CdboReduction");
sfree(error, bonds->select.bond_list[i].bo_data.CdboReduction, "CdboReduction");
}
memory->destroy(num_nbrs_offset);
@ -209,7 +209,7 @@ void PairReaxCOMP::compute(int eflag, int vflag)
setup();
Reset( system, control, data, workspace, &lists, world );
Reset( system, control, data, workspace, &lists );
// Why not update workspace like in MPI-only code?
// Using the MPI-only way messes up the hb energy
@ -410,12 +410,12 @@ void PairReaxCOMP::setup( )
// initialize my data structures
PreAllocate_Space( system, control, workspace, world );
PreAllocate_Space( system, control, workspace );
write_reax_atoms();
int num_nbrs = estimate_reax_lists();
if(!Make_List(system->total_cap, num_nbrs, TYP_FAR_NEIGHBOR,
lists+FAR_NBRS, world))
lists+FAR_NBRS))
error->all(FLERR,"Pair reax/c problem in far neighbor list");
write_reax_lists();
@ -445,7 +445,7 @@ void PairReaxCOMP::setup( )
// check if I need to shrink/extend my data-structs
ReAllocate( system, control, data, workspace, &lists, mpi_data );
ReAllocate( system, control, data, workspace, &lists );
}
}

View File

@ -289,9 +289,10 @@ void Validate_ListsOMP( reax_system *system, storage * /*workspace */, reax_list
else comp = bonds->num_intrs;
if (End_Index(i, bonds) > comp) {
fprintf( stderr, "step%d-bondchk failed: i=%d end(i)=%d str(i+1)=%d\n",
step, i, End_Index(i,bonds), comp );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "step%d-bondchk failed: i=%d end(i)=%d str(i+1)=%d\n",
step, i, End_Index(i,bonds), comp );
system->error_ptr->one(FLERR,errmsg);
}
}
}
@ -315,9 +316,10 @@ void Validate_ListsOMP( reax_system *system, storage * /*workspace */, reax_list
else comp = hbonds->num_intrs;
if (End_Index(Hindex, hbonds) > comp) {
fprintf(stderr,"step%d-hbondchk failed: H=%d end(H)=%d str(H+1)=%d\n",
char errmsg[256];
snprintf(errmsg, 256, "step%d-hbondchk failed: H=%d end(H)=%d str(H+1)=%d\n",
step, Hindex, End_Index(Hindex,hbonds), comp );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
system->error_ptr->one(FLERR, errmsg);
}
}
}

View File

@ -43,7 +43,7 @@
extern int Init_MPI_Datatypes(reax_system*, storage*, mpi_datatypes*, MPI_Comm, char*);
extern int Init_System(reax_system*, control_params*, char*);
extern int Init_Simulation_Data(reax_system*, control_params*, simulation_data*, char*);
extern int Init_Workspace(reax_system*, control_params*, storage*, MPI_Comm, char*);
extern int Init_Workspace(reax_system*, control_params*, storage*, char*);
/* ---------------------------------------------------------------------- */
@ -63,7 +63,7 @@ int Init_ListsOMP( reax_system *system, control_params *control,
bond_top = (int*) calloc( system->total_cap, sizeof(int) );
hb_top = (int*) calloc( system->local_cap, sizeof(int) );
Estimate_Storages( system, control, lists,
&Htop, hb_top, bond_top, &num_3body, comm );
&Htop, hb_top, bond_top, &num_3body );
if (control->hbond_cut > 0) {
/* init H indexes */
@ -75,9 +75,8 @@ int Init_ListsOMP( reax_system *system, control_params *control,
total_hbonds = (int)(MAX( total_hbonds*saferzone, mincap*MIN_HBONDS ));
if( !Make_List( system->Hcap, total_hbonds, TYP_HBOND,
*lists+HBONDS, comm ) ) {
fprintf( stderr, "not enough space for hbonds list. terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
*lists+HBONDS ) ) {
system->error_ptr->one( FLERR, "Not enough space for hbonds list. Terminating!" );
}
}
@ -89,9 +88,8 @@ int Init_ListsOMP( reax_system *system, control_params *control,
bond_cap = (int)(MAX( total_bonds*safezone, mincap*MIN_BONDS ));
if( !Make_List( system->total_cap, bond_cap, TYP_BOND,
*lists+BONDS, comm ) ) {
fprintf( stderr, "not enough space for bonds list. terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
*lists+BONDS ) ) {
system->error_ptr->one( FLERR, "Not enough space for bonds list. Terminating!\n" );
}
int nthreads = control->nthreads;
@ -99,15 +97,14 @@ int Init_ListsOMP( reax_system *system, control_params *control,
for (i = 0; i < bonds->num_intrs; ++i)
bonds->select.bond_list[i].bo_data.CdboReduction =
(double*) smalloc(sizeof(double)*nthreads, "CdboReduction", comm);
(double*) smalloc(system->error_ptr, sizeof(double)*nthreads, "CdboReduction");
/* 3bodies list */
cap_3body = (int)(MAX( num_3body*safezone, MIN_3BODIES ));
if( !Make_List( bond_cap, cap_3body, TYP_THREE_BODY,
*lists+THREE_BODIES, comm ) ){
*lists+THREE_BODIES ) ){
fprintf( stderr, "Problem in initializing angles list. Terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
system->error_ptr->one( FLERR, "Problem in initializing angles list. Terminating!" );
}
free( hb_top );
@ -125,60 +122,50 @@ void InitializeOMP( reax_system *system, control_params *control,
mpi_datatypes *mpi_data, MPI_Comm comm )
{
char msg[MAX_STR];
char errmsg[512];
if (Init_MPI_Datatypes(system, workspace, mpi_data, comm, msg) == FAILURE) {
fprintf( stderr, "p%d: init_mpi_datatypes: could not create datatypes\n",
system->my_rank );
fprintf( stderr, "p%d: mpi_data couldn't be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
system->error_ptr->one( FLERR, "init_mpi_datatypes: could not create datatypes. "
"Mpi_data couldn't be initialized! Terminating.");
}
if (Init_System(system, control, msg) == FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: system could not be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
snprintf( errmsg, 512, "Error on: %s. "
"System could not be initialized! Terminating.", msg );
system->error_ptr->one(FLERR, errmsg);
}
if (Init_Simulation_Data( system, control, data, msg ) == FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: sim_data couldn't be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
snprintf( errmsg, 512, "Error on: %s. "
"Sim_data couldn't be initialized! Terminating.", msg );
system->error_ptr->one(FLERR, errmsg);
}
if (Init_Workspace( system, control, workspace, mpi_data->world, msg ) ==
if (Init_Workspace( system, control, workspace, msg ) ==
FAILURE) {
fprintf( stderr, "p%d:init_workspace: not enough memory\n",
system->my_rank );
fprintf( stderr, "p%d:workspace couldn't be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
system->error_ptr->one(FLERR, "init_workspace: not enough memory. "
"Workspace couldn't be initialized! Terminating.");
}
if (Init_ListsOMP( system, control, data, workspace, lists, mpi_data, msg ) ==
FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: system could not be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
snprintf( errmsg, 512, "Error on: %s. "
"System could not be initialized! Terminating.", msg );
system->error_ptr->one(FLERR, errmsg);
}
if (Init_Output_Files(system,control,out_control,mpi_data,msg)== FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: could not open output files! terminating...\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
snprintf( errmsg, 512, "Error on: %s"
"Could not open output files! Terminating.", msg );
system->error_ptr->one(FLERR, errmsg);
}
if (control->tabulate) {
if (Init_Lookup_Tables( system, control, workspace, mpi_data, msg ) == FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: couldn't create lookup table! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
snprintf( errmsg, 512, "Error on: %s."
" Couldn't create lookup table! Terminating.", msg );
system->error_ptr->one(FLERR, errmsg);
}
}

View File

@ -237,12 +237,12 @@ void Valence_AnglesOMP( reax_system *system, control_params *control,
// Confirm that thb_intrs->num_intrs / nthreads is enough to hold all angles from a single atom
if(my_offset >= (tid+1)*per_thread) {
int me;
MPI_Comm_rank(MPI_COMM_WORLD,&me);
fprintf( stderr, "step%d-ran out of space on angle_list on proc %i for atom %i:", data->step, me, j);
fprintf( stderr, " nthreads= %d, tid=%d, my_offset=%d, per_thread=%d\n", nthreads, tid, my_offset, per_thread);
fprintf( stderr, " num_intrs= %i N= %i\n",thb_intrs->num_intrs , system->N);
MPI_Abort( MPI_COMM_WORLD, INSUFFICIENT_MEMORY );
char errmsg[512];
snprintf( errmsg, 512, "step%d-ran out of space on angle_list for atom %i:\n"
" nthreads= %d, tid=%d, my_offset=%d, per_thread=%d\n"
" num_intrs= %i N= %i\n"
, data->step, j, nthreads, tid, my_offset, per_thread,thb_intrs->num_intrs , system->N);
control->error_ptr->one(FLERR, errmsg);
}
// Number of angles owned by this atom
@ -601,9 +601,10 @@ void Valence_AnglesOMP( reax_system *system, control_params *control,
if (num_thb_intrs >= thb_intrs->num_intrs * DANGER_ZONE) {
workspace->realloc.num_3body = num_thb_intrs * TWICE;
if (num_thb_intrs > thb_intrs->num_intrs) {
fprintf( stderr, "step%d-ran out of space on angle_list: top=%d, max=%d",
data->step, num_thb_intrs, thb_intrs->num_intrs );
MPI_Abort( MPI_COMM_WORLD, INSUFFICIENT_MEMORY );
char errmsg[128];
snprintf(errmsg, 128, "step%d-ran out of space on angle_list: top=%d, max=%d",
data->step, num_thb_intrs, thb_intrs->num_intrs);
control->error_ptr->one(FLERR, errmsg);
}
}

View File

@ -88,6 +88,7 @@
#include <cmath>
#include <algorithm>
#include <cstring>
#include "ptm_polar.h"
#include "ptm_quat.h"

View File

@ -91,6 +91,7 @@ PairReaxC::PairReaxC(LAMMPS *lmp) : Pair(lmp)
memory->smalloc(sizeof(mpi_datatypes),"reax:mpi");
MPI_Comm_rank(world,&system->my_rank);
control->me = system->my_rank;
system->my_coords[0] = 0;
system->my_coords[1] = 0;
@ -108,6 +109,8 @@ PairReaxC::PairReaxC(LAMMPS *lmp) : Pair(lmp)
system->bndry_cuts.ghost_cutoff = 0;
system->my_atoms = NULL;
system->pair_ptr = this;
system->error_ptr = error;
control->error_ptr = error;
system->omp_active = 0;
@ -139,10 +142,10 @@ PairReaxC::~PairReaxC()
if (control->tabulate ) Deallocate_Lookup_Tables( system);
if (control->hbond_cut > 0 ) Delete_List( lists+HBONDS, world);
Delete_List( lists+BONDS, world );
Delete_List( lists+THREE_BODIES, world );
Delete_List( lists+FAR_NBRS, world );
if (control->hbond_cut > 0 ) Delete_List( lists+HBONDS );
Delete_List( lists+BONDS );
Delete_List( lists+THREE_BODIES );
Delete_List( lists+FAR_NBRS );
DeAllocate_Workspace( control, workspace );
DeAllocate_System( system );
@ -394,7 +397,8 @@ void PairReaxC::init_style( )
"increased neighbor list skin.");
for( int i = 0; i < LIST_N; ++i )
lists[i].allocated = 0;
if (lists[i].allocated != 1)
lists[i].allocated = 0;
if (fix_reax == NULL) {
char **fixarg = new char*[3];
@ -436,13 +440,14 @@ void PairReaxC::setup( )
// initialize my data structures
PreAllocate_Space( system, control, workspace, world );
PreAllocate_Space( system, control, workspace );
write_reax_atoms();
int num_nbrs = estimate_reax_lists();
if(!Make_List(system->total_cap, num_nbrs, TYP_FAR_NEIGHBOR,
lists+FAR_NBRS, world))
error->all(FLERR,"Pair reax/c problem in far neighbor list");
lists+FAR_NBRS))
error->one(FLERR,"Pair reax/c problem in far neighbor list");
(lists+FAR_NBRS)->error_ptr=error;
write_reax_lists();
Initialize( system, control, data, workspace, &lists, out_control,
@ -465,7 +470,7 @@ void PairReaxC::setup( )
// check if I need to shrink/extend my data-structs
ReAllocate( system, control, data, workspace, &lists, mpi_data );
ReAllocate( system, control, data, workspace, &lists );
}
bigint local_ngroup = list->inum;
@ -516,7 +521,7 @@ void PairReaxC::compute(int eflag, int vflag)
setup();
Reset( system, control, data, workspace, &lists, world );
Reset( system, control, data, workspace, &lists );
workspace->realloc.num_far = write_reax_lists();
// timing for filling in the reax lists
if (comm->me == 0) {

View File

@ -35,12 +35,14 @@
#include <omp.h>
#endif
#include "error.h"
/* allocate space for my_atoms
important: we cannot know the exact number of atoms that will fall into a
process's box throughout the whole simulation. therefore
we need to make upper bound estimates for various data structures */
int PreAllocate_Space( reax_system *system, control_params * /*control*/,
storage * workspace, MPI_Comm comm )
storage * workspace )
{
int mincap = system->mincap;
double safezone = system->safezone;
@ -51,7 +53,7 @@ int PreAllocate_Space( reax_system *system, control_params * /*control*/,
system->total_cap = MAX( (int)(system->N * safezone), mincap );
system->my_atoms = (reax_atom*)
scalloc( system->total_cap, sizeof(reax_atom), "my_atoms", comm );
scalloc(system->error_ptr, system->total_cap, sizeof(reax_atom), "my_atoms");
// Nullify some arrays only used in omp styles
// Should be safe to do here since called in pair->setup();
@ -86,39 +88,39 @@ void DeAllocate_System( reax_system *system )
int ntypes;
reax_interaction *ff_params;
// dealloocate the atom list
sfree( system->my_atoms, "system->my_atoms" );
// deallocate the atom list
sfree(system->error_ptr, system->my_atoms, "system->my_atoms" );
// deallocate the ffield parameters storage
ff_params = &(system->reax_param);
ntypes = ff_params->num_atom_types;
sfree( ff_params->gp.l, "ff:globals" );
sfree(system->error_ptr, ff_params->gp.l, "ff:globals" );
for( i = 0; i < ntypes; ++i ) {
for( j = 0; j < ntypes; ++j ) {
for( k = 0; k < ntypes; ++k ) {
sfree( ff_params->fbp[i][j][k], "ff:fbp[i,j,k]" );
sfree(system->error_ptr, ff_params->fbp[i][j][k], "ff:fbp[i,j,k]" );
}
sfree( ff_params->fbp[i][j], "ff:fbp[i,j]" );
sfree( ff_params->thbp[i][j], "ff:thbp[i,j]" );
sfree( ff_params->hbp[i][j], "ff:hbp[i,j]" );
sfree(system->error_ptr, ff_params->fbp[i][j], "ff:fbp[i,j]" );
sfree(system->error_ptr, ff_params->thbp[i][j], "ff:thbp[i,j]" );
sfree(system->error_ptr, ff_params->hbp[i][j], "ff:hbp[i,j]" );
}
sfree( ff_params->fbp[i], "ff:fbp[i]" );
sfree( ff_params->thbp[i], "ff:thbp[i]" );
sfree( ff_params->hbp[i], "ff:hbp[i]" );
sfree( ff_params->tbp[i], "ff:tbp[i]" );
sfree(system->error_ptr, ff_params->fbp[i], "ff:fbp[i]" );
sfree(system->error_ptr, ff_params->thbp[i], "ff:thbp[i]" );
sfree(system->error_ptr, ff_params->hbp[i], "ff:hbp[i]" );
sfree(system->error_ptr, ff_params->tbp[i], "ff:tbp[i]" );
}
sfree( ff_params->fbp, "ff:fbp" );
sfree( ff_params->thbp, "ff:thbp" );
sfree( ff_params->hbp, "ff:hbp" );
sfree( ff_params->tbp, "ff:tbp" );
sfree( ff_params->sbp, "ff:sbp" );
sfree(system->error_ptr, ff_params->fbp, "ff:fbp" );
sfree(system->error_ptr, ff_params->thbp, "ff:thbp" );
sfree(system->error_ptr, ff_params->hbp, "ff:hbp" );
sfree(system->error_ptr, ff_params->tbp, "ff:tbp" );
sfree(system->error_ptr, ff_params->sbp, "ff:sbp" );
}
/************* workspace *************/
void DeAllocate_Workspace( control_params * /*control*/, storage *workspace )
void DeAllocate_Workspace( control_params * control, storage *workspace )
{
int i;
@ -129,86 +131,86 @@ void DeAllocate_Workspace( control_params * /*control*/, storage *workspace )
/* communication storage */
for( i = 0; i < MAX_NBRS; ++i ) {
sfree( workspace->tmp_dbl[i], "tmp_dbl[i]" );
sfree( workspace->tmp_rvec[i], "tmp_rvec[i]" );
sfree( workspace->tmp_rvec2[i], "tmp_rvec2[i]" );
sfree(control->error_ptr, workspace->tmp_dbl[i], "tmp_dbl[i]" );
sfree(control->error_ptr, workspace->tmp_rvec[i], "tmp_rvec[i]" );
sfree(control->error_ptr, workspace->tmp_rvec2[i], "tmp_rvec2[i]" );
}
/* bond order storage */
sfree( workspace->within_bond_box, "skin" );
sfree( workspace->total_bond_order, "total_bo" );
sfree( workspace->Deltap, "Deltap" );
sfree( workspace->Deltap_boc, "Deltap_boc" );
sfree( workspace->dDeltap_self, "dDeltap_self" );
sfree( workspace->Delta, "Delta" );
sfree( workspace->Delta_lp, "Delta_lp" );
sfree( workspace->Delta_lp_temp, "Delta_lp_temp" );
sfree( workspace->dDelta_lp, "dDelta_lp" );
sfree( workspace->dDelta_lp_temp, "dDelta_lp_temp" );
sfree( workspace->Delta_e, "Delta_e" );
sfree( workspace->Delta_boc, "Delta_boc" );
sfree( workspace->Delta_val, "Delta_val" );
sfree( workspace->nlp, "nlp" );
sfree( workspace->nlp_temp, "nlp_temp" );
sfree( workspace->Clp, "Clp" );
sfree( workspace->vlpex, "vlpex" );
sfree( workspace->bond_mark, "bond_mark" );
sfree( workspace->done_after, "done_after" );
sfree(control->error_ptr, workspace->within_bond_box, "skin" );
sfree(control->error_ptr, workspace->total_bond_order, "total_bo" );
sfree(control->error_ptr, workspace->Deltap, "Deltap" );
sfree(control->error_ptr, workspace->Deltap_boc, "Deltap_boc" );
sfree(control->error_ptr, workspace->dDeltap_self, "dDeltap_self" );
sfree(control->error_ptr, workspace->Delta, "Delta" );
sfree(control->error_ptr, workspace->Delta_lp, "Delta_lp" );
sfree(control->error_ptr, workspace->Delta_lp_temp, "Delta_lp_temp" );
sfree(control->error_ptr, workspace->dDelta_lp, "dDelta_lp" );
sfree(control->error_ptr, workspace->dDelta_lp_temp, "dDelta_lp_temp" );
sfree(control->error_ptr, workspace->Delta_e, "Delta_e" );
sfree(control->error_ptr, workspace->Delta_boc, "Delta_boc" );
sfree(control->error_ptr, workspace->Delta_val, "Delta_val" );
sfree(control->error_ptr, workspace->nlp, "nlp" );
sfree(control->error_ptr, workspace->nlp_temp, "nlp_temp" );
sfree(control->error_ptr, workspace->Clp, "Clp" );
sfree(control->error_ptr, workspace->vlpex, "vlpex" );
sfree(control->error_ptr, workspace->bond_mark, "bond_mark" );
sfree(control->error_ptr, workspace->done_after, "done_after" );
/* QEq storage */
sfree( workspace->Hdia_inv, "Hdia_inv" );
sfree( workspace->b_s, "b_s" );
sfree( workspace->b_t, "b_t" );
sfree( workspace->b_prc, "b_prc" );
sfree( workspace->b_prm, "b_prm" );
sfree( workspace->s, "s" );
sfree( workspace->t, "t" );
sfree( workspace->droptol, "droptol" );
sfree( workspace->b, "b" );
sfree( workspace->x, "x" );
sfree(control->error_ptr, workspace->Hdia_inv, "Hdia_inv" );
sfree(control->error_ptr, workspace->b_s, "b_s" );
sfree(control->error_ptr, workspace->b_t, "b_t" );
sfree(control->error_ptr, workspace->b_prc, "b_prc" );
sfree(control->error_ptr, workspace->b_prm, "b_prm" );
sfree(control->error_ptr, workspace->s, "s" );
sfree(control->error_ptr, workspace->t, "t" );
sfree(control->error_ptr, workspace->droptol, "droptol" );
sfree(control->error_ptr, workspace->b, "b" );
sfree(control->error_ptr, workspace->x, "x" );
/* GMRES storage */
for( i = 0; i < RESTART+1; ++i ) {
sfree( workspace->h[i], "h[i]" );
sfree( workspace->v[i], "v[i]" );
sfree(control->error_ptr, workspace->h[i], "h[i]" );
sfree(control->error_ptr, workspace->v[i], "v[i]" );
}
sfree( workspace->h, "h" );
sfree( workspace->v, "v" );
sfree( workspace->y, "y" );
sfree( workspace->z, "z" );
sfree( workspace->g, "g" );
sfree( workspace->hs, "hs" );
sfree( workspace->hc, "hc" );
sfree(control->error_ptr, workspace->h, "h" );
sfree(control->error_ptr, workspace->v, "v" );
sfree(control->error_ptr, workspace->y, "y" );
sfree(control->error_ptr, workspace->z, "z" );
sfree(control->error_ptr, workspace->g, "g" );
sfree(control->error_ptr, workspace->hs, "hs" );
sfree(control->error_ptr, workspace->hc, "hc" );
/* CG storage */
sfree( workspace->r, "r" );
sfree( workspace->d, "d" );
sfree( workspace->q, "q" );
sfree( workspace->p, "p" );
sfree( workspace->r2, "r2" );
sfree( workspace->d2, "d2" );
sfree( workspace->q2, "q2" );
sfree( workspace->p2, "p2" );
sfree(control->error_ptr, workspace->r, "r" );
sfree(control->error_ptr, workspace->d, "d" );
sfree(control->error_ptr, workspace->q, "q" );
sfree(control->error_ptr, workspace->p, "p" );
sfree(control->error_ptr, workspace->r2, "r2" );
sfree(control->error_ptr, workspace->d2, "d2" );
sfree(control->error_ptr, workspace->q2, "q2" );
sfree(control->error_ptr, workspace->p2, "p2" );
/* integrator storage */
sfree( workspace->v_const, "v_const" );
sfree(control->error_ptr, workspace->v_const, "v_const" );
/* force related storage */
sfree( workspace->f, "f" );
sfree( workspace->CdDelta, "CdDelta" );
sfree(control->error_ptr, workspace->f, "f" );
sfree(control->error_ptr, workspace->CdDelta, "CdDelta" );
/* reductions */
#ifdef LMP_USER_OMP
if (workspace->CdDeltaReduction) sfree( workspace->CdDeltaReduction, "cddelta_reduce" );
if (workspace->forceReduction) sfree( workspace->forceReduction, "f_reduce" );
if (workspace->valence_angle_atom_myoffset) sfree( workspace->valence_angle_atom_myoffset, "valence_angle_atom_myoffset");
if (workspace->my_ext_pressReduction) sfree( workspace->my_ext_pressReduction, "ext_press_reduce");
if (workspace->CdDeltaReduction) sfree(control->error_ptr, workspace->CdDeltaReduction, "cddelta_reduce" );
if (workspace->forceReduction) sfree(control->error_ptr, workspace->forceReduction, "f_reduce" );
if (workspace->valence_angle_atom_myoffset) sfree(control->error_ptr, workspace->valence_angle_atom_myoffset, "valence_angle_atom_myoffset");
if (workspace->my_ext_pressReduction) sfree(control->error_ptr, workspace->my_ext_pressReduction, "ext_press_reduce");
#endif
}
int Allocate_Workspace( reax_system * /*system*/, control_params * control,
storage *workspace, int local_cap, int total_cap,
MPI_Comm comm, char * /*msg*/ )
char * /*msg*/ )
{
int i, total_real, total_rvec, local_rvec;
@ -220,94 +222,94 @@ int Allocate_Workspace( reax_system * /*system*/, control_params * control,
/* communication storage */
for( i = 0; i < MAX_NBRS; ++i ) {
workspace->tmp_dbl[i] = (double*)
scalloc( total_cap, sizeof(double), "tmp_dbl", comm );
scalloc(control->error_ptr, total_cap, sizeof(double), "tmp_dbl");
workspace->tmp_rvec[i] = (rvec*)
scalloc( total_cap, sizeof(rvec), "tmp_rvec", comm );
scalloc(control->error_ptr, total_cap, sizeof(rvec), "tmp_rvec");
workspace->tmp_rvec2[i] = (rvec2*)
scalloc( total_cap, sizeof(rvec2), "tmp_rvec2", comm );
scalloc(control->error_ptr, total_cap, sizeof(rvec2), "tmp_rvec2");
}
/* bond order related storage */
workspace->within_bond_box = (int*)
scalloc( total_cap, sizeof(int), "skin", comm );
workspace->total_bond_order = (double*) smalloc( total_real, "total_bo", comm );
workspace->Deltap = (double*) smalloc( total_real, "Deltap", comm );
workspace->Deltap_boc = (double*) smalloc( total_real, "Deltap_boc", comm );
workspace->dDeltap_self = (rvec*) smalloc( total_rvec, "dDeltap_self", comm );
workspace->Delta = (double*) smalloc( total_real, "Delta", comm );
workspace->Delta_lp = (double*) smalloc( total_real, "Delta_lp", comm );
scalloc(control->error_ptr, total_cap, sizeof(int), "skin");
workspace->total_bond_order = (double*) smalloc(control->error_ptr, total_real, "total_bo");
workspace->Deltap = (double*) smalloc(control->error_ptr, total_real, "Deltap");
workspace->Deltap_boc = (double*) smalloc(control->error_ptr, total_real, "Deltap_boc");
workspace->dDeltap_self = (rvec*) smalloc(control->error_ptr, total_rvec, "dDeltap_self");
workspace->Delta = (double*) smalloc(control->error_ptr, total_real, "Delta");
workspace->Delta_lp = (double*) smalloc(control->error_ptr, total_real, "Delta_lp");
workspace->Delta_lp_temp = (double*)
smalloc( total_real, "Delta_lp_temp", comm );
workspace->dDelta_lp = (double*) smalloc( total_real, "dDelta_lp", comm );
smalloc(control->error_ptr, total_real, "Delta_lp_temp");
workspace->dDelta_lp = (double*) smalloc(control->error_ptr, total_real, "dDelta_lp");
workspace->dDelta_lp_temp = (double*)
smalloc( total_real, "dDelta_lp_temp", comm );
workspace->Delta_e = (double*) smalloc( total_real, "Delta_e", comm );
workspace->Delta_boc = (double*) smalloc( total_real, "Delta_boc", comm );
workspace->Delta_val = (double*) smalloc( total_real, "Delta_val", comm );
workspace->nlp = (double*) smalloc( total_real, "nlp", comm );
workspace->nlp_temp = (double*) smalloc( total_real, "nlp_temp", comm );
workspace->Clp = (double*) smalloc( total_real, "Clp", comm );
workspace->vlpex = (double*) smalloc( total_real, "vlpex", comm );
smalloc(control->error_ptr, total_real, "dDelta_lp_temp");
workspace->Delta_e = (double*) smalloc(control->error_ptr, total_real, "Delta_e");
workspace->Delta_boc = (double*) smalloc(control->error_ptr, total_real, "Delta_boc");
workspace->Delta_val = (double*) smalloc(control->error_ptr, total_real, "Delta_val");
workspace->nlp = (double*) smalloc(control->error_ptr, total_real, "nlp");
workspace->nlp_temp = (double*) smalloc(control->error_ptr, total_real, "nlp_temp");
workspace->Clp = (double*) smalloc(control->error_ptr, total_real, "Clp");
workspace->vlpex = (double*) smalloc(control->error_ptr, total_real, "vlpex");
workspace->bond_mark = (int*)
scalloc( total_cap, sizeof(int), "bond_mark", comm );
scalloc(control->error_ptr, total_cap, sizeof(int), "bond_mark");
workspace->done_after = (int*)
scalloc( total_cap, sizeof(int), "done_after", comm );
scalloc(control->error_ptr, total_cap, sizeof(int), "done_after");
/* QEq storage */
workspace->Hdia_inv = (double*)
scalloc( total_cap, sizeof(double), "Hdia_inv", comm );
workspace->b_s = (double*) scalloc( total_cap, sizeof(double), "b_s", comm );
workspace->b_t = (double*) scalloc( total_cap, sizeof(double), "b_t", comm );
workspace->b_prc = (double*) scalloc( total_cap, sizeof(double), "b_prc", comm );
workspace->b_prm = (double*) scalloc( total_cap, sizeof(double), "b_prm", comm );
workspace->s = (double*) scalloc( total_cap, sizeof(double), "s", comm );
workspace->t = (double*) scalloc( total_cap, sizeof(double), "t", comm );
scalloc(control->error_ptr, total_cap, sizeof(double), "Hdia_inv");
workspace->b_s = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "b_s");
workspace->b_t = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "b_t");
workspace->b_prc = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "b_prc");
workspace->b_prm = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "b_prm");
workspace->s = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "s");
workspace->t = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "t");
workspace->droptol = (double*)
scalloc( total_cap, sizeof(double), "droptol", comm );
workspace->b = (rvec2*) scalloc( total_cap, sizeof(rvec2), "b", comm );
workspace->x = (rvec2*) scalloc( total_cap, sizeof(rvec2), "x", comm );
scalloc(control->error_ptr, total_cap, sizeof(double), "droptol");
workspace->b = (rvec2*) scalloc(control->error_ptr, total_cap, sizeof(rvec2), "b");
workspace->x = (rvec2*) scalloc(control->error_ptr, total_cap, sizeof(rvec2), "x");
/* GMRES storage */
workspace->y = (double*) scalloc( RESTART+1, sizeof(double), "y", comm );
workspace->z = (double*) scalloc( RESTART+1, sizeof(double), "z", comm );
workspace->g = (double*) scalloc( RESTART+1, sizeof(double), "g", comm );
workspace->h = (double**) scalloc( RESTART+1, sizeof(double*), "h", comm );
workspace->hs = (double*) scalloc( RESTART+1, sizeof(double), "hs", comm );
workspace->hc = (double*) scalloc( RESTART+1, sizeof(double), "hc", comm );
workspace->v = (double**) scalloc( RESTART+1, sizeof(double*), "v", comm );
workspace->y = (double*) scalloc(control->error_ptr, RESTART+1, sizeof(double), "y");
workspace->z = (double*) scalloc(control->error_ptr, RESTART+1, sizeof(double), "z");
workspace->g = (double*) scalloc(control->error_ptr, RESTART+1, sizeof(double), "g");
workspace->h = (double**) scalloc(control->error_ptr, RESTART+1, sizeof(double*), "h");
workspace->hs = (double*) scalloc(control->error_ptr, RESTART+1, sizeof(double), "hs");
workspace->hc = (double*) scalloc(control->error_ptr, RESTART+1, sizeof(double), "hc");
workspace->v = (double**) scalloc(control->error_ptr, RESTART+1, sizeof(double*), "v");
for( i = 0; i < RESTART+1; ++i ) {
workspace->h[i] = (double*) scalloc( RESTART+1, sizeof(double), "h[i]", comm );
workspace->v[i] = (double*) scalloc( total_cap, sizeof(double), "v[i]", comm );
workspace->h[i] = (double*) scalloc(control->error_ptr, RESTART+1, sizeof(double), "h[i]");
workspace->v[i] = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "v[i]");
}
/* CG storage */
workspace->r = (double*) scalloc( total_cap, sizeof(double), "r", comm );
workspace->d = (double*) scalloc( total_cap, sizeof(double), "d", comm );
workspace->q = (double*) scalloc( total_cap, sizeof(double), "q", comm );
workspace->p = (double*) scalloc( total_cap, sizeof(double), "p", comm );
workspace->r2 = (rvec2*) scalloc( total_cap, sizeof(rvec2), "r2", comm );
workspace->d2 = (rvec2*) scalloc( total_cap, sizeof(rvec2), "d2", comm );
workspace->q2 = (rvec2*) scalloc( total_cap, sizeof(rvec2), "q2", comm );
workspace->p2 = (rvec2*) scalloc( total_cap, sizeof(rvec2), "p2", comm );
workspace->r = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "r");
workspace->d = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "d");
workspace->q = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "q");
workspace->p = (double*) scalloc(control->error_ptr, total_cap, sizeof(double), "p");
workspace->r2 = (rvec2*) scalloc(control->error_ptr, total_cap, sizeof(rvec2), "r2");
workspace->d2 = (rvec2*) scalloc(control->error_ptr, total_cap, sizeof(rvec2), "d2");
workspace->q2 = (rvec2*) scalloc(control->error_ptr, total_cap, sizeof(rvec2), "q2");
workspace->p2 = (rvec2*) scalloc(control->error_ptr, total_cap, sizeof(rvec2), "p2");
/* integrator storage */
workspace->v_const = (rvec*) smalloc( local_rvec, "v_const", comm );
workspace->v_const = (rvec*) smalloc(control->error_ptr, local_rvec, "v_const");
/* force related storage */
workspace->f = (rvec*) scalloc( total_cap, sizeof(rvec), "f", comm );
workspace->f = (rvec*) scalloc(control->error_ptr, total_cap, sizeof(rvec), "f");
workspace->CdDelta = (double*)
scalloc( total_cap, sizeof(double), "CdDelta", comm );
scalloc(control->error_ptr, total_cap, sizeof(double), "CdDelta");
// storage for reductions with multiple threads
#ifdef LMP_USER_OMP
workspace->CdDeltaReduction = (double *) scalloc(sizeof(double), total_cap*control->nthreads,
"cddelta_reduce", comm);
workspace->CdDeltaReduction = (double *) scalloc(control->error_ptr, sizeof(double), total_cap*control->nthreads,
"cddelta_reduce");
workspace->forceReduction = (rvec *) scalloc(sizeof(rvec), total_cap*control->nthreads,
"forceReduction", comm);
workspace->forceReduction = (rvec *) scalloc(control->error_ptr, sizeof(rvec), total_cap*control->nthreads,
"forceReduction");
workspace->valence_angle_atom_myoffset = (int *) scalloc(sizeof(int), total_cap, "valence_angle_atom_myoffset", comm);
workspace->valence_angle_atom_myoffset = (int *) scalloc(control->error_ptr, sizeof(int), total_cap, "valence_angle_atom_myoffset");
workspace->my_ext_pressReduction = (rvec *) calloc(sizeof(rvec), control->nthreads);
#else
LMP_UNUSED_PARAM(control);
@ -318,18 +320,16 @@ int Allocate_Workspace( reax_system * /*system*/, control_params * control,
static void Reallocate_Neighbor_List( reax_list *far_nbrs, int n,
int num_intrs, MPI_Comm comm )
int num_intrs )
{
Delete_List( far_nbrs, comm );
if(!Make_List( n, num_intrs, TYP_FAR_NEIGHBOR, far_nbrs, comm )){
fprintf(stderr, "Problem in initializing far nbrs list. Terminating!\n");
MPI_Abort( comm, INSUFFICIENT_MEMORY );
Delete_List( far_nbrs);
if(!Make_List( n, num_intrs, TYP_FAR_NEIGHBOR, far_nbrs )){
far_nbrs->error_ptr->one(FLERR,"Problem in initializing far neighbors list");
}
}
static int Reallocate_HBonds_List( reax_system *system, reax_list *hbonds,
MPI_Comm comm )
static int Reallocate_HBonds_List( reax_system *system, reax_list *hbonds )
{
int i, total_hbonds;
@ -343,10 +343,9 @@ static int Reallocate_HBonds_List( reax_system *system, reax_list *hbonds,
}
total_hbonds = (int)(MAX( total_hbonds*saferzone, mincap*MIN_HBONDS ));
Delete_List( hbonds, comm );
if (!Make_List( system->Hcap, total_hbonds, TYP_HBOND, hbonds, comm )) {
fprintf( stderr, "not enough space for hbonds list. terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
Delete_List( hbonds);
if (!Make_List( system->Hcap, total_hbonds, TYP_HBOND, hbonds )) {
hbonds->error_ptr->one(FLERR, "Not enough space for hydrogen bonds list");
}
return total_hbonds;
@ -354,8 +353,7 @@ static int Reallocate_HBonds_List( reax_system *system, reax_list *hbonds,
static int Reallocate_Bonds_List( reax_system *system, reax_list *bonds,
int *total_bonds, int *est_3body,
MPI_Comm comm )
int *total_bonds, int *est_3body )
{
int i;
@ -373,13 +371,12 @@ static int Reallocate_Bonds_List( reax_system *system, reax_list *bonds,
#ifdef LMP_USER_OMP
if (system->omp_active)
for (i = 0; i < bonds->num_intrs; ++i)
sfree(bonds->select.bond_list[i].bo_data.CdboReduction, "CdboReduction");
sfree(system->error_ptr, bonds->select.bond_list[i].bo_data.CdboReduction, "CdboReduction");
#endif
Delete_List( bonds, comm );
if(!Make_List(system->total_cap, *total_bonds, TYP_BOND, bonds, comm)) {
fprintf( stderr, "not enough space for bonds list. terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
Delete_List( bonds);
if(!Make_List(system->total_cap, *total_bonds, TYP_BOND, bonds)) {
bonds->error_ptr->one(FLERR, "Not enough space for bonds list");
}
#ifdef LMP_USER_OMP
@ -392,7 +389,7 @@ static int Reallocate_Bonds_List( reax_system *system, reax_list *bonds,
if (system->omp_active)
for (i = 0; i < bonds->num_intrs; ++i)
bonds->select.bond_list[i].bo_data.CdboReduction =
(double*) smalloc(sizeof(double)*nthreads, "CdboReduction", comm);
(double*) smalloc(system->error_ptr, sizeof(double)*nthreads, "CdboReduction");
#endif
return SUCCESS;
@ -400,14 +397,12 @@ static int Reallocate_Bonds_List( reax_system *system, reax_list *bonds,
void ReAllocate( reax_system *system, control_params *control,
simulation_data *data, storage *workspace, reax_list **lists,
mpi_datatypes *mpi_data )
simulation_data *data, storage *workspace, reax_list **lists )
{
int num_bonds, est_3body, Hflag, ret;
int renbr, newsize;
reallocate_data *realloc;
reax_list *far_nbrs;
MPI_Comm comm;
char msg[200];
int mincap = system->mincap;
@ -415,7 +410,6 @@ void ReAllocate( reax_system *system, control_params *control,
double saferzone = system->saferzone;
realloc = &(workspace->realloc);
comm = mpi_data->world;
if( system->n >= DANGER_ZONE * system->local_cap ||
(0 && system->n <= LOOSE_ZONE * system->local_cap) ) {
@ -433,21 +427,19 @@ void ReAllocate( reax_system *system, control_params *control,
/* system */
ret = Allocate_System( system, system->local_cap, system->total_cap, msg );
if (ret != SUCCESS) {
fprintf( stderr, "not enough space for atom_list: total_cap=%d",
system->total_cap );
fprintf( stderr, "terminating...\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "Not enough space for atom_list: total_cap=%d", system->total_cap);
system->error_ptr->one(FLERR, errmsg);
}
/* workspace */
DeAllocate_Workspace( control, workspace );
ret = Allocate_Workspace( system, control, workspace, system->local_cap,
system->total_cap, comm, msg );
system->total_cap, msg );
if (ret != SUCCESS) {
fprintf( stderr, "no space for workspace: local_cap=%d total_cap=%d",
system->local_cap, system->total_cap );
fprintf( stderr, "terminating...\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "Not enough space for workspace: local_cap=%d total_cap=%d", system->local_cap, system->total_cap);
system->error_ptr->one(FLERR, errmsg);
}
}
@ -459,15 +451,15 @@ void ReAllocate( reax_system *system, control_params *control,
if (Nflag || realloc->num_far >= far_nbrs->num_intrs * DANGER_ZONE) {
if (realloc->num_far > far_nbrs->num_intrs) {
fprintf( stderr, "step%d-ran out of space on far_nbrs: top=%d, max=%d",
data->step, realloc->num_far, far_nbrs->num_intrs );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "step%d-ran out of space on far_nbrs: top=%d, max=%d", data->step, realloc->num_far, far_nbrs->num_intrs);
system->error_ptr->one(FLERR, errmsg);
}
newsize = static_cast<int>
(MAX( realloc->num_far*safezone, mincap*MIN_NBRS ));
Reallocate_Neighbor_List( far_nbrs, system->total_cap, newsize, comm );
Reallocate_Neighbor_List( far_nbrs, system->total_cap, newsize);
realloc->num_far = 0;
}
}
@ -482,7 +474,7 @@ void ReAllocate( reax_system *system, control_params *control,
}
if (Hflag || realloc->hbonds) {
ret = Reallocate_HBonds_List( system, (*lists)+HBONDS, comm );
ret = Reallocate_HBonds_List( system, (*lists)+HBONDS);
realloc->hbonds = 0;
}
}
@ -491,14 +483,14 @@ void ReAllocate( reax_system *system, control_params *control,
num_bonds = est_3body = -1;
if (Nflag || realloc->bonds) {
Reallocate_Bonds_List( system, (*lists)+BONDS, &num_bonds,
&est_3body, comm );
&est_3body);
realloc->bonds = 0;
realloc->num_3body = MAX( realloc->num_3body, est_3body ) * 2;
}
/* 3-body list */
if (realloc->num_3body > 0) {
Delete_List( (*lists)+THREE_BODIES, comm );
Delete_List( (*lists)+THREE_BODIES);
if (num_bonds == -1)
num_bonds = ((*lists)+BONDS)->num_intrs;
@ -506,9 +498,8 @@ void ReAllocate( reax_system *system, control_params *control,
realloc->num_3body = (int)(MAX(realloc->num_3body*safezone, MIN_3BODIES));
if( !Make_List( num_bonds, realloc->num_3body, TYP_THREE_BODY,
(*lists)+THREE_BODIES, comm ) ) {
fprintf( stderr, "Problem in initializing angles list. Terminating!\n" );
MPI_Abort( comm, CANNOT_INITIALIZE );
(*lists)+THREE_BODIES ) ) {
system->error_ptr->one(FLERR, "Problem in initializing angles list");
}
realloc->num_3body = -1;
}

View File

@ -28,15 +28,15 @@
#define __ALLOCATE_H_
#include "reaxc_types.h"
int PreAllocate_Space( reax_system*, control_params*, storage*, MPI_Comm );
int PreAllocate_Space( reax_system*, control_params*, storage* );
int Allocate_System( reax_system*, int, int, char* );
void DeAllocate_System( reax_system* );
int Allocate_Workspace( reax_system*, control_params*, storage*,
int, int, MPI_Comm, char* );
int, int, char* );
void DeAllocate_Workspace( control_params*, storage* );
void ReAllocate( reax_system*, control_params*, simulation_data*, storage*,
reax_list**, mpi_datatypes* );
reax_list** );
#endif

View File

@ -28,6 +28,8 @@
#include "reaxc_control.h"
#include "reaxc_tool_box.h"
#include "error.h"
char Read_Control_File( char *control_file, control_params* control,
output_controls *out_control )
{
@ -38,8 +40,7 @@ char Read_Control_File( char *control_file, control_params* control,
/* open control file */
if ( (fp = fopen( control_file, "r" ) ) == NULL ) {
fprintf( stderr, "error opening the control file! terminating...\n" );
MPI_Abort( MPI_COMM_WORLD, FILE_NOT_FOUND );
control->error_ptr->all(FLERR, "The control file cannot be opened");
}
/* assign default values */
@ -364,8 +365,9 @@ char Read_Control_File( char *control_file, control_params* control,
control->restrict_type = ival;
}
else {
fprintf( stderr, "WARNING: unknown parameter %s\n", tmp[0] );
MPI_Abort( MPI_COMM_WORLD, 15 );
char errmsg[128];
snprintf(errmsg,128,"Unknown parameter %s in the control file", tmp[0]);
control->error_ptr->all(FLERR, errmsg);
}
}

View File

@ -29,6 +29,7 @@
#include "reaxc_ffield.h"
#include "reaxc_tool_box.h"
char Read_Force_Field( FILE *fp, reax_interaction *reax,
control_params *control )
{
@ -41,7 +42,6 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
double val;
MPI_Comm comm;
int me;
comm = MPI_COMM_WORLD;
MPI_Comm_rank(comm, &me);
@ -61,7 +61,7 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
n = atoi(tmp[0]);
if (n < 1) {
if (me == 0)
fprintf( stderr, "WARNING: number of globals in ffield file is 0!\n" );
control->error_ptr->warning( FLERR, "Number of globals in ffield file is 0. The file will not be read." );
fclose(fp);
free(s);
free(tmp);
@ -96,61 +96,52 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
/* Allocating structures in reax_interaction */
reax->sbp = (single_body_parameters*)
scalloc( reax->num_atom_types, sizeof(single_body_parameters), "sbp",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(single_body_parameters), "sbp");
reax->tbp = (two_body_parameters**)
scalloc( reax->num_atom_types, sizeof(two_body_parameters*), "tbp", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(two_body_parameters*), "tbp");
reax->thbp= (three_body_header***)
scalloc( reax->num_atom_types, sizeof(three_body_header**), "thbp", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(three_body_header**), "thbp");
reax->hbp = (hbond_parameters***)
scalloc( reax->num_atom_types, sizeof(hbond_parameters**), "hbp", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(hbond_parameters**), "hbp");
reax->fbp = (four_body_header****)
scalloc( reax->num_atom_types, sizeof(four_body_header***), "fbp", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(four_body_header***), "fbp");
tor_flag = (char****)
scalloc( reax->num_atom_types, sizeof(char***), "tor_flag", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(char***), "tor_flag");
for( i = 0; i < reax->num_atom_types; i++ ) {
reax->tbp[i] = (two_body_parameters*)
scalloc( reax->num_atom_types, sizeof(two_body_parameters), "tbp[i]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(two_body_parameters), "tbp[i]");
reax->thbp[i]= (three_body_header**)
scalloc( reax->num_atom_types, sizeof(three_body_header*), "thbp[i]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(three_body_header*), "thbp[i]");
reax->hbp[i] = (hbond_parameters**)
scalloc( reax->num_atom_types, sizeof(hbond_parameters*), "hbp[i]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(hbond_parameters*), "hbp[i]");
reax->fbp[i] = (four_body_header***)
scalloc( reax->num_atom_types, sizeof(four_body_header**), "fbp[i]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(four_body_header**), "fbp[i]");
tor_flag[i] = (char***)
scalloc( reax->num_atom_types, sizeof(char**), "tor_flag[i]", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(char**), "tor_flag[i]");
for( j = 0; j < reax->num_atom_types; j++ ) {
reax->thbp[i][j]= (three_body_header*)
scalloc( reax->num_atom_types, sizeof(three_body_header), "thbp[i,j]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(three_body_header), "thbp[i,j]");
reax->hbp[i][j] = (hbond_parameters*)
scalloc( reax->num_atom_types, sizeof(hbond_parameters), "hbp[i,j]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(hbond_parameters), "hbp[i,j]");
reax->fbp[i][j] = (four_body_header**)
scalloc( reax->num_atom_types, sizeof(four_body_header*), "fbp[i,j]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(four_body_header*), "fbp[i,j]");
tor_flag[i][j] = (char**)
scalloc( reax->num_atom_types, sizeof(char*), "tor_flag[i,j]", comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(char*), "tor_flag[i,j]");
for (k=0; k < reax->num_atom_types; k++) {
reax->fbp[i][j][k] = (four_body_header*)
scalloc( reax->num_atom_types, sizeof(four_body_header), "fbp[i,j,k]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(four_body_header), "fbp[i,j,k]");
tor_flag[i][j][k] = (char*)
scalloc( reax->num_atom_types, sizeof(char), "tor_flag[i,j,k]",
comm );
scalloc(control->error_ptr, reax->num_atom_types, sizeof(char), "tor_flag[i,j,k]");
}
}
}
reax->gp.vdw_type = 0;
char errmsg[1024];
for( i = 0; i < reax->num_atom_types; i++ ) {
/* line one */
@ -158,15 +149,12 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
c = Tokenize( s, &tmp );
/* Sanity checks */
if (c == 2 && !lgflag) {
if (me == 0)
fprintf(stderr, "Force field file requires using 'lgvdw yes'\n");
MPI_Abort( comm, FILE_NOT_FOUND );
}
if (c == 2 && !lgflag)
control->error_ptr->all(FLERR, "Force field file requires using 'lgvdw yes'");
if (c < 9) {
if (me == 0)
fprintf(stderr, "Inconsistent ffield file (reaxc_ffield.cpp) \n");
MPI_Abort( comm, FILE_NOT_FOUND );
snprintf (errmsg, 1024, "Missing parameter(s) in line %s", s);
control->error_ptr->all(FLERR, errmsg);
}
for( j = 0; j < (int)(strlen(tmp[0])); ++j )
@ -188,9 +176,8 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
/* Sanity check */
if (c < 8) {
if (me == 0)
fprintf(stderr, "Inconsistent ffield file (reaxc_ffield.cpp) \n");
MPI_Abort( comm, FILE_NOT_FOUND );
snprintf (errmsg, 1024, "Missing parameter(s) in line %s", s);
control->error_ptr->all(FLERR, errmsg);
}
val = atof(tmp[0]); reax->sbp[i].alpha = val;
@ -208,9 +195,8 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
/* Sanity check */
if (c < 8) {
if (me == 0)
fprintf(stderr, "Inconsistent ffield file (reaxc_ffield.cpp) \n");
MPI_Abort( comm, FILE_NOT_FOUND );
snprintf (errmsg, 1024, "Missing parameter(s) in line %s", s);
control->error_ptr->all(FLERR, errmsg);
}
val = atof(tmp[0]); reax->sbp[i].r_pi_pi = val;
@ -228,9 +214,8 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
/* Sanity check */
if (c < 8) {
if (me == 0)
fprintf(stderr, "Inconsistent ffield file (reaxc_ffield.cpp) \n");
MPI_Abort( comm, FILE_NOT_FOUND );
snprintf (errmsg, 1024, "Missing parameter(s) in line %s", s);
control->error_ptr->all(FLERR, errmsg);
}
val = atof(tmp[0]); reax->sbp[i].p_ovun2 = val;
@ -249,9 +234,7 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
/* Sanity check */
if (c > 2) {
if (me == 0)
fprintf(stderr, "Force field file incompatible with 'lgvdw yes'\n");
MPI_Abort( comm, FILE_NOT_FOUND );
control->error_ptr->all(FLERR,"Force field file incompatible with 'lgvdw yes'");
}
val = atof(tmp[0]); reax->sbp[i].lgcij = val;
@ -261,28 +244,32 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
if (reax->sbp[i].rcore2>0.01 && reax->sbp[i].acore2>0.01) { // Inner-wall
if (reax->sbp[i].gamma_w>0.5) { // Shielding vdWaals
if (reax->gp.vdw_type != 0 && reax->gp.vdw_type != 3) {
if (errorflag && (me == 0))
fprintf( stderr, "Warning: inconsistent vdWaals-parameters\n" \
"Force field parameters for element %s\n" \
"indicate inner wall+shielding, but earlier\n" \
"atoms indicate different vdWaals-method.\n" \
"This may cause division-by-zero errors.\n" \
"Keeping vdWaals-setting for earlier atoms.\n",
reax->sbp[i].name );
if (errorflag && (me == 0)) {
char errmsg[512];
snprintf(errmsg, 512, "VdWaals-parameters for element %s "
"indicate inner wall+shielding, but earlier "
"atoms indicate different vdWaals-method. "
"This may cause division-by-zero errors. "
"Keeping vdWaals-setting for earlier atoms.",
reax->sbp[i].name);
control->error_ptr->warning(FLERR,errmsg);
}
errorflag = 0;
} else {
reax->gp.vdw_type = 3;
}
} else { // No shielding vdWaals parameters present
if (reax->gp.vdw_type != 0 && reax->gp.vdw_type != 2) {
if (me == 0)
fprintf( stderr, "Warning: inconsistent vdWaals-parameters\n" \
"Force field parameters for element %s\n" \
"indicate inner wall without shielding, but earlier\n" \
"atoms indicate different vdWaals-method.\n" \
"This may cause division-by-zero errors.\n" \
"Keeping vdWaals-setting for earlier atoms.\n",
reax->sbp[i].name );
if (me == 0) {
char errmsg[512];
snprintf(errmsg, 512, "VdWaals-parameters for element %s "
"indicate inner wall without shielding, but earlier "
"atoms indicate different vdWaals-method. "
"This may cause division-by-zero errors. "
"Keeping vdWaals-setting for earlier atoms.",
reax->sbp[i].name);
control->error_ptr->warning(FLERR,errmsg);
}
} else {
reax->gp.vdw_type = 2;
}
@ -290,23 +277,25 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
} else { // No Inner wall parameters present
if (reax->sbp[i].gamma_w>0.5) { // Shielding vdWaals
if (reax->gp.vdw_type != 0 && reax->gp.vdw_type != 1) {
if (me == 0)
fprintf( stderr, "Warning: inconsistent vdWaals-parameters\n" \
"Force field parameters for element %s\n" \
"indicate shielding without inner wall, but earlier\n" \
"atoms indicate different vdWaals-method.\n" \
"This may cause division-by-zero errors.\n" \
"Keeping vdWaals-setting for earlier atoms.\n",
reax->sbp[i].name );
if (me == 0) {
char errmsg[512];
snprintf(errmsg, 512, "VdWaals parameters for element %s "
"indicate shielding without inner wall, but earlier "
"elements indicate different vdWaals-method. "
"This may cause division-by-zero errors. "
"Keeping vdWaals-setting for earlier atoms.",
reax->sbp[i].name);
control->error_ptr->warning(FLERR,errmsg);
}
} else {
reax->gp.vdw_type = 1;
}
} else {
if (me == 0)
fprintf( stderr, "Error: inconsistent vdWaals-parameters\n" \
"No shielding or inner-wall set for element %s\n",
reax->sbp[i].name );
MPI_Abort( comm, INVALID_INPUT );
char errmsg[256];
snprintf(errmsg, 256, "Inconsistent vdWaals-parameters: "
"No shielding or inner-wall set for element %s",
reax->sbp[i].name);
control->error_ptr->all(FLERR, errmsg);
}
}
}
@ -315,15 +304,23 @@ char Read_Force_Field( FILE *fp, reax_interaction *reax,
for( i = 0; i < reax->num_atom_types; i++ )
if( reax->sbp[i].mass < 21 &&
reax->sbp[i].valency_val != reax->sbp[i].valency_boc ) {
if (me == 0)
fprintf(stderr,"Warning: changed valency_val to valency_boc for %s\n",
reax->sbp[i].name );
if (me == 0) {
char errmsg[256];
snprintf(errmsg, 256, "Changed valency_val to valency_boc for %s",
reax->sbp[i].name);
control->error_ptr->warning(FLERR,errmsg);
}
reax->sbp[i].valency_val = reax->sbp[i].valency_boc;
}
/* next line is number of two body combination and some comments */
fgets(s,MAX_LINE,fp);
c=Tokenize(s,&tmp);
if (c == 2 && !lgflag) {
control->error_ptr->all(FLERR, "Force field file requires using 'lgvdw yes'");
}
l = atoi(tmp[0]);
/* a line of comments */

View File

@ -39,11 +39,14 @@
#include "reaxc_valence_angles.h"
#include "reaxc_vector.h"
#include "error.h"
interaction_function Interaction_Functions[NUM_INTRS];
void Dummy_Interaction( reax_system * /*system*/, control_params * /*control*/,
simulation_data * /*data*/, storage * /*workspace*/,
reax_list **/*lists*/, output_controls * /*out_control*/ )
reax_list ** /*lists*/, output_controls * /*out_control*/ )
{
}
@ -115,7 +118,7 @@ void Compute_Total_Force( reax_system *system, control_params *control,
}
void Validate_Lists( reax_system *system, storage * /*workspace*/, reax_list **lists,
int step, int /*n*/, int N, int numH, MPI_Comm comm )
int step, int /*n*/, int N, int numH )
{
int i, comp, Hindex;
reax_list *bonds, *hbonds;
@ -134,9 +137,10 @@ void Validate_Lists( reax_system *system, storage * /*workspace*/, reax_list **l
else comp = bonds->num_intrs;
if (End_Index(i, bonds) > comp) {
fprintf( stderr, "step%d-bondchk failed: i=%d end(i)=%d str(i+1)=%d\n",
char errmsg[256];
snprintf(errmsg, 256, "step%d-bondchk failed: i=%d end(i)=%d str(i+1)=%d\n",
step, i, End_Index(i,bonds), comp );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
system->error_ptr->one(FLERR,errmsg);
}
}
}
@ -161,9 +165,10 @@ void Validate_Lists( reax_system *system, storage * /*workspace*/, reax_list **l
else comp = hbonds->num_intrs;
if (End_Index(Hindex, hbonds) > comp) {
fprintf(stderr,"step%d-hbondchk failed: H=%d end(H)=%d str(H+1)=%d\n",
char errmsg[256];
snprintf(errmsg, 256, "step%d-hbondchk failed: H=%d end(H)=%d str(H+1)=%d\n",
step, Hindex, End_Index(Hindex,hbonds), comp );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
system->error_ptr->one(FLERR, errmsg);
}
}
}
@ -173,8 +178,7 @@ void Validate_Lists( reax_system *system, storage * /*workspace*/, reax_list **l
void Init_Forces_noQEq( reax_system *system, control_params *control,
simulation_data *data, storage *workspace,
reax_list **lists, output_controls * /*out_control*/,
MPI_Comm comm ) {
reax_list **lists, output_controls * /*out_control*/ ) {
int i, j, pj;
int start_i, end_i;
int type_i, type_j;
@ -308,13 +312,13 @@ void Init_Forces_noQEq( reax_system *system, control_params *control,
workspace->realloc.num_hbonds = num_hbonds;
Validate_Lists( system, workspace, lists, data->step,
system->n, system->N, system->numH, comm );
system->n, system->N, system->numH);
}
void Estimate_Storages( reax_system *system, control_params *control,
reax_list **lists, int *Htop, int *hb_top,
int *bond_top, int *num_3body, MPI_Comm /*comm*/ )
int *bond_top, int *num_3body )
{
int i, j, pj;
int start_i, end_i;
@ -436,10 +440,9 @@ void Compute_Forces( reax_system *system, control_params *control,
reax_list **lists, output_controls *out_control,
mpi_datatypes *mpi_data )
{
MPI_Comm comm = mpi_data->world;
Init_Forces_noQEq( system, control, data, workspace,
lists, out_control, comm );
lists, out_control);
/********* bonded interactions ************/
Compute_Bonded_Forces( system, control, data, workspace,

View File

@ -36,5 +36,5 @@ void Init_Force_Functions( control_params* );
void Compute_Forces( reax_system*, control_params*, simulation_data*,
storage*, reax_list**, output_controls*, mpi_datatypes* );
void Estimate_Storages( reax_system*, control_params*, reax_list**,
int*, int*, int*, int*, MPI_Comm );
int*, int*, int*, int* );
#endif

View File

@ -36,6 +36,8 @@
#include "reaxc_tool_box.h"
#include "reaxc_vector.h"
#include "error.h"
int Init_System( reax_system *system, control_params *control, char * /*msg*/ )
{
int i;
@ -80,7 +82,7 @@ int Init_Simulation_Data( reax_system *system, control_params *control,
return SUCCESS;
}
void Init_Taper( control_params *control, storage *workspace, MPI_Comm comm )
void Init_Taper( control_params *control, storage *workspace )
{
double d1, d7;
double swa, swa2, swa3;
@ -89,15 +91,17 @@ void Init_Taper( control_params *control, storage *workspace, MPI_Comm comm )
swa = control->nonb_low;
swb = control->nonb_cut;
if (fabs( swa ) > 0.01)
fprintf( stderr, "Warning: non-zero lower Taper-radius cutoff\n" );
if (fabs( swa ) > 0.01 && control->me == 0)
control->error_ptr->warning( FLERR, "Non-zero lower Taper-radius cutoff" );
if (swb < 0) {
fprintf( stderr, "Negative upper Taper-radius cutoff\n" );
MPI_Abort( comm, INVALID_INPUT );
control->error_ptr->all(FLERR,"Negative upper Taper-radius cutoff");
}
else if( swb < 5 && control->me == 0) {
char errmsg[256];
snprintf(errmsg, 256, "Very low Taper-radius cutoff: %f", swb );
control->error_ptr->warning( FLERR, errmsg );
}
else if( swb < 5 )
fprintf( stderr, "Warning: very low Taper-radius cutoff: %f\n", swb );
d1 = swb - swa;
d7 = pow( d1, 7.0 );
@ -119,12 +123,12 @@ void Init_Taper( control_params *control, storage *workspace, MPI_Comm comm )
int Init_Workspace( reax_system *system, control_params *control,
storage *workspace, MPI_Comm comm, char *msg )
storage *workspace, char *msg )
{
int ret;
ret = Allocate_Workspace( system, control, workspace,
system->local_cap, system->total_cap, comm, msg );
system->local_cap, system->total_cap, msg );
if (ret != SUCCESS)
return ret;
@ -132,7 +136,7 @@ int Init_Workspace( reax_system *system, control_params *control,
Reset_Workspace( system, workspace );
/* Initialize the Taper function */
Init_Taper( control, workspace, comm );
Init_Taper( control, workspace);
return SUCCESS;
}
@ -156,17 +160,15 @@ int Init_Lists( reax_system *system, control_params *control,
{
int i, total_hbonds, total_bonds, bond_cap, num_3body, cap_3body, Htop;
int *hb_top, *bond_top;
MPI_Comm comm;
int mincap = system->mincap;
double safezone = system->safezone;
double saferzone = system->saferzone;
comm = mpi_data->world;
bond_top = (int*) calloc( system->total_cap, sizeof(int) );
hb_top = (int*) calloc( system->local_cap, sizeof(int) );
Estimate_Storages( system, control, lists,
&Htop, hb_top, bond_top, &num_3body, comm );
&Htop, hb_top, bond_top, &num_3body);
if (control->hbond_cut > 0) {
/* init H indexes */
@ -178,10 +180,10 @@ int Init_Lists( reax_system *system, control_params *control,
total_hbonds = (int)(MAX( total_hbonds*saferzone, mincap*MIN_HBONDS ));
if( !Make_List( system->Hcap, total_hbonds, TYP_HBOND,
*lists+HBONDS, comm ) ) {
fprintf( stderr, "not enough space for hbonds list. terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
*lists+HBONDS ) ) {
control->error_ptr->one(FLERR, "Not enough space for hbonds list.");
}
(*lists+HBONDS)->error_ptr = system->error_ptr;
}
total_bonds = 0;
@ -192,18 +194,18 @@ int Init_Lists( reax_system *system, control_params *control,
bond_cap = (int)(MAX( total_bonds*safezone, mincap*MIN_BONDS ));
if( !Make_List( system->total_cap, bond_cap, TYP_BOND,
*lists+BONDS, comm ) ) {
fprintf( stderr, "not enough space for bonds list. terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
*lists+BONDS ) ) {
control->error_ptr->one(FLERR, "Not enough space for bonds list.");
}
(*lists+BONDS)->error_ptr = system->error_ptr;
/* 3bodies list */
cap_3body = (int)(MAX( num_3body*safezone, MIN_3BODIES ));
if( !Make_List( bond_cap, cap_3body, TYP_THREE_BODY,
*lists+THREE_BODIES, comm ) ){
fprintf( stderr, "Problem in initializing angles list. Terminating!\n" );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
*lists+THREE_BODIES ) ){
control->error_ptr->one(FLERR,"Problem in initializing angles list.");
}
(*lists+THREE_BODIES)->error_ptr = system->error_ptr;
free( hb_top );
free( bond_top );
@ -217,60 +219,38 @@ void Initialize( reax_system *system, control_params *control,
mpi_datatypes *mpi_data, MPI_Comm comm )
{
char msg[MAX_STR];
char errmsg[128];
if (Init_MPI_Datatypes(system, workspace, mpi_data, comm, msg) == FAILURE) {
fprintf( stderr, "p%d: init_mpi_datatypes: could not create datatypes\n",
system->my_rank );
fprintf( stderr, "p%d: mpi_data couldn't be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"Could not create datatypes");
}
if (Init_System(system, control, msg) == FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: system could not be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"System could not be initialized");
}
if (Init_Simulation_Data( system, control, data, msg ) == FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: sim_data couldn't be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"Sim_data could not be initialized");
}
if (Init_Workspace( system, control, workspace, mpi_data->world, msg ) ==
if (Init_Workspace( system, control, workspace, msg ) ==
FAILURE) {
fprintf( stderr, "p%d:init_workspace: not enough memory\n",
system->my_rank );
fprintf( stderr, "p%d:workspace couldn't be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"Workspace could not be initialized");
}
if (Init_Lists( system, control, data, workspace, lists, mpi_data, msg ) ==
FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: system could not be initialized! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"Lists could not be initialized");
}
if (Init_Output_Files(system,control,out_control,mpi_data,msg)== FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: could not open output files! terminating...\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"Could not open output files");
}
if (control->tabulate) {
if (Init_Lookup_Tables( system, control, workspace, mpi_data, msg ) == FAILURE) {
fprintf( stderr, "p%d: %s\n", system->my_rank, msg );
fprintf( stderr, "p%d: couldn't create lookup table! terminating.\n",
system->my_rank );
MPI_Abort( mpi_data->world, CANNOT_INITIALIZE );
control->error_ptr->one(FLERR,"Lookup table could not be created");
}
}

View File

@ -29,6 +29,7 @@
#include "reaxc_types.h"
void Initialize( reax_system*, control_params*, simulation_data*, storage*,
reax_list**, output_controls*, mpi_datatypes*, MPI_Comm );
#endif

View File

@ -28,117 +28,121 @@
#include "reaxc_list.h"
#include "reaxc_tool_box.h"
#include "error.h"
/************* allocate list space ******************/
int Make_List(int n, int num_intrs, int type, reax_list *l, MPI_Comm comm)
int Make_List(int n, int num_intrs, int type, reax_list *l )
{
l->allocated = 1;
l->n = n;
l->num_intrs = num_intrs;
if (l->index) sfree(l->index, "list:index");
if (l->end_index) sfree(l->end_index, "list:end_index");
l->index = (int*) smalloc( n * sizeof(int), "list:index", comm );
l->end_index = (int*) smalloc( n * sizeof(int), "list:end_index", comm );
if (l->index) sfree(l->error_ptr, l->index, "list:index");
if (l->end_index) sfree(l->error_ptr, l->end_index, "list:end_index");
l->index = (int*) smalloc(l->error_ptr, n * sizeof(int), "list:index");
l->end_index = (int*) smalloc(l->error_ptr, n * sizeof(int), "list:end_index");
l->type = type;
switch(l->type) {
case TYP_VOID:
if (l->select.v) sfree(l->select.v, "list:v");
l->select.v = (void*) smalloc(l->num_intrs * sizeof(void*), "list:v", comm);
if (l->select.v) sfree(l->error_ptr, l->select.v, "list:v");
l->select.v = (void*) smalloc(l->error_ptr, l->num_intrs * sizeof(void*), "list:v");
break;
case TYP_THREE_BODY:
if (l->select.three_body_list) sfree(l->select.three_body_list,"list:three_bodies");
if (l->select.three_body_list) sfree(l->error_ptr, l->select.three_body_list,"list:three_bodies");
l->select.three_body_list = (three_body_interaction_data*)
smalloc( l->num_intrs * sizeof(three_body_interaction_data),
"list:three_bodies", comm );
smalloc(l->error_ptr, l->num_intrs * sizeof(three_body_interaction_data),
"list:three_bodies");
break;
case TYP_BOND:
if (l->select.bond_list) sfree(l->select.bond_list,"list:bonds");
if (l->select.bond_list) sfree(l->error_ptr, l->select.bond_list,"list:bonds");
l->select.bond_list = (bond_data*)
smalloc( l->num_intrs * sizeof(bond_data), "list:bonds", comm );
smalloc(l->error_ptr, l->num_intrs * sizeof(bond_data), "list:bonds");
break;
case TYP_DBO:
if (l->select.dbo_list) sfree(l->select.dbo_list,"list:dbonds");
if (l->select.dbo_list) sfree(l->error_ptr, l->select.dbo_list,"list:dbonds");
l->select.dbo_list = (dbond_data*)
smalloc( l->num_intrs * sizeof(dbond_data), "list:dbonds", comm );
smalloc(l->error_ptr, l->num_intrs * sizeof(dbond_data), "list:dbonds");
break;
case TYP_DDELTA:
if (l->select.dDelta_list) sfree(l->select.dDelta_list,"list:dDeltas");
if (l->select.dDelta_list) sfree(l->error_ptr, l->select.dDelta_list,"list:dDeltas");
l->select.dDelta_list = (dDelta_data*)
smalloc( l->num_intrs * sizeof(dDelta_data), "list:dDeltas", comm );
smalloc(l->error_ptr, l->num_intrs * sizeof(dDelta_data), "list:dDeltas");
break;
case TYP_FAR_NEIGHBOR:
if (l->select.far_nbr_list) sfree(l->select.far_nbr_list,"list:far_nbrs");
if (l->select.far_nbr_list) sfree(l->error_ptr, l->select.far_nbr_list,"list:far_nbrs");
l->select.far_nbr_list = (far_neighbor_data*)
smalloc(l->num_intrs * sizeof(far_neighbor_data), "list:far_nbrs", comm);
smalloc(l->error_ptr, l->num_intrs * sizeof(far_neighbor_data), "list:far_nbrs");
break;
case TYP_HBOND:
if (l->select.hbond_list) sfree(l->select.hbond_list,"list:hbonds");
if (l->select.hbond_list) sfree(l->error_ptr, l->select.hbond_list,"list:hbonds");
l->select.hbond_list = (hbond_data*)
smalloc( l->num_intrs * sizeof(hbond_data), "list:hbonds", comm );
smalloc(l->error_ptr, l->num_intrs * sizeof(hbond_data), "list:hbonds");
break;
default:
fprintf( stderr, "ERROR: no %d list type defined!\n", l->type );
MPI_Abort( comm, INVALID_INPUT );
char errmsg[128];
snprintf(errmsg, 128, "No %d list type defined", l->type);
l->error_ptr->one(FLERR,errmsg);
}
return SUCCESS;
}
void Delete_List( reax_list *l, MPI_Comm comm )
void Delete_List( reax_list *l )
{
if (l->allocated == 0)
return;
l->allocated = 0;
sfree( l->index, "list:index" );
sfree( l->end_index, "list:end_index" );
sfree(l->error_ptr, l->index, "list:index" );
sfree(l->error_ptr, l->end_index, "list:end_index" );
l->index = NULL;
l->end_index = NULL;
switch(l->type) {
case TYP_VOID:
sfree( l->select.v, "list:v" );
sfree(l->error_ptr, l->select.v, "list:v" );
l->select.v = NULL;
break;
case TYP_HBOND:
sfree( l->select.hbond_list, "list:hbonds" );
sfree(l->error_ptr, l->select.hbond_list, "list:hbonds" );
l->select.hbond_list = NULL;
break;
case TYP_FAR_NEIGHBOR:
sfree( l->select.far_nbr_list, "list:far_nbrs" );
sfree(l->error_ptr, l->select.far_nbr_list, "list:far_nbrs" );
l->select.far_nbr_list = NULL;
break;
case TYP_BOND:
sfree( l->select.bond_list, "list:bonds" );
sfree(l->error_ptr, l->select.bond_list, "list:bonds" );
l->select.bond_list = NULL;
break;
case TYP_DBO:
sfree( l->select.dbo_list, "list:dbos" );
sfree(l->error_ptr, l->select.dbo_list, "list:dbos" );
l->select.dbo_list = NULL;
break;
case TYP_DDELTA:
sfree( l->select.dDelta_list, "list:dDeltas" );
sfree(l->error_ptr, l->select.dDelta_list, "list:dDeltas" );
l->select.dDelta_list = NULL;
break;
case TYP_THREE_BODY:
sfree( l->select.three_body_list, "list:three_bodies" );
sfree(l->error_ptr, l->select.three_body_list, "list:three_bodies" );
l->select.three_body_list = NULL;
break;
default:
fprintf( stderr, "ERROR: no %d list type defined!\n", l->type );
MPI_Abort( comm, INVALID_INPUT );
char errmsg[128];
snprintf(errmsg, 128, "No %d list type defined", l->type);
l->error_ptr->all(FLERR,errmsg);
}
}

View File

@ -29,8 +29,8 @@
#include "reaxc_types.h"
int Make_List( int, int, int, reax_list*, MPI_Comm );
void Delete_List( reax_list*, MPI_Comm );
int Make_List( int, int, int, reax_list* );
void Delete_List( reax_list* );
inline int Num_Entries(int,reax_list*);
inline int Start_Index( int, reax_list* );

View File

@ -50,19 +50,18 @@ void Tridiagonal_Solve( const double *a, const double *b,
}
void Natural_Cubic_Spline( const double *h, const double *f,
cubic_spline_coef *coef, unsigned int n,
MPI_Comm comm )
void Natural_Cubic_Spline( LAMMPS_NS::Error* error_ptr, const double *h, const double *f,
cubic_spline_coef *coef, unsigned int n )
{
int i;
double *a, *b, *c, *d, *v;
/* allocate space for the linear system */
a = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
b = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
c = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
d = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
v = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
a = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
b = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
c = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
d = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
v = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
/* build the linear system */
a[0] = a[1] = a[n-1] = 0;
@ -92,28 +91,27 @@ void Natural_Cubic_Spline( const double *h, const double *f,
coef[i-1].a = f[i];
}
sfree( a, "cubic_spline:a" );
sfree( b, "cubic_spline:b" );
sfree( c, "cubic_spline:c" );
sfree( d, "cubic_spline:d" );
sfree( v, "cubic_spline:v" );
sfree(error_ptr, a, "cubic_spline:a" );
sfree(error_ptr, b, "cubic_spline:b" );
sfree(error_ptr, c, "cubic_spline:c" );
sfree(error_ptr, d, "cubic_spline:d" );
sfree(error_ptr, v, "cubic_spline:v" );
}
void Complete_Cubic_Spline( const double *h, const double *f, double v0, double vlast,
cubic_spline_coef *coef, unsigned int n,
MPI_Comm comm )
void Complete_Cubic_Spline( LAMMPS_NS::Error* error_ptr, const double *h, const double *f, double v0, double vlast,
cubic_spline_coef *coef, unsigned int n )
{
int i;
double *a, *b, *c, *d, *v;
/* allocate space for the linear system */
a = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
b = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
c = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
d = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
v = (double*) smalloc( n * sizeof(double), "cubic_spline:a", comm );
a = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
b = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
c = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
d = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
v = (double*) smalloc(error_ptr, n * sizeof(double), "cubic_spline:a");
/* build the linear system */
a[0] = 0;
@ -142,11 +140,11 @@ void Complete_Cubic_Spline( const double *h, const double *f, double v0, double
coef[i-1].a = f[i];
}
sfree( a, "cubic_spline:a" );
sfree( b, "cubic_spline:b" );
sfree( c, "cubic_spline:c" );
sfree( d, "cubic_spline:d" );
sfree( v, "cubic_spline:v" );
sfree(error_ptr, a, "cubic_spline:a" );
sfree(error_ptr, b, "cubic_spline:b" );
sfree(error_ptr, c, "cubic_spline:c" );
sfree(error_ptr, d, "cubic_spline:d" );
sfree(error_ptr, v, "cubic_spline:v" );
}
@ -159,35 +157,33 @@ int Init_Lookup_Tables( reax_system *system, control_params *control,
double dr;
double *h, *fh, *fvdw, *fele, *fCEvd, *fCEclmb;
double v0_vdw, v0_ele, vlast_vdw, vlast_ele;
MPI_Comm comm;
/* initializations */
v0_vdw = 0;
v0_ele = 0;
vlast_vdw = 0;
vlast_ele = 0;
comm = mpi_data->world;
num_atom_types = system->reax_param.num_atom_types;
dr = control->nonb_cut / control->tabulate;
h = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:h", comm );
smalloc(system->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:h");
fh = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fh", comm );
smalloc(system->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fh");
fvdw = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fvdw", comm );
smalloc(system->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fvdw");
fCEvd = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fCEvd", comm );
smalloc(system->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fCEvd");
fele = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fele", comm );
smalloc(system->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fele");
fCEclmb = (double*)
smalloc( (control->tabulate+2) * sizeof(double), "lookup:fCEclmb", comm );
smalloc(system->error_ptr, (control->tabulate+2) * sizeof(double), "lookup:fCEclmb");
LR = (LR_lookup_table**)
scalloc( num_atom_types, sizeof(LR_lookup_table*), "lookup:LR", comm );
scalloc(system->error_ptr, num_atom_types, sizeof(LR_lookup_table*), "lookup:LR");
for( i = 0; i < num_atom_types; ++i )
LR[i] = (LR_lookup_table*)
scalloc( num_atom_types, sizeof(LR_lookup_table), "lookup:LR[i]", comm );
scalloc(system->error_ptr, num_atom_types, sizeof(LR_lookup_table), "lookup:LR[i]");
for( i = 0; i < MAX_ATOM_TYPES; ++i )
existing_types[i] = 0;
@ -207,22 +203,18 @@ int Init_Lookup_Tables( reax_system *system, control_params *control,
LR[i][j].dx = dr;
LR[i][j].inv_dx = control->tabulate / control->nonb_cut;
LR[i][j].y = (LR_data*)
smalloc( LR[i][j].n * sizeof(LR_data), "lookup:LR[i,j].y", comm );
smalloc(system->error_ptr, LR[i][j].n * sizeof(LR_data), "lookup:LR[i,j].y");
LR[i][j].H = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].H" ,
comm );
smalloc(system->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].H");
LR[i][j].vdW = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].vdW",
comm);
smalloc(system->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].vdW");
LR[i][j].CEvd = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].CEvd",
comm);
smalloc(system->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].CEvd");
LR[i][j].ele = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].ele",
comm );
smalloc(system->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),"lookup:LR[i,j].ele");
LR[i][j].CEclmb = (cubic_spline_coef*)
smalloc( LR[i][j].n*sizeof(cubic_spline_coef),
"lookup:LR[i,j].CEclmb", comm );
smalloc(system->error_ptr, LR[i][j].n*sizeof(cubic_spline_coef),
"lookup:LR[i,j].CEclmb");
for( r = 1; r <= control->tabulate; ++r ) {
LR_vdW_Coulomb( system, workspace, control, i, j, r * dr, &(LR[i][j].y[r]) );
@ -246,24 +238,20 @@ int Init_Lookup_Tables( reax_system *system, control_params *control,
vlast_vdw = fCEvd[r-1];
vlast_ele = fele[r-1];
Natural_Cubic_Spline( &h[1], &fh[1],
&(LR[i][j].H[1]), control->tabulate+1, comm );
Natural_Cubic_Spline( control->error_ptr, &h[1], &fh[1],
&(LR[i][j].H[1]), control->tabulate+1);
Complete_Cubic_Spline( &h[1], &fvdw[1], v0_vdw, vlast_vdw,
&(LR[i][j].vdW[1]), control->tabulate+1,
comm );
Complete_Cubic_Spline( control->error_ptr, &h[1], &fvdw[1], v0_vdw, vlast_vdw,
&(LR[i][j].vdW[1]), control->tabulate+1);
Natural_Cubic_Spline( &h[1], &fCEvd[1],
&(LR[i][j].CEvd[1]), control->tabulate+1,
comm );
Natural_Cubic_Spline( control->error_ptr, &h[1], &fCEvd[1],
&(LR[i][j].CEvd[1]), control->tabulate+1);
Complete_Cubic_Spline( &h[1], &fele[1], v0_ele, vlast_ele,
&(LR[i][j].ele[1]), control->tabulate+1,
comm );
Complete_Cubic_Spline( control->error_ptr, &h[1], &fele[1], v0_ele, vlast_ele,
&(LR[i][j].ele[1]), control->tabulate+1);
Natural_Cubic_Spline( &h[1], &fCEclmb[1],
&(LR[i][j].CEclmb[1]), control->tabulate+1,
comm );
Natural_Cubic_Spline( control->error_ptr, &h[1], &fCEclmb[1],
&(LR[i][j].CEclmb[1]), control->tabulate+1);
} else {
LR[i][j].n = 0;
}
@ -291,14 +279,14 @@ void Deallocate_Lookup_Tables( reax_system *system )
for( i = 0; i < ntypes; ++i ) {
for( j = i; j < ntypes; ++j )
if (LR[i][j].n) {
sfree( LR[i][j].y, "LR[i,j].y" );
sfree( LR[i][j].H, "LR[i,j].H" );
sfree( LR[i][j].vdW, "LR[i,j].vdW" );
sfree( LR[i][j].CEvd, "LR[i,j].CEvd" );
sfree( LR[i][j].ele, "LR[i,j].ele" );
sfree( LR[i][j].CEclmb, "LR[i,j].CEclmb" );
sfree(system->error_ptr, LR[i][j].y, "LR[i,j].y" );
sfree(system->error_ptr, LR[i][j].H, "LR[i,j].H" );
sfree(system->error_ptr, LR[i][j].vdW, "LR[i,j].vdW" );
sfree(system->error_ptr, LR[i][j].CEvd, "LR[i,j].CEvd" );
sfree(system->error_ptr, LR[i][j].ele, "LR[i,j].ele" );
sfree(system->error_ptr, LR[i][j].CEclmb, "LR[i,j].CEclmb" );
}
sfree( LR[i], "LR[i]" );
sfree(system->error_ptr, LR[i], "LR[i]" );
}
sfree( LR, "LR" );
sfree(system->error_ptr, LR, "LR" );
}

View File

@ -32,13 +32,11 @@
void Tridiagonal_Solve( const double *a, const double *b,
double *c, double *d, double *x, unsigned int n);
void Natural_Cubic_Spline( const double *h, const double *f,
cubic_spline_coef *coef, unsigned int n,
MPI_Comm comm );
void Natural_Cubic_Spline( LAMMPS_NS::Error*, const double *h, const double *f,
cubic_spline_coef *coef, unsigned int n );
void Complete_Cubic_Spline( const double *h, const double *f, double v0, double vlast,
cubic_spline_coef *coef, unsigned int n,
MPI_Comm comm );
void Complete_Cubic_Spline( LAMMPS_NS::Error*, const double *h, const double *f, double v0, double vlast,
cubic_spline_coef *coef, unsigned int n );
int Init_Lookup_Tables( reax_system*, control_params*, storage*,
mpi_datatypes*, char* );

View File

@ -30,6 +30,9 @@
#include "reaxc_tool_box.h"
#include "reaxc_vector.h"
#include "error.h"
void Reset_Atoms( reax_system* system, control_params *control )
{
int i;
@ -120,8 +123,7 @@ void Reset_Workspace( reax_system *system, storage *workspace )
void Reset_Neighbor_Lists( reax_system *system, control_params *control,
storage *workspace, reax_list **lists,
MPI_Comm comm )
storage *workspace, reax_list **lists )
{
int i, total_bonds, Hindex, total_hbonds;
reax_list *bonds, *hbonds;
@ -142,10 +144,10 @@ void Reset_Neighbor_Lists( reax_system *system, control_params *control,
if (total_bonds >= bonds->num_intrs * DANGER_ZONE) {
workspace->realloc.bonds = 1;
if (total_bonds >= bonds->num_intrs) {
fprintf(stderr,
"p%d: not enough space for bonds! total=%d allocated=%d\n",
system->my_rank, total_bonds, bonds->num_intrs );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "Not enough space for bonds! total=%d allocated=%d\n",
total_bonds, bonds->num_intrs);
control->error_ptr->one(FLERR, errmsg);
}
}
}
@ -168,10 +170,10 @@ void Reset_Neighbor_Lists( reax_system *system, control_params *control,
if (total_hbonds >= hbonds->num_intrs * 0.90/*DANGER_ZONE*/) {
workspace->realloc.hbonds = 1;
if (total_hbonds >= hbonds->num_intrs) {
fprintf(stderr,
"p%d: not enough space for hbonds! total=%d allocated=%d\n",
system->my_rank, total_hbonds, hbonds->num_intrs );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "Not enough space for hbonds! total=%d allocated=%d\n",
total_hbonds, hbonds->num_intrs);
control->error_ptr->one(FLERR, errmsg);
}
}
}
@ -179,7 +181,7 @@ void Reset_Neighbor_Lists( reax_system *system, control_params *control,
void Reset( reax_system *system, control_params *control, simulation_data *data,
storage *workspace, reax_list **lists, MPI_Comm comm )
storage *workspace, reax_list **lists )
{
Reset_Atoms( system, control );
@ -187,6 +189,6 @@ void Reset( reax_system *system, control_params *control, simulation_data *data,
Reset_Workspace( system, workspace );
Reset_Neighbor_Lists( system, control, workspace, lists, comm );
Reset_Neighbor_Lists( system, control, workspace, lists );
}

View File

@ -34,7 +34,7 @@ void Reset_Simulation_Data( simulation_data*, int );
void Reset_Timing( reax_timing* );
void Reset_Workspace( reax_system*, storage* );
void Reset_Neighbor_Lists( reax_system*, control_params*, storage*,
reax_list**, MPI_Comm );
reax_list** );
void Reset( reax_system*, control_params*, simulation_data*, storage*,
reax_list**, MPI_Comm );
reax_list** );
#endif

View File

@ -27,6 +27,8 @@
#include "pair_reaxc.h"
#include "reaxc_tool_box.h"
#include "error.h"
struct timeval tim;
double t_end;
@ -53,23 +55,25 @@ int Tokenize( char* s, char*** tok )
return count;
}
/* safe malloc */
void *smalloc( rc_bigint n, const char *name, MPI_Comm comm )
void *smalloc( LAMMPS_NS::Error *error_ptr, rc_bigint n, const char *name )
{
void *ptr;
char errmsg[256];
if (n <= 0) {
fprintf( stderr, "WARNING: trying to allocate %ld bytes for array %s. ",
n, name );
fprintf( stderr, "returning NULL.\n" );
snprintf(errmsg, 256, "Trying to allocate %ld bytes for array %s. "
"returning NULL.", n, name);
error_ptr->one(FLERR,errmsg);
return NULL;
}
ptr = malloc( n );
if (ptr == NULL) {
fprintf( stderr, "ERROR: failed to allocate %ld bytes for array %s",
n, name );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
snprintf(errmsg, 256, "Failed to allocate %ld bytes for array %s", n, name);
error_ptr->one(FLERR,errmsg);
}
return ptr;
@ -77,29 +81,30 @@ void *smalloc( rc_bigint n, const char *name, MPI_Comm comm )
/* safe calloc */
void *scalloc( rc_bigint n, rc_bigint size, const char *name, MPI_Comm comm )
void *scalloc( LAMMPS_NS::Error *error_ptr, rc_bigint n, rc_bigint size, const char *name )
{
void *ptr;
char errmsg[256];
if (n <= 0) {
fprintf( stderr, "WARNING: trying to allocate %ld elements for array %s. ",
n, name );
fprintf( stderr, "returning NULL.\n" );
snprintf(errmsg, 256, "Trying to allocate %ld elements for array %s. "
"returning NULL.\n", n, name );
error_ptr->one(FLERR,errmsg);
return NULL;
}
if (size <= 0) {
fprintf( stderr, "WARNING: elements size for array %s is %ld. ",
name, size );
fprintf( stderr, "returning NULL.\n" );
snprintf(errmsg, 256, "Elements size for array %s is %ld. "
"returning NULL", name, size );
error_ptr->one(FLERR,errmsg);
return NULL;
}
ptr = calloc( n, size );
if (ptr == NULL) {
fprintf( stderr, "ERROR: failed to allocate %ld bytes for array %s",
n*size, name );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "Failed to allocate %ld bytes for array %s", n*size, name);
error_ptr->one(FLERR,errmsg);
}
return ptr;
@ -107,11 +112,12 @@ void *scalloc( rc_bigint n, rc_bigint size, const char *name, MPI_Comm comm )
/* safe free */
void sfree( void *ptr, const char *name )
void sfree( LAMMPS_NS::Error* error_ptr, void *ptr, const char *name )
{
if (ptr == NULL) {
fprintf( stderr, "WARNING: trying to free the already NULL pointer %s!\n",
name );
char errmsg[256];
snprintf(errmsg, 256, "Trying to free the already NULL pointer %s", name );
error_ptr->one(FLERR,errmsg);
return;
}

View File

@ -37,7 +37,7 @@ double Get_Time( );
int Tokenize( char*, char*** );
/* from lammps */
void *smalloc( rc_bigint, const char*, MPI_Comm );
void *scalloc( rc_bigint, rc_bigint, const char*, MPI_Comm );
void sfree( void*, const char* );
void *smalloc( LAMMPS_NS::Error*, rc_bigint, const char* );
void *scalloc( LAMMPS_NS::Error*, rc_bigint, rc_bigint, const char* );
void sfree( LAMMPS_NS::Error*, void*, const char* );
#endif

View File

@ -29,8 +29,9 @@
#include "reaxc_list.h"
#include "reaxc_tool_box.h"
int Reallocate_Output_Buffer( output_controls *out_control, int req_space,
MPI_Comm comm )
#include "error.h"
int Reallocate_Output_Buffer( LAMMPS_NS::Error *error_ptr, output_controls *out_control, int req_space )
{
if (out_control->buffer_len > 0)
free( out_control->buffer );
@ -38,10 +39,9 @@ int Reallocate_Output_Buffer( output_controls *out_control, int req_space,
out_control->buffer_len = (int)(req_space*SAFE_ZONE);
out_control->buffer = (char*) malloc(out_control->buffer_len*sizeof(char));
if (out_control->buffer == NULL) {
fprintf( stderr,
"insufficient memory for required buffer size %d. terminating!\n",
(int) (req_space*SAFE_ZONE) );
MPI_Abort( comm, INSUFFICIENT_MEMORY );
char errmsg[256];
snprintf(errmsg, 256, "Insufficient memory for required buffer size %d", (int) (req_space*SAFE_ZONE));
error_ptr->one(FLERR,errmsg);
}
return SUCCESS;
@ -83,7 +83,7 @@ int Write_Header( reax_system *system, control_params *control,
my_hdr_lines = num_hdr_lines * ( system->my_rank == MASTER_NODE );
buffer_req = my_hdr_lines * HEADER_LINE_LEN;
if (buffer_req > out_control->buffer_len * DANGER_ZONE)
Reallocate_Output_Buffer( out_control, buffer_req, mpi_data->world );
Reallocate_Output_Buffer( control->error_ptr, out_control, buffer_req );
/* only the master node writes into trajectory header */
if (system->my_rank == MASTER_NODE) {
@ -278,7 +278,7 @@ int Write_Init_Desc( reax_system *system, control_params * /*control*/,
else buffer_req = system->n * INIT_DESC_LEN + 1;
if (buffer_req > out_control->buffer_len * DANGER_ZONE)
Reallocate_Output_Buffer( out_control, buffer_req, mpi_data->world );
Reallocate_Output_Buffer( system->error_ptr, out_control, buffer_req );
out_control->line[0] = 0;
out_control->buffer[0] = 0;
@ -367,7 +367,7 @@ int Write_Frame_Header( reax_system *system, control_params *control,
my_frm_hdr_lines = num_frm_hdr_lines * ( me == MASTER_NODE );
buffer_req = my_frm_hdr_lines * HEADER_LINE_LEN;
if (buffer_req > out_control->buffer_len * DANGER_ZONE)
Reallocate_Output_Buffer( out_control, buffer_req, mpi_data->world );
Reallocate_Output_Buffer( control->error_ptr, out_control, buffer_req );
/* only the master node writes into trajectory header */
if (me == MASTER_NODE) {
@ -500,7 +500,7 @@ int Write_Atoms( reax_system *system, control_params * /*control*/,
else buffer_req = system->n * line_len + 1;
if (buffer_req > out_control->buffer_len * DANGER_ZONE)
Reallocate_Output_Buffer( out_control, buffer_req, mpi_data->world );
Reallocate_Output_Buffer( system->error_ptr, out_control, buffer_req );
/* fill in buffer */
out_control->line[0] = 0;
@ -531,9 +531,7 @@ int Write_Atoms( reax_system *system, control_params * /*control*/,
p_atom->f[0], p_atom->f[1], p_atom->f[2], p_atom->q );
break;
default:
fprintf( stderr,
"write_traj_atoms: unknown atom trajectroy format!\n");
MPI_Abort( mpi_data->world, UNKNOWN_OPTION );
system->error_ptr->one(FLERR,"Write_traj_atoms: unknown atom trajectory format");
}
strncpy( out_control->buffer + i*line_len, out_control->line, line_len+1 );
@ -592,7 +590,7 @@ int Write_Bonds(reax_system *system, control_params *control, reax_list *bonds,
else buffer_req = my_bonds * line_len + 1;
if (buffer_req > out_control->buffer_len * DANGER_ZONE)
Reallocate_Output_Buffer( out_control, buffer_req, mpi_data->world );
Reallocate_Output_Buffer( system->error_ptr, out_control, buffer_req );
/* fill in the buffer */
out_control->line[0] = 0;
@ -619,8 +617,7 @@ int Write_Bonds(reax_system *system, control_params *control, reax_list *bonds,
bo_ij->bo_data.BO_pi, bo_ij->bo_data.BO_pi2 );
break;
default:
fprintf(stderr, "write_traj_bonds: FATAL! invalid bond_info option");
MPI_Abort( mpi_data->world, UNKNOWN_OPTION );
system->error_ptr->one(FLERR, "Write_traj_bonds: FATAL! invalid bond_info option");
}
strncpy( out_control->buffer + my_bonds*line_len,
out_control->line, line_len+1 );
@ -693,7 +690,7 @@ int Write_Angles( reax_system *system, control_params *control,
else buffer_req = my_angles * line_len + 1;
if (buffer_req > out_control->buffer_len * DANGER_ZONE)
Reallocate_Output_Buffer( out_control, buffer_req, mpi_data->world );
Reallocate_Output_Buffer( system->error_ptr, out_control, buffer_req );
/* fill in the buffer */
my_angles = 0;

View File

@ -39,6 +39,9 @@
#include <sys/time.h>
#include "accelerator_kokkos.h"
namespace LAMMPS_NS { class Error;}
#if defined LMP_USER_OMP
#define OMP_TIMING 0
@ -392,8 +395,6 @@ typedef struct
double ghost_cutoff;
} boundary_cutoff;
using LAMMPS_NS::Pair;
struct _reax_system
{
reax_interaction reax_param;
@ -411,7 +412,8 @@ struct _reax_system
boundary_cutoff bndry_cuts;
reax_atom *my_atoms;
class Pair *pair_ptr;
class LAMMPS_NS::Error *error_ptr;
class LAMMPS_NS::Pair *pair_ptr;
int my_bonds;
int mincap;
double safezone, saferzone;
@ -488,6 +490,8 @@ typedef struct
int lgflag;
int enobondsflag;
class LAMMPS_NS::Error *error_ptr;
int me;
} control_params;
@ -774,6 +778,7 @@ struct _reax_list
int type;
list_type select;
class LAMMPS_NS::Error *error_ptr;
};
typedef _reax_list reax_list;

View File

@ -30,6 +30,8 @@
#include "reaxc_list.h"
#include "reaxc_vector.h"
#include "error.h"
static double Dot( double* v1, double* v2, int k )
{
double ret = 0.0;
@ -405,9 +407,10 @@ void Valence_Angles( reax_system *system, control_params *control,
if (num_thb_intrs >= thb_intrs->num_intrs * DANGER_ZONE) {
workspace->realloc.num_3body = num_thb_intrs;
if (num_thb_intrs > thb_intrs->num_intrs) {
fprintf( stderr, "step%d-ran out of space on angle_list: top=%d, max=%d",
data->step, num_thb_intrs, thb_intrs->num_intrs );
MPI_Abort( MPI_COMM_WORLD, INSUFFICIENT_MEMORY );
char errmsg[128];
snprintf(errmsg, 128, "step%d-ran out of space on angle_list: top=%d, max=%d",
data->step, num_thb_intrs, thb_intrs->num_intrs);
control->error_ptr->one(FLERR, errmsg);
}
}

View File

@ -22,10 +22,10 @@
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef SMD_MATERIAL_MODELS_H_
#define SMD_MATERIAL_MODELS_H_
#ifndef SMD_MATERIAL_MODELS_H
#define SMD_MATERIAL_MODELS_H
using namespace Eigen;
#include <Eigen/Eigen>
/*
* EOS models
@ -42,22 +42,22 @@ void PerfectGasEOS(const double gamma, const double vol, const double mass, cons
/*
* Material strength models
*/
void LinearStrength(const double mu, const Matrix3d sigmaInitial_dev, const Matrix3d d_dev, const double dt,
Matrix3d &sigmaFinal_dev__, Matrix3d &sigma_dev_rate__);
void LinearPlasticStrength(const double G, const double yieldStress, const Matrix3d sigmaInitial_dev, const Matrix3d d_dev,
const double dt, Matrix3d &sigmaFinal_dev__, Matrix3d &sigma_dev_rate__, double &plastic_strain_increment);
void LinearStrength(const double mu, const Eigen::Matrix3d sigmaInitial_dev, const Eigen::Matrix3d d_dev, const double dt,
Eigen::Matrix3d &sigmaFinal_dev__, Eigen::Matrix3d &sigma_dev_rate__);
void LinearPlasticStrength(const double G, const double yieldStress, const Eigen::Matrix3d sigmaInitial_dev, const Eigen::Matrix3d d_dev,
const double dt, Eigen::Matrix3d &sigmaFinal_dev__, Eigen::Matrix3d &sigma_dev_rate__, double &plastic_strain_increment);
void JohnsonCookStrength(const double G, const double cp, const double espec, const double A, const double B, const double a,
const double C, const double epdot0, const double T0, const double Tmelt, const double M, const double dt, const double ep,
const double epdot, const Matrix3d sigmaInitial_dev, const Matrix3d d_dev, Matrix3d &sigmaFinal_dev__,
Matrix3d &sigma_dev_rate__, double &plastic_strain_increment);
const double epdot, const Eigen::Matrix3d sigmaInitial_dev, const Eigen::Matrix3d d_dev, Eigen::Matrix3d &sigmaFinal_dev__,
Eigen::Matrix3d &sigma_dev_rate__, double &plastic_strain_increment);
/*
* Damage models
*/
bool IsotropicMaxStrainDamage(const Matrix3d E, const double maxStrain);
bool IsotropicMaxStressDamage(const Matrix3d E, const double maxStrain);
double JohnsonCookFailureStrain(const double p, const Matrix3d Sdev, const double d1, const double d2, const double d3,
bool IsotropicMaxStrainDamage(const Eigen::Matrix3d E, const double maxStrain);
bool IsotropicMaxStressDamage(const Eigen::Matrix3d E, const double maxStrain);
double JohnsonCookFailureStrain(const double p, const Eigen::Matrix3d Sdev, const double d1, const double d2, const double d3,
const double d4, const double epdot0, const double epdot);

View File

@ -9,14 +9,11 @@
*
* ----------------------------------------------------------------------- */
//test
#ifndef SMD_MATH_H_
#define SMD_MATH_H_
#ifndef SMD_MATH_H
#define SMD_MATH_H
#include <Eigen/Eigen>
#include <iostream>
using namespace Eigen;
using namespace std;
namespace SMD_Math {
static inline void LimitDoubleMagnitude(double &x, const double limit) {
@ -31,8 +28,8 @@ static inline void LimitDoubleMagnitude(double &x, const double limit) {
/*
* deviator of a tensor
*/
static inline Matrix3d Deviator(const Matrix3d M) {
Matrix3d eye;
static inline Eigen::Matrix3d Deviator(const Eigen::Matrix3d M) {
Eigen::Matrix3d eye;
eye.setIdentity();
eye *= M.trace() / 3.0;
return M - eye;
@ -53,14 +50,14 @@ static inline Matrix3d Deviator(const Matrix3d M) {
* obtained again from an SVD. The rotation should proper now, i.e., det(R) = +1.
*/
static inline bool PolDec(Matrix3d M, Matrix3d &R, Matrix3d &T, bool scaleF) {
static inline bool PolDec(Eigen::Matrix3d M, Eigen::Matrix3d &R, Eigen::Matrix3d &T, bool scaleF) {
JacobiSVD<Matrix3d> svd(M, ComputeFullU | ComputeFullV); // SVD(A) = U S V*
Vector3d S_eigenvalues = svd.singularValues();
Matrix3d S = svd.singularValues().asDiagonal();
Matrix3d U = svd.matrixU();
Matrix3d V = svd.matrixV();
Matrix3d eye;
Eigen::JacobiSVD<Eigen::Matrix3d> svd(M, Eigen::ComputeFullU | Eigen::ComputeFullV); // SVD(A) = U S V*
Eigen::Vector3d S_eigenvalues = svd.singularValues();
Eigen::Matrix3d S = svd.singularValues().asDiagonal();
Eigen::Matrix3d U = svd.matrixU();
Eigen::Matrix3d V = svd.matrixV();
Eigen::Matrix3d eye;
eye.setIdentity();
// now do polar decomposition into M = R * T, where R is rotation
@ -105,16 +102,12 @@ static inline bool PolDec(Matrix3d M, Matrix3d &R, Matrix3d &T, bool scaleF) {
* Pseudo-inverse via SVD
*/
static inline void pseudo_inverse_SVD(Matrix3d &M) {
static inline void pseudo_inverse_SVD(Eigen::Matrix3d &M) {
//JacobiSVD < Matrix3d > svd(M, ComputeFullU | ComputeFullV);
JacobiSVD<Matrix3d> svd(M, ComputeFullU); // one Eigevector base is sufficient because matrix is square and symmetric
Eigen::JacobiSVD<Eigen::Matrix3d> svd(M, Eigen::ComputeFullU); // one Eigevector base is sufficient because matrix is square and symmetric
Vector3d singularValuesInv;
Vector3d singularValues = svd.singularValues();
//cout << "Here is the matrix V:" << endl << V * singularValues.asDiagonal() * U << endl;
//cout << "Its singular values are:" << endl << singularValues << endl;
Eigen::Vector3d singularValuesInv;
Eigen::Vector3d singularValues = svd.singularValues();
double pinvtoler = 1.0e-16; // 2d machining example goes unstable if this value is increased (1.0e-16).
for (int row = 0; row < 3; row++) {
@ -126,39 +119,19 @@ static inline void pseudo_inverse_SVD(Matrix3d &M) {
}
M = svd.matrixU() * singularValuesInv.asDiagonal() * svd.matrixU().transpose();
// JacobiSVD < Matrix3d > svd(M, ComputeFullU | ComputeFullV);
//
// Vector3d singularValuesInv;
// Vector3d singularValues = svd.singularValues();
//
// //cout << "Here is the matrix V:" << endl << V * singularValues.asDiagonal() * U << endl;
// //cout << "Its singular values are:" << endl << singularValues << endl;
//
// double pinvtoler = 1.0e-16; // 2d machining example goes unstable if this value is increased (1.0e-16).
// for (int row = 0; row < 3; row++) {
// if (singularValues(row) > pinvtoler) {
// singularValuesInv(row) = 1.0 / singularValues(row);
// } else {
// singularValuesInv(row) = 0.0;
// }
// }
//
// M = svd.matrixU() * singularValuesInv.asDiagonal() * svd.matrixV().transpose();
}
/*
* test if two matrices are equal
*/
static inline double TestMatricesEqual(Matrix3d A, Matrix3d B, double eps) {
Matrix3d diff;
static inline double TestMatricesEqual(Eigen::Matrix3d A, Eigen::Matrix3d B, double eps) {
Eigen::Matrix3d diff;
diff = A - B;
double norm = diff.norm();
if (norm > eps) {
printf("Matrices A and B are not equal! The L2-norm difference is: %g\n", norm);
cout << "Here is matrix A:" << endl << A << endl;
cout << "Here is matrix B:" << endl << B << endl;
std::cout << "Matrices A and B are not equal! The L2-norm difference is: " << norm << "\n"
<< "Here is matrix A:\n" << A << "\n"
<< "Here is matrix B:\n" << B << std::endl;
}
return norm;
}
@ -167,12 +140,12 @@ static inline double TestMatricesEqual(Matrix3d A, Matrix3d B, double eps) {
Limit eigenvalues of a matrix to upper and lower bounds.
------------------------------------------------------------------------- */
static inline Matrix3d LimitEigenvalues(Matrix3d S, double limitEigenvalue) {
static inline Eigen::Matrix3d LimitEigenvalues(Eigen::Matrix3d S, double limitEigenvalue) {
/*
* compute Eigenvalues of matrix S
*/
SelfAdjointEigenSolver < Matrix3d > es;
Eigen::SelfAdjointEigenSolver < Eigen::Matrix3d > es;
es.compute(S);
double max_eigenvalue = es.eigenvalues().maxCoeff();
@ -183,17 +156,17 @@ static inline Matrix3d LimitEigenvalues(Matrix3d S, double limitEigenvalue) {
if ((amax_eigenvalue > limitEigenvalue) || (amin_eigenvalue > limitEigenvalue)) {
if (amax_eigenvalue > amin_eigenvalue) { // need to scale with max_eigenvalue
double scale = amax_eigenvalue / limitEigenvalue;
Matrix3d V = es.eigenvectors();
Matrix3d S_diag = V.inverse() * S * V; // diagonalized input matrix
Eigen::Matrix3d V = es.eigenvectors();
Eigen::Matrix3d S_diag = V.inverse() * S * V; // diagonalized input matrix
S_diag /= scale;
Matrix3d S_scaled = V * S_diag * V.inverse(); // undiagonalize matrix
Eigen::Matrix3d S_scaled = V * S_diag * V.inverse(); // undiagonalize matrix
return S_scaled;
} else { // need to scale using min_eigenvalue
double scale = amin_eigenvalue / limitEigenvalue;
Matrix3d V = es.eigenvectors();
Matrix3d S_diag = V.inverse() * S * V; // diagonalized input matrix
Eigen::Matrix3d V = es.eigenvectors();
Eigen::Matrix3d S_diag = V.inverse() * S * V; // diagonalized input matrix
S_diag /= scale;
Matrix3d S_scaled = V * S_diag * V.inverse(); // undiagonalize matrix
Eigen::Matrix3d S_scaled = V * S_diag * V.inverse(); // undiagonalize matrix
return S_scaled;
}
} else { // limiting does not apply
@ -201,17 +174,17 @@ static inline Matrix3d LimitEigenvalues(Matrix3d S, double limitEigenvalue) {
}
}
static inline bool LimitMinMaxEigenvalues(Matrix3d &S, double min, double max) {
static inline bool LimitMinMaxEigenvalues(Eigen::Matrix3d &S, double min, double max) {
/*
* compute Eigenvalues of matrix S
*/
SelfAdjointEigenSolver < Matrix3d > es;
Eigen::SelfAdjointEigenSolver < Eigen::Matrix3d > es;
es.compute(S);
if ((es.eigenvalues().maxCoeff() > max) || (es.eigenvalues().minCoeff() < min)) {
Matrix3d S_diag = es.eigenvalues().asDiagonal();
Matrix3d V = es.eigenvectors();
Eigen::Matrix3d S_diag = es.eigenvalues().asDiagonal();
Eigen::Matrix3d V = es.eigenvectors();
for (int i = 0; i < 3; i++) {
if (S_diag(i, i) < min) {
//printf("limiting eigenvalue %f --> %f\n", S_diag(i, i), min);
@ -229,10 +202,10 @@ static inline bool LimitMinMaxEigenvalues(Matrix3d &S, double min, double max) {
}
}
static inline void reconstruct_rank_deficient_shape_matrix(Matrix3d &K) {
static inline void reconstruct_rank_deficient_shape_matrix(Eigen::Matrix3d &K) {
JacobiSVD<Matrix3d> svd(K, ComputeFullU | ComputeFullV);
Vector3d singularValues = svd.singularValues();
Eigen::JacobiSVD<Eigen::Matrix3d> svd(K, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Vector3d singularValues = svd.singularValues();
for (int i = 0; i < 3; i++) {
if (singularValues(i) < 1.0e-8) {

View File

@ -23,6 +23,16 @@
# endif
#endif
// declaration to indicate intended fallthrough cases in switch statements
// and thus silence the warnings produced by g++ -Wextra
#if defined(__GNUC__)
#define _fallthrough __attribute__ ((fallthrough))
#else
#define _fallthrough
#endif
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
/*
@ -291,17 +301,17 @@ uint32_t hashlittle( const void *key, size_t length, uint32_t initval)
/*-------------------------------- last block: affect all 32 bits of (c) */
switch(length) /* all the case statements fall through */
{
case 12: c+=((uint32_t)k[11])<<24;
case 11: c+=((uint32_t)k[10])<<16;
case 10: c+=((uint32_t)k[9])<<8;
case 9 : c+=k[8];
case 8 : b+=((uint32_t)k[7])<<24;
case 7 : b+=((uint32_t)k[6])<<16;
case 6 : b+=((uint32_t)k[5])<<8;
case 5 : b+=k[4];
case 4 : a+=((uint32_t)k[3])<<24;
case 3 : a+=((uint32_t)k[2])<<16;
case 2 : a+=((uint32_t)k[1])<<8;
case 12: c+=((uint32_t)k[11])<<24; _fallthrough;
case 11: c+=((uint32_t)k[10])<<16; _fallthrough;
case 10: c+=((uint32_t)k[9])<<8; _fallthrough;
case 9 : c+=k[8]; _fallthrough;
case 8 : b+=((uint32_t)k[7])<<24; _fallthrough;
case 7 : b+=((uint32_t)k[6])<<16; _fallthrough;
case 6 : b+=((uint32_t)k[5])<<8; _fallthrough;
case 5 : b+=k[4]; _fallthrough;
case 4 : a+=((uint32_t)k[3])<<24; _fallthrough;
case 3 : a+=((uint32_t)k[2])<<16; _fallthrough;
case 2 : a+=((uint32_t)k[1])<<8; _fallthrough;
case 1 : a+=k[0];
break;
case 0 : return c;

View File

@ -921,7 +921,7 @@ void Irregular::exchange_data(char *sendbuf, int nbytes, char *recvbuf)
// post all receives, starting after self copies
bigint offset = num_self*nbytes;
bigint offset = num_self*(bigint)nbytes;
for (int irecv = 0; irecv < nrecv_proc; irecv++) {
MPI_Irecv(&recvbuf[offset],num_recv[irecv]*nbytes,MPI_CHAR,
proc_recv[irecv],0,world,&request[irecv]);
@ -964,13 +964,6 @@ void Irregular::exchange_data(char *sendbuf, int nbytes, char *recvbuf)
// wait on all incoming messages
if (nrecv_proc) MPI_Waitall(nrecv_proc,request,status);
// approximate memory tally
// DEBUG lines
//bigint irregular_bytes = 2*nprocs*sizeof(int);
//irregular_bytes += maxindex*sizeof(int);
//irregular_bytes += maxbuf;
}
/* ----------------------------------------------------------------------

View File

@ -801,6 +801,17 @@ void lammps_reset_box(void *ptr, double *boxlo, double *boxhi,
Allreduce to sum vector into data across all procs
------------------------------------------------------------------------- */
#if defined(LAMMPS_BIGBIG)
void lammps_gather_atoms(void *ptr, char * /*name */,
int /*type*/, int /*count*/, void * /*data*/)
{
LAMMPS *lmp = (LAMMPS *) ptr;
BEGIN_CAPTURE
lmp->error->all(FLERR,"Library function lammps_gather_atoms() not compatible with -DLAMMPS_BIGBIG");
END_CAPTURE
}
#else
void lammps_gather_atoms(void *ptr, char *name,
int type, int count, void *data)
{
@ -905,6 +916,7 @@ void lammps_gather_atoms(void *ptr, char *name,
}
END_CAPTURE
}
#endif
/* ----------------------------------------------------------------------
gather the named atom-based entity for all atoms
@ -927,6 +939,17 @@ void lammps_gather_atoms(void *ptr, char *name,
Allgather Nlocal atoms from each proc into data
------------------------------------------------------------------------- */
#if defined(LAMMPS_BIGBIG)
void lammps_gather_atoms_concat(void *ptr, char * /*name */,
int /*type*/, int /*count*/, void * /*data*/)
{
LAMMPS *lmp = (LAMMPS *) ptr;
BEGIN_CAPTURE
lmp->error->all(FLERR,"Library function lammps_gather_atoms_concat() not compatible with -DLAMMPS_BIGBIG");
END_CAPTURE
}
#else
void lammps_gather_atoms_concat(void *ptr, char *name,
int type, int count, void *data)
{
@ -1047,6 +1070,7 @@ void lammps_gather_atoms_concat(void *ptr, char *name,
}
END_CAPTURE
}
#endif
/* ----------------------------------------------------------------------
gather the named atom-based entity for a subset of atoms
@ -1071,6 +1095,18 @@ void lammps_gather_atoms_concat(void *ptr, char *name,
Allreduce to sum vector into data across all procs
------------------------------------------------------------------------- */
#if defined(LAMMPS_BIGBIG)
void lammps_gather_atoms_subset(void *ptr, char * /*name */,
int /*type*/, int /*count*/,
int /*ndata*/, int * /*ids*/, void * /*data*/)
{
LAMMPS *lmp = (LAMMPS *) ptr;
BEGIN_CAPTURE
lmp->error->all(FLERR,"Library function lammps_gather_atoms_subset() not compatible with -DLAMMPS_BIGBIG");
END_CAPTURE
}
#else
void lammps_gather_atoms_subset(void *ptr, char *name,
int type, int count,
int ndata, int *ids, void *data)
@ -1188,6 +1224,7 @@ void lammps_gather_atoms_subset(void *ptr, char *name,
}
END_CAPTURE
}
#endif
/* ----------------------------------------------------------------------
scatter the named atom-based entity in data to all atoms
@ -1205,6 +1242,17 @@ void lammps_gather_atoms_subset(void *ptr, char *name,
loop over Natoms, if I own atom ID, set its values from data
------------------------------------------------------------------------- */
#if defined(LAMMPS_BIGBIG)
void lammps_scatter_atoms(void *ptr, char * /*name */,
int /*type*/, int /*count*/, void * /*data*/)
{
LAMMPS *lmp = (LAMMPS *) ptr;
BEGIN_CAPTURE
lmp->error->all(FLERR,"Library function lammps_scatter_atoms() not compatible with -DLAMMPS_BIGBIG");
END_CAPTURE
}
#else
void lammps_scatter_atoms(void *ptr, char *name,
int type, int count, void *data)
{
@ -1299,6 +1347,7 @@ void lammps_scatter_atoms(void *ptr, char *name,
}
END_CAPTURE
}
#endif
/* ----------------------------------------------------------------------
scatter the named atom-based entity in data to a subset of atoms
@ -1318,6 +1367,18 @@ void lammps_scatter_atoms(void *ptr, char *name,
loop over Ndata, if I own atom ID, set its values from data
------------------------------------------------------------------------- */
#if defined(LAMMPS_BIGBIG)
void lammps_scatter_atoms_subset(void *ptr, char * /*name */,
int /*type*/, int /*count*/,
int /*ndata*/, int * /*ids*/, void * /*data*/)
{
LAMMPS *lmp = (LAMMPS *) ptr;
BEGIN_CAPTURE
lmp->error->all(FLERR,"Library function lammps_scatter_atoms_subset() not compatible with -DLAMMPS_BIGBIG");
END_CAPTURE
}
#else
void lammps_scatter_atoms_subset(void *ptr, char *name,
int type, int count,
int ndata, int *ids, void *data)
@ -1420,6 +1481,7 @@ void lammps_scatter_atoms_subset(void *ptr, char *name,
}
END_CAPTURE
}
#endif
/* ----------------------------------------------------------------------
create N atoms and assign them to procs based on coords

View File

@ -24,14 +24,13 @@
inline double pow(int i, int j){
return pow((double)i,j);
}
#endif
inline double sqrt(int i){
return sqrt((double) i);
}
inline double fabs(int i){
return fabs((double) i);
}
inline double sqrt(int i){
return sqrt((double) i);
}
#endif
inline double trunc(double x) {
return x > 0 ? floor(x) : ceil(x);

View File

@ -26,7 +26,7 @@
#endif
#endif
#if defined(LMP_USER_INTEL) && !defined(LAMMPS_MEMALIGN)
#if defined(LMP_USER_INTEL) && !defined(LAMMPS_MEMALIGN) && !defined(_WIN32)
#define LAMMPS_MEMALIGN 64
#endif

View File

@ -48,7 +48,7 @@ methods:
#ifndef LAMMPS_MY_PAGE_H
#define LAMMPS_MY_PAGE_H
#if defined(LMP_USER_INTEL) && !defined(LAMMPS_MEMALIGN)
#if defined(LMP_USER_INTEL) && !defined(LAMMPS_MEMALIGN) && !defined(_WIN32)
#define LAMMPS_MEMALIGN 64
#endif

View File

@ -821,21 +821,26 @@ void Special::angle_trim()
int nsend = 0;
for (i = 0; i < nlocal; i++) {
for (j = 0; j < num_angle[i]; j++) {
if (tag[i] != angle_atom2[i][j]) continue;
m = atom->map(angle_atom1[i][j]);
if (m < 0 || m >= nlocal) nsend++;
m = atom->map(angle_atom3[i][j]);
if (m < 0 || m >= nlocal) nsend++;
if (num_angle) {
for (j = 0; j < num_angle[i]; j++) {
if (tag[i] != angle_atom2[i][j]) continue;
m = atom->map(angle_atom1[i][j]);
if (m < 0 || m >= nlocal) nsend++;
m = atom->map(angle_atom3[i][j]);
if (m < 0 || m >= nlocal) nsend++;
}
}
for (j = 0; j < num_dihedral[i]; j++) {
if (tag[i] != dihedral_atom2[i][j]) continue;
m = atom->map(dihedral_atom1[i][j]);
if (m < 0 || m >= nlocal) nsend++;
m = atom->map(dihedral_atom3[i][j]);
if (m < 0 || m >= nlocal) nsend++;
m = atom->map(dihedral_atom4[i][j]);
if (m < 0 || m >= nlocal) nsend++;
if (num_dihedral) {
for (j = 0; j < num_dihedral[i]; j++) {
if (tag[i] != dihedral_atom2[i][j]) continue;
m = atom->map(dihedral_atom1[i][j]);
if (m < 0 || m >= nlocal) nsend++;
m = atom->map(dihedral_atom3[i][j]);
if (m < 0 || m >= nlocal) nsend++;
m = atom->map(dihedral_atom4[i][j]);
if (m < 0 || m >= nlocal) nsend++;
}
}
}
@ -852,51 +857,55 @@ void Special::angle_trim()
nsend = 0;
for (i = 0; i < nlocal; i++) {
for (j = 0; j < num_angle[i]; j++) {
if (tag[i] != angle_atom2[i][j]) continue;
if (num_angle) {
for (j = 0; j < num_angle[i]; j++) {
if (tag[i] != angle_atom2[i][j]) continue;
m = atom->map(angle_atom1[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = angle_atom1[i][j] % nprocs;
inbuf[nsend].atomID = angle_atom1[i][j];
inbuf[nsend].partnerID = angle_atom3[i][j];
nsend++;
}
m = atom->map(angle_atom1[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = angle_atom1[i][j] % nprocs;
inbuf[nsend].atomID = angle_atom1[i][j];
inbuf[nsend].partnerID = angle_atom3[i][j];
nsend++;
}
m = atom->map(angle_atom3[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = angle_atom3[i][j] % nprocs;
inbuf[nsend].atomID = angle_atom3[i][j];
inbuf[nsend].partnerID = angle_atom1[i][j];
nsend++;
m = atom->map(angle_atom3[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = angle_atom3[i][j] % nprocs;
inbuf[nsend].atomID = angle_atom3[i][j];
inbuf[nsend].partnerID = angle_atom1[i][j];
nsend++;
}
}
}
for (j = 0; j < num_dihedral[i]; j++) {
if (tag[i] != dihedral_atom2[i][j]) continue;
if (num_dihedral) {
for (j = 0; j < num_dihedral[i]; j++) {
if (tag[i] != dihedral_atom2[i][j]) continue;
m = atom->map(dihedral_atom1[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = dihedral_atom1[i][j] % nprocs;
inbuf[nsend].atomID = dihedral_atom1[i][j];
inbuf[nsend].partnerID = dihedral_atom3[i][j];
nsend++;
}
m = atom->map(dihedral_atom1[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = dihedral_atom1[i][j] % nprocs;
inbuf[nsend].atomID = dihedral_atom1[i][j];
inbuf[nsend].partnerID = dihedral_atom3[i][j];
nsend++;
}
m = atom->map(dihedral_atom3[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = dihedral_atom3[i][j] % nprocs;
inbuf[nsend].atomID = dihedral_atom3[i][j];
inbuf[nsend].partnerID = dihedral_atom1[i][j];
nsend++;
}
m = atom->map(dihedral_atom3[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = dihedral_atom3[i][j] % nprocs;
inbuf[nsend].atomID = dihedral_atom3[i][j];
inbuf[nsend].partnerID = dihedral_atom1[i][j];
nsend++;
}
m = atom->map(dihedral_atom4[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = dihedral_atom4[i][j] % nprocs;
inbuf[nsend].atomID = dihedral_atom4[i][j];
inbuf[nsend].partnerID = dihedral_atom2[i][j];
nsend++;
m = atom->map(dihedral_atom4[i][j]);
if (m < 0 || m >= nlocal) {
proclist[nsend] = dihedral_atom4[i][j] % nprocs;
inbuf[nsend].atomID = dihedral_atom4[i][j];
inbuf[nsend].partnerID = dihedral_atom2[i][j];
nsend++;
}
}
}
}
@ -932,56 +941,60 @@ void Special::angle_trim()
// output datums = pairs of atoms that are 1-3 neighbors
for (i = 0; i < nlocal; i++) {
for (j = 0; j < num_angle[i]; j++) {
if (tag[i] != angle_atom2[i][j]) continue;
if (num_angle) {
for (j = 0; j < num_angle[i]; j++) {
if (tag[i] != angle_atom2[i][j]) continue;
m = atom->map(angle_atom1[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == angle_atom3[i][j]) {
flag[m][k] = 1;
break;
}
}
m = atom->map(angle_atom1[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == angle_atom3[i][j]) {
flag[m][k] = 1;
break;
}
}
m = atom->map(angle_atom3[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == angle_atom1[i][j]) {
flag[m][k] = 1;
break;
}
m = atom->map(angle_atom3[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == angle_atom1[i][j]) {
flag[m][k] = 1;
break;
}
}
}
}
for (j = 0; j < num_dihedral[i]; j++) {
if (tag[i] != dihedral_atom2[i][j]) continue;
if (num_dihedral) {
for (j = 0; j < num_dihedral[i]; j++) {
if (tag[i] != dihedral_atom2[i][j]) continue;
m = atom->map(dihedral_atom1[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == dihedral_atom3[i][j]) {
flag[m][k] = 1;
break;
}
}
m = atom->map(dihedral_atom1[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == dihedral_atom3[i][j]) {
flag[m][k] = 1;
break;
}
}
m = atom->map(dihedral_atom3[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == dihedral_atom1[i][j]) {
flag[m][k] = 1;
break;
}
}
m = atom->map(dihedral_atom3[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == dihedral_atom1[i][j]) {
flag[m][k] = 1;
break;
}
}
m = atom->map(dihedral_atom4[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == dihedral_atom2[i][j]) {
flag[m][k] = 1;
break;
}
m = atom->map(dihedral_atom4[i][j]);
if (m >= 0 && m < nlocal) {
for (k = 0; k < nspecial[m][1]; k++)
if (onethree[m][k] == dihedral_atom2[i][j]) {
flag[m][k] = 1;
break;
}
}
}
}
}
@ -1012,7 +1025,7 @@ void Special::angle_trim()
memory->destroy(flag);
// if no angles or dihedrals are defined, delete all 1-3 neighs
// if no angles or dihedrals are defined, delete all 1-3 neighs
} else {
for (i = 0; i < nlocal; i++) nspecial[i][1] = 0;
@ -1225,7 +1238,7 @@ void Special::dihedral_trim()
------------------------------------------------------------------------- */
int Special::rendezvous_ids(int n, char *inbuf,
int &flag, int *&proclist, char *&outbuf,
int &flag, int *& /*proclist*/, char *& /*outbuf*/,
void *ptr)
{
Special *sptr = (Special *) ptr;