Updating to master

This commit is contained in:
Joel Clemmer
2021-02-05 17:01:32 -07:00
1424 changed files with 53674 additions and 56307 deletions

.github/workflows/codeql-analysis.yml (new file, 47 lines)

@ -0,0 +1,47 @@
# GitHub action to run static code analysis on C++ and Python code
name: "CodeQL Code Analysis"

on:
  push:
    branches: [master]

jobs:
  analyze:
    name: Analyze
    if: ${{ github.repository == 'lammps/lammps' }}
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: ['cpp', 'python']

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}

      - name: Create Build Environment
        run: cmake -E make_directory ${{github.workspace}}/build

      - name: Building LAMMPS via CMake
        if: ${{ matrix.language == 'cpp' }}
        shell: bash
        working-directory: ${{github.workspace}}/build
        run: |
          cmake -C $GITHUB_WORKSPACE/cmake/presets/most.cmake $GITHUB_WORKSPACE/cmake
          cmake --build . --parallel 2

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1

.github/workflows/unittest-macos.yml (new file, 34 lines)

@ -0,0 +1,34 @@
# GitHub action to build LAMMPS on MacOS and run unit tests
name: "Unittest for MacOS"

on:
  push:
    branches: [master]

jobs:
  build:
    name: MacOS Unit Test
    if: ${{ github.repository == 'lammps/lammps' }}
    runs-on: macos-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Create Build Environment
        run: cmake -E make_directory ${{github.workspace}}/build

      - name: Building LAMMPS via CMake
        shell: bash
        working-directory: ${{github.workspace}}/build
        run: |
          cmake -C $GITHUB_WORKSPACE/cmake/presets/most.cmake $GITHUB_WORKSPACE/cmake \
                -DENABLE_TESTING=ON -DBUILD_SHARED_LIBS=ON -DLAMMPS_EXCEPTIONS=ON
          cmake --build . --parallel 2

      - name: Run Tests
        working-directory: ${{github.workspace}}/build
        shell: bash
        run: ctest -V


@ -107,13 +107,15 @@ option(CMAKE_VERBOSE_MAKEFILE "Generate verbose Makefiles" OFF)
set(STANDARD_PACKAGES ASPHERE BODY CLASS2 COLLOID COMPRESS DIPOLE
GRANULAR KSPACE LATTE MANYBODY MC MESSAGE MISC MLIAP MOLECULE PERI POEMS
QEQ REPLICA RIGID SHOCK SPIN SNAP SRD KIM PYTHON MSCG MPIIO VORONOI
USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-MESODPD USER-CGSDK USER-COLVARS
USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD USER-LB
USER-MANIFOLD USER-MEAMC USER-MESONT USER-MGPT USER-MISC USER-MOFFF USER-MOLFILE
USER-NETCDF USER-PHONON USER-PLUMED USER-PTM USER-QTB USER-REACTION
USER-REAXC USER-SCAFACOS USER-SDPD USER-SMD USER-SMTBQ USER-SPH USER-TALLY
USER-UEF USER-VTK USER-QUIP USER-QMMM USER-YAFF USER-ADIOS)
set(SUFFIX_PACKAGES CORESHELL USER-OMP KOKKOS OPT USER-INTEL GPU)
USER-ADIOS USER-ATC USER-AWPMD USER-BOCS USER-CGDNA USER-MESODPD USER-CGSDK
USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF USER-FEP USER-H5MD
USER-LB USER-MANIFOLD USER-MEAMC USER-MESONT USER-MGPT USER-MISC USER-MOFFF
USER-MOLFILE USER-NETCDF USER-PHONON USER-PLUMED USER-PTM USER-QTB
USER-REACTION USER-REAXC USER-SCAFACOS USER-SDPD USER-SMD USER-SMTBQ USER-SPH
USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM USER-YAFF)
set(SUFFIX_PACKAGES CORESHELL GPU KOKKOS OPT USER-INTEL USER-OMP)
foreach(PKG ${STANDARD_PACKAGES} ${SUFFIX_PACKAGES})
option(PKG_${PKG} "Build ${PKG} Package" OFF)
endforeach()
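# For illustration (hypothetical configure command, run from a sibling
# build directory): any of the package options declared by the loop
# above can be toggled at configure time, e.g.
#   cmake -D PKG_MOLECULE=on -D PKG_USER-OMP=on ../cmake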


@ -2,6 +2,7 @@ set(GPU_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/GPU)
set(GPU_SOURCES ${GPU_SOURCES_DIR}/gpu_extra.h
${GPU_SOURCES_DIR}/fix_gpu.h
${GPU_SOURCES_DIR}/fix_gpu.cpp)
target_compile_definitions(lammps PRIVATE -DLMP_GPU)
set(GPU_API "opencl" CACHE STRING "API used by GPU package")
set(GPU_API_VALUES opencl cuda hip)
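# For illustration (hypothetical configure command): the GPU backend is
# selected through this cache variable, with the value restricted to one
# of the choices listed above, e.g.
#   cmake -D PKG_GPU=on -D GPU_API=cuda ../cmake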


@ -1,4 +1,7 @@
########################################################################
# As of version 3.3.0 Kokkos requires C++14
set(CMAKE_CXX_STANDARD 14)
########################################################################
# consistency checks and Kokkos options/settings required by LAMMPS
if(Kokkos_ENABLE_CUDA)
message(STATUS "KOKKOS: Enabling CUDA LAMBDA function support")
@ -35,8 +38,8 @@ if(DOWNLOAD_KOKKOS)
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}")
include(ExternalProject)
ExternalProject_Add(kokkos_build
URL https://github.com/kokkos/kokkos/archive/3.2.01.tar.gz
URL_MD5 ba72440e285ccde05b403694ea0c92e5
URL https://github.com/kokkos/kokkos/archive/3.3.01.tar.gz
URL_MD5 08201d1c7cf5bc458ce0f5b44a629d5a
CMAKE_ARGS ${KOKKOS_LIB_BUILD_ARGS}
BUILD_BYPRODUCTS <INSTALL_DIR>/lib/libkokkoscore.a
)
@ -50,7 +53,7 @@ if(DOWNLOAD_KOKKOS)
target_link_libraries(lammps PRIVATE LAMMPS::KOKKOS)
add_dependencies(LAMMPS::KOKKOS kokkos_build)
elseif(EXTERNAL_KOKKOS)
find_package(Kokkos 3.2.01 REQUIRED CONFIG)
find_package(Kokkos 3.3.01 REQUIRED CONFIG)
target_link_libraries(lammps PRIVATE Kokkos::kokkos)
else()
set(LAMMPS_LIB_KOKKOS_SRC_DIR ${LAMMPS_LIB_SOURCE_DIR}/kokkos)


@ -94,7 +94,7 @@ $(SPHINXCONFIG)/conf.py: $(SPHINXCONFIG)/conf.py.in
-e 's,@LAMMPS_PYTHON_DIR@,$(BUILDDIR)/../python,g' \
-e 's,@LAMMPS_DOC_DIR@,$(BUILDDIR),g' $< > $@
html: xmlgen $(SPHINXCONFIG)/conf.py $(ANCHORCHECK) $(MATHJAX)
html: xmlgen $(VENV) $(SPHINXCONFIG)/conf.py $(ANCHORCHECK) $(MATHJAX)
@if [ "$(HAS_BASH)" == "NO" ] ; then echo "bash was not found at $(OSHELL)! Please use: $(MAKE) SHELL=/path/to/bash" 1>&2; exit 1; fi
@$(MAKE) $(MFLAGS) -C graphviz all
@(\
@ -118,7 +118,7 @@ html: xmlgen $(SPHINXCONFIG)/conf.py $(ANCHORCHECK) $(MATHJAX)
@rm -rf html/PDF/.[sg]*
@echo "Build finished. The HTML pages are in doc/html."
spelling: xmlgen $(VENV) $(SPHINXCONFIG)/false_positives.txt
spelling: xmlgen $(SPHINXCONFIG)/conf.py $(VENV) $(SPHINXCONFIG)/false_positives.txt
@if [ "$(HAS_BASH)" == "NO" ] ; then echo "bash was not found at $(OSHELL)! Please use: $(MAKE) SHELL=/path/to/bash" 1>&2; exit 1; fi
@(\
. $(VENV)/bin/activate ; env PYTHONWARNINGS= \


@ -424,6 +424,8 @@ INPUT = @LAMMPS_SOURCE_DIR@/utils.cpp \
@LAMMPS_SOURCE_DIR@/input.h \
@LAMMPS_SOURCE_DIR@/tokenizer.cpp \
@LAMMPS_SOURCE_DIR@/tokenizer.h \
@LAMMPS_SOURCE_DIR@/arg_info.cpp \
@LAMMPS_SOURCE_DIR@/arg_info.h \
@LAMMPS_SOURCE_DIR@/text_file_reader.cpp \
@LAMMPS_SOURCE_DIR@/text_file_reader.h \
@LAMMPS_SOURCE_DIR@/potential_file_reader.cpp \


@ -521,11 +521,14 @@ They must be specified in uppercase.
* - VEGA906
- GPU
- AMD GPU MI50/MI60 GFX906
* - VEGA908
- GPU
- AMD GPU GFX908
* - INTEL_GEN
- GPU
- Intel GPUs Gen9+
This list was last updated for version 3.2 of the Kokkos library.
This list was last updated for version 3.3 of the Kokkos library.
.. tabs::


@ -96,6 +96,7 @@ OPT.
* :doc:`eam/cd <pair_eam>`
* :doc:`eam/cd/old <pair_eam>`
* :doc:`eam/fs (gikot) <pair_eam>`
* :doc:`eam/he <pair_eam>`
* :doc:`edip (o) <pair_edip>`
* :doc:`edip/multi <pair_edip>`
* :doc:`edpd <pair_mesodpd>`
@ -262,6 +263,7 @@ OPT.
* :doc:`ufm (got) <pair_ufm>`
* :doc:`vashishta (gko) <pair_vashishta>`
* :doc:`vashishta/table (o) <pair_vashishta>`
* :doc:`wf/cut <pair_wf_cut>`
* :doc:`yukawa (gko) <pair_yukawa>`
* :doc:`yukawa/colloid (go) <pair_yukawa_colloid>`
* :doc:`zbl (gko) <pair_zbl>`


@ -162,3 +162,26 @@ LAMMPS:
triple quotes can be nested in the usual manner. See the doc pages
for those commands for examples. Only one level of nesting is
allowed, but that should be sufficient for most use cases.
.. admonition:: ASCII versus UTF-8
:class: note
LAMMPS expects and processes 7-bit ASCII text internally.
Many modern environments use UTF-8 encoding, which is a superset
of the 7-bit ASCII character table and thus mostly compatible.
However, there are several non-ASCII characters that can look
very similar to their ASCII equivalents or are invisible (so they
look like a blank), but are encoded differently. Web browsers,
PDF viewers, and document editors are known to sometimes replace
one with the other for better looking output. That can lead to
problems, for instance, when cutting and pasting input file
examples from web pages, or when using a document editor
(not a dedicated plain text editor) for writing LAMMPS inputs.
LAMMPS will try to detect this and substitute the non-ASCII
characters with their ASCII equivalents where known, and a warning
is printed when this occurs. It is recommended to avoid such
characters altogether in LAMMPS input, data, and potential files.
The replacement tables are likely incomplete and depend on users
reporting problems with processing correct-looking input that
contains UTF-8 encoded non-ASCII characters.
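As a minimal sketch of how calling code could apply this substitution, using the ``utils::has_utf8()`` and ``utils::utf8_subst()`` functions documented elsewhere in this commit (the wrapper function itself is hypothetical):

.. code-block:: C++

   #include "utils.h"
   #include <string>

   using namespace LAMMPS_NS;

   // return a copy of the line with known UTF-8 lookalikes replaced by ASCII
   std::string sanitize_line(const std::string &line)
   {
     if (utils::has_utf8(line)) return utils::utf8_subst(line);
     return line;
   }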


@ -1,68 +1,75 @@
Source files
------------
The source files of the LAMMPS code are found in two
directories of the distribution: ``src`` and ``lib``.
Most of the code is C++ but there are small numbers of files
in several other languages.
The source files of the LAMMPS code are found in two directories of the
distribution: ``src`` and ``lib``. Most of the code is written in C++
but there are a small number of files in several other languages like C,
Fortran, shell script, or Python.
The core of the code is located in the
``src`` folder and its sub-directories.
A sizable number of these files are in the ``src`` directory
itself, but there are plenty of :doc:`packages <Packages>`, which can be
included or excluded when LAMMPS is built. See the :doc:`Include
packages in build <Build_package>` section of the manual for more
information about that part of the build process. LAMMPS currently
supports building with :doc:`conventional makefiles <Build_make>` and
through :doc:`CMake <Build_cmake>` which differ in how packages are
enabled or disabled for a LAMMPS binary. The source files for each
The core of the code is located in the ``src`` folder and its
sub-directories. A sizable number of these files are in the ``src``
directory itself, but there are plenty of :doc:`packages <Packages>`,
which can be included or excluded when LAMMPS is built. See the
:doc:`Include packages in build <Build_package>` section of the manual
for more information about that part of the build process. LAMMPS
currently supports building with :doc:`conventional makefiles
<Build_make>` and through :doc:`CMake <Build_cmake>`. Those procedures
differ in how packages are enabled or disabled for inclusion into a
LAMMPS binary so they cannot be mixed. The source files for each
package are in all-uppercase sub-directories of the ``src`` folder, for
example ``src/MOLECULE`` or ``src/USER-MISC``. The ``src/STUBS``
sub-directory is not a package but contains a dummy MPI library that is
used when building a serial version of the code. The ``src/MAKE``
directory contains makefiles with settings and flags for a variety of
configuration and machines for the build process with traditional
makefiles.
directory and its sub-directories contain makefiles with settings and
flags for a variety of configurations and machines for the build
process with traditional makefiles.
The ``lib`` directory contains the source code for several supporting
libraries or files with configuration settings to use globally installed
libraries, that are required by some of the optional packages.
Each sub-directory, like ``lib/poems`` or ``lib/gpu``, contains the
source files, some of which are in different languages such as Fortran
or CUDA. These libraries are linked to during a LAMMPS build, if the
corresponding package is installed.
libraries that are required by some of the optional packages. They may
include python scripts that can transparently download additional source
code on request. Each sub-directory, like ``lib/poems`` or ``lib/gpu``,
contains the source files, some of which are in different languages such
as Fortran or CUDA. These libraries are included in the LAMMPS build
if the corresponding package is installed.
LAMMPS C++ source files almost always come in pairs, such as
``src/run.cpp`` (implementation file) and ``src/run.h`` (header file).
Each pair of files defines a C++
class, for example the :cpp:class:`LAMMPS_NS::Run` class which contains
the code invoked by the :doc:`run <run>` command in a LAMMPS input script.
As this example illustrates, source file and class names often have a
one-to-one correspondence with a command used in a LAMMPS input script.
Some source files and classes do not have a corresponding input script
Each pair of files defines a C++ class, for example the
:cpp:class:`LAMMPS_NS::Run` class which contains the code invoked by the
:doc:`run <run>` command in a LAMMPS input script. As this example
illustrates, source file and class names often have a one-to-one
correspondence with a command used in a LAMMPS input script. Some
source files and classes do not have a corresponding input script
command, e.g. ``src/force.cpp`` and the :cpp:class:`LAMMPS_NS::Force`
class. They are discussed in the next section.
A small number of C++ classes and utility functions are implemented with
only a ``.h`` file. Examples are the Pointer class or the MathVec functions.
The names of all source files are in lower case and may use the
underscore character '_' to separate words. Outside of bundled libraries
which may have different conventions, all C and C++ header files have a
``.h`` extension, all C++ files have a ``.cpp`` extension, and C files a
``.c`` extension. A small number of C++ classes and utility functions
are implemented with only a ``.h`` file. Examples are the Pointer class
or the MathVec functions.
Class topology
--------------
Though LAMMPS has a lot of source files and classes, its class topology
is relative flat, as outlined in the :ref:`class-topology` figure. Each
name refers to a class and has a pair of associated source files in the
``src`` folder, for example the class :cpp:class:`LAMMPS_NS::Memory`
corresponds to the files ``memory.cpp`` and ``memory.h``, or the class
:cpp:class:`LAMMPS_NS::AtomVec` corresponds to the files
``atom_vec.cpp`` and ``atom_vec.h``. Full lines in the figure represent
compositing: that is the class to the left holds a pointer to an
instance of the class to the right. Dashed lines instead represent
inheritance: the class to the right is derived from the class on the
left. Classes with a red boundary are not instantiated directly, but
they represent the base classes for "styles". Those "styles" make up
the bulk of the LAMMPS code and only a few typical examples are included
in the figure for demonstration purposes.
is not very deep, which can be seen from the :ref:`class-topology`
figure. In that figure, each name refers to a class and has a pair of
associated source files in the ``src`` folder, for example the class
:cpp:class:`LAMMPS_NS::Memory` corresponds to the files ``memory.cpp``
and ``memory.h``, or the class :cpp:class:`LAMMPS_NS::AtomVec`
corresponds to the files ``atom_vec.cpp`` and ``atom_vec.h``. Full
lines in the figure represent compositing: that is the class at the base
of the arrow holds a pointer to an instance of the class at the tip.
Dashed lines instead represent inheritance: the class at the tip of the
arrow is derived from the class at the base. Classes with a red boundary
are not instantiated directly, but they represent the base classes for
"styles". Those "styles" make up the bulk of the LAMMPS code and only
a few representative examples are included in the figure so it remains
readable.
.. _class-topology:
.. figure:: JPG/lammps-classes.png
@ -82,8 +89,8 @@ in the figure for demonstration purposes.
derived classes, which may also hold instances of other classes.
The :cpp:class:`LAMMPS_NS::LAMMPS` class is the topmost class and
represents what is referred to an "instance" of LAMMPS. It is a
composite holding references to instances of other core classes
represents what is generally referred to as an "instance" of LAMMPS. It
is a composite holding pointers to instances of other core classes
providing the core functionality of the MD engine in LAMMPS and through
them abstractions of the required operations. The constructor of the
LAMMPS class will instantiate those instances, process the command line
@ -91,60 +98,67 @@ flags, initialize MPI (if not already done) and set up file pointers for
input and output. The destructor will shut everything down and free all
associated memory. Thus code for the standalone LAMMPS executable in
``main.cpp`` simply initializes MPI, instantiates a single instance of
LAMMPS, and passes it the command line flags and input script. It
LAMMPS while passing it the command line flags and input script. It
deletes the LAMMPS instance after the method reading the input returns
and shuts down the MPI environment before it exits the executable.
The :cpp:class:`LAMMPS_NS::Pointers` is not shown in the
:ref:`class-topology` figure, it holds references to members of the
`LAMMPS_NS::LAMMPS`, so that all classes derived from
:cpp:class:`LAMMPS_NS::Pointers` have direct access to those reference.
From the class topology all classes with blue boundary are referenced in
this class and all classes in the second and third columns, that are not
listed as derived classes are instead derived from
:cpp:class:`LAMMPS_NS::Pointers`.
:ref:`class-topology` figure for clarity. It holds references to many
of the members of the `LAMMPS_NS::LAMMPS` class, so that all classes
derived from :cpp:class:`LAMMPS_NS::Pointers` have direct access to
those references. In the class topology, all classes with a blue
boundary are referenced in the Pointers class, and all classes in the
second and third columns that are not listed as derived classes are
instead derived from :cpp:class:`LAMMPS_NS::Pointers`. To initialize
the pointer references in Pointers, a pointer to the LAMMPS class
instance needs to be passed to the constructor; thus all constructors
of classes derived from Pointers must accept this pointer and pass it
on to the Pointers constructor.
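As an illustrative sketch of this pattern (the helper class shown here is hypothetical, not part of LAMMPS):

.. code-block:: C++

   #include "pointers.h"

   namespace LAMMPS_NS {

   // any class derived from Pointers gains access to error, memory, comm, etc.
   class SomeHelper : protected Pointers {
    public:
     SomeHelper(class LAMMPS *lmp) : Pointers(lmp) {}
   };

   }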
Since all storage is encapsulated, the LAMMPS class can also be
instantiated multiple times by a calling code, and that can be either
simultaneously or consecutively. When running in parallel with MPI,
care has to be taken, that suitable communicators are used to not
create conflicts between different instances.
Since all storage is supposed to be encapsulated (there are a few
exceptions), the LAMMPS class can also be instantiated multiple times by
a calling code. Outside of the aforementioned exceptions, those LAMMPS
instances can be used alternately. As of the time of this writing
(early 2021), LAMMPS is not yet sufficiently thread-safe for concurrent
execution. When running in parallel with MPI, care has to be taken that
suitable copies of communicators are used so as not to create conflicts
between different instances.
The LAMMPS class currently holds instances of 19 classes representing
different core functionalities There are a handful of virtual parent
classes in LAMMPS that define what LAMMPS calls ``styles``. They are
shaded red in the :ref:`class-topology` figure. Each of these are
The LAMMPS class currently (early 2021) holds instances of 19 classes
representing the core functionality. There are a handful of virtual
parent classes in LAMMPS that define what LAMMPS calls ``styles``. They
are shaded red in the :ref:`class-topology` figure. Each of these are
parents of a number of child classes that implement the interface
defined by the parent class. There are two main categories of these
``styles``: some may only have one instance active at a time (e.g. atom,
pair, bond, angle, dihedral, improper, kspace, comm) and there is a
dedicated pointer variable in the composite class that manages them.
dedicated pointer variable for each of them in the composite class.
Setups that require a mix of different such styles have to use a
*hybrid* class that manages and forwards calls to the corresponding
sub-styles for the designated subset of atoms or data. or the composite
class may have lists of class instances, e.g. Modify handles lists of
compute and fix styles, while Output handles dumps class instances.
*hybrid* class that takes the place of the one allowed instance and then
manages and forwards calls to the corresponding sub-styles for the
designated subset of atoms or data. The composite class may also have
lists of class instances, e.g. Modify handles lists of compute and fix
styles, while Output handles a list of dump class instances.
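For example, a hypothetical input mixing two pair styles must go through the *hybrid* wrapper, which owns and dispatches to the sub-style instances:

.. code-block:: LAMMPS

   # sketch: two pair styles coexist only via the hybrid wrapper
   pair_style hybrid eam lj/cut 10.0
   pair_coeff 1 1 eam Cu_u3.eam
   pair_coeff * 2 lj/cut 1.0 1.0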
The exception to this scheme are the ``command`` style classes. These
implement specific commands that can be invoked before, after, or between
runs or are commands which launch a simulation. For these an instance
of the class is created, its command() method called and then, after
completion, the class instance deleted. Examples for this are the
create_box, create_atoms, minimize, run, or velocity command styles.
implement specific commands that can be invoked before, after, or in
between runs. For these, an instance of the class is created, its
command() method is called, and then, after completion, the class
instance is deleted. Examples of this are the create_box, create_atoms,
minimize, run, or velocity command styles.
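A minimal sketch of that life cycle, loosely following how the input class dispatches commands (argument handling abbreviated):

.. code-block:: C++

   // sketch: a command style is constructed, executed once, and discarded
   {
     Run cmd(lmp);            // class behind the "run" command
     cmd.command(narg, arg);  // parse the arguments and perform the run
   }                          // instance destroyed when it goes out of scope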
For all those ``styles`` certain naming conventions are employed: for
the fix nve command the class is called FixNVE and the files are
the fix nve command the class is called FixNVE and the source files are
``fix_nve.h`` and ``fix_nve.cpp``. Similarly for fix ave/time we have
FixAveTime and ``fix_ave_time.h`` and ``fix_ave_time.cpp``. Style names
are lower case and without spaces or special characters. A suffix or
multiple appended with a forward slash '/' denotes a variant of the
corresponding class without the suffix. To connect the style name and
the class name, LAMMPS uses macros like the following ATOM\_CLASS,
PAIR\_CLASS, BOND\_CLASS, REGION\_CLASS, FIX\_CLASS, COMPUTE\_CLASS,
or DUMP\_CLASS in the corresponding header file. During compilation
files with the pattern ``style_name.h`` are created that contain include
statements including all headers of all styles of a given type that
are currently active (or "installed).
words are appended with a forward slash '/', which denotes a variant of
the corresponding class without the suffix. To connect the style name
and the class name, LAMMPS uses macros like ``AtomStyle()``,
``PairStyle()``, ``BondStyle()``, ``RegionStyle()``, and so on in the
corresponding header file. During configuration or compilation, files
with the pattern ``style_<name>.h`` are created that consist of a list
of include statements including all headers of all styles of a given
type that are currently active (or "installed").
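For illustration, a pair style header registers itself with one of these macros; the style and class names below are placeholders following the convention described above:

.. code-block:: C++

   #ifdef PAIR_CLASS
   // picked up when the generated style_pair.h is assembled
   PairStyle(my/cut,PairMyCut)
   #else
   // the regular class declaration of PairMyCut follows here
   #endif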
More details on individual classes in the :ref:`class-topology` are as
@ -152,11 +166,11 @@ follows:
- The Memory class handles allocation of all large vectors and arrays.
- The Error class prints all error and warning messages.
- The Error class prints all (terminal) error and warning messages.
- The Universe class sets up partitions of processors so that multiple
simulations can be run, each on a subset of the processors allocated
for a run, e.g. by the mpirun command.
- The Universe class sets up one or more partitions of processors so
that one or multiple simulations can be run on the processors
allocated for a run, e.g. by the mpirun command.
- The Input class reads and processes input strings and files,
stores variables, and invokes :doc:`commands <Commands_all>`.
@ -241,7 +255,8 @@ follows:
.. TODO section on "Spatial decomposition and parallel operations"
.. diagram of 3d processor grid, brick vs. tiled. local vs. ghost
.. atoms, 6-way communication with pack/unpack functions,
.. PBC as part of the communication
.. PBC as part of the communication, forward and reverse communication
.. rendezvous communication, ring communication.
.. TODO section on "Fixes, Computes, and Variables"
.. how and when data is computed and provided and how it is


@ -71,12 +71,21 @@ and parsing files or arguments.
----------
.. doxygenfunction:: strdup
:project: progguide
.. doxygenfunction:: trim
:project: progguide
.. doxygenfunction:: trim_comment
:project: progguide
.. doxygenfunction:: has_utf8
:project: progguide
.. doxygenfunction:: utf8_subst
:project: progguide
.. doxygenfunction:: count_words(const char *text)
:project: progguide
@ -286,6 +295,50 @@ This code example should produce the following output:
----------
Argument parsing classes
---------------------------
The purpose of the argument parsing classes is to simplify and unify how
arguments of commands in LAMMPS are parsed and to make abstractions of
repetitive tasks.
The :cpp:class:`LAMMPS_NS::ArgInfo` class provides an abstraction
for parsing references to compute or fix styles or variables. These
would start with a "c\_", "f\_", or "v\_" followed by the ID or name of
that instance and may be postfixed with one or two array indices
"[<number>]" with numbers > 0.
A typical code segment would look like this:
.. code-block:: C++
   :caption: Usage example for ArgInfo class

   int nvalues = 0;
   for (iarg = 0; iarg < nargnew; iarg++) {
     ArgInfo argi(arg[iarg]);

     which[nvalues] = argi.get_type();
     argindex[nvalues] = argi.get_index1();
     ids[nvalues] = argi.copy_name();

     if ((which[nvalues] == ArgInfo::UNKNOWN)
         || (which[nvalues] == ArgInfo::NONE)
         || (argi.get_dim() > 1))
       error->all(FLERR,"Illegal compute XXX command");

     nvalues++;
   }
----------
.. doxygenclass:: LAMMPS_NS::ArgInfo
:project: progguide
:members:
----------
File reader classes
-------------------

BIN doc/src/JPG/WF_LJ.jpg (new binary file, 292 KiB; not shown)


@ -14,6 +14,7 @@ This section documents the following functions:
- :cpp:func:`lammps_config_has_package`
- :cpp:func:`lammps_config_package_count`
- :cpp:func:`lammps_config_package_name`
- :cpp:func:`lammps_config_accelerator`
- :cpp:func:`lammps_has_style`
- :cpp:func:`lammps_style_count`
- :cpp:func:`lammps_style_name`
@ -126,6 +127,11 @@ approach.
-----------------------
.. doxygenfunction:: lammps_config_accelerator
:project: progguide
-----------------------
.. doxygenfunction:: lammps_has_style
:project: progguide


@ -39,7 +39,9 @@ about compile time settings and included packages and styles.
* :py:attr:`lammps.has_jpeg_support <lammps.lammps.has_jpeg_support>`
* :py:attr:`lammps.has_ffmpeg_support <lammps.lammps.has_ffmpeg_support>`
* :py:attr:`lammps.installed_packages <lammps.lammps.installed_pages>`
* :py:attr:`lammps.installed_packages <lammps.lammps.installed_packages>`
* :py:meth:`lammps.get_accelerator_config <lammps.lammps.accelerator_config>`
* :py:meth:`lammps.has_style() <lammps.lammps.has_style()>`
* :py:meth:`lammps.available_styles() <lammps.lammps.available_styles()>`
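A short sketch of querying these properties from Python (assuming a LAMMPS shared library and the ``lammps`` module are installed):

.. code-block:: python

   from lammps import lammps

   lmp = lammps()
   print(lmp.installed_packages)           # list of included packages
   print(lmp.has_style("pair", "lj/cut"))  # True if the pair style is available
   lmp.close()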


@ -69,7 +69,7 @@ this.
cd build
# configure LAMMPS compilation
cmake -C cmake/presets/minimal.cmake -D BUILD_SHARED_LIBS=on \
cmake -C ../cmake/presets/minimal.cmake -D BUILD_SHARED_LIBS=on \
-D LAMMPS_EXCEPTIONS=on -D PKG_PYTHON=on ../cmake
# compile LAMMPS
@ -97,10 +97,12 @@ this.
For a system-wide installation you need to set
``CMAKE_INSTALL_PREFIX`` to a system folder like ``/usr`` (or
``/usr/local``). The installation step (**not** the
``/usr/local``); the default is ``${HOME}/.local``. The
installation step for a system folder installation (**not** the
configuration/compilation) needs to be done with superuser
privilege, e.g. by using ``sudo cmake --install .``. The
installation folders will then by changed to:
installation folders will then be changed to (assuming ``/usr`` as
prefix):
+------------------------+---------------------------------------------------------+-------------------------------------------------------------+
| File | Location | Notes |


task). These are Serial (MPI-only for CPUs and Intel Phi), OpenMP
(threading for many-core CPUs and Intel Phi), CUDA (for NVIDIA
GPUs) and HIP (for AMD GPUs). You choose the mode at build time to
produce an executable compatible with a specific hardware.
.. admonition:: C++14 support
:class: note
Kokkos requires using a compiler that supports the C++14 standard. For
some compilers, it may be necessary to add a flag to enable C++14
support. For example, the GNU compiler uses the -std=c++14 flag. For a
list of compilers that have been tested with the Kokkos library, see the
Kokkos `README <https://github.com/kokkos/kokkos/blob/master/README.md>`_.
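As a sketch for the traditional make build (a hypothetical machine makefile; the CMake build sets the standard automatically as shown elsewhere in this commit):

.. code-block:: make

   # src/MAKE/MINE/Makefile.mymachine: add the flag to the compiler flags
   CCFLAGS = -g -O3 -std=c++14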
.. admonition:: NVIDIA CUDA support
:class: note


@ -620,4 +620,4 @@ reset_mol_ids = yes, custom_charges = no, molecule = off
.. _Gissinger2020:
**(Gissinger)** Gissinger, Jensen and Wise, Macromolecules, 53, 22, 99539961 (2020).
**(Gissinger)** Gissinger, Jensen and Wise, Macromolecules, 53, 22, 9953-9961 (2020).


@ -120,22 +120,28 @@ increase in computational resources cost.
Restart, fix_modify, output, run start/stop, minimize info
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
This fix writes the original coordinates of tethered atoms to :doc:`binary restart files <restart>`, so that the spring effect will be the
same in a restarted simulation. See the :doc:`read restart <read_restart>` command for info on how to re-specify a fix
in an input script that reads a restart file, so that the operation of
the fix continues in an uninterrupted fashion.
This fix writes the original coordinates of tethered atoms to
:doc:`binary restart files <restart>`, so that the spring effect will
be the same in a restarted simulation. See the :doc:`read restart
<read_restart>` command for info on how to re-specify a fix in an
input script that reads a restart file, so that the operation of the
fix continues in an uninterrupted fashion.
The :doc:`fix modify <fix_modify>` *energy* option is supported by this
fix to add the energy stored in the per-atom springs to the system's
potential energy as part of :doc:`thermodynamic output <thermo_style>`.
The :doc:`fix modify <fix_modify>` *energy* option is supported by
this fix to add the energy stored in the per-atom springs to the
system's potential energy as part of :doc:`thermodynamic output
<thermo_style>`.
This fix computes a global scalar and a global vector, both of which
can be accessed by various :doc:`output commands <Howto_output>`. The
scalar is an energy which is the sum of the spring energy for each
atom, where the per-atom energy is 0.5 \* k \* r\^2. The vector has 2
positions, the first one is the coupling parameter lambda and the
second one is the time derivative of lambda. The scalar and vector
values calculated by this fix are "extensive".
atom, where the per-atom energy is 0.5 \* k \* r\^2. The vector stores
2 values. The first value is the coupling parameter lambda. The
second value is the derivative of lambda with respect to the integer
timestep *s*, i.e. d lambda / ds. In order to obtain d lambda / dt,
where t is the simulation time, this second value needs to be divided
by the timestep size (e.g. 0.5 fs). The scalar and vector values
calculated by this fix are "extensive".
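As a hedged sketch of converting that second vector element into a time derivative directly in an input script (``ID`` stands for the actual ID of this fix):

.. code-block:: LAMMPS

   # divide d(lambda)/d(step) by the timestep size to obtain d(lambda)/dt
   variable dlambda_dt equal f_ID[2]/dt
   thermo_style custom step f_ID[1] v_dlambda_dt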
No parameter of this fix can be used with the *start/stop* keywords of
the :doc:`run <run>` command.


@ -10,7 +10,7 @@ Syntax
info args
* args = one or more of the following keywords: *out*\ , *all*\ , *system*\ , *memory*\ , *communication*\ , *computes*\ , *dumps*\ , *fixes*\ , *groups*\ , *regions*\ , *variables*\ , *coeffs*\ , *styles*\ , *time*\ , or *configuration*
* args = one or more of the following keywords: *out*\ , *all*\ , *system*\ , *memory*\ , *communication*\ , *computes*\ , *dumps*\ , *fixes*\ , *groups*\ , *regions*\ , *variables*\ , *coeffs*\ , *styles*\ , *time*\ , *accelerator*\ , or *configuration*
* *out* values = *screen*\ , *log*\ , *append* filename, *overwrite* filename
* *styles* values = *all*\ , *angle*\ , *atom*\ , *bond*\ , *compute*\ , *command*\ , *dump*\ , *dihedral*\ , *fix*\ , *improper*\ , *integrate*\ , *kspace*\ , *minimize*\ , *pair*\ , *region*
@ -88,6 +88,10 @@ The *coeffs* category prints a list for each defined force style
corresponding coefficients have been set. This can be very helpful
to debug error messages like "All pair coeffs are not set".
The *accelerator* category prints out information about compile time
settings of included accelerator support for the GPU, KOKKOS, USER-INTEL,
and USER-OMP packages.
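For example, a minimal invocation would be:

.. code-block:: LAMMPS

   info accelerator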
The *styles* category prints the list of styles available in the
current LAMMPS binary. It supports one of the following options
to control which category of styles is printed out:


@ -18,6 +18,7 @@
.. index:: pair_style eam/fs/kk
.. index:: pair_style eam/fs/omp
.. index:: pair_style eam/fs/opt
.. index:: pair_style eam/he
pair_style eam command
======================
@ -38,6 +39,9 @@ pair_style eam/cd/old command
pair_style eam/fs command
=========================
pair_style eam/he command
=========================
Accelerator Variants: *eam/fs/gpu*, *eam/fs/intel*, *eam/fs/kk*, *eam/fs/omp*, *eam/fs/opt*
Syntax
@ -47,7 +51,7 @@ Syntax
pair_style style
* style = *eam* or *eam/alloy* or *eam/cd* or *eam/cd/old* or *eam/fs*
* style = *eam* or *eam/alloy* or *eam/cd* or *eam/cd/old* or *eam/fs* or *eam/he*
Examples
""""""""
@ -67,6 +71,9 @@ Examples
pair_style eam/fs
pair_coeff * * NiAlH_jea.eam.fs Ni Al Ni Ni
pair_style eam/he
pair_coeff * * PdHHe.eam.he Pd H He
Description
"""""""""""
@ -104,8 +111,8 @@ are parameterized in terms of LAMMPS :doc:`metal units <units>`.
potentials, the same way that DYNAMO does. Alternatively, a single
DYNAMO *setfl* file or Finnis/Sinclair EAM file can be used by LAMMPS
to model alloy systems by invoking the *eam/alloy* or *eam/cd* or
*eam/fs* styles as described below. These files require no mixing
since they specify alloy interactions explicitly.
*eam/fs* or *eam/he* styles as described below. These files require no
mixing since they specify alloy interactions explicitly.
.. note::
@ -143,10 +150,6 @@ DYNAMO single-element *funcfl* format. If the DYNAMO file was created
by a Fortran program, it cannot have "D" values in it for exponents.
C only recognizes "e" or "E" for scientific notation.
Note that unlike for other potentials, cutoffs for EAM potentials are
not set in the pair_style or pair_coeff command; they are specified in
the EAM potential files themselves.
For style *eam* a potential file must be assigned to each I,I pair of
atom types by using one or more pair_coeff commands, each with a
single argument:
@ -336,8 +339,11 @@ distribution have a ".cdeam" suffix.
Style *eam/fs* computes pairwise interactions for metals and metal
alloys using a generalized form of EAM potentials due to Finnis and
Sinclair :ref:`(Finnis) <Finnis1>`. The total energy Ei of an atom I is
given by
Sinclair :ref:`(Finnis) <Finnis1>`. Style *eam/he* is similar to
*eam/fs* except that it allows for negative electron density in
order to capture the behavior of helium in metals :ref:`(Zhou6) <Zhou6>`.
The total energy Ei of an atom I is given by
.. math::
@ -355,36 +361,36 @@ electron density at an atomic site depending on the identity of the
element at that atomic site.
The associated :doc:`pair_coeff <pair_coeff>` command for style *eam/fs*
reads a DYNAMO *setfl* file that has been extended to include
additional rho_alpha_beta arrays of tabulated values. A discussion of
how FS EAM differs from conventional EAM alloy potentials is given in
:ref:`(Ackland1) <Ackland1>`. An example of such a potential is the same
author's Fe-P FS potential :ref:`(Ackland2) <Ackland2>`. Note that while FS
potentials always specify the embedding energy with a square root
or *eam/he* reads a DYNAMO *setfl* file that has been extended to include
additional :math:`\rho_{\alpha\beta}` arrays of tabulated values. A
discussion of how FS EAM differs from conventional EAM alloy potentials is
given in :ref:`(Ackland1) <Ackland1>`. An example of such a potential is the
same author's Fe-P FS potential :ref:`(Ackland2) <Ackland2>`. Note that while
FS potentials always specify the embedding energy with a square root
dependence on the total density, the implementation in LAMMPS does not
require that; the user can tabulate any functional form desired in the
FS potential files.
For style *eam/fs*\ , the form of the pair_coeff command is exactly the
same as for style *eam/alloy*\ , e.g.
For style *eam/fs* and *eam/he* the form of the pair_coeff command is exactly
the same as for style *eam/alloy*\ , e.g.
.. code-block:: LAMMPS
pair_coeff * * NiAlH_jea.eam.fs Ni Ni Ni Al
where there are N additional arguments after the filename, where N is
with N additional arguments after the filename, where N is
the number of LAMMPS atom types. See the :doc:`pair_coeff <pair_coeff>`
doc page for alternate ways to specify the path for the potential
file. The N values determine the mapping of LAMMPS atom types to EAM
elements in the file, as described above for style *eam/alloy*\ . As
with *eam/alloy*\ , if a mapping value is NULL, the mapping is not
performed. This can be used when an *eam/fs* potential is used as
part of the *hybrid* pair style. The NULL values are used as
performed. This can be used when an *eam/fs* or *eam/he* potential is
used as part of a *hybrid* pair style. The NULL values are used as
placeholders for atom types that will be used with other potentials.
FS EAM files include more information than the DYNAMO *setfl* format
files read by *eam/alloy*\ , in that i,j density functionals for all
pairs of elements are included as needed by the Finnis/Sinclair
FS EAM and HE EAM files include more information than the DYNAMO *setfl*
format files read by *eam/alloy*\ , in that i,j density functionals for
all pairs of elements are included as needed by the Finnis/Sinclair
formulation of the EAM.
FS EAM files in the *potentials* directory of the LAMMPS distribution
@ -417,6 +423,25 @@ eV-Angstroms) as in EAM *setfl* files. Note that in Finnis/Sinclair,
the phi(r) arrays are still symmetric, so only phi arrays for i >= j
are listed.
HE EAM files in the *potentials* directory of the LAMMPS distribution
have an ".eam.he" suffix. They are formatted as follows:
* lines 1,2,3 = comments (ignored)
* line 4: Nelements Element1 Element2 ... ElementN
* line 5: Nrho, drho, Nr, dr, cutoff, rhomax
The 5-line header section is identical to an FS EAM file
except that line 5 lists an additional value, rhomax. Unlike in FS EAM
files where embedding energies F(rho) are always defined between rho = 0
and rho = (Nrho -1)drho, F(rho) in HE EAM files are defined between
rho = rhomin and rho = rhomax. Since drho = (rhomax - rhomin)/(Nrho - 1),
rhomin = rhomax - (Nrho - 1)drho. The embedding energies F(rho) are
listed for rho = rhomin, rhomin + drho, rhomin + 2drho, ..., rhomax.
This gives users additional flexibility to define a negative rhomin and
therefore an embedding energy function that works for both positive and
negative electron densities. The format and units of these sections are
identical to the FS EAM files (see above).
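As a worked numeric example (hypothetical values, not taken from a distributed potential file): with Nrho = 5, drho = 0.25, and rhomax = 0.5,

.. math::

   \rho_{min} = \rho_{max} - (N_{\rho} - 1)\,d\rho = 0.5 - 4 \times 0.25 = -0.5

so F(rho) would be tabulated at rho = -0.5, -0.25, 0.0, 0.25, and 0.5.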
----------
.. include:: accel_styles.rst
@ -480,6 +505,10 @@ Daw, Baskes, Phys Rev B, 29, 6443 (1984).
**(Finnis)** Finnis, Sinclair, Philosophical Magazine A, 50, 45 (1984).
.. _Zhou6:
**(Zhou6)** Zhou, Bartelt, Sills, Physical Review B, 103, 014108 (2021).
.. _Stukowski:
**(Stukowski)** Stukowski, Sadigh, Erhart, Caro; Modeling Simulation


@ -159,6 +159,7 @@ accelerated styles exist.
* :doc:`eam/cd <pair_eam>` - concentration-dependent EAM
* :doc:`eam/cd/old <pair_eam>` - older two-site model for concentration-dependent EAM
* :doc:`eam/fs <pair_eam>` - Finnis-Sinclair EAM
* :doc:`eam/he <pair_eam>` - Finnis-Sinclair EAM modified for Helium in metals
* :doc:`edip <pair_edip>` - three-body EDIP potential
* :doc:`edip/multi <pair_edip>` - multi-element EDIP potential
* :doc:`edpd <pair_mesodpd>` - eDPD particle interactions
@ -326,6 +327,7 @@ accelerated styles exist.
* :doc:`ufm <pair_ufm>` -
* :doc:`vashishta <pair_vashishta>` - Vashishta 2-body and 3-body potential
* :doc:`vashishta/table <pair_vashishta>` -
* :doc:`wf/cut <pair_wf_cut>` - Wang-Frenkel Potential for short-ranged interactions
* :doc:`yukawa <pair_yukawa>` - Yukawa potential
* :doc:`yukawa/colloid <pair_yukawa_colloid>` - screened Yukawa potential for finite-size particles
* :doc:`zbl <pair_zbl>` - Ziegler-Biersack-Littmark potential


@ -16,14 +16,23 @@ pair_style tersoff/table command
Accelerator Variants: *tersoff/table/omp*
pair_style tersoff/shift command
================================
Syntax
""""""
.. code-block:: LAMMPS
pair_style style
pair_style style keywords values
* style = *tersoff* or *tersoff/table* or *tersoff/gpu* or *tersoff/omp* or *tersoff/table/omp*
* keyword = *shift*
.. parsed-literal::
*shift* value = delta
delta = negative shift in equilibrium bond length
Examples
""""""""
@ -37,6 +46,9 @@ Examples
pair_style tersoff/table
pair_coeff * * SiCGe.tersoff Si(D)
pair_style tersoff shift 0.05
pair_coeff * * Si.tersoff Si
Description
"""""""""""
@ -46,7 +58,7 @@ The *tersoff* style computes a 3-body Tersoff potential
.. math::
E & = \frac{1}{2} \sum_i \sum_{j \neq i} V_{ij} \\
V_{ij} & = f_C(r_{ij}) \left[ f_R(r_{ij}) + b_{ij} f_A(r_{ij}) \right] \\
V_{ij} & = f_C(r_{ij} + \delta) \left[ f_R(r_{ij} + \delta) + b_{ij} f_A(r_{ij} + \delta) \right] \\
f_C(r) & = \left\{ \begin{array} {r@{\quad:\quad}l}
1 & r < R - D \\
\frac{1}{2} - \frac{1}{2} \sin \left( \frac{\pi}{2} \frac{r-R}{D} \right) &
@ -56,14 +68,15 @@ The *tersoff* style computes a 3-body Tersoff potential
f_R(r) & = A \exp (-\lambda_1 r) \\
f_A(r) & = -B \exp (-\lambda_2 r) \\
b_{ij} & = \left( 1 + \beta^n {\zeta_{ij}}^n \right)^{-\frac{1}{2n}} \\
\zeta_{ij} & = \sum_{k \neq i,j} f_C(r_{ik}) g(\theta_{ijk})
\zeta_{ij} & = \sum_{k \neq i,j} f_C(r_{ik} + \delta) g \left[ \theta_{ijk}(r_{ij}, r_{ik}) \right]
\exp \left[ {\lambda_3}^m (r_{ij} - r_{ik})^m \right] \\
g(\theta) & = \gamma_{ijk} \left( 1 + \frac{c^2}{d^2} -
\frac{c^2}{\left[ d^2 + (\cos \theta - \cos \theta_0)^2\right]} \right)
where :math:`f_R` is a two-body term and :math:`f_A` includes three-body
interactions. The summations in the formula are over all neighbors
J and K of atom I within a cutoff distance = R + D.
J and K of atom I within a cutoff distance = R + D. :math:`\delta` is
an optional negative shift of the equilibrium bond length, as described below.
The *tersoff/table* style uses tabulated forms for the two-body,
environment and angular functions. Linear interpolation is performed
@ -167,6 +180,18 @@ Note that the twobody parameters in entries such as SiCC and CSiSi
are often the same, due to the common use of symmetric mixing rules,
but this is not always the case. For example, the beta and n parameters in
Tersoff_2 :ref:`(Tersoff_2) <Tersoff_21>` are not symmetric.
Similarly, the threebody parameters in entries such as SiCSi and SiSiC
are often the same, but this is not always the case, particularly
the value of R, which is sometimes typed on the
first and second elements, sometimes on the first and third elements.
Hence the need to specify R and D explicitly for all element triples.
For example, while Tersoff's notation
in Tersoff_2 :ref:`(Tersoff_2) <Tersoff_21>` is ambiguous on this point,
and properties of the zincblende lattice are the same for either choice,
Tersoff's results for rocksalt are consistent with typing on the first
and third elements. :ref:`Albe et al. <Albe>` adopt the same convention.
Conversely, the potential for B/N/C from the Cagin group
uses the opposite convention, typing on the first and second elements.
We chose the above form so as to enable users to define all commonly
used variants of the Tersoff potential. In particular, our form
@ -212,6 +237,37 @@ Many thanks to Rutuparna Narulkar, David Farrell, and Xiaowang Zhou
for helping clarify how Tersoff parameters for alloys have been
defined in various papers.
The *shift* keyword computes the energy E of the system from the same
formula as the unshifted Tersoff potential. The only modification is
that the original equilibrium bond length (:math:`r_0`) of the system
is shifted to :math:`r_0-\delta`. The minus sign arises because each
radial distance :math:`r` is replaced by :math:`r+\delta`.
The *shift* keyword is designed for simulations of closely matched van
der Waals heterostructures. For instance, consider the case of a system
with few-layer graphene atop a thick hexagonal boron nitride (h-BN)
substrate simulated using periodic boundary conditions. The experimental
lattice mismatch of ~1.8% between graphene and h-BN is not well captured
by the equilibrium lattice constants of available potentials, thus a
small in-plane strain will be introduced in the system when building a
periodic supercell. To minimize the effect of strain on simulation
results, the *shift* keyword allows adjusting the equilibrium bond
length of one of the two materials (e.g., h-BN). Validation, benchmark
tests, and applications of the *shift* keyword can be found in
:ref:`(Mandelli_1) <Mandelli1>` and :ref:`(Ouyang_1) <Ouyang5>`.
For the specific case discussed above, the force field can be defined as
.. code-block:: LAMMPS
pair_style hybrid/overlay rebo tersoff shift -0.00407 ilp/graphene/hbn 16.0 coul/shield 16.0
pair_coeff * * rebo CH.rebo NULL NULL C
pair_coeff * * tersoff BNC.tersoff B N NULL
pair_coeff * * ilp/graphene/hbn BNCH.ILP B N C
pair_coeff 1 1 coul/shield 0.70
pair_coeff 1 2 coul/shield 0.695
pair_coeff 2 2 coul/shield 0.69
----------
.. include:: accel_styles.rst
@ -228,9 +284,10 @@ described above from values in the potential file.
This pair style does not support the :doc:`pair_modify <pair_modify>`
shift, table, and tail options.
This pair style does not write its information to :doc:`binary restart files <restart>`, since it is stored in potential files. Thus, you
need to re-specify the pair_style and pair_coeff commands in an input
script that reads a restart file.
This pair style does not write its information to :doc:`binary restart
files <restart>`, since it is stored in potential files. Thus, you need
to re-specify the pair_style and pair_coeff commands in an input script
that reads a restart file.
This pair style can only be used via the *pair* keyword of the
:doc:`run_style respa <run_style>` command. It does not support the
@ -241,17 +298,24 @@ This pair style can only be used via the *pair* keyword of the
Restrictions
""""""""""""
This pair style is part of the MANYBODY package. It is only enabled
if LAMMPS was built with that package. See the :doc:`Build package <Build_package>` doc page for more info.
This pair style is part of the MANYBODY package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` doc page for more info.
This pair style requires the :doc:`newton <newton>` setting to be "on"
for pair interactions.
The *shift* keyword is not supported by the *tersoff/gpu*,
*tersoff/intel*, *tersoff/kk*, *tersoff/table* or *tersoff/table/omp*
variants.
The Tersoff potential files provided with LAMMPS (see the potentials
directory) are parameterized for metal :doc:`units <units>`. You can
use the Tersoff potential with any LAMMPS units, but you would need to
directory) are parameterized for :doc:`"metal" units <units>`. In
addition, the pair style supports converting potential parameters
on-the-fly between "metal" and "real" units. You can use the *tersoff*
pair style variants with any LAMMPS units setting, but you would need to
create your own Tersoff potential file with coefficients listed in the
appropriate units if your simulation does not use "metal" or "real"
units.
Related commands
""""""""""""""""
@ -261,7 +325,7 @@ Related commands
Default
"""""""
none
shift delta = 0.0
----------
@ -277,3 +341,11 @@ Condens. Matter, 15, 5649(2003).
.. _Tersoff_21:
**(Tersoff_2)** J. Tersoff, Phys Rev B, 39, 5566 (1989); errata (PRB 41, 3248)
.. _Mandelli1:
**(Mandelli_1)** D. Mandelli, W. Ouyang, M. Urbakh, and O. Hod, ACS Nano 13(7), 7603-7609 (2019).
.. _Ouyang5:
**(Ouyang_1)** W. Ouyang et al., J. Chem. Theory Comput. 16(1), 666-676 (2020).


@ -20,9 +20,15 @@ Syntax
.. code-block:: LAMMPS
pair_style tersoff/mod
pair_style style keywords values
pair_style tersoff/mod/c
* style = *tersoff/mod* or *tersoff/mod/c*
* keyword = *shift*
.. parsed-literal::
*shift* value = delta
delta = negative shift in equilibrium bond length
Examples
""""""""
@ -47,7 +53,7 @@ E of a system of atoms as
.. math::
E & = \frac{1}{2} \sum_i \sum_{j \neq i} V_{ij} \\
V_{ij} & = f_C(r_{ij}) \left[ f_R(r_{ij}) + b_{ij} f_A(r_{ij}) \right] \\
V_{ij} & = f_C(r_{ij} + \delta) \left[ f_R(r_{ij} + \delta) + b_{ij} f_A(r_{ij} + \delta) \right] \\
f_C(r) & = \left\{ \begin{array} {r@{\quad:\quad}l}
1 & r < R - D \\
\frac{1}{2} - \frac{9}{16} \sin \left( \frac{\pi}{2} \frac{r-R}{D} \right) - \frac{1}{16} \sin \left( \frac{3\pi}{2} \frac{r-R}{D} \right) &
@ -57,13 +63,16 @@ E of a system of atoms as
f_R(r) & = A \exp (-\lambda_1 r) \\
f_A(r) & = -B \exp (-\lambda_2 r) \\
b_{ij} & = \left( 1 + {\zeta_{ij}}^\eta \right)^{-\frac{1}{2n}} \\
\zeta_{ij} & = \sum_{k \neq i,j} f_C(r_{ik}) g(\theta_{ijk})
\zeta_{ij} & = \sum_{k \neq i,j} f_C(r_{ik} + \delta) g(\theta_{ijk})
\exp \left[ \alpha (r_{ij} - r_{ik})^\beta \right] \\
g(\theta) & = c_1 + g_o(\theta) g_a(\theta) \\
g_o(\theta) & = \frac{c_2 (h - \cos \theta)^2}{c_3 + (h - \cos \theta)^2} \\
g_a(\theta) & = 1 + c_4 \exp \left[ -c_5 (h - \cos \theta)^2 \right] \\
where :math:`f_R` is a two-body term and :math:`f_A` includes three-body interactions.
:math:`\delta` is an optional negative shift of the
equilibrium bond length, as described below.
The summations in the formula are over all neighbors J and K of atom I
within a cutoff distance = R + D.
The *tersoff/mod/c* style differs from *tersoff/mod* only in the
@ -71,7 +80,7 @@ formulation of the V_ij term, where it contains an additional c0 term.
.. math::
V_{ij} = f_C(r_{ij}) \left[ f_R(r_{ij}) + b_{ij} f_A(r_{ij}) + c_0 \right]
V_{ij} = f_C(r_{ij} + \delta) \left[ f_R(r_{ij} + \delta) + b_{ij} f_A(r_{ij} + \delta) + c_0 \right]
The modified cutoff function :math:`f_C` proposed by :ref:`(Murty) <Murty>` and
having a continuous second-order differential is employed. The
@ -156,6 +165,12 @@ the center atom in a three-body interaction and it is bonded to the
second atom and the bond is influenced by the third atom. Thus an entry
for SiSiSi means Si bonded to a Si with another Si atom influencing the bond.
The *shift* keyword computes the energy E of the system from the same formula
as the unshifted Tersoff potential. The only modification is that the original
equilibrium bond length (:math:`r_0`) of the system is shifted to :math:`r_0-\delta`.
The minus sign arises because each radial distance :math:`r` is replaced by :math:`r+\delta`.
More information on this option is given on the main :doc:`pair_tersoff <pair_tersoff>` page.
----------
.. include:: accel_styles.rst
@ -187,9 +202,13 @@ if LAMMPS was built with that package. See the :doc:`Build package <Build_packa
This pair style requires the :doc:`newton <newton>` setting to be "on"
for pair interactions.
The Tersoff/MOD potential files provided with LAMMPS (see the potentials
The *shift* keyword is not supported by the *tersoff/gpu*,
*tersoff/intel*, *tersoff/kk*, *tersoff/table* or *tersoff/table/omp*
variants.
The *tersoff/mod* potential files provided with LAMMPS (see the potentials
directory) are parameterized for metal :doc:`units <units>`. You can
use the Tersoff/MOD potential with any LAMMPS units, but you would need to
use the *tersoff/mod* pair style with any LAMMPS units, but you would need to
create your own Tersoff/MOD potential file with coefficients listed in the
appropriate units if your simulation does not use "metal" units.


@ -13,7 +13,14 @@ Syntax
.. code-block:: LAMMPS
pair_style tersoff/zbl
pair_style tersoff/zbl keywords values
* keyword = *shift*
.. parsed-literal::
*shift* value = delta
delta = negative shift in equilibrium bond length
Examples
""""""""
@ -35,16 +42,17 @@ system of atoms as
.. math::
E & = \frac{1}{2} \sum_i \sum_{j \neq i} V_{ij} \\
V_{ij} & = (1 - f_F(r_{ij})) V^{ZBL}_{ij} + f_F(r_{ij}) V^{Tersoff}_{ij} \\
f_F(r_{ij}) & = \frac{1}{1 + e^{-A_F(r_{ij} - r_C)}}\\
V_{ij} & = (1 - f_F(r_{ij} + \delta)) V^{ZBL}(r_{ij} + \delta)
+ f_F(r_{ij} + \delta) V^{Tersoff}(r_{ij} + \delta) \\
f_F(r) & = \frac{1}{1 + e^{-A_F(r - r_C)}}\\
\\
\\
V^{ZBL}_{ij} & = \frac{1}{4\pi\epsilon_0} \frac{Z_1 Z_2 \,e^2}{r_{ij}} \phi(r_{ij}/a) \\
V^{ZBL}(r) & = \frac{1}{4\pi\epsilon_0} \frac{Z_1 Z_2 \,e^2}{r} \phi(r/a) \\
a & = \frac{0.8854\,a_0}{Z_{1}^{0.23} + Z_{2}^{0.23}}\\
\phi(x) & = 0.1818e^{-3.2x} + 0.5099e^{-0.9423x} + 0.2802e^{-0.4029x} + 0.02817e^{-0.2016x}\\
\\
\\
V^{Tersoff}_{ij} & = f_C(r_{ij}) \left[ f_R(r_{ij}) + b_{ij} f_A(r_{ij}) \right] \\
V^{Tersoff}(r) & = f_C(r) \left[ f_R(r) + b_{ij} f_A(r) \right] \\
f_C(r) & = \left\{ \begin{array} {r@{\quad:\quad}l}
1 & r < R - D \\
\frac{1}{2} - \frac{1}{2} \sin \left( \frac{\pi}{2} \frac{r-R}{D} \right) &
@ -54,7 +62,7 @@ system of atoms as
f_R(r) & = A \exp (-\lambda_1 r) \\
f_A(r) & = -B \exp (-\lambda_2 r) \\
b_{ij} & = \left( 1 + \beta^n {\zeta_{ij}}^n \right)^{-\frac{1}{2n}} \\
\zeta_{ij} & = \sum_{k \neq i,j} f_C(r_{ik}) g(\theta_{ijk})
\zeta_{ij} & = \sum_{k \neq i,j} f_C(r_{ik} + \delta) g(\theta_{ijk})
\exp \left[ {\lambda_3}^m (r_{ij} - r_{ik})^m \right] \\
g(\theta) & = \gamma_{ijk} \left( 1 + \frac{c^2}{d^2} -
\frac{c^2}{\left[ d^2 + (\cos \theta - \cos \theta_0)^2\right]} \right)
@ -81,6 +89,9 @@ includes
three-body interactions. The summations in the formula are over all
neighbors J and K of atom I within a cutoff distance = R + D.
:math:`\delta` is an optional negative shift of the
equilibrium bond length, as described below.
Only a single pair_coeff command is used with the *tersoff/zbl* style
which specifies a Tersoff/ZBL potential file with parameters for all
needed elements. These are mapped to LAMMPS atom types by specifying
@ -228,6 +239,14 @@ for helping clarify how Tersoff parameters for alloys have been
defined in various papers. Also thanks to Ram Devanathan for
providing the base ZBL implementation.
The *shift* keyword computes the energy E of the system from the same
formula as the unshifted Tersoff potential. The only modification is
that the original equilibrium bond length (:math:`r_0`) of the system
is shifted to :math:`r_0-\delta`. The minus sign arises because each
radial distance :math:`r` is replaced by :math:`r+\delta`. More
information on this option is given on the main :doc:`pair_tersoff
<pair_tersoff>` page.
----------
.. include:: accel_styles.rst
@ -244,9 +263,10 @@ described above from values in the potential file.
This pair style does not support the :doc:`pair_modify <pair_modify>`
shift, table, and tail options.
This pair style does not write its information to :doc:`binary restart files <restart>`, since it is stored in potential files. Thus, you
need to re-specify the pair_style and pair_coeff commands in an input
script that reads a restart file.
This pair style does not write its information to :doc:`binary restart
files <restart>`, since it is stored in potential files. Thus, you need
to re-specify the pair_style and pair_coeff commands in an input script
that reads a restart file.
This pair style can only be used via the *pair* keyword of the
:doc:`run_style respa <run_style>` command. It does not support the
@ -257,18 +277,23 @@ This pair style can only be used via the *pair* keyword of the
Restrictions
""""""""""""
This pair style is part of the MANYBODY package. It is only enabled
if LAMMPS was built with that package. See the :doc:`Build package <Build_package>` doc page for more info.
This pair style is part of the MANYBODY package. It is only enabled if
LAMMPS was built with that package. See the :doc:`Build package
<Build_package>` doc page for more info.
This pair style requires the :doc:`newton <newton>` setting to be "on"
for pair interactions.
The Tersoff/ZBL potential files provided with LAMMPS (see the
potentials directory) are parameterized for metal :doc:`units <units>`.
You can use the Tersoff potential with any LAMMPS units, but you would
need to create your own Tersoff potential file with coefficients
listed in the appropriate units if your simulation does not use "metal"
units.
The *shift* keyword is currently not supported for the *tersoff/gpu* and
*tersoff/kk* variants of this pair style.
The tersoff/zbl potential files provided with LAMMPS (see the potentials
directory) are parameterized for :doc:`"metal" units <units>`. Also the
pair style supports converting potential file parameters on-the-fly
between "metal" and "real" units. You can use the tersoff/zbl pair
style with any LAMMPS units, but you would need to create your own
tersoff/zbl potential file with coefficients listed in the appropriate
units if your simulation does not use "metal" or "real" units.
Related commands
""""""""""""""""

doc/src/pair_wf_cut.rst (new file, 117 lines)

@ -0,0 +1,117 @@
.. index:: pair_style wf/cut
pair_style wf/cut command
===========================
Syntax
""""""
.. code-block:: LAMMPS
pair_style wf/cut cutoff
* cutoff = cutoff for wf interactions (distance units)
Examples
""""""""
.. code-block:: LAMMPS
pair_style wf/cut 2.0
pair_coeff 1 1 1.0 1.0 1 1 2.0
Description
"""""""""""
The *wf/cut* (Wang-Frenkel) style computes LJ-like potentials as
described in :ref:`Wang2020 <Wang2020>`. This potential is finite
ranged by construction and vanishes quadratically at the cutoff
distance, which avoids the truncation, shifting, and interpolation
procedures typically applied to the LJ potential. The *wf/cut* style
can be used when a typical short-ranged potential with attraction is
required. The potential is given by:
.. math::
\phi(r)= \epsilon \alpha \left(\left[{\sigma\over r}\right]^{2\mu} -1 \right)\left(\left[{r_c\over r}\right]^{2\mu}-1\right)^{2\nu}
with
.. math::
\alpha=2\nu\left(\frac{r_c}{\sigma}\right)^{2\mu}\left[\frac{1+2\nu}{2\nu\left[(r_c/\sigma)^{2\mu}-1\right]}\right]^{2\nu+1}
and
.. math::
r_{min}=r_c\left[\frac{1+2\nu}{1+2\nu(r_c/\sigma)^{2\nu}}\right]^{1/{2\nu}}
where :math:`r_c` is the cutoff distance and :math:`r_{min}` is the
location of the potential minimum.
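As a quick consistency check, evaluating the formula for
:math:`\alpha` above with :math:`\mu=\nu=1` and :math:`r_c=2\sigma`
gives

.. math::

   \alpha = 2\cdot 2^{2}\left[\frac{3}{2\,(2^{2}-1)}\right]^{3} = 8\cdot\left(\frac{1}{2}\right)^{3} = 1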
A comparison of the non-truncated Lennard-Jones 12-6 potential (red
curve) and the WF potentials with :math:`\mu=1` and :math:`\nu=1` is
shown in the figure below. The blue curve has :math:`r_c =2.0` and the
green curve has :math:`r_c =1.2`; the latter can be used to describe
colloidal interactions.
.. image:: JPG/WF_LJ.jpg
:align: center
:scale: 33%
The following coefficients must be defined for each pair of atom
types via the :doc:`pair_coeff <pair_coeff>` command as in the example
above, or in the data file or restart files read by the
:doc:`read_data <read_data>` or :doc:`read_restart <read_restart>`
commands:
* :math:`\epsilon` (energy units)
* :math:`\sigma` (distance units)
* :math:`\nu`
* :math:`\mu`
* :math:`r_c` (distance units)
The last coefficient is optional. If not specified, the global cutoff
given in the pair_style command is used. The exponents :math:`\nu` and
:math:`\mu` are positive integers, usually set to 1. There is usually
little to be gained by choosing other values of :math:`\nu` and
:math:`\mu` (see the discussion in :ref:`Wang2020 <Wang2020>`).
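Because mixing is not supported for this pair style (see below),
coefficients must be given explicitly for every pair of atom types. A
minimal sketch for a two-type system (all numerical values are
placeholders):

.. code-block:: LAMMPS

   pair_style wf/cut 2.5
   pair_coeff 1 1 1.0 1.0 1 1
   pair_coeff 1 2 0.9 1.1 1 1 2.0
   pair_coeff 2 2 1.2 0.9 1 1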
----------
**Mixing, shift, table, tail correction, restart, rRESPA info**\ :
This pair style does not support the :doc:`pair_modify <pair_modify>`
mixing and table options.
The :doc:`pair_modify <pair_modify>` tail option is not relevant for
this pair style, since the potential goes to zero at the cutoff radius.
This pair style writes its information to :doc:`binary restart files
<restart>`, so pair_style and pair_coeff commands do not need to be
specified in an input script that reads a restart file.
This pair style does not support the use of the *inner*\ , *middle*\ ,
and *outer* keywords of the :doc:`run_style respa <run_style>` command.
----------
Restrictions
""""""""""""
This pair style can only be used if LAMMPS was built with the
USER-MISC package. See the :doc:`Build package <Build_package>` doc
page for more info.
Related commands
""""""""""""""""
:doc:`pair_coeff <pair_coeff>`
**Default:** none
----------
.. _Wang2020:
**(Wang2020)** X. Wang, S. Ramirez-Hinestrosa, J. Dobnikar, and D. Frenkel, Phys. Chem. Chem. Phys. 22, 10624 (2020).

View File

@ -96,9 +96,11 @@ always include a divide by the number of atoms in the variable formula
if this is not the case.
The *flush* keyword invokes a flush operation after thermodynamic info
is written to the log file. This insures the output in that file is
current (no buffering by the OS), even if LAMMPS halts before the
simulation completes.
is written to the screen and log file. This ensures the output is
updated and not buffered (by the application), even if LAMMPS halts
before the simulation completes. Please note that this does not affect
buffering by the OS or devices, so you may still lose data if the
simulation stops due to a hardware failure.
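A minimal sketch of enabling this behavior in an input script:

.. code-block:: LAMMPS

   thermo 100
   thermo_modify flush yes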
The *line* keyword determines whether thermodynamics will be output as
a series of numeric values on one line or in a multi-line format with

View File

@ -200,6 +200,7 @@ barostatted
barostatting
Barostatting
Barrat
Bartelt
Bartels
barycenter
barye
@ -329,6 +330,7 @@ Buyl
Bybee
bz
cadetblue
Cagin
calc
calibre
caltech
@ -690,6 +692,7 @@ dmi
dnf
DNi
Dobson
Dobnikar
Dodds
docenv
dodgerblue
@ -1218,6 +1221,7 @@ Herrmann
Hertizian
hertzian
Hertzsch
heterostructures
hexahedrons
hexatic
hexorder
@ -1229,6 +1233,7 @@ Hibbs
Higdon
Hijazi
Hilger
Hinestrosa
histo
histogrammed
histogramming
@ -2473,6 +2478,9 @@ Poresag
pos
Poschel
posix
postfix
postfixed
postfixes
Postma
Potapkin
potin
@ -2678,6 +2686,8 @@ rhodo
Rhodo
rhodopsin
rhok
rhomax
rhomin
rhorho
rhosum
ri
@ -2726,6 +2736,7 @@ rNEMD
ro
Rochus
Rockett
rocksalt
Rodrigues
Rohart
Ronchetti
@ -3400,6 +3411,7 @@ WeinanE
Wennberg
Westmere
Westview
wf
wget
Whelan
whitesmoke
@ -3545,6 +3557,7 @@ zz
Zm
PowerShell
filesystems
zincblende
Zstandard
Zstd
zstd

View File

@ -112,7 +112,8 @@ steinhardt: Steinhardt-Nelson Q_l and W_l parameters using orientorder/atom
streitz: Streitz-Mintmire potential for Al2O3
tad: temperature-accelerated dynamics of vacancy diffusion in bulk Si
template: examples for using atom_style template and comparing to atom style molecular
threebody: regression test input for a variety of manybody potentials
tersoff: regression test input for Tersoff variants
threebody: regression test input for a variety of threebody potentials
vashishta: models using the Vashishta potential
voronoi: Voronoi tessellation via compute voronoi/atom command
wall: use of reflective walls with different stochastic models

View File

@ -0,0 +1,92 @@
LAMMPS (24 Dec 2020)
using 1 OpenMP thread(s) per MPI task
processors * * 1
units metal
boundary p p f
read_data adatom.data
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (17.121441 14.827603 39.319732)
1 by 1 by 1 MPI processor grid
reading atoms ...
181 atoms
read_data CPU = 0.002 seconds
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading agni potential file Al_jpc.agni with DATE: 2017-02-24
WARNING: Ignoring unknown tag 'Rs' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'neighbors' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'lambda' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 5 4 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.072 | 3.072 | 3.072 Mbytes
Step KinEng Temp
0 11.633413 500
100 4.6059941 197.96401
200 7.3700156 316.76068
300 6.0443915 259.78582
400 6.163119 264.88869
500 6.2647284 269.25582
600 5.2732533 226.64257
700 5.651448 242.89725
800 6.5572404 281.82788
900 6.0576743 260.35671
1000 6.5622234 282.04205
Loop time of 16.4158 on 1 procs for 1000 steps with 181 atoms
Performance: 2.632 ns/day, 9.120 hours/ns, 60.917 timesteps/s
97.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 16.395 | 16.395 | 16.395 | 0.0 | 99.88
Neigh | 0.013275 | 0.013275 | 0.013275 | 0.0 | 0.08
Comm | 0.0023484 | 0.0023484 | 0.0023484 | 0.0 | 0.01
Output | 0.00014842 | 0.00014842 | 0.00014842 | 0.0 | 0.00
Modify | 0.0035522 | 0.0035522 | 0.0035522 | 0.0 | 0.02
Other | | 0.001173 | | | 0.01
Nlocal: 181.000 ave 181 max 181 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 563.000 ave 563 max 563 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 19484.0 ave 19484 max 19484 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19484
Ave neighs/atom = 107.64641
Neighbor list builds = 33
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:16

View File

@ -0,0 +1,92 @@
LAMMPS (24 Dec 2020)
using 1 OpenMP thread(s) per MPI task
processors * * 1
units metal
boundary p p f
read_data adatom.data
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (17.121441 14.827603 39.319732)
2 by 2 by 1 MPI processor grid
reading atoms ...
181 atoms
read_data CPU = 0.001 seconds
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading agni potential file Al_jpc.agni with DATE: 2017-02-24
WARNING: Ignoring unknown tag 'Rs' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'neighbors' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'lambda' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 5 4 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.063 | 3.063 | 3.063 Mbytes
Step KinEng Temp
0 11.633413 500
100 4.6059939 197.964
200 7.3700154 316.76067
300 6.0443914 259.78582
400 6.1631193 264.8887
500 6.2647281 269.25581
600 5.273254 226.6426
700 5.6514484 242.89726
800 6.5572409 281.82791
900 6.0576737 260.35668
1000 6.5622233 282.04205
Loop time of 4.67437 on 4 procs for 1000 steps with 181 atoms
Performance: 9.242 ns/day, 2.597 hours/ns, 213.933 timesteps/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.0668 | 4.2058 | 4.4078 | 7.1 | 89.98
Neigh | 0.0033048 | 0.0033794 | 0.0034381 | 0.1 | 0.07
Comm | 0.2547 | 0.45656 | 0.59576 | 21.5 | 9.77
Output | 9.8817e-05 | 0.00035464 | 0.001121 | 0.0 | 0.01
Modify | 0.0059429 | 0.0060754 | 0.0061966 | 0.2 | 0.13
Other | | 0.002172 | | | 0.05
Nlocal: 45.2500 ave 52 max 40 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Nghost: 376.500 ave 382 max 366 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4871.00 ave 5578 max 4374 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 19484
Ave neighs/atom = 107.64641
Neighbor list builds = 33
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:04

View File

@ -0,0 +1,92 @@
LAMMPS (24 Dec 2020)
using 1 OpenMP thread(s) per MPI task
units metal
boundary p p p
read_data vacancy.data
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (8.0711250 8.0711250 8.0711250)
1 by 1 by 1 MPI processor grid
reading atoms ...
31 atoms
read_data CPU = 0.002 seconds
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading agni potential file Al_jpc.agni with DATE: 2017-02-24
WARNING: Ignoring unknown tag 'Rs' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'neighbors' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'lambda' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 2 2 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.241 | 4.241 | 4.241 Mbytes
Step KinEng TotEng Temp
0 3.8778043 3.8778043 1000
100 2.9986261 2.9986261 773.27936
200 3.6860313 3.6860313 950.54598
300 3.8133153 3.8133153 983.3697
400 3.7330285 3.7330285 962.6655
500 3.5875467 3.5875467 925.14897
600 3.533152 3.533152 911.12178
700 2.6509457 2.6509457 683.62028
800 3.376349 3.376349 870.68576
900 3.9036736 3.9036736 1006.6711
1000 3.0884833 3.0884833 796.45156
Loop time of 2.92678 on 1 procs for 1000 steps with 31 atoms
Performance: 14.760 ns/day, 1.626 hours/ns, 341.673 timesteps/s
97.5% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.9144 | 2.9144 | 2.9144 | 0.0 | 99.58
Neigh | 0.0068263 | 0.0068263 | 0.0068263 | 0.0 | 0.23
Comm | 0.0029868 | 0.0029868 | 0.0029868 | 0.0 | 0.10
Output | 0.00050202 | 0.00050202 | 0.00050202 | 0.0 | 0.02
Modify | 0.0013382 | 0.0013382 | 0.0013382 | 0.0 | 0.05
Other | | 0.0007672 | | | 0.03
Nlocal: 31.0000 ave 31 max 31 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 869.000 ave 869 max 869 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 4360.00 ave 4360 max 4360 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 4360
Ave neighs/atom = 140.64516
Neighbor list builds = 53
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:02

View File

@ -0,0 +1,92 @@
LAMMPS (24 Dec 2020)
using 1 OpenMP thread(s) per MPI task
units metal
boundary p p p
read_data vacancy.data
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (8.0711250 8.0711250 8.0711250)
1 by 2 by 2 MPI processor grid
reading atoms ...
31 atoms
read_data CPU = 0.001 seconds
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading agni potential file Al_jpc.agni with DATE: 2017-02-24
WARNING: Ignoring unknown tag 'Rs' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'neighbors' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
WARNING: Ignoring unknown tag 'lambda' in AGNI potential file. (src/USER-MISC/pair_agni.cpp:440)
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 2 2 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.227 | 4.227 | 4.227 Mbytes
Step KinEng TotEng Temp
0 3.8778043 3.8778043 1000
100 2.9986264 2.9986264 773.27944
200 3.6860316 3.6860316 950.54606
300 3.8133152 3.8133152 983.36966
400 3.7330288 3.7330288 962.66559
500 3.5875468 3.5875468 925.149
600 3.5331519 3.5331519 911.12176
700 2.6509452 2.6509452 683.62015
800 3.3763492 3.3763492 870.68579
900 3.9036736 3.9036736 1006.6711
1000 3.0884821 3.0884821 796.45125
Loop time of 0.91769 on 4 procs for 1000 steps with 31 atoms
Performance: 47.075 ns/day, 0.510 hours/ns, 1089.693 timesteps/s
95.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.67405 | 0.76047 | 0.81748 | 6.1 | 82.87
Neigh | 0.0015411 | 0.001691 | 0.001774 | 0.2 | 0.18
Comm | 0.091364 | 0.14959 | 0.23513 | 13.8 | 16.30
Output | 0.00027996 | 0.00040391 | 0.00075917 | 0.0 | 0.04
Modify | 0.0028397 | 0.0039247 | 0.0050072 | 1.7 | 0.43
Other | | 0.001611 | | | 0.18
Nlocal: 7.75000 ave 8 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 3
Nghost: 617.250 ave 621 max 612 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 1090.00 ave 1131 max 993 min
Histogram: 1 0 0 0 0 0 0 0 1 2
Total # of neighbors = 4360
Ave neighs/atom = 140.64516
Neighbor list builds = 53
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:00

View File

@ -1,87 +0,0 @@
LAMMPS (21 Feb 2017)
using 1 OpenMP thread(s) per MPI task
processors * * 1
units metal
boundary p p f
read_data adatom.data
orthogonal box = (0 0 0) to (17.1214 14.8276 39.3197)
1 by 1 by 1 MPI processor grid
reading atoms ...
181 atoms
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading potential file Al_jpc.agni with DATE: 2017-02-24
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 5 4 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Memory usage per processor = 2.69795 Mbytes
Step KinEng Temp
0 11.633413 500
100 4.6059939 197.964
200 7.3700149 316.76065
300 6.0443913 259.78581
400 6.1631189 264.88868
500 6.2647272 269.25577
600 5.2732539 226.6426
700 5.6514471 242.89721
800 6.5572407 281.8279
900 6.0576738 260.35669
1000 6.5622233 282.04205
Loop time of 51.9308 on 1 procs for 1000 steps with 181 atoms
Performance: 0.832 ns/day, 28.850 hours/ns, 19.256 timesteps/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 51.89 | 51.89 | 51.89 | 0.0 | 99.92
Neigh | 0.023158 | 0.023158 | 0.023158 | 0.0 | 0.04
Comm | 0.0049036 | 0.0049036 | 0.0049036 | 0.0 | 0.01
Output | 0.0002594 | 0.0002594 | 0.0002594 | 0.0 | 0.00
Modify | 0.010244 | 0.010244 | 0.010244 | 0.0 | 0.02
Other | | 0.002483 | | | 0.00
Nlocal: 181 ave 181 max 181 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 563 ave 563 max 563 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 19484 ave 19484 max 19484 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 19484
Ave neighs/atom = 107.646
Neighbor list builds = 33
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:52

View File

@ -1,87 +0,0 @@
LAMMPS (21 Feb 2017)
using 1 OpenMP thread(s) per MPI task
processors * * 1
units metal
boundary p p f
read_data adatom.data
orthogonal box = (0 0 0) to (17.1214 14.8276 39.3197)
2 by 2 by 1 MPI processor grid
reading atoms ...
181 atoms
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading potential file Al_jpc.agni with DATE: 2017-02-24
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 5 4 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Memory usage per processor = 3.06041 Mbytes
Step KinEng Temp
0 11.633413 500
100 4.6059941 197.96401
200 7.3700154 316.76067
300 6.0443913 259.78581
400 6.1631193 264.8887
500 6.2647281 269.25581
600 5.2732537 226.64259
700 5.651448 242.89725
800 6.5572405 281.82789
900 6.0576741 260.3567
1000 6.562224 282.04208
Loop time of 14.5263 on 4 procs for 1000 steps with 181 atoms
Performance: 2.974 ns/day, 8.070 hours/ns, 68.841 timesteps/s
99.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 12.858 | 13.227 | 13.574 | 8.0 | 91.06
Neigh | 0.0056965 | 0.0058173 | 0.0060787 | 0.2 | 0.04
Comm | 0.92934 | 1.276 | 1.6455 | 25.7 | 8.78
Output | 0.00013971 | 0.00017625 | 0.00025463 | 0.0 | 0.00
Modify | 0.012693 | 0.012756 | 0.012911 | 0.1 | 0.09
Other | | 0.004066 | | | 0.03
Nlocal: 45.25 ave 52 max 40 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Nghost: 376.5 ave 382 max 366 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4871 ave 5578 max 4374 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 19484
Ave neighs/atom = 107.646
Neighbor list builds = 33
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:14

View File

@ -1,87 +0,0 @@
LAMMPS (21 Feb 2017)
using 1 OpenMP thread(s) per MPI task
units metal
boundary p p p
read_data vacancy.data
orthogonal box = (0 0 0) to (8.07113 8.07113 8.07113)
1 by 1 by 1 MPI processor grid
reading atoms ...
31 atoms
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading potential file Al_jpc.agni with DATE: 2017-02-24
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 2 2 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Memory usage per processor = 2.73416 Mbytes
Step KinEng TotEng Temp
0 3.8778043 3.8778043 1000
100 2.9986261 2.9986261 773.27937
200 3.6860314 3.6860314 950.54599
300 3.813315 3.813315 983.36961
400 3.7330285 3.7330285 962.6655
500 3.5875467 3.5875467 925.14896
600 3.5331529 3.5331529 911.12202
700 2.6509449 2.6509449 683.62008
800 3.3763492 3.3763492 870.68582
900 3.903673 3.903673 1006.6709
1000 3.0884824 3.0884824 796.45133
Loop time of 9.02712 on 1 procs for 1000 steps with 31 atoms
Performance: 4.786 ns/day, 5.015 hours/ns, 110.777 timesteps/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 9.0039 | 9.0039 | 9.0039 | 0.0 | 99.74
Neigh | 0.011892 | 0.011892 | 0.011892 | 0.0 | 0.13
Comm | 0.0061693 | 0.0061693 | 0.0061693 | 0.0 | 0.07
Output | 0.00014615 | 0.00014615 | 0.00014615 | 0.0 | 0.00
Modify | 0.0035009 | 0.0035009 | 0.0035009 | 0.0 | 0.04
Other | | 0.001521 | | | 0.02
Nlocal: 31 ave 31 max 31 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 869 ave 869 max 869 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 4360 ave 4360 max 4360 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 4360
Ave neighs/atom = 140.645
Neighbor list builds = 53
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:09

View File

@ -1,87 +0,0 @@
LAMMPS (21 Feb 2017)
using 1 OpenMP thread(s) per MPI task
units metal
boundary p p p
read_data vacancy.data
orthogonal box = (0 0 0) to (8.07113 8.07113 8.07113)
1 by 2 by 2 MPI processor grid
reading atoms ...
31 atoms
pair_style agni
pair_coeff * * Al_jpc.agni Al
Reading potential file Al_jpc.agni with DATE: 2017-02-24
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15, bins = 2 2 2
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair agni, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Memory usage per processor = 2.72175 Mbytes
Step KinEng TotEng Temp
0 3.8778044 3.8778044 1000
100 2.9986263 2.9986263 773.27942
200 3.6860315 3.6860315 950.54602
300 3.8133145 3.8133145 983.3695
400 3.7330282 3.7330282 962.66543
500 3.5875466 3.5875466 925.14895
600 3.5331523 3.5331523 911.12186
700 2.6509448 2.6509448 683.62005
800 3.3763493 3.3763493 870.68584
900 3.9036733 3.9036733 1006.671
1000 3.0884818 3.0884818 796.45116
Loop time of 2.46785 on 4 procs for 1000 steps with 31 atoms
Performance: 17.505 ns/day, 1.371 hours/ns, 405.212 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.0737 | 2.299 | 2.3902 | 8.7 | 93.16
Neigh | 0.0025222 | 0.0027327 | 0.0028174 | 0.2 | 0.11
Comm | 0.059817 | 0.15141 | 0.37684 | 33.8 | 6.14
Output | 0.0001502 | 0.00016767 | 0.00021219 | 0.0 | 0.01
Modify | 0.0098755 | 0.010248 | 0.010664 | 0.3 | 0.42
Other | | 0.004321 | | | 0.18
Nlocal: 7.75 ave 8 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 3
Nghost: 617.25 ave 621 max 612 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 1090 ave 1131 max 993 min
Histogram: 1 0 0 0 0 0 0 0 1 2
Total # of neighbors = 4360
Ave neighs/atom = 140.645
Neighbor list builds = 53
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:02

View File

@ -0,0 +1,892 @@
Makeup graphene nanoribbon on hBN
880 atoms
2 atom types
0.000000000000000 46.152979739999999 xlo xhi
0.000000000000000 48.443364211584992 ylo yhi
0.000000000000000 100.000000000000000 zlo zhi
Atoms
1 1 0.420 0.000000000000000 0.000000000000000 0.000000000000000
2 2 -0.420 0.698900000000000 1.210530287683010 0.000000000000000
3 1 0.420 2.096700000000000 1.210530287683010 0.000000000000000
4 2 -0.420 2.795600000000000 0.000000000000000 0.000000000000000
5 1 0.420 4.193400000000000 0.000000000000000 0.000000000000000
6 2 -0.420 4.892300000000000 1.210530287683010 0.000000000000000
7 1 0.420 6.290100000000000 1.210530287683010 0.000000000000000
8 2 -0.420 6.989000000000000 0.000000000000000 0.000000000000000
9 1 0.420 8.386799999999999 0.000000000000000 0.000000000000000
10 2 -0.420 9.085699999999999 1.210530287683010 0.000000000000000
11 1 0.420 10.483499999999999 1.210530287683010 0.000000000000000
12 2 -0.420 11.182399999999999 0.000000000000000 0.000000000000000
13 1 0.420 12.580200000000000 0.000000000000000 0.000000000000000
14 2 -0.420 13.279100000000000 1.210530287683010 0.000000000000000
15 1 0.420 14.676900000000000 1.210530287683010 0.000000000000000
16 2 -0.420 15.375800000000000 0.000000000000000 0.000000000000000
17 1 0.420 16.773599999999998 0.000000000000000 0.000000000000000
18 2 -0.420 17.472500000000000 1.210530287683010 0.000000000000000
19 1 0.420 18.870300000000000 1.210530287683010 0.000000000000000
20 2 -0.420 19.569199999999999 0.000000000000000 0.000000000000000
21 1 0.420 20.966999999999999 0.000000000000000 0.000000000000000
22 2 -0.420 21.665900000000001 1.210530287683010 0.000000000000000
23 1 0.420 23.063699999999997 1.210530287683010 0.000000000000000
24 2 -0.420 23.762599999999999 0.000000000000000 0.000000000000000
25 1 0.420 25.160399999999999 0.000000000000000 0.000000000000000
26 2 -0.420 25.859299999999998 1.210530287683010 0.000000000000000
27 1 0.420 27.257099999999998 1.210530287683010 0.000000000000000
28 2 -0.420 27.956000000000000 0.000000000000000 0.000000000000000
29 1 0.420 29.353800000000000 0.000000000000000 0.000000000000000
30 2 -0.420 30.052699999999998 1.210530287683010 0.000000000000000
31 1 0.420 31.450499999999998 1.210530287683010 0.000000000000000
32 2 -0.420 32.149400000000000 0.000000000000000 0.000000000000000
33 1 0.420 33.547199999999997 0.000000000000000 0.000000000000000
34 2 -0.420 34.246099999999998 1.210530287683010 0.000000000000000
35 1 0.420 35.643899999999995 1.210530287683010 0.000000000000000
36 2 -0.420 36.342799999999997 0.000000000000000 0.000000000000000
37 1 0.420 37.740600000000001 0.000000000000000 0.000000000000000
38 2 -0.420 38.439499999999995 1.210530287683010 0.000000000000000
39 1 0.420 39.837299999999999 1.210530287683010 0.000000000000000
40 2 -0.420 40.536200000000001 0.000000000000000 0.000000000000000
41 1 0.420 41.933999999999997 0.000000000000000 0.000000000000000
42 2 -0.420 42.632899999999999 1.210530287683010 0.000000000000000
43 1 0.420 44.030699999999996 1.210530287683010 0.000000000000000
44 2 -0.420 44.729599999999998 0.000000000000000 0.000000000000000
45 1 0.420 0.000000000000000 2.421060575366020 0.000000000000000
46 2 -0.420 0.698900000000000 3.631590863049030 0.000000000000000
47 1 0.420 2.096700000000000 3.631590863049030 0.000000000000000
48 2 -0.420 2.795600000000000 2.421060575366020 0.000000000000000
49 1 0.420 4.193400000000000 2.421060575366020 0.000000000000000
50 2 -0.420 4.892300000000000 3.631590863049030 0.000000000000000
51 1 0.420 6.290100000000000 3.631590863049030 0.000000000000000
52 2 -0.420 6.989000000000000 2.421060575366020 0.000000000000000
53 1 0.420 8.386799999999999 2.421060575366020 0.000000000000000
54 2 -0.420 9.085699999999999 3.631590863049030 0.000000000000000
55 1 0.420 10.483499999999999 3.631590863049030 0.000000000000000
56 2 -0.420 11.182399999999999 2.421060575366020 0.000000000000000
57 1 0.420 12.580200000000000 2.421060575366020 0.000000000000000
58 2 -0.420 13.279100000000000 3.631590863049030 0.000000000000000
59 1 0.420 14.676900000000000 3.631590863049030 0.000000000000000
60 2 -0.420 15.375800000000000 2.421060575366020 0.000000000000000
61 1 0.420 16.773599999999998 2.421060575366020 0.000000000000000
62 2 -0.420 17.472500000000000 3.631590863049030 0.000000000000000
63 1 0.420 18.870300000000000 3.631590863049030 0.000000000000000
64 2 -0.420 19.569199999999999 2.421060575366020 0.000000000000000
65 1 0.420 20.966999999999999 2.421060575366020 0.000000000000000
66 2 -0.420 21.665900000000001 3.631590863049030 0.000000000000000
67 1 0.420 23.063699999999997 3.631590863049030 0.000000000000000
68 2 -0.420 23.762599999999999 2.421060575366020 0.000000000000000
69 1 0.420 25.160399999999999 2.421060575366020 0.000000000000000
70 2 -0.420 25.859299999999998 3.631590863049030 0.000000000000000
71 1 0.420 27.257099999999998 3.631590863049030 0.000000000000000
72 2 -0.420 27.956000000000000 2.421060575366020 0.000000000000000
73 1 0.420 29.353800000000000 2.421060575366020 0.000000000000000
74 2 -0.420 30.052699999999998 3.631590863049030 0.000000000000000
75 1 0.420 31.450499999999998 3.631590863049030 0.000000000000000
76 2 -0.420 32.149400000000000 2.421060575366020 0.000000000000000
77 1 0.420 33.547199999999997 2.421060575366020 0.000000000000000
78 2 -0.420 34.246099999999998 3.631590863049030 0.000000000000000
79 1 0.420 35.643899999999995 3.631590863049030 0.000000000000000
80 2 -0.420 36.342799999999997 2.421060575366020 0.000000000000000
81 1 0.420 37.740600000000001 2.421060575366020 0.000000000000000
82 2 -0.420 38.439499999999995 3.631590863049030 0.000000000000000
83 1 0.420 39.837299999999999 3.631590863049030 0.000000000000000
84 2 -0.420 40.536200000000001 2.421060575366020 0.000000000000000
85 1 0.420 41.933999999999997 2.421060575366020 0.000000000000000
86 2 -0.420 42.632899999999999 3.631590863049030 0.000000000000000
87 1 0.420 44.030699999999996 3.631590863049030 0.000000000000000
88 2 -0.420 44.729599999999998 2.421060575366020 0.000000000000000
89 1 0.420 0.000000000000000 4.842121150732040 0.000000000000000
90 2 -0.420 0.698900000000000 6.052651438415050 0.000000000000000
91 1 0.420 2.096700000000000 6.052651438415050 0.000000000000000
92 2 -0.420 2.795600000000000 4.842121150732040 0.000000000000000
93 1 0.420 4.193400000000000 4.842121150732040 0.000000000000000
94 2 -0.420 4.892300000000000 6.052651438415050 0.000000000000000
95 1 0.420 6.290100000000000 6.052651438415050 0.000000000000000
96 2 -0.420 6.989000000000000 4.842121150732040 0.000000000000000
97 1 0.420 8.386799999999999 4.842121150732040 0.000000000000000
98 2 -0.420 9.085699999999999 6.052651438415050 0.000000000000000
99 1 0.420 10.483499999999999 6.052651438415050 0.000000000000000
100 2 -0.420 11.182399999999999 4.842121150732040 0.000000000000000
101 1 0.420 12.580200000000000 4.842121150732040 0.000000000000000
102 2 -0.420 13.279100000000000 6.052651438415050 0.000000000000000
103 1 0.420 14.676900000000000 6.052651438415050 0.000000000000000
104 2 -0.420 15.375800000000000 4.842121150732040 0.000000000000000
105 1 0.420 16.773599999999998 4.842121150732040 0.000000000000000
106 2 -0.420 17.472500000000000 6.052651438415050 0.000000000000000
107 1 0.420 18.870300000000000 6.052651438415050 0.000000000000000
108 2 -0.420 19.569199999999999 4.842121150732040 0.000000000000000
109 1 0.420 20.966999999999999 4.842121150732040 0.000000000000000
110 2 -0.420 21.665900000000001 6.052651438415050 0.000000000000000
111 1 0.420 23.063699999999997 6.052651438415050 0.000000000000000
112 2 -0.420 23.762599999999999 4.842121150732040 0.000000000000000
113 1 0.420 25.160399999999999 4.842121150732040 0.000000000000000
114 2 -0.420 25.859299999999998 6.052651438415050 0.000000000000000
115 1 0.420 27.257099999999998 6.052651438415050 0.000000000000000
116 2 -0.420 27.956000000000000 4.842121150732040 0.000000000000000
117 1 0.420 29.353800000000000 4.842121150732040 0.000000000000000
118 2 -0.420 30.052699999999998 6.052651438415050 0.000000000000000
119 1 0.420 31.450499999999998 6.052651438415050 0.000000000000000
120 2 -0.420 32.149400000000000 4.842121150732040 0.000000000000000
121 1 0.420 33.547199999999997 4.842121150732040 0.000000000000000
122 2 -0.420 34.246099999999998 6.052651438415050 0.000000000000000
123 1 0.420 35.643899999999995 6.052651438415050 0.000000000000000
124 2 -0.420 36.342799999999997 4.842121150732040 0.000000000000000
125 1 0.420 37.740600000000001 4.842121150732040 0.000000000000000
126 2 -0.420 38.439499999999995 6.052651438415050 0.000000000000000
127 1 0.420 39.837299999999999 6.052651438415050 0.000000000000000
128 2 -0.420 40.536200000000001 4.842121150732040 0.000000000000000
129 1 0.420 41.933999999999997 4.842121150732040 0.000000000000000
130 2 -0.420 42.632899999999999 6.052651438415050 0.000000000000000
131 1 0.420 44.030699999999996 6.052651438415050 0.000000000000000
132 2 -0.420 44.729599999999998 4.842121150732040 0.000000000000000
133 1 0.420 0.000000000000000 7.263181726098059 0.000000000000000
134 2 -0.420 0.698900000000000 8.473712013781070 0.000000000000000
135 1 0.420 2.096700000000000 8.473712013781070 0.000000000000000
136 2 -0.420 2.795600000000000 7.263181726098059 0.000000000000000
137 1 0.420 4.193400000000000 7.263181726098059 0.000000000000000
138 2 -0.420 4.892300000000000 8.473712013781070 0.000000000000000
139 1 0.420 6.290100000000000 8.473712013781070 0.000000000000000
140 2 -0.420 6.989000000000000 7.263181726098059 0.000000000000000
141 1 0.420 8.386799999999999 7.263181726098059 0.000000000000000
142 2 -0.420 9.085699999999999 8.473712013781070 0.000000000000000
143 1 0.420 10.483499999999999 8.473712013781070 0.000000000000000
144 2 -0.420 11.182399999999999 7.263181726098059 0.000000000000000
145 1 0.420 12.580200000000000 7.263181726098059 0.000000000000000
146 2 -0.420 13.279100000000000 8.473712013781070 0.000000000000000
147 1 0.420 14.676900000000000 8.473712013781070 0.000000000000000
148 2 -0.420 15.375800000000000 7.263181726098059 0.000000000000000
149 1 0.420 16.773599999999998 7.263181726098059 0.000000000000000
150 2 -0.420 17.472500000000000 8.473712013781070 0.000000000000000
151 1 0.420 18.870300000000000 8.473712013781070 0.000000000000000
152 2 -0.420 19.569199999999999 7.263181726098059 0.000000000000000
153 1 0.420 20.966999999999999 7.263181726098059 0.000000000000000
154 2 -0.420 21.665900000000001 8.473712013781070 0.000000000000000
155 1 0.420 23.063699999999997 8.473712013781070 0.000000000000000
156 2 -0.420 23.762599999999999 7.263181726098059 0.000000000000000
157 1 0.420 25.160399999999999 7.263181726098059 0.000000000000000
158 2 -0.420 25.859299999999998 8.473712013781070 0.000000000000000
159 1 0.420 27.257099999999998 8.473712013781070 0.000000000000000
160 2 -0.420 27.956000000000000 7.263181726098059 0.000000000000000
161 1 0.420 29.353800000000000 7.263181726098059 0.000000000000000
162 2 -0.420 30.052699999999998 8.473712013781070 0.000000000000000
163 1 0.420 31.450499999999998 8.473712013781070 0.000000000000000
164 2 -0.420 32.149400000000000 7.263181726098059 0.000000000000000
165 1 0.420 33.547199999999997 7.263181726098059 0.000000000000000
166 2 -0.420 34.246099999999998 8.473712013781070 0.000000000000000
167 1 0.420 35.643899999999995 8.473712013781070 0.000000000000000
168 2 -0.420 36.342799999999997 7.263181726098059 0.000000000000000
169 1 0.420 37.740600000000001 7.263181726098059 0.000000000000000
170 2 -0.420 38.439499999999995 8.473712013781070 0.000000000000000
171 1 0.420 39.837299999999999 8.473712013781070 0.000000000000000
172 2 -0.420 40.536200000000001 7.263181726098059 0.000000000000000
173 1 0.420 41.933999999999997 7.263181726098059 0.000000000000000
174 2 -0.420 42.632899999999999 8.473712013781070 0.000000000000000
175 1 0.420 44.030699999999996 8.473712013781070 0.000000000000000
176 2 -0.420 44.729599999999998 7.263181726098059 0.000000000000000
177 1 0.420 0.000000000000000 9.684242301464080 0.000000000000000
178 2 -0.420 0.698900000000000 10.894772589147090 0.000000000000000
179 1 0.420 2.096700000000000 10.894772589147090 0.000000000000000
180 2 -0.420 2.795600000000000 9.684242301464080 0.000000000000000
181 1 0.420 4.193400000000000 9.684242301464080 0.000000000000000
182 2 -0.420 4.892300000000000 10.894772589147090 0.000000000000000
183 1 0.420 6.290100000000000 10.894772589147090 0.000000000000000
184 2 -0.420 6.989000000000000 9.684242301464080 0.000000000000000
185 1 0.420 8.386799999999999 9.684242301464080 0.000000000000000
186 2 -0.420 9.085699999999999 10.894772589147090 0.000000000000000
187 1 0.420 10.483499999999999 10.894772589147090 0.000000000000000
188 2 -0.420 11.182399999999999 9.684242301464080 0.000000000000000
189 1 0.420 12.580200000000000 9.684242301464080 0.000000000000000
190 2 -0.420 13.279100000000000 10.894772589147090 0.000000000000000
191 1 0.420 14.676900000000000 10.894772589147090 0.000000000000000
192 2 -0.420 15.375800000000000 9.684242301464080 0.000000000000000
193 1 0.420 16.773599999999998 9.684242301464080 0.000000000000000
194 2 -0.420 17.472500000000000 10.894772589147090 0.000000000000000
195 1 0.420 18.870300000000000 10.894772589147090 0.000000000000000
196 2 -0.420 19.569199999999999 9.684242301464080 0.000000000000000
197 1 0.420 20.966999999999999 9.684242301464080 0.000000000000000
198 2 -0.420 21.665900000000001 10.894772589147090 0.000000000000000
199 1 0.420 23.063699999999997 10.894772589147090 0.000000000000000
200 2 -0.420 23.762599999999999 9.684242301464080 0.000000000000000
201 1 0.420 25.160399999999999 9.684242301464080 0.000000000000000
202 2 -0.420 25.859299999999998 10.894772589147090 0.000000000000000
203 1 0.420 27.257099999999998 10.894772589147090 0.000000000000000
204 2 -0.420 27.956000000000000 9.684242301464080 0.000000000000000
205 1 0.420 29.353800000000000 9.684242301464080 0.000000000000000
206 2 -0.420 30.052699999999998 10.894772589147090 0.000000000000000
207 1 0.420 31.450499999999998 10.894772589147090 0.000000000000000
208 2 -0.420 32.149400000000000 9.684242301464080 0.000000000000000
209 1 0.420 33.547199999999997 9.684242301464080 0.000000000000000
210 2 -0.420 34.246099999999998 10.894772589147090 0.000000000000000
211 1 0.420 35.643899999999995 10.894772589147090 0.000000000000000
212 2 -0.420 36.342799999999997 9.684242301464080 0.000000000000000
213 1 0.420 37.740600000000001 9.684242301464080 0.000000000000000
214 2 -0.420 38.439499999999995 10.894772589147090 0.000000000000000
215 1 0.420 39.837299999999999 10.894772589147090 0.000000000000000
216 2 -0.420 40.536200000000001 9.684242301464080 0.000000000000000
217 1 0.420 41.933999999999997 9.684242301464080 0.000000000000000
218 2 -0.420 42.632899999999999 10.894772589147090 0.000000000000000
219 1 0.420 44.030699999999996 10.894772589147090 0.000000000000000
220 2 -0.420 44.729599999999998 9.684242301464080 0.000000000000000
221 1 0.420 0.000000000000000 12.105302876830100 0.000000000000000
222 2 -0.420 0.698900000000000 13.315833164513110 0.000000000000000
223 1 0.420 2.096700000000000 13.315833164513110 0.000000000000000
224 2 -0.420 2.795600000000000 12.105302876830100 0.000000000000000
225 1 0.420 4.193400000000000 12.105302876830100 0.000000000000000
226 2 -0.420 4.892300000000000 13.315833164513110 0.000000000000000
227 1 0.420 6.290100000000000 13.315833164513110 0.000000000000000
228 2 -0.420 6.989000000000000 12.105302876830100 0.000000000000000
229 1 0.420 8.386799999999999 12.105302876830100 0.000000000000000
230 2 -0.420 9.085699999999999 13.315833164513110 0.000000000000000
231 1 0.420 10.483499999999999 13.315833164513110 0.000000000000000
232 2 -0.420 11.182399999999999 12.105302876830100 0.000000000000000
233 1 0.420 12.580200000000000 12.105302876830100 0.000000000000000
234 2 -0.420 13.279100000000000 13.315833164513110 0.000000000000000
235 1 0.420 14.676900000000000 13.315833164513110 0.000000000000000
236 2 -0.420 15.375800000000000 12.105302876830100 0.000000000000000
237 1 0.420 16.773599999999998 12.105302876830100 0.000000000000000
238 2 -0.420 17.472500000000000 13.315833164513110 0.000000000000000
239 1 0.420 18.870300000000000 13.315833164513110 0.000000000000000
240 2 -0.420 19.569199999999999 12.105302876830100 0.000000000000000
241 1 0.420 20.966999999999999 12.105302876830100 0.000000000000000
242 2 -0.420 21.665900000000001 13.315833164513110 0.000000000000000
243 1 0.420 23.063699999999997 13.315833164513110 0.000000000000000
244 2 -0.420 23.762599999999999 12.105302876830100 0.000000000000000
245 1 0.420 25.160399999999999 12.105302876830100 0.000000000000000
246 2 -0.420 25.859299999999998 13.315833164513110 0.000000000000000
247 1 0.420 27.257099999999998 13.315833164513110 0.000000000000000
248 2 -0.420 27.956000000000000 12.105302876830100 0.000000000000000
249 1 0.420 29.353800000000000 12.105302876830100 0.000000000000000
250 2 -0.420 30.052699999999998 13.315833164513110 0.000000000000000
251 1 0.420 31.450499999999998 13.315833164513110 0.000000000000000
252 2 -0.420 32.149400000000000 12.105302876830100 0.000000000000000
253 1 0.420 33.547199999999997 12.105302876830100 0.000000000000000
254 2 -0.420 34.246099999999998 13.315833164513110 0.000000000000000
255 1 0.420 35.643899999999995 13.315833164513110 0.000000000000000
256 2 -0.420 36.342799999999997 12.105302876830100 0.000000000000000
257 1 0.420 37.740600000000001 12.105302876830100 0.000000000000000
258 2 -0.420 38.439499999999995 13.315833164513110 0.000000000000000
259 1 0.420 39.837299999999999 13.315833164513110 0.000000000000000
260 2 -0.420 40.536200000000001 12.105302876830100 0.000000000000000
261 1 0.420 41.933999999999997 12.105302876830100 0.000000000000000
262 2 -0.420 42.632899999999999 13.315833164513110 0.000000000000000
263 1 0.420 44.030699999999996 13.315833164513110 0.000000000000000
264 2 -0.420 44.729599999999998 12.105302876830100 0.000000000000000
265 1 0.420 0.000000000000000 14.526363452196119 0.000000000000000
266 2 -0.420 0.698900000000000 15.736893739879129 0.000000000000000
267 1 0.420 2.096700000000000 15.736893739879129 0.000000000000000
268 2 -0.420 2.795600000000000 14.526363452196119 0.000000000000000
269 1 0.420 4.193400000000000 14.526363452196119 0.000000000000000
270 2 -0.420 4.892300000000000 15.736893739879129 0.000000000000000
271 1 0.420 6.290100000000000 15.736893739879129 0.000000000000000
272 2 -0.420 6.989000000000000 14.526363452196119 0.000000000000000
273 1 0.420 8.386799999999999 14.526363452196119 0.000000000000000
274 2 -0.420 9.085699999999999 15.736893739879129 0.000000000000000
275 1 0.420 10.483499999999999 15.736893739879129 0.000000000000000
276 2 -0.420 11.182399999999999 14.526363452196119 0.000000000000000
277 1 0.420 12.580200000000000 14.526363452196119 0.000000000000000
278 2 -0.420 13.279100000000000 15.736893739879129 0.000000000000000
279 1 0.420 14.676900000000000 15.736893739879129 0.000000000000000
280 2 -0.420 15.375800000000000 14.526363452196119 0.000000000000000
281 1 0.420 16.773599999999998 14.526363452196119 0.000000000000000
282 2 -0.420 17.472500000000000 15.736893739879129 0.000000000000000
283 1 0.420 18.870300000000000 15.736893739879129 0.000000000000000
284 2 -0.420 19.569199999999999 14.526363452196119 0.000000000000000
285 1 0.420 20.966999999999999 14.526363452196119 0.000000000000000
286 2 -0.420 21.665900000000001 15.736893739879129 0.000000000000000
287 1 0.420 23.063699999999997 15.736893739879129 0.000000000000000
288 2 -0.420 23.762599999999999 14.526363452196119 0.000000000000000
289 1 0.420 25.160399999999999 14.526363452196119 0.000000000000000
290 2 -0.420 25.859299999999998 15.736893739879129 0.000000000000000
291 1 0.420 27.257099999999998 15.736893739879129 0.000000000000000
292 2 -0.420 27.956000000000000 14.526363452196119 0.000000000000000
293 1 0.420 29.353800000000000 14.526363452196119 0.000000000000000
294 2 -0.420 30.052699999999998 15.736893739879129 0.000000000000000
295 1 0.420 31.450499999999998 15.736893739879129 0.000000000000000
296 2 -0.420 32.149400000000000 14.526363452196119 0.000000000000000
297 1 0.420 33.547199999999997 14.526363452196119 0.000000000000000
298 2 -0.420 34.246099999999998 15.736893739879129 0.000000000000000
299 1 0.420 35.643899999999995 15.736893739879129 0.000000000000000
300 2 -0.420 36.342799999999997 14.526363452196119 0.000000000000000
301 1 0.420 37.740600000000001 14.526363452196119 0.000000000000000
302 2 -0.420 38.439499999999995 15.736893739879129 0.000000000000000
303 1 0.420 39.837299999999999 15.736893739879129 0.000000000000000
304 2 -0.420 40.536200000000001 14.526363452196119 0.000000000000000
305 1 0.420 41.933999999999997 14.526363452196119 0.000000000000000
306 2 -0.420 42.632899999999999 15.736893739879129 0.000000000000000
307 1 0.420 44.030699999999996 15.736893739879129 0.000000000000000
308 2 -0.420 44.729599999999998 14.526363452196119 0.000000000000000
309 1 0.420 0.000000000000000 16.947424027562139 0.000000000000000
310 2 -0.420 0.698900000000000 18.157954315245149 0.000000000000000
311 1 0.420 2.096700000000000 18.157954315245149 0.000000000000000
312 2 -0.420 2.795600000000000 16.947424027562139 0.000000000000000
313 1 0.420 4.193400000000000 16.947424027562139 0.000000000000000
314 2 -0.420 4.892300000000000 18.157954315245149 0.000000000000000
315 1 0.420 6.290100000000000 18.157954315245149 0.000000000000000
316 2 -0.420 6.989000000000000 16.947424027562139 0.000000000000000
317 1 0.420 8.386799999999999 16.947424027562139 0.000000000000000
318 2 -0.420 9.085699999999999 18.157954315245149 0.000000000000000
319 1 0.420 10.483499999999999 18.157954315245149 0.000000000000000
320 2 -0.420 11.182399999999999 16.947424027562139 0.000000000000000
321 1 0.420 12.580200000000000 16.947424027562139 0.000000000000000
322 2 -0.420 13.279100000000000 18.157954315245149 0.000000000000000
323 1 0.420 14.676900000000000 18.157954315245149 0.000000000000000
324 2 -0.420 15.375800000000000 16.947424027562139 0.000000000000000
325 1 0.420 16.773599999999998 16.947424027562139 0.000000000000000
326 2 -0.420 17.472500000000000 18.157954315245149 0.000000000000000
327 1 0.420 18.870300000000000 18.157954315245149 0.000000000000000
328 2 -0.420 19.569199999999999 16.947424027562139 0.000000000000000
329 1 0.420 20.966999999999999 16.947424027562139 0.000000000000000
330 2 -0.420 21.665900000000001 18.157954315245149 0.000000000000000
331 1 0.420 23.063699999999997 18.157954315245149 0.000000000000000
332 2 -0.420 23.762599999999999 16.947424027562139 0.000000000000000
333 1 0.420 25.160399999999999 16.947424027562139 0.000000000000000
334 2 -0.420 25.859299999999998 18.157954315245149 0.000000000000000
335 1 0.420 27.257099999999998 18.157954315245149 0.000000000000000
336 2 -0.420 27.956000000000000 16.947424027562139 0.000000000000000
337 1 0.420 29.353800000000000 16.947424027562139 0.000000000000000
338 2 -0.420 30.052699999999998 18.157954315245149 0.000000000000000
339 1 0.420 31.450499999999998 18.157954315245149 0.000000000000000
340 2 -0.420 32.149400000000000 16.947424027562139 0.000000000000000
341 1 0.420 33.547199999999997 16.947424027562139 0.000000000000000
342 2 -0.420 34.246099999999998 18.157954315245149 0.000000000000000
343 1 0.420 35.643899999999995 18.157954315245149 0.000000000000000
344 2 -0.420 36.342799999999997 16.947424027562139 0.000000000000000
345 1 0.420 37.740600000000001 16.947424027562139 0.000000000000000
346 2 -0.420 38.439499999999995 18.157954315245149 0.000000000000000
347 1 0.420 39.837299999999999 18.157954315245149 0.000000000000000
348 2 -0.420 40.536200000000001 16.947424027562139 0.000000000000000
349 1 0.420 41.933999999999997 16.947424027562139 0.000000000000000
350 2 -0.420 42.632899999999999 18.157954315245149 0.000000000000000
351 1 0.420 44.030699999999996 18.157954315245149 0.000000000000000
352 2 -0.420 44.729599999999998 16.947424027562139 0.000000000000000
353 1 0.420 0.000000000000000 19.368484602928159 0.000000000000000
354 2 -0.420 0.698900000000000 20.579014890611170 0.000000000000000
355 1 0.420 2.096700000000000 20.579014890611170 0.000000000000000
356 2 -0.420 2.795600000000000 19.368484602928159 0.000000000000000
357 1 0.420 4.193400000000000 19.368484602928159 0.000000000000000
358 2 -0.420 4.892300000000000 20.579014890611170 0.000000000000000
359 1 0.420 6.290100000000000 20.579014890611170 0.000000000000000
360 2 -0.420 6.989000000000000 19.368484602928159 0.000000000000000
361 1 0.420 8.386799999999999 19.368484602928159 0.000000000000000
362 2 -0.420 9.085699999999999 20.579014890611170 0.000000000000000
363 1 0.420 10.483499999999999 20.579014890611170 0.000000000000000
364 2 -0.420 11.182399999999999 19.368484602928159 0.000000000000000
365 1 0.420 12.580200000000000 19.368484602928159 0.000000000000000
366 2 -0.420 13.279100000000000 20.579014890611170 0.000000000000000
367 1 0.420 14.676900000000000 20.579014890611170 0.000000000000000
368 2 -0.420 15.375800000000000 19.368484602928159 0.000000000000000
369 1 0.420 16.773599999999998 19.368484602928159 0.000000000000000
370 2 -0.420 17.472500000000000 20.579014890611170 0.000000000000000
371 1 0.420 18.870300000000000 20.579014890611170 0.000000000000000
372 2 -0.420 19.569199999999999 19.368484602928159 0.000000000000000
373 1 0.420 20.966999999999999 19.368484602928159 0.000000000000000
374 2 -0.420 21.665900000000001 20.579014890611170 0.000000000000000
375 1 0.420 23.063699999999997 20.579014890611170 0.000000000000000
376 2 -0.420 23.762599999999999 19.368484602928159 0.000000000000000
377 1 0.420 25.160399999999999 19.368484602928159 0.000000000000000
378 2 -0.420 25.859299999999998 20.579014890611170 0.000000000000000
379 1 0.420 27.257099999999998 20.579014890611170 0.000000000000000
380 2 -0.420 27.956000000000000 19.368484602928159 0.000000000000000
381 1 0.420 29.353800000000000 19.368484602928159 0.000000000000000
382 2 -0.420 30.052699999999998 20.579014890611170 0.000000000000000
383 1 0.420 31.450499999999998 20.579014890611170 0.000000000000000
384 2 -0.420 32.149400000000000 19.368484602928159 0.000000000000000
385 1 0.420 33.547199999999997 19.368484602928159 0.000000000000000
386 2 -0.420 34.246099999999998 20.579014890611170 0.000000000000000
387 1 0.420 35.643899999999995 20.579014890611170 0.000000000000000
388 2 -0.420 36.342799999999997 19.368484602928159 0.000000000000000
389 1 0.420 37.740600000000001 19.368484602928159 0.000000000000000
390 2 -0.420 38.439499999999995 20.579014890611170 0.000000000000000
391 1 0.420 39.837299999999999 20.579014890611170 0.000000000000000
392 2 -0.420 40.536200000000001 19.368484602928159 0.000000000000000
393 1 0.420 41.933999999999997 19.368484602928159 0.000000000000000
394 2 -0.420 42.632899999999999 20.579014890611170 0.000000000000000
395 1 0.420 44.030699999999996 20.579014890611170 0.000000000000000
396 2 -0.420 44.729599999999998 19.368484602928159 0.000000000000000
397 1 0.420 0.000000000000000 21.789545178294180 0.000000000000000
398 2 -0.420 0.698900000000000 23.000075465977190 0.000000000000000
399 1 0.420 2.096700000000000 23.000075465977190 0.000000000000000
400 2 -0.420 2.795600000000000 21.789545178294180 0.000000000000000
401 1 0.420 4.193400000000000 21.789545178294180 0.000000000000000
402 2 -0.420 4.892300000000000 23.000075465977190 0.000000000000000
403 1 0.420 6.290100000000000 23.000075465977190 0.000000000000000
404 2 -0.420 6.989000000000000 21.789545178294180 0.000000000000000
405 1 0.420 8.386799999999999 21.789545178294180 0.000000000000000
406 2 -0.420 9.085699999999999 23.000075465977190 0.000000000000000
407 1 0.420 10.483499999999999 23.000075465977190 0.000000000000000
408 2 -0.420 11.182399999999999 21.789545178294180 0.000000000000000
409 1 0.420 12.580200000000000 21.789545178294180 0.000000000000000
410 2 -0.420 13.279100000000000 23.000075465977190 0.000000000000000
411 1 0.420 14.676900000000000 23.000075465977190 0.000000000000000
412 2 -0.420 15.375800000000000 21.789545178294180 0.000000000000000
413 1 0.420 16.773599999999998 21.789545178294180 0.000000000000000
414 2 -0.420 17.472500000000000 23.000075465977190 0.000000000000000
415 1 0.420 18.870300000000000 23.000075465977190 0.000000000000000
416 2 -0.420 19.569199999999999 21.789545178294180 0.000000000000000
417 1 0.420 20.966999999999999 21.789545178294180 0.000000000000000
418 2 -0.420 21.665900000000001 23.000075465977190 0.000000000000000
419 1 0.420 23.063699999999997 23.000075465977190 0.000000000000000
420 2 -0.420 23.762599999999999 21.789545178294180 0.000000000000000
421 1 0.420 25.160399999999999 21.789545178294180 0.000000000000000
422 2 -0.420 25.859299999999998 23.000075465977190 0.000000000000000
423 1 0.420 27.257099999999998 23.000075465977190 0.000000000000000
424 2 -0.420 27.956000000000000 21.789545178294180 0.000000000000000
425 1 0.420 29.353800000000000 21.789545178294180 0.000000000000000
426 2 -0.420 30.052699999999998 23.000075465977190 0.000000000000000
427 1 0.420 31.450499999999998 23.000075465977190 0.000000000000000
428 2 -0.420 32.149400000000000 21.789545178294180 0.000000000000000
429 1 0.420 33.547199999999997 21.789545178294180 0.000000000000000
430 2 -0.420 34.246099999999998 23.000075465977190 0.000000000000000
431 1 0.420 35.643899999999995 23.000075465977190 0.000000000000000
432 2 -0.420 36.342799999999997 21.789545178294180 0.000000000000000
433 1 0.420 37.740600000000001 21.789545178294180 0.000000000000000
434 2 -0.420 38.439499999999995 23.000075465977190 0.000000000000000
435 1 0.420 39.837299999999999 23.000075465977190 0.000000000000000
436 2 -0.420 40.536200000000001 21.789545178294180 0.000000000000000
437 1 0.420 41.933999999999997 21.789545178294180 0.000000000000000
438 2 -0.420 42.632899999999999 23.000075465977190 0.000000000000000
439 1 0.420 44.030699999999996 23.000075465977190 0.000000000000000
440 2 -0.420 44.729599999999998 21.789545178294180 0.000000000000000
441 1 0.420 0.000000000000000 24.210605753660200 0.000000000000000
442 2 -0.420 0.698900000000000 25.421136041343210 0.000000000000000
443 1 0.420 2.096700000000000 25.421136041343210 0.000000000000000
444 2 -0.420 2.795600000000000 24.210605753660200 0.000000000000000
445 1 0.420 4.193400000000000 24.210605753660200 0.000000000000000
446 2 -0.420 4.892300000000000 25.421136041343210 0.000000000000000
447 1 0.420 6.290100000000000 25.421136041343210 0.000000000000000
448 2 -0.420 6.989000000000000 24.210605753660200 0.000000000000000
449 1 0.420 8.386799999999999 24.210605753660200 0.000000000000000
450 2 -0.420 9.085699999999999 25.421136041343210 0.000000000000000
451 1 0.420 10.483499999999999 25.421136041343210 0.000000000000000
452 2 -0.420 11.182399999999999 24.210605753660200 0.000000000000000
453 1 0.420 12.580200000000000 24.210605753660200 0.000000000000000
454 2 -0.420 13.279100000000000 25.421136041343210 0.000000000000000
455 1 0.420 14.676900000000000 25.421136041343210 0.000000000000000
456 2 -0.420 15.375800000000000 24.210605753660200 0.000000000000000
457 1 0.420 16.773599999999998 24.210605753660200 0.000000000000000
458 2 -0.420 17.472500000000000 25.421136041343210 0.000000000000000
459 1 0.420 18.870300000000000 25.421136041343210 0.000000000000000
460 2 -0.420 19.569199999999999 24.210605753660200 0.000000000000000
461 1 0.420 20.966999999999999 24.210605753660200 0.000000000000000
462 2 -0.420 21.665900000000001 25.421136041343210 0.000000000000000
463 1 0.420 23.063699999999997 25.421136041343210 0.000000000000000
464 2 -0.420 23.762599999999999 24.210605753660200 0.000000000000000
465 1 0.420 25.160399999999999 24.210605753660200 0.000000000000000
466 2 -0.420 25.859299999999998 25.421136041343210 0.000000000000000
467 1 0.420 27.257099999999998 25.421136041343210 0.000000000000000
468 2 -0.420 27.956000000000000 24.210605753660200 0.000000000000000
469 1 0.420 29.353800000000000 24.210605753660200 0.000000000000000
470 2 -0.420 30.052699999999998 25.421136041343210 0.000000000000000
471 1 0.420 31.450499999999998 25.421136041343210 0.000000000000000
472 2 -0.420 32.149400000000000 24.210605753660200 0.000000000000000
473 1 0.420 33.547199999999997 24.210605753660200 0.000000000000000
474 2 -0.420 34.246099999999998 25.421136041343210 0.000000000000000
475 1 0.420 35.643899999999995 25.421136041343210 0.000000000000000
476 2 -0.420 36.342799999999997 24.210605753660200 0.000000000000000
477 1 0.420 37.740600000000001 24.210605753660200 0.000000000000000
478 2 -0.420 38.439499999999995 25.421136041343210 0.000000000000000
479 1 0.420 39.837299999999999 25.421136041343210 0.000000000000000
480 2 -0.420 40.536200000000001 24.210605753660200 0.000000000000000
481 1 0.420 41.933999999999997 24.210605753660200 0.000000000000000
482 2 -0.420 42.632899999999999 25.421136041343210 0.000000000000000
483 1 0.420 44.030699999999996 25.421136041343210 0.000000000000000
484 2 -0.420 44.729599999999998 24.210605753660200 0.000000000000000
485 1 0.420 0.000000000000000 26.631666329026221 0.000000000000000
486 2 -0.420 0.698900000000000 27.842196616709231 0.000000000000000
487 1 0.420 2.096700000000000 27.842196616709231 0.000000000000000
488 2 -0.420 2.795600000000000 26.631666329026221 0.000000000000000
489 1 0.420 4.193400000000000 26.631666329026221 0.000000000000000
490 2 -0.420 4.892300000000000 27.842196616709231 0.000000000000000
491 1 0.420 6.290100000000000 27.842196616709231 0.000000000000000
492 2 -0.420 6.989000000000000 26.631666329026221 0.000000000000000
493 1 0.420 8.386799999999999 26.631666329026221 0.000000000000000
494 2 -0.420 9.085699999999999 27.842196616709231 0.000000000000000
495 1 0.420 10.483499999999999 27.842196616709231 0.000000000000000
496 2 -0.420 11.182399999999999 26.631666329026221 0.000000000000000
497 1 0.420 12.580200000000000 26.631666329026221 0.000000000000000
498 2 -0.420 13.279100000000000 27.842196616709231 0.000000000000000
499 1 0.420 14.676900000000000 27.842196616709231 0.000000000000000
500 2 -0.420 15.375800000000000 26.631666329026221 0.000000000000000
501 1 0.420 16.773599999999998 26.631666329026221 0.000000000000000
502 2 -0.420 17.472500000000000 27.842196616709231 0.000000000000000
503 1 0.420 18.870300000000000 27.842196616709231 0.000000000000000
504 2 -0.420 19.569199999999999 26.631666329026221 0.000000000000000
505 1 0.420 20.966999999999999 26.631666329026221 0.000000000000000
506 2 -0.420 21.665900000000001 27.842196616709231 0.000000000000000
507 1 0.420 23.063699999999997 27.842196616709231 0.000000000000000
508 2 -0.420 23.762599999999999 26.631666329026221 0.000000000000000
509 1 0.420 25.160399999999999 26.631666329026221 0.000000000000000
510 2 -0.420 25.859299999999998 27.842196616709231 0.000000000000000
511 1 0.420 27.257099999999998 27.842196616709231 0.000000000000000
512 2 -0.420 27.956000000000000 26.631666329026221 0.000000000000000
513 1 0.420 29.353800000000000 26.631666329026221 0.000000000000000
514 2 -0.420 30.052699999999998 27.842196616709231 0.000000000000000
515 1 0.420 31.450499999999998 27.842196616709231 0.000000000000000
516 2 -0.420 32.149400000000000 26.631666329026221 0.000000000000000
517 1 0.420 33.547199999999997 26.631666329026221 0.000000000000000
518 2 -0.420 34.246099999999998 27.842196616709231 0.000000000000000
519 1 0.420 35.643899999999995 27.842196616709231 0.000000000000000
520 2 -0.420 36.342799999999997 26.631666329026221 0.000000000000000
521 1 0.420 37.740600000000001 26.631666329026221 0.000000000000000
522 2 -0.420 38.439499999999995 27.842196616709231 0.000000000000000
523 1 0.420 39.837299999999999 27.842196616709231 0.000000000000000
524 2 -0.420 40.536200000000001 26.631666329026221 0.000000000000000
525 1 0.420 41.933999999999997 26.631666329026221 0.000000000000000
526 2 -0.420 42.632899999999999 27.842196616709231 0.000000000000000
527 1 0.420 44.030699999999996 27.842196616709231 0.000000000000000
528 2 -0.420 44.729599999999998 26.631666329026221 0.000000000000000
529 1 0.420 0.000000000000000 29.052726904392237 0.000000000000000
530 2 -0.420 0.698900000000000 30.263257192075248 0.000000000000000
531 1 0.420 2.096700000000000 30.263257192075248 0.000000000000000
532 2 -0.420 2.795600000000000 29.052726904392237 0.000000000000000
533 1 0.420 4.193400000000000 29.052726904392237 0.000000000000000
534 2 -0.420 4.892300000000000 30.263257192075248 0.000000000000000
535 1 0.420 6.290100000000000 30.263257192075248 0.000000000000000
536 2 -0.420 6.989000000000000 29.052726904392237 0.000000000000000
537 1 0.420 8.386799999999999 29.052726904392237 0.000000000000000
538 2 -0.420 9.085699999999999 30.263257192075248 0.000000000000000
539 1 0.420 10.483499999999999 30.263257192075248 0.000000000000000
540 2 -0.420 11.182399999999999 29.052726904392237 0.000000000000000
541 1 0.420 12.580200000000000 29.052726904392237 0.000000000000000
542 2 -0.420 13.279100000000000 30.263257192075248 0.000000000000000
543 1 0.420 14.676900000000000 30.263257192075248 0.000000000000000
544 2 -0.420 15.375800000000000 29.052726904392237 0.000000000000000
545 1 0.420 16.773599999999998 29.052726904392237 0.000000000000000
546 2 -0.420 17.472500000000000 30.263257192075248 0.000000000000000
547 1 0.420 18.870300000000000 30.263257192075248 0.000000000000000
548 2 -0.420 19.569199999999999 29.052726904392237 0.000000000000000
549 1 0.420 20.966999999999999 29.052726904392237 0.000000000000000
550 2 -0.420 21.665900000000001 30.263257192075248 0.000000000000000
551 1 0.420 23.063699999999997 30.263257192075248 0.000000000000000
552 2 -0.420 23.762599999999999 29.052726904392237 0.000000000000000
553 1 0.420 25.160399999999999 29.052726904392237 0.000000000000000
554 2 -0.420 25.859299999999998 30.263257192075248 0.000000000000000
555 1 0.420 27.257099999999998 30.263257192075248 0.000000000000000
556 2 -0.420 27.956000000000000 29.052726904392237 0.000000000000000
557 1 0.420 29.353800000000000 29.052726904392237 0.000000000000000
558 2 -0.420 30.052699999999998 30.263257192075248 0.000000000000000
559 1 0.420 31.450499999999998 30.263257192075248 0.000000000000000
560 2 -0.420 32.149400000000000 29.052726904392237 0.000000000000000
561 1 0.420 33.547199999999997 29.052726904392237 0.000000000000000
562 2 -0.420 34.246099999999998 30.263257192075248 0.000000000000000
563 1 0.420 35.643899999999995 30.263257192075248 0.000000000000000
564 2 -0.420 36.342799999999997 29.052726904392237 0.000000000000000
565 1 0.420 37.740600000000001 29.052726904392237 0.000000000000000
566 2 -0.420 38.439499999999995 30.263257192075248 0.000000000000000
567 1 0.420 39.837299999999999 30.263257192075248 0.000000000000000
568 2 -0.420 40.536200000000001 29.052726904392237 0.000000000000000
569 1 0.420 41.933999999999997 29.052726904392237 0.000000000000000
570 2 -0.420 42.632899999999999 30.263257192075248 0.000000000000000
571 1 0.420 44.030699999999996 30.263257192075248 0.000000000000000
572 2 -0.420 44.729599999999998 29.052726904392237 0.000000000000000
573 1 0.420 0.000000000000000 31.473787479758258 0.000000000000000
574 2 -0.420 0.698900000000000 32.684317767441271 0.000000000000000
575 1 0.420 2.096700000000000 32.684317767441271 0.000000000000000
576 2 -0.420 2.795600000000000 31.473787479758258 0.000000000000000
577 1 0.420 4.193400000000000 31.473787479758258 0.000000000000000
578 2 -0.420 4.892300000000000 32.684317767441271 0.000000000000000
579 1 0.420 6.290100000000000 32.684317767441271 0.000000000000000
580 2 -0.420 6.989000000000000 31.473787479758258 0.000000000000000
581 1 0.420 8.386799999999999 31.473787479758258 0.000000000000000
582 2 -0.420 9.085699999999999 32.684317767441271 0.000000000000000
583 1 0.420 10.483499999999999 32.684317767441271 0.000000000000000
584 2 -0.420 11.182399999999999 31.473787479758258 0.000000000000000
585 1 0.420 12.580200000000000 31.473787479758258 0.000000000000000
586 2 -0.420 13.279100000000000 32.684317767441271 0.000000000000000
587 1 0.420 14.676900000000000 32.684317767441271 0.000000000000000
588 2 -0.420 15.375800000000000 31.473787479758258 0.000000000000000
589 1 0.420 16.773599999999998 31.473787479758258 0.000000000000000
590 2 -0.420 17.472500000000000 32.684317767441271 0.000000000000000
591 1 0.420 18.870300000000000 32.684317767441271 0.000000000000000
592 2 -0.420 19.569199999999999 31.473787479758258 0.000000000000000
593 1 0.420 20.966999999999999 31.473787479758258 0.000000000000000
594 2 -0.420 21.665900000000001 32.684317767441271 0.000000000000000
595 1 0.420 23.063699999999997 32.684317767441271 0.000000000000000
596 2 -0.420 23.762599999999999 31.473787479758258 0.000000000000000
597 1 0.420 25.160399999999999 31.473787479758258 0.000000000000000
598 2 -0.420 25.859299999999998 32.684317767441271 0.000000000000000
599 1 0.420 27.257099999999998 32.684317767441271 0.000000000000000
600 2 -0.420 27.956000000000000 31.473787479758258 0.000000000000000
601 1 0.420 29.353800000000000 31.473787479758258 0.000000000000000
602 2 -0.420 30.052699999999998 32.684317767441271 0.000000000000000
603 1 0.420 31.450499999999998 32.684317767441271 0.000000000000000
604 2 -0.420 32.149400000000000 31.473787479758258 0.000000000000000
605 1 0.420 33.547199999999997 31.473787479758258 0.000000000000000
606 2 -0.420 34.246099999999998 32.684317767441271 0.000000000000000
607 1 0.420 35.643899999999995 32.684317767441271 0.000000000000000
608 2 -0.420 36.342799999999997 31.473787479758258 0.000000000000000
609 1 0.420 37.740600000000001 31.473787479758258 0.000000000000000
610 2 -0.420 38.439499999999995 32.684317767441271 0.000000000000000
611 1 0.420 39.837299999999999 32.684317767441271 0.000000000000000
612 2 -0.420 40.536200000000001 31.473787479758258 0.000000000000000
613 1 0.420 41.933999999999997 31.473787479758258 0.000000000000000
614 2 -0.420 42.632899999999999 32.684317767441271 0.000000000000000
615 1 0.420 44.030699999999996 32.684317767441271 0.000000000000000
616 2 -0.420 44.729599999999998 31.473787479758258 0.000000000000000
617 1 0.420 0.000000000000000 33.894848055124278 0.000000000000000
618 2 -0.420 0.698900000000000 35.105378342807292 0.000000000000000
619 1 0.420 2.096700000000000 35.105378342807292 0.000000000000000
620 2 -0.420 2.795600000000000 33.894848055124278 0.000000000000000
621 1 0.420 4.193400000000000 33.894848055124278 0.000000000000000
622 2 -0.420 4.892300000000000 35.105378342807292 0.000000000000000
623 1 0.420 6.290100000000000 35.105378342807292 0.000000000000000
624 2 -0.420 6.989000000000000 33.894848055124278 0.000000000000000
625 1 0.420 8.386799999999999 33.894848055124278 0.000000000000000
626 2 -0.420 9.085699999999999 35.105378342807292 0.000000000000000
627 1 0.420 10.483499999999999 35.105378342807292 0.000000000000000
628 2 -0.420 11.182399999999999 33.894848055124278 0.000000000000000
629 1 0.420 12.580200000000000 33.894848055124278 0.000000000000000
630 2 -0.420 13.279100000000000 35.105378342807292 0.000000000000000
631 1 0.420 14.676900000000000 35.105378342807292 0.000000000000000
632 2 -0.420 15.375800000000000 33.894848055124278 0.000000000000000
633 1 0.420 16.773599999999998 33.894848055124278 0.000000000000000
634 2 -0.420 17.472500000000000 35.105378342807292 0.000000000000000
635 1 0.420 18.870300000000000 35.105378342807292 0.000000000000000
636 2 -0.420 19.569199999999999 33.894848055124278 0.000000000000000
637 1 0.420 20.966999999999999 33.894848055124278 0.000000000000000
638 2 -0.420 21.665900000000001 35.105378342807292 0.000000000000000
639 1 0.420 23.063699999999997 35.105378342807292 0.000000000000000
640 2 -0.420 23.762599999999999 33.894848055124278 0.000000000000000
641 1 0.420 25.160399999999999 33.894848055124278 0.000000000000000
642 2 -0.420 25.859299999999998 35.105378342807292 0.000000000000000
643 1 0.420 27.257099999999998 35.105378342807292 0.000000000000000
644 2 -0.420 27.956000000000000 33.894848055124278 0.000000000000000
645 1 0.420 29.353800000000000 33.894848055124278 0.000000000000000
646 2 -0.420 30.052699999999998 35.105378342807292 0.000000000000000
647 1 0.420 31.450499999999998 35.105378342807292 0.000000000000000
648 2 -0.420 32.149400000000000 33.894848055124278 0.000000000000000
649 1 0.420 33.547199999999997 33.894848055124278 0.000000000000000
650 2 -0.420 34.246099999999998 35.105378342807292 0.000000000000000
651 1 0.420 35.643899999999995 35.105378342807292 0.000000000000000
652 2 -0.420 36.342799999999997 33.894848055124278 0.000000000000000
653 1 0.420 37.740600000000001 33.894848055124278 0.000000000000000
654 2 -0.420 38.439499999999995 35.105378342807292 0.000000000000000
655 1 0.420 39.837299999999999 35.105378342807292 0.000000000000000
656 2 -0.420 40.536200000000001 33.894848055124278 0.000000000000000
657 1 0.420 41.933999999999997 33.894848055124278 0.000000000000000
658 2 -0.420 42.632899999999999 35.105378342807292 0.000000000000000
659 1 0.420 44.030699999999996 35.105378342807292 0.000000000000000
660 2 -0.420 44.729599999999998 33.894848055124278 0.000000000000000
661 1 0.420 0.000000000000000 36.315908630490298 0.000000000000000
662 2 -0.420 0.698900000000000 37.526438918173312 0.000000000000000
663 1 0.420 2.096700000000000 37.526438918173312 0.000000000000000
664 2 -0.420 2.795600000000000 36.315908630490298 0.000000000000000
665 1 0.420 4.193400000000000 36.315908630490298 0.000000000000000
666 2 -0.420 4.892300000000000 37.526438918173312 0.000000000000000
667 1 0.420 6.290100000000000 37.526438918173312 0.000000000000000
668 2 -0.420 6.989000000000000 36.315908630490298 0.000000000000000
669 1 0.420 8.386799999999999 36.315908630490298 0.000000000000000
670 2 -0.420 9.085699999999999 37.526438918173312 0.000000000000000
671 1 0.420 10.483499999999999 37.526438918173312 0.000000000000000
672 2 -0.420 11.182399999999999 36.315908630490298 0.000000000000000
673 1 0.420 12.580200000000000 36.315908630490298 0.000000000000000
674 2 -0.420 13.279100000000000 37.526438918173312 0.000000000000000
675 1 0.420 14.676900000000000 37.526438918173312 0.000000000000000
676 2 -0.420 15.375800000000000 36.315908630490298 0.000000000000000
677 1 0.420 16.773599999999998 36.315908630490298 0.000000000000000
678 2 -0.420 17.472500000000000 37.526438918173312 0.000000000000000
679 1 0.420 18.870300000000000 37.526438918173312 0.000000000000000
680 2 -0.420 19.569199999999999 36.315908630490298 0.000000000000000
681 1 0.420 20.966999999999999 36.315908630490298 0.000000000000000
682 2 -0.420 21.665900000000001 37.526438918173312 0.000000000000000
683 1 0.420 23.063699999999997 37.526438918173312 0.000000000000000
684 2 -0.420 23.762599999999999 36.315908630490298 0.000000000000000
685 1 0.420 25.160399999999999 36.315908630490298 0.000000000000000
686 2 -0.420 25.859299999999998 37.526438918173312 0.000000000000000
687 1 0.420 27.257099999999998 37.526438918173312 0.000000000000000
688 2 -0.420 27.956000000000000 36.315908630490298 0.000000000000000
689 1 0.420 29.353800000000000 36.315908630490298 0.000000000000000
690 2 -0.420 30.052699999999998 37.526438918173312 0.000000000000000
691 1 0.420 31.450499999999998 37.526438918173312 0.000000000000000
692 2 -0.420 32.149400000000000 36.315908630490298 0.000000000000000
693 1 0.420 33.547199999999997 36.315908630490298 0.000000000000000
694 2 -0.420 34.246099999999998 37.526438918173312 0.000000000000000
695 1 0.420 35.643899999999995 37.526438918173312 0.000000000000000
696 2 -0.420 36.342799999999997 36.315908630490298 0.000000000000000
697 1 0.420 37.740600000000001 36.315908630490298 0.000000000000000
698 2 -0.420 38.439499999999995 37.526438918173312 0.000000000000000
699 1 0.420 39.837299999999999 37.526438918173312 0.000000000000000
700 2 -0.420 40.536200000000001 36.315908630490298 0.000000000000000
701 1 0.420 41.933999999999997 36.315908630490298 0.000000000000000
702 2 -0.420 42.632899999999999 37.526438918173312 0.000000000000000
703 1 0.420 44.030699999999996 37.526438918173312 0.000000000000000
704 2 -0.420 44.729599999999998 36.315908630490298 0.000000000000000
705 1 0.420 0.000000000000000 38.736969205856319 0.000000000000000
706 2 -0.420 0.698900000000000 39.947499493539325 0.000000000000000
707 1 0.420 2.096700000000000 39.947499493539325 0.000000000000000
708 2 -0.420 2.795600000000000 38.736969205856319 0.000000000000000
709 1 0.420 4.193400000000000 38.736969205856319 0.000000000000000
710 2 -0.420 4.892300000000000 39.947499493539325 0.000000000000000
711 1 0.420 6.290100000000000 39.947499493539325 0.000000000000000
712 2 -0.420 6.989000000000000 38.736969205856319 0.000000000000000
713 1 0.420 8.386799999999999 38.736969205856319 0.000000000000000
714 2 -0.420 9.085699999999999 39.947499493539325 0.000000000000000
715 1 0.420 10.483499999999999 39.947499493539325 0.000000000000000
716 2 -0.420 11.182399999999999 38.736969205856319 0.000000000000000
717 1 0.420 12.580200000000000 38.736969205856319 0.000000000000000
718 2 -0.420 13.279100000000000 39.947499493539325 0.000000000000000
719 1 0.420 14.676900000000000 39.947499493539325 0.000000000000000
720 2 -0.420 15.375800000000000 38.736969205856319 0.000000000000000
721 1 0.420 16.773599999999998 38.736969205856319 0.000000000000000
722 2 -0.420 17.472500000000000 39.947499493539325 0.000000000000000
723 1 0.420 18.870300000000000 39.947499493539325 0.000000000000000
724 2 -0.420 19.569199999999999 38.736969205856319 0.000000000000000
725 1 0.420 20.966999999999999 38.736969205856319 0.000000000000000
726 2 -0.420 21.665900000000001 39.947499493539325 0.000000000000000
727 1 0.420 23.063699999999997 39.947499493539325 0.000000000000000
728 2 -0.420 23.762599999999999 38.736969205856319 0.000000000000000
729 1 0.420 25.160399999999999 38.736969205856319 0.000000000000000
730 2 -0.420 25.859299999999998 39.947499493539325 0.000000000000000
731 1 0.420 27.257099999999998 39.947499493539325 0.000000000000000
732 2 -0.420 27.956000000000000 38.736969205856319 0.000000000000000
733 1 0.420 29.353800000000000 38.736969205856319 0.000000000000000
734 2 -0.420 30.052699999999998 39.947499493539325 0.000000000000000
735 1 0.420 31.450499999999998 39.947499493539325 0.000000000000000
736 2 -0.420 32.149400000000000 38.736969205856319 0.000000000000000
737 1 0.420 33.547199999999997 38.736969205856319 0.000000000000000
738 2 -0.420 34.246099999999998 39.947499493539325 0.000000000000000
739 1 0.420 35.643899999999995 39.947499493539325 0.000000000000000
740 2 -0.420 36.342799999999997 38.736969205856319 0.000000000000000
741 1 0.420 37.740600000000001 38.736969205856319 0.000000000000000
742 2 -0.420 38.439499999999995 39.947499493539325 0.000000000000000
743 1 0.420 39.837299999999999 39.947499493539325 0.000000000000000
744 2 -0.420 40.536200000000001 38.736969205856319 0.000000000000000
745 1 0.420 41.933999999999997 38.736969205856319 0.000000000000000
746 2 -0.420 42.632899999999999 39.947499493539325 0.000000000000000
747 1 0.420 44.030699999999996 39.947499493539325 0.000000000000000
748 2 -0.420 44.729599999999998 38.736969205856319 0.000000000000000
749 1 0.420 0.000000000000000 41.158029781222339 0.000000000000000
750 2 -0.420 0.698900000000000 42.368560068905346 0.000000000000000
751 1 0.420 2.096700000000000 42.368560068905346 0.000000000000000
752 2 -0.420 2.795600000000000 41.158029781222339 0.000000000000000
753 1 0.420 4.193400000000000 41.158029781222339 0.000000000000000
754 2 -0.420 4.892300000000000 42.368560068905346 0.000000000000000
755 1 0.420 6.290100000000000 42.368560068905346 0.000000000000000
756 2 -0.420 6.989000000000000 41.158029781222339 0.000000000000000
757 1 0.420 8.386799999999999 41.158029781222339 0.000000000000000
758 2 -0.420 9.085699999999999 42.368560068905346 0.000000000000000
759 1 0.420 10.483499999999999 42.368560068905346 0.000000000000000
760 2 -0.420 11.182399999999999 41.158029781222339 0.000000000000000
761 1 0.420 12.580200000000000 41.158029781222339 0.000000000000000
762 2 -0.420 13.279100000000000 42.368560068905346 0.000000000000000
763 1 0.420 14.676900000000000 42.368560068905346 0.000000000000000
764 2 -0.420 15.375800000000000 41.158029781222339 0.000000000000000
765 1 0.420 16.773599999999998 41.158029781222339 0.000000000000000
766 2 -0.420 17.472500000000000 42.368560068905346 0.000000000000000
767 1 0.420 18.870300000000000 42.368560068905346 0.000000000000000
768 2 -0.420 19.569199999999999 41.158029781222339 0.000000000000000
769 1 0.420 20.966999999999999 41.158029781222339 0.000000000000000
770 2 -0.420 21.665900000000001 42.368560068905346 0.000000000000000
771 1 0.420 23.063699999999997 42.368560068905346 0.000000000000000
772 2 -0.420 23.762599999999999 41.158029781222339 0.000000000000000
773 1 0.420 25.160399999999999 41.158029781222339 0.000000000000000
774 2 -0.420 25.859299999999998 42.368560068905346 0.000000000000000
775 1 0.420 27.257099999999998 42.368560068905346 0.000000000000000
776 2 -0.420 27.956000000000000 41.158029781222339 0.000000000000000
777 1 0.420 29.353800000000000 41.158029781222339 0.000000000000000
778 2 -0.420 30.052699999999998 42.368560068905346 0.000000000000000
779 1 0.420 31.450499999999998 42.368560068905346 0.000000000000000
780 2 -0.420 32.149400000000000 41.158029781222339 0.000000000000000
781 1 0.420 33.547199999999997 41.158029781222339 0.000000000000000
782 2 -0.420 34.246099999999998 42.368560068905346 0.000000000000000
783 1 0.420 35.643899999999995 42.368560068905346 0.000000000000000
784 2 -0.420 36.342799999999997 41.158029781222339 0.000000000000000
785 1 0.420 37.740600000000001 41.158029781222339 0.000000000000000
786 2 -0.420 38.439499999999995 42.368560068905346 0.000000000000000
787 1 0.420 39.837299999999999 42.368560068905346 0.000000000000000
788 2 -0.420 40.536200000000001 41.158029781222339 0.000000000000000
789 1 0.420 41.933999999999997 41.158029781222339 0.000000000000000
790 2 -0.420 42.632899999999999 42.368560068905346 0.000000000000000
791 1 0.420 44.030699999999996 42.368560068905346 0.000000000000000
792 2 -0.420 44.729599999999998 41.158029781222339 0.000000000000000
793 1 0.420 0.000000000000000 43.579090356588360 0.000000000000000
794 2 -0.420 0.698900000000000 44.789620644271366 0.000000000000000
795 1 0.420 2.096700000000000 44.789620644271366 0.000000000000000
796 2 -0.420 2.795600000000000 43.579090356588360 0.000000000000000
797 1 0.420 4.193400000000000 43.579090356588360 0.000000000000000
798 2 -0.420 4.892300000000000 44.789620644271366 0.000000000000000
799 1 0.420 6.290100000000000 44.789620644271366 0.000000000000000
800 2 -0.420 6.989000000000000 43.579090356588360 0.000000000000000
801 1 0.420 8.386799999999999 43.579090356588360 0.000000000000000
802 2 -0.420 9.085699999999999 44.789620644271366 0.000000000000000
803 1 0.420 10.483499999999999 44.789620644271366 0.000000000000000
804 2 -0.420 11.182399999999999 43.579090356588360 0.000000000000000
805 1 0.420 12.580200000000000 43.579090356588360 0.000000000000000
806 2 -0.420 13.279100000000000 44.789620644271366 0.000000000000000
807 1 0.420 14.676900000000000 44.789620644271366 0.000000000000000
808 2 -0.420 15.375800000000000 43.579090356588360 0.000000000000000
809 1 0.420 16.773599999999998 43.579090356588360 0.000000000000000
810 2 -0.420 17.472500000000000 44.789620644271366 0.000000000000000
811 1 0.420 18.870300000000000 44.789620644271366 0.000000000000000
812 2 -0.420 19.569199999999999 43.579090356588360 0.000000000000000
813 1 0.420 20.966999999999999 43.579090356588360 0.000000000000000
814 2 -0.420 21.665900000000001 44.789620644271366 0.000000000000000
815 1 0.420 23.063699999999997 44.789620644271366 0.000000000000000
816 2 -0.420 23.762599999999999 43.579090356588360 0.000000000000000
817 1 0.420 25.160399999999999 43.579090356588360 0.000000000000000
818 2 -0.420 25.859299999999998 44.789620644271366 0.000000000000000
819 1 0.420 27.257099999999998 44.789620644271366 0.000000000000000
820 2 -0.420 27.956000000000000 43.579090356588360 0.000000000000000
821 1 0.420 29.353800000000000 43.579090356588360 0.000000000000000
822 2 -0.420 30.052699999999998 44.789620644271366 0.000000000000000
823 1 0.420 31.450499999999998 44.789620644271366 0.000000000000000
824 2 -0.420 32.149400000000000 43.579090356588360 0.000000000000000
825 1 0.420 33.547199999999997 43.579090356588360 0.000000000000000
826 2 -0.420 34.246099999999998 44.789620644271366 0.000000000000000
827 1 0.420 35.643899999999995 44.789620644271366 0.000000000000000
828 2 -0.420 36.342799999999997 43.579090356588360 0.000000000000000
829 1 0.420 37.740600000000001 43.579090356588360 0.000000000000000
830 2 -0.420 38.439499999999995 44.789620644271366 0.000000000000000
831 1 0.420 39.837299999999999 44.789620644271366 0.000000000000000
832 2 -0.420 40.536200000000001 43.579090356588360 0.000000000000000
833 1 0.420 41.933999999999997 43.579090356588360 0.000000000000000
834 2 -0.420 42.632899999999999 44.789620644271366 0.000000000000000
835 1 0.420 44.030699999999996 44.789620644271366 0.000000000000000
836 2 -0.420 44.729599999999998 43.579090356588360 0.000000000000000
837 1 0.420 0.000000000000000 46.000150931954380 0.000000000000000
838 2 -0.420 0.698900000000000 47.210681219637387 0.000000000000000
839 1 0.420 2.096700000000000 47.210681219637387 0.000000000000000
840 2 -0.420 2.795600000000000 46.000150931954380 0.000000000000000
841 1 0.420 4.193400000000000 46.000150931954380 0.000000000000000
842 2 -0.420 4.892300000000000 47.210681219637387 0.000000000000000
843 1 0.420 6.290100000000000 47.210681219637387 0.000000000000000
844 2 -0.420 6.989000000000000 46.000150931954380 0.000000000000000
845 1 0.420 8.386799999999999 46.000150931954380 0.000000000000000
846 2 -0.420 9.085699999999999 47.210681219637387 0.000000000000000
847 1 0.420 10.483499999999999 47.210681219637387 0.000000000000000
848 2 -0.420 11.182399999999999 46.000150931954380 0.000000000000000
849 1 0.420 12.580200000000000 46.000150931954380 0.000000000000000
850 2 -0.420 13.279100000000000 47.210681219637387 0.000000000000000
851 1 0.420 14.676900000000000 47.210681219637387 0.000000000000000
852 2 -0.420 15.375800000000000 46.000150931954380 0.000000000000000
853 1 0.420 16.773599999999998 46.000150931954380 0.000000000000000
854 2 -0.420 17.472500000000000 47.210681219637387 0.000000000000000
855 1 0.420 18.870300000000000 47.210681219637387 0.000000000000000
856 2 -0.420 19.569199999999999 46.000150931954380 0.000000000000000
857 1 0.420 20.966999999999999 46.000150931954380 0.000000000000000
858 2 -0.420 21.665900000000001 47.210681219637387 0.000000000000000
859 1 0.420 23.063699999999997 47.210681219637387 0.000000000000000
860 2 -0.420 23.762599999999999 46.000150931954380 0.000000000000000
861 1 0.420 25.160399999999999 46.000150931954380 0.000000000000000
862 2 -0.420 25.859299999999998 47.210681219637387 0.000000000000000
863 1 0.420 27.257099999999998 47.210681219637387 0.000000000000000
864 2 -0.420 27.956000000000000 46.000150931954380 0.000000000000000
865 1 0.420 29.353800000000000 46.000150931954380 0.000000000000000
866 2 -0.420 30.052699999999998 47.210681219637387 0.000000000000000
867 1 0.420 31.450499999999998 47.210681219637387 0.000000000000000
868 2 -0.420 32.149400000000000 46.000150931954380 0.000000000000000
869 1 0.420 33.547199999999997 46.000150931954380 0.000000000000000
870 2 -0.420 34.246099999999998 47.210681219637387 0.000000000000000
871 1 0.420 35.643899999999995 47.210681219637387 0.000000000000000
872 2 -0.420 36.342799999999997 46.000150931954380 0.000000000000000
873 1 0.420 37.740600000000001 46.000150931954380 0.000000000000000
874 2 -0.420 38.439499999999995 47.210681219637387 0.000000000000000
875 1 0.420 39.837299999999999 47.210681219637387 0.000000000000000
876 2 -0.420 40.536200000000001 46.000150931954380 0.000000000000000
877 1 0.420 41.933999999999997 46.000150931954380 0.000000000000000
878 2 -0.420 42.632899999999999 47.210681219637387 0.000000000000000
879 1 0.420 44.030699999999996 47.210681219637387 0.000000000000000
880 2 -0.420 44.729599999999998 46.000150931954380 0.000000000000000

37
examples/in.hBN_shift Normal file
View File

@ -0,0 +1,37 @@
# Initialization
units metal
boundary p p p
atom_style charge
processors * * 1 # domain decomposition over x and y
# System and atom definition
# (different molecule ids per hBN layer would let inter- and intra-layer
# interactions be specified separately; this example uses a single layer)
read_data hBN-momolayer-5nm.data
mass 1 10.8110 # boron mass (g/mole)
mass 2 14.0067 # nitrogen mass (g/mole)
######################## Potential definition ########################
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff B N
####################################################################
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
#### Simulation settings ####
timestep 0.001
velocity all create 300.0 4928459 loop geom
fix integrate all nve # plain NVE time integration (no thermostat)
############# Output ###############
thermo 100
thermo_style custom step etotal pe ke temp
thermo_modify line one format float %20.16g lost warn
###### Run molecular dynamics ######
run 1000

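The 880-atom listings in this commit encode an hBN honeycomb lattice: four-atom rows repeating every 4.1934 Å in x and every 2.42106 Å in y, with alternating ±0.42 e charges on B and N. As a cross-check of that layout, here is a minimal Python sketch (not part of the commit) that regenerates a data file with the same structure. The bond length b = 1.3978 Å, the 11 × 20 cell counts, and the output file name are assumptions read off the coordinates above; the box edges it produces (46.1274 × 48.4212 Å) come out marginally smaller than the committed 46.152980 × 48.443364 Å, so the original generator evidently used a slightly larger lattice constant for the box.

import math

b = 1.3978                     # assumed B-N bond length (Angstrom), inferred from the coordinates
q = 0.420                      # partial charge magnitude (e), as in the listing
nx, ny = 11, 20                # four-atom cells per direction: 11 * 20 * 4 = 880 atoms
dy = b * math.sqrt(3.0) / 2.0  # 1.21053 Angstrom, the y offset seen in the listing

# four-atom basis of one 3b x sqrt(3)*b rectangular cell: (type, charge, x, y)
basis = [(1,  q, 0.0,     0.0),
         (2, -q, 0.5 * b, dy),
         (1,  q, 1.5 * b, dy),
         (2, -q, 2.0 * b, 0.0)]

rows = []
for j in range(ny):
    for i in range(nx):
        for t, c, x, y in basis:
            rows.append((t, c, x + 3.0 * b * i, y + 2.0 * dy * j))

# write a LAMMPS data file for atom_style charge: "id type q x y z" per atom
with open("hBN-sketch.data", "w") as f:  # hypothetical name, not the committed file
    f.write("hBN monolayer (illustrative sketch)\n\n")
    f.write(f"{len(rows)} atoms\n2 atom types\n\n")
    f.write(f"0.0 {3.0 * b * nx:.6f} xlo xhi\n")
    f.write(f"0.0 {2.0 * dy * ny:.6f} ylo yhi\n")
    f.write("0.0 100.0 zlo zhi\n\nAtoms\n\n")
    for n, (t, c, x, y) in enumerate(rows, 1):
        f.write(f"{n} {t} {c:.3f} {x:.15f} {y:.15f} 0.0\n")

Once a LAMMPS binary is built, the example itself is launched in the usual way, e.g. lmp -in in.hBN_shift (the binary name depends on the build) from a directory that also holds the data file and the BNC.tersoff link.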
View File

@ -0,0 +1 @@
../../potentials/BNC.tersoff

1
examples/tersoff/Si.tersoff Symbolic link
View File

@ -0,0 +1 @@
../../potentials/Si.tersoff

View File

@ -0,0 +1,892 @@
Makeup graphene nanoribbon on hBN
880 atoms
2 atom types
0.000000000000000 46.152979739999999 xlo xhi
0.000000000000000 48.443364211584992 ylo yhi
0.000000000000000 100.000000000000000 zlo zhi
Atoms
1 1 0.420 0.000000000000000 0.000000000000000 0.000000000000000
2 2 -0.420 0.698900000000000 1.210530287683010 0.000000000000000
3 1 0.420 2.096700000000000 1.210530287683010 0.000000000000000
4 2 -0.420 2.795600000000000 0.000000000000000 0.000000000000000
5 1 0.420 4.193400000000000 0.000000000000000 0.000000000000000
6 2 -0.420 4.892300000000000 1.210530287683010 0.000000000000000
7 1 0.420 6.290100000000000 1.210530287683010 0.000000000000000
8 2 -0.420 6.989000000000000 0.000000000000000 0.000000000000000
9 1 0.420 8.386799999999999 0.000000000000000 0.000000000000000
10 2 -0.420 9.085699999999999 1.210530287683010 0.000000000000000
11 1 0.420 10.483499999999999 1.210530287683010 0.000000000000000
12 2 -0.420 11.182399999999999 0.000000000000000 0.000000000000000
13 1 0.420 12.580200000000000 0.000000000000000 0.000000000000000
14 2 -0.420 13.279100000000000 1.210530287683010 0.000000000000000
15 1 0.420 14.676900000000000 1.210530287683010 0.000000000000000
16 2 -0.420 15.375800000000000 0.000000000000000 0.000000000000000
17 1 0.420 16.773599999999998 0.000000000000000 0.000000000000000
18 2 -0.420 17.472500000000000 1.210530287683010 0.000000000000000
19 1 0.420 18.870300000000000 1.210530287683010 0.000000000000000
20 2 -0.420 19.569199999999999 0.000000000000000 0.000000000000000
21 1 0.420 20.966999999999999 0.000000000000000 0.000000000000000
22 2 -0.420 21.665900000000001 1.210530287683010 0.000000000000000
23 1 0.420 23.063699999999997 1.210530287683010 0.000000000000000
24 2 -0.420 23.762599999999999 0.000000000000000 0.000000000000000
25 1 0.420 25.160399999999999 0.000000000000000 0.000000000000000
26 2 -0.420 25.859299999999998 1.210530287683010 0.000000000000000
27 1 0.420 27.257099999999998 1.210530287683010 0.000000000000000
28 2 -0.420 27.956000000000000 0.000000000000000 0.000000000000000
29 1 0.420 29.353800000000000 0.000000000000000 0.000000000000000
30 2 -0.420 30.052699999999998 1.210530287683010 0.000000000000000
31 1 0.420 31.450499999999998 1.210530287683010 0.000000000000000
32 2 -0.420 32.149400000000000 0.000000000000000 0.000000000000000
33 1 0.420 33.547199999999997 0.000000000000000 0.000000000000000
34 2 -0.420 34.246099999999998 1.210530287683010 0.000000000000000
35 1 0.420 35.643899999999995 1.210530287683010 0.000000000000000
36 2 -0.420 36.342799999999997 0.000000000000000 0.000000000000000
37 1 0.420 37.740600000000001 0.000000000000000 0.000000000000000
38 2 -0.420 38.439499999999995 1.210530287683010 0.000000000000000
39 1 0.420 39.837299999999999 1.210530287683010 0.000000000000000
40 2 -0.420 40.536200000000001 0.000000000000000 0.000000000000000
41 1 0.420 41.933999999999997 0.000000000000000 0.000000000000000
42 2 -0.420 42.632899999999999 1.210530287683010 0.000000000000000
43 1 0.420 44.030699999999996 1.210530287683010 0.000000000000000
44 2 -0.420 44.729599999999998 0.000000000000000 0.000000000000000
45 1 0.420 0.000000000000000 2.421060575366020 0.000000000000000
46 2 -0.420 0.698900000000000 3.631590863049030 0.000000000000000
47 1 0.420 2.096700000000000 3.631590863049030 0.000000000000000
48 2 -0.420 2.795600000000000 2.421060575366020 0.000000000000000
49 1 0.420 4.193400000000000 2.421060575366020 0.000000000000000
50 2 -0.420 4.892300000000000 3.631590863049030 0.000000000000000
51 1 0.420 6.290100000000000 3.631590863049030 0.000000000000000
52 2 -0.420 6.989000000000000 2.421060575366020 0.000000000000000
53 1 0.420 8.386799999999999 2.421060575366020 0.000000000000000
54 2 -0.420 9.085699999999999 3.631590863049030 0.000000000000000
55 1 0.420 10.483499999999999 3.631590863049030 0.000000000000000
56 2 -0.420 11.182399999999999 2.421060575366020 0.000000000000000
57 1 0.420 12.580200000000000 2.421060575366020 0.000000000000000
58 2 -0.420 13.279100000000000 3.631590863049030 0.000000000000000
59 1 0.420 14.676900000000000 3.631590863049030 0.000000000000000
60 2 -0.420 15.375800000000000 2.421060575366020 0.000000000000000
61 1 0.420 16.773599999999998 2.421060575366020 0.000000000000000
62 2 -0.420 17.472500000000000 3.631590863049030 0.000000000000000
63 1 0.420 18.870300000000000 3.631590863049030 0.000000000000000
64 2 -0.420 19.569199999999999 2.421060575366020 0.000000000000000
65 1 0.420 20.966999999999999 2.421060575366020 0.000000000000000
66 2 -0.420 21.665900000000001 3.631590863049030 0.000000000000000
67 1 0.420 23.063699999999997 3.631590863049030 0.000000000000000
68 2 -0.420 23.762599999999999 2.421060575366020 0.000000000000000
69 1 0.420 25.160399999999999 2.421060575366020 0.000000000000000
70 2 -0.420 25.859299999999998 3.631590863049030 0.000000000000000
71 1 0.420 27.257099999999998 3.631590863049030 0.000000000000000
72 2 -0.420 27.956000000000000 2.421060575366020 0.000000000000000
73 1 0.420 29.353800000000000 2.421060575366020 0.000000000000000
74 2 -0.420 30.052699999999998 3.631590863049030 0.000000000000000
75 1 0.420 31.450499999999998 3.631590863049030 0.000000000000000
76 2 -0.420 32.149400000000000 2.421060575366020 0.000000000000000
77 1 0.420 33.547199999999997 2.421060575366020 0.000000000000000
78 2 -0.420 34.246099999999998 3.631590863049030 0.000000000000000
79 1 0.420 35.643899999999995 3.631590863049030 0.000000000000000
80 2 -0.420 36.342799999999997 2.421060575366020 0.000000000000000
81 1 0.420 37.740600000000001 2.421060575366020 0.000000000000000
82 2 -0.420 38.439499999999995 3.631590863049030 0.000000000000000
83 1 0.420 39.837299999999999 3.631590863049030 0.000000000000000
84 2 -0.420 40.536200000000001 2.421060575366020 0.000000000000000
85 1 0.420 41.933999999999997 2.421060575366020 0.000000000000000
86 2 -0.420 42.632899999999999 3.631590863049030 0.000000000000000
87 1 0.420 44.030699999999996 3.631590863049030 0.000000000000000
88 2 -0.420 44.729599999999998 2.421060575366020 0.000000000000000
89 1 0.420 0.000000000000000 4.842121150732040 0.000000000000000
90 2 -0.420 0.698900000000000 6.052651438415050 0.000000000000000
91 1 0.420 2.096700000000000 6.052651438415050 0.000000000000000
92 2 -0.420 2.795600000000000 4.842121150732040 0.000000000000000
93 1 0.420 4.193400000000000 4.842121150732040 0.000000000000000
94 2 -0.420 4.892300000000000 6.052651438415050 0.000000000000000
95 1 0.420 6.290100000000000 6.052651438415050 0.000000000000000
96 2 -0.420 6.989000000000000 4.842121150732040 0.000000000000000
97 1 0.420 8.386799999999999 4.842121150732040 0.000000000000000
98 2 -0.420 9.085699999999999 6.052651438415050 0.000000000000000
99 1 0.420 10.483499999999999 6.052651438415050 0.000000000000000
100 2 -0.420 11.182399999999999 4.842121150732040 0.000000000000000
101 1 0.420 12.580200000000000 4.842121150732040 0.000000000000000
102 2 -0.420 13.279100000000000 6.052651438415050 0.000000000000000
103 1 0.420 14.676900000000000 6.052651438415050 0.000000000000000
104 2 -0.420 15.375800000000000 4.842121150732040 0.000000000000000
105 1 0.420 16.773599999999998 4.842121150732040 0.000000000000000
106 2 -0.420 17.472500000000000 6.052651438415050 0.000000000000000
107 1 0.420 18.870300000000000 6.052651438415050 0.000000000000000
108 2 -0.420 19.569199999999999 4.842121150732040 0.000000000000000
109 1 0.420 20.966999999999999 4.842121150732040 0.000000000000000
110 2 -0.420 21.665900000000001 6.052651438415050 0.000000000000000
111 1 0.420 23.063699999999997 6.052651438415050 0.000000000000000
112 2 -0.420 23.762599999999999 4.842121150732040 0.000000000000000
113 1 0.420 25.160399999999999 4.842121150732040 0.000000000000000
114 2 -0.420 25.859299999999998 6.052651438415050 0.000000000000000
115 1 0.420 27.257099999999998 6.052651438415050 0.000000000000000
116 2 -0.420 27.956000000000000 4.842121150732040 0.000000000000000
117 1 0.420 29.353800000000000 4.842121150732040 0.000000000000000
118 2 -0.420 30.052699999999998 6.052651438415050 0.000000000000000
119 1 0.420 31.450499999999998 6.052651438415050 0.000000000000000
120 2 -0.420 32.149400000000000 4.842121150732040 0.000000000000000
121 1 0.420 33.547199999999997 4.842121150732040 0.000000000000000
122 2 -0.420 34.246099999999998 6.052651438415050 0.000000000000000
123 1 0.420 35.643899999999995 6.052651438415050 0.000000000000000
124 2 -0.420 36.342799999999997 4.842121150732040 0.000000000000000
125 1 0.420 37.740600000000001 4.842121150732040 0.000000000000000
126 2 -0.420 38.439499999999995 6.052651438415050 0.000000000000000
127 1 0.420 39.837299999999999 6.052651438415050 0.000000000000000
128 2 -0.420 40.536200000000001 4.842121150732040 0.000000000000000
129 1 0.420 41.933999999999997 4.842121150732040 0.000000000000000
130 2 -0.420 42.632899999999999 6.052651438415050 0.000000000000000
131 1 0.420 44.030699999999996 6.052651438415050 0.000000000000000
132 2 -0.420 44.729599999999998 4.842121150732040 0.000000000000000
133 1 0.420 0.000000000000000 7.263181726098059 0.000000000000000
134 2 -0.420 0.698900000000000 8.473712013781070 0.000000000000000
135 1 0.420 2.096700000000000 8.473712013781070 0.000000000000000
136 2 -0.420 2.795600000000000 7.263181726098059 0.000000000000000
137 1 0.420 4.193400000000000 7.263181726098059 0.000000000000000
138 2 -0.420 4.892300000000000 8.473712013781070 0.000000000000000
139 1 0.420 6.290100000000000 8.473712013781070 0.000000000000000
140 2 -0.420 6.989000000000000 7.263181726098059 0.000000000000000
141 1 0.420 8.386799999999999 7.263181726098059 0.000000000000000
142 2 -0.420 9.085699999999999 8.473712013781070 0.000000000000000
143 1 0.420 10.483499999999999 8.473712013781070 0.000000000000000
144 2 -0.420 11.182399999999999 7.263181726098059 0.000000000000000
145 1 0.420 12.580200000000000 7.263181726098059 0.000000000000000
146 2 -0.420 13.279100000000000 8.473712013781070 0.000000000000000
147 1 0.420 14.676900000000000 8.473712013781070 0.000000000000000
148 2 -0.420 15.375800000000000 7.263181726098059 0.000000000000000
149 1 0.420 16.773599999999998 7.263181726098059 0.000000000000000
150 2 -0.420 17.472500000000000 8.473712013781070 0.000000000000000
151 1 0.420 18.870300000000000 8.473712013781070 0.000000000000000
152 2 -0.420 19.569199999999999 7.263181726098059 0.000000000000000
153 1 0.420 20.966999999999999 7.263181726098059 0.000000000000000
154 2 -0.420 21.665900000000001 8.473712013781070 0.000000000000000
155 1 0.420 23.063699999999997 8.473712013781070 0.000000000000000
156 2 -0.420 23.762599999999999 7.263181726098059 0.000000000000000
157 1 0.420 25.160399999999999 7.263181726098059 0.000000000000000
158 2 -0.420 25.859299999999998 8.473712013781070 0.000000000000000
159 1 0.420 27.257099999999998 8.473712013781070 0.000000000000000
160 2 -0.420 27.956000000000000 7.263181726098059 0.000000000000000
161 1 0.420 29.353800000000000 7.263181726098059 0.000000000000000
162 2 -0.420 30.052699999999998 8.473712013781070 0.000000000000000
163 1 0.420 31.450499999999998 8.473712013781070 0.000000000000000
164 2 -0.420 32.149400000000000 7.263181726098059 0.000000000000000
165 1 0.420 33.547199999999997 7.263181726098059 0.000000000000000
166 2 -0.420 34.246099999999998 8.473712013781070 0.000000000000000
167 1 0.420 35.643899999999995 8.473712013781070 0.000000000000000
168 2 -0.420 36.342799999999997 7.263181726098059 0.000000000000000
169 1 0.420 37.740600000000001 7.263181726098059 0.000000000000000
170 2 -0.420 38.439499999999995 8.473712013781070 0.000000000000000
171 1 0.420 39.837299999999999 8.473712013781070 0.000000000000000
172 2 -0.420 40.536200000000001 7.263181726098059 0.000000000000000
173 1 0.420 41.933999999999997 7.263181726098059 0.000000000000000
174 2 -0.420 42.632899999999999 8.473712013781070 0.000000000000000
175 1 0.420 44.030699999999996 8.473712013781070 0.000000000000000
176 2 -0.420 44.729599999999998 7.263181726098059 0.000000000000000
177 1 0.420 0.000000000000000 9.684242301464080 0.000000000000000
178 2 -0.420 0.698900000000000 10.894772589147090 0.000000000000000
179 1 0.420 2.096700000000000 10.894772589147090 0.000000000000000
180 2 -0.420 2.795600000000000 9.684242301464080 0.000000000000000
181 1 0.420 4.193400000000000 9.684242301464080 0.000000000000000
182 2 -0.420 4.892300000000000 10.894772589147090 0.000000000000000
183 1 0.420 6.290100000000000 10.894772589147090 0.000000000000000
184 2 -0.420 6.989000000000000 9.684242301464080 0.000000000000000
185 1 0.420 8.386799999999999 9.684242301464080 0.000000000000000
186 2 -0.420 9.085699999999999 10.894772589147090 0.000000000000000
187 1 0.420 10.483499999999999 10.894772589147090 0.000000000000000
188 2 -0.420 11.182399999999999 9.684242301464080 0.000000000000000
189 1 0.420 12.580200000000000 9.684242301464080 0.000000000000000
190 2 -0.420 13.279100000000000 10.894772589147090 0.000000000000000
191 1 0.420 14.676900000000000 10.894772589147090 0.000000000000000
192 2 -0.420 15.375800000000000 9.684242301464080 0.000000000000000
193 1 0.420 16.773599999999998 9.684242301464080 0.000000000000000
194 2 -0.420 17.472500000000000 10.894772589147090 0.000000000000000
195 1 0.420 18.870300000000000 10.894772589147090 0.000000000000000
196 2 -0.420 19.569199999999999 9.684242301464080 0.000000000000000
197 1 0.420 20.966999999999999 9.684242301464080 0.000000000000000
198 2 -0.420 21.665900000000001 10.894772589147090 0.000000000000000
199 1 0.420 23.063699999999997 10.894772589147090 0.000000000000000
200 2 -0.420 23.762599999999999 9.684242301464080 0.000000000000000
201 1 0.420 25.160399999999999 9.684242301464080 0.000000000000000
202 2 -0.420 25.859299999999998 10.894772589147090 0.000000000000000
203 1 0.420 27.257099999999998 10.894772589147090 0.000000000000000
204 2 -0.420 27.956000000000000 9.684242301464080 0.000000000000000
205 1 0.420 29.353800000000000 9.684242301464080 0.000000000000000
206 2 -0.420 30.052699999999998 10.894772589147090 0.000000000000000
207 1 0.420 31.450499999999998 10.894772589147090 0.000000000000000
208 2 -0.420 32.149400000000000 9.684242301464080 0.000000000000000
209 1 0.420 33.547199999999997 9.684242301464080 0.000000000000000
210 2 -0.420 34.246099999999998 10.894772589147090 0.000000000000000
211 1 0.420 35.643899999999995 10.894772589147090 0.000000000000000
212 2 -0.420 36.342799999999997 9.684242301464080 0.000000000000000
213 1 0.420 37.740600000000001 9.684242301464080 0.000000000000000
214 2 -0.420 38.439499999999995 10.894772589147090 0.000000000000000
215 1 0.420 39.837299999999999 10.894772589147090 0.000000000000000
216 2 -0.420 40.536200000000001 9.684242301464080 0.000000000000000
217 1 0.420 41.933999999999997 9.684242301464080 0.000000000000000
218 2 -0.420 42.632899999999999 10.894772589147090 0.000000000000000
219 1 0.420 44.030699999999996 10.894772589147090 0.000000000000000
220 2 -0.420 44.729599999999998 9.684242301464080 0.000000000000000
221 1 0.420 0.000000000000000 12.105302876830100 0.000000000000000
222 2 -0.420 0.698900000000000 13.315833164513110 0.000000000000000
223 1 0.420 2.096700000000000 13.315833164513110 0.000000000000000
224 2 -0.420 2.795600000000000 12.105302876830100 0.000000000000000
225 1 0.420 4.193400000000000 12.105302876830100 0.000000000000000
226 2 -0.420 4.892300000000000 13.315833164513110 0.000000000000000
227 1 0.420 6.290100000000000 13.315833164513110 0.000000000000000
228 2 -0.420 6.989000000000000 12.105302876830100 0.000000000000000
229 1 0.420 8.386799999999999 12.105302876830100 0.000000000000000
230 2 -0.420 9.085699999999999 13.315833164513110 0.000000000000000
231 1 0.420 10.483499999999999 13.315833164513110 0.000000000000000
232 2 -0.420 11.182399999999999 12.105302876830100 0.000000000000000
233 1 0.420 12.580200000000000 12.105302876830100 0.000000000000000
234 2 -0.420 13.279100000000000 13.315833164513110 0.000000000000000
235 1 0.420 14.676900000000000 13.315833164513110 0.000000000000000
236 2 -0.420 15.375800000000000 12.105302876830100 0.000000000000000
237 1 0.420 16.773599999999998 12.105302876830100 0.000000000000000
238 2 -0.420 17.472500000000000 13.315833164513110 0.000000000000000
239 1 0.420 18.870300000000000 13.315833164513110 0.000000000000000
240 2 -0.420 19.569199999999999 12.105302876830100 0.000000000000000
241 1 0.420 20.966999999999999 12.105302876830100 0.000000000000000
242 2 -0.420 21.665900000000001 13.315833164513110 0.000000000000000
243 1 0.420 23.063699999999997 13.315833164513110 0.000000000000000
244 2 -0.420 23.762599999999999 12.105302876830100 0.000000000000000
245 1 0.420 25.160399999999999 12.105302876830100 0.000000000000000
246 2 -0.420 25.859299999999998 13.315833164513110 0.000000000000000
247 1 0.420 27.257099999999998 13.315833164513110 0.000000000000000
248 2 -0.420 27.956000000000000 12.105302876830100 0.000000000000000
249 1 0.420 29.353800000000000 12.105302876830100 0.000000000000000
250 2 -0.420 30.052699999999998 13.315833164513110 0.000000000000000
251 1 0.420 31.450499999999998 13.315833164513110 0.000000000000000
252 2 -0.420 32.149400000000000 12.105302876830100 0.000000000000000
253 1 0.420 33.547199999999997 12.105302876830100 0.000000000000000
254 2 -0.420 34.246099999999998 13.315833164513110 0.000000000000000
255 1 0.420 35.643899999999995 13.315833164513110 0.000000000000000
256 2 -0.420 36.342799999999997 12.105302876830100 0.000000000000000
257 1 0.420 37.740600000000001 12.105302876830100 0.000000000000000
258 2 -0.420 38.439499999999995 13.315833164513110 0.000000000000000
259 1 0.420 39.837299999999999 13.315833164513110 0.000000000000000
260 2 -0.420 40.536200000000001 12.105302876830100 0.000000000000000
261 1 0.420 41.933999999999997 12.105302876830100 0.000000000000000
262 2 -0.420 42.632899999999999 13.315833164513110 0.000000000000000
263 1 0.420 44.030699999999996 13.315833164513110 0.000000000000000
264 2 -0.420 44.729599999999998 12.105302876830100 0.000000000000000
265 1 0.420 0.000000000000000 14.526363452196119 0.000000000000000
266 2 -0.420 0.698900000000000 15.736893739879129 0.000000000000000
267 1 0.420 2.096700000000000 15.736893739879129 0.000000000000000
268 2 -0.420 2.795600000000000 14.526363452196119 0.000000000000000
269 1 0.420 4.193400000000000 14.526363452196119 0.000000000000000
270 2 -0.420 4.892300000000000 15.736893739879129 0.000000000000000
271 1 0.420 6.290100000000000 15.736893739879129 0.000000000000000
272 2 -0.420 6.989000000000000 14.526363452196119 0.000000000000000
273 1 0.420 8.386799999999999 14.526363452196119 0.000000000000000
274 2 -0.420 9.085699999999999 15.736893739879129 0.000000000000000
275 1 0.420 10.483499999999999 15.736893739879129 0.000000000000000
276 2 -0.420 11.182399999999999 14.526363452196119 0.000000000000000
277 1 0.420 12.580200000000000 14.526363452196119 0.000000000000000
278 2 -0.420 13.279100000000000 15.736893739879129 0.000000000000000
279 1 0.420 14.676900000000000 15.736893739879129 0.000000000000000
280 2 -0.420 15.375800000000000 14.526363452196119 0.000000000000000
281 1 0.420 16.773599999999998 14.526363452196119 0.000000000000000
282 2 -0.420 17.472500000000000 15.736893739879129 0.000000000000000
283 1 0.420 18.870300000000000 15.736893739879129 0.000000000000000
284 2 -0.420 19.569199999999999 14.526363452196119 0.000000000000000
285 1 0.420 20.966999999999999 14.526363452196119 0.000000000000000
286 2 -0.420 21.665900000000001 15.736893739879129 0.000000000000000
287 1 0.420 23.063699999999997 15.736893739879129 0.000000000000000
288 2 -0.420 23.762599999999999 14.526363452196119 0.000000000000000
289 1 0.420 25.160399999999999 14.526363452196119 0.000000000000000
290 2 -0.420 25.859299999999998 15.736893739879129 0.000000000000000
291 1 0.420 27.257099999999998 15.736893739879129 0.000000000000000
292 2 -0.420 27.956000000000000 14.526363452196119 0.000000000000000
293 1 0.420 29.353800000000000 14.526363452196119 0.000000000000000
294 2 -0.420 30.052699999999998 15.736893739879129 0.000000000000000
295 1 0.420 31.450499999999998 15.736893739879129 0.000000000000000
296 2 -0.420 32.149400000000000 14.526363452196119 0.000000000000000
297 1 0.420 33.547199999999997 14.526363452196119 0.000000000000000
298 2 -0.420 34.246099999999998 15.736893739879129 0.000000000000000
299 1 0.420 35.643899999999995 15.736893739879129 0.000000000000000
300 2 -0.420 36.342799999999997 14.526363452196119 0.000000000000000
301 1 0.420 37.740600000000001 14.526363452196119 0.000000000000000
302 2 -0.420 38.439499999999995 15.736893739879129 0.000000000000000
303 1 0.420 39.837299999999999 15.736893739879129 0.000000000000000
304 2 -0.420 40.536200000000001 14.526363452196119 0.000000000000000
305 1 0.420 41.933999999999997 14.526363452196119 0.000000000000000
306 2 -0.420 42.632899999999999 15.736893739879129 0.000000000000000
307 1 0.420 44.030699999999996 15.736893739879129 0.000000000000000
308 2 -0.420 44.729599999999998 14.526363452196119 0.000000000000000
309 1 0.420 0.000000000000000 16.947424027562139 0.000000000000000
310 2 -0.420 0.698900000000000 18.157954315245149 0.000000000000000
311 1 0.420 2.096700000000000 18.157954315245149 0.000000000000000
312 2 -0.420 2.795600000000000 16.947424027562139 0.000000000000000
313 1 0.420 4.193400000000000 16.947424027562139 0.000000000000000
314 2 -0.420 4.892300000000000 18.157954315245149 0.000000000000000
315 1 0.420 6.290100000000000 18.157954315245149 0.000000000000000
316 2 -0.420 6.989000000000000 16.947424027562139 0.000000000000000
317 1 0.420 8.386799999999999 16.947424027562139 0.000000000000000
318 2 -0.420 9.085699999999999 18.157954315245149 0.000000000000000
319 1 0.420 10.483499999999999 18.157954315245149 0.000000000000000
320 2 -0.420 11.182399999999999 16.947424027562139 0.000000000000000
321 1 0.420 12.580200000000000 16.947424027562139 0.000000000000000
322 2 -0.420 13.279100000000000 18.157954315245149 0.000000000000000
323 1 0.420 14.676900000000000 18.157954315245149 0.000000000000000
324 2 -0.420 15.375800000000000 16.947424027562139 0.000000000000000
325 1 0.420 16.773599999999998 16.947424027562139 0.000000000000000
326 2 -0.420 17.472500000000000 18.157954315245149 0.000000000000000
327 1 0.420 18.870300000000000 18.157954315245149 0.000000000000000
328 2 -0.420 19.569199999999999 16.947424027562139 0.000000000000000
329 1 0.420 20.966999999999999 16.947424027562139 0.000000000000000
330 2 -0.420 21.665900000000001 18.157954315245149 0.000000000000000
331 1 0.420 23.063699999999997 18.157954315245149 0.000000000000000
332 2 -0.420 23.762599999999999 16.947424027562139 0.000000000000000
333 1 0.420 25.160399999999999 16.947424027562139 0.000000000000000
334 2 -0.420 25.859299999999998 18.157954315245149 0.000000000000000
335 1 0.420 27.257099999999998 18.157954315245149 0.000000000000000
336 2 -0.420 27.956000000000000 16.947424027562139 0.000000000000000
337 1 0.420 29.353800000000000 16.947424027562139 0.000000000000000
338 2 -0.420 30.052699999999998 18.157954315245149 0.000000000000000
339 1 0.420 31.450499999999998 18.157954315245149 0.000000000000000
340 2 -0.420 32.149400000000000 16.947424027562139 0.000000000000000
341 1 0.420 33.547199999999997 16.947424027562139 0.000000000000000
342 2 -0.420 34.246099999999998 18.157954315245149 0.000000000000000
343 1 0.420 35.643899999999995 18.157954315245149 0.000000000000000
344 2 -0.420 36.342799999999997 16.947424027562139 0.000000000000000
345 1 0.420 37.740600000000001 16.947424027562139 0.000000000000000
346 2 -0.420 38.439499999999995 18.157954315245149 0.000000000000000
347 1 0.420 39.837299999999999 18.157954315245149 0.000000000000000
348 2 -0.420 40.536200000000001 16.947424027562139 0.000000000000000
349 1 0.420 41.933999999999997 16.947424027562139 0.000000000000000
350 2 -0.420 42.632899999999999 18.157954315245149 0.000000000000000
351 1 0.420 44.030699999999996 18.157954315245149 0.000000000000000
352 2 -0.420 44.729599999999998 16.947424027562139 0.000000000000000
353 1 0.420 0.000000000000000 19.368484602928159 0.000000000000000
354 2 -0.420 0.698900000000000 20.579014890611170 0.000000000000000
355 1 0.420 2.096700000000000 20.579014890611170 0.000000000000000
356 2 -0.420 2.795600000000000 19.368484602928159 0.000000000000000
357 1 0.420 4.193400000000000 19.368484602928159 0.000000000000000
358 2 -0.420 4.892300000000000 20.579014890611170 0.000000000000000
359 1 0.420 6.290100000000000 20.579014890611170 0.000000000000000
360 2 -0.420 6.989000000000000 19.368484602928159 0.000000000000000
361 1 0.420 8.386799999999999 19.368484602928159 0.000000000000000
362 2 -0.420 9.085699999999999 20.579014890611170 0.000000000000000
363 1 0.420 10.483499999999999 20.579014890611170 0.000000000000000
364 2 -0.420 11.182399999999999 19.368484602928159 0.000000000000000
365 1 0.420 12.580200000000000 19.368484602928159 0.000000000000000
366 2 -0.420 13.279100000000000 20.579014890611170 0.000000000000000
367 1 0.420 14.676900000000000 20.579014890611170 0.000000000000000
368 2 -0.420 15.375800000000000 19.368484602928159 0.000000000000000
369 1 0.420 16.773599999999998 19.368484602928159 0.000000000000000
370 2 -0.420 17.472500000000000 20.579014890611170 0.000000000000000
371 1 0.420 18.870300000000000 20.579014890611170 0.000000000000000
372 2 -0.420 19.569199999999999 19.368484602928159 0.000000000000000
373 1 0.420 20.966999999999999 19.368484602928159 0.000000000000000
374 2 -0.420 21.665900000000001 20.579014890611170 0.000000000000000
375 1 0.420 23.063699999999997 20.579014890611170 0.000000000000000
376 2 -0.420 23.762599999999999 19.368484602928159 0.000000000000000
377 1 0.420 25.160399999999999 19.368484602928159 0.000000000000000
378 2 -0.420 25.859299999999998 20.579014890611170 0.000000000000000
379 1 0.420 27.257099999999998 20.579014890611170 0.000000000000000
380 2 -0.420 27.956000000000000 19.368484602928159 0.000000000000000
381 1 0.420 29.353800000000000 19.368484602928159 0.000000000000000
382 2 -0.420 30.052699999999998 20.579014890611170 0.000000000000000
383 1 0.420 31.450499999999998 20.579014890611170 0.000000000000000
384 2 -0.420 32.149400000000000 19.368484602928159 0.000000000000000
385 1 0.420 33.547199999999997 19.368484602928159 0.000000000000000
386 2 -0.420 34.246099999999998 20.579014890611170 0.000000000000000
387 1 0.420 35.643899999999995 20.579014890611170 0.000000000000000
388 2 -0.420 36.342799999999997 19.368484602928159 0.000000000000000
389 1 0.420 37.740600000000001 19.368484602928159 0.000000000000000
390 2 -0.420 38.439499999999995 20.579014890611170 0.000000000000000
391 1 0.420 39.837299999999999 20.579014890611170 0.000000000000000
392 2 -0.420 40.536200000000001 19.368484602928159 0.000000000000000
393 1 0.420 41.933999999999997 19.368484602928159 0.000000000000000
394 2 -0.420 42.632899999999999 20.579014890611170 0.000000000000000
395 1 0.420 44.030699999999996 20.579014890611170 0.000000000000000
396 2 -0.420 44.729599999999998 19.368484602928159 0.000000000000000
397 1 0.420 0.000000000000000 21.789545178294180 0.000000000000000
398 2 -0.420 0.698900000000000 23.000075465977190 0.000000000000000
399 1 0.420 2.096700000000000 23.000075465977190 0.000000000000000
400 2 -0.420 2.795600000000000 21.789545178294180 0.000000000000000
401 1 0.420 4.193400000000000 21.789545178294180 0.000000000000000
402 2 -0.420 4.892300000000000 23.000075465977190 0.000000000000000
403 1 0.420 6.290100000000000 23.000075465977190 0.000000000000000
404 2 -0.420 6.989000000000000 21.789545178294180 0.000000000000000
405 1 0.420 8.386799999999999 21.789545178294180 0.000000000000000
406 2 -0.420 9.085699999999999 23.000075465977190 0.000000000000000
407 1 0.420 10.483499999999999 23.000075465977190 0.000000000000000
408 2 -0.420 11.182399999999999 21.789545178294180 0.000000000000000
409 1 0.420 12.580200000000000 21.789545178294180 0.000000000000000
410 2 -0.420 13.279100000000000 23.000075465977190 0.000000000000000
411 1 0.420 14.676900000000000 23.000075465977190 0.000000000000000
412 2 -0.420 15.375800000000000 21.789545178294180 0.000000000000000
413 1 0.420 16.773599999999998 21.789545178294180 0.000000000000000
414 2 -0.420 17.472500000000000 23.000075465977190 0.000000000000000
415 1 0.420 18.870300000000000 23.000075465977190 0.000000000000000
416 2 -0.420 19.569199999999999 21.789545178294180 0.000000000000000
417 1 0.420 20.966999999999999 21.789545178294180 0.000000000000000
418 2 -0.420 21.665900000000001 23.000075465977190 0.000000000000000
419 1 0.420 23.063699999999997 23.000075465977190 0.000000000000000
420 2 -0.420 23.762599999999999 21.789545178294180 0.000000000000000
421 1 0.420 25.160399999999999 21.789545178294180 0.000000000000000
422 2 -0.420 25.859299999999998 23.000075465977190 0.000000000000000
423 1 0.420 27.257099999999998 23.000075465977190 0.000000000000000
424 2 -0.420 27.956000000000000 21.789545178294180 0.000000000000000
425 1 0.420 29.353800000000000 21.789545178294180 0.000000000000000
426 2 -0.420 30.052699999999998 23.000075465977190 0.000000000000000
427 1 0.420 31.450499999999998 23.000075465977190 0.000000000000000
428 2 -0.420 32.149400000000000 21.789545178294180 0.000000000000000
429 1 0.420 33.547199999999997 21.789545178294180 0.000000000000000
430 2 -0.420 34.246099999999998 23.000075465977190 0.000000000000000
431 1 0.420 35.643899999999995 23.000075465977190 0.000000000000000
432 2 -0.420 36.342799999999997 21.789545178294180 0.000000000000000
433 1 0.420 37.740600000000001 21.789545178294180 0.000000000000000
434 2 -0.420 38.439499999999995 23.000075465977190 0.000000000000000
435 1 0.420 39.837299999999999 23.000075465977190 0.000000000000000
436 2 -0.420 40.536200000000001 21.789545178294180 0.000000000000000
437 1 0.420 41.933999999999997 21.789545178294180 0.000000000000000
438 2 -0.420 42.632899999999999 23.000075465977190 0.000000000000000
439 1 0.420 44.030699999999996 23.000075465977190 0.000000000000000
440 2 -0.420 44.729599999999998 21.789545178294180 0.000000000000000
441 1 0.420 0.000000000000000 24.210605753660200 0.000000000000000
442 2 -0.420 0.698900000000000 25.421136041343210 0.000000000000000
443 1 0.420 2.096700000000000 25.421136041343210 0.000000000000000
444 2 -0.420 2.795600000000000 24.210605753660200 0.000000000000000
445 1 0.420 4.193400000000000 24.210605753660200 0.000000000000000
446 2 -0.420 4.892300000000000 25.421136041343210 0.000000000000000
447 1 0.420 6.290100000000000 25.421136041343210 0.000000000000000
448 2 -0.420 6.989000000000000 24.210605753660200 0.000000000000000
449 1 0.420 8.386799999999999 24.210605753660200 0.000000000000000
450 2 -0.420 9.085699999999999 25.421136041343210 0.000000000000000
451 1 0.420 10.483499999999999 25.421136041343210 0.000000000000000
452 2 -0.420 11.182399999999999 24.210605753660200 0.000000000000000
453 1 0.420 12.580200000000000 24.210605753660200 0.000000000000000
454 2 -0.420 13.279100000000000 25.421136041343210 0.000000000000000
455 1 0.420 14.676900000000000 25.421136041343210 0.000000000000000
456 2 -0.420 15.375800000000000 24.210605753660200 0.000000000000000
457 1 0.420 16.773599999999998 24.210605753660200 0.000000000000000
458 2 -0.420 17.472500000000000 25.421136041343210 0.000000000000000
459 1 0.420 18.870300000000000 25.421136041343210 0.000000000000000
460 2 -0.420 19.569199999999999 24.210605753660200 0.000000000000000
461 1 0.420 20.966999999999999 24.210605753660200 0.000000000000000
462 2 -0.420 21.665900000000001 25.421136041343210 0.000000000000000
463 1 0.420 23.063699999999997 25.421136041343210 0.000000000000000
464 2 -0.420 23.762599999999999 24.210605753660200 0.000000000000000
465 1 0.420 25.160399999999999 24.210605753660200 0.000000000000000
466 2 -0.420 25.859299999999998 25.421136041343210 0.000000000000000
467 1 0.420 27.257099999999998 25.421136041343210 0.000000000000000
468 2 -0.420 27.956000000000000 24.210605753660200 0.000000000000000
469 1 0.420 29.353800000000000 24.210605753660200 0.000000000000000
470 2 -0.420 30.052699999999998 25.421136041343210 0.000000000000000
471 1 0.420 31.450499999999998 25.421136041343210 0.000000000000000
472 2 -0.420 32.149400000000000 24.210605753660200 0.000000000000000
473 1 0.420 33.547199999999997 24.210605753660200 0.000000000000000
474 2 -0.420 34.246099999999998 25.421136041343210 0.000000000000000
475 1 0.420 35.643899999999995 25.421136041343210 0.000000000000000
476 2 -0.420 36.342799999999997 24.210605753660200 0.000000000000000
477 1 0.420 37.740600000000001 24.210605753660200 0.000000000000000
478 2 -0.420 38.439499999999995 25.421136041343210 0.000000000000000
479 1 0.420 39.837299999999999 25.421136041343210 0.000000000000000
480 2 -0.420 40.536200000000001 24.210605753660200 0.000000000000000
481 1 0.420 41.933999999999997 24.210605753660200 0.000000000000000
482 2 -0.420 42.632899999999999 25.421136041343210 0.000000000000000
483 1 0.420 44.030699999999996 25.421136041343210 0.000000000000000
484 2 -0.420 44.729599999999998 24.210605753660200 0.000000000000000
485 1 0.420 0.000000000000000 26.631666329026221 0.000000000000000
486 2 -0.420 0.698900000000000 27.842196616709231 0.000000000000000
487 1 0.420 2.096700000000000 27.842196616709231 0.000000000000000
488 2 -0.420 2.795600000000000 26.631666329026221 0.000000000000000
489 1 0.420 4.193400000000000 26.631666329026221 0.000000000000000
490 2 -0.420 4.892300000000000 27.842196616709231 0.000000000000000
491 1 0.420 6.290100000000000 27.842196616709231 0.000000000000000
492 2 -0.420 6.989000000000000 26.631666329026221 0.000000000000000
493 1 0.420 8.386799999999999 26.631666329026221 0.000000000000000
494 2 -0.420 9.085699999999999 27.842196616709231 0.000000000000000
495 1 0.420 10.483499999999999 27.842196616709231 0.000000000000000
496 2 -0.420 11.182399999999999 26.631666329026221 0.000000000000000
497 1 0.420 12.580200000000000 26.631666329026221 0.000000000000000
498 2 -0.420 13.279100000000000 27.842196616709231 0.000000000000000
499 1 0.420 14.676900000000000 27.842196616709231 0.000000000000000
500 2 -0.420 15.375800000000000 26.631666329026221 0.000000000000000
501 1 0.420 16.773599999999998 26.631666329026221 0.000000000000000
502 2 -0.420 17.472500000000000 27.842196616709231 0.000000000000000
503 1 0.420 18.870300000000000 27.842196616709231 0.000000000000000
504 2 -0.420 19.569199999999999 26.631666329026221 0.000000000000000
505 1 0.420 20.966999999999999 26.631666329026221 0.000000000000000
506 2 -0.420 21.665900000000001 27.842196616709231 0.000000000000000
507 1 0.420 23.063699999999997 27.842196616709231 0.000000000000000
508 2 -0.420 23.762599999999999 26.631666329026221 0.000000000000000
509 1 0.420 25.160399999999999 26.631666329026221 0.000000000000000
510 2 -0.420 25.859299999999998 27.842196616709231 0.000000000000000
511 1 0.420 27.257099999999998 27.842196616709231 0.000000000000000
512 2 -0.420 27.956000000000000 26.631666329026221 0.000000000000000
513 1 0.420 29.353800000000000 26.631666329026221 0.000000000000000
514 2 -0.420 30.052699999999998 27.842196616709231 0.000000000000000
515 1 0.420 31.450499999999998 27.842196616709231 0.000000000000000
516 2 -0.420 32.149400000000000 26.631666329026221 0.000000000000000
517 1 0.420 33.547199999999997 26.631666329026221 0.000000000000000
518 2 -0.420 34.246099999999998 27.842196616709231 0.000000000000000
519 1 0.420 35.643899999999995 27.842196616709231 0.000000000000000
520 2 -0.420 36.342799999999997 26.631666329026221 0.000000000000000
521 1 0.420 37.740600000000001 26.631666329026221 0.000000000000000
522 2 -0.420 38.439499999999995 27.842196616709231 0.000000000000000
523 1 0.420 39.837299999999999 27.842196616709231 0.000000000000000
524 2 -0.420 40.536200000000001 26.631666329026221 0.000000000000000
525 1 0.420 41.933999999999997 26.631666329026221 0.000000000000000
526 2 -0.420 42.632899999999999 27.842196616709231 0.000000000000000
527 1 0.420 44.030699999999996 27.842196616709231 0.000000000000000
528 2 -0.420 44.729599999999998 26.631666329026221 0.000000000000000
529 1 0.420 0.000000000000000 29.052726904392237 0.000000000000000
530 2 -0.420 0.698900000000000 30.263257192075248 0.000000000000000
531 1 0.420 2.096700000000000 30.263257192075248 0.000000000000000
532 2 -0.420 2.795600000000000 29.052726904392237 0.000000000000000
533 1 0.420 4.193400000000000 29.052726904392237 0.000000000000000
534 2 -0.420 4.892300000000000 30.263257192075248 0.000000000000000
535 1 0.420 6.290100000000000 30.263257192075248 0.000000000000000
536 2 -0.420 6.989000000000000 29.052726904392237 0.000000000000000
537 1 0.420 8.386799999999999 29.052726904392237 0.000000000000000
538 2 -0.420 9.085699999999999 30.263257192075248 0.000000000000000
539 1 0.420 10.483499999999999 30.263257192075248 0.000000000000000
540 2 -0.420 11.182399999999999 29.052726904392237 0.000000000000000
541 1 0.420 12.580200000000000 29.052726904392237 0.000000000000000
542 2 -0.420 13.279100000000000 30.263257192075248 0.000000000000000
543 1 0.420 14.676900000000000 30.263257192075248 0.000000000000000
544 2 -0.420 15.375800000000000 29.052726904392237 0.000000000000000
545 1 0.420 16.773599999999998 29.052726904392237 0.000000000000000
546 2 -0.420 17.472500000000000 30.263257192075248 0.000000000000000
547 1 0.420 18.870300000000000 30.263257192075248 0.000000000000000
548 2 -0.420 19.569199999999999 29.052726904392237 0.000000000000000
549 1 0.420 20.966999999999999 29.052726904392237 0.000000000000000
550 2 -0.420 21.665900000000001 30.263257192075248 0.000000000000000
551 1 0.420 23.063699999999997 30.263257192075248 0.000000000000000
552 2 -0.420 23.762599999999999 29.052726904392237 0.000000000000000
553 1 0.420 25.160399999999999 29.052726904392237 0.000000000000000
554 2 -0.420 25.859299999999998 30.263257192075248 0.000000000000000
555 1 0.420 27.257099999999998 30.263257192075248 0.000000000000000
556 2 -0.420 27.956000000000000 29.052726904392237 0.000000000000000
557 1 0.420 29.353800000000000 29.052726904392237 0.000000000000000
558 2 -0.420 30.052699999999998 30.263257192075248 0.000000000000000
559 1 0.420 31.450499999999998 30.263257192075248 0.000000000000000
560 2 -0.420 32.149400000000000 29.052726904392237 0.000000000000000
561 1 0.420 33.547199999999997 29.052726904392237 0.000000000000000
562 2 -0.420 34.246099999999998 30.263257192075248 0.000000000000000
563 1 0.420 35.643899999999995 30.263257192075248 0.000000000000000
564 2 -0.420 36.342799999999997 29.052726904392237 0.000000000000000
565 1 0.420 37.740600000000001 29.052726904392237 0.000000000000000
566 2 -0.420 38.439499999999995 30.263257192075248 0.000000000000000
567 1 0.420 39.837299999999999 30.263257192075248 0.000000000000000
568 2 -0.420 40.536200000000001 29.052726904392237 0.000000000000000
569 1 0.420 41.933999999999997 29.052726904392237 0.000000000000000
570 2 -0.420 42.632899999999999 30.263257192075248 0.000000000000000
571 1 0.420 44.030699999999996 30.263257192075248 0.000000000000000
572 2 -0.420 44.729599999999998 29.052726904392237 0.000000000000000
573 1 0.420 0.000000000000000 31.473787479758258 0.000000000000000
574 2 -0.420 0.698900000000000 32.684317767441271 0.000000000000000
575 1 0.420 2.096700000000000 32.684317767441271 0.000000000000000
576 2 -0.420 2.795600000000000 31.473787479758258 0.000000000000000
577 1 0.420 4.193400000000000 31.473787479758258 0.000000000000000
578 2 -0.420 4.892300000000000 32.684317767441271 0.000000000000000
579 1 0.420 6.290100000000000 32.684317767441271 0.000000000000000
580 2 -0.420 6.989000000000000 31.473787479758258 0.000000000000000
581 1 0.420 8.386799999999999 31.473787479758258 0.000000000000000
582 2 -0.420 9.085699999999999 32.684317767441271 0.000000000000000
583 1 0.420 10.483499999999999 32.684317767441271 0.000000000000000
584 2 -0.420 11.182399999999999 31.473787479758258 0.000000000000000
585 1 0.420 12.580200000000000 31.473787479758258 0.000000000000000
586 2 -0.420 13.279100000000000 32.684317767441271 0.000000000000000
587 1 0.420 14.676900000000000 32.684317767441271 0.000000000000000
588 2 -0.420 15.375800000000000 31.473787479758258 0.000000000000000
589 1 0.420 16.773599999999998 31.473787479758258 0.000000000000000
590 2 -0.420 17.472500000000000 32.684317767441271 0.000000000000000
591 1 0.420 18.870300000000000 32.684317767441271 0.000000000000000
592 2 -0.420 19.569199999999999 31.473787479758258 0.000000000000000
593 1 0.420 20.966999999999999 31.473787479758258 0.000000000000000
594 2 -0.420 21.665900000000001 32.684317767441271 0.000000000000000
595 1 0.420 23.063699999999997 32.684317767441271 0.000000000000000
596 2 -0.420 23.762599999999999 31.473787479758258 0.000000000000000
597 1 0.420 25.160399999999999 31.473787479758258 0.000000000000000
598 2 -0.420 25.859299999999998 32.684317767441271 0.000000000000000
599 1 0.420 27.257099999999998 32.684317767441271 0.000000000000000
600 2 -0.420 27.956000000000000 31.473787479758258 0.000000000000000
601 1 0.420 29.353800000000000 31.473787479758258 0.000000000000000
602 2 -0.420 30.052699999999998 32.684317767441271 0.000000000000000
603 1 0.420 31.450499999999998 32.684317767441271 0.000000000000000
604 2 -0.420 32.149400000000000 31.473787479758258 0.000000000000000
605 1 0.420 33.547199999999997 31.473787479758258 0.000000000000000
606 2 -0.420 34.246099999999998 32.684317767441271 0.000000000000000
607 1 0.420 35.643899999999995 32.684317767441271 0.000000000000000
608 2 -0.420 36.342799999999997 31.473787479758258 0.000000000000000
609 1 0.420 37.740600000000001 31.473787479758258 0.000000000000000
610 2 -0.420 38.439499999999995 32.684317767441271 0.000000000000000
611 1 0.420 39.837299999999999 32.684317767441271 0.000000000000000
612 2 -0.420 40.536200000000001 31.473787479758258 0.000000000000000
613 1 0.420 41.933999999999997 31.473787479758258 0.000000000000000
614 2 -0.420 42.632899999999999 32.684317767441271 0.000000000000000
615 1 0.420 44.030699999999996 32.684317767441271 0.000000000000000
616 2 -0.420 44.729599999999998 31.473787479758258 0.000000000000000
617 1 0.420 0.000000000000000 33.894848055124278 0.000000000000000
618 2 -0.420 0.698900000000000 35.105378342807292 0.000000000000000
619 1 0.420 2.096700000000000 35.105378342807292 0.000000000000000
620 2 -0.420 2.795600000000000 33.894848055124278 0.000000000000000
621 1 0.420 4.193400000000000 33.894848055124278 0.000000000000000
622 2 -0.420 4.892300000000000 35.105378342807292 0.000000000000000
623 1 0.420 6.290100000000000 35.105378342807292 0.000000000000000
624 2 -0.420 6.989000000000000 33.894848055124278 0.000000000000000
625 1 0.420 8.386799999999999 33.894848055124278 0.000000000000000
626 2 -0.420 9.085699999999999 35.105378342807292 0.000000000000000
627 1 0.420 10.483499999999999 35.105378342807292 0.000000000000000
628 2 -0.420 11.182399999999999 33.894848055124278 0.000000000000000
629 1 0.420 12.580200000000000 33.894848055124278 0.000000000000000
630 2 -0.420 13.279100000000000 35.105378342807292 0.000000000000000
631 1 0.420 14.676900000000000 35.105378342807292 0.000000000000000
632 2 -0.420 15.375800000000000 33.894848055124278 0.000000000000000
633 1 0.420 16.773599999999998 33.894848055124278 0.000000000000000
634 2 -0.420 17.472500000000000 35.105378342807292 0.000000000000000
635 1 0.420 18.870300000000000 35.105378342807292 0.000000000000000
636 2 -0.420 19.569199999999999 33.894848055124278 0.000000000000000
637 1 0.420 20.966999999999999 33.894848055124278 0.000000000000000
638 2 -0.420 21.665900000000001 35.105378342807292 0.000000000000000
639 1 0.420 23.063699999999997 35.105378342807292 0.000000000000000
640 2 -0.420 23.762599999999999 33.894848055124278 0.000000000000000
641 1 0.420 25.160399999999999 33.894848055124278 0.000000000000000
642 2 -0.420 25.859299999999998 35.105378342807292 0.000000000000000
643 1 0.420 27.257099999999998 35.105378342807292 0.000000000000000
644 2 -0.420 27.956000000000000 33.894848055124278 0.000000000000000
645 1 0.420 29.353800000000000 33.894848055124278 0.000000000000000
646 2 -0.420 30.052699999999998 35.105378342807292 0.000000000000000
647 1 0.420 31.450499999999998 35.105378342807292 0.000000000000000
648 2 -0.420 32.149400000000000 33.894848055124278 0.000000000000000
649 1 0.420 33.547199999999997 33.894848055124278 0.000000000000000
650 2 -0.420 34.246099999999998 35.105378342807292 0.000000000000000
651 1 0.420 35.643899999999995 35.105378342807292 0.000000000000000
652 2 -0.420 36.342799999999997 33.894848055124278 0.000000000000000
653 1 0.420 37.740600000000001 33.894848055124278 0.000000000000000
654 2 -0.420 38.439499999999995 35.105378342807292 0.000000000000000
655 1 0.420 39.837299999999999 35.105378342807292 0.000000000000000
656 2 -0.420 40.536200000000001 33.894848055124278 0.000000000000000
657 1 0.420 41.933999999999997 33.894848055124278 0.000000000000000
658 2 -0.420 42.632899999999999 35.105378342807292 0.000000000000000
659 1 0.420 44.030699999999996 35.105378342807292 0.000000000000000
660 2 -0.420 44.729599999999998 33.894848055124278 0.000000000000000
661 1 0.420 0.000000000000000 36.315908630490298 0.000000000000000
662 2 -0.420 0.698900000000000 37.526438918173312 0.000000000000000
663 1 0.420 2.096700000000000 37.526438918173312 0.000000000000000
664 2 -0.420 2.795600000000000 36.315908630490298 0.000000000000000
665 1 0.420 4.193400000000000 36.315908630490298 0.000000000000000
666 2 -0.420 4.892300000000000 37.526438918173312 0.000000000000000
667 1 0.420 6.290100000000000 37.526438918173312 0.000000000000000
668 2 -0.420 6.989000000000000 36.315908630490298 0.000000000000000
669 1 0.420 8.386799999999999 36.315908630490298 0.000000000000000
670 2 -0.420 9.085699999999999 37.526438918173312 0.000000000000000
671 1 0.420 10.483499999999999 37.526438918173312 0.000000000000000
672 2 -0.420 11.182399999999999 36.315908630490298 0.000000000000000
673 1 0.420 12.580200000000000 36.315908630490298 0.000000000000000
674 2 -0.420 13.279100000000000 37.526438918173312 0.000000000000000
675 1 0.420 14.676900000000000 37.526438918173312 0.000000000000000
676 2 -0.420 15.375800000000000 36.315908630490298 0.000000000000000
677 1 0.420 16.773599999999998 36.315908630490298 0.000000000000000
678 2 -0.420 17.472500000000000 37.526438918173312 0.000000000000000
679 1 0.420 18.870300000000000 37.526438918173312 0.000000000000000
680 2 -0.420 19.569199999999999 36.315908630490298 0.000000000000000
681 1 0.420 20.966999999999999 36.315908630490298 0.000000000000000
682 2 -0.420 21.665900000000001 37.526438918173312 0.000000000000000
683 1 0.420 23.063699999999997 37.526438918173312 0.000000000000000
684 2 -0.420 23.762599999999999 36.315908630490298 0.000000000000000
685 1 0.420 25.160399999999999 36.315908630490298 0.000000000000000
686 2 -0.420 25.859299999999998 37.526438918173312 0.000000000000000
687 1 0.420 27.257099999999998 37.526438918173312 0.000000000000000
688 2 -0.420 27.956000000000000 36.315908630490298 0.000000000000000
689 1 0.420 29.353800000000000 36.315908630490298 0.000000000000000
690 2 -0.420 30.052699999999998 37.526438918173312 0.000000000000000
691 1 0.420 31.450499999999998 37.526438918173312 0.000000000000000
692 2 -0.420 32.149400000000000 36.315908630490298 0.000000000000000
693 1 0.420 33.547199999999997 36.315908630490298 0.000000000000000
694 2 -0.420 34.246099999999998 37.526438918173312 0.000000000000000
695 1 0.420 35.643899999999995 37.526438918173312 0.000000000000000
696 2 -0.420 36.342799999999997 36.315908630490298 0.000000000000000
697 1 0.420 37.740600000000001 36.315908630490298 0.000000000000000
698 2 -0.420 38.439499999999995 37.526438918173312 0.000000000000000
699 1 0.420 39.837299999999999 37.526438918173312 0.000000000000000
700 2 -0.420 40.536200000000001 36.315908630490298 0.000000000000000
701 1 0.420 41.933999999999997 36.315908630490298 0.000000000000000
702 2 -0.420 42.632899999999999 37.526438918173312 0.000000000000000
703 1 0.420 44.030699999999996 37.526438918173312 0.000000000000000
704 2 -0.420 44.729599999999998 36.315908630490298 0.000000000000000
705 1 0.420 0.000000000000000 38.736969205856319 0.000000000000000
706 2 -0.420 0.698900000000000 39.947499493539325 0.000000000000000
707 1 0.420 2.096700000000000 39.947499493539325 0.000000000000000
708 2 -0.420 2.795600000000000 38.736969205856319 0.000000000000000
709 1 0.420 4.193400000000000 38.736969205856319 0.000000000000000
710 2 -0.420 4.892300000000000 39.947499493539325 0.000000000000000
711 1 0.420 6.290100000000000 39.947499493539325 0.000000000000000
712 2 -0.420 6.989000000000000 38.736969205856319 0.000000000000000
713 1 0.420 8.386799999999999 38.736969205856319 0.000000000000000
714 2 -0.420 9.085699999999999 39.947499493539325 0.000000000000000
715 1 0.420 10.483499999999999 39.947499493539325 0.000000000000000
716 2 -0.420 11.182399999999999 38.736969205856319 0.000000000000000
717 1 0.420 12.580200000000000 38.736969205856319 0.000000000000000
718 2 -0.420 13.279100000000000 39.947499493539325 0.000000000000000
719 1 0.420 14.676900000000000 39.947499493539325 0.000000000000000
720 2 -0.420 15.375800000000000 38.736969205856319 0.000000000000000
721 1 0.420 16.773599999999998 38.736969205856319 0.000000000000000
722 2 -0.420 17.472500000000000 39.947499493539325 0.000000000000000
723 1 0.420 18.870300000000000 39.947499493539325 0.000000000000000
724 2 -0.420 19.569199999999999 38.736969205856319 0.000000000000000
725 1 0.420 20.966999999999999 38.736969205856319 0.000000000000000
726 2 -0.420 21.665900000000001 39.947499493539325 0.000000000000000
727 1 0.420 23.063699999999997 39.947499493539325 0.000000000000000
728 2 -0.420 23.762599999999999 38.736969205856319 0.000000000000000
729 1 0.420 25.160399999999999 38.736969205856319 0.000000000000000
730 2 -0.420 25.859299999999998 39.947499493539325 0.000000000000000
731 1 0.420 27.257099999999998 39.947499493539325 0.000000000000000
732 2 -0.420 27.956000000000000 38.736969205856319 0.000000000000000
733 1 0.420 29.353800000000000 38.736969205856319 0.000000000000000
734 2 -0.420 30.052699999999998 39.947499493539325 0.000000000000000
735 1 0.420 31.450499999999998 39.947499493539325 0.000000000000000
736 2 -0.420 32.149400000000000 38.736969205856319 0.000000000000000
737 1 0.420 33.547199999999997 38.736969205856319 0.000000000000000
738 2 -0.420 34.246099999999998 39.947499493539325 0.000000000000000
739 1 0.420 35.643899999999995 39.947499493539325 0.000000000000000
740 2 -0.420 36.342799999999997 38.736969205856319 0.000000000000000
741 1 0.420 37.740600000000001 38.736969205856319 0.000000000000000
742 2 -0.420 38.439499999999995 39.947499493539325 0.000000000000000
743 1 0.420 39.837299999999999 39.947499493539325 0.000000000000000
744 2 -0.420 40.536200000000001 38.736969205856319 0.000000000000000
745 1 0.420 41.933999999999997 38.736969205856319 0.000000000000000
746 2 -0.420 42.632899999999999 39.947499493539325 0.000000000000000
747 1 0.420 44.030699999999996 39.947499493539325 0.000000000000000
748 2 -0.420 44.729599999999998 38.736969205856319 0.000000000000000
749 1 0.420 0.000000000000000 41.158029781222339 0.000000000000000
750 2 -0.420 0.698900000000000 42.368560068905346 0.000000000000000
751 1 0.420 2.096700000000000 42.368560068905346 0.000000000000000
752 2 -0.420 2.795600000000000 41.158029781222339 0.000000000000000
753 1 0.420 4.193400000000000 41.158029781222339 0.000000000000000
754 2 -0.420 4.892300000000000 42.368560068905346 0.000000000000000
755 1 0.420 6.290100000000000 42.368560068905346 0.000000000000000
756 2 -0.420 6.989000000000000 41.158029781222339 0.000000000000000
757 1 0.420 8.386799999999999 41.158029781222339 0.000000000000000
758 2 -0.420 9.085699999999999 42.368560068905346 0.000000000000000
759 1 0.420 10.483499999999999 42.368560068905346 0.000000000000000
760 2 -0.420 11.182399999999999 41.158029781222339 0.000000000000000
761 1 0.420 12.580200000000000 41.158029781222339 0.000000000000000
762 2 -0.420 13.279100000000000 42.368560068905346 0.000000000000000
763 1 0.420 14.676900000000000 42.368560068905346 0.000000000000000
764 2 -0.420 15.375800000000000 41.158029781222339 0.000000000000000
765 1 0.420 16.773599999999998 41.158029781222339 0.000000000000000
766 2 -0.420 17.472500000000000 42.368560068905346 0.000000000000000
767 1 0.420 18.870300000000000 42.368560068905346 0.000000000000000
768 2 -0.420 19.569199999999999 41.158029781222339 0.000000000000000
769 1 0.420 20.966999999999999 41.158029781222339 0.000000000000000
770 2 -0.420 21.665900000000001 42.368560068905346 0.000000000000000
771 1 0.420 23.063699999999997 42.368560068905346 0.000000000000000
772 2 -0.420 23.762599999999999 41.158029781222339 0.000000000000000
773 1 0.420 25.160399999999999 41.158029781222339 0.000000000000000
774 2 -0.420 25.859299999999998 42.368560068905346 0.000000000000000
775 1 0.420 27.257099999999998 42.368560068905346 0.000000000000000
776 2 -0.420 27.956000000000000 41.158029781222339 0.000000000000000
777 1 0.420 29.353800000000000 41.158029781222339 0.000000000000000
778 2 -0.420 30.052699999999998 42.368560068905346 0.000000000000000
779 1 0.420 31.450499999999998 42.368560068905346 0.000000000000000
780 2 -0.420 32.149400000000000 41.158029781222339 0.000000000000000
781 1 0.420 33.547199999999997 41.158029781222339 0.000000000000000
782 2 -0.420 34.246099999999998 42.368560068905346 0.000000000000000
783 1 0.420 35.643899999999995 42.368560068905346 0.000000000000000
784 2 -0.420 36.342799999999997 41.158029781222339 0.000000000000000
785 1 0.420 37.740600000000001 41.158029781222339 0.000000000000000
786 2 -0.420 38.439499999999995 42.368560068905346 0.000000000000000
787 1 0.420 39.837299999999999 42.368560068905346 0.000000000000000
788 2 -0.420 40.536200000000001 41.158029781222339 0.000000000000000
789 1 0.420 41.933999999999997 41.158029781222339 0.000000000000000
790 2 -0.420 42.632899999999999 42.368560068905346 0.000000000000000
791 1 0.420 44.030699999999996 42.368560068905346 0.000000000000000
792 2 -0.420 44.729599999999998 41.158029781222339 0.000000000000000
793 1 0.420 0.000000000000000 43.579090356588360 0.000000000000000
794 2 -0.420 0.698900000000000 44.789620644271366 0.000000000000000
795 1 0.420 2.096700000000000 44.789620644271366 0.000000000000000
796 2 -0.420 2.795600000000000 43.579090356588360 0.000000000000000
797 1 0.420 4.193400000000000 43.579090356588360 0.000000000000000
798 2 -0.420 4.892300000000000 44.789620644271366 0.000000000000000
799 1 0.420 6.290100000000000 44.789620644271366 0.000000000000000
800 2 -0.420 6.989000000000000 43.579090356588360 0.000000000000000
801 1 0.420 8.386799999999999 43.579090356588360 0.000000000000000
802 2 -0.420 9.085699999999999 44.789620644271366 0.000000000000000
803 1 0.420 10.483499999999999 44.789620644271366 0.000000000000000
804 2 -0.420 11.182399999999999 43.579090356588360 0.000000000000000
805 1 0.420 12.580200000000000 43.579090356588360 0.000000000000000
806 2 -0.420 13.279100000000000 44.789620644271366 0.000000000000000
807 1 0.420 14.676900000000000 44.789620644271366 0.000000000000000
808 2 -0.420 15.375800000000000 43.579090356588360 0.000000000000000
809 1 0.420 16.773599999999998 43.579090356588360 0.000000000000000
810 2 -0.420 17.472500000000000 44.789620644271366 0.000000000000000
811 1 0.420 18.870300000000000 44.789620644271366 0.000000000000000
812 2 -0.420 19.569199999999999 43.579090356588360 0.000000000000000
813 1 0.420 20.966999999999999 43.579090356588360 0.000000000000000
814 2 -0.420 21.665900000000001 44.789620644271366 0.000000000000000
815 1 0.420 23.063699999999997 44.789620644271366 0.000000000000000
816 2 -0.420 23.762599999999999 43.579090356588360 0.000000000000000
817 1 0.420 25.160399999999999 43.579090356588360 0.000000000000000
818 2 -0.420 25.859299999999998 44.789620644271366 0.000000000000000
819 1 0.420 27.257099999999998 44.789620644271366 0.000000000000000
820 2 -0.420 27.956000000000000 43.579090356588360 0.000000000000000
821 1 0.420 29.353800000000000 43.579090356588360 0.000000000000000
822 2 -0.420 30.052699999999998 44.789620644271366 0.000000000000000
823 1 0.420 31.450499999999998 44.789620644271366 0.000000000000000
824 2 -0.420 32.149400000000000 43.579090356588360 0.000000000000000
825 1 0.420 33.547199999999997 43.579090356588360 0.000000000000000
826 2 -0.420 34.246099999999998 44.789620644271366 0.000000000000000
827 1 0.420 35.643899999999995 44.789620644271366 0.000000000000000
828 2 -0.420 36.342799999999997 43.579090356588360 0.000000000000000
829 1 0.420 37.740600000000001 43.579090356588360 0.000000000000000
830 2 -0.420 38.439499999999995 44.789620644271366 0.000000000000000
831 1 0.420 39.837299999999999 44.789620644271366 0.000000000000000
832 2 -0.420 40.536200000000001 43.579090356588360 0.000000000000000
833 1 0.420 41.933999999999997 43.579090356588360 0.000000000000000
834 2 -0.420 42.632899999999999 44.789620644271366 0.000000000000000
835 1 0.420 44.030699999999996 44.789620644271366 0.000000000000000
836 2 -0.420 44.729599999999998 43.579090356588360 0.000000000000000
837 1 0.420 0.000000000000000 46.000150931954380 0.000000000000000
838 2 -0.420 0.698900000000000 47.210681219637387 0.000000000000000
839 1 0.420 2.096700000000000 47.210681219637387 0.000000000000000
840 2 -0.420 2.795600000000000 46.000150931954380 0.000000000000000
841 1 0.420 4.193400000000000 46.000150931954380 0.000000000000000
842 2 -0.420 4.892300000000000 47.210681219637387 0.000000000000000
843 1 0.420 6.290100000000000 47.210681219637387 0.000000000000000
844 2 -0.420 6.989000000000000 46.000150931954380 0.000000000000000
845 1 0.420 8.386799999999999 46.000150931954380 0.000000000000000
846 2 -0.420 9.085699999999999 47.210681219637387 0.000000000000000
847 1 0.420 10.483499999999999 47.210681219637387 0.000000000000000
848 2 -0.420 11.182399999999999 46.000150931954380 0.000000000000000
849 1 0.420 12.580200000000000 46.000150931954380 0.000000000000000
850 2 -0.420 13.279100000000000 47.210681219637387 0.000000000000000
851 1 0.420 14.676900000000000 47.210681219637387 0.000000000000000
852 2 -0.420 15.375800000000000 46.000150931954380 0.000000000000000
853 1 0.420 16.773599999999998 46.000150931954380 0.000000000000000
854 2 -0.420 17.472500000000000 47.210681219637387 0.000000000000000
855 1 0.420 18.870300000000000 47.210681219637387 0.000000000000000
856 2 -0.420 19.569199999999999 46.000150931954380 0.000000000000000
857 1 0.420 20.966999999999999 46.000150931954380 0.000000000000000
858 2 -0.420 21.665900000000001 47.210681219637387 0.000000000000000
859 1 0.420 23.063699999999997 47.210681219637387 0.000000000000000
860 2 -0.420 23.762599999999999 46.000150931954380 0.000000000000000
861 1 0.420 25.160399999999999 46.000150931954380 0.000000000000000
862 2 -0.420 25.859299999999998 47.210681219637387 0.000000000000000
863 1 0.420 27.257099999999998 47.210681219637387 0.000000000000000
864 2 -0.420 27.956000000000000 46.000150931954380 0.000000000000000
865 1 0.420 29.353800000000000 46.000150931954380 0.000000000000000
866 2 -0.420 30.052699999999998 47.210681219637387 0.000000000000000
867 1 0.420 31.450499999999998 47.210681219637387 0.000000000000000
868 2 -0.420 32.149400000000000 46.000150931954380 0.000000000000000
869 1 0.420 33.547199999999997 46.000150931954380 0.000000000000000
870 2 -0.420 34.246099999999998 47.210681219637387 0.000000000000000
871 1 0.420 35.643899999999995 47.210681219637387 0.000000000000000
872 2 -0.420 36.342799999999997 46.000150931954380 0.000000000000000
873 1 0.420 37.740600000000001 46.000150931954380 0.000000000000000
874 2 -0.420 38.439499999999995 47.210681219637387 0.000000000000000
875 1 0.420 39.837299999999999 47.210681219637387 0.000000000000000
876 2 -0.420 40.536200000000001 46.000150931954380 0.000000000000000
877 1 0.420 41.933999999999997 46.000150931954380 0.000000000000000
878 2 -0.420 42.632899999999999 47.210681219637387 0.000000000000000
879 1 0.420 44.030699999999996 47.210681219637387 0.000000000000000
880 2 -0.420 44.729599999999998 46.000150931954380 0.000000000000000
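The 880 coordinates above trace a flat orthorhombic honeycomb sheet: the pattern repeats every ~4.1934 A in x (= 3d) and every ~2.4211 A in y (= sqrt(3)*d), implying a B-N bond length d of about 1.3978 A. As a minimal sketch (an illustration, not part of this commit), an equivalent 11 x 20 cell, 880-atom monolayer could be generated directly in LAMMPS with a custom lattice; the resulting box comes out close to, but not identical to, the 46.15 x 48.44 A box reported in the logs below, so the original file was presumably built with slightly different constants.
# hypothetical generator for an hBN-like monolayer, d = 1.3978 Angstrom
units metal
atom_style charge
boundary p p p
lattice custom 1.3978 a1 3.0 0.0 0.0 a2 0.0 1.7320508 0.0 a3 0.0 0.0 1.0 &
        basis 0.0 0.0 0.0 basis 0.666666667 0.0 0.0 &
        basis 0.166666667 0.5 0.0 basis 0.5 0.5 0.0
region box block 0 11 0 20 0 71.5 units lattice # ~100 Angstrom of vacuum in z
create_box 2 box
region sheet block 0 11 0 20 0 0.1 units lattice
create_atoms 1 region sheet basis 1 1 basis 2 2 basis 3 2 basis 4 1
set type 1 charge 0.42 # boron
set type 2 charge -0.42 # nitrogen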


@ -0,0 +1,37 @@
# Initialization
units metal
boundary p p p
atom_style charge
processors * * 1 # domain decomposition over x and y
# System and atom definition
# we use different molecule ids for each layer of hBN
# so that inter- and intra-layer
# interactions can be specified separately
read_data hBN-monolayer-5nm.data
mass 1 10.8110 # boron mass (g/mole)
mass 2 14.0067 # nitrogen mass (g/mole)
######################## Potential definition ########################
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff B N
####################################################################
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
#### Simulation settings ####
timestep 0.001
velocity all create 300.0 4928459 loop geom
fix thermostat all nve
############# Output ###############
thermo 100
thermo_style custom step etotal pe ke temp
#thermo_modify lost warn
thermo_modify line one format float %20.16g lost warn
###### Run molecular dynamics ######
run 1000
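As written, the input only produces thermo output. A small optional extension (a sketch, not part of the original script; file names are hypothetical) writes a sorted trajectory during the run and the final configuration afterwards:
# before "run 1000": dump a snapshot every 100 steps
dump traj all custom 100 hbn.lammpstrj id type q x y z
dump_modify traj sort id
# after the run: save the final state
write_data hBN-monolayer-final.data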

examples/tersoff/in.tersoff Normal file

@ -0,0 +1,135 @@
# Simple regression tests for Tersoff potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a &
a1 1.0 0.0 0.0 &
a2 0.0 1.0 0.0 &
a3 0.0 0.0 1.0 &
basis 0.0 0.0 0.0 &
basis 0.0 0.5 0.5 &
basis 0.5 0.0 0.5 &
basis 0.5 0.5 0.0 &
basis 0.25 0.25 0.25 &
basis 0.25 0.75 0.75 &
basis 0.75 0.25 0.75 &
basis 0.75 0.75 0.25
region myreg block 0 4 &
0 4 &
0 4
create_box 8 myreg
create_atoms 1 region myreg &
basis 1 1 &
basis 2 2 &
basis 3 3 &
basis 4 4 &
basis 5 5 &
basis 6 6 &
basis 7 7 &
basis 8 8
mass * 28.06
velocity all create $t 5287287 loop geom
# Equilibrate using Tersoff model for silicon
pair_style tersoff
pair_coeff * * Si.tersoff Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
write_restart restart.equil
# Test Tersoff/Mod model for Si
clear
read_restart restart.equil
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff/Mod/C model for Si
clear
read_restart restart.equil
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff model for B/N/C
clear
read_restart restart.equil
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff model for B/N/C with shift
clear
read_restart restart.equil
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff N N N C B B C B
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
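Every test block above follows the same skeleton: clear the system, reload the shared restart.equil, swap in one Tersoff variant, and integrate 100 steps under the same NVT settings. Adding a further variant is mechanical; here is a sketch under the assumption that the tersoff/zbl variant with the SiC.tersoff.zbl potential file shipped with LAMMPS were exercised as well (input-script variables such as $t survive a clear, as the log output below confirms):
# Test Tersoff/ZBL model (hypothetical additional block, not in this commit)
clear
read_restart restart.equil
pair_style tersoff/zbl
pair_coeff * * SiC.tersoff.zbl Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100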


@ -0,0 +1,101 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# Initialization
units metal
boundary p p p
atom_style charge
processors * * 1 # domain decomposition over x and y
# System and atom definition
# we use different molecule ids for each layer of hBN
# so that inter- and intra-layer
# interactions can be specified separately
read_data hBN-monolayer-5nm.data
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (46.152980 48.443364 100.00000)
1 by 1 by 1 MPI processor grid
reading atoms ...
880 atoms
read_data CPU = 0.003 seconds
mass 1 10.8110 # boron mass (g/mole)
mass 2 14.0067 # nitrogen mass (g/mole)
######################## Potential definition ########################
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff B N
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
####################################################################
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
#### Simulation settings ####
timestep 0.001
velocity all create 300.0 4928459 loop geom
fix thermostat all nve
############# Output ###############
thermo 100
thermo_style custom step etotal pe ke temp
#thermo_modify lost warn
thermo_modify line one format float %20.16g lost warn
###### Run molecular dynamics ######
run 1000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.1
ghost atom cutoff = 4.1
binsize = 2.05, bins = 23 24 49
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.545 | 3.545 | 3.545 Mbytes
Step TotEng PotEng KinEng Temp
0 -6570.525999324201 -6604.6118995607 34.0859002365 300
100 -6570.310828576855 -6584.788128201216 14.47729962436115 127.4189579026448
200 -6570.372315727984 -6587.98140230295 17.60908657496611 154.9827329140911
300 -6570.341402414283 -6587.005611017601 16.66420860331832 146.6665849019345
400 -6570.373381655075 -6587.812074340169 17.43869268509366 153.48304633967
500 -6570.357279692746 -6587.139105628337 16.78182593559217 147.7017695218898
600 -6570.364533408486 -6588.199043380888 17.83450997240158 156.9667503160497
700 -6570.362000654088 -6587.514376495223 17.15237584113499 150.9630878644169
800 -6570.358394689025 -6587.200486866491 16.84209217746576 148.2321903832028
900 -6570.372035122432 -6588.096861613308 17.72482649087559 156.0013938422734
1000 -6570.355748883583 -6587.399428461676 17.04367957809225 150.0064201899072
Loop time of 0.998048 on 1 procs for 1000 steps with 880 atoms
Performance: 86.569 ns/day, 0.277 hours/ns, 1001.956 timesteps/s
98.3% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.98001 | 0.98001 | 0.98001 | 0.0 | 98.19
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.006538 | 0.006538 | 0.006538 | 0.0 | 0.66
Output | 0.000224 | 0.000224 | 0.000224 | 0.0 | 0.02
Modify | 0.00781 | 0.00781 | 0.00781 | 0.0 | 0.78
Other | | 0.003467 | | | 0.35
Nlocal: 880.000 ave 880 max 880 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1518.00 ave 1518 max 1518 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 15840.0 ave 15840 max 15840 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 15840
Ave neighs/atom = 18.000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:01


@ -0,0 +1,101 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# Initialization
units metal
boundary p p p
atom_style charge
processors * * 1 # domain decomposition over x and y
# System and atom definition
# we use different molecule ids for each layer of hBN
# so that inter- and intra-layer
# interactions can be specified separately
read_data hBN-monolayer-5nm.data
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (46.152980 48.443364 100.00000)
2 by 2 by 1 MPI processor grid
reading atoms ...
880 atoms
read_data CPU = 0.010 seconds
mass 1 10.8110 # boron mass (g/mole)
mass 2 14.0067 # nitrogen mass (g/mole)
######################## Potential definition ########################
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff B N
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
####################################################################
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
#### Simulation settings ####
timestep 0.001
velocity all create 300.0 4928459 loop geom
fix thermostat all nve
############# Output ###############
thermo 100
thermo_style custom step etotal pe ke temp
#thermo_modify lost warn
thermo_modify line one format float %20.16g lost warn
###### Run molecular dynamics ######
run 1000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.1
ghost atom cutoff = 4.1
binsize = 2.05, bins = 23 24 49
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.297 | 3.297 | 3.297 Mbytes
Step TotEng PotEng KinEng Temp
0 -6570.525999324095 -6604.611899560595 34.08590023650001 300.0000000000001
100 -6570.310828576847 -6584.788128201208 14.47729962436078 127.4189579026416
200 -6570.372315727983 -6587.981402302948 17.60908657496569 154.9827329140874
300 -6570.341402414288 -6587.005611017607 16.66420860331862 146.6665849019371
400 -6570.373381655087 -6587.81207434018 17.43869268509319 153.4830463396659
500 -6570.357279692746 -6587.139105628339 16.78182593559348 147.7017695219013
600 -6570.364533408483 -6588.199043380885 17.83450997240217 156.9667503160549
700 -6570.362000654081 -6587.514376495217 17.15237584113621 150.9630878644276
800 -6570.358394689017 -6587.200486866481 16.84209217746396 148.2321903831871
900 -6570.372035122433 -6588.096861613309 17.72482649087623 156.001393842279
1000 -6570.355748883587 -6587.399428461678 17.0436795780913 150.0064201898988
Loop time of 0.32051 on 4 procs for 1000 steps with 880 atoms
Performance: 269.571 ns/day, 0.089 hours/ns, 3120.030 timesteps/s
97.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.24483 | 0.26461 | 0.28741 | 2.9 | 82.56
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.026425 | 0.049039 | 0.068616 | 6.8 | 15.30
Output | 0.000126 | 0.00016475 | 0.000248 | 0.0 | 0.05
Modify | 0.002472 | 0.002794 | 0.002933 | 0.4 | 0.87
Other | | 0.003905 | | | 1.22
Nlocal: 220.000 ave 241 max 199 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 563.000 ave 583 max 543 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3960.00 ave 4338 max 3582 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 15840
Ave neighs/atom = 18.000000
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,472 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# Simple regression tests for Tersoff potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
lattice custom 5.431 a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region myreg block 0 4 0 4 0 4
create_box 8 myreg
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
create_atoms 1 region myreg basis 1 1 basis 2 2 basis 3 3 basis 4 4 basis 5 5 basis 6 6 basis 7 7 basis 8 8
Created 512 atoms
create_atoms CPU = 0.000 seconds
mass * 28.06
velocity all create $t 5287287 loop geom
velocity all create 1800 5287287 loop geom
# Equilibrate using Tersoff model for silicon
pair_style tersoff
pair_coeff * * Si.tersoff Si Si Si Si Si Si Si Si
Reading tersoff potential file Si.tersoff with DATE: 2007-10-25
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.2
ghost atom cutoff = 4.2
binsize = 2.1, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.985 | 2.985 | 2.985 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1800 -2370.771 0 -2251.8775 12511.419
10 1144.7447 -2327.3227 0 -2251.7759 21852.599
20 770.19243 -2302.1547 0 -2251.7633 22286.587
30 1059.4324 -2320.1988 0 -2251.8159 6242.222
40 1000.972 -2314.6531 0 -2251.796 -3069.9273
50 803.91758 -2300.1702 0 -2251.7834 -7154.1383
60 761.38639 -2296.1731 0 -2251.7928 -14520.921
70 750.57677 -2294.3086 0 -2251.7965 -21400.198
80 676.66672 -2288.2634 0 -2251.7899 -23480.201
90 640.24103 -2284.6678 0 -2251.7848 -20659.983
100 742.67188 -2290.0616 0 -2251.7855 -16211.799
Loop time of 0.107338 on 1 procs for 100 steps with 512 atoms
Performance: 80.493 ns/day, 0.298 hours/ns, 931.637 timesteps/s
98.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.10455 | 0.10455 | 0.10455 | 0.0 | 97.40
Neigh | 0.001115 | 0.001115 | 0.001115 | 0.0 | 1.04
Comm | 0.000482 | 0.000482 | 0.000482 | 0.0 | 0.45
Output | 0.000194 | 0.000194 | 0.000194 | 0.0 | 0.18
Modify | 0.000787 | 0.000787 | 0.000787 | 0.0 | 0.73
Other | | 0.000209 | | | 0.19
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 857.000 ave 857 max 857 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 8404.00 ave 8404 max 8404 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8404
Ave neighs/atom = 16.414062
Neighbor list builds = 2
Dangerous builds = 0
write_restart restart.equil
System init for write_restart ...
# Test Tersoff/Mod model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.006 seconds
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
Reading tersoff/mod potential file Si.tersoff.mod with DATE: 2013-07-26
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.979 | 2.979 | 2.979 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -2210.6446 0 -2172.3685 -6444.2163
110 1135.5853 -2234.6974 0 -2172.3908 113.80404
120 1462.8415 -2253.8186 0 -2172.3853 10922.229
130 1755.9617 -2270.5152 0 -2172.3964 18780.707
140 1895.1939 -2277.1484 0 -2172.3965 22357.106
150 1869.5375 -2273.2734 0 -2172.3851 22616.492
160 1824.0448 -2268.4342 0 -2172.393 19254.299
170 1637.9038 -2254.5219 0 -2172.3815 15904.928
180 1451.9871 -2240.7199 0 -2172.3771 12064.754
190 1362.8248 -2233.1942 0 -2172.3789 7970.534
200 1341.1467 -2229.8951 0 -2172.3717 6244.8542
Loop time of 0.128972 on 1 procs for 100 steps with 512 atoms
Performance: 66.991 ns/day, 0.358 hours/ns, 775.362 timesteps/s
98.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.12498 | 0.12498 | 0.12498 | 0.0 | 96.91
Neigh | 0.002322 | 0.002322 | 0.002322 | 0.0 | 1.80
Comm | 0.000537 | 0.000537 | 0.000537 | 0.0 | 0.42
Output | 0.000177 | 0.000177 | 0.000177 | 0.0 | 0.14
Modify | 0.000761 | 0.000761 | 0.000761 | 0.0 | 0.59
Other | | 0.000192 | | | 0.15
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 960.000 ave 960 max 960 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 9244.00 ave 9244 max 9244 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 9244
Ave neighs/atom = 18.054688
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff/Mod/C model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
Reading tersoff/mod/c potential file Si.tersoff.modc with DATE: 2016-11-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.205694
ghost atom cutoff = 4.205694
binsize = 2.102847, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.976 | 2.976 | 2.976 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -2221.9308 0 -2183.6547 -11721.269
110 1106.895 -2244.1196 0 -2183.6843 -2359.7819
120 1327.6674 -2256.3155 0 -2183.6767 7904.6604
130 1487.0219 -2264.3656 0 -2183.6707 14660.783
140 1709.1746 -2276.4761 0 -2183.6886 19298.791
150 1710.6528 -2274.1129 0 -2183.6764 22026.559
160 1651.0659 -2267.9877 0 -2183.6699 20916.722
170 1632.7705 -2264.7081 0 -2183.6777 17339.031
180 1477.693 -2252.4683 0 -2183.6706 12563.594
190 1310.8768 -2239.5419 0 -2183.6581 9591.0484
200 1356.7172 -2240.5315 0 -2183.668 5584.6734
Loop time of 0.133106 on 1 procs for 100 steps with 512 atoms
Performance: 64.911 ns/day, 0.370 hours/ns, 751.281 timesteps/s
96.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.1291 | 0.1291 | 0.1291 | 0.0 | 96.99
Neigh | 0.002343 | 0.002343 | 0.002343 | 0.0 | 1.76
Comm | 0.0005 | 0.0005 | 0.0005 | 0.0 | 0.38
Output | 0.000186 | 0.000186 | 0.000186 | 0.0 | 0.14
Modify | 0.000786 | 0.000786 | 0.000786 | 0.0 | 0.59
Other | | 0.000191 | | | 0.14
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 903.000 ave 903 max 903 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 8414.00 ave 8414 max 8414 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8414
Ave neighs/atom = 16.433594
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
Changing box ...
orthogonal box = (4.3448000 0.0000000 0.0000000) to (17.379200 21.724000 21.724000)
orthogonal box = (4.3448000 4.3448000 0.0000000) to (17.379200 17.379200 21.724000)
orthogonal box = (4.3448000 4.3448000 4.3448000) to (17.379200 17.379200 17.379200)
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.985 | 2.985 | 2.985 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -2973.8527 0 -2935.5766 3438975.9
110 4061.1085 -3183.2489 0 -2930.1208 2211712.7
120 4120.3231 -3187.0108 0 -2928.3047 2166764.3
130 3602.7602 -3158.5939 0 -2926.6167 2244475.7
140 3222.7773 -3141.7275 0 -2925.5369 2161607
150 3487.4703 -3163.7495 0 -2921.2462 2222150.2
160 3436.3009 -3169.4234 0 -2920.8775 2144368.7
170 3308.1796 -3170.3773 0 -2920.8967 2223612.9
180 3304.3776 -3178.7805 0 -2920.102 2072546.6
190 3217.3561 -3180.7963 0 -2918.4548 2118776.2
200 3041.6832 -3176.1794 0 -2916.5787 2130124.6
Loop time of 0.134621 on 1 procs for 100 steps with 512 atoms
Performance: 64.180 ns/day, 0.374 hours/ns, 742.826 timesteps/s
98.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.12837 | 0.12837 | 0.12837 | 0.0 | 95.35
Neigh | 0.004553 | 0.004553 | 0.004553 | 0.0 | 3.38
Comm | 0.000601 | 0.000601 | 0.000601 | 0.0 | 0.45
Output | 0.000177 | 0.000177 | 0.000177 | 0.0 | 0.13
Modify | 0.000742 | 0.000742 | 0.000742 | 0.0 | 0.55
Other | | 0.000181 | | | 0.13
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1051.00 ave 1051 max 1051 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 14760.0 ave 14760 max 14760 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14760
Ave neighs/atom = 28.828125
Neighbor list builds = 6
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
Changing box ...
orthogonal box = (4.3448000 0.0000000 0.0000000) to (17.379200 21.724000 21.724000)
orthogonal box = (4.3448000 4.3448000 0.0000000) to (17.379200 17.379200 21.724000)
orthogonal box = (4.3448000 4.3448000 4.3448000) to (17.379200 17.379200 17.379200)
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff N N N C B B C B
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.985 | 2.985 | 2.985 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -3294.0266 0 -3255.7505 1615779.4
110 2870.7114 -3432.8485 0 -3257.629 1053310.6
120 2898.0798 -3431.4968 0 -3256.6851 1223402.3
130 2708.4483 -3419.0142 0 -3256.436 1105893.8
140 2307.8661 -3394.1268 0 -3256.1686 1148075.8
150 2215.3423 -3390.1427 0 -3255.8733 1138540
160 2515.488 -3412.6704 0 -3255.1731 1122902.8
170 2485.7109 -3415.0402 0 -3255.3787 1097748.5
180 2327.476 -3408.2463 0 -3254.6537 1061602.6
190 2339.5966 -3413.3961 0 -3254.7496 1088059
200 2260.5961 -3411.477 0 -3254.0771 1104581.5
Loop time of 0.120764 on 1 procs for 100 steps with 512 atoms
Performance: 71.545 ns/day, 0.335 hours/ns, 828.061 timesteps/s
98.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.11521 | 0.11521 | 0.11521 | 0.0 | 95.40
Neigh | 0.003874 | 0.003874 | 0.003874 | 0.0 | 3.21
Comm | 0.000542 | 0.000542 | 0.000542 | 0.0 | 0.45
Output | 0.000177 | 0.000177 | 0.000177 | 0.0 | 0.15
Modify | 0.000774 | 0.000774 | 0.000774 | 0.0 | 0.64
Other | | 0.00019 | | | 0.16
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1037.00 ave 1037 max 1037 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 14676.0 ave 14676 max 14676 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14676
Ave neighs/atom = 28.664062
Neighbor list builds = 5
Dangerous builds = 0
Total wall time: 0:00:00
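The log above follows the equilibrate-once, test-many pattern used by all of these regression runs: one NVT equilibration writes restart.equil, and each potential test clears the system and re-reads it. Since the log notes that "pair style tersoff stores no restart info", every test must re-issue its pair_style/pair_coeff commands. A minimal sketch of the pattern, using only names and values taken from the log itself (illustrative, not the committed input script):

# equilibrate once and save the state
pair_style      tersoff
pair_coeff      * * Si.tersoff Si Si Si Si Si Si Si Si
fix             1 all nvt temp 1800 1800 0.1
run             100
write_restart   restart.equil

# each test restores the identical starting configuration
clear
read_restart    restart.equil       # restores atoms and matching fix state
pair_style      tersoff/mod         # pair settings are not in the restart,
pair_coeff      * * Si.tersoff.mod Si Si Si Si Si Si Si Si
run             100                 # so each test re-specifies them

This is why every test block starts from the same step-100 state (Temp = 742.67188) before the trajectories diverge under the different potentials.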


@ -0,0 +1,472 @@
LAMMPS (24 Dec 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# Simple regression tests for Tersoff potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
lattice custom 5.431 a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region myreg block 0 4 0 4 0 4
create_box 8 myreg
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
create_atoms 1 region myreg basis 1 1 basis 2 2 basis 3 3 basis 4 4 basis 5 5 basis 6 6 basis 7 7 basis 8 8
Created 512 atoms
create_atoms CPU = 0.000 seconds
mass * 28.06
velocity all create $t 5287287 loop geom
velocity all create 1800 5287287 loop geom
# Equilibrate using Tersoff model for silicon
pair_style tersoff
pair_coeff * * Si.tersoff Si Si Si Si Si Si Si Si
Reading tersoff potential file Si.tersoff with DATE: 2007-10-25
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.2
ghost atom cutoff = 4.2
binsize = 2.1, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.958 | 2.958 | 2.958 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1800 -2370.771 0 -2251.8775 12511.419
10 1144.7447 -2327.3227 0 -2251.7759 21852.599
20 770.19243 -2302.1547 0 -2251.7633 22286.587
30 1059.4324 -2320.1988 0 -2251.8159 6242.222
40 1000.972 -2314.6531 0 -2251.796 -3069.9273
50 803.91758 -2300.1702 0 -2251.7834 -7154.1383
60 761.38639 -2296.1731 0 -2251.7928 -14520.921
70 750.57677 -2294.3086 0 -2251.7965 -21400.198
80 676.66672 -2288.2634 0 -2251.7899 -23480.201
90 640.24103 -2284.6678 0 -2251.7848 -20659.983
100 742.67188 -2290.0616 0 -2251.7855 -16211.799
Loop time of 0.0321762 on 4 procs for 100 steps with 512 atoms
Performance: 268.521 ns/day, 0.089 hours/ns, 3107.882 timesteps/s
98.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.026599 | 0.02712 | 0.027602 | 0.2 | 84.28
Neigh | 0.000285 | 0.00028875 | 0.000294 | 0.0 | 0.90
Comm | 0.003471 | 0.0039375 | 0.004446 | 0.6 | 12.24
Output | 0.000112 | 0.00013675 | 0.000203 | 0.0 | 0.43
Modify | 0.000443 | 0.0004555 | 0.000471 | 0.0 | 1.42
Other | | 0.000238 | | | 0.74
Nlocal: 128.000 ave 131 max 126 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Nghost: 447.750 ave 453 max 440 min
Histogram: 1 0 0 0 0 1 0 0 1 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2101.00 ave 2152 max 2032 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 8404
Ave neighs/atom = 16.414062
Neighbor list builds = 2
Dangerous builds = 0
write_restart restart.equil
System init for write_restart ...
# Test Tersoff/Mod model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.002 seconds
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
Reading tersoff/mod potential file Si.tersoff.mod with DATE: 2013-07-26
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.949 | 2.950 | 2.950 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -2210.6446 0 -2172.3685 -6444.2163
110 1135.5853 -2234.6974 0 -2172.3908 113.80404
120 1462.8415 -2253.8186 0 -2172.3853 10922.229
130 1755.9617 -2270.5152 0 -2172.3964 18780.707
140 1895.1939 -2277.1484 0 -2172.3965 22357.106
150 1869.5375 -2273.2734 0 -2172.3851 22616.492
160 1824.0448 -2268.4342 0 -2172.393 19254.299
170 1637.9038 -2254.5219 0 -2172.3815 15904.928
180 1451.9871 -2240.7199 0 -2172.3771 12064.754
190 1362.8248 -2233.1942 0 -2172.3789 7970.534
200 1341.1467 -2229.8951 0 -2172.3717 6244.8542
Loop time of 0.0389003 on 4 procs for 100 steps with 512 atoms
Performance: 222.107 ns/day, 0.108 hours/ns, 2570.678 timesteps/s
98.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.031362 | 0.032656 | 0.033605 | 0.5 | 83.95
Neigh | 0.000575 | 0.000599 | 0.000613 | 0.0 | 1.54
Comm | 0.003768 | 0.004733 | 0.006014 | 1.2 | 12.17
Output | 0.000207 | 0.00022525 | 0.000276 | 0.0 | 0.58
Modify | 0.000445 | 0.00047975 | 0.0005 | 0.0 | 1.23
Other | | 0.0002077 | | | 0.53
Nlocal: 128.000 ave 135 max 123 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Nghost: 491.500 ave 499 max 486 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2311.00 ave 2457 max 2192 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Total # of neighbors = 9244
Ave neighs/atom = 18.054688
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff/Mod/C model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
Reading tersoff/mod/c potential file Si.tersoff.modc with DATE: 2016-11-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.205694
ghost atom cutoff = 4.205694
binsize = 2.102847, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.949 | 2.949 | 2.949 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -2221.9308 0 -2183.6547 -11721.269
110 1106.895 -2244.1196 0 -2183.6843 -2359.7819
120 1327.6674 -2256.3155 0 -2183.6767 7904.6604
130 1487.0219 -2264.3656 0 -2183.6707 14660.783
140 1709.1746 -2276.4761 0 -2183.6886 19298.791
150 1710.6528 -2274.1129 0 -2183.6764 22026.559
160 1651.0659 -2267.9877 0 -2183.6699 20916.722
170 1632.7705 -2264.7081 0 -2183.6777 17339.031
180 1477.693 -2252.4683 0 -2183.6706 12563.594
190 1310.8768 -2239.5419 0 -2183.6581 9591.0484
200 1356.7172 -2240.5315 0 -2183.668 5584.6734
Loop time of 0.039244 on 4 procs for 100 steps with 512 atoms
Performance: 220.161 ns/day, 0.109 hours/ns, 2548.160 timesteps/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.03126 | 0.032782 | 0.033915 | 0.5 | 83.53
Neigh | 0.000599 | 0.000707 | 0.000821 | 0.0 | 1.80
Comm | 0.00363 | 0.004893 | 0.006493 | 1.5 | 12.47
Output | 0.000122 | 0.0001425 | 0.000192 | 0.0 | 0.36
Modify | 0.000497 | 0.00050925 | 0.000522 | 0.0 | 1.30
Other | | 0.0002105 | | | 0.54
Nlocal: 128.000 ave 133 max 124 min
Histogram: 1 0 1 0 0 1 0 0 0 1
Nghost: 459.000 ave 470 max 452 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2103.50 ave 2204 max 2014 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Total # of neighbors = 8414
Ave neighs/atom = 16.433594
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
Changing box ...
orthogonal box = (4.3448000 0.0000000 0.0000000) to (17.379200 21.724000 21.724000)
orthogonal box = (4.3448000 4.3448000 0.0000000) to (17.379200 17.379200 21.724000)
orthogonal box = (4.3448000 4.3448000 4.3448000) to (17.379200 17.379200 17.379200)
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.952 | 2.952 | 2.952 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -2973.8527 0 -2935.5766 3438975.9
110 4061.1085 -3183.2489 0 -2930.1208 2211712.7
120 4120.3231 -3187.0108 0 -2928.3047 2166764.3
130 3602.7602 -3158.5939 0 -2926.6167 2244475.7
140 3222.7773 -3141.7275 0 -2925.5369 2161607
150 3487.4703 -3163.7495 0 -2921.2462 2222150.2
160 3436.3009 -3169.4234 0 -2920.8775 2144368.7
170 3308.1796 -3170.3773 0 -2920.8967 2223612.9
180 3304.3776 -3178.7805 0 -2920.102 2072546.6
190 3217.3561 -3180.7963 0 -2918.4548 2118776.2
200 3041.6832 -3176.1794 0 -2916.5787 2130124.6
Loop time of 0.0488862 on 4 procs for 100 steps with 512 atoms
Performance: 176.737 ns/day, 0.136 hours/ns, 2045.565 timesteps/s
93.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.037364 | 0.039346 | 0.041066 | 0.8 | 80.49
Neigh | 0.001207 | 0.0012568 | 0.00136 | 0.2 | 2.57
Comm | 0.005218 | 0.007064 | 0.009117 | 1.9 | 14.45
Output | 0.000173 | 0.00020325 | 0.000277 | 0.0 | 0.42
Modify | 0.000709 | 0.000715 | 0.000723 | 0.0 | 1.46
Other | | 0.0003008 | | | 0.62
Nlocal: 128.000 ave 132 max 123 min
Histogram: 1 0 0 0 1 0 0 1 0 1
Nghost: 543.000 ave 548 max 535 min
Histogram: 1 0 0 0 0 0 1 0 1 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3690.00 ave 3811 max 3529 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Total # of neighbors = 14760
Ave neighs/atom = 28.828125
Neighbor list builds = 6
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 24 Dec 2020, LAMMPS = 24 Dec 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style tersoff stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
Changing box ...
orthogonal box = (4.3448000 0.0000000 0.0000000) to (17.379200 21.724000 21.724000)
orthogonal box = (4.3448000 4.3448000 0.0000000) to (17.379200 17.379200 21.724000)
orthogonal box = (4.3448000 4.3448000 4.3448000) to (17.379200 17.379200 17.379200)
pair_style tersoff shift 0.05
pair_coeff * * BNC.tersoff N N N C B B C B
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.952 | 2.952 | 2.952 Mbytes
Step Temp E_pair E_mol TotEng Press
100 742.67188 -3294.0266 0 -3255.7505 1615779.4
110 2870.7114 -3432.8485 0 -3257.629 1053310.6
120 2898.0798 -3431.4968 0 -3256.6851 1223402.3
130 2708.4483 -3419.0142 0 -3256.436 1105893.8
140 2307.8661 -3394.1268 0 -3256.1686 1148075.8
150 2215.3423 -3390.1427 0 -3255.8733 1138540
160 2515.488 -3412.6704 0 -3255.1731 1122902.8
170 2485.7109 -3415.0402 0 -3255.3787 1097748.5
180 2327.476 -3408.2463 0 -3254.6537 1061602.6
190 2339.5966 -3413.3961 0 -3254.7496 1088059
200 2260.5961 -3411.477 0 -3254.0771 1104581.5
Loop time of 0.0409132 on 4 procs for 100 steps with 512 atoms
Performance: 211.179 ns/day, 0.114 hours/ns, 2444.196 timesteps/s
97.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.03285 | 0.033327 | 0.03406 | 0.3 | 81.46
Neigh | 0.000975 | 0.0010677 | 0.001184 | 0.2 | 2.61
Comm | 0.004915 | 0.005528 | 0.006044 | 0.7 | 13.51
Output | 0.000129 | 0.0001535 | 0.000226 | 0.0 | 0.38
Modify | 0.000564 | 0.0005885 | 0.000604 | 0.0 | 1.44
Other | | 0.0002483 | | | 0.61
Nlocal: 128.000 ave 133 max 123 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Nghost: 535.250 ave 542 max 531 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3669.00 ave 3822 max 3522 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 14676
Ave neighs/atom = 28.664062
Neighbor list builds = 5
Dangerous builds = 0
Total wall time: 0:00:00
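A note on how these logs echo input: when a line contains $-style variables, LAMMPS prints the line again after each substitution, so the four change_box lines above (and the three fix nvt lines) each correspond to a single command. Likewise, change_box prints the box bounds once per dimension it rescales. A sketch of the two commands behind those echoes, with the values from the log:

variable        fac equal 0.6
# one command; echoed once more per ${fac} substitution in the log
change_box      all x scale ${fac} y scale ${fac} z scale ${fac} remap
# one command; $t expands to the temperature variable (1800 here)
fix             1 all nvt temp $t $t 0.1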


@ -46,7 +46,7 @@ create_atoms 1 region myreg &
mass * 28.06
velocity all create $t 5287287 mom yes rot yes dist gaussian
velocity all create $t 5287287 loop geom
# Equilibrate using Stillinger-Weber model for silicon
@ -114,35 +114,3 @@ neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff/Mod model for Si
clear
read_restart restart.equil
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff/Mod/C model for Si
clear
read_restart restart.equil
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
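The two hunks above trim the threebody input script: the tersoff/mod and tersoff/mod/c blocks are removed (those models are exercised by the dedicated Tersoff logs earlier in this commit), and the initial-velocity command is switched to the geometry-based loop. The relevant difference, sketched with the values from the hunk:

# old form: Gaussian velocities, net linear and angular momentum zeroed
# velocity      all create $t 5287287 mom yes rot yes dist gaussian
# new form: each atom's velocity is seeded from its own coordinates, so
# the starting state is reproducible for any MPI processor grid
velocity        all create $t 5287287 loop geom

The switch is consistent with the serial and 4-processor reference logs in this commit reporting identical thermo tables for the equilibration run.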


@ -0,0 +1,547 @@
LAMMPS (30 Nov 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# Simple regression tests for threebody potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
lattice custom 5.431 a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region myreg block 0 4 0 4 0 4
create_box 8 myreg
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
create_atoms 1 region myreg basis 1 1 basis 2 2 basis 3 3 basis 4 4 basis 5 5 basis 6 6 basis 7 7 basis 8 8
Created 512 atoms
create_atoms CPU = 0.000 seconds
mass * 28.06
velocity all create $t 5287287 loop geom
velocity all create 1800 5287287 loop geom
# Equilibrate using Stillinger-Weber model for silicon
pair_style sw
pair_coeff * * Si.sw Si Si Si Si Si Si Si Si
Reading sw potential file Si.sw with DATE: 2007-06-11
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.77118
ghost atom cutoff = 4.77118
binsize = 2.38559, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.983 | 2.983 | 2.983 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1800 -2220.3392 0 -2101.4457 12358.626
10 1006.0192 -2167.7053 0 -2101.3286 13892.426
20 588.26396 -2139.7132 0 -2101.3117 11295.566
30 990.55956 -2165.2164 0 -2101.3931 6279.0239
40 700.12917 -2144.4279 0 -2101.3427 5594.2388
50 523.64239 -2131.7796 0 -2101.3122 6013.0994
60 989.47092 -2161.3716 0 -2101.3839 5819.2688
70 877.27433 -2152.4432 0 -2101.3461 9116.6569
80 800.80221 -2146.1371 0 -2101.313 11995.66
90 1293.9689 -2176.9021 0 -2101.3848 11692.45
100 1112.9699 -2162.7259 0 -2101.3478 12263.758
Loop time of 0.092666 on 1 procs for 100 steps with 512 atoms
Performance: 93.238 ns/day, 0.257 hours/ns, 1079.144 timesteps/s
99.1% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.089633 | 0.089633 | 0.089633 | 0.0 | 96.73
Neigh | 0.001474 | 0.001474 | 0.001474 | 0.0 | 1.59
Comm | 0.00041 | 0.00041 | 0.00041 | 0.0 | 0.44
Output | 0.000153 | 0.000153 | 0.000153 | 0.0 | 0.17
Modify | 0.000782 | 0.000782 | 0.000782 | 0.0 | 0.84
Other | | 0.000214 | | | 0.23
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1017.00 ave 1017 max 1017 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 13988.0 ave 13988 max 13988 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 13988
Ave neighs/atom = 27.320312
Neighbor list builds = 2
Dangerous builds = 0
write_restart restart.equil
System init for write_restart ...
# Test Stillinger-Weber model for Cd/Te/Zn/Se/Hg/S
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.002 seconds
pair_style sw
pair_coeff * * CdTeZnSeHgS0.sw Cd Zn Hg Cd Te S Se Te
Reading sw potential file CdTeZnSeHgS0.sw with DATE: 2013-08-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.6320004
ghost atom cutoff = 5.6320004
binsize = 2.8160002, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.001 | 3.001 | 3.001 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -625.76163 0 -564.38354 462129.66
110 1502.8461 -649.55768 0 -564.45814 463413.45
120 1926.4523 -674.71265 0 -564.53612 486338.88
130 1152.6663 -621.47264 0 -564.37203 514892.2
140 1762.244 -659.86941 0 -564.4985 488159.88
150 1767.8665 -657.67178 0 -564.48386 466721.31
160 1075.2874 -610.12809 0 -564.36709 470151.9
170 1697.9313 -649.3684 0 -564.47207 467953.71
180 1856.1197 -657.14338 0 -564.48754 488372.27
190 1346.1107 -621.42431 0 -564.38065 511750.04
200 1919.5266 -657.26587 0 -564.47797 488684.56
Loop time of 0.289193 on 1 procs for 100 steps with 512 atoms
Performance: 29.876 ns/day, 0.803 hours/ns, 345.790 timesteps/s
98.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.28463 | 0.28463 | 0.28463 | 0.0 | 98.42
Neigh | 0.002821 | 0.002821 | 0.002821 | 0.0 | 0.98
Comm | 0.000605 | 0.000605 | 0.000605 | 0.0 | 0.21
Output | 0.000176 | 0.000176 | 0.000176 | 0.0 | 0.06
Modify | 0.000769 | 0.000769 | 0.000769 | 0.0 | 0.27
Other | | 0.000188 | | | 0.07
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1428.00 ave 1428 max 1428 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 17344.0 ave 17344 max 17344 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 17344
Ave neighs/atom = 33.875000
Neighbor list builds = 3
Dangerous builds = 0
# Test Vashishta model for In/P
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
pair_style vashishta
pair_coeff * * InP.vashishta In In In In P P P P
Reading vashishta potential file InP.vashishta with DATE: 2015-10-14
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair vashishta, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.025 | 3.025 | 3.025 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -1497.2988 0 -1435.9207 355619.19
110 1250.545 -1504.5795 0 -1435.9786 345188.52
120 1360.2275 -1509.3443 0 -1435.9801 333306.3
130 1066.4516 -1487.9076 0 -1435.9076 334465.11
140 1481.0477 -1513.0511 0 -1435.988 308725.1
150 1216.1167 -1493.0774 0 -1435.9217 304249.09
160 1211.4398 -1490.7459 0 -1435.9164 288897.09
170 1542.2025 -1510.0774 0 -1435.9608 260104.14
180 1302.9041 -1491.7765 0 -1435.8971 249514.04
190 1332.3326 -1491.5271 0 -1435.9213 227537.99
200 1352.1813 -1490.4513 0 -1435.9049 207626.42
Loop time of 0.126684 on 1 procs for 100 steps with 512 atoms
Performance: 68.201 ns/day, 0.352 hours/ns, 789.366 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.11981 | 0.11981 | 0.11981 | 0.0 | 94.57
Neigh | 0.004903 | 0.004903 | 0.004903 | 0.0 | 3.87
Comm | 0.000846 | 0.000846 | 0.000846 | 0.0 | 0.67
Output | 0.000145 | 0.000145 | 0.000145 | 0.0 | 0.11
Modify | 0.000772 | 0.000772 | 0.000772 | 0.0 | 0.61
Other | | 0.000207 | | | 0.16
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1838.00 ave 1838 max 1838 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 36482.0 ave 36482 max 36482 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 36482
Ave neighs/atom = 71.253906
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
Changing box ...
orthogonal box = (4.3448000 0.0000000 0.0000000) to (17.379200 21.724000 21.724000)
orthogonal box = (4.3448000 4.3448000 0.0000000) to (17.379200 17.379200 21.724000)
orthogonal box = (4.3448000 4.3448000 4.3448000) to (17.379200 17.379200 17.379200)
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.982 | 2.982 | 2.982 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -3259.7676 0 -3198.3895 1912461.3
110 1772.8268 -3301.5479 0 -3198.8218 1885295.6
120 1169.7287 -3258.74 0 -3197.9294 1898705.2
130 1308.5623 -3265.1338 0 -3197.5922 1894187.5
140 1486.0361 -3274.951 0 -3197.776 1871927.6
150 1419.0362 -3267.7302 0 -3197.2296 1925234.6
160 1196.6689 -3250.1492 0 -3196.7078 1902235.1
170 1707.5846 -3281.7658 0 -3196.9721 1863047.3
180 1337.4358 -3254.9844 0 -3196.8222 1880420.9
190 1441.8052 -3259.0364 0 -3196.3556 1904512.1
200 1569.0317 -3265.0089 0 -3196.3328 1899462.7
Loop time of 0.114312 on 1 procs for 100 steps with 512 atoms
Performance: 75.583 ns/day, 0.318 hours/ns, 874.799 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.1121 | 0.1121 | 0.1121 | 0.0 | 98.06
Neigh | 0.000773 | 0.000773 | 0.000773 | 0.0 | 0.68
Comm | 0.000415 | 0.000415 | 0.000415 | 0.0 | 0.36
Output | 0.000136 | 0.000136 | 0.000136 | 0.0 | 0.12
Modify | 0.000703 | 0.000703 | 0.000703 | 0.0 | 0.61
Other | | 0.000186 | | | 0.16
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1028.00 ave 1028 max 1028 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 14604.0 ave 14604 max 14604 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14604
Ave neighs/atom = 28.523438
Neighbor list builds = 1
Dangerous builds = 0
# Test Tersoff/Mod model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
Reading tersoff/mod potential file Si.tersoff.mod with DATE: 2013-07-26
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.986 | 2.986 | 2.986 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -2309.6047 0 -2248.2266 17662.891
110 835.77436 -2289.6119 0 -2248.1918 19964.211
120 1067.0735 -2303.0587 0 -2248.2414 13767.101
130 957.60664 -2293.7047 0 -2248.2139 14850.338
140 865.12471 -2285.7774 0 -2248.1971 17101.553
150 1104.7368 -2299.5468 0 -2248.2286 13031.988
160 1077.1682 -2295.3841 0 -2248.2227 13615.019
170 843.8591 -2277.9713 0 -2248.1911 18966.532
180 1008.7412 -2286.922 0 -2248.2075 17275.649
190 1237.9346 -2299.5487 0 -2248.2305 14334.006
200 1060.2161 -2285.3352 0 -2248.1952 18999.834
Loop time of 0.12412 on 1 procs for 100 steps with 512 atoms
Performance: 69.610 ns/day, 0.345 hours/ns, 805.672 timesteps/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.12079 | 0.12079 | 0.12079 | 0.0 | 97.32
Neigh | 0.001821 | 0.001821 | 0.001821 | 0.0 | 1.47
Comm | 0.000407 | 0.000407 | 0.000407 | 0.0 | 0.33
Output | 0.000159 | 0.000159 | 0.000159 | 0.0 | 0.13
Modify | 0.000736 | 0.000736 | 0.000736 | 0.0 | 0.59
Other | | 0.000203 | | | 0.16
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1007.00 ave 1007 max 1007 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 8884.00 ave 8884 max 8884 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8884
Ave neighs/atom = 17.351562
Neighbor list builds = 3
Dangerous builds = 0
# Test Tersoff/Mod/C model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
Reading tersoff/mod/c potential file Si.tersoff.modc with DATE: 2016-11-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.205694
ghost atom cutoff = 4.205694
binsize = 2.102847, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.980 | 2.980 | 2.980 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -2309.1331 0 -2247.755 20346.718
110 831.93715 -2288.8853 0 -2247.7187 21758.195
120 1077.6698 -2303.2846 0 -2247.7693 16036.053
130 972.43247 -2294.1847 0 -2247.7467 16614.835
140 815.76148 -2282.0495 0 -2247.7194 18310.116
150 1072.7096 -2297.0491 0 -2247.7574 13896.767
160 1061.8824 -2294.0028 0 -2247.7522 13663.179
170 787.17244 -2273.8946 0 -2247.7175 18586.606
180 932.5662 -2281.6828 0 -2247.7315 18154.167
190 1205.7299 -2297.2769 0 -2247.7608 14504.136
200 1022.5285 -2282.7039 0 -2247.7245 18710.495
Loop time of 0.12973 on 1 procs for 100 steps with 512 atoms
Performance: 66.600 ns/day, 0.360 hours/ns, 770.832 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.12643 | 0.12643 | 0.12643 | 0.0 | 97.45
Neigh | 0.001798 | 0.001798 | 0.001798 | 0.0 | 1.39
Comm | 0.000421 | 0.000421 | 0.000421 | 0.0 | 0.32
Output | 0.00016 | 0.00016 | 0.00016 | 0.0 | 0.12
Modify | 0.000733 | 0.000733 | 0.000733 | 0.0 | 0.57
Other | | 0.000192 | | | 0.15
Nlocal: 512.000 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 958.000 ave 958 max 958 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 8416.00 ave 8416 max 8416 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8416
Ave neighs/atom = 16.437500
Neighbor list builds = 3
Dangerous builds = 0
Total wall time: 0:00:00
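A last pattern worth flagging: in every restarted test above, redefining fix 1 with the same ID and style as the fix saved in restart.equil produces the "Resetting global fix info from restart file" message, meaning the thermostat's internal state is restored rather than reinitialized; "All restart file global fix info was re-assigned" confirms no stored fix info went unclaimed. A sketch of the lines responsible, with values from the logs:

read_restart    restart.equil
fix             1 all nvt temp 1800 1800 0.1    # same ID and style -> state restored
fix_modify      1 energy yes     # count the thermostat energy in TotEng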


@ -0,0 +1,547 @@
LAMMPS (30 Nov 2020)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
# Simple regression tests for threebody potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
lattice custom 5.431 a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region myreg block 0 4 0 4 0 4
create_box 8 myreg
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
create_atoms 1 region myreg basis 1 1 basis 2 2 basis 3 3 basis 4 4 basis 5 5 basis 6 6 basis 7 7 basis 8 8
Created 512 atoms
create_atoms CPU = 0.001 seconds
mass * 28.06
velocity all create $t 5287287 loop geom
velocity all create 1800 5287287 loop geom
# Equilibrate using Stillinger-Weber model for silicon
pair_style sw
pair_coeff * * Si.sw Si Si Si Si Si Si Si Si
Reading sw potential file Si.sw with DATE: 2007-06-11
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.77118
ghost atom cutoff = 4.77118
binsize = 2.38559, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.958 | 2.958 | 2.958 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1800 -2220.3392 0 -2101.4457 12358.626
10 1006.0192 -2167.7053 0 -2101.3286 13892.426
20 588.26396 -2139.7132 0 -2101.3117 11295.566
30 990.55956 -2165.2164 0 -2101.3931 6279.0239
40 700.12917 -2144.4279 0 -2101.3427 5594.2388
50 523.64239 -2131.7796 0 -2101.3122 6013.0994
60 989.47092 -2161.3716 0 -2101.3839 5819.2688
70 877.27433 -2152.4432 0 -2101.3461 9116.6569
80 800.80221 -2146.1371 0 -2101.313 11995.66
90 1293.9689 -2176.9021 0 -2101.3848 11692.45
100 1112.9699 -2162.7259 0 -2101.3478 12263.758
Loop time of 0.0284905 on 4 procs for 100 steps with 512 atoms
Performance: 303.259 ns/day, 0.079 hours/ns, 3509.942 timesteps/s
99.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.022257 | 0.023383 | 0.025192 | 0.7 | 82.07
Neigh | 0.00036 | 0.00037475 | 0.000387 | 0.0 | 1.32
Comm | 0.002084 | 0.0039075 | 0.005034 | 1.8 | 13.72
Output | 9.9e-05 | 0.00011525 | 0.00016 | 0.0 | 0.40
Modify | 0.000428 | 0.00043675 | 0.000443 | 0.0 | 1.53
Other | | 0.0002728 | | | 0.96
Nlocal: 128.000 ave 132 max 125 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Nghost: 525.000 ave 528 max 521 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3497.00 ave 3619 max 3397 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Total # of neighbors = 13988
Ave neighs/atom = 27.320312
Neighbor list builds = 2
Dangerous builds = 0
write_restart restart.equil
System init for write_restart ...
# Test Stillinger-Weber model for Cd/Te/Zn/Se/Hg/S
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.000 seconds
pair_style sw
pair_coeff * * CdTeZnSeHgS0.sw Cd Zn Hg Cd Te S Se Te
Reading sw potential file CdTeZnSeHgS0.sw with DATE: 2013-08-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.6320004
ghost atom cutoff = 5.6320004
binsize = 2.8160002, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.967 | 2.967 | 2.968 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -625.76163 0 -564.38354 462129.66
110 1502.8461 -649.55768 0 -564.45814 463413.45
120 1926.4523 -674.71265 0 -564.53613 486338.88
130 1152.6663 -621.47265 0 -564.37203 514892.19
140 1762.244 -659.86941 0 -564.4985 488159.88
150 1767.8665 -657.67179 0 -564.48386 466721.31
160 1075.2874 -610.1281 0 -564.36709 470151.9
170 1697.9313 -649.3684 0 -564.47208 467953.7
180 1856.1197 -657.14338 0 -564.48754 488372.26
190 1346.1107 -621.42432 0 -564.38065 511750.03
200 1919.5266 -657.26587 0 -564.47797 488684.56
Loop time of 0.084576 on 4 procs for 100 steps with 512 atoms
Performance: 102.157 ns/day, 0.235 hours/ns, 1182.369 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.072089 | 0.074912 | 0.076672 | 0.7 | 88.57
Neigh | 0.000745 | 0.0008125 | 0.000883 | 0.0 | 0.96
Comm | 0.006054 | 0.0077975 | 0.010598 | 2.1 | 9.22
Output | 0.000129 | 0.00015525 | 0.000219 | 0.0 | 0.18
Modify | 0.000523 | 0.000578 | 0.000641 | 0.0 | 0.68
Other | | 0.0003213 | | | 0.38
Nlocal: 128.000 ave 135 max 122 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Nghost: 759.750 ave 770 max 751 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4336.00 ave 4563 max 4128 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Total # of neighbors = 17344
Ave neighs/atom = 33.875000
Neighbor list builds = 3
Dangerous builds = 0
# Test Vashishta model for In/P
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
pair_style vashishta
pair_coeff * * InP.vashishta In In In In P P P P
Reading vashishta potential file InP.vashishta with DATE: 2015-10-14
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair vashishta, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.988 | 2.988 | 2.988 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -1497.2988 0 -1435.9207 355619.19
110 1250.545 -1504.5795 0 -1435.9786 345188.52
120 1360.2275 -1509.3443 0 -1435.9801 333306.3
130 1066.4516 -1487.9076 0 -1435.9076 334465.11
140 1481.0477 -1513.0511 0 -1435.988 308725.1
150 1216.1167 -1493.0774 0 -1435.9217 304249.09
160 1211.4398 -1490.7459 0 -1435.9164 288897.09
170 1542.2025 -1510.0774 0 -1435.9608 260104.14
180 1302.9041 -1491.7765 0 -1435.8971 249514.04
190 1332.3326 -1491.5271 0 -1435.9213 227537.99
200 1352.1813 -1490.4513 0 -1435.9049 207626.42
Loop time of 0.0404882 on 4 procs for 100 steps with 512 atoms
Performance: 213.395 ns/day, 0.112 hours/ns, 2469.852 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.032713 | 0.033094 | 0.033544 | 0.2 | 81.74
Neigh | 0.001251 | 0.0012875 | 0.001308 | 0.1 | 3.18
Comm | 0.004788 | 0.005204 | 0.00557 | 0.4 | 12.85
Output | 0.000123 | 0.0001385 | 0.000182 | 0.0 | 0.34
Modify | 0.000492 | 0.00050725 | 0.000533 | 0.0 | 1.25
Other | | 0.0002565 | | | 0.63
Nlocal: 128.000 ave 131 max 124 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 1013.25 ave 1025 max 1002 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 9120.50 ave 9356 max 8868 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 36482
Ave neighs/atom = 71.253906
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
Changing box ...
orthogonal box = (4.3448000 0.0000000 0.0000000) to (17.379200 21.724000 21.724000)
orthogonal box = (4.3448000 4.3448000 0.0000000) to (17.379200 17.379200 21.724000)
orthogonal box = (4.3448000 4.3448000 4.3448000) to (17.379200 17.379200 17.379200)
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
Reading tersoff potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.948 | 2.948 | 2.948 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -3259.7676 0 -3198.3895 1912461.3
110 1772.8268 -3301.5479 0 -3198.8218 1885295.6
120 1169.7287 -3258.74 0 -3197.9294 1898705.2
130 1308.5623 -3265.1338 0 -3197.5922 1894187.5
140 1486.0361 -3274.951 0 -3197.776 1871927.6
150 1419.0362 -3267.7302 0 -3197.2296 1925234.6
160 1196.6689 -3250.1492 0 -3196.7078 1902235.1
170 1707.5846 -3281.7658 0 -3196.9721 1863047.3
180 1337.4358 -3254.9844 0 -3196.8222 1880420.9
190 1441.8052 -3259.0364 0 -3196.3556 1904512.1
200 1569.0317 -3265.0089 0 -3196.3328 1899462.7
Loop time of 0.03452 on 4 procs for 100 steps with 512 atoms
Performance: 250.290 ns/day, 0.096 hours/ns, 2896.871 timesteps/s
99.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.029269 | 0.029729 | 0.030688 | 0.3 | 86.12
Neigh | 0.000203 | 0.00023375 | 0.000271 | 0.0 | 0.68
Comm | 0.00275 | 0.0036492 | 0.004132 | 0.9 | 10.57
Output | 0.000104 | 0.000121 | 0.000165 | 0.0 | 0.35
Modify | 0.000456 | 0.0004605 | 0.000463 | 0.0 | 1.33
Other | | 0.000326 | | | 0.94
Nlocal: 128.000 ave 132 max 123 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Nghost: 529.500 ave 533 max 524 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3651.00 ave 3783 max 3494 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Total # of neighbors = 14604
Ave neighs/atom = 28.523438
Neighbor list builds = 1
Dangerous builds = 0
# Test Tersoff/Mod model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
Reading tersoff/mod potential file Si.tersoff.mod with DATE: 2013-07-26
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.950 | 2.950 | 2.950 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -2309.6047 0 -2248.2266 17662.891
110 835.77436 -2289.6119 0 -2248.1918 19964.211
120 1067.0735 -2303.0587 0 -2248.2414 13767.101
130 957.60664 -2293.7047 0 -2248.2139 14850.338
140 865.12471 -2285.7774 0 -2248.1971 17101.553
150 1104.7368 -2299.5468 0 -2248.2286 13031.988
160 1077.1682 -2295.3841 0 -2248.2227 13615.019
170 843.8591 -2277.9713 0 -2248.1911 18966.532
180 1008.7412 -2286.922 0 -2248.2075 17275.649
190 1237.9346 -2299.5487 0 -2248.2305 14334.006
200 1060.2161 -2285.3352 0 -2248.1952 18999.834
Loop time of 0.043388 on 4 procs for 100 steps with 512 atoms
Performance: 199.133 ns/day, 0.121 hours/ns, 2304.785 timesteps/s
98.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.033874 | 0.036197 | 0.037433 | 0.7 | 83.43
Neigh | 0.000538 | 0.00055575 | 0.000575 | 0.0 | 1.28
Comm | 0.004381 | 0.0055505 | 0.007783 | 1.8 | 12.79
Output | 0.000141 | 0.0001635 | 0.000228 | 0.0 | 0.38
Modify | 0.000532 | 0.000615 | 0.000692 | 0.0 | 1.42
Other | | 0.000306 | | | 0.71
Nlocal: 128.000 ave 135 max 121 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 515.000 ave 518 max 508 min
Histogram: 1 0 0 0 0 0 0 0 1 2
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2221.00 ave 2328 max 2103 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 8884
Ave neighs/atom = 17.351562
Neighbor list builds = 3
Dangerous builds = 0
# Test Tersoff/Mod/C model for Si
clear
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (src/comm.cpp:94)
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
Reading restart file ...
restart file = 30 Nov 2020, LAMMPS = 30 Nov 2020
restoring atom style atomic from restart
orthogonal box = (0.0000000 0.0000000 0.0000000) to (21.724000 21.724000 21.724000)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
read_restart CPU = 0.001 seconds
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
Reading tersoff/mod/c potential file Si.tersoff.modc with DATE: 2016-11-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.205694
ghost atom cutoff = 4.205694
binsize = 2.102847, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.950 | 2.950 | 2.950 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1112.9699 -2309.1331 0 -2247.755 20346.718
110 831.93715 -2288.8853 0 -2247.7187 21758.195
120 1077.6698 -2303.2846 0 -2247.7693 16036.053
130 972.43247 -2294.1847 0 -2247.7467 16614.835
140 815.76148 -2282.0495 0 -2247.7194 18310.116
150 1072.7096 -2297.0491 0 -2247.7574 13896.767
160 1061.8824 -2294.0028 0 -2247.7522 13663.179
170 787.17244 -2273.8946 0 -2247.7175 18586.606
180 932.5662 -2281.6828 0 -2247.7315 18154.167
190 1205.7299 -2297.2769 0 -2247.7608 14504.136
200 1022.5285 -2282.7039 0 -2247.7245 18710.495
Loop time of 0.0526065 on 4 procs for 100 steps with 512 atoms
Performance: 164.238 ns/day, 0.146 hours/ns, 1900.906 timesteps/s
98.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.044962 | 0.045875 | 0.046737 | 0.3 | 87.20
Neigh | 0.000603 | 0.00062075 | 0.000646 | 0.0 | 1.18
Comm | 0.003882 | 0.0047085 | 0.005598 | 1.0 | 8.95
Output | 0.000159 | 0.0001995 | 0.000321 | 0.0 | 0.38
Modify | 0.000767 | 0.0007775 | 0.000792 | 0.0 | 1.48
Other | | 0.0004255 | | | 0.81
Nlocal: 128.000 ave 131 max 122 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Nghost: 483.000 ave 485 max 479 min
Histogram: 1 0 0 0 0 0 1 0 0 2
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2104.00 ave 2169 max 2008 min
Histogram: 1 0 0 0 0 1 0 0 1 1
Total # of neighbors = 8416
Ave neighs/atom = 16.437500
Neighbor list builds = 3
Dangerous builds = 0
Total wall time: 0:00:00
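The log above exercises the threebody regression input: equilibrate with Stillinger-Weber, write a restart, then repeatedly re-read it and swap in each pair style. A minimal sketch of reproducing such a 4-rank run, assuming an input deck named in.threebody under examples/threebody and an MPI-enabled lmp_mpi binary built with the MANYBODY package (all three names are assumptions, not taken from this diff):

    cd examples/threebody                               # assumed location of the regression input
    mpirun -np 4 lmp_mpi -in in.threebody -log log.out  # 4 MPI ranks, as in the log above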


@ -1,524 +0,0 @@
LAMMPS (27 Nov 2018)
using 1 OpenMP thread(s) per MPI task
# Simple regression tests for threebody potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
lattice custom 5.431 a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 5.431 5.431 5.431
region myreg block 0 4 0 4 0 4
create_box 8 myreg
Created orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 1 by 1 MPI processor grid
create_atoms 1 region myreg basis 1 1 basis 2 2 basis 3 3 basis 4 4 basis 5 5 basis 6 6 basis 7 7 basis 8 8
Created 512 atoms
Time spent = 0.000483751 secs
mass * 28.06
velocity all create $t 5287287 mom yes rot yes dist gaussian
velocity all create 1800 5287287 mom yes rot yes dist gaussian
# Equilibrate using Stillinger-Weber model for silicon
pair_style sw
pair_coeff * * Si.sw Si Si Si Si Si Si Si Si
Reading potential file Si.sw with DATE: 2007-06-11
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.77118
ghost atom cutoff = 4.77118
binsize = 2.38559, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.982 | 2.982 | 2.982 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1800 -2220.3392 0 -2101.4457 12358.626
10 993.48824 -2166.8749 0 -2101.3265 14121.853
20 587.90768 -2139.6876 0 -2101.3093 11864.886
30 1030.8616 -2167.8446 0 -2101.3947 6649.2566
40 724.62874 -2145.9637 0 -2101.3436 5859.6447
50 490.9053 -2129.5491 0 -2101.3077 6326.5434
60 960.24533 -2159.427 0 -2101.3794 5733.9889
70 906.76518 -2154.3675 0 -2101.3496 8380.4834
80 800.7253 -2146.0714 0 -2101.3113 11515.737
90 1249.2719 -2173.9445 0 -2101.3818 11593.728
100 1080.3797 -2160.6174 0 -2101.3445 12414.602
Loop time of 0.129171 on 1 procs for 100 steps with 512 atoms
Performance: 66.888 ns/day, 0.359 hours/ns, 774.165 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.12468 | 0.12468 | 0.12468 | 0.0 | 96.52
Neigh | 0.0017159 | 0.0017159 | 0.0017159 | 0.0 | 1.33
Comm | 0.00088573 | 0.00088573 | 0.00088573 | 0.0 | 0.69
Output | 0.00011206 | 0.00011206 | 0.00011206 | 0.0 | 0.09
Modify | 0.0014365 | 0.0014365 | 0.0014365 | 0.0 | 1.11
Other | | 0.0003417 | | | 0.26
Nlocal: 512 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1017 ave 1017 max 1017 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 14044 ave 14044 max 14044 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14044
Ave neighs/atom = 27.4297
Neighbor list builds = 2
Dangerous builds = 0
write_restart restart.equil
# Test Stillinger-Weber model for Cd/Te/Zn/Se/Hg/S
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
pair_style sw
pair_coeff * * CdTeZnSeHgS0.sw Cd Zn Hg Cd Te S Se Te
Reading potential file CdTeZnSeHgS0.sw with DATE: 2013-08-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.632
ghost atom cutoff = 5.632
binsize = 2.816, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.758 | 2.758 | 2.758 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1080.3797 -621.93681 0 -562.66385 464005.01
110 1512.4547 -648.51133 0 -562.74194 463733.24
120 1959.7565 -675.18668 0 -562.8255 486580.33
130 1138.5498 -618.80419 0 -562.6427 515406.41
140 1827.3403 -662.43867 0 -562.78985 485719
150 1822.9803 -659.56725 0 -562.76627 465652.59
160 1128.3444 -611.92808 0 -562.65512 469836.33
170 1699.6402 -647.82381 0 -562.75353 467977.54
180 1840.047 -654.5552 0 -562.77005 488397.41
190 1312.0974 -617.70228 0 -562.6629 510856.48
200 1809.8409 -648.6173 0 -562.75377 490822.97
Loop time of 0.399255 on 1 procs for 100 steps with 512 atoms
Performance: 21.640 ns/day, 1.109 hours/ns, 250.467 timesteps/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.38979 | 0.38979 | 0.38979 | 0.0 | 97.63
Neigh | 0.0061283 | 0.0061283 | 0.0061283 | 0.0 | 1.53
Comm | 0.0013483 | 0.0013483 | 0.0013483 | 0.0 | 0.34
Output | 0.00016642 | 0.00016642 | 0.00016642 | 0.0 | 0.04
Modify | 0.0014937 | 0.0014937 | 0.0014937 | 0.0 | 0.37
Other | | 0.0003231 | | | 0.08
Nlocal: 512 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1386 ave 1386 max 1386 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 17388 ave 17388 max 17388 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 17388
Ave neighs/atom = 33.9609
Neighbor list builds = 5
Dangerous builds = 0
# Test Vashishta model for In/P
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
pair_style vashishta
pair_coeff * * InP.vashishta In In In In P P P P
Reading potential file InP.vashishta with DATE: 2015-10-14
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair vashishta, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.783 | 2.783 | 2.783 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1080.3797 -1491.8652 0 -1432.5922 358930.65
110 1299.3069 -1504.494 0 -1432.6803 347566.41
120 1396.3035 -1508.3817 0 -1432.672 336798.42
130 1055.9254 -1483.8342 0 -1432.5849 341035.18
140 1543.9033 -1513.8559 0 -1432.6931 314268.54
150 1242.5383 -1491.417 0 -1432.6036 311068.53
160 1240.1733 -1489.2495 0 -1432.6033 295776.98
170 1559.5037 -1507.824 0 -1432.6417 266170.25
180 1379.0358 -1493.4092 0 -1432.5869 251410.63
190 1465.8073 -1496.8564 0 -1432.6105 226461.31
200 1456.6383 -1493.8133 0 -1432.5852 208188.34
Loop time of 0.198058 on 1 procs for 100 steps with 512 atoms
Performance: 43.624 ns/day, 0.550 hours/ns, 504.903 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.18746 | 0.18746 | 0.18746 | 0.0 | 94.65
Neigh | 0.00688 | 0.00688 | 0.00688 | 0.0 | 3.47
Comm | 0.001718 | 0.001718 | 0.001718 | 0.0 | 0.87
Output | 0.00013351 | 0.00013351 | 0.00013351 | 0.0 | 0.07
Modify | 0.0015109 | 0.0015109 | 0.0015109 | 0.0 | 0.76
Other | | 0.0003531 | | | 0.18
Nlocal: 512 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1810 ave 1810 max 1810 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 36480 ave 36480 max 36480 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 36480
Ave neighs/atom = 71.25
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
orthogonal box = (4.3448 0 0) to (17.3792 21.724 21.724)
orthogonal box = (4.3448 4.3448 0) to (17.3792 17.3792 21.724)
orthogonal box = (4.3448 4.3448 4.3448) to (17.3792 17.3792 17.3792)
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
Reading potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.74 | 2.74 | 2.74 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1080.3797 -3249.8451 0 -3190.5722 1925371.8
110 1878.2843 -3300.7205 0 -3191.0964 1893058.6
120 1153.8494 -3250.2114 0 -3190.5436 1932141.4
130 1363.8664 -3261.6889 0 -3190.6116 1902268.1
140 1565.5505 -3273.0405 0 -3190.6982 1878817
150 1416.9458 -3261.1805 0 -3190.865 1916554.4
160 1288.6997 -3249.8279 0 -3190.4141 1933227.4
170 1768.3043 -3279.2164 0 -3190.4535 1898123.4
180 1375.1942 -3251.0929 0 -3190.3781 1896205.1
190 1527.0671 -3258.7679 0 -3190.4344 1900673.6
200 1586.8691 -3260.4713 0 -3190.434 1912454.8
Loop time of 0.210082 on 1 procs for 100 steps with 512 atoms
Performance: 41.127 ns/day, 0.584 hours/ns, 476.006 timesteps/s
98.5% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.20628 | 0.20628 | 0.20628 | 0.0 | 98.19
Neigh | 0.00099182 | 0.00099182 | 0.00099182 | 0.0 | 0.47
Comm | 0.00085139 | 0.00085139 | 0.00085139 | 0.0 | 0.41
Output | 0.00013494 | 0.00013494 | 0.00013494 | 0.0 | 0.06
Modify | 0.0014665 | 0.0014665 | 0.0014665 | 0.0 | 0.70
Other | | 0.0003612 | | | 0.17
Nlocal: 512 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1027 ave 1027 max 1027 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 14602 ave 14602 max 14602 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14602
Ave neighs/atom = 28.5195
Neighbor list builds = 1
Dangerous builds = 0
# Test Tersoff/Mod model for Si
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
Reading potential file Si.tersoff.mod with DATE: 2013-07-26
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.744 | 2.744 | 2.744 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1080.3797 -2307.5808 0 -2248.3078 18624.75
110 851.21757 -2290.7529 0 -2248.2817 21041.97
120 1074.6349 -2303.6437 0 -2248.321 15244.809
130 1006.5662 -2296.9866 0 -2248.3057 15954.09
140 855.64354 -2285.1581 0 -2248.2781 19346.662
150 1111.1546 -2300.024 0 -2248.314 14698.998
160 1114.7933 -2297.8629 0 -2248.3098 14414.14
170 853.9568 -2278.6047 0 -2248.2716 20120.632
180 1031.0925 -2288.3481 0 -2248.2949 18332.008
190 1230.3458 -2298.9946 0 -2248.3111 15520.302
200 1053.8799 -2284.8931 0 -2248.2816 20033.536
Loop time of 0.158428 on 1 procs for 100 steps with 512 atoms
Performance: 54.536 ns/day, 0.440 hours/ns, 631.200 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.15318 | 0.15318 | 0.15318 | 0.0 | 96.69
Neigh | 0.0022631 | 0.0022631 | 0.0022631 | 0.0 | 1.43
Comm | 0.00093651 | 0.00093651 | 0.00093651 | 0.0 | 0.59
Output | 0.00019622 | 0.00019622 | 0.00019622 | 0.0 | 0.12
Modify | 0.0015035 | 0.0015035 | 0.0015035 | 0.0 | 0.95
Other | | 0.0003462 | | | 0.22
Nlocal: 512 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1001 ave 1001 max 1001 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 8872 ave 8872 max 8872 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8872
Ave neighs/atom = 17.3281
Neighbor list builds = 3
Dangerous builds = 0
# Test Tersoff/Mod/C model for Si
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 1 by 1 MPI processor grid
pair style sw stores no restart info
512 atoms
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
Reading potential file Si.tersoff.modc with DATE: 2016-11-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.20569
ghost atom cutoff = 4.20569
binsize = 2.10285, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.739 | 2.739 | 2.739 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1080.3797 -2307.5509 0 -2248.2779 21368.36
110 846.70637 -2290.4239 0 -2248.2488 22856.37
120 1083.0906 -2304.1686 0 -2248.2895 17530.457
130 1008.7015 -2297.0835 0 -2248.2781 17698.498
140 811.71413 -2282.2528 0 -2248.2417 19986.737
150 1094.4969 -2298.9718 0 -2248.2836 15822.967
160 1100.1109 -2296.9461 0 -2248.2804 15213.75
170 801.20165 -2275.2175 0 -2248.2372 20153.957
180 994.63485 -2286.1145 0 -2248.2621 19167.021
190 1215.6425 -2298.2082 0 -2248.284 15895.654
200 992.20385 -2281.0372 0 -2248.2425 19643.792
Loop time of 0.282035 on 1 procs for 100 steps with 512 atoms
Performance: 30.634 ns/day, 0.783 hours/ns, 354.565 timesteps/s
98.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.27701 | 0.27701 | 0.27701 | 0.0 | 98.22
Neigh | 0.0021927 | 0.0021927 | 0.0021927 | 0.0 | 0.78
Comm | 0.00089169 | 0.00089169 | 0.00089169 | 0.0 | 0.32
Output | 0.0001452 | 0.0001452 | 0.0001452 | 0.0 | 0.05
Modify | 0.0014682 | 0.0014682 | 0.0014682 | 0.0 | 0.52
Other | | 0.0003295 | | | 0.12
Nlocal: 512 ave 512 max 512 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 972 ave 972 max 972 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 8390 ave 8390 max 8390 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 8390
Ave neighs/atom = 16.3867
Neighbor list builds = 3
Dangerous builds = 0
Total wall time: 0:00:01


@ -1,524 +0,0 @@
LAMMPS (27 Nov 2018)
using 1 OpenMP thread(s) per MPI task
# Simple regression tests for threebody potentials
# NOTE: These are not intended to represent real materials
units metal
atom_style atomic
atom_modify map array
boundary p p p
atom_modify sort 0 0.0
# temperature
variable t equal 1800.0
# cubic diamond unit cell
variable a equal 5.431
lattice custom $a a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
lattice custom 5.431 a1 1.0 0.0 0.0 a2 0.0 1.0 0.0 a3 0.0 0.0 1.0 basis 0.0 0.0 0.0 basis 0.0 0.5 0.5 basis 0.5 0.0 0.5 basis 0.5 0.5 0.0 basis 0.25 0.25 0.25 basis 0.25 0.75 0.75 basis 0.75 0.25 0.75 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 5.431 5.431 5.431
region myreg block 0 4 0 4 0 4
create_box 8 myreg
Created orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 2 by 2 MPI processor grid
create_atoms 1 region myreg basis 1 1 basis 2 2 basis 3 3 basis 4 4 basis 5 5 basis 6 6 basis 7 7 basis 8 8
Created 512 atoms
Time spent = 0.000348091 secs
mass * 28.06
velocity all create $t 5287287 mom yes rot yes dist gaussian
velocity all create 1800 5287287 mom yes rot yes dist gaussian
# Equilibrate using Stillinger-Weber model for silicon
pair_style sw
pair_coeff * * Si.sw Si Si Si Si Si Si Si Si
Reading potential file Si.sw with DATE: 2007-06-11
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.77118
ghost atom cutoff = 4.77118
binsize = 2.38559, bins = 10 10 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.957 | 2.957 | 2.957 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1800 -2220.3392 0 -2101.4457 12358.626
10 979.93888 -2165.9769 0 -2101.3242 14172.134
20 579.6159 -2139.1412 0 -2101.3085 11866.038
30 1027.5223 -2167.6187 0 -2101.3942 6885.6751
40 711.03138 -2145.0665 0 -2101.3405 6357.0866
50 497.18304 -2129.9748 0 -2101.3052 7137.6093
60 1008.411 -2162.5834 0 -2101.3804 6745.1775
70 947.58674 -2156.9872 0 -2101.3508 9678.1622
80 798.43683 -2145.8382 0 -2101.3074 12728.694
90 1259.2065 -2174.5324 0 -2101.3799 12033.395
100 1100.5203 -2161.8599 0 -2101.3468 12356.137
Loop time of 0.0414283 on 4 procs for 100 steps with 512 atoms
Performance: 208.553 ns/day, 0.115 hours/ns, 2413.811 timesteps/s
95.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.030011 | 0.032047 | 0.034036 | 0.8 | 77.36
Neigh | 0.00043392 | 0.00045151 | 0.0004642 | 0.0 | 1.09
Comm | 0.0050831 | 0.0067263 | 0.0091536 | 1.8 | 16.24
Output | 0.00018191 | 0.00018775 | 0.00020432 | 0.0 | 0.45
Modify | 0.00068116 | 0.0010136 | 0.001817 | 1.5 | 2.45
Other | | 0.001002 | | | 2.42
Nlocal: 128 ave 132 max 126 min
Histogram: 1 2 0 0 0 0 0 0 0 1
Nghost: 525 ave 527 max 521 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3507 ave 3596 max 3470 min
Histogram: 3 0 0 0 0 0 0 0 0 1
Total # of neighbors = 14028
Ave neighs/atom = 27.3984
Neighbor list builds = 2
Dangerous builds = 0
write_restart restart.equil
# Test Stillinger-Weber model for Cd/Te/Zn/Se/Hg/S
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
pair_style sw
pair_coeff * * CdTeZnSeHgS0.sw Cd Zn Hg Cd Te S Se Te
Reading potential file CdTeZnSeHgS0.sw with DATE: 2013-08-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.632
ghost atom cutoff = 5.632
binsize = 2.816, bins = 8 8 8
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair sw, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.717 | 2.718 | 2.718 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1100.5203 -621.61663 0 -561.10352 462716.2
110 1478.2708 -644.61282 0 -561.16927 464567.29
120 1989.4577 -675.5312 0 -561.25985 486476.63
130 1185.4652 -620.23829 0 -561.09359 514103.86
140 1726.9774 -654.21952 0 -561.20676 488915.22
150 1863.2061 -660.66167 0 -561.21416 466985.15
160 1119.0313 -609.68409 0 -561.08624 471511.61
170 1708.2653 -646.79823 0 -561.18829 468697.42
180 1891.8451 -656.32585 0 -561.2042 489912.65
190 1345.7703 -618.29258 0 -561.09909 512364.68
200 1865.9507 -650.72167 0 -561.19281 491531.23
Loop time of 0.117532 on 4 procs for 100 steps with 512 atoms
Performance: 73.512 ns/day, 0.326 hours/ns, 850.834 timesteps/s
97.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.096347 | 0.10005 | 0.10244 | 0.8 | 85.13
Neigh | 0.000947 | 0.0010053 | 0.0011535 | 0.3 | 0.86
Comm | 0.0098422 | 0.012949 | 0.016335 | 2.2 | 11.02
Output | 0.00017452 | 0.00022596 | 0.00037885 | 0.0 | 0.19
Modify | 0.00078702 | 0.0013881 | 0.0021975 | 1.6 | 1.18
Other | | 0.001909 | | | 1.62
Nlocal: 128 ave 133 max 125 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Nghost: 754.5 ave 759 max 748 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4353 ave 4512 max 4252 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Total # of neighbors = 17412
Ave neighs/atom = 34.0078
Neighbor list builds = 3
Dangerous builds = 0
# Test Vashishta model for In/P
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
pair_style vashishta
pair_coeff * * InP.vashishta In In In In P P P P
Reading potential file InP.vashishta with DATE: 2015-10-14
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5, bins = 7 7 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair vashishta, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.739 | 2.739 | 2.739 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1100.5203 -1495.2135 0 -1434.7004 357102.51
110 1238.7371 -1502.5372 0 -1434.7528 348870.19
120 1409.575 -1511.3137 0 -1434.7612 336831.6
130 1081.541 -1487.5554 0 -1434.6881 339244.71
140 1484.7455 -1512.0076 0 -1434.7646 313026.51
150 1263.0321 -1494.834 0 -1434.7018 306496.02
160 1253.4363 -1492.1804 0 -1434.7025 290683.01
170 1536.7348 -1508.3735 0 -1434.7312 261755.69
180 1342.0478 -1493.0615 0 -1434.6754 248519.54
190 1457.921 -1498.4355 0 -1434.6991 223607.39
200 1499.5384 -1498.7005 0 -1434.6898 204647.17
Loop time of 0.060006 on 4 procs for 100 steps with 512 atoms
Performance: 143.986 ns/day, 0.167 hours/ns, 1666.501 timesteps/s
95.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.043862 | 0.047754 | 0.04964 | 1.0 | 79.58
Neigh | 0.0018079 | 0.0018883 | 0.0019724 | 0.1 | 3.15
Comm | 0.0067413 | 0.0083464 | 0.011978 | 2.3 | 13.91
Output | 0.00018859 | 0.00029892 | 0.00062919 | 0.0 | 0.50
Modify | 0.00088429 | 0.0011829 | 0.0014911 | 0.8 | 1.97
Other | | 0.0005357 | | | 0.89
Nlocal: 128 ave 129 max 127 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Nghost: 992.75 ave 1001 max 987 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 9116 ave 9213 max 9051 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 36464
Ave neighs/atom = 71.2188
Neighbor list builds = 4
Dangerous builds = 0
# Test Tersoff model for B/N/C
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
variable fac equal 0.6
change_box all x scale ${fac} y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale ${fac} z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale ${fac} remap
change_box all x scale 0.6 y scale 0.6 z scale 0.6 remap
orthogonal box = (4.3448 0 0) to (17.3792 21.724 21.724)
orthogonal box = (4.3448 4.3448 0) to (17.3792 17.3792 21.724)
orthogonal box = (4.3448 4.3448 4.3448) to (17.3792 17.3792 17.3792)
pair_style tersoff
pair_coeff * * BNC.tersoff N N N C B B C B
Reading potential file BNC.tersoff with DATE: 2013-03-21
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 3.1
ghost atom cutoff = 3.1
binsize = 1.55, bins = 9 9 9
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.699 | 2.699 | 2.699 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1100.5203 -3252.8699 0 -3192.3567 1930742.5
110 1858.7735 -3301.1227 0 -3192.8403 1901753.1
120 1107.7693 -3248.8236 0 -3192.2522 1953649.4
130 1315.4397 -3260.1471 0 -3192.2616 1898713.9
140 1541.4371 -3273.1832 0 -3192.41 1887352.3
150 1424.7104 -3262.9648 0 -3192.216 1960688.1
160 1291.2932 -3251.5548 0 -3192.0201 1914541.2
170 1719.1133 -3277.7936 0 -3192.33 1893926
180 1312.8066 -3248.6914 0 -3192.1477 1921417.4
190 1481.3472 -3257.1585 0 -3191.8991 1927045.6
200 1615.282 -3263.8377 0 -3192.055 1920616.3
Loop time of 0.0635643 on 4 procs for 100 steps with 512 atoms
Performance: 135.925 ns/day, 0.177 hours/ns, 1573.210 timesteps/s
97.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.051391 | 0.053262 | 0.05459 | 0.6 | 83.79
Neigh | 0.00052404 | 0.00052798 | 0.00053144 | 0.0 | 0.83
Comm | 0.0063059 | 0.0079132 | 0.0095143 | 1.5 | 12.45
Output | 0.00017905 | 0.00029147 | 0.00062609 | 0.0 | 0.46
Modify | 0.00064421 | 0.00096625 | 0.0012674 | 0.0 | 1.52
Other | | 0.0006039 | | | 0.95
Nlocal: 128 ave 131 max 125 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Nghost: 526.75 ave 532 max 523 min
Histogram: 1 1 0 0 0 1 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 3646.5 ave 3722 max 3567 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Total # of neighbors = 14586
Ave neighs/atom = 28.4883
Neighbor list builds = 2
Dangerous builds = 0
# Test Tersoff/Mod model for Si
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
Reading potential file Si.tersoff.mod with DATE: 2013-07-26
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.701 | 2.701 | 2.701 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1100.5203 -2308.936 0 -2248.4229 18087.231
110 821.82813 -2288.8477 0 -2248.3856 21245.995
120 1082.3774 -2304.2287 0 -2248.4366 15467.483
130 995.26561 -2296.2957 0 -2248.4145 16341.517
140 826.97009 -2283.3692 0 -2248.3897 19123.5
150 1048.4178 -2296.0496 0 -2248.4172 15060.312
160 1101.7884 -2297.2417 0 -2248.426 15070.879
170 859.48562 -2279.1747 0 -2248.383 21416.479
180 1041.679 -2289.2303 0 -2248.4038 19572.707
190 1278.0514 -2302.2822 0 -2248.4257 16006.173
200 1097.0682 -2287.8121 0 -2248.3929 20906.774
Loop time of 0.046846 on 4 procs for 100 steps with 512 atoms
Performance: 184.434 ns/day, 0.130 hours/ns, 2134.653 timesteps/s
98.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.037434 | 0.039142 | 0.041109 | 0.7 | 83.55
Neigh | 0.0005877 | 0.00059921 | 0.00061464 | 0.0 | 1.28
Comm | 0.0036416 | 0.0055436 | 0.0073476 | 1.8 | 11.83
Output | 0.00016212 | 0.00027066 | 0.00059533 | 0.0 | 0.58
Modify | 0.00063038 | 0.00065678 | 0.00067234 | 0.0 | 1.40
Other | | 0.0006335 | | | 1.35
Nlocal: 128 ave 131 max 126 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Nghost: 518 ave 525 max 513 min
Histogram: 1 0 1 0 1 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2202 ave 2257 max 2175 min
Histogram: 1 1 1 0 0 0 0 0 0 1
Total # of neighbors = 8808
Ave neighs/atom = 17.2031
Neighbor list builds = 3
Dangerous builds = 0
# Test Tersoff/Mod/C model for Si
clear
using 1 OpenMP thread(s) per MPI task
read_restart restart.equil
restoring atom style atomic from restart
orthogonal box = (0 0 0) to (21.724 21.724 21.724)
1 by 2 by 2 MPI processor grid
pair style sw stores no restart info
512 atoms
newton on on
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
Reading potential file Si.tersoff.modc with DATE: 2016-11-09
thermo 10
fix 1 all nvt temp $t $t 0.1
fix 1 all nvt temp 1800 $t 0.1
fix 1 all nvt temp 1800 1800 0.1
Resetting global fix info from restart file:
fix style: nvt, fix ID: 1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
All restart file global fix info was re-assigned
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.20569
ghost atom cutoff = 4.20569
binsize = 2.10285, bins = 11 11 11
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair tersoff/mod/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.701 | 2.701 | 2.701 Mbytes
Step Temp E_pair E_mol TotEng Press
100 1100.5203 -2308.5955 0 -2248.0824 20775.991
110 813.33618 -2287.9469 0 -2248.04 22564.16
120 1100.0902 -2305.051 0 -2248.092 17540.971
130 1022.9471 -2297.7384 0 -2248.0801 17985.336
140 761.99242 -2278.7251 0 -2248.0345 20462.975
150 1053.3973 -2296.1039 0 -2248.081 16130.559
160 1089.2622 -2296.1059 0 -2248.0833 15434.234
170 776.25788 -2273.4496 0 -2248.0321 20642.702
180 1016.1885 -2287.4305 0 -2248.0641 19276.936
190 1266.3943 -2301.3675 0 -2248.0865 16199.281
200 1012.4185 -2282.134 0 -2248.0423 20795.113
Loop time of 0.0840016 on 4 procs for 100 steps with 512 atoms
Performance: 102.855 ns/day, 0.233 hours/ns, 1190.454 timesteps/s
97.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.068133 | 0.071658 | 0.075315 | 1.0 | 85.31
Neigh | 0.00057864 | 0.00058872 | 0.00060058 | 0.0 | 0.70
Comm | 0.0049844 | 0.008617 | 0.012086 | 2.8 | 10.26
Output | 0.0001626 | 0.00027418 | 0.00060463 | 0.0 | 0.33
Modify | 0.00070477 | 0.0011689 | 0.0022428 | 1.8 | 1.39
Other | | 0.001694 | | | 2.02
Nlocal: 128 ave 129 max 127 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 489.75 ave 501 max 481 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 2094.5 ave 2121 max 2060 min
Histogram: 1 0 0 1 0 0 0 0 0 2
Total # of neighbors = 8378
Ave neighs/atom = 16.3633
Neighbor list builds = 3
Dangerous builds = 0
Total wall time: 0:00:00

149
lib/gpu/Makefile.cuda Normal file

@ -0,0 +1,149 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
ifeq ($(CUDA_HOME),)
CUDA_HOME = /usr/local/cuda
endif
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_SINGLE_DOUBLE
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
# device code compiler and settings
NVCC = nvcc
CUDA_ARCH = -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] \
-gencode arch=compute_60,code=[sm_60,compute_60] -gencode arch=compute_61,code=[sm_61,compute_61] \
-gencode arch=compute_70,code=[sm_70,compute_70] -gencode arch=compute_75,code=[sm_75,compute_75]
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -L$(CUDA_HOME)/lib64/stubs
CUDA_OPTS = -DUNIX -O3 --use_fast_math $(LMP_INC) -Xcompiler -fPIC
CUDA_LINK = $(CUDA_LIB) -lcudart
CUDA = $(NVCC) $(CUDA_INCLUDE) $(CUDA_OPTS) -Icudpp_mini $(CUDA_ARCH) \
$(CUDA_PRECISION)
BIN2C = $(CUDA_HOME)/bin/bin2c
# host code compiler and settings
CUDR_CPP = mpicxx -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK -DOMPI_SKIP_MPICXX=1 -fPIC
CUDR_OPTS = -O2 $(LMP_INC)
CUDR = $(CUDR_CPP) $(CUDR_OPTS) $(CUDA_PRECISION) $(CUDA_INCLUDE) \
$(CUDPP_OPT)
# Headers for Geryon
UCL_H = $(wildcard ./geryon/ucl*.h)
NVD_H = $(wildcard ./geryon/nvd*.h) $(UCL_H) lal_preprocessor.h
ALL_H = $(NVD_H) $(wildcard ./lal_*.h)
# Source files
SRCS := $(wildcard ./lal_*.cpp)
OBJS := $(subst ./,$(OBJ_DIR)/,$(SRCS:%.cpp=%.o))
CUS := $(wildcard lal_*.cu)
CUHS := $(filter-out pppm_cubin.h, $(CUS:lal_%.cu=%_cubin.h)) pppm_f_cubin.h pppm_d_cubin.h
CUHS := $(addprefix $(OBJ_DIR)/, $(CUHS))
ifdef CUDPP_OPT
CUDPP = $(OBJ_DIR)/cudpp.o $(OBJ_DIR)/cudpp_plan.o \
$(OBJ_DIR)/cudpp_maximal_launch.o $(OBJ_DIR)/cudpp_plan_manager.o \
$(OBJ_DIR)/radixsort_app.cu_o $(OBJ_DIR)/scan_app.cu_o
endif
# targets
GPU_LIB = $(LIB_DIR)/libgpu.a
EXECS = $(BIN_DIR)/nvc_get_devices
all: $(OBJ_DIR) $(CUHS) $(GPU_LIB) $(EXECS)
$(OBJ_DIR):
mkdir -p $@
# device code compilation
$(OBJ_DIR)/pppm_f.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=float -Dgrdtyp4=float4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_f_cubin.h: $(OBJ_DIR)/pppm_f.cubin
$(BIN2C) -c -n pppm_f $(OBJ_DIR)/pppm_f.cubin > $(OBJ_DIR)/pppm_f_cubin.h
$(OBJ_DIR)/pppm_d.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=double -Dgrdtyp4=double4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_d_cubin.h: $(OBJ_DIR)/pppm_d.cubin
$(BIN2C) -c -n pppm_d $(OBJ_DIR)/pppm_d.cubin > $(OBJ_DIR)/pppm_d_cubin.h
$(OBJ_DIR)/%_cubin.h: lal_%.cu $(ALL_H)
$(CUDA) --fatbin -DNV_KERNEL -o $(OBJ_DIR)/$*.cubin $(OBJ_DIR)/lal_$*.cu
$(BIN2C) -c -n $* $(OBJ_DIR)/$*.cubin > $@
@rm $(OBJ_DIR)/$*.cubin
# host code compilation
$(OBJ_DIR)/lal_%.o: lal_%.cpp $(CUHS) $(ALL_H)
$(CUDR) -o $@ -c $< -I$(OBJ_DIR)
#ifdef CUDPP_OPT
$(OBJ_DIR)/cudpp.o: cudpp_mini/cudpp.cpp
$(CUDR) -o $@ -c cudpp_mini/cudpp.cpp -Icudpp_mini
$(OBJ_DIR)/cudpp_plan.o: cudpp_mini/cudpp_plan.cpp
$(CUDR) -o $@ -c cudpp_mini/cudpp_plan.cpp -Icudpp_mini
$(OBJ_DIR)/cudpp_maximal_launch.o: cudpp_mini/cudpp_maximal_launch.cpp
$(CUDR) -o $@ -c cudpp_mini/cudpp_maximal_launch.cpp -Icudpp_mini
$(OBJ_DIR)/cudpp_plan_manager.o: cudpp_mini/cudpp_plan_manager.cpp
$(CUDR) -o $@ -c cudpp_mini/cudpp_plan_manager.cpp -Icudpp_mini
$(OBJ_DIR)/radixsort_app.cu_o: cudpp_mini/radixsort_app.cu
$(CUDA) -o $@ -c cudpp_mini/radixsort_app.cu
$(OBJ_DIR)/scan_app.cu_o: cudpp_mini/scan_app.cu
$(CUDA) -o $@ -c cudpp_mini/scan_app.cu
#endif
# build libgpu.a
$(GPU_LIB): $(OBJS) $(CUDPP)
$(AR) -crusv $(GPU_LIB) $(OBJS) $(CUDPP)
@cp $(EXTRAMAKE) Makefile.lammps
# test app for querying device info
$(BIN_DIR)/nvc_get_devices: ./geryon/ucl_get_devices.cpp $(NVD_H)
$(CUDR) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_CUDADR $(CUDA_LIB) -lcuda
clean:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUDPP) $(CUHS) *.linkinfo
veryclean: clean
-rm -rf *~ *.linkinfo
cleanlib:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUHS) *.linkinfo
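A sketch of how this new Makefile is typically driven (the -j level is arbitrary); per its own targets it produces libgpu.a and the nvc_get_devices query tool, and copies the chosen EXTRAMAKE to Makefile.lammps for the main LAMMPS build to include:

    cd lib/gpu
    make -f Makefile.cuda -j 4    # builds libgpu.a, nvc_get_devices, and Makefile.lammps
    ./nvc_get_devices             # list the CUDA devices this build can see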


@ -1,72 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
ifeq ($(CUDA_HOME),)
CUDA_HOME = /usr/local/cuda
endif
NVCC = nvcc
# obsolete hardware. not supported by current drivers anymore.
#CUDA_ARCH = -arch=sm_13
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
# Fermi hardware
#CUDA_ARCH = -arch=sm_20
#CUDA_ARCH = -arch=sm_21
# Kepler hardware
#CUDA_ARCH = -arch=sm_30
#CUDA_ARCH = -arch=sm_32
#CUDA_ARCH = -arch=sm_35
#CUDA_ARCH = -arch=sm_37
# Maxwell hardware
CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_52
# Pascal hardware
#CUDA_ARCH = -arch=sm_60
#CUDA_ARCH = -arch=sm_61
# Volta hardware
#CUDA_ARCH = -arch=sm_70
# Turing hardware
#CUDA_ARCH = -arch=sm_75
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_DOUBLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -L$(CUDA_HOME)/lib64/stubs
CUDA_OPTS = -DUNIX -O3 --use_fast_math $(LMP_INC) -Xcompiler -fPIC
CUDR_CPP = mpicxx -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK -DOMPI_SKIP_MPICXX=1 -fPIC -std=c++11
CUDR_OPTS = -O2 $(LMP_INC) # -xHost -no-prec-div -ansi-alias
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile
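This removed per-architecture Makefile hard-codes CUDA_ARCH and CUDA_PRECISION, but since make lets command-line assignments override in-file ones, the architecture could always be selected at build time without editing the file. A sketch (sm_60 is just an example value, and <this Makefile> stands in for the file's on-disk name, which the hunk does not show):

    make -f <this Makefile> CUDA_ARCH="-arch=sm_60"   # command line overrides the in-file sm_50 default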


@ -1,73 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
ifeq ($(CUDA_HOME),)
CUDA_HOME = /usr/local/cuda
endif
NVCC = nvcc
# obsolete hardware. not supported by current drivers anymore.
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
# Fermi hardware
#CUDA_ARCH = -arch=sm_20
#CUDA_ARCH = -arch=sm_21
# Kepler hardware
#CUDA_ARCH = -arch=sm_30
#CUDA_ARCH = -arch=sm_32
#CUDA_ARCH = -arch=sm_35
#CUDA_ARCH = -arch=sm_37
# Maxwell hardware
CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_52
# Pascal hardware
#CUDA_ARCH = -arch=sm_60
#CUDA_ARCH = -arch=sm_61
# Volta hardware
#CUDA_ARCH = -arch=sm_70
# Turing hardware
#CUDA_ARCH = -arch=sm_75
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_SINGLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -L$(CUDA_HOME)/lib64/stubs
CUDA_OPTS = -DUNIX -O3 --use_fast_math $(LMP_INC) -Xcompiler -fPIC
CUDR_CPP = mpicxx -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK -DOMPI_SKIP_MPICXX=1 -fPIC -std=c++11
CUDR_OPTS = -O2 $(LMP_INC) # -xHost -no-prec-div -ansi-alias
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile


@ -1,72 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
ifeq ($(CUDA_HOME),)
CUDA_HOME = /usr/local/cuda
endif
NVCC = nvcc
# obsolete hardware. not supported by current drivers anymore.
#CUDA_ARCH = -arch=sm_13
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
# Fermi hardware
#CUDA_ARCH = -arch=sm_20
#CUDA_ARCH = -arch=sm_21
# Kepler hardware
#CUDA_ARCH = -arch=sm_30
#CUDA_ARCH = -arch=sm_32
#CUDA_ARCH = -arch=sm_35
#CUDA_ARCH = -arch=sm_37
# Maxwell hardware
CUDA_ARCH = -arch=sm_50
#CUDA_ARCH = -arch=sm_52
# Pascal hardware
#CUDA_ARCH = -arch=sm_60
#CUDA_ARCH = -arch=sm_61
# Volta hardware
#CUDA_ARCH = -arch=sm_70
# Turing hardware
#CUDA_ARCH = -arch=sm_75
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_SINGLE_SINGLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -L$(CUDA_HOME)/lib64/stubs
CUDA_OPTS = -DUNIX -O3 --use_fast_math $(LMP_INC) -Xcompiler -fPIC
CUDR_CPP = mpicxx -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK -DOMPI_SKIP_MPICXX=1 -fPIC -std=c++11
CUDR_OPTS = -O2 $(LMP_INC) # -xHost -no-prec-div -ansi-alias
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile


@ -1 +0,0 @@
Makefile.linux

92
lib/gpu/Makefile.opencl Normal file

@ -0,0 +1,92 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for OpenCL
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.opencl
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
OCL_PREC = -D_SINGLE_DOUBLE
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
# Compiler and linker settings
# OCL_TUNE = -DFERMI_OCL # -- Uncomment for NVIDIA Fermi
# OCL_TUNE = -DKEPLER_OCL # -- Uncomment for NVIDIA Kepler
# OCL_TUNE = -DCYPRESS_OCL # -- Uncomment for AMD Cypress
OCL_TUNE = -DGENERIC_OCL # -- Uncomment for generic device
OCL_INC = -I/usr/local/cuda/include # Path to CL directory
OCL_CPP = mpic++ $(DEFAULT_DEVICE) -g -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK $(LMP_INC) $(OCL_INC)
OCL_LINK = -lOpenCL
OCL = $(OCL_CPP) $(OCL_PREC) $(OCL_TUNE) -DUSE_OPENCL
# Headers for Geryon
UCL_H = $(wildcard ./geryon/ucl*.h)
OCL_H = $(wildcard ./geryon/ocl*.h) $(UCL_H) lal_preprocessor.h
PRE1_H = lal_preprocessor.h lal_aux_fun1.h
ALL_H = $(OCL_H) $(wildcard ./lal_*.h)
# Source files
SRCS := $(wildcard ./lal_*.cpp)
OBJS := $(subst ./,$(OBJ_DIR)/,$(SRCS:%.cpp=%.o))
CUS := $(wildcard lal_*.cu)
KERS := $(subst ./,$(OBJ_DIR)/,$(CUS:lal_%.cu=%_cl.h))
KERS := $(addprefix $(OBJ_DIR)/, $(KERS))
# targets
GPU_LIB = $(LIB_DIR)/libgpu.a
EXECS = $(BIN_DIR)/ocl_get_devices
all: $(OBJ_DIR) $(KERS) $(GPU_LIB) $(EXECS)
$(OBJ_DIR):
mkdir -p $@
# device code compilation
$(OBJ_DIR)/%_cl.h: lal_%.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh $* $(PRE1_H) $< $@;
# host code compilation
$(OBJ_DIR)/lal_%.o: lal_%.cpp $(KERS)
$(OCL) -o $@ -c $< -I$(OBJ_DIR)
# build libgpu.a
$(GPU_LIB): $(OBJS)
$(AR) -crusv $(GPU_LIB) $(OBJS)
@cp $(EXTRAMAKE) Makefile.lammps
# test app for querying device info
$(BIN_DIR)/ocl_get_devices: ./geryon/ucl_get_devices.cpp $(OCL_H)
$(OCL) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_OPENCL $(OCL_LINK)
clean:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(KERS) *.linkinfo
veryclean: clean
-rm -rf *~ *.linkinfo
cleanlib:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(KERS) *.linkinfo
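As with the CUDA variant, a sketch of how this OpenCL Makefile is driven (the -j level is arbitrary); per its own targets it builds libgpu.a and the ocl_get_devices tool, and installs Makefile.lammps.opencl as Makefile.lammps:

    cd lib/gpu
    make -f Makefile.opencl -j 4   # builds libgpu.a, ocl_get_devices, and Makefile.lammps
    ./ocl_get_devices              # query the OpenCL platforms/devices found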


@ -1,24 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Makefile for OpenCL for use with MPI STUBS library
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
OCL_HOME = $(HOME)/intelocl
OCL_CPP = g++ -O3 -DMPI_GERYON -DUCL_NO_EXIT -I../../src/STUBS -I$(OCL_HOME)/include/
# available tuned parameter sets: FERMI_OCL, CYPRESS_OCL
#OCL_TUNE = -DFERMI_OCL
OCL_TUNE = -DCYPRESS_OCL
OCL_LINK = -lOpenCL -L../../src/STUBS -lmpi
OCL_PREC = -D_SINGLE_DOUBLE
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
include Opencl.makefile
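Because this removed variant links -lmpi out of ../../src/STUBS, the stub MPI library has to exist before the GPU library is built. A sketch of the build order (the on-disk name of this Makefile is not shown in the hunk, so <this Makefile> is a placeholder):

    make -C ../../src/STUBS    # build the dummy MPI library first
    make -f <this Makefile>    # then the serial OpenCL GPU library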


@ -1,7 +1,7 @@
NVCC = $(CUDA_HOME)/bin/nvcc
EXTRAMAKE = Makefile.lammps.standard
CUDA_ARCH = -arch=sm_35
CUDA_ARCH = -arch=sm_75
CUDA_PRECISION = -D_SINGLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -Xlinker -rpath -Xlinker $(CUDA_HOME)/lib64 -lcudart


@ -1,32 +0,0 @@
# /* ----------------------------------------------------------------------
# Makefile for Cray XK7 Architecture supporting Hyper-Q with Proxy
# - Uses PrgEnv-gnu
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
CUDA_HOME = $(CRAY_CUDATOOLKIT_DIR)
NVCC = nvcc
CUDA_ARCH = -arch=sm_35
CUDA_PRECISION = -D_SINGLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64 -L$(CUDA_HOME)/lib64/stubs
CUDA_OPTS = -DUNIX -O3 --use_fast_math
CUDR_CPP = CC -DCUDA_PROXY -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 -march=bdver1 -ftree-vectorize
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = # -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile


@ -1,157 +1,67 @@
CUDA = $(NVCC) $(CUDA_INCLUDE) $(CUDA_OPTS) -Icudpp_mini $(CUDA_ARCH) \
$(CUDA_PRECISION)
CUDR = $(CUDR_CPP) $(CUDR_OPTS) $(CUDA_PRECISION) $(CUDA_INCLUDE) \
$(CUDPP_OPT)
CUDA_LINK = $(CUDA_LIB) -lcudart
BIN2C = $(CUDA_HOME)/bin/bin2c
GPU_LIB = $(LIB_DIR)/libgpu.a
# Headers for Geryon
UCL_H = $(wildcard ./geryon/ucl*.h)
NVC_H = $(wildcard ./geryon/nvc*.h) $(UCL_H)
NVD_H = $(wildcard ./geryon/nvd*.h) $(UCL_H) lal_preprocessor.h
# Headers for Pair Stuff
PAIR_H = lal_atom.h lal_answer.h lal_neighbor_shared.h \
lal_neighbor.h lal_precision.h lal_device.h \
lal_balance.h lal_pppm.h
ALL_H = $(NVD_H) $(wildcard ./lal_*.h)
ALL_H = $(NVD_H) $(PAIR_H)
# Source files
SRCS := $(wildcard ./lal_*.cpp)
OBJS := $(subst ./,$(OBJ_DIR)/,$(SRCS:%.cpp=%.o))
CUS := $(wildcard lal_*.cu)
CUHS := $(filter-out pppm_cubin.h, $(CUS:lal_%.cu=%_cubin.h)) pppm_f_cubin.h pppm_d_cubin.h
CUHS := $(addprefix $(OBJ_DIR)/, $(CUHS))
EXECS = $(BIN_DIR)/nvc_get_devices
ifdef CUDPP_OPT
CUDPP = $(OBJ_DIR)/cudpp.o $(OBJ_DIR)/cudpp_plan.o \
$(OBJ_DIR)/cudpp_maximal_launch.o $(OBJ_DIR)/cudpp_plan_manager.o \
$(OBJ_DIR)/radixsort_app.cu_o $(OBJ_DIR)/scan_app.cu_o
endif
OBJS = $(OBJ_DIR)/lal_atom.o $(OBJ_DIR)/lal_ans.o \
$(OBJ_DIR)/lal_neighbor.o $(OBJ_DIR)/lal_neighbor_shared.o \
$(OBJ_DIR)/lal_device.o $(OBJ_DIR)/lal_base_atomic.o \
$(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_base_ellipsoid.o \
$(OBJ_DIR)/lal_base_dipole.o $(OBJ_DIR)/lal_base_three.o \
$(OBJ_DIR)/lal_base_dpd.o \
$(OBJ_DIR)/lal_pppm.o $(OBJ_DIR)/lal_pppm_ext.o \
$(OBJ_DIR)/lal_gayberne.o $(OBJ_DIR)/lal_gayberne_ext.o \
$(OBJ_DIR)/lal_re_squared.o $(OBJ_DIR)/lal_re_squared_ext.o \
$(OBJ_DIR)/lal_lj.o $(OBJ_DIR)/lal_lj_ext.o \
$(OBJ_DIR)/lal_lj96.o $(OBJ_DIR)/lal_lj96_ext.o \
$(OBJ_DIR)/lal_lj_expand.o $(OBJ_DIR)/lal_lj_expand_ext.o \
$(OBJ_DIR)/lal_lj_coul.o $(OBJ_DIR)/lal_lj_coul_ext.o \
$(OBJ_DIR)/lal_lj_coul_long.o $(OBJ_DIR)/lal_lj_coul_long_ext.o \
$(OBJ_DIR)/lal_lj_dsf.o $(OBJ_DIR)/lal_lj_dsf_ext.o \
$(OBJ_DIR)/lal_lj_class2_long.o $(OBJ_DIR)/lal_lj_class2_long_ext.o \
$(OBJ_DIR)/lal_coul_long.o $(OBJ_DIR)/lal_coul_long_ext.o \
$(OBJ_DIR)/lal_morse.o $(OBJ_DIR)/lal_morse_ext.o \
$(OBJ_DIR)/lal_charmm_long.o $(OBJ_DIR)/lal_charmm_long_ext.o \
$(OBJ_DIR)/lal_lj_sdk.o $(OBJ_DIR)/lal_lj_sdk_ext.o \
$(OBJ_DIR)/lal_lj_sdk_long.o $(OBJ_DIR)/lal_lj_sdk_long_ext.o \
$(OBJ_DIR)/lal_eam.o $(OBJ_DIR)/lal_eam_ext.o \
$(OBJ_DIR)/lal_eam_fs_ext.o $(OBJ_DIR)/lal_eam_alloy_ext.o \
$(OBJ_DIR)/lal_buck.o $(OBJ_DIR)/lal_buck_ext.o \
$(OBJ_DIR)/lal_buck_coul.o $(OBJ_DIR)/lal_buck_coul_ext.o \
$(OBJ_DIR)/lal_buck_coul_long.o $(OBJ_DIR)/lal_buck_coul_long_ext.o \
$(OBJ_DIR)/lal_table.o $(OBJ_DIR)/lal_table_ext.o \
$(OBJ_DIR)/lal_yukawa.o $(OBJ_DIR)/lal_yukawa_ext.o \
$(OBJ_DIR)/lal_born.o $(OBJ_DIR)/lal_born_ext.o \
$(OBJ_DIR)/lal_born_coul_wolf.o $(OBJ_DIR)/lal_born_coul_wolf_ext.o \
$(OBJ_DIR)/lal_born_coul_long.o $(OBJ_DIR)/lal_born_coul_long_ext.o \
$(OBJ_DIR)/lal_dipole_lj.o $(OBJ_DIR)/lal_dipole_lj_ext.o \
$(OBJ_DIR)/lal_dipole_lj_sf.o $(OBJ_DIR)/lal_dipole_lj_sf_ext.o \
$(OBJ_DIR)/lal_colloid.o $(OBJ_DIR)/lal_colloid_ext.o \
$(OBJ_DIR)/lal_gauss.o $(OBJ_DIR)/lal_gauss_ext.o \
$(OBJ_DIR)/lal_yukawa_colloid.o $(OBJ_DIR)/lal_yukawa_colloid_ext.o \
$(OBJ_DIR)/lal_lj_coul_debye.o $(OBJ_DIR)/lal_lj_coul_debye_ext.o \
$(OBJ_DIR)/lal_coul_dsf.o $(OBJ_DIR)/lal_coul_dsf_ext.o \
$(OBJ_DIR)/lal_sw.o $(OBJ_DIR)/lal_sw_ext.o \
$(OBJ_DIR)/lal_vashishta.o $(OBJ_DIR)/lal_vashishta_ext.o \
$(OBJ_DIR)/lal_beck.o $(OBJ_DIR)/lal_beck_ext.o \
$(OBJ_DIR)/lal_mie.o $(OBJ_DIR)/lal_mie_ext.o \
$(OBJ_DIR)/lal_soft.o $(OBJ_DIR)/lal_soft_ext.o \
$(OBJ_DIR)/lal_lj_coul_msm.o $(OBJ_DIR)/lal_lj_coul_msm_ext.o \
$(OBJ_DIR)/lal_lj_gromacs.o $(OBJ_DIR)/lal_lj_gromacs_ext.o \
$(OBJ_DIR)/lal_dpd.o $(OBJ_DIR)/lal_dpd_ext.o $(OBJ_DIR)/lal_dpd_tstat_ext.o \
$(OBJ_DIR)/lal_tersoff.o $(OBJ_DIR)/lal_tersoff_ext.o \
$(OBJ_DIR)/lal_tersoff_zbl.o $(OBJ_DIR)/lal_tersoff_zbl_ext.o \
$(OBJ_DIR)/lal_tersoff_mod.o $(OBJ_DIR)/lal_tersoff_mod_ext.o \
$(OBJ_DIR)/lal_coul.o $(OBJ_DIR)/lal_coul_ext.o \
$(OBJ_DIR)/lal_coul_debye.o $(OBJ_DIR)/lal_coul_debye_ext.o \
$(OBJ_DIR)/lal_zbl.o $(OBJ_DIR)/lal_zbl_ext.o \
$(OBJ_DIR)/lal_lj_cubic.o $(OBJ_DIR)/lal_lj_cubic_ext.o \
$(OBJ_DIR)/lal_ufm.o $(OBJ_DIR)/lal_ufm_ext.o \
$(OBJ_DIR)/lal_dipole_long_lj.o $(OBJ_DIR)/lal_dipole_long_lj_ext.o \
$(OBJ_DIR)/lal_lj_expand_coul_long.o $(OBJ_DIR)/lal_lj_expand_coul_long_ext.o \
$(OBJ_DIR)/lal_coul_long_cs.o $(OBJ_DIR)/lal_coul_long_cs_ext.o \
$(OBJ_DIR)/lal_born_coul_long_cs.o $(OBJ_DIR)/lal_born_coul_long_cs_ext.o \
$(OBJ_DIR)/lal_born_coul_wolf_cs.o $(OBJ_DIR)/lal_born_coul_wolf_cs_ext.o \
$(OBJ_DIR)/lal_lj_tip4p_long.o $(OBJ_DIR)/lal_lj_tip4p_long_ext.o
CBNS = $(OBJ_DIR)/device.cubin $(OBJ_DIR)/device_cubin.h \
$(OBJ_DIR)/atom.cubin $(OBJ_DIR)/atom_cubin.h \
$(OBJ_DIR)/neighbor_cpu.cubin $(OBJ_DIR)/neighbor_cpu_cubin.h \
$(OBJ_DIR)/neighbor_gpu.cubin $(OBJ_DIR)/neighbor_gpu_cubin.h \
$(OBJ_DIR)/pppm_f.cubin $(OBJ_DIR)/pppm_f_cubin.h \
$(OBJ_DIR)/pppm_d.cubin $(OBJ_DIR)/pppm_d_cubin.h \
$(OBJ_DIR)/ellipsoid_nbor.cubin $(OBJ_DIR)/ellipsoid_nbor_cubin.h \
$(OBJ_DIR)/gayberne.cubin $(OBJ_DIR)/gayberne_lj.cubin \
$(OBJ_DIR)/gayberne_cubin.h $(OBJ_DIR)/gayberne_lj_cubin.h \
$(OBJ_DIR)/re_squared.cubin $(OBJ_DIR)/re_squared_lj.cubin \
$(OBJ_DIR)/re_squared_cubin.h $(OBJ_DIR)/re_squared_lj_cubin.h \
$(OBJ_DIR)/lj.cubin $(OBJ_DIR)/lj_cubin.h \
$(OBJ_DIR)/lj96.cubin $(OBJ_DIR)/lj96_cubin.h \
$(OBJ_DIR)/lj_expand.cubin $(OBJ_DIR)/lj_expand_cubin.h \
$(OBJ_DIR)/lj_coul.cubin $(OBJ_DIR)/lj_coul_cubin.h \
$(OBJ_DIR)/lj_coul_long.cubin $(OBJ_DIR)/lj_coul_long_cubin.h \
$(OBJ_DIR)/lj_dsf.cubin $(OBJ_DIR)/lj_dsf_cubin.h \
$(OBJ_DIR)/lj_class2_long.cubin $(OBJ_DIR)/lj_class2_long_cubin.h \
$(OBJ_DIR)/coul_long.cubin $(OBJ_DIR)/coul_long_cubin.h \
$(OBJ_DIR)/morse.cubin $(OBJ_DIR)/morse_cubin.h \
$(OBJ_DIR)/charmm_long.cubin $(OBJ_DIR)/charmm_long_cubin.h \
$(OBJ_DIR)/lj_sdk.cubin $(OBJ_DIR)/lj_sdk_cubin.h \
$(OBJ_DIR)/lj_sdk_long.cubin $(OBJ_DIR)/lj_sdk_long_cubin.h \
$(OBJ_DIR)/eam.cubin $(OBJ_DIR)/eam_cubin.h \
$(OBJ_DIR)/buck.cubin $(OBJ_DIR)/buck_cubin.h \
$(OBJ_DIR)/buck_coul_long.cubin $(OBJ_DIR)/buck_coul_long_cubin.h \
$(OBJ_DIR)/buck_coul.cubin $(OBJ_DIR)/buck_coul_cubin.h \
$(OBJ_DIR)/table.cubin $(OBJ_DIR)/table_cubin.h \
$(OBJ_DIR)/yukawa.cubin $(OBJ_DIR)/yukawa_cubin.h \
$(OBJ_DIR)/born.cubin $(OBJ_DIR)/born_cubin.h \
$(OBJ_DIR)/born_coul_wolf.cubin $(OBJ_DIR)/born_coul_wolf_cubin.h \
$(OBJ_DIR)/born_coul_long.cubin $(OBJ_DIR)/born_coul_long_cubin.h \
$(OBJ_DIR)/dipole_lj.cubin $(OBJ_DIR)/dipole_lj_cubin.h \
$(OBJ_DIR)/dipole_lj_sf.cubin $(OBJ_DIR)/dipole_lj_sf_cubin.h \
$(OBJ_DIR)/colloid.cubin $(OBJ_DIR)/colloid_cubin.h \
$(OBJ_DIR)/gauss.cubin $(OBJ_DIR)/gauss_cubin.h \
$(OBJ_DIR)/yukawa_colloid.cubin $(OBJ_DIR)/yukawa_colloid_cubin.h \
$(OBJ_DIR)/lj_coul_debye.cubin $(OBJ_DIR)/lj_coul_debye_cubin.h \
$(OBJ_DIR)/coul_dsf.cubin $(OBJ_DIR)/coul_dsf_cubin.h \
$(OBJ_DIR)/sw.cubin $(OBJ_DIR)/sw_cubin.h \
$(OBJ_DIR)/vashishta.cubin $(OBJ_DIR)/vashishta_cubin.h \
$(OBJ_DIR)/beck.cubin $(OBJ_DIR)/beck_cubin.h \
$(OBJ_DIR)/mie.cubin $(OBJ_DIR)/mie_cubin.h \
$(OBJ_DIR)/soft.cubin $(OBJ_DIR)/soft_cubin.h \
$(OBJ_DIR)/lj_coul_msm.cubin $(OBJ_DIR)/lj_coul_msm_cubin.h \
$(OBJ_DIR)/lj_gromacs.cubin $(OBJ_DIR)/lj_gromacs_cubin.h \
$(OBJ_DIR)/dpd.cubin $(OBJ_DIR)/dpd_cubin.h \
$(OBJ_DIR)/tersoff.cubin $(OBJ_DIR)/tersoff_cubin.h \
$(OBJ_DIR)/tersoff_zbl.cubin $(OBJ_DIR)/tersoff_zbl_cubin.h \
$(OBJ_DIR)/tersoff_mod.cubin $(OBJ_DIR)/tersoff_mod_cubin.h \
$(OBJ_DIR)/coul.cubin $(OBJ_DIR)/coul_cubin.h \
$(OBJ_DIR)/coul_debye.cubin $(OBJ_DIR)/coul_debye_cubin.h \
$(OBJ_DIR)/zbl.cubin $(OBJ_DIR)/zbl_cubin.h \
$(OBJ_DIR)/lj_cubic.cubin $(OBJ_DIR)/lj_cubic_cubin.h \
$(OBJ_DIR)/ufm.cubin $(OBJ_DIR)/ufm_cubin.h \
$(OBJ_DIR)/dipole_long_lj.cubin $(OBJ_DIR)/dipole_long_lj_cubin.h \
$(OBJ_DIR)/lj_expand_coul_long.cubin $(OBJ_DIR)/lj_expand_coul_long_cubin.h \
$(OBJ_DIR)/coul_long_cs.cubin $(OBJ_DIR)/coul_long_cs_cubin.h \
$(OBJ_DIR)/born_coul_long_cs.cubin $(OBJ_DIR)/born_coul_long_cs_cubin.h \
$(OBJ_DIR)/born_coul_wolf_cs.cubin $(OBJ_DIR)/born_coul_wolf_cs_cubin.h \
$(OBJ_DIR)/lj_tip4p_long.cubin $(OBJ_DIR)/lj_tip4p_long_cubin.h
# targets
all: $(OBJ_DIR) $(GPU_LIB) $(EXECS)
GPU_LIB = $(LIB_DIR)/libgpu.a
EXECS = $(BIN_DIR)/nvc_get_devices
all: $(OBJ_DIR) $(CUHS) $(GPU_LIB) $(EXECS)
$(OBJ_DIR):
mkdir -p $@
# Compilers and linkers
CUDA = $(NVCC) $(CUDA_INCLUDE) $(CUDA_OPTS) -Icudpp_mini $(CUDA_ARCH) \
$(CUDA_PRECISION)
CUDR = $(CUDR_CPP) $(CUDR_OPTS) $(CUDA_PRECISION) $(CUDA_INCLUDE) \
$(CUDPP_OPT)
CUDA_LINK = $(CUDA_LIB) -lcudart
BIN2C = $(CUDA_HOME)/bin/bin2c
# device code compilation
$(OBJ_DIR)/pppm_f.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=float -Dgrdtyp4=float4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_f_cubin.h: $(OBJ_DIR)/pppm_f.cubin
$(BIN2C) -c -n pppm_f $(OBJ_DIR)/pppm_f.cubin > $(OBJ_DIR)/pppm_f_cubin.h
$(OBJ_DIR)/pppm_d.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=double -Dgrdtyp4=double4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_d_cubin.h: $(OBJ_DIR)/pppm_d.cubin
$(BIN2C) -c -n pppm_d $(OBJ_DIR)/pppm_d.cubin > $(OBJ_DIR)/pppm_d_cubin.h
$(OBJ_DIR)/%_cubin.h: lal_%.cu $(ALL_H)
$(CUDA) --fatbin -DNV_KERNEL -o $(OBJ_DIR)/$*.cubin $(OBJ_DIR)/lal_$*.cu
$(BIN2C) -c -n $* $(OBJ_DIR)/$*.cubin > $@
@rm $(OBJ_DIR)/$*.cubin
# host code compilation
$(OBJ_DIR)/lal_%.o: lal_%.cpp $(CUHS) $(ALL_H)
$(CUDR) -o $@ -c $< -I$(OBJ_DIR)
#ifdef CUDPP_OPT
$(OBJ_DIR)/cudpp.o: cudpp_mini/cudpp.cpp
$(CUDR) -o $@ -c cudpp_mini/cudpp.cpp -Icudpp_mini
@ -169,745 +79,24 @@ $(OBJ_DIR)/radixsort_app.cu_o: cudpp_mini/radixsort_app.cu
$(OBJ_DIR)/scan_app.cu_o: cudpp_mini/scan_app.cu
$(CUDA) -o $@ -c cudpp_mini/scan_app.cu
#endif
$(OBJ_DIR)/atom.cubin: lal_atom.cu lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_atom.cu
$(OBJ_DIR)/atom_cubin.h: $(OBJ_DIR)/atom.cubin
$(BIN2C) -c -n atom $(OBJ_DIR)/atom.cubin > $(OBJ_DIR)/atom_cubin.h
$(OBJ_DIR)/lal_atom.o: lal_atom.cpp lal_atom.h $(NVD_H) $(OBJ_DIR)/atom_cubin.h
$(CUDR) -o $@ -c lal_atom.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_ans.o: lal_answer.cpp lal_answer.h $(NVD_H)
$(CUDR) -o $@ -c lal_answer.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/neighbor_cpu.cubin: lal_neighbor_cpu.cu lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_neighbor_cpu.cu
$(OBJ_DIR)/neighbor_cpu_cubin.h: $(OBJ_DIR)/neighbor_cpu.cubin
$(BIN2C) -c -n neighbor_cpu $(OBJ_DIR)/neighbor_cpu.cubin > $(OBJ_DIR)/neighbor_cpu_cubin.h
$(OBJ_DIR)/neighbor_gpu.cubin: lal_neighbor_gpu.cu lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_neighbor_gpu.cu
$(OBJ_DIR)/neighbor_gpu_cubin.h: $(OBJ_DIR)/neighbor_gpu.cubin
$(BIN2C) -c -n neighbor_gpu $(OBJ_DIR)/neighbor_gpu.cubin > $(OBJ_DIR)/neighbor_gpu_cubin.h
$(OBJ_DIR)/lal_neighbor_shared.o: lal_neighbor_shared.cpp lal_neighbor_shared.h $(OBJ_DIR)/neighbor_cpu_cubin.h $(OBJ_DIR)/neighbor_gpu_cubin.h $(NVD_H)
$(CUDR) -o $@ -c lal_neighbor_shared.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_neighbor.o: lal_neighbor.cpp lal_neighbor.h lal_neighbor_shared.h $(NVD_H)
$(CUDR) -o $@ -c lal_neighbor.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/device.cubin: lal_device.cu lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_device.cu
$(OBJ_DIR)/device_cubin.h: $(OBJ_DIR)/device.cubin
$(BIN2C) -c -n device $(OBJ_DIR)/device.cubin > $(OBJ_DIR)/device_cubin.h
$(OBJ_DIR)/lal_device.o: lal_device.cpp lal_device.h $(ALL_H) $(OBJ_DIR)/device_cubin.h
$(CUDR) -o $@ -c lal_device.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_atomic.o: $(ALL_H) lal_base_atomic.h lal_base_atomic.cpp
$(CUDR) -o $@ -c lal_base_atomic.cpp
$(OBJ_DIR)/lal_base_charge.o: $(ALL_H) lal_base_charge.h lal_base_charge.cpp
$(CUDR) -o $@ -c lal_base_charge.cpp
$(OBJ_DIR)/lal_base_ellipsoid.o: $(ALL_H) lal_base_ellipsoid.h lal_base_ellipsoid.cpp $(OBJ_DIR)/ellipsoid_nbor_cubin.h
$(CUDR) -o $@ -c lal_base_ellipsoid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_dipole.o: $(ALL_H) lal_base_dipole.h lal_base_dipole.cpp
$(CUDR) -o $@ -c lal_base_dipole.cpp
$(OBJ_DIR)/lal_base_three.o: $(ALL_H) lal_base_three.h lal_base_three.cpp
$(CUDR) -o $@ -c lal_base_three.cpp
$(OBJ_DIR)/lal_base_dpd.o: $(ALL_H) lal_base_dpd.h lal_base_dpd.cpp
$(CUDR) -o $@ -c lal_base_dpd.cpp
$(OBJ_DIR)/pppm_f.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -Dgrdtyp=float -Dgrdtyp4=float4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_f_cubin.h: $(OBJ_DIR)/pppm_f.cubin
$(BIN2C) -c -n pppm_f $(OBJ_DIR)/pppm_f.cubin > $(OBJ_DIR)/pppm_f_cubin.h
$(OBJ_DIR)/pppm_d.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -Dgrdtyp=double -Dgrdtyp4=double4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_d_cubin.h: $(OBJ_DIR)/pppm_d.cubin
$(BIN2C) -c -n pppm_d $(OBJ_DIR)/pppm_d.cubin > $(OBJ_DIR)/pppm_d_cubin.h
$(OBJ_DIR)/lal_pppm.o: $(ALL_H) lal_pppm.h lal_pppm.cpp $(OBJ_DIR)/pppm_f_cubin.h $(OBJ_DIR)/pppm_d_cubin.h
$(CUDR) -o $@ -c lal_pppm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_pppm_ext.o: $(ALL_H) lal_pppm.h lal_pppm_ext.cpp
$(CUDR) -o $@ -c lal_pppm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/ellipsoid_nbor.cubin: lal_ellipsoid_nbor.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_ellipsoid_nbor.cu
$(OBJ_DIR)/ellipsoid_nbor_cubin.h: $(OBJ_DIR)/ellipsoid_nbor.cubin
$(BIN2C) -c -n ellipsoid_nbor $(OBJ_DIR)/ellipsoid_nbor.cubin > $(OBJ_DIR)/ellipsoid_nbor_cubin.h
$(OBJ_DIR)/gayberne.cubin: lal_gayberne.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_gayberne.cu
$(OBJ_DIR)/gayberne_lj.cubin: lal_gayberne_lj.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_gayberne_lj.cu
$(OBJ_DIR)/gayberne_cubin.h: $(OBJ_DIR)/gayberne.cubin
$(BIN2C) -c -n gayberne $(OBJ_DIR)/gayberne.cubin > $(OBJ_DIR)/gayberne_cubin.h
$(OBJ_DIR)/gayberne_lj_cubin.h: $(OBJ_DIR)/gayberne_lj.cubin
$(BIN2C) -c -n gayberne_lj $(OBJ_DIR)/gayberne_lj.cubin > $(OBJ_DIR)/gayberne_lj_cubin.h
$(OBJ_DIR)/lal_gayberne.o: $(ALL_H) lal_gayberne.h lal_gayberne.cpp $(OBJ_DIR)/gayberne_cubin.h $(OBJ_DIR)/gayberne_lj_cubin.h $(OBJ_DIR)/lal_base_ellipsoid.o
$(CUDR) -o $@ -c lal_gayberne.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_gayberne_ext.o: $(ALL_H) $(OBJ_DIR)/lal_gayberne.o lal_gayberne_ext.cpp
$(CUDR) -o $@ -c lal_gayberne_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/re_squared.cubin: lal_re_squared.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_re_squared.cu
$(OBJ_DIR)/re_squared_lj.cubin: lal_re_squared_lj.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_re_squared_lj.cu
$(OBJ_DIR)/re_squared_cubin.h: $(OBJ_DIR)/re_squared.cubin
$(BIN2C) -c -n re_squared $(OBJ_DIR)/re_squared.cubin > $(OBJ_DIR)/re_squared_cubin.h
$(OBJ_DIR)/re_squared_lj_cubin.h: $(OBJ_DIR)/re_squared_lj.cubin
$(BIN2C) -c -n re_squared_lj $(OBJ_DIR)/re_squared_lj.cubin > $(OBJ_DIR)/re_squared_lj_cubin.h
$(OBJ_DIR)/lal_re_squared.o: $(ALL_H) lal_re_squared.h lal_re_squared.cpp $(OBJ_DIR)/re_squared_cubin.h $(OBJ_DIR)/re_squared_lj_cubin.h $(OBJ_DIR)/lal_base_ellipsoid.o
$(CUDR) -o $@ -c lal_re_squared.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_re_squared_ext.o: $(ALL_H) $(OBJ_DIR)/lal_re_squared.o lal_re_squared_ext.cpp
$(CUDR) -o $@ -c lal_re_squared_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj.cubin: lal_lj.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj.cu
$(OBJ_DIR)/lj_cubin.h: $(OBJ_DIR)/lj.cubin $(OBJ_DIR)/lj.cubin
$(BIN2C) -c -n lj $(OBJ_DIR)/lj.cubin > $(OBJ_DIR)/lj_cubin.h
$(OBJ_DIR)/lal_lj.o: $(ALL_H) lal_lj.h lal_lj.cpp $(OBJ_DIR)/lj_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_ext.o: $(ALL_H) lal_lj.h lal_lj_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_tip4p_long.cubin: lal_lj_tip4p_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_tip4p_long.cu
$(OBJ_DIR)/lj_tip4p_long_cubin.h: $(OBJ_DIR)/lj_tip4p_long.cubin $(OBJ_DIR)/lj_tip4p_long.cubin
$(BIN2C) -c -n lj_tip4p_long $(OBJ_DIR)/lj_tip4p_long.cubin > $(OBJ_DIR)/lj_tip4p_long_cubin.h
$(OBJ_DIR)/lal_lj_tip4p_long.o: $(ALL_H) lal_lj_tip4p_long.h lal_lj_tip4p_long.cpp $(OBJ_DIR)/lj_tip4p_long_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_tip4p_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_tip4p_long_ext.o: $(ALL_H) lal_lj_tip4p_long.h lal_lj_tip4p_long_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_tip4p_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul.cubin: lal_lj_coul.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_coul.cu
$(OBJ_DIR)/lj_coul_cubin.h: $(OBJ_DIR)/lj_coul.cubin $(OBJ_DIR)/lj_coul.cubin
$(BIN2C) -c -n lj_coul $(OBJ_DIR)/lj_coul.cubin > $(OBJ_DIR)/lj_coul_cubin.h
$(OBJ_DIR)/lal_lj_coul.o: $(ALL_H) lal_lj_coul.h lal_lj_coul.cpp $(OBJ_DIR)/lj_coul_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_ext.o: $(ALL_H) lal_lj_coul.h lal_lj_coul_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_class2_long.cubin: lal_lj_class2_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_class2_long.cu
$(OBJ_DIR)/lj_class2_long_cubin.h: $(OBJ_DIR)/lj_class2_long.cubin $(OBJ_DIR)/lj_class2_long.cubin
$(BIN2C) -c -n lj_class2_long $(OBJ_DIR)/lj_class2_long.cubin > $(OBJ_DIR)/lj_class2_long_cubin.h
$(OBJ_DIR)/lal_lj_class2_long.o: $(ALL_H) lal_lj_class2_long.h lal_lj_class2_long.cpp $(OBJ_DIR)/lj_class2_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_class2_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_class2_long_ext.o: $(ALL_H) lal_lj_class2_long.h lal_lj_class2_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_class2_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_long.cubin: lal_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_coul_long.cu
$(OBJ_DIR)/coul_long_cubin.h: $(OBJ_DIR)/coul_long.cubin $(OBJ_DIR)/coul_long.cubin
$(BIN2C) -c -n coul_long $(OBJ_DIR)/coul_long.cubin > $(OBJ_DIR)/coul_long_cubin.h
$(OBJ_DIR)/lal_coul_long.o: $(ALL_H) lal_coul_long.h lal_coul_long.cpp $(OBJ_DIR)/coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_long_ext.o: $(ALL_H) lal_coul_long.h lal_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_long.cubin: lal_lj_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_coul_long.cu
$(OBJ_DIR)/lj_coul_long_cubin.h: $(OBJ_DIR)/lj_coul_long.cubin $(OBJ_DIR)/lj_coul_long.cubin
$(BIN2C) -c -n lj_coul_long $(OBJ_DIR)/lj_coul_long.cubin > $(OBJ_DIR)/lj_coul_long_cubin.h
$(OBJ_DIR)/lal_lj_coul_long.o: $(ALL_H) lal_lj_coul_long.h lal_lj_coul_long.cpp $(OBJ_DIR)/lj_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_long_ext.o: $(ALL_H) lal_lj_coul_long.h lal_lj_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_dsf.cubin: lal_lj_dsf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_dsf.cu
$(OBJ_DIR)/lj_dsf_cubin.h: $(OBJ_DIR)/lj_dsf.cubin $(OBJ_DIR)/lj_dsf.cubin
$(BIN2C) -c -n lj_dsf $(OBJ_DIR)/lj_dsf.cubin > $(OBJ_DIR)/lj_dsf_cubin.h
$(OBJ_DIR)/lal_lj_dsf.o: $(ALL_H) lal_lj_dsf.h lal_lj_dsf.cpp $(OBJ_DIR)/lj_dsf_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_dsf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_dsf_ext.o: $(ALL_H) lal_lj_dsf.h lal_lj_dsf_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_dsf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/morse.cubin: lal_morse.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_morse.cu
$(OBJ_DIR)/morse_cubin.h: $(OBJ_DIR)/morse.cubin $(OBJ_DIR)/morse.cubin
$(BIN2C) -c -n morse $(OBJ_DIR)/morse.cubin > $(OBJ_DIR)/morse_cubin.h
$(OBJ_DIR)/lal_morse.o: $(ALL_H) lal_morse.h lal_morse.cpp $(OBJ_DIR)/morse_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_morse.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_morse_ext.o: $(ALL_H) lal_morse.h lal_morse_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_morse_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/charmm_long.cubin: lal_charmm_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_charmm_long.cu
$(OBJ_DIR)/charmm_long_cubin.h: $(OBJ_DIR)/charmm_long.cubin $(OBJ_DIR)/charmm_long.cubin
$(BIN2C) -c -n charmm_long $(OBJ_DIR)/charmm_long.cubin > $(OBJ_DIR)/charmm_long_cubin.h
$(OBJ_DIR)/lal_charmm_long.o: $(ALL_H) lal_charmm_long.h lal_charmm_long.cpp $(OBJ_DIR)/charmm_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_charmm_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_charmm_long_ext.o: $(ALL_H) lal_charmm_long.h lal_charmm_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_charmm_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj96.cubin: lal_lj96.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj96.cu
$(OBJ_DIR)/lj96_cubin.h: $(OBJ_DIR)/lj96.cubin $(OBJ_DIR)/lj96.cubin
$(BIN2C) -c -n lj96 $(OBJ_DIR)/lj96.cubin > $(OBJ_DIR)/lj96_cubin.h
$(OBJ_DIR)/lal_lj96.o: $(ALL_H) lal_lj96.h lal_lj96.cpp $(OBJ_DIR)/lj96_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj96.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj96_ext.o: $(ALL_H) lal_lj96.h lal_lj96_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj96_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_expand.cubin: lal_lj_expand.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_expand.cu
$(OBJ_DIR)/lj_expand_cubin.h: $(OBJ_DIR)/lj_expand.cubin $(OBJ_DIR)/lj_expand.cubin
$(BIN2C) -c -n lj_expand $(OBJ_DIR)/lj_expand.cubin > $(OBJ_DIR)/lj_expand_cubin.h
$(OBJ_DIR)/lal_lj_expand.o: $(ALL_H) lal_lj_expand.h lal_lj_expand.cpp $(OBJ_DIR)/lj_expand_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_expand.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_expand_ext.o: $(ALL_H) lal_lj_expand.h lal_lj_expand_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_expand_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_sdk.cubin: lal_lj_sdk.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_sdk.cu
$(OBJ_DIR)/lj_sdk_cubin.h: $(OBJ_DIR)/lj_sdk.cubin $(OBJ_DIR)/lj_sdk.cubin
$(BIN2C) -c -n lj_sdk $(OBJ_DIR)/lj_sdk.cubin > $(OBJ_DIR)/lj_sdk_cubin.h
$(OBJ_DIR)/lal_lj_sdk.o: $(ALL_H) lal_lj_sdk.h lal_lj_sdk.cpp $(OBJ_DIR)/lj_sdk_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_sdk.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_sdk_ext.o: $(ALL_H) lal_lj_sdk.h lal_lj_sdk_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_sdk_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_sdk_long.cubin: lal_lj_sdk_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_sdk_long.cu
$(OBJ_DIR)/lj_sdk_long_cubin.h: $(OBJ_DIR)/lj_sdk_long.cubin $(OBJ_DIR)/lj_sdk_long.cubin
$(BIN2C) -c -n lj_sdk_long $(OBJ_DIR)/lj_sdk_long.cubin > $(OBJ_DIR)/lj_sdk_long_cubin.h
$(OBJ_DIR)/lal_lj_sdk_long.o: $(ALL_H) lal_lj_sdk_long.h lal_lj_sdk_long.cpp $(OBJ_DIR)/lj_sdk_long_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_sdk_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_sdk_long_ext.o: $(ALL_H) lal_lj_sdk_long.h lal_lj_sdk_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_sdk_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/eam.cubin: lal_eam.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_eam.cu
$(OBJ_DIR)/eam_cubin.h: $(OBJ_DIR)/eam.cubin $(OBJ_DIR)/eam.cubin
$(BIN2C) -c -n eam $(OBJ_DIR)/eam.cubin > $(OBJ_DIR)/eam_cubin.h
$(OBJ_DIR)/lal_eam.o: $(ALL_H) lal_eam.h lal_eam.cpp $(OBJ_DIR)/eam_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_eam.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_ext.o: $(ALL_H) lal_eam.h lal_eam_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_eam_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_fs_ext.o: $(ALL_H) lal_eam.h lal_eam_fs_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_eam_fs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_alloy_ext.o: $(ALL_H) lal_eam.h lal_eam_alloy_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_eam_alloy_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck.cubin: lal_buck.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_buck.cu
$(OBJ_DIR)/buck_cubin.h: $(OBJ_DIR)/buck.cubin $(OBJ_DIR)/buck.cubin
$(BIN2C) -c -n buck $(OBJ_DIR)/buck.cubin > $(OBJ_DIR)/buck_cubin.h
$(OBJ_DIR)/lal_buck.o: $(ALL_H) lal_buck.h lal_buck.cpp $(OBJ_DIR)/buck_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_buck.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_ext.o: $(ALL_H) lal_buck.h lal_buck_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_buck_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_coul.cubin: lal_buck_coul.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_buck_coul.cu
$(OBJ_DIR)/buck_coul_cubin.h: $(OBJ_DIR)/buck_coul.cubin $(OBJ_DIR)/buck_coul.cubin
$(BIN2C) -c -n buck_coul $(OBJ_DIR)/buck_coul.cubin > $(OBJ_DIR)/buck_coul_cubin.h
$(OBJ_DIR)/lal_buck_coul.o: $(ALL_H) lal_buck_coul.h lal_buck_coul.cpp $(OBJ_DIR)/buck_coul_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_buck_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_coul_ext.o: $(ALL_H) lal_buck_coul.h lal_buck_coul_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_buck_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_coul_long.cubin: lal_buck_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_buck_coul_long.cu
$(OBJ_DIR)/buck_coul_long_cubin.h: $(OBJ_DIR)/buck_coul_long.cubin $(OBJ_DIR)/buck_coul_long.cubin
$(BIN2C) -c -n buck_coul_long $(OBJ_DIR)/buck_coul_long.cubin > $(OBJ_DIR)/buck_coul_long_cubin.h
$(OBJ_DIR)/lal_buck_coul_long.o: $(ALL_H) lal_buck_coul_long.h lal_buck_coul_long.cpp $(OBJ_DIR)/buck_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_buck_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_coul_long_ext.o: $(ALL_H) lal_buck_coul_long.h lal_buck_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_buck_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/table.cubin: lal_table.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_table.cu
$(OBJ_DIR)/table_cubin.h: $(OBJ_DIR)/table.cubin $(OBJ_DIR)/table.cubin
$(BIN2C) -c -n table $(OBJ_DIR)/table.cubin > $(OBJ_DIR)/table_cubin.h
$(OBJ_DIR)/lal_table.o: $(ALL_H) lal_table.h lal_table.cpp $(OBJ_DIR)/table_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_table.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_table_ext.o: $(ALL_H) lal_table.h lal_table_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_table_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/yukawa.cubin: lal_yukawa.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_yukawa.cu
$(OBJ_DIR)/yukawa_cubin.h: $(OBJ_DIR)/yukawa.cubin $(OBJ_DIR)/yukawa.cubin
$(BIN2C) -c -n yukawa $(OBJ_DIR)/yukawa.cubin > $(OBJ_DIR)/yukawa_cubin.h
$(OBJ_DIR)/lal_yukawa.o: $(ALL_H) lal_yukawa.h lal_yukawa.cpp $(OBJ_DIR)/yukawa_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_yukawa.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_yukawa_ext.o: $(ALL_H) lal_yukawa.h lal_yukawa_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_yukawa_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born.cubin: lal_born.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_born.cu
$(OBJ_DIR)/born_cubin.h: $(OBJ_DIR)/born.cubin $(OBJ_DIR)/born.cubin
$(BIN2C) -c -n born $(OBJ_DIR)/born.cubin > $(OBJ_DIR)/born_cubin.h
$(OBJ_DIR)/lal_born.o: $(ALL_H) lal_born.h lal_born.cpp $(OBJ_DIR)/born_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_born.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_ext.o: $(ALL_H) lal_born.h lal_born_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_born_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_wolf.cubin: lal_born_coul_wolf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_born_coul_wolf.cu
$(OBJ_DIR)/born_coul_wolf_cubin.h: $(OBJ_DIR)/born_coul_wolf.cubin $(OBJ_DIR)/born_coul_wolf.cubin
$(BIN2C) -c -n born_coul_wolf $(OBJ_DIR)/born_coul_wolf.cubin > $(OBJ_DIR)/born_coul_wolf_cubin.h
$(OBJ_DIR)/lal_born_coul_wolf.o: $(ALL_H) lal_born_coul_wolf.h lal_born_coul_wolf.cpp $(OBJ_DIR)/born_coul_wolf_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_born_coul_wolf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_wolf_ext.o: $(ALL_H) lal_born_coul_wolf.h lal_born_coul_wolf_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_born_coul_wolf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_long.cubin: lal_born_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_born_coul_long.cu
$(OBJ_DIR)/born_coul_long_cubin.h: $(OBJ_DIR)/born_coul_long.cubin $(OBJ_DIR)/born_coul_long.cubin
$(BIN2C) -c -n born_coul_long $(OBJ_DIR)/born_coul_long.cubin > $(OBJ_DIR)/born_coul_long_cubin.h
$(OBJ_DIR)/lal_born_coul_long.o: $(ALL_H) lal_born_coul_long.h lal_born_coul_long.cpp $(OBJ_DIR)/born_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_born_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_long_ext.o: $(ALL_H) lal_born_coul_long.h lal_born_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_born_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_lj.cubin: lal_dipole_lj.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_dipole_lj.cu
$(OBJ_DIR)/dipole_lj_cubin.h: $(OBJ_DIR)/dipole_lj.cubin $(OBJ_DIR)/dipole_lj.cubin
$(BIN2C) -c -n dipole_lj $(OBJ_DIR)/dipole_lj.cubin > $(OBJ_DIR)/dipole_lj_cubin.h
$(OBJ_DIR)/lal_dipole_lj.o: $(ALL_H) lal_dipole_lj.h lal_dipole_lj.cpp $(OBJ_DIR)/dipole_lj_cubin.h $(OBJ_DIR)/lal_base_dipole.o
$(CUDR) -o $@ -c lal_dipole_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_lj_ext.o: $(ALL_H) lal_dipole_lj.h lal_dipole_lj_ext.cpp lal_base_dipole.h
$(CUDR) -o $@ -c lal_dipole_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_lj_sf.cubin: lal_dipole_lj_sf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_dipole_lj_sf.cu
$(OBJ_DIR)/dipole_lj_sf_cubin.h: $(OBJ_DIR)/dipole_lj_sf.cubin $(OBJ_DIR)/dipole_lj_sf.cubin
$(BIN2C) -c -n dipole_lj_sf $(OBJ_DIR)/dipole_lj_sf.cubin > $(OBJ_DIR)/dipole_lj_sf_cubin.h
$(OBJ_DIR)/lal_dipole_lj_sf.o: $(ALL_H) lal_dipole_lj_sf.h lal_dipole_lj_sf.cpp $(OBJ_DIR)/dipole_lj_sf_cubin.h $(OBJ_DIR)/lal_base_dipole.o
$(CUDR) -o $@ -c lal_dipole_lj_sf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_lj_sf_ext.o: $(ALL_H) lal_dipole_lj_sf.h lal_dipole_lj_sf_ext.cpp lal_base_dipole.h
$(CUDR) -o $@ -c lal_dipole_lj_sf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/colloid.cubin: lal_colloid.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_colloid.cu
$(OBJ_DIR)/colloid_cubin.h: $(OBJ_DIR)/colloid.cubin $(OBJ_DIR)/colloid.cubin
$(BIN2C) -c -n colloid $(OBJ_DIR)/colloid.cubin > $(OBJ_DIR)/colloid_cubin.h
$(OBJ_DIR)/lal_colloid.o: $(ALL_H) lal_colloid.h lal_colloid.cpp $(OBJ_DIR)/colloid_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_colloid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_colloid_ext.o: $(ALL_H) lal_colloid.h lal_colloid_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_colloid_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/gauss.cubin: lal_gauss.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_gauss.cu
$(OBJ_DIR)/gauss_cubin.h: $(OBJ_DIR)/gauss.cubin $(OBJ_DIR)/gauss.cubin
$(BIN2C) -c -n gauss $(OBJ_DIR)/gauss.cubin > $(OBJ_DIR)/gauss_cubin.h
$(OBJ_DIR)/lal_gauss.o: $(ALL_H) lal_gauss.h lal_gauss.cpp $(OBJ_DIR)/gauss_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_gauss.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_gauss_ext.o: $(ALL_H) lal_gauss.h lal_gauss_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_gauss_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/yukawa_colloid.cubin: lal_yukawa_colloid.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_yukawa_colloid.cu
$(OBJ_DIR)/yukawa_colloid_cubin.h: $(OBJ_DIR)/yukawa_colloid.cubin $(OBJ_DIR)/yukawa_colloid.cubin
$(BIN2C) -c -n yukawa_colloid $(OBJ_DIR)/yukawa_colloid.cubin > $(OBJ_DIR)/yukawa_colloid_cubin.h
$(OBJ_DIR)/lal_yukawa_colloid.o: $(ALL_H) lal_yukawa_colloid.h lal_yukawa_colloid.cpp $(OBJ_DIR)/yukawa_colloid_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_yukawa_colloid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_yukawa_colloid_ext.o: $(ALL_H) lal_yukawa_colloid.h lal_yukawa_colloid_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_yukawa_colloid_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_debye.cubin: lal_lj_coul_debye.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_coul_debye.cu
$(OBJ_DIR)/lj_coul_debye_cubin.h: $(OBJ_DIR)/lj_coul_debye.cubin $(OBJ_DIR)/lj_coul_debye.cubin
$(BIN2C) -c -n lj_coul_debye $(OBJ_DIR)/lj_coul_debye.cubin > $(OBJ_DIR)/lj_coul_debye_cubin.h
$(OBJ_DIR)/lal_lj_coul_debye.o: $(ALL_H) lal_lj_coul_debye.h lal_lj_coul_debye.cpp $(OBJ_DIR)/lj_coul_debye_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul_debye.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_debye_ext.o: $(ALL_H) lal_lj_coul_debye.h lal_lj_coul_debye_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_debye_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_dsf.cubin: lal_coul_dsf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_coul_dsf.cu
$(OBJ_DIR)/coul_dsf_cubin.h: $(OBJ_DIR)/coul_dsf.cubin $(OBJ_DIR)/coul_dsf.cubin
$(BIN2C) -c -n coul_dsf $(OBJ_DIR)/coul_dsf.cubin > $(OBJ_DIR)/coul_dsf_cubin.h
$(OBJ_DIR)/lal_coul_dsf.o: $(ALL_H) lal_coul_dsf.h lal_coul_dsf.cpp $(OBJ_DIR)/coul_dsf_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul_dsf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_dsf_ext.o: $(ALL_H) lal_coul_dsf.h lal_coul_dsf_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_dsf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/vashishta.cubin: lal_vashishta.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_vashishta.cu
$(OBJ_DIR)/vashishta_cubin.h: $(OBJ_DIR)/vashishta.cubin $(OBJ_DIR)/vashishta.cubin
$(BIN2C) -c -n vashishta $(OBJ_DIR)/vashishta.cubin > $(OBJ_DIR)/vashishta_cubin.h
$(OBJ_DIR)/lal_vashishta.o: $(ALL_H) lal_vashishta.h lal_vashishta.cpp $(OBJ_DIR)/vashishta_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_vashishta.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_vashishta_ext.o: $(ALL_H) lal_vashishta.h lal_vashishta_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_vashishta_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/sw.cubin: lal_sw.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_sw.cu
$(OBJ_DIR)/sw_cubin.h: $(OBJ_DIR)/sw.cubin $(OBJ_DIR)/sw.cubin
$(BIN2C) -c -n sw $(OBJ_DIR)/sw.cubin > $(OBJ_DIR)/sw_cubin.h
$(OBJ_DIR)/lal_sw.o: $(ALL_H) lal_sw.h lal_sw.cpp $(OBJ_DIR)/sw_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_sw.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_sw_ext.o: $(ALL_H) lal_sw.h lal_sw_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_sw_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/beck.cubin: lal_beck.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_beck.cu
$(OBJ_DIR)/beck_cubin.h: $(OBJ_DIR)/beck.cubin $(OBJ_DIR)/beck.cubin
$(BIN2C) -c -n beck $(OBJ_DIR)/beck.cubin > $(OBJ_DIR)/beck_cubin.h
$(OBJ_DIR)/lal_beck.o: $(ALL_H) lal_beck.h lal_beck.cpp $(OBJ_DIR)/beck_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_beck.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_beck_ext.o: $(ALL_H) lal_beck.h lal_beck_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_beck_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/mie.cubin: lal_mie.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_mie.cu
$(OBJ_DIR)/mie_cubin.h: $(OBJ_DIR)/mie.cubin $(OBJ_DIR)/mie.cubin
$(BIN2C) -c -n mie $(OBJ_DIR)/mie.cubin > $(OBJ_DIR)/mie_cubin.h
$(OBJ_DIR)/lal_mie.o: $(ALL_H) lal_mie.h lal_mie.cpp $(OBJ_DIR)/mie_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_mie.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_mie_ext.o: $(ALL_H) lal_mie.h lal_mie_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_mie_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/soft.cubin: lal_soft.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_soft.cu
$(OBJ_DIR)/soft_cubin.h: $(OBJ_DIR)/soft.cubin $(OBJ_DIR)/soft.cubin
$(BIN2C) -c -n soft $(OBJ_DIR)/soft.cubin > $(OBJ_DIR)/soft_cubin.h
$(OBJ_DIR)/lal_soft.o: $(ALL_H) lal_soft.h lal_soft.cpp $(OBJ_DIR)/soft_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_soft.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_soft_ext.o: $(ALL_H) lal_soft.h lal_soft_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_soft_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_msm.cubin: lal_lj_coul_msm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_coul_msm.cu
$(OBJ_DIR)/lj_coul_msm_cubin.h: $(OBJ_DIR)/lj_coul_msm.cubin $(OBJ_DIR)/lj_coul_msm.cubin
$(BIN2C) -c -n lj_coul_msm $(OBJ_DIR)/lj_coul_msm.cubin > $(OBJ_DIR)/lj_coul_msm_cubin.h
$(OBJ_DIR)/lal_lj_coul_msm.o: $(ALL_H) lal_lj_coul_msm.h lal_lj_coul_msm.cpp $(OBJ_DIR)/lj_coul_msm_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul_msm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_msm_ext.o: $(ALL_H) lal_lj_coul_msm.h lal_lj_coul_msm_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_msm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_gromacs.cubin: lal_lj_gromacs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_gromacs.cu
$(OBJ_DIR)/lj_gromacs_cubin.h: $(OBJ_DIR)/lj_gromacs.cubin $(OBJ_DIR)/lj_gromacs.cubin
$(BIN2C) -c -n lj_gromacs $(OBJ_DIR)/lj_gromacs.cubin > $(OBJ_DIR)/lj_gromacs_cubin.h
$(OBJ_DIR)/lal_lj_gromacs.o: $(ALL_H) lal_lj_gromacs.h lal_lj_gromacs.cpp $(OBJ_DIR)/lj_gromacs_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_gromacs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_gromacs_ext.o: $(ALL_H) lal_lj_gromacs.h lal_lj_gromacs_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_gromacs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dpd.cubin: lal_dpd.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_dpd.cu
$(OBJ_DIR)/dpd_cubin.h: $(OBJ_DIR)/dpd.cubin $(OBJ_DIR)/dpd.cubin
$(BIN2C) -c -n dpd $(OBJ_DIR)/dpd.cubin > $(OBJ_DIR)/dpd_cubin.h
$(OBJ_DIR)/lal_dpd.o: $(ALL_H) lal_dpd.h lal_dpd.cpp $(OBJ_DIR)/dpd_cubin.h $(OBJ_DIR)/lal_base_dpd.o
$(CUDR) -o $@ -c lal_dpd.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dpd_ext.o: $(ALL_H) lal_dpd.h lal_dpd_ext.cpp lal_base_dpd.h
$(CUDR) -o $@ -c lal_dpd_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dpd_tstat_ext.o: $(ALL_H) lal_dpd.h lal_dpd_tstat_ext.cpp lal_base_dpd.h
$(CUDR) -o $@ -c lal_dpd_tstat_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/ufm.cubin: lal_ufm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_ufm.cu
$(OBJ_DIR)/ufm_cubin.h: $(OBJ_DIR)/ufm.cubin $(OBJ_DIR)/ufm.cubin
$(BIN2C) -c -n ufm $(OBJ_DIR)/ufm.cubin > $(OBJ_DIR)/ufm_cubin.h
$(OBJ_DIR)/lal_ufm.o: $(ALL_H) lal_ufm.h lal_ufm.cpp $(OBJ_DIR)/ufm_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_ufm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_ufm_ext.o: $(ALL_H) lal_ufm.h lal_ufm_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_ufm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff.cubin: lal_tersoff.cu lal_precision.h lal_tersoff_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_tersoff.cu
$(OBJ_DIR)/tersoff_cubin.h: $(OBJ_DIR)/tersoff.cubin $(OBJ_DIR)/tersoff.cubin
$(BIN2C) -c -n tersoff $(OBJ_DIR)/tersoff.cubin > $(OBJ_DIR)/tersoff_cubin.h
$(OBJ_DIR)/lal_tersoff.o: $(ALL_H) lal_tersoff.h lal_tersoff.cpp $(OBJ_DIR)/tersoff_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_tersoff.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_ext.o: $(ALL_H) lal_tersoff.h lal_tersoff_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_tersoff_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_zbl.cubin: lal_tersoff_zbl.cu lal_precision.h lal_tersoff_zbl_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_tersoff_zbl.cu
$(OBJ_DIR)/tersoff_zbl_cubin.h: $(OBJ_DIR)/tersoff_zbl.cubin $(OBJ_DIR)/tersoff_zbl.cubin
$(BIN2C) -c -n tersoff_zbl $(OBJ_DIR)/tersoff_zbl.cubin > $(OBJ_DIR)/tersoff_zbl_cubin.h
$(OBJ_DIR)/lal_tersoff_zbl.o: $(ALL_H) lal_tersoff_zbl.h lal_tersoff_zbl.cpp $(OBJ_DIR)/tersoff_zbl_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_tersoff_zbl.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_zbl_ext.o: $(ALL_H) lal_tersoff_zbl.h lal_tersoff_zbl_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_tersoff_zbl_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_mod.cubin: lal_tersoff_mod.cu lal_precision.h lal_tersoff_mod_extra.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_tersoff_mod.cu
$(OBJ_DIR)/tersoff_mod_cubin.h: $(OBJ_DIR)/tersoff_mod.cubin $(OBJ_DIR)/tersoff_mod.cubin
$(BIN2C) -c -n tersoff_mod $(OBJ_DIR)/tersoff_mod.cubin > $(OBJ_DIR)/tersoff_mod_cubin.h
$(OBJ_DIR)/lal_tersoff_mod.o: $(ALL_H) lal_tersoff_mod.h lal_tersoff_mod.cpp $(OBJ_DIR)/tersoff_mod_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_tersoff_mod.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_mod_ext.o: $(ALL_H) lal_tersoff_mod.h lal_tersoff_mod_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_tersoff_mod_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul.cubin: lal_coul.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_coul.cu
$(OBJ_DIR)/coul_cubin.h: $(OBJ_DIR)/coul.cubin $(OBJ_DIR)/coul.cubin
$(BIN2C) -c -n coul $(OBJ_DIR)/coul.cubin > $(OBJ_DIR)/coul_cubin.h
$(OBJ_DIR)/lal_coul.o: $(ALL_H) lal_coul.h lal_coul.cpp $(OBJ_DIR)/coul_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_ext.o: $(ALL_H) lal_coul.h lal_coul_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_debye.cubin: lal_coul_debye.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_coul_debye.cu
$(OBJ_DIR)/coul_debye_cubin.h: $(OBJ_DIR)/coul_debye.cubin $(OBJ_DIR)/coul_debye.cubin
$(BIN2C) -c -n coul_debye $(OBJ_DIR)/coul_debye.cubin > $(OBJ_DIR)/coul_debye_cubin.h
$(OBJ_DIR)/lal_coul_debye.o: $(ALL_H) lal_coul_debye.h lal_coul_debye.cpp $(OBJ_DIR)/coul_debye_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul_debye.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_debye_ext.o: $(ALL_H) lal_coul_debye.h lal_coul_debye_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_debye_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/zbl.cubin: lal_zbl.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_zbl.cu
$(OBJ_DIR)/zbl_cubin.h: $(OBJ_DIR)/zbl.cubin $(OBJ_DIR)/zbl.cubin
$(BIN2C) -c -n zbl $(OBJ_DIR)/zbl.cubin > $(OBJ_DIR)/zbl_cubin.h
$(OBJ_DIR)/lal_zbl.o: $(ALL_H) lal_zbl.h lal_zbl.cpp $(OBJ_DIR)/zbl_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_zbl.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_zbl_ext.o: $(ALL_H) lal_zbl.h lal_zbl_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_zbl_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_cubic.cubin: lal_lj_cubic.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_cubic.cu
$(OBJ_DIR)/lj_cubic_cubin.h: $(OBJ_DIR)/lj_cubic.cubin $(OBJ_DIR)/lj_cubic.cubin
$(BIN2C) -c -n lj_cubic $(OBJ_DIR)/lj_cubic.cubin > $(OBJ_DIR)/lj_cubic_cubin.h
$(OBJ_DIR)/lal_lj_cubic.o: $(ALL_H) lal_lj_cubic.h lal_lj_cubic.cpp $(OBJ_DIR)/lj_cubic_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_cubic.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_cubic_ext.o: $(ALL_H) lal_lj_cubic.h lal_lj_cubic_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_cubic_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_long_lj.cubin: lal_dipole_long_lj.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_dipole_long_lj.cu
$(OBJ_DIR)/dipole_long_lj_cubin.h: $(OBJ_DIR)/dipole_long_lj.cubin $(OBJ_DIR)/dipole_long_lj.cubin
$(BIN2C) -c -n dipole_long_lj $(OBJ_DIR)/dipole_long_lj.cubin > $(OBJ_DIR)/dipole_long_lj_cubin.h
$(OBJ_DIR)/lal_dipole_long_lj.o: $(ALL_H) lal_dipole_long_lj.h lal_dipole_long_lj.cpp $(OBJ_DIR)/dipole_long_lj_cubin.h $(OBJ_DIR)/lal_base_dipole.o
$(CUDR) -o $@ -c lal_dipole_long_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_long_lj_ext.o: $(ALL_H) lal_dipole_long_lj.h lal_dipole_long_lj_ext.cpp lal_base_dipole.h
$(CUDR) -o $@ -c lal_dipole_long_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_expand_coul_long.cubin: lal_lj_expand_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_lj_expand_coul_long.cu
$(OBJ_DIR)/lj_expand_coul_long_cubin.h: $(OBJ_DIR)/lj_expand_coul_long.cubin $(OBJ_DIR)/lj_expand_coul_long.cubin
$(BIN2C) -c -n lj_expand_coul_long $(OBJ_DIR)/lj_expand_coul_long.cubin > $(OBJ_DIR)/lj_expand_coul_long_cubin.h
$(OBJ_DIR)/lal_lj_expand_coul_long.o: $(ALL_H) lal_lj_expand_coul_long.h lal_lj_expand_coul_long.cpp $(OBJ_DIR)/lj_expand_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_expand_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_expand_coul_long_ext.o: $(ALL_H) lal_lj_expand_coul_long.h lal_lj_expand_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_expand_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_long_cs.cubin: lal_coul_long_cs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_coul_long_cs.cu
$(OBJ_DIR)/coul_long_cs_cubin.h: $(OBJ_DIR)/coul_long_cs.cubin $(OBJ_DIR)/coul_long_cs.cubin
$(BIN2C) -c -n coul_long_cs $(OBJ_DIR)/coul_long_cs.cubin > $(OBJ_DIR)/coul_long_cs_cubin.h
$(OBJ_DIR)/lal_coul_long_cs.o: $(ALL_H) lal_coul_long_cs.h lal_coul_long_cs.cpp $(OBJ_DIR)/coul_long_cs_cubin.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_coul_long.o
$(CUDR) -o $@ -c lal_coul_long_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_long_cs_ext.o: $(ALL_H) lal_coul_long_cs.h lal_coul_long_cs_ext.cpp lal_coul_long.h
$(CUDR) -o $@ -c lal_coul_long_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_long_cs.cubin: lal_born_coul_long_cs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_born_coul_long_cs.cu
$(OBJ_DIR)/born_coul_long_cs_cubin.h: $(OBJ_DIR)/born_coul_long_cs.cubin $(OBJ_DIR)/born_coul_long_cs.cubin
$(BIN2C) -c -n born_coul_long_cs $(OBJ_DIR)/born_coul_long_cs.cubin > $(OBJ_DIR)/born_coul_long_cs_cubin.h
$(OBJ_DIR)/lal_born_coul_long_cs.o: $(ALL_H) lal_born_coul_long_cs.h lal_born_coul_long_cs.cpp $(OBJ_DIR)/born_coul_long_cs_cubin.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_born_coul_long.o
$(CUDR) -o $@ -c lal_born_coul_long_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_long_cs_ext.o: $(ALL_H) lal_born_coul_long_cs.h lal_born_coul_long_cs_ext.cpp lal_born_coul_long.h
$(CUDR) -o $@ -c lal_born_coul_long_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_wolf_cs.cubin: lal_born_coul_wolf_cs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --cubin -DNV_KERNEL -o $@ lal_born_coul_wolf_cs.cu
$(OBJ_DIR)/born_coul_wolf_cs_cubin.h: $(OBJ_DIR)/born_coul_wolf_cs.cubin $(OBJ_DIR)/born_coul_wolf_cs.cubin
$(BIN2C) -c -n born_coul_wolf_cs $(OBJ_DIR)/born_coul_wolf_cs.cubin > $(OBJ_DIR)/born_coul_wolf_cs_cubin.h
$(OBJ_DIR)/lal_born_coul_wolf_cs.o: $(ALL_H) lal_born_coul_wolf_cs.h lal_born_coul_wolf_cs.cpp $(OBJ_DIR)/born_coul_wolf_cs_cubin.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_born_coul_wolf.o
$(CUDR) -o $@ -c lal_born_coul_wolf_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_wolf_cs_ext.o: $(ALL_H) lal_born_coul_wolf_cs.h lal_born_coul_wolf_cs_ext.cpp lal_born_coul_wolf.h
$(CUDR) -o $@ -c lal_born_coul_wolf_cs_ext.cpp -I$(OBJ_DIR)
$(BIN_DIR)/nvc_get_devices: ./geryon/ucl_get_devices.cpp $(NVD_H)
$(CUDR) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_CUDADR $(CUDA_LIB) -lcuda
# build libgpu.a
$(GPU_LIB): $(OBJS) $(CUDPP)
$(AR) -crusv $(GPU_LIB) $(OBJS) $(CUDPP)
@cp $(EXTRAMAKE) Makefile.lammps
# test app for querying device info
$(BIN_DIR)/nvc_get_devices: ./geryon/ucl_get_devices.cpp $(NVD_H)
$(CUDR) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_CUDADR $(CUDA_LIB) -lcuda
clean:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUDPP) $(CBNS) *.linkinfo
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUDPP) $(CUHS) *.linkinfo
veryclean: clean
-rm -rf *~ *.linkinfo
cleanlib:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CBNS) *.linkinfo
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUHS) *.linkinfo
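This rewrite replaces the hand-maintained CBNS list of per-kernel .cubin/_cubin.h rules with the generic %_cubin.h pattern rule above: every lal_*.cu kernel is compiled to a fatbin and embedded as a C header via bin2c, and only the float/double PPPM variants keep explicit rules because they compile the same lal_pppm.cu twice with different grdtyp definitions. A sketch of what make does for one kernel, lal_lj.cu, with OBJ_DIR=obj (the expansion below is illustrative, not part of the diff):
# 'make obj/lj_cubin.h' matches the %_cubin.h pattern rule with stem 'lj':
#   nvcc ... --fatbin -DNV_KERNEL -o obj/lj.cubin lal_lj.cu
#   bin2c -c -n lj obj/lj.cubin > obj/lj_cubin.h   # embed fatbin as byte array 'lj'
#   rm obj/lj.cubin                                # drop the intermediate binary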

@ -1,157 +1,67 @@
CUDA = $(NVCC) $(CUDA_INCLUDE) $(CUDA_OPTS) -Icudpp_mini $(CUDA_ARCH) \
$(CUDA_PRECISION)
CUDR = $(CUDR_CPP) $(CUDR_OPTS) $(CUDA_PRECISION) $(CUDA_INCLUDE) \
$(CUDPP_OPT)
CUDA_LINK = $(CUDA_LIB) -lcudart
BIN2C = $(CUDA_HOME)/bin/bin2c
GPU_LIB = $(LIB_DIR)/libgpu.a
# Headers for Geryon
UCL_H = $(wildcard ./geryon/ucl*.h)
NVC_H = $(wildcard ./geryon/nvc*.h) $(UCL_H)
NVD_H = $(wildcard ./geryon/nvd*.h) $(UCL_H) lal_preprocessor.h
# Headers for Pair Stuff
PAIR_H = lal_atom.h lal_answer.h lal_neighbor_shared.h \
lal_neighbor.h lal_precision.h lal_device.h \
lal_balance.h lal_pppm.h
ALL_H = $(NVD_H) $(wildcard ./lal_*.h)
ALL_H = $(NVD_H) $(PAIR_H)
# Source files
SRCS := $(wildcard ./lal_*.cpp)
OBJS := $(subst ./,$(OBJ_DIR)/,$(SRCS:%.cpp=%.o))
CUS := $(wildcard lal_*.cu)
CUHS := $(filter-out pppm_cubin.h, $(CUS:lal_%.cu=%_cubin.h)) pppm_f_cubin.h pppm_d_cubin.h
CUHS := $(addprefix $(OBJ_DIR)/, $(CUHS))
EXECS = $(BIN_DIR)/nvc_get_devices
ifdef CUDPP_OPT
CUDPP = $(OBJ_DIR)/cudpp.o $(OBJ_DIR)/cudpp_plan.o \
$(OBJ_DIR)/cudpp_maximal_launch.o $(OBJ_DIR)/cudpp_plan_manager.o \
$(OBJ_DIR)/radixsort_app.cu_o $(OBJ_DIR)/scan_app.cu_o
endif
OBJS = $(OBJ_DIR)/lal_atom.o $(OBJ_DIR)/lal_ans.o \
$(OBJ_DIR)/lal_neighbor.o $(OBJ_DIR)/lal_neighbor_shared.o \
$(OBJ_DIR)/lal_device.o $(OBJ_DIR)/lal_base_atomic.o \
$(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_base_ellipsoid.o \
$(OBJ_DIR)/lal_base_dipole.o $(OBJ_DIR)/lal_base_three.o \
$(OBJ_DIR)/lal_base_dpd.o \
$(OBJ_DIR)/lal_pppm.o $(OBJ_DIR)/lal_pppm_ext.o \
$(OBJ_DIR)/lal_gayberne.o $(OBJ_DIR)/lal_gayberne_ext.o \
$(OBJ_DIR)/lal_re_squared.o $(OBJ_DIR)/lal_re_squared_ext.o \
$(OBJ_DIR)/lal_lj.o $(OBJ_DIR)/lal_lj_ext.o \
$(OBJ_DIR)/lal_lj96.o $(OBJ_DIR)/lal_lj96_ext.o \
$(OBJ_DIR)/lal_lj_expand.o $(OBJ_DIR)/lal_lj_expand_ext.o \
$(OBJ_DIR)/lal_lj_coul.o $(OBJ_DIR)/lal_lj_coul_ext.o \
$(OBJ_DIR)/lal_lj_coul_long.o $(OBJ_DIR)/lal_lj_coul_long_ext.o \
$(OBJ_DIR)/lal_lj_dsf.o $(OBJ_DIR)/lal_lj_dsf_ext.o \
$(OBJ_DIR)/lal_lj_class2_long.o $(OBJ_DIR)/lal_lj_class2_long_ext.o \
$(OBJ_DIR)/lal_coul_long.o $(OBJ_DIR)/lal_coul_long_ext.o \
$(OBJ_DIR)/lal_morse.o $(OBJ_DIR)/lal_morse_ext.o \
$(OBJ_DIR)/lal_charmm_long.o $(OBJ_DIR)/lal_charmm_long_ext.o \
$(OBJ_DIR)/lal_lj_sdk.o $(OBJ_DIR)/lal_lj_sdk_ext.o \
$(OBJ_DIR)/lal_lj_sdk_long.o $(OBJ_DIR)/lal_lj_sdk_long_ext.o \
$(OBJ_DIR)/lal_eam.o $(OBJ_DIR)/lal_eam_ext.o \
$(OBJ_DIR)/lal_eam_fs_ext.o $(OBJ_DIR)/lal_eam_alloy_ext.o \
$(OBJ_DIR)/lal_buck.o $(OBJ_DIR)/lal_buck_ext.o \
$(OBJ_DIR)/lal_buck_coul.o $(OBJ_DIR)/lal_buck_coul_ext.o \
$(OBJ_DIR)/lal_buck_coul_long.o $(OBJ_DIR)/lal_buck_coul_long_ext.o \
$(OBJ_DIR)/lal_table.o $(OBJ_DIR)/lal_table_ext.o \
$(OBJ_DIR)/lal_yukawa.o $(OBJ_DIR)/lal_yukawa_ext.o \
$(OBJ_DIR)/lal_born.o $(OBJ_DIR)/lal_born_ext.o \
$(OBJ_DIR)/lal_born_coul_wolf.o $(OBJ_DIR)/lal_born_coul_wolf_ext.o \
$(OBJ_DIR)/lal_born_coul_long.o $(OBJ_DIR)/lal_born_coul_long_ext.o \
$(OBJ_DIR)/lal_dipole_lj.o $(OBJ_DIR)/lal_dipole_lj_ext.o \
$(OBJ_DIR)/lal_dipole_lj_sf.o $(OBJ_DIR)/lal_dipole_lj_sf_ext.o \
$(OBJ_DIR)/lal_colloid.o $(OBJ_DIR)/lal_colloid_ext.o \
$(OBJ_DIR)/lal_gauss.o $(OBJ_DIR)/lal_gauss_ext.o \
$(OBJ_DIR)/lal_yukawa_colloid.o $(OBJ_DIR)/lal_yukawa_colloid_ext.o \
$(OBJ_DIR)/lal_lj_coul_debye.o $(OBJ_DIR)/lal_lj_coul_debye_ext.o \
$(OBJ_DIR)/lal_coul_dsf.o $(OBJ_DIR)/lal_coul_dsf_ext.o \
$(OBJ_DIR)/lal_sw.o $(OBJ_DIR)/lal_sw_ext.o \
$(OBJ_DIR)/lal_vashishta.o $(OBJ_DIR)/lal_vashishta_ext.o \
$(OBJ_DIR)/lal_beck.o $(OBJ_DIR)/lal_beck_ext.o \
$(OBJ_DIR)/lal_mie.o $(OBJ_DIR)/lal_mie_ext.o \
$(OBJ_DIR)/lal_soft.o $(OBJ_DIR)/lal_soft_ext.o \
$(OBJ_DIR)/lal_lj_coul_msm.o $(OBJ_DIR)/lal_lj_coul_msm_ext.o \
$(OBJ_DIR)/lal_lj_gromacs.o $(OBJ_DIR)/lal_lj_gromacs_ext.o \
$(OBJ_DIR)/lal_dpd.o $(OBJ_DIR)/lal_dpd_ext.o $(OBJ_DIR)/lal_dpd_tstat_ext.o \
$(OBJ_DIR)/lal_tersoff.o $(OBJ_DIR)/lal_tersoff_ext.o \
$(OBJ_DIR)/lal_tersoff_zbl.o $(OBJ_DIR)/lal_tersoff_zbl_ext.o \
$(OBJ_DIR)/lal_tersoff_mod.o $(OBJ_DIR)/lal_tersoff_mod_ext.o \
$(OBJ_DIR)/lal_coul.o $(OBJ_DIR)/lal_coul_ext.o \
$(OBJ_DIR)/lal_coul_debye.o $(OBJ_DIR)/lal_coul_debye_ext.o \
$(OBJ_DIR)/lal_zbl.o $(OBJ_DIR)/lal_zbl_ext.o \
$(OBJ_DIR)/lal_lj_cubic.o $(OBJ_DIR)/lal_lj_cubic_ext.o \
$(OBJ_DIR)/lal_ufm.o $(OBJ_DIR)/lal_ufm_ext.o \
$(OBJ_DIR)/lal_dipole_long_lj.o $(OBJ_DIR)/lal_dipole_long_lj_ext.o \
$(OBJ_DIR)/lal_lj_expand_coul_long.o $(OBJ_DIR)/lal_lj_expand_coul_long_ext.o \
$(OBJ_DIR)/lal_coul_long_cs.o $(OBJ_DIR)/lal_coul_long_cs_ext.o \
$(OBJ_DIR)/lal_born_coul_long_cs.o $(OBJ_DIR)/lal_born_coul_long_cs_ext.o \
$(OBJ_DIR)/lal_born_coul_wolf_cs.o $(OBJ_DIR)/lal_born_coul_wolf_cs_ext.o \
$(OBJ_DIR)/lal_lj_tip4p_long.o $(OBJ_DIR)/lal_lj_tip4p_long_ext.o
CBNS = $(OBJ_DIR)/device.cubin $(OBJ_DIR)/device_cubin.h \
$(OBJ_DIR)/atom.cubin $(OBJ_DIR)/atom_cubin.h \
$(OBJ_DIR)/neighbor_cpu.cubin $(OBJ_DIR)/neighbor_cpu_cubin.h \
$(OBJ_DIR)/neighbor_gpu.cubin $(OBJ_DIR)/neighbor_gpu_cubin.h \
$(OBJ_DIR)/pppm_f.cubin $(OBJ_DIR)/pppm_f_cubin.h \
$(OBJ_DIR)/pppm_d.cubin $(OBJ_DIR)/pppm_d_cubin.h \
$(OBJ_DIR)/ellipsoid_nbor.cubin $(OBJ_DIR)/ellipsoid_nbor_cubin.h \
$(OBJ_DIR)/gayberne.cubin $(OBJ_DIR)/gayberne_lj.cubin \
$(OBJ_DIR)/gayberne_cubin.h $(OBJ_DIR)/gayberne_lj_cubin.h \
$(OBJ_DIR)/re_squared.cubin $(OBJ_DIR)/re_squared_lj.cubin \
$(OBJ_DIR)/re_squared_cubin.h $(OBJ_DIR)/re_squared_lj_cubin.h \
$(OBJ_DIR)/lj.cubin $(OBJ_DIR)/lj_cubin.h \
$(OBJ_DIR)/lj96.cubin $(OBJ_DIR)/lj96_cubin.h \
$(OBJ_DIR)/lj_expand.cubin $(OBJ_DIR)/lj_expand_cubin.h \
$(OBJ_DIR)/lj_coul.cubin $(OBJ_DIR)/lj_coul_cubin.h \
$(OBJ_DIR)/lj_coul_long.cubin $(OBJ_DIR)/lj_coul_long_cubin.h \
$(OBJ_DIR)/lj_dsf.cubin $(OBJ_DIR)/lj_dsf_cubin.h \
$(OBJ_DIR)/lj_class2_long.cubin $(OBJ_DIR)/lj_class2_long_cubin.h \
$(OBJ_DIR)/coul_long.cubin $(OBJ_DIR)/coul_long_cubin.h \
$(OBJ_DIR)/morse.cubin $(OBJ_DIR)/morse_cubin.h \
$(OBJ_DIR)/charmm_long.cubin $(OBJ_DIR)/charmm_long_cubin.h \
$(OBJ_DIR)/lj_sdk.cubin $(OBJ_DIR)/lj_sdk_cubin.h \
$(OBJ_DIR)/lj_sdk_long.cubin $(OBJ_DIR)/lj_sdk_long_cubin.h \
$(OBJ_DIR)/eam.cubin $(OBJ_DIR)/eam_cubin.h \
$(OBJ_DIR)/buck.cubin $(OBJ_DIR)/buck_cubin.h \
$(OBJ_DIR)/buck_coul_long.cubin $(OBJ_DIR)/buck_coul_long_cubin.h \
$(OBJ_DIR)/buck_coul.cubin $(OBJ_DIR)/buck_coul_cubin.h \
$(OBJ_DIR)/table.cubin $(OBJ_DIR)/table_cubin.h \
$(OBJ_DIR)/yukawa.cubin $(OBJ_DIR)/yukawa_cubin.h \
$(OBJ_DIR)/born.cubin $(OBJ_DIR)/born_cubin.h \
$(OBJ_DIR)/born_coul_wolf.cubin $(OBJ_DIR)/born_coul_wolf_cubin.h \
$(OBJ_DIR)/born_coul_long.cubin $(OBJ_DIR)/born_coul_long_cubin.h \
$(OBJ_DIR)/dipole_lj.cubin $(OBJ_DIR)/dipole_lj_cubin.h \
$(OBJ_DIR)/dipole_lj_sf.cubin $(OBJ_DIR)/dipole_lj_sf_cubin.h \
$(OBJ_DIR)/colloid.cubin $(OBJ_DIR)/colloid_cubin.h \
$(OBJ_DIR)/gauss.cubin $(OBJ_DIR)/gauss_cubin.h \
$(OBJ_DIR)/yukawa_colloid.cubin $(OBJ_DIR)/yukawa_colloid_cubin.h \
$(OBJ_DIR)/lj_coul_debye.cubin $(OBJ_DIR)/lj_coul_debye_cubin.h \
$(OBJ_DIR)/coul_dsf.cubin $(OBJ_DIR)/coul_dsf_cubin.h \
$(OBJ_DIR)/sw.cubin $(OBJ_DIR)/sw_cubin.h \
$(OBJ_DIR)/vashishta.cubin $(OBJ_DIR)/vashishta_cubin.h \
$(OBJ_DIR)/beck.cubin $(OBJ_DIR)/beck_cubin.h \
$(OBJ_DIR)/mie.cubin $(OBJ_DIR)/mie_cubin.h \
$(OBJ_DIR)/soft.cubin $(OBJ_DIR)/soft_cubin.h \
$(OBJ_DIR)/lj_coul_msm.cubin $(OBJ_DIR)/lj_coul_msm_cubin.h \
$(OBJ_DIR)/lj_gromacs.cubin $(OBJ_DIR)/lj_gromacs_cubin.h \
$(OBJ_DIR)/dpd.cubin $(OBJ_DIR)/dpd_cubin.h \
$(OBJ_DIR)/tersoff.cubin $(OBJ_DIR)/tersoff_cubin.h \
$(OBJ_DIR)/tersoff_zbl.cubin $(OBJ_DIR)/tersoff_zbl_cubin.h \
$(OBJ_DIR)/tersoff_mod.cubin $(OBJ_DIR)/tersoff_mod_cubin.h \
$(OBJ_DIR)/coul.cubin $(OBJ_DIR)/coul_cubin.h \
$(OBJ_DIR)/coul_debye.cubin $(OBJ_DIR)/coul_debye_cubin.h \
$(OBJ_DIR)/zbl.cubin $(OBJ_DIR)/zbl_cubin.h \
$(OBJ_DIR)/lj_cubic.cubin $(OBJ_DIR)/lj_cubic_cubin.h \
$(OBJ_DIR)/ufm.cubin $(OBJ_DIR)/ufm_cubin.h \
$(OBJ_DIR)/dipole_long_lj.cubin $(OBJ_DIR)/dipole_long_lj_cubin.h \
$(OBJ_DIR)/lj_expand_coul_long.cubin $(OBJ_DIR)/lj_expand_coul_long_cubin.h \
$(OBJ_DIR)/coul_long_cs.cubin $(OBJ_DIR)/coul_long_cs_cubin.h \
$(OBJ_DIR)/born_coul_long_cs.cubin $(OBJ_DIR)/born_coul_long_cs_cubin.h \
$(OBJ_DIR)/born_coul_wolf_cs.cubin $(OBJ_DIR)/born_coul_wolf_cs_cubin.h \
$(OBJ_DIR)/lj_tip4p_long.cubin $(OBJ_DIR)/lj_tip4p_long_cubin.h
# targets
all: $(OBJ_DIR) $(GPU_LIB) $(EXECS)
GPU_LIB = $(LIB_DIR)/libgpu.a
EXECS = $(BIN_DIR)/nvc_get_devices
all: $(OBJ_DIR) $(CUHS) $(GPU_LIB) $(EXECS)
$(OBJ_DIR):
mkdir -p $@
# Compilers and linkers
CUDA = $(NVCC) $(CUDA_INCLUDE) $(CUDA_OPTS) -Icudpp_mini $(CUDA_ARCH) \
$(CUDA_PRECISION)
CUDR = $(CUDR_CPP) $(CUDR_OPTS) $(CUDA_PRECISION) $(CUDA_INCLUDE) \
$(CUDPP_OPT)
CUDA_LINK = $(CUDA_LIB) -lcudart
BIN2C = $(CUDA_HOME)/bin/bin2c
# device code compilation
$(OBJ_DIR)/pppm_f.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=float -Dgrdtyp4=float4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_f_cubin.h: $(OBJ_DIR)/pppm_f.cubin
$(BIN2C) -c -n pppm_f $(OBJ_DIR)/pppm_f.cubin > $(OBJ_DIR)/pppm_f_cubin.h
$(OBJ_DIR)/pppm_d.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=double -Dgrdtyp4=double4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_d_cubin.h: $(OBJ_DIR)/pppm_d.cubin
$(BIN2C) -c -n pppm_d $(OBJ_DIR)/pppm_d.cubin > $(OBJ_DIR)/pppm_d_cubin.h
$(OBJ_DIR)/%_cubin.h: lal_%.cu $(ALL_H)
$(CUDA) --fatbin -DNV_KERNEL -o $(OBJ_DIR)/$*.cubin $(OBJ_DIR)/lal_$*.cu
$(BIN2C) -c -n $* $(OBJ_DIR)/$*.cubin > $@
@rm $(OBJ_DIR)/$*.cubin
# host code compilation
$(OBJ_DIR)/lal_%.o: lal_%.cpp $(CUHS) $(ALL_H)
$(CUDR) -o $@ -c $< -I$(OBJ_DIR)
#ifdef CUDPP_OPT
$(OBJ_DIR)/cudpp.o: cudpp_mini/cudpp.cpp
$(CUDR) -o $@ -c cudpp_mini/cudpp.cpp -Icudpp_mini
@ -169,745 +79,24 @@ $(OBJ_DIR)/radixsort_app.cu_o: cudpp_mini/radixsort_app.cu
$(OBJ_DIR)/scan_app.cu_o: cudpp_mini/scan_app.cu
$(CUDA) -o $@ -c cudpp_mini/scan_app.cu
#endif
$(OBJ_DIR)/atom.cubin: lal_atom.cu lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_atom.cu
$(OBJ_DIR)/atom_cubin.h: $(OBJ_DIR)/atom.cubin
$(BIN2C) -c -n atom $(OBJ_DIR)/atom.cubin > $(OBJ_DIR)/atom_cubin.h
$(OBJ_DIR)/lal_atom.o: lal_atom.cpp lal_atom.h $(NVD_H) $(OBJ_DIR)/atom_cubin.h
$(CUDR) -o $@ -c lal_atom.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_ans.o: lal_answer.cpp lal_answer.h $(NVD_H)
$(CUDR) -o $@ -c lal_answer.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/neighbor_cpu.cubin: lal_neighbor_cpu.cu lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_neighbor_cpu.cu
$(OBJ_DIR)/neighbor_cpu_cubin.h: $(OBJ_DIR)/neighbor_cpu.cubin
$(BIN2C) -c -n neighbor_cpu $(OBJ_DIR)/neighbor_cpu.cubin > $(OBJ_DIR)/neighbor_cpu_cubin.h
$(OBJ_DIR)/neighbor_gpu.cubin: lal_neighbor_gpu.cu lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_neighbor_gpu.cu
$(OBJ_DIR)/neighbor_gpu_cubin.h: $(OBJ_DIR)/neighbor_gpu.cubin
$(BIN2C) -c -n neighbor_gpu $(OBJ_DIR)/neighbor_gpu.cubin > $(OBJ_DIR)/neighbor_gpu_cubin.h
$(OBJ_DIR)/lal_neighbor_shared.o: lal_neighbor_shared.cpp lal_neighbor_shared.h $(OBJ_DIR)/neighbor_cpu_cubin.h $(OBJ_DIR)/neighbor_gpu_cubin.h $(NVD_H)
$(CUDR) -o $@ -c lal_neighbor_shared.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_neighbor.o: lal_neighbor.cpp lal_neighbor.h lal_neighbor_shared.h $(NVD_H)
$(CUDR) -o $@ -c lal_neighbor.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/device.cubin: lal_device.cu lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_device.cu
$(OBJ_DIR)/device_cubin.h: $(OBJ_DIR)/device.cubin
$(BIN2C) -c -n device $(OBJ_DIR)/device.cubin > $(OBJ_DIR)/device_cubin.h
$(OBJ_DIR)/lal_device.o: lal_device.cpp lal_device.h $(ALL_H) $(OBJ_DIR)/device_cubin.h
$(CUDR) -o $@ -c lal_device.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_atomic.o: $(ALL_H) lal_base_atomic.h lal_base_atomic.cpp
$(CUDR) -o $@ -c lal_base_atomic.cpp
$(OBJ_DIR)/lal_base_charge.o: $(ALL_H) lal_base_charge.h lal_base_charge.cpp
$(CUDR) -o $@ -c lal_base_charge.cpp
$(OBJ_DIR)/lal_base_ellipsoid.o: $(ALL_H) lal_base_ellipsoid.h lal_base_ellipsoid.cpp $(OBJ_DIR)/ellipsoid_nbor_cubin.h
$(CUDR) -o $@ -c lal_base_ellipsoid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_dipole.o: $(ALL_H) lal_base_dipole.h lal_base_dipole.cpp
$(CUDR) -o $@ -c lal_base_dipole.cpp
$(OBJ_DIR)/lal_base_three.o: $(ALL_H) lal_base_three.h lal_base_three.cpp
$(CUDR) -o $@ -c lal_base_three.cpp
$(OBJ_DIR)/lal_base_dpd.o: $(ALL_H) lal_base_dpd.h lal_base_dpd.cpp
$(CUDR) -o $@ -c lal_base_dpd.cpp
$(OBJ_DIR)/pppm_f.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=float -Dgrdtyp4=float4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_f_cubin.h: $(OBJ_DIR)/pppm_f.cubin
$(BIN2C) -c -n pppm_f $(OBJ_DIR)/pppm_f.cubin > $(OBJ_DIR)/pppm_f_cubin.h
$(OBJ_DIR)/pppm_d.cubin: lal_pppm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -Dgrdtyp=double -Dgrdtyp4=double4 -o $@ lal_pppm.cu
$(OBJ_DIR)/pppm_d_cubin.h: $(OBJ_DIR)/pppm_d.cubin
$(BIN2C) -c -n pppm_d $(OBJ_DIR)/pppm_d.cubin > $(OBJ_DIR)/pppm_d_cubin.h
$(OBJ_DIR)/lal_pppm.o: $(ALL_H) lal_pppm.h lal_pppm.cpp $(OBJ_DIR)/pppm_f_cubin.h $(OBJ_DIR)/pppm_d_cubin.h
$(CUDR) -o $@ -c lal_pppm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_pppm_ext.o: $(ALL_H) lal_pppm.h lal_pppm_ext.cpp
$(CUDR) -o $@ -c lal_pppm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/ellipsoid_nbor.cubin: lal_ellipsoid_nbor.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_ellipsoid_nbor.cu
$(OBJ_DIR)/ellipsoid_nbor_cubin.h: $(OBJ_DIR)/ellipsoid_nbor.cubin
$(BIN2C) -c -n ellipsoid_nbor $(OBJ_DIR)/ellipsoid_nbor.cubin > $(OBJ_DIR)/ellipsoid_nbor_cubin.h
$(OBJ_DIR)/gayberne.cubin: lal_gayberne.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_gayberne.cu
$(OBJ_DIR)/gayberne_lj.cubin: lal_gayberne_lj.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_gayberne_lj.cu
$(OBJ_DIR)/gayberne_cubin.h: $(OBJ_DIR)/gayberne.cubin
$(BIN2C) -c -n gayberne $(OBJ_DIR)/gayberne.cubin > $(OBJ_DIR)/gayberne_cubin.h
$(OBJ_DIR)/gayberne_lj_cubin.h: $(OBJ_DIR)/gayberne_lj.cubin
$(BIN2C) -c -n gayberne_lj $(OBJ_DIR)/gayberne_lj.cubin > $(OBJ_DIR)/gayberne_lj_cubin.h
$(OBJ_DIR)/lal_gayberne.o: $(ALL_H) lal_gayberne.h lal_gayberne.cpp $(OBJ_DIR)/gayberne_cubin.h $(OBJ_DIR)/gayberne_lj_cubin.h $(OBJ_DIR)/lal_base_ellipsoid.o
$(CUDR) -o $@ -c lal_gayberne.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_gayberne_ext.o: $(ALL_H) $(OBJ_DIR)/lal_gayberne.o lal_gayberne_ext.cpp
$(CUDR) -o $@ -c lal_gayberne_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/re_squared.cubin: lal_re_squared.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_re_squared.cu
$(OBJ_DIR)/re_squared_lj.cubin: lal_re_squared_lj.cu lal_precision.h lal_ellipsoid_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_re_squared_lj.cu
$(OBJ_DIR)/re_squared_cubin.h: $(OBJ_DIR)/re_squared.cubin
$(BIN2C) -c -n re_squared $(OBJ_DIR)/re_squared.cubin > $(OBJ_DIR)/re_squared_cubin.h
$(OBJ_DIR)/re_squared_lj_cubin.h: $(OBJ_DIR)/re_squared_lj.cubin
$(BIN2C) -c -n re_squared_lj $(OBJ_DIR)/re_squared_lj.cubin > $(OBJ_DIR)/re_squared_lj_cubin.h
$(OBJ_DIR)/lal_re_squared.o: $(ALL_H) lal_re_squared.h lal_re_squared.cpp $(OBJ_DIR)/re_squared_cubin.h $(OBJ_DIR)/re_squared_lj_cubin.h $(OBJ_DIR)/lal_base_ellipsoid.o
$(CUDR) -o $@ -c lal_re_squared.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_re_squared_ext.o: $(ALL_H) $(OBJ_DIR)/lal_re_squared.o lal_re_squared_ext.cpp
$(CUDR) -o $@ -c lal_re_squared_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj.cubin: lal_lj.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj.cu
$(OBJ_DIR)/lj_cubin.h: $(OBJ_DIR)/lj.cubin
$(BIN2C) -c -n lj $(OBJ_DIR)/lj.cubin > $(OBJ_DIR)/lj_cubin.h
$(OBJ_DIR)/lal_lj.o: $(ALL_H) lal_lj.h lal_lj.cpp $(OBJ_DIR)/lj_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_ext.o: $(ALL_H) lal_lj.h lal_lj_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_tip4p_long.cubin: lal_lj_tip4p_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_tip4p_long.cu
$(OBJ_DIR)/lj_tip4p_long_cubin.h: $(OBJ_DIR)/lj_tip4p_long.cubin
$(BIN2C) -c -n lj_tip4p_long $(OBJ_DIR)/lj_tip4p_long.cubin > $(OBJ_DIR)/lj_tip4p_long_cubin.h
$(OBJ_DIR)/lal_lj_tip4p_long.o: $(ALL_H) lal_lj_tip4p_long.h lal_lj_tip4p_long.cpp $(OBJ_DIR)/lj_tip4p_long_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_tip4p_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_tip4p_long_ext.o: $(ALL_H) lal_lj_tip4p_long.h lal_lj_tip4p_long_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_tip4p_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul.cubin: lal_lj_coul.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_coul.cu
$(OBJ_DIR)/lj_coul_cubin.h: $(OBJ_DIR)/lj_coul.cubin
$(BIN2C) -c -n lj_coul $(OBJ_DIR)/lj_coul.cubin > $(OBJ_DIR)/lj_coul_cubin.h
$(OBJ_DIR)/lal_lj_coul.o: $(ALL_H) lal_lj_coul.h lal_lj_coul.cpp $(OBJ_DIR)/lj_coul_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_ext.o: $(ALL_H) lal_lj_coul.h lal_lj_coul_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_class2_long.cubin: lal_lj_class2_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_class2_long.cu
$(OBJ_DIR)/lj_class2_long_cubin.h: $(OBJ_DIR)/lj_class2_long.cubin
$(BIN2C) -c -n lj_class2_long $(OBJ_DIR)/lj_class2_long.cubin > $(OBJ_DIR)/lj_class2_long_cubin.h
$(OBJ_DIR)/lal_lj_class2_long.o: $(ALL_H) lal_lj_class2_long.h lal_lj_class2_long.cpp $(OBJ_DIR)/lj_class2_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_class2_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_class2_long_ext.o: $(ALL_H) lal_lj_class2_long.h lal_lj_class2_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_class2_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_long.cubin: lal_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_coul_long.cu
$(OBJ_DIR)/coul_long_cubin.h: $(OBJ_DIR)/coul_long.cubin
$(BIN2C) -c -n coul_long $(OBJ_DIR)/coul_long.cubin > $(OBJ_DIR)/coul_long_cubin.h
$(OBJ_DIR)/lal_coul_long.o: $(ALL_H) lal_coul_long.h lal_coul_long.cpp $(OBJ_DIR)/coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_long_ext.o: $(ALL_H) lal_coul_long.h lal_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_long.cubin: lal_lj_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_coul_long.cu
$(OBJ_DIR)/lj_coul_long_cubin.h: $(OBJ_DIR)/lj_coul_long.cubin
$(BIN2C) -c -n lj_coul_long $(OBJ_DIR)/lj_coul_long.cubin > $(OBJ_DIR)/lj_coul_long_cubin.h
$(OBJ_DIR)/lal_lj_coul_long.o: $(ALL_H) lal_lj_coul_long.h lal_lj_coul_long.cpp $(OBJ_DIR)/lj_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_long_ext.o: $(ALL_H) lal_lj_coul_long.h lal_lj_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_dsf.cubin: lal_lj_dsf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_dsf.cu
$(OBJ_DIR)/lj_dsf_cubin.h: $(OBJ_DIR)/lj_dsf.cubin
$(BIN2C) -c -n lj_dsf $(OBJ_DIR)/lj_dsf.cubin > $(OBJ_DIR)/lj_dsf_cubin.h
$(OBJ_DIR)/lal_lj_dsf.o: $(ALL_H) lal_lj_dsf.h lal_lj_dsf.cpp $(OBJ_DIR)/lj_dsf_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_dsf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_dsf_ext.o: $(ALL_H) lal_lj_dsf.h lal_lj_dsf_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_dsf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/morse.cubin: lal_morse.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_morse.cu
$(OBJ_DIR)/morse_cubin.h: $(OBJ_DIR)/morse.cubin
$(BIN2C) -c -n morse $(OBJ_DIR)/morse.cubin > $(OBJ_DIR)/morse_cubin.h
$(OBJ_DIR)/lal_morse.o: $(ALL_H) lal_morse.h lal_morse.cpp $(OBJ_DIR)/morse_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_morse.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_morse_ext.o: $(ALL_H) lal_morse.h lal_morse_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_morse_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/charmm_long.cubin: lal_charmm_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_charmm_long.cu
$(OBJ_DIR)/charmm_long_cubin.h: $(OBJ_DIR)/charmm_long.cubin
$(BIN2C) -c -n charmm_long $(OBJ_DIR)/charmm_long.cubin > $(OBJ_DIR)/charmm_long_cubin.h
$(OBJ_DIR)/lal_charmm_long.o: $(ALL_H) lal_charmm_long.h lal_charmm_long.cpp $(OBJ_DIR)/charmm_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_charmm_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_charmm_long_ext.o: $(ALL_H) lal_charmm_long.h lal_charmm_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_charmm_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj96.cubin: lal_lj96.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj96.cu
$(OBJ_DIR)/lj96_cubin.h: $(OBJ_DIR)/lj96.cubin
$(BIN2C) -c -n lj96 $(OBJ_DIR)/lj96.cubin > $(OBJ_DIR)/lj96_cubin.h
$(OBJ_DIR)/lal_lj96.o: $(ALL_H) lal_lj96.h lal_lj96.cpp $(OBJ_DIR)/lj96_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj96.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj96_ext.o: $(ALL_H) lal_lj96.h lal_lj96_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj96_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_expand.cubin: lal_lj_expand.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_expand.cu
$(OBJ_DIR)/lj_expand_cubin.h: $(OBJ_DIR)/lj_expand.cubin
$(BIN2C) -c -n lj_expand $(OBJ_DIR)/lj_expand.cubin > $(OBJ_DIR)/lj_expand_cubin.h
$(OBJ_DIR)/lal_lj_expand.o: $(ALL_H) lal_lj_expand.h lal_lj_expand.cpp $(OBJ_DIR)/lj_expand_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_expand.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_expand_ext.o: $(ALL_H) lal_lj_expand.h lal_lj_expand_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_expand_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_sdk.cubin: lal_lj_sdk.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_sdk.cu
$(OBJ_DIR)/lj_sdk_cubin.h: $(OBJ_DIR)/lj_sdk.cubin
$(BIN2C) -c -n lj_sdk $(OBJ_DIR)/lj_sdk.cubin > $(OBJ_DIR)/lj_sdk_cubin.h
$(OBJ_DIR)/lal_lj_sdk.o: $(ALL_H) lal_lj_sdk.h lal_lj_sdk.cpp $(OBJ_DIR)/lj_sdk_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_sdk.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_sdk_ext.o: $(ALL_H) lal_lj_sdk.h lal_lj_sdk_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_sdk_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_sdk_long.cubin: lal_lj_sdk_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_sdk_long.cu
$(OBJ_DIR)/lj_sdk_long_cubin.h: $(OBJ_DIR)/lj_sdk_long.cubin
$(BIN2C) -c -n lj_sdk_long $(OBJ_DIR)/lj_sdk_long.cubin > $(OBJ_DIR)/lj_sdk_long_cubin.h
$(OBJ_DIR)/lal_lj_sdk_long.o: $(ALL_H) lal_lj_sdk_long.h lal_lj_sdk_long.cpp $(OBJ_DIR)/lj_sdk_long_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_sdk_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_sdk_long_ext.o: $(ALL_H) lal_lj_sdk_long.h lal_lj_sdk_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_sdk_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/eam.cubin: lal_eam.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_eam.cu
$(OBJ_DIR)/eam_cubin.h: $(OBJ_DIR)/eam.cubin
$(BIN2C) -c -n eam $(OBJ_DIR)/eam.cubin > $(OBJ_DIR)/eam_cubin.h
$(OBJ_DIR)/lal_eam.o: $(ALL_H) lal_eam.h lal_eam.cpp $(OBJ_DIR)/eam_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_eam.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_ext.o: $(ALL_H) lal_eam.h lal_eam_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_eam_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_fs_ext.o: $(ALL_H) lal_eam.h lal_eam_fs_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_eam_fs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_alloy_ext.o: $(ALL_H) lal_eam.h lal_eam_alloy_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_eam_alloy_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck.cubin: lal_buck.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_buck.cu
$(OBJ_DIR)/buck_cubin.h: $(OBJ_DIR)/buck.cubin
$(BIN2C) -c -n buck $(OBJ_DIR)/buck.cubin > $(OBJ_DIR)/buck_cubin.h
$(OBJ_DIR)/lal_buck.o: $(ALL_H) lal_buck.h lal_buck.cpp $(OBJ_DIR)/buck_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_buck.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_ext.o: $(ALL_H) lal_buck.h lal_buck_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_buck_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_coul.cubin: lal_buck_coul.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_buck_coul.cu
$(OBJ_DIR)/buck_coul_cubin.h: $(OBJ_DIR)/buck_coul.cubin
$(BIN2C) -c -n buck_coul $(OBJ_DIR)/buck_coul.cubin > $(OBJ_DIR)/buck_coul_cubin.h
$(OBJ_DIR)/lal_buck_coul.o: $(ALL_H) lal_buck_coul.h lal_buck_coul.cpp $(OBJ_DIR)/buck_coul_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_buck_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_coul_ext.o: $(ALL_H) lal_buck_coul.h lal_buck_coul_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_buck_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_coul_long.cubin: lal_buck_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_buck_coul_long.cu
$(OBJ_DIR)/buck_coul_long_cubin.h: $(OBJ_DIR)/buck_coul_long.cubin
$(BIN2C) -c -n buck_coul_long $(OBJ_DIR)/buck_coul_long.cubin > $(OBJ_DIR)/buck_coul_long_cubin.h
$(OBJ_DIR)/lal_buck_coul_long.o: $(ALL_H) lal_buck_coul_long.h lal_buck_coul_long.cpp $(OBJ_DIR)/buck_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_buck_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_coul_long_ext.o: $(ALL_H) lal_buck_coul_long.h lal_buck_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_buck_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/table.cubin: lal_table.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_table.cu
$(OBJ_DIR)/table_cubin.h: $(OBJ_DIR)/table.cubin
$(BIN2C) -c -n table $(OBJ_DIR)/table.cubin > $(OBJ_DIR)/table_cubin.h
$(OBJ_DIR)/lal_table.o: $(ALL_H) lal_table.h lal_table.cpp $(OBJ_DIR)/table_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_table.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_table_ext.o: $(ALL_H) lal_table.h lal_table_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_table_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/yukawa.cubin: lal_yukawa.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_yukawa.cu
$(OBJ_DIR)/yukawa_cubin.h: $(OBJ_DIR)/yukawa.cubin
$(BIN2C) -c -n yukawa $(OBJ_DIR)/yukawa.cubin > $(OBJ_DIR)/yukawa_cubin.h
$(OBJ_DIR)/lal_yukawa.o: $(ALL_H) lal_yukawa.h lal_yukawa.cpp $(OBJ_DIR)/yukawa_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_yukawa.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_yukawa_ext.o: $(ALL_H) lal_yukawa.h lal_yukawa_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_yukawa_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born.cubin: lal_born.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_born.cu
$(OBJ_DIR)/born_cubin.h: $(OBJ_DIR)/born.cubin
$(BIN2C) -c -n born $(OBJ_DIR)/born.cubin > $(OBJ_DIR)/born_cubin.h
$(OBJ_DIR)/lal_born.o: $(ALL_H) lal_born.h lal_born.cpp $(OBJ_DIR)/born_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_born.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_ext.o: $(ALL_H) lal_born.h lal_born_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_born_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_wolf.cubin: lal_born_coul_wolf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_born_coul_wolf.cu
$(OBJ_DIR)/born_coul_wolf_cubin.h: $(OBJ_DIR)/born_coul_wolf.cubin
$(BIN2C) -c -n born_coul_wolf $(OBJ_DIR)/born_coul_wolf.cubin > $(OBJ_DIR)/born_coul_wolf_cubin.h
$(OBJ_DIR)/lal_born_coul_wolf.o: $(ALL_H) lal_born_coul_wolf.h lal_born_coul_wolf.cpp $(OBJ_DIR)/born_coul_wolf_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_born_coul_wolf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_wolf_ext.o: $(ALL_H) lal_born_coul_wolf.h lal_born_coul_wolf_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_born_coul_wolf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_long.cubin: lal_born_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_born_coul_long.cu
$(OBJ_DIR)/born_coul_long_cubin.h: $(OBJ_DIR)/born_coul_long.cubin
$(BIN2C) -c -n born_coul_long $(OBJ_DIR)/born_coul_long.cubin > $(OBJ_DIR)/born_coul_long_cubin.h
$(OBJ_DIR)/lal_born_coul_long.o: $(ALL_H) lal_born_coul_long.h lal_born_coul_long.cpp $(OBJ_DIR)/born_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_born_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_long_ext.o: $(ALL_H) lal_born_coul_long.h lal_born_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_born_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_lj.cubin: lal_dipole_lj.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_dipole_lj.cu
$(OBJ_DIR)/dipole_lj_cubin.h: $(OBJ_DIR)/dipole_lj.cubin
$(BIN2C) -c -n dipole_lj $(OBJ_DIR)/dipole_lj.cubin > $(OBJ_DIR)/dipole_lj_cubin.h
$(OBJ_DIR)/lal_dipole_lj.o: $(ALL_H) lal_dipole_lj.h lal_dipole_lj.cpp $(OBJ_DIR)/dipole_lj_cubin.h $(OBJ_DIR)/lal_base_dipole.o
$(CUDR) -o $@ -c lal_dipole_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_lj_ext.o: $(ALL_H) lal_dipole_lj.h lal_dipole_lj_ext.cpp lal_base_dipole.h
$(CUDR) -o $@ -c lal_dipole_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_lj_sf.cubin: lal_dipole_lj_sf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_dipole_lj_sf.cu
$(OBJ_DIR)/dipole_lj_sf_cubin.h: $(OBJ_DIR)/dipole_lj_sf.cubin
$(BIN2C) -c -n dipole_lj_sf $(OBJ_DIR)/dipole_lj_sf.cubin > $(OBJ_DIR)/dipole_lj_sf_cubin.h
$(OBJ_DIR)/lal_dipole_lj_sf.o: $(ALL_H) lal_dipole_lj_sf.h lal_dipole_lj_sf.cpp $(OBJ_DIR)/dipole_lj_sf_cubin.h $(OBJ_DIR)/lal_base_dipole.o
$(CUDR) -o $@ -c lal_dipole_lj_sf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_lj_sf_ext.o: $(ALL_H) lal_dipole_lj_sf.h lal_dipole_lj_sf_ext.cpp lal_base_dipole.h
$(CUDR) -o $@ -c lal_dipole_lj_sf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/colloid.cubin: lal_colloid.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_colloid.cu
$(OBJ_DIR)/colloid_cubin.h: $(OBJ_DIR)/colloid.cubin
$(BIN2C) -c -n colloid $(OBJ_DIR)/colloid.cubin > $(OBJ_DIR)/colloid_cubin.h
$(OBJ_DIR)/lal_colloid.o: $(ALL_H) lal_colloid.h lal_colloid.cpp $(OBJ_DIR)/colloid_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_colloid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_colloid_ext.o: $(ALL_H) lal_colloid.h lal_colloid_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_colloid_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/gauss.cubin: lal_gauss.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_gauss.cu
$(OBJ_DIR)/gauss_cubin.h: $(OBJ_DIR)/gauss.cubin
$(BIN2C) -c -n gauss $(OBJ_DIR)/gauss.cubin > $(OBJ_DIR)/gauss_cubin.h
$(OBJ_DIR)/lal_gauss.o: $(ALL_H) lal_gauss.h lal_gauss.cpp $(OBJ_DIR)/gauss_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_gauss.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_gauss_ext.o: $(ALL_H) lal_gauss.h lal_gauss_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_gauss_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/yukawa_colloid.cubin: lal_yukawa_colloid.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_yukawa_colloid.cu
$(OBJ_DIR)/yukawa_colloid_cubin.h: $(OBJ_DIR)/yukawa_colloid.cubin
$(BIN2C) -c -n yukawa_colloid $(OBJ_DIR)/yukawa_colloid.cubin > $(OBJ_DIR)/yukawa_colloid_cubin.h
$(OBJ_DIR)/lal_yukawa_colloid.o: $(ALL_H) lal_yukawa_colloid.h lal_yukawa_colloid.cpp $(OBJ_DIR)/yukawa_colloid_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_yukawa_colloid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_yukawa_colloid_ext.o: $(ALL_H) lal_yukawa_colloid.h lal_yukawa_colloid_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_yukawa_colloid_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_debye.cubin: lal_lj_coul_debye.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_coul_debye.cu
$(OBJ_DIR)/lj_coul_debye_cubin.h: $(OBJ_DIR)/lj_coul_debye.cubin
$(BIN2C) -c -n lj_coul_debye $(OBJ_DIR)/lj_coul_debye.cubin > $(OBJ_DIR)/lj_coul_debye_cubin.h
$(OBJ_DIR)/lal_lj_coul_debye.o: $(ALL_H) lal_lj_coul_debye.h lal_lj_coul_debye.cpp $(OBJ_DIR)/lj_coul_debye_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul_debye.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_debye_ext.o: $(ALL_H) lal_lj_coul_debye.h lal_lj_coul_debye_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_debye_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_dsf.cubin: lal_coul_dsf.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_coul_dsf.cu
$(OBJ_DIR)/coul_dsf_cubin.h: $(OBJ_DIR)/coul_dsf.cubin
$(BIN2C) -c -n coul_dsf $(OBJ_DIR)/coul_dsf.cubin > $(OBJ_DIR)/coul_dsf_cubin.h
$(OBJ_DIR)/lal_coul_dsf.o: $(ALL_H) lal_coul_dsf.h lal_coul_dsf.cpp $(OBJ_DIR)/coul_dsf_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul_dsf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_dsf_ext.o: $(ALL_H) lal_coul_dsf.h lal_coul_dsf_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_dsf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/vashishta.cubin: lal_vashishta.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_vashishta.cu
$(OBJ_DIR)/vashishta_cubin.h: $(OBJ_DIR)/vashishta.cubin
$(BIN2C) -c -n vashishta $(OBJ_DIR)/vashishta.cubin > $(OBJ_DIR)/vashishta_cubin.h
$(OBJ_DIR)/lal_vashishta.o: $(ALL_H) lal_vashishta.h lal_vashishta.cpp $(OBJ_DIR)/vashishta_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_vashishta.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_vashishta_ext.o: $(ALL_H) lal_vashishta.h lal_vashishta_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_vashishta_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/sw.cubin: lal_sw.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_sw.cu
$(OBJ_DIR)/sw_cubin.h: $(OBJ_DIR)/sw.cubin
$(BIN2C) -c -n sw $(OBJ_DIR)/sw.cubin > $(OBJ_DIR)/sw_cubin.h
$(OBJ_DIR)/lal_sw.o: $(ALL_H) lal_sw.h lal_sw.cpp $(OBJ_DIR)/sw_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_sw.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_sw_ext.o: $(ALL_H) lal_sw.h lal_sw_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_sw_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/beck.cubin: lal_beck.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_beck.cu
$(OBJ_DIR)/beck_cubin.h: $(OBJ_DIR)/beck.cubin
$(BIN2C) -c -n beck $(OBJ_DIR)/beck.cubin > $(OBJ_DIR)/beck_cubin.h
$(OBJ_DIR)/lal_beck.o: $(ALL_H) lal_beck.h lal_beck.cpp $(OBJ_DIR)/beck_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_beck.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_beck_ext.o: $(ALL_H) lal_beck.h lal_beck_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_beck_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/mie.cubin: lal_mie.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_mie.cu
$(OBJ_DIR)/mie_cubin.h: $(OBJ_DIR)/mie.cubin
$(BIN2C) -c -n mie $(OBJ_DIR)/mie.cubin > $(OBJ_DIR)/mie_cubin.h
$(OBJ_DIR)/lal_mie.o: $(ALL_H) lal_mie.h lal_mie.cpp $(OBJ_DIR)/mie_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_mie.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_mie_ext.o: $(ALL_H) lal_mie.h lal_mie_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_mie_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/soft.cubin: lal_soft.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_soft.cu
$(OBJ_DIR)/soft_cubin.h: $(OBJ_DIR)/soft.cubin
$(BIN2C) -c -n soft $(OBJ_DIR)/soft.cubin > $(OBJ_DIR)/soft_cubin.h
$(OBJ_DIR)/lal_soft.o: $(ALL_H) lal_soft.h lal_soft.cpp $(OBJ_DIR)/soft_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_soft.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_soft_ext.o: $(ALL_H) lal_soft.h lal_soft_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_soft_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_msm.cubin: lal_lj_coul_msm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_coul_msm.cu
$(OBJ_DIR)/lj_coul_msm_cubin.h: $(OBJ_DIR)/lj_coul_msm.cubin
$(BIN2C) -c -n lj_coul_msm $(OBJ_DIR)/lj_coul_msm.cubin > $(OBJ_DIR)/lj_coul_msm_cubin.h
$(OBJ_DIR)/lal_lj_coul_msm.o: $(ALL_H) lal_lj_coul_msm.h lal_lj_coul_msm.cpp $(OBJ_DIR)/lj_coul_msm_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_coul_msm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_msm_ext.o: $(ALL_H) lal_lj_coul_msm.h lal_lj_coul_msm_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_coul_msm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_gromacs.cubin: lal_lj_gromacs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_gromacs.cu
$(OBJ_DIR)/lj_gromacs_cubin.h: $(OBJ_DIR)/lj_gromacs.cubin
$(BIN2C) -c -n lj_gromacs $(OBJ_DIR)/lj_gromacs.cubin > $(OBJ_DIR)/lj_gromacs_cubin.h
$(OBJ_DIR)/lal_lj_gromacs.o: $(ALL_H) lal_lj_gromacs.h lal_lj_gromacs.cpp $(OBJ_DIR)/lj_gromacs_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_gromacs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_gromacs_ext.o: $(ALL_H) lal_lj_gromacs.h lal_lj_gromacs_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_gromacs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dpd.cubin: lal_dpd.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_dpd.cu
$(OBJ_DIR)/dpd_cubin.h: $(OBJ_DIR)/dpd.cubin
$(BIN2C) -c -n dpd $(OBJ_DIR)/dpd.cubin > $(OBJ_DIR)/dpd_cubin.h
$(OBJ_DIR)/lal_dpd.o: $(ALL_H) lal_dpd.h lal_dpd.cpp $(OBJ_DIR)/dpd_cubin.h $(OBJ_DIR)/lal_base_dpd.o
$(CUDR) -o $@ -c lal_dpd.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dpd_ext.o: $(ALL_H) lal_dpd.h lal_dpd_ext.cpp lal_base_dpd.h
$(CUDR) -o $@ -c lal_dpd_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dpd_tstat_ext.o: $(ALL_H) lal_dpd.h lal_dpd_tstat_ext.cpp lal_base_dpd.h
$(CUDR) -o $@ -c lal_dpd_tstat_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/ufm.cubin: lal_ufm.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_ufm.cu
$(OBJ_DIR)/ufm_cubin.h: $(OBJ_DIR)/ufm.cubin
$(BIN2C) -c -n ufm $(OBJ_DIR)/ufm.cubin > $(OBJ_DIR)/ufm_cubin.h
$(OBJ_DIR)/lal_ufm.o: $(ALL_H) lal_ufm.h lal_ufm.cpp $(OBJ_DIR)/ufm_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_ufm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_ufm_ext.o: $(ALL_H) lal_ufm.h lal_ufm_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_ufm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff.cubin: lal_tersoff.cu lal_precision.h lal_tersoff_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_tersoff.cu
$(OBJ_DIR)/tersoff_cubin.h: $(OBJ_DIR)/tersoff.cubin
$(BIN2C) -c -n tersoff $(OBJ_DIR)/tersoff.cubin > $(OBJ_DIR)/tersoff_cubin.h
$(OBJ_DIR)/lal_tersoff.o: $(ALL_H) lal_tersoff.h lal_tersoff.cpp $(OBJ_DIR)/tersoff_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_tersoff.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_ext.o: $(ALL_H) lal_tersoff.h lal_tersoff_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_tersoff_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_zbl.cubin: lal_tersoff_zbl.cu lal_precision.h lal_tersoff_zbl_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_tersoff_zbl.cu
$(OBJ_DIR)/tersoff_zbl_cubin.h: $(OBJ_DIR)/tersoff_zbl.cubin
$(BIN2C) -c -n tersoff_zbl $(OBJ_DIR)/tersoff_zbl.cubin > $(OBJ_DIR)/tersoff_zbl_cubin.h
$(OBJ_DIR)/lal_tersoff_zbl.o: $(ALL_H) lal_tersoff_zbl.h lal_tersoff_zbl.cpp $(OBJ_DIR)/tersoff_zbl_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_tersoff_zbl.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_zbl_ext.o: $(ALL_H) lal_tersoff_zbl.h lal_tersoff_zbl_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_tersoff_zbl_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_mod.cubin: lal_tersoff_mod.cu lal_precision.h lal_tersoff_mod_extra.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_tersoff_mod.cu
$(OBJ_DIR)/tersoff_mod_cubin.h: $(OBJ_DIR)/tersoff_mod.cubin
$(BIN2C) -c -n tersoff_mod $(OBJ_DIR)/tersoff_mod.cubin > $(OBJ_DIR)/tersoff_mod_cubin.h
$(OBJ_DIR)/lal_tersoff_mod.o: $(ALL_H) lal_tersoff_mod.h lal_tersoff_mod.cpp $(OBJ_DIR)/tersoff_mod_cubin.h $(OBJ_DIR)/lal_base_three.o
$(CUDR) -o $@ -c lal_tersoff_mod.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_mod_ext.o: $(ALL_H) lal_tersoff_mod.h lal_tersoff_mod_ext.cpp lal_base_three.h
$(CUDR) -o $@ -c lal_tersoff_mod_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul.cubin: lal_coul.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_coul.cu
$(OBJ_DIR)/coul_cubin.h: $(OBJ_DIR)/coul.cubin
$(BIN2C) -c -n coul $(OBJ_DIR)/coul.cubin > $(OBJ_DIR)/coul_cubin.h
$(OBJ_DIR)/lal_coul.o: $(ALL_H) lal_coul.h lal_coul.cpp $(OBJ_DIR)/coul_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_ext.o: $(ALL_H) lal_coul.h lal_coul_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_debye.cubin: lal_coul_debye.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_coul_debye.cu
$(OBJ_DIR)/coul_debye_cubin.h: $(OBJ_DIR)/coul_debye.cubin
$(BIN2C) -c -n coul_debye $(OBJ_DIR)/coul_debye.cubin > $(OBJ_DIR)/coul_debye_cubin.h
$(OBJ_DIR)/lal_coul_debye.o: $(ALL_H) lal_coul_debye.h lal_coul_debye.cpp $(OBJ_DIR)/coul_debye_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_coul_debye.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_debye_ext.o: $(ALL_H) lal_coul_debye.h lal_coul_debye_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_coul_debye_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/zbl.cubin: lal_zbl.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_zbl.cu
$(OBJ_DIR)/zbl_cubin.h: $(OBJ_DIR)/zbl.cubin
$(BIN2C) -c -n zbl $(OBJ_DIR)/zbl.cubin > $(OBJ_DIR)/zbl_cubin.h
$(OBJ_DIR)/lal_zbl.o: $(ALL_H) lal_zbl.h lal_zbl.cpp $(OBJ_DIR)/zbl_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_zbl.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_zbl_ext.o: $(ALL_H) lal_zbl.h lal_zbl_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_zbl_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_cubic.cubin: lal_lj_cubic.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_cubic.cu
$(OBJ_DIR)/lj_cubic_cubin.h: $(OBJ_DIR)/lj_cubic.cubin
$(BIN2C) -c -n lj_cubic $(OBJ_DIR)/lj_cubic.cubin > $(OBJ_DIR)/lj_cubic_cubin.h
$(OBJ_DIR)/lal_lj_cubic.o: $(ALL_H) lal_lj_cubic.h lal_lj_cubic.cpp $(OBJ_DIR)/lj_cubic_cubin.h $(OBJ_DIR)/lal_base_atomic.o
$(CUDR) -o $@ -c lal_lj_cubic.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_cubic_ext.o: $(ALL_H) lal_lj_cubic.h lal_lj_cubic_ext.cpp lal_base_atomic.h
$(CUDR) -o $@ -c lal_lj_cubic_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_long_lj.cubin: lal_dipole_long_lj.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_dipole_long_lj.cu
$(OBJ_DIR)/dipole_long_lj_cubin.h: $(OBJ_DIR)/dipole_long_lj.cubin
$(BIN2C) -c -n dipole_long_lj $(OBJ_DIR)/dipole_long_lj.cubin > $(OBJ_DIR)/dipole_long_lj_cubin.h
$(OBJ_DIR)/lal_dipole_long_lj.o: $(ALL_H) lal_dipole_long_lj.h lal_dipole_long_lj.cpp $(OBJ_DIR)/dipole_long_lj_cubin.h $(OBJ_DIR)/lal_base_dipole.o
$(CUDR) -o $@ -c lal_dipole_long_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_long_lj_ext.o: $(ALL_H) lal_dipole_long_lj.h lal_dipole_long_lj_ext.cpp lal_base_dipole.h
$(CUDR) -o $@ -c lal_dipole_long_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_expand_coul_long.cubin: lal_lj_expand_coul_long.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_lj_expand_coul_long.cu
$(OBJ_DIR)/lj_expand_coul_long_cubin.h: $(OBJ_DIR)/lj_expand_coul_long.cubin
$(BIN2C) -c -n lj_expand_coul_long $(OBJ_DIR)/lj_expand_coul_long.cubin > $(OBJ_DIR)/lj_expand_coul_long_cubin.h
$(OBJ_DIR)/lal_lj_expand_coul_long.o: $(ALL_H) lal_lj_expand_coul_long.h lal_lj_expand_coul_long.cpp $(OBJ_DIR)/lj_expand_coul_long_cubin.h $(OBJ_DIR)/lal_base_charge.o
$(CUDR) -o $@ -c lal_lj_expand_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_expand_coul_long_ext.o: $(ALL_H) lal_lj_expand_coul_long.h lal_lj_expand_coul_long_ext.cpp lal_base_charge.h
$(CUDR) -o $@ -c lal_lj_expand_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_long_cs.cubin: lal_coul_long_cs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_coul_long_cs.cu
$(OBJ_DIR)/coul_long_cs_cubin.h: $(OBJ_DIR)/coul_long_cs.cubin
$(BIN2C) -c -n coul_long_cs $(OBJ_DIR)/coul_long_cs.cubin > $(OBJ_DIR)/coul_long_cs_cubin.h
$(OBJ_DIR)/lal_coul_long_cs.o: $(ALL_H) lal_coul_long_cs.h lal_coul_long_cs.cpp $(OBJ_DIR)/coul_long_cs_cubin.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_coul_long.o
$(CUDR) -o $@ -c lal_coul_long_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_long_cs_ext.o: $(ALL_H) lal_coul_long_cs.h lal_coul_long_cs_ext.cpp lal_coul_long.h
$(CUDR) -o $@ -c lal_coul_long_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_long_cs.cubin: lal_born_coul_long_cs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_born_coul_long_cs.cu
$(OBJ_DIR)/born_coul_long_cs_cubin.h: $(OBJ_DIR)/born_coul_long_cs.cubin
$(BIN2C) -c -n born_coul_long_cs $(OBJ_DIR)/born_coul_long_cs.cubin > $(OBJ_DIR)/born_coul_long_cs_cubin.h
$(OBJ_DIR)/lal_born_coul_long_cs.o: $(ALL_H) lal_born_coul_long_cs.h lal_born_coul_long_cs.cpp $(OBJ_DIR)/born_coul_long_cs_cubin.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_born_coul_long.o
$(CUDR) -o $@ -c lal_born_coul_long_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_long_cs_ext.o: $(ALL_H) lal_born_coul_long_cs.h lal_born_coul_long_cs_ext.cpp lal_born_coul_long.h
$(CUDR) -o $@ -c lal_born_coul_long_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_wolf_cs.cubin: lal_born_coul_wolf_cs.cu lal_precision.h lal_preprocessor.h
$(CUDA) --fatbin -DNV_KERNEL -o $@ lal_born_coul_wolf_cs.cu
$(OBJ_DIR)/born_coul_wolf_cs_cubin.h: $(OBJ_DIR)/born_coul_wolf_cs.cubin
$(BIN2C) -c -n born_coul_wolf_cs $(OBJ_DIR)/born_coul_wolf_cs.cubin > $(OBJ_DIR)/born_coul_wolf_cs_cubin.h
$(OBJ_DIR)/lal_born_coul_wolf_cs.o: $(ALL_H) lal_born_coul_wolf_cs.h lal_born_coul_wolf_cs.cpp $(OBJ_DIR)/born_coul_wolf_cs_cubin.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_born_coul_wolf.o
$(CUDR) -o $@ -c lal_born_coul_wolf_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_wolf_cs_ext.o: $(ALL_H) lal_born_coul_wolf_cs.h lal_born_coul_wolf_cs_ext.cpp lal_born_coul_wolf.h
$(CUDR) -o $@ -c lal_born_coul_wolf_cs_ext.cpp -I$(OBJ_DIR)
$(BIN_DIR)/nvc_get_devices: ./geryon/ucl_get_devices.cpp $(NVD_H)
$(CUDR) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_CUDADR $(CUDA_LIB) -lcuda
# build libgpu.a
$(GPU_LIB): $(OBJS) $(CUDPP)
$(AR) -crusv $(GPU_LIB) $(OBJS) $(CUDPP)
@cp $(EXTRAMAKE) Makefile.lammps
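# ar flags: c = create the archive if needed, r = insert or replace members,
# u = only replace members that are newer, s = write a symbol index, and
# v = verbose. The Makefile.lammps copied from $(EXTRAMAKE) tells the main
# LAMMPS build which extra paths and system libraries to use when linking
# against libgpu.a.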
# test app for querying device info
$(BIN_DIR)/nvc_get_devices: ./geryon/ucl_get_devices.cpp $(NVD_H)
$(CUDR) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_CUDADR $(CUDA_LIB) -lcuda
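# Usage sketch: after a successful build, running $(BIN_DIR)/nvc_get_devices
# lists the CUDA devices visible through the driver API, a quick sanity
# check that toolkit and driver agree before launching LAMMPS itself.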
clean:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUDPP) $(CBNS) *.linkinfo
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUDPP) $(CUHS) *.linkinfo
veryclean: clean
-rm -rf *~ *.linkinfo
cleanlib:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CBNS) *.linkinfo
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(CUHS) *.linkinfo

View File

@ -1,671 +1,54 @@
OCL = $(OCL_CPP) $(OCL_PREC) $(OCL_TUNE) -DUSE_OPENCL
OCL_LIB = $(LIB_DIR)/libgpu.a
# Headers for Geryon
UCL_H = $(wildcard ./geryon/ucl*.h)
OCL_H = $(wildcard ./geryon/ocl*.h) $(UCL_H)
# Headers for Pair Stuff
PAIR_H = lal_atom.h lal_answer.h lal_neighbor_shared.h \
lal_neighbor.h lal_precision.h lal_device.h \
lal_balance.h lal_pppm.h
# Headers for Preprocessor/Auxiliary Functions
OCL_H = $(wildcard ./geryon/ocl*.h) $(UCL_H) lal_preprocessor.h
PRE1_H = lal_preprocessor.h lal_aux_fun1.h
ALL_H = $(OCL_H) $(wildcard ./lal_*.h)
ALL_H = $(OCL_H) $(PAIR_H)
# Source files
SRCS := $(wildcard ./lal_*.cpp)
OBJS := $(subst ./,$(OBJ_DIR)/,$(SRCS:%.cpp=%.o))
CUS := $(wildcard lal_*.cu)
KERS := $(subst ./,$(OBJ_DIR)/,$(CUS:lal_%.cu=%_cl.h))
KERS := $(addprefix $(OBJ_DIR)/, $(KERS))
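# Sketch of the KERS derivation above, assuming lal_lj.cu is among the files
# matched by $(wildcard lal_*.cu):
#   CUS                     = ... lal_lj.cu ...
#   $(CUS:lal_%.cu=%_cl.h)  = ... lj_cl.h ...
#   after addprefix         = ... $(OBJ_DIR)/lj_cl.h ...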
# targets
GPU_LIB = $(LIB_DIR)/libgpu.a
EXECS = $(BIN_DIR)/ocl_get_devices
OBJS = $(OBJ_DIR)/lal_atom.o $(OBJ_DIR)/lal_answer.o \
$(OBJ_DIR)/lal_neighbor_shared.o $(OBJ_DIR)/lal_neighbor.o \
$(OBJ_DIR)/lal_device.o $(OBJ_DIR)/lal_base_atomic.o \
$(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_base_ellipsoid.o \
$(OBJ_DIR)/lal_base_dipole.o $(OBJ_DIR)/lal_base_three.o \
$(OBJ_DIR)/lal_base_dpd.o \
$(OBJ_DIR)/lal_pppm.o $(OBJ_DIR)/lal_pppm_ext.o \
$(OBJ_DIR)/lal_gayberne.o $(OBJ_DIR)/lal_gayberne_ext.o \
$(OBJ_DIR)/lal_re_squared.o $(OBJ_DIR)/lal_re_squared_ext.o \
$(OBJ_DIR)/lal_lj.o $(OBJ_DIR)/lal_lj_ext.o \
$(OBJ_DIR)/lal_lj96.o $(OBJ_DIR)/lal_lj96_ext.o \
$(OBJ_DIR)/lal_lj_expand.o $(OBJ_DIR)/lal_lj_expand_ext.o \
$(OBJ_DIR)/lal_lj_coul.o $(OBJ_DIR)/lal_lj_coul_ext.o \
$(OBJ_DIR)/lal_lj_coul_long.o $(OBJ_DIR)/lal_lj_coul_long_ext.o \
$(OBJ_DIR)/lal_lj_dsf.o $(OBJ_DIR)/lal_lj_dsf_ext.o \
$(OBJ_DIR)/lal_lj_class2_long.o $(OBJ_DIR)/lal_lj_class2_long_ext.o \
$(OBJ_DIR)/lal_coul_long.o $(OBJ_DIR)/lal_coul_long_ext.o \
$(OBJ_DIR)/lal_morse.o $(OBJ_DIR)/lal_morse_ext.o \
$(OBJ_DIR)/lal_charmm_long.o $(OBJ_DIR)/lal_charmm_long_ext.o \
$(OBJ_DIR)/lal_lj_sdk.o $(OBJ_DIR)/lal_lj_sdk_ext.o \
$(OBJ_DIR)/lal_lj_sdk_long.o $(OBJ_DIR)/lal_lj_sdk_long_ext.o \
$(OBJ_DIR)/lal_eam.o $(OBJ_DIR)/lal_eam_ext.o \
$(OBJ_DIR)/lal_eam_fs_ext.o $(OBJ_DIR)/lal_eam_alloy_ext.o \
$(OBJ_DIR)/lal_buck.o $(OBJ_DIR)/lal_buck_ext.o \
$(OBJ_DIR)/lal_buck_coul.o $(OBJ_DIR)/lal_buck_coul_ext.o \
$(OBJ_DIR)/lal_buck_coul_long.o $(OBJ_DIR)/lal_buck_coul_long_ext.o \
$(OBJ_DIR)/lal_table.o $(OBJ_DIR)/lal_table_ext.o \
$(OBJ_DIR)/lal_yukawa.o $(OBJ_DIR)/lal_yukawa_ext.o \
$(OBJ_DIR)/lal_born.o $(OBJ_DIR)/lal_born_ext.o \
$(OBJ_DIR)/lal_born_coul_wolf.o $(OBJ_DIR)/lal_born_coul_wolf_ext.o \
$(OBJ_DIR)/lal_born_coul_long.o $(OBJ_DIR)/lal_born_coul_long_ext.o \
$(OBJ_DIR)/lal_dipole_lj.o $(OBJ_DIR)/lal_dipole_lj_ext.o \
$(OBJ_DIR)/lal_dipole_lj_sf.o $(OBJ_DIR)/lal_dipole_lj_sf_ext.o \
$(OBJ_DIR)/lal_colloid.o $(OBJ_DIR)/lal_colloid_ext.o \
$(OBJ_DIR)/lal_gauss.o $(OBJ_DIR)/lal_gauss_ext.o \
$(OBJ_DIR)/lal_yukawa_colloid.o $(OBJ_DIR)/lal_yukawa_colloid_ext.o \
$(OBJ_DIR)/lal_lj_coul_debye.o $(OBJ_DIR)/lal_lj_coul_debye_ext.o \
$(OBJ_DIR)/lal_coul_dsf.o $(OBJ_DIR)/lal_coul_dsf_ext.o \
$(OBJ_DIR)/lal_sw.o $(OBJ_DIR)/lal_sw_ext.o \
$(OBJ_DIR)/lal_vashishta.o $(OBJ_DIR)/lal_vashishta_ext.o \
$(OBJ_DIR)/lal_beck.o $(OBJ_DIR)/lal_beck_ext.o \
$(OBJ_DIR)/lal_mie.o $(OBJ_DIR)/lal_mie_ext.o \
$(OBJ_DIR)/lal_soft.o $(OBJ_DIR)/lal_soft_ext.o \
$(OBJ_DIR)/lal_lj_coul_msm.o $(OBJ_DIR)/lal_lj_coul_msm_ext.o \
$(OBJ_DIR)/lal_lj_gromacs.o $(OBJ_DIR)/lal_lj_gromacs_ext.o \
$(OBJ_DIR)/lal_dpd.o $(OBJ_DIR)/lal_dpd_ext.o $(OBJ_DIR)/lal_dpd_tstat_ext.o \
$(OBJ_DIR)/lal_tersoff.o $(OBJ_DIR)/lal_tersoff_ext.o \
$(OBJ_DIR)/lal_tersoff_zbl.o $(OBJ_DIR)/lal_tersoff_zbl_ext.o \
$(OBJ_DIR)/lal_tersoff_mod.o $(OBJ_DIR)/lal_tersoff_mod_ext.o \
$(OBJ_DIR)/lal_coul.o $(OBJ_DIR)/lal_coul_ext.o \
$(OBJ_DIR)/lal_coul_debye.o $(OBJ_DIR)/lal_coul_debye_ext.o \
$(OBJ_DIR)/lal_zbl.o $(OBJ_DIR)/lal_zbl_ext.o \
$(OBJ_DIR)/lal_lj_cubic.o $(OBJ_DIR)/lal_lj_cubic_ext.o \
$(OBJ_DIR)/lal_ufm.o $(OBJ_DIR)/lal_ufm_ext.o \
$(OBJ_DIR)/lal_dipole_long_lj.o $(OBJ_DIR)/lal_dipole_long_lj_ext.o \
$(OBJ_DIR)/lal_lj_expand_coul_long.o $(OBJ_DIR)/lal_lj_expand_coul_long_ext.o \
$(OBJ_DIR)/lal_coul_long_cs.o $(OBJ_DIR)/lal_coul_long_cs_ext.o \
$(OBJ_DIR)/lal_born_coul_long_cs.o $(OBJ_DIR)/lal_born_coul_long_cs_ext.o \
$(OBJ_DIR)/lal_born_coul_wolf_cs.o $(OBJ_DIR)/lal_born_coul_wolf_cs_ext.o \
$(OBJ_DIR)/lal_lj_tip4p_long.o $(OBJ_DIR)/lal_lj_tip4p_long_ext.o
KERS = $(OBJ_DIR)/device_cl.h $(OBJ_DIR)/atom_cl.h \
$(OBJ_DIR)/neighbor_cpu_cl.h $(OBJ_DIR)/pppm_cl.h \
$(OBJ_DIR)/ellipsoid_nbor_cl.h $(OBJ_DIR)/gayberne_cl.h \
$(OBJ_DIR)/gayberne_lj_cl.h $(OBJ_DIR)/re_squared_cl.h \
$(OBJ_DIR)/re_squared_lj_cl.h $(OBJ_DIR)/lj_cl.h $(OBJ_DIR)/lj96_cl.h \
$(OBJ_DIR)/lj_expand_cl.h $(OBJ_DIR)/lj_coul_cl.h \
$(OBJ_DIR)/lj_coul_long_cl.h $(OBJ_DIR)/lj_dsf_cl.h \
$(OBJ_DIR)/lj_class2_long_cl.h \
$(OBJ_DIR)/coul_long_cl.h $(OBJ_DIR)/morse_cl.h \
$(OBJ_DIR)/charmm_long_cl.h $(OBJ_DIR)/lj_sdk_cl.h \
$(OBJ_DIR)/lj_sdk_long_cl.h $(OBJ_DIR)/neighbor_gpu_cl.h \
$(OBJ_DIR)/eam_cl.h $(OBJ_DIR)/buck_cl.h \
$(OBJ_DIR)/buck_coul_cl.h $(OBJ_DIR)/buck_coul_long_cl.h \
$(OBJ_DIR)/table_cl.h $(OBJ_DIR)/yukawa_cl.h \
$(OBJ_DIR)/born_cl.h $(OBJ_DIR)/born_coul_wolf_cl.h \
$(OBJ_DIR)/born_coul_long_cl.h $(OBJ_DIR)/dipole_lj_cl.h \
$(OBJ_DIR)/dipole_lj_sf_cl.h $(OBJ_DIR)/colloid_cl.h \
$(OBJ_DIR)/gauss_cl.h $(OBJ_DIR)/yukawa_colloid_cl.h \
$(OBJ_DIR)/lj_coul_debye_cl.h $(OBJ_DIR)/coul_dsf_cl.h \
$(OBJ_DIR)/sw_cl.h $(OBJ_DIR)/beck_cl.h $(OBJ_DIR)/mie_cl.h \
$(OBJ_DIR)/soft_cl.h $(OBJ_DIR)/lj_coul_msm_cl.h \
$(OBJ_DIR)/lj_gromacs_cl.h $(OBJ_DIR)/dpd_cl.h \
$(OBJ_DIR)/lj_gauss_cl.h $(OBJ_DIR)/dzugutov_cl.h \
$(OBJ_DIR)/tersoff_cl.h $(OBJ_DIR)/tersoff_zbl_cl.h \
$(OBJ_DIR)/tersoff_mod_cl.h $(OBJ_DIR)/coul_cl.h \
$(OBJ_DIR)/coul_debye_cl.h $(OBJ_DIR)/zbl_cl.h \
$(OBJ_DIR)/lj_cubic_cl.h $(OBJ_DIR)/vashishta_cl.h \
$(OBJ_DIR)/ufm_cl.h $(OBJ_DIR)/dipole_long_lj_cl.h \
$(OBJ_DIR)/lj_expand_coul_long_cl.h $(OBJ_DIR)/coul_long_cs_cl.h \
$(OBJ_DIR)/born_coul_long_cs_cl.h $(OBJ_DIR)/born_coul_wolf_cs_cl.h \
$(OBJ_DIR)/lj_tip4p_long_cl.h
OCL_EXECS = $(BIN_DIR)/ocl_get_devices
all: $(OBJ_DIR) $(OCL_LIB) $(EXECS)
all: $(OBJ_DIR) $(KERS) $(GPU_LIB) $(EXECS)
$(OBJ_DIR):
mkdir -p $@
$(OBJ_DIR)/atom_cl.h: lal_atom.cu lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh atom lal_preprocessor.h lal_atom.cu $(OBJ_DIR)/atom_cl.h
# Compiler and linker
$(OBJ_DIR)/lal_atom.o: lal_atom.cpp lal_atom.h $(OCL_H) $(OBJ_DIR)/atom_cl.h
$(OCL) -o $@ -c lal_atom.cpp -I$(OBJ_DIR)
OCL = $(OCL_CPP) $(OCL_PREC) $(OCL_TUNE) -DUSE_OPENCL
$(OBJ_DIR)/lal_answer.o: lal_answer.cpp lal_answer.h $(OCL_H)
$(OCL) -o $@ -c lal_answer.cpp -I$(OBJ_DIR)
# device code compilation
$(OBJ_DIR)/neighbor_cpu_cl.h: lal_neighbor_cpu.cu lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh neighbor_cpu lal_preprocessor.h lal_neighbor_cpu.cu $(OBJ_DIR)/neighbor_cpu_cl.h
$(OBJ_DIR)/%_cl.h: lal_%.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh $* $(PRE1_H) $< $@;
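# Unlike the CUDA path, which embeds a precompiled fat binary, the rule above
# stringifies the OpenCL kernel source at build time: file_to_cstr.sh is
# expected to wrap the given headers and .cu file in a C string constant
# named after its first argument, so the host code can hand the text to the
# OpenCL runtime compiler when the library initializes. Roughly, for lal_lj.cu:
#   const char * lj = "<contents of lal_preprocessor.h, lal_aux_fun1.h, lal_lj.cu>";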
$(OBJ_DIR)/neighbor_gpu_cl.h: lal_neighbor_gpu.cu lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh neighbor_gpu lal_preprocessor.h lal_neighbor_gpu.cu $(OBJ_DIR)/neighbor_gpu_cl.h
# host code compilation
$(OBJ_DIR)/lal_neighbor_shared.o: lal_neighbor_shared.cpp lal_neighbor_shared.h $(OCL_H) $(OBJ_DIR)/neighbor_cpu_cl.h $(OBJ_DIR)/neighbor_gpu_cl.h
$(OCL) -o $@ -c lal_neighbor_shared.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_neighbor.o: lal_neighbor.cpp lal_neighbor.h $(OCL_H) lal_neighbor_shared.h
$(OCL) -o $@ -c lal_neighbor.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/device_cl.h: lal_device.cu lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh device lal_preprocessor.h lal_device.cu $(OBJ_DIR)/device_cl.h
$(OBJ_DIR)/lal_device.o: lal_device.cpp lal_device.h $(ALL_H) $(OBJ_DIR)/device_cl.h
$(OCL) -o $@ -c lal_device.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_atomic.o: $(OCL_H) lal_base_atomic.h lal_base_atomic.cpp
$(OCL) -o $@ -c lal_base_atomic.cpp
$(OBJ_DIR)/lal_base_charge.o: $(OCL_H) lal_base_charge.h lal_base_charge.cpp
$(OCL) -o $@ -c lal_base_charge.cpp
$(OBJ_DIR)/lal_base_ellipsoid.o: $(OCL_H) lal_base_ellipsoid.h lal_base_ellipsoid.cpp $(OBJ_DIR)/ellipsoid_nbor_cl.h
$(OCL) -o $@ -c lal_base_ellipsoid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_dipole.o: $(OCL_H) lal_base_dipole.h lal_base_dipole.cpp
$(OCL) -o $@ -c lal_base_dipole.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_base_three.o: $(OCL_H) lal_base_three.h lal_base_three.cpp
$(OCL) -o $@ -c lal_base_three.cpp
$(OBJ_DIR)/lal_base_dpd.o: $(OCL_H) lal_base_dpd.h lal_base_dpd.cpp
$(OCL) -o $@ -c lal_base_dpd.cpp
$(OBJ_DIR)/pppm_cl.h: lal_pppm.cu lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh pppm lal_preprocessor.h lal_pppm.cu $(OBJ_DIR)/pppm_cl.h;
$(OBJ_DIR)/lal_pppm.o: $(ALL_H) lal_pppm.h lal_pppm.cpp $(OBJ_DIR)/pppm_cl.h
$(OCL) -o $@ -c lal_pppm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_pppm_ext.o: $(ALL_H) lal_pppm.h lal_pppm_ext.cpp
$(OCL) -o $@ -c lal_pppm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/ellipsoid_nbor_cl.h: lal_ellipsoid_nbor.cu lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh ellipsoid_nbor lal_preprocessor.h lal_ellipsoid_nbor.cu $(OBJ_DIR)/ellipsoid_nbor_cl.h
$(OBJ_DIR)/gayberne_cl.h: lal_gayberne.cu lal_ellipsoid_extra.h lal_aux_fun1.h lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh gayberne lal_preprocessor.h lal_aux_fun1.h lal_ellipsoid_extra.h lal_gayberne.cu $(OBJ_DIR)/gayberne_cl.h;
$(OBJ_DIR)/gayberne_lj_cl.h: lal_gayberne_lj.cu lal_ellipsoid_extra.h lal_aux_fun1.h lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh gayberne_lj lal_preprocessor.h lal_aux_fun1.h lal_ellipsoid_extra.h lal_gayberne_lj.cu $(OBJ_DIR)/gayberne_lj_cl.h;
$(OBJ_DIR)/lal_gayberne.o: $(ALL_H) lal_gayberne.h lal_gayberne.cpp $(OBJ_DIR)/gayberne_cl.h $(OBJ_DIR)/gayberne_lj_cl.h $(OBJ_DIR)/lal_base_ellipsoid.o
$(OCL) -o $@ -c lal_gayberne.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_gayberne_ext.o: $(ALL_H) $(OBJ_DIR)/lal_gayberne.o lal_gayberne_ext.cpp
$(OCL) -o $@ -c lal_gayberne_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/re_squared_cl.h: lal_re_squared.cu lal_ellipsoid_extra.h lal_aux_fun1.h lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh re_squared lal_preprocessor.h lal_aux_fun1.h lal_ellipsoid_extra.h lal_re_squared.cu $(OBJ_DIR)/re_squared_cl.h;
$(OBJ_DIR)/re_squared_lj_cl.h: lal_re_squared_lj.cu lal_ellipsoid_extra.h lal_aux_fun1.h lal_preprocessor.h
$(BSH) ./geryon/file_to_cstr.sh re_squared_lj lal_preprocessor.h lal_aux_fun1.h lal_ellipsoid_extra.h lal_re_squared_lj.cu $(OBJ_DIR)/re_squared_lj_cl.h;
$(OBJ_DIR)/lal_re_squared.o: $(ALL_H) lal_re_squared.h lal_re_squared.cpp $(OBJ_DIR)/re_squared_cl.h $(OBJ_DIR)/re_squared_lj_cl.h $(OBJ_DIR)/lal_base_ellipsoid.o
$(OCL) -o $@ -c lal_re_squared.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_re_squared_ext.o: $(ALL_H) $(OBJ_DIR)/lal_re_squared.o lal_re_squared_ext.cpp
$(OCL) -o $@ -c lal_re_squared_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_cl.h: lal_lj.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj $(PRE1_H) lal_lj.cu $(OBJ_DIR)/lj_cl.h;
$(OBJ_DIR)/lal_lj.o: $(ALL_H) lal_lj.h lal_lj.cpp $(OBJ_DIR)/lj_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_ext.o: $(ALL_H) lal_lj.h lal_lj_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_tip4p_long_cl.h: lal_lj_tip4p_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_tip4p_long $(PRE1_H) lal_lj_tip4p_long.cu $(OBJ_DIR)/lj_tip4p_long_cl.h;
$(OBJ_DIR)/lal_lj_tip4p_long.o: $(ALL_H) lal_lj_tip4p_long.h lal_lj_tip4p_long.cpp $(OBJ_DIR)/lj_tip4p_long_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj_tip4p_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_tip4p_long_ext.o: $(ALL_H) lal_lj_tip4p_long.h lal_lj_tip4p_long_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj_tip4p_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_cl.h: lal_lj_coul.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_coul $(PRE1_H) lal_lj_coul.cu $(OBJ_DIR)/lj_coul_cl.h;
$(OBJ_DIR)/lal_lj_coul.o: $(ALL_H) lal_lj_coul.h lal_lj_coul.cpp $(OBJ_DIR)/lj_coul_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_ext.o: $(ALL_H) lal_lj_coul.h lal_lj_coul_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_long_cl.h: lal_lj_coul_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_coul_long $(PRE1_H) lal_lj_coul_long.cu $(OBJ_DIR)/lj_coul_long_cl.h;
$(OBJ_DIR)/lal_lj_coul_long.o: $(ALL_H) lal_lj_coul_long.h lal_lj_coul_long.cpp $(OBJ_DIR)/lj_coul_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_long_ext.o: $(ALL_H) lal_lj_coul_long.h lal_lj_coul_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_dsf_cl.h: lal_lj_dsf.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_dsf $(PRE1_H) lal_lj_dsf.cu $(OBJ_DIR)/lj_dsf_cl.h;
$(OBJ_DIR)/lal_lj_dsf.o: $(ALL_H) lal_lj_dsf.h lal_lj_dsf.cpp $(OBJ_DIR)/lj_dsf_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_dsf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_dsf_ext.o: $(ALL_H) lal_lj_dsf.h lal_lj_dsf_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_dsf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_class2_long_cl.h: lal_lj_class2_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_class2_long $(PRE1_H) lal_lj_class2_long.cu $(OBJ_DIR)/lj_class2_long_cl.h;
$(OBJ_DIR)/lal_lj_class2_long.o: $(ALL_H) lal_lj_class2_long.h lal_lj_class2_long.cpp $(OBJ_DIR)/lj_class2_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_class2_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_class2_long_ext.o: $(ALL_H) lal_lj_class2_long.h lal_lj_class2_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_class2_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_long_cl.h: lal_coul_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh coul_long $(PRE1_H) lal_coul_long.cu $(OBJ_DIR)/coul_long_cl.h;
$(OBJ_DIR)/lal_coul_long.o: $(ALL_H) lal_coul_long.h lal_coul_long.cpp $(OBJ_DIR)/coul_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_long_ext.o: $(ALL_H) lal_coul_long.h lal_coul_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/morse_cl.h: lal_morse.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh morse $(PRE1_H) lal_morse.cu $(OBJ_DIR)/morse_cl.h;
$(OBJ_DIR)/lal_morse.o: $(ALL_H) lal_morse.h lal_morse.cpp $(OBJ_DIR)/morse_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_morse.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_morse_ext.o: $(ALL_H) lal_morse.h lal_morse_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_morse_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/charmm_long_cl.h: lal_charmm_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh charmm_long $(PRE1_H) lal_charmm_long.cu $(OBJ_DIR)/charmm_long_cl.h;
$(OBJ_DIR)/lal_charmm_long.o: $(ALL_H) lal_charmm_long.h lal_charmm_long.cpp $(OBJ_DIR)/charmm_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_charmm_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_charmm_long_ext.o: $(ALL_H) lal_charmm_long.h lal_charmm_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_charmm_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj96_cl.h: lal_lj96.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj96 $(PRE1_H) lal_lj96.cu $(OBJ_DIR)/lj96_cl.h;
$(OBJ_DIR)/lal_lj96.o: $(ALL_H) lal_lj96.h lal_lj96.cpp $(OBJ_DIR)/lj96_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj96.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj96_ext.o: $(ALL_H) lal_lj96.h lal_lj96_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj96_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_expand_cl.h: lal_lj_expand.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_expand $(PRE1_H) lal_lj_expand.cu $(OBJ_DIR)/lj_expand_cl.h;
$(OBJ_DIR)/lal_lj_expand.o: $(ALL_H) lal_lj_expand.h lal_lj_expand.cpp $(OBJ_DIR)/lj_expand_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj_expand.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_expand_ext.o: $(ALL_H) lal_lj_expand.h lal_lj_expand_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj_expand_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_sdk_cl.h: lal_lj_sdk.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_sdk $(PRE1_H) lal_lj_sdk.cu $(OBJ_DIR)/lj_sdk_cl.h;
$(OBJ_DIR)/lal_lj_sdk.o: $(ALL_H) lal_lj_sdk.h lal_lj_sdk.cpp $(OBJ_DIR)/lj_sdk_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj_sdk.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_sdk_ext.o: $(ALL_H) lal_lj_sdk.h lal_lj_sdk_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj_sdk_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_sdk_long_cl.h: lal_lj_sdk_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_sdk_long $(PRE1_H) lal_lj_sdk_long.cu $(OBJ_DIR)/lj_sdk_long_cl.h;
$(OBJ_DIR)/lal_lj_sdk_long.o: $(ALL_H) lal_lj_sdk_long.h lal_lj_sdk_long.cpp $(OBJ_DIR)/lj_sdk_long_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj_sdk_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_sdk_long_ext.o: $(ALL_H) lal_lj_sdk_long.h lal_lj_sdk_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_sdk_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/eam_cl.h: lal_eam.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh eam $(PRE1_H) lal_eam.cu $(OBJ_DIR)/eam_cl.h;
$(OBJ_DIR)/lal_eam.o: $(ALL_H) lal_eam.h lal_eam.cpp $(OBJ_DIR)/eam_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_eam.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_ext.o: $(ALL_H) lal_eam.h lal_eam_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_eam_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_fs_ext.o: $(ALL_H) lal_eam.h lal_eam_fs_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_eam_fs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_eam_alloy_ext.o: $(ALL_H) lal_eam.h lal_eam_alloy_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_eam_alloy_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_cl.h: lal_buck.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh buck $(PRE1_H) lal_buck.cu $(OBJ_DIR)/buck_cl.h;
$(OBJ_DIR)/lal_buck.o: $(ALL_H) lal_buck.h lal_buck.cpp $(OBJ_DIR)/buck_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_buck.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_ext.o: $(ALL_H) lal_buck.h lal_buck_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_buck_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_coul_cl.h: lal_buck_coul.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh buck_coul $(PRE1_H) lal_buck_coul.cu $(OBJ_DIR)/buck_coul_cl.h;
$(OBJ_DIR)/lal_buck_coul.o: $(ALL_H) lal_buck_coul.h lal_buck_coul.cpp $(OBJ_DIR)/buck_coul_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_buck_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_coul_ext.o: $(ALL_H) lal_buck_coul.h lal_buck_coul_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_buck_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/buck_coul_long_cl.h: lal_buck_coul_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh buck_coul_long $(PRE1_H) lal_buck_coul_long.cu $(OBJ_DIR)/buck_coul_long_cl.h;
$(OBJ_DIR)/lal_buck_coul_long.o: $(ALL_H) lal_buck_coul_long.h lal_buck_coul_long.cpp $(OBJ_DIR)/buck_coul_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_buck_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_buck_coul_long_ext.o: $(ALL_H) lal_buck_coul_long.h lal_buck_coul_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_buck_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/table_cl.h: lal_table.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh table $(PRE1_H) lal_table.cu $(OBJ_DIR)/table_cl.h;
$(OBJ_DIR)/lal_table.o: $(ALL_H) lal_table.h lal_table.cpp $(OBJ_DIR)/table_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_table.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_table_ext.o: $(ALL_H) lal_table.h lal_table_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_table_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/yukawa_cl.h: lal_yukawa.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh yukawa $(PRE1_H) lal_yukawa.cu $(OBJ_DIR)/yukawa_cl.h;
$(OBJ_DIR)/lal_yukawa.o: $(ALL_H) lal_yukawa.h lal_yukawa.cpp $(OBJ_DIR)/yukawa_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_yukawa.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_yukawa_ext.o: $(ALL_H) lal_yukawa.h lal_yukawa_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_yukawa_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_cl.h: lal_born.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh born $(PRE1_H) lal_born.cu $(OBJ_DIR)/born_cl.h;
$(OBJ_DIR)/lal_born.o: $(ALL_H) lal_born.h lal_born.cpp $(OBJ_DIR)/born_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_born.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_ext.o: $(ALL_H) lal_born.h lal_born_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_born_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_wolf_cl.h: lal_born_coul_wolf.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh born_coul_wolf $(PRE1_H) lal_born_coul_wolf.cu $(OBJ_DIR)/born_coul_wolf_cl.h;
$(OBJ_DIR)/lal_born_coul_wolf.o: $(ALL_H) lal_born_coul_wolf.h lal_born_coul_wolf.cpp $(OBJ_DIR)/born_coul_wolf_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_born_coul_wolf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_wolf_ext.o: $(ALL_H) lal_born_coul_wolf.h lal_born_coul_wolf_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_born_coul_wolf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_long_cl.h: lal_born_coul_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh born_coul_long $(PRE1_H) lal_born_coul_long.cu $(OBJ_DIR)/born_coul_long_cl.h;
$(OBJ_DIR)/lal_born_coul_long.o: $(ALL_H) lal_born_coul_long.h lal_born_coul_long.cpp $(OBJ_DIR)/born_coul_long_cl.h $(OBJ_DIR)/born_coul_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_born_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_long_ext.o: $(ALL_H) lal_born_coul_long.h lal_born_coul_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_born_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_lj_cl.h: lal_dipole_lj.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh dipole_lj $(PRE1_H) lal_dipole_lj.cu $(OBJ_DIR)/dipole_lj_cl.h;
$(OBJ_DIR)/lal_dipole_lj.o: $(ALL_H) lal_dipole_lj.h lal_dipole_lj.cpp $(OBJ_DIR)/dipole_lj_cl.h $(OBJ_DIR)/dipole_lj_cl.h $(OBJ_DIR)/lal_base_dipole.o
$(OCL) -o $@ -c lal_dipole_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_lj_ext.o: $(ALL_H) lal_dipole_lj.h lal_dipole_lj_ext.cpp lal_base_dipole.h
$(OCL) -o $@ -c lal_dipole_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_lj_sf_cl.h: lal_dipole_lj_sf.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh dipole_lj_sf $(PRE1_H) lal_dipole_lj_sf.cu $(OBJ_DIR)/dipole_lj_sf_cl.h;
$(OBJ_DIR)/lal_dipole_lj_sf.o: $(ALL_H) lal_dipole_lj_sf.h lal_dipole_lj_sf.cpp $(OBJ_DIR)/dipole_lj_sf_cl.h $(OBJ_DIR)/dipole_lj_sf_cl.h $(OBJ_DIR)/lal_base_dipole.o
$(OCL) -o $@ -c lal_dipole_lj_sf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_lj_sf_ext.o: $(ALL_H) lal_dipole_lj_sf.h lal_dipole_lj_sf_ext.cpp lal_base_dipole.h
$(OCL) -o $@ -c lal_dipole_lj_sf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/colloid_cl.h: lal_colloid.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh colloid $(PRE1_H) lal_colloid.cu $(OBJ_DIR)/colloid_cl.h;
$(OBJ_DIR)/lal_colloid.o: $(ALL_H) lal_colloid.h lal_colloid.cpp $(OBJ_DIR)/colloid_cl.h $(OBJ_DIR)/colloid_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_colloid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_colloid_ext.o: $(ALL_H) lal_colloid.h lal_colloid_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_colloid_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/gauss_cl.h: lal_gauss.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh gauss $(PRE1_H) lal_gauss.cu $(OBJ_DIR)/gauss_cl.h;
$(OBJ_DIR)/lal_gauss.o: $(ALL_H) lal_gauss.h lal_gauss.cpp $(OBJ_DIR)/gauss_cl.h $(OBJ_DIR)/gauss_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_gauss.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_gauss_ext.o: $(ALL_H) lal_gauss.h lal_gauss_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_gauss_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/yukawa_colloid_cl.h: lal_yukawa_colloid.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh yukawa_colloid $(PRE1_H) lal_yukawa_colloid.cu $(OBJ_DIR)/yukawa_colloid_cl.h;
$(OBJ_DIR)/lal_yukawa_colloid.o: $(ALL_H) lal_yukawa_colloid.h lal_yukawa_colloid.cpp $(OBJ_DIR)/yukawa_colloid_cl.h $(OBJ_DIR)/yukawa_colloid_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_yukawa_colloid.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_yukawa_colloid_ext.o: $(ALL_H) lal_yukawa_colloid.h lal_yukawa_colloid_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_yukawa_colloid_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_debye_cl.h: lal_lj_coul_debye.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_coul_debye $(PRE1_H) lal_lj_coul_debye.cu $(OBJ_DIR)/lj_coul_debye_cl.h;
$(OBJ_DIR)/lal_lj_coul_debye.o: $(ALL_H) lal_lj_coul_debye.h lal_lj_coul_debye.cpp $(OBJ_DIR)/lj_coul_debye_cl.h $(OBJ_DIR)/lj_coul_debye_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_coul_debye.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_debye_ext.o: $(ALL_H) lal_lj_coul_debye.h lal_lj_coul_debye_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_coul_debye_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_dsf_cl.h: lal_coul_dsf.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh coul_dsf $(PRE1_H) lal_coul_dsf.cu $(OBJ_DIR)/coul_dsf_cl.h;
$(OBJ_DIR)/lal_coul_dsf.o: $(ALL_H) lal_coul_dsf.h lal_coul_dsf.cpp $(OBJ_DIR)/coul_dsf_cl.h $(OBJ_DIR)/coul_dsf_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_coul_dsf.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_dsf_ext.o: $(ALL_H) lal_coul_dsf.h lal_coul_dsf_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_coul_dsf_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/sw_cl.h: lal_sw.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh sw $(PRE1_H) lal_sw.cu $(OBJ_DIR)/sw_cl.h;
$(OBJ_DIR)/lal_sw.o: $(ALL_H) lal_sw.h lal_sw.cpp $(OBJ_DIR)/sw_cl.h $(OBJ_DIR)/sw_cl.h $(OBJ_DIR)/lal_base_three.o
$(OCL) -o $@ -c lal_sw.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_sw_ext.o: $(ALL_H) lal_sw.h lal_sw_ext.cpp lal_base_three.h
$(OCL) -o $@ -c lal_sw_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/vashishta_cl.h: lal_vashishta.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh vashishta $(PRE1_H) lal_vashishta.cu $(OBJ_DIR)/vashishta_cl.h;
$(OBJ_DIR)/lal_vashishta.o: $(ALL_H) lal_vashishta.h lal_vashishta.cpp $(OBJ_DIR)/vashishta_cl.h $(OBJ_DIR)/vashishta_cl.h $(OBJ_DIR)/lal_base_three.o
$(OCL) -o $@ -c lal_vashishta.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_vashishta_ext.o: $(ALL_H) lal_vashishta.h lal_vashishta_ext.cpp lal_base_three.h
$(OCL) -o $@ -c lal_vashishta_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/beck_cl.h: lal_beck.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh beck $(PRE1_H) lal_beck.cu $(OBJ_DIR)/beck_cl.h;
$(OBJ_DIR)/lal_beck.o: $(ALL_H) lal_beck.h lal_beck.cpp $(OBJ_DIR)/beck_cl.h $(OBJ_DIR)/beck_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_beck.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_beck_ext.o: $(ALL_H) lal_beck.h lal_beck_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_beck_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/mie_cl.h: lal_mie.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh mie $(PRE1_H) lal_mie.cu $(OBJ_DIR)/mie_cl.h;
$(OBJ_DIR)/lal_mie.o: $(ALL_H) lal_mie.h lal_mie.cpp $(OBJ_DIR)/mie_cl.h $(OBJ_DIR)/mie_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_mie.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_mie_ext.o: $(ALL_H) lal_mie.h lal_mie_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_mie_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/soft_cl.h: lal_soft.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh soft $(PRE1_H) lal_soft.cu $(OBJ_DIR)/soft_cl.h;
$(OBJ_DIR)/lal_soft.o: $(ALL_H) lal_soft.h lal_soft.cpp $(OBJ_DIR)/soft_cl.h $(OBJ_DIR)/soft_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_soft.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_soft_ext.o: $(ALL_H) lal_soft.h lal_soft_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_soft_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_coul_msm_cl.h: lal_lj_coul_msm.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_coul_msm $(PRE1_H) lal_lj_coul_msm.cu $(OBJ_DIR)/lj_coul_msm_cl.h;
$(OBJ_DIR)/lal_lj_coul_msm.o: $(ALL_H) lal_lj_coul_msm.h lal_lj_coul_msm.cpp $(OBJ_DIR)/lj_coul_msm_cl.h $(OBJ_DIR)/lj_coul_msm_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_coul_msm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_coul_msm_ext.o: $(ALL_H) lal_lj_coul_msm.h lal_lj_coul_msm_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_coul_msm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_gromacs_cl.h: lal_lj_gromacs.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_gromacs $(PRE1_H) lal_lj_gromacs.cu $(OBJ_DIR)/lj_gromacs_cl.h;
$(OBJ_DIR)/lal_lj_gromacs.o: $(ALL_H) lal_lj_gromacs.h lal_lj_gromacs.cpp $(OBJ_DIR)/lj_gromacs_cl.h $(OBJ_DIR)/lj_gromacs_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj_gromacs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_gromacs_ext.o: $(ALL_H) lal_lj_gromacs.h lal_lj_gromacs_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj_gromacs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dpd_cl.h: lal_dpd.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh dpd $(PRE1_H) lal_dpd.cu $(OBJ_DIR)/dpd_cl.h;
$(OBJ_DIR)/lal_dpd.o: $(ALL_H) lal_dpd.h lal_dpd.cpp $(OBJ_DIR)/dpd_cl.h $(OBJ_DIR)/dpd_cl.h $(OBJ_DIR)/lal_base_dpd.o
$(OCL) -o $@ -c lal_dpd.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dpd_ext.o: $(ALL_H) lal_dpd.h lal_dpd_ext.cpp lal_base_dpd.h
$(OCL) -o $@ -c lal_dpd_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dpd_tstat_ext.o: $(ALL_H) lal_dpd.h lal_dpd_tstat_ext.cpp lal_base_dpd.h
$(OCL) -o $@ -c lal_dpd_tstat_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_cl.h: lal_tersoff.cu lal_tersoff_extra.h $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh tersoff $(PRE1_H) lal_tersoff_extra.h lal_tersoff.cu $(OBJ_DIR)/tersoff_cl.h;
$(OBJ_DIR)/lal_tersoff.o: $(ALL_H) lal_tersoff.h lal_tersoff.cpp $(OBJ_DIR)/tersoff_cl.h $(OBJ_DIR)/tersoff_cl.h $(OBJ_DIR)/lal_base_three.o
$(OCL) -o $@ -c lal_tersoff.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_ext.o: $(ALL_H) lal_tersoff.h lal_tersoff_ext.cpp lal_base_three.h
$(OCL) -o $@ -c lal_tersoff_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_zbl_cl.h: lal_tersoff_zbl.cu lal_tersoff_zbl_extra.h $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh tersoff_zbl $(PRE1_H) lal_tersoff_zbl_extra.h lal_tersoff_zbl.cu $(OBJ_DIR)/tersoff_zbl_cl.h;
$(OBJ_DIR)/lal_tersoff_zbl.o: $(ALL_H) lal_tersoff_zbl.h lal_tersoff_zbl.cpp $(OBJ_DIR)/tersoff_zbl_cl.h $(OBJ_DIR)/tersoff_zbl_cl.h $(OBJ_DIR)/lal_base_three.o
$(OCL) -o $@ -c lal_tersoff_zbl.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_zbl_ext.o: $(ALL_H) lal_tersoff_zbl.h lal_tersoff_zbl_ext.cpp lal_base_three.h
$(OCL) -o $@ -c lal_tersoff_zbl_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/tersoff_mod_cl.h: lal_tersoff_mod.cu lal_tersoff_mod_extra.h $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh tersoff_mod $(PRE1_H) lal_tersoff_mod_extra.h lal_tersoff_mod.cu $(OBJ_DIR)/tersoff_mod_cl.h;
$(OBJ_DIR)/lal_tersoff_mod.o: $(ALL_H) lal_tersoff_mod.h lal_tersoff_mod.cpp $(OBJ_DIR)/tersoff_mod_cl.h $(OBJ_DIR)/tersoff_mod_cl.h $(OBJ_DIR)/lal_base_three.o
$(OCL) -o $@ -c lal_tersoff_mod.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_tersoff_mod_ext.o: $(ALL_H) lal_tersoff_mod.h lal_tersoff_mod_ext.cpp lal_base_three.h
$(OCL) -o $@ -c lal_tersoff_mod_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_cl.h: lal_coul.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh coul $(PRE1_H) lal_coul.cu $(OBJ_DIR)/coul_cl.h;
$(OBJ_DIR)/lal_coul.o: $(ALL_H) lal_coul.h lal_coul.cpp $(OBJ_DIR)/coul_cl.h $(OBJ_DIR)/coul_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_coul.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_ext.o: $(ALL_H) lal_coul.h lal_coul_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_coul_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_debye_cl.h: lal_coul_debye.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh coul_debye $(PRE1_H) lal_coul_debye.cu $(OBJ_DIR)/coul_debye_cl.h;
$(OBJ_DIR)/lal_coul_debye.o: $(ALL_H) lal_coul_debye.h lal_coul_debye.cpp $(OBJ_DIR)/coul_debye_cl.h $(OBJ_DIR)/coul_debye_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_coul_debye.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_debye_ext.o: $(ALL_H) lal_coul_debye.h lal_coul_debye_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_coul_debye_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/zbl_cl.h: lal_zbl.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh zbl $(PRE1_H) lal_zbl.cu $(OBJ_DIR)/zbl_cl.h;
$(OBJ_DIR)/lal_zbl.o: $(ALL_H) lal_zbl.h lal_zbl.cpp $(OBJ_DIR)/zbl_cl.h $(OBJ_DIR)/zbl_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_zbl.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_zbl_ext.o: $(ALL_H) lal_zbl.h lal_zbl_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_zbl_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_cubic_cl.h: lal_lj_cubic.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_cubic $(PRE1_H) lal_lj_cubic.cu $(OBJ_DIR)/lj_cubic_cl.h;
$(OBJ_DIR)/lal_lj_cubic.o: $(ALL_H) lal_lj_cubic.h lal_lj_cubic.cpp $(OBJ_DIR)/lj_cubic_cl.h $(OBJ_DIR)/lj_cubic_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_lj_cubic.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_cubic_ext.o: $(ALL_H) lal_lj_cubic.h lal_lj_cubic_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_lj_cubic_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/ufm_cl.h: lal_ufm.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh ufm $(PRE1_H) lal_ufm.cu $(OBJ_DIR)/ufm_cl.h;
$(OBJ_DIR)/lal_ufm.o: $(ALL_H) lal_ufm.h lal_ufm.cpp $(OBJ_DIR)/ufm_cl.h $(OBJ_DIR)/ufm_cl.h $(OBJ_DIR)/lal_base_atomic.o
$(OCL) -o $@ -c lal_ufm.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_ufm_ext.o: $(ALL_H) lal_ufm.h lal_ufm_ext.cpp lal_base_atomic.h
$(OCL) -o $@ -c lal_ufm_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/dipole_long_lj_cl.h: lal_dipole_long_lj.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh dipole_long_lj $(PRE1_H) lal_dipole_long_lj.cu $(OBJ_DIR)/dipole_long_lj_cl.h;
$(OBJ_DIR)/lal_dipole_long_lj.o: $(ALL_H) lal_dipole_long_lj.h lal_dipole_long_lj.cpp $(OBJ_DIR)/dipole_long_lj_cl.h $(OBJ_DIR)/dipole_long_lj_cl.h $(OBJ_DIR)/lal_base_dipole.o
$(OCL) -o $@ -c lal_dipole_long_lj.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_dipole_long_lj_ext.o: $(ALL_H) lal_dipole_long_lj.h lal_dipole_long_lj_ext.cpp lal_base_dipole.h
$(OCL) -o $@ -c lal_dipole_long_lj_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lj_expand_coul_long_cl.h: lal_lj_expand_coul_long.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh lj_expand_coul_long $(PRE1_H) lal_lj_expand_coul_long.cu $(OBJ_DIR)/lj_expand_coul_long_cl.h;
$(OBJ_DIR)/lal_lj_expand_coul_long.o: $(ALL_H) lal_lj_expand_coul_long.h lal_lj_expand_coul_long.cpp $(OBJ_DIR)/lj_expand_coul_long_cl.h $(OBJ_DIR)/lj_expand_coul_long_cl.h $(OBJ_DIR)/lal_base_charge.o
$(OCL) -o $@ -c lal_lj_expand_coul_long.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_lj_expand_coul_long_ext.o: $(ALL_H) lal_lj_expand_coul_long.h lal_lj_expand_coul_long_ext.cpp lal_base_charge.h
$(OCL) -o $@ -c lal_lj_expand_coul_long_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/coul_long_cs_cl.h: lal_coul_long_cs.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh coul_long_cs $(PRE1_H) lal_coul_long_cs.cu $(OBJ_DIR)/coul_long_cs_cl.h;
$(OBJ_DIR)/lal_coul_long_cs.o: $(ALL_H) lal_coul_long_cs.h lal_coul_long_cs.cpp $(OBJ_DIR)/coul_long_cs_cl.h $(OBJ_DIR)/coul_long_cs_cl.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_coul_long.o
$(OCL) -o $@ -c lal_coul_long_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_coul_long_cs_ext.o: $(ALL_H) lal_coul_long_cs.h lal_coul_long_cs_ext.cpp lal_coul_long.h
$(OCL) -o $@ -c lal_coul_long_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_long_cs_cl.h: lal_born_coul_long_cs.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh born_coul_long_cs $(PRE1_H) lal_born_coul_long_cs.cu $(OBJ_DIR)/born_coul_long_cs_cl.h;
$(OBJ_DIR)/lal_born_coul_long_cs.o: $(ALL_H) lal_born_coul_long_cs.h lal_born_coul_long_cs.cpp $(OBJ_DIR)/born_coul_long_cs_cl.h $(OBJ_DIR)/born_coul_long_cs_cl.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_born_coul_long.o
$(OCL) -o $@ -c lal_born_coul_long_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_long_cs_ext.o: $(ALL_H) lal_born_coul_long_cs.h lal_born_coul_long_cs_ext.cpp lal_born_coul_long.h
$(OCL) -o $@ -c lal_born_coul_long_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/born_coul_wolf_cs_cl.h: lal_born_coul_wolf_cs.cu $(PRE1_H)
$(BSH) ./geryon/file_to_cstr.sh born_coul_wolf_cs $(PRE1_H) lal_born_coul_wolf_cs.cu $(OBJ_DIR)/born_coul_wolf_cs_cl.h;
$(OBJ_DIR)/lal_born_coul_wolf_cs.o: $(ALL_H) lal_born_coul_wolf_cs.h lal_born_coul_wolf_cs.cpp $(OBJ_DIR)/born_coul_wolf_cs_cl.h $(OBJ_DIR)/born_coul_wolf_cs_cl.h $(OBJ_DIR)/lal_base_charge.o $(OBJ_DIR)/lal_born_coul_wolf.o
$(OCL) -o $@ -c lal_born_coul_wolf_cs.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_born_coul_wolf_cs_ext.o: $(ALL_H) lal_born_coul_wolf_cs.h lal_born_coul_wolf_cs_ext.cpp lal_born_coul_wolf.h
$(OCL) -o $@ -c lal_born_coul_wolf_cs_ext.cpp -I$(OBJ_DIR)
$(OBJ_DIR)/lal_%.o: lal_%.cpp $(KERS)
$(OCL) -o $@ -c $< -I$(OBJ_DIR)
$(BIN_DIR)/ocl_get_devices: ./geryon/ucl_get_devices.cpp $(OCL_H)
$(OCL) -o $@ ./geryon/ucl_get_devices.cpp -DUCL_OPENCL $(OCL_LINK)
$(OCL_LIB): $(OBJS) $(PTXS)
$(AR) -crusv $(OCL_LIB) $(OBJS)
$(GPU_LIB): $(OBJS)
$(AR) -crusv $(GPU_LIB) $(OBJS)
@cp $(EXTRAMAKE) Makefile.lammps
opencl: $(OCL_EXECS)
clean:
-rm -rf $(EXECS) $(OCL_EXECS) $(OCL_LIB) $(OBJS) $(KERS) *.linkinfo
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(KERS) *.linkinfo
veryclean: clean
-rm -rf *~ *.linkinfo
cleanlib:
-rm -f $(EXECS) $(GPU_LIB) $(OBJS) $(KERS) *.linkinfo
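For orientation, every pair of rules above follows one pattern: `./geryon/file_to_cstr.sh` flattens an OpenCL kernel source (`lal_X.cu`, preceded by the preprocessor headers in `$(PRE1_H)`) into a C-string header `X_cl.h`, which the matching host file `lal_X.cpp` embeds so the kernel can be handed to the OpenCL runtime compiler instead of being shipped as a separate file. A hypothetical sketch of what such a generated header looks like (contents illustrative, not taken from the patch):
````cpp
// Hypothetical shape of a generated header such as buck_cl.h: the
// kernel source becomes one C string named after the first argument
// passed to file_to_cstr.sh ("buck" in the rule above).
const char *buck =
    "#define numtyp float\n"
    "__kernel void k_buck() {}\n";
````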
View File
@ -783,3 +783,39 @@ double lmp_gpu_forces(double **f, double **tor, double *eatom,
double **vatom, double *virial, double &ecoul) {
return global_device.fix_gpu(f,tor,eatom,vatom,virial,ecoul);
}
bool lmp_gpu_config(const std::string &category, const std::string &setting)
{
if (category == "api") {
#if defined(USE_OPENCL)
return setting == "opencl";
#elif defined(USE_HIP)
return setting == "hip";
#elif defined(USE_CUDA)
return setting == "cuda";
#endif
return false;
}
if (category == "precision") {
if (setting == "single") {
#if defined(_SINGLE_SINGLE)
return true;
#else
return false;
#endif
} else if (setting == "mixed") {
#if defined(_SINGLE_DOUBLE)
return true;
#else
return false;
#endif
} else if (setting == "double") {
#if defined(_DOUBLE_DOUBLE)
return true;
#else
return false;
#endif
} else return false;
}
return false;
}
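A minimal sketch of how a caller might use this query (the caller itself is hypothetical; only `lmp_gpu_config` comes from the patch):
````cpp
#include <string>

// Declared in the GPU library (shown in the hunk above).
bool lmp_gpu_config(const std::string &category, const std::string &setting);

// Hypothetical helper: true only for an OpenCL build with mixed precision.
bool mixed_precision_opencl_build() {
  return lmp_gpu_config("api", "opencl") &&
         lmp_gpu_config("precision", "mixed");
}
````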
View File
@ -65,10 +65,15 @@ which activates the OpenMP backend. All of the options controlling device backen
## Spack
An alternative to manually building with CMake is to use the Spack package manager.
To do so, download the `kokkos-spack` git repo and add it to the package list:
Make sure you have downloaded [Spack](https://github.com/spack/spack).
The easiest way to configure the Spack environment is:
````bash
> spack repo add $path-to-kokkos-spack
> source spack/share/spack/setup-env.sh
````
with other scripts available for other shells.
You can display information about how to install packages with:
````bash
> spack info kokkos
````
A basic installation would be done as:
````bash
> spack install kokkos
````
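Beyond the default spec, backends are chosen through package variants; a hypothetical example (the variant name is assumed from the `kokkos` Spack package, so adjust to your Spack version):
````bash
> spack install kokkos +openmp
````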
@ -178,8 +183,8 @@ Options can be enabled by specifying `-DKokkos_ENABLE_X`.
## Other Options
* Kokkos_CXX_STANDARD
* The C++ standard for Kokkos to use: c++11, c++14, c++17, or c++20. This should be given in CMake style as 11, 14, 17, or 20.
* STRING Default: 11
* The C++ standard for Kokkos to use: c++14, c++17, or c++20. This should be given in CMake style as 14, 17, or 20.
* STRING Default: 14
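As an illustration, that option is passed at configure time in CMake style; the backend flag here is only an example:
````bash
> cmake ${srcdir} -DKokkos_CXX_STANDARD=17 -DKokkos_ENABLE_OPENMP=ON
````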
## Third-party Libraries (TPLs)
The following options control enabling TPLs:
View File
@ -1,5 +1,111 @@
# Change Log
## [3.3.01](https://github.com/kokkos/kokkos/tree/3.3.01) (2021-01-06)
[Full Changelog](https://github.com/kokkos/kokkos/compare/3.3.00...3.3.01)
**Bug Fixes:**
- Fix severe performance bug in DualView which added memcpys for sync and modify [\#3693](https://github.com/kokkos/kokkos/issues/#3693)
- Fix performance bug in the CUDA backend, where the CUDA cache config was not set correctly.
## [3.3.00](https://github.com/kokkos/kokkos/tree/3.3.00) (2020-12-16)
[Full Changelog](https://github.com/kokkos/kokkos/compare/3.2.01...3.3.00)
**Features:**
- Require C++14 as minimum C++ standard. C++17 and C++20 are supported too.
- HIP backend is nearly feature complete. Kokkos Dynamic Task Graphs are missing.
- Major update for OpenMPTarget: many capabilities now work. For details contact us.
- Added DPC++/SYCL backend: primary capabilities are working.
- Added Kokkos Graph API analogous to CUDA Graphs.
- Added parallel_scan support with TeamThreadRange [\#3536](https://github.com/kokkos/kokkos/pull/#3536)
- Added Logical Memory Spaces [\#3546](https://github.com/kokkos/kokkos/pull/#3546)
- Added initial half precision support [\#3439](https://github.com/kokkos/kokkos/pull/#3439)
- Experimental feature: control cuda occupancy [\#3379](https://github.com/kokkos/kokkos/pull/#3379)
**Implemented enhancements Backends and Archs:**
- Add a64fx and fujitsu Compiler support [\#3614](https://github.com/kokkos/kokkos/pull/#3614)
- Adding support for AMD gfx908 architecture [\#3375](https://github.com/kokkos/kokkos/pull/#3375)
- SYCL parallel\_for MDRangePolicy [\#3583](https://github.com/kokkos/kokkos/pull/#3583)
- SYCL add parallel\_scan [\#3577](https://github.com/kokkos/kokkos/pull/#3577)
- SYCL custom reductions [\#3544](https://github.com/kokkos/kokkos/pull/#3544)
- SYCL Enable container unit tests [\#3550](https://github.com/kokkos/kokkos/pull/#3550)
- SYCL feature level 5 [\#3480](https://github.com/kokkos/kokkos/pull/#3480)
- SYCL Feature level 4 (parallel\_for) [\#3474](https://github.com/kokkos/kokkos/pull/#3474)
- SYCL feature level 3 [\#3451](https://github.com/kokkos/kokkos/pull/#3451)
- SYCL feature level 2 [\#3447](https://github.com/kokkos/kokkos/pull/#3447)
- OpenMPTarget: Hierarchical reduction for + operator on scalars [\#3504](https://github.com/kokkos/kokkos/pull/#3504)
- OpenMPTarget hierarchical [\#3411](https://github.com/kokkos/kokkos/pull/#3411)
- HIP Add Impl::atomic\_[store,load] [\#3440](https://github.com/kokkos/kokkos/pull/#3440)
- HIP enable global lock arrays [\#3418](https://github.com/kokkos/kokkos/pull/#3418)
- HIP Implement multiple occupancy paths for various HIP kernel launchers [\#3366](https://github.com/kokkos/kokkos/pull/#3366)
**Implemented enhancements Policies:**
- MDRangePolicy: Let it be semiregular [\#3494](https://github.com/kokkos/kokkos/pull/#3494)
- MDRangePolicy: Check narrowing conversion in construction [\#3527](https://github.com/kokkos/kokkos/pull/#3527)
- MDRangePolicy: CombinedReducers support [\#3395](https://github.com/kokkos/kokkos/pull/#3395)
- Kokkos Graph: Interface and Default Implementation [\#3362](https://github.com/kokkos/kokkos/pull/#3362)
- Kokkos Graph: add Cuda Graph implementation [\#3369](https://github.com/kokkos/kokkos/pull/#3369)
- TeamPolicy: implemented autotuning of team sizes and vector lengths [\#3206](https://github.com/kokkos/kokkos/pull/#3206)
- RangePolicy: Initialize all data members in default constructor [\#3509](https://github.com/kokkos/kokkos/pull/#3509)
**Implemented enhancements BuildSystem:**
- Auto-generate core test files for all backends [\#3488](https://github.com/kokkos/kokkos/pull/#3488)
- Avoid rewriting test files when calling cmake [\#3548](https://github.com/kokkos/kokkos/pull/#3548)
- RULE\_LAUNCH\_COMPILE and RULE\_LAUNCH\_LINK system for nvcc\_wrapper [\#3136](https://github.com/kokkos/kokkos/pull/#3136)
- Adding -include as a known argument to nvcc\_wrapper [\#3434](https://github.com/kokkos/kokkos/pull/#3434)
- Install hpcbind script [\#3402](https://github.com/kokkos/kokkos/pull/#3402)
- cmake/kokkos\_tribits.cmake: add parsing for args [\#3457](https://github.com/kokkos/kokkos/pull/#3457)
**Implemented enhancements Tools:**
- Changed namespacing of Kokkos::Tools::Impl::Impl::tune\_policy [\#3455](https://github.com/kokkos/kokkos/pull/#3455)
- Delegate to an impl allocate/deallocate method to allow specifying a SpaceHandle for MemorySpaces [\#3530](https://github.com/kokkos/kokkos/pull/#3530)
- Use the Kokkos Profiling interface rather than the Impl interface [\#3518](https://github.com/kokkos/kokkos/pull/#3518)
- Runtime option for tuning [\#3459](https://github.com/kokkos/kokkos/pull/#3459)
- Dual View Tool Events [\#3326](https://github.com/kokkos/kokkos/pull/#3326)
**Implemented enhancements Other:**
- Abort on errors instead of just printing [\#3528](https://github.com/kokkos/kokkos/pull/#3528)
- Enable C++14 macros unconditionally [\#3449](https://github.com/kokkos/kokkos/pull/#3449)
- Make ViewMapping trivially copyable [\#3436](https://github.com/kokkos/kokkos/pull/#3436)
- Rename struct ViewMapping to class [\#3435](https://github.com/kokkos/kokkos/pull/#3435)
- Replace enums in Kokkos\_ViewMapping.hpp (removes -Wextra) [\#3422](https://github.com/kokkos/kokkos/pull/#3422)
- Use bool for enums representing bools [\#3416](https://github.com/kokkos/kokkos/pull/#3416)
- Fence active instead of default execution space instances [\#3388](https://github.com/kokkos/kokkos/pull/#3388)
- Refactor parallel\_reduce fence usage [\#3359](https://github.com/kokkos/kokkos/pull/#3359)
- Moved Space EBO helpers to Kokkos\_EBO [\#3357](https://github.com/kokkos/kokkos/pull/#3357)
- Add remove\_cvref type trait [\#3340](https://github.com/kokkos/kokkos/pull/#3340)
- Adding identity type traits and update definition of identity\_t alias [\#3339](https://github.com/kokkos/kokkos/pull/#3339)
- Add is\_specialization\_of type trait [\#3338](https://github.com/kokkos/kokkos/pull/#3338)
- Make ScratchMemorySpace semi-regular [\#3309](https://github.com/kokkos/kokkos/pull/#3309)
- Optimize min/max atomics with early exit on no-op case [\#3265](https://github.com/kokkos/kokkos/pull/#3265)
- Refactor Backend Development [\#2941](https://github.com/kokkos/kokkos/pull/#2941)
**Fixed bugs:**
- Fixup MDRangePolicy construction from Kokkos arrays [\#3591](https://github.com/kokkos/kokkos/pull/#3591)
- Add atomic functions for unsigned long long using gcc built-in [\#3588](https://github.com/kokkos/kokkos/pull/#3588)
- Fixup silent pointless comparison with zero in checked\_narrow\_cast (compiler workaround) [\#3566](https://github.com/kokkos/kokkos/pull/#3566)
- Fixes for ROCm 3.9 [\#3565](https://github.com/kokkos/kokkos/pull/#3565)
- Fix windows build issues which crept in for the CUDA build [\#3532](https://github.com/kokkos/kokkos/pull/#3532)
- HIP Fix atomics of large data types and clean up lock arrays [\#3529](https://github.com/kokkos/kokkos/pull/#3529)
- Pthreads fix exception resulting from 0 grain size [\#3510](https://github.com/kokkos/kokkos/pull/#3510)
- Fixup do not require atomic operation to be default constructible [\#3503](https://github.com/kokkos/kokkos/pull/#3503)
- Fix race condition in HIP backend [\#3467](https://github.com/kokkos/kokkos/pull/#3467)
- Replace KOKKOS\_DEBUG with KOKKOS\_ENABLE\_DEBUG [\#3458](https://github.com/kokkos/kokkos/pull/#3458)
- Fix multi-stream team scratch space definition for HIP [\#3398](https://github.com/kokkos/kokkos/pull/#3398)
- HIP fix template deduction [\#3393](https://github.com/kokkos/kokkos/pull/#3393)
- Fix compiling with HIP and C++17 [\#3390](https://github.com/kokkos/kokkos/pull/#3390)
- Fix sigFPE in HIP blocksize deduction [\#3378](https://github.com/kokkos/kokkos/pull/#3378)
- Type alias change: replace CS with CTS to avoid conflicts with NVSHMEM [\#3348](https://github.com/kokkos/kokkos/pull/#3348)
- Clang compilation of CUDA backend on Windows [\#3345](https://github.com/kokkos/kokkos/pull/#3345)
- Fix HBW support [\#3343](https://github.com/kokkos/kokkos/pull/#3343)
- Added missing fences to unique token [\#3260](https://github.com/kokkos/kokkos/pull/#3260)
**Incompatibilities:**
- Remove unused utilities (forward, move, and expand\_variadic) from Kokkos::Impl [\#3535](https://github.com/kokkos/kokkos/pull/#3535)
- Remove unused traits [\#3534](https://github.com/kokkos/kokkos/pull/#3534)
- HIP: Remove old HCC code [\#3301](https://github.com/kokkos/kokkos/pull/#3301)
- Prepare for deprecation of ViewAllocateWithoutInitializing [\#3264](https://github.com/kokkos/kokkos/pull/#3264)
- Remove ROCm backend [\#3148](https://github.com/kokkos/kokkos/pull/#3148)
## [3.2.01](https://github.com/kokkos/kokkos/tree/3.2.01) (2020-11-17)
[Full Changelog](https://github.com/kokkos/kokkos/compare/3.2.00...3.2.01)
@ -36,37 +142,31 @@
- Windows Cuda support [\#3018](https://github.com/kokkos/kokkos/issues/3018)
- Pass `-Wext-lambda-captures-this` to NVCC when support for `__host__ __device__` lambda is enabled from CUDA 11 [\#3241](https://github.com/kokkos/kokkos/issues/3241)
- Use explicit staging buffer for constant memory kernel launches and cleanup host/device synchronization [\#3234](https://github.com/kokkos/kokkos/issues/3234)
- Various fixup to policies including making TeamPolicy default constructible and making RangePolicy and TeamPolicy assignable 1: [\#3202](https://github.com/kokkos/kokkos/issues/3202)
- Various fixup to policies including making TeamPolicy default constructible and making RangePolicy and TeamPolicy assignable 2: [\#3203](https://github.com/kokkos/kokkos/issues/3203)
- Various fixup to policies including making TeamPolicy default constructible and making RangePolicy and TeamPolicy assignable 3: [\#3196](https://github.com/kokkos/kokkos/issues/3196)
- Various fixup to policies including making TeamPolicy default constructible and making RangePolicy and TeamPolicy assignable: [\#3202](https://github.com/kokkos/kokkos/issues/3202) , [\#3203](https://github.com/kokkos/kokkos/issues/3203) , [\#3196](https://github.com/kokkos/kokkos/issues/3196)
- Annotations for `DefaultExectutionSpace` and `DefaultHostExectutionSpace` to use in static analysis [\#3189](https://github.com/kokkos/kokkos/issues/3189)
- Add documentation on using Spack to install Kokkos and developing packages that depend on Kokkos [\#3187](https://github.com/kokkos/kokkos/issues/3187)
- Improve support for nvcc\_wrapper with exotic host compiler [\#3186](https://github.com/kokkos/kokkos/issues/3186)
- Add OpenMPTarget backend flags for NVC++ compiler [\#3185](https://github.com/kokkos/kokkos/issues/3185)
- Move deep\_copy/create\_mirror\_view on Experimental::OffsetView into Kokkos:: namespace [\#3166](https://github.com/kokkos/kokkos/issues/3166)
- Allow for larger block size in HIP [\#3165](https://github.com/kokkos/kokkos/issues/3165)
- View: Added names of Views to the different View initialize/free kernels [\#3159](https://github.com/kokkos/kokkos/issues/3159)
- Cuda: Caching cudaFunctorAttributes and whether L1/Shmem prefer was set [\#3151](https://github.com/kokkos/kokkos/issues/3151)
- BuildSystem: Provide an explicit default CMAKE\_BUILD\_TYPE [\#3131](https://github.com/kokkos/kokkos/issues/3131)
- BuildSystem: Improved performance in default configuration by defaulting to Release build [\#3131](https://github.com/kokkos/kokkos/issues/3131)
- Cuda: Update CUDA occupancy calculation [\#3124](https://github.com/kokkos/kokkos/issues/3124)
- Vector: Adding data() to Vector [\#3123](https://github.com/kokkos/kokkos/issues/3123)
- BuildSystem: Add CUDA Ampere configuration support [\#3122](https://github.com/kokkos/kokkos/issues/3122)
- General: Apply [[noreturn]] to Kokkos::abort when applicable [\#3106](https://github.com/kokkos/kokkos/issues/3106)
- TeamPolicy: Validate storage level argument passed to TeamPolicy::set\_scratch\_size() [\#3098](https://github.com/kokkos/kokkos/issues/3098)
- nvcc\_wrapper: send --cudart to nvcc instead of host compiler [\#3092](https://github.com/kokkos/kokkos/issues/3092)
- BuildSystem: Make kokkos\_has\_string() function in Makefile.kokkos case insensitive [\#3091](https://github.com/kokkos/kokkos/issues/3091)
- Modify KOKKOS\_FUNCTION macro for clang-tidy analysis [\#3087](https://github.com/kokkos/kokkos/issues/3087)
- Move allocation profiling to allocate/deallocate calls [\#3084](https://github.com/kokkos/kokkos/issues/3084)
- BuildSystem: FATAL\_ERROR when attempting in-source build [\#3082](https://github.com/kokkos/kokkos/issues/3082)
- Change enums in ScatterView to types [\#3076](https://github.com/kokkos/kokkos/issues/3076)
- HIP: Changes for new compiler/runtime [\#3067](https://github.com/kokkos/kokkos/issues/3067)
- Extract and use get\_gpu [\#3061](https://github.com/kokkos/kokkos/issues/3061)
- Extract and use get\_gpu [\#3048](https://github.com/kokkos/kokkos/issues/3048)
- Extract and use get\_gpu [\#3061](https://github.com/kokkos/kokkos/issues/3061) , [\#3048](https://github.com/kokkos/kokkos/issues/3048)
- Add is\_allocated to View-like containers [\#3059](https://github.com/kokkos/kokkos/issues/3059)
- Combined reducers for scalar references [\#3052](https://github.com/kokkos/kokkos/issues/3052)
- Add configurable capacity for UniqueToken [\#3051](https://github.com/kokkos/kokkos/issues/3051)
- Add installation testing [\#3034](https://github.com/kokkos/kokkos/issues/3034)
- BuildSystem: Add -expt-relaxed-constexpr flag to nvcc\_wrapper [\#3021](https://github.com/kokkos/kokkos/issues/3021)
- HIP: Add UniqueToken [\#3020](https://github.com/kokkos/kokkos/issues/3020)
- Autodetect number of devices [\#3013](https://github.com/kokkos/kokkos/issues/3013)
@ -82,11 +182,13 @@
- ScatterView: fix for OpenmpTarget remove inheritance from reducers [\#3162](https://github.com/kokkos/kokkos/issues/3162)
- BuildSystem: Set OpenMP flags according to host compiler [\#3127](https://github.com/kokkos/kokkos/issues/3127)
- OpenMP: Fix logic for nested omp in partition\_master bug [\#3101](https://github.com/kokkos/kokkos/issues/3101)
- nvcc\_wrapper: send --cudart to nvcc instead of host compiler [\#3092](https://github.com/kokkos/kokkos/issues/3092)
- BuildSystem: Fixes for Cuda/11 and c++17 [\#3085](https://github.com/kokkos/kokkos/issues/3085)
- HIP: Fix print\_configuration [\#3080](https://github.com/kokkos/kokkos/issues/3080)
- Conditionally define get\_gpu [\#3072](https://github.com/kokkos/kokkos/issues/3072)
- Fix bounds for ranges in random number generator [\#3069](https://github.com/kokkos/kokkos/issues/3069)
- Fix Cuda minor arch check [\#3035](https://github.com/kokkos/kokkos/issues/3035)
- BuildSystem: Add -expt-relaxed-constexpr flag to nvcc\_wrapper [\#3021](https://github.com/kokkos/kokkos/issues/3021)
**Incompatibilities:**
View File
@ -111,7 +111,7 @@ ENDIF()
set(Kokkos_VERSION_MAJOR 3)
set(Kokkos_VERSION_MINOR 2)
set(Kokkos_VERSION_MINOR 3)
set(Kokkos_VERSION_PATCH 1)
set(Kokkos_VERSION "${Kokkos_VERSION_MAJOR}.${Kokkos_VERSION_MINOR}.${Kokkos_VERSION_PATCH}")
math(EXPR KOKKOS_VERSION "${Kokkos_VERSION_MAJOR} * 10000 + ${Kokkos_VERSION_MINOR} * 100 + ${Kokkos_VERSION_PATCH}")
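Worked out for the version set above (3.3.1), the encoded integer is

$$\text{KOKKOS\_VERSION} = 3 \times 10000 + 3 \times 100 + 1 = 30301.$$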
@ -139,13 +139,15 @@ ENDIF()
# I really wish these were regular variables
# but scoping issues can make it difficult
GLOBAL_SET(KOKKOS_COMPILE_OPTIONS)
GLOBAL_SET(KOKKOS_LINK_OPTIONS)
GLOBAL_SET(KOKKOS_LINK_OPTIONS -DKOKKOS_DEPENDENCE)
GLOBAL_SET(KOKKOS_CUDA_OPTIONS)
GLOBAL_SET(KOKKOS_CUDAFE_OPTIONS)
GLOBAL_SET(KOKKOS_XCOMPILER_OPTIONS)
# We need to append text here to make sure TPLs
# we import are available for an installed Kokkos
GLOBAL_SET(KOKKOS_TPL_EXPORTS)
# this could probably be scoped to project
GLOBAL_SET(KOKKOS_COMPILE_DEFINITIONS KOKKOS_DEPENDENCE)
# Include a set of Kokkos-specific wrapper functions that
# will either call raw CMake or TriBITS
@ -191,8 +193,6 @@ ELSE()
SET(KOKKOS_IS_SUBDIRECTORY FALSE)
ENDIF()
#------------------------------------------------------------------------------
#
# A) Forward declare the package so that certain options are also defined for
@ -253,9 +253,7 @@ KOKKOS_PROCESS_SUBPACKAGES()
KOKKOS_PACKAGE_DEF()
KOKKOS_EXCLUDE_AUTOTOOLS_FILES()
KOKKOS_PACKAGE_POSTPROCESS()
#We are ready to configure the header
CONFIGURE_FILE(cmake/KokkosCore_config.h.in KokkosCore_config.h @ONLY)
KOKKOS_CONFIGURE_CORE()
IF (NOT KOKKOS_HAS_TRILINOS AND NOT Kokkos_INSTALL_TESTING)
ADD_LIBRARY(kokkos INTERFACE)
@ -272,7 +270,10 @@ INCLUDE(${KOKKOS_SRC_PATH}/cmake/kokkos_install.cmake)
# executables also need nvcc_wrapper. Thus, we need to install it.
# If the argument of DESTINATION is a relative path, CMake computes it
# as relative to ${CMAKE_INSTALL_PATH}.
INSTALL(PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/bin/nvcc_wrapper DESTINATION ${CMAKE_INSTALL_BINDIR})
# KOKKOS_INSTALL_ADDITIONAL_FILES will install nvcc wrapper and other generated
# files
KOKKOS_INSTALL_ADDITIONAL_FILES()
# Finally - if we are a subproject - make sure the enabled devices are visible
IF (HAS_PARENT)
View File
@ -11,27 +11,27 @@ CXXFLAGS += $(SHFLAGS)
endif
KOKKOS_VERSION_MAJOR = 3
KOKKOS_VERSION_MINOR = 2
KOKKOS_VERSION_MINOR = 3
KOKKOS_VERSION_PATCH = 1
KOKKOS_VERSION = $(shell echo $(KOKKOS_VERSION_MAJOR)*10000+$(KOKKOS_VERSION_MINOR)*100+$(KOKKOS_VERSION_PATCH) | bc)
# Options: Cuda,HIP,ROCm,OpenMP,Pthread,Serial
# Options: Cuda,HIP,OpenMP,Pthread,Serial
KOKKOS_DEVICES ?= "OpenMP"
#KOKKOS_DEVICES ?= "Pthread"
# Options:
# Intel: KNC,KNL,SNB,HSW,BDW,SKX
# NVIDIA: Kepler,Kepler30,Kepler32,Kepler35,Kepler37,Maxwell,Maxwell50,Maxwell52,Maxwell53,Pascal60,Pascal61,Volta70,Volta72,Turing75,Ampere80
# ARM: ARMv80,ARMv81,ARMv8-ThunderX,ARMv8-TX2
# ARM: ARMv80,ARMv81,ARMv8-ThunderX,ARMv8-TX2,A64FX
# IBM: BGQ,Power7,Power8,Power9
# AMD-GPUS: Vega900,Vega906
# AMD-GPUS: Vega900,Vega906,Vega908
# AMD-CPUS: AMDAVX,Zen,Zen2
KOKKOS_ARCH ?= ""
# Options: yes,no
KOKKOS_DEBUG ?= "no"
# Options: hwloc,librt,experimental_memkind
KOKKOS_USE_TPLS ?= ""
# Options: c++11,c++14,c++1y,c++17,c++1z,c++2a
KOKKOS_CXX_STANDARD ?= "c++11"
# Options: c++14,c++1y,c++17,c++1z,c++2a
KOKKOS_CXX_STANDARD ?= "c++14"
# Options: aggressive_vectorization,disable_profiling,enable_large_mem_tests,disable_complex_align
KOKKOS_OPTIONS ?= ""
KOKKOS_CMAKE ?= "no"
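Taken together, an application build that includes `Makefile.kokkos` would select these options on the command line roughly as follows (a sketch; the values are illustrative):
````bash
# Hypothetical invocation: OpenMP backend, Skylake host, C++14, no debug.
make -j8 KOKKOS_DEVICES=OpenMP KOKKOS_ARCH=SKX KOKKOS_CXX_STANDARD=c++14 KOKKOS_DEBUG=no
````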
@ -66,7 +66,6 @@ kokkos_path_exists=$(if $(wildcard $1),1,0)
# Check for general settings
KOKKOS_INTERNAL_ENABLE_DEBUG := $(call kokkos_has_string,$(KOKKOS_DEBUG),yes)
KOKKOS_INTERNAL_ENABLE_CXX11 := $(call kokkos_has_string,$(KOKKOS_CXX_STANDARD),c++11)
KOKKOS_INTERNAL_ENABLE_CXX14 := $(call kokkos_has_string,$(KOKKOS_CXX_STANDARD),c++14)
KOKKOS_INTERNAL_ENABLE_CXX1Y := $(call kokkos_has_string,$(KOKKOS_CXX_STANDARD),c++1y)
KOKKOS_INTERNAL_ENABLE_CXX17 := $(call kokkos_has_string,$(KOKKOS_CXX_STANDARD),c++17)
@ -279,14 +278,12 @@ else
endif
endif
# Set C++11 flags.
# Set C++ version flags.
ifeq ($(KOKKOS_INTERNAL_COMPILER_PGI), 1)
KOKKOS_INTERNAL_CXX11_FLAG := --c++11
KOKKOS_INTERNAL_CXX14_FLAG := --c++14
KOKKOS_INTERNAL_CXX17_FLAG := --c++17
else
ifeq ($(KOKKOS_INTERNAL_COMPILER_XL), 1)
KOKKOS_INTERNAL_CXX11_FLAG := -std=c++11
KOKKOS_INTERNAL_CXX14_FLAG := -std=c++14
KOKKOS_INTERNAL_CXX1Y_FLAG := -std=c++1y
#KOKKOS_INTERNAL_CXX17_FLAG := -std=c++17
@ -294,17 +291,12 @@ else
#KOKKOS_INTERNAL_CXX2A_FLAG := -std=c++2a
else
ifeq ($(KOKKOS_INTERNAL_COMPILER_CRAY), 1)
KOKKOS_INTERNAL_CXX11_FLAG := -hstd=c++11
KOKKOS_INTERNAL_CXX14_FLAG := -hstd=c++14
#KOKKOS_INTERNAL_CXX1Y_FLAG := -hstd=c++1y
#KOKKOS_INTERNAL_CXX17_FLAG := -hstd=c++17
#KOKKOS_INTERNAL_CXX1Z_FLAG := -hstd=c++1z
#KOKKOS_INTERNAL_CXX2A_FLAG := -hstd=c++2a
else
ifeq ($(KOKKOS_INTERNAL_COMPILER_HCC), 1)
KOKKOS_INTERNAL_CXX11_FLAG :=
else
KOKKOS_INTERNAL_CXX11_FLAG := --std=c++11
KOKKOS_INTERNAL_CXX14_FLAG := --std=c++14
KOKKOS_INTERNAL_CXX1Y_FLAG := --std=c++1y
KOKKOS_INTERNAL_CXX17_FLAG := --std=c++17
@ -312,7 +304,6 @@ else
KOKKOS_INTERNAL_CXX2A_FLAG := --std=c++2a
endif
endif
endif
endif
# Check for Kokkos Architecture settings.
@ -377,7 +368,8 @@ KOKKOS_INTERNAL_USE_ARCH_ARMV80 := $(call kokkos_has_string,$(KOKKOS_ARCH),ARMv8
KOKKOS_INTERNAL_USE_ARCH_ARMV81 := $(call kokkos_has_string,$(KOKKOS_ARCH),ARMv81)
KOKKOS_INTERNAL_USE_ARCH_ARMV8_THUNDERX := $(call kokkos_has_string,$(KOKKOS_ARCH),ARMv8-ThunderX)
KOKKOS_INTERNAL_USE_ARCH_ARMV8_THUNDERX2 := $(call kokkos_has_string,$(KOKKOS_ARCH),ARMv8-TX2)
KOKKOS_INTERNAL_USE_ARCH_ARM := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_ARMV80)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV81)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV8_THUNDERX)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV8_THUNDERX2) | bc))
KOKKOS_INTERNAL_USE_ARCH_A64FX := $(call kokkos_has_string,$(KOKKOS_ARCH),A64FX)
KOKKOS_INTERNAL_USE_ARCH_ARM := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_ARMV80)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV81)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV8_THUNDERX)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV8_THUNDERX2)+$(KOKKOS_INTERNAL_USE_ARCH_A64FX) | bc))
# IBM based.
KOKKOS_INTERNAL_USE_ARCH_BGQ := $(call kokkos_has_string,$(KOKKOS_ARCH),BGQ)
@ -392,6 +384,7 @@ KOKKOS_INTERNAL_USE_ARCH_ZEN2 := $(call kokkos_has_string,$(KOKKOS_ARCH),Zen2)
KOKKOS_INTERNAL_USE_ARCH_ZEN := $(call kokkos_has_string,$(KOKKOS_ARCH),Zen)
KOKKOS_INTERNAL_USE_ARCH_VEGA900 := $(call kokkos_has_string,$(KOKKOS_ARCH),Vega900)
KOKKOS_INTERNAL_USE_ARCH_VEGA906 := $(call kokkos_has_string,$(KOKKOS_ARCH),Vega906)
KOKKOS_INTERNAL_USE_ARCH_VEGA908 := $(call kokkos_has_string,$(KOKKOS_ARCH),Vega908)
# Any AVX?
KOKKOS_INTERNAL_USE_ARCH_SSE42 := $(shell expr $(KOKKOS_INTERNAL_USE_ARCH_WSM))
@ -459,7 +452,6 @@ H := \#
# Do not append first line
tmp := $(shell echo "/* ---------------------------------------------" > KokkosCore_config.tmp)
tmp := $(call kokkos_append_header,"Makefile constructed configuration:")
tmp := $(call kokkos_append_header,"$(shell date)")
tmp := $(call kokkos_append_header,"----------------------------------------------*/")
tmp := $(call kokkos_append_header,'$H''if !defined(KOKKOS_MACROS_HPP) || defined(KOKKOS_CORE_CONFIG_H)')
@ -479,10 +471,6 @@ ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_COMPILER_CUDA_VERSION $(KOKKOS_INTERNAL_COMPILER_NVCC_VERSION)")
endif
ifeq ($(KOKKOS_INTERNAL_USE_ROCM), 1)
tmp := $(call kokkos_append_header,'$H''define KOKKOS_ENABLE_ROCM')
tmp := $(call kokkos_append_header,'$H''define KOKKOS_IMPL_ROCM_CLANG_WORKAROUND 1')
endif
ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
tmp := $(call kokkos_append_header,'$H''define KOKKOS_ENABLE_HIP')
endif
@ -542,12 +530,6 @@ endif
#only add the c++ standard flags if this is not CMake
tmp := $(call kokkos_append_header,"/* General Settings */")
ifeq ($(KOKKOS_INTERNAL_ENABLE_CXX11), 1)
ifneq ($(KOKKOS_STANDALONE_CMAKE), yes)
KOKKOS_CXXFLAGS += $(KOKKOS_INTERNAL_CXX11_FLAG)
endif
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ENABLE_CXX11")
endif
ifeq ($(KOKKOS_INTERNAL_ENABLE_CXX14), 1)
ifneq ($(KOKKOS_STANDALONE_CMAKE), yes)
KOKKOS_CXXFLAGS += $(KOKKOS_INTERNAL_CXX14_FLAG)
@ -765,6 +747,13 @@ ifeq ($(KOKKOS_INTERNAL_USE_ARCH_ARMV81), 1)
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_A64FX), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_A64FX")
KOKKOS_CXXFLAGS += -march=armv8.2-a+sve
KOKKOS_LDFLAGS += -march=armv8.2-a+sve
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_ZEN), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_ZEN")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_AMD_AVX2")
@ -1143,6 +1132,12 @@ ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_VEGA906")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --amdgpu-target=gfx906
endif
ifeq ($(KOKKOS_INTERNAL_USE_ARCH_VEGA908), 1)
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_HIP 908")
tmp := $(call kokkos_append_header,"$H""define KOKKOS_ARCH_VEGA908")
KOKKOS_INTERNAL_HIP_ARCH_FLAG := --amdgpu-target=gfx908
endif
KOKKOS_SRC += $(wildcard $(KOKKOS_PATH)/core/src/HIP/*.cpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/HIP/*.hpp)
@ -1173,6 +1168,55 @@ ifneq ($(KOKKOS_INTERNAL_NEW_CONFIG), 0)
tmp := $(shell cp KokkosCore_config.tmp KokkosCore_config.h)
endif
# Functions for generating config header file
kokkos_start_config_header = $(shell sed 's~@INCLUDE_NEXT_FILE@~~g' $(KOKKOS_PATH)/cmake/KokkosCore_Config_HeaderSet.in > $1)
kokkos_update_config_header = $(shell sed 's~@HEADER_GUARD_TAG@~$1~g' $2 > $3)
kokkos_append_config_header = $(shell echo $1 >> $2)
tmp := $(call kokkos_start_config_header, "KokkosCore_Config_FwdBackend.tmp")
tmp := $(call kokkos_start_config_header, "KokkosCore_Config_SetupBackend.tmp")
tmp := $(call kokkos_start_config_header, "KokkosCore_Config_DeclareBackend.tmp")
tmp := $(call kokkos_start_config_header, "KokkosCore_Config_PostInclude.tmp")
tmp := $(call kokkos_update_config_header, KOKKOS_FWD_HPP_, "KokkosCore_Config_FwdBackend.tmp", "KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_update_config_header, KOKKOS_SETUP_HPP_, "KokkosCore_Config_SetupBackend.tmp", "KokkosCore_Config_SetupBackend.hpp")
tmp := $(call kokkos_update_config_header, KOKKOS_DECLARE_HPP_, "KokkosCore_Config_DeclareBackend.tmp", "KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_update_config_header, KOKKOS_POST_INCLUDE_HPP_, "KokkosCore_Config_PostInclude.tmp", "KokkosCore_Config_PostInclude.hpp")
ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_CUDA.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_CUDA.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <setup/Kokkos_Setup_Cuda.hpp>","KokkosCore_Config_SetupBackend.hpp")
ifeq ($(KOKKOS_INTERNAL_CUDA_USE_UVM), 1)
else
endif
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMPTARGET), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_OPENMPTARGET.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_OPENMPTARGET.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_HIP.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_HIP.hpp>","KokkosCore_Config_DeclareBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <setup/Kokkos_Setup_HIP.hpp>","KokkosCore_Config_SetupBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_OPENMP), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_OPENMP.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_OPENMP.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_PTHREADS), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_THREADS.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_THREADS.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_HPX), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_HPX.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_HPX.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_SERIAL), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_SERIAL.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_SERIAL.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
ifeq ($(KOKKOS_INTERNAL_USE_MEMKIND), 1)
tmp := $(call kokkos_append_config_header,"\#include <fwd/Kokkos_Fwd_HBWSpace.hpp>","KokkosCore_Config_FwdBackend.hpp")
tmp := $(call kokkos_append_config_header,"\#include <decl/Kokkos_Declare_HBWSpace.hpp>","KokkosCore_Config_DeclareBackend.hpp")
endif
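The net effect of the calls above is a set of small generated headers that forward to the enabled backends; a hypothetical sketch for `KOKKOS_DEVICES=OpenMP` (the surrounding boilerplate comes from `cmake/KokkosCore_Config_HeaderSet.in` and is omitted here):
````cpp
// KokkosCore_Config_FwdBackend.hpp (hypothetical reconstruction)
#include <fwd/Kokkos_Fwd_OPENMP.hpp>
````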
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/core/src/impl/*.hpp)
KOKKOS_HEADERS += $(wildcard $(KOKKOS_PATH)/containers/src/*.hpp)
@ -1290,7 +1334,7 @@ ifneq ($(KOKKOS_INTERNAL_USE_SERIAL), 1)
endif
# With Cygwin functions such as fdopen and fileno are not defined
# when strict ansi is enabled. strict ansi gets enabled with --std=c++11
# when strict ansi is enabled. strict ansi gets enabled with --std=c++14
# though. So we hard undefine it here. Not sure if that has any bad side effects
# This is needed for gtest actually, not for Kokkos itself!
ifeq ($(KOKKOS_INTERNAL_OS_CYGWIN), 1)
@ -1313,7 +1357,9 @@ KOKKOS_OBJ_LINK = $(notdir $(KOKKOS_OBJ))
include $(KOKKOS_PATH)/Makefile.targets
kokkos-clean:
rm -f $(KOKKOS_OBJ_LINK) KokkosCore_config.h KokkosCore_config.tmp libkokkos.a
rm -f $(KOKKOS_OBJ_LINK) KokkosCore_config.h KokkosCore_config.tmp libkokkos.a KokkosCore_Config_SetupBackend.hpp \
KokkosCore_Config_FwdBackend.hpp KokkosCore_Config_DeclareBackend.hpp KokkosCore_Config_DeclareBackend.tmp \
KokkosCore_Config_FwdBackend.tmp KokkosCore_Config_PostInclude.hpp KokkosCore_Config_PostInclude.tmp KokkosCore_Config_SetupBackend.tmp
libkokkos.a: $(KOKKOS_OBJ_LINK) $(KOKKOS_SRC) $(KOKKOS_HEADERS)
ar cr libkokkos.a $(KOKKOS_OBJ_LINK)
View File
@ -53,23 +53,10 @@ Kokkos_HIP_Space.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Space.cpp
Kokkos_HIP_Instance.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Instance.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Instance.cpp
Kokkos_HIP_KernelLaunch.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_KernelLaunch.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_KernelLaunch.cpp
Kokkos_HIP_Locks.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Locks.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/HIP/Kokkos_HIP_Locks.cpp
endif
ifeq ($(KOKKOS_INTERNAL_USE_ROCM), 1)
Kokkos_ROCm_Exec.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Exec.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Exec.cpp
Kokkos_ROCm_Space.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Space.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Space.cpp
Kokkos_ROCm_Task.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Task.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Task.cpp
Kokkos_ROCm_Impl.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Impl.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/ROCm/Kokkos_ROCm_Impl.cpp
endif
ifeq ($(KOKKOS_INTERNAL_USE_PTHREADS), 1)
Kokkos_ThreadsExec_base.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/Threads/Kokkos_ThreadsExec_base.cpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/Threads/Kokkos_ThreadsExec_base.cpp
View File
@ -54,24 +54,16 @@ For specifics see the LICENSE file contained in the repository or distribution.
# Requirements
### Primary tested compilers on X86 are:
* GCC 4.8.4
* GCC 4.9.3
* GCC 5.1.0
* GCC 5.3.0
* GCC 5.4.0
* GCC 5.5.0
* GCC 6.1.0
* GCC 7.2.0
* GCC 7.3.0
* GCC 8.1.0
* Intel 15.0.2
* Intel 16.0.1
* Intel 17.0.1
* Intel 17.4.196
* Intel 18.2.128
* Clang 3.6.1
* Clang 3.7.1
* Clang 3.8.1
* Clang 3.9.0
* Clang 4.0.0
* Clang 6.0.0 for CUDA (CUDA Toolkit 9.0)
* Clang 7.0.0 for CUDA (CUDA Toolkit 9.1)
@ -81,6 +73,7 @@ For specifics see the LICENSE file contained in the repository or distribution.
* NVCC 9.2 for CUDA (with gcc 7.2.0)
* NVCC 10.0 for CUDA (with gcc 7.4.0)
* NVCC 10.1 for CUDA (with gcc 7.4.0)
* NVCC 11.0 for CUDA (with gcc 8.4.0)
### Primary tested compilers on Power 8 are:
* GCC 6.4.0 (OpenMP,Serial)
@ -89,9 +82,8 @@ For specifics see the LICENSE file contained in the repository or distribution.
* NVCC 9.2.88 for CUDA (with gcc 7.2.0 and XL 16.1.0)
### Primary tested compilers on Intel KNL are:
* Intel 16.4.258 (with gcc 4.7.2)
* Intel 17.2.174 (with gcc 4.9.3)
* Intel 18.2.199 (with gcc 4.9.3)
* Intel 17.2.174 (with gcc 6.2.0 and 6.4.0)
* Intel 18.2.199 (with gcc 6.2.0 and 6.4.0)
### Primary tested compilers on ARM (Cavium ThunderX2)
* GCC 7.2.0
View File
@ -806,7 +806,7 @@ class Random_XorShift64 {
const double V = 2.0 * drand() - 1.0;
S = U * U + V * V;
}
return U * std::sqrt(-2.0 * log(S) / S);
return U * std::sqrt(-2.0 * std::log(S) / S);
}
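For context, this hunk (and the identical one below) sits in the tail of the Marsaglia polar method: uniform draws U, V in (-1, 1) are retried until S = U*U + V*V < 1, after which the function returns the standard normal variate

$$Z = U\sqrt{\frac{-2\ln S}{S}},$$

and the patch only qualifies the logarithm as `std::log`.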
KOKKOS_INLINE_FUNCTION
@ -1042,7 +1042,7 @@ class Random_XorShift1024 {
const double V = 2.0 * drand() - 1.0;
S = U * U + V * V;
}
return U * std::sqrt(-2.0 * log(S) / S);
return U * std::sqrt(-2.0 * std::log(S) / S);
}
KOKKOS_INLINE_FUNCTION
View File
@ -222,11 +222,11 @@ class BinSort {
"Kokkos::SortImpl::BinSortFunctor::bin_count", bin_op.max_bins());
bin_count_const = bin_count_atomic;
bin_offsets =
offset_type(ViewAllocateWithoutInitializing(
offset_type(view_alloc(WithoutInitializing,
"Kokkos::SortImpl::BinSortFunctor::bin_offsets"),
bin_op.max_bins());
sort_order =
offset_type(ViewAllocateWithoutInitializing(
offset_type(view_alloc(WithoutInitializing,
"Kokkos::SortImpl::BinSortFunctor::sort_order"),
range_end - range_begin);
}
@ -279,7 +279,7 @@ class BinSort {
}
scratch_view_type sorted_values(
ViewAllocateWithoutInitializing(
view_alloc(WithoutInitializing,
"Kokkos::SortImpl::BinSortFunctor::sorted_values"),
values.rank_dynamic > 0 ? len : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
values.rank_dynamic > 1 ? values.extent(1)
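For readers migrating their own code, a minimal sketch of the same allocation-property change (labels and extents illustrative; both spellings exist in this Kokkos version):
````cpp
#include <Kokkos_Core.hpp>

int main(int argc, char *argv[]) {
  Kokkos::initialize(argc, argv);
  {
    const int n = 128;
    // Old spelling: a dedicated wrapper object carries the request
    // to skip default-initialization of the allocation.
    Kokkos::View<double *> a(Kokkos::ViewAllocateWithoutInitializing("a"), n);
    // New spelling: the same request composed via view_alloc().
    Kokkos::View<double *> b(
        Kokkos::view_alloc(Kokkos::WithoutInitializing, "b"), n);
  }
  Kokkos::finalize();
  return 0;
}
````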
View File
@ -24,7 +24,7 @@ KOKKOS_ADD_TEST_LIBRARY(
# avoid deprecation warnings from MSVC
TARGET_COMPILE_DEFINITIONS(kokkosalgorithms_gtest PUBLIC GTEST_HAS_TR1_TUPLE=0 GTEST_HAS_PTHREAD=0)
IF(NOT (Kokkos_ENABLE_CUDA AND WIN32))
IF((NOT (Kokkos_ENABLE_CUDA AND WIN32)) AND (NOT ("${KOKKOS_CXX_COMPILER_ID}" STREQUAL "Fujitsu")))
TARGET_COMPILE_FEATURES(kokkosalgorithms_gtest PUBLIC cxx_std_11)
ENDIF()
View File
@ -31,10 +31,10 @@ ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1)
TEST_TARGETS += test-cuda
endif
ifeq ($(KOKKOS_INTERNAL_USE_ROCM), 1)
OBJ_ROCM = TestROCm.o UnitTestMain.o gtest-all.o
TARGETS += KokkosAlgorithms_UnitTest_ROCm
TEST_TARGETS += test-rocm
ifeq ($(KOKKOS_INTERNAL_USE_HIP), 1)
OBJ_HIP = TestHIP.o UnitTestMain.o gtest-all.o
TARGETS += KokkosAlgorithms_UnitTest_HIP
TEST_TARGETS += test-hip
endif
ifeq ($(KOKKOS_INTERNAL_USE_PTHREADS), 1)
@ -64,8 +64,8 @@ endif
KokkosAlgorithms_UnitTest_Cuda: $(OBJ_CUDA) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(EXTRA_PATH) $(OBJ_CUDA) $(KOKKOS_LIBS) $(LIB) $(KOKKOS_LDFLAGS) $(LDFLAGS) -o KokkosAlgorithms_UnitTest_Cuda
KokkosAlgorithms_UnitTest_ROCm: $(OBJ_ROCM) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(EXTRA_PATH) $(OBJ_ROCM) $(KOKKOS_LIBS) $(LIB) $(KOKKOS_LDFLAGS) $(LDFLAGS) -o KokkosAlgorithms_UnitTest_ROCm
KokkosAlgorithms_UnitTest_HIP: $(OBJ_HIP) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(EXTRA_PATH) $(OBJ_HIP) $(KOKKOS_LIBS) $(LIB) $(KOKKOS_LDFLAGS) $(LDFLAGS) -o KokkosAlgorithms_UnitTest_HIP
KokkosAlgorithms_UnitTest_Threads: $(OBJ_THREADS) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(EXTRA_PATH) $(OBJ_THREADS) $(KOKKOS_LIBS) $(LIB) $(KOKKOS_LDFLAGS) $(LDFLAGS) -o KokkosAlgorithms_UnitTest_Threads
@ -82,8 +82,8 @@ KokkosAlgorithms_UnitTest_Serial: $(OBJ_SERIAL) $(KOKKOS_LINK_DEPENDS)
test-cuda: KokkosAlgorithms_UnitTest_Cuda
./KokkosAlgorithms_UnitTest_Cuda
test-rocm: KokkosAlgorithms_UnitTest_ROCm
./KokkosAlgorithms_UnitTest_ROCm
test-hip: KokkosAlgorithms_UnitTest_HIP
./KokkosAlgorithms_UnitTest_HIP
test-threads: KokkosAlgorithms_UnitTest_Threads
./KokkosAlgorithms_UnitTest_Threads
View File
@ -1,31 +1,38 @@
KOKKOS_PATH = ${HOME}/kokkos
KOKKOS_DEVICES = "OpenMP"
KOKKOS_ARCH = "SNB"
EXE_NAME = "test"
KOKKOS_DEVICES=Cuda
KOKKOS_CUDA_OPTIONS=enable_lambda
KOKKOS_ARCH = "SNB,Volta70"
SRC = $(wildcard *.cpp)
MAKEFILE_PATH := $(subst Makefile,,$(abspath $(lastword $(MAKEFILE_LIST))))
ifndef KOKKOS_PATH
KOKKOS_PATH = $(MAKEFILE_PATH)../..
endif
SRC = $(wildcard $(MAKEFILE_PATH)*.cpp)
HEADERS = $(wildcard $(MAKEFILE_PATH)*.hpp)
vpath %.cpp $(sort $(dir $(SRC)))
default: build
echo "Start Build"
ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES)))
CXX = ${KOKKOS_PATH}/bin/nvcc_wrapper
EXE = ${EXE_NAME}.cuda
KOKKOS_CUDA_OPTIONS = "enable_lambda"
EXE = atomic_perf.cuda
else
CXX = g++
EXE = ${EXE_NAME}.host
EXE = atomic_perf.exe
endif
CXXFLAGS = -O3
LINK = ${CXX}
LINKFLAGS = -O3
CXXFLAGS ?= -O3 -g
override CXXFLAGS += -I$(MAKEFILE_PATH)
DEPFLAGS = -M
LINK = ${CXX}
LINKFLAGS =
OBJ = $(SRC:.cpp=.o)
OBJ = $(notdir $(SRC:.cpp=.o))
LIB =
include $(KOKKOS_PATH)/Makefile.kokkos
@ -36,9 +43,9 @@ $(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE)
clean: kokkos-clean
rm -f *.o *.cuda *.host
rm -f *.o atomic_perf.cuda atomic_perf.exe
# Compilation rules
%.o:%.cpp $(KOKKOS_CPP_DEPENDS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $<
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) $(HEADERS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< -o $(notdir $@)
View File
@ -9,7 +9,7 @@ if [[ ${USE_CUDA} > 0 ]]; then
BAF_EXE=bytes_and_flops.cuda
TEAM_SIZE=256
else
BAF_EXE=bytes_and_flops.host
BAF_EXE=bytes_and_flops.exe
TEAM_SIZE=1
fi
View File
@ -1,6 +1,6 @@
KOKKOS_DEVICES=Cuda
KOKKOS_CUDA_OPTIONS=enable_lambda
KOKKOS_ARCH = "SNB,Kepler35"
KOKKOS_ARCH = "SNB,Volta70"
MAKEFILE_PATH := $(subst Makefile,,$(abspath $(lastword $(MAKEFILE_LIST))))
@ -22,7 +22,7 @@ CXX = ${KOKKOS_PATH}/bin/nvcc_wrapper
EXE = bytes_and_flops.cuda
else
CXX = g++
EXE = bytes_and_flops.host
EXE = bytes_and_flops.exe
endif
CXXFLAGS ?= -O3 -g
View File
@ -1,7 +1,18 @@
KOKKOS_PATH = ${HOME}/kokkos
SRC = $(wildcard *.cpp)
KOKKOS_DEVICES=Cuda
KOKKOS_CUDA_OPTIONS=enable_lambda
KOKKOS_ARCH = "SNB,Volta70"
MAKEFILE_PATH := $(subst Makefile,,$(abspath $(lastword $(MAKEFILE_LIST))))
ifndef KOKKOS_PATH
KOKKOS_PATH = $(MAKEFILE_PATH)../..
endif
SRC = $(wildcard $(MAKEFILE_PATH)*.cpp)
HEADERS = $(wildcard $(MAKEFILE_PATH)*.hpp)
vpath %.cpp $(sort $(dir $(SRC)))
default: build
echo "Start Build"
@ -9,36 +20,32 @@ default: build
ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES)))
CXX = ${KOKKOS_PATH}/bin/nvcc_wrapper
EXE = gather.cuda
KOKKOS_DEVICES = "Cuda,OpenMP"
KOKKOS_ARCH = "SNB,Kepler35"
else
CXX = g++
EXE = gather.host
KOKKOS_DEVICES = "OpenMP"
KOKKOS_ARCH = "SNB"
EXE = gather.exe
endif
CXXFLAGS = -O3 -g
CXXFLAGS ?= -O3 -g
override CXXFLAGS += -I$(MAKEFILE_PATH)
DEPFLAGS = -M
LINK = ${CXX}
LINKFLAGS =
OBJ = $(SRC:.cpp=.o)
OBJ = $(notdir $(SRC:.cpp=.o))
LIB =
include $(KOKKOS_PATH)/Makefile.kokkos
$(warning ${KOKKOS_CPPFLAGS})
build: $(EXE)
$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE)
clean: kokkos-clean
rm -f *.o *.cuda *.host
rm -f *.o gather.cuda gather.exe
# Compilation rules
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) gather_unroll.hpp gather.hpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $<
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) $(HEADERS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< -o $(notdir $@)

View File

@ -1,28 +1,38 @@
#Set your Kokkos path to something appropriate
KOKKOS_PATH = ${HOME}/git/kokkos-github-repo
KOKKOS_DEVICES = "Cuda"
KOKKOS_ARCH = "Pascal60"
KOKKOS_CUDA_OPTIONS = enable_lambda
#KOKKOS_DEVICES = "OpenMP"
#KOKKOS_ARCH = "Power8"
KOKKOS_DEVICES=Cuda
KOKKOS_CUDA_OPTIONS=enable_lambda
KOKKOS_ARCH = "SNB,Volta70"
SRC = gups-kokkos.cc
MAKEFILE_PATH := $(subst Makefile,,$(abspath $(lastword $(MAKEFILE_LIST))))
ifndef KOKKOS_PATH
KOKKOS_PATH = $(MAKEFILE_PATH)../..
endif
SRC = $(wildcard $(MAKEFILE_PATH)*.cpp)
HEADERS = $(wildcard $(MAKEFILE_PATH)*.hpp)
vpath %.cpp $(sort $(dir $(SRC)))
default: build
echo "Start Build"
CXXFLAGS = -O3
CXX = ${HOME}/git/kokkos-github-repo/bin/nvcc_wrapper
#CXX = g++
ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES)))
CXX = ${KOKKOS_PATH}/bin/nvcc_wrapper
EXE = gups.cuda
else
CXX = g++
EXE = gups.exe
endif
LINK = ${CXX}
LINKFLAGS =
EXE = gups-kokkos
CXXFLAGS ?= -O3 -g
override CXXFLAGS += -I$(MAKEFILE_PATH)
DEPFLAGS = -M
LINK = ${CXX}
LINKFLAGS =
OBJ = $(SRC:.cc=.o)
OBJ = $(notdir $(SRC:.cpp=.o))
LIB =
include $(KOKKOS_PATH)/Makefile.kokkos
@ -33,9 +43,9 @@ $(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE)
clean: kokkos-clean
rm -f *.o $(EXE)
rm -f *.o gups.cuda gups.exe
# Compilation rules
%.o:%.cc $(KOKKOS_CPP_DEPENDS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $<
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) $(HEADERS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< -o $(notdir $@)

View File

@ -1,31 +1,38 @@
KOKKOS_PATH = ../..
SRC = $(wildcard *.cpp)
KOKKOS_DEVICES=Cuda
KOKKOS_CUDA_OPTIONS=enable_lambda
KOKKOS_ARCH = "SNB,Volta70"
MAKEFILE_PATH := $(subst Makefile,,$(abspath $(lastword $(MAKEFILE_LIST))))
ifndef KOKKOS_PATH
KOKKOS_PATH = $(MAKEFILE_PATH)../..
endif
SRC = $(wildcard $(MAKEFILE_PATH)*.cpp)
HEADERS = $(wildcard $(MAKEFILE_PATH)*.hpp)
vpath %.cpp $(sort $(dir $(SRC)))
default: build
echo "Start Build"
ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES)))
CXX = ${KOKKOS_PATH}/bin/nvcc_wrapper
CXXFLAGS = -O3 -g
LINK = ${CXX}
LINKFLAGS =
EXE = policy_performance.cuda
KOKKOS_DEVICES = "Cuda,OpenMP"
KOKKOS_ARCH = "SNB,Kepler35"
KOKKOS_CUDA_OPTIONS+=enable_lambda
EXE = policy_perf.cuda
else
CXX = g++
CXXFLAGS = -O3 -g -Wall -Werror
LINK = ${CXX}
LINKFLAGS =
EXE = policy_performance.host
KOKKOS_DEVICES = "OpenMP"
KOKKOS_ARCH = "SNB"
EXE = policy_perf.exe
endif
DEPFLAGS = -M
CXXFLAGS ?= -O3 -g
override CXXFLAGS += -I$(MAKEFILE_PATH)
OBJ = $(SRC:.cpp=.o)
DEPFLAGS = -M
LINK = ${CXX}
LINKFLAGS =
OBJ = $(notdir $(SRC:.cpp=.o))
LIB =
include $(KOKKOS_PATH)/Makefile.kokkos
@ -36,9 +43,9 @@ $(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE)
clean: kokkos-clean
rm -f *.o *.cuda *.host
rm -f *.o policy_perf.cuda policy_perf.exe
# Compilation rules
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) main.cpp policy_perf_test.hpp
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $<
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) $(HEADERS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< -o $(notdir $@)

View File

@ -146,11 +146,11 @@ int main(int argc, char* argv[]) {
// Call a 'warmup' test with 1 repeat - this will initialize the corresponding
// view appropriately for the test and should obey first-touch, etc. The second
// call to test is the one we actually care about and time.
view_type_1d v_1(Kokkos::ViewAllocateWithoutInitializing("v_1"),
view_type_1d v_1(Kokkos::view_alloc(Kokkos::WithoutInitializing, "v_1"),
team_range * team_size);
view_type_2d v_2(Kokkos::ViewAllocateWithoutInitializing("v_2"),
view_type_2d v_2(Kokkos::view_alloc(Kokkos::WithoutInitializing, "v_2"),
team_range * team_size, thread_range);
view_type_3d v_3(Kokkos::ViewAllocateWithoutInitializing("v_3"),
view_type_3d v_3(Kokkos::view_alloc(Kokkos::WithoutInitializing, "v_3"),
team_range * team_size, thread_range, vector_range);
double result_computed = 0.0;

View File

@ -1,28 +1,38 @@
#Set your Kokkos path to something appropriate
KOKKOS_PATH = ${HOME}/git/kokkos-github-repo
#KOKKOS_DEVICES = "Cuda"
#KOKKOS_ARCH = "Pascal60"
#KOKKOS_CUDA_OPTIONS = enable_lambda
KOKKOS_DEVICES = "OpenMP"
KOKKOS_ARCH = "Power8"
KOKKOS_DEVICES=Cuda
KOKKOS_CUDA_OPTIONS=enable_lambda
KOKKOS_ARCH = "SNB,Volta70"
SRC = stream-kokkos.cc
MAKEFILE_PATH := $(subst Makefile,,$(abspath $(lastword $(MAKEFILE_LIST))))
ifndef KOKKOS_PATH
KOKKOS_PATH = $(MAKEFILE_PATH)../..
endif
SRC = $(wildcard $(MAKEFILE_PATH)*.cpp)
HEADERS = $(wildcard $(MAKEFILE_PATH)*.hpp)
vpath %.cpp $(sort $(dir $(SRC)))
default: build
echo "Start Build"
CXXFLAGS = -O3
#CXX = ${HOME}/git/kokkos-github-repo/bin/nvcc_wrapper
ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES)))
CXX = ${KOKKOS_PATH}/bin/nvcc_wrapper
EXE = stream.cuda
else
CXX = g++
EXE = stream.exe
endif
LINK = ${CXX}
LINKFLAGS =
EXE = stream-kokkos
CXXFLAGS ?= -O3 -g
override CXXFLAGS += -I$(MAKEFILE_PATH)
DEPFLAGS = -M
LINK = ${CXX}
LINKFLAGS =
OBJ = $(SRC:.cc=.o)
OBJ = $(notdir $(SRC:.cpp=.o))
LIB =
include $(KOKKOS_PATH)/Makefile.kokkos
@ -33,9 +43,9 @@ $(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS)
$(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE)
clean: kokkos-clean
rm -f *.o $(EXE)
rm -f *.o stream.cuda stream.exe
# Compilation rules
%.o:%.cc $(KOKKOS_CPP_DEPENDS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $<
%.o:%.cpp $(KOKKOS_CPP_DEPENDS) $(HEADERS)
$(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< -o $(notdir $@)

View File

@ -0,0 +1,87 @@
#!/bin/bash -e
#
# This script allows CMAKE_CXX_COMPILER to be a standard
# C++ compiler and Kokkos sets RULE_LAUNCH_COMPILE and
# RULE_LAUNCH_LINK in CMake so that all compiler and link
# commands are prefixed with this script followed by the
# C++ compiler. Thus if $1 == $2 then we know the command
# was intended for the C++ compiler and we discard both
# $1 and $2 and redirect the command to NVCC_WRAPPER.
# If $1 != $2 then we know that the command was not intended
# for the C++ compiler and we just discard $1 and launch
# the original command. Examples of when $2 will not equal
# $1 are 'ar', 'cmake', etc. during the linking phase
#
# check the arguments for the KOKKOS_DEPENDENCE compiler definition
KOKKOS_DEPENDENCE=0
for i in ${@}
do
if [ -n "$(echo ${i} | grep 'KOKKOS_DEPENDENCE$')" ]; then
KOKKOS_DEPENDENCE=1
break
fi
done
# if C++ is not passed, someone is probably trying to invoke it directly
if [ -z "${1}" ]; then
echo -e "\n${BASH_SOURCE[0]} was invoked without the C++ compiler as the first argument."
echo "This script is not indended to be directly invoked by any mechanism other"
echo -e "than through a RULE_LAUNCH_COMPILE or RULE_LAUNCH_LINK property set in CMake\n"
exit 1
fi
# if there aren't two args, this isn't necessarily invalid, just a bit strange
if [ -z "${2}" ]; then exit 0; fi
# store the expected C++ compiler
CXX_COMPILER=${1}
# remove the expected C++ compiler from the arguments
shift
# after the above shift, $1 is now the exe for the compile or link command, e.g.
# kokkos_launch_compiler g++ gcc -c file.c -o file.o
# becomes:
# kokkos_launch_compiler gcc -c file.c -o file.o
# Check to see if the executable is the C++ compiler and if it is not, then
# just execute the command.
#
# Summary:
# kokkos_launch_compiler g++ gcc -c file.c -o file.o
# results in this command being executed:
# gcc -c file.c -o file.o
# and
# kokkos_launch_compiler g++ g++ -c file.cpp -o file.o
# results in this command being executed:
# nvcc_wrapper -c file.cpp -o file.o
if [[ "${KOKKOS_DEPENDENCE}" -eq "0" || "${CXX_COMPILER}" != "${1}" ]]; then
# the command does not depend on Kokkos so just execute the command w/o re-directing to nvcc_wrapper
eval $@
else
# the executable is the C++ compiler, so we need to re-direct to nvcc_wrapper
# find the nvcc_wrapper from the same build/install
NVCC_WRAPPER="$(dirname ${BASH_SOURCE[0]})/nvcc_wrapper"
if [ -z "${NVCC_WRAPPER}" ]; then
echo -e "\nError: nvcc_wrapper not found in $(dirname ${BASH_SOURCE[0]}).\n"
exit 1
fi
# set default nvcc wrapper compiler if not specified
: ${NVCC_WRAPPER_DEFAULT_COMPILER:=${CXX_COMPILER}}
export NVCC_WRAPPER_DEFAULT_COMPILER
# calling itself will cause an infinitely long build
if [ "${NVCC_WRAPPER}" = "${NVCC_WRAPPER_DEFAULT_COMPILER}" ]; then
echo -e "\nError: NVCC_WRAPPER == NVCC_WRAPPER_DEFAULT_COMPILER. Terminating to avoid infinite loop!\n"
exit 1
fi
# discard the compiler from the command
shift
# execute nvcc_wrapper
${NVCC_WRAPPER} $@
fi

View File

@ -90,7 +90,12 @@ replace_pragma_ident=0
# Mark first host compiler argument
first_xcompiler_arg=1
temp_dir=${TMPDIR:-/tmp}
# Allow for setting temp dir without setting TMPDIR in parent (see https://docs.olcf.ornl.gov/systems/summit_user_guide.html#setting-tmpdir-causes-jsm-jsrun-errors-job-state-flip-flop)
if [[ ! -z ${NVCC_WRAPPER_TMPDIR+x} ]]; then
temp_dir=${NVCC_WRAPPER_TMPDIR}
else
temp_dir=${TMPDIR:-/tmp}
fi
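# e.g. (hypothetical path), to steer scratch files without setting TMPDIR globally:
#   export NVCC_WRAPPER_TMPDIR=/scratch/$USER/nvcc-tmp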
# optimization flag added as a command-line argument
optimization_flag=""
@ -194,7 +199,7 @@ do
cuda_args="$cuda_args $1"
;;
#Handle known nvcc args that have an argument
-rdc|-maxrregcount|--default-stream|-Xnvlink|--fmad|-cudart|--cudart)
-rdc|-maxrregcount|--default-stream|-Xnvlink|--fmad|-cudart|--cudart|-include)
cuda_args="$cuda_args $1 $2"
shift
;;

View File

@ -1,3 +1,9 @@
# No need for policy push/pop: CMake automatically opens a new policy scope for
# scripts loaded by include() and find_package() commands, except when they are
# invoked with the NO_POLICY_SCOPE option
# CMP0057 + NEW -> IN_LIST operator in IF(...)
CMAKE_POLICY(SET CMP0057 NEW)
# Compute paths
@PACKAGE_INIT@
@ -12,3 +18,18 @@ GET_FILENAME_COMPONENT(Kokkos_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
INCLUDE("${Kokkos_CMAKE_DIR}/KokkosTargets.cmake")
INCLUDE("${Kokkos_CMAKE_DIR}/KokkosConfigCommon.cmake")
UNSET(Kokkos_CMAKE_DIR)
# if CUDA was enabled and separable compilation was NOT requested as a component, e.g. via
#   find_package(Kokkos COMPONENTS separable_compilation)
# then we set the RULE_LAUNCH_COMPILE and RULE_LAUNCH_LINK
IF(@Kokkos_ENABLE_CUDA@ AND NOT "separable_compilation" IN_LIST Kokkos_FIND_COMPONENTS)
# run test to see if CMAKE_CXX_COMPILER=nvcc_wrapper
kokkos_compiler_is_nvcc(IS_NVCC ${CMAKE_CXX_COMPILER})
# if not nvcc_wrapper, use RULE_LAUNCH_COMPILE and RULE_LAUNCH_LINK
IF(NOT IS_NVCC AND NOT CMAKE_CXX_COMPILER_ID STREQUAL Clang AND
(NOT DEFINED Kokkos_LAUNCH_COMPILER OR Kokkos_LAUNCH_COMPILER))
MESSAGE(STATUS "kokkos_launch_compiler is enabled globally. C++ compiler commands with -DKOKKOS_DEPENDENCE will be redirected to nvcc_wrapper")
kokkos_compilation(GLOBAL)
ENDIF()
UNSET(IS_NVCC) # be mindful of the environment, pollution is bad
ENDIF()
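# Downstream usage sketch:
#   find_package(Kokkos REQUIRED)                                  # launcher enabled when the conditions above hold
#   find_package(Kokkos REQUIRED COMPONENTS separable_compilation) # opt out and drive nvcc_wrapper yourself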

View File

@ -89,3 +89,73 @@ function(kokkos_check)
set(${KOKKOS_CHECK_RETURN_VALUE} ${KOKKOS_CHECK_SUCCESS} PARENT_SCOPE)
endif()
endfunction()
# this function is provided to easily select which files use nvcc_wrapper:
#
# GLOBAL --> all files
# TARGET --> all files in a target
# SOURCE --> specific source files
# DIRECTORY --> all files in directory
# PROJECT --> all files/targets in a project/subproject
#
FUNCTION(kokkos_compilation)
CMAKE_PARSE_ARGUMENTS(COMP "GLOBAL;PROJECT" "" "DIRECTORY;TARGET;SOURCE" ${ARGN})
# search relative first and then absolute
SET(_HINTS "${CMAKE_CURRENT_LIST_DIR}/../.." "@CMAKE_INSTALL_PREFIX@")
# find kokkos_launch_compiler
FIND_PROGRAM(Kokkos_COMPILE_LAUNCHER
NAMES kokkos_launch_compiler
HINTS ${_HINTS}
PATHS ${_HINTS}
PATH_SUFFIXES bin)
IF(NOT Kokkos_COMPILE_LAUNCHER)
MESSAGE(FATAL_ERROR "Kokkos could not find 'kokkos_launch_compiler'. Please set '-DKokkos_COMPILE_LAUNCHER=/path/to/launcher'")
ENDIF()
IF(COMP_GLOBAL)
# if global, don't bother setting others
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
SET_PROPERTY(GLOBAL PROPERTY RULE_LAUNCH_LINK "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
ELSE()
FOREACH(_TYPE PROJECT DIRECTORY TARGET SOURCE)
# make project/subproject scoping easy, e.g. KokkosCompilation(PROJECT) after project(...)
IF("${_TYPE}" STREQUAL "PROJECT" AND COMP_${_TYPE})
LIST(APPEND COMP_DIRECTORY ${PROJECT_SOURCE_DIR})
UNSET(COMP_${_TYPE})
ENDIF()
# set the properties if defined
IF(COMP_${_TYPE})
# MESSAGE(STATUS "Using nvcc_wrapper :: ${_TYPE} :: ${COMP_${_TYPE}}")
SET_PROPERTY(${_TYPE} ${COMP_${_TYPE}} PROPERTY RULE_LAUNCH_COMPILE "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
SET_PROPERTY(${_TYPE} ${COMP_${_TYPE}} PROPERTY RULE_LAUNCH_LINK "${Kokkos_COMPILE_LAUNCHER} ${CMAKE_CXX_COMPILER}")
ENDIF()
ENDFOREACH()
ENDIF()
ENDFUNCTION()
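# Usage sketch (hypothetical target/source names):
#   kokkos_compilation(GLOBAL)                # launch every compile/link command
#   kokkos_compilation(TARGET myapp)          # only the 'myapp' target
#   kokkos_compilation(SOURCE kernels.cpp)    # only specific source files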
# A test to check whether a downstream project set the C++ compiler to NVCC or not
# this is called only when Kokkos was installed with Kokkos_ENABLE_CUDA=ON
FUNCTION(kokkos_compiler_is_nvcc VAR COMPILER)
# Check if the compiler is nvcc (which really means nvcc_wrapper).
EXECUTE_PROCESS(COMMAND ${COMPILER} ${ARGN} --version
OUTPUT_VARIABLE INTERNAL_COMPILER_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE
RESULT_VARIABLE RET)
# something went wrong
IF(RET GREATER 0)
SET(${VAR} false PARENT_SCOPE)
ELSE()
STRING(REPLACE "\n" " - " INTERNAL_COMPILER_VERSION_ONE_LINE ${INTERNAL_COMPILER_VERSION} )
STRING(FIND ${INTERNAL_COMPILER_VERSION_ONE_LINE} "nvcc" INTERNAL_COMPILER_VERSION_CONTAINS_NVCC)
IF(${INTERNAL_COMPILER_VERSION_CONTAINS_NVCC} GREATER -1)
SET(${VAR} true PARENT_SCOPE)
ELSE()
SET(${VAR} false PARENT_SCOPE)
ENDIF()
ENDIF()
ENDFUNCTION()
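# Usage sketch: detect whether the chosen compiler is already nvcc/nvcc_wrapper
#   kokkos_compiler_is_nvcc(IS_NVCC ${CMAKE_CXX_COMPILER})
#   IF(NOT IS_NVCC)
#     # e.g. enable the compile launcher
#   ENDIF()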

View File

@ -1,4 +1,3 @@
/*
//@HEADER
// ************************************************************************
@ -42,6 +41,9 @@
// ************************************************************************
//@HEADER
*/
#ifndef @HEADER_GUARD_TAG@
#define @HEADER_GUARD_TAG@
#include <cuda/TestCuda_Category.hpp>
#include <TestComplex.hpp>
@INCLUDE_NEXT_FILE@
#endif

View File

@ -21,6 +21,7 @@
#cmakedefine KOKKOS_ENABLE_HPX
#cmakedefine KOKKOS_ENABLE_MEMKIND
#cmakedefine KOKKOS_ENABLE_LIBRT
#cmakedefine KOKKOS_ENABLE_SYCL
#ifndef __CUDA_ARCH__
#cmakedefine KOKKOS_ENABLE_TM
@ -31,7 +32,6 @@
#endif
/* General Settings */
#cmakedefine KOKKOS_ENABLE_CXX11
#cmakedefine KOKKOS_ENABLE_CXX14
#cmakedefine KOKKOS_ENABLE_CXX17
#cmakedefine KOKKOS_ENABLE_CXX20
@ -58,7 +58,7 @@
/* TPL Settings */
#cmakedefine KOKKOS_ENABLE_HWLOC
#cmakedefine KOKKOS_USE_LIBRT
#cmakedefine KOKKOS_ENABLE_HWBSPACE
#cmakedefine KOKKOS_ENABLE_HBWSPACE
#cmakedefine KOKKOS_ENABLE_LIBDL
#cmakedefine KOKKOS_IMPL_CUDA_CLANG_WORKAROUND

View File

@ -73,20 +73,20 @@ Compiler features are more fine-grained and require conflicting requests to be r
Suppose I have
````
add_library(A a.cpp)
target_compile_features(A PUBLIC cxx_std_11)
target_compile_features(A PUBLIC cxx_std_14)
````
then another target
````
add_library(B b.cpp)
target_compile_features(B PUBLIC cxx_std_14)
target_compile_features(B PUBLIC cxx_std_17)
target_link_libraries(A B)
````
I have requested two different features.
CMake understands the requests and knows that `cxx_std_11` is a subset of `cxx_std_14`.
CMake then picks C++14 for library `B`.
CMake understands the requests and knows that `cxx_std_14` is a subset of `cxx_std_17`.
CMake then picks C++17 for library `A`, which only requested C++14 directly.
CMake would not have been able to do feature resolution if we had directly done:
````
target_compile_options(A PUBLIC -std=c++11)
target_compile_options(A PUBLIC -std=c++14)
````
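As a rough sketch (reusing the `A` and `B` targets above), the direct and propagated requests can be inspected at configure time; the merged standard itself is applied at generate time:
````
get_target_property(A_FEATURES A COMPILE_FEATURES)           # cxx_std_14: A's direct request
get_target_property(B_IFACE   B INTERFACE_COMPILE_FEATURES)  # cxx_std_17: propagated to consumers of B
message(STATUS "A requests ${A_FEATURES} and inherits ${B_IFACE} from B")
````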
### Adding Kokkos Options

View File

@ -1,14 +1,16 @@
# @HEADER
# ************************************************************************
#
# Trilinos: An Object-Oriented Solver Framework
# Copyright (2001) Sandia Corporation
# Kokkos v. 3.0
# Copyright (2020) National Technology & Engineering
# Solutions of Sandia, LLC (NTESS).
#
# Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Copyright (2001) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000, there is a non-exclusive license for use of this
# work by or on behalf of the U.S. Government. Export of this program
# may require a license from the United States Government.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
@ -21,10 +23,10 @@
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ -33,22 +35,7 @@
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# NOTICE: The United States Government is granted for itself and others
# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide
# license in this data to reproduce, prepare derivative works, and
# perform publicly and display publicly. Beginning five (5) years from
# July 25, 2001, the United States Government is granted for itself and
# others acting on its behalf a paid-up, nonexclusive, irrevocable
# worldwide license in this data to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
#
# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT
# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES
# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR
# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY
# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS
# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS.
# Questions? Contact Christian R. Trott (crtrott@sandia.gov)
#
# ************************************************************************
# @HEADER

View File

@ -1,14 +1,16 @@
# @HEADER
# ************************************************************************
#
# Trilinos: An Object-Oriented Solver Framework
# Copyright (2001) Sandia Corporation
# Kokkos v. 3.0
# Copyright (2020) National Technology & Engineering
# Solutions of Sandia, LLC (NTESS).
#
# Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Copyright (2001) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000, there is a non-exclusive license for use of this
# work by or on behalf of the U.S. Government. Export of this program
# may require a license from the United States Government.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
@ -21,10 +23,10 @@
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ -33,22 +35,7 @@
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# NOTICE: The United States Government is granted for itself and others
# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide
# license in this data to reproduce, prepare derivative works, and
# perform publicly and display publicly. Beginning five (5) years from
# July 25, 2001, the United States Government is granted for itself and
# others acting on its behalf a paid-up, nonexclusive, irrevocable
# worldwide license in this data to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
#
# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT
# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES
# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR
# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY
# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS
# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS.
# Questions? Contact Christian R. Trott (crtrott@sandia.gov)
#
# ************************************************************************
# @HEADER

View File

@ -1,14 +1,16 @@
# @HEADER
# ************************************************************************
#
# Trilinos: An Object-Oriented Solver Framework
# Copyright (2001) Sandia Corporation
# Kokkos v. 3.0
# Copyright (2020) National Technology & Engineering
# Solutions of Sandia, LLC (NTESS).
#
# Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Copyright (2001) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000, there is a non-exclusive license for use of this
# work by or on behalf of the U.S. Government. Export of this program
# may require a license from the United States Government.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
@ -21,10 +23,10 @@
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ -33,22 +35,7 @@
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# NOTICE: The United States Government is granted for itself and others
# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide
# license in this data to reproduce, prepare derivative works, and
# perform publicly and display publicly. Beginning five (5) years from
# July 25, 2001, the United States Government is granted for itself and
# others acting on its behalf a paid-up, nonexclusive, irrevocable
# worldwide license in this data to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
#
# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT
# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES
# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR
# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY
# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS
# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS.
# Questions? Contact Christian R. Trott (crtrott@sandia.gov)
#
# ************************************************************************
# @HEADER

View File

@ -1,14 +1,16 @@
# @HEADER
# ************************************************************************
#
# Trilinos: An Object-Oriented Solver Framework
# Copyright (2001) Sandia Corporation
# Kokkos v. 3.0
# Copyright (2020) National Technology & Engineering
# Solutions of Sandia, LLC (NTESS).
#
# Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Copyright (2001) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000, there is a non-exclusive license for use of this
# work by or on behalf of the U.S. Government. Export of this program
# may require a license from the United States Government.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
@ -21,10 +23,10 @@
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ -33,22 +35,7 @@
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# NOTICE: The United States Government is granted for itself and others
# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide
# license in this data to reproduce, prepare derivative works, and
# perform publicly and display publicly. Beginning five (5) years from
# July 25, 2001, the United States Government is granted for itself and
# others acting on its behalf a paid-up, nonexclusive, irrevocable
# worldwide license in this data to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
#
# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT
# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES
# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR
# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY
# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS
# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS.
# Questions? Contact Christian R. Trott (crtrott@sandia.gov)
#
# ************************************************************************
# @HEADER

View File

@ -38,12 +38,6 @@ MACRO(GLOBAL_SET VARNAME)
SET(${VARNAME} ${ARGN} CACHE INTERNAL "" FORCE)
ENDMACRO()
FUNCTION(VERIFY_EMPTY CONTEXT)
if(${ARGN})
MESSAGE(FATAL_ERROR "Kokkos does not support all of Tribits. Unhandled arguments in ${CONTEXT}:\n${ARGN}")
endif()
ENDFUNCTION()
MACRO(PREPEND_GLOBAL_SET VARNAME)
ASSERT_DEFINED(${VARNAME})
GLOBAL_SET(${VARNAME} ${ARGN} ${${VARNAME}})
@ -89,7 +83,7 @@ FUNCTION(KOKKOS_ADD_TEST)
CMAKE_PARSE_ARGUMENTS(TEST
""
"EXE;NAME;TOOL"
""
"ARGS"
${ARGN})
IF(TEST_EXE)
SET(EXE_ROOT ${TEST_EXE})
@ -102,6 +96,7 @@ FUNCTION(KOKKOS_ADD_TEST)
NAME ${TEST_NAME}
COMM serial mpi
NUM_MPI_PROCS 1
ARGS ${TEST_ARGS}
${TEST_UNPARSED_ARGUMENTS}
ADDED_TESTS_NAMES_OUT ALL_TESTS_ADDED
)
@ -110,18 +105,25 @@ FUNCTION(KOKKOS_ADD_TEST)
SET(TEST_NAME ${PACKAGE_NAME}_${TEST_NAME})
SET(EXE ${PACKAGE_NAME}_${EXE_ROOT})
# The function TRIBITS_ADD_TEST() has a CATEGORIES argument that defaults
# to BASIC. If a project elects to only enable tests marked as PERFORMANCE,
# the test won't actually be added and attempting to set a property on it below
# will yield an error.
if(TARGET ${EXE})
if(TEST_TOOL)
add_dependencies(${EXE} ${TEST_TOOL}) #make sure the exe has to build the tool
foreach(TEST_ADDED ${ALL_TESTS_ADDED})
set_property(TEST ${TEST_ADDED} APPEND PROPERTY ENVIRONMENT "KOKKOS_PROFILE_LIBRARY=$<TARGET_FILE:${TEST_TOOL}>")
endforeach()
endif()
endif()
else()
CMAKE_PARSE_ARGUMENTS(TEST
"WILL_FAIL"
"FAIL_REGULAR_EXPRESSION;PASS_REGULAR_EXPRESSION;EXE;NAME;TOOL"
"CATEGORIES;CMD_ARGS"
"CATEGORIES;ARGS"
${ARGN})
SET(TESTS_ADDED)
# To match Tribits, we should always be receiving
# the root names of exes/libs
IF(TEST_EXE)
@ -133,11 +135,32 @@ FUNCTION(KOKKOS_ADD_TEST)
# These should be the full target name
SET(TEST_NAME ${PACKAGE_NAME}_${TEST_NAME})
SET(EXE ${PACKAGE_NAME}_${EXE_ROOT})
IF (TEST_ARGS)
SET(TEST_NUMBER 0)
FOREACH (ARG_STR ${TEST_ARGS})
# This is passed as a single string blob to match TriBITS behavior
# We need this to be turned into a list
STRING(REPLACE " " ";" ARG_STR_LIST ${ARG_STR})
IF(WIN32)
ADD_TEST(NAME ${TEST_NAME} WORKING_DIRECTORY ${LIBRARY_OUTPUT_PATH} COMMAND ${EXE}${CMAKE_EXECUTABLE_SUFFIX} ${TEST_CMD_ARGS})
ADD_TEST(NAME ${TEST_NAME}${TEST_NUMBER} WORKING_DIRECTORY ${LIBRARY_OUTPUT_PATH}
COMMAND ${EXE}${CMAKE_EXECUTABLE_SUFFIX} ${ARG_STR_LIST})
ELSE()
ADD_TEST(NAME ${TEST_NAME} COMMAND ${EXE} ${TEST_CMD_ARGS})
ADD_TEST(NAME ${TEST_NAME}${TEST_NUMBER} COMMAND ${EXE} ${ARG_STR_LIST})
ENDIF()
LIST(APPEND TESTS_ADDED "${TEST_NAME}${TEST_NUMBER}")
MATH(EXPR TEST_NUMBER "${TEST_NUMBER} + 1")
ENDFOREACH()
ELSE()
IF(WIN32)
ADD_TEST(NAME ${TEST_NAME} WORKING_DIRECTORY ${LIBRARY_OUTPUT_PATH}
COMMAND ${EXE}${CMAKE_EXECUTABLE_SUFFIX})
ELSE()
ADD_TEST(NAME ${TEST_NAME} COMMAND ${EXE})
ENDIF()
LIST(APPEND TESTS_ADDED "${TEST_NAME}")
ENDIF()
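# e.g. (hypothetical): KOKKOS_ADD_TEST(NAME PerfTest EXE perf_test ARGS "--fast" "--n 100")
# registers ${PACKAGE_NAME}_PerfTest0 and ${PACKAGE_NAME}_PerfTest1, one test per ARGS string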
FOREACH(TEST_NAME ${TESTS_ADDED})
IF(TEST_WILL_FAIL)
SET_TESTS_PROPERTIES(${TEST_NAME} PROPERTIES WILL_FAIL ${TEST_WILL_FAIL})
ENDIF()
@ -151,6 +174,7 @@ FUNCTION(KOKKOS_ADD_TEST)
add_dependencies(${EXE} ${TEST_TOOL}) #make sure the exe has to build the tool
set_property(TEST ${TEST_NAME} APPEND_STRING PROPERTY ENVIRONMENT "KOKKOS_PROFILE_LIBRARY=$<TARGET_FILE:${TEST_TOOL}>")
endif()
ENDFOREACH()
VERIFY_EMPTY(KOKKOS_ADD_TEST ${TEST_UNPARSED_ARGUMENTS})
endif()
ENDFUNCTION()

Some files were not shown because too many files have changed in this diff.