Get the PyTorch interface for MLIAP working with Kokkos. This uses CuPy to pass device arrays to Python without copying, and a simple example is provided

Matt Bettencourt
2022-11-14 17:49:00 +01:00
parent 07fe2fa29d
commit d47acfc0c4
14 changed files with 742 additions and 38 deletions
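The core technique behind the CuPy usage, sketched here for reference (this snippet is not part of the commit; wrap_device_pointer, ptr, and shape are hypothetical placeholders): a raw CUDA device pointer handed over by LAMMPS is viewed as a CuPy array without any copy, which is what the new mliap_model_python_couple_kokkos.pyx module does for the element-type, descriptor, beta, and energy buffers.

import cupy

def wrap_device_pointer(ptr, shape, dtype=cupy.double):
    # Compute the buffer size in bytes for the requested shape/dtype.
    nbytes = cupy.dtype(dtype).itemsize
    for dim in shape:
        nbytes *= dim
    # View the existing device allocation; LAMMPS keeps ownership (owner=None).
    mem = cupy.cuda.UnownedMemory(ptr=int(ptr), size=nbytes, owner=None)
    memptr = cupy.cuda.MemoryPointer(mem, 0)
    # The returned ndarray aliases the original device memory; no copy is made.
    return cupy.ndarray(shape, dtype, memptr=memptr)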

View File

@@ -3,6 +3,59 @@
if(CMAKE_CXX_STANDARD LESS 14)
message(FATAL_ERROR "The KOKKOS package requires the C++ standard to be set to at least C++14")
endif()
# if PYTHON package is included we may also include Python support in ML-IAP
set(MLIAP_ENABLE_PYTHON_DEFAULT_KOKKOS OFF)
if(PKG_PYTHON)
find_package(Cythonize QUIET)
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.14)
find_package(Python COMPONENTS NumPy QUIET)
else()
# assume we have NumPy
set(Python_NumPy_FOUND ON)
endif()
if(Cythonize_FOUND AND Python_NumPy_FOUND)
set(MLIAP_ENABLE_PYTHON_DEFAULT_KOKKOS ON)
endif()
endif()
option(MLIAP_ENABLE_PYTHON "Build ML-IAP package with Python support" ${MLIAP_ENABLE_PYTHON_DEFAULT_KOKKOS})
if(MLIAP_ENABLE_PYTHON)
find_package(Cythonize REQUIRED)
if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.14)
find_package(Python COMPONENTS NumPy REQUIRED)
endif()
if(NOT PKG_PYTHON)
message(FATAL_ERROR "Must enable PYTHON package for including Python support in ML-IAP")
endif()
if(CMAKE_VERSION VERSION_LESS 3.12)
if(PYTHONLIBS_VERSION_STRING VERSION_LESS 3.6)
message(FATAL_ERROR "Python support in ML-IAP requires Python 3.6 or later")
endif()
else()
if(Python_VERSION VERSION_LESS 3.6)
message(FATAL_ERROR "Python support in ML-IAP requires Python 3.6 or later")
endif()
endif()
set(MLIAP_BINARY_DIR ${CMAKE_BINARY_DIR}/cython)
file(GLOB MLIAP_CYTHON_SRC ${LAMMPS_SOURCE_DIR}/KOKKOS/*.pyx)
file(MAKE_DIRECTORY ${MLIAP_BINARY_DIR})
foreach(MLIAP_CYTHON_FILE ${MLIAP_CYTHON_SRC})
get_filename_component(MLIAP_CYTHON_BASE ${MLIAP_CYTHON_FILE} NAME_WE)
add_custom_command(OUTPUT ${MLIAP_BINARY_DIR}/${MLIAP_CYTHON_BASE}.cpp ${MLIAP_BINARY_DIR}/${MLIAP_CYTHON_BASE}.h
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${MLIAP_CYTHON_FILE} ${MLIAP_BINARY_DIR}/${MLIAP_CYTHON_BASE}.pyx
COMMAND ${Cythonize_EXECUTABLE} -3 ${MLIAP_BINARY_DIR}/${MLIAP_CYTHON_BASE}.pyx
WORKING_DIRECTORY ${MLIAP_BINARY_DIR}
MAIN_DEPENDENCY ${MLIAP_CYTHON_FILE}
COMMENT "Generating C++ sources with cythonize...")
target_sources(lammps PRIVATE ${MLIAP_BINARY_DIR}/${MLIAP_CYTHON_BASE}.cpp)
endforeach()
target_compile_definitions(lammps PRIVATE -DMLIAP_PYTHON)
target_include_directories(lammps PRIVATE ${MLIAP_BINARY_DIR})
endif()
########################################################################
# consistency checks and Kokkos options/settings required by LAMMPS
if(Kokkos_ENABLE_CUDA)
@@ -143,6 +196,7 @@ if(PKG_ML-IAP)
list(APPEND KOKKOS_PKG_SOURCES ${KOKKOS_PKG_SOURCES_DIR}/mliap_data_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/mliap_descriptor_so3_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/mliap_model_linear_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/mliap_model_python_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/mliap_so3_kokkos.cpp)
endif()

View File

@@ -0,0 +1,104 @@
# Demonstrate how to load a model from the python side.
# This is essentially the same as in.mliap.pytorch.Ta06A
# except that python is the driving program, and lammps
# is in library mode.
before_loading =\
"""# Demonstrate MLIAP/PyTorch interface to linear SNAP potential
# Initialize simulation
variable nsteps index 100
variable nrep equal 4
variable a equal 3.316
units metal
# generate the box and atom positions using a BCC lattice
variable nx equal ${nrep}
variable ny equal ${nrep}
variable nz equal ${nrep}
boundary p p p
lattice bcc $a
region box block 0 ${nx} 0 ${ny} 0 ${nz}
create_box 1 box
create_atoms 1 box
mass 1 180.88
# choose potential
# DATE: 2014-09-05 UNITS: metal CONTRIBUTOR: Aidan Thompson athomps@sandia.gov CITATION: Thompson, Swiler, Trott, Foiles and Tucker, arxiv.org, 1409.3880 (2014)
# Definition of SNAP potential Ta_Cand06A
# Assumes 1 LAMMPS atom type
variable zblcutinner equal 4
variable zblcutouter equal 4.8
variable zblz equal 73
# Specify hybrid with SNAP, ZBL
pair_style hybrid/overlay &
zbl ${zblcutinner} ${zblcutouter} &
mliap model mliappy LATER &
descriptor sna Ta06A.mliap.descriptor
pair_coeff 1 1 zbl ${zblz} ${zblz}
pair_coeff * * mliap Ta
"""
after_loading =\
"""
# Setup output
compute eatom all pe/atom
compute energy all reduce sum c_eatom
compute satom all stress/atom NULL
compute str all reduce sum c_satom[1] c_satom[2] c_satom[3]
variable press equal (c_str[1]+c_str[2]+c_str[3])/(3*vol)
thermo_style custom step temp epair c_energy etotal press v_press
thermo 10
thermo_modify norm yes
# Set up NVE run
timestep 0.5e-3
neighbor 1.0 bin
neigh_modify once no every 1 delay 0 check yes
# Run MD
velocity all create 300.0 4928459 loop geom
fix 1 all nve
run ${nsteps}
"""
import lammps
lmp = lammps.lammps(cmdargs=['-k', 'on', 'g', '1', '-sf', 'kk', '-pk', 'kokkos', 'neigh', 'full', 'newton', 'on', '-echo', 'both'])
# Before defining the pair style, one must do the following:
import lammps.mliap
lammps.mliap.activate_mliappy_kokkos(lmp)
# Otherwise, when running lammps in library mode,
# you will get an error:
# "ERROR: Loading MLIAPPY coupling module failure."
# Setup the simulation and declare an empty model
# by specifying model filename as "LATER"
lmp.commands_string(before_loading)
# Define the model however you like. In this example
# we load it from disk:
import torch
model = torch.load('Ta06A.mliap.pytorch.model.pt')
# Connect the PyTorch model to the mliap pair style.
lammps.mliap.load_model_kokkos(model)
# run the simulation with the mliap pair style
lmp.commands_string(after_loading)

View File

@@ -31,5 +31,8 @@ if not pylib.Py_IsInitialized():
"in undefined behavior.")
else:
from .loader import load_model, load_unified, activate_mliappy
try:
from .loader import load_model_kokkos, activate_mliappy_kokkos
except ImportError:
pass
del sysconfig, ctypes, library, pylib

View File

@@ -56,6 +56,7 @@ class DynamicLoader(importlib.abc.Loader):
def activate_mliappy(lmp):
try:
print("activate_mliappy")
library = lmp.lib
module_names = ["mliap_model_python_couple", "mliap_unified_couple"]
api_version = library.lammps_python_api_version()
@@ -72,8 +73,28 @@ def activate_mliappy(lmp):
except Exception as ee:
raise ImportError("Could not load ML-IAP python coupling module.") from ee
def activate_mliappy_kokkos(lmp):
try:
print("activate_mliappy_kokkos")
library = lmp.lib
module_names = ["mliap_model_python_couple_kokkos"]
api_version = library.lammps_python_api_version()
for module_name in module_names:
# Make Machinery
loader = DynamicLoader(module_name,library,api_version)
spec = importlib.util.spec_from_loader(module_name,loader)
# Do the import
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
except Exception as ee:
raise ImportError("Could not load ML-IAP python coupling module.") from ee
def load_model(model):
try:
print("load_model")
import mliap_model_python_couple
except ImportError as ie:
raise ImportError("ML-IAP python module must be activated before loading\n"
@@ -81,6 +102,17 @@ def load_model(model):
) from ie
mliap_model_python_couple.load_from_python(model)
def load_model_kokkos(model):
try:
print("load_model_kokkos")
import mliap_model_python_couple_kokkos
except ImportError as ie:
raise ImportError("ML-IAP python module must be activated before loading\n"
"the pair style. Call lammps.mliap.activate_mliappy(lmp)."
) from ie
mliap_model_python_couple_kokkos.load_from_python(model)
def load_unified(model):
try:
import mliap_unified_couple
@@ -89,3 +121,4 @@ def load_unified(model):
"the pair style. Call lammps.mliap.activate_mliappy(lmp)."
) from ie
mliap_unified_couple.load_from_python(model)

View File

@@ -89,7 +89,6 @@ class TorchWrapper(torch.nn.Module):
"""
super().__init__()
self.model = model
self.device = device
self.dtype = dtype
@@ -105,7 +104,7 @@ class TorchWrapper(torch.nn.Module):
self.n_descriptors = n_descriptors
self.n_elements = n_elements
def forward(self, elems, descriptors, beta, energy):
def forward(self, elems, descriptors, beta, energy,use_gpu_data=False):
"""
Takes element types and descriptors calculated via lammps and
calculates the per atom energies and forces.
@@ -130,20 +129,28 @@
-------
None
"""
descriptors = torch.from_numpy(descriptors).to(dtype=self.dtype, device=self.device).requires_grad_(True)
elems = torch.from_numpy(elems).to(dtype=torch.long, device=self.device) - 1
descriptors = torch.as_tensor(descriptors,dtype=self.dtype, device=self.device).requires_grad_(True)
elems = torch.as_tensor(elems,dtype=torch.int32, device=self.device)
elems=elems-1
with torch.autograd.enable_grad():
energy_nn = self.model(descriptors, elems)
if energy_nn.ndim > 1:
energy_nn = energy_nn.flatten()
beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)
if (use_gpu_data):
energy_nn = torch.as_tensor(energy,dtype=self.dtype, device=self.device)
energy_nn[:] = self.model(descriptors, elems).flatten()
else:
energy_nn = self.model(descriptors, elems).flatten()
energy[:] = energy_nn.detach().cpu().numpy().astype(np.float64)
#if energy_nn.ndim > 1:
# energy_nn = energy_nn.flatten()
if (use_gpu_data):
beta_nn = torch.as_tensor(beta,dtype=self.dtype, device=self.device)
beta_nn[:] = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
else:
beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)
elems=elems+1
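A minimal sketch, assuming a CUDA-capable PyTorch and CuPy, of the in-place GPU path that use_gpu_data=True selects above: torch.as_tensor wraps the caller-provided CuPy buffer without copying, so assignments through the tensor land directly in the memory that LAMMPS reads back (the buffer name beta_cp is illustrative only).

import cupy
import torch

beta_cp = cupy.zeros((4, 8))                       # device buffer owned by the caller
beta_t = torch.as_tensor(beta_cp, device='cuda')   # shares the same device memory, no copy
beta_t[:] = 1.0                                    # the write is visible through the CuPy view
assert float(cupy.sum(beta_cp)) == 32.0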
class IgnoreElems(torch.nn.Module):

View File

@@ -201,6 +201,8 @@ action mliap_descriptor_so3_kokkos.cpp mliap_descriptor_so3.cpp
action mliap_descriptor_so3_kokkos.h mliap_descriptor_so3.h
action mliap_model_linear_kokkos.cpp mliap_model_linear.cpp
action mliap_model_linear_kokkos.h mliap_model_linear.h
action mliap_model_python_kokkos.cpp mliap_model_linear.cpp
action mliap_model_python_kokkos.h mliap_model_linear.h
action mliap_model_kokkos.h mliap_model.h
action mliap_so3_kokkos.cpp mliap_so3.cpp
action mliap_so3_kokkos.h mliap_so3.h
@@ -359,6 +361,9 @@ action transpose_helper_kokkos.h
action verlet_kokkos.cpp
action verlet_kokkos.h
# Install cython pyx file only if Python is also available
action mliap_model_python_couple_kokkos.pyx python_impl_kokkos.cpp
# edit 2 Makefile.package files to include/exclude package info
if (test $1 = 1) then
@@ -409,3 +414,55 @@ elif (test $1 = 0) then
fi
fi
#Python cython stuff
# edit 2 Makefile.package files to include/exclude package info
if (test $1 = 1) then
if (type cythonize > /dev/null 2>&1 && test -e ../python_impl.cpp) then
if (test -e ../Makefile.package) then
sed -i -e 's|^PKG_INC =[ \t]*|&-DMLIAP_PYTHON |' ../Makefile.package
fi
if (test -e ../Makefile.package.settings) then
sed -i -e '/^include.*python.*mliap_python.*$/d' ../Makefile.package.settings
# multiline form needed for BSD sed on Macs
sed -i -e '4 i \
include ..\/..\/lib\/python\/Makefile.mliap_python
' ../Makefile.package.settings
fi
cythonize -3 ../mliap_model_python_couple_kokkos.pyx
fi
elif (test $1 = 0) then
if (test -e ../Makefile.package) then
sed -i -e 's/[^ \t]*-DMLIAP_PYTHON[^ \t]* //g' ../Makefile.package
fi
rm -f ../mliap_model_python_couple_kokkos.cpp ../mliap_model_python_couple_kokkos.h
sed -i -e '/^include.*python.*mliap_python.*$/d' ../Makefile.package.settings
elif (test $1 = 2) then
if (type cythonize > /dev/null 2>&1 && test -e ../python_impl.cpp) then
if (test -e ../Makefile.package) then
sed -i -e 's/[^ \t]*-DMLIAP_PYTHON[^ \t]* //g' ../Makefile.package
fi
rm -f ../mliap_model_python_couple_kokkos.cpp ../mliap_model_python_couple_kokkos.h
sed -i -e '/^include.*python.*mliap_python.*$/d' ../Makefile.package.settings
if (test -e ../Makefile.package) then
sed -i -e 's|^PKG_INC =[ \t]*|&-DMLIAP_PYTHON |' ../Makefile.package
fi
if (test -e ../Makefile.package.settings) then
sed -i -e '/^include.*python.*mliap_python.*$/d' ../Makefile.package.settings
# multiline form needed for BSD sed on Macs
sed -i -e '4 i \
include ..\/..\/lib\/python\/Makefile.mliap_python
' ../Makefile.package.settings
fi
cythonize -3 ../mliap_model_python_couple_kokkos.pyx
else
rm -f ../mliap_model_python_couple_kokkos.cpp ../mliap_model_python_couple_kokkos.h
fi
fi

View File

@@ -0,0 +1,157 @@
# cython: language_level=3
# distutils: language = c++
cimport cython
from libc.stdint cimport uintptr_t
import pickle
# For converting C arrays to numpy arrays
import numpy
import torch
import cupy
# For converting void * to integer for tracking object identity
from libc.stdint cimport uintptr_t
from libcpp.string cimport string
cdef extern from "mliap_data_kokkos.h" namespace "LAMMPS_NS":
cdef cppclass MLIAPDataKokkosDevice:
# Array shapes
int nlistatoms
int ndescriptors
# Input data
int * ielems # types for all atoms in list
double * descriptors # descriptors for all atoms in list
# Output data to write to
double * betas # betas for all atoms in list
double * eatoms # energy for all atoms in list
double *energy
int dev
cdef extern from "mliap_model_python_kokkos.h" namespace "LAMMPS_NS":
cdef cppclass MLIAPModelPythonKokkosDevice:
void connect_param_counts()
class MLIAPPYKokkosModelNotLinked(Exception): pass
LOADED_MODELS = {}
cdef object c_id(MLIAPModelPythonKokkosDevice * c_model):
"""
Use python-style id of object to keep track of identity.
Note, this is probably not a perfect general strategy but it should work fine with LAMMPS pair styles.
"""
return int(<uintptr_t> c_model)
cdef object retrieve(MLIAPModelPythonKokkosDevice * c_model) with gil:
try:
model = LOADED_MODELS[c_id(c_model)]
except KeyError as ke:
raise KeyError("Model has not been loaded.") from ke
if model is None:
raise MLIAPPYKokkosModelNotLinked("Model not linked, connect the model from the python side.")
return model
cdef public int MLIAPPYKokkos_load_model(MLIAPModelPythonKokkosDevice * c_model, char* fname) with gil:
str_fname = fname.decode('utf-8')  # Python 3 only; Python 2 is not supported.
if str_fname == "LATER":
model = None
returnval = 0
else:
if str_fname.endswith(".pt") or str_fname.endswith('.pth'):
import torch
model = torch.load(str_fname)
else:
with open(str_fname,'rb') as pfile:
model = pickle.load(pfile)
returnval = 1
LOADED_MODELS[c_id(c_model)] = model
return returnval
def load_from_python(model):
unloaded_models = [k for k, v in LOADED_MODELS.items() if v is None]
num_models = len(unloaded_models)
cdef MLIAPModelPythonKokkosDevice * lmp_model
if num_models == 0:
raise ValueError("No model in the waiting area.")
elif num_models > 1:
raise ValueError("Model is amibguous, more than one model in waiting area.")
else:
c_id = unloaded_models[0]
LOADED_MODELS[c_id]=model
lmp_model = <MLIAPModelPythonKokkosDevice *> <uintptr_t> c_id
lmp_model.connect_param_counts()
cdef public void MLIAPPYKokkos_unload_model(MLIAPModelPythonKokkosDevice * c_model) with gil:
del LOADED_MODELS[c_id(c_model)]
cdef public int MLIAPPYKokkos_nparams(MLIAPModelPythonKokkosDevice * c_model) with gil:
return int(retrieve(c_model).n_params)
cdef public int MLIAPPYKokkos_nelements(MLIAPModelPythonKokkosDevice * c_model) with gil:
return int(retrieve(c_model).n_elements)
cdef public int MLIAPPYKokkos_ndescriptors(MLIAPModelPythonKokkosDevice * c_model) with gil:
return int(retrieve(c_model).n_descriptors)
cdef create_array(device, void *pointer, shape,is_int):
size=1
for i in shape:
size = size*i
if ( device == 1):
mem = cupy.cuda.UnownedMemory(ptr=int( <uintptr_t> pointer), owner=None, size=size)
memptr = cupy.cuda.MemoryPointer(mem, 0)
type=cupy.double
if (is_int):
type=cupy.int32
return cupy.ndarray(shape, type, memptr=memptr)
else:
if (len(shape) == 1 ):
if (is_int):
return numpy.asarray(<int[:shape[0]]>pointer)
else:
return numpy.asarray(<double[:shape[0]]>pointer)
else:
if (is_int):
return numpy.asarray(<int[:shape[0],:shape[1]]>pointer)
else:
return numpy.asarray(<double[:shape[0],:shape[1]]>pointer)
cdef public void MLIAPPYKokkos_compute_gradients(MLIAPModelPythonKokkosDevice * c_model, MLIAPDataKokkosDevice * data) with gil:
dev=data.dev
torch.cuda.nvtx.range_push("set data fields")
model = retrieve(c_model)
n_d = data.ndescriptors
n_a = data.nlistatoms
# Make cupy (device) or numpy (host) arrays from the raw pointers
elem_cp = create_array(dev, data.ielems, (n_a,), True)
en_cp = create_array(dev, data.eatoms, (n_a,), False)
beta_cp = create_array(dev, data.betas, (n_a, n_d), False)
desc_cp = create_array(dev, data.descriptors, (n_a, n_d), False)
torch.cuda.nvtx.range_pop()
# Invoke python model on numpy arrays.
torch.cuda.nvtx.range_push("call model")
model(elem_cp,desc_cp,beta_cp,en_cp,dev==1)
torch.cuda.nvtx.range_pop()
# Get the total energy from the atom energy.
energy = cupy.sum(en_cp)
data.energy[0] = <double> energy
return

View File

@@ -0,0 +1,181 @@
// clang-format off
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS Development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Matt Bettencourt (NVIDIA)
------------------------------------------------------------------------- */
#ifdef MLIAP_PYTHON
#include "mliap_model_python_kokkos.h"
#include "mliap_data_kokkos.h"
#include "comm.h"
#include "error.h"
#include "utils.h"
#include "mliap_model_python_couple_kokkos.h"
#include "lmppython.h"
#include "python_compat.h"
#include <type_traits>
using namespace LAMMPS_NS;
template<class DeviceType>
MLIAPModelPythonKokkos<DeviceType>::~MLIAPModelPythonKokkos() {
auto nontemplated_this = static_cast<MLIAPModelPythonKokkosDevice*>((void*)this);
if (model_loaded)
MLIAPPYKokkos_unload_model(nontemplated_this);
model_loaded=false;
}
template<class DeviceType>
MLIAPModelPythonKokkos<DeviceType>::MLIAPModelPythonKokkos(LAMMPS *lmp, char *coefffilename) :
MLIAPModelPython(lmp,coefffilename,true),
MLIAPModelKokkos<DeviceType>(lmp, this)
{
if (!std::is_same<DeviceType,LMPDeviceType>::value )
MLIAPModelKokkos<DeviceType>::error->all(FLERR, "MLIAP Kokkos version of the python interface is ONLY available on device");
model_loaded = 0;
MLIAPModelKokkos<DeviceType>::python->init();
PyGILState_STATE gstate = PyGILState_Ensure();
PyObject *pyMain = PyImport_AddModule("__main__");
if (!pyMain) {
PyGILState_Release(gstate);
MLIAPModelKokkos<DeviceType>::error->all(FLERR, "Could not initialize embedded Python");
}
PyObject *coupling_module = PyImport_ImportModule("mliap_model_python_couple_kokkos");
if (!coupling_module) {
PyErr_Print();
PyErr_Clear();
PyGILState_Release(gstate);
MLIAPModelKokkos<DeviceType>::error->all(FLERR, "Loading MLIAPPYKokkos coupling module failure.");
}
// Recipe from lammps/src/pair_python.cpp :
// add current directory to PYTHONPATH
PyObject *py_path = PySys_GetObject((char *) "path");
PyList_Append(py_path, PY_STRING_FROM_STRING("."));
// if LAMMPS_POTENTIALS environment variable is set, add it to PYTHONPATH as well
const char *potentials_path = getenv("LAMMPS_POTENTIALS");
if (potentials_path != nullptr) {
PyList_Append(py_path, PY_STRING_FROM_STRING(potentials_path));
}
PyGILState_Release(gstate);
if (coefffilename) read_coeffs(coefffilename);
if (coefffilename) MLIAPModelKokkos<DeviceType>::set_k_coeffelem();
nonlinearflag = 1;
}
/* ---------------------------------------------------------------------- */
template<class DeviceType>
void MLIAPModelPythonKokkos<DeviceType>::read_coeffs(char *fname)
{
PyGILState_STATE gstate = PyGILState_Ensure();
auto nontemplated_this = static_cast<MLIAPModelPythonKokkosDevice*>((void*)this);
model_loaded = MLIAPPYKokkos_load_model(nontemplated_this, fname);
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
PyGILState_Release(gstate);
MLIAPModelKokkos<DeviceType>::error->all(FLERR, "Loading python model failure.");
}
PyGILState_Release(gstate);
if (model_loaded) {
this->connect_param_counts();
} else {
if (MLIAPModelKokkos<DeviceType>::comm->me == 0)
utils::logmesg(MLIAPModelKokkos<DeviceType>::lmp, "Loading python model deferred.\n");
}
}
/* ---------------------------------------------------------------------- */
// Finalize loading of the model.
template<class DeviceType>
void MLIAPModelPythonKokkos<DeviceType>::connect_param_counts()
{
PyGILState_STATE gstate = PyGILState_Ensure();
auto nontemplated_this = static_cast<MLIAPModelPythonKokkosDevice*>((void*)this);
nelements = MLIAPPYKokkos_nelements(nontemplated_this);
nparams = MLIAPPYKokkos_nparams(nontemplated_this);
ndescriptors = MLIAPPYKokkos_ndescriptors(nontemplated_this);
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
PyGILState_Release(gstate);
MLIAPModelKokkos<DeviceType>::error->all(FLERR, "Loading python model failure.");
}
PyGILState_Release(gstate);
model_loaded = 1;
utils::logmesg(MLIAPModelKokkos<DeviceType>::lmp, "Loading python model complete.\n");
}
/* ---------------------------------------------------------------------- */
template<class DeviceType>
void MLIAPModelPythonKokkos<DeviceType>::compute_gradients(class MLIAPData *data)
{
if (!model_loaded) { MLIAPModelKokkos<DeviceType>::error->all(FLERR, "Model not loaded."); }
PyGILState_STATE gstate = PyGILState_Ensure();
auto nontemplated_this = static_cast<MLIAPModelPythonKokkosDevice*>((void*)this);
auto *kokkos_data = dynamic_cast<MLIAPDataKokkos<DeviceType>*>(data);
MLIAPDataKokkosDevice raw_data(*kokkos_data);
MLIAPPYKokkos_compute_gradients(nontemplated_this, &raw_data);
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
PyGILState_Release(gstate);
MLIAPModelKokkos<DeviceType>::error->all(FLERR, "Running python model failure.");
}
PyGILState_Release(gstate);
}
/* ---------------------------------------------------------------------- */
template<class DeviceType>
void MLIAPModelPythonKokkos<DeviceType>::compute_gradgrads(class MLIAPData *data)
{
MLIAPModelPython::compute_gradgrads(data);
}
/* ---------------------------------------------------------------------- */
template<class DeviceType>
void MLIAPModelPythonKokkos<DeviceType>::compute_force_gradients(class MLIAPData *data)
{
MLIAPModelPython::compute_force_gradients(data);
}
/* ---------------------------------------------------------------------- */
namespace LAMMPS_NS {
template class MLIAPModelPythonKokkos<LMPDeviceType>;
#ifdef LMP_KOKKOS_GPU
template class MLIAPModelPythonKokkos<LMPHostType>;
#endif
}
#endif

View File

@@ -0,0 +1,86 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS Development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Matt Bettencourt (NVIDIA)
------------------------------------------------------------------------- */
#ifndef LMP_MLIAP_MODEL_PYTHON_KOKKOS_H
#define LMP_MLIAP_MODEL_PYTHON_KOKKOS_H
#include "mliap_model_python.h"
#include "mliap_model_kokkos.h"
#include "kokkos_type.h"
namespace LAMMPS_NS {
template <class DeviceType>
class MLIAPModelPythonKokkos : public MLIAPModelPython, public MLIAPModelKokkos<DeviceType> {
public:
MLIAPModelPythonKokkos(LAMMPS *, char * = nullptr);
~MLIAPModelPythonKokkos();
void read_coeffs(char *fname);
void compute_gradients(class MLIAPData *) override;
void compute_gradgrads(class MLIAPData *) override;
void compute_force_gradients(class MLIAPData *) override;
void connect_param_counts();
};
} // namespace LAMMPS_NS
#include "mliap_data_kokkos.h"
namespace LAMMPS_NS {
class MLIAPModelPythonKokkosDevice: public MLIAPModelPythonKokkos<LMPDeviceType> {
};
class MLIAPDataKokkosDevice {
public:
MLIAPDataKokkosDevice(MLIAPDataKokkos<LMPDeviceType> &base) :
ndescriptors(base.ndescriptors),
nlistatoms(base.nlistatoms),
ielems(base.k_ielems.d_view.data()),
descriptors(base.k_descriptors.d_view.data()),
betas(base.k_betas.d_view.data()),
eatoms(base.k_eatoms.d_view.data()),
energy(&base.energy),
#if defined(KOKKOS_ENABLE_CUDA)
dev(1)
#else
dev(0)
#endif
{ }
const int ndescriptors;
const int nlistatoms;
int *ielems;
double *descriptors;
double *betas;
double *eatoms;
double *energy;
int dev;
#ifdef LMP_KOKKOS_GPU
MLIAPDataKokkosDevice(MLIAPDataKokkos<LMPHostType> &base) : ndescriptors(-1),nlistatoms(-1)
{
// This overload can never be reached; it exists only so the host instantiation compiles
}
#endif
};
}
#endif

View File

@@ -22,6 +22,9 @@
#include "mliap_data_kokkos.h"
#include "mliap_descriptor_so3_kokkos.h"
#include "mliap_model_linear_kokkos.h"
#ifdef MLIAP_PYTHON
#include "mliap_model_python_kokkos.h"
#endif
#include "error.h"
#include "neigh_request.h"
#include "lammps.h"
@@ -143,7 +146,6 @@ void PairMLIAPKokkos<DeviceType>::allocate()
template<class DeviceType>
void PairMLIAPKokkos<DeviceType>::settings(int narg, char ** arg)
{
PairMLIAP::settings(narg, arg);
int iarg=0;
while (iarg < narg) {
if (strcmp(arg[iarg],"model") == 0) {
@@ -152,6 +154,15 @@ void PairMLIAPKokkos<DeviceType>::settings(int narg, char ** arg)
delete model;
model = new MLIAPModelLinearKokkos<DeviceType>(lmp,arg[iarg+2]);
iarg += 3;
} else if (strcmp(arg[iarg+1],"mliappy") == 0) {
#ifdef MLIAP_PYTHON
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap mliappy", error);
delete model;
model = new MLIAPModelPythonKokkos<DeviceType>(lmp,arg[iarg+2]);
iarg += 3;
#else
error->all(FLERR,"Using pair_style mliap model mliappy requires ML-IAP with python support");
#endif
} else
iarg += 2;
} else if (strcmp(arg[iarg],"descriptor") == 0) {
@@ -165,6 +176,8 @@ void PairMLIAPKokkos<DeviceType>::settings(int narg, char ** arg)
} else
iarg++;
}
PairMLIAP::settings(narg, arg);
}
/* ---------------------------------------------------------------------- */

View File

@@ -33,10 +33,14 @@ using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
MLIAPModelPython::MLIAPModelPython(LAMMPS *lmp, char *coefffilename) :
MLIAPModelPython::MLIAPModelPython(LAMMPS *lmp, char *coefffilename, bool is_child) :
MLIAPModel(lmp, coefffilename)
{
model_loaded = 0;
nonlinearflag = 1;
if (is_child)
return;
python->init();
PyGILState_STATE gstate = PyGILState_Ensure();
@@ -66,17 +70,18 @@ MLIAPModelPython::MLIAPModelPython(LAMMPS *lmp, char *coefffilename) :
PyList_Append(py_path, PY_STRING_FROM_STRING(potentials_path));
}
PyGILState_Release(gstate);
if (coefffilename) read_coeffs(coefffilename);
nonlinearflag = 1;
}
/* ---------------------------------------------------------------------- */
MLIAPModelPython::~MLIAPModelPython()
{
if (model_loaded)
MLIAPPY_unload_model(this);
model_loaded=false;
}
/* ----------------------------------------------------------------------
@@ -92,7 +97,7 @@ void MLIAPModelPython::read_coeffs(char *fname)
{
PyGILState_STATE gstate = PyGILState_Ensure();
int loaded = MLIAPPY_load_model(this, fname);
model_loaded = MLIAPPY_load_model(this, fname);
if (PyErr_Occurred()) {
PyErr_Print();
PyErr_Clear();
@@ -101,7 +106,7 @@ void MLIAPModelPython::read_coeffs(char *fname)
}
PyGILState_Release(gstate);
if (loaded) {
if (model_loaded) {
this->connect_param_counts();
} else {
if (comm->me == 0) utils::logmesg(lmp, "Loading python model deferred.\n");

View File

@@ -20,7 +20,7 @@ namespace LAMMPS_NS {
class MLIAPModelPython : public MLIAPModel {
public:
MLIAPModelPython(LAMMPS *, char * = nullptr);
MLIAPModelPython(LAMMPS *, char * = nullptr, bool is_child=false);
~MLIAPModelPython() override;
int get_nparams() override;
int get_gamma_nnz(class MLIAPData *) override;

View File

@@ -131,52 +131,52 @@ void PairMLIAP::settings(int narg, char ** arg)
{
if (narg < 2) utils::missing_cmd_args(FLERR, "pair_style mliap", error);
// set flags for required keywords
delete model;
model = nullptr;
delete descriptor;
descriptor = nullptr;
// process keywords
int iarg = 0;
// Check that exactly one model and exactly one descriptor are specified
int nmodel=0,ndescriptor=0;
for (int iarg=0;iarg<narg;++iarg)
if (strcmp(arg[iarg],"model") == 0)
nmodel++;
else if (strcmp(arg[iarg],"descriptor") == 0)
ndescriptor++;
if (nmodel != 1 || ndescriptor != 1 )
error->all(FLERR,"One can only specify one model and one descriptor");
while (iarg < narg) {
if (strcmp(arg[iarg],"model") == 0) {
if (iarg+2 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap model", error);
if (model != nullptr) error->all(FLERR,"Illegal multiple pair_style mliap model definition");
if (strcmp(arg[iarg+1],"linear") == 0) {
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap model linear", error);
model = new MLIAPModelLinear(lmp,arg[iarg+2]);
if (model==nullptr) model = new MLIAPModelLinear(lmp,arg[iarg+2]);
iarg += 3;
} else if (strcmp(arg[iarg+1],"quadratic") == 0) {
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap model quadratic", error);
model = new MLIAPModelQuadratic(lmp,arg[iarg+2]);
if (model==nullptr) model = new MLIAPModelQuadratic(lmp,arg[iarg+2]);
iarg += 3;
} else if (strcmp(arg[iarg+1],"nn") == 0) {
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap model nn", error);
model = new MLIAPModelNN(lmp,arg[iarg+2]);
if (model==nullptr) model = new MLIAPModelNN(lmp,arg[iarg+2]);
iarg += 3;
} else if (strcmp(arg[iarg+1],"mliappy") == 0) {
#ifdef MLIAP_PYTHON
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap mliappy", error);
model = new MLIAPModelPython(lmp,arg[iarg+2]);
if (model==nullptr) model = new MLIAPModelPython(lmp,arg[iarg+2]);
iarg += 3;
#else
error->all(FLERR,"Using pair_style mliap model mliappy requires ML-IAP with python support");
#endif
} else error->all(FLERR,"Unknown pair_style mliap model keyword: {}", arg[iarg]);
} else if (strcmp(arg[iarg],"descriptor") == 0) {
} else if (strcmp(arg[iarg],"descriptor") == 0 && descriptor==nullptr) {
if (iarg+2 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap descriptor", error);
if (descriptor != nullptr) error->all(FLERR,"Illegal multiple pair_style mliap descriptor definition");
if (strcmp(arg[iarg+1],"sna") == 0) {
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap descriptor sna", error);
descriptor = new MLIAPDescriptorSNAP(lmp,arg[iarg+2]);
if (descriptor==nullptr) descriptor = new MLIAPDescriptorSNAP(lmp,arg[iarg+2]);
iarg += 3;
} else if (strcmp(arg[iarg+1],"so3") == 0) {
if (iarg+3 > narg) utils::missing_cmd_args(FLERR, "pair_style mliap descriptor so3", error);
descriptor = new MLIAPDescriptorSO3(lmp,arg[iarg+2]);
if (descriptor==nullptr) descriptor = new MLIAPDescriptorSO3(lmp,arg[iarg+2]);
iarg += 3;
} else error->all(FLERR,"Illegal pair_style mliap command");

View File

@@ -442,7 +442,11 @@ LAMMPS::LAMMPS(int narg, char **arg, MPI_Comm communicator) :
iarg += 3;
while (iarg < narg && arg[iarg][0] != '-') iarg++;
} else error->universe_all(FLERR,"Invalid command-line argument");
} else {
std::string errmsg("Invalid command-line argument: ");
errmsg += arg[iarg];
error->universe_all(FLERR,errmsg.c_str());
}
}
// if no partition command-line switch, universe is one world with all procs