mliappy fixes for kokkos support
@@ -33,6 +33,7 @@ else:
 from .loader import load_model, load_unified, activate_mliappy
 try:
     from .loader import load_model_kokkos, activate_mliappy_kokkos
-except:
+except Exception as ee:
+    # ignore import error, it means that the KOKKOS package was not included in LAMMPS
     pass
 del sysconfig, ctypes, library, pylib
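The guarded import makes the KOKKOS entry points optional: on a LAMMPS build compiled without the KOKKOS package the import simply fails, and the plain entry points remain usable. Below is a minimal, self-contained sketch of the same guard; kokkos_backend and load are hypothetical stand-ins for illustration, not LAMMPS API.

# Sketch of the optional-import guard above; "kokkos_backend" and "load"
# are hypothetical names standing in for the .loader KOKKOS symbols.
try:
    from kokkos_backend import load_model_kokkos  # only present in KOKKOS builds
except Exception:
    # Failure here just means the optional backend is absent; record that
    # fact instead of propagating the error.
    load_model_kokkos = None

def load(model):
    # Dispatch to the KOKKOS loader only when it imported cleanly.
    if load_model_kokkos is None:
        raise RuntimeError("KOKKOS loader unavailable; rebuild LAMMPS with KOKKOS")
    return load_model_kokkos(model)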
@@ -140,8 +140,6 @@ class TorchWrapper(torch.nn.Module):
         else:
             energy_nn = self.model(descriptors, elems).flatten()
             energy[:] = energy_nn.detach().cpu().numpy().astype(np.float64)
-            #if energy_nn.ndim > 1:
-            #    energy_nn = energy_nn.flatten()

         if (use_gpu_data):
             beta_nn = torch.as_tensor(beta,dtype=self.dtype, device=self.device)
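Worth noting in this hunk: energy is a buffer preallocated by LAMMPS, so the result is written with in-place slice assignment rather than by rebinding the name, which would be invisible to the caller. A toy sketch of that hand-off; the linear model and shapes are assumptions:

# Toy sketch of the in-place hand-off pattern; model and shapes are assumed.
import numpy as np
import torch

model = torch.nn.Linear(4, 1)             # stand-in for self.model
descriptors = torch.randn(10, 4)          # 10 atoms, 4 descriptors each
energy = np.zeros(10)                     # preallocated, caller-owned buffer

energy_nn = model(descriptors).flatten()  # (10, 1) -> (10,)
# Slice assignment mutates the existing array, so the caller sees the result.
energy[:] = energy_nn.detach().cpu().numpy().astype(np.float64)

On the KOKKOS path, torch.as_tensor(beta, dtype=self.dtype, device=self.device) plays the complementary role: it wraps an existing buffer as a tensor without copying when dtype and device already match, which is what lets the data stay on the GPU.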
@@ -150,9 +148,6 @@
             beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
             beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)

-        elems=elems+1
-
-
 class IgnoreElems(torch.nn.Module):
     """
     A class to represent a NN model agnostic of element typing.
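In the gradient hunk, summing energy_nn first makes the output a scalar, so a single torch.autograd.grad call returns the full matrix of per-atom descriptor gradients; these beta coefficients are what the MLIAP interface then contracts with descriptor derivatives to obtain forces. A toy sketch under assumed shapes (the grad call itself mirrors the diff):

# Toy sketch of obtaining beta = dE/d(descriptor) via autograd.
import numpy as np
import torch

model = torch.nn.Linear(4, 1)                         # stand-in for self.model
descriptors = torch.randn(10, 4, requires_grad=True)  # leaf tensor, grads enabled
beta = np.zeros((10, 4))                              # preallocated output buffer

energy_nn = model(descriptors).flatten()
# One backward pass through the scalar sum yields all (10, 4) gradients at once.
beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)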