whitespace
@@ -33,6 +33,6 @@ else:
from .loader import load_model, load_unified, activate_mliappy
try:
    from .loader import load_model_kokkos, activate_mliappy_kokkos
except:
    pass
del sysconfig, ctypes, library, pylib
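The try/except above keeps the lammps.mliap package importable on builds without the KOKKOS loader; the import simply becomes a no-op when the Kokkos symbols are absent. Below is a minimal standalone sketch of the same guarded-import idiom, under the assumption that the relative import resolves to lammps.mliap.loader; the optional_import helper and the None fallbacks are illustrative, not part of LAMMPS.

import importlib

def optional_import(module, names):
    # Return the requested attributes, or None for each one if the module
    # (or any attribute) is unavailable -- the same pattern the hunk above
    # uses for the Kokkos loader.
    try:
        mod = importlib.import_module(module)
        return tuple(getattr(mod, n) for n in names)
    except (ImportError, AttributeError):
        return tuple(None for _ in names)

load_model_kokkos, activate_mliappy_kokkos = optional_import(
    "lammps.mliap.loader", ["load_model_kokkos", "activate_mliappy_kokkos"])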
@@ -135,7 +135,7 @@ class TorchWrapper(torch.nn.Module):
        with torch.autograd.enable_grad():

            if (use_gpu_data):
                energy_nn = torch.as_tensor(energy,dtype=self.dtype, device=self.device)
                energy_nn[:] = self.model(descriptors, elems).flatten()
            else:
                energy_nn = self.model(descriptors, elems).flatten()
@@ -144,12 +144,12 @@ class TorchWrapper(torch.nn.Module):
            # energy_nn = energy_nn.flatten()

            if (use_gpu_data):
                beta_nn = torch.as_tensor(beta,dtype=self.dtype, device=self.device)
                beta_nn[:] = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
            else:
                beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]
                beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)

        elems=elems+1
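These two hunks compute per-atom energies and their descriptor gradients (beta). On the GPU path, torch.as_tensor wraps the LAMMPS-owned device arrays so results are written in place; on the CPU path the gradient is copied back through NumPy. A runnable sketch of the CPU path follows, with a toy linear model standing in for self.model; model, n_atoms, and n_descriptors are illustrative values, not taken from LAMMPS.

import numpy as np
import torch

n_atoms, n_descriptors = 4, 3
model = torch.nn.Linear(n_descriptors, 1)      # stand-in for the wrapped ML-IAP model

descriptors = torch.rand(n_atoms, n_descriptors, requires_grad=True)
beta = np.zeros((n_atoms, n_descriptors))      # output buffer owned by the caller (CPU path)

with torch.autograd.enable_grad():
    energy_nn = model(descriptors).flatten()   # per-atom energies
    # d(sum of energies)/d(descriptors) yields one gradient row per atom
    beta_nn = torch.autograd.grad(energy_nn.sum(), descriptors)[0]

beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)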
@@ -417,7 +417,7 @@ fi

# Python cython stuff. Only need to convert/remove sources.
# Package settings were already done in ML-IAP package Install.sh script.

if (test $1 = 1) then
  if (type cythonize > /dev/null 2>&1 && test -e ../python_impl.cpp) then
    cythonize -3 ../mliap_model_python_couple_kokkos.pyx
@@ -428,7 +428,7 @@ elif (test $1 = 0) then

elif (test $1 = 2) then
  if (type cythonize > /dev/null 2>&1 && test -e ../python_impl.cpp) then
    cythonize -3 ../mliap_model_python_couple_kokkos.pyx
  else
    rm -f ../mliap_model_python_couple_kokkos.cpp ../mliap_model_python_couple_kokkos.h
  fi
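Both Install.sh hunks gate the Cython step on two conditions: cythonize being on PATH and ../python_impl.cpp existing (i.e. the ML-IAP Python support is built), with mode 1 (install) and mode 2 (update, per the usual LAMMPS Install.sh argument convention), and the mode-2 fallback removing the generated sources. The sketch below restates only those visible branches in Python to make the decision logic explicit; LAMMPS itself uses the shell script, and the helper name and Path-based layout are illustrative.

import pathlib
import shutil
import subprocess

def sync_kokkos_cython(mode, src=pathlib.Path("..")):
    # Mirror only the branches visible in the hunks above.
    pyx = src / "mliap_model_python_couple_kokkos.pyx"
    have_cython = shutil.which("cythonize") is not None
    mliap_python_on = (src / "python_impl.cpp").exists()

    if mode in (1, 2) and have_cython and mliap_python_on:
        subprocess.run(["cythonize", "-3", str(pyx)], check=True)
    elif mode == 2:
        # Update without Cython support: drop the generated C++ sources.
        for name in ("mliap_model_python_couple_kokkos.cpp",
                     "mliap_model_python_couple_kokkos.h"):
            (src / name).unlink(missing_ok=True)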