Added classes for hippo/gpu, refactored BaseAmoeba and made room for the dispersion real-space term in hippo

This commit is contained in:
Trung Nguyen
2021-09-21 15:40:06 -05:00
parent a2fd784034
commit d77d5b7f0a
13 changed files with 3918 additions and 196 deletions

View File

@ -62,7 +62,7 @@ int AmoebaT::init(const int ntypes, const int max_amtype, const int max_amclass,
int success; int success;
success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,maxspecial15, success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,maxspecial15,
cell_size,gpu_split,_screen,amoeba, cell_size,gpu_split,_screen,amoeba,
"k_amoeba_dispersion", "k_amoeba_multipole", "k_amoeba_multipole",
"k_amoeba_udirect2b", "k_amoeba_umutual2b", "k_amoeba_udirect2b", "k_amoeba_umutual2b",
"k_amoeba_polar", "k_amoeba_short_nbor"); "k_amoeba_polar", "k_amoeba_short_nbor");
if (success!=0) if (success!=0)
@ -149,47 +149,6 @@ double AmoebaT::host_memory_usage() const {
return this->host_memory_usage_atomic()+sizeof(Amoeba<numtyp,acctyp>); return this->host_memory_usage_atomic()+sizeof(Amoeba<numtyp,acctyp>);
} }
// ---------------------------------------------------------------------------
// Calculate the dispersion real-space term, returning tep
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int AmoebaT::dispersion_real(const int eflag, const int vflag) {
int ainum=this->ans->inum();
if (ainum == 0)
return 0;
int _nall=this->atom->nall();
int nbor_pitch=this->nbor->nbor_pitch();
// Compute the block size and grid size to keep all cores busy
const int BX=this->block_size();
int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
(BX/this->_threads_per_atom)));
this->time_pair.start();
// Build the short neighbor list for the cutoff off2_mpole,
// at this point mpole is the first kernel in a time step
this->k_short_nbor.set_size(GX,BX);
this->k_short_nbor.run(&this->atom->x, &this->nbor->dev_nbor,
&this->_nbor_data->begin(),
&this->dev_short_nbor, &this->_off2_disp, &ainum,
&nbor_pitch, &this->_threads_per_atom);
printf("launching dispersion\n");
this->k_dispersion.set_size(GX,BX);
this->k_dispersion.run(&this->atom->x, &this->atom->extra,
&coeff_amtype, &coeff_amclass, &sp_nonpolar,
&this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->dev_short_nbor,
&this->ans->force, &this->ans->engv,
&eflag, &vflag, &ainum, &_nall, &nbor_pitch,
&this->_threads_per_atom, &this->_aewald,
&this->_off2_disp);
this->time_pair.stop();
return GX;
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Calculate the multipole real-space term, returning tep // Calculate the multipole real-space term, returning tep
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------

View File

@ -93,7 +93,6 @@ class Amoeba : public BaseAmoeba<numtyp, acctyp> {
protected: protected:
bool _allocated; bool _allocated;
int dispersion_real(const int eflag, const int vflag);
int multipole_real(const int eflag, const int vflag); int multipole_real(const int eflag, const int vflag);
int udirect2b(const int eflag, const int vflag); int udirect2b(const int eflag, const int vflag);
int umutual2b(const int eflag, const int vflag); int umutual2b(const int eflag, const int vflag);

View File

@ -116,7 +116,7 @@ int amoeba_gpu_init(const int ntypes, const int max_amtype, const int max_amclas
void amoeba_gpu_clear() { void amoeba_gpu_clear() {
AMOEBAMF.clear(); AMOEBAMF.clear();
} }
/*
int** amoeba_gpu_compute_dispersion_real(const int ago, const int inum_full, int** amoeba_gpu_compute_dispersion_real(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type, const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole, int *host_amtype, int *host_amgroup, double **host_rpole,
@ -133,7 +133,7 @@ int** amoeba_gpu_compute_dispersion_real(const int ago, const int inum_full,
eflag, vflag, eatom, vatom, host_start, ilist, jnum, eflag, vflag, eatom, vatom, host_start, ilist, jnum,
cpu_time, success, aewald, off2, host_q, boxlo, prd); cpu_time, success, aewald, off2, host_q, boxlo, prd);
} }
*/
int** amoeba_gpu_compute_multipole_real(const int ago, const int inum_full, int** amoeba_gpu_compute_multipole_real(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type, const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole, int *host_amtype, int *host_amgroup, double **host_rpole,

View File

@ -33,7 +33,6 @@ template <class numtyp, class acctyp>
BaseAmoebaT::~BaseAmoeba() { BaseAmoebaT::~BaseAmoeba() {
delete ans; delete ans;
delete nbor; delete nbor;
k_dispersion.clear();
k_multipole.clear(); k_multipole.clear();
k_udirect2b.clear(); k_udirect2b.clear();
k_umutual2b.clear(); k_umutual2b.clear();
@ -55,7 +54,6 @@ int BaseAmoebaT::init_atomic(const int nlocal, const int nall,
const int maxspecial15, const int maxspecial15,
const double cell_size, const double gpu_split, const double cell_size, const double gpu_split,
FILE *_screen, const void *pair_program, FILE *_screen, const void *pair_program,
const char *k_name_dispersion,
const char *k_name_multipole, const char *k_name_multipole,
const char *k_name_udirect2b, const char *k_name_udirect2b,
const char *k_name_umutual2b, const char *k_name_umutual2b,
@ -92,7 +90,7 @@ int BaseAmoebaT::init_atomic(const int nlocal, const int nall,
_block_size=device->pair_block_size(); _block_size=device->pair_block_size();
_block_bio_size=device->block_bio_pair(); _block_bio_size=device->block_bio_pair();
compile_kernels(*ucl_device,pair_program,k_name_dispersion,k_name_multipole, compile_kernels(*ucl_device,pair_program,k_name_multipole,
k_name_udirect2b, k_name_umutual2b,k_name_polar,k_name_short_nbor); k_name_udirect2b, k_name_umutual2b,k_name_polar,k_name_short_nbor);
if (_threads_per_atom>1 && gpu_nbor==0) { if (_threads_per_atom>1 && gpu_nbor==0) {
@ -428,73 +426,6 @@ int** BaseAmoebaT::precompute(const int ago, const int inum_full, const int nall
return nbor->host_jlist.begin()-host_start; return nbor->host_jlist.begin()-host_start;
} }
// ---------------------------------------------------------------------------
// Reneighbor on GPU if necessary, and then compute dispersion real-space
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int** BaseAmoebaT::compute_dispersion_real(const int ago, const int inum_full,
const int nall, double **host_x,
int *host_type, int *host_amtype,
int *host_amgroup, double **host_rpole,
double *sublo, double *subhi, tagint *tag,
int **nspecial, tagint **special,
int *nspecial15, tagint **special15,
const bool eflag_in, const bool vflag_in,
const bool eatom, const bool vatom,
int &host_start, int **ilist, int **jnum,
const double cpu_time, bool &success,
const double aewald, const double off2_disp,
double *host_q, double *boxlo, double *prd) {
acc_timers();
int eflag, vflag;
if (eatom) eflag=2;
else if (eflag_in) eflag=1;
else eflag=0;
if (vatom) vflag=2;
else if (vflag_in) vflag=1;
else vflag=0;
#ifdef LAL_NO_BLOCK_REDUCE
if (eflag) eflag=2;
if (vflag) vflag=2;
#endif
set_kernel(eflag,vflag);
// reallocate per-atom arrays, transfer data from the host
// and build the neighbor lists if needed
// NOTE:
// For now we invoke precompute() again here,
// to be able to turn on/off the udirect2b kernel (which comes before this)
// Once all the kernels are ready, precompute() is needed only once
// in the first kernel in a time step.
// We only need to cast uind and uinp from host to device here
// if the neighbor lists are rebuilt and other per-atom arrays
// (x, type, amtype, amgroup, rpole) are ready on the device.
int** firstneigh = nullptr;
firstneigh = precompute(ago, inum_full, nall, host_x, host_type,
host_amtype, host_amgroup, host_rpole,
nullptr, nullptr, sublo, subhi, tag,
nspecial, special, nspecial15, special15,
eflag_in, vflag_in, eatom, vatom,
host_start, ilist, jnum, cpu_time,
success, host_q, boxlo, prd);
_off2_disp = off2_disp;
_aewald = aewald;
const int red_blocks=dispersion_real(eflag,vflag);
// leave the answers (forces, energies and virial) on the device,
// only copy them back in the last kernel (polar_real)
//ans->copy_answers(eflag_in,vflag_in,eatom,vatom,red_blocks);
//device->add_ans_object(ans);
hd_balancer.stop_timer();
return firstneigh; // nbor->host_jlist.begin()-host_start;
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Reneighbor on GPU if necessary, and then compute multipole real-space // Reneighbor on GPU if necessary, and then compute multipole real-space
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -885,7 +816,6 @@ void BaseAmoebaT::cast_extra_data(int* amtype, int* amgroup, double** rpole,
template <class numtyp, class acctyp> template <class numtyp, class acctyp>
void BaseAmoebaT::compile_kernels(UCL_Device &dev, const void *pair_str, void BaseAmoebaT::compile_kernels(UCL_Device &dev, const void *pair_str,
const char *kname_dispersion,
const char *kname_multipole, const char *kname_multipole,
const char *kname_udirect2b, const char *kname_udirect2b,
const char *kname_umutual2b, const char *kname_umutual2b,
@ -899,7 +829,6 @@ void BaseAmoebaT::compile_kernels(UCL_Device &dev, const void *pair_str,
std::string oclstring = device->compile_string()+" -DEVFLAG=1"; std::string oclstring = device->compile_string()+" -DEVFLAG=1";
pair_program->load_string(pair_str,oclstring.c_str(),nullptr,screen); pair_program->load_string(pair_str,oclstring.c_str(),nullptr,screen);
k_dispersion.set_function(*pair_program,kname_dispersion);
k_multipole.set_function(*pair_program,kname_multipole); k_multipole.set_function(*pair_program,kname_multipole);
k_udirect2b.set_function(*pair_program,kname_udirect2b); k_udirect2b.set_function(*pair_program,kname_udirect2b);
k_umutual2b.set_function(*pair_program,kname_umutual2b); k_umutual2b.set_function(*pair_program,kname_umutual2b);

View File

@ -54,7 +54,7 @@ class BaseAmoeba {
int init_atomic(const int nlocal, const int nall, const int max_nbors, int init_atomic(const int nlocal, const int nall, const int max_nbors,
const int maxspecial, const int maxspecial15, const double cell_size, const int maxspecial, const int maxspecial15, const double cell_size,
const double gpu_split, FILE *screen, const void *pair_program, const double gpu_split, FILE *screen, const void *pair_program,
const char *kname_dispersion, const char *kname_multipole, const char *kname_multipole,
const char *kname_udirect2b, const char *kname_umutual2b, const char *kname_udirect2b, const char *kname_umutual2b,
const char *kname_polar, const char *kname_short_nbor); const char *kname_polar, const char *kname_short_nbor);
@ -142,18 +142,6 @@ class BaseAmoeba {
int **&ilist, int **&numj, const double cpu_time, bool &success, int **&ilist, int **&numj, const double cpu_time, bool &success,
double *charge, double *boxlo, double *prd); double *charge, double *boxlo, double *prd);
/// Compute dispersion real-space with device neighboring
int** compute_dispersion_real(const int ago, const int inum_full, const int nall,
double **host_x, int *host_type, int *host_amtype,
int *host_amgroup, double **host_rpole, double *sublo, double *subhi,
tagint *tag, int **nspecial, tagint **special,
int *nspecial15, tagint **special15,
const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
int **ilist, int **numj, const double cpu_time, bool &success,
const double aewald, const double off2_disp, double *charge,
double *boxlo, double *prd);
/// Compute multipole real-space with device neighboring /// Compute multipole real-space with device neighboring
int** compute_multipole_real(const int ago, const int inum_full, const int nall, int** compute_multipole_real(const int ago, const int inum_full, const int nall,
double **host_x, int *host_type, int *host_amtype, double **host_x, int *host_type, int *host_amtype,
@ -269,7 +257,7 @@ class BaseAmoeba {
// ------------------------- DEVICE KERNELS ------------------------- // ------------------------- DEVICE KERNELS -------------------------
UCL_Program *pair_program; UCL_Program *pair_program;
UCL_Kernel k_dispersion, k_multipole, k_udirect2b, k_umutual2b, k_polar; UCL_Kernel k_multipole, k_udirect2b, k_umutual2b, k_polar;
UCL_Kernel k_special15, k_short_nbor; UCL_Kernel k_special15, k_short_nbor;
inline int block_size() { return _block_size; } inline int block_size() { return _block_size; }
inline void set_kernel(const int eflag, const int vflag) {} inline void set_kernel(const int eflag, const int vflag) {}
@ -291,11 +279,10 @@ class BaseAmoeba {
numtyp _off2_hal,_off2_repulse,_off2_disp,_off2_mpole,_off2_polar; numtyp _off2_hal,_off2_repulse,_off2_disp,_off2_mpole,_off2_polar;
void compile_kernels(UCL_Device &dev, const void *pair_string, void compile_kernels(UCL_Device &dev, const void *pair_string,
const char *kname_dispersion, const char *kname_multipole, const char *kname_multipole,
const char *kname_udirect2b, const char *kname_umutual2b, const char *kname_udirect2b, const char *kname_umutual2b,
const char *kname_polar, const char *kname_short_nbor); const char *kname_polar, const char *kname_short_nbor);
virtual int dispersion_real(const int eflag, const int vflag) = 0;
virtual int multipole_real(const int eflag, const int vflag) = 0; virtual int multipole_real(const int eflag, const int vflag) = 0;
virtual int udirect2b(const int eflag, const int vflag) = 0; virtual int udirect2b(const int eflag, const int vflag) = 0;
virtual int umutual2b(const int eflag, const int vflag) = 0; virtual int umutual2b(const int eflag, const int vflag) = 0;

430
lib/gpu/lal_hippo.cpp Normal file
View File

@ -0,0 +1,430 @@
/***************************************************************************
hippo.cpp
-------------------
Trung Dac Nguyen (Northwestern)
Class for acceleration of the hippo pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin :
email : trung.nguyen@northwestern.edu
***************************************************************************/
#if defined(USE_OPENCL)
#include "hippo_cl.h"
#elif defined(USE_CUDART)
const char *hippo=0;
#else
#include "hippo_cubin.h"
#endif
#include "lal_hippo.h"
#include <cassert>
namespace LAMMPS_AL {
#define HippoT Hippo<numtyp, acctyp>
extern Device<PRECISION,ACC_PRECISION> device;
// Construct an un-initialized Hippo accelerator object.
// All real setup (kernel compilation, device buffers) happens in init();
// _allocated stays false until init() completes so clear() is a no-op
// if called before then.
template <class numtyp, class acctyp>
HippoT::Hippo() : BaseAmoeba<numtyp,acctyp>(),
                  _allocated(false) {
}
// Release device data. clear() frees the type/special-value buffers and the
// base-class per-atom storage; the hippo-specific dispersion kernel is
// released afterwards since it is owned here, not by BaseAmoeba.
template <class numtyp, class acctyp>
HippoT::~Hippo() {
  clear();
  k_dispersion.clear();
}
// Per-atom device memory estimate; hippo adds no per-atom storage beyond
// what the atomic base class accounts for.
template <class numtyp, class acctyp>
int HippoT::bytes_per_atom(const int max_nbors) const {
  return this->bytes_per_atom_atomic(max_nbors);
}
// ---------------------------------------------------------------------------
// Allocate device memory, compile kernels, and copy per-type constants.
//
// Returns 0 on success, otherwise the nonzero error code from init_atomic().
//
// The host_* coefficient arrays are indexed 0..max_amtype-1 (pdamp, thole,
// dirdamp, amtype2class) or 0..max_amclass-1 (csix, adisp); the special_*
// scale arrays are indexed 0..4 — implied by the loop bounds below.
// NOTE(review): this definition takes the special_* arrays in the order
// hal, repel, disp, mpole; confirm every caller matches, since all four
// parameters share the type const double*.
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int HippoT::init(const int ntypes, const int max_amtype, const int max_amclass,
                 const double *host_pdamp, const double *host_thole,
                 const double *host_dirdamp, const int *host_amtype2class,
                 const double *host_special_hal,
                 const double *host_special_repel,
                 const double *host_special_disp,
                 const double *host_special_mpole,
                 const double *host_special_polar_wscale,
                 const double *host_special_polar_piscale,
                 const double *host_special_polar_pscale,
                 const double *host_csix, const double *host_adisp,
                 const int nlocal, const int nall, const int max_nbors,
                 const int maxspecial, const int maxspecial15,
                 const double cell_size, const double gpu_split, FILE *_screen,
                 const double polar_dscale, const double polar_uscale) {
  int success;
  // Compile the pair program and the kernels shared with the base class.
  success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,maxspecial15,
                            cell_size,gpu_split,_screen,hippo,
                            "k_hippo_multipole",
                            "k_hippo_udirect2b", "k_hippo_umutual2b",
                            "k_hippo_polar", "k_hippo_short_nbor");
  if (success!=0)
    return success;

  // The dispersion kernel is hippo-specific, so it is bound here rather
  // than in BaseAmoeba.
  k_dispersion.set_function(*(this->pair_program),"k_hippo_dispersion");

  // If atom type constants fit in shared memory use fast kernel
  int lj_types=ntypes;
  shared_types=false;
  int max_shared_types=this->device->max_shared_types();
  if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
    lj_types=max_shared_types;
    shared_types=true;
  }
  _lj_types=lj_types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp4> host_write(max_amtype, *(this->ucl_device), UCL_WRITE_ONLY);

  // Pack per-amtype constants: x=pdamp, y=thole, z=dirdamp, w=amtype2class.
  for (int i = 0; i < max_amtype; i++) {
    host_write[i].x = host_pdamp[i];
    host_write[i].y = host_thole[i];
    host_write[i].z = host_dirdamp[i];
    host_write[i].w = host_amtype2class[i];
  }
  coeff_amtype.alloc(max_amtype,*(this->ucl_device), UCL_READ_ONLY);
  ucl_copy(coeff_amtype,host_write,false);

  // Pack per-amclass constants: x=csix, y=adisp (z, w unused).
  UCL_H_Vec<numtyp4> host_write2(max_amclass, *(this->ucl_device), UCL_WRITE_ONLY);
  for (int i = 0; i < max_amclass; i++) {
    host_write2[i].x = host_csix[i];
    host_write2[i].y = host_adisp[i];
    host_write2[i].z = (numtyp)0;
    host_write2[i].w = (numtyp)0;
  }
  coeff_amclass.alloc(max_amclass,*(this->ucl_device), UCL_READ_ONLY);
  ucl_copy(coeff_amclass,host_write2,false);

  // Special-neighbor scale factors for the polar kernels:
  // x=wscale, y=piscale, z=pscale, w=mpole scale.
  UCL_H_Vec<numtyp4> dview(5, *(this->ucl_device), UCL_WRITE_ONLY);
  sp_polar.alloc(5,*(this->ucl_device),UCL_READ_ONLY);
  for (int i=0; i<5; i++) {
    dview[i].x=host_special_polar_wscale[i];
    dview[i].y=host_special_polar_piscale[i];
    dview[i].z=host_special_polar_pscale[i];
    dview[i].w=host_special_mpole[i];
  }
  ucl_copy(sp_polar,dview,5,false);

  // Reuse dview for the nonpolar scale factors:
  // x=hal, y=repel, z=disp, w unused.
  sp_nonpolar.alloc(5,*(this->ucl_device),UCL_READ_ONLY);
  for (int i=0; i<5; i++) {
    dview[i].x=host_special_hal[i];
    dview[i].y=host_special_repel[i];
    dview[i].z=host_special_disp[i];
    dview[i].w=(numtyp)0;
  }
  ucl_copy(sp_nonpolar,dview,5,false);

  _polar_dscale = polar_dscale;
  _polar_uscale = polar_uscale;

  _allocated=true;
  this->_max_bytes=coeff_amtype.row_bytes() + coeff_amclass.row_bytes()
    + sp_polar.row_bytes() + sp_nonpolar.row_bytes() + this->_tep.row_bytes();
  return 0;
}
// Free all hippo-owned device buffers and the base-class per-atom storage.
// Safe to call repeatedly: only the first call after a successful init()
// does any work.
template <class numtyp, class acctyp>
void HippoT::clear() {
  if (_allocated) {
    _allocated=false;

    coeff_amtype.clear();
    coeff_amclass.clear();
    sp_polar.clear();
    sp_nonpolar.clear();

    this->clear_atomic();
  }
}
// Host-side memory footprint: base-class per-atom buffers plus this object.
template <class numtyp, class acctyp>
double HippoT::host_memory_usage() const {
  return this->host_memory_usage_atomic()+sizeof(Hippo<numtyp,acctyp>);
}
// ---------------------------------------------------------------------------
// Reneighbor on GPU if necessary, and then compute dispersion real-space.
// Returns the host-side first-neighbor list from precompute(); the forces,
// energies and virial stay on the device (copied back by the last kernel
// in the time step).
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int** HippoT::compute_dispersion_real(const int ago, const int inum_full,
                                      const int nall, double **host_x,
                                      int *host_type, int *host_amtype,
                                      int *host_amgroup, double **host_rpole,
                                      double *sublo, double *subhi, tagint *tag,
                                      int **nspecial, tagint **special,
                                      int *nspecial15, tagint **special15,
                                      const bool eflag_in, const bool vflag_in,
                                      const bool eatom, const bool vatom,
                                      int &host_start, int **ilist, int **jnum,
                                      const double cpu_time, bool &success,
                                      const double aewald, const double off2_disp,
                                      double *host_q, double *boxlo, double *prd) {
  this->acc_timers();

  // Map the boolean energy/virial flags onto the kernel convention:
  // 0 = off, 1 = global accumulation, 2 = per-atom accumulation.
  int eflag, vflag;
  if (eatom) eflag=2;
  else if (eflag_in) eflag=1;
  else eflag=0;
  if (vatom) vflag=2;
  else if (vflag_in) vflag=1;
  else vflag=0;

#ifdef LAL_NO_BLOCK_REDUCE
  if (eflag) eflag=2;
  if (vflag) vflag=2;
#endif

  this->set_kernel(eflag,vflag);

  // reallocate per-atom arrays, transfer data from the host
  // and build the neighbor lists if needed

  // NOTE:
  //   For now we invoke precompute() again here,
  //   to be able to turn on/off the udirect2b kernel (which comes before this)
  //   Once all the kernels are ready, precompute() is needed only once
  //   in the first kernel in a time step.
  //   We only need to cast uind and uinp from host to device here
  //   if the neighbor lists are rebuilt and other per-atom arrays
  //   (x, type, amtype, amgroup, rpole) are ready on the device.
  int** firstneigh = this->precompute(ago, inum_full, nall, host_x, host_type,
                                      host_amtype, host_amgroup, host_rpole,
                                      nullptr, nullptr, sublo, subhi, tag,
                                      nspecial, special, nspecial15, special15,
                                      eflag_in, vflag_in, eatom, vatom,
                                      host_start, ilist, jnum, cpu_time,
                                      success, host_q, boxlo, prd);

  this->_off2_disp = off2_disp;
  this->_aewald = aewald;

  // The returned block count is only needed by copy_answers(); discard it
  // until that call is re-enabled below (previously it was stored in an
  // unused local, triggering -Wunused-variable).
  dispersion_real(eflag,vflag);

  // leave the answers (forces, energies and virial) on the device,
  // only copy them back in the last kernel (polar_real)
  //const int red_blocks=dispersion_real(eflag,vflag);
  //ans->copy_answers(eflag_in,vflag_in,eatom,vatom,red_blocks);
  //device->add_ans_object(ans);
  this->hd_balancer.stop_timer();
  return firstneigh; // nbor->host_jlist.begin()-host_start;
}
// ---------------------------------------------------------------------------
// Launch the dispersion real-space kernel; returns the number of thread
// blocks launched (0 when there are no local atoms).
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int HippoT::dispersion_real(const int eflag, const int vflag) {
  int ainum=this->ans->inum();
  if (ainum == 0)
    return 0;

  int _nall=this->atom->nall();
  int nbor_pitch=this->nbor->nbor_pitch();

  // Compute the block size and grid size to keep all cores busy
  const int BX=this->block_size();
  int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
                               (BX/this->_threads_per_atom)));

  this->time_pair.start();

  // Build the short neighbor list for the dispersion cutoff off2_disp;
  // this list is consumed by k_dispersion below.
  this->k_short_nbor.set_size(GX,BX);
  this->k_short_nbor.run(&this->atom->x, &this->nbor->dev_nbor,
                         &this->_nbor_data->begin(),
                         &this->dev_short_nbor, &this->_off2_disp, &ainum,
                         &nbor_pitch, &this->_threads_per_atom);

  k_dispersion.set_size(GX,BX);
  k_dispersion.run(&this->atom->x, &this->atom->extra,
                   &coeff_amtype, &coeff_amclass, &sp_nonpolar,
                   &this->nbor->dev_nbor, &this->_nbor_data->begin(),
                   &this->dev_short_nbor,
                   &this->ans->force, &this->ans->engv,
                   &eflag, &vflag, &ainum, &_nall, &nbor_pitch,
                   &this->_threads_per_atom, &this->_aewald,
                   &this->_off2_disp);
  this->time_pair.stop();
  return GX;
}
// ---------------------------------------------------------------------------
// Launch the multipole real-space kernel, accumulating tep on the device;
// returns the number of thread blocks launched.
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int HippoT::multipole_real(const int eflag, const int vflag) {
  int inum=this->ans->inum();
  if (inum==0) return 0;

  int nall=this->atom->nall();
  int nbor_pitch=this->nbor->nbor_pitch();

  // Size the launch so BX threads per block cover inum atoms with
  // _threads_per_atom threads cooperating on each atom.
  const int BX=this->block_size();
  int GX=static_cast<int>(ceil(static_cast<double>(inum) /
                               (BX/this->_threads_per_atom)));

  this->time_pair.start();

  // Build the short neighbor list for the cutoff off2_mpole;
  // at this point mpole is the first kernel in a time step.
  this->k_short_nbor.set_size(GX,BX);
  this->k_short_nbor.run(&this->atom->x, &this->nbor->dev_nbor,
                         &this->_nbor_data->begin(), &this->dev_short_nbor,
                         &this->_off2_mpole, &inum, &nbor_pitch,
                         &this->_threads_per_atom);

  this->k_multipole.set_size(GX,BX);
  this->k_multipole.run(&this->atom->x, &this->atom->extra, &coeff_amtype,
                        &sp_polar, &this->nbor->dev_nbor,
                        &this->_nbor_data->begin(), &this->dev_short_nbor,
                        &this->ans->force, &this->ans->engv, &this->_tep,
                        &eflag, &vflag, &inum, &nall, &nbor_pitch,
                        &this->_threads_per_atom, &this->_aewald,
                        &this->_felec, &this->_off2_mpole,
                        &_polar_dscale, &_polar_uscale);

  this->time_pair.stop();
  return GX;
}
// ---------------------------------------------------------------------------
// Calculate the real-space permanent field, returning field and fieldp
// in _fieldp on the device. Returns the number of thread blocks launched.
// eflag/vflag are unused here: the field kernel takes no energy/virial
// arguments.
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int HippoT::udirect2b(const int eflag, const int vflag) {
  int ainum=this->ans->inum();
  if (ainum == 0)
    return 0;

  int _nall=this->atom->nall();
  int nbor_pitch=this->nbor->nbor_pitch();

  // Compute the block size and grid size to keep all cores busy
  const int BX=this->block_size();
  int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
                               (BX/this->_threads_per_atom)));

  this->time_pair.start();

  // Build the short neighbor list for the polar cutoff if not done yet;
  // the same list is reused by umutual2b and polar_real this time step.
  if (!this->short_nbor_polar_avail) {
    this->k_short_nbor.set_size(GX,BX);
    this->k_short_nbor.run(&this->atom->x, &this->nbor->dev_nbor,
                           &this->_nbor_data->begin(),
                           &this->dev_short_nbor, &this->_off2_polar, &ainum,
                           &nbor_pitch, &this->_threads_per_atom);
    this->short_nbor_polar_avail = true;
  }

  this->k_udirect2b.set_size(GX,BX);
  this->k_udirect2b.run(&this->atom->x, &this->atom->extra, &coeff_amtype, &sp_polar,
                        &this->nbor->dev_nbor, &this->_nbor_data->begin(),
                        &this->dev_short_nbor,
                        &this->_fieldp, &ainum, &_nall, &nbor_pitch,
                        &this->_threads_per_atom, &this->_aewald, &this->_off2_polar,
                        &_polar_dscale, &_polar_uscale);
  this->time_pair.stop();
  return GX;
}
// ---------------------------------------------------------------------------
// Launch the real-space induced-field kernel (field and fieldp written to
// _fieldp on the device); returns the number of thread blocks launched.
// eflag/vflag are unused: the field kernel takes no energy/virial arguments.
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int HippoT::umutual2b(const int eflag, const int vflag) {
  int inum=this->ans->inum();
  if (inum==0) return 0;

  int nall=this->atom->nall();
  int nbor_pitch=this->nbor->nbor_pitch();

  // Size the launch so BX threads per block cover inum atoms with
  // _threads_per_atom threads cooperating on each atom.
  const int BX=this->block_size();
  int GX=static_cast<int>(ceil(static_cast<double>(inum) /
                               (BX/this->_threads_per_atom)));

  this->time_pair.start();

  // Reuse the polar-cutoff short list if a previous kernel already built it.
  if (!this->short_nbor_polar_avail) {
    this->k_short_nbor.set_size(GX,BX);
    this->k_short_nbor.run(&this->atom->x, &this->nbor->dev_nbor,
                           &this->_nbor_data->begin(), &this->dev_short_nbor,
                           &this->_off2_polar, &inum, &nbor_pitch,
                           &this->_threads_per_atom);
    this->short_nbor_polar_avail = true;
  }

  this->k_umutual2b.set_size(GX,BX);
  this->k_umutual2b.run(&this->atom->x, &this->atom->extra, &coeff_amtype,
                        &sp_polar, &this->nbor->dev_nbor,
                        &this->_nbor_data->begin(), &this->dev_short_nbor,
                        &this->_fieldp, &inum, &nall, &nbor_pitch,
                        &this->_threads_per_atom, &this->_aewald,
                        &this->_off2_polar, &_polar_dscale, &_polar_uscale);

  this->time_pair.stop();
  return GX;
}
// ---------------------------------------------------------------------------
// Calculate the polar real-space term, returning tep on the device.
// Returns the number of thread blocks launched. As the last kernel in a
// time step, this also invalidates the cached polar short neighbor list.
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int HippoT::polar_real(const int eflag, const int vflag) {
  int ainum=this->ans->inum();
  if (ainum == 0)
    return 0;

  int _nall=this->atom->nall();
  int nbor_pitch=this->nbor->nbor_pitch();

  // Compute the block size and grid size to keep all cores busy
  const int BX=this->block_size();
  int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
                               (BX/this->_threads_per_atom)));

  this->time_pair.start();

  // Build the short neighbor list for the polar cutoff if not done yet
  // (it may already exist from udirect2b/umutual2b this time step)
  if (!this->short_nbor_polar_avail) {
    this->k_short_nbor.set_size(GX,BX);
    this->k_short_nbor.run(&this->atom->x, &this->nbor->dev_nbor,
                           &this->_nbor_data->begin(),
                           &this->dev_short_nbor, &this->_off2_polar, &ainum,
                           &nbor_pitch, &this->_threads_per_atom);
    this->short_nbor_polar_avail = true;
  }

  this->k_polar.set_size(GX,BX);
  this->k_polar.run(&this->atom->x, &this->atom->extra, &coeff_amtype, &sp_polar,
                    &this->nbor->dev_nbor, &this->_nbor_data->begin(),
                    &this->dev_short_nbor,
                    &this->ans->force, &this->ans->engv, &this->_tep,
                    &eflag, &vflag, &ainum, &_nall, &nbor_pitch,
                    &this->_threads_per_atom, &this->_aewald, &this->_felec,
                    &this->_off2_polar, &_polar_dscale, &_polar_uscale);
  this->time_pair.stop();

  // Signal that short nbor list is not avail for the next time step
  // do it here because polar_real() is the last kernel in a time step at this point
  this->short_nbor_polar_avail = false;

  return GX;
}
template class Hippo<PRECISION,ACC_PRECISION>;
}

1892
lib/gpu/lal_hippo.cu Normal file

File diff suppressed because it is too large Load Diff

120
lib/gpu/lal_hippo.h Normal file
View File

@ -0,0 +1,120 @@
/***************************************************************************
hippo.h
-------------------
Trung Dac Nguyen (Northwestern)
Class for acceleration of the hippo pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin :
email : trung.nguyen@northwestern.edu
***************************************************************************/
#ifndef LAL_HIPPO_H
#define LAL_HIPPO_H
#include "lal_base_amoeba.h"
namespace LAMMPS_AL {
template <class numtyp, class acctyp>
class Hippo : public BaseAmoeba<numtyp, acctyp> {
 public:
  Hippo();
  ~Hippo();

  /// Clear any previous data and set up for a new LAMMPS run
  /** \param max_nbors initial number of rows in the neighbor matrix
    * \param cell_size cutoff + skin
    * \param gpu_split fraction of particles handled by device
    *
    * Returns:
    * - 0 if successful
    * - -1 if fix gpu not found
    * - -3 if there is an out of memory error
    * - -4 if the GPU library was not compiled for GPU
    * - -5 Double precision is not supported on card
    *
    * NOTE(review): the special_* parameters are ordered hal, repel, disp,
    * mpole to match the definition in lal_hippo.cpp; an earlier declaration
    * listed mpole first, which compiled (all four are const double*) but
    * silently mislabeled the arguments. **/
  int init(const int ntypes, const int max_amtype, const int max_amclass,
           const double *host_pdamp, const double *host_thole,
           const double *host_dirdamp, const int *host_amtype2class,
           const double *host_special_hal,
           const double *host_special_repel,
           const double *host_special_disp,
           const double *host_special_mpole,
           const double *host_special_polar_wscale,
           const double *host_special_polar_piscale,
           const double *host_special_polar_pscale,
           const double *host_csix, const double *host_adisp,
           const int nlocal, const int nall, const int max_nbors,
           const int maxspecial, const int maxspecial15, const double cell_size,
           const double gpu_split, FILE *_screen,
           const double polar_dscale, const double polar_uscale);

  /// Compute dispersion real-space with device neighboring
  int** compute_dispersion_real(const int ago, const int inum_full, const int nall,
           double **host_x, int *host_type, int *host_amtype,
           int *host_amgroup, double **host_rpole, double *sublo, double *subhi,
           tagint *tag, int **nspecial, tagint **special,
           int *nspecial15, tagint **special15,
           const bool eflag, const bool vflag,
           const bool eatom, const bool vatom, int &host_start,
           int **ilist, int **numj, const double cpu_time, bool &success,
           const double aewald, const double off2_disp, double *charge,
           double *boxlo, double *prd);

  /// Clear all host and device data
  /** \note This is called at the beginning of the init() routine **/
  void clear();

  /// Returns memory usage on device per atom
  int bytes_per_atom(const int max_nbors) const;

  /// Total host memory used by library for pair style
  double host_memory_usage() const;

  // --------------------------- TYPE DATA --------------------------

  /// pdamp = coeff_amtype.x; thole = coeff_amtype.y;
  /// dirdamp = coeff_amtype.z; amtype2class = coeff_amtype.w
  UCL_D_Vec<numtyp4> coeff_amtype;
  /// csix = coeff_amclass.x; adisp = coeff_amclass.y;
  UCL_D_Vec<numtyp4> coeff_amclass;
  /// Special polar values [0-4], packed by init():
  /// sp_polar.x = special_polar_wscale
  /// sp_polar.y = special_polar_piscale
  /// sp_polar.z = special_polar_pscale
  /// sp_polar.w = special_mpole
  UCL_D_Vec<numtyp4> sp_polar;
  /// Special nonpolar values [0-4], packed by init():
  /// sp_nonpolar.x = special_hal
  /// sp_nonpolar.y = special_repel
  /// sp_nonpolar.z = special_disp
  /// sp_nonpolar.w = unused (zero)
  UCL_D_Vec<numtyp4> sp_nonpolar;

  /// If atom type constants fit in shared memory, use fast kernels
  bool shared_types;

  /// Number of atom types
  int _lj_types;

  numtyp _polar_dscale, _polar_uscale;
  numtyp _qqrd2e;

  /// Dispersion real-space kernel (hippo-specific; bound in init())
  UCL_Kernel k_dispersion;

 protected:
  bool _allocated;
  int dispersion_real(const int eflag, const int vflag);
  int multipole_real(const int eflag, const int vflag);
  int udirect2b(const int eflag, const int vflag);
  int umutual2b(const int eflag, const int vflag);
  int polar_real(const int eflag, const int vflag);
};
}
#endif

210
lib/gpu/lal_hippo_ext.cpp Normal file
View File

@ -0,0 +1,210 @@
/***************************************************************************
hippo_ext.cpp
-------------------
Trung Dac Nguyen (Northwestern)
Functions for LAMMPS access to hippo acceleration routines.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin :
email : trung.nguyen@northwestern.edu
***************************************************************************/
#include <iostream>
#include <cassert>
#include <cmath>
#include "lal_hippo.h"
using namespace std;
using namespace LAMMPS_AL;
static Hippo<PRECISION,ACC_PRECISION> HIPPOMF;
// ---------------------------------------------------------------------------
// Allocate memory on host and device and copy constants to device.
// Kernels are compiled once on world rank 0, then each remaining rank
// attaches to its device one core at a time (barrier-ordered).
// Returns 0 on success (nonzero error code from Hippo::init otherwise);
// gpu_mode and tep_size are output parameters for the host pair style.
// ---------------------------------------------------------------------------
int hippo_gpu_init(const int ntypes, const int max_amtype, const int max_amclass,
                   const double *host_pdamp, const double *host_thole,
                   const double *host_dirdamp, const int *host_amtype2class,
                   const double *host_special_hal,
                   const double *host_special_repel,
                   const double *host_special_disp,
                   const double *host_special_mpole,
                   const double *host_special_polar_wscale,
                   const double *host_special_polar_piscale,
                   const double *host_special_polar_pscale,
                   const double *host_csix, const double *host_adisp,
                   const int nlocal, const int nall, const int max_nbors,
                   const int maxspecial, const int maxspecial15,
                   const double cell_size, int &gpu_mode, FILE *screen,
                   const double polar_dscale, const double polar_uscale,
                   int& tep_size) {
  HIPPOMF.clear();
  gpu_mode=HIPPOMF.device->gpu_mode();
  double gpu_split=HIPPOMF.device->particle_split();
  int first_gpu=HIPPOMF.device->first_device();
  int last_gpu=HIPPOMF.device->last_device();
  int world_me=HIPPOMF.device->world_me();
  int gpu_rank=HIPPOMF.device->gpu_rank();
  int procs_per_gpu=HIPPOMF.device->procs_per_gpu();

  // tep (per-atom torque) entries are returned in accelerator precision
  tep_size=sizeof(ACC_PRECISION);

  // single spot for the long argument list so the process-0 and per-GPU
  // init calls below cannot drift out of sync
  auto init_instance = [&]() {
    return HIPPOMF.init(ntypes, max_amtype, max_amclass,
                        host_pdamp, host_thole, host_dirdamp,
                        host_amtype2class, host_special_hal,
                        host_special_repel, host_special_disp,
                        host_special_mpole, host_special_polar_wscale,
                        host_special_polar_piscale, host_special_polar_pscale,
                        host_csix, host_adisp, nlocal, nall, max_nbors,
                        maxspecial, maxspecial15, cell_size, gpu_split,
                        screen, polar_dscale, polar_uscale);
  };

  HIPPOMF.device->init_message(screen,"HIPPO",first_gpu,last_gpu);

  // only replica 0 prints progress messages
  bool message = (HIPPOMF.device->replica_me()==0 && screen);

  if (message) {
    fprintf(screen,"Initializing GPU and compiling on process 0...");
    fflush(screen);
  }

  int init_ok=0;
  if (world_me==0)
    init_ok=init_instance();

  HIPPOMF.device->world_barrier();
  if (message)
    fprintf(screen,"Done.\n");

  for (int i=0; i<procs_per_gpu; i++) {
    if (message) {
      if (last_gpu-first_gpu==0)
        fprintf(screen,"Initializing GPU %d on core %d...",first_gpu,i);
      else
        fprintf(screen,"Initializing GPUs %d-%d on core %d...",first_gpu,
                last_gpu,i);
      fflush(screen);
    }
    // ranks other than world 0 initialize when their core index comes up
    if (gpu_rank==i && world_me!=0)
      init_ok=init_instance();

    HIPPOMF.device->gpu_barrier();
    if (message)
      fprintf(screen,"Done.\n");
  }
  if (message)
    fprintf(screen,"\n");

  if (init_ok==0)
    HIPPOMF.estimate_gpu_overhead();
  return init_ok;
}
// ---------------------------------------------------------------------------
// Free all host and device data held by the library-side HIPPO instance
// ---------------------------------------------------------------------------
void hippo_gpu_clear() {
HIPPOMF.clear();
}
// ---------------------------------------------------------------------------
// Compute the real-space dispersion term on the device.
// Returns the device-built neighbor list pointers (via ilist/jnum) for the
// host pair style to reuse; off2 is the squared dispersion cutoff and
// aewald the Ewald parameter for the dispersion sum.
// ---------------------------------------------------------------------------
int** hippo_gpu_compute_dispersion_real(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, int *nspecial15, tagint** special15,
const bool eflag, const bool vflag, const bool eatom,
const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, const double aewald, const double off2,
double *host_q, double *boxlo, double *prd) {
return HIPPOMF.compute_dispersion_real(ago, inum_full, nall, host_x, host_type,
host_amtype, host_amgroup, host_rpole, sublo, subhi,
tag, nspecial, special, nspecial15, special15,
eflag, vflag, eatom, vatom, host_start, ilist, jnum,
cpu_time, success, aewald, off2, host_q, boxlo, prd);
}
// ---------------------------------------------------------------------------
// Compute the real-space multipole term on the device.
// tep_ptr receives a pointer to the per-atom tep output buffer
// (entries sized tep_size as reported by hippo_gpu_init); felec is the
// electrostatics prefactor and off2 the squared multipole cutoff.
// ---------------------------------------------------------------------------
int** hippo_gpu_compute_multipole_real(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, int *nspecial15, tagint** special15,
const bool eflag, const bool vflag, const bool eatom,
const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, const double aewald, const double felec, const double off2,
double *host_q, double *boxlo, double *prd, void **tep_ptr) {
return HIPPOMF.compute_multipole_real(ago, inum_full, nall, host_x, host_type,
host_amtype, host_amgroup, host_rpole, sublo, subhi,
tag, nspecial, special, nspecial15, special15,
eflag, vflag, eatom, vatom, host_start, ilist, jnum,
cpu_time, success, aewald, felec, off2, host_q, boxlo, prd, tep_ptr);
}
// ---------------------------------------------------------------------------
// Compute the direct (udirect2b) real-space induced-field contribution on
// the device from the induced dipoles host_uind/host_uinp.
// fieldp_ptr receives a pointer to the field/fieldp output buffer.
// ---------------------------------------------------------------------------
int** hippo_gpu_compute_udirect2b(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole,
double **host_uind, double **host_uinp,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, int *nspecial15, tagint** special15,
const bool eflag, const bool vflag, const bool eatom,
const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, const double aewald, const double off2, double *host_q,
double *boxlo, double *prd, void **fieldp_ptr) {
return HIPPOMF.compute_udirect2b(ago, inum_full, nall, host_x, host_type,
host_amtype, host_amgroup, host_rpole, host_uind, host_uinp,
sublo, subhi, tag, nspecial, special, nspecial15, special15,
eflag, vflag, eatom, vatom, host_start, ilist, jnum,
cpu_time, success, aewald, off2, host_q, boxlo, prd, fieldp_ptr);
}
// ---------------------------------------------------------------------------
// Compute the mutual (umutual2b) real-space induced-field contribution on
// the device from the induced dipoles host_uind/host_uinp.
// fieldp_ptr receives a pointer to the field/fieldp output buffer.
// ---------------------------------------------------------------------------
int** hippo_gpu_compute_umutual2b(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole,
double **host_uind, double **host_uinp,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, int *nspecial15, tagint** special15,
const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, const double aewald, const double off2, double *host_q,
double *boxlo, double *prd, void **fieldp_ptr) {
return HIPPOMF.compute_umutual2b(ago, inum_full, nall, host_x, host_type,
host_amtype, host_amgroup, host_rpole, host_uind, host_uinp,
sublo, subhi, tag, nspecial, special, nspecial15, special15,
eflag, vflag, eatom, vatom, host_start, ilist, jnum,
cpu_time, success, aewald, off2, host_q, boxlo, prd, fieldp_ptr);
}
// ---------------------------------------------------------------------------
// Compute the real-space polarization term on the device.
// tep_ptr receives a pointer to the per-atom tep output buffer; felec is
// the electrostatics prefactor and off2 the squared polar cutoff.
// ---------------------------------------------------------------------------
int** hippo_gpu_compute_polar_real(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup,
double **host_rpole, double **host_uind, double **host_uinp,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, int *nspecial15, tagint** special15,
const bool eflag, const bool vflag, const bool eatom,
const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, const double aewald, const double felec, const double off2,
double *host_q, double *boxlo, double *prd, void **tep_ptr) {
return HIPPOMF.compute_polar_real(ago, inum_full, nall, host_x, host_type,
host_amtype, host_amgroup, host_rpole, host_uind, host_uinp,
sublo, subhi, tag, nspecial, special, nspecial15, special15,
eflag, vflag, eatom, vatom, host_start, ilist, jnum,
cpu_time, success, aewald, felec, off2, host_q, boxlo, prd, tep_ptr);
}
// ---------------------------------------------------------------------------
// Report host memory (bytes) used by the library for this pair style
// ---------------------------------------------------------------------------
double hippo_gpu_bytes() {
return HIPPOMF.host_memory_usage();
}

View File

@ -65,17 +65,6 @@ int amoeba_gpu_init(const int ntypes, const int max_amtype, const int max_amclas
const double polar_dscale, const double polar_uscale, int& tq_size); const double polar_dscale, const double polar_uscale, int& tq_size);
void amoeba_gpu_clear(); void amoeba_gpu_clear();
int** amoeba_gpu_compute_dispersion_real(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
int *host_amtype, int *host_amgroup, double **host_rpole,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, int *nspecial15, tagint** special15,
const bool eflag, const bool vflag, const bool eatom,
const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, const double aewald, const double off2,
double *host_q, double *boxlo, double *prd);
int ** amoeba_gpu_compute_multipole_real(const int ago, const int inum, const int nall, int ** amoeba_gpu_compute_multipole_real(const int ago, const int inum, const int nall,
double **host_x, int *host_type, int *host_amtype, int *host_amgroup, double **host_x, int *host_type, int *host_amtype, int *host_amgroup,
double **host_rpole, double *sublo, double *subhi, tagint *tag, double **host_rpole, double *sublo, double *subhi, tagint *tag,
@ -128,9 +117,9 @@ PairAmoebaGPU::PairAmoebaGPU(LAMMPS *lmp) : PairAmoeba(lmp), gpu_mode(GPU_FORCE)
fieldp_pinned = nullptr; fieldp_pinned = nullptr;
tq_pinned = nullptr; tq_pinned = nullptr;
gpu_hal_ready = false; gpu_hal_ready = false; // true for AMOEBA when ready
gpu_repulsion_ready = false; // true for HIPPO gpu_repulsion_ready = false; // always false for AMOEBA
gpu_dispersion_real_ready = false; // true for HIPPO gpu_dispersion_real_ready = false; // always false for AMOEBA
gpu_multipole_real_ready = true; gpu_multipole_real_ready = true;
gpu_udirect2b_ready = true; gpu_udirect2b_ready = true;
gpu_umutual2b_ready = true; gpu_umutual2b_ready = true;
@ -205,54 +194,6 @@ void PairAmoebaGPU::init_style()
/* ---------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */
void PairAmoebaGPU::dispersion_real()
{
if (!gpu_dispersion_real_ready) {
PairAmoeba::dispersion_real();
return;
}
int eflag=1, vflag=1;
int nall = atom->nlocal + atom->nghost;
int inum, host_start;
bool success = true;
int *ilist, *numneigh, **firstneigh;
double sublo[3],subhi[3];
if (domain->triclinic == 0) {
sublo[0] = domain->sublo[0];
sublo[1] = domain->sublo[1];
sublo[2] = domain->sublo[2];
subhi[0] = domain->subhi[0];
subhi[1] = domain->subhi[1];
subhi[2] = domain->subhi[2];
} else {
domain->bbox(domain->sublo_lamda,domain->subhi_lamda,sublo,subhi);
}
inum = atom->nlocal;
// select the correct cutoff for the term
if (use_dewald) choose(DISP_LONG);
else choose(DISP);
firstneigh = amoeba_gpu_compute_dispersion_real(neighbor->ago, inum, nall, atom->x,
atom->type, amtype, amgroup, rpole,
sublo, subhi, atom->tag,
atom->nspecial, atom->special,
atom->nspecial15, atom->special15,
eflag, vflag, eflag_atom, vflag_atom,
host_start, &ilist, &numneigh, cpu_time,
success, aewald, off2, atom->q,
domain->boxlo, domain->prd);
if (!success)
error->one(FLERR,"Insufficient memory on accelerator");
}
/* ---------------------------------------------------------------------- */
void PairAmoebaGPU::multipole_real() void PairAmoebaGPU::multipole_real()
{ {
if (!gpu_multipole_real_ready) { if (!gpu_multipole_real_ready) {

View File

@ -35,7 +35,7 @@ class PairAmoebaGPU : public PairAmoeba {
virtual void induce(); virtual void induce();
virtual void dispersion_real(); //virtual void dispersion_real();
virtual void multipole_real(); virtual void multipole_real();
virtual void udirect2b(double **, double **); virtual void udirect2b(double **, double **);
virtual void umutual2b(double **, double **); virtual void umutual2b(double **, double **);

1175
src/GPU/pair_hippo_gpu.cpp Normal file

File diff suppressed because it is too large Load Diff

80
src/GPU/pair_hippo_gpu.h Normal file
View File

@ -0,0 +1,80 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef PAIR_CLASS
// clang-format off
PairStyle(hippo/gpu,PairHippoGPU);
// clang-format on
#else
#ifndef LMP_PAIR_HIPPO_GPU_H
#define LMP_PAIR_HIPPO_GPU_H
#include "pair_amoeba.h"
namespace LAMMPS_NS {
// GPU-accelerated variant of the hippo pair style.  Overrides the
// per-term virtual hooks of PairAmoeba with device implementations where
// the corresponding gpu_*_ready flag is set; otherwise the base-class
// (CPU) implementation is used.
class PairHippoGPU : public PairAmoeba {
public:
PairHippoGPU(LAMMPS *lmp);
~PairHippoGPU();
void init_style();
double memory_usage();
// device execution modes: all-GPU force, GPU neighbor build, hybrid neighboring
enum { GPU_FORCE, GPU_NEIGH, GPU_HYB_NEIGH };
virtual void induce();
virtual void dispersion_real();
virtual void multipole_real();
virtual void udirect2b(double **, double **);
virtual void umutual2b(double **, double **);
virtual void polar_real();
private:
int gpu_mode;             // one of the enum values above, set in init_style()
double cpu_time;          // host-side time spent in GPU compute calls
void *tq_pinned;          // pinned buffer for per-atom torque returned by device
void *fieldp_pinned;      // pinned buffer for field/fieldp returned by device
bool tq_single;           // torque buffer holds single-precision entries
// per-term flags: true when a device kernel exists for the term;
// when false the PairAmoeba (CPU) implementation is called instead
bool gpu_hal_ready;
bool gpu_repulsion_ready;
bool gpu_dispersion_real_ready;
bool gpu_multipole_real_ready;
bool gpu_udirect2b_ready;
bool gpu_umutual2b_ready;
bool gpu_polar_real_ready;
// CPU fallback path for the udirect2b term
void udirect2b_cpu();
// accumulate forces from the device-computed torque buffer
// (numtyp = precision of the pinned torque entries)
template<class numtyp>
void compute_force_from_torque(const numtyp*, double**, double*);
};
} // namespace LAMMPS_NS
#endif
#endif
/* ERROR/WARNING messages:
E: Insufficient memory on accelerator
There is insufficient memory on one of the devices specified for the gpu
package
E: Pair style hippo/gpu requires atom attribute q
The atom style defined does not have this attribute.
*/