Added the GPU version of coul/slater/long

Trung Nguyen
2023-12-05 13:32:33 -06:00
parent 9588ddabf6
commit 086cf49a8c
6 changed files with 915 additions and 0 deletions


@@ -0,0 +1,150 @@
/***************************************************************************
coul_slater_long.cpp
-------------------
Trung Nguyen (U Chicago)
Class for acceleration of the coul/slater/long pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin : September 2023
email : ndactrung@gmail.com
***************************************************************************/
#if defined(USE_OPENCL)
#include "coul_slater_long_cl.h"
#elif defined(USE_CUDART)
const char *coul_slater_long=0;
#else
#include "coul_slater_long_cubin.h"
#endif
#include "lal_coul_slater_long.h"
#include <cassert>
namespace LAMMPS_AL {
#define CoulSlaterLongT CoulSlaterLong<numtyp, acctyp>
extern Device<PRECISION,ACC_PRECISION> pair_gpu_device;
template <class numtyp, class acctyp>
CoulSlaterLongT::CoulSlaterLong() : BaseCharge<numtyp,acctyp>(), _allocated(false) {
}
template <class numtyp, class acctyp>
CoulSlaterLongT::~CoulSlaterLong() {
clear();
}
template <class numtyp, class acctyp>
int CoulSlaterLongT::bytes_per_atom(const int max_nbors) const {
return this->bytes_per_atom_atomic(max_nbors);
}
template <class numtyp, class acctyp>
int CoulSlaterLongT::init(const int ntypes, double **host_scale,
const int nlocal, const int nall, const int max_nbors,
const int maxspecial, const double cell_size,
const double gpu_split, FILE *_screen,
const double host_cut_coulsq, double *host_special_coul,
const double qqrd2e, const double g_ewald, double lamda) {
int success;
success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,
gpu_split,_screen,coul_slater_long,"k_coul_slater_long");
if (success!=0)
return success;
int lj_types=ntypes;
shared_types=false;
int max_shared_types=this->device->max_shared_types();
if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
lj_types=max_shared_types;
shared_types=true;
}
_lj_types=lj_types;
// Allocate a host write buffer for data initialization
UCL_H_Vec<numtyp> host_write(lj_types*lj_types*32,*(this->ucl_device),
UCL_WRITE_ONLY);
for (int i=0; i<lj_types*lj_types; i++)
host_write[i]=0.0;
scale.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
this->atom->type_pack1(ntypes,lj_types,scale,host_write,host_scale);
sp_cl.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
for (int i=0; i<4; i++) {
host_write[i]=host_special_coul[i];
}
ucl_copy(sp_cl,host_write,4,false);
_cut_coulsq=host_cut_coulsq;
_qqrd2e=qqrd2e;
_g_ewald=g_ewald;
_lamda=lamda;
_allocated=true;
this->_max_bytes=scale.row_bytes()+sp_cl.row_bytes();
return 0;
}
template <class numtyp, class acctyp>
void CoulSlaterLongT::reinit(const int ntypes, double **host_scale) {
UCL_H_Vec<numtyp> hscale(_lj_types*_lj_types,*(this->ucl_device),
UCL_WRITE_ONLY);
this->atom->type_pack1(ntypes,_lj_types,scale,hscale,host_scale);
}
template <class numtyp, class acctyp>
void CoulSlaterLongT::clear() {
if (!_allocated)
return;
_allocated=false;
scale.clear();
sp_cl.clear();
this->clear_atomic();
}
template <class numtyp, class acctyp>
double CoulSlaterLongT::host_memory_usage() const {
return this->host_memory_usage_atomic()+sizeof(CoulSlaterLong<numtyp,acctyp>);
}
// ---------------------------------------------------------------------------
// Calculate energies, forces, and virial contributions
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int CoulSlaterLongT::loop(const int eflag, const int vflag) {
// Compute the block size and grid size to keep all cores busy
const int BX=this->block_size();
int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
(BX/this->_threads_per_atom)));
int ainum=this->ans->inum();
int nbor_pitch=this->nbor->nbor_pitch();
this->time_pair.start();
if (shared_types) {
this->k_pair_sel->set_size(GX,BX);
this->k_pair_sel->run(&this->atom->x, &scale, &sp_cl,
&this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->ans->force, &this->ans->engv,
&eflag, &vflag, &ainum, &nbor_pitch,
&this->atom->q, &_cut_coulsq, &_qqrd2e, &_g_ewald,
&_lamda, &this->_threads_per_atom);
} else {
this->k_pair.set_size(GX,BX);
this->k_pair.run(&this->atom->x, &scale, &_lj_types, &sp_cl,
&this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->ans->force, &this->ans->engv, &eflag, &vflag,
&ainum, &nbor_pitch, &this->atom->q, &_cut_coulsq,
&_qqrd2e, &_g_ewald, &_lamda, &this->_threads_per_atom);
}
this->time_pair.stop();
return GX;
}
template class CoulSlaterLong<PRECISION,ACC_PRECISION>;
}
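
Note on the launch sizing in CoulSlaterLong::loop() above: the grid size GX is the number of local atoms divided by the atoms handled per block (BX/_threads_per_atom), rounded up, so a partially filled last block still covers the remaining atoms. A minimal standalone sketch of that arithmetic, with purely illustrative values for the block size and threads per atom:

#include <cmath>
#include <cstdio>

// Illustration of the launch-size arithmetic in CoulSlaterLong::loop():
// each block of BX threads handles BX/t_per_atom atoms, so the grid needs
// ceil(inum / (BX/t_per_atom)) blocks to cover every local atom.
int main() {
  const int inum = 10000;      // number of local atoms (illustrative)
  const int BX = 256;          // block size (illustrative)
  const int t_per_atom = 4;    // threads cooperating per atom (illustrative)
  const int GX = static_cast<int>(ceil(static_cast<double>(inum) /
                                       (BX / t_per_atom)));
  printf("atoms per block: %d, blocks launched: %d\n", BX / t_per_atom, GX);
  return 0;   // prints: atoms per block: 64, blocks launched: 157
}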


@@ -0,0 +1,237 @@
// **************************************************************************
// coul_slater_long.cu
// -------------------
// Trung Nguyen (U Chicago)
//
// Device code for acceleration of the coul/slater/long pair style
//
// __________________________________________________________________________
// This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
// __________________________________________________________________________
//
// begin : September 2023
// email : ndactrung@gmail.com
// ***************************************************************************
#if defined(NV_KERNEL) || defined(USE_HIP)
#include "lal_aux_fun1.h"
#ifndef _DOUBLE_DOUBLE
_texture( pos_tex,float4);
_texture( q_tex,float);
#else
_texture_2d( pos_tex,int4);
_texture( q_tex,int2);
#endif
#else
#define pos_tex x_
#define q_tex q_
#endif
__kernel void k_coul_slater_long(const __global numtyp4 *restrict x_,
const __global numtyp *restrict scale,
const int lj_types,
const __global numtyp *restrict sp_cl_in,
const __global int *dev_nbor,
const __global int *dev_packed,
__global acctyp3 *restrict ans,
__global acctyp *restrict engv,
const int eflag, const int vflag, const int inum,
const int nbor_pitch,
const __global numtyp *restrict q_,
const numtyp cut_coulsq, const numtyp qqrd2e,
const numtyp g_ewald, const numtyp lamda,
const int t_per_atom) {
int tid, ii, offset;
atom_info(t_per_atom,ii,tid,offset);
__local numtyp sp_cl[4];
int n_stride;
local_allocate_store_charge();
sp_cl[0]=sp_cl_in[0];
sp_cl[1]=sp_cl_in[1];
sp_cl[2]=sp_cl_in[2];
sp_cl[3]=sp_cl_in[3];
acctyp3 f;
f.x=(acctyp)0; f.y=(acctyp)0; f.z=(acctyp)0;
acctyp e_coul, virial[6];
if (EVFLAG) {
e_coul=(acctyp)0;
for (int i=0; i<6; i++) virial[i]=(acctyp)0;
}
if (ii<inum) {
int nbor, nbor_end;
int i, numj;
nbor_info(dev_nbor,dev_packed,nbor_pitch,t_per_atom,ii,offset,i,numj,
n_stride,nbor_end,nbor);
numtyp4 ix; fetch4(ix,i,pos_tex); //x_[i];
int itype=ix.w;
numtyp qtmp; fetch(qtmp,i,q_tex);
for ( ; nbor<nbor_end; nbor+=n_stride) {
ucl_prefetch(dev_packed+nbor+n_stride);
int j=dev_packed[nbor];
numtyp factor_coul;
factor_coul = (numtyp)1.0-sp_cl[sbmask(j)];
j &= NEIGHMASK;
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
int jtype=jx.w;
// Compute r12
numtyp delx = ix.x-jx.x;
numtyp dely = ix.y-jx.y;
numtyp delz = ix.z-jx.z;
numtyp rsq = delx*delx+dely*dely+delz*delz;
int mtype=itype*lj_types+jtype;
if (rsq < cut_coulsq) {
numtyp r2inv=ucl_recip(rsq);
numtyp force, prefactor, _erfc;
numtyp r = ucl_rsqrt(r2inv);
numtyp grij = g_ewald * r;
numtyp expm2 = ucl_exp(-grij*grij);
numtyp t = ucl_recip((numtyp)1.0 + EWALD_P*grij);
_erfc = t * (A1+t*(A2+t*(A3+t*(A4+t*A5)))) * expm2;
fetch(prefactor,j,q_tex);
prefactor *= qqrd2e * scale[mtype] * qtmp/r;
numtyp slater_term = ucl_exp(-2*r/lamda)*(1 + (2*r/lamda*(1+r/lamda)));
force = prefactor * (_erfc + EWALD_F*grij*expm2 - slater_term -factor_coul) * r2inv;
f.x+=delx*force;
f.y+=dely*force;
f.z+=delz*force;
if (EVFLAG && eflag) {
numtyp e_slater = (1 + r/lamda)*ucl_exp(-2*r/lamda);
e_coul += prefactor*(_erfc-e_slater - factor_coul);
}
if (EVFLAG && vflag) {
virial[0] += delx*delx*force;
virial[1] += dely*dely*force;
virial[2] += delz*delz*force;
virial[3] += delx*dely*force;
virial[4] += delx*delz*force;
virial[5] += dely*delz*force;
}
}
} // for nbor
} // if ii
acctyp energy;
if (EVFLAG) energy=(acctyp)0.0;
store_answers_q(f,energy,e_coul,virial,ii,inum,tid,t_per_atom,offset,eflag,
vflag,ans,engv);
}
__kernel void k_coul_slater_long_fast(const __global numtyp4 *restrict x_,
const __global numtyp *restrict scale_in,
const __global numtyp *restrict sp_cl_in,
const __global int *dev_nbor,
const __global int *dev_packed,
__global acctyp3 *restrict ans,
__global acctyp *restrict engv,
const int eflag, const int vflag, const int inum,
const int nbor_pitch,
const __global numtyp *restrict q_,
const numtyp cut_coulsq, const numtyp qqrd2e,
const numtyp g_ewald, const numtyp lamda,
const int t_per_atom) {
int tid, ii, offset;
atom_info(t_per_atom,ii,tid,offset);
__local numtyp scale[MAX_SHARED_TYPES*MAX_SHARED_TYPES];
__local numtyp sp_cl[4];
int n_stride;
local_allocate_store_charge();
if (tid<4)
sp_cl[tid]=sp_cl_in[tid];
if (tid<MAX_SHARED_TYPES*MAX_SHARED_TYPES)
scale[tid]=scale_in[tid];
acctyp3 f;
f.x=(acctyp)0; f.y=(acctyp)0; f.z=(acctyp)0;
acctyp e_coul, virial[6];
if (EVFLAG) {
e_coul=(acctyp)0;
for (int i=0; i<6; i++) virial[i]=(acctyp)0;
}
__syncthreads();
if (ii<inum) {
int nbor, nbor_end;
int i, numj;
nbor_info(dev_nbor,dev_packed,nbor_pitch,t_per_atom,ii,offset,i,numj,
n_stride,nbor_end,nbor);
numtyp4 ix; fetch4(ix,i,pos_tex); //x_[i];
numtyp qtmp; fetch(qtmp,i,q_tex);
int iw=ix.w;
int itype=fast_mul((int)MAX_SHARED_TYPES,iw);
for ( ; nbor<nbor_end; nbor+=n_stride) {
ucl_prefetch(dev_packed+nbor+n_stride);
int j=dev_packed[nbor];
numtyp factor_coul;
factor_coul = (numtyp)1.0-sp_cl[sbmask(j)];
j &= NEIGHMASK;
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
int mtype=itype+jx.w;
// Compute r12
numtyp delx = ix.x-jx.x;
numtyp dely = ix.y-jx.y;
numtyp delz = ix.z-jx.z;
numtyp rsq = delx*delx+dely*dely+delz*delz;
if (rsq < cut_coulsq) {
numtyp r2inv=ucl_recip(rsq);
numtyp force, prefactor, _erfc;
numtyp r = ucl_rsqrt(r2inv);
numtyp grij = g_ewald * r;
numtyp expm2 = ucl_exp(-grij*grij);
numtyp t = ucl_recip((numtyp)1.0 + EWALD_P*grij);
_erfc = t * (A1+t*(A2+t*(A3+t*(A4+t*A5)))) * expm2;
fetch(prefactor,j,q_tex);
prefactor *= qqrd2e * scale[mtype] * qtmp/r;
numtyp slater_term = ucl_exp(-2*r/lamda)*(1 + (2*r/lamda*(1+r/lamda)));
force = prefactor * (_erfc + EWALD_F*grij*expm2 - slater_term -factor_coul) * r2inv;
f.x+=delx*force;
f.y+=dely*force;
f.z+=delz*force;
if (EVFLAG && eflag) {
numtyp e_slater = (1 + r/lamda)*ucl_exp(-2*r/lamda);
e_coul += prefactor*(_erfc-e_slater-factor_coul);
}
if (EVFLAG && vflag) {
virial[0] += delx*delx*force;
virial[1] += dely*dely*force;
virial[2] += delz*delz*force;
virial[3] += delx*dely*force;
virial[4] += delx*delz*force;
virial[5] += dely*delz*force;
}
}
} // for nbor
} // if ii
acctyp energy;
if (EVFLAG) energy=(acctyp)0.0;
store_answers_q(f,energy,e_coul,virial,ii,inum,tid,t_per_atom,offset,eflag,
vflag,ans,engv);
}
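
Note on the pair math in the two kernels above: both evaluate the real-space Ewald term of coul/long with the point charge replaced by a Slater-screened charge of decay length lamda. A sketch of the algebra behind _erfc, slater_term, and e_slater, writing A = qqrd2e*scale[mtype]*q_i*q_j (so prefactor in the kernel is A/r), g = g_ewald, and EWALD_F = 2/sqrt(pi):

E(r) = \frac{A}{r}\left[\operatorname{erfc}(gr) - \left(1+\frac{r}{\lambda}\right)e^{-2r/\lambda}\right]

-\frac{dE}{dr} = \frac{A}{r^{2}}\left[\operatorname{erfc}(gr) + \frac{2gr}{\sqrt{\pi}}e^{-(gr)^{2}} - e^{-2r/\lambda}\left(1+\frac{2r}{\lambda}\left(1+\frac{r}{\lambda}\right)\right)\right]

so force = prefactor*(_erfc + EWALD_F*grij*expm2 - slater_term)*r2inv equals (-dE/dr)/r, and multiplying by delx, dely, delz yields the Cartesian force components; the -factor_coul terms remove the excluded special-bond fraction following the same pattern as the coul/long kernels.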


@@ -0,0 +1,82 @@
/***************************************************************************
coul_slater_long.h
-------------------
Trung Nguyen (U Chicago)
Class for acceleration of the coul/slater/long pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin : September 2023
email : ndactrung@gmail.com
***************************************************************************/
#ifndef LAL_Coul_Slater_Long_H
#define LAL_Coul_Slater_Long_H
#include "lal_base_charge.h"
namespace LAMMPS_AL {
template <class numtyp, class acctyp>
class CoulSlaterLong : public BaseCharge<numtyp, acctyp> {
public:
CoulSlaterLong();
~CoulSlaterLong();
/// Clear any previous data and set up for a new LAMMPS run
/** \param max_nbors initial number of rows in the neighbor matrix
* \param cell_size cutoff + skin
* \param gpu_split fraction of particles handled by device
*
* Returns:
* - 0 if successful
* - -1 if fix gpu not found
* - -3 if there is an out of memory error
* - -4 if the GPU library was not compiled for GPU
* - -5 Double precision is not supported on card **/
int init(const int ntypes, double **scale,
const int nlocal, const int nall, const int max_nbors,
const int maxspecial, const double cell_size,
const double gpu_split, FILE *screen,
const double host_cut_coulsq, double *host_special_coul,
const double qqrd2e, const double g_ewald, const double lamda);
/// Send updated coeffs from host to device (to be compatible with fix adapt)
void reinit(const int ntypes, double **scale);
/// Clear all host and device data
/** \note This is called at the beginning of the init() routine **/
void clear();
/// Returns memory usage on device per atom
int bytes_per_atom(const int max_nbors) const;
/// Total host memory used by library for pair style
double host_memory_usage() const;
// --------------------------- TYPE DATA --------------------------
/// scale
UCL_D_Vec<numtyp> scale;
/// Special Coul values [0-3]
UCL_D_Vec<numtyp> sp_cl;
/// If atom type constants fit in shared memory, use fast kernels
bool shared_types;
/// Number of atom types
int _lj_types;
numtyp _cut_coulsq, _qqrd2e, _g_ewald, _lamda;
protected:
bool _allocated;
int loop(const int eflag, const int vflag);
};
}
#endif
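
The return codes documented for init() above propagate through csl_gpu_init() back to the calling pair style. A small illustrative helper (not part of the library) that maps those codes to messages, using only the code/meaning pairs listed in the header comment:

#include <cstdio>

// Illustrative mapping of the documented init() return codes to messages;
// the function itself is hypothetical, only the codes come from the header.
const char* csl_init_error(int code) {
  switch (code) {
    case  0: return "success";
    case -1: return "fix gpu not found";
    case -3: return "out of memory on the device";
    case -4: return "GPU library was not compiled for GPU";
    case -5: return "double precision is not supported on card";
    default: return "unknown error";
  }
}

int main() {
  printf("init() returned -3: %s\n", csl_init_error(-3));
  return 0;
}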


@@ -0,0 +1,145 @@
/***************************************************************************
coul_slater_long_ext.cpp
-------------------
Trung Nguyen (U Chicago)
Functions for LAMMPS access to coul/slater/long acceleration routines.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin : September 2023
email : ndactrung@gmail.com
***************************************************************************/
#include <iostream>
#include <cassert>
#include <cmath>
#include "lal_coul_slater_long.h"
using namespace std;
using namespace LAMMPS_AL;
static CoulSlaterLong<PRECISION,ACC_PRECISION> CSLMF;
// ---------------------------------------------------------------------------
// Allocate memory on host and device and copy constants to device
// ---------------------------------------------------------------------------
int csl_gpu_init(const int ntypes, double **host_scale,
const int inum, const int nall, const int max_nbors,
const int maxspecial, const double cell_size, int &gpu_mode,
FILE *screen, double host_cut_coulsq, double *host_special_coul,
const double qqrd2e, const double g_ewald, const double lamda) {
CSLMF.clear();
gpu_mode=CSLMF.device->gpu_mode();
double gpu_split=CSLMF.device->particle_split();
int first_gpu=CSLMF.device->first_device();
int last_gpu=CSLMF.device->last_device();
int world_me=CSLMF.device->world_me();
int gpu_rank=CSLMF.device->gpu_rank();
int procs_per_gpu=CSLMF.device->procs_per_gpu();
CSLMF.device->init_message(screen,"coul/slater/long",first_gpu,last_gpu);
bool message=false;
if (CSLMF.device->replica_me()==0 && screen)
message=true;
if (message) {
fprintf(screen,"Initializing Device and compiling on process 0...");
fflush(screen);
}
int init_ok=0;
if (world_me==0)
init_ok=CSLMF.init(ntypes, host_scale, inum, nall, max_nbors, maxspecial,
cell_size, gpu_split, screen, host_cut_coulsq,
host_special_coul, qqrd2e, g_ewald, lamda);
CSLMF.device->world_barrier();
if (message)
fprintf(screen,"Done.\n");
for (int i=0; i<procs_per_gpu; i++) {
if (message) {
if (last_gpu-first_gpu==0)
fprintf(screen,"Initializing Device %d on core %d...",first_gpu,i);
else
fprintf(screen,"Initializing Devices %d-%d on core %d...",first_gpu,
last_gpu,i);
fflush(screen);
}
if (gpu_rank==i && world_me!=0)
init_ok=CSLMF.init(ntypes, host_scale, inum, nall, max_nbors, maxspecial,
cell_size, gpu_split, screen, host_cut_coulsq,
host_special_coul, qqrd2e, g_ewald, lamda);
CSLMF.device->serialize_init();
if (message)
fprintf(screen,"Done.\n");
}
if (message)
fprintf(screen,"\n");
if (init_ok==0)
CSLMF.estimate_gpu_overhead();
return init_ok;
}
// ---------------------------------------------------------------------------
// Copy updated coeffs from host to device
// ---------------------------------------------------------------------------
void csl_gpu_reinit(const int ntypes, double **host_scale) {
int world_me=CSLMF.device->world_me();
int gpu_rank=CSLMF.device->gpu_rank();
int procs_per_gpu=CSLMF.device->procs_per_gpu();
if (world_me==0)
CSLMF.reinit(ntypes, host_scale);
CSLMF.device->world_barrier();
for (int i=0; i<procs_per_gpu; i++) {
if (gpu_rank==i && world_me!=0)
CSLMF.reinit(ntypes, host_scale);
CSLMF.device->serialize_init();
}
}
void csl_gpu_clear() {
CSLMF.clear();
}
int** csl_gpu_compute_n(const int ago, const int inum_full,
const int nall, double **host_x, int *host_type,
double *sublo, double *subhi, tagint *tag, int **nspecial,
tagint **special, const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time,
bool &success, double *host_q, double *boxlo,
double *prd) {
return CSLMF.compute(ago, inum_full, nall, host_x, host_type, sublo,
subhi, tag, nspecial, special, eflag, vflag, eatom,
vatom, host_start, ilist, jnum, cpu_time, success,
host_q, boxlo, prd);
}
void csl_gpu_compute(const int ago, const int inum_full, const int nall,
double **host_x, int *host_type, int *ilist, int *numj,
int **firstneigh, const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
const double cpu_time, bool &success, double *host_q,
const int nlocal, double *boxlo, double *prd) {
CSLMF.compute(ago,inum_full,nall,host_x,host_type,ilist,numj,
firstneigh,eflag,vflag,eatom,vatom,host_start,cpu_time,success,
host_q,nlocal,boxlo,prd);
}
double csl_gpu_bytes() {
return CSLMF.host_memory_usage();
}
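
Note on csl_gpu_init() above: initialization is deliberately staggered. World rank 0 initializes the device and compiles the kernels first (hence the "compiling on process 0" message), and after world_barrier() the remaining ranks that share a GPU initialize one per pass of the loop over procs_per_gpu, with serialize_init() between passes. A standalone mock of that ordering; the 4-ranks-on-one-GPU layout is illustrative only:

#include <cstdio>

// Mock of the staggered initialization order used in csl_gpu_init():
// world rank 0 initializes and compiles first, then the remaining ranks
// sharing the GPU initialize one per serialized pass.
int main() {
  const int procs_per_gpu = 4;   // MPI ranks sharing one device (illustrative)
  printf("world rank 0: init device + compile kernels\n");
  printf("-- world_barrier() --\n");
  for (int i = 0; i < procs_per_gpu; ++i) {
    // On pass i, the rank whose gpu_rank equals i (and is not world rank 0)
    // runs its own init(); serialize_init() keeps the passes ordered.
    if (i != 0)
      printf("pass %d: gpu_rank %d: init device\n", i, i);
    printf("-- serialize_init() --\n");
  }
  return 0;
}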