Added the GPU version of sph/heatconduction

This commit is contained in:
Trung Nguyen
2023-12-09 15:15:13 -06:00
parent 54a6143e9e
commit 3830711dec
7 changed files with 961 additions and 5 deletions


@@ -0,0 +1,222 @@
/***************************************************************************
sph_heatconduction.cpp
-------------------
Trung Nguyen (U Chicago)
Class for acceleration of the sph/heatconduction pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin : September 2023
email : ndactrung@gmail.com
***************************************************************************/
#if defined(USE_OPENCL)
#include "sph_heatconduction_cl.h"
#elif defined(USE_CUDART)
const char *sph_heatconduction=0;
#else
#include "sph_heatconduction_cubin.h"
#endif
#include "lal_sph_heatconduction.h"
#include <cassert>
namespace LAMMPS_AL {
#define SPHHeatConductionT SPHHeatConduction<numtyp, acctyp>
extern Device<PRECISION,ACC_PRECISION> device;
template <class numtyp, class acctyp>
SPHHeatConductionT::SPHHeatConduction() : BaseSPH<numtyp,acctyp>(), _allocated(false) {
_max_dE_size = 0;
}
template <class numtyp, class acctyp>
SPHHeatConductionT::~SPHHeatConduction() {
clear();
}
template <class numtyp, class acctyp>
int SPHHeatConductionT::bytes_per_atom(const int max_nbors) const {
return this->bytes_per_atom_atomic(max_nbors);
}
template <class numtyp, class acctyp>
int SPHHeatConductionT::init(const int ntypes,
double **host_cutsq, double **host_cut,
double **host_alpha, double* host_mass,
const int dimension, double *host_special_lj,
const int nlocal, const int nall,
const int max_nbors, const int maxspecial,
const double cell_size,
const double gpu_split, FILE *_screen) {
const int max_shared_types=this->device->max_shared_types();
int onetype=0;
#ifdef USE_OPENCL
if (maxspecial==0)
for (int i=1; i<ntypes; i++)
for (int j=i; j<ntypes; j++)
if (host_cutsq[i][j]>0) {
if (onetype>0)
onetype=-1;
else if (onetype==0)
onetype=i*max_shared_types+j;
}
if (onetype<0) onetype=0;
#endif
int success;
int extra_fields = 4; // round up to accommodate quadruples of numtyp values
// rho, esph
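// (only .x = rho and .y = esph are used; .z and .w stay zero -- see loop())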
success=this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,
gpu_split,_screen,sph_heatconduction,"k_sph_heatconduction",
onetype,extra_fields);
if (success!=0)
return success;
// If atom type constants fit in shared memory use fast kernel
int lj_types=ntypes;
shared_types=false;
if (lj_types<=max_shared_types && this->_block_size>=max_shared_types) {
lj_types=max_shared_types;
shared_types=true;
}
_lj_types=lj_types;
// Allocate a host write buffer for data initialization
UCL_H_Vec<numtyp> host_write(lj_types*lj_types*32,*(this->ucl_device),
UCL_WRITE_ONLY);
for (int i=0; i<lj_types*lj_types; i++)
host_write[i]=0.0;
coeff.alloc(lj_types*lj_types,*(this->ucl_device),UCL_READ_ONLY);
this->atom->type_pack4(ntypes,lj_types,coeff,host_write,host_alpha,
host_cut, host_cutsq);
UCL_H_Vec<numtyp> dview_mass(ntypes, *(this->ucl_device), UCL_WRITE_ONLY);
for (int i = 0; i < ntypes; i++)
dview_mass[i] = host_mass[i];
mass.alloc(ntypes,*(this->ucl_device), UCL_READ_ONLY);
ucl_copy(mass,dview_mass,false);
UCL_H_Vec<double> dview;
sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
dview.view(host_special_lj,4,*(this->ucl_device));
ucl_copy(sp_lj,dview,false);
// allocate the per-atom array dE
int ef_nall=nall;
if (ef_nall==0)
ef_nall=2000;
_max_dE_size=static_cast<int>(static_cast<double>(ef_nall)*1.10);
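// allocate with ~10% headroom; loop() regrows the buffer the same way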
dE.alloc(_max_dE_size,*(this->ucl_device),UCL_READ_WRITE,UCL_READ_WRITE);
_dimension = dimension;
_allocated=true;
this->_max_bytes=coeff.row_bytes()+dE.row_bytes()+sp_lj.row_bytes();
return 0;
}
template <class numtyp, class acctyp>
void SPHHeatConductionT::clear() {
if (!_allocated)
return;
_allocated=false;
coeff.clear();
mass.clear();
dE.clear();
sp_lj.clear();
this->clear_atomic();
}
template <class numtyp, class acctyp>
double SPHHeatConductionT::host_memory_usage() const {
return this->host_memory_usage_atomic()+sizeof(SPHHeatConduction<numtyp,acctyp>);
}
template <class numtyp, class acctyp>
void SPHHeatConductionT::update_dE(void **dE_ptr) {
*dE_ptr=dE.host.begin();
dE.update_host(_max_dE_size,false);
}
// ---------------------------------------------------------------------------
// Compute the per-atom change in thermal energy (dE)
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
int SPHHeatConductionT::loop(const int eflag, const int vflag) {
int nall = this->atom->nall();
// Resize dE array if necessary
if (nall > _max_dE_size) {
_max_dE_size=static_cast<int>(static_cast<double>(nall)*1.10);
dE.resize(_max_dE_size);
}
// signal that we need to transfer extra data from the host
this->atom->extra_data_unavail();
numtyp4 *pextra=reinterpret_cast<numtyp4*>(&(this->atom->extra[0]));
int n = 0;
int nstride = 1;
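// pack (rho, esph) into numtyp4 quadruples matching the extra_fields layout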
for (int i = 0; i < nall; i++) {
int idx = n+i*nstride;
numtyp4 v;
v.x = rho[i];
v.y = esph[i];
v.z = 0;
v.w = 0;
pextra[idx] = v;
}
this->atom->add_extra_data();
// Compute the block size and grid size to keep all cores busy
const int BX=this->block_size();
int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/
(BX/this->_threads_per_atom)));
int ainum=this->ans->inum();
int nbor_pitch=this->nbor->nbor_pitch();
this->time_pair.start();
if (shared_types) {
this->k_pair_sel->set_size(GX,BX);
this->k_pair_sel->run(&this->atom->x, &this->atom->extra, &coeff, &mass, &sp_lj,
&this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->ans->force, &this->ans->engv, &dE, &eflag, &vflag,
&ainum, &nbor_pitch, &this->atom->v, &_dimension, &this->_threads_per_atom);
} else {
this->k_pair.set_size(GX,BX);
this->k_pair.run(&this->atom->x, &this->atom->extra, &coeff, &mass,
&_lj_types, &sp_lj, &this->nbor->dev_nbor, &this->_nbor_data->begin(),
&this->ans->force, &this->ans->engv, &dE, &eflag, &vflag,
&ainum, &nbor_pitch, &this->atom->v, &_dimension, &this->_threads_per_atom);
}
this->time_pair.stop();
return GX;
}
// ---------------------------------------------------------------------------
// Get the extra data pointers from host
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
void SPHHeatConductionT::get_extra_data(double *host_rho, double *host_esph) {
rho = host_rho;
esph = host_esph;
}
template class SPHHeatConduction<PRECISION,ACC_PRECISION>;
}
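
Aside: loop() above regrows the per-atom buffer with ~10% headroom and packs (rho, esph) into 4-wide slots before each launch. A minimal stand-alone C++ sketch of that pattern, with a stand-in struct and made-up values (not library code):

#include <cstdio>
#include <vector>

// stand-in for the library's numtyp4 (float4/double4 depending on precision)
struct numtyp4 { float x, y, z, w; };

int main() {
  std::vector<float> rho  = {1.0f, 1.1f, 0.9f};
  std::vector<float> esph = {2.0f, 2.5f, 1.5f};
  const int nall = (int)rho.size();

  // grow-only buffer with ~10% headroom, mirroring _max_dE_size handling
  std::vector<numtyp4> extra;
  if (nall > (int)extra.size())
    extra.resize((size_t)(nall * 1.10));

  // pack (rho, esph) into quadruples; z/w are padding
  for (int i = 0; i < nall; i++)
    extra[i] = numtyp4{rho[i], esph[i], 0.0f, 0.0f};

  for (int i = 0; i < nall; i++)
    printf("atom %d: rho=%g esph=%g\n", i, extra[i].x, extra[i].y);
  return 0;
}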


@@ -0,0 +1,265 @@
// **************************************************************************
// sph_heatconduction.cu
// ---------------------
// Trung Dac Nguyen (U Chicago)
//
// Device code for acceleration of the sph/heatconduction pair style
//
// __________________________________________________________________________
// This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
// __________________________________________________________________________
//
// begin : September 2023
// email : ndactrung@gmail.com
// ***************************************************************************
#if defined(NV_KERNEL) || defined(USE_HIP)
#include "lal_aux_fun1.h"
#ifndef _DOUBLE_DOUBLE
_texture( pos_tex,float4);
_texture( vel_tex,float4);
#else
_texture_2d( pos_tex,int4);
_texture_2d( vel_tex,int4);
#endif
#else
#define pos_tex x_
#define vel_tex v_
#endif
#if (SHUFFLE_AVAIL == 0)
#define store_dE(dEacc, ii, inum, tid, t_per_atom, offset, dE) \
if (t_per_atom>1) { \
simdsync(); \
simd_reduce_add1(t_per_atom, red_acc, offset, tid, dEacc); \
} \
if (offset==0 && ii<inum) { \
dE[ii]=dEacc; \
}
#else
#define store_dE(dEacc, ii, inum, tid, t_per_atom, offset, dE) \
if (t_per_atom>1) { \
for (unsigned int s=t_per_atom/2; s>0; s>>=1) { \
dEacc += shfl_down(dEacc, s, t_per_atom); \
} \
} \
if (offset==0 && ii<inum) { \
dE[ii]=dEacc; \
}
#endif
/* ------------------------------------------------------------------------ */
__kernel void k_sph_heatconduction(const __global numtyp4 *restrict x_,
const __global numtyp4 *restrict extra,
const __global numtyp4 *restrict coeff,
const __global numtyp *restrict mass,
const int lj_types,
const __global numtyp *restrict sp_lj,
const __global int * dev_nbor,
const __global int * dev_packed,
__global acctyp3 *restrict ans,
__global acctyp *restrict engv,
__global acctyp *restrict dE,
const int eflag, const int vflag,
const int inum, const int nbor_pitch,
const __global numtyp4 *restrict v_,
const int dimension, const int t_per_atom) {
int tid, ii, offset;
atom_info(t_per_atom,ii,tid,offset);
int n_stride;
local_allocate_store_pair();
acctyp dEacc = (acctyp)0;
if (ii<inum) {
int i, numj, nbor, nbor_end;
nbor_info(dev_nbor,dev_packed,nbor_pitch,t_per_atom,ii,offset,i,numj,
n_stride,nbor_end,nbor);
numtyp4 ix; fetch4(ix,i,pos_tex); //x_[i];
int itype=ix.w;
numtyp mass_itype = mass[itype];
numtyp4 iv; fetch4(iv,i,vel_tex); //v_[i];
const numtyp4 extrai = extra[i];
numtyp rhoi = extrai.x;
numtyp esphi = extrai.y;
for ( ; nbor<nbor_end; nbor+=n_stride) {
ucl_prefetch(dev_packed+nbor+n_stride);
int j=dev_packed[nbor];
j &= NEIGHMASK;
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
int jtype=jx.w;
numtyp4 jv; fetch4(jv,j,vel_tex); //v_[j];
// Compute r12
numtyp delx = ix.x-jx.x;
numtyp dely = ix.y-jx.y;
numtyp delz = ix.z-jx.z;
numtyp rsq = delx*delx+dely*dely+delz*delz;
int mtype=itype*lj_types+jtype;
if (rsq<coeff[mtype].z) { // cutsq[itype][jtype]
numtyp mass_jtype = mass[jtype];
const numtyp coeffx=coeff[mtype].x; // alpha[itype][jtype]
const numtyp coeffy=coeff[mtype].y; // cut[itype][jtype]
const numtyp4 extraj = extra[j];
numtyp rhoj = extraj.x;
numtyp esphj = extraj.y;
numtyp h = coeffy; // cut[itype][jtype]
numtyp ih = ucl_recip(h); // (numtyp)1.0 / h;
numtyp ihsq = ih * ih;
numtyp ihcub = ihsq * ih;
numtyp wfd = h - ucl_sqrt(rsq);
if (dimension == 3) {
// Lucy Kernel, 3d
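// prefactor 25.066903536973515383 = 315/(4*pi)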
wfd = (numtyp)-25.066903536973515383 * wfd * wfd * ihsq * ihsq * ihsq * ih;
} else {
// Lucy Kernel, 2d
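// prefactor 19.098593171027440292 = 60/pi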
wfd = (numtyp)-19.098593171027440292 * wfd * wfd * ihsq * ihsq * ihsq;
}
// total thermal energy increment
numtyp D = coeffx; // alpha[itype][jtype] diffusion coefficient
numtyp deltaE = (numtyp)2.0 * mass_itype * mass_jtype / (mass_itype + mass_jtype);
deltaE *= (rhoi + rhoj) / (rhoi * rhoj);
deltaE *= D * (esphi - esphj) * wfd;
// change in thermal energy, desph[i]
dEacc += deltaE;
}
} // for nbor
} // if ii
store_dE(dEacc,ii,inum,tid,t_per_atom,offset,dE);
}
__kernel void k_sph_heatconduction_fast(const __global numtyp4 *restrict x_,
const __global numtyp4 *restrict extra,
const __global numtyp4 *restrict coeff_in,
const __global numtyp *restrict mass,
const __global numtyp *restrict sp_lj_in,
const __global int * dev_nbor,
const __global int * dev_packed,
__global acctyp3 *restrict ans,
__global acctyp *restrict engv,
__global acctyp *restrict dE,
const int eflag, const int vflag,
const int inum, const int nbor_pitch,
const __global numtyp4 *restrict v_,
const int dimension, const int t_per_atom) {
int tid, ii, offset;
atom_info(t_per_atom,ii,tid,offset);
#ifndef ONETYPE
__local numtyp4 coeff[MAX_SHARED_TYPES*MAX_SHARED_TYPES];
__local numtyp sp_lj[4];
if (tid<4) {
sp_lj[tid]=sp_lj_in[tid];
}
if (tid<MAX_SHARED_TYPES*MAX_SHARED_TYPES) {
coeff[tid]=coeff_in[tid];
}
__syncthreads();
#else
const numtyp coeffx=coeff_in[ONETYPE].x; // alpha[itype][jtype]
const numtyp coeffy=coeff_in[ONETYPE].y; // cut[itype][jtype]
const numtyp cutsq_p=coeff_in[ONETYPE].z; // cutsq[itype][jtype]
#endif
int n_stride;
local_allocate_store_pair();
acctyp dEacc = (acctyp)0;
if (ii<inum) {
int i, numj, nbor, nbor_end;
nbor_info(dev_nbor,dev_packed,nbor_pitch,t_per_atom,ii,offset,i,numj,
n_stride,nbor_end,nbor);
numtyp4 ix; fetch4(ix,i,pos_tex); //x_[i];
int iw=ix.w;
numtyp mass_itype = mass[iw];
#ifndef ONETYPE
int itype=fast_mul((int)MAX_SHARED_TYPES,iw);
#endif
numtyp4 iv; fetch4(iv,i,vel_tex); //v_[i];
int itag=iv.w;
const numtyp4 extrai = extra[i];
numtyp rhoi = extrai.x;
numtyp esphi = extrai.y;
for ( ; nbor<nbor_end; nbor+=n_stride) {
ucl_prefetch(dev_packed+nbor+n_stride);
int j=dev_packed[nbor];
#ifndef ONETYPE
j &= NEIGHMASK;
#endif
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
int jtype = jx.w;
#ifndef ONETYPE
int mtype=itype+jx.w;
const numtyp cutsq_p=coeff[mtype].z; // cutsq[itype][jtype]
#endif
numtyp4 jv; fetch4(jv,j,vel_tex); //v_[j];
int jtag=jv.w;
// Compute r12
numtyp delx = ix.x-jx.x;
numtyp dely = ix.y-jx.y;
numtyp delz = ix.z-jx.z;
numtyp rsq = delx*delx+dely*dely+delz*delz;
if (rsq<cutsq_p) {
numtyp mass_jtype = mass[jtype];
#ifndef ONETYPE
const numtyp coeffx=coeff[mtype].x; // alpha[itype][jtype]
const numtyp coeffy=coeff[mtype].y; // cut[itype][jtype]
#endif
const numtyp4 extraj = extra[j];
numtyp rhoj = extraj.x;
numtyp esphj = extraj.y;
numtyp h = coeffy; // cut[itype][jtype]
numtyp ih = ucl_recip(h); // (numtyp)1.0 / h;
numtyp ihsq = ih * ih;
numtyp ihcub = ihsq * ih;
numtyp wfd = h - ucl_sqrt(rsq);
if (dimension == 3) {
// Lucy Kernel, 3d
wfd = (numtyp)-25.066903536973515383 * wfd * wfd * ihsq * ihsq * ihsq * ih;
} else {
// Lucy Kernel, 2d
wfd = (numtyp)-19.098593171027440292 * wfd * wfd * ihsq * ihsq * ihsq;
}
// total thermal energy increment
numtyp D = coeffx; // alpha[itype][jtype] diffusion coefficient
numtyp deltaE = (numtyp)2.0 * mass_itype * mass_jtype / (mass_itype + mass_jtype);
deltaE *= (rhoi + rhoj) / (rhoi * rhoj);
deltaE *= D * (esphi - esphj) * wfd;
// change in thermal energy, desph[i]
dEacc += deltaE;
}
} // for nbor
} // if ii
store_dE(dEacc,ii,inum,tid,t_per_atom,offset,dE);
}
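
Aside: the SHUFFLE_AVAIL branch of store_dE above folds the t_per_atom partial sums with warp shuffles instead of shared memory. A stand-alone CUDA sketch of that reduction step, using plain __shfl_down_sync in place of the library's shfl_down wrapper (values are made up):

#include <cstdio>

__global__ void reduce_demo(float *out, int t_per_atom) {
  float acc = threadIdx.x + 1.0f;              // fake per-lane partial sum
  // same folding loop as the macro: halve the stride until lane 0 owns the sum
  for (unsigned int s = t_per_atom / 2; s > 0; s >>= 1)
    acc += __shfl_down_sync(0xffffffff, acc, s, t_per_atom);
  if (threadIdx.x % t_per_atom == 0)           // the offset==0 lane stores
    out[threadIdx.x / t_per_atom] = acc;
}

int main() {
  float *out;
  cudaMallocManaged(&out, 8 * sizeof(float));
  reduce_demo<<<1, 32>>>(out, 4);              // 8 groups of 4 lanes each
  cudaDeviceSynchronize();
  for (int g = 0; g < 8; g++) printf("group %d sum: %g\n", g, out[g]);
  cudaFree(out);
  return 0;
}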


@@ -0,0 +1,95 @@
/***************************************************************************
sph_heatconduction.h
--------------------
Trung Nguyen (U Chicago)
Class for acceleration of the sph/heatconduction pair style.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin : December 2023
email : ndactrung@gmail.com
***************************************************************************/
#ifndef LAL_SPH_HEATCONDUCTION_H
#define LAL_SPH_HEATCONDUCTION_H
#include "lal_base_sph.h"
namespace LAMMPS_AL {
template <class numtyp, class acctyp>
class SPHHeatConduction : public BaseSPH<numtyp, acctyp> {
public:
SPHHeatConduction();
~SPHHeatConduction();
/// Clear any previous data and set up for a new LAMMPS run
/** \param max_nbors initial number of rows in the neighbor matrix
* \param cell_size cutoff + skin
* \param gpu_split fraction of particles handled by device
*
* Returns:
* - 0 if successful
* - -1 if fix gpu not found
* - -3 if there is an out of memory error
* - -4 if the GPU library was not compiled for GPU
* - -5 Double precision is not supported on card **/
int init(const int ntypes, double **host_cutsq,
double** host_cut, double **host_alpha, double *host_mass,
const int dimension, double *host_special_lj,
const int nlocal, const int nall, const int max_nbors,
const int maxspecial, const double cell_size,
const double gpu_split, FILE *screen);
/// Clear all host and device data
/** \note This is called at the beginning of the init() routine **/
void clear();
/// Returns memory usage on device per atom
int bytes_per_atom(const int max_nbors) const;
/// Total host memory used by library for pair style
double host_memory_usage() const;
void get_extra_data(double *host_rho, double *host_esph);
/// copy desph from device to host
void update_dE(void **dE_ptr);
// --------------------------- TYPE DATA --------------------------
/// coeff.x = alpha, coeff.y = cut, coeff.z = cutsq
UCL_D_Vec<numtyp4> coeff;
/// per-type coeffs
UCL_D_Vec<numtyp> mass;
/// Special LJ values
UCL_D_Vec<numtyp> sp_lj;
/// If atom type constants fit in shared memory, use fast kernels
bool shared_types;
/// Number of atom types
int _lj_types;
/// Per-atom arrays
UCL_Vector<acctyp,acctyp> dE;
int _max_dE_size;
int _dimension;
/// pointer to host data
double *rho, *esph;
private:
bool _allocated;
int loop(const int eflag, const int vflag);
};
}
#endif
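
Aside: given the coeff packing documented above (coeff.x = alpha, coeff.y = cut, coeff.z = cutsq), the pairwise conduction increment both kernels accumulate can be checked against a plain C++ transcription with made-up inputs (a reference sketch, not library code):

#include <cmath>
#include <cstdio>

int main() {
  // coeff.x/.y/.z for one (itype,jtype) pair, plus per-type masses
  double alpha = 1.0, cut = 1.0, cutsq = cut * cut;
  double mass_i = 1.0, mass_j = 2.0;
  // per-atom inputs that the host packs into 'extra'
  double rhoi = 1.0, rhoj = 1.1, esphi = 2.0, esphj = 1.5;
  double rsq = 0.25;  // squared distance between atoms i and j

  if (rsq < cutsq) {
    double ih = 1.0 / cut;
    double ihsq = ih * ih;
    double wfd = cut - std::sqrt(rsq);
    // Lucy kernel derivative, 3d (prefactor 315/(4*pi)), as in the kernels
    wfd = -25.066903536973515383 * wfd * wfd * ihsq * ihsq * ihsq * ih;
    double deltaE = 2.0 * mass_i * mass_j / (mass_i + mass_j);
    deltaE *= (rhoi + rhoj) / (rhoi * rhoj);
    deltaE *= alpha * (esphi - esphj) * wfd;
    printf("dE contribution to atom i: %g\n", deltaE);
  }
  return 0;
}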


@@ -0,0 +1,129 @@
/***************************************************************************
sph_heatconduction_ext.cpp
--------------------------
Trung Dac Nguyen (U Chicago)
Functions for LAMMPS access to sph/heatconduction acceleration routines.
__________________________________________________________________________
This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
__________________________________________________________________________
begin : December 2023
email : ndactrung@gmail.com
***************************************************************************/
#include <iostream>
#include <cassert>
#include <cmath>
#include "lal_sph_heatconduction.h"
using namespace std;
using namespace LAMMPS_AL;
static SPHHeatConduction<PRECISION,ACC_PRECISION> SPHHeatConductionMF;
// ---------------------------------------------------------------------------
// Allocate memory on host and device and copy constants to device
// ---------------------------------------------------------------------------
int sph_heatconduction_gpu_init(const int ntypes, double **cutsq, double** host_cut,
double **host_alpha, double* host_mass, const int dimension,
double *special_lj, const int inum, const int nall,
const int max_nbors, const int maxspecial,
const double cell_size, int &gpu_mode, FILE *screen) {
SPHHeatConductionMF.clear();
gpu_mode=SPHHeatConductionMF.device->gpu_mode();
double gpu_split=SPHHeatConductionMF.device->particle_split();
int first_gpu=SPHHeatConductionMF.device->first_device();
int last_gpu=SPHHeatConductionMF.device->last_device();
int world_me=SPHHeatConductionMF.device->world_me();
int gpu_rank=SPHHeatConductionMF.device->gpu_rank();
int procs_per_gpu=SPHHeatConductionMF.device->procs_per_gpu();
SPHHeatConductionMF.device->init_message(screen,"sph/heatconduction",first_gpu,last_gpu);
bool message=false;
if (SPHHeatConductionMF.device->replica_me()==0 && screen)
message=true;
if (message) {
fprintf(screen,"Initializing Device and compiling on process 0...");
fflush(screen);
}
int init_ok=0;
if (world_me==0)
init_ok=SPHHeatConductionMF.init(ntypes, cutsq, host_cut, host_alpha, host_mass,
dimension, special_lj, inum, nall, max_nbors, maxspecial,
cell_size, gpu_split, screen);
SPHHeatConductionMF.device->world_barrier();
if (message)
fprintf(screen,"Done.\n");
for (int i=0; i<procs_per_gpu; i++) {
if (message) {
if (last_gpu-first_gpu==0)
fprintf(screen,"Initializing Device %d on core %d...",first_gpu,i);
else
fprintf(screen,"Initializing Devices %d-%d on core %d...",first_gpu,
last_gpu,i);
fflush(screen);
}
if (gpu_rank==i && world_me!=0)
init_ok=SPHHeatConductionMF.init(ntypes, cutsq, host_cut, host_alpha, host_mass,
dimension, special_lj, inum, nall, max_nbors, maxspecial,
cell_size, gpu_split, screen);
SPHHeatConductionMF.device->serialize_init();
if (message)
fprintf(screen,"Done.\n");
}
if (message)
fprintf(screen,"\n");
if (init_ok==0)
SPHHeatConductionMF.estimate_gpu_overhead();
return init_ok;
}
void sph_heatconduction_gpu_clear() {
SPHHeatConductionMF.clear();
}
int ** sph_heatconduction_gpu_compute_n(const int ago, const int inum_full, const int nall,
double **host_x, int *host_type, double *sublo,
double *subhi, tagint *host_tag, int **nspecial,
tagint **special, const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
int **ilist, int **jnum, const double cpu_time, bool &success,
double **host_v) {
return SPHHeatConductionMF.compute(ago, inum_full, nall, host_x, host_type, sublo,
subhi, host_tag, nspecial, special, eflag, vflag,
eatom, vatom, host_start, ilist, jnum, cpu_time, success,
host_v);
}
void sph_heatconduction_gpu_compute(const int ago, const int inum_full, const int nall,
double **host_x, int *host_type, int *ilist, int *numj,
int **firstneigh, const bool eflag, const bool vflag,
const bool eatom, const bool vatom, int &host_start,
const double cpu_time, bool &success, tagint *host_tag,
double **host_v, const int nlocal) {
SPHHeatConductionMF.compute(ago, inum_full, nall, host_x, host_type, ilist, numj,
firstneigh, eflag, vflag, eatom, vatom, host_start, cpu_time, success,
host_tag, host_v, nlocal);
}
void sph_heatconduction_gpu_get_extra_data(double *host_rho, double *host_esph) {
SPHHeatConductionMF.get_extra_data(host_rho, host_esph);
}
void sph_heatconduction_gpu_update_dE(void **dE_ptr) {
SPHHeatConductionMF.update_dE(dE_ptr);
}
double sph_heatconduction_gpu_bytes() {
return SPHHeatConductionMF.host_memory_usage();
}
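
Aside: a hedged sketch of the order in which a host pair style would typically call these hooks; only the sph_heatconduction_gpu_* names come from this file, everything else is a stand-in:

// init_style(), once per run:
//   sph_heatconduction_gpu_init(ntypes, cutsq, cut, alpha, mass, dimension,
//                               special_lj, inum, nall, max_nbors,
//                               maxspecial, cell_size, gpu_mode, screen);
// compute(), every timestep:
//   sph_heatconduction_gpu_get_extra_data(rho, esph);    // register inputs
//   sph_heatconduction_gpu_compute_n(...);  // device builds the neighbor list
//     // ... or sph_heatconduction_gpu_compute(...) with host-built neighbors
//   void *dE_ptr;
//   sph_heatconduction_gpu_update_dE(&dE_ptr);           // fetch per-atom dE
//   // accumulate dE_ptr contents into desph[] once the transfer completes
// destructor:
//   sph_heatconduction_gpu_clear();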


@@ -224,7 +224,7 @@ __kernel void k_sph_lj(const __global numtyp4 *restrict x_,
f.z+=delz*force;
// and change in density, drho[i]
-drhoEacc.x += massj * delVdotDelR * wfd;
+drhoEacc.x += mass_jtype * delVdotDelR * wfd;
// change in thermal energy, desph[i]
drhoEacc.y += deltaE;
@@ -313,7 +313,6 @@ __kernel void k_sph_lj_fast(const __global numtyp4 *restrict x_,
numtyp rhoi = extrai.x;
numtyp esphi = extrai.y;
numtyp cvi = extrai.z;
-numtyp massi= extrai.w;
// compute pressure of particle i with LJ EOS
numtyp fci[2];
@@ -331,6 +330,7 @@ __kernel void k_sph_lj_fast(const __global numtyp4 *restrict x_,
#endif
numtyp4 jx; fetch4(jx,j,pos_tex); //x_[j];
+int jtype = jx.w;
#ifndef ONETYPE
int mtype=itype+jx.w;
const numtyp cutsq_p=cutsq[mtype];
@@ -345,6 +345,7 @@ __kernel void k_sph_lj_fast(const __global numtyp4 *restrict x_,
numtyp rsq = delx*delx+dely*dely+delz*delz;
if (rsq<cutsq_p) {
+numtyp mass_jtype = mass[jtype];
#ifndef ONETYPE
const numtyp coeffx=coeff[mtype].x; // viscosity[itype][jtype]
const numtyp coeffy=coeff[mtype].y; // cut[itype][jtype]
@@ -353,7 +354,6 @@ __kernel void k_sph_lj_fast(const __global numtyp4 *restrict x_,
numtyp rhoj = extraj.x;
numtyp esphj = extraj.y;
numtyp cvj = extraj.z;
-numtyp massj= extraj.w;
numtyp h = coeffy; // cut[itype][jtype]
ih = ih = ucl_recip(h); // (numtyp)1.0 / h;
@@ -396,7 +396,7 @@ __kernel void k_sph_lj_fast(const __global numtyp4 *restrict x_,
}
// total pair force & thermal energy increment
-numtyp force = -massi * massj * (fi + fj + fvisc) * wfd;
+numtyp force = -mass_itype * mass_jtype * (fi + fj + fvisc) * wfd;
numtyp deltaE = (numtyp)-0.5 * force * delVdotDelR;
f.x+=delx*force;
@@ -404,7 +404,7 @@ __kernel void k_sph_lj_fast(const __global numtyp4 *restrict x_,
f.z+=delz*force;
// and change in density, drho[i]
-drhoEacc.x += massj * delVdotDelR * wfd;
+drhoEacc.x += mass_jtype * delVdotDelR * wfd;
// change in thermal energy, desph[i]
drhoEacc.y += deltaE;