diff --git a/src/GPU/pair_cg_cmm_coul_msm.cpp b/src/GPU/pair_cg_cmm_coul_msm.cpp deleted file mode 100644 index 5321945eb2..0000000000 --- a/src/GPU/pair_cg_cmm_coul_msm.cpp +++ /dev/null @@ -1,290 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. -------------------------------------------------------------------------- */ - -/* ---------------------------------------------------------------------- - CMM coarse grained MD potentials. Coulomb with MSM version. - Contributing author: Mike Brown -------------------------------------------------------------------------- */ - -#include "string.h" -#include "pair_cg_cmm_coul_msm.h" -#include "memory.h" -#include "atom.h" -#include "force.h" -#include "kspace.h" - -using namespace LAMMPS_NS; - -enum {C3=0,C4=1}; - -/* ---------------------------------------------------------------------- */ - -PairCGCMMCoulMSM::PairCGCMMCoulMSM(LAMMPS *lmp) : PairCMMCommon(lmp) -{ - respa_enable = 0; - single_enable = 0; -} - -/* ---------------------------------------------------------------------- */ - -PairCGCMMCoulMSM::~PairCGCMMCoulMSM() -{ - if (allocated_coul) { - memory->destroy(cut_lj); - memory->destroy(cut_ljsq); - memory->destroy(cut_coul); - memory->destroy(cut_coulsq); - allocated_coul=0; - } -} - -/* ---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSM::allocate() -{ - PairCMMCommon::allocate(); - allocated_coul = 1; - - int n = atom->ntypes; - - memory->create(cut_lj,n+1,n+1,"paircg:cut_lj"); - memory->create(cut_ljsq,n+1,n+1,"paircg:cut_ljsq"); - memory->create(cut_coul,n+1,n+1,"paircg:cut_coul"); - memory->create(cut_coulsq,n+1,n+1,"paircg:cut_coulsq"); -} - -/* ---------------------------------------------------------------------- - global settings -------------------------------------------------------------------------- */ - -void PairCGCMMCoulMSM::settings(int narg, char **arg) -{ - // strip off smoothing type and send args to parent - - if (narg < 1) error->all(FLERR,"Illegal pair_style command"); - - if (strcmp(arg[0],"C3") == 0) - _smooth = C3; - else if (strcmp(arg[0],"C4") == 0) - _smooth = C4; - else error->all(FLERR,"Illegal pair_style command"); - - PairCMMCommon::settings(narg-1,&arg[1]); -} - -/* ---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSM::init_style() -{ - if (!atom->q_flag) - error->all(FLERR,"Pair style cg/cut/coul/msm requires atom attribute q"); - - PairCMMCommon::init_style(); - _ia=-1.0/cut_coul_global; - _ia2=_ia*_ia; - _ia3=_ia2*_ia; - - cut_respa = NULL; -} - -/* ---------------------------------------------------------------------- */ - -double PairCGCMMCoulMSM::init_one(int i, int j) -{ - double mycut = PairCMMCommon::init_one(i,j); - - return mycut; -} - -/* ---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSM::compute(int eflag, int vflag) -{ - int i,j,ii,jj,inum,jnum,itype,jtype,itable; - double qtmp,xtmp,ytmp,ztmp,delx,dely,delz; - double fraction,table; - double 
r,r2inv,r6inv,forcecoul,forcelj,factor_coul,factor_lj; - double grij,expm2,prefactor,t,erfc; - int *ilist,*jlist,*numneigh,**firstneigh; - double rsq; - - if (eflag || vflag) ev_setup(eflag,vflag); - else evflag = vflag_fdotr = 0; - - double **x = atom->x; - double **f = atom->f; - double *q = atom->q; - int *type = atom->type; - int nlocal = atom->nlocal; - int nall = nlocal + atom->nghost; - double *special_coul = force->special_coul; - double *special_lj = force->special_lj; - double qqrd2e = force->qqrd2e; - int newton_pair = force->newton_pair; - - inum = list->inum; - ilist = list->ilist; - numneigh = list->numneigh; - firstneigh = list->firstneigh; - - // loop over neighbors of my atoms - - for (ii = 0; ii < inum; ii++) { - i = ilist[ii]; - qtmp = q[i]; - xtmp = x[i][0]; - ytmp = x[i][1]; - ztmp = x[i][2]; - itype = type[i]; - jlist = firstneigh[i]; - jnum = numneigh[i]; - - for (jj = 0; jj < jnum; jj++) { - j = jlist[jj]; - factor_lj = special_lj[sbmask(j)]; - factor_coul = special_coul[sbmask(j)]; - j &= NEIGHMASK; - - const double delx = xtmp - x[j][0]; - const double dely = ytmp - x[j][1]; - const double delz = ztmp - x[j][2]; - const double rsq = delx*delx + dely*dely + delz*delz; - const int jtype = type[j]; - - double evdwl = 0.0; - double ecoul = 0.0; - double fpair = 0.0; - - if (rsq < cutsq[itype][jtype]) { - const double r2inv = 1.0/rsq; - const int cgt=cg_type[itype][jtype]; - - double forcelj = 0.0; - double forcecoul = 0.0; - - if (rsq < cut_ljsq[itype][jtype]) { - forcelj=factor_lj; - if (eflag) evdwl=factor_lj; - - if (cgt == CG_LJ12_4) { - const double r4inv=r2inv*r2inv; - forcelj *= r4inv*(lj1[itype][jtype]*r4inv*r4inv - - lj2[itype][jtype]); - if (eflag) { - evdwl *= r4inv*(lj3[itype][jtype]*r4inv*r4inv - - lj4[itype][jtype]) - offset[itype][jtype]; - } - } else if (cgt == CG_LJ9_6) { - const double r3inv = r2inv*sqrt(r2inv); - const double r6inv = r3inv*r3inv; - forcelj *= r6inv*(lj1[itype][jtype]*r3inv - - lj2[itype][jtype]); - if (eflag) { - evdwl *= r6inv*(lj3[itype][jtype]*r3inv - - lj4[itype][jtype]) - offset[itype][jtype]; - } - } else { - const double r6inv = r2inv*r2inv*r2inv; - forcelj *= r6inv*(lj1[itype][jtype]*r6inv - - lj2[itype][jtype]); - if (eflag) { - evdwl *= r6inv*(lj3[itype][jtype]*r6inv - - lj4[itype][jtype]) - offset[itype][jtype]; - } - } - } - - if (rsq < cut_coulsq_global) { - const double ir = 1.0/sqrt(rsq); - const double prefactor = qqrd2e * qtmp*q[j]; - const double r2_ia2 = rsq*_ia2; - const double r4_ia4 = r2_ia2*r2_ia2; - if (_smooth==C3) { - forcecoul = prefactor*(_ia3*(-4.375+5.25*r2_ia2-1.875*r4_ia4)- - ir/rsq); - if (eflag) - ecoul = prefactor*(ir+_ia*(2.1875-2.1875*r2_ia2+ - 1.3125*r4_ia4-0.3125*r2_ia2*r4_ia4)); - } else { - const double r6_ia6 = r2_ia2*r4_ia4; - forcecoul = prefactor*(_ia3*(-6.5625+11.8125*r2_ia2-8.4375*r4_ia4+ - 2.1875*r6_ia6)-ir/rsq); - if (eflag) - ecoul = prefactor*(ir+_ia*(2.4609375-3.28125*r2_ia2+ - 2.953125*r4_ia4-1.40625*r6_ia6+ - 0.2734375*r4_ia4*r4_ia4)); - } - if (factor_coul < 1.0) { - forcecoul -= (1.0-factor_coul)*prefactor*ir; - if (eflag) ecoul -= (1.0-factor_coul)*prefactor*ir; - } - } - fpair = forcecoul + forcelj * r2inv; - - f[i][0] += delx*fpair; - f[i][1] += dely*fpair; - f[i][2] += delz*fpair; - if (newton_pair || j < nlocal) { - f[j][0] -= delx*fpair; - f[j][1] -= dely*fpair; - f[j][2] -= delz*fpair; - } - if (evflag) ev_tally(i,j,nlocal,newton_pair, - evdwl,ecoul,fpair,delx,dely,delz); - } - } - } - - if (vflag_fdotr) virial_fdotr_compute(); -} - -/* 
---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSM::write_restart(FILE *fp) -{ - write_restart_settings(fp); - PairCMMCommon::write_restart(fp); -} - -/* ---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSM::read_restart(FILE *fp) -{ - read_restart_settings(fp); - allocate(); - PairCMMCommon::read_restart(fp); -} - -/* ---------------------------------------------------------------------- */ - -double PairCGCMMCoulMSM::memory_usage() -{ - double bytes=PairCMMCommon::memory_usage(); - - int n = atom->ntypes; - - // cut_coul/cut_coulsq/cut_ljsq - bytes += (n+1)*(n+1)*sizeof(double)*4; - - return bytes; -} - -/* ---------------------------------------------------------------------- */ - -void *PairCGCMMCoulMSM::extract(char *str) -{ - if (strcmp(str,"cut_coul") == 0) return (void *) &cut_coul_global; - return NULL; -} - -/* ---------------------------------------------------------------------- */ diff --git a/src/GPU/pair_cg_cmm_coul_msm.h b/src/GPU/pair_cg_cmm_coul_msm.h deleted file mode 100644 index 91fd9c17a4..0000000000 --- a/src/GPU/pair_cg_cmm_coul_msm.h +++ /dev/null @@ -1,53 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. -------------------------------------------------------------------------- */ - -#ifdef PAIR_CLASS - -PairStyle(cg/cmm/coul/msm,PairCGCMMCoulMSM) - -#else - -#ifndef LMP_PAIR_CG_CMM_COUL_MSM_H -#define LMP_PAIR_CG_CMM_COUL_MSM_H - -#include "pair_cmm_common.h" - -namespace LAMMPS_NS { - -class PairCGCMMCoulMSM : public PairCMMCommon { - public: - PairCGCMMCoulMSM(class LAMMPS *); - ~PairCGCMMCoulMSM(); - - void compute(int, int); - void settings(int, char **); - void init_style(); - double init_one(int, int); - - void write_restart(FILE *); - void read_restart(FILE *); - - double memory_usage(); - - void *extract(char *str); - - protected: - void allocate(); - double _ia, _ia2, _ia3; - int _smooth; -}; - -} - -#endif -#endif diff --git a/src/GPU/pair_cg_cmm_coul_msm_gpu.cpp b/src/GPU/pair_cg_cmm_coul_msm_gpu.cpp deleted file mode 100644 index 0d079304f1..0000000000 --- a/src/GPU/pair_cg_cmm_coul_msm_gpu.cpp +++ /dev/null @@ -1,307 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. 
-------------------------------------------------------------------------- */ - -/* ---------------------------------------------------------------------- - Contributing author: Mike Brown (SNL) -------------------------------------------------------------------------- */ - -#include "math.h" -#include "stdio.h" -#include "stdlib.h" -#include "pair_cg_cmm_coul_msm_gpu.h" -#include "atom.h" -#include "atom_vec.h" -#include "comm.h" -#include "force.h" -#include "neighbor.h" -#include "neigh_list.h" -#include "integrate.h" -#include "memory.h" -#include "error.h" -#include "neigh_request.h" -#include "universe.h" -#include "update.h" -#include "domain.h" -#include "string.h" -#include "kspace.h" -#include "gpu_extra.h" - -// External functions from cuda library for atom decomposition - -int cmmm_gpu_init(const int ntypes, double **cutsq, int **cg_type, - double **host_lj1, double **host_lj2, double **host_lj3, - double **host_lj4, double **offset, double *special_lj, - const int nlocal, const int nall, const int max_nbors, - const int maxspecial, const double cell_size, int &gpu_mode, - FILE *screen, double **host_cut_ljsq, double host_cut_coulsq, - double *host_special_coul, const double qqrd2e, - const int smooth); -void cmmm_gpu_clear(); -int ** cmmm_gpu_compute_n(const int ago, const int inum, const int nall, - double **host_x, int *host_type, double *sublo, - double *subhi, int *tag, int **nspecial, - int **special, const bool eflag, const bool vflag, - const bool eatom, const bool vatom, int &host_start, - int **ilist, int **jnum, const double cpu_time, - bool &success, double *host_q, double *boxlo, - double *prd); -void cmmm_gpu_compute(const int ago, const int inum, const int nall, - double **host_x, int *host_type, int *ilist, int *numj, - int **firstneigh, const bool eflag, const bool vflag, - const bool eatom, const bool vatom, int &host_start, - const double cpu_time, bool &success, double *host_q, - const int nlocal, double *boxlo, double *prd); -double cmmm_gpu_bytes(); - -using namespace LAMMPS_NS; - -enum {C3=0,C4=1}; - -/* ---------------------------------------------------------------------- */ - -PairCGCMMCoulMSMGPU::PairCGCMMCoulMSMGPU(LAMMPS *lmp) : PairCGCMMCoulMSM(lmp), - gpu_mode(GPU_PAIR) -{ - respa_enable = 0; - cpu_time = 0.0; -} - -/* ---------------------------------------------------------------------- - free all arrays -------------------------------------------------------------------------- */ - -PairCGCMMCoulMSMGPU::~PairCGCMMCoulMSMGPU() -{ - cmmm_gpu_clear(); -} - -/* ---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSMGPU::compute(int eflag, int vflag) -{ - if (eflag || vflag) ev_setup(eflag,vflag); - else evflag = vflag_fdotr = 0; - - int nall = atom->nlocal + atom->nghost; - int inum, host_start; - - bool success = true; - int *ilist, *numneigh, **firstneigh; - if (gpu_mode == GPU_NEIGH) { - inum = atom->nlocal; - firstneigh = cmmm_gpu_compute_n(neighbor->ago, inum, nall, atom->x, - atom->type, domain->sublo, domain->subhi, - atom->tag, atom->nspecial, atom->special, - eflag, vflag, eflag_atom, vflag_atom, - host_start, &ilist, &numneigh, cpu_time, - success, atom->q, domain->boxlo, - domain->prd); - } else { - inum = list->inum; - ilist = list->ilist; - numneigh = list->numneigh; - firstneigh = list->firstneigh; - cmmm_gpu_compute(neighbor->ago, inum, nall, atom->x, atom->type, - ilist, numneigh, firstneigh, eflag, vflag, eflag_atom, - vflag_atom, host_start, cpu_time, success, atom->q, - atom->nlocal, 
domain->boxlo, domain->prd); - } - if (!success) - error->one(FLERR,"Out of memory on GPGPU"); - - if (host_startnewton_pair) - error->all(FLERR,"Cannot use newton pair with cg/cmm/coul/msm/gpu pair style"); - - // Repeat cutsq calculation because done after call to init_style - double maxcut = -1.0; - double cut; - for (int i = 1; i <= atom->ntypes; i++) { - for (int j = i; j <= atom->ntypes; j++) { - if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) { - cut = init_one(i,j); - cut *= cut; - if (cut > maxcut) - maxcut = cut; - cutsq[i][j] = cutsq[j][i] = cut; - } else - cutsq[i][j] = cutsq[j][i] = 0.0; - } - } - double cell_size = sqrt(maxcut) + neighbor->skin; - - int maxspecial=0; - if (atom->molecular) - maxspecial=atom->maxspecial; - int success = cmmm_gpu_init(atom->ntypes+1, cutsq, cg_type, lj1, lj2, lj3, - lj4, offset, force->special_lj, atom->nlocal, - atom->nlocal+atom->nghost, 300, maxspecial, - cell_size, gpu_mode, screen, cut_ljsq, - cut_coulsq_global, force->special_coul, - force->qqrd2e,_smooth); - GPU_EXTRA::check_flag(success,error,world); - - if (gpu_mode != GPU_NEIGH) { - int irequest = neighbor->request(this); - neighbor->requests[irequest]->half = 0; - neighbor->requests[irequest]->full = 1; - } -} - -/* ---------------------------------------------------------------------- */ - -double PairCGCMMCoulMSMGPU::memory_usage() -{ - double bytes = Pair::memory_usage(); - return bytes + cmmm_gpu_bytes(); -} - -/* ---------------------------------------------------------------------- */ - -void PairCGCMMCoulMSMGPU::cpu_compute(int start, int inum, int eflag, - int vflag, int *ilist, int *numneigh, - int **firstneigh) -{ - int i,j,ii,jj,jnum,itype,jtype,itable; - double qtmp,xtmp,ytmp,ztmp,delx,dely,delz; - double fraction,table; - double r,r2inv,r6inv,forcecoul,forcelj,factor_coul,factor_lj; - double grij,expm2,prefactor,t,erfc; - int *jlist; - double rsq; - - double **x = atom->x; - double **f = atom->f; - double *q = atom->q; - int *type = atom->type; - int nlocal = atom->nlocal; - int nall = nlocal + atom->nghost; - double *special_coul = force->special_coul; - double *special_lj = force->special_lj; - double qqrd2e = force->qqrd2e; - - // loop over neighbors of my atoms - - for (ii = start; ii < inum; ii++) { - i = ilist[ii]; - qtmp = q[i]; - xtmp = x[i][0]; - ytmp = x[i][1]; - ztmp = x[i][2]; - itype = type[i]; - jlist = firstneigh[i]; - jnum = numneigh[i]; - - for (jj = 0; jj < jnum; jj++) { - j = jlist[jj]; - factor_lj = special_lj[sbmask(j)]; - factor_coul = special_coul[sbmask(j)]; - j &= NEIGHMASK; - - const double delx = xtmp - x[j][0]; - const double dely = ytmp - x[j][1]; - const double delz = ztmp - x[j][2]; - const double rsq = delx*delx + dely*dely + delz*delz; - const int jtype = type[j]; - - double evdwl = 0.0; - double ecoul = 0.0; - double fpair = 0.0; - - if (rsq < cutsq[itype][jtype]) { - const double r2inv = 1.0/rsq; - const int cgt=cg_type[itype][jtype]; - - double forcelj = 0.0; - double forcecoul = 0.0; - - if (rsq < cut_ljsq[itype][jtype]) { - forcelj=factor_lj; - if (eflag) evdwl=factor_lj; - - if (cgt == CG_LJ12_4) { - const double r4inv=r2inv*r2inv; - forcelj *= r4inv*(lj1[itype][jtype]*r4inv*r4inv - - lj2[itype][jtype]); - if (eflag) { - evdwl *= r4inv*(lj3[itype][jtype]*r4inv*r4inv - - lj4[itype][jtype]) - offset[itype][jtype]; - } - } else if (cgt == CG_LJ9_6) { - const double r3inv = r2inv*sqrt(r2inv); - const double r6inv = r3inv*r3inv; - forcelj *= r6inv*(lj1[itype][jtype]*r3inv - - lj2[itype][jtype]); - if (eflag) { - 
evdwl *= r6inv*(lj3[itype][jtype]*r3inv - - lj4[itype][jtype]) - offset[itype][jtype]; - } - } else { - const double r6inv = r2inv*r2inv*r2inv; - forcelj *= r6inv*(lj1[itype][jtype]*r6inv - - lj2[itype][jtype]); - if (eflag) { - evdwl *= r6inv*(lj3[itype][jtype]*r6inv - - lj4[itype][jtype]) - offset[itype][jtype]; - } - } - } - - if (rsq < cut_coulsq_global) { - const double ir = 1.0/sqrt(rsq); - const double prefactor = qqrd2e * qtmp*q[j]; - const double r2_ia2 = rsq*_ia2; - const double r4_ia4 = r2_ia2*r2_ia2; - if (_smooth==C3) { - forcecoul = prefactor*(_ia3*(-4.375+5.25*r2_ia2-1.875*r4_ia4)- - ir/rsq); - if (eflag) - ecoul = prefactor*(ir+_ia*(2.1875-2.1875*r2_ia2+ - 1.3125*r4_ia4-0.3125*r2_ia2*r4_ia4)); - } else { - const double r6_ia6 = r2_ia2*r4_ia4; - forcecoul = prefactor*(_ia3*(-6.5625+11.8125*r2_ia2-8.4375*r4_ia4+ - 2.1875*r6_ia6)-ir/rsq); - if (eflag) - ecoul = prefactor*(ir+_ia*(2.4609375-3.28125*r2_ia2+ - 2.953125*r4_ia4-1.40625*r6_ia6+ - 0.2734375*r4_ia4*r4_ia4)); - } - if (factor_coul < 1.0) { - forcecoul -= (1.0-factor_coul)*prefactor*ir; - if (eflag) ecoul -= (1.0-factor_coul)*prefactor*ir; - } - } - fpair = forcecoul + forcelj * r2inv; - - f[i][0] += delx*fpair; - f[i][1] += dely*fpair; - f[i][2] += delz*fpair; - if (evflag) ev_tally_full(i,evdwl,ecoul,fpair,delx,dely,delz); - } - } - } -} diff --git a/src/GPU/pair_cg_cmm_coul_msm_gpu.h b/src/GPU/pair_cg_cmm_coul_msm_gpu.h deleted file mode 100644 index cfb36e0cff..0000000000 --- a/src/GPU/pair_cg_cmm_coul_msm_gpu.h +++ /dev/null @@ -1,47 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. -------------------------------------------------------------------------- */ - -#ifdef PAIR_CLASS - -PairStyle(cg/cmm/coul/msm/gpu,PairCGCMMCoulMSMGPU) - -#else - -#ifndef LMP_PAIR_CG_CMM_COUL_MSM_GPU_H -#define LMP_PAIR_CG_CMM_COUL_MSM_GPU_H - -#include "pair_cg_cmm_coul_msm.h" - -namespace LAMMPS_NS { - -class PairCGCMMCoulMSMGPU : public PairCGCMMCoulMSM { - public: - PairCGCMMCoulMSMGPU(LAMMPS *lmp); - ~PairCGCMMCoulMSMGPU(); - void cpu_compute(int, int, int, int, int *, int *, int **); - void compute(int, int); - void init_style(); - double memory_usage(); - - enum { GPU_PAIR, GPU_NEIGH }; - - private: - int gpu_mode; - double cpu_time; - int *gpulist; -}; - -} -#endif -#endif - diff --git a/src/GPU/pair_lj_cut_tgpu.cpp b/src/GPU/pair_lj_cut_tgpu.cpp deleted file mode 100644 index 7b9aa9af0b..0000000000 --- a/src/GPU/pair_lj_cut_tgpu.cpp +++ /dev/null @@ -1,249 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. 
-------------------------------------------------------------------------- */ - -/* ---------------------------------------------------------------------- - Contributing author: Mike Brown (SNL) -------------------------------------------------------------------------- */ - -#include "math.h" -#include "stdio.h" -#include "stdlib.h" -#include "pair_lj_cut_tgpu.h" -#include "atom.h" -#include "atom_vec.h" -#include "comm.h" -#include "force.h" -#include "neighbor.h" -#include "neigh_list.h" -#include "integrate.h" -#include "memory.h" -#include "error.h" -#include "neigh_request.h" -#include "universe.h" -#include "update.h" -#include "domain.h" -#include "string.h" -#include "gpu_extra.h" - -// External functions from cuda library for atom decomposition - -int ljl_gpu_init(const int ntypes, double **cutsq, double **host_lj1, - double **host_lj2, double **host_lj3, double **host_lj4, - double **offset, double *special_lj, const int nlocal, - const int nall, const int max_nbors, const int maxspecial, - const double cell_size, int &gpu_mode, FILE *screen); -void ljl_gpu_clear(); -int ** ljl_gpu_compute_n(const int ago, const int inum, - const int nall, double **host_x, int *host_type, - double *sublo, double *subhi, int *tag, int **nspecial, - int **special, const bool eflag, const bool vflag, - const bool eatom, const bool vatom, int &host_start, - int **ilist, int **jnum, - const double cpu_time, bool &success); -void ljl_gpu_compute(const int ago, const int inum, const int nall, - double **host_x, int *host_type, int *ilist, int *numj, - int **firstneigh, const bool eflag, const bool vflag, - const bool eatom, const bool vatom, int &host_start, - const double cpu_time, bool &success); -double ljl_gpu_bytes(); - -using namespace LAMMPS_NS; - -/* ---------------------------------------------------------------------- */ - -PairLJCutTGPU::PairLJCutTGPU(LAMMPS *lmp) : PairLJCut(lmp), gpu_mode(GPU_PAIR) -{ - respa_enable = 0; - cpu_time = 0.0; - - omp = new PairOMPGPU(lmp); -} - -/* ---------------------------------------------------------------------- - free all arrays -------------------------------------------------------------------------- */ - -PairLJCutTGPU::~PairLJCutTGPU() -{ - ljl_gpu_clear(); - delete omp; -} - -/* ---------------------------------------------------------------------- */ - -void PairLJCutTGPU::compute(int eflag, int vflag) -{ - if (eflag || vflag) { - ev_setup(eflag,vflag); - omp->ev_setup_thr(eflag,vflag,eflag_either,eflag_global,eflag_atom, - vflag_either,vflag_global,vflag_atom); - } else evflag = vflag_fdotr = 0; - - int nall = atom->nlocal + atom->nghost; - int inum, host_start; - - bool success = true; - int *ilist, *numneigh, **firstneigh; - if (gpu_mode == GPU_NEIGH) { - inum = atom->nlocal; - firstneigh = ljl_gpu_compute_n(neighbor->ago, inum, nall, - atom->x, atom->type, domain->sublo, - domain->subhi, atom->tag, atom->nspecial, - atom->special, eflag, vflag, eflag_atom, - vflag_atom, host_start, &ilist, &numneigh, - cpu_time, success); - } else { - inum = list->inum; - ilist = list->ilist; - numneigh = list->numneigh; - firstneigh = list->firstneigh; - ljl_gpu_compute(neighbor->ago, inum, nall, atom->x, atom->type, - ilist, numneigh, firstneigh, eflag, vflag, eflag_atom, - vflag_atom, host_start, cpu_time, success); - } - if (!success) - error->one(FLERR,"Out of memory on GPGPU"); - - if (host_startev_reduce_thr(*this); - cpu_time = MPI_Wtime() - cpu_time; - } -} - -/* ---------------------------------------------------------------------- - init specific to 
this pair style -------------------------------------------------------------------------- */ - -void PairLJCutTGPU::init_style() -{ - omp->init_style(); - cut_respa = NULL; - - if (force->newton_pair) - error->all(FLERR,"Cannot use newton pair with lj/cut/tgpu pair style"); - - // Repeat cutsq calculation because done after call to init_style - double maxcut = -1.0; - double cut; - for (int i = 1; i <= atom->ntypes; i++) { - for (int j = i; j <= atom->ntypes; j++) { - if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) { - cut = init_one(i,j); - cut *= cut; - if (cut > maxcut) - maxcut = cut; - cutsq[i][j] = cutsq[j][i] = cut; - } else - cutsq[i][j] = cutsq[j][i] = 0.0; - } - } - double cell_size = sqrt(maxcut) + neighbor->skin; - - int maxspecial=0; - if (atom->molecular) - maxspecial=atom->maxspecial; - int success = ljl_gpu_init(atom->ntypes+1, cutsq, lj1, lj2, lj3, lj4, - offset, force->special_lj, atom->nlocal, - atom->nlocal+atom->nghost, 300, maxspecial, - cell_size, gpu_mode, screen); - GPU_EXTRA::check_flag(success,error,world); - - if (gpu_mode != GPU_NEIGH) { - int irequest = neighbor->request(this); - neighbor->requests[irequest]->half = 0; - neighbor->requests[irequest]->full = 1; - } -} - -/* ---------------------------------------------------------------------- */ - -double PairLJCutTGPU::memory_usage() -{ - double bytes = Pair::memory_usage(); - return bytes + ljl_gpu_bytes()+omp->memory_usage(); -} - -/* ---------------------------------------------------------------------- */ - -void PairLJCutTGPU::cpu_compute(int start, int inum, int eflag, int vflag, - int *ilist, int *numneigh, int **firstneigh) -{ - int i,j,ii,jj,jnum,itype,jtype; - double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair; - double rsq,r2inv,r6inv,forcelj,factor_lj; - int *jlist; - - double **x = atom->x; - int *type = atom->type; - int nlocal = atom->nlocal; - int nall = nlocal + atom->nghost; - double *special_lj = force->special_lj; - - // loop over neighbors of my atoms - int iifrom, iito, tid; - double **f = omp->loop_setup_thr_full(atom->f,iifrom,iito,tid,start,inum, - nall); - for (ii = iifrom; ii < iito; ii++) { - i = ilist[ii]; - xtmp = x[i][0]; - ytmp = x[i][1]; - ztmp = x[i][2]; - itype = type[i]; - jlist = firstneigh[i]; - jnum = numneigh[i]; - - for (jj = 0; jj < jnum; jj++) { - j = jlist[jj]; - factor_lj = special_lj[sbmask(j)]; - j &= NEIGHMASK; - - - delx = xtmp - x[j][0]; - dely = ytmp - x[j][1]; - delz = ztmp - x[j][2]; - rsq = delx*delx + dely*dely + delz*delz; - jtype = type[j]; - - if (rsq < cutsq[itype][jtype]) { - r2inv = 1.0/rsq; - r6inv = r2inv*r2inv*r2inv; - forcelj = r6inv * (lj1[itype][jtype]*r6inv - lj2[itype][jtype]); - fpair = factor_lj*forcelj*r2inv; - - f[i][0] += delx*fpair; - f[i][1] += dely*fpair; - f[i][2] += delz*fpair; - - if (eflag) { - evdwl = r6inv*(lj3[itype][jtype]*r6inv-lj4[itype][jtype]) - - offset[itype][jtype]; - evdwl *= factor_lj; - } - - #ifdef _OPENMP - if (evflag) omp->ev_tally_full_thr(i,evdwl,0.0,fpair,delx,dely,delz, - tid); - #else - if (evflag) ev_tally_full(i,evdwl,0.0,fpair,delx,dely,delz); - #endif - } - } - } -} diff --git a/src/GPU/pair_lj_cut_tgpu.h b/src/GPU/pair_lj_cut_tgpu.h deleted file mode 100644 index 3393bd06b5..0000000000 --- a/src/GPU/pair_lj_cut_tgpu.h +++ /dev/null @@ -1,50 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov 
- - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. -------------------------------------------------------------------------- */ - -#ifdef PAIR_CLASS - -PairStyle(lj/cut/tgpu,PairLJCutTGPU) - -#else - -#ifndef LMP_PAIR_LJ_LIGHT_TGPU_H -#define LMP_PAIR_LJ_LIGHT_TGPU_H - -#include "pair_lj_cut.h" -#include "pair_omp_gpu.h" - -namespace LAMMPS_NS { - -class PairLJCutTGPU : public PairLJCut { - public: - PairLJCutTGPU(LAMMPS *lmp); - ~PairLJCutTGPU(); - void cpu_compute(int, int, int, int, int *, int *, int **); - void compute(int, int); - void init_style(); - double memory_usage(); - - enum { GPU_PAIR, GPU_NEIGH }; - - private: - int gpu_mode; - double cpu_time; - int *gpulist; - - PairOMPGPU *omp; -}; - -} -#endif -#endif - diff --git a/src/GPU/pair_omp_gpu.cpp b/src/GPU/pair_omp_gpu.cpp deleted file mode 100644 index dc6229fd5e..0000000000 --- a/src/GPU/pair_omp_gpu.cpp +++ /dev/null @@ -1,665 +0,0 @@ -/* ---------------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. -------------------------------------------------------------------------- */ - -/* ---------------------------------------------------------------------- - Contributing author: Axel Kohlmeyer (Temple U) - Modified by Mike for use with GPU library -------------------------------------------------------------------------- */ - -#if defined(_OPENMP) - -#include "atom.h" -#include "comm.h" -#include "force.h" -#include "pair_omp_gpu.h" -#include "memory.h" - -using namespace LAMMPS_NS; - -/* ---------------------------------------------------------------------- */ - -PairOMPGPU::PairOMPGPU(LAMMPS *lmp) : Pointers(lmp) -{ - eng_vdwl_thr = NULL; - eng_coul_thr = NULL; - virial_thr = NULL; - eatom_thr = NULL; - vatom_thr = NULL; - f_thr = NULL; -} - -/* ---------------------------------------------------------------------- */ - -PairOMPGPU::~PairOMPGPU() -{ - free_mem(); -} - -/* ---------------------------------------------------------------------- - free any allocated memory -------------------------------------------------------------------------- */ - -void PairOMPGPU::free_mem() { - memory->sfree(eng_vdwl_thr); - memory->sfree(eng_coul_thr); - memory->destroy(virial_thr); - memory->destroy(eatom_thr); - memory->destroy(vatom_thr); - memory->destroy(f_thr); - eng_vdwl_thr = NULL; - eng_coul_thr = NULL; - virial_thr = NULL; - eatom_thr = NULL; - vatom_thr = NULL; - f_thr = NULL; - _nmax = 0; -} - -/* ---------------------------------------------------------------------- - init specific to this pair style -------------------------------------------------------------------------- */ - -void PairOMPGPU::init_style() -{ - free_mem(); - - #pragma omp parallel - { - int th_id = omp_get_thread_num(); - #pragma omp barrier - if (th_id == 0) - _nthreads = omp_get_num_threads(); - } - - // for hybrid OpenMP/MPI we need 
multiple copies - // of some accumulators to avoid race conditions - - eng_vdwl_thr = (double *)memory->smalloc(_nthreads*sizeof(double), - "pair:eng_vdwl_thr"); - eng_coul_thr = (double *)memory->smalloc(_nthreads*sizeof(double), - "pair:eng_coul_thr"); - memory->create(virial_thr,_nthreads,6,"pair:virial_thr"); - - maxeatom_thr = maxvatom_thr = 0; -} - -/* ---------------------------------------------------------------------- - setup for energy, virial computation. additional code for multi-threading - see integrate::ev_set() for values of eflag (0-3) and vflag (0-6) -------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_setup_thr(int eflag, int vflag, int _eflag_either, - int _eflag_global, int _eflag_atom, - int _vflag_either, int _vflag_global, - int _vflag_atom) -{ - eflag_either=_eflag_either; - eflag_global=_eflag_global; - eflag_atom=_eflag_atom; - vflag_either=_vflag_either; - vflag_global=_vflag_global; - vflag_atom=_vflag_atom; - int i,n,t; - - // reallocate per-atom arrays if necessary - if (eflag_atom && atom->nmax > maxeatom_thr) { - maxeatom_thr = atom->nmax; - memory->destroy(eatom_thr); - memory->create(eatom_thr,_nthreads,maxeatom_thr,"pair:eatom_thr"); - } - if (vflag_atom && atom->nmax > maxvatom_thr) { - maxvatom_thr = atom->nmax; - memory->destroy(vatom_thr); - memory->create(vatom_thr,_nthreads,maxvatom_thr,6,"pair:vatom_thr"); - } - - // zero per thread accumulators - // use force->newton instead of newton_pair - // b/c some bonds/dihedrals call pair::ev_tally with pairwise info - const int ntotal = (force->newton) ? - (atom->nlocal + atom->nghost) : atom->nlocal; - for (t = 0; t < _nthreads; ++t) { - if (eflag_global) eng_vdwl_thr[t] = eng_coul_thr[t] = 0.0; - if (vflag_global) for (i = 0; i < 6; ++i) virial_thr[t][i] = 0.0; - if (eflag_atom) { - for (i = 0; i < ntotal; ++i) eatom_thr[t][i] = 0.0; - } - if (vflag_atom) { - for (i = 0; i < ntotal; ++i) { - vatom_thr[t][i][0] = 0.0; - vatom_thr[t][i][1] = 0.0; - vatom_thr[t][i][2] = 0.0; - vatom_thr[t][i][3] = 0.0; - vatom_thr[t][i][4] = 0.0; - vatom_thr[t][i][5] = 0.0; - } - } - } -} - -/* ---------------------------------------------------------------------- - tally eng_vdwl and virial into per thread global and per-atom accumulators - need i < nlocal test since called by bond_quartic and dihedral_charmm -------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_tally_thr(int i, int j, int nlocal, int newton_pair, - double evdwl, double ecoul, double fpair, - double delx, double dely, double delz, int tid) -{ - double evdwlhalf,ecoulhalf,epairhalf,v[6]; - - if (eflag_either) { - if (eflag_global) { - if (newton_pair) { - eng_vdwl_thr[tid] += evdwl; - eng_coul_thr[tid] += ecoul; - } else { - evdwlhalf = 0.5*evdwl; - ecoulhalf = 0.5*ecoul; - if (i < nlocal) { - eng_vdwl_thr[tid] += evdwlhalf; - eng_coul_thr[tid] += ecoulhalf; - } - if (j < nlocal) { - eng_vdwl_thr[tid] += evdwlhalf; - eng_coul_thr[tid] += ecoulhalf; - } - } - } - if (eflag_atom) { - epairhalf = 0.5 * (evdwl + ecoul); - if (newton_pair || i < nlocal) eatom_thr[tid][i] += epairhalf; - if (newton_pair || j < nlocal) eatom_thr[tid][j] += epairhalf; - } - } - - if (vflag_either) { - v[0] = delx*delx*fpair; - v[1] = dely*dely*fpair; - v[2] = delz*delz*fpair; - v[3] = delx*dely*fpair; - v[4] = delx*delz*fpair; - v[5] = dely*delz*fpair; - - if (vflag_global) { - if (newton_pair) { - virial_thr[tid][0] += v[0]; - virial_thr[tid][1] += v[1]; - virial_thr[tid][2] += v[2]; - 
virial_thr[tid][3] += v[3]; - virial_thr[tid][4] += v[4]; - virial_thr[tid][5] += v[5]; - } else { - if (i < nlocal) { - virial_thr[tid][0] += 0.5*v[0]; - virial_thr[tid][1] += 0.5*v[1]; - virial_thr[tid][2] += 0.5*v[2]; - virial_thr[tid][3] += 0.5*v[3]; - virial_thr[tid][4] += 0.5*v[4]; - virial_thr[tid][5] += 0.5*v[5]; - } - if (j < nlocal) { - virial_thr[tid][0] += 0.5*v[0]; - virial_thr[tid][1] += 0.5*v[1]; - virial_thr[tid][2] += 0.5*v[2]; - virial_thr[tid][3] += 0.5*v[3]; - virial_thr[tid][4] += 0.5*v[4]; - virial_thr[tid][5] += 0.5*v[5]; - } - } - } - - if (vflag_atom) { - if (newton_pair || i < nlocal) { - vatom_thr[tid][i][0] += 0.5*v[0]; - vatom_thr[tid][i][1] += 0.5*v[1]; - vatom_thr[tid][i][2] += 0.5*v[2]; - vatom_thr[tid][i][3] += 0.5*v[3]; - vatom_thr[tid][i][4] += 0.5*v[4]; - vatom_thr[tid][i][5] += 0.5*v[5]; - } - if (newton_pair || j < nlocal) { - vatom_thr[tid][j][0] += 0.5*v[0]; - vatom_thr[tid][j][1] += 0.5*v[1]; - vatom_thr[tid][j][2] += 0.5*v[2]; - vatom_thr[tid][j][3] += 0.5*v[3]; - vatom_thr[tid][j][4] += 0.5*v[4]; - vatom_thr[tid][j][5] += 0.5*v[5]; - } - } - } -} - -/* ---------------------------------------------------------------------- - tally eng_vdwl and virial into per thread global and per-atom accumulators - need i < nlocal test since called by bond_quartic and dihedral_charmm -------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_tally_full_thr(int i, double evdwl, - double ecoul, double fpair, double delx, - double dely, double delz, int tid) -{ - double evdwlhalf,ecoulhalf,epairhalf,v[6]; - - if (eflag_either) { - if (eflag_global) { - evdwlhalf = 0.5*evdwl; - ecoulhalf = 0.5*ecoul; - eng_vdwl_thr[tid] += evdwlhalf; - eng_coul_thr[tid] += ecoulhalf; - } - if (eflag_atom) { - epairhalf = 0.5 * (evdwl + ecoul); - eatom_thr[tid][i] += epairhalf; - } - } - - if (vflag_either) { - v[0] = delx*delx*fpair; - v[1] = dely*dely*fpair; - v[2] = delz*delz*fpair; - v[3] = delx*dely*fpair; - v[4] = delx*delz*fpair; - v[5] = dely*delz*fpair; - - if (vflag_global) { - virial_thr[tid][0] += 0.5*v[0]; - virial_thr[tid][1] += 0.5*v[1]; - virial_thr[tid][2] += 0.5*v[2]; - virial_thr[tid][3] += 0.5*v[3]; - virial_thr[tid][4] += 0.5*v[4]; - virial_thr[tid][5] += 0.5*v[5]; - } - - if (vflag_atom) { - vatom_thr[tid][i][0] += 0.5*v[0]; - vatom_thr[tid][i][1] += 0.5*v[1]; - vatom_thr[tid][i][2] += 0.5*v[2]; - vatom_thr[tid][i][3] += 0.5*v[3]; - vatom_thr[tid][i][4] += 0.5*v[4]; - vatom_thr[tid][i][5] += 0.5*v[5]; - } - } -} - -/* ---------------------------------------------------------------------- - tally eng_vdwl and virial into global and per-atom accumulators - for virial, have delx,dely,delz and fx,fy,fz -------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_tally_xyz_thr(int i, int j, int nlocal, int newton_pair, - double evdwl, double ecoul, - double fx, double fy, double fz, - double delx, double dely, double delz, int tid) -{ - double evdwlhalf,ecoulhalf,epairhalf,v[6]; - - if (eflag_either) { - if (eflag_global) { - if (newton_pair) { - eng_vdwl_thr[tid] += evdwl; - eng_coul_thr[tid] += ecoul; - } else { - evdwlhalf = 0.5*evdwl; - ecoulhalf = 0.5*ecoul; - if (i < nlocal) { - eng_vdwl_thr[tid] += evdwlhalf; - eng_coul_thr[tid] += ecoulhalf; - } - if (j < nlocal) { - eng_vdwl_thr[tid] += evdwlhalf; - eng_coul_thr[tid] += ecoulhalf; - } - } - } - if (eflag_atom) { - epairhalf = 0.5 * (evdwl + ecoul); - if (newton_pair || i < nlocal) eatom_thr[tid][i] += epairhalf; - if 
(newton_pair || j < nlocal) eatom_thr[tid][j] += epairhalf; - } - } - - if (vflag_either) { - v[0] = delx*fx; - v[1] = dely*fy; - v[2] = delz*fz; - v[3] = delx*fy; - v[4] = delx*fz; - v[5] = dely*fz; - - if (vflag_global) { - if (newton_pair) { - virial_thr[tid][0] += v[0]; - virial_thr[tid][1] += v[1]; - virial_thr[tid][2] += v[2]; - virial_thr[tid][3] += v[3]; - virial_thr[tid][4] += v[4]; - virial_thr[tid][5] += v[5]; - } else { - if (i < nlocal) { - virial_thr[tid][0] += 0.5*v[0]; - virial_thr[tid][1] += 0.5*v[1]; - virial_thr[tid][2] += 0.5*v[2]; - virial_thr[tid][3] += 0.5*v[3]; - virial_thr[tid][4] += 0.5*v[4]; - virial_thr[tid][5] += 0.5*v[5]; - } - if (j < nlocal) { - virial_thr[tid][0] += 0.5*v[0]; - virial_thr[tid][1] += 0.5*v[1]; - virial_thr[tid][2] += 0.5*v[2]; - virial_thr[tid][3] += 0.5*v[3]; - virial_thr[tid][4] += 0.5*v[4]; - virial_thr[tid][5] += 0.5*v[5]; - } - } - } - - if (vflag_atom) { - if (newton_pair || i < nlocal) { - vatom_thr[tid][i][0] += 0.5*v[0]; - vatom_thr[tid][i][1] += 0.5*v[1]; - vatom_thr[tid][i][2] += 0.5*v[2]; - vatom_thr[tid][i][3] += 0.5*v[3]; - vatom_thr[tid][i][4] += 0.5*v[4]; - vatom_thr[tid][i][5] += 0.5*v[5]; - } - if (newton_pair || j < nlocal) { - vatom_thr[tid][j][0] += 0.5*v[0]; - vatom_thr[tid][j][1] += 0.5*v[1]; - vatom_thr[tid][j][2] += 0.5*v[2]; - vatom_thr[tid][j][3] += 0.5*v[3]; - vatom_thr[tid][j][4] += 0.5*v[4]; - vatom_thr[tid][j][5] += 0.5*v[5]; - } - } - } -} - -/* ---------------------------------------------------------------------- - tally eng_vdwl and virial into global and per-atom accumulators - called by SW potential, newton_pair is always on - virial = riFi + rjFj + rkFk = (rj-ri) Fj + (rk-ri) Fk = drji*fj + drki*fk - ------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_tally3_thr(int i, int j, int k, double evdwl, double ecoul, - double *fj, double *fk, double *drji, double *drki, int tid, - double THIRD) -{ - double epairthird,v[6]; - - if (eflag_either) { - if (eflag_global) { - eng_vdwl_thr[tid] += evdwl; - eng_coul_thr[tid] += ecoul; - } - if (eflag_atom) { - epairthird = THIRD * (evdwl + ecoul); - eatom_thr[tid][i] += epairthird; - eatom_thr[tid][j] += epairthird; - eatom_thr[tid][k] += epairthird; - } - } - - if (vflag_atom) { - v[0] = THIRD * (drji[0]*fj[0] + drki[0]*fk[0]); - v[1] = THIRD * (drji[1]*fj[1] + drki[1]*fk[1]); - v[2] = THIRD * (drji[2]*fj[2] + drki[2]*fk[2]); - v[3] = THIRD * (drji[0]*fj[1] + drki[0]*fk[1]); - v[4] = THIRD * (drji[0]*fj[2] + drki[0]*fk[2]); - v[5] = THIRD * (drji[1]*fj[2] + drki[1]*fk[2]); - - vatom_thr[tid][i][0] += v[0]; vatom_thr[tid][i][1] += v[1]; - vatom_thr[tid][i][2] += v[2]; vatom_thr[tid][i][3] += v[3]; - vatom_thr[tid][i][4] += v[4]; vatom_thr[tid][i][5] += v[5]; - vatom_thr[tid][j][0] += v[0]; vatom_thr[tid][j][1] += v[1]; - vatom_thr[tid][j][2] += v[2]; vatom_thr[tid][j][3] += v[3]; - vatom_thr[tid][j][4] += v[4]; vatom_thr[tid][j][5] += v[5]; - vatom_thr[tid][k][0] += v[0]; vatom_thr[tid][k][1] += v[1]; - vatom_thr[tid][k][2] += v[2]; vatom_thr[tid][k][3] += v[3]; - vatom_thr[tid][k][4] += v[4]; vatom_thr[tid][k][5] += v[5]; - } -} - -/* ---------------------------------------------------------------------- - tally eng_vdwl and virial into global and per-atom accumulators - called by AIREBO potential, newton_pair is always on - ------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_tally4_thr(int i, int j, int k, int m, double evdwl, - double *fi, double *fj, double *fk, - double 
*drim, double *drjm, double *drkm, int tid) -{ - double epairfourth,v[6]; - - if (eflag_either) { - if (eflag_global) eng_vdwl_thr[tid] += evdwl; - if (eflag_atom) { - epairfourth = 0.25 * evdwl; - eatom_thr[tid][i] += epairfourth; - eatom_thr[tid][j] += epairfourth; - eatom_thr[tid][k] += epairfourth; - eatom_thr[tid][m] += epairfourth; - } - } - - if (vflag_atom) { - v[0] = 0.25 * (drim[0]*fi[0] + drjm[0]*fj[0] + drkm[0]*fk[0]); - v[1] = 0.25 * (drim[1]*fi[1] + drjm[1]*fj[1] + drkm[1]*fk[1]); - v[2] = 0.25 * (drim[2]*fi[2] + drjm[2]*fj[2] + drkm[2]*fk[2]); - v[3] = 0.25 * (drim[0]*fi[1] + drjm[0]*fj[1] + drkm[0]*fk[1]); - v[4] = 0.25 * (drim[0]*fi[2] + drjm[0]*fj[2] + drkm[0]*fk[2]); - v[5] = 0.25 * (drim[1]*fi[2] + drjm[1]*fj[2] + drkm[1]*fk[2]); - - vatom_thr[tid][i][0] += v[0]; vatom_thr[tid][i][1] += v[1]; - vatom_thr[tid][i][2] += v[2]; vatom_thr[tid][i][3] += v[3]; - vatom_thr[tid][i][4] += v[4]; vatom_thr[tid][i][5] += v[5]; - vatom_thr[tid][j][0] += v[0]; vatom_thr[tid][j][1] += v[1]; - vatom_thr[tid][j][2] += v[2]; vatom_thr[tid][j][3] += v[3]; - vatom_thr[tid][j][4] += v[4]; vatom_thr[tid][j][5] += v[5]; - vatom_thr[tid][k][0] += v[0]; vatom_thr[tid][k][1] += v[1]; - vatom_thr[tid][k][2] += v[2]; vatom_thr[tid][k][3] += v[3]; - vatom_thr[tid][k][4] += v[4]; vatom_thr[tid][k][5] += v[5]; - vatom_thr[tid][m][0] += v[0]; vatom_thr[tid][m][1] += v[1]; - vatom_thr[tid][m][2] += v[2]; vatom_thr[tid][m][3] += v[3]; - vatom_thr[tid][m][4] += v[4]; vatom_thr[tid][m][5] += v[5]; - } -} - -/* ---------------------------------------------------------------------- - tally ecoul and virial into each of n atoms in list - called by TIP4P potential, newton_pair is always on - changes v values by dividing by n - ------------------------------------------------------------------------- */ - -void PairOMPGPU::ev_tally_list_thr(int n, int *list, double ecoul, double *v, int tid) -{ - int i,j; - - if (eflag_either) { - if (eflag_global) eng_coul_thr[tid] += ecoul; - if (eflag_atom) { - double epairatom = ecoul/n; - for (i = 0; i < n; i++) eatom_thr[tid][list[i]] += epairatom; - } - } - - if (vflag_either) { - if (vflag_global) { - virial_thr[tid][0] += v[0]; - virial_thr[tid][1] += v[1]; - virial_thr[tid][2] += v[2]; - virial_thr[tid][3] += v[3]; - virial_thr[tid][4] += v[4]; - virial_thr[tid][5] += v[5]; - } - - if (vflag_atom) { - v[0] /= n; - v[1] /= n; - v[2] /= n; - v[3] /= n; - v[4] /= n; - v[5] /= n; - for (i = 0; i < n; i++) { - j = list[i]; - vatom_thr[tid][j][0] += v[0]; - vatom_thr[tid][j][1] += v[1]; - vatom_thr[tid][j][2] += v[2]; - vatom_thr[tid][j][3] += v[3]; - vatom_thr[tid][j][4] += v[4]; - vatom_thr[tid][j][5] += v[5]; - } - } - } -} - -/* ---------------------------------------------------------------------- - tally virial into per-atom accumulators - called by AIREBO potential, newton_pair is always on - fpair is magnitude of force on atom I -------------------------------------------------------------------------- */ - -void PairOMPGPU::v_tally2_thr(int i, int j, double fpair, double *drij, int tid) -{ - double v[6]; - - v[0] = 0.5 * drij[0]*drij[0]*fpair; - v[1] = 0.5 * drij[1]*drij[1]*fpair; - v[2] = 0.5 * drij[2]*drij[2]*fpair; - v[3] = 0.5 * drij[0]*drij[1]*fpair; - v[4] = 0.5 * drij[0]*drij[2]*fpair; - v[5] = 0.5 * drij[1]*drij[2]*fpair; - - vatom_thr[tid][i][0] += v[0]; vatom_thr[tid][i][1] += v[1]; - vatom_thr[tid][i][2] += v[2]; vatom_thr[tid][i][3] += v[3]; - vatom_thr[tid][i][4] += v[4]; vatom_thr[tid][i][5] += v[5]; - vatom_thr[tid][j][0] += v[0]; 
vatom_thr[tid][j][1] += v[1]; - vatom_thr[tid][j][2] += v[2]; vatom_thr[tid][j][3] += v[3]; - vatom_thr[tid][j][4] += v[4]; vatom_thr[tid][j][5] += v[5]; -} - -/* ---------------------------------------------------------------------- - tally virial into per-atom accumulators - called by AIREBO and Tersoff potential, newton_pair is always on -------------------------------------------------------------------------- */ - -void PairOMPGPU::v_tally3_thr(int i, int j, int k, double *fi, double *fj, - double *drik, double *drjk, int tid, - double THIRD) -{ - double v[6]; - - v[0] = THIRD * (drik[0]*fi[0] + drjk[0]*fj[0]); - v[1] = THIRD * (drik[1]*fi[1] + drjk[1]*fj[1]); - v[2] = THIRD * (drik[2]*fi[2] + drjk[2]*fj[2]); - v[3] = THIRD * (drik[0]*fi[1] + drjk[0]*fj[1]); - v[4] = THIRD * (drik[0]*fi[2] + drjk[0]*fj[2]); - v[5] = THIRD * (drik[1]*fi[2] + drjk[1]*fj[2]); - - vatom_thr[tid][i][0] += v[0]; vatom_thr[tid][i][1] += v[1]; - vatom_thr[tid][i][2] += v[2]; vatom_thr[tid][i][3] += v[3]; - vatom_thr[tid][i][4] += v[4]; vatom_thr[tid][i][5] += v[5]; - vatom_thr[tid][j][0] += v[0]; vatom_thr[tid][j][1] += v[1]; - vatom_thr[tid][j][2] += v[2]; vatom_thr[tid][j][3] += v[3]; - vatom_thr[tid][j][4] += v[4]; vatom_thr[tid][j][5] += v[5]; - vatom_thr[tid][k][0] += v[0]; vatom_thr[tid][k][1] += v[1]; - vatom_thr[tid][k][2] += v[2]; vatom_thr[tid][k][3] += v[3]; - vatom_thr[tid][k][4] += v[4]; vatom_thr[tid][k][5] += v[5]; -} - -/* ---------------------------------------------------------------------- - tally virial into per-atom accumulators - called by AIREBO potential, newton_pair is always on -------------------------------------------------------------------------- */ - -void PairOMPGPU::v_tally4_thr(int i, int j, int k, int m, - double *fi, double *fj, double *fk, - double *drim, double *drjm, double *drkm, int tid) -{ - double v[6]; - - v[0] = 0.25 * (drim[0]*fi[0] + drjm[0]*fj[0] + drkm[0]*fk[0]); - v[1] = 0.25 * (drim[1]*fi[1] + drjm[1]*fj[1] + drkm[1]*fk[1]); - v[2] = 0.25 * (drim[2]*fi[2] + drjm[2]*fj[2] + drkm[2]*fk[2]); - v[3] = 0.25 * (drim[0]*fi[1] + drjm[0]*fj[1] + drkm[0]*fk[1]); - v[4] = 0.25 * (drim[0]*fi[2] + drjm[0]*fj[2] + drkm[0]*fk[2]); - v[5] = 0.25 * (drim[1]*fi[2] + drjm[1]*fj[2] + drkm[1]*fk[2]); - - vatom_thr[tid][i][0] += v[0]; vatom_thr[tid][i][1] += v[1]; - vatom_thr[tid][i][2] += v[2]; vatom_thr[tid][i][3] += v[3]; - vatom_thr[tid][i][4] += v[4]; vatom_thr[tid][i][5] += v[5]; - vatom_thr[tid][j][0] += v[0]; vatom_thr[tid][j][1] += v[1]; - vatom_thr[tid][j][2] += v[2]; vatom_thr[tid][j][3] += v[3]; - vatom_thr[tid][j][4] += v[4]; vatom_thr[tid][j][5] += v[5]; - vatom_thr[tid][k][0] += v[0]; vatom_thr[tid][k][1] += v[1]; - vatom_thr[tid][k][2] += v[2]; vatom_thr[tid][k][3] += v[3]; - vatom_thr[tid][k][4] += v[4]; vatom_thr[tid][k][5] += v[5]; - vatom_thr[tid][m][0] += v[0]; vatom_thr[tid][m][1] += v[1]; - vatom_thr[tid][m][2] += v[2]; vatom_thr[tid][m][3] += v[3]; - vatom_thr[tid][m][4] += v[4]; vatom_thr[tid][m][5] += v[5]; -} - -/* ---------------------------------------------------------------------- - reduce the per thread accumulated E/V data into the canonical accumulators. -------------------------------------------------------------------------- */ -void PairOMPGPU::ev_reduce_thr(Pair &p) -{ - const int ntotal = (force->newton) ? 
- (atom->nlocal + atom->nghost) : atom->nlocal; - - for (int n = 0; n < _nthreads; ++n) { - p.eng_vdwl += eng_vdwl_thr[n]; - p.eng_coul += eng_coul_thr[n]; - if (vflag_either) { - p.virial[0] += virial_thr[n][0]; - p.virial[1] += virial_thr[n][1]; - p.virial[2] += virial_thr[n][2]; - p.virial[3] += virial_thr[n][3]; - p.virial[4] += virial_thr[n][4]; - p.virial[5] += virial_thr[n][5]; - if (vflag_atom) { - for (int i = 0; i < ntotal; ++i) { - p.vatom[i][0] += vatom_thr[n][i][0]; - p.vatom[i][1] += vatom_thr[n][i][1]; - p.vatom[i][2] += vatom_thr[n][i][2]; - p.vatom[i][3] += vatom_thr[n][i][3]; - p.vatom[i][4] += vatom_thr[n][i][4]; - p.vatom[i][5] += vatom_thr[n][i][5]; - } - } - } - if (eflag_atom) { - for (int i = 0; i < ntotal; ++i) { - p.eatom[i] += eatom_thr[n][i]; - } - } - } -} - -/* ---------------------------------------------------------------------- */ - -double PairOMPGPU::memory_usage() -{ - double bytes = 0.0; - - bytes += _nthreads * (2 + 7) * sizeof(double); - bytes += _nthreads * maxeatom_thr * sizeof(double); - bytes += _nthreads * maxvatom_thr * 6 * sizeof(double); - - if (f_thr != NULL) - bytes += _nthreads * _nmax * sizeof(double); - return bytes; -} - -#endif diff --git a/src/GPU/pair_omp_gpu.h b/src/GPU/pair_omp_gpu.h deleted file mode 100644 index 4532cf5722..0000000000 --- a/src/GPU/pair_omp_gpu.h +++ /dev/null @@ -1,254 +0,0 @@ -/* -*- c++ -*- ------------------------------------------------------------- - LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator - http://lammps.sandia.gov, Sandia National Laboratories - Steve Plimpton, sjplimp@sandia.gov - - Copyright (2003) Sandia Corporation. Under the terms of Contract - DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains - certain rights in this software. This software is distributed under - the GNU General Public License. - - See the README file in the top-level LAMMPS directory. -------------------------------------------------------------------------- */ - -/* ---------------------------------------------------------------------- - Contributing author: Axel Kohlmeyer (Temple U) - Modified by Mike for use with GPU library -------------------------------------------------------------------------- */ - -#ifndef LMP_PAIR_OMP_GPU_H -#define LMP_PAIR_OMP_GPU_H - -#include "pair.h" -#include - -#if defined(_OPENMP) -#include "atom.h" -#include "memory.h" -#include -#endif - -namespace LAMMPS_NS { - -#if defined(_OPENMP) - -class PairOMPGPU : protected Pointers { - - protected: - double *eng_vdwl_thr; // per thread accumulated vdw energy - double *eng_coul_thr; // per thread accumulated coulomb energies - double **virial_thr; // per thread virial - double **eatom_thr; // per thread per atom energy - double ***vatom_thr; // per thread per atom virial - double **f_thr; // per thread force (for thread id>0) - - int maxeatom_thr, maxvatom_thr; - int _nthreads, _nmax; - - int eflag_either,eflag_global,eflag_atom; - int vflag_either,vflag_global,vflag_atom; - - public: - PairOMPGPU(LAMMPS *); - ~PairOMPGPU(); - - void free_mem(); - void init_style(); - double memory_usage(); - - // set loop range for, thread id, and force array offset for threaded runs. 
- double **loop_setup_thr(double **f, int &ifrom, int &ito, int &tid, - const int start, const int inum, const int nall) - { - if (_nthreads > 1) { - tid = omp_get_thread_num(); - if (nall > _nmax) { -#pragma omp master - memory->grow(f_thr,atom->nmax*(_nthreads-1),3,"pair:f_thr"); -#pragma omp barrier - if (tid == 0) - _nmax = atom->nmax; - } - - // each thread works on a fixed chunk of atoms. - const int idelta = 1 + (inum - start)/_nthreads; - ifrom = tid * idelta + start; - ito = ifrom + idelta; - if (ito > inum) - ito = inum; - - // zero per thread force array - // keep thread memory access same as for force accumulation - if (tid > 0) { - double **f_zero = f_thr + nall * (tid - 1); - for (int i = 0; i < nall; i++) { - f_zero[i][0] = 0.0; - f_zero[i][1] = 0.0; - f_zero[i][2] = 0.0; - } - return f_zero; - } - return f; - } else { - tid = 0; - ifrom = start; - ito = inum; - return f; - } - }; - - // set loop range for, thread id, and force array offset for threaded runs. - double **loop_setup_thr(double **f, int &ifrom, int &ito, int &tid, - const int inum, const int nall) - { return loop_setup_thr(f,ifrom,ito,tid,0,inum,nall); } - - // loop setup with full neighbor list and nonzero starting index - double **loop_setup_thr_full(double **f, int &ifrom, int &ito, int &tid, - const int start, const int inum, const int nall) - { - if (_nthreads > 1) { - tid = omp_get_thread_num(); - - // each thread works on a fixed chunk of atoms. - const int idelta = 1 + (inum - start) / _nthreads; - ifrom = start + tid * idelta; - ito = ifrom + idelta; - if (ito > inum) - ito = inum; - - return f; - - } else { - tid = 0; - ifrom = start; - ito = inum; - return f; - } - }; - - // threading adapted versions of the ev_tally infrastructure. - void ev_setup_thr(int, int, int, int, int, int, int, int); - void ev_reduce_thr(Pair &); - void ev_tally_thr(int, int, int, int, double, double, double, - double, double, double, int); - void ev_tally_full_thr(int, double, double, double, - double, double, double, int); - void ev_tally_xyz_thr(int, int, int, int, double, double, - double, double, double, double, double, double, int); - void ev_tally3_thr(int, int, int, double, double, - double *, double *, double *, double *, int, double); - void ev_tally4_thr(int, int, int, int, double, - double *, double *, double *, double *, double *, - double *, int); - void ev_tally_list_thr(int, int *, double, double *, int); - void v_tally2_thr(int, int, double, double *, int); - void v_tally3_thr(int, int, int, double *, double *, double *, double *, int, - double); - void v_tally4_thr(int, int, int, int, double *, double *, double *, - double *, double *, double *, int); - - // reduce per thread forces into the first part of the force - // array that is used for the non-threaded parts and reset - // the temporary storage to 0.0. - // this is in the header to be inlined. - // need to post a barrier to wait until all threads are done - // with computing forces. the reduction can be threaded as well. - void force_reduce_thr(double **fall, const int nall, const int tid) - { - // NOOP in non-threaded execution. - if (_nthreads == 1) return; -#pragma omp barrier - { - double **f; - const int idelta = 1 + nall/_nthreads; - const int ifrom = tid*idelta; - const int ito = ((ifrom + idelta) > nall) ? 
nall : (ifrom + idelta); - for (int n = 1; n < _nthreads; ++n) { - const int toffs = (n-1)*nall; - f = f_thr + toffs; - for (int m = ifrom; m < ito; ++m) { - fall[m][0] += f[m][0]; - f[m][0] = 0.0; - fall[m][1] += f[m][1]; - f[m][1] = 0.0; - fall[m][2] += f[m][2]; - f[m][2] = 0.0; - } - } - } - }; - - // reduce per thread density into the first part of the rho - // array that is used for the non-threaded parts. for use with EAM. - // this is in the header to be inlined. - // we need to post a barrier to wait until all threads are done. - // the reduction can be threaded as well. - void rho_reduce_thr(double *rho, const int nmax, const int nrange, - const int tid) - { - // NOOP in non-threaded execution. - if (_nthreads == 1) return; -#pragma omp barrier - { - double *rho_thr; - const int idelta = 1 + nrange/_nthreads; - const int ifrom = tid*idelta; - const int ito = ((ifrom + idelta) > nrange) ? nrange : (ifrom + idelta); - for (int n = 1; n < _nthreads; ++n) { - const int toffs = n*nmax; - rho_thr = rho + toffs; - for (int m = ifrom; m < ito; ++m) - rho[m] += rho_thr[m]; - } - } - }; - -}; - -#else - -class PairOMPGPU { - - public: - inline PairOMPGPU(LAMMPS *) {}; - virtual inline ~PairOMPGPU() {}; - - inline void init_style() {} - inline double memory_usage() { return 0.0; } - - inline void ev_setup_thr(int, int, int, int, int, int, int, int) {} - inline void ev_reduce_thr(Pair &) {} - inline double **loop_setup_thr(double **f, int &ifrom, int &ito, int &tid, - const int start, const int inum, - const int nall) - { - ifrom = start; - ito = inum; - return f; - }; - - inline double **loop_setup_thr(double **f, int &ifrom, int &ito, int &tid, - const int inum, const int nall) - { return loop_setup_thr(f,ifrom,ito,tid,0,inum,nall); } - - // loop setup with full neighbor list and nonzero starting index - double **loop_setup_thr_full(double **f, int &ifrom, int &ito, int &tid, - const int start, const int inum, const int nall) - { - ifrom = start; - ito = inum; - return f; - }; - - inline void force_reduce_thr(double **fall, const int nall, const int tid) {}; - inline void rho_reduce_thr(double *rho, const int nmax, const int nrange, - const int tid) {}; - -}; - -#endif - -} - -#endif -
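The deleted `PairCGCMMCoulMSM` style evaluates a smoothed real-space Coulomb term with either C3 or C4 smoothing polynomials (the coefficients appear verbatim in `compute()`/`cpu_compute()` above). Below is a minimal, self-contained C++ sketch that reproduces that pairwise arithmetic outside of LAMMPS, e.g. for regression-checking a replacement implementation. It is not part of the removed files; the names `msm_pair` and `MSMPair` are illustrative only, and the formulas are copied from the deleted kernel as-is.

```cpp
#include <cmath>
#include <cstdio>

enum Smooth { C3 = 0, C4 = 1 };   // same enum values as the deleted code

struct MSMPair {
  double ecoul;   // pairwise Coulomb energy
  double fpair;   // force prefactor as used in the deleted kernel: F_x = fpair * delx, etc.
};

// qqrd2e  : energy-conversion factor (force->qqrd2e in LAMMPS)
// qi, qj  : charges;  rsq : squared separation;  cut_coul : global Coulomb cutoff
MSMPair msm_pair(double qqrd2e, double qi, double qj,
                 double rsq, double cut_coul, int smooth)
{
  // init_style() in the deleted pair style set _ia = -1.0/cut_coul_global
  const double ia  = -1.0 / cut_coul;
  const double ia2 = ia * ia;
  const double ia3 = ia2 * ia;

  const double ir        = 1.0 / sqrt(rsq);
  const double prefactor = qqrd2e * qi * qj;
  const double r2_ia2    = rsq * ia2;
  const double r4_ia4    = r2_ia2 * r2_ia2;

  MSMPair out;
  if (smooth == C3) {
    out.fpair = prefactor * (ia3 * (-4.375 + 5.25 * r2_ia2 - 1.875 * r4_ia4)
                             - ir / rsq);
    out.ecoul = prefactor * (ir + ia * (2.1875 - 2.1875 * r2_ia2
                                        + 1.3125 * r4_ia4
                                        - 0.3125 * r2_ia2 * r4_ia4));
  } else {                        // C4 smoothing
    const double r6_ia6 = r2_ia2 * r4_ia4;
    out.fpair = prefactor * (ia3 * (-6.5625 + 11.8125 * r2_ia2
                                    - 8.4375 * r4_ia4 + 2.1875 * r6_ia6)
                             - ir / rsq);
    out.ecoul = prefactor * (ir + ia * (2.4609375 - 3.28125 * r2_ia2
                                        + 2.953125 * r4_ia4
                                        - 1.40625 * r6_ia6
                                        + 0.2734375 * r4_ia4 * r4_ia4));
  }
  return out;
}

int main()
{
  // Example: two unit charges 5 length-units apart, 10-unit cutoff, qqrd2e = 1.
  MSMPair p = msm_pair(1.0, 1.0, 1.0, 25.0, 10.0, C3);
  printf("ecoul = %g  fpair = %g\n", p.ecoul, p.fpair);
  return 0;
}
```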
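The removed `PairOMPGPU` helper avoids race conditions by giving every OpenMP thread its own accumulator slot (`eng_vdwl_thr`, `virial_thr`, ...) and folding them into the canonical totals afterwards (`ev_reduce_thr`), with each thread working on a fixed contiguous chunk of the loop range (`loop_setup_thr_full`). The sketch below illustrates that pattern in isolation; it is not taken from the deleted files, `pair_energy()` is a stand-in for a real pair kernel, and it assumes compilation with OpenMP enabled (e.g. `-fopenmp`).

```cpp
#include <omp.h>
#include <vector>
#include <cstdio>

static double pair_energy(int i) {           // placeholder per-pair energy
  return 1.0 / (1.0 + i);
}

int main()
{
  const int npairs   = 1000000;
  const int nthreads = omp_get_max_threads();

  // one accumulator per thread, analogous to eng_vdwl_thr[]
  std::vector<double> eng_thr(nthreads, 0.0);

#pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    // fixed chunk per thread, like loop_setup_thr_full()
    const int idelta = 1 + npairs / nthreads;
    const int ifrom  = tid * idelta;
    const int ito    = (ifrom + idelta > npairs) ? npairs : ifrom + idelta;
    for (int i = ifrom; i < ito; ++i)
      eng_thr[tid] += pair_energy(i);        // no sharing, so no atomics needed
  }

  // serial reduction into the canonical total, analogous to ev_reduce_thr()
  double eng_vdwl = 0.0;
  for (int t = 0; t < nthreads; ++t) eng_vdwl += eng_thr[t];

  printf("total pair energy = %g\n", eng_vdwl);
  return 0;
}
```

The same idea extends to the per-thread force and virial arrays in the deleted code: accumulate privately, then reduce once after the parallel loop.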