git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@11233 f3b2605a-c512-4ea7-a41b-209d697bcdaa
@@ -83,6 +83,8 @@ action pair_lj_cut_gpu.cpp
action pair_lj_cut_gpu.h
action pair_lj_expand_gpu.cpp
action pair_lj_expand_gpu.h
action pair_lj_gromacs_gpu.cpp
action pair_lj_gromacs_gpu.h
action pair_lj_sdk_coul_long_gpu.cpp pair_lj_sdk_coul_long.cpp
action pair_lj_sdk_coul_long_gpu.h pair_lj_sdk_coul_long.cpp
action pair_lj_sdk_gpu.cpp pair_lj_sdk.cpp
247
src/GPU/pair_lj_gromacs_gpu.cpp
Normal file
@@ -0,0 +1,247 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation. Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software. This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing author: Trung Dac Nguyen (ORNL)
------------------------------------------------------------------------- */

#include "lmptype.h"
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#include "pair_lj_gromacs_gpu.h"
#include "atom.h"
#include "atom_vec.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "integrate.h"
#include "memory.h"
#include "error.h"
#include "neigh_request.h"
#include "universe.h"
#include "update.h"
#include "domain.h"
#include "string.h"
#include "kspace.h"
#include "gpu_extra.h"

// External functions from cuda library for atom decomposition

int ljgrm_gpu_init(const int ntypes, double **cutsq, double **host_lj1,
                   double **host_lj2, double **host_lj3, double **host_lj4,
                   double *special_lj, const int inum,
                   const int nall, const int max_nbors, const int maxspecial,
                   const double cell_size, int &gpu_mode, FILE *screen,
                   double **host_ljsw1, double **host_ljsw2, double **host_ljsw3,
                   double **host_ljsw4, double **host_ljsw5,
                   double **cut_inner, double **cut_innersq);
void ljgrm_gpu_clear();
int ** ljgrm_gpu_compute_n(const int ago, const int inum_full,
                           const int nall, double **host_x, int *host_type,
                           double *sublo, double *subhi, int *tag, int **nspecial,
                           int **special, const bool eflag, const bool vflag,
                           const bool eatom, const bool vatom, int &host_start,
                           int **ilist, int **jnum, const double cpu_time,
                           bool &success);
void ljgrm_gpu_compute(const int ago, const int inum_full, const int nall,
                       double **host_x, int *host_type, int *ilist, int *numj,
                       int **firstneigh, const bool eflag, const bool vflag,
                       const bool eatom, const bool vatom, int &host_start,
                       const double cpu_time, bool &success);
double ljgrm_gpu_bytes();


using namespace LAMMPS_NS;

/* ---------------------------------------------------------------------- */

PairLJGromacsGPU::PairLJGromacsGPU(LAMMPS *lmp) :
  PairLJGromacs(lmp), gpu_mode(GPU_FORCE)
{
  respa_enable = 0;
  cpu_time = 0.0;
  GPU_EXTRA::gpu_ready(lmp->modify, lmp->error);
}

/* ----------------------------------------------------------------------
   free all arrays
------------------------------------------------------------------------- */

PairLJGromacsGPU::~PairLJGromacsGPU()
{
  ljgrm_gpu_clear();
}

/* ---------------------------------------------------------------------- */

void PairLJGromacsGPU::compute(int eflag, int vflag)
{
  if (eflag || vflag) ev_setup(eflag,vflag);
  else evflag = vflag_fdotr = 0;

  int nall = atom->nlocal + atom->nghost;
  int inum, host_start;

  bool success = true;
  int *ilist, *numneigh, **firstneigh;
  if (gpu_mode != GPU_FORCE) {
    inum = atom->nlocal;
    firstneigh = ljgrm_gpu_compute_n(neighbor->ago, inum, nall,
                                     atom->x, atom->type, domain->sublo,
                                     domain->subhi, atom->tag, atom->nspecial,
                                     atom->special, eflag, vflag, eflag_atom,
                                     vflag_atom, host_start, &ilist,
                                     &numneigh, cpu_time, success);
  } else {
    inum = list->inum;
    ilist = list->ilist;
    numneigh = list->numneigh;
    firstneigh = list->firstneigh;
    ljgrm_gpu_compute(neighbor->ago, inum, nall, atom->x, atom->type,
                      ilist, numneigh, firstneigh, eflag, vflag, eflag_atom,
                      vflag_atom, host_start, cpu_time, success);
  }
  if (!success)
    error->one(FLERR,"Insufficient memory on accelerator");

  if (host_start<inum) {
    cpu_time = MPI_Wtime();
    cpu_compute(host_start, inum, eflag, vflag, ilist, numneigh, firstneigh);
    cpu_time = MPI_Wtime() - cpu_time;
  }
}

/* ----------------------------------------------------------------------
   init specific to this pair style
------------------------------------------------------------------------- */

void PairLJGromacsGPU::init_style()
{
  if (force->newton_pair)
    error->all(FLERR,"Cannot use newton pair with lj/gromacs/gpu pair style");

  // Repeat cutsq calculation because done after call to init_style
  double maxcut = -1.0;
  double mcut;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        mcut = init_one(i,j);
        mcut *= mcut;
        if (mcut > maxcut)
          maxcut = mcut;
        cutsq[i][j] = cutsq[j][i] = mcut;
      } else
        cutsq[i][j] = cutsq[j][i] = 0.0;
    }
  }
  double cell_size = sqrt(maxcut) + neighbor->skin;

  int maxspecial=0;
  if (atom->molecular)
    maxspecial=atom->maxspecial;

  int success = ljgrm_gpu_init(atom->ntypes+1, cutsq, lj1, lj2, lj3, lj4,
                               force->special_lj, atom->nlocal,
                               atom->nlocal+atom->nghost, 300, maxspecial,
                               cell_size, gpu_mode, screen, ljsw1, ljsw2,
                               ljsw3, ljsw4, ljsw5, cut_inner, cut_inner_sq);
  GPU_EXTRA::check_flag(success,error,world);

  if (gpu_mode == GPU_FORCE) {
    int irequest = neighbor->request(this);
    neighbor->requests[irequest]->half = 0;
    neighbor->requests[irequest]->full = 1;
  }
}

/* ---------------------------------------------------------------------- */

double PairLJGromacsGPU::memory_usage()
{
  double bytes = Pair::memory_usage();
  return bytes + ljgrm_gpu_bytes();
}

/* ---------------------------------------------------------------------- */

void PairLJGromacsGPU::cpu_compute(int start, int inum, int eflag,
                                   int vflag, int *ilist,
                                   int *numneigh, int **firstneigh)
{
  int i,j,ii,jj,jnum,itype,jtype;
  double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair;
  double rsq,r2inv,r6inv,forcelj,factor_lj;
  double r,t,fswitch,eswitch;
  int *jlist;

  double **x = atom->x;
  double **f = atom->f;
  int *type = atom->type;
  double *special_lj = force->special_lj;

  // loop over neighbors of my atoms

  for (ii = start; ii < inum; ii++) {
    i = ilist[ii];
    xtmp = x[i][0];
    ytmp = x[i][1];
    ztmp = x[i][2];
    itype = type[i];
    jlist = firstneigh[i];
    jnum = numneigh[i];

    for (jj = 0; jj < jnum; jj++) {
      j = jlist[jj];
      factor_lj = special_lj[sbmask(j)];
      j &= NEIGHMASK;

      delx = xtmp - x[j][0];
      dely = ytmp - x[j][1];
      delz = ztmp - x[j][2];
      rsq = delx*delx + dely*dely + delz*delz;
      jtype = type[j];

      if (rsq < cutsq[itype][jtype]) {
        r2inv = 1.0/rsq;
        r6inv = r2inv*r2inv*r2inv;
        forcelj = r6inv * (lj1[itype][jtype]*r6inv - lj2[itype][jtype]);
        if (rsq > cut_inner_sq[itype][jtype]) {
          r = sqrt(rsq);
          t = r - cut_inner[itype][jtype];
          fswitch = r*t*t*(ljsw1[itype][jtype] + ljsw2[itype][jtype]*t);
          forcelj += fswitch;
        }
        fpair = factor_lj*forcelj * r2inv;

        f[i][0] += delx*fpair;
        f[i][1] += dely*fpair;
        f[i][2] += delz*fpair;

        if (eflag) {
          evdwl = r6inv * (lj3[itype][jtype]*r6inv - lj4[itype][jtype]);
          evdwl += ljsw5[itype][jtype];
          if (rsq > cut_inner_sq[itype][jtype]) {
            eswitch = t*t*t*(ljsw3[itype][jtype] + ljsw4[itype][jtype]*t);
            evdwl += eswitch;
          }
          evdwl *= factor_lj;
        }

        if (evflag) ev_tally_full(i,evdwl,0.0,fpair,delx,dely,delz);
      }
    }
  }
}
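Note on the switching evaluated in cpu_compute() above (this only restates the code; it is not part of the diff). Writing r_in for cut_inner, r_c for the outer cutoff, t = r - r_in, and f_lj for the special_lj prefactor, the loop computes fpair (the force magnitude divided by r, so the force components are fpair*delx, etc.) and the pair energy evdwl as

\[
\mathrm{fpair} \;=\; \frac{f_{\mathrm{lj}}}{r^{2}}\left[\frac{1}{r^{6}}\left(\frac{\mathrm{lj1}}{r^{6}}-\mathrm{lj2}\right)
  \;+\; r\,t^{2}\bigl(\mathrm{ljsw1}+\mathrm{ljsw2}\,t\bigr)\right],
\qquad
\mathrm{evdwl} \;=\; f_{\mathrm{lj}}\left[\frac{1}{r^{6}}\left(\frac{\mathrm{lj3}}{r^{6}}-\mathrm{lj4}\right)
  \;+\; \mathrm{ljsw5} \;+\; t^{3}\bigl(\mathrm{ljsw3}+\mathrm{ljsw4}\,t\bigr)\right],
\]

where the t-dependent switching terms enter only in the outer region r_in < r < r_c, and the constant shift ljsw5 is applied over the whole range r < r_c. The coefficients lj1..lj4 and ljsw1..ljsw5 are precomputed per type pair by the base class PairLJGromacs.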
64
src/GPU/pair_lj_gromacs_gpu.h
Normal file
@@ -0,0 +1,64 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation. Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software. This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#ifdef PAIR_CLASS

PairStyle(lj/gromacs/gpu,PairLJGromacsGPU)

#else

#ifndef LMP_PAIR_LJ_GROMACS_GPU_H
#define LMP_PAIR_LJ_GROMACS_GPU_H

#include "pair_lj_gromacs.h"

namespace LAMMPS_NS {

class PairLJGromacsGPU : public PairLJGromacs {
 public:
  PairLJGromacsGPU(LAMMPS *lmp);
  ~PairLJGromacsGPU();
  void cpu_compute(int, int, int, int, int *, int *, int **);
  void compute(int, int);
  void init_style();
  double memory_usage();

  enum { GPU_FORCE, GPU_NEIGH, GPU_HYB_NEIGH };

 private:
  int gpu_mode;
  double cpu_time;
  int *gpulist;
};

}
#endif
#endif

/* ERROR/WARNING messages:

E: Insufficient memory on accelerator

There is insufficient memory on one of the devices specified for the gpu
package

E: Cannot use newton pair with lj/gromacs/gpu pair style

Self-explanatory.

E: Pair style is incompatible with KSpace style

If a pair style with a long-range Coulombic component is selected,
then a kspace style must also be used.

*/
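The compute() method in pair_lj_gromacs_gpu.cpp above follows the usual GPU-package split: the library call fills host_start, the device covers neighbor-list entries [0, host_start), and cpu_compute() finishes entries [host_start, inum) on the host, with the host portion timed via MPI_Wtime() and the measured cpu_time handed to the library on the subsequent call (see the cpu_time argument of ljgrm_gpu_compute). A minimal, self-contained C++ sketch of that split; all names here are hypothetical stand-ins, not the LAMMPS or GPU-library API:

// gpu_partial_compute() stands in for ljgrm_gpu_compute(): it reports via
// host_start how many neighbor-list entries the device handled.
#include <cstdio>

static void gpu_partial_compute(int inum, int &host_start) {
  host_start = (3 * inum) / 4;      // pretend the device took 3/4 of the work
}

static void cpu_tail_compute(int start, int inum) {
  for (int ii = start; ii < inum; ++ii) {
    // per-entry force/energy work for the part the device did not cover
  }
}

int main() {
  const int inum = 1000;            // local atoms with neighbor lists
  int host_start = 0;
  gpu_partial_compute(inum, host_start);
  if (host_start < inum)            // same guard as in compute()
    cpu_tail_compute(host_start, inum);
  std::printf("device: %d entries, host: %d entries\n",
              host_start, inum - host_start);
  return 0;
}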
@@ -35,11 +35,15 @@

// External functions from cuda library for atom decomposition

int sw_gpu_init(const int nlocal, const int nall, const int max_nbors,
int sw_gpu_init(const int ntypes, const int inum, const int nall, const int max_nbors,
                const double cell_size, int &gpu_mode, FILE *screen,
                const double, const double, const double, const double,
                const double, const double, const double, const double,
                const double, const double, const double);
                int* host_map, const int nelements, int*** host_elem2param, const int nparams,
                const double* sw_epsilon, const double* sw_sigma,
                const double* sw_lambda, const double* sw_gamma,
                const double* sw_costheta, const double* sw_biga,
                const double* sw_bigb, const double* sw_powerp,
                const double* sw_powerq, const double* sw_cut,
                const double* sw_cutsq);
void sw_gpu_clear();
int ** sw_gpu_compute_n(const int ago, const int inum,
                        const int nall, double **host_x, int *host_type,
@@ -135,21 +139,64 @@ void PairSWGPU::allocate()

void PairSWGPU::init_style()
{
  double cell_size = sqrt(params[0].cutsq) + neighbor->skin;
  double cell_size = cutmax + neighbor->skin;

  if (atom->tag_enable == 0)
    error->all(FLERR,"Pair style sw/gpu requires atom IDs");
  if (force->newton_pair != 0)
    error->all(FLERR,"Pair style sw/gpu requires newton pair off");
  if (nparams > 1)
    error->all(FLERR,"Pair style sw/gpu is currently limited to one element.");

  int success = sw_gpu_init(atom->nlocal, atom->nlocal+atom->nghost, 300,
                            cell_size, gpu_mode, screen,params[0].epsilon,
                            params[0].sigma, params[0].lambda, params[0].gamma,
                            params[0].costheta, params[0].biga, params[0].bigb,
                            params[0].powerp, params[0].powerq, params[0].cut,
                            params[0].cutsq);
  double *epsilon, *sigma, *lambda, *gamma;
  double *biga, *bigb, *powerp, *powerq;
  double *_cut, *_cutsq, *costheta;
  epsilon = sigma = lambda = gamma = NULL;
  biga = bigb = powerp = powerq = NULL;
  _cut = _cutsq = costheta = NULL;

  memory->create(epsilon,nparams,"pair:epsilon");
  memory->create(sigma,nparams,"pair:sigma");
  memory->create(lambda,nparams,"pair:lambda");
  memory->create(gamma,nparams,"pair:gamma");
  memory->create(biga,nparams,"pair:biga");
  memory->create(bigb,nparams,"pair:bigb");
  memory->create(powerp,nparams,"pair:powerp");
  memory->create(powerq,nparams,"pair:powerq");
  memory->create(_cut,nparams,"pair:_cut");
  memory->create(_cutsq,nparams,"pair:_cutsq");
  memory->create(costheta,nparams,"pair:costheta");

  for (int i = 0; i < nparams; i++) {
    epsilon[i] = params[i].epsilon;
    sigma[i] = params[i].sigma;
    lambda[i] = params[i].lambda;
    gamma[i] = params[i].gamma;
    biga[i] = params[i].biga;
    bigb[i] = params[i].bigb;
    powerp[i] = params[i].powerp;
    powerq[i] = params[i].powerq;
    _cut[i] = params[i].cut;
    _cutsq[i] = params[i].cutsq;
    costheta[i] = params[i].costheta;
  }

  int success = sw_gpu_init(atom->ntypes+1, atom->nlocal, atom->nlocal+atom->nghost, 300,
                            cell_size, gpu_mode, screen, map, nelements,
                            elem2param, nparams, epsilon,
                            sigma, lambda, gamma, costheta, biga, bigb,
                            powerp, powerq, _cut, _cutsq);

  memory->destroy(epsilon);
  memory->destroy(sigma);
  memory->destroy(lambda);
  memory->destroy(gamma);
  memory->destroy(biga);
  memory->destroy(bigb);
  memory->destroy(powerp);
  memory->destroy(powerq);
  memory->destroy(_cut);
  memory->destroy(_cutsq);
  memory->destroy(costheta);

  GPU_EXTRA::check_flag(success,error,world);

  if (gpu_mode == GPU_FORCE) {
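The PairSWGPU::init_style() change above marshals the per-parameter-set fields (params[i].epsilon, params[i].sigma, ...) into temporary flat arrays with memory->create(), passes them to sw_gpu_init(), and then releases them with memory->destroy(). A small self-contained C++ sketch of this array-of-structs to flat-arrays packing; names and values here are hypothetical, not the LAMMPS API:

// Pack struct fields into contiguous per-field arrays, mirroring how
// init_style() copies params[i].epsilon etc. before the GPU init call.
#include <cstdio>
#include <vector>

struct Param { double epsilon, sigma, cut; };

// stand-in for the GPU library init, which expects one flat array per field
static void fake_gpu_init(const double *eps, const double *sig,
                          const double *cut, int nparams) {
  for (int i = 0; i < nparams; i++)
    std::printf("param %d: eps=%g sigma=%g cut=%g\n", i, eps[i], sig[i], cut[i]);
}

int main() {
  std::vector<Param> params = {{1.0, 2.1, 3.8}, {0.5, 1.8, 3.2}};
  const int nparams = static_cast<int>(params.size());

  std::vector<double> epsilon(nparams), sigma(nparams), cut(nparams);
  for (int i = 0; i < nparams; i++) {   // field-by-field copy, as in init_style()
    epsilon[i] = params[i].epsilon;
    sigma[i] = params[i].sigma;
    cut[i] = params[i].cut;
  }
  fake_gpu_init(epsilon.data(), sigma.data(), cut.data(), nparams);
  return 0;   // the vectors release the temporaries, like memory->destroy()
}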