Adding Morse potential (untested)
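For reference, the pair style added here is the Morse potential. With cutoff r_c and an energy shift E_offset evaluated at the cutoff, the energy and force implemented by the CPU fallback below (and intended for the GPU kernels) are

    E(r) = D_0 \left[ e^{-2\alpha(r-r_0)} - 2\,e^{-\alpha(r-r_0)} \right] - E_{offset},  for  r < r_c
    F(r) = -\frac{dE}{dr} = 2 D_0 \alpha \left[ e^{-2\alpha(r-r_0)} - e^{-\alpha(r-r_0)} \right]

so with dexp = exp(-alpha*(r-r0)) and the precomputed morse1 = 2*D0*alpha, the code evaluates fpair = morse1*(dexp*dexp - dexp)/r and evdwl = d0*(dexp*dexp - 2*dexp) - offset. The coefficients are packed per type pair as mor1 = {cutsq, morse1, r0, alpha} and mor2 = {d0, offset} (see morse_gpu_memory.h).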
@@ -52,6 +52,7 @@ OBJS = $(OBJ_DIR)/pair_gpu_atom.o $(OBJ_DIR)/pair_gpu_ans.o \
   $(OBJ_DIR)/lj_expand_gpu_memory.o $(OBJ_DIR)/lj_expand_gpu.o \
   $(OBJ_DIR)/ljc_cut_gpu_memory.o $(OBJ_DIR)/ljc_cut_gpu.o \
   $(OBJ_DIR)/ljcl_cut_gpu_memory.o $(OBJ_DIR)/ljcl_cut_gpu.o \
+  $(OBJ_DIR)/morse_gpu_memory.o $(OBJ_DIR)/morse_gpu.o \
   $(OBJ_DIR)/crml_gpu_memory.o $(OBJ_DIR)/crml_gpu.o \
   $(OBJ_DIR)/cmm_cut_gpu_memory.o $(OBJ_DIR)/cmm_cut_gpu.o \
   $(OBJ_DIR)/cmmc_long_gpu_memory.o $(OBJ_DIR)/cmmc_long_gpu.o \
@@ -68,6 +69,7 @@ PTXS = $(OBJ_DIR)/pair_gpu_atom_kernel.ptx $(OBJ_DIR)/pair_gpu_atom_ptx.h \
   $(OBJ_DIR)/lj_expand_gpu_kernel.ptx $(OBJ_DIR)/lj_expand_gpu_ptx.h \
   $(OBJ_DIR)/ljc_cut_gpu_kernel.ptx $(OBJ_DIR)/ljc_cut_gpu_ptx.h \
   $(OBJ_DIR)/ljcl_cut_gpu_kernel.ptx $(OBJ_DIR)/ljcl_cut_gpu_ptx.h \
+  $(OBJ_DIR)/morse_gpu_kernel.ptx $(OBJ_DIR)/morse_gpu_ptx.h \
   $(OBJ_DIR)/crml_gpu_kernel.ptx $(OBJ_DIR)/crml_gpu_ptx.h \
   $(OBJ_DIR)/cmm_cut_gpu_kernel.ptx $(OBJ_DIR)/cmm_cut_gpu_ptx.h \
   $(OBJ_DIR)/cmmc_long_gpu_kernel.ptx $(OBJ_DIR)/cmmc_long_gpu_ptx.h \
@@ -198,6 +200,18 @@ $(OBJ_DIR)/ljcl_cut_gpu_memory.o: $(ALL_H) ljcl_cut_gpu_memory.h ljcl_cut_gpu_me
 $(OBJ_DIR)/ljcl_cut_gpu.o: $(ALL_H) ljcl_cut_gpu_memory.h ljcl_cut_gpu.cpp
 	$(CUDR) -o $@ -c ljcl_cut_gpu.cpp -I$(OBJ_DIR)
 
+$(OBJ_DIR)/morse_gpu_kernel.ptx: morse_gpu_kernel.cu pair_gpu_precision.h
+	$(CUDA) --ptx -DNV_KERNEL -o $@ morse_gpu_kernel.cu
+
+$(OBJ_DIR)/morse_gpu_ptx.h: $(OBJ_DIR)/morse_gpu_kernel.ptx
+	$(BSH) ./geryon/file_to_cstr.sh $(OBJ_DIR)/morse_gpu_kernel.ptx $(OBJ_DIR)/morse_gpu_ptx.h
+
+$(OBJ_DIR)/morse_gpu_memory.o: $(ALL_H) morse_gpu_memory.h morse_gpu_memory.cpp $(OBJ_DIR)/morse_gpu_ptx.h $(OBJ_DIR)/atomic_gpu_memory.o
+	$(CUDR) -o $@ -c morse_gpu_memory.cpp -I$(OBJ_DIR)
+
+$(OBJ_DIR)/morse_gpu.o: $(ALL_H) morse_gpu_memory.h morse_gpu.cpp
+	$(CUDR) -o $@ -c morse_gpu.cpp -I$(OBJ_DIR)
+
 $(OBJ_DIR)/crml_gpu_kernel.ptx: crml_gpu_kernel.cu pair_gpu_precision.h
 	$(CUDA) --ptx -DNV_KERNEL -o $@ crml_gpu_kernel.cu
 
@@ -42,6 +42,7 @@ OBJS = $(OBJ_DIR)/pair_gpu_atom.o $(OBJ_DIR)/pair_gpu_ans.o \
   $(OBJ_DIR)/lj_expand_gpu_memory.o $(OBJ_DIR)/lj_expand_gpu.o \
   $(OBJ_DIR)/ljc_cut_gpu_memory.o $(OBJ_DIR)/ljc_cut_gpu.o \
   $(OBJ_DIR)/ljcl_cut_gpu_memory.o $(OBJ_DIR)/ljcl_cut_gpu.o \
+  $(OBJ_DIR)/morse_gpu_memory.o $(OBJ_DIR)/morse_gpu.o \
   $(OBJ_DIR)/crml_gpu_memory.o $(OBJ_DIR)/crml_gpu.o \
   $(OBJ_DIR)/cmm_cut_gpu_memory.o $(OBJ_DIR)/cmm_cut_gpu.o \
   $(OBJ_DIR)/cmmc_long_gpu_memory.o $(OBJ_DIR)/cmmc_long_gpu.o
@@ -50,8 +51,9 @@ KERS = $(OBJ_DIR)/pair_gpu_atom_cl.h $(OBJ_DIR)/pair_gpu_nbor_cl.h \
   $(OBJ_DIR)/gb_gpu_nbor_cl.h $(OBJ_DIR)/gb_gpu_cl.h \
   $(OBJ_DIR)/lj_cut_gpu_cl.h $(OBJ_DIR)/lj96_cut_gpu_cl.h \
   $(OBJ_DIR)/lj_expand_gpu_cl.h $(OBJ_DIR)/ljc_cut_gpu_cl.h \
-  $(OBJ_DIR)/ljcl_cut_gpu_cl.h $(OBJ_DIR)/crml_gpu_cl.h \
-  $(OBJ_DIR)/cmm_cut_gpu_cl.h $(OBJ_DIR)/cmmc_long_gpu_cl.h
+  $(OBJ_DIR)/ljcl_cut_gpu_cl.h $(OBJ_DIR)/morse_gpu_cl.h \
+  $(OBJ_DIR)/crml_gpu_cl.h $(OBJ_DIR)/cmm_cut_gpu_cl.h \
+  $(OBJ_DIR)/cmmc_long_gpu_cl.h
 
 OCL_EXECS = $(BIN_DIR)/ocl_get_devices
 
@@ -132,6 +134,15 @@ $(OBJ_DIR)/ljcl_cut_gpu_memory.o: $(ALL_H) ljcl_cut_gpu_memory.h ljcl_cut_gpu_me
 $(OBJ_DIR)/ljcl_cut_gpu.o: $(ALL_H) ljcl_cut_gpu_memory.h ljcl_cut_gpu.cpp
 	$(OCL) -o $@ -c ljcl_cut_gpu.cpp -I$(OBJ_DIR)
 
+$(OBJ_DIR)/morse_gpu_cl.h: morse_gpu_kernel.cu
+	$(BSH) ./geryon/file_to_cstr.sh morse_gpu_kernel.cu $(OBJ_DIR)/morse_gpu_cl.h;
+
+$(OBJ_DIR)/morse_gpu_memory.o: $(ALL_H) morse_gpu_memory.h morse_gpu_memory.cpp $(OBJ_DIR)/morse_gpu_cl.h $(OBJ_DIR)/pair_gpu_nbor_cl.h $(OBJ_DIR)/atomic_gpu_memory.o
+	$(OCL) -o $@ -c morse_gpu_memory.cpp -I$(OBJ_DIR)
+
+$(OBJ_DIR)/morse_gpu.o: $(ALL_H) morse_gpu_memory.h morse_gpu.cpp
+	$(OCL) -o $@ -c morse_gpu.cpp -I$(OBJ_DIR)
+
 $(OBJ_DIR)/crml_gpu_cl.h: crml_gpu_kernel.cu
 	$(BSH) ./geryon/file_to_cstr.sh crml_gpu_kernel.cu $(OBJ_DIR)/crml_gpu_cl.h;
 
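Both build paths above funnel kernel text through geryon/file_to_cstr.sh: the CUDA rules embed the nvcc-generated PTX in morse_gpu_ptx.h, while the OpenCL rules embed morse_gpu_kernel.cu itself in morse_gpu_cl.h, so that morse_gpu_memory.cpp (below) can hand the string to the runtime compiler. A sketch of the generated header's assumed shape follows; the symbol name matches what init_atomic() receives, but the exact formatting produced by the script is an assumption:

/* Assumed shape of $(OBJ_DIR)/morse_gpu_ptx.h (or morse_gpu_cl.h): the
   kernel text wrapped into one C string for JIT compilation at runtime. */
const char * morse_gpu_kernel =
  "// first line of the embedded PTX or OpenCL/CUDA source\n"
  "// ...one quoted, escaped line per input line...\n"
  ;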
lib/gpu/morse_gpu.cpp (new file, 123 lines)
@@ -0,0 +1,123 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
------------------------------------------------------------------------- */

#include <iostream>
#include <cassert>
#include <math.h>

#include "morse_gpu_memory.h"

using namespace std;

static MOR_GPU_Memory<PRECISION,ACC_PRECISION> MORMF;

// ---------------------------------------------------------------------------
// Allocate memory on host and device and copy constants to device
// ---------------------------------------------------------------------------
bool mor_gpu_init(const int ntypes, double **cutsq, double **host_morse1,
                  double **host_r0, double **host_alpha, double **host_d0,
                  double **offset, double *special_lj, const int nlocal,
                  const int nall, const int max_nbors, const int maxspecial,
                  const double cell_size, int &gpu_mode, FILE *screen) {
  MORMF.clear();
  gpu_mode=MORMF.device->gpu_mode();
  double gpu_split=MORMF.device->particle_split();
  int first_gpu=MORMF.device->first_device();
  int last_gpu=MORMF.device->last_device();
  int world_me=MORMF.device->world_me();
  int gpu_rank=MORMF.device->gpu_rank();
  int procs_per_gpu=MORMF.device->procs_per_gpu();

  MORMF.device->init_message(screen,"morse",first_gpu,last_gpu);

  bool message=false;
  if (MORMF.device->replica_me()==0 && screen)
    message=true;

  if (message) {
    fprintf(screen,"Initializing GPU and compiling on process 0...");
    fflush(screen);
  }

  // Process 0 initializes first so the kernel string is compiled only once
  if (world_me==0) {
    bool init_ok=MORMF.init(ntypes, cutsq, host_morse1, host_r0, host_alpha,
                            host_d0, offset, special_lj, nlocal, nall, 300,
                            maxspecial, cell_size, gpu_split, screen);
    if (!init_ok)
      return false;
  }

  MORMF.device->world_barrier();
  if (message)
    fprintf(screen,"Done.\n");

  for (int i=0; i<procs_per_gpu; i++) {
    if (message) {
      if (last_gpu-first_gpu==0)
        fprintf(screen,"Initializing GPU %d on core %d...",first_gpu,i);
      else
        fprintf(screen,"Initializing GPUs %d-%d on core %d...",first_gpu,
                last_gpu,i);
      fflush(screen);
    }
    if (gpu_rank==i && world_me!=0) {
      bool init_ok=MORMF.init(ntypes, cutsq, host_morse1, host_r0, host_alpha,
                              host_d0, offset, special_lj, nlocal, nall, 300,
                              maxspecial, cell_size, gpu_split, screen);
      if (!init_ok)
        return false;
    }
    MORMF.device->gpu_barrier();
    if (message)
      fprintf(screen,"Done.\n");
  }
  if (message)
    fprintf(screen,"\n");
  return true;
}

void mor_gpu_clear() {
  MORMF.clear();
}

int * mor_gpu_compute_n(const int ago, const int inum_full,
                        const int nall, double **host_x, int *host_type,
                        double *boxlo, double *boxhi, int *tag, int **nspecial,
                        int **special, const bool eflag, const bool vflag,
                        const bool eatom, const bool vatom, int &host_start,
                        const double cpu_time, bool &success) {
  return MORMF.compute(ago, inum_full, nall, host_x, host_type, boxlo,
                       boxhi, tag, nspecial, special, eflag, vflag, eatom,
                       vatom, host_start, cpu_time, success);
}

void mor_gpu_compute(const int ago, const int inum_full, const int nall,
                     double **host_x, int *host_type, int *ilist, int *numj,
                     int **firstneigh, const bool eflag, const bool vflag,
                     const bool eatom, const bool vatom, int &host_start,
                     const double cpu_time, bool &success) {
  MORMF.compute(ago,inum_full,nall,host_x,host_type,ilist,numj,
                firstneigh,eflag,vflag,eatom,vatom,host_start,cpu_time,
                success);
}

double mor_gpu_bytes() {
  return MORMF.host_memory_usage();
}
lib/gpu/morse_gpu_kernel.cu (new file, 279 lines)
@@ -0,0 +1,279 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
------------------------------------------------------------------------- */

#ifndef MORSE_GPU_KERNEL
#define MORSE_GPU_KERNEL

#define MAX_SHARED_TYPES 8

#ifdef _DOUBLE_DOUBLE
#define numtyp double
#define numtyp2 double2
#define numtyp4 double4
#define acctyp double
#define acctyp4 double4
#endif

#ifdef _SINGLE_DOUBLE
#define numtyp float
#define numtyp2 float2
#define numtyp4 float4
#define acctyp double
#define acctyp4 double4
#endif

#ifndef numtyp
#define numtyp float
#define numtyp2 float2
#define numtyp4 float4
#define acctyp float
#define acctyp4 float4
#endif

#ifdef NV_KERNEL

#include "geryon/ucl_nv_kernel.h"
texture<float4> pos_tex;

#ifdef _DOUBLE_DOUBLE
__inline double4 fetch_pos(const int& i, const double4 *pos)
{
  return pos[i];
}
#else
__inline float4 fetch_pos(const int& i, const float4 *pos)
{
  return tex1Dfetch(pos_tex, i);
}
#endif

#else

#pragma OPENCL EXTENSION cl_khr_fp64: enable
#define GLOBAL_ID_X get_global_id(0)
#define THREAD_ID_X get_local_id(0)
#define BLOCK_ID_X get_group_id(0)
#define BLOCK_SIZE_X get_local_size(0)
#define __syncthreads() barrier(CLK_LOCAL_MEM_FENCE)
#define __inline inline

#define fetch_pos(i,y) x_[i]

#endif

__kernel void kernel_pair(__global numtyp4 *x_, __global numtyp4 *mor1,
                          __global numtyp2 *mor2, const int lj_types,
                          __global numtyp *sp_lj_in, __global int *dev_nbor,
                          __global acctyp4 *ans, __global acctyp *engv,
                          const int eflag, const int vflag, const int inum,
                          const int nall, const int nbor_pitch) {
  // ii indexes the neighbor-list entry (atom i) handled by this thread
  int ii=GLOBAL_ID_X;
  __local numtyp sp_lj[4];
  sp_lj[0]=sp_lj_in[0];
  sp_lj[1]=sp_lj_in[1];
  sp_lj[2]=sp_lj_in[2];
  sp_lj[3]=sp_lj_in[3];

  if (ii<inum) {

    acctyp energy=(acctyp)0;
    acctyp4 f;
    f.x=(acctyp)0;
    f.y=(acctyp)0;
    f.z=(acctyp)0;
    acctyp virial[6];
    for (int i=0; i<6; i++)
      virial[i]=(acctyp)0;

    // Neighbor data is column-major: atom index, neighbor count, then
    // neighbor indices, each strided by nbor_pitch
    __global int *nbor=dev_nbor+ii;
    int i=*nbor;
    nbor+=nbor_pitch;
    int numj=*nbor;
    nbor+=nbor_pitch;
    __global int *list_end=nbor+mul24(numj,nbor_pitch);

    numtyp4 ix=fetch_pos(i,x_); //x_[i];
    int itype=ix.w;

    numtyp factor_lj;
    for ( ; nbor<list_end; nbor+=nbor_pitch) {

      int j=*nbor;
      // Special-bond neighbors are encoded as j+n*nall; the multiple of
      // nall selects the special_lj scaling factor
      if (j < nall)
        factor_lj = (numtyp)1.0;
      else {
        factor_lj = sp_lj[j/nall];
        j %= nall;
      }
      numtyp4 jx=fetch_pos(j,x_); //x_[j];
      int jtype=jx.w;

      // Compute r12
      numtyp delx = ix.x-jx.x;
      numtyp dely = ix.y-jx.y;
      numtyp delz = ix.z-jx.z;
      numtyp rsq = delx*delx+dely*dely+delz*delz;

      int mtype=itype*lj_types+jtype;
      if (rsq<mor1[mtype].x) {
        // Morse: dexp = exp(-alpha*(r-r0)); force/r = morse1*(dexp^2-dexp)/r
        numtyp r = sqrt(rsq);
        numtyp dexp = exp(-mor1[mtype].w*(r-mor1[mtype].z));
        numtyp force = mor1[mtype].y*(dexp*dexp-dexp)/r;
        force*=factor_lj;

        f.x+=delx*force;
        f.y+=dely*force;
        f.z+=delz*force;

        if (eflag>0) {
          numtyp e=mor2[mtype].x*(dexp*dexp-(numtyp)2.0*dexp);
          energy+=factor_lj*(e-mor2[mtype].y);
        }
        if (vflag>0) {
          virial[0] += delx*delx*force;
          virial[1] += dely*dely*force;
          virial[2] += delz*delz*force;
          virial[3] += delx*dely*force;
          virial[4] += delx*delz*force;
          virial[5] += dely*delz*force;
        }
      }

    } // for nbor

    // Store answers
    __global acctyp *ap1=engv+ii;
    if (eflag>0) {
      *ap1=energy;
      ap1+=inum;
    }
    if (vflag>0) {
      for (int i=0; i<6; i++) {
        *ap1=virial[i];
        ap1+=inum;
      }
    }
    ans[ii]=f;
  } // if ii
}

__kernel void kernel_pair_fast(__global numtyp4 *x_, __global numtyp4 *mor1_in,
                               __global numtyp2 *mor2_in,
                               __global numtyp *sp_lj_in, __global int *dev_nbor,
                               __global acctyp4 *ans, __global acctyp *engv,
                               const int eflag, const int vflag, const int inum,
                               const int nall, const int nbor_pitch) {
  // Thread index within the block; used first to cooperatively load the
  // per-type coefficients into local (shared) memory
  int ii=THREAD_ID_X;
  __local numtyp4 mor1[MAX_SHARED_TYPES*MAX_SHARED_TYPES];
  __local numtyp2 mor2[MAX_SHARED_TYPES*MAX_SHARED_TYPES];
  __local numtyp sp_lj[4];
  if (ii<4)
    sp_lj[ii]=sp_lj_in[ii];
  if (ii<MAX_SHARED_TYPES*MAX_SHARED_TYPES) {
    mor1[ii]=mor1_in[ii];
    if (eflag>0)
      mor2[ii]=mor2_in[ii];   // d0 and offset are only needed for energy
  }
  ii+=mul24((int)BLOCK_ID_X,(int)BLOCK_SIZE_X);
  __syncthreads();

  if (ii<inum) {

    acctyp energy=(acctyp)0;
    acctyp4 f;
    f.x=(acctyp)0;
    f.y=(acctyp)0;
    f.z=(acctyp)0;
    acctyp virial[6];
    for (int i=0; i<6; i++)
      virial[i]=(acctyp)0;

    __global int *nbor=dev_nbor+ii;
    int i=*nbor;
    nbor+=nbor_pitch;
    int numj=*nbor;
    nbor+=nbor_pitch;
    __global int *list_end=nbor+mul24(numj,nbor_pitch);

    numtyp4 ix=fetch_pos(i,x_); //x_[i];
    int iw=ix.w;
    int itype=mul24((int)MAX_SHARED_TYPES,iw);

    numtyp factor_lj;
    for ( ; nbor<list_end; nbor+=nbor_pitch) {

      int j=*nbor;
      if (j < nall)
        factor_lj = (numtyp)1.0;
      else {
        factor_lj = sp_lj[j/nall];
        j %= nall;
      }
      numtyp4 jx=fetch_pos(j,x_); //x_[j];
      int mtype=itype+jx.w;

      // Compute r12
      numtyp delx = ix.x-jx.x;
      numtyp dely = ix.y-jx.y;
      numtyp delz = ix.z-jx.z;
      numtyp rsq = delx*delx+dely*dely+delz*delz;

      if (rsq<mor1[mtype].x) {
        numtyp r = sqrt(rsq);
        numtyp dexp = exp(-mor1[mtype].w*(r-mor1[mtype].z));
        numtyp force = factor_lj*mor1[mtype].y*(dexp*dexp-dexp)/r;

        f.x+=delx*force;
        f.y+=dely*force;
        f.z+=delz*force;

        if (eflag>0) {
          numtyp e=mor2[mtype].x*(dexp*dexp-(numtyp)2.0*dexp);
          energy+=factor_lj*(e-mor2[mtype].y);
        }
        if (vflag>0) {
          virial[0] += delx*delx*force;
          virial[1] += dely*dely*force;
          virial[2] += delz*delz*force;
          virial[3] += delx*dely*force;
          virial[4] += delx*delz*force;
          virial[5] += dely*delz*force;
        }
      }

    } // for nbor

    // Store answers
    __global acctyp *ap1=engv+ii;
    if (eflag>0) {
      *ap1=energy;
      ap1+=inum;
    }
    if (vflag>0) {
      for (int i=0; i<6; i++) {
        *ap1=virial[i];
        ap1+=inum;
      }
    }
    ans[ii]=f;
  } // if ii
}

#endif
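Because the commit message flags the kernels as untested, here is a minimal standalone reference check (not part of the commit; the coefficient values are hypothetical): it prints the fpair and evdwl values the kernels above should reproduce for a single pair, using the same expressions as PairMorse::cpu_compute further below.

// morse_check.cpp -- standalone sanity check (sketch, not part of LAMMPS).
// It performs no GPU comparison itself; it just prints the reference
// values a correct Morse kernel must reproduce for one pair of atoms.
#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical coefficients: D0, alpha, r0, and cutoff rc
  double d0=1.0, alpha=2.0, r0=1.2, rc=2.5;
  double morse1=2.0*alpha*d0;                     // same as PairMorse::morse1
  double drc=rc-r0, dexp_c=exp(-alpha*drc);
  double offset=d0*(dexp_c*dexp_c - 2.0*dexp_c);  // energy shift at cutoff

  double r=1.5;                                   // test separation
  if (r*r < rc*rc) {
    double dexp=exp(-alpha*(r-r0));
    double fpair=morse1*(dexp*dexp - dexp)/r;     // force/r, as in cpu_compute
    double evdwl=d0*(dexp*dexp - 2.0*dexp) - offset;
    printf("r=%g fpair=%g evdwl=%g\n",r,fpair,evdwl);
  }
  return 0;
}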
lib/gpu/morse_gpu_memory.cpp (new file, 150 lines)
@@ -0,0 +1,150 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
------------------------------------------------------------------------- */

#ifdef USE_OPENCL
#include "morse_gpu_cl.h"
#else
#include "morse_gpu_ptx.h"
#endif

#include "morse_gpu_memory.h"
#include <cassert>
#define MOR_GPU_MemoryT MOR_GPU_Memory<numtyp, acctyp>

extern PairGPUDevice<PRECISION,ACC_PRECISION> pair_gpu_device;

template <class numtyp, class acctyp>
MOR_GPU_MemoryT::MOR_GPU_Memory() : AtomicGPUMemory<numtyp,acctyp>(),
                                    _allocated(false) {
}

template <class numtyp, class acctyp>
MOR_GPU_MemoryT::~MOR_GPU_Memory() {
  clear();
}

template <class numtyp, class acctyp>
int MOR_GPU_MemoryT::bytes_per_atom(const int max_nbors) const {
  return this->bytes_per_atom_atomic(max_nbors);
}

template <class numtyp, class acctyp>
bool MOR_GPU_MemoryT::init(const int ntypes,
                           double **host_cutsq, double **host_morse1,
                           double **host_r0, double **host_alpha,
                           double **host_d0, double **host_offset,
                           double *host_special_lj, const int nlocal,
                           const int nall, const int max_nbors,
                           const int maxspecial, const double cell_size,
                           const double gpu_split, FILE *_screen) {
  this->init_atomic(nlocal,nall,max_nbors,maxspecial,cell_size,gpu_split,
                    _screen,morse_gpu_kernel);

  // If atom type constants fit in shared memory use fast kernel
  int types=ntypes;
  shared_types=false;
  if (types<=MAX_SHARED_TYPES && this->_block_size>=MAX_SHARED_TYPES) {
    types=MAX_SHARED_TYPES;
    shared_types=true;
  }
  _types=types;

  // Allocate a host write buffer for data initialization
  UCL_H_Vec<numtyp> host_write(types*types*32,*(this->ucl_device),
                               UCL_WRITE_OPTIMIZED);

  for (int i=0; i<types*types; i++)
    host_write[i]=0.0;

  // mor1.x = cutsq, mor1.y = morse1, mor1.z = r0, mor1.w = alpha
  mor1.alloc(types*types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack4(ntypes,types,mor1,host_write,host_cutsq,host_morse1,
                         host_r0,host_alpha);

  // mor2.x = d0, mor2.y = offset
  mor2.alloc(types*types,*(this->ucl_device),UCL_READ_ONLY);
  this->atom->type_pack2(ntypes,types,mor2,host_write,host_d0,host_offset);

  UCL_H_Vec<double> dview;
  sp_lj.alloc(4,*(this->ucl_device),UCL_READ_ONLY);
  dview.view(host_special_lj,4,*(this->ucl_device));
  ucl_copy(sp_lj,dview,false);

  _allocated=true;
  this->_max_bytes=mor1.row_bytes()+mor2.row_bytes()+sp_lj.row_bytes();
  return true;
}
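type_pack4 and type_pack2 are helpers from pair_gpu_atom; their exact implementation is not part of this commit. A conceptual, standalone sketch of the flattening type_pack4 is assumed to perform (the real helper also copies the result to the device buffer, and LAMMPS type indices are 1-based, hence the ntypes+1 rows passed in from the pair style; this sketch uses 0-based arrays for brevity):

// Assumed behavior of type_pack4 (sketch, not the real code): mor1[i*types+j]
// ends up holding {cutsq, morse1, r0, alpha} for the type pair (i,j), with
// rows padded from ntypes up to the shared-memory size `types`.
#include <vector>

struct n4 { float x,y,z,w; };

std::vector<n4> pack4(int ntypes, int types, double **m1, double **m2,
                      double **m3, double **m4) {
  std::vector<n4> out(types*types, n4{0,0,0,0});   // padded, zero-filled
  for (int i=0; i<ntypes; i++)
    for (int j=0; j<ntypes; j++)
      out[i*types+j] = n4{(float)m1[i][j], (float)m2[i][j],
                          (float)m3[i][j], (float)m4[i][j]};
  return out;                                      // then copied to device
}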

template <class numtyp, class acctyp>
void MOR_GPU_MemoryT::clear() {
  if (!_allocated)
    return;
  _allocated=false;

  mor1.clear();
  mor2.clear();
  sp_lj.clear();
  this->clear_atomic();
}

template <class numtyp, class acctyp>
double MOR_GPU_MemoryT::host_memory_usage() const {
  return this->host_memory_usage_atomic()+sizeof(MOR_GPU_Memory<numtyp,acctyp>);
}

// ---------------------------------------------------------------------------
// Calculate energies, forces, and virials
// ---------------------------------------------------------------------------
template <class numtyp, class acctyp>
void MOR_GPU_MemoryT::loop(const bool _eflag, const bool _vflag) {
  // Compute the block size and grid size to keep all cores busy
  const int BX=this->block_size();
  int eflag, vflag;
  if (_eflag)
    eflag=1;
  else
    eflag=0;

  if (_vflag)
    vflag=1;
  else
    vflag=0;

  int GX=static_cast<int>(ceil(static_cast<double>(this->ans->inum())/BX));

  int ainum=this->ans->inum();
  int anall=this->atom->nall();
  int nbor_pitch=this->nbor->nbor_pitch();
  this->time_pair.start();
  if (shared_types) {
    this->k_pair_fast.set_size(GX,BX);
    this->k_pair_fast.run(&this->atom->dev_x.begin(), &mor1.begin(),
                          &mor2.begin(), &sp_lj.begin(),
                          &this->nbor->dev_nbor.begin(),
                          &this->ans->dev_ans.begin(),
                          &this->ans->dev_engv.begin(), &eflag, &vflag,
                          &ainum, &anall, &nbor_pitch);
  } else {
    this->k_pair.set_size(GX,BX);
    this->k_pair.run(&this->atom->dev_x.begin(), &mor1.begin(), &mor2.begin(),
                     &_types, &sp_lj.begin(), &this->nbor->dev_nbor.begin(),
                     &this->ans->dev_ans.begin(),
                     &this->ans->dev_engv.begin(), &eflag, &vflag, &ainum,
                     &anall, &nbor_pitch);
  }
  this->time_pair.stop();
}

template class MOR_GPU_Memory<PRECISION,ACC_PRECISION>;
lib/gpu/morse_gpu_memory.h (new file, 71 lines)
@@ -0,0 +1,71 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
------------------------------------------------------------------------- */

#ifndef MOR_GPU_MEMORY_H
#define MOR_GPU_MEMORY_H

#include "atomic_gpu_memory.h"

template <class numtyp, class acctyp>
class MOR_GPU_Memory : public AtomicGPUMemory<numtyp, acctyp> {
 public:
  MOR_GPU_Memory();
  ~MOR_GPU_Memory();

  /// Clear any previous data and set up for a new LAMMPS run
  /** \param max_nbors initial number of rows in the neighbor matrix
    * \param cell_size cutoff + skin
    * \param gpu_split fraction of particles handled by device **/
  bool init(const int ntypes, double **host_cutsq,
            double **host_morse1, double **host_r0, double **host_alpha,
            double **host_d0, double **host_offset, double *host_special_lj,
            const int nlocal, const int nall, const int max_nbors,
            const int maxspecial, const double cell_size,
            const double gpu_split, FILE *screen);

  /// Clear all host and device data
  /** \note This is called at the beginning of the init() routine **/
  void clear();

  /// Returns memory usage on device per atom
  int bytes_per_atom(const int max_nbors) const;

  /// Total host memory used by library for pair style
  double host_memory_usage() const;

  // --------------------------- TYPE DATA --------------------------

  /// mor1.x = cutsq, mor1.y = morse1, mor1.z = r0, mor1.w = alpha
  UCL_D_Vec<numtyp4> mor1;
  /// mor2.x = d0, mor2.y = offset
  UCL_D_Vec<numtyp2> mor2;
  /// Special LJ values
  UCL_D_Vec<numtyp> sp_lj;

  /// If atom type constants fit in shared memory, use fast kernels
  bool shared_types;

  /// Number of atom types
  int _types;

 private:
  bool _allocated;
  void loop(const bool _eflag, const bool _vflag);
};

#endif
@@ -49,11 +49,13 @@ if (test $1 = 1) then
 fi
 
 cp pair_lj_cut_gpu.cpp ..
+cp pair_morse_gpu.cpp ..
 cp pair_lj96_cut_gpu.cpp ..
 cp pair_lj_expand_gpu.cpp ..
 cp pair_lj_cut_coul_cut_gpu.cpp ..
 cp pair_lj_cut_tgpu.cpp ..
 cp pair_lj_cut_gpu.h ..
+cp pair_morse_gpu.h ..
 cp pair_lj96_cut_gpu.h ..
 cp pair_lj_expand_gpu.h ..
 cp pair_lj_cut_coul_cut_gpu.h ..
@@ -75,6 +77,7 @@ elif (test $1 = 0) then
 rm ../pppm_gpu.cpp
 rm ../pair_gayberne_gpu.cpp
 rm ../pair_lj_cut_gpu.cpp
+rm ../pair_morse_gpu.cpp
 rm ../pair_lj96_cut_gpu.cpp
 rm ../pair_lj_expand_gpu.cpp
 rm ../pair_lj_cut_coul_cut_gpu.cpp
@@ -91,6 +94,7 @@ elif (test $1 = 0) then
 rm ../pppm_gpu.h
 rm ../pair_gayberne_gpu.h
 rm ../pair_lj_cut_gpu.h
+rm ../pair_morse_gpu.h
 rm ../pair_lj96_cut_gpu.h
 rm ../pair_lj_expand_gpu.h
 rm ../pair_lj_cut_coul_cut_gpu.h
src/GPU/pair_morse_gpu.cpp (new file, 311 lines)
@@ -0,0 +1,311 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing author: Mike Brown (SNL)
------------------------------------------------------------------------- */

#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#include "pair_morse_gpu.h"
#include "lmptype.h"
#include "atom.h"
#include "atom_vec.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "integrate.h"
#include "memory.h"
#include "error.h"
#include "neigh_request.h"
#include "universe.h"
#include "update.h"
#include "domain.h"
#include "string.h"

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))

// External functions from cuda library for atom decomposition

bool mor_gpu_init(const int ntypes, double **cutsq, double **host_morse1,
                  double **host_r0, double **host_alpha, double **host_d0,
                  double **offset, double *special_lj, const int nlocal,
                  const int nall, const int max_nbors, const int maxspecial,
                  const double cell_size, int &gpu_mode, FILE *screen);
void mor_gpu_clear();
int * mor_gpu_compute_n(const int ago, const int inum,
                        const int nall, double **host_x, int *host_type,
                        double *boxlo, double *boxhi, int *tag, int **nspecial,
                        int **special, const bool eflag, const bool vflag,
                        const bool eatom, const bool vatom, int &host_start,
                        const double cpu_time, bool &success);
void mor_gpu_compute(const int ago, const int inum, const int nall,
                     double **host_x, int *host_type, int *ilist, int *numj,
                     int **firstneigh, const bool eflag, const bool vflag,
                     const bool eatom, const bool vatom, int &host_start,
                     const double cpu_time, bool &success);
double mor_gpu_bytes();

using namespace LAMMPS_NS;

/* ---------------------------------------------------------------------- */

PairMorseGPU::PairMorseGPU(LAMMPS *lmp) : PairMorse(lmp), gpu_mode(GPU_PAIR)
{
  cpu_time = 0.0;
}

/* ----------------------------------------------------------------------
   free all arrays
------------------------------------------------------------------------- */

PairMorseGPU::~PairMorseGPU()
{
  mor_gpu_clear();
}

/* ---------------------------------------------------------------------- */

void PairMorseGPU::compute(int eflag, int vflag)
{
  if (eflag || vflag) ev_setup(eflag,vflag);
  else evflag = vflag_fdotr = 0;

  int nall = atom->nlocal + atom->nghost;
  int inum, host_start;

  bool success = true;

  if (gpu_mode == GPU_NEIGH) {
    inum = atom->nlocal;
    gpulist = mor_gpu_compute_n(neighbor->ago, inum, nall,
                                atom->x, atom->type, domain->sublo,
                                domain->subhi, atom->tag, atom->nspecial,
                                atom->special, eflag, vflag, eflag_atom,
                                vflag_atom, host_start, cpu_time, success);
  } else {
    inum = list->inum;
    mor_gpu_compute(neighbor->ago, inum, nall, atom->x, atom->type,
                    list->ilist, list->numneigh, list->firstneigh, eflag,
                    vflag, eflag_atom, vflag_atom, host_start, cpu_time,
                    success);
  }
  if (!success)
    error->one("Out of memory on GPGPU");

  // atoms from host_start onward are handled on the CPU while the device
  // works on the rest
  if (host_start<inum) {
    cpu_time = MPI_Wtime();
    if (gpu_mode == GPU_NEIGH)
      cpu_compute(gpulist, host_start, eflag, vflag);
    else
      cpu_compute(host_start, eflag, vflag);
    cpu_time = MPI_Wtime() - cpu_time;
  }
}

/* ----------------------------------------------------------------------
   init specific to this pair style
------------------------------------------------------------------------- */

void PairMorseGPU::init_style()
{
  if (force->newton_pair)
    error->all("Cannot use newton pair with GPU Morse pair style");

  // Repeat the cutsq calculation here because init_one() is normally
  // called after init_style()
  double maxcut = -1.0;
  double cut;
  for (int i = 1; i <= atom->ntypes; i++) {
    for (int j = i; j <= atom->ntypes; j++) {
      if (setflag[i][j] != 0 || (setflag[i][i] != 0 && setflag[j][j] != 0)) {
        cut = init_one(i,j);
        cut *= cut;
        if (cut > maxcut)
          maxcut = cut;
        cutsq[i][j] = cutsq[j][i] = cut;
      } else
        cutsq[i][j] = cutsq[j][i] = 0.0;
    }
  }
  double cell_size = sqrt(maxcut) + neighbor->skin;

  int maxspecial=0;
  if (atom->molecular)
    maxspecial=atom->maxspecial;
  bool init_ok = mor_gpu_init(atom->ntypes+1, cutsq, morse1, r0, alpha, d0,
                              offset, force->special_lj, atom->nlocal,
                              atom->nlocal+atom->nghost, 300, maxspecial,
                              cell_size, gpu_mode, screen);
  if (!init_ok)
    error->one("Insufficient memory on accelerator (or no fix gpu).\n");

  if (gpu_mode != GPU_NEIGH) {
    int irequest = neighbor->request(this);
    neighbor->requests[irequest]->half = 0;
    neighbor->requests[irequest]->full = 1;
  }
}

/* ---------------------------------------------------------------------- */

double PairMorseGPU::memory_usage()
{
  double bytes = Pair::memory_usage();
  return bytes + mor_gpu_bytes();
}

/* ---------------------------------------------------------------------- */

void PairMorseGPU::cpu_compute(int start, int eflag, int vflag) {
  int i,j,ii,jj,inum,jnum,itype,jtype;
  double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair;
  double rsq,r,dr,dexp,factor_lj;
  int *ilist,*jlist,*numneigh,**firstneigh;

  evdwl = 0.0;

  double **x = atom->x;
  double **f = atom->f;
  int *type = atom->type;
  int nlocal = atom->nlocal;
  int nall = nlocal + atom->nghost;
  double *special_lj = force->special_lj;

  inum = list->inum;
  ilist = list->ilist;
  numneigh = list->numneigh;
  firstneigh = list->firstneigh;

  // loop over neighbors of my atoms

  for (ii = start; ii < inum; ii++) {
    i = ilist[ii];
    xtmp = x[i][0];
    ytmp = x[i][1];
    ztmp = x[i][2];
    itype = type[i];
    jlist = firstneigh[i];
    jnum = numneigh[i];

    for (jj = 0; jj < jnum; jj++) {
      j = jlist[jj];

      // special-bond neighbors are encoded as j+n*nall
      if (j < nall) factor_lj = 1.0;
      else {
        factor_lj = special_lj[j/nall];
        j %= nall;
      }

      delx = xtmp - x[j][0];
      dely = ytmp - x[j][1];
      delz = ztmp - x[j][2];
      rsq = delx*delx + dely*dely + delz*delz;
      jtype = type[j];

      if (rsq < cutsq[itype][jtype]) {
        r = sqrt(rsq);
        dr = r - r0[itype][jtype];
        dexp = exp(-alpha[itype][jtype] * dr);
        fpair = factor_lj * morse1[itype][jtype] * (dexp*dexp - dexp) / r;

        f[i][0] += delx*fpair;
        f[i][1] += dely*fpair;
        f[i][2] += delz*fpair;

        if (eflag) {
          evdwl = d0[itype][jtype] * (dexp*dexp - 2.0*dexp) -
                  offset[itype][jtype];
          evdwl *= factor_lj;
        }

        if (evflag) ev_tally_full(i,evdwl,0.0,fpair,delx,dely,delz);
      }
    }
  }
}

/* ---------------------------------------------------------------------- */

void PairMorseGPU::cpu_compute(int *nbors, int start, int eflag, int vflag) {
  int i,j,itype,jtype;
  int nlocal = atom->nlocal;
  int nall = nlocal + atom->nghost;
  int stride = nlocal-start;
  double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair;
  double rsq,r,dr,dexp,factor_lj;

  evdwl = 0.0;

  double **x = atom->x;
  double **f = atom->f;
  int *type = atom->type;
  double *special_lj = force->special_lj;

  // loop over neighbors of my atoms

  for (i = start; i < nlocal; i++) {
    xtmp = x[i][0];
    ytmp = x[i][1];
    ztmp = x[i][2];
    itype = type[i];
    // nbors is the column-major list returned by the device: the neighbor
    // count comes first, then indices strided by nlocal-start
    int *nbor = nbors + i - start;
    int jnum = *nbor;
    nbor += stride;
    int *nbor_end = nbor + stride * jnum;

    for (; nbor<nbor_end; nbor+=stride) {
      j = *nbor;

      if (j < nall) factor_lj = 1.0;
      else {
        factor_lj = special_lj[j/nall];
        j %= nall;
      }

      delx = xtmp - x[j][0];
      dely = ytmp - x[j][1];
      delz = ztmp - x[j][2];
      rsq = delx*delx + dely*dely + delz*delz;
      jtype = type[j];

      if (rsq < cutsq[itype][jtype]) {
        r = sqrt(rsq);
        dr = r - r0[itype][jtype];
        dexp = exp(-alpha[itype][jtype] * dr);
        fpair = factor_lj * morse1[itype][jtype] * (dexp*dexp - dexp) / r;

        f[i][0] += delx*fpair;
        f[i][1] += dely*fpair;
        f[i][2] += delz*fpair;

        if (eflag) {
          evdwl = d0[itype][jtype] * (dexp*dexp - 2.0*dexp) -
                  offset[itype][jtype];
          evdwl *= factor_lj;
        }

        if (j<start) {
          if (evflag) ev_tally_full(i,evdwl,0.0,fpair,delx,dely,delz);
        } else {
          if (j<nlocal) {
            f[j][0] -= delx*fpair;
            f[j][1] -= dely*fpair;
            f[j][2] -= delz*fpair;
          }
          if (evflag) ev_tally(i,j,nlocal,0,
                               evdwl,0.0,fpair,delx,dely,delz);
        }
      }
    }
  }
}
src/GPU/pair_morse_gpu.h (new file, 48 lines)
@@ -0,0 +1,48 @@
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#ifdef PAIR_CLASS

PairStyle(morse/gpu,PairMorseGPU)

#else

#ifndef LMP_PAIR_MORSE_GPU_H
#define LMP_PAIR_MORSE_GPU_H

#include "pair_morse.h"

namespace LAMMPS_NS {

class PairMorseGPU : public PairMorse {
 public:
  PairMorseGPU(LAMMPS *lmp);
  ~PairMorseGPU();
  void cpu_compute(int, int, int);
  void cpu_compute(int *, int, int, int);
  void compute(int, int);
  void init_style();
  double memory_usage();

  enum { GPU_PAIR, GPU_NEIGH };

 private:
  int gpu_mode;
  double cpu_time;
  int *gpulist;
};

}
#endif
#endif
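With the PairStyle registration above, the style becomes selectable from an input script once the GPU package is installed and a gpu fix is active (the error path in init_style() hints at that requirement). A minimal, hypothetical input fragment follows; the coefficients are D0, alpha, r0 as for pair_style morse, and the fix gpu arguments of this LAMMPS version are deliberately elided:

# hypothetical usage sketch (untested, like the commit itself)
fix         0 all gpu ...          # enable the GPU library; arguments elided
pair_style  morse/gpu 2.5          # same args as pair_style morse: global cutoff
pair_coeff  1 1 1.0 2.0 1.2        # D0 alpha r0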