lammps/lib/gpu/cmm_cut_gpu.cpp
W. Michael Brown 71ff7b8096 Updating all code to use full neighbor lists for all host force
computations. Changed the load-balancing scheme. Added timing estimates
for GPU and driver overhead and CPU idle time.
2011-03-28 13:18:39 -04:00


/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing authors: Mike Brown (ORNL), brownw@ornl.gov
------------------------------------------------------------------------- */

#include <iostream>
#include <cassert>
#include <cmath>

#include "cmm_cut_gpu_memory.h"

using namespace std;

// Single static instance shared by all of the C-style wrappers below
static CMM_GPU_Memory<PRECISION,ACC_PRECISION> CMMMF;

// ---------------------------------------------------------------------------
// Allocate memory on host and device and copy constants to device
// ---------------------------------------------------------------------------
bool cmm_gpu_init(const int ntypes, double **cutsq, int **cg_types,
                  double **host_lj1, double **host_lj2, double **host_lj3,
                  double **host_lj4, double **offset, double *special_lj,
                  const int inum, const int nall, const int max_nbors,
                  const int maxspecial, const double cell_size, int &gpu_mode,
                  FILE *screen) {
  CMMMF.clear();
  gpu_mode=CMMMF.device->gpu_mode();
  double gpu_split=CMMMF.device->particle_split();
  int first_gpu=CMMMF.device->first_device();
  int last_gpu=CMMMF.device->last_device();
  int world_me=CMMMF.device->world_me();
  int gpu_rank=CMMMF.device->gpu_rank();
  int procs_per_gpu=CMMMF.device->procs_per_gpu();

  CMMMF.device->init_message(screen,"cg/cmm",first_gpu,last_gpu);

  bool message=false;
  if (CMMMF.device->replica_me()==0 && screen)
    message=true;

  if (message) {
    fprintf(screen,"Initializing GPU and compiling on process 0...");
    fflush(screen);
  }

  // Process 0 initializes (and compiles the kernels) first; note that a
  // fixed 300 is passed for the per-atom neighbor bound in place of max_nbors
  if (world_me==0) {
    bool init_ok=CMMMF.init(ntypes, cutsq, cg_types, host_lj1, host_lj2,
                            host_lj3, host_lj4, offset, special_lj, inum,
                            nall, 300, maxspecial, cell_size, gpu_split,
                            screen);
    if (!init_ok)
      return false;
  }

  CMMMF.device->world_barrier();
  if (message)
    fprintf(screen,"Done.\n");

  // Remaining processes sharing each GPU initialize one rank at a time
  for (int i=0; i<procs_per_gpu; i++) {
    if (message) {
      if (last_gpu-first_gpu==0)
        fprintf(screen,"Initializing GPU %d on core %d...",first_gpu,i);
      else
        fprintf(screen,"Initializing GPUs %d-%d on core %d...",first_gpu,
                last_gpu,i);
      fflush(screen);
    }
    if (gpu_rank==i && world_me!=0) {
      bool init_ok=CMMMF.init(ntypes, cutsq, cg_types, host_lj1, host_lj2,
                              host_lj3, host_lj4, offset, special_lj, inum,
                              nall, 300, maxspecial, cell_size, gpu_split,
                              screen);
      if (!init_ok)
        return false;
    }
    CMMMF.device->gpu_barrier();
    if (message)
      fprintf(screen,"Done.\n");
  }
  if (message)
    fprintf(screen,"\n");

  CMMMF.estimate_gpu_overhead();
  return true;
}
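
// ---------------------------------------------------------------------------
// Clear all host and device memory
// ---------------------------------------------------------------------------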
void cmm_gpu_clear() {
  CMMMF.clear();
}
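
// ---------------------------------------------------------------------------
// Compute forces, building the neighbor list on the device
// ---------------------------------------------------------------------------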
int** cmm_gpu_compute_n(const int ago, const int inum_full,
                        const int nall, double **host_x, int *host_type,
                        double *boxlo, double *boxhi, int *tag, int **nspecial,
                        int **special, const bool eflag, const bool vflag,
                        const bool eatom, const bool vatom, int &host_start,
                        int **ilist, int **jnum, const double cpu_time,
                        bool &success) {
  return CMMMF.compute(ago, inum_full, nall, host_x, host_type, boxlo,
                       boxhi, tag, nspecial, special, eflag, vflag, eatom,
                       vatom, host_start, ilist, jnum, cpu_time, success);
}
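
// ---------------------------------------------------------------------------
// Compute forces using a neighbor list built on the host
// ---------------------------------------------------------------------------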
void cmm_gpu_compute(const int ago, const int inum_full, const int nall,
                     double **host_x, int *host_type, int *ilist, int *numj,
                     int **firstneigh, const bool eflag, const bool vflag,
                     const bool eatom, const bool vatom, int &host_start,
                     const double cpu_time, bool &success) {
  CMMMF.compute(ago, inum_full, nall, host_x, host_type, ilist, numj,
                firstneigh, eflag, vflag, eatom, vatom, host_start,
                cpu_time, success);
}
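
// ---------------------------------------------------------------------------
// Return the host memory usage of the pair style in bytes
// ---------------------------------------------------------------------------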
double cmm_gpu_bytes() {
  return CMMMF.host_memory_usage();
}
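
For orientation, below is a minimal sketch of how a caller might drive this interface in the host-neighbor-list mode: init once, call compute every timestep, clear at teardown. It is hypothetical and not taken from LAMMPS. It assumes the prototypes above are visible, and every variable, the maxspecial=0 argument, and the 300 neighbor bound are placeholders for illustration; the real caller is the corresponding GPU pair style in the LAMMPS src tree.

// Hypothetical driver sketch -- not part of cmm_cut_gpu.cpp; all inputs
// are illustrative placeholders
void example_lifecycle(int ntypes, double **cutsq, int **cg_types,
                       double **lj1, double **lj2, double **lj3, double **lj4,
                       double **offset, double *special_lj, int inum, int nall,
                       double cell_size, FILE *screen, double **x, int *type,
                       int *ilist, int *numj, int **firstneigh) {
  int gpu_mode;
  // One-time setup: copies the coefficient tables to the device and sizes
  // buffers for inum local / nall total atoms
  if (!cmm_gpu_init(ntypes, cutsq, cg_types, lj1, lj2, lj3, lj4, offset,
                    special_lj, inum, nall, 300, 0, cell_size, gpu_mode,
                    screen))
    return;

  // Per-timestep force evaluation with a host-built neighbor list;
  // ago=0 indicates the list was rebuilt on this step
  int host_start;
  bool success;
  cmm_gpu_compute(0, inum, nall, x, type, ilist, numj, firstneigh,
                  true, true, false, false, host_start, 0.0, success);

  // Teardown: release host and device buffers
  cmm_gpu_clear();
}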