grid class name changes
@@ -227,7 +227,7 @@ void FixTTM::init()
 
   // to allow this, would have to reset grid bounds dynamically
   // for RCB balancing would have to reassign grid pts to procs
-  // and create a new GridComm, and pass old GC data to new GC
+  // and create a new Grid3d, and pass old GC data to new GC
 
   if (domain->box_change)
     error->all(FLERR,"Cannot use fix ttm with changing box shape, size, or sub-domains");

@@ -23,7 +23,7 @@
 #include "comm.h"
 #include "domain.h"
 #include "error.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "memory.h"
 #include "neighbor.h"
 #include "random_mars.h"

@@ -86,7 +86,7 @@ void FixTTMGrid::post_constructor()
 
   if (infile) {
     read_electron_temperatures(infile);
-    gc->forward_comm(GridComm::FIX,this,1,sizeof(double),0,gc_buf1,gc_buf2,MPI_DOUBLE);
+    gc->forward_comm(Grid3d::FIX,this,1,sizeof(double),0,gc_buf1,gc_buf2,MPI_DOUBLE);
   }
 }

@@ -193,7 +193,7 @@ void FixTTMGrid::end_of_step()
                          flangevin[i][2]*v[i][2]);
   }
 
-  gc->reverse_comm(GridComm::FIX,this,1,sizeof(double),0,
+  gc->reverse_comm(Grid3d::FIX,this,1,sizeof(double),0,
                    gc_buf1,gc_buf2,MPI_DOUBLE);
 
   // clang-format off

@@ -246,7 +246,7 @@ void FixTTMGrid::end_of_step()
 
   // communicate new T_electron values to ghost grid points
 
-  gc->forward_comm(GridComm::FIX,this,1,sizeof(double),0,gc_buf1,gc_buf2,MPI_DOUBLE);
+  gc->forward_comm(Grid3d::FIX,this,1,sizeof(double),0,gc_buf1,gc_buf2,MPI_DOUBLE);
 }
 
 // clang-format on

@@ -363,7 +363,7 @@ void FixTTMGrid::write_electron_temperatures(const std::string &filename)
                style);
   }
 
-  gc->gather(GridComm::FIX, this, 1, sizeof(double), 1, nullptr, MPI_DOUBLE);
+  gc->gather(Grid3d::FIX, this, 1, sizeof(double), 1, nullptr, MPI_DOUBLE);
 
   if (comm->me == 0) fclose(FPout);
 }

@@ -468,8 +468,8 @@ void FixTTMGrid::allocate_grid()
   totalmine = (bigint) (nxhi_in - nxlo_in + 1) * (nyhi_in - nylo_in + 1) * (nzhi_in - nzlo_in + 1);
   ngridmine = totalmine;
 
-  gc = new GridComm(lmp, world, nxgrid, nygrid, nzgrid, nxlo_in, nxhi_in, nylo_in, nyhi_in, nzlo_in,
-                    nzhi_in, nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out);
+  gc = new Grid3d(lmp, world, nxgrid, nygrid, nzgrid, nxlo_in, nxhi_in, nylo_in, nyhi_in, nzlo_in,
+                  nzhi_in, nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out);
 
   gc->setup(ngc_buf1, ngc_buf2);

@@ -517,7 +517,7 @@ void FixTTMGrid::write_restart(FILE *fp)
 
   // gather rest of rlist on proc 0 as global grid values
 
-  gc->gather(GridComm::FIX, this, 1, sizeof(double), 0, &rlist[4], MPI_DOUBLE);
+  gc->gather(Grid3d::FIX, this, 1, sizeof(double), 0, &rlist[4], MPI_DOUBLE);
 
   if (comm->me == 0) {
     int size = rsize * sizeof(double);

@@ -568,7 +568,7 @@ void FixTTMGrid::restart(char *buf)
 
   // communicate new T_electron values to ghost grid points
 
-  gc->forward_comm(GridComm::FIX, this, 1, sizeof(double), 0, gc_buf1, gc_buf2, MPI_DOUBLE);
+  gc->forward_comm(Grid3d::FIX, this, 1, sizeof(double), 0, gc_buf1, gc_buf2, MPI_DOUBLE);
 }
 
 /* ----------------------------------------------------------------------

@@ -55,7 +55,7 @@ class FixTTMGrid : public FixTTM {
   double skin_original;
   FILE *FPout;
 
-  class GridComm *gc;
+  class Grid3d *gc;
   int ngc_buf1, ngc_buf2;
   double *gc_buf1, *gc_buf2;
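Taken together, the FixTTMGrid hunks above show the caller-side life cycle of the renamed class. A minimal sketch of that pattern, assuming the member names from the header hunk above; the memory->create() buffer-allocation lines are an assumption (the usual LAMMPS idiom), not part of this diff:

  // construct grid decomposition: global size + my owned and owned+ghost extents
  gc = new Grid3d(lmp, world, nxgrid, nygrid, nzgrid,
                  nxlo_in, nxhi_in, nylo_in, nyhi_in, nzlo_in, nzhi_in,
                  nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out);
  gc->setup(ngc_buf1, ngc_buf2);    // returns required sizes of the 2 comm buffers
  memory->create(gc_buf1, ngc_buf1, "ttm/grid:gc_buf1");   // assumed allocation
  memory->create(gc_buf2, ngc_buf2, "ttm/grid:gc_buf2");

  // owned -> ghost comm of 1 double per grid point; Grid3d::FIX tells Grid3d
  // to route the pack/unpack callbacks through a Fix pointer
  gc->forward_comm(Grid3d::FIX, this, 1, sizeof(double), 0,
                   gc_buf1, gc_buf2, MPI_DOUBLE);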
@@ -23,7 +23,7 @@
 #include "domain.h"
 #include "error.h"
 #include "force.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "math_const.h"
 #include "memory.h"
 #include "neighbor.h"

@@ -444,7 +444,7 @@ void MSM::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d grid
 
   current_level = 0;
-  gcall->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),
+  gcall->reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),
                       REVERSE_RHO,gcall_buf1,gcall_buf2,MPI_DOUBLE);
 
   // forward communicate charge density values to fill ghost grid points

@@ -453,7 +453,7 @@ void MSM::compute(int eflag, int vflag)
   for (int n=0; n<=levels-2; n++) {
     if (!active_flag[n]) continue;
     current_level = n;
-    gc[n]->forward_comm(GridComm::KSPACE,this,1,sizeof(double),
+    gc[n]->forward_comm(Grid3d::KSPACE,this,1,sizeof(double),
                         FORWARD_RHO,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
     direct(n);
     restriction(n);

@@ -466,15 +466,15 @@ void MSM::compute(int eflag, int vflag)
   if (domain->nonperiodic) {
     current_level = levels-1;
     gc[levels-1]->
-      forward_comm(GridComm::KSPACE,this,1,sizeof(double),
+      forward_comm(Grid3d::KSPACE,this,1,sizeof(double),
                    FORWARD_RHO,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
     direct_top(levels-1);
     gc[levels-1]->
-      reverse_comm(GridComm::KSPACE,this,1,sizeof(double),
+      reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),
                    REVERSE_AD,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
     if (vflag_atom)
       gc[levels-1]->
-        reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
+        reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
                      REVERSE_AD_PERATOM,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
 
   } else {

@@ -485,7 +485,7 @@ void MSM::compute(int eflag, int vflag)
     current_level = levels-1;
     if (vflag_atom)
       gc[levels-1]->
-        reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
+        reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
                      REVERSE_AD_PERATOM,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
   }
 }

@@ -498,13 +498,13 @@ void MSM::compute(int eflag, int vflag)
     prolongation(n);
 
     current_level = n;
-    gc[n]->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),
+    gc[n]->reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),
                         REVERSE_AD,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
 
     // extra per-atom virial communication
 
     if (vflag_atom)
-      gc[n]->reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
+      gc[n]->reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
                           REVERSE_AD_PERATOM,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
   }

@@ -512,13 +512,13 @@ void MSM::compute(int eflag, int vflag)
   // to fill ghost cells surrounding their 3d bricks
 
   current_level = 0;
-  gcall->forward_comm(GridComm::KSPACE,this,1,sizeof(double),
+  gcall->forward_comm(Grid3d::KSPACE,this,1,sizeof(double),
                       FORWARD_AD,gcall_buf1,gcall_buf2,MPI_DOUBLE);
 
   // extra per-atom energy/virial communication
 
   if (vflag_atom)
-    gcall->forward_comm(GridComm::KSPACE,this,6,sizeof(double),
+    gcall->forward_comm(Grid3d::KSPACE,this,6,sizeof(double),
                         FORWARD_AD_PERATOM,gcall_buf1,gcall_buf2,MPI_DOUBLE);
 
   // calculate the force on my particles (interpolation)

@@ -595,7 +595,7 @@ void MSM::allocate()
 
   // commgrid using all processors for finest grid level
 
-  gcall = new GridComm(lmp,world,1,nx_msm[0],ny_msm[0],nz_msm[0],
+  gcall = new Grid3d(lmp,world,1,nx_msm[0],ny_msm[0],nz_msm[0],
                        nxlo_in[0],nxhi_in[0],nylo_in[0],
                        nyhi_in[0],nzlo_in[0],nzhi_in[0],
                        nxlo_out_all,nxhi_out_all,nylo_out_all,

@@ -627,7 +627,7 @@ void MSM::allocate()
       delete gc[n];
       int **procneigh = procneigh_levels[n];
 
-      gc[n] = new GridComm(lmp,world_levels[n],2,nx_msm[n],ny_msm[n],nz_msm[n],
+      gc[n] = new Grid3d(lmp,world_levels[n],2,nx_msm[n],ny_msm[n],nz_msm[n],
                            nxlo_in[n],nxhi_in[n],nylo_in[n],nyhi_in[n],
                            nzlo_in[n],nzhi_in[n],
                            nxlo_out[n],nxhi_out[n],nylo_out[n],nyhi_out[n],

@@ -743,7 +743,7 @@ void MSM::allocate_levels()
 {
   ngrid = new int[levels];
 
-  gc = new GridComm*[levels];
+  gc = new Grid3d*[levels];
   gc_buf1 = new double*[levels];
   gc_buf2 = new double*[levels];
   ngc_buf1 = new int[levels];

@@ -3394,7 +3394,7 @@ double MSM::memory_usage()
 
   // NOTE: Stan, fill in other memory allocations here
 
-  // all GridComm bufs
+  // all Grid3d bufs
 
   bytes += (double)(ngcall_buf1 + ngcall_buf2) * npergrid * sizeof(double);

@@ -81,8 +81,8 @@ class MSM : public KSpace {
   int myloc[3];               // which proc I am in each dim
   int ***procneigh_levels;    // my 6 neighboring procs, 0/1 = left/right
 
-  class GridComm *gcall;      // GridComm class for finest level grid
-  class GridComm **gc;        // GridComm classes for each hierarchical level
+  class Grid3d *gcall;        // GridComm class for finest level grid
+  class Grid3d **gc;          // GridComm classes for each hierarchical level
 
   double *gcall_buf1, *gcall_buf2;
   double **gc_buf1, **gc_buf2;
@@ -19,7 +19,7 @@
 #include "msm_cg.h"
 
 #include "atom.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "domain.h"
 #include "error.h"
 #include "force.h"

@@ -160,7 +160,7 @@ void MSMCG::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d grid
 
   current_level = 0;
-  gcall->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),
+  gcall->reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),
                       REVERSE_RHO,gcall_buf1,gcall_buf2,MPI_DOUBLE);
 
   // forward communicate charge density values to fill ghost grid points

@@ -169,7 +169,7 @@ void MSMCG::compute(int eflag, int vflag)
   for (n=0; n<=levels-2; n++) {
     if (!active_flag[n]) continue;
     current_level = n;
-    gc[n]->forward_comm(GridComm::KSPACE,this,1,sizeof(double),
+    gc[n]->forward_comm(Grid3d::KSPACE,this,1,sizeof(double),
                         FORWARD_RHO,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
     direct(n);
     restriction(n);

@@ -182,15 +182,15 @@ void MSMCG::compute(int eflag, int vflag)
   if (domain->nonperiodic) {
     current_level = levels-1;
     gc[levels-1]->
-      forward_comm(GridComm::KSPACE,this,1,sizeof(double),
+      forward_comm(Grid3d::KSPACE,this,1,sizeof(double),
                    FORWARD_RHO,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
     direct_top(levels-1);
     gc[levels-1]->
-      reverse_comm(GridComm::KSPACE,this,1,sizeof(double),
+      reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),
                    REVERSE_AD,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
     if (vflag_atom)
       gc[levels-1]->
-        reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
+        reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
                      REVERSE_AD_PERATOM,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
 
   } else {

@@ -201,7 +201,7 @@ void MSMCG::compute(int eflag, int vflag)
     current_level = levels-1;
     if (vflag_atom)
       gc[levels-1]->
-        reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
+        reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
                      REVERSE_AD_PERATOM,gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
   }
 }

@@ -214,13 +214,13 @@ void MSMCG::compute(int eflag, int vflag)
     prolongation(n);
 
     current_level = n;
-    gc[n]->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),
+    gc[n]->reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),
                         REVERSE_AD,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
 
     // extra per-atom virial communication
 
     if (vflag_atom)
-      gc[n]->reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
+      gc[n]->reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
                           REVERSE_AD_PERATOM,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
   }

@@ -228,13 +228,13 @@ void MSMCG::compute(int eflag, int vflag)
   // to fill ghost cells surrounding their 3d bricks
 
   current_level = 0;
-  gcall->forward_comm(GridComm::KSPACE,this,1,sizeof(double),
+  gcall->forward_comm(Grid3d::KSPACE,this,1,sizeof(double),
                       FORWARD_AD,gcall_buf1,gcall_buf2,MPI_DOUBLE);
 
   // extra per-atom energy/virial communication
 
   if (vflag_atom)
-    gcall->forward_comm(GridComm::KSPACE,this,6,sizeof(double),
+    gcall->forward_comm(Grid3d::KSPACE,this,6,sizeof(double),
                         FORWARD_AD_PERATOM,gcall_buf1,gcall_buf2,MPI_DOUBLE);
 
   // calculate the force on my particles (interpolation)
@@ -29,7 +29,7 @@
 #include "error.h"
 #include "fft3d_wrap.h"
 #include "force.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "math_const.h"
 #include "math_special.h"
 #include "memory.h"

@@ -291,7 +291,7 @@ void PPPM::init()
   // or overlap is allowed, then done
   // else reduce order and try again
 
-  GridComm *gctmp = nullptr;
+  Grid3d *gctmp = nullptr;
   int iteration = 0;
 
   while (order >= minorder) {

@@ -304,7 +304,7 @@ void PPPM::init()
     set_grid_local();
     if (overlap_allowed) break;
 
-    gctmp = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+    gctmp = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                          nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                          nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);
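For context, the gctmp object above is a throwaway: PPPM::init() builds it only to test whether every ghost cell would come from a neighboring processor. A sketch of the surrounding retry loop, with the ghost_adjacent() test and the order decrement assumed from the comments above ("else reduce order and try again"), not quoted from this diff:

  while (order >= minorder) {
    ...
    set_grid_local();
    if (overlap_allowed) break;

    gctmp = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                       nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                       nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);
    int tmp1,tmp2;
    gctmp->setup(tmp1,tmp2);
    if (gctmp->ghost_adjacent()) break;   // all ghosts adjoin owned cells: done
    delete gctmp;
    order--;                              // assumed: shrink stencil and retry
  }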
@@ -634,7 +634,7 @@ void PPPM::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d bricks
   // remap from 3d decomposition to FFT decomposition
 
-  gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+  gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                    REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft();

@@ -649,20 +649,20 @@ void PPPM::compute(int eflag, int vflag)
   // to fill ghost cells surrounding their 3d bricks
 
   if (differentiation_flag == 1)
-    gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                      FORWARD_AD,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   else
-    gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                      FORWARD_IK,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // extra per-atom energy/virial communication
 
   if (evflag_atom) {
     if (differentiation_flag == 1 && vflag_atom)
-      gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
                        FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
     else if (differentiation_flag == 0)
-      gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                        FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   }

@@ -810,9 +810,9 @@ void PPPM::allocate()
                           1,0,0,FFT_PRECISION,collective_flag);
 
   // create ghost grid object for rho and electric field communication
-  // also create 2 bufs for ghost grid cell comm, passed to GridComm methods
+  // also create 2 bufs for ghost grid cell comm, passed to Grid3d methods
 
-  gc = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+  gc = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                     nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                     nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);
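The nper argument in the compute() calls above (1, 3, 6, or 7 values per grid point) must agree with how the two buffers were sized here in allocate(). A hedged sketch of that sizing logic, following the pattern the diff implies; the exact npergrid expression is an assumption:

  gc->setup(ngc_buf1, ngc_buf2);    // buffer sizes, in grid points per swap
  // ad differentiation ships only the potential (1 value/point);
  // ik differentiation ships the 3 E-field components
  npergrid = (differentiation_flag == 1) ? 1 : 3;
  memory->create(gc_buf1, npergrid*ngc_buf1, "pppm:gc_buf1");
  memory->create(gc_buf2, npergrid*ngc_buf2, "pppm:gc_buf2");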
@@ -3055,7 +3055,7 @@ double PPPM::memory_usage()
     bytes += (double)2 * nfft_both * sizeof(FFT_SCALAR);
   }
 
-  // two GridComm bufs
+  // two Grid3d bufs
 
   bytes += (double)(ngc_buf1 + ngc_buf2) * npergrid * sizeof(FFT_SCALAR);

@@ -3115,7 +3115,7 @@ void PPPM::compute_group_group(int groupbit_A, int groupbit_B, int AA_flag)
   density_brick = density_A_brick;
   density_fft = density_A_fft;
 
-  gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+  gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                    REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft();

@@ -3124,7 +3124,7 @@ void PPPM::compute_group_group(int groupbit_A, int groupbit_B, int AA_flag)
   density_brick = density_B_brick;
   density_fft = density_B_fft;
 
-  gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+  gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                    REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft();

@@ -80,7 +80,7 @@ class PPPM : public KSpace {
 
   class FFT3d *fft1, *fft2;
   class Remap *remap;
-  class GridComm *gc;
+  class Grid3d *gc;
 
   FFT_SCALAR *gc_buf1, *gc_buf2;
   int ngc_buf1, ngc_buf2, npergrid;
@@ -21,7 +21,7 @@
 #include "atom.h"
 #include "domain.h"
 #include "error.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "math_const.h"
 #include "memory.h"
 #include "neighbor.h"

@@ -177,7 +177,7 @@ void PPPMCG::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d bricks
   // remap from 3d decomposition to FFT decomposition
 
-  gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+  gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                    REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft();

@@ -192,20 +192,20 @@ void PPPMCG::compute(int eflag, int vflag)
   // to fill ghost cells surrounding their 3d bricks
 
   if (differentiation_flag == 1)
-    gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                      FORWARD_AD,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   else
-    gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                      FORWARD_IK,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // extra per-atom energy/virial communication
 
   if (evflag_atom) {
     if (differentiation_flag == 1 && vflag_atom)
-      gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
                        FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
     else if (differentiation_flag == 0)
-      gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                        FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   }
@@ -24,7 +24,7 @@
 #include "error.h"
 #include "fft3d_wrap.h"
 #include "force.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "math_const.h"
 #include "math_special.h"
 #include "memory.h"

@@ -188,7 +188,7 @@ void PPPMDipole::init()
   // or overlap is allowed, then done
   // else reduce order and try again
 
-  GridComm *gctmp = nullptr;
+  Grid3d *gctmp = nullptr;
   int iteration = 0;
 
   while (order >= minorder) {

@@ -201,7 +201,7 @@ void PPPMDipole::init()
     set_grid_local();
     if (overlap_allowed) break;
 
-    gctmp = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+    gctmp = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                          nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                          nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);

@@ -441,7 +441,7 @@ void PPPMDipole::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d bricks
   // remap from 3d decomposition to FFT decomposition
 
-  gc_dipole->reverse_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+  gc_dipole->reverse_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                           REVERSE_MU,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft_dipole();

@@ -455,13 +455,13 @@ void PPPMDipole::compute(int eflag, int vflag)
   // all procs communicate E-field values
   // to fill ghost cells surrounding their 3d bricks
 
-  gc_dipole->forward_comm(GridComm::KSPACE,this,9,sizeof(FFT_SCALAR),
+  gc_dipole->forward_comm(Grid3d::KSPACE,this,9,sizeof(FFT_SCALAR),
                           FORWARD_MU,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // extra per-atom energy/virial communication
 
   if (evflag_atom)
-    gc_dipole->forward_comm(GridComm::KSPACE,this,18,sizeof(FFT_SCALAR),
+    gc_dipole->forward_comm(Grid3d::KSPACE,this,18,sizeof(FFT_SCALAR),
                             FORWARD_MU_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // calculate the force on my particles

@@ -603,9 +603,9 @@ void PPPMDipole::allocate()
                           1,0,0,FFT_PRECISION,collective_flag);
 
   // create ghost grid object for rho and electric field communication
-  // also create 2 bufs for ghost grid cell comm, passed to GridComm methods
+  // also create 2 bufs for ghost grid cell comm, passed to Grid3d methods
 
-  gc_dipole = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+  gc_dipole = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                            nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                            nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);

@@ -2519,7 +2519,7 @@ double PPPMDipole::memory_usage()
   if (peratom_allocate_flag)
     bytes += (double)21 * nbrick * sizeof(FFT_SCALAR);
 
-  // two GridComm bufs
+  // two Grid3d bufs
 
   bytes += (double)(ngc_buf1 + ngc_buf2) * npergrid * sizeof(FFT_SCALAR);

@@ -70,7 +70,7 @@ class PPPMDipole : public PPPM {
   FFT_SCALAR *work3, *work4;
   FFT_SCALAR *densityx_fft_dipole, *densityy_fft_dipole, *densityz_fft_dipole;
 
-  class GridComm *gc_dipole;
+  class Grid3d *gc_dipole;
 
   int only_dipole_flag;
   double musum, musqsum, mu2;
@@ -23,7 +23,7 @@
 #include "domain.h"
 #include "error.h"
 #include "force.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "math_const.h"
 #include "memory.h"
 #include "pair.h"

@@ -173,7 +173,7 @@ void PPPMDipoleSpin::init()
   // or overlap is allowed, then done
   // else reduce order and try again
 
-  GridComm *gctmp = nullptr;
+  Grid3d *gctmp = nullptr;
   int iteration = 0;
 
   while (order >= minorder) {

@@ -186,7 +186,7 @@ void PPPMDipoleSpin::init()
     set_grid_local();
     if (overlap_allowed) break;
 
-    gctmp = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+    gctmp = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                          nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                          nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);

@@ -298,7 +298,7 @@ void PPPMDipoleSpin::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d bricks
   // remap from 3d decomposition to FFT decomposition
 
-  gc_dipole->reverse_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+  gc_dipole->reverse_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                           REVERSE_MU,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft_dipole();

@@ -312,13 +312,13 @@ void PPPMDipoleSpin::compute(int eflag, int vflag)
   // all procs communicate E-field values
   // to fill ghost cells surrounding their 3d bricks
 
-  gc_dipole->forward_comm(GridComm::KSPACE,this,9,sizeof(FFT_SCALAR),
+  gc_dipole->forward_comm(Grid3d::KSPACE,this,9,sizeof(FFT_SCALAR),
                           FORWARD_MU,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // extra per-atom energy/virial communication
 
   if (evflag_atom)
-    gc->forward_comm(GridComm::KSPACE,this,18,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,18,sizeof(FFT_SCALAR),
                      FORWARD_MU_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // calculate the force on my particles
@@ -27,7 +27,7 @@
 #include "error.h"
 #include "fft3d_wrap.h"
 #include "force.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "math_const.h"
 #include "memory.h"
 #include "neighbor.h"

@@ -413,7 +413,7 @@ void PPPMDisp::init()
   int iteration = 0;
   if (function[0]) {
 
-    GridComm *gctmp = nullptr;
+    Grid3d *gctmp = nullptr;
     while (order >= minorder) {
 
       if (iteration && me == 0)

@@ -441,7 +441,7 @@ void PPPMDisp::init()
 
       if (overlap_allowed) break;
 
-      gctmp = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+      gctmp = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                            nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                            nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);

@@ -493,7 +493,7 @@ void PPPMDisp::init()
   iteration = 0;
   if (function[1] + function[2] + function[3]) {
 
-    GridComm *gctmp = nullptr;
+    Grid3d *gctmp = nullptr;
     while (order_6 >= minorder) {
 
       if (iteration && me == 0)

@@ -519,7 +519,7 @@ void PPPMDisp::init()
 
       if (overlap_allowed) break;
 
-      gctmp = new GridComm(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
+      gctmp = new Grid3d(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
                            nxlo_in_6,nxhi_in_6,nylo_in_6,nyhi_in_6,
                            nzlo_in_6,nzhi_in_6,
                            nxlo_out_6,nxhi_out_6,nylo_out_6,nyhi_out_6,
@@ -926,7 +926,7 @@ void PPPMDisp::compute(int eflag, int vflag)
 
     make_rho_c();
 
-    gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+    gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                      REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
     brick2fft(nxlo_in,nylo_in,nzlo_in,nxhi_in,nyhi_in,nzhi_in,

@@ -941,13 +941,13 @@ void PPPMDisp::compute(int eflag, int vflag)
                 virial_1,vg,vg2,
                 u_brick,v0_brick,v1_brick,v2_brick,v3_brick,v4_brick,v5_brick);
 
-      gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                        FORWARD_AD,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
       fieldforce_c_ad();
 
       if (vflag_atom)
-        gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR),
+        gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
                          FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
     } else {

@@ -960,13 +960,13 @@ void PPPMDisp::compute(int eflag, int vflag)
                 vdx_brick,vdy_brick,vdz_brick,virial_1,vg,vg2,
                 u_brick,v0_brick,v1_brick,v2_brick,v3_brick,v4_brick,v5_brick);
 
-      gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                        FORWARD_IK,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
       fieldforce_c_ik();
 
      if (evflag_atom)
-        gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+        gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                          FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
     }

@@ -984,7 +984,7 @@ void PPPMDisp::compute(int eflag, int vflag)
 
     make_rho_g();
 
-    gc6->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+    gc6->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                       REVERSE_RHO_GEOM,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     brick2fft(nxlo_in_6,nylo_in_6,nzlo_in_6,nxhi_in_6,nyhi_in_6,nzhi_in_6,

@@ -1000,13 +1000,13 @@ void PPPMDisp::compute(int eflag, int vflag)
                 u_brick_g,v0_brick_g,v1_brick_g,v2_brick_g,
                 v3_brick_g,v4_brick_g,v5_brick_g);
 
-      gc6->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+      gc6->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                         FORWARD_AD_GEOM,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
       fieldforce_g_ad();
 
       if (vflag_atom)
-        gc6->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR),
+        gc6->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
                           FORWARD_AD_PERATOM_GEOM,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     } else {

@@ -1020,13 +1020,13 @@ void PPPMDisp::compute(int eflag, int vflag)
                 u_brick_g,v0_brick_g,v1_brick_g,v2_brick_g,
                 v3_brick_g,v4_brick_g,v5_brick_g);
 
-      gc6->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+      gc6->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                         FORWARD_IK_GEOM,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
       fieldforce_g_ik();
 
       if (evflag_atom)
-        gc6->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+        gc6->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                           FORWARD_IK_PERATOM_GEOM,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
     }

@@ -1044,7 +1044,7 @@ void PPPMDisp::compute(int eflag, int vflag)
 
     make_rho_a();
 
-    gc6->reverse_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+    gc6->reverse_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                       REVERSE_RHO_ARITH,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     brick2fft_a();

@@ -1074,13 +1074,13 @@ void PPPMDisp::compute(int eflag, int vflag)
                 u_brick_a4,v0_brick_a4,v1_brick_a4,v2_brick_a4,
                 v3_brick_a4,v4_brick_a4,v5_brick_a4);
 
-      gc6->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+      gc6->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                         FORWARD_AD_ARITH,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
       fieldforce_a_ad();
 
       if (evflag_atom)
-        gc6->forward_comm(GridComm::KSPACE,this,42,sizeof(FFT_SCALAR),
+        gc6->forward_comm(Grid3d::KSPACE,this,42,sizeof(FFT_SCALAR),
                           FORWARD_AD_PERATOM_ARITH,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     } else {

@@ -1115,13 +1115,13 @@ void PPPMDisp::compute(int eflag, int vflag)
                 u_brick_a4,v0_brick_a4,v1_brick_a4,v2_brick_a4,
                 v3_brick_a4,v4_brick_a4,v5_brick_a4);
 
-      gc6->forward_comm(GridComm::KSPACE,this,21,sizeof(FFT_SCALAR),
+      gc6->forward_comm(Grid3d::KSPACE,this,21,sizeof(FFT_SCALAR),
                         FORWARD_IK_ARITH,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
       fieldforce_a_ik();
 
       if (evflag_atom)
-        gc6->forward_comm(GridComm::KSPACE,this,49,sizeof(FFT_SCALAR),
+        gc6->forward_comm(Grid3d::KSPACE,this,49,sizeof(FFT_SCALAR),
                           FORWARD_IK_PERATOM_ARITH,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
     }

@@ -1139,7 +1139,7 @@ void PPPMDisp::compute(int eflag, int vflag)
 
     make_rho_none();
 
-    gc6->reverse_comm(GridComm::KSPACE,this,nsplit_alloc,sizeof(FFT_SCALAR),
+    gc6->reverse_comm(Grid3d::KSPACE,this,nsplit_alloc,sizeof(FFT_SCALAR),
                       REVERSE_RHO_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     brick2fft_none();

@@ -1154,13 +1154,13 @@ void PPPMDisp::compute(int eflag, int vflag)
       n += 2;
     }
 
-    gc6->forward_comm(GridComm::KSPACE,this,1*nsplit_alloc,sizeof(FFT_SCALAR),
+    gc6->forward_comm(Grid3d::KSPACE,this,1*nsplit_alloc,sizeof(FFT_SCALAR),
                       FORWARD_AD_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     fieldforce_none_ad();
 
     if (vflag_atom)
-      gc6->forward_comm(GridComm::KSPACE,this,6*nsplit_alloc,sizeof(FFT_SCALAR),
+      gc6->forward_comm(Grid3d::KSPACE,this,6*nsplit_alloc,sizeof(FFT_SCALAR),
                         FORWARD_AD_PERATOM_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
   } else {

@@ -1174,13 +1174,13 @@ void PPPMDisp::compute(int eflag, int vflag)
       n += 2;
     }
 
-    gc6->forward_comm(GridComm::KSPACE,this,3*nsplit_alloc,sizeof(FFT_SCALAR),
+    gc6->forward_comm(Grid3d::KSPACE,this,3*nsplit_alloc,sizeof(FFT_SCALAR),
                       FORWARD_IK_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
 
     fieldforce_none_ik();
 
    if (evflag_atom)
-      gc6->forward_comm(GridComm::KSPACE,this,7*nsplit_alloc,sizeof(FFT_SCALAR),
+      gc6->forward_comm(Grid3d::KSPACE,this,7*nsplit_alloc,sizeof(FFT_SCALAR),
                         FORWARD_IK_PERATOM_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
   }
@@ -1748,9 +1748,9 @@ void _noopt PPPMDisp::allocate()
                           1,0,0,FFT_PRECISION,collective_flag);
 
   // create ghost grid object for rho and electric field communication
-  // also create 2 bufs for ghost grid cell comm, passed to GridComm methods
+  // also create 2 bufs for ghost grid cell comm, passed to Grid3d methods
 
-  gc = new GridComm(lmp,world,nx_pppm,ny_pppm,nz_pppm,
+  gc = new Grid3d(lmp,world,nx_pppm,ny_pppm,nz_pppm,
                     nxlo_in,nxhi_in,nylo_in,nyhi_in,nzlo_in,nzhi_in,
                     nxlo_out,nxhi_out,nylo_out,nyhi_out,nzlo_out,nzhi_out);

@@ -1831,10 +1831,10 @@ void _noopt PPPMDisp::allocate()
                           1,0,0,FFT_PRECISION,collective_flag);
 
     // create ghost grid object for rho and electric field communication
-    // also create 2 bufs for ghost grid cell comm, passed to GridComm methods
+    // also create 2 bufs for ghost grid cell comm, passed to Grid3d methods
 
     gc6 =
-      new GridComm(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
+      new Grid3d(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
                    nxlo_in_6,nxhi_in_6,nylo_in_6,nyhi_in_6,nzlo_in_6,nzhi_in_6,
                    nxlo_out_6,nxhi_out_6,nylo_out_6,nyhi_out_6,nzlo_out_6,nzhi_out_6);

@@ -1994,10 +1994,10 @@ void _noopt PPPMDisp::allocate()
                           1,0,0,FFT_PRECISION,collective_flag);
 
     // create ghost grid object for rho and electric field communication
-    // also create 2 bufs for ghost grid cell comm, passed to GridComm methods
+    // also create 2 bufs for ghost grid cell comm, passed to Grid3d methods
 
     gc6 =
-      new GridComm(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
+      new Grid3d(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
                    nxlo_in_6,nxhi_in_6,nylo_in_6,nyhi_in_6,nzlo_in_6,nzhi_in_6,
                    nxlo_out_6,nxhi_out_6,nylo_out_6,nyhi_out_6,nzlo_out_6,nzhi_out_6);

@@ -2081,10 +2081,10 @@ void _noopt PPPMDisp::allocate()
                           1,0,0,FFT_PRECISION,collective_flag);
 
     // create ghost grid object for rho and electric field communication
-    // also create 2 bufs for ghost grid cell comm, passed to GridComm methods
+    // also create 2 bufs for ghost grid cell comm, passed to Grid3d methods
 
     gc6 =
-      new GridComm(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
+      new Grid3d(lmp,world,nx_pppm_6,ny_pppm_6,nz_pppm_6,
                    nxlo_in_6,nxhi_in_6,nylo_in_6,nyhi_in_6,nzlo_in_6,nzhi_in_6,
                    nxlo_out_6,nxhi_out_6,nylo_out_6,nyhi_out_6,nzlo_out_6,nzhi_out_6);

@@ -8310,7 +8310,7 @@ double PPPMDisp::memory_usage()
     bytes += (double)nfft_both_6 * (mixing + 2) * sizeof(FFT_SCALAR);
   }
 
-  // four GridComm bufs
+  // four Grid3d bufs
 
   bytes += (double)(ngc_buf1 + ngc_buf2) * npergrid * sizeof(FFT_SCALAR);
   bytes += (double)(ngc6_buf1 + ngc6_buf2) * npergrid6 * sizeof(FFT_SCALAR);

@@ -178,7 +178,7 @@ class PPPMDisp : public KSpace {
   class FFT3d *fft1, *fft2;
   class FFT3d *fft1_6, *fft2_6;
   class Remap *remap, *remap_6;
-  class GridComm *gc, *gc6;
+  class Grid3d *gc, *gc6;
 
   FFT_SCALAR *gc_buf1, *gc_buf2, *gc6_buf1, *gc6_buf2;
   int ngc_buf1, ngc_buf2, npergrid;
@@ -21,7 +21,7 @@
 #include <cstring>
 #include <cmath>
 #include "atom.h"
-#include "gridcomm.h"
+#include "grid3d.h"
 #include "domain.h"
 #include "memory.h"
 #include "error.h"

@@ -157,7 +157,7 @@ void PPPMStagger::compute(int eflag, int vflag)
   // to fully sum contribution in their 3d bricks
   // remap from 3d decomposition to FFT decomposition
 
-  gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+  gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                    REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   brick2fft();

@@ -172,20 +172,20 @@ void PPPMStagger::compute(int eflag, int vflag)
   // to fill ghost cells surrounding their 3d bricks
 
   if (differentiation_flag == 1)
-    gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
                      FORWARD_AD,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   else
-    gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),
+    gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
                      FORWARD_IK,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
 
   // extra per-atom energy/virial communication
 
   if (evflag_atom) {
     if (differentiation_flag == 1 && vflag_atom)
-      gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
                        FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
     else if (differentiation_flag == 0)
-      gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),
+      gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
                        FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
   }
@@ -12,7 +12,7 @@
    See the README file in the top-level LAMMPS directory.
 ------------------------------------------------------------------------- */
 
-#include "gridcomm.h"
+#include "grid3d.h"
 
 #include "comm.h"
 #include "error.h"

@@ -48,10 +48,10 @@ enum{REGULAR,TILED};
    communication is done across the periodic boundaries
 ------------------------------------------------------------------------- */
 
-GridComm::GridComm(LAMMPS *lmp, MPI_Comm gcomm,
-                   int gnx, int gny, int gnz,
-                   int ixlo, int ixhi, int iylo, int iyhi, int izlo, int izhi,
-                   int oxlo, int oxhi, int oylo, int oyhi, int ozlo, int ozhi)
+Grid3d::Grid3d(LAMMPS *lmp, MPI_Comm gcomm,
+               int gnx, int gny, int gnz,
+               int ixlo, int ixhi, int iylo, int iyhi, int izlo, int izhi,
+               int oxlo, int oxhi, int oylo, int oyhi, int ozlo, int ozhi)
   : Pointers(lmp)
 {
   if (comm->layout == Comm::LAYOUT_TILED) layout = TILED;

@@ -87,11 +87,11 @@ GridComm::GridComm(LAMMPS *lmp, MPI_Comm gcomm,
    e xyz lohi for flag = 2: 6 neighbor procs
 ------------------------------------------------------------------------- */
 
-GridComm::GridComm(LAMMPS *lmp, MPI_Comm gcomm, int flag,
-                   int gnx, int gny, int gnz,
-                   int ixlo, int ixhi, int iylo, int iyhi, int izlo, int izhi,
-                   int oxlo, int oxhi, int oylo, int oyhi, int ozlo, int ozhi,
-                   int exlo, int exhi, int eylo, int eyhi, int ezlo, int ezhi)
+Grid3d::Grid3d(LAMMPS *lmp, MPI_Comm gcomm, int flag,
+               int gnx, int gny, int gnz,
+               int ixlo, int ixhi, int iylo, int iyhi, int izlo, int izhi,
+               int oxlo, int oxhi, int oylo, int oyhi, int ozlo, int ozhi,
+               int exlo, int exhi, int eylo, int eyhi, int ezlo, int ezhi)
   : Pointers(lmp)
 {
   if (comm->layout == Comm::LAYOUT_TILED) layout = TILED;

@@ -124,14 +124,14 @@ GridComm::GridComm(LAMMPS *lmp, MPI_Comm gcomm, int flag,
                oxlo,oxhi,oylo,oyhi,ozlo,ozhi,
                exlo,exhi,eylo,eyhi,ezlo,ezhi);
     } else {
-      error->all(FLERR,"GridComm does not support tiled layout with neighbor procs");
+      error->all(FLERR,"Grid3d does not support tiled layout with neighbor procs");
     }
   }
 }
 
 /* ---------------------------------------------------------------------- */
 
-GridComm::~GridComm()
+Grid3d::~Grid3d()
 {
   // regular comm data struct

@@ -164,16 +164,16 @@ GridComm::~GridComm()
    store constructor args in local variables
 ------------------------------------------------------------------------- */
 
-void GridComm::initialize(MPI_Comm gcomm,
-                          int gnx, int gny, int gnz,
-                          int ixlo, int ixhi, int iylo, int iyhi,
-                          int izlo, int izhi,
-                          int oxlo, int oxhi, int oylo, int oyhi,
-                          int ozlo, int ozhi,
-                          int fxlo, int fxhi, int fylo, int fyhi,
-                          int fzlo, int fzhi,
-                          int pxlo, int pxhi, int pylo, int pyhi,
-                          int pzlo, int pzhi)
+void Grid3d::initialize(MPI_Comm gcomm,
+                        int gnx, int gny, int gnz,
+                        int ixlo, int ixhi, int iylo, int iyhi,
+                        int izlo, int izhi,
+                        int oxlo, int oxhi, int oylo, int oyhi,
+                        int ozlo, int ozhi,
+                        int fxlo, int fxhi, int fylo, int fyhi,
+                        int fzlo, int fzhi,
+                        int pxlo, int pxhi, int pylo, int pyhi,
+                        int pzlo, int pzhi)
 {
   gridcomm = gcomm;
   MPI_Comm_rank(gridcomm,&me);

@@ -229,7 +229,7 @@ void GridComm::initialize(MPI_Comm gcomm,
 
 /* ---------------------------------------------------------------------- */
 
-void GridComm::setup(int &nbuf1, int &nbuf2)
+void Grid3d::setup(int &nbuf1, int &nbuf2)
 {
   if (layout == REGULAR) setup_regular(nbuf1,nbuf2);
   else setup_tiled(nbuf1,nbuf2);
@@ -244,7 +244,7 @@ void GridComm::setup(int &nbuf1, int &nbuf2)
    all procs perform same # of swaps in a direction, even if some don't need it
 ------------------------------------------------------------------------- */
 
-void GridComm::setup_regular(int &nbuf1, int &nbuf2)
+void Grid3d::setup_regular(int &nbuf1, int &nbuf2)
 {
   int nsent,sendfirst,sendlast,recvfirst,recvlast;
   int sendplanes,recvplanes;

@@ -545,7 +545,7 @@ void GridComm::setup_regular(int &nbuf1, int &nbuf2)
    no exchanges by dimension, unlike CommTiled forward/reverse comm of particles
 ------------------------------------------------------------------------- */
 
-void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
+void Grid3d::setup_tiled(int &nbuf1, int &nbuf2)
 {
   int i,m;
   double xlo,xhi,ylo,yhi,zlo,zhi;

@@ -557,7 +557,7 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
   // dim is -1 for proc 0, but never accessed
 
   rcbinfo = (RCBinfo *)
-    memory->smalloc(nprocs*sizeof(RCBinfo),"GridComm:rcbinfo");
+    memory->smalloc(nprocs*sizeof(RCBinfo),"grid3d:rcbinfo");
   RCBinfo rcbone;
   rcbone.dim = comm->rcbcutdim;
   if (rcbone.dim <= 0) rcbone.cut = inxlo;

@@ -580,7 +580,7 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
 
   pbc[0] = pbc[1] = pbc[2] = 0;
 
-  memory->create(overlap_procs,nprocs,"GridComm:overlap_procs");
+  memory->create(overlap_procs,nprocs,"grid3d:overlap_procs");
   noverlap = maxoverlap = 0;
   overlap = nullptr;

@@ -591,9 +591,9 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
   // ncopy = # of overlaps with myself, across a periodic boundary
 
   int *proclist;
-  memory->create(proclist,noverlap,"GridComm:proclist");
+  memory->create(proclist,noverlap,"grid3d:proclist");
   srequest = (Request *)
-    memory->smalloc(noverlap*sizeof(Request),"GridComm:srequest");
+    memory->smalloc(noverlap*sizeof(Request),"grid3d:srequest");
 
   int nsend_request = 0;
   ncopy = 0;

@@ -612,17 +612,17 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
 
   auto irregular = new Irregular(lmp);
   int nrecv_request = irregular->create_data(nsend_request,proclist,1);
-  auto rrequest = (Request *) memory->smalloc(nrecv_request*sizeof(Request),"GridComm:rrequest");
+  auto rrequest = (Request *) memory->smalloc(nrecv_request*sizeof(Request),"grid3d:rrequest");
   irregular->exchange_data((char *) srequest,sizeof(Request),(char *) rrequest);
   irregular->destroy_data();
 
   // compute overlaps between received ghost boxes and my owned box
   // overlap box used to setup my Send data struct and respond to requests
 
-  send = (Send *) memory->smalloc(nrecv_request*sizeof(Send),"GridComm:send");
-  sresponse = (Response *) memory->smalloc(nrecv_request*sizeof(Response),"GridComm:sresponse");
+  send = (Send *) memory->smalloc(nrecv_request*sizeof(Send),"grid3d:send");
+  sresponse = (Response *) memory->smalloc(nrecv_request*sizeof(Response),"grid3d:sresponse");
   memory->destroy(proclist);
-  memory->create(proclist,nrecv_request,"GridComm:proclist");
+  memory->create(proclist,nrecv_request,"grid3d:proclist");
 
   for (m = 0; m < nrecv_request; m++) {
     send[m].proc = rrequest[m].sender;

@@ -651,7 +651,7 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
 
   int nsend_response = nrecv_request;
   int nrecv_response = irregular->create_data(nsend_response,proclist,1);
-  auto rresponse = (Response *) memory->smalloc(nrecv_response*sizeof(Response),"GridComm:rresponse");
+  auto rresponse = (Response *) memory->smalloc(nrecv_response*sizeof(Response),"grid3d:rresponse");
   irregular->exchange_data((char *) sresponse,sizeof(Response),(char *) rresponse);
   irregular->destroy_data();
   delete irregular;

@@ -660,7 +660,7 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
   // box used to setup my Recv data struct after unwrapping via PBC
   // adjacent = 0 if any box of ghost cells does not adjoin my owned cells
 
-  recv = (Recv *) memory->smalloc(nrecv_response*sizeof(Recv),"GridComm:recv");
+  recv = (Recv *) memory->smalloc(nrecv_response*sizeof(Recv),"grid3d:recv");
   adjacent = 1;
 
   for (i = 0; i < nrecv_response; i++) {

@@ -683,7 +683,7 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
 
   // create Copy data struct from overlaps with self
 
-  copy = (Copy *) memory->smalloc(ncopy*sizeof(Copy),"GridComm:copy");
+  copy = (Copy *) memory->smalloc(ncopy*sizeof(Copy),"grid3d:copy");
 
   ncopy = 0;
   for (m = 0; m < noverlap; m++) {

@@ -770,7 +770,7 @@ void GridComm::setup_tiled(int &nbuf1, int &nbuf2)
    add all the procs it overlaps with to Overlap list
 ------------------------------------------------------------------------- */
 
-void GridComm::ghost_box_drop(int *box, int *pbc)
+void Grid3d::ghost_box_drop(int *box, int *pbc)
 {
   int i,m;

@@ -855,7 +855,7 @@ void GridComm::ghost_box_drop(int *box, int *pbc)
    return Np = # of procs, plist = proc IDs
 ------------------------------------------------------------------------- */
 
-void GridComm::box_drop_grid(int *box, int proclower, int procupper,
+void Grid3d::box_drop_grid(int *box, int proclower, int procupper,
                              int &np, int *plist)
 {
   // end recursion when partition is a single proc

@@ -886,7 +886,7 @@ void GridComm::box_drop_grid(int *box, int proclower, int procupper,
    return 1 if yes, 0 if no
 ------------------------------------------------------------------------- */
 
-int GridComm::ghost_adjacent()
+int Grid3d::ghost_adjacent()
 {
   if (layout == REGULAR) return ghost_adjacent_regular();
   return ghost_adjacent_tiled();

@@ -897,7 +897,7 @@ int GridComm::ghost_adjacent()
    return 0 if adjacent=0 for any proc, else 1
 ------------------------------------------------------------------------- */
 
-int GridComm::ghost_adjacent_regular()
+int Grid3d::ghost_adjacent_regular()
 {
   adjacent = 1;
   if (ghostxlo > inxhi-inxlo+1) adjacent = 0;

@@ -918,7 +918,7 @@ int GridComm::ghost_adjacent_regular()
    return 0 if adjacent=0 for any proc, else 1
 ------------------------------------------------------------------------- */
 
-int GridComm::ghost_adjacent_tiled()
+int Grid3d::ghost_adjacent_tiled()
 {
   int adjacent_all;
   MPI_Allreduce(&adjacent,&adjacent_all,1,MPI_INT,MPI_MIN,gridcomm);

@@ -929,7 +929,7 @@ int GridComm::ghost_adjacent_tiled()
    forward comm of my owned cells to other's ghost cells
 ------------------------------------------------------------------------- */
 
-void GridComm::forward_comm(int caller, void *ptr, int nper, int nbyte, int which,
+void Grid3d::forward_comm(int caller, void *ptr, int nper, int nbyte, int which,
                             void *buf1, void *buf2, MPI_Datatype datatype)
 {
   if (layout == REGULAR) {
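The caller enum is what lets a void* stand in for the calling object: forward_comm() casts ptr back to the named class and instantiates the matching template. A schematic of that dispatch, pieced together from the signatures above (a sketch of the REGULAR branch under those assumptions, not the verbatim body; the TILED branch is analogous):

  if (layout == REGULAR) {
    if (caller == KSPACE)
      forward_comm_regular<KSpace>((KSpace *) ptr,nper,nbyte,which,buf1,buf2,datatype);
    else if (caller == PAIR)
      forward_comm_regular<Pair>((Pair *) ptr,nper,nbyte,which,buf1,buf2,datatype);
    else if (caller == FIX)
      forward_comm_regular<Fix>((Fix *) ptr,nper,nbyte,which,buf1,buf2,datatype);
  }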
@@ -960,7 +960,7 @@ void GridComm::forward_comm(int caller, void *ptr, int nper, int nbyte, int whic
 ------------------------------------------------------------------------- */
 
 template < class T >
-void GridComm::
+void Grid3d::
 forward_comm_regular(T *ptr, int nper, int /*nbyte*/, int which,
                      void *buf1, void *buf2, MPI_Datatype datatype)
 {

@@ -990,7 +990,7 @@ forward_comm_regular(T *ptr, int nper, int /*nbyte*/, int which,
 ------------------------------------------------------------------------- */
 
 template < class T >
-void GridComm::
+void Grid3d::
 forward_comm_tiled(T *ptr, int nper, int nbyte, int which,
                    void *buf1, void *vbuf2, MPI_Datatype datatype)
 {

@@ -1034,7 +1034,7 @@ forward_comm_tiled(T *ptr, int nper, int nbyte, int which,
    reverse comm of my ghost cells to sum to owner cells
 ------------------------------------------------------------------------- */
 
-void GridComm::reverse_comm(int caller, void *ptr, int nper, int nbyte, int which,
+void Grid3d::reverse_comm(int caller, void *ptr, int nper, int nbyte, int which,
                             void *buf1, void *buf2, MPI_Datatype datatype)
 {
   if (layout == REGULAR) {

@@ -1065,7 +1065,7 @@ void GridComm::reverse_comm(int caller, void *ptr, int nper, int nbyte, int whic
 ------------------------------------------------------------------------- */
 
 template < class T >
-void GridComm::
+void Grid3d::
 reverse_comm_regular(T *ptr, int nper, int /*nbyte*/, int which,
                      void *buf1, void *buf2, MPI_Datatype datatype)
 {

@@ -1095,7 +1095,7 @@ reverse_comm_regular(T *ptr, int nper, int /*nbyte*/, int which,
 ------------------------------------------------------------------------- */
 
 template < class T >
-void GridComm::
+void Grid3d::
 reverse_comm_tiled(T *ptr, int nper, int nbyte, int which,
                    void *buf1, void *vbuf2, MPI_Datatype datatype)
 {

@@ -1142,7 +1142,7 @@ reverse_comm_tiled(T *ptr, int nper, int nbyte, int which,
    caller can decide whether to store chunks, output them, etc
 ------------------------------------------------------------------------- */
 
-void GridComm::gather(int /*caller*/, void *ptr, int nper, int nbyte,
+void Grid3d::gather(int /*caller*/, void *ptr, int nper, int nbyte,
                       int which, void *buf, MPI_Datatype datatype)
 {
   int me = comm->me;

@@ -1158,8 +1158,8 @@ void GridComm::gather(int /*caller*/, void *ptr, int nper, int nbyte,
   // pack my data via callback to caller
 
   char *mybuf;
-  if (me == 0) memory->create(mybuf,maxsize*nbyte,"GridComm:mybuf");
-  else memory->create(mybuf,mysize*nbyte,"GridComm:mybuf");
+  if (me == 0) memory->create(mybuf,maxsize*nbyte,"grid3d:mybuf");
+  else memory->create(mybuf,mysize*nbyte,"grid3d:mybuf");
   fptr->pack_gather_grid(which,mybuf);
 
   // ping each proc for its data
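On the caller side, gather() relies on the pack_gather_grid(which,buf) callback visible above to serialize a proc's owned cells before proc 0 pings for them. A hypothetical fix-side implementation, assuming a 3d double array T_grid dimensioned over the owned extents (FixMyGrid and T_grid are illustrative names, not from this diff):

  void FixMyGrid::pack_gather_grid(int /*which*/, void *vbuf)
  {
    auto buf = (double *) vbuf;
    int m = 0;
    // serialize my owned grid cells in x-fastest order
    for (int iz = nzlo_in; iz <= nzhi_in; iz++)
      for (int iy = nylo_in; iy <= nyhi_in; iy++)
        for (int ix = nxlo_in; ix <= nxhi_in; ix++)
          buf[m++] = T_grid[iz][iy][ix];
  }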
@@ -1219,10 +1219,10 @@ void GridComm::gather(int /*caller*/, void *ptr, int nper, int nbyte,
    same swap list used by forward and reverse communication
 ------------------------------------------------------------------------- */
 
-void GridComm::grow_swap()
+void Grid3d::grow_swap()
 {
   maxswap += DELTA;
-  swap = (Swap *) memory->srealloc(swap,maxswap*sizeof(Swap),"GridComm:swap");
+  swap = (Swap *) memory->srealloc(swap,maxswap*sizeof(Swap),"grid3d:swap");
 }
 
 /* ----------------------------------------------------------------------

@@ -1233,11 +1233,11 @@ void GridComm::grow_swap()
    same swap list used by forward and reverse communication
 ------------------------------------------------------------------------- */
 
-void GridComm::grow_overlap()
+void Grid3d::grow_overlap()
 {
   maxoverlap += DELTA;
   overlap = (Overlap *)
-    memory->srealloc(overlap,maxoverlap*sizeof(Overlap),"GridComm:overlap");
+    memory->srealloc(overlap,maxoverlap*sizeof(Overlap),"grid3d:overlap");
 }
 
 /* ----------------------------------------------------------------------

@@ -1246,11 +1246,11 @@ void GridComm::grow_overlap()
    (fullxlo:fullxhi,fullylo:fullyhi,fullzlo:fullzhi)
 ------------------------------------------------------------------------- */
 
-int GridComm::indices(int *&list,
+int Grid3d::indices(int *&list,
                       int xlo, int xhi, int ylo, int yhi, int zlo, int zhi)
 {
   int nmax = (xhi-xlo+1) * (yhi-ylo+1) * (zhi-zlo+1);
-  memory->create(list,nmax,"GridComm:indices");
+  memory->create(list,nmax,"grid3d:indices");
   if (nmax == 0) return 0;
 
   int nx = (fullxhi-fullxlo+1);
@@ -11,22 +11,22 @@
    See the README file in the top-level LAMMPS directory.
 ------------------------------------------------------------------------- */
 
-#ifndef LMP_GRIDCOMM_H
-#define LMP_GRIDCOMM_H
+#ifndef LMP_GRID2D_H
+#define LMP_GRID2D_H
 
 #include "pointers.h"
 
 namespace LAMMPS_NS {
 
-class GridComm : protected Pointers {
+class Grid2d : protected Pointers {
  public:
   enum { KSPACE = 0, PAIR = 1, FIX = 2 };    // calling classes
 
-  GridComm(class LAMMPS *, MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int,
+  Grid2d(class LAMMPS *, MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int,
          int, int, int);
-  GridComm(class LAMMPS *, MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int,
+  Grid2d(class LAMMPS *, MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int,
          int, int, int, int, int, int, int, int, int, int);
-  ~GridComm() override;
+  ~Grid2d() override;
   void setup(int &, int &);
   int ghost_adjacent();
   void forward_comm(int, void *, int, int, int, void *, void *, MPI_Datatype);
src/grid3d.cpp: 1267 lines (new file) — file diff suppressed because it is too large
src/grid3d.h: 200 lines (new file):
@@ -0,0 +1,200 @@
/* -*- c++ -*- ----------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   https://www.lammps.org/, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#ifndef LMP_GRID3D_H
#define LMP_GRID3D_H

#include "pointers.h"

namespace LAMMPS_NS {

class Grid3d : protected Pointers {
 public:
  enum { KSPACE = 0, PAIR = 1, FIX = 2 };    // calling classes

  Grid3d(class LAMMPS *, MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int,
         int, int, int);
  Grid3d(class LAMMPS *, MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int,
         int, int, int, int, int, int, int, int, int, int);
  ~Grid3d() override;
  void setup(int &, int &);
  int ghost_adjacent();
  void forward_comm(int, void *, int, int, int, void *, void *, MPI_Datatype);
  void reverse_comm(int, void *, int, int, int, void *, void *, MPI_Datatype);
  void gather(int, void *, int, int, int, void *, MPI_Datatype);

 protected:
  int me, nprocs;
  int layout;               // REGULAR or TILED
  MPI_Comm gridcomm;        // communicator for this class
                            //   usually world, but MSM calls with subset

  // inputs from caller via constructor

  int nx, ny, nz;           // size of global grid in all 3 dims
  int inxlo, inxhi;         // inclusive extent of my grid chunk
  int inylo, inyhi;         //   0 <= in <= N-1
  int inzlo, inzhi;
  int outxlo, outxhi;       // inclusive extent of my grid chunk plus
  int outylo, outyhi;       //   ghost cells in all 6 directions
  int outzlo, outzhi;       //   lo indices can be < 0, hi indices can be >= N
  int fullxlo, fullxhi;     // extent of grid chunk that caller stores
  int fullylo, fullyhi;     //   can be same as out indices or larger
  int fullzlo, fullzhi;

  // -------------------------------------------
  // internal variables for REGULAR layout
  // -------------------------------------------

  int procxlo, procxhi;     // 6 neighbor procs that adjoin me
  int procylo, procyhi;     //   not used for comm_style = tiled
  int proczlo, proczhi;

  int ghostxlo, ghostxhi;   // # of my owned grid planes needed
  int ghostylo, ghostyhi;   //   by neighbor procs in each dir as their ghost planes
  int ghostzlo, ghostzhi;

  // swap = exchange of owned and ghost grid cells between 2 procs, including self

  struct Swap {
    int sendproc;           // proc to send to for forward comm
    int recvproc;           // proc to recv from for forward comm
    int npack;              // # of datums to pack
    int nunpack;            // # of datums to unpack
    int *packlist;          // 3d array offsets to pack
    int *unpacklist;        // 3d array offsets to unpack
  };

  int nswap, maxswap;
  Swap *swap;

  // -------------------------------------------
  // internal variables for TILED layout
  // -------------------------------------------

  int *overlap_procs;       // length of Nprocs in communicator
  MPI_Request *requests;    // length of max messages this proc receives

  // RCB tree of cut info
  // each proc contributes one value, except proc 0

  struct RCBinfo {
    int dim;                // 0,1,2 = which dim the cut is in
    int cut;                // grid index of lowest cell in upper half of cut
  };

  RCBinfo *rcbinfo;

  // overlap = a proc whose owned cells overlap with my extended ghost box
  // includes overlaps across periodic boundaries, can also be self

  struct Overlap {
    int proc;               // proc whose owned cells overlap my ghost cells
    int box[6];             // box that overlaps otherproc's owned cells
                            //   this box is wholly contained within global grid
    int pbc[3];             // PBC offsets to convert box to a portion of my ghost box
                            //   my ghost box may extend beyond global grid
  };

  int noverlap, maxoverlap;
  Overlap *overlap;

  // request = sent to each proc whose owned cells overlap my ghost cells

  struct Request {
    int sender;             // sending proc
    int index;              // index of overlap on sender
    int box[6];             // box that overlaps receiver's owned cells
                            //   wholly contained within global grid
  };

  Request *srequest, *rrequest;

  // response = reply from each proc whose owned cells overlap my ghost cells

  struct Response {
    int index;              // index of my overlap for the initial request
    int box[6];             // box that overlaps responder's owned cells
                            //   wholly contained within global grid
                            //   has to be unwrapped by PBC to map to my ghost cells
  };

  Response *sresponse, *rresponse;

  // send = proc to send a subset of my owned cells to, for forward comm
  // for reverse comm, proc I receive ghost overlaps with my owned cells from
  // offset used in reverse comm to recv a message in middle of a large buffer

  struct Send {
    int proc;
    int npack;
    int *packlist;
    int offset;
  };

  // recv = proc to recv a subset of my ghost cells from, for forward comm
  // for reverse comm, proc I send a subset of my ghost cells to
  // offset used in forward comm to recv a message in middle of a large buffer

  struct Recv {
    int proc;
    int nunpack;
    int *unpacklist;
    int offset;
  };

  int adjacent;             // 0 on a proc who receives ghosts from a non-neighbor proc

  // copy = subset of my owned cells to copy into subset of my ghost cells
  // that describes forward comm, for reverse comm it is the opposite

  struct Copy {
    int npack;
    int nunpack;
    int *packlist;
    int *unpacklist;
  };

  int nsend, nrecv, ncopy;
  Send *send;
  Recv *recv;
  Copy *copy;

  // -------------------------------------------
  // internal methods
  // -------------------------------------------

  void initialize(MPI_Comm, int, int, int, int, int, int, int, int, int, int, int, int, int, int,
                  int, int, int, int, int, int, int, int, int, int, int, int, int);
  virtual void setup_regular(int &, int &);
  virtual void setup_tiled(int &, int &);
  void ghost_box_drop(int *, int *);
  void box_drop_grid(int *, int, int, int &, int *);

  int ghost_adjacent_regular();
  int ghost_adjacent_tiled();

  template <class T> void forward_comm_regular(T *, int, int, int, void *, void *, MPI_Datatype);
  template <class T> void forward_comm_tiled(T *, int, int, int, void *, void *, MPI_Datatype);
  template <class T> void reverse_comm_regular(T *, int, int, int, void *, void *, MPI_Datatype);
  template <class T> void reverse_comm_tiled(T *, int, int, int, void *, void *, MPI_Datatype);

  virtual void grow_swap();
  void grow_overlap();

  int indices(int *&, int, int, int, int, int, int);
};

}    // namespace LAMMPS_NS

#endif