Merge branch 'distributed-grids' of github.com:lammps/lammps into distributed-grids

This commit is contained in:
Axel Kohlmeyer
2022-08-17 13:18:17 -04:00
7 changed files with 69 additions and 69 deletions

View File

@@ -26,7 +26,7 @@
#include "error.h" #include "error.h"
#include "fft3d_wrap.h" #include "fft3d_wrap.h"
#include "force.h" #include "force.h"
#include "gridcomm.h" #include "grid3d.h"
#include "math_const.h" #include "math_const.h"
#include "math_special.h" #include "math_special.h"
#include "memory.h" #include "memory.h"
@@ -200,7 +200,7 @@ void PPPMElectrode::init()
// or overlap is allowed, then done // or overlap is allowed, then done
// else reduce order and try again // else reduce order and try again
GridComm *gctmp = nullptr; Grid3d *gctmp = nullptr;
int iteration = 0; int iteration = 0;
while (order >= minorder) { while (order >= minorder) {
@@ -215,8 +215,8 @@ void PPPMElectrode::init()
if (overlap_allowed) break; if (overlap_allowed) break;
gctmp = gctmp =
new GridComm(lmp, world, nx_pppm, ny_pppm, nz_pppm, nxlo_in, nxhi_in, nylo_in, nyhi_in, new Grid3d(lmp, world, nx_pppm, ny_pppm, nz_pppm, nxlo_in, nxhi_in, nylo_in, nyhi_in,
nzlo_in, nzhi_in, nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out); nzlo_in, nzhi_in, nxlo_out, nxhi_out, nylo_out, nyhi_out, nzlo_out, nzhi_out);
int tmp1, tmp2; int tmp1, tmp2;
gctmp->setup(tmp1, tmp2); gctmp->setup(tmp1, tmp2);
@@ -445,7 +445,7 @@ void PPPMElectrode::compute(int eflag, int vflag)
// TODO: this is dangerous now that compute_vector's interface has been // TODO: this is dangerous now that compute_vector's interface has been
// changed since a compute could call an arbitrary source, needs tightening // changed since a compute could call an arbitrary source, needs tightening
make_rho_in_brick(last_source_grpbit, density_brick, !last_invert_source); make_rho_in_brick(last_source_grpbit, density_brick, !last_invert_source);
gc->reverse_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2, gc->reverse_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
for (int nz = nzlo_out; nz <= nzhi_out; nz++) for (int nz = nzlo_out; nz <= nzhi_out; nz++)
for (int ny = nylo_out; ny <= nyhi_out; ny++) for (int ny = nylo_out; ny <= nyhi_out; ny++)
@@ -459,7 +459,7 @@ void PPPMElectrode::compute(int eflag, int vflag)
// to fully sum contribution in their 3d bricks // to fully sum contribution in their 3d bricks
// remap from 3d decomposition to FFT decomposition // remap from 3d decomposition to FFT decomposition
gc->reverse_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2, gc->reverse_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
} }
@@ -479,20 +479,20 @@ void PPPMElectrode::compute(int eflag, int vflag)
// to fill ghost cells surrounding their 3d bricks // to fill ghost cells surrounding their 3d bricks
if (differentiation_flag == 1) if (differentiation_flag == 1)
gc->forward_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2, gc->forward_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
else else
gc->forward_comm(GridComm::KSPACE, this, 3, sizeof(FFT_SCALAR), FORWARD_IK, gc_buf1, gc_buf2, gc->forward_comm(Grid3d::KSPACE, this, 3, sizeof(FFT_SCALAR), FORWARD_IK, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
// extra per-atom energy/virial communication // extra per-atom energy/virial communication
if (evflag_atom) { if (evflag_atom) {
if (differentiation_flag == 1 && vflag_atom) if (differentiation_flag == 1 && vflag_atom)
gc->forward_comm(GridComm::KSPACE, this, 6, sizeof(FFT_SCALAR), FORWARD_AD_PERATOM, gc_buf1, gc->forward_comm(Grid3d::KSPACE, this, 6, sizeof(FFT_SCALAR), FORWARD_AD_PERATOM, gc_buf1,
gc_buf2, MPI_FFT_SCALAR); gc_buf2, MPI_FFT_SCALAR);
else if (differentiation_flag == 0) else if (differentiation_flag == 0)
gc->forward_comm(GridComm::KSPACE, this, 7, sizeof(FFT_SCALAR), FORWARD_IK_PERATOM, gc_buf1, gc->forward_comm(Grid3d::KSPACE, this, 7, sizeof(FFT_SCALAR), FORWARD_IK_PERATOM, gc_buf1,
gc_buf2, MPI_FFT_SCALAR); gc_buf2, MPI_FFT_SCALAR);
} }
@@ -590,7 +590,7 @@ void PPPMElectrode::compute_vector(double *vec, int sensor_grpbit, int source_gr
make_rho_in_brick(source_grpbit, electrolyte_density_brick, invert_source); make_rho_in_brick(source_grpbit, electrolyte_density_brick, invert_source);
density_brick = electrolyte_density_brick; density_brick = electrolyte_density_brick;
density_fft = electrolyte_density_fft; density_fft = electrolyte_density_fft;
gc->reverse_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2, gc->reverse_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
brick2fft(); brick2fft();
// switch back pointers // switch back pointers
@@ -618,7 +618,7 @@ void PPPMElectrode::compute_vector(double *vec, int sensor_grpbit, int source_gr
u_brick[k][j][i] = work2[n]; u_brick[k][j][i] = work2[n];
n += 2; n += 2;
} }
gc->forward_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2, gc->forward_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
project_psi(vec, sensor_grpbit); project_psi(vec, sensor_grpbit);
compute_vector_called = true; compute_vector_called = true;

View File

@@ -25,7 +25,7 @@
#include "fix.h" #include "fix.h"
#include "force.h" #include "force.h"
#include "gpu_extra.h" #include "gpu_extra.h"
#include "gridcomm.h" #include "grid3d.h"
#include "math_const.h" #include "math_const.h"
#include "memory.h" #include "memory.h"
#include "modify.h" #include "modify.h"
@@ -252,11 +252,11 @@ void PPPMGPU::compute(int eflag, int vflag)
// remap from 3d decomposition to FFT decomposition // remap from 3d decomposition to FFT decomposition
if (triclinic == 0) { if (triclinic == 0) {
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR), gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
REVERSE_RHO_GPU,gc_buf1,gc_buf2,MPI_FFT_SCALAR); REVERSE_RHO_GPU,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft_gpu(); brick2fft_gpu();
} else { } else {
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR), gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR); REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
PPPM::brick2fft(); PPPM::brick2fft();
} }
@@ -271,20 +271,20 @@ void PPPMGPU::compute(int eflag, int vflag)
// to fill ghost cells surrounding their 3d bricks // to fill ghost cells surrounding their 3d bricks
if (differentiation_flag == 1) if (differentiation_flag == 1)
gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
FORWARD_AD,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_AD,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
else else
gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
FORWARD_IK,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_IK,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
// extra per-atom energy/virial communication // extra per-atom energy/virial communication
if (evflag_atom) { if (evflag_atom) {
if (differentiation_flag == 1 && vflag_atom) if (differentiation_flag == 1 && vflag_atom)
gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
else if (differentiation_flag == 0) else if (differentiation_flag == 0)
gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
} }
@@ -828,7 +828,7 @@ void PPPMGPU::compute_group_group(int groupbit_A, int groupbit_B, int AA_flag)
density_brick = density_A_brick; density_brick = density_A_brick;
density_fft = density_A_fft; density_fft = density_A_fft;
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR), gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR); REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft(); brick2fft();
@@ -837,7 +837,7 @@ void PPPMGPU::compute_group_group(int groupbit_A, int groupbit_B, int AA_flag)
density_brick = density_B_brick; density_brick = density_B_brick;
density_fft = density_B_fft; density_fft = density_B_fft;
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR), gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR); REVERSE_RHO,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft(); brick2fft();

View File

@@ -23,7 +23,7 @@
#include "domain.h" #include "domain.h"
#include "error.h" #include "error.h"
#include "force.h" #include "force.h"
#include "gridcomm.h" #include "grid3d.h"
#include "math_const.h" #include "math_const.h"
#include "math_special.h" #include "math_special.h"
#include "memory.h" #include "memory.h"
@@ -292,7 +292,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
make_rho_c<float,float>(fix->get_single_buffers()); make_rho_c<float,float>(fix->get_single_buffers());
} }
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO, gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft(nxlo_in, nylo_in, nzlo_in, nxhi_in, nyhi_in, nzhi_in, brick2fft(nxlo_in, nylo_in, nzlo_in, nxhi_in, nyhi_in, nzhi_in,
@@ -306,7 +306,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
energy_1, greensfn, virial_1, vg,vg2, u_brick, v0_brick, energy_1, greensfn, virial_1, vg,vg2, u_brick, v0_brick,
v1_brick, v2_brick, v3_brick, v4_brick, v5_brick); v1_brick, v2_brick, v3_brick, v4_brick, v5_brick);
gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),FORWARD_AD, gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),FORWARD_AD,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -318,7 +318,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (vflag_atom) if (vflag_atom)
gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
} else { } else {
@@ -331,7 +331,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
u_brick, v0_brick, v1_brick, v2_brick, v3_brick, v4_brick, u_brick, v0_brick, v1_brick, v2_brick, v3_brick, v4_brick,
v5_brick); v5_brick);
gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),FORWARD_IK, gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),FORWARD_IK,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -343,7 +343,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (evflag_atom) if (evflag_atom)
gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
} }
if (evflag_atom) fieldforce_c_peratom(); if (evflag_atom) fieldforce_c_peratom();
@@ -376,7 +376,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
make_rho_g<float,float>(fix->get_single_buffers()); make_rho_g<float,float>(fix->get_single_buffers());
} }
gc6->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO_G, gc6->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO_G,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
brick2fft(nxlo_in_6, nylo_in_6, nzlo_in_6, nxhi_in_6, nyhi_in_6, nzhi_in_6, brick2fft(nxlo_in_6, nylo_in_6, nzlo_in_6, nxhi_in_6, nyhi_in_6, nzhi_in_6,
@@ -391,7 +391,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
virial_6, vg_6, vg2_6, u_brick_g, v0_brick_g, v1_brick_g, virial_6, vg_6, vg2_6, u_brick_g, v0_brick_g, v1_brick_g,
v2_brick_g, v3_brick_g, v4_brick_g, v5_brick_g); v2_brick_g, v3_brick_g, v4_brick_g, v5_brick_g);
gc6->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),FORWARD_AD_G, gc6->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),FORWARD_AD_G,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -403,7 +403,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (vflag_atom) if (vflag_atom)
gc6->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
FORWARD_AD_PERATOM_G,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); FORWARD_AD_PERATOM_G,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
} else { } else {
@@ -416,7 +416,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
vdz_brick_g, virial_6, vg_6, vg2_6, u_brick_g, v0_brick_g, vdz_brick_g, virial_6, vg_6, vg2_6, u_brick_g, v0_brick_g,
v1_brick_g, v2_brick_g, v3_brick_g, v4_brick_g, v5_brick_g); v1_brick_g, v2_brick_g, v3_brick_g, v4_brick_g, v5_brick_g);
gc6->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),FORWARD_IK_G, gc6->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),FORWARD_IK_G,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -428,7 +428,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (evflag_atom) if (evflag_atom)
gc6->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
FORWARD_IK_PERATOM_G,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); FORWARD_IK_PERATOM_G,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
} }
@@ -461,7 +461,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
make_rho_a<float,float>(fix->get_single_buffers()); make_rho_a<float,float>(fix->get_single_buffers());
} }
gc->reverse_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),REVERSE_RHO_A, gc->reverse_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),REVERSE_RHO_A,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft_a(); brick2fft_a();
@@ -487,7 +487,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
v5_brick_a2, u_brick_a4, v0_brick_a4, v1_brick_a4, v5_brick_a2, u_brick_a4, v0_brick_a4, v1_brick_a4,
v2_brick_a4, v3_brick_a4, v4_brick_a4, v5_brick_a4); v2_brick_a4, v3_brick_a4, v4_brick_a4, v5_brick_a4);
gc6->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR),FORWARD_AD_A, gc6->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),FORWARD_AD_A,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -499,7 +499,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (evflag_atom) if (evflag_atom)
gc6->forward_comm(GridComm::KSPACE,this,42,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,42,sizeof(FFT_SCALAR),
FORWARD_AD_PERATOM_A,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); FORWARD_AD_PERATOM_A,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
} else { } else {
@@ -530,7 +530,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
u_brick_a4, v0_brick_a4, v1_brick_a4, v2_brick_a4, u_brick_a4, v0_brick_a4, v1_brick_a4, v2_brick_a4,
v3_brick_a4, v4_brick_a4, v5_brick_a4); v3_brick_a4, v4_brick_a4, v5_brick_a4);
gc6->forward_comm(GridComm::KSPACE,this,18,sizeof(FFT_SCALAR),FORWARD_IK_A, gc6->forward_comm(Grid3d::KSPACE,this,18,sizeof(FFT_SCALAR),FORWARD_IK_A,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -542,7 +542,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (evflag_atom) if (evflag_atom)
gc6->forward_comm(GridComm::KSPACE,this,49,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,49,sizeof(FFT_SCALAR),
FORWARD_IK_PERATOM_A,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); FORWARD_IK_PERATOM_A,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
} }
@@ -576,7 +576,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
make_rho_none<float,float>(fix->get_single_buffers()); make_rho_none<float,float>(fix->get_single_buffers());
} }
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO_NONE, gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO_NONE,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft_none(); brick2fft_none();
@@ -592,7 +592,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
n += 2; n += 2;
} }
gc6->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),
FORWARD_AD_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); FORWARD_AD_NONE,gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
if (fix->precision() == FixIntel::PREC_MODE_MIXED) { if (fix->precision() == FixIntel::PREC_MODE_MIXED) {
@@ -604,7 +604,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (vflag_atom) if (vflag_atom)
gc6->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
FORWARD_AD_PERATOM_NONE, FORWARD_AD_PERATOM_NONE,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
@@ -622,7 +622,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
n += 2; n += 2;
} }
gc6->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),
FORWARD_IK_NONE, FORWARD_IK_NONE,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
@@ -635,7 +635,7 @@ void PPPMDispIntel::compute(int eflag, int vflag)
} }
if (evflag_atom) if (evflag_atom)
gc6->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR), gc6->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
FORWARD_IK_PERATOM_NONE, FORWARD_IK_PERATOM_NONE,
gc6_buf1,gc6_buf2,MPI_FFT_SCALAR); gc6_buf1,gc6_buf2,MPI_FFT_SCALAR);
} }

View File

@@ -28,7 +28,7 @@
#include "error.h" #include "error.h"
#include "fft3d_wrap.h" #include "fft3d_wrap.h"
#include "force.h" #include "force.h"
#include "gridcomm.h" #include "grid3d.h"
#include "math_const.h" #include "math_const.h"
#include "math_special.h" #include "math_special.h"
#include "memory.h" #include "memory.h"
@@ -210,7 +210,7 @@ void PPPMElectrodeIntel::compute(int eflag, int vflag)
make_rho_in_brick<float, float>(fix->get_single_buffers(), last_source_grpbit, make_rho_in_brick<float, float>(fix->get_single_buffers(), last_source_grpbit,
density_brick, !last_invert_source); density_brick, !last_invert_source);
} }
gc->reverse_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2, gc->reverse_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
for (int nz = nzlo_out; nz <= nzhi_out; nz++) for (int nz = nzlo_out; nz <= nzhi_out; nz++)
for (int ny = nylo_out; ny <= nyhi_out; ny++) for (int ny = nylo_out; ny <= nyhi_out; ny++)
@@ -232,7 +232,7 @@ void PPPMElectrodeIntel::compute(int eflag, int vflag)
// to fully sum contribution in their 3d bricks // to fully sum contribution in their 3d bricks
// remap from 3d decomposition to FFT decomposition // remap from 3d decomposition to FFT decomposition
gc->reverse_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2, gc->reverse_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
} }
@@ -252,20 +252,20 @@ void PPPMElectrodeIntel::compute(int eflag, int vflag)
// to fill ghost cells surrounding their 3d bricks // to fill ghost cells surrounding their 3d bricks
if (differentiation_flag == 1) if (differentiation_flag == 1)
gc->forward_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2, gc->forward_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
else else
gc->forward_comm(GridComm::KSPACE, this, 3, sizeof(FFT_SCALAR), FORWARD_IK, gc_buf1, gc_buf2, gc->forward_comm(Grid3d::KSPACE, this, 3, sizeof(FFT_SCALAR), FORWARD_IK, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
// extra per-atom energy/virial communication // extra per-atom energy/virial communication
if (evflag_atom) { if (evflag_atom) {
if (differentiation_flag == 1 && vflag_atom) if (differentiation_flag == 1 && vflag_atom)
gc->forward_comm(GridComm::KSPACE, this, 6, sizeof(FFT_SCALAR), FORWARD_AD_PERATOM, gc_buf1, gc->forward_comm(Grid3d::KSPACE, this, 6, sizeof(FFT_SCALAR), FORWARD_AD_PERATOM, gc_buf1,
gc_buf2, MPI_FFT_SCALAR); gc_buf2, MPI_FFT_SCALAR);
else if (differentiation_flag == 0) else if (differentiation_flag == 0)
gc->forward_comm(GridComm::KSPACE, this, 7, sizeof(FFT_SCALAR), FORWARD_IK_PERATOM, gc_buf1, gc->forward_comm(Grid3d::KSPACE, this, 7, sizeof(FFT_SCALAR), FORWARD_IK_PERATOM, gc_buf1,
gc_buf2, MPI_FFT_SCALAR); gc_buf2, MPI_FFT_SCALAR);
} }
int tempslabflag = slabflag; int tempslabflag = slabflag;
@@ -340,7 +340,7 @@ void PPPMElectrodeIntel::compute_vector(double *vec, int sensor_grpbit, int sour
} }
density_brick = electrolyte_density_brick; density_brick = electrolyte_density_brick;
density_fft = electrolyte_density_fft; density_fft = electrolyte_density_fft;
gc->reverse_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2, gc->reverse_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), REVERSE_RHO, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
brick2fft(); brick2fft();
// switch back pointers // switch back pointers
@@ -370,7 +370,7 @@ void PPPMElectrodeIntel::compute_vector(double *vec, int sensor_grpbit, int sour
n += 2; n += 2;
} }
gc->forward_comm(GridComm::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2, gc->forward_comm(Grid3d::KSPACE, this, 1, sizeof(FFT_SCALAR), FORWARD_AD, gc_buf1, gc_buf2,
MPI_FFT_SCALAR); MPI_FFT_SCALAR);
switch (fix->precision()) { switch (fix->precision()) {

View File

@@ -26,7 +26,7 @@
#include "domain.h" #include "domain.h"
#include "error.h" #include "error.h"
#include "force.h" #include "force.h"
#include "gridcomm.h" #include "grid3d.h"
#include "math_const.h" #include "math_const.h"
#include "math_special.h" #include "math_special.h"
#include "memory.h" #include "memory.h"
@@ -227,7 +227,7 @@ void PPPMIntel::compute_first(int eflag, int vflag)
// to fully sum contribution in their 3d bricks // to fully sum contribution in their 3d bricks
// remap from 3d decomposition to FFT decomposition // remap from 3d decomposition to FFT decomposition
gc->reverse_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO, gc->reverse_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),REVERSE_RHO,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
brick2fft(); brick2fft();
@@ -243,20 +243,20 @@ void PPPMIntel::compute_first(int eflag, int vflag)
// to fill ghost cells surrounding their 3d bricks // to fill ghost cells surrounding their 3d bricks
if (differentiation_flag == 1) if (differentiation_flag == 1)
gc->forward_comm(GridComm::KSPACE,this,1,sizeof(FFT_SCALAR),FORWARD_AD, gc->forward_comm(Grid3d::KSPACE,this,1,sizeof(FFT_SCALAR),FORWARD_AD,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
else else
gc->forward_comm(GridComm::KSPACE,this,3,sizeof(FFT_SCALAR),FORWARD_IK, gc->forward_comm(Grid3d::KSPACE,this,3,sizeof(FFT_SCALAR),FORWARD_IK,
gc_buf1,gc_buf2,MPI_FFT_SCALAR); gc_buf1,gc_buf2,MPI_FFT_SCALAR);
// extra per-atom energy/virial communication // extra per-atom energy/virial communication
if (evflag_atom) { if (evflag_atom) {
if (differentiation_flag == 1 && vflag_atom) if (differentiation_flag == 1 && vflag_atom)
gc->forward_comm(GridComm::KSPACE,this,6,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,6,sizeof(FFT_SCALAR),
FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_AD_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
else if (differentiation_flag == 0) else if (differentiation_flag == 0)
gc->forward_comm(GridComm::KSPACE,this,7,sizeof(FFT_SCALAR), gc->forward_comm(Grid3d::KSPACE,this,7,sizeof(FFT_SCALAR),
FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR); FORWARD_IK_PERATOM,gc_buf1,gc_buf2,MPI_FFT_SCALAR);
} }
} }

View File

@@ -23,7 +23,7 @@
#include "domain.h" #include "domain.h"
#include "error.h" #include "error.h"
#include "force.h" #include "force.h"
#include "gridcomm.h" #include "grid3d.h"
#include "memory.h" #include "memory.h"
#include "neighbor.h" #include "neighbor.h"
#include "thr_omp.h" #include "thr_omp.h"
@@ -166,7 +166,7 @@ void MSMCGOMP::compute(int eflag, int vflag)
// to fully sum contribution in their 3d grid // to fully sum contribution in their 3d grid
current_level = 0; current_level = 0;
gcall->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),REVERSE_RHO, gcall->reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),REVERSE_RHO,
gcall_buf1,gcall_buf2,MPI_DOUBLE); gcall_buf1,gcall_buf2,MPI_DOUBLE);
// forward communicate charge density values to fill ghost grid points // forward communicate charge density values to fill ghost grid points
@@ -175,7 +175,7 @@ void MSMCGOMP::compute(int eflag, int vflag)
for (int n=0; n<=levels-2; n++) { for (int n=0; n<=levels-2; n++) {
if (!active_flag[n]) continue; if (!active_flag[n]) continue;
current_level = n; current_level = n;
gc[n]->forward_comm(GridComm::KSPACE,this,1,sizeof(double),FORWARD_RHO, gc[n]->forward_comm(Grid3d::KSPACE,this,1,sizeof(double),FORWARD_RHO,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE); gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
direct(n); direct(n);
restriction(n); restriction(n);
@@ -188,15 +188,15 @@ void MSMCGOMP::compute(int eflag, int vflag)
if (domain->nonperiodic) { if (domain->nonperiodic) {
current_level = levels-1; current_level = levels-1;
gc[levels-1]-> gc[levels-1]->
forward_comm(GridComm::KSPACE,this,1,sizeof(double),FORWARD_RHO, forward_comm(Grid3d::KSPACE,this,1,sizeof(double),FORWARD_RHO,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE); gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
direct_top(levels-1); direct_top(levels-1);
gc[levels-1]-> gc[levels-1]->
reverse_comm(GridComm::KSPACE,this,1,sizeof(double),REVERSE_AD, reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),REVERSE_AD,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE); gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
if (vflag_atom) if (vflag_atom)
gc[levels-1]-> gc[levels-1]->
reverse_comm(GridComm::KSPACE,this,6,sizeof(double),REVERSE_AD_PERATOM, reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE); gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
} else { } else {
@@ -207,7 +207,7 @@ void MSMCGOMP::compute(int eflag, int vflag)
current_level = levels-1; current_level = levels-1;
if (vflag_atom) if (vflag_atom)
gc[levels-1]-> gc[levels-1]->
reverse_comm(GridComm::KSPACE,this,6,sizeof(double),REVERSE_AD_PERATOM, reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE); gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
} }
} }
@@ -220,13 +220,13 @@ void MSMCGOMP::compute(int eflag, int vflag)
prolongation(n); prolongation(n);
current_level = n; current_level = n;
gc[n]->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),REVERSE_AD, gc[n]->reverse_comm(Grid3d::KSPACE,this,1,sizeof(double),REVERSE_AD,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE); gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
// extra per-atom virial communication // extra per-atom virial communication
if (vflag_atom) if (vflag_atom)
gc[n]->reverse_comm(GridComm::KSPACE,this,6,sizeof(double), gc[n]->reverse_comm(Grid3d::KSPACE,this,6,sizeof(double),
REVERSE_AD_PERATOM,gc_buf1[n],gc_buf2[n],MPI_DOUBLE); REVERSE_AD_PERATOM,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
} }
@@ -234,13 +234,13 @@ void MSMCGOMP::compute(int eflag, int vflag)
// to fill ghost cells surrounding their 3d bricks // to fill ghost cells surrounding their 3d bricks
current_level = 0; current_level = 0;
gcall->forward_comm(GridComm::KSPACE,this,1,sizeof(double),FORWARD_AD, gcall->forward_comm(Grid3d::KSPACE,this,1,sizeof(double),FORWARD_AD,
gcall_buf1,gcall_buf2,MPI_DOUBLE); gcall_buf1,gcall_buf2,MPI_DOUBLE);
// extra per-atom energy/virial communication // extra per-atom energy/virial communication
if (vflag_atom) if (vflag_atom)
gcall->forward_comm(GridComm::KSPACE,this,6,sizeof(double),FORWARD_AD_PERATOM, gcall->forward_comm(Grid3d::KSPACE,this,6,sizeof(double),FORWARD_AD_PERATOM,
gcall_buf1,gcall_buf2,MPI_DOUBLE); gcall_buf1,gcall_buf2,MPI_DOUBLE);
// calculate the force on my particles (interpolation) // calculate the force on my particles (interpolation)

View File

@@ -263,7 +263,7 @@ FixAveGrid::FixAveGrid(LAMMPS *lmp, int narg, char **arg) :
if (normflag != ALL) if (normflag != ALL)
error->all(FLERR,"Fix ave/grid norm all is required for now"); error->all(FLERR,"Fix ave/grid norm all is required for now");
if (normflag != ONE) if (ave != ONE)
error->all(FLERR,"Fix ave/grid ave one is required for now"); error->all(FLERR,"Fix ave/grid ave one is required for now");
// error checks for ATOM mode // error checks for ATOM mode