sync OPENMP package with new GridComm syntax

This commit is contained in:
Steve Plimpton
2021-08-27 09:48:35 -06:00
parent a519dc3e9a
commit 0879484827

View File

@@ -166,8 +166,8 @@ void MSMCGOMP::compute(int eflag, int vflag)
// to fully sum contribution in their 3d grid
current_level = 0;
gcall->reverse_comm_kspace(this,1,sizeof(double),REVERSE_RHO,
gcall_buf1,gcall_buf2,MPI_DOUBLE);
gcall->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),REVERSE_RHO,
gcall_buf1,gcall_buf2,MPI_DOUBLE);
// forward communicate charge density values to fill ghost grid points
// compute direct sum interaction and then restrict to coarser grid
@@ -175,8 +175,8 @@ void MSMCGOMP::compute(int eflag, int vflag)
for (int n=0; n<=levels-2; n++) {
if (!active_flag[n]) continue;
current_level = n;
gc[n]->forward_comm_kspace(this,1,sizeof(double),FORWARD_RHO,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
gc[n]->forward_comm(GridComm::KSPACE,this,1,sizeof(double),FORWARD_RHO,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
direct(n);
restriction(n);
}
@@ -188,16 +188,16 @@ void MSMCGOMP::compute(int eflag, int vflag)
if (domain->nonperiodic) {
current_level = levels-1;
gc[levels-1]->
forward_comm_kspace(this,1,sizeof(double),FORWARD_RHO,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
forward_comm(GridComm::KSPACE,this,1,sizeof(double),FORWARD_RHO,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
direct_top(levels-1);
gc[levels-1]->
reverse_comm_kspace(this,1,sizeof(double),REVERSE_AD,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
reverse_comm(GridComm::KSPACE,this,1,sizeof(double),REVERSE_AD,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
if (vflag_atom)
gc[levels-1]->
reverse_comm_kspace(this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
reverse_comm(GridComm::KSPACE,this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
} else {
// Here using MPI_Allreduce is cheaper than using commgrid
@@ -207,8 +207,8 @@ void MSMCGOMP::compute(int eflag, int vflag)
current_level = levels-1;
if (vflag_atom)
gc[levels-1]->
reverse_comm_kspace(this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
reverse_comm(GridComm::KSPACE,this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[levels-1],gc_buf2[levels-1],MPI_DOUBLE);
}
}
@@ -220,28 +220,28 @@ void MSMCGOMP::compute(int eflag, int vflag)
prolongation(n);
current_level = n;
gc[n]->reverse_comm_kspace(this,1,sizeof(double),REVERSE_AD,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
gc[n]->reverse_comm(GridComm::KSPACE,this,1,sizeof(double),REVERSE_AD,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
// extra per-atom virial communication
if (vflag_atom)
gc[n]->reverse_comm_kspace(this,6,sizeof(double),REVERSE_AD_PERATOM,
gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
gc[n]->reverse_comm(GridComm::KSPACE,this,6,sizeof(double),
REVERSE_AD_PERATOM,gc_buf1[n],gc_buf2[n],MPI_DOUBLE);
}
// all procs communicate E-field values
// to fill ghost cells surrounding their 3d bricks
current_level = 0;
gcall->forward_comm_kspace(this,1,sizeof(double),FORWARD_AD,
gcall_buf1,gcall_buf2,MPI_DOUBLE);
gcall->forward_comm(GridComm::KSPACE,this,1,sizeof(double),FORWARD_AD,
gcall_buf1,gcall_buf2,MPI_DOUBLE);
// extra per-atom energy/virial communication
if (vflag_atom)
gcall->forward_comm_kspace(this,6,sizeof(double),FORWARD_AD_PERATOM,
gcall_buf1,gcall_buf2,MPI_DOUBLE);
gcall->forward_comm(GridComm::KSPACE,this,6,sizeof(double),FORWARD_AD_PERATOM,
gcall_buf1,gcall_buf2,MPI_DOUBLE);
// calculate the force on my particles (interpolation)