diff --git a/src/EXTRA-FIX/fix_ttm_grid.cpp b/src/EXTRA-FIX/fix_ttm_grid.cpp index 54cb0f550d..862e487d1b 100644 --- a/src/EXTRA-FIX/fix_ttm_grid.cpp +++ b/src/EXTRA-FIX/fix_ttm_grid.cpp @@ -401,7 +401,7 @@ void FixTTMGrid::reset_grid() // perform remap from previous decomp to new decomp int nremap_buf1,nremap_buf2; - grid->remap_setup(grid_previous,nremap_buf1,nremap_buf2); + grid->setup_remap(grid_previous,nremap_buf1,nremap_buf2); double *remap_buf1,*remap_buf2; memory->create(remap_buf1, nremap_buf1, "ttm/grid:remap_buf1"); diff --git a/src/grid2d.cpp b/src/grid2d.cpp index 51afc3bf89..476310d727 100644 --- a/src/grid2d.cpp +++ b/src/grid2d.cpp @@ -33,12 +33,12 @@ enum{REGULAR,TILED}; static constexpr int OFFSET = 16384; /* ---------------------------------------------------------------------- - NOTES - tiled implementation only currently works for RCB, not general tiled - b/c RCB tree is used to find neighboring tiles + NOTES: + tiled implementations only currently work for RCB, not general tilings + b/c RCB tree is used to find neighboring tiles if o indices for ghosts are < 0 or hi indices are >= N, then grid is treated as periodic in that dimension, - communication is done across the periodic boundaries + comm is done across the periodic boundaries ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- @@ -52,7 +52,7 @@ static constexpr int OFFSET = 16384; i xy lohi = portion of global grid this proc owns, 0 <= index < N o xy lohi = owned + ghost grid cells needed in all directions for non-periodic dims, o indices will not be < 0 or >= N, - since no grid communication is done across non-periodic boundaries + since no grid comm is done across non-periodic boundaries ------------------------------------------------------------------------- */ Grid2d::Grid2d(LAMMPS *lmp, MPI_Comm gcomm, @@ -278,7 +278,7 @@ Grid2d::Grid2d(LAMMPS *lmp, MPI_Comm gcomm, int flag, 
Grid2d::~Grid2d() { - // regular comm data struct + // brick comm data structs for (int i = 0; i < nswap; i++) { memory->destroy(swap[i].packlist); @@ -402,38 +402,39 @@ void Grid2d::get_bounds_ghost(int &xlo, int &xhi, int &ylo, int &yhi) } /* ---------------------------------------------------------------------- + setup owned/ghost communication return sizes of two buffers needed for communication - either on regular grid or procs or irregular tiling + either for regular brick comm or irregular tiling comm nbuf1 = largest pack or unpack in any Send or Recv or Copy nbuf2 = larget of sum of all packs or unpacks in Send or Recv - for regular comm, nbuf1 = nbuf2 - for irregular comm, nbuf2 >= nbuf2 - nbuf1,nbuf2 are just count of grid points - caller converts them to message size for grid data it stores + for regular brick comm, nbuf1 = nbuf2 + for irregular tiling comm, nbuf2 >= nbuf1 + nbuf1,nbuf2 are counts of grid points + caller converts them to message sizes for grid data it stores ------------------------------------------------------------------------- */ void Grid2d::setup(int &nbuf1, int &nbuf2) { - if (layout == REGULAR) setup_regular(nbuf1,nbuf2); + if (layout == REGULAR) setup_brick(nbuf1,nbuf2); else setup_tiled(nbuf1,nbuf2); } /* ---------------------------------------------------------------------- - setup comm for a regular grid of procs - each proc has 6 neighbors - comm pattern = series of swaps with one of those 6 procs + setup owned/ghost comm for regular brick comm + each proc has 4 neighbors + comm pattern = series of swaps with one of those 4 procs can be multiple swaps with same proc if ghost extent is large swap may not be symmetric if both procs do not need same layers of ghosts all procs perform same # of swaps in a direction, even if some don't need it ------------------------------------------------------------------------- */ -void Grid2d::setup_regular(int &nbuf1, int &nbuf2) +void Grid2d::setup_brick(int &nbuf1, int &nbuf2) { int
nsent,sendfirst,sendlast,recvfirst,recvlast; int sendplanes,recvplanes; int notdoneme,notdone; - // notify 6 neighbor procs how many ghost grid planes I need from them + // notify 4 neighbor procs how many ghost grid planes I need from them // ghost xy lo = # of my lower grid planes that proc xy lo needs as its ghosts // ghost xy hi = # of my upper grid planes that proc xy hi needs as its ghosts // if this proc is its own neighbor across periodic bounary, value is from self @@ -632,7 +633,7 @@ void Grid2d::setup_regular(int &nbuf1, int &nbuf2) } /* ---------------------------------------------------------------------- - setup comm for RCB tiled proc domains + setup owned/ghost comm for irregular tiled comm each proc has arbitrary # of neighbors that overlap its ghost extent identify which procs will send me ghost cells, and vice versa may not be symmetric if both procs do not need same layers of ghosts @@ -959,7 +960,7 @@ void Grid2d::box_drop_grid(int *box, int proclower, int procupper, int Grid2d::ghost_adjacent() { - if (layout == REGULAR) return ghost_adjacent_regular(); + if (layout == REGULAR) return ghost_adjacent_brick(); return ghost_adjacent_tiled(); } @@ -968,7 +969,7 @@ int Grid2d::ghost_adjacent() return 0 if adjacent=0 for any proc, else 1 ------------------------------------------------------------------------- */ -int Grid2d::ghost_adjacent_regular() +int Grid2d::ghost_adjacent_brick() { adjacent = 1; if (ghostxlo > inxhi-inxlo+1) adjacent = 0; @@ -1003,14 +1004,14 @@ void Grid2d::forward_comm(int caller, void *ptr, int nper, int nbyte, int which, { if (layout == REGULAR) { if (caller == KSPACE) - forward_comm_regular((KSpace *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + forward_comm_brick((KSpace *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == PAIR) - forward_comm_regular((Pair *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + forward_comm_brick((Pair *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == FIX) - 
forward_comm_regular((Fix *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + forward_comm_brick((Fix *) ptr,nper,nbyte,which, + buf1,buf2,datatype); } else { if (caller == KSPACE) forward_comm_tiled((KSpace *) ptr,nper,nbyte,which, @@ -1030,8 +1031,8 @@ void Grid2d::forward_comm(int caller, void *ptr, int nper, int nbyte, int which, template < class T > void Grid2d:: -forward_comm_regular(T *ptr, int nper, int /*nbyte*/, int which, - void *buf1, void *buf2, MPI_Datatype datatype) +forward_comm_brick(T *ptr, int nper, int /*nbyte*/, int which, + void *buf1, void *buf2, MPI_Datatype datatype) { int m; MPI_Request request; @@ -1108,14 +1109,14 @@ void Grid2d::reverse_comm(int caller, void *ptr, int nper, int nbyte, int which, { if (layout == REGULAR) { if (caller == KSPACE) - reverse_comm_regular((KSpace *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + reverse_comm_brick((KSpace *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == PAIR) - reverse_comm_regular((Pair *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + reverse_comm_brick((Pair *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == FIX) - reverse_comm_regular((Fix *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + reverse_comm_brick((Fix *) ptr,nper,nbyte,which, + buf1,buf2,datatype); } else { if (caller == KSPACE) reverse_comm_tiled((KSpace *) ptr,nper,nbyte,which, @@ -1135,8 +1136,8 @@ void Grid2d::reverse_comm(int caller, void *ptr, int nper, int nbyte, int which, template < class T > void Grid2d:: -reverse_comm_regular(T *ptr, int nper, int /*nbyte*/, int which, - void *buf1, void *buf2, MPI_Datatype datatype) +reverse_comm_brick(T *ptr, int nper, int /*nbyte*/, int which, + void *buf1, void *buf2, MPI_Datatype datatype) { int m; MPI_Request request; @@ -1209,22 +1210,22 @@ reverse_comm_tiled(T *ptr, int nper, int nbyte, int which, pack/unpack operations are performed by caller via callbacks ------------------------------------------------------------------------- */ -void 
Grid2d::remap_setup(Grid2d *old, int &ngrid1_buf, int &ngrid2_buf) +void Grid2d::setup_remap(Grid2d *old, int &nremap_buf1, int &nremap_buf2) { - if (layout == REGULAR) remap_setup_regular(old,ngrid1_buf,ngrid2_buf); - else remap_setup_tiled(old,ngrid1_buf,ngrid2_buf); + if (layout == REGULAR) setup_remap_brick(old,nremap_buf1,nremap_buf2); + else setup_remap_tiled(old,nremap_buf1,nremap_buf2); } -void Grid2d::remap_setup_regular(Grid2d *old, int &ngrid1_buf, int &ngrid2_buf) +void Grid2d::setup_remap_brick(Grid2d *old, int &nremap_buf1, int &nremap_buf2) { - ngrid1_buf = 0; - ngrid2_buf = 0; + nremap_buf1 = 0; + nremap_buf2 = 0; } -void Grid2d::remap_setup_tiled(Grid2d *old, int &ngrid1_buf, int &ngrid2_buf) +void Grid2d::setup_remap_tiled(Grid2d *old, int &nremap_buf1, int &nremap_buf2) { - ngrid1_buf = 0; - ngrid2_buf = 0; + nremap_buf1 = 0; + nremap_buf2 = 0; } /* ---------------------------------------------------------------------- diff --git a/src/grid2d.h b/src/grid2d.h index 86bde517f6..df5faff32b 100644 --- a/src/grid2d.h +++ b/src/grid2d.h @@ -37,7 +37,7 @@ class Grid2d : protected Pointers { int ghost_adjacent(); void forward_comm(int, void *, int, int, int, void *, void *, MPI_Datatype); void reverse_comm(int, void *, int, int, int, void *, void *, MPI_Datatype); - void remap_setup(Grid2d *, int &, int &); + void setup_remap(Grid2d *, int &, int &); void remap(int, void *, int, int, void *, void *, MPI_Datatype); void gather(int, void *, int, int, int, void *, MPI_Datatype); @@ -193,21 +193,21 @@ class Grid2d : protected Pointers { void store(int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int); - virtual void setup_regular(int &, int &); + virtual void setup_brick(int &, int &); virtual void setup_tiled(int &, int &); void ghost_box_drop(int *, int *); void box_drop_grid(int *, int, int, int &, int *); - int ghost_adjacent_regular(); + int ghost_adjacent_brick(); int ghost_adjacent_tiled(); - template void 
forward_comm_regular(T *, int, int, int, void *, void *, MPI_Datatype); + template void forward_comm_brick(T *, int, int, int, void *, void *, MPI_Datatype); template void forward_comm_tiled(T *, int, int, int, void *, void *, MPI_Datatype); - template void reverse_comm_regular(T *, int, int, int, void *, void *, MPI_Datatype); + template void reverse_comm_brick(T *, int, int, int, void *, void *, MPI_Datatype); template void reverse_comm_tiled(T *, int, int, int, void *, void *, MPI_Datatype); - void remap_setup_regular(Grid2d *, int &, int &); - void remap_setup_tiled(Grid2d *, int &, int &); + void setup_remap_brick(Grid2d *, int &, int &); + void setup_remap_tiled(Grid2d *, int &, int &); template void remap_style(T *, int, int, void *, void *, MPI_Datatype); virtual void grow_swap(); diff --git a/src/grid3d.cpp b/src/grid3d.cpp index 2701b81c6f..a503ad90da 100644 --- a/src/grid3d.cpp +++ b/src/grid3d.cpp @@ -33,12 +33,12 @@ enum{REGULAR,TILED}; static constexpr int OFFSET = 16384; /* ---------------------------------------------------------------------- - NOTES - tiled implementation only currently works for RCB, not general tiled - b/c RCB tree is used to find neighboring tiles + NOTES: + tiled implementations only currently work for RCB, not general tilings + b/c RCB tree is used to find neighboring tiles if o indices for ghosts are < 0 or hi indices are >= N, then grid is treated as periodic in that dimension, - communication is done across the periodic boundaries + comm is done across the periodic boundaries ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- @@ -52,7 +52,7 @@ static constexpr int OFFSET = 16384; i xyz lohi = portion of global grid this proc owns, 0 <= index < N o xyz lohi = owned + ghost grid cells needed in all directions for non-periodic dims, o indices will not be < 0 or >= N, - since no grid communication is done across non-periodic 
boundaries + since no grid comm is done across non-periodic boundaries ------------------------------------------------------------------------- */ Grid3d::Grid3d(LAMMPS *lmp, MPI_Comm gcomm, @@ -295,7 +295,7 @@ Grid3d::Grid3d(LAMMPS *lmp, MPI_Comm gcomm, int flag, Grid3d::~Grid3d() { - // regular comm data struct + // brick comm data structs for (int i = 0; i < nswap; i++) { memory->destroy(swap[i].packlist); @@ -440,24 +440,25 @@ void Grid3d::get_bounds_ghost(int &xlo, int &xhi, int &ylo, int &yhi, } /* ---------------------------------------------------------------------- + setup owned/ghost communication return sizes of two buffers needed for communication - either on regular grid or procs or irregular tiling + either for regular brick comm or irregular tiling comm nbuf1 = largest pack or unpack in any Send or Recv or Copy nbuf2 = larget of sum of all packs or unpacks in Send or Recv - for regular comm, nbuf1 = nbuf2 - for irregular comm, nbuf2 >= nbuf2 - nbuf1,nbuf2 are just count of grid points - caller converts them to message size for grid data it stores + for regular brick comm, nbuf1 = nbuf2 + for irregular tiling comm, nbuf2 >= nbuf1 + nbuf1,nbuf2 are counts of grid points + caller converts them to message sizes for grid data it stores ------------------------------------------------------------------------- */ void Grid3d::setup(int &nbuf1, int &nbuf2) { - if (layout == REGULAR) setup_regular(nbuf1,nbuf2); + if (layout == REGULAR) setup_brick(nbuf1,nbuf2); else setup_tiled(nbuf1,nbuf2); } /* ---------------------------------------------------------------------- - setup comm for a regular grid of procs + setup owned/ghost comm for regular brick comm each proc has 6 neighbors comm pattern = series of swaps with one of those 6 procs can be multiple swaps with same proc if ghost extent is large @@ -465,7 +466,7 @@ void Grid3d::setup(int &nbuf1, int &nbuf2) all procs perform same # of swaps in a direction, even if some don't need it
------------------------------------------------------------------------- */ -void Grid3d::setup_regular(int &nbuf1, int &nbuf2) +void Grid3d::setup_brick(int &nbuf1, int &nbuf2) { int nsent,sendfirst,sendlast,recvfirst,recvlast; int sendplanes,recvplanes; @@ -758,7 +759,7 @@ void Grid3d::setup_regular(int &nbuf1, int &nbuf2) } /* ---------------------------------------------------------------------- - setup comm for RCB tiled proc domains + setup owned/ghost comm for irregular tiled comm each proc has arbitrary # of neighbors that overlap its ghost extent identify which procs will send me ghost cells, and vice versa may not be symmetric if both procs do not need same layers of ghosts @@ -805,7 +806,7 @@ void Grid3d::setup_tiled(int &nbuf1, int &nbuf2) noverlap = maxoverlap = 0; overlap = nullptr; - ghost_box_drop(ghostbox,pbc); + box_drop(ghostbox,pbc); // send each proc an overlap message // content: me, index of my overlap, box that overlaps with its owned cells @@ -991,7 +992,7 @@ void Grid3d::setup_tiled(int &nbuf1, int &nbuf2) add all the procs it overlaps with to Overlap list ------------------------------------------------------------------------- */ -void Grid3d::ghost_box_drop(int *box, int *pbc) +void Grid3d::box_drop(int *box, int *pbc) { int i,m; @@ -1063,8 +1064,8 @@ void Grid3d::ghost_box_drop(int *box, int *pbc) // recurse with 2 split boxes if (splitflag) { - ghost_box_drop(newbox1,pbc); - ghost_box_drop(newbox2,newpbc); + box_drop(newbox1,pbc); + box_drop(newbox2,newpbc); } } @@ -1109,7 +1110,7 @@ void Grid3d::box_drop_grid(int *box, int proclower, int procupper, int Grid3d::ghost_adjacent() { - if (layout == REGULAR) return ghost_adjacent_regular(); + if (layout == REGULAR) return ghost_adjacent_brick(); return ghost_adjacent_tiled(); } @@ -1118,7 +1119,7 @@ int Grid3d::ghost_adjacent() return 0 if adjacent=0 for any proc, else 1 ------------------------------------------------------------------------- */ -int Grid3d::ghost_adjacent_regular() 
+int Grid3d::ghost_adjacent_brick() { adjacent = 1; if (ghostxlo > inxhi-inxlo+1) adjacent = 0; @@ -1155,14 +1156,14 @@ void Grid3d::forward_comm(int caller, void *ptr, int nper, int nbyte, int which, { if (layout == REGULAR) { if (caller == KSPACE) - forward_comm_regular((KSpace *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + forward_comm_brick((KSpace *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == PAIR) - forward_comm_regular((Pair *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + forward_comm_brick((Pair *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == FIX) - forward_comm_regular((Fix *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + forward_comm_brick((Fix *) ptr,nper,nbyte,which, + buf1,buf2,datatype); } else { if (caller == KSPACE) forward_comm_tiled((KSpace *) ptr,nper,nbyte,which, @@ -1182,8 +1183,8 @@ void Grid3d::forward_comm(int caller, void *ptr, int nper, int nbyte, int which, template < class T > void Grid3d:: -forward_comm_regular(T *ptr, int nper, int /*nbyte*/, int which, - void *buf1, void *buf2, MPI_Datatype datatype) +forward_comm_brick(T *ptr, int nper, int /*nbyte*/, int which, + void *buf1, void *buf2, MPI_Datatype datatype) { int m; MPI_Request request; @@ -1260,14 +1261,14 @@ void Grid3d::reverse_comm(int caller, void *ptr, int nper, int nbyte, int which, { if (layout == REGULAR) { if (caller == KSPACE) - reverse_comm_regular((KSpace *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + reverse_comm_brick((KSpace *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == PAIR) - reverse_comm_regular((Pair *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + reverse_comm_brick((Pair *) ptr,nper,nbyte,which, + buf1,buf2,datatype); else if (caller == FIX) - reverse_comm_regular((Fix *) ptr,nper,nbyte,which, - buf1,buf2,datatype); + reverse_comm_brick((Fix *) ptr,nper,nbyte,which, + buf1,buf2,datatype); } else { if (caller == KSPACE) reverse_comm_tiled((KSpace *) ptr,nper,nbyte,which, @@ -1287,8 +1288,8 
@@ void Grid3d::reverse_comm(int caller, void *ptr, int nper, int nbyte, int which, template < class T > void Grid3d:: -reverse_comm_regular(T *ptr, int nper, int /*nbyte*/, int which, - void *buf1, void *buf2, MPI_Datatype datatype) +reverse_comm_brick(T *ptr, int nper, int /*nbyte*/, int which, + void *buf1, void *buf2, MPI_Datatype datatype) { int m; MPI_Request request; @@ -1356,30 +1357,81 @@ reverse_comm_tiled(T *ptr, int nper, int nbyte, int which, } } +/* ---------------------------------------------------------------------- + return sizes of two buffers needed for communication + either on regular grid or procs or irregular tiling + nbuf1 = largest pack or unpack in any Send or Recv or Copy + nbuf2 = larget of sum of all packs or unpacks in Send or Recv + for regular comm, nbuf1 = nbuf2 + for irregular comm, nbuf2 >= nbuf1 + nbuf1,nbuf2 are just count of grid points + caller converts them to message size for grid data it stores +------------------------------------------------------------------------- */ + /* ---------------------------------------------------------------------- setup remap from old grid decomposition to this grid decomposition pack/unpack operations are performed by caller via callbacks ------------------------------------------------------------------------- */ -void Grid3d::remap_setup(Grid3d *old, int &ngrid1_buf, int &ngrid2_buf) +void Grid3d::setup_remap(Grid3d *old, int &nremap_buf1, int &nremap_buf2) { - if (layout == REGULAR) remap_setup_regular(old,ngrid1_buf,ngrid2_buf); - else remap_setup_tiled(old,ngrid1_buf,ngrid2_buf); + if (layout == REGULAR) setup_remap_brick(old,nremap_buf1,nremap_buf2); + else setup_remap_tiled(old,nremap_buf1,nremap_buf2); } -void Grid3d::remap_setup_regular(Grid3d *old, int &ngrid1_buf, int &ngrid2_buf) +void Grid3d::setup_remap_brick(Grid3d *old, int &nremap_buf1, int &nremap_buf2) { // NOTE: when to clean up data structs when multiple remaps occur // NOTE: does a remap also require ghost comm in fix
ttm/grid ? - ngrid1_buf = 0; - ngrid2_buf = 0; + nremap_buf1 = 0; + nremap_buf2 = 0; } -void Grid3d::remap_setup_tiled(Grid3d *old, int &ngrid1_buf, int &ngrid2_buf) +void Grid3d::setup_remap_tiled(Grid3d *old, int &nremap_buf1, int &nremap_buf2) { - ngrid1_buf = 0; - ngrid2_buf = 0; + // find overlaps of my owned box in old decomp with all procs in new decomp + // noverlap = # of overlaps, including self + // overlap = vector of overlap info using Overlap data struct + + int ownedbox[6],pbc[3]; + + old->get_bounds(ownedbox[0],ownedbox[1],ownedbox[2],ownedbox[3], + ownedbox[4],ownedbox[5]); + pbc[0] = pbc[1] = pbc[2] = 0; + + memory->create(overlap_procs,nprocs,"grid3d:overlap_procs"); + noverlap = maxoverlap = 0; + overlap = nullptr; + + box_drop(ownedbox,pbc); + + // use overlaps to construct send and copy lists + + self_remap = 0; + nsend_request = 0; + + for (int m = 0; m < noverlap; m++) { + if (overlap[m].proc == me) self_remap = 1; + else { + proclist[nsend_request] = overlap[m].proc; + srequest[nsend_request].sender = me; + srequest[nsend_request].index = m; + for (int i = 0; i < 6; i++) + srequest[nsend_request].box[i] = overlap[m].box[i]; + nsend_request++; + } + } + + + // send each proc an overlap message + + + // use received overlaps to construct recv and copy lists + + + nremap_buf1 = 0; + nremap_buf2 = 0; } /* ---------------------------------------------------------------------- diff --git a/src/grid3d.h b/src/grid3d.h index 5e4cee30ed..f4a8f0321e 100644 --- a/src/grid3d.h +++ b/src/grid3d.h @@ -39,7 +39,7 @@ class Grid3d : protected Pointers { int ghost_adjacent(); void forward_comm(int, void *, int, int, int, void *, void *, MPI_Datatype); void reverse_comm(int, void *, int, int, int, void *, void *, MPI_Datatype); - void remap_setup(Grid3d *, int &, int &); + void setup_remap(Grid3d *, int &, int &); void remap(int, void *, int, int, void *, void *, MPI_Datatype); void gather(int, void *, int, int, int, void *, MPI_Datatype); @@ -199,21 +199,21 @@
class Grid3d : protected Pointers { void store(int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int, int); - virtual void setup_regular(int &, int &); + virtual void setup_brick(int &, int &); virtual void setup_tiled(int &, int &); - void ghost_box_drop(int *, int *); + void box_drop(int *, int *); void box_drop_grid(int *, int, int, int &, int *); - int ghost_adjacent_regular(); + int ghost_adjacent_brick(); int ghost_adjacent_tiled(); - template void forward_comm_regular(T *, int, int, int, void *, void *, MPI_Datatype); + template void forward_comm_brick(T *, int, int, int, void *, void *, MPI_Datatype); template void forward_comm_tiled(T *, int, int, int, void *, void *, MPI_Datatype); - template void reverse_comm_regular(T *, int, int, int, void *, void *, MPI_Datatype); + template void reverse_comm_brick(T *, int, int, int, void *, void *, MPI_Datatype); template void reverse_comm_tiled(T *, int, int, int, void *, void *, MPI_Datatype); - void remap_setup_regular(Grid3d *, int &, int &); - void remap_setup_tiled(Grid3d *, int &, int &); + void setup_remap_brick(Grid3d *, int &, int &); + void setup_remap_tiled(Grid3d *, int &, int &); template void remap_style(T *, int, int, void *, void *, MPI_Datatype); virtual void grow_swap();