git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@7333 f3b2605a-c512-4ea7-a41b-209d697bcdaa

This commit is contained in:
sjplimp
2011-12-09 16:46:31 +00:00
parent 7060c62c8d
commit 5baa07d3af
8 changed files with 63 additions and 574 deletions

View File

@@ -51,7 +51,7 @@ int MPI_Initialized(int *flag)
/* ---------------------------------------------------------------------- */
/* Returns "localhost" as the name of the processor */
/* return "localhost" as name of the processor */
void MPI_Get_processor_name(char *name, int *resultlen)
{

View File

@@ -8,3 +8,12 @@ doc/Section_accelerate.html, sub-section 5.2
The person who created this package is Axel Kohlmeyer at Temple U
(akohlmey at gmail.com). Contact him directly if you have questions.
--------------------------
This directory also contains a shell script:
hack_openmp_for_pgi.sh
which will convert OpenMP directives in src files
into a form compatible with the PGI compiler.

View File

@@ -1201,8 +1201,9 @@ void Comm::reverse_comm_dump(Dump *dump)
area = surface area of each of 3 faces of simulation box divided by sx,sy,sz
for triclinic, area = cross product of 2 edge vectors stored in h matrix
valid assignment will be factorization of numprocs = Px by Py by Pz
user_factors = if non-zero, factor is specified by user
user_factors = if non-zero, factors are specified by user
sx,sy,sz = scale box xyz dimensions by dividing by sx,sy,sz
other = 1 to enforce compatibility with other partition's layout
return factors = # of procs assigned to each dimension
return 1 if successfully factored, 0 if not
------------------------------------------------------------------------- */
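The comment block above summarizes the core of procs2box(): try each factorization of numprocs into Px by Py by Pz and keep the one whose subdomains have the smallest surface area. Below is a rough stand-alone sketch of that idea only, not the actual LAMMPS routine; it ignores user_factors, the sx/sy/sz scaling, and the other-partition constraint, and assumes an orthogonal box with edge lengths lx,ly,lz (the function name factor_procs is hypothetical).

// Hypothetical illustration: brute-force factorization of nprocs into
// px*py*pz that minimizes the surface area of each subdomain of an
// orthogonal box with edge lengths lx,ly,lz.  Not the LAMMPS procs2box().
#include <cfloat>

static int factor_procs(int nprocs, double lx, double ly, double lz,
                        int grid[3])
{
  double best = DBL_MAX;
  int found = 0;
  for (int px = 1; px <= nprocs; px++) {
    if (nprocs % px) continue;
    for (int py = 1; py <= nprocs/px; py++) {
      if ((nprocs/px) % py) continue;
      int pz = nprocs/px/py;
      // half the surface area of one subdomain (factor of 2 is constant)
      double area = (lx/px)*(ly/py) + (ly/py)*(lz/pz) + (lx/px)*(lz/pz);
      if (area < best) {
        best = area;
        grid[0] = px; grid[1] = py; grid[2] = pz;
        found = 1;
      }
    }
  }
  return found;   // 1 if a factorization was found, 0 if not
}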
@@ -1553,6 +1554,16 @@ void Comm::set_processors(int narg, char **arg)
} else error->all(FLERR,"Illegal processors command");
}
// error check
if (numa_nodes) {
if (layoutflag != CART)
error->all(FLERR,"Can only use processors numa "
"with processors grid cart");
if (send_to_partition >= 0 || recv_from_partition >= 0)
error->one(FLERR,"Cannot use processors numa with processors part");
}
}
/* ----------------------------------------------------------------------
@@ -1587,15 +1598,15 @@ int Comm::numa_set_proc_grid()
int procs_per_node = name_map.begin()->second;
int procs_per_numa = procs_per_node / numa_nodes;
// use regular mapping if any condition met
// use non-numa mapping if any condition met
if (procs_per_numa < 4 || // less than 4 procs per numa node
procs_per_node % numa_nodes != 0 || // reserve usage for numa_node != 1
procs_per_node % numa_nodes != 0 || // no-op since numa_nodes = 1 for now
nprocs % procs_per_numa != 0 || // total procs not a multiple of node
nprocs <= procs_per_numa || // only 1 node used
user_procgrid[0] > 1 || // user specified grid dimension
user_procgrid[1] > 1 || // that is greater than 1
user_procgrid[2] > 1) { // in any dimension
user_procgrid[0] > 1 || // user specified grid dimension > 1
user_procgrid[1] > 1 || // in any dimension
user_procgrid[2] > 1) {
if (me == 0) {
if (screen) fprintf(screen," 1 by 1 by 1 Node grid\n");
if (logfile) fprintf(logfile," 1 by 1 by 1 Node grid\n");
@@ -1617,17 +1628,13 @@ int Comm::numa_set_proc_grid()
// get an initial factorization for each numa node,
// if the user has not set the number of processors
// can fail (on one partition) if constrained by other_partition_style
int numagrid[3];
int flag = procs2box(procs_per_numa,user_numagrid,numagrid,
1,1,1,other_partition_style);
if (!flag) error->all(FLERR,"Could not layout grid of processors",1);
procs2box(procs_per_numa,user_numagrid,numagrid,1,1,1,0);
if (numagrid[0] * numagrid[1] * numagrid[2] != procs_per_numa)
error->all(FLERR,"Bad node grid of processors");
// get a factorization for the grid of numa nodes
// should never fail
int node_count = nprocs / procs_per_numa;
procs2box(node_count,user_procgrid,procgrid,
@@ -1637,7 +1644,6 @@ int Comm::numa_set_proc_grid()
// repeat the numa node factorization using the subdomain sizes
// this will refine the factorization if the user specified the node layout
// should never fail
procs2box(procs_per_numa,user_numagrid,numagrid,
procgrid[0],procgrid[1],procgrid[2],0);
@@ -1675,7 +1681,7 @@ int Comm::numa_set_proc_grid()
MPI_Comm_split(world,numa_rank,0,&numa_leaders);
// use the MPI Cartesian routines to map the nodes to the grid
// could implement layoutflag as in non-NUMA case
// could implement layoutflag as in non-NUMA case?
int reorder = 0;
int periods[3];

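For reference, the "MPI Cartesian routines" the comment above refers to are MPI_Cart_create() and related calls. A minimal stand-alone sketch of using them to map ranks onto a 3D grid follows; it is illustrative only, and the function name map_to_grid and its arguments are hypothetical, not the LAMMPS interface.

// Hypothetical illustration of mapping ranks onto a 3D grid with the MPI
// Cartesian topology routines, as numa_set_proc_grid() does for nodes.
#include <mpi.h>

void map_to_grid(MPI_Comm world, int procgrid[3], int myloc[3])
{
  int reorder = 0;                 // keep ranks in their original order
  int periods[3] = {1,1,1};        // periodic in all 3 dimensions
  MPI_Comm cartesian;
  MPI_Cart_create(world,3,procgrid,periods,reorder,&cartesian);
  MPI_Cart_get(cartesian,3,procgrid,periods,myloc);  // my x,y,z grid coords
  MPI_Comm_free(&cartesian);
}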
View File

@@ -156,7 +156,7 @@ LAMMPS::LAMMPS(int narg, char **arg, MPI_Comm communicator)
} else error->universe_all(FLERR,"Invalid command-line argument");
}
// if no partition command-line switch, universe is one world w/ all procs
// if no partition command-line switch, universe is one world with all procs
if (universe->existflag == 0) universe->add_world(NULL);
@@ -211,8 +211,8 @@ LAMMPS::LAMMPS(int narg, char **arg, MPI_Comm communicator)
universe->ulogfile = NULL;
}
// universe does not exist on its own, only a single world
// inherit settings from universe
// make universe and single world the same, since no partition switch
// world inherits settings from universe
// set world screen, logfile, communicator, infile
// open input script if from file
@@ -237,8 +237,8 @@ LAMMPS::LAMMPS(int narg, char **arg, MPI_Comm communicator)
if (logfile) fprintf(logfile,"LAMMPS (%s)\n",universe->version);
}
// universe is one or more worlds
// split into separate communicators
// universe is one or more worlds, as setup by partition switch
// split universe communicator into separate world communicators
// set world screen, logfile, communicator, infile
// open input script
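For context, the "split universe communicator into separate world communicators" step described above boils down to an MPI_Comm_split() keyed on the partition index. A minimal sketch of that idea only; make_world and its arguments are hypothetical, and the real logic is spread across LAMMPS::LAMMPS() and Universe.

// Hypothetical illustration: split a universe of procs into per-partition
// "world" communicators by using the partition index as the split color.
#include <mpi.h>

MPI_Comm make_world(MPI_Comm universe, int iworld)
{
  int ume;
  MPI_Comm_rank(universe,&ume);
  MPI_Comm world;
  // procs with the same iworld land in the same world communicator,
  // ordered within it by their universe rank
  MPI_Comm_split(universe,iworld,ume,&world);
  return world;
}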

View File

@@ -29,7 +29,7 @@ Universe::Universe(LAMMPS *lmp, MPI_Comm communicator) : Pointers(lmp)
{
version = (char *) LAMMPS_VERSION;
uworld = communicator;
uworld = original = communicator;
MPI_Comm_rank(uworld,&me);
MPI_Comm_size(uworld,&nprocs);
@@ -40,14 +40,37 @@ Universe::Universe(LAMMPS *lmp, MPI_Comm communicator) : Pointers(lmp)
nworlds = 0;
procs_per_world = NULL;
root_proc = NULL;
memory->create(proc2original,nprocs,"universe:proc2original");
for (int i = 0; i < nprocs; i++) proc2original[i] = i;
}
/* ---------------------------------------------------------------------- */
Universe::~Universe()
{
if (uworld != original) MPI_Comm_free(&uworld);
memory->destroy(procs_per_world);
memory->destroy(root_proc);
memory->destroy(proc2original);
}
/* ----------------------------------------------------------------------
placeholder routine, not yet operational
permute the mapping of universe procs in uworld to procs in original
------------------------------------------------------------------------- */
void Universe::reorder(int key)
{
if (uworld != original) MPI_Comm_free(&uworld);
MPI_Comm_split(original,0,key,&uworld);
MPI_Comm_rank(uworld,&me);
MPI_Comm_size(uworld,&nprocs);
int ome;
MPI_Comm_rank(original,&ome);
MPI_Allgather(&ome,1,MPI_INT,proc2original,1,MPI_INT,uworld);
}
/* ----------------------------------------------------------------------

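Universe::reorder() above works because MPI_Comm_split() orders ranks in the new communicator by the key argument, and an MPI_Allgather() over the new communicator then records which original rank each new rank corresponds to. The small stand-alone program below shows the same pattern, here reversing the rank order; it is illustrative only, not LAMMPS code.

// Hypothetical illustration: reverse the rank order of a communicator by
// splitting with color 0 and key = (nprocs-1-rank), then record the mapping
// from new ranks back to original ranks, as Universe::reorder() does.
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc,&argv);
  int me,nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD,&me);
  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);

  MPI_Comm newworld;
  MPI_Comm_split(MPI_COMM_WORLD,0,nprocs-1-me,&newworld);

  int newme;
  MPI_Comm_rank(newworld,&newme);

  // proc2original[i] = rank in MPI_COMM_WORLD of new rank i
  int *proc2original = new int[nprocs];
  MPI_Allgather(&me,1,MPI_INT,proc2original,1,MPI_INT,newworld);

  if (newme == 0)
    for (int i = 0; i < nprocs; i++)
      printf("new rank %d was original rank %d\n",i,proc2original[i]);

  delete [] proc2original;
  MPI_Comm_free(&newworld);
  MPI_Finalize();
}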
View File

@@ -35,8 +35,13 @@ class Universe : protected Pointers {
int *procs_per_world; // # of procs in each world
int *root_proc; // root proc in each world
MPI_Comm original; // original communicator passed to LAMMPS instance
int *proc2original; // proc I in universe uworld is
// proc p2o[I] in original communicator
Universe(class LAMMPS *, MPI_Comm);
~Universe();
void reorder(int);
void add_world(char *);
int consistent();
};

View File

@@ -1,500 +0,0 @@
/* -------------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing authors: Yuxing Peng and Chris Knight (U Chicago)
------------------------------------------------------------------------- */
#include "string.h"
#include "verlet_split.h"
#include "universe.h"
#include "neighbor.h"
#include "domain.h"
#include "comm.h"
#include "atom.h"
#include "atom_vec.h"
#include "force.h"
#include "pair.h"
#include "bond.h"
#include "angle.h"
#include "dihedral.h"
#include "improper.h"
#include "kspace.h"
#include "output.h"
#include "update.h"
#include "modify.h"
#include "timer.h"
#include "memory.h"
#include "error.h"
using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
VerletSplit::VerletSplit(LAMMPS *lmp, int narg, char **arg) :
Verlet(lmp, narg, arg)
{
// error checks on partitions
if (universe->nworlds != 2)
error->universe_all(FLERR,"Verlet/split requires 2 partitions");
if (universe->procs_per_world[0] % universe->procs_per_world[1])
error->universe_all(FLERR,"Verlet/split requires Rspace partition "
"size be multiple of Kspace partition size");
// master = 1 for Rspace procs, 0 for Kspace procs
if (universe->iworld == 0) master = 1;
else master = 0;
ratio = universe->procs_per_world[0] / universe->procs_per_world[1];
// Kspace root proc broadcasts info about Kspace proc layout to Rspace procs
int kspace_procgrid[3];
if (universe->me == universe->root_proc[1]) {
kspace_procgrid[0] = comm->procgrid[0];
kspace_procgrid[1] = comm->procgrid[1];
kspace_procgrid[2] = comm->procgrid[2];
}
MPI_Bcast(kspace_procgrid,3,MPI_INT,universe->root_proc[1],universe->uworld);
int ***kspace_grid2proc;
memory->create(kspace_grid2proc,kspace_procgrid[0],
kspace_procgrid[1],kspace_procgrid[2],
"verlet/split:kspace_grid2proc");
if (universe->me == universe->root_proc[1]) {
for (int i = 0; i < comm->procgrid[0]; i++)
for (int j = 0; j < comm->procgrid[1]; j++)
for (int k = 0; k < comm->procgrid[2]; k++)
kspace_grid2proc[i][j][k] = comm->grid2proc[i][j][k];
}
MPI_Bcast(&kspace_grid2proc[0][0][0],
kspace_procgrid[0]*kspace_procgrid[1]*kspace_procgrid[2],MPI_INT,
universe->root_proc[1],universe->uworld);
// Rspace partition must be multiple of Kspace partition in each dim
// so atoms of one Kspace proc coincide with atoms of several Rspace procs
if (master) {
int flag = 0;
if (comm->procgrid[0] % kspace_procgrid[0]) flag = 1;
if (comm->procgrid[1] % kspace_procgrid[1]) flag = 1;
if (comm->procgrid[2] % kspace_procgrid[2]) flag = 1;
if (flag)
error->one(FLERR,
"Verlet/split requires Rspace partition layout be "
"multiple of Kspace partition layout in each dim");
}
// block = 1 Kspace proc with set of Rspace procs it overlays
// me_block = 0 for Kspace proc
// me_block = 1 to ratio for Rspace procs
// block = MPI communicator for that set of procs
int iblock,key;
if (!master) {
iblock = comm->me;
key = 0;
} else {
int kpx = comm->myloc[0] / (comm->procgrid[0]/kspace_procgrid[0]);
int kpy = comm->myloc[1] / (comm->procgrid[1]/kspace_procgrid[1]);
int kpz = comm->myloc[2] / (comm->procgrid[2]/kspace_procgrid[2]);
iblock = kspace_grid2proc[kpx][kpy][kpz];
key = 1;
}
MPI_Comm_split(universe->uworld,iblock,key,&block);
MPI_Comm_rank(block,&me_block);
// output block groupings to universe screen/logfile
// bmap is ordered by block and then by proc within block
int *bmap = new int[universe->nprocs];
for (int i = 0; i < universe->nprocs; i++) bmap[i] = -1;
bmap[iblock*(ratio+1)+me_block] = universe->me;
int *bmapall = new int[universe->nprocs];
MPI_Allreduce(bmap,bmapall,universe->nprocs,MPI_INT,MPI_MAX,universe->uworld);
if (universe->me == 0) {
if (universe->uscreen) {
fprintf(universe->uscreen,"Rspace/Kspace procs in each block:\n");
int m = 0;
for (int i = 0; i < universe->nprocs/(ratio+1); i++) {
fprintf(universe->uscreen," block %d:",i);
int kspace_proc = bmapall[m++];
for (int j = 1; j <= ratio; j++)
fprintf(universe->uscreen," %d",bmapall[m++]);
fprintf(universe->uscreen," %d\n",kspace_proc);
}
}
if (universe->ulogfile) {
fprintf(universe->ulogfile,"Rspace/Kspace procs in each block:\n");
int m = 0;
for (int i = 0; i < universe->nprocs/(ratio+1); i++) {
fprintf(universe->ulogfile," block %d:",i);
int kspace_proc = bmapall[m++];
for (int j = 1; j <= ratio; j++)
fprintf(universe->ulogfile," %d",bmapall[m++]);
fprintf(universe->ulogfile," %d\n",kspace_proc);
}
}
}
memory->destroy(kspace_grid2proc);
delete [] bmap;
delete [] bmapall;
// size/disp = vectors for MPI gather/scatter within block
qsize = new int[ratio+1];
qdisp = new int[ratio+1];
xsize = new int[ratio+1];
xdisp = new int[ratio+1];
// f_kspace = Rspace copy of Kspace forces
// allocate dummy version for Kspace partition
maxatom = 0;
f_kspace = NULL;
if (!master) memory->create(f_kspace,1,1,"verlet/split:f_kspace");
}
/* ---------------------------------------------------------------------- */
VerletSplit::~VerletSplit()
{
delete [] qsize;
delete [] qdisp;
delete [] xsize;
delete [] xdisp;
memory->destroy(f_kspace);
MPI_Comm_free(&block);
}
/* ----------------------------------------------------------------------
initialization before run
------------------------------------------------------------------------- */
void VerletSplit::init()
{
if (!force->kspace && comm->me == 0)
error->warning(FLERR,"No Kspace calculation with verlet/split");
if (force->kspace_match("tip4p",0)) tip4p_flag = 1;
else tip4p_flag = 0;
Verlet::init();
}
/* ----------------------------------------------------------------------
run for N steps
master partition does everything but Kspace
servant partition does just Kspace
communicate back and forth every step:
atom coords from master -> servant
kspace forces from servant -> master
also box bounds from master -> servant if necessary
------------------------------------------------------------------------- */
void VerletSplit::run(int n)
{
int nflag,ntimestep,sortflag;
// sync both partitions before starting timer
MPI_Barrier(universe->uworld);
timer->init();
timer->barrier_start(TIME_LOOP);
// setup initial Rspace <-> Kspace comm params
rk_setup();
// flags for timestepping iterations
int n_post_integrate = modify->n_post_integrate;
int n_pre_exchange = modify->n_pre_exchange;
int n_pre_neighbor = modify->n_pre_neighbor;
int n_pre_force = modify->n_pre_force;
int n_post_force = modify->n_post_force;
int n_end_of_step = modify->n_end_of_step;
if (atom->sortfreq > 0) sortflag = 1;
else sortflag = 0;
for (int i = 0; i < n; i++) {
ntimestep = ++update->ntimestep;
ev_set(ntimestep);
// initial time integration
if (master) {
modify->initial_integrate(vflag);
if (n_post_integrate) modify->post_integrate();
}
// regular communication vs neighbor list rebuild
if (master) nflag = neighbor->decide();
MPI_Bcast(&nflag,1,MPI_INT,1,block);
if (master) {
if (nflag == 0) {
timer->stamp();
comm->forward_comm();
timer->stamp(TIME_COMM);
} else {
if (n_pre_exchange) modify->pre_exchange();
if (triclinic) domain->x2lamda(atom->nlocal);
domain->pbc();
if (domain->box_change) {
domain->reset_box();
comm->setup();
if (neighbor->style) neighbor->setup_bins();
}
timer->stamp();
comm->exchange();
if (sortflag && ntimestep >= atom->nextsort) atom->sort();
comm->borders();
if (triclinic) domain->lamda2x(atom->nlocal+atom->nghost);
timer->stamp(TIME_COMM);
if (n_pre_neighbor) modify->pre_neighbor();
neighbor->build();
timer->stamp(TIME_NEIGHBOR);
}
}
// if reneighboring occurred, re-setup Rspace <-> Kspace comm params
// comm Rspace atom coords to Kspace procs
if (nflag) rk_setup();
r2k_comm();
// force computations
force_clear();
if (master) {
if (n_pre_force) modify->pre_force(vflag);
timer->stamp();
if (force->pair) {
force->pair->compute(eflag,vflag);
timer->stamp(TIME_PAIR);
}
if (atom->molecular) {
if (force->bond) force->bond->compute(eflag,vflag);
if (force->angle) force->angle->compute(eflag,vflag);
if (force->dihedral) force->dihedral->compute(eflag,vflag);
if (force->improper) force->improper->compute(eflag,vflag);
timer->stamp(TIME_BOND);
}
if (force->newton) {
comm->reverse_comm();
timer->stamp(TIME_COMM);
}
} else {
if (force->kspace) {
timer->stamp();
force->kspace->compute(eflag,vflag);
timer->stamp(TIME_KSPACE);
}
// TIP4P PPPM puts forces on ghost atoms, so must reverse_comm()
if (tip4p_flag && force->newton) {
comm->reverse_comm();
timer->stamp(TIME_COMM);
}
}
// comm and sum Kspace forces back to Rspace procs
k2r_comm();
// force modifications, final time integration, diagnostics
// all output
if (master) {
if (n_post_force) modify->post_force(vflag);
modify->final_integrate();
if (n_end_of_step) modify->end_of_step();
if (ntimestep == output->next) {
timer->stamp();
output->write(ntimestep);
timer->stamp(TIME_OUTPUT);
}
}
}
}
/* ----------------------------------------------------------------------
setup params for Rspace <-> Kspace communication
called initially and after every reneighbor
also communicate atom charges from Rspace to Kspace since they are static
------------------------------------------------------------------------- */
void VerletSplit::rk_setup()
{
// grow f_kspace array on master procs if necessary
if (master) {
if (atom->nlocal > maxatom) {
memory->destroy(f_kspace);
maxatom = atom->nmax;
memory->create(f_kspace,maxatom,3,"verlet/split:f_kspace");
}
}
// qsize = # of atoms owned by each master proc in block
int n = 0;
if (master) n = atom->nlocal;
MPI_Gather(&n,1,MPI_INT,qsize,1,MPI_INT,0,block);
// setup qdisp, xsize, xdisp based on qsize
// only needed by Kspace proc
// set Kspace nlocal to sum of Rspace nlocals
// insure Kspace atom arrays are large enough
if (!master) {
qsize[0] = qdisp[0] = xsize[0] = xdisp[0] = 0;
for (int i = 1; i <= ratio; i++) {
qdisp[i] = qdisp[i-1]+qsize[i-1];
xsize[i] = 3*qsize[i];
xdisp[i] = xdisp[i-1]+xsize[i-1];
}
atom->nlocal = qdisp[ratio] + qsize[ratio];
while (atom->nmax <= atom->nlocal) atom->avec->grow(0);
atom->nghost = 0;
}
// one-time gather of Rspace atom charges to Kspace proc
MPI_Gatherv(atom->q,n,MPI_DOUBLE,atom->q,qsize,qdisp,MPI_DOUBLE,0,block);
// for TIP4P also need to send atom type and tag
// KSpace procs need to acquire ghost atoms and map all their atoms
// map_clear() call is in lieu of comm->exchange() which performs map_clear
// borders() call acquires ghost atoms and maps them
if (tip4p_flag) {
MPI_Gatherv(atom->type,n,MPI_INT,atom->type,qsize,qdisp,MPI_INT,0,block);
MPI_Gatherv(atom->tag,n,MPI_INT,atom->tag,qsize,qdisp,MPI_INT,0,block);
if (!master) {
if (triclinic) domain->x2lamda(atom->nlocal);
if (domain->box_change) comm->setup();
timer->stamp();
atom->map_clear();
comm->borders();
if (triclinic) domain->lamda2x(atom->nlocal+atom->nghost);
timer->stamp(TIME_COMM);
}
}
}
/* ----------------------------------------------------------------------
communicate Rspace atom coords to Kspace
also eflag,vflag and box bounds if needed
------------------------------------------------------------------------- */
void VerletSplit::r2k_comm()
{
MPI_Status status;
int n = 0;
if (master) n = atom->nlocal;
MPI_Gatherv(atom->x[0],n*3,MPI_DOUBLE,atom->x[0],xsize,xdisp,
MPI_DOUBLE,0,block);
// send eflag,vflag from Rspace to Kspace
if (me_block == 1) {
int flags[2];
flags[0] = eflag; flags[1] = vflag;
MPI_Send(flags,2,MPI_INT,0,0,block);
} else if (!master) {
int flags[2];
MPI_Recv(flags,2,MPI_INT,1,0,block,&status);
eflag = flags[0]; vflag = flags[1];
}
// send box bounds from Rspace to Kspace if simulation box is dynamic
if (domain->box_change) {
if (me_block == 1) {
MPI_Send(domain->boxlo,3,MPI_DOUBLE,0,0,block);
MPI_Send(domain->boxhi,3,MPI_DOUBLE,0,0,block);
} else if (!master) {
MPI_Recv(domain->boxlo,3,MPI_DOUBLE,1,0,block,&status);
MPI_Recv(domain->boxhi,3,MPI_DOUBLE,1,0,block,&status);
domain->set_global_box();
domain->set_local_box();
force->kspace->setup();
}
}
// for TIP4P, Kspace partition needs to update its ghost atoms
if (tip4p_flag && !master) {
timer->stamp();
comm->forward_comm();
timer->stamp(TIME_COMM);
}
}
/* ----------------------------------------------------------------------
communicate and sum Kspace atom forces back to Rspace
------------------------------------------------------------------------- */
void VerletSplit::k2r_comm()
{
if (eflag) MPI_Bcast(&force->kspace->energy,1,MPI_DOUBLE,0,block);
if (vflag) MPI_Bcast(force->kspace->virial,6,MPI_DOUBLE,0,block);
int n = 0;
if (master) n = atom->nlocal;
MPI_Scatterv(atom->f[0],xsize,xdisp,MPI_DOUBLE,
f_kspace[0],n*3,MPI_DOUBLE,0,block);
if (master) {
double **f = atom->f;
int nlocal = atom->nlocal;
for (int i = 0; i < nlocal; i++) {
f[i][0] += f_kspace[i][0];
f[i][1] += f_kspace[i][1];
f[i][2] += f_kspace[i][2];
}
}
}
/* ----------------------------------------------------------------------
memory usage of Kspace force array on master procs
------------------------------------------------------------------------- */
bigint VerletSplit::memory_usage()
{
bigint bytes = maxatom*3 * sizeof(double);
return bytes;
}

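The deleted verlet_split.cpp above is built around one communication pattern: within each block (one Kspace proc plus the ratio Rspace procs it overlays), atom coords are gathered to the Kspace proc with MPI_Gatherv() and Kspace forces are scattered back with MPI_Scatterv(). Below is a stripped-down sketch of that pattern only; the function block_exchange is hypothetical, buffer growth, charges, and the TIP4P path are omitted, and the Kspace proc's x and f arrays are assumed large enough to hold every gathered atom.

// Hypothetical illustration of the per-block gather/scatter used by
// verlet/split: rank 0 of "block" is the Kspace proc, ranks 1..ratio are
// Rspace procs; coords flow Rspace -> Kspace, forces flow Kspace -> Rspace.
#include <mpi.h>
#include <vector>

void block_exchange(MPI_Comm block, int master, int nlocal,
                    double *x, double *f)
{
  int nblock;
  MPI_Comm_size(block,&nblock);
  int ratio = nblock - 1;

  // per-rank counts and displacements of 3*nlocal doubles
  std::vector<int> xsize(ratio+1),xdisp(ratio+1);
  int n = master ? 3*nlocal : 0;           // Kspace proc contributes nothing
  MPI_Gather(&n,1,MPI_INT,xsize.data(),1,MPI_INT,0,block);
  xdisp[0] = 0;
  for (int i = 1; i <= ratio; i++) xdisp[i] = xdisp[i-1] + xsize[i-1];

  // gather coords to the Kspace proc, then scatter its forces back
  MPI_Gatherv(x,n,MPI_DOUBLE,x,xsize.data(),xdisp.data(),MPI_DOUBLE,0,block);
  MPI_Scatterv(f,xsize.data(),xdisp.data(),MPI_DOUBLE,f,n,MPI_DOUBLE,0,block);
}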
View File

@@ -1,54 +0,0 @@
/* -------------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef INTEGRATE_CLASS
IntegrateStyle(verlet/split,VerletSplit)
#else
#ifndef LMP_VERLET_SPLIT_H
#define LMP_VERLET_SPLIT_H
#include "verlet.h"
namespace LAMMPS_NS {
class VerletSplit : public Verlet {
public:
VerletSplit(class LAMMPS *, int, char **);
~VerletSplit();
void init();
void run(int);
bigint memory_usage();
private:
int master; // 1 if an Rspace proc, 0 if Kspace
int me_block; // proc ID within Rspace/Kspace block
int ratio; // ratio of Rspace procs to Kspace procs
int *qsize,*qdisp,*xsize,*xdisp; // MPI gather/scatter params for block comm
MPI_Comm block; // communicator within one block
int tip4p_flag; // 1 if PPPM/tip4p so do extra comm
double **f_kspace; // copy of Kspace forces on Rspace procs
int maxatom;
void rk_setup();
void r2k_comm();
void k2r_comm();
};
}
#endif
#endif