replace tabs and remove trailing whitespace in lib folder with updated script

Axel Kohlmeyer
2021-08-22 20:45:24 -04:00
parent 30821b37e5
commit 92b5b159e5
311 changed files with 9176 additions and 9176 deletions
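The updated cleanup script is not itself part of this diff. A minimal sketch of the transformation it applies to each file — expanding tabs and stripping trailing whitespace — is shown below; the tab width, file selection, and all names are assumptions, not taken from the actual script.

import pathlib

def cleanup(path, tabwidth=4):
    # expand tabs to spaces, drop trailing whitespace, keep final newline
    text = path.read_text()
    lines = [line.expandtabs(tabwidth).rstrip() for line in text.splitlines()]
    fixed = "\n".join(lines) + "\n"
    if fixed != text:
        path.write_text(fixed)   # rewrite only files that actually changed
        return True
    return False

for f in pathlib.Path("lib").rglob("*"):
    if f.suffix in {".cpp", ".h", ".py"}:
        cleanup(f)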

View File

@@ -40,14 +40,14 @@ CSlib::CSlib(int csflag, const char *mode, const void *ptr, const void *pcomm)
else myworld = 0;
#ifdef MPI_NO
if (pcomm)
error_all("constructor(): CSlib invoked with MPI_Comm "
"but built w/out MPI support");
#endif
#ifdef MPI_YES // NOTE: this could be OK to allow ??
// would allow a parallel app to invoke CSlib
// in parallel and/or in serial
if (!pcomm)
error_all("constructor(): CSlib invoked w/out MPI_Comm "
"but built with MPI support");
#endif
@@ -63,7 +63,7 @@ CSlib::CSlib(int csflag, const char *mode, const void *ptr, const void *pcomm)
if (strcmp(mode,"file") == 0) msg = new MsgFile(csflag,ptr);
else if (strcmp(mode,"zmq") == 0) msg = new MsgZMQ(csflag,ptr);
else if (strcmp(mode,"mpi/one") == 0)
else if (strcmp(mode,"mpi/one") == 0)
error_all("constructor(): No mpi/one mode for serial lib usage");
else if (strcmp(mode,"mpi/two") == 0)
error_all("constructor(): No mpi/two mode for serial lib usage");
@@ -93,9 +93,9 @@ CSlib::CSlib(int csflag, const char *mode, const void *ptr, const void *pcomm)
allids = nullptr;
maxfieldbytes = 0;
fielddata = nullptr;
pad = "\0\0\0\0\0\0\0"; // just length 7 since the string literal adds a trailing null byte
nsend = nrecv = 0;
}
@@ -106,7 +106,7 @@ CSlib::~CSlib()
deallocate_fields();
sfree(header);
sfree(buf);
sfree(recvcounts);
sfree(displs);
sfree(allids);
@@ -127,7 +127,7 @@ void CSlib::send(int msgID_caller, int nfield_caller)
fieldcount = 0;
nbuf = 0;
if (fieldcount == nfield) send_message();
}
@@ -174,7 +174,7 @@ void CSlib::pack(int id, int ftype, int flen, void *data)
error_all("pack(): Reuse of field ID");
if (ftype < 1 || ftype > MAXTYPE) error_all("pack(): Invalid ftype");
if (flen < 0) error_all("pack(): Invalid flen");
fieldID[fieldcount] = id;
fieldtype[fieldcount] = ftype;
fieldlen[fieldcount] = flen;
@@ -185,7 +185,7 @@ void CSlib::pack(int id, int ftype, int flen, void *data)
memcpy(&buf[nbuf],data,nbytes);
memcpy(&buf[nbuf+nbytes],pad,nbytesround-nbytes);
nbuf += nbytesround;
fieldcount++;
if (fieldcount == nfield) send_message();
}
@@ -193,7 +193,7 @@ void CSlib::pack(int id, int ftype, int flen, void *data)
/* ---------------------------------------------------------------------- */
void CSlib::pack_parallel(int id, int ftype,
int nlocal, int *ids, int nper, void *data)
{
int i,j,k,m;
@@ -214,7 +214,7 @@ void CSlib::pack_parallel(int id, int ftype,
fieldID[fieldcount] = id;
fieldtype[fieldcount] = ftype;
fieldlen[fieldcount] = flen;
// nlocal datums, each of nper length, from all procs
// final data in buf = datums for all natoms, ordered by ids
@@ -238,7 +238,7 @@ void CSlib::pack_parallel(int id, int ftype,
MPI_Allgatherv(ids,nlocal,MPI_INT,allids,
recvcounts,displs,MPI_INT,world);
int nlocalsize = nper*nlocal;
MPI_Allgather(&nlocalsize,1,MPI_INT,recvcounts,1,MPI_INT,world);
@@ -254,22 +254,22 @@ void CSlib::pack_parallel(int id, int ftype,
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (int *) fielddata;
} else alldata = (int *) &buf[nbuf];
MPI_Allgatherv(data,nlocalsize,MPI_INT,alldata,
recvcounts,displs,MPI_INT,world);
if (ids) {
int *bufptr = (int *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
@@ -278,32 +278,32 @@ void CSlib::pack_parallel(int id, int ftype,
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (int64_t *) fielddata;
} else alldata = (int64_t *) &buf[nbuf];
// NOTE: may be just MPI_LONG on some machines
MPI_Allgatherv(data,nlocalsize,MPI_LONG_LONG,alldata,
recvcounts,displs,MPI_LONG_LONG,world);
if (ids) {
int64_t *bufptr = (int64_t *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
} else if (ftype == 3) {
float *alldata;
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (float *) fielddata;
@@ -314,11 +314,11 @@ void CSlib::pack_parallel(int id, int ftype,
float *bufptr = (float *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
@@ -327,7 +327,7 @@ void CSlib::pack_parallel(int id, int ftype,
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (double *) fielddata;
@@ -338,11 +338,11 @@ void CSlib::pack_parallel(int id, int ftype,
double *bufptr = (double *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
if (nper == 1) bufptr[j] = alldata[m++];
else
for (k = 0; k < nper; k++)
bufptr[j++] = alldata[m++];
}
}
@@ -352,7 +352,7 @@ void CSlib::pack_parallel(int id, int ftype,
if (ids) {
if (nbytes > maxfieldbytes) {
sfree(fielddata);
maxfieldbytes = nbytes;
fielddata = (char *) smalloc(maxfieldbytes);
}
alldata = (char *) fielddata;
@@ -363,9 +363,9 @@ void CSlib::pack_parallel(int id, int ftype,
char *bufptr = (char *) &buf[nbuf];
m = 0;
for (i = 0; i < nglobal; i++) {
j = (allids[i]-1) * nper;
memcpy(&bufptr[j],&alldata[m],nper);
m += nper;
}
}
*/
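For context on the loops above: when ids are given, pack_parallel() allgathers every rank's values and then scatters each datum of nper values into its globally ordered slot via the 1-based ids. A pure-Python stand-in for that reordering step (no MPI; names are hypothetical):

def reorder_by_ids(allids, alldata, nper):
    # alldata holds the gathered values in rank order; allids holds the
    # matching 1-based global ids; place each datum at its global offset
    bufptr = [0] * (len(allids) * nper)
    m = 0
    for gid in allids:
        j = (gid - 1) * nper
        for k in range(nper):
            bufptr[j + k] = alldata[m]
            m += 1
    return bufptr

# ids (3,1,2) with nper=1: gathered values come back sorted by global id
assert reorder_by_ids([3, 1, 2], [30, 10, 20], 1) == [10, 20, 30]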
@@ -399,14 +399,14 @@ void CSlib::send_message()
/* ---------------------------------------------------------------------- */
int CSlib::recv(int &nfield_caller, int *&fieldID_caller,
int *&fieldtype_caller, int *&fieldlen_caller)
int CSlib::recv(int &nfield_caller, int *&fieldID_caller,
int *&fieldtype_caller, int *&fieldlen_caller)
{
msg->recv(maxheader,header,maxbuf,buf);
nrecv++;
// unpack header message
int m = 0;
msgID = header[m++];
nfield = header[m++];
@@ -423,7 +423,7 @@ int CSlib::recv(int &nfield_caller, int *&fieldID_caller,
onefield(fieldtype[ifield],fieldlen[ifield],nbytes,nbytesround);
nbuf += nbytesround;
}
// return message parameters
nfield_caller = nfield;
@@ -513,7 +513,7 @@ void CSlib::unpack(int id, void *data)
{
int ifield = find_field(id,nfield);
if (ifield < 0) error_all("unpack(): Unknown field ID");
int ftype = fieldtype[ifield];
int nbytes = fieldlen[ifield];
if (ftype == 1) nbytes *= sizeof(int);
@@ -541,7 +541,7 @@ void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
MPI_Scan(&nlocal,&upto,1,MPI_INT,MPI_SUM,world);
upto -= nlocal;
}
if (fieldtype[ifield] == 1) {
int *local = (int *) data;
int *global = (int *) &buf[fieldoffset[ifield]];
@@ -549,13 +549,13 @@ void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
}
} else if (fieldtype[ifield] == 2) {
int64_t *local = (int64_t *) data;
@@ -564,11 +564,11 @@ void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
@@ -579,14 +579,14 @@ void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
} else if (fieldtype[ifield] == 4) {
double *local = (double *) data;
double *global = (double *) &buf[fieldoffset[ifield]];
@@ -594,14 +594,14 @@ void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
if (nper == 1) local[m++] = global[j];
else
for (k = 0; k < nper; k++)
local[m++] = global[j++];
}
}
/* eventually ftype = BYTE, but not yet
} else if (fieldtype[ifield] == 5) {
char *local = (char *) data;
@@ -610,9 +610,9 @@ void CSlib::unpack_parallel(int id, int nlocal, int *ids, int nper, void *data)
else {
m = 0;
for (i = 0; i < nlocal; i++) {
j = (ids[i]-1) * nper;
memcpy(&local[m],&global[j],nper);
m += nper;
}
}
*/
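unpack_parallel() is the inverse: each rank copies its own nlocal datums out of the globally ordered buffer through the same 1-based ids. A matching pure-Python stand-in (hypothetical names):

def select_by_ids(ids, global_buf, nper):
    # pull this rank's values, in local order, out of the global buffer
    local = []
    for gid in ids:
        j = (gid - 1) * nper
        local.extend(global_buf[j:j + nper])
    return local

assert select_by_ids([3, 1], [10, 20, 30], 1) == [30, 10]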
@@ -635,7 +635,7 @@ void CSlib::onefield(int ftype, int flen, int &nbytes, int &nbytesround)
{
int64_t bigbytes,bigbytesround;
int64_t biglen = flen;
if (ftype == 1) bigbytes = biglen * sizeof(int);
else if (ftype == 2) bigbytes = biglen * sizeof(int64_t);
else if (ftype == 3) bigbytes = biglen * sizeof(float);
@@ -675,7 +675,7 @@ void CSlib::allocate_fields()
nheader = 2;
nheader += 3 * nfield;
if (nfield > maxfield) {
deallocate_fields();
maxfield = nfield;
@@ -684,7 +684,7 @@ void CSlib::allocate_fields()
fieldlen = new int[maxfield];
fieldoffset = new int[maxfield];
}
if (nheader > maxheader) {
sfree(header);
maxheader = nheader;
@@ -724,7 +724,7 @@ void *CSlib::srealloc(void *ptr, int nbytes)
sfree(ptr);
return nullptr;
}
ptr = realloc(ptr,nbytes);
if (ptr == nullptr) {
char str[128];

View File

@@ -52,7 +52,7 @@ class CSlib {
void unpack_parallel(int, int, int *, int, void *);
int extract(int);
private:
uint64_t myworld; // really MPI_Comm, but avoids use of mpi.h in this file
// so apps can include this file w/ no MPI on system

View File

@@ -42,11 +42,11 @@ except:
class CSlib:
# instantiate CSlib thru its C-interface
def __init__(self,csflag,mode,ptr,comm):
# load libcslib.so
try:
if comm: self.lib = CDLL("libcsmpi.so",RTLD_GLOBAL)
else: self.lib = CDLL("libcsnompi.so",RTLD_GLOBAL)
@@ -66,35 +66,35 @@ class CSlib:
self.lib.cslib_send.argtypes = [c_void_p,c_int,c_int]
self.lib.cslib_send.restype = None
self.lib.cslib_pack_int.argtypes = [c_void_p,c_int,c_int]
self.lib.cslib_pack_int.restype = None
self.lib.cslib_pack_int64.argtypes = [c_void_p,c_int,c_longlong]
self.lib.cslib_pack_int64.restype = None
self.lib.cslib_pack_float.argtypes = [c_void_p,c_int,c_float]
self.lib.cslib_pack_float.restype = None
self.lib.cslib_pack_double.argtypes = [c_void_p,c_int,c_double]
self.lib.cslib_pack_double.restype = None
self.lib.cslib_pack_string.argtypes = [c_void_p,c_int,c_char_p]
self.lib.cslib_pack_string.restype = None
self.lib.cslib_pack.argtypes = [c_void_p,c_int,c_int,c_int,c_void_p]
self.lib.cslib_pack.restype = None
self.lib.cslib_pack_parallel.argtypes = [c_void_p,c_int,c_int,c_int,
POINTER(c_int),c_int,c_void_p]
self.lib.cslib_pack_parallel.restype = None
self.lib.cslib_recv.argtypes = [c_void_p,POINTER(c_int),
POINTER(POINTER(c_int)),
POINTER(POINTER(c_int)),
POINTER(POINTER(c_int))]
self.lib.cslib_recv.restype = c_int
self.lib.cslib_unpack_int.argtypes = [c_void_p,c_int]
self.lib.cslib_unpack_int.restype = c_int
@@ -128,7 +128,7 @@ class CSlib:
# create an instance of CSlib with or w/out MPI communicator
self.cs = c_void_p()
if not comm:
self.lib.cslib_open(csflag,mode,ptr,None,byref(self.cs))
elif not mpi4pyflag:
@@ -144,7 +144,7 @@ class CSlib:
self.lib.cslib_open(csflag,mode,ptrcopy,comm_ptr,byref(self.cs))
# destroy instance of CSlib
def __del__(self):
if self.cs: self.lib.cslib_close(self.cs)
@@ -153,13 +153,13 @@ class CSlib:
self.lib = None
# send a message
def send(self,msgID,nfield):
self.nfield = nfield
self.lib.cslib_send(self.cs,msgID,nfield)
# pack one field of message
def pack_int(self,id,value):
self.lib.cslib_pack_int(self.cs,id,value)
@@ -185,24 +185,24 @@ class CSlib:
self.lib.cslib_pack_parallel(self.cs,id,ftype,nlocal,cids,nper,cdata)
# convert input data to a ctypes vector to pass to CSlib
def data_convert(self,ftype,flen,data):
# tflag = type of data
# tflag = 1 if data is list or tuple
# tflag = 2 if data is Numpy array
# tflag = 3 if data is ctypes vector
# same usage of tflag as in unpack function
txttype = str(type(data))
if "numpy" in txttype: tflag = 2
elif "c_" in txttype: tflag = 3
else: tflag = 1
# create ctypes vector out of data to pass to lib
# cdata = ctypes vector to return
# NOTE: error check on ftype and tflag everywhere, also flen
if ftype == 1:
if tflag == 1: cdata = (flen * c_int)(*data)
elif tflag == 2: cdata = data.ctypes.data_as(POINTER(c_int))
@@ -223,7 +223,7 @@ class CSlib:
return cdata
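data_convert() keys its tflag dispatch off the printed type name, so only plain lists and tuples incur a copy. A standalone toy showing the same dispatch for the int case (not part of the library):

from ctypes import POINTER, c_int

def to_ctypes_int(flen, data):
    txttype = str(type(data))
    if "numpy" in txttype:          # tflag = 2: zero-copy pointer view
        return data.ctypes.data_as(POINTER(c_int))
    elif "c_" in txttype:           # tflag = 3: already a ctypes vector
        return data
    else:                           # tflag = 1: copy from list/tuple
        return (flen * c_int)(*data)

print(to_ctypes_int(3, [1, 2, 3])[1])   # -> 2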
# receive a message
def recv(self):
self.lib.cslib_recv.restype = c_int
nfield = c_int()
@@ -235,18 +235,18 @@ class CSlib:
# copy returned C args to native Python int and lists
# store them in class so unpack() methods can access the info
self.nfield = nfield = nfield.value
self.fieldID = fieldID[:nfield]
self.fieldtype = fieldtype[:nfield]
self.fieldlen = fieldlen[:nfield]
return msgID,self.nfield,self.fieldID,self.fieldtype,self.fieldlen
# unpack one field of message
# tflag = type of data to return
# 3 = ctypes vector is default, since no conversion required
def unpack_int(self,id):
return self.lib.cslib_unpack_int(self.cs,id)
@@ -267,7 +267,7 @@ class CSlib:
# reset data type of return so can morph by tflag
# cannot do this for the generic c_void_p returned by CSlib
if self.fieldtype[index] == 1:
self.lib.cslib_unpack.restype = POINTER(c_int)
elif self.fieldtype[index] == 2:
@@ -287,7 +287,7 @@ class CSlib:
# tflag = 3 to return data as ctypes vector
# same usage of tflag as in pack functions
# tflag = 2,3 should NOT perform a data copy
if tflag == 1:
data = cdata[:self.fieldlen[index]]
elif tflag == 2:
@@ -297,11 +297,11 @@ class CSlib:
data = np.ctypeslib.as_array(cdata,shape=(self.fieldlen[index],))
elif tflag == 3:
data = cdata
return data
# handle data array like pack() or unpack_parallel() ??
def unpack_data(self,id,tflag=3):
index = self.fieldID.index(id)
@@ -312,14 +312,14 @@ class CSlib:
# as opposed to creating this cdata
# does that make any performance difference ?
# e.g. should we allow CSlib to populate an existing Numpy array's memory
def unpack_parallel(self,id,nlocal,ids,nper,tflag=3):
cids = self.data_convert(1,nlocal,ids)
# allocate memory for the returned data
# pass cdata ptr to the memory to CSlib unpack_parallel()
# this resets data type of last unpack_parallel() arg
index = self.fieldID.index(id)
if self.fieldtype[index] == 1: cdata = (nper*nlocal * c_int)()
elif self.fieldtype[index] == 2: cdata = (nlocal*nper * c_longlong)()
@@ -334,7 +334,7 @@ class CSlib:
# tflag = 2 to return data as Numpy array
# tflag = 3 to return data as ctypes vector
# same usage of tflag as in pack functions
if tflag == 1:
data = cdata[:nper*nlocal]
elif tflag == 2:
@@ -353,10 +353,10 @@ class CSlib:
data = np.ctypeslib.as_array(cdata,shape=(nlocal*nper,))
elif tflag == 3:
data = cdata
return data
# extract a library value
def extract(self,flag):
return self.lib.cslib_extract(self.cs,flag)
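Taken together, a client-side exchange with this wrapper could look like the sketch below. The method names appear in this diff; the constructor arguments, csflag value, and ZMQ address are placeholders inferred from the C++ signatures.

from cslib import CSlib

cs = CSlib(0, "zmq", "localhost:5555", None)   # csflag, mode, ptr, comm

cs.send(1, 1)         # msgID 1, one field to follow
cs.pack_int(1, 42)    # field ID 1 carries one int; message goes out now

msgID, nfield, fieldID, fieldtype, fieldlen = cs.recv()
value = cs.unpack_int(1)
del cs                # cslib_close() runs in __del__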

View File

@@ -31,7 +31,7 @@ using namespace CSLIB_NS;
// ----------------------------------------------------------------------
void cslib_open(int csflag, const char *mode, const void *ptr,
const void *pcomm, void **csptr)
{
CSlib *cs = new CSlib(csflag,mode,ptr,pcomm);
@@ -40,7 +40,7 @@ void cslib_open(int csflag, const char *mode, const void *ptr,
// ----------------------------------------------------------------------
void cslib_open_fortran(int csflag, const char *mode, const char *str,
const void *pcomm, void **csptr)
{
MPI_Comm ccomm;
@@ -48,7 +48,7 @@ void cslib_open_fortran(int csflag, const char *mode, const char *str,
if (pcomm) {
MPI_Fint *fcomm = (MPI_Fint *) pcomm;
ccomm = MPI_Comm_f2c(*fcomm);
pccomm = &ccomm;
}
@@ -58,7 +58,7 @@ void cslib_open_fortran(int csflag, const char *mode, const char *str,
// ----------------------------------------------------------------------
void cslib_open_fortran_mpi_one(int csflag, const char *mode,
const void *pboth, const void *pcomm,
void **csptr)
{
@@ -66,11 +66,11 @@ void cslib_open_fortran_mpi_one(int csflag, const char *mode,
void *pccomm,*pcboth;
MPI_Fint *fcomm = (MPI_Fint *) pcomm;
ccomm = MPI_Comm_f2c(*fcomm);
pccomm = &ccomm;
MPI_Fint *fboth = (MPI_Fint *) pboth;
cboth = MPI_Comm_f2c(*fboth);
pcboth = &cboth;
CSlib *cs = new CSlib(csflag,mode,pcboth,pccomm);
@@ -152,8 +152,8 @@ void cslib_pack_parallel(void *ptr, int id, int ftype,
// ----------------------------------------------------------------------
int cslib_recv(void *ptr, int *nfield_caller,
int **fieldID_caller, int **fieldtype_caller,
int **fieldlen_caller)
{
CSlib *cs = (CSlib *) ptr;
@@ -227,7 +227,7 @@ void cslib_unpack_data(void *ptr, int id, void *data)
// ----------------------------------------------------------------------
void cslib_unpack_parallel(void *ptr, int id, int nlocal, int *ids,
int nper, void *data)
{
CSlib *cs = (CSlib *) ptr;

View File

@@ -22,7 +22,7 @@ extern "C" {
void cslib_open(int, const char *, const void *, const void *, void **);
void cslib_open_fortran(int, const char *, const char *, const void *, void **);
void cslib_open_fortran_mpi_one(int, const char *, const void *,
const void *, void **);
void cslib_close(void *);
@@ -48,7 +48,7 @@ void cslib_unpack_data(void *, int, void *);
void cslib_unpack_parallel(void *, int, int, int *, int, void *);
int cslib_extract(void *, int);
#ifdef __cplusplus
}
#endif

View File

@@ -32,7 +32,7 @@ using namespace CSLIB_NS;
/* ---------------------------------------------------------------------- */
MsgFile::MsgFile(int csflag, const void *ptr, MPI_Comm cworld) :
Msg(csflag, ptr, cworld)
{
char *filename = (char *) ptr;
@@ -68,14 +68,14 @@ void MsgFile::init(char *filename)
void MsgFile::send(int nheader, int *header, int nbuf, char *buf)
{
char filename[MAXLINE];
lengths[0] = nheader;
lengths[1] = nbuf;
if (me == 0) {
if (client) sprintf(filename,"%s.%s",fileroot,"client");
else if (server) sprintf(filename,"%s.%s",fileroot,"server");
fp = fopen(filename,"wb");
if (!fp) error_one("send(): Could not open send message file");
fwrite(lengths,sizeof(int),2,fp);
@@ -83,7 +83,7 @@ void MsgFile::send(int nheader, int *header, int nbuf, char *buf)
fwrite(buf,1,nbuf,fp);
fclose(fp);
}
// create empty signal file
if (me == 0) {
@@ -113,7 +113,7 @@ void MsgFile::recv(int &maxheader, int *&header, int &maxbuf, char *&buf)
usleep(delay);
}
fclose(fp);
if (client) sprintf(filename,"%s.%s",fileroot,"server");
else if (server) sprintf(filename,"%s.%s",fileroot,"client");
fp = fopen(filename,"rb");
@@ -121,14 +121,14 @@ void MsgFile::recv(int &maxheader, int *&header, int &maxbuf, char *&buf)
}
// read and broadcast data
if (me == 0) fread(lengths,sizeof(int),2,fp);
if (nprocs > 1) MPI_Bcast(lengths,2,MPI_INT,0,world);
int nheader = lengths[0];
int nbuf = lengths[1];
allocate(nheader,maxheader,header,nbuf,maxbuf,buf);
if (me == 0) fread(header,sizeof(int),nheader,fp);
if (nprocs > 1) MPI_Bcast(header,nheader,MPI_INT,0,world);

View File

@@ -29,7 +29,7 @@ using namespace CSLIB_NS;
/* ---------------------------------------------------------------------- */
MsgMPIOne::MsgMPIOne(int csflag, const void *ptr, MPI_Comm cworld) :
Msg(csflag, ptr, cworld)
{
// NOTE: ideally would skip this call if mpi/two

View File

@@ -29,7 +29,7 @@ using namespace CSLIB_NS;
/* ---------------------------------------------------------------------- */
MsgMPITwo::MsgMPITwo(int csflag, const void *ptr, MPI_Comm cworld) :
MsgMPIOne(csflag, ptr, cworld)
{
char *filename = (char *) ptr;
@@ -61,14 +61,14 @@ void MsgMPITwo::init(char *filename)
//printf("Client port: %s\n",port);
fclose(fp);
}
MPI_Bcast(port,MPI_MAX_PORT_NAME,MPI_CHAR,0,world);
MPI_Comm_connect(port,MPI_INFO_NULL,0,world,&bothcomm);
//if (me == 0) printf("CLIENT comm connect\n");
if (me == 0) unlink(filename);
} else if (server) {
MPI_Open_port(MPI_INFO_NULL,port);
if (me == 0) {
//printf("Server name: %s\n",port);
@@ -76,8 +76,8 @@ void MsgMPITwo::init(char *filename)
fprintf(fp,"%s",port);
fclose(fp);
}
MPI_Comm_accept(port,MPI_INFO_NULL,0,world,&bothcomm);
//if (me == 0) printf("SERVER comm accept\n");
}
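The mpi/two rendezvous above passes an MPI port name through a file: the server opens a port, writes its name out, and accepts; the client reads the name and connects. A rough mpi4py equivalent (single-rank view; the real code broadcasts the port name to all ranks, and the filename is a placeholder):

from mpi4py import MPI

def server(filename):
    port = MPI.Open_port()                # like MPI_Open_port()
    with open(filename, "w") as fp:
        fp.write(port)
    return MPI.COMM_WORLD.Accept(port)    # like MPI_Comm_accept()

def client(filename):
    with open(filename) as fp:
        port = fp.read()
    return MPI.COMM_WORLD.Connect(port)   # like MPI_Comm_connect()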

View File

@@ -65,7 +65,7 @@ void MsgZMQ::init(char *port)
char *socket_name = new char[n];
strcpy(socket_name,"tcp://");
strcat(socket_name,port);
if (client) {
context = zmq_ctx_new();
socket = zmq_socket(context,ZMQ_REQ);