Remove MPIIO sources and references to them. Update Purge.list and build systems

Axel Kohlmeyer
2023-08-20 21:51:33 -04:00
parent b904534ac2
commit a81412f015
27 changed files with 51 additions and 3145 deletions

View File

@@ -268,7 +268,6 @@ set(STANDARD_PACKAGES
MOFFF
MOLECULE
MOLFILE
MPIIO
NETCDF
ORIENT
PERI
@@ -377,7 +376,6 @@ endif()
# "hard" dependencies between packages resulting
# in an error instead of skipping over files
pkg_depends(ML-IAP ML-SNAP)
pkg_depends(MPIIO MPI)
pkg_depends(ATC MANYBODY)
pkg_depends(LATBOLTZ MPI)
pkg_depends(SCAFACOS MPI)
@@ -579,13 +577,6 @@ foreach(PKG ${STANDARD_PACKAGES})
RegisterPackages(${${PKG}_SOURCES_DIR})
endforeach()
# packages that need defines set
foreach(PKG MPIIO)
if(PKG_${PKG})
target_compile_definitions(lammps PRIVATE -DLMP_${PKG})
endif()
endforeach()
# dedicated check for entire contents of accelerator packages
foreach(PKG ${SUFFIX_PACKAGES})
set(${PKG}_SOURCES_DIR ${LAMMPS_SOURCE_DIR}/${PKG})

View File

@@ -63,7 +63,6 @@ set(ALL_PACKAGES
MOFFF
MOLECULE
MOLFILE
MPIIO
NETCDF
OPENMP
OPT

View File

@@ -65,7 +65,6 @@ set(ALL_PACKAGES
MOFFF
MOLECULE
MOLFILE
MPIIO
NETCDF
OPENMP
OPT

View File

@@ -83,7 +83,6 @@ endforeach()
# these two packages require a full MPI implementation
if(BUILD_MPI)
set(PKG_MPIIO ON CACHE BOOL "" FORCE)
set(PKG_LATBOLTZ ON CACHE BOOL "" FORCE)
endif()

View File

@@ -19,7 +19,6 @@ set(PACKAGES_WITH_LIB
ML-PACE
ML-QUIP
MOLFILE
MPIIO
NETCDF
PLUMED
PYTHON

src/.gitignore
View File

@@ -671,20 +671,14 @@
/dump_atom_gz.h
/dump_atom_zstd.cpp
/dump_atom_zstd.h
/dump_atom_mpiio.cpp
/dump_atom_mpiio.h
/dump_cfg_gz.cpp
/dump_cfg_gz.h
/dump_cfg_mpiio.cpp
/dump_cfg_mpiio.h
/dump_cfg_zstd.cpp
/dump_cfg_zstd.h
/dump_custom_adios.cpp
/dump_custom_adios.h
/dump_custom_gz.cpp
/dump_custom_gz.h
/dump_custom_mpiio.cpp
/dump_custom_mpiio.h
/dump_custom_zstd.cpp
/dump_custom_zstd.h
/dump_h5md.cpp
@@ -703,8 +697,6 @@
/dump_xtc.h
/dump_xyz_gz.cpp
/dump_xyz_gz.h
/dump_xyz_mpiio.cpp
/dump_xyz_mpiio.h
/dump_xyz_zstd.cpp
/dump_xyz_zstd.h
/dump_yaml.cpp
@@ -1455,8 +1447,6 @@
/remap.h
/remap_wrap.cpp
/remap_wrap.h
/restart_mpiio.cpp
/restart_mpiio.h
/rigid_const.h
/scafacos.cpp
/scafacos.h

View File

@@ -1,58 +0,0 @@
# Install/unInstall package files in LAMMPS
# mode = 0/1/2 for uninstall/install/update
mode=$1
# enforce using portable C locale
LC_ALL=C
export LC_ALL
# arg1 = file, arg2 = file it depends on
action () {
if (test $mode = 0) then
rm -f ../$1
elif (! cmp -s $1 ../$1) then
if (test -z "$2" || test -e ../$2) then
cp $1 ..
if (test $mode = 2) then
echo " updating src/$1"
fi
fi
elif (test -n "$2") then
if (test ! -e ../$2) then
rm -f ../$1
fi
fi
}
# force rebuild of files with LMP_MPIIO switch
# also read/write restart so their dependence on changed mpiio.h is rebuilt
touch ../mpiio.h
touch ../read_restart.cpp
touch ../write_restart.cpp
# all package files with no dependencies
for file in *.cpp *.h; do
test -f ${file} && action $file
done
# edit Makefile.package to include/exclude the LMP_MPIIO setting
if (test $1 = 1) then
if (test -e ../Makefile.package) then
sed -i -e 's/[^ \t]*MPIIO[^ \t]* //' ../Makefile.package
sed -i -e 's|^PKG_INC =[ \t]*|&-DLMP_MPIIO |' ../Makefile.package
fi
elif (test $1 = 0) then
if (test -e ../Makefile.package) then
sed -i -e 's/[^ \t]*MPIIO[^ \t]* //' ../Makefile.package
fi
fi
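The touch commands above exist because, per the script's own comments, read_restart.cpp and write_restart.cpp contain code gated by the LMP_MPIIO preprocessor switch that this script adds to or removes from Makefile.package, so they must be recompiled whenever the switch changes. A hypothetical illustration of such a compile-time gate (not taken from the LAMMPS sources):

// Minimal illustration of a compile-time package switch like -DLMP_MPIIO.
// Not taken from the LAMMPS sources; it only shows why a translation unit
// that tests the macro must be rebuilt when the define is added or removed.
#include <cstdio>

void write_restart_file(const char *fname, bool use_mpiio)
{
#ifdef LMP_MPIIO
  if (use_mpiio) {
    std::printf("writing %s collectively via MPI-IO\n", fname);
    return;
  }
#endif
  (void) use_mpiio;
  std::printf("writing %s with one file per writer\n", fname);
}

int main()
{
  write_restart_file("restart.bin", true);
  return 0;
}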

View File

@@ -1,728 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "dump_atom_mpiio.h"
#include "domain.h"
#include "error.h"
#include "memory.h"
#include "update.h"
#include <cmath>
#include <cstring>
#include "omp_compat.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
/* ---------------------------------------------------------------------- */
DumpAtomMPIIO::DumpAtomMPIIO(LAMMPS *lmp, int narg, char **arg) : DumpAtom(lmp, narg, arg)
{
if (me == 0)
error->warning(FLERR, "MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpAtomMPIIO::~DumpAtomMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
int myrank;
MPI_Comm_rank(world, &myrank);
if (myrank == 0) MPI_File_get_size(mpifh, &mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
} else { // replace open
int err =
MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
mpifo = 0;
MPI_File_set_size(mpifh, (MPI_Offset) (headerSize + sumFileSize));
currentFileSize = (headerSize + sumFileSize);
}
}
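openfile() opens one shared file collectively on the world communicator and presizes it with MPI_File_set_size so later collective writes land at precomputed offsets; append mode first queries the existing size on rank 0 and broadcasts it as the starting offset. A minimal sketch of the replace-mode open (not LAMMPS code; the filename and size are placeholders):

// Sketch (not LAMMPS code): the collective open/preallocate pattern used by
// openfile() above, reduced to its MPI-IO essentials. All ranks in the
// communicator must call MPI_File_open and MPI_File_set_size together.
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  MPI_File fh;
  MPI_Offset preallocate = 1 << 20;   // header + payload size computed beforehand

  int err = MPI_File_open(MPI_COMM_WORLD, "dump.bin",
                          MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  if (err != MPI_SUCCESS) {
    std::fprintf(stderr, "cannot open dump.bin\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  // "replace" semantics: start at offset 0 and size the file up front so later
  // MPI_File_write_at_all calls never have to extend it.
  MPI_File_set_size(fh, preallocate);

  MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}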
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write()
{
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme, &ntotal, 1, MPI_LMP_BIGINT, MPI_SUM, world);
int nmax;
MPI_Allreduce(&nme, &nmax, 1, MPI_INT, MPI_MAX, world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR, "Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf, (maxbuf * size_one), "dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids, maxids, "dump:ids");
}
if (sort_flag && sortcol == 0)
pack(ids);
else
pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme, buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs - 1), world);
openfile();
performEstimate = 0;
write_header(nheader); // mpifo now points to end of header info
// now actually write the data
performEstimate = 0;
write_data(nme, buf);
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete[] filecurrent;
}
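The deleted write() makes two passes over the same write_header()/write_data() calls: with performEstimate set it only measures how many bytes each rank will produce, derives per-rank offsets from an inclusive MPI_Scan, and broadcasts the total from the last rank so the file can be presized; with performEstimate cleared it writes for real. A minimal sketch of that sizing pass (placeholder byte counts, not LAMMPS code):

// Sketch (not LAMMPS code): the sizing pass performed before the file is
// opened. Each rank contributes nbytes; an inclusive MPI_Scan yields the
// running total, from which the rank's exclusive write offset follows, and
// the last rank (which holds the grand total) broadcasts the final file size.
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  long long nbytes = 1000 + 10 * me;          // per-rank payload size (placeholder)
  long long incPrefix = 0;
  MPI_Scan(&nbytes, &incPrefix, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);

  long long myoffset = incPrefix - nbytes;    // exclusive prefix = where this rank starts writing
  long long totalsize = incPrefix;            // only the grand total on the last rank so far
  MPI_Bcast(&totalsize, 1, MPI_LONG_LONG, nprocs - 1, MPI_COMM_WORLD);

  std::printf("rank %d writes %lld bytes at offset %lld of %lld\n",
              me, nbytes, myoffset, totalsize);

  MPI_Finalize();
  return 0;
}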
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::init_style()
{
if (image_flag == 0)
size_one = 5;
else
size_one = 8;
// format = copy of default or user-specified line format
// default depends on image flags
delete[] format;
if (format_line_user) {
format = utils::strdup(std::string(format_line_user) + "\n");
} else {
if (image_flag == 0)
format = utils::strdup(TAGINT_FORMAT " %d %g %g %g\n");
else
format = utils::strdup(TAGINT_FORMAT " %d %g %g %g %d %d %d\n");
}
// setup boundary string
domain->boundary_string(boundstr);
// setup column string
std::string default_columns;
if (scale_flag == 0 && image_flag == 0)
default_columns = "id type x y z";
else if (scale_flag == 0 && image_flag == 1)
default_columns = "id type x y z ix iy iz";
else if (scale_flag == 1 && image_flag == 0)
default_columns = "id type xs ys zs";
else if (scale_flag == 1 && image_flag == 1)
default_columns = "id type xs ys zs ix iy iz";
int icol = 0;
columns.clear();
for (const auto &item : utils::split_words(default_columns)) {
if (columns.size()) columns += " ";
if (keyword_user[icol].size())
columns += keyword_user[icol];
else
columns += item;
++icol;
}
// setup function ptrs
if (binary && domain->triclinic == 0)
header_choice = &DumpAtomMPIIO::header_binary;
else if (binary && domain->triclinic == 1)
header_choice = &DumpAtomMPIIO::header_binary_triclinic;
else if (!binary && domain->triclinic == 0)
header_choice = &DumpAtomMPIIO::header_item;
else if (!binary && domain->triclinic == 1)
header_choice = &DumpAtomMPIIO::header_item_triclinic;
if (scale_flag == 1 && image_flag == 0 && domain->triclinic == 0)
pack_choice = &DumpAtomMPIIO::pack_scale_noimage;
else if (scale_flag == 1 && image_flag == 1 && domain->triclinic == 0)
pack_choice = &DumpAtomMPIIO::pack_scale_image;
else if (scale_flag == 1 && image_flag == 0 && domain->triclinic == 1)
pack_choice = &DumpAtomMPIIO::pack_scale_noimage_triclinic;
else if (scale_flag == 1 && image_flag == 1 && domain->triclinic == 1)
pack_choice = &DumpAtomMPIIO::pack_scale_image_triclinic;
else if (scale_flag == 0 && image_flag == 0)
pack_choice = &DumpAtomMPIIO::pack_noscale_noimage;
else if (scale_flag == 0 && image_flag == 1)
pack_choice = &DumpAtomMPIIO::pack_noscale_image;
if (binary)
write_choice = &DumpAtomMPIIO::write_binary;
else if (buffer_flag == 1)
write_choice = &DumpAtomMPIIO::write_string;
else if (image_flag == 0)
write_choice = &DumpAtomMPIIO::write_lines_noimage;
else if (image_flag == 1)
write_choice = &DumpAtomMPIIO::write_lines_image;
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_header(bigint ndump)
{
if (!header_choice) error->all(FLERR, "Must not use 'run pre no' after creating a new dump");
(this->*header_choice)(ndump);
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_binary(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (6 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
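header_binary() serializes the header by appending each field to a malloc'd byte buffer with memcpy while accumulating headerSize, so rank 0 can later emit the whole header in one MPI_File_write_at. A minimal stand-alone sketch of that append-and-count pattern (hypothetical field names, not LAMMPS code):

// Sketch (not LAMMPS code): memcpy-based header packing as in header_binary()
// above -- append each POD field to a contiguous byte buffer and keep a
// running size so the header can be written with a single call.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

int main()
{
  int64_t ntimestep = 100;     // stands in for LAMMPS's bigint timestep
  int64_t natoms = 4096;
  double boxlo = 0.0, boxhi = 25.0;

  std::vector<char> header(2 * sizeof(int64_t) + 2 * sizeof(double));
  std::size_t pos = 0;

  auto append = [&](const void *src, std::size_t len) {
    std::memcpy(header.data() + pos, src, len);
    pos += len;
  };

  append(&ntimestep, sizeof ntimestep);
  append(&natoms, sizeof natoms);
  append(&boxlo, sizeof boxlo);
  append(&boxhi, sizeof boxhi);

  std::printf("packed %zu header bytes\n", pos);
  return 0;
}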
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_binary_triclinic(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (9 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxy, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_item(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS {}\n", boundstr);
itemtxt += fmt::format("{} {}\n{} {}\n{} {}\n", boxxlo, boxxhi, boxylo, boxyhi, boxzlo, boxzhi);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::header_item_triclinic(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS xy xz yz {}\n", boundstr);
itemtxt += fmt::format("{} {} {}\n{} {} {}\n{} {} {}\n", boxxlo, boxxhi, boxxy, boxylo, boxyhi,
boxxz, boxzlo, boxzhi, boxyz);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n, mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_binary(int n, double *mybuf)
{
n *= size_one;
if (performEstimate) {
bigint incPrefix = 0;
bigint bigintNme = (bigint) nme;
MPI_Scan(&bigintNme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * size_one * sizeof(double)) + (nprocs * sizeof(int));
offsetFromHeader = ((incPrefix - bigintNme) * size_one * sizeof(double)) + (me * sizeof(int));
} else {
int byteBufSize = (n * sizeof(double)) + sizeof(int);
char *bufWithSize;
memory->create(bufWithSize, byteBufSize, "dump:bufWithSize");
memcpy(bufWithSize, (char *) (&n), sizeof(int));
memcpy(&((char *) bufWithSize)[sizeof(int)], mybuf, (n * sizeof(double)));
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, bufWithSize, byteBufSize, MPI_BYTE,
MPI_STATUS_IGNORE);
memory->destroy(bufWithSize);
if (flush_flag) MPI_File_sync(mpifh);
}
}
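In the non-estimate branch, every rank joins a single MPI_File_write_at_all at its own offset past the header, and flush_flag maps to MPI_File_sync. A minimal collective-write sketch (not LAMMPS code; offsets and sizes are placeholders):

// Sketch (not LAMMPS code): the collective data write that follows the sizing
// pass. Every rank participates in MPI_File_write_at_all at its own offset
// past the header; MPI_File_sync mirrors the optional flush in write_binary().
#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int me;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  std::vector<double> payload(256, static_cast<double>(me));   // placeholder data
  MPI_Offset headerSize = 128;                                  // placeholder header length
  MPI_Offset myoffset = static_cast<MPI_Offset>(me) * payload.size() * sizeof(double);

  MPI_File fh;
  MPI_File_open(MPI_COMM_WORLD, "dump.bin",
                MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

  MPI_File_write_at_all(fh, headerSize + myoffset, payload.data(),
                        static_cast<int>(payload.size()), MPI_DOUBLE,
                        MPI_STATUS_IGNORE);
  MPI_File_sync(fh);   // optional durability flush, as with flush_flag above

  MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}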
/* ---------------------------------------------------------------------- */
void DumpAtomMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if (nthreads > 1)
nsme = convert_string_omp(n, mybuf);
else {
nsme = convert_string(n, mybuf);
}
#else
nsme = convert_string(n, mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * sizeof(char));
offsetFromHeader = ((incPrefix - bigintNsme) * sizeof(char));
} else {
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, sbuf, nsme, MPI_CHAR, MPI_STATUS_IGNORE);
if (flush_flag) MPI_File_sync(mpifh);
}
}
/* ---------------------------------------------------------------------- */
int DumpAtomMPIIO::convert_string(int n, double *mybuf)
{
if (image_flag == 0)
return convert_noimage(n, mybuf);
else
return convert_image(n, mybuf);
}
/* ---------------------------------------------------------------------- */
#if defined(_OPENMP)
int DumpAtomMPIIO::convert_string_omp(int n, double *mybuf)
{
if (image_flag == 0)
return convert_noimage_omp(n, mybuf);
else
return convert_image_omp(n, mybuf);
}
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpAtomMPIIO::convert_image_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n, mybuf);
} else {
memory->create(mpifhStringCountPerThread, nthreads, "dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads * sizeof(char *));
memory->create(bufOffset, nthreads, "dump:bufOffset");
memory->create(bufRange, nthreads, "dump:bufRange");
memory->create(bufLength, nthreads, "dump:bufLength");
int i = 0;
for (i = 0; i < (nthreads - 1); i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = (int) (floor((double) n / (double) nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = n - (i * (int) (floor((double) n / (double) nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, \
mpifhStringCountPerThread, \
mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m = 0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(
mpifh_buffer_line_per_thread[tid],
(mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]), format,
static_cast<int>(mybuf[bufOffset[tid] + m]),
static_cast<int>(mybuf[bufOffset[tid] + m + 1]), mybuf[bufOffset[tid] + m + 2],
mybuf[bufOffset[tid] + m + 3], mybuf[bufOffset[tid] + m + 4],
static_cast<int>(mybuf[bufOffset[tid] + m + 5]),
static_cast<int>(mybuf[bufOffset[tid] + m + 6]),
static_cast<int>(mybuf[bufOffset[tid] + m + 7]));
m += size_one;
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i = 0; i < nthreads; i++) { mpifhStringCount += mpifhStringCountPerThread[i]; }
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount + 1;
memory->grow(sbuf, maxsbuf, "dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i = 0; i < nthreads; i++) {
strcat(sbuf, mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
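The convert_*_omp() routines give each OpenMP thread its own growable character buffer for its slice of the rows and concatenate the pieces in thread order at the end, so the result matches a serial conversion. A simplified sketch of that strategy using one std::string per thread (not the realloc/sprintf scheme above, and not LAMMPS code):

// Sketch (not LAMMPS code): per-thread formatting into private buffers,
// concatenated in thread order so the output equals the serial result.
#include <cstdio>
#include <string>
#include <vector>
#if defined(_OPENMP)
#include <omp.h>
#endif

int main()
{
#if defined(_OPENMP)
  int nthreads = omp_get_max_threads();
#else
  int nthreads = 1;
#endif
  const int nrows = 1000;
  std::vector<std::string> pieces(nthreads);

#if defined(_OPENMP)
#pragma omp parallel num_threads(nthreads)
#endif
  {
#if defined(_OPENMP)
    int tid = omp_get_thread_num();
#else
    int tid = 0;
#endif
    int lo = tid * nrows / nthreads;
    int hi = (tid + 1) * nrows / nthreads;
    char line[64];
    for (int i = lo; i < hi; ++i) {
      std::snprintf(line, sizeof line, "%d %g %g %g\n", i, 0.1 * i, 0.2 * i, 0.3 * i);
      pieces[tid] += line;   // grow this thread's private buffer only
    }
  }

  std::string sbuf;          // serial concatenation preserves row order
  for (const auto &p : pieces) sbuf += p;
  std::printf("formatted %zu bytes with %d thread(s)\n", sbuf.size(), nthreads);
  return 0;
}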
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpAtomMPIIO::convert_noimage_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n, mybuf);
} else {
memory->create(mpifhStringCountPerThread, nthreads, "dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads * sizeof(char *));
memory->create(bufOffset, nthreads, "dump:bufOffset");
memory->create(bufRange, nthreads, "dump:bufRange");
memory->create(bufLength, nthreads, "dump:bufLength");
int i = 0;
for (i = 0; i < (nthreads - 1); i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = (int) (floor((double) n / (double) nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = n - (i * (int) (floor((double) n / (double) nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, \
mpifhStringCountPerThread, \
mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m = 0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(
mpifh_buffer_line_per_thread[tid],
(mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]), format,
static_cast<int>(mybuf[bufOffset[tid] + m]),
static_cast<int>(mybuf[bufOffset[tid] + m + 1]), mybuf[bufOffset[tid] + m + 2],
mybuf[bufOffset[tid] + m + 3], mybuf[bufOffset[tid] + m + 4]);
m += size_one;
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i = 0; i < nthreads; i++) { mpifhStringCount += mpifhStringCountPerThread[i]; }
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount + 1;
memory->grow(sbuf, maxsbuf, "dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i = 0; i < nthreads; i++) {
strcat(sbuf, mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
#endif

View File

@@ -1,72 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(atom/mpiio,DumpAtomMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_ATOM_MPIIO_H
#define LMP_DUMP_ATOM_MPIIO_H
#include "dump_atom.h"
namespace LAMMPS_NS {
class DumpAtomMPIIO : public DumpAtom {
public:
DumpAtomMPIIO(class LAMMPS *, int, char **);
~DumpAtomMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the file up through this rank, offset from the end of the header data
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
void openfile() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
void init_style() override;
typedef void (DumpAtomMPIIO::*FnPtrHeader)(bigint);
FnPtrHeader header_choice; // ptr to write header functions
void header_binary(bigint);
void header_binary_triclinic(bigint);
void header_item(bigint);
void header_item_triclinic(bigint);
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
int convert_image_omp(int, double *); // multithreaded version of convert_image
int convert_noimage_omp(int, double *); // multithreaded version of convert_noimage
#endif
int convert_string(int, double *) override;
typedef void (DumpAtomMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_binary(int, double *);
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@@ -1,488 +0,0 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "omp_compat.h"
#include "dump_cfg_mpiio.h"
#include "atom.h"
#include "domain.h"
#include "update.h"
#include "memory.h"
#include "error.h"
#include <cmath>
#include <cstring>
#ifdef LMP_USER_IO_TIMER
#include <sys/times.h>
#include <hwi/include/bqc/A2_inlines.h>
long dumpCFGTimestamps[10];
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
#define UNWRAPEXPAND 10.0
#define ONEFIELD 32
#define DELTA 1048576
/* ---------------------------------------------------------------------- */
DumpCFGMPIIO::DumpCFGMPIIO(LAMMPS *lmp, int narg, char **arg) :
DumpCFG(lmp, narg, arg)
{
if (me == 0)
error->warning(FLERR,"MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpCFGMPIIO::~DumpCFGMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND |
MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}", filecurrent);
int myrank;
MPI_Comm_rank(world,&myrank);
if (myrank == 0)
MPI_File_get_size(mpifh,&mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
} else { // replace open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}", filecurrent);
mpifo = 0;
MPI_File_set_size(mpifh,(MPI_Offset) (headerSize+sumFileSize));
currentFileSize = (headerSize+sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write()
{
#ifdef LMP_USER_IO_TIMER
long startTimeBase, endTimeBase;
MPI_Barrier(world); // timestamp barrier
if (me == 0)
startTimeBase = GetTimeBase();
#endif
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme,&ntotal,1,MPI_LMP_BIGINT,MPI_SUM,world);
int nmax;
MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR,"Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf,(maxbuf*size_one),"dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids,maxids,"dump:ids");
}
if (sort_flag && sortcol == 0) pack(ids);
else pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme,buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs-1), world);
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[0] = GetTimeBase();
#endif
openfile();
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[1] = GetTimeBase();
#endif
performEstimate = 0;
write_header(nheader); // mpifo now points to end of header info
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[2] = GetTimeBase();
#endif
// now actually write the data
performEstimate = 0;
write_data(nme,buf);
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[3] = GetTimeBase();
#endif
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete [] filecurrent;
#ifdef LMP_USER_IO_TIMER
MPI_Barrier(world); // timestamp barrier
dumpCFGTimestamps[4] = GetTimeBase();
if (me == 0) {
endTimeBase = GetTimeBase();
printf("total dump cycles: %ld - estimates and setup: %ld openfile: %ld write header: %ld write data: %ld close file: %ld\n",(long) (endTimeBase-startTimeBase),(long) (dumpCFGTimestamps[0]-startTimeBase),(long) (dumpCFGTimestamps[1]-dumpCFGTimestamps[0]),(long) (dumpCFGTimestamps[2]-dumpCFGTimestamps[1]),(long) (dumpCFGTimestamps[3]-dumpCFGTimestamps[2]),(long) (dumpCFGTimestamps[4]-dumpCFGTimestamps[3]));
}
#endif
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::init_style()
{
if (multifile == 0 && !multifile_override)
error->all(FLERR,"Dump cfg requires one snapshot per file");
DumpCFG::init_style();
// setup function ptrs
write_choice = &DumpCFGMPIIO::write_string;
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write_header(bigint n)
{
// set scale factor used by AtomEye for CFG viz
// default = 1.0
// for peridynamics, set to pre-computed PD scale factor
// so PD particles mimic C atoms
// for unwrapped coords, set to UNWRAPEXPAND (10.0)
// so molecules are not split across periodic box boundaries
double scale = 1.0;
if (atom->peri_flag) scale = atom->pdscale;
else if (unwrapflag == 1) scale = UNWRAPEXPAND;
auto header = fmt::format("Number of particles = {}\n",n);
header += fmt::format("A = {} Angstrom (basic length-scale)\n",scale);
header += fmt::format("H0(1,1) = {} A\n",domain->xprd);
header += fmt::format("H0(1,2) = 0 A\n");
header += fmt::format("H0(1,3) = 0 A\n");
header += fmt::format("H0(2,1) = {} A\n",domain->xy);
header += fmt::format("H0(2,2) = {} A\n",domain->yprd);
header += fmt::format("H0(2,3) = 0 A\n");
header += fmt::format("H0(3,1) = {} A\n",domain->xz);
header += fmt::format("H0(3,2) = {} A\n",domain->yz);
header += fmt::format("H0(3,3) = {} A\n",domain->zprd);
header += fmt::format(".NO_VELOCITY.\n");
header += fmt::format("entry_count = {}\n",nfield-2);
for (int i = 0; i < nfield-5; i++)
header += fmt::format("auxiliary[{}] = {}\n",i,auxname[i]);
if (performEstimate) {
headerSize = header.size();
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh,mpifo,(void *)header.c_str(),header.size(),MPI_CHAR,MPI_STATUS_IGNORE);
mpifo += header.size();
}
}
#if defined(_OPENMP)
/* ----------------------------------------------------------------------
convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpCFGMPIIO::convert_string_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n,mybuf);
}
else {
memory->create(mpifhStringCountPerThread,nthreads,"dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads*sizeof(char*));
memory->create(bufOffset,nthreads,"dump:bufOffset");
memory->create(bufRange,nthreads,"dump:bufRange");
memory->create(bufLength,nthreads,"dump:bufLength");
int i=0;
for (i=0;i<(nthreads-1);i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = (int)(floor((double)n/(double)nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = n-(i*(int)(floor((double)n/(double)nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, mpifhStringCountPerThread, mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m=0;
if (unwrapflag == 0) {
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
for (int j = 0; j < size_one; j++) {
if (j == 0) {
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%f \n",(mybuf[bufOffset[tid]+m]));
} else if (j == 1) {
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%s \n",typenames[(int) mybuf[bufOffset[tid]+m]]);
} else if (j >= 2) {
if (vtype[j] == Dump::INT)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<int> (mybuf[bufOffset[tid]+m]));
else if (vtype[j] == Dump::DOUBLE)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],mybuf[bufOffset[tid]+m]);
else if (vtype[j] == Dump::STRING)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],typenames[(int) mybuf[bufOffset[tid]+m]]);
else if (vtype[j] == Dump::BIGINT)
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<bigint> (mybuf[bufOffset[tid]+m]));
}
m++;
} // for j
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"\n");
} // for i
} // wrap flag
else if (unwrapflag == 1) {
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
for (int j = 0; j < size_one; j++) {
double unwrap_coord;
if (j == 0) {
//offset += sprintf(&sbuf[offset],"%f \n",mybuf[m]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%f \n",mybuf[bufOffset[tid]+m]);
} else if (j == 1) {
// offset += sprintf(&sbuf[offset],"%s \n",typenames[(int) mybuf[m]]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"%s \n",typenames[(int) mybuf[bufOffset[tid]+m]]);
} else if (j >= 2 && j <= 4) {
unwrap_coord = (mybuf[bufOffset[tid]+m] - 0.5)/UNWRAPEXPAND + 0.5;
//offset += sprintf(&sbuf[offset],vformat[j],unwrap_coord);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],unwrap_coord);
} else if (j >= 5) {
if (vtype[j] == Dump::INT)
//offset +=
// sprintf(&sbuf[offset],vformat[j],static_cast<int> (mybuf[m]));
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<int> (mybuf[bufOffset[tid]+m]));
else if (vtype[j] == Dump::DOUBLE)
// offset += sprintf(&sbuf[offset],vformat[j],mybuf[m]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],mybuf[bufOffset[tid]+m]);
else if (vtype[j] == Dump::STRING)
// offset +=
// sprintf(&sbuf[offset],vformat[j],typenames[(int) mybuf[m]]);
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],typenames[(int) mybuf[bufOffset[tid]+m]]);
else if (vtype[j] == Dump::BIGINT)
// offset +=
// sprintf(&sbuf[offset],vformat[j],static_cast<bigint> (mybuf[m]));
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),vformat[j],static_cast<bigint> (mybuf[bufOffset[tid]+m]));
}
m++;
} // for j
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),"\n");
} // for i
} // unwrap flag
} // pragma omp parallel
#pragma omp barrier
mpifhStringCount = 0;
for (i=0;i<nthreads;i++) {
mpifhStringCount += mpifhStringCountPerThread[i];
}
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount+1;
memory->grow(sbuf,maxsbuf,"dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i=0;i<nthreads;i++) {
strcat(sbuf,mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
} // else omp
return mpifhStringCount;
}
#endif
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n,mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpCFGMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if (nthreads > 1)
nsme = convert_string_omp(n,mybuf);
else
nsme = convert_string(n,mybuf);
#else
nsme = convert_string(n,mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
sumFileSize = (incPrefix*sizeof(char));
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
}
else {
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
if (flush_flag)
MPI_File_sync(mpifh);
}
}

View File

@@ -1,60 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(cfg/mpiio,DumpCFGMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_CFG_MPIIO_H
#define LMP_DUMP_CFG_MPIIO_H
#include "dump_cfg.h"
namespace LAMMPS_NS {
class DumpCFGMPIIO : public DumpCFG {
public:
DumpCFGMPIIO(class LAMMPS *, int, char **);
~DumpCFGMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the file up through this rank, offset from the end of the header data
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
#endif
void openfile() override;
void init_style() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
typedef void (DumpCFGMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@@ -1,648 +0,0 @@
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "dump_custom_mpiio.h"
#include "domain.h"
#include "error.h"
#include "fix.h"
#include "input.h"
#include "memory.h"
#include "modify.h"
#include "update.h"
#include "variable.h"
#include <cmath>
#include <cstring>
#include "omp_compat.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
/* ---------------------------------------------------------------------- */
DumpCustomMPIIO::DumpCustomMPIIO(LAMMPS *lmp, int narg, char **arg) : DumpCustom(lmp, narg, arg)
{
if (me == 0)
error->warning(FLERR, "MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpCustomMPIIO::~DumpCustomMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
int myrank;
MPI_Comm_rank(world, &myrank);
if (myrank == 0) MPI_File_get_size(mpifh, &mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh, mpifo + headerSize + sumFileSize);
currentFileSize = mpifo + headerSize + sumFileSize;
} else { // replace open
int err =
MPI_File_open(world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS)
error->one(FLERR, "Cannot open dump file {}: {}", filecurrent, utils::getsyserror());
mpifo = 0;
MPI_File_set_size(mpifh, (MPI_Offset) (headerSize + sumFileSize));
currentFileSize = (headerSize + sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write()
{
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme, &ntotal, 1, MPI_LMP_BIGINT, MPI_SUM, world);
int nmax;
MPI_Allreduce(&nme, &nmax, 1, MPI_INT, MPI_MAX, world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR, "Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf, (maxbuf * size_one), "dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids, maxids, "dump:ids");
}
if (sort_flag && sortcol == 0)
pack(ids);
else
pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme, buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs - 1), world);
openfile();
performEstimate = 0;
write_header(nheader); // mpifo now points to end of header info
// now actually write the data
performEstimate = 0;
write_data(nme, buf);
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete[] filecurrent;
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::init_style()
{
// assemble ITEMS: column string from defaults and user values
delete[] columns;
std::string combined;
int icol = 0;
for (const auto &item : utils::split_words(columns_default)) {
if (combined.size()) combined += " ";
if (keyword_user[icol].size())
combined += keyword_user[icol];
else
combined += item;
++icol;
}
columns = utils::strdup(combined);
// format = copy of default or user-specified line format
delete[] format;
if (format_line_user)
format = utils::strdup(format_line_user);
else
format = utils::strdup(format_default);
// tokenize the format string and add space at end of each format element
// if user-specified int/float format exists, use it instead
// if user-specified column format exists, use it instead
// lo priority = line, medium priority = int/float, hi priority = column
auto words = utils::split_words(format);
if ((int) words.size() < nfield)
error->all(FLERR, "Dump_modify format line is too short: {}", format);
int i = 0;
for (const auto &word : words) {
if (i >= nfield) break;
delete[] vformat[i];
if (format_column_user[i])
vformat[i] = utils::strdup(std::string(format_column_user[i]) + " ");
else if (vtype[i] == Dump::INT && format_int_user)
vformat[i] = utils::strdup(std::string(format_int_user) + " ");
else if (vtype[i] == Dump::DOUBLE && format_float_user)
vformat[i] = utils::strdup(std::string(format_float_user) + " ");
else if (vtype[i] == Dump::BIGINT && format_bigint_user)
vformat[i] = utils::strdup(std::string(format_bigint_user) + " ");
else
vformat[i] = utils::strdup(word + " ");
// remove trailing blank on last column's format
if (i == nfield - 1) vformat[i][strlen(vformat[i]) - 1] = '\0';
++i;
}
// setup boundary string
domain->boundary_string(boundstr);
// setup function ptrs
if (binary && domain->triclinic == 0)
header_choice = &DumpCustomMPIIO::header_binary;
else if (binary && domain->triclinic == 1)
header_choice = &DumpCustomMPIIO::header_binary_triclinic;
else if (!binary && domain->triclinic == 0)
header_choice = &DumpCustomMPIIO::header_item;
else if (!binary && domain->triclinic == 1)
header_choice = &DumpCustomMPIIO::header_item_triclinic;
if (binary)
write_choice = &DumpCustomMPIIO::write_binary;
else
write_choice = &DumpCustomMPIIO::write_string;
// find current ptr for each compute,fix,variable
// check that fix frequency is acceptable
for (i = 0; i < ncompute; i++) {
compute[i] = modify->get_compute_by_id(id_compute[i]);
if (!compute[i])
error->all(FLERR, "Could not find dump custom/mpiio compute ID {}", id_compute[i]);
}
for (i = 0; i < nfix; i++) {
fix[i] = modify->get_fix_by_id(id_fix[i]);
if (!fix[i]) error->all(FLERR, "Could not find dump custom/mpiio fix ID {}", id_fix[i]);
if (nevery % fix[i]->peratom_freq)
error->all(FLERR, "dump custom/mpiio and fix not computed at compatible times");
}
for (i = 0; i < nvariable; i++) {
int ivariable = input->variable->find(id_variable[i]);
if (ivariable < 0)
error->all(FLERR, "Could not find dump custom/mpiio variable name {}", id_variable[i]);
variable[i] = ivariable;
}
// set index and check validity of region
if (idregion && !domain->get_region_by_id(idregion))
error->all(FLERR, "Region {} for dump custom/mpiio does not exist", idregion);
}
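The comment block in init_style() spells out the format priority: the whole-line format from dump_modify is the fallback, a user int/float format overrides it for columns of that type, and an explicit per-column format overrides both. A minimal sketch of that selection (hypothetical names, not LAMMPS code):

// Sketch (not LAMMPS code): per-column format selection as described in the
// init_style() comments -- line format (low) < per-type format (medium) <
// per-column format (high).
#include <cstdio>
#include <string>

enum class ColType { INT, DOUBLE };

std::string pick_format(const std::string &from_line,   // token from the format line
                        const char *type_user,          // user int/float format, may be null
                        const char *column_user,        // user per-column format, may be null
                        ColType /*type*/)
{
  if (column_user) return column_user;   // highest priority: explicit column format
  if (type_user) return type_user;       // medium: format set for this data type
  return from_line;                      // lowest: token from the whole-line format
}

int main()
{
  // hypothetical column: a double with a user float format but no column format
  std::printf("chosen format: %s\n",
              pick_format("%g", "%15.8g", nullptr, ColType::DOUBLE).c_str());
  return 0;
}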
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_header(bigint ndump)
{
if (!header_choice) error->all(FLERR, "Must not use 'run pre no' after creating a new dump");
(this->*header_choice)(ndump);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_binary(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (6 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_binary_triclinic(bigint ndump)
{
if (performEstimate) {
headerBuffer = (char *) malloc((2 * sizeof(bigint)) + (9 * sizeof(int)) + (9 * sizeof(double)));
headerSize = 0;
memcpy(headerBuffer + headerSize, &update->ntimestep, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &ndump, sizeof(bigint));
headerSize += sizeof(bigint);
memcpy(headerBuffer + headerSize, &domain->triclinic, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &domain->boundary[0][0], 6 * sizeof(int));
headerSize += 6 * sizeof(int);
memcpy(headerBuffer + headerSize, &boxxlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxylo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzlo, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxzhi, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxy, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxxz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &boxyz, sizeof(double));
headerSize += sizeof(double);
memcpy(headerBuffer + headerSize, &size_one, sizeof(int));
headerSize += sizeof(int);
memcpy(headerBuffer + headerSize, &nprocs, sizeof(int));
headerSize += sizeof(int);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_BYTE, MPI_STATUS_IGNORE);
mpifo += headerSize;
free(headerBuffer);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_item(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS {}\n", boundstr);
itemtxt += fmt::format("{} {}\n{} {}\n{} {}\n", boxxlo, boxxhi, boxylo, boxyhi, boxzlo, boxzhi);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::header_item_triclinic(bigint ndump)
{
if (performEstimate) {
auto itemtxt = fmt::format("ITEM: TIMESTEP\n{}\n", update->ntimestep);
itemtxt += fmt::format("ITEM: NUMBER OF ATOMS\n{}\n", ndump);
itemtxt += fmt::format("ITEM: BOX BOUNDS xy xz yz {}\n", boundstr);
itemtxt += fmt::format("{} {} {}\n{} {} {}\n{} {} {}\n", boxxlo, boxxhi, boxxy, boxylo, boxyhi,
boxxz, boxzlo, boxzhi, boxyz);
itemtxt += fmt::format("ITEM: ATOMS {}\n", columns);
headerSize = itemtxt.size();
headerBuffer = utils::strdup(itemtxt);
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh, mpifo, headerBuffer, headerSize, MPI_CHAR, MPI_STATUS_IGNORE);
mpifo += headerSize;
delete[] headerBuffer;
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n, mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_binary(int n, double *mybuf)
{
n *= size_one;
if (performEstimate) {
bigint incPrefix = 0;
bigint bigintNme = (bigint) nme;
MPI_Scan(&bigintNme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * size_one * sizeof(double)) + (nprocs * sizeof(int));
offsetFromHeader = ((incPrefix - bigintNme) * size_one * sizeof(double)) + (me * sizeof(int));
} else {
int byteBufSize = (n * sizeof(double)) + sizeof(int);
char *bufWithSize;
memory->create(bufWithSize, byteBufSize, "dump:bufWithSize");
memcpy(bufWithSize, (char *) (&n), sizeof(int));
memcpy(&((char *) bufWithSize)[sizeof(int)], mybuf, (n * sizeof(double)));
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, bufWithSize, byteBufSize, MPI_BYTE,
MPI_STATUS_IGNORE);
memory->destroy(bufWithSize);
if (flush_flag) MPI_File_sync(mpifh);
}
}
/* ---------------------------------------------------------------------- */
void DumpCustomMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if ((nthreads > 1) && !(lmp->kokkos))
nsme = convert_string_omp(n, mybuf); // not (yet) compatible with Kokkos
else
nsme = convert_string(n, mybuf);
#else
nsme = convert_string(n, mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme, &incPrefix, 1, MPI_LMP_BIGINT, MPI_SUM, world);
sumFileSize = (incPrefix * sizeof(char));
offsetFromHeader = ((incPrefix - bigintNsme) * sizeof(char));
} else {
MPI_File_write_at_all(mpifh, mpifo + offsetFromHeader, sbuf, nsme, MPI_CHAR, MPI_STATUS_IGNORE);
if (flush_flag) MPI_File_sync(mpifh);
}
}
#if defined(_OPENMP)
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpCustomMPIIO::convert_string_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n, mybuf);
} else {
memory->create(mpifhStringCountPerThread, nthreads, "dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads * sizeof(char *));
memory->create(bufOffset, nthreads, "dump:bufOffset");
memory->create(bufRange, nthreads, "dump:bufRange");
memory->create(bufLength, nthreads, "dump:bufLength");
int i = 0;
for (i = 0; i < (nthreads - 1); i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = (int) (floor((double) n / (double) nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i * (int) (floor((double) n / (double) nthreads)) * size_one);
bufRange[i] = n - (i * (int) (floor((double) n / (double) nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, \
mpifhStringCountPerThread, \
mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m = 0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(
mpifh_buffer_line_per_thread[tid],
(mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid] + DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
for (int j = 0; j < size_one; j++) {
if (vtype[j] == Dump::INT)
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),
vformat[j], static_cast<int>(mybuf[bufOffset[tid] + m]));
else if (vtype[j] == Dump::DOUBLE)
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),
vformat[j], mybuf[bufOffset[tid] + m]);
else if (vtype[j] == Dump::STRING)
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),
vformat[j], typenames[(int) mybuf[bufOffset[tid] + m]]);
m++;
}
mpifhStringCountPerThread[tid] +=
sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]), "\n");
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i = 0; i < nthreads; i++) { mpifhStringCount += mpifhStringCountPerThread[i]; }
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount + 1;
memory->grow(sbuf, maxsbuf, "dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i = 0; i < nthreads; i++) {
strcat(sbuf, mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
#endif
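
The OpenMP helper above avoids locking by giving each thread its own growable character buffer and concatenating the per-thread results serially at the end. A minimal sketch of that pattern, written with std::string instead of the manual realloc bookkeeping (function and variable names here are illustrative, not from LAMMPS):

#include <cstdio>
#include <string>
#include <vector>
#if defined(_OPENMP)
#include <omp.h>
#endif

// Format n doubles into one big string, one contiguous slice per thread.
std::string format_parallel(const double *buf, int n)
{
#if defined(_OPENMP)
  int nthreads = omp_get_max_threads();
#else
  int nthreads = 1;
#endif
  std::vector<std::string> chunk(nthreads);

#if defined(_OPENMP)
#pragma omp parallel num_threads(nthreads)
#endif
  {
#if defined(_OPENMP)
    int tid = omp_get_thread_num();
#else
    int tid = 0;
#endif
    // split [0,n) into nearly equal ranges; the last thread takes the remainder
    int lo = tid * (n / nthreads);
    int hi = (tid == nthreads - 1) ? n : lo + (n / nthreads);
    char line[64];
    for (int i = lo; i < hi; ++i) {
      snprintf(line, sizeof(line), "%g\n", buf[i]);
      chunk[tid] += line;           // each thread appends only to its own buffer
    }
  }

  std::string out;
  for (const auto &c : chunk) out += c;   // serial concatenation preserves order
  return out;
}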

View File

@ -1,68 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(custom/mpiio,DumpCustomMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_CUSTOM_MPIIO_H
#define LMP_DUMP_CUSTOM_MPIIO_H
#include "dump_custom.h"
namespace LAMMPS_NS {
class DumpCustomMPIIO : public DumpCustom {
public:
DumpCustomMPIIO(class LAMMPS *, int, char **);
~DumpCustomMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the file up through this rank offset from the end of the header data
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
  int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
#endif
void openfile() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
void init_style() override;
typedef void (DumpCustomMPIIO::*FnPtrHeader)(bigint);
FnPtrHeader header_choice; // ptr to write header functions
void header_binary(bigint);
void header_binary_triclinic(bigint);
void header_item(bigint);
void header_item_triclinic(bigint);
typedef void (DumpCustomMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_binary(int, double *);
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -1,369 +0,0 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "dump_xyz_mpiio.h"
#include "compute.h"
#include "domain.h"
#include "error.h"
#include "memory.h"
#include "update.h"
#include <cmath>
#include <cstring>
#include "omp_compat.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
using namespace LAMMPS_NS;
#define MAX_TEXT_HEADER_SIZE 4096
#define DUMP_BUF_CHUNK_SIZE 16384
#define DUMP_BUF_INCREMENT_SIZE 4096
/* ---------------------------------------------------------------------- */
DumpXYZMPIIO::DumpXYZMPIIO(LAMMPS *lmp, int narg, char **arg) :
DumpXYZ(lmp, narg, arg) {
if (me == 0)
error->warning(FLERR,"MPI-IO output is unmaintained and unreliable. Use with caution.");
}
/* ---------------------------------------------------------------------- */
DumpXYZMPIIO::~DumpXYZMPIIO()
{
if (multifile == 0) MPI_File_close(&mpifh);
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::openfile()
{
if (singlefile_opened) { // single file already opened, so just return after resetting filesize
mpifo = currentFileSize;
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
return;
}
if (multifile == 0) singlefile_opened = 1;
// if one file per timestep, replace '*' with current timestep
filecurrent = filename;
if (multifile) {
filecurrent = utils::strdup(utils::star_subst(filecurrent, update->ntimestep, padflag));
if (maxfiles > 0) {
if (numfiles < maxfiles) {
nameslist[numfiles] = utils::strdup(filecurrent);
++numfiles;
} else {
remove(nameslist[fileidx]);
delete[] nameslist[fileidx];
nameslist[fileidx] = utils::strdup(filecurrent);
fileidx = (fileidx + 1) % maxfiles;
}
}
}
if (append_flag) { // append open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_APPEND |
MPI_MODE_WRONLY , MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}", filecurrent);
int myrank;
MPI_Comm_rank(world,&myrank);
if (myrank == 0) MPI_File_get_size(mpifh,&mpifo);
MPI_Bcast(&mpifo, 1, MPI_LMP_BIGINT, 0, world);
MPI_File_set_size(mpifh,mpifo+headerSize+sumFileSize);
currentFileSize = mpifo+headerSize+sumFileSize;
} else { // replace open
int err = MPI_File_open( world, filecurrent, MPI_MODE_CREATE | MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) error->one(FLERR, "Cannot open dump file {}",filecurrent);
mpifo = 0;
MPI_File_set_size(mpifh,(MPI_Offset) (headerSize+sumFileSize));
currentFileSize = (headerSize+sumFileSize);
}
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write()
{
if (domain->triclinic == 0) {
boxxlo = domain->boxlo[0];
boxxhi = domain->boxhi[0];
boxylo = domain->boxlo[1];
boxyhi = domain->boxhi[1];
boxzlo = domain->boxlo[2];
boxzhi = domain->boxhi[2];
} else {
boxxlo = domain->boxlo_bound[0];
boxxhi = domain->boxhi_bound[0];
boxylo = domain->boxlo_bound[1];
boxyhi = domain->boxhi_bound[1];
boxzlo = domain->boxlo_bound[2];
boxzhi = domain->boxhi_bound[2];
boxxy = domain->xy;
boxxz = domain->xz;
boxyz = domain->yz;
}
// nme = # of dump lines this proc contributes to dump
nme = count();
// ntotal = total # of dump lines in snapshot
// nmax = max # of dump lines on any proc
bigint bnme = nme;
MPI_Allreduce(&bnme,&ntotal,1,MPI_LMP_BIGINT,MPI_SUM,world);
int nmax;
MPI_Allreduce(&nme,&nmax,1,MPI_INT,MPI_MAX,world);
// write timestep header
// for multiproc,
// nheader = # of lines in this file via Allreduce on clustercomm
bigint nheader = ntotal;
// ensure filewriter proc can receive everyone's info
// limit nmax*size_one to int since used as arg in MPI_Rsend() below
// pack my data into buf
// if sorting on IDs also request ID list from pack()
// sort buf as needed
if (nmax > maxbuf) {
if ((bigint) nmax * size_one > MAXSMALLINT)
error->all(FLERR,"Too much per-proc info for dump");
maxbuf = nmax;
memory->destroy(buf);
memory->create(buf,(maxbuf*size_one),"dump:buf");
}
if (sort_flag && sortcol == 0 && nmax > maxids) {
maxids = nmax;
memory->destroy(ids);
memory->create(ids,maxids,"dump:ids");
}
if (sort_flag && sortcol == 0) pack(ids);
else pack(nullptr);
if (sort_flag) sort();
// determine how much data needs to be written for setting the file size and preprocess it prior to writing
performEstimate = 1;
write_header(nheader);
write_data(nme,buf);
MPI_Bcast(&sumFileSize, 1, MPI_LMP_BIGINT, (nprocs-1), world);
openfile();
performEstimate = 0;
write_header(nheader); // actually write the header - mpifo now points to end of header info
// now actually write the data
performEstimate = 0;
write_data(nme,buf);
if (multifile) MPI_File_close(&mpifh);
if (multifile) delete[] filecurrent;
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::init_style()
{
// format = copy of default or user-specified line format
delete[] format;
char *str;
if (format_line_user) str = format_line_user;
else str = format_default;
int n = strlen(str) + 2;
format = new char[n];
strcpy(format,str);
strcat(format,"\n");
// initialize typenames array to be backward compatible by default
// a 32-bit int can be maximally 10 digits plus sign
if (typenames == nullptr) {
typenames = new char*[ntypes+1];
for (int itype = 1; itype <= ntypes; itype++)
typenames[itype] = utils::strdup(std::to_string(itype));
}
// setup function ptr
write_choice = &DumpXYZMPIIO::write_string;
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write_header(bigint n)
{
auto header = fmt::format("{}\n Atoms. Timestep: {}", n, update->ntimestep);
if (time_flag) header += fmt::format(" Time: {:.6f}", compute_time());
header += "\n";
if (performEstimate) {
headerSize = header.size();
} else { // write data
if (me == 0)
MPI_File_write_at(mpifh,mpifo,(void *)header.c_str(),header.size(),MPI_CHAR,MPI_STATUS_IGNORE);
mpifo += header.size();
}
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write_data(int n, double *mybuf)
{
(this->*write_choice)(n,mybuf);
}
/* ---------------------------------------------------------------------- */
void DumpXYZMPIIO::write_string(int n, double *mybuf)
{
if (performEstimate) {
#if defined(_OPENMP)
int nthreads = omp_get_max_threads();
if (nthreads > 1)
nsme = convert_string_omp(n,mybuf);
else
nsme = convert_string(n,mybuf);
#else
nsme = convert_string(n,mybuf);
#endif
bigint incPrefix = 0;
bigint bigintNsme = (bigint) nsme;
MPI_Scan(&bigintNsme,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
sumFileSize = (incPrefix*sizeof(char));
offsetFromHeader = ((incPrefix-bigintNsme)*sizeof(char));
}
else { // write data
MPI_File_write_at_all(mpifh,mpifo+offsetFromHeader,sbuf,nsme,MPI_CHAR,MPI_STATUS_IGNORE);
if (flush_flag)
MPI_File_sync(mpifh);
}
}
#if defined(_OPENMP)
/* ----------------------------------------------------------------------
multithreaded version - convert mybuf of doubles to one big formatted string in sbuf
return -1 if strlen exceeds an int, since used as arg in MPI calls in Dump
------------------------------------------------------------------------- */
int DumpXYZMPIIO::convert_string_omp(int n, double *mybuf)
{
char **mpifh_buffer_line_per_thread;
int mpifhStringCount;
int *mpifhStringCountPerThread, *bufOffset, *bufRange, *bufLength;
mpifhStringCount = 0;
int nthreads = omp_get_max_threads();
if (nthreads > n) { // call serial version
convert_string(n,mybuf);
}
else {
memory->create(mpifhStringCountPerThread,nthreads,"dump:mpifhStringCountPerThread");
mpifh_buffer_line_per_thread = (char **) malloc(nthreads*sizeof(char*));
memory->create(bufOffset,nthreads,"dump:bufOffset");
memory->create(bufRange,nthreads,"dump:bufRange");
memory->create(bufLength,nthreads,"dump:bufLength");
int i=0;
for (i=0;i<(nthreads-1);i++) {
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = (int)(floor((double)n/(double)nthreads));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
}
mpifhStringCountPerThread[i] = 0;
bufOffset[i] = (int) (i*(int)(floor((double)n/(double)nthreads))*size_one);
bufRange[i] = n-(i*(int)(floor((double)n/(double)nthreads)));
bufLength[i] = DUMP_BUF_CHUNK_SIZE;
mpifh_buffer_line_per_thread[i] = (char *) malloc(DUMP_BUF_CHUNK_SIZE * sizeof(char));
mpifh_buffer_line_per_thread[i][0] = '\0';
#pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(bufOffset, bufRange, bufLength, mpifhStringCountPerThread, mpifh_buffer_line_per_thread, mybuf)
{
int tid = omp_get_thread_num();
int m=0;
for (int i = 0; i < bufRange[tid]; i++) {
if ((bufLength[tid] - mpifhStringCountPerThread[tid]) < DUMP_BUF_INCREMENT_SIZE) {
mpifh_buffer_line_per_thread[tid] = (char *) realloc(mpifh_buffer_line_per_thread[tid],(mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char));
bufLength[tid] = (mpifhStringCountPerThread[tid]+DUMP_BUF_CHUNK_SIZE) * sizeof(char);
}
mpifhStringCountPerThread[tid] += sprintf(&(mpifh_buffer_line_per_thread[tid][mpifhStringCountPerThread[tid]]),format,typenames[static_cast<int> (mybuf[bufOffset[tid]+m+1])],mybuf[bufOffset[tid]+m+2],mybuf[bufOffset[tid]+m+3],mybuf[bufOffset[tid]+m+4]);
m += size_one;
}
}
#pragma omp barrier
mpifhStringCount = 0;
for (i=0;i<nthreads;i++) {
mpifhStringCount += mpifhStringCountPerThread[i];
}
memory->destroy(bufOffset);
memory->destroy(bufRange);
memory->destroy(bufLength);
if (mpifhStringCount > 0) {
if (mpifhStringCount > maxsbuf) {
if (mpifhStringCount > MAXSMALLINT) return -1;
maxsbuf = mpifhStringCount+1;
memory->grow(sbuf,maxsbuf,"dump:sbuf");
}
sbuf[0] = '\0';
}
for (int i=0;i<nthreads;i++) {
strcat(sbuf,mpifh_buffer_line_per_thread[i]);
free(mpifh_buffer_line_per_thread[i]);
}
memory->destroy(mpifhStringCountPerThread);
free(mpifh_buffer_line_per_thread);
}
return mpifhStringCount;
}
#endif
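
Both removed dump writers follow the same two-pass scheme: an estimate pass formats the local data and measures it, an MPI_Scan prefix sum converts per-rank byte counts into file offsets, rank 0 writes the header, and every rank issues one collective write at its own offset. A minimal sketch of that offset arithmetic, assuming an MPI-3 build; the file name, buffers, and function name are placeholders:

#include <mpi.h>
#include <string>

// Each rank writes its formatted text at a unique offset after a header
// written by rank 0; offsets come from a prefix sum of the local sizes.
void write_shared_file(MPI_Comm comm, const std::string &header,
                       const std::string &mytext)
{
  int me;
  MPI_Comm_rank(comm, &me);

  long long mysize = (long long) mytext.size();
  long long prefix = 0;                       // inclusive prefix sum of sizes
  MPI_Scan(&mysize, &prefix, 1, MPI_LONG_LONG, MPI_SUM, comm);
  MPI_Offset myoffset = (MPI_Offset) header.size() + (prefix - mysize);

  MPI_File fh;
  MPI_File_open(comm, "dump.txt", MPI_MODE_CREATE | MPI_MODE_WRONLY,
                MPI_INFO_NULL, &fh);
  if (me == 0)
    MPI_File_write_at(fh, 0, header.c_str(), (int) header.size(),
                      MPI_CHAR, MPI_STATUS_IGNORE);

  // collective: every rank must call this, even with a zero-length buffer
  MPI_File_write_at_all(fh, myoffset, mytext.c_str(), (int) mysize,
                        MPI_CHAR, MPI_STATUS_IGNORE);
  MPI_File_close(&fh);
}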

View File

@ -1,61 +0,0 @@
/* -*- c++ -*- ---------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifdef DUMP_CLASS
// clang-format off
DumpStyle(xyz/mpiio,DumpXYZMPIIO);
// clang-format on
#else
#ifndef LMP_DUMP_XYZ_MPIIO_H
#define LMP_DUMP_XYZ_MPIIO_H
#include "dump_xyz.h"
namespace LAMMPS_NS {
class DumpXYZMPIIO : public DumpXYZ {
public:
DumpXYZMPIIO(class LAMMPS *, int, char **);
~DumpXYZMPIIO() override;
protected:
bigint
sumFileSize; // size in bytes of the file up through this rank offset from the end of the header data
char *headerBuffer; // buffer for holding header data
MPI_File mpifh;
MPI_Offset mpifo, offsetFromHeader, headerSize, currentFileSize;
  int performEstimate; // switch for write_data and write_header methods to use for gathering data and determining filesize for preallocation vs actually writing the data
char *filecurrent; // name of file for this round (with % and * replaced)
#if defined(_OPENMP)
int convert_string_omp(int, double *); // multithreaded version of convert_string
#endif
void openfile() override;
void write_header(bigint) override;
void write() override;
void write_data(int, double *) override;
void init_style() override;
typedef void (DumpXYZMPIIO::*FnPtrData)(int, double *);
FnPtrData write_choice; // ptr to write data functions
void write_string(int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -1,199 +0,0 @@
// clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Paul Coffman (IBM)
------------------------------------------------------------------------- */
#include "restart_mpiio.h"
#include "error.h"
using namespace LAMMPS_NS;
// the (rather old) version of ROMIO in MPICH for Windows
// uses "char *" instead of "const char *". This works around it.
#if defined(_WIN32)
#define ROMIO_COMPAT_CAST (char *)
#else
#define ROMIO_COMPAT_CAST
#endif
/* ---------------------------------------------------------------------- */
RestartMPIIO::RestartMPIIO(LAMMPS *lmp) : Pointers(lmp)
{
mpiio_exists = 1;
MPI_Comm_size(world,&nprocs);
MPI_Comm_rank(world,&myrank);
}
/* ----------------------------------------------------------------------
calls MPI_File_open in read-only mode, read_restart should call this
for some file servers it is most efficient to only read or only write
------------------------------------------------------------------------- */
void RestartMPIIO::openForRead(const char *filename)
{
int err = MPI_File_open(world, ROMIO_COMPAT_CAST filename, MPI_MODE_RDONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot open restart file for reading - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
}
/* ----------------------------------------------------------------------
calls MPI_File_open in write-only mode, write_restart should call this
for some file servers it is most efficient to only read or only write
------------------------------------------------------------------------- */
void RestartMPIIO::openForWrite(const char *filename)
{
int err = MPI_File_open(world, ROMIO_COMPAT_CAST filename, MPI_MODE_WRONLY,
MPI_INFO_NULL, &mpifh);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot open restart file for writing - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
}
/* ----------------------------------------------------------------------
determine the absolute offset for the data to be written with
MPI_Scan of the send sizes
   compute the file size based on the MPI_Scan send size value on the last rank
set the filesize with ftruncate via MPI_File_set_size
write the data via collective MPI-IO by calling MPI_File_write_at_all
------------------------------------------------------------------------- */
void RestartMPIIO::write(MPI_Offset headerOffset, int send_size, double *buf)
{
bigint incPrefix = 0;
bigint bigintSendSize = (bigint) send_size;
MPI_Scan(&bigintSendSize,&incPrefix,1,MPI_LMP_BIGINT,MPI_SUM,world);
bigint largestIncPrefix = incPrefix;
MPI_Bcast(&largestIncPrefix, 1, MPI_LMP_BIGINT, (nprocs-1), world);
int err = MPI_File_set_size(mpifh,
(headerOffset+(largestIncPrefix*sizeof(double))));
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot set restart file size - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
err = MPI_File_write_at_all(mpifh,headerOffset +
((incPrefix-bigintSendSize)*sizeof(double)),
buf,send_size,MPI_DOUBLE,MPI_STATUS_IGNORE);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot write to restart file - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
}
/* ----------------------------------------------------------------------
read the data into buf via collective MPI-IO by calling MPI_File_read_at_all
with the chunkOffset and chunkSize provided
   if the consolidated chunk size is greater than INT_MAX
   (this can only happen in the extreme situation of reading a restart file on
   far fewer ranks than wrote it, and with relatively large data sizes)
   follow the collective IO call with rank-independent IO to read the remaining data
------------------------------------------------------------------------- */
void RestartMPIIO::read(MPI_Offset chunkOffset, bigint chunkSize, double *buf)
{
int intChunkSize;
bigint remainingSize = 0;
if (chunkSize > INT_MAX) {
intChunkSize = INT_MAX;
remainingSize = chunkSize - INT_MAX;
}
else intChunkSize = (int) chunkSize;
int err = MPI_File_read_at_all(mpifh,chunkOffset,buf,intChunkSize,
MPI_DOUBLE,MPI_STATUS_IGNORE);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot read from restart file - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
MPI_Offset currentOffset = chunkOffset+intChunkSize;
MPI_Offset bufOffset = intChunkSize;
while (remainingSize > 0) {
int currentChunkSize;
if (remainingSize > INT_MAX) {
currentChunkSize = INT_MAX;
remainingSize -= INT_MAX;
}
else {
currentChunkSize = remainingSize;
remainingSize = 0;
}
int err = MPI_File_read_at(mpifh,currentOffset,&buf[bufOffset],
currentChunkSize,MPI_DOUBLE,MPI_STATUS_IGNORE);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot read from restart file - MPI error: %s",
mpiErrorString);
error->one(FLERR,str);
}
currentOffset += currentChunkSize;
bufOffset += currentChunkSize;
}
}
/* ----------------------------------------------------------------------
calls MPI_File_close
------------------------------------------------------------------------- */
void RestartMPIIO::close()
{
int err = MPI_File_close(&mpifh);
if (err != MPI_SUCCESS) {
char str[MPI_MAX_ERROR_STRING+128];
char mpiErrorString[MPI_MAX_ERROR_STRING];
int mpiErrorStringLength;
MPI_Error_string(err, mpiErrorString, &mpiErrorStringLength);
sprintf(str,"Cannot close restart file - MPI error: %s",mpiErrorString);
error->one(FLERR,str);
}
}
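
Because MPI-IO counts are plain ints, RestartMPIIO::read above splits an oversized chunk: the first INT_MAX doubles go through the collective call (which every rank must enter), and any remainder is read with independent calls. A sketch of that splitting, again assuming an MPI build and with made-up names:

#include <mpi.h>
#include <climits>
#include <cstdint>

// Read 'total' doubles starting at byte 'offset', even when 'total' exceeds INT_MAX.
// The first piece uses the collective call so that all ranks participate;
// leftovers use independent reads.
void read_big_chunk(MPI_File fh, MPI_Offset offset, int64_t total, double *buf)
{
  int first = (total > INT_MAX) ? INT_MAX : (int) total;
  MPI_File_read_at_all(fh, offset, buf, first, MPI_DOUBLE, MPI_STATUS_IGNORE);

  int64_t done = first;
  while (done < total) {
    int64_t left = total - done;
    int count = (left > INT_MAX) ? INT_MAX : (int) left;
    MPI_File_read_at(fh, offset + done * (MPI_Offset) sizeof(double),
                     &buf[done], count, MPI_DOUBLE, MPI_STATUS_IGNORE);
    done += count;
  }
}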

View File

@ -1,40 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef LMP_RESTART_MPIIO_H
#define LMP_RESTART_MPIIO_H
#include "pointers.h"
namespace LAMMPS_NS {
class RestartMPIIO : protected Pointers {
private:
MPI_File mpifh;
int nprocs, myrank;
public:
int mpiio_exists;
RestartMPIIO(class LAMMPS *);
void openForRead(const char *);
void openForWrite(const char *);
void write(MPI_Offset, int, double *);
void read(MPI_Offset, bigint, double *);
void close();
};
} // namespace LAMMPS_NS
#endif

View File

@ -44,7 +44,7 @@ endif
# PACKLIB = all packages that require an additional lib
# should be PACKSYS + PACKINT + PACKEXT
# PACKSYS = subset that require a common system library
# include MPIIO and LB b/c require full MPI, not just STUBS
# include LATBOLTZ b/c it requires a full MPI, not just STUBS
# PACKINT = subset that require an internal (provided) library
# PACKEXT = subset that require an external (downloaded) library
@ -106,7 +106,6 @@ PACKAGE = \
mofff \
molecule \
molfile \
mpiio \
netcdf \
openmp \
opt \
@ -212,7 +211,6 @@ PACKLIB = \
kim \
kokkos \
lepton \
mpiio \
poems \
python \
voronoi \
@ -237,7 +235,7 @@ PACKLIB = \
vtk \
electrode
PACKSYS = compress latboltz mpiio python
PACKSYS = compress latboltz python
PACKINT = atc awpmd colvars electrode gpu kokkos lepton ml-pod poems

View File

@ -52,6 +52,17 @@ lmpgitversion.h
mliap_model_python_couple.cpp
mliap_model_python_couple.h
# removed in August 2023
dump_atom_mpiio.cpp
dump_atom_mpiio.h
dump_cfg_mpiio.cpp
dump_cfg_mpiio.h
dump_custom_mpiio.cpp
dump_custom_mpiio.h
dump_xyz_mpiio.cpp
dump_xyz_mpiio.h
restart_mpiio.cpp
restart_mpiio.h
# removed on 3 August 2023
fix_mscg.cpp
fix_mscg.h
# removed on 29 March 2023

View File

@ -120,8 +120,6 @@ Dump::Dump(LAMMPS *lmp, int /*narg*/, char **arg) :
char *ptr;
if ((ptr = strchr(filename,'%'))) {
if (strstr(style,"mpiio"))
error->all(FLERR,"Dump file MPI-IO output not allowed with % in filename");
multiproc = 1;
nclusterprocs = 1;
filewriter = 1;

View File

@ -1,51 +0,0 @@
/* -*- c++ -*- ----------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
LAMMPS development team: developers@lammps.org
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#ifndef LMP_MPIIO_H
#define LMP_MPIIO_H
// true interface to MPIIO package
// used when MPIIO package is installed
#ifdef LMP_MPIIO
#if defined(MPI_STUBS)
#error "The MPIIO package cannot be compiled in serial with MPI STUBS"
#endif
#include "restart_mpiio.h" // IWYU pragma: export
#else
// dummy interface to MPIIO package
// needed for compiling when MPIIO package is not installed
namespace LAMMPS_NS {
class RestartMPIIO {
public:
int mpiio_exists;
RestartMPIIO(class LAMMPS *) { mpiio_exists = 0; }
~RestartMPIIO() {}
void openForRead(const char *) {}
void openForWrite(const char *) {}
void write(MPI_Offset, int, double *) {}
void read(MPI_Offset, long, double *) {}
void close() {}
};
} // namespace LAMMPS_NS
#endif
#endif
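
This dummy interface is the usual idiom for optional packages: callers include a single header and use one class name, and a preprocessor switch decides whether they get the real implementation or inert stubs plus a runtime flag to test. A compact sketch of the same idiom with invented names:

// optional_io.h -- single header included unconditionally by callers
#ifndef OPTIONAL_IO_H
#define OPTIONAL_IO_H

#ifdef HAVE_FANCY_IO          // defined only when the optional package is built
#include "fancy_io_real.h"    // real class, same public interface
#else
namespace demo {
class FancyIO {               // inert stand-in: compiles everywhere
 public:
  int exists = 0;             // callers test this before relying on the feature
  void open(const char *) {}
  void write(const double *, int) {}
  void close() {}
};
}   // namespace demo
#endif
#endif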

View File

@ -994,22 +994,12 @@ void Output::create_restart(int narg, char **arg)
error->all(FLERR,"Both restart files must use % or neither");
}
int mpiioflag;
if (utils::strmatch(arg[1],"\\.mpiio$")) mpiioflag = 1;
else mpiioflag = 0;
if (nfile == 2) {
if (mpiioflag && !utils::strmatch(arg[2],"\\.mpiio$"))
error->all(FLERR,"Both restart files must use MPI-IO or neither");
if (!mpiioflag && utils::strmatch(arg[2],"\\.mpiio$"))
error->all(FLERR,"Both restart files must use MPI-IO or neither");
}
// setup output style and process optional args
delete restart;
restart = new WriteRestart(lmp);
int iarg = nfile+1;
restart->multiproc_options(multiproc,mpiioflag,narg-iarg,&arg[iarg]);
restart->multiproc_options(multiproc,narg-iarg,&arg[iarg]);
}
/* ----------------------------------------------------------------------

View File

@ -30,7 +30,6 @@
#include "label_map.h"
#include "memory.h"
#include "modify.h"
#include "mpiio.h"
#include "pair.h"
#include "special.h"
#include "update.h"
@ -43,7 +42,7 @@ using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
ReadRestart::ReadRestart(LAMMPS *lmp) : Command(lmp), mpiio(nullptr) {}
ReadRestart::ReadRestart(LAMMPS *lmp) : Command(lmp) {}
/* ---------------------------------------------------------------------- */
@ -88,17 +87,8 @@ void ReadRestart::command(int narg, char **arg)
if (strchr(arg[0],'%')) multiproc = 1;
else multiproc = 0;
if (strstr(arg[0],".mpiio")) mpiioflag = 1;
else mpiioflag = 0;
if (multiproc && mpiioflag)
error->all(FLERR,"Read restart MPI-IO input not allowed with % in filename");
if (mpiioflag) {
mpiio = new RestartMPIIO(lmp);
if (!mpiio->mpiio_exists)
error->all(FLERR,"Reading from MPI-IO filename when MPIIO package is not installed");
}
if (utils::strmatch(arg[0],"\\.mpiio"))
error->all(FLERR,"MPI-IO files are no longer supported by LAMMPS");
// open single restart file or base file for multiproc case
@ -182,28 +172,6 @@ void ReadRestart::command(int narg, char **arg)
double *buf = nullptr;
int m,flag;
// MPI-IO input from single file
if (mpiioflag) {
mpiio->openForRead(file);
memory->create(buf,assignedChunkSize,"read_restart:buf");
mpiio->read((headerOffset+assignedChunkOffset),assignedChunkSize,buf);
mpiio->close();
// can calculate number of atoms from assignedChunkSize
if (!nextra) {
atom->nlocal = 1; // temporarily claim there is one atom...
int perAtomSize = avec->size_restart(); // ...so we can get its size
atom->nlocal = 0; // restore nlocal to zero atoms
int atomCt = (int) (assignedChunkSize / perAtomSize);
if (atomCt > atom->nmax) avec->grow(atomCt);
}
m = 0;
while (m < assignedChunkSize) m += avec->unpack_restart(&buf[m]);
}
// input of single native file
// nprocs_file = # of chunks in file
// proc 0 reads a chunk and bcasts it to other procs
@ -211,7 +179,7 @@ void ReadRestart::command(int narg, char **arg)
// if remapflag set, remap the atom to box before checking sub-domain
// check for atom in sub-domain differs for orthogonal vs triclinic box
else if (multiproc == 0) {
if (multiproc == 0) {
int triclinic = domain->triclinic;
imageint *iptr;
@ -410,7 +378,7 @@ void ReadRestart::command(int narg, char **arg)
// for multiproc or MPI-IO files:
// perform irregular comm to migrate atoms to correct procs
if (multiproc || mpiioflag) {
if (multiproc) {
// if remapflag set, remap all atoms I read back to box before migrating
@ -419,8 +387,7 @@ void ReadRestart::command(int narg, char **arg)
imageint *image = atom->image;
int nlocal = atom->nlocal;
for (int i = 0; i < nlocal; i++)
domain->remap(x[i],image[i]);
for (int i = 0; i < nlocal; i++) domain->remap(x[i],image[i]);
}
// create a temporary fix to hold and migrate extra atom info
@ -528,8 +495,6 @@ void ReadRestart::command(int narg, char **arg)
if (comm->me == 0)
utils::logmesg(lmp," read_restart CPU = {:.3f} seconds\n",platform::walltime()-time1);
delete mpiio;
}
/* ----------------------------------------------------------------------
@ -989,120 +954,9 @@ void ReadRestart::file_layout()
error->all(FLERR,"Restart file is not a multi-proc file");
if (multiproc && multiproc_file == 0)
error->all(FLERR,"Restart file is a multi-proc file");
} else if (flag == MPIIO) {
int mpiioflag_file = read_int();
if (mpiioflag == 0 && mpiioflag_file)
error->all(FLERR,"Restart file is a MPI-IO file");
if (mpiioflag && mpiioflag_file == 0)
error->all(FLERR,"Restart file is not a MPI-IO file");
if (mpiioflag) {
bigint *nproc_chunk_offsets;
memory->create(nproc_chunk_offsets,nprocs,
"write_restart:nproc_chunk_offsets");
bigint *nproc_chunk_sizes;
memory->create(nproc_chunk_sizes,nprocs,
"write_restart:nproc_chunk_sizes");
// on rank 0 read in the chunk sizes that were written out
// then consolidate them and compute offsets relative to the
// end of the header info to fit the current partition size
// if the number of ranks that did the writing is different
if (me == 0) {
int ndx;
int *all_written_send_sizes;
memory->create(all_written_send_sizes,nprocs_file,
"write_restart:all_written_send_sizes");
int *nproc_chunk_number;
memory->create(nproc_chunk_number,nprocs,
"write_restart:nproc_chunk_number");
utils::sfread(FLERR,all_written_send_sizes,sizeof(int),nprocs_file,fp,nullptr,error);
if ((nprocs != nprocs_file) && !(atom->nextra_store)) {
// nprocs differ, but atom sizes are fixed length, yeah!
atom->nlocal = 1; // temporarily claim there is one atom...
int perAtomSize = atom->avec->size_restart(); // ...so we can get its size
atom->nlocal = 0; // restore nlocal to zero atoms
bigint total_size = 0;
for (int i = 0; i < nprocs_file; ++i) {
total_size += all_written_send_sizes[i];
}
bigint total_ct = total_size / perAtomSize;
bigint base_ct = total_ct / nprocs;
bigint leftover_ct = total_ct - (base_ct * nprocs);
bigint current_ByteOffset = 0;
base_ct += 1;
bigint base_ByteOffset = base_ct * (perAtomSize * sizeof(double));
for (ndx = 0; ndx < leftover_ct; ++ndx) {
nproc_chunk_offsets[ndx] = current_ByteOffset;
nproc_chunk_sizes[ndx] = base_ct * perAtomSize;
current_ByteOffset += base_ByteOffset;
}
base_ct -= 1;
base_ByteOffset -= (perAtomSize * sizeof(double));
for (; ndx < nprocs; ++ndx) {
nproc_chunk_offsets[ndx] = current_ByteOffset;
nproc_chunk_sizes[ndx] = base_ct * perAtomSize;
current_ByteOffset += base_ByteOffset;
}
} else { // we have to read in based on how it was written
int init_chunk_number = nprocs_file/nprocs;
int num_extra_chunks = nprocs_file - (nprocs*init_chunk_number);
for (int i = 0; i < nprocs; i++) {
if (i < num_extra_chunks)
nproc_chunk_number[i] = init_chunk_number+1;
else
nproc_chunk_number[i] = init_chunk_number;
}
int all_written_send_sizes_index = 0;
bigint current_offset = 0;
for (int i=0;i<nprocs;i++) {
nproc_chunk_offsets[i] = current_offset;
nproc_chunk_sizes[i] = 0;
for (int j=0;j<nproc_chunk_number[i];j++) {
nproc_chunk_sizes[i] +=
all_written_send_sizes[all_written_send_sizes_index];
current_offset +=
(all_written_send_sizes[all_written_send_sizes_index] *
sizeof(double));
all_written_send_sizes_index++;
}
}
}
memory->destroy(all_written_send_sizes);
memory->destroy(nproc_chunk_number);
}
// scatter chunk sizes and offsets to all procs
MPI_Scatter(nproc_chunk_sizes, 1, MPI_LMP_BIGINT,
&assignedChunkSize , 1, MPI_LMP_BIGINT, 0,world);
MPI_Scatter(nproc_chunk_offsets, 1, MPI_LMP_BIGINT,
&assignedChunkOffset , 1, MPI_LMP_BIGINT, 0,world);
memory->destroy(nproc_chunk_sizes);
memory->destroy(nproc_chunk_offsets);
}
}
flag = read_int();
}
// if MPI-IO file, broadcast the end of the header offset
// this allows all ranks to compute offset to their data
if (mpiioflag) {
if (me == 0) headerOffset = platform::ftell(fp);
MPI_Bcast(&headerOffset,1,MPI_LMP_BIGINT,0,world);
}
}
// ----------------------------------------------------------------------

View File

@ -39,13 +39,6 @@ class ReadRestart : public Command {
int nprocs_file; // total # of procs that wrote restart file
int revision; // revision number of the restart file format
// MPI-IO values
int mpiioflag; // 1 for MPIIO output, else 0
class RestartMPIIO *mpiio; // MPIIO for restart file input
bigint assignedChunkSize;
MPI_Offset assignedChunkOffset, headerOffset;
std::string file_search(const std::string &);
void header();
void type_arrays();

View File

@ -29,7 +29,6 @@
#include "label_map.h"
#include "memory.h"
#include "modify.h"
#include "mpiio.h"
#include "neighbor.h"
#include "output.h"
#include "pair.h"
@ -74,16 +73,13 @@ void WriteRestart::command(int narg, char **arg)
if (strchr(arg[0],'%')) multiproc = nprocs;
else multiproc = 0;
if (utils::strmatch(arg[0],"\\.mpiio$")) mpiioflag = 1;
else mpiioflag = 0;
if ((comm->me == 0) && mpiioflag)
error->warning(FLERR,"MPI-IO output is unmaintained and unreliable. Use with caution.");
if (utils::strmatch(arg[0],"\\.mpiio$"))
error->all(FLERR,"MPI-IO files are no longer supported by LAMMPS");
// setup output style and process optional args
// also called by Output class for periodic restart files
multiproc_options(multiproc,mpiioflag,narg-1,&arg[1]);
multiproc_options(multiproc,narg-1,&arg[1]);
// init entire system since comm->exchange is done
// comm::init needs neighbor::init needs pair::init needs kspace::init, etc
@ -119,21 +115,9 @@ void WriteRestart::command(int narg, char **arg)
/* ---------------------------------------------------------------------- */
void WriteRestart::multiproc_options(int multiproc_caller, int mpiioflag_caller, int narg, char **arg)
void WriteRestart::multiproc_options(int multiproc_caller, int narg, char **arg)
{
multiproc = multiproc_caller;
mpiioflag = mpiioflag_caller;
// error checks
if (multiproc && mpiioflag)
error->all(FLERR,"Restart file MPI-IO output not allowed with % in filename");
if (mpiioflag) {
mpiio = new RestartMPIIO(lmp);
if (!mpiio->mpiio_exists)
error->all(FLERR,"Writing to MPI-IO filename when MPIIO package is not installed");
}
// defaults for multiproc file writing
@ -354,49 +338,34 @@ void WriteRestart::write(const std::string &file)
}
}
// MPI-IO output to single file
// output of one or more native files
// filewriter = 1 = this proc writes to file
// ping each proc in my cluster, receive its data, write data to file
// else wait for ping from fileproc, send my data to fileproc
if (mpiioflag) {
if (me == 0 && fp) {
magic_string();
if (ferror(fp)) io_error = 1;
fclose(fp);
fp = nullptr;
int tmp,recv_size;
if (filewriter) {
MPI_Status status;
MPI_Request request;
for (int iproc = 0; iproc < nclusterprocs; iproc++) {
if (iproc) {
MPI_Irecv(buf,max_size,MPI_DOUBLE,me+iproc,0,world,&request);
MPI_Send(&tmp,0,MPI_INT,me+iproc,0,world);
MPI_Wait(&request,&status);
MPI_Get_count(&status,MPI_DOUBLE,&recv_size);
} else recv_size = send_size;
write_double_vec(PERPROC,recv_size,buf);
}
mpiio->openForWrite(file.c_str());
mpiio->write(headerOffset,send_size,buf);
mpiio->close();
magic_string();
if (ferror(fp)) io_error = 1;
fclose(fp);
fp = nullptr;
} else {
// output of one or more native files
// filewriter = 1 = this proc writes to file
// ping each proc in my cluster, receive its data, write data to file
// else wait for ping from fileproc, send my data to fileproc
int tmp,recv_size;
if (filewriter) {
MPI_Status status;
MPI_Request request;
for (int iproc = 0; iproc < nclusterprocs; iproc++) {
if (iproc) {
MPI_Irecv(buf,max_size,MPI_DOUBLE,me+iproc,0,world,&request);
MPI_Send(&tmp,0,MPI_INT,me+iproc,0,world);
MPI_Wait(&request,&status);
MPI_Get_count(&status,MPI_DOUBLE,&recv_size);
} else recv_size = send_size;
write_double_vec(PERPROC,recv_size,buf);
}
magic_string();
if (ferror(fp)) io_error = 1;
fclose(fp);
fp = nullptr;
} else {
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,send_size,MPI_DOUBLE,fileproc,0,world);
}
MPI_Recv(&tmp,0,MPI_INT,fileproc,0,world,MPI_STATUS_IGNORE);
MPI_Rsend(buf,send_size,MPI_DOUBLE,fileproc,0,world);
}
// check for I/O error status
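
The retained native path gathers each cluster's data to a single writer with a ping handshake: the writer posts a receive, sends a zero-length ping, and the sender answers with MPI_Rsend once it knows the receive is already posted (which is what makes the ready-send legal). A minimal sketch of that handshake, with communicator layout, sizes, and the write step as placeholders:

#include <mpi.h>
#include <algorithm>
#include <cstdio>
#include <vector>

// Rank 0 of 'comm' collects each rank's block in order and "writes" it;
// senders wait for a ping so their MPI_Rsend is guaranteed to find a
// matching, already-posted receive.
void gather_and_write(MPI_Comm comm, const double *mine, int mysize, int maxsize)
{
  int me, nprocs;
  MPI_Comm_rank(comm, &me);
  MPI_Comm_size(comm, &nprocs);

  if (me == 0) {
    std::vector<double> buf(maxsize);
    for (int iproc = 0; iproc < nprocs; ++iproc) {
      int recv_size, tmp;
      if (iproc) {
        MPI_Request request;
        MPI_Status status;
        MPI_Irecv(buf.data(), maxsize, MPI_DOUBLE, iproc, 0, comm, &request);
        MPI_Send(&tmp, 0, MPI_INT, iproc, 0, comm);    // ping: receive is posted
        MPI_Wait(&request, &status);
        MPI_Get_count(&status, MPI_DOUBLE, &recv_size);
      } else {
        std::copy(mine, mine + mysize, buf.begin());
        recv_size = mysize;
      }
      std::printf("block from rank %d: %d doubles\n", iproc, recv_size);  // stand-in for fwrite
    }
  } else {
    int tmp;
    MPI_Recv(&tmp, 0, MPI_INT, 0, 0, comm, MPI_STATUS_IGNORE);
    MPI_Rsend(mine, mysize, MPI_DOUBLE, 0, 0, comm);   // safe: ping arrived, recv exists
  }
}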
@ -578,18 +547,7 @@ void WriteRestart::force_fields()
void WriteRestart::file_layout(int send_size)
{
if (me == 0) {
write_int(MULTIPROC,multiproc);
write_int(MPIIO,mpiioflag);
}
if (mpiioflag) {
int *all_send_sizes;
memory->create(all_send_sizes,nprocs,"write_restart:all_send_sizes");
MPI_Gather(&send_size, 1, MPI_INT, all_send_sizes, 1, MPI_INT, 0,world);
if (me == 0) fwrite(all_send_sizes,sizeof(int),nprocs,fp);
memory->destroy(all_send_sizes);
}
if (me == 0) write_int(MULTIPROC,multiproc);
// -1 flag signals end of file layout info
@ -597,14 +555,6 @@ void WriteRestart::file_layout(int send_size)
int flag = -1;
fwrite(&flag,sizeof(int),1,fp);
}
// if MPI-IO file, broadcast the end of the header offset
// this allows all ranks to compute offset to their data
if (mpiioflag) {
if (me == 0) headerOffset = platform::ftell(fp);
MPI_Bcast(&headerOffset,1,MPI_LMP_BIGINT,0,world);
}
}
// ----------------------------------------------------------------------

View File

@ -28,7 +28,7 @@ class WriteRestart : public Command {
public:
WriteRestart(class LAMMPS *);
void command(int, char **) override;
void multiproc_options(int, int, int, char **);
void multiproc_options(int, int, char **);
void write(const std::string &);
private:
@ -44,12 +44,6 @@ class WriteRestart : public Command {
int fileproc; // ID of proc in my cluster who writes to file
int icluster; // which cluster I am in
// MPI-IO values
int mpiioflag; // 1 for MPIIO output, else 0
class RestartMPIIO *mpiio; // MPIIO for restart file output
MPI_Offset headerOffset;
void header();
void type_arrays();
void force_fields();
@ -66,8 +60,6 @@ class WriteRestart : public Command {
void write_int_vec(int, int, int *);
void write_double_vec(int, int, double *);
};
} // namespace LAMMPS_NS
#endif
#endif

View File

@ -324,7 +324,6 @@ TEST_F(FileOperationsTest, write_restart)
command("write_restart multi-%.restart");
command("write_restart multi2-%.restart fileper 2");
command("write_restart multi3-%.restart nfile 1");
// if (Info::has_package("MPIIO")) command("write_restart test.restart.mpiio");
END_HIDE_OUTPUT();
ASSERT_FILE_EXISTS("noinit.restart");
@ -336,19 +335,6 @@ TEST_F(FileOperationsTest, write_restart)
ASSERT_FILE_EXISTS("multi2-0.restart");
ASSERT_FILE_EXISTS("multi3-base.restart");
ASSERT_FILE_EXISTS("multi3-0.restart");
#if 0
if (Info::has_package("MPIIO")) {
ASSERT_FILE_EXISTS("test.restart.mpiio");
}
if (!Info::has_package("MPIIO")) {
TEST_FAILURE(".*ERROR: Writing to MPI-IO filename when MPIIO package is not inst.*",
command("write_restart test.restart.mpiio"););
} else {
TEST_FAILURE(".*ERROR: Restart file MPI-IO output not allowed with % in filename.*",
command("write_restart test.restart-%.mpiio"););
}
#endif
TEST_FAILURE(".*ERROR: Illegal write_restart command.*", command("write_restart"););
TEST_FAILURE(".*ERROR: Unknown write_restart keyword: xxxx.*",
command("write_restart test.restart xxxx"););
@ -402,7 +388,6 @@ TEST_F(FileOperationsTest, write_restart)
delete_file("multi3-base.restart");
delete_file("multi3-0.restart");
delete_file("triclinic.restart");
//if (Info::has_package("MPIIO")) delete_file("test.restart.mpiio");
}
TEST_F(FileOperationsTest, write_data)