make thermo output loops more reliable with explicit loops using the exported nfield value
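The diff below replaces range-based loops over the containers returned by Thermo::get_keywords() and Thermo::get_fields() with indexed loops bounded by the count reported by Thermo::get_nfield(). A minimal sketch of the pattern, using an illustrative ThermoLike stand-in rather than the real LAMMPS Thermo class:

#include <cstdio>
#include <string>
#include <vector>

// Illustrative stand-in for the thermo accessors touched by this commit; the
// real LAMMPS Thermo class exports get_keywords(), get_fields(), get_nfield().
struct ThermoLike {
  std::vector<std::string> keywords;
  std::vector<double> fields;
  const std::vector<std::string> &get_keywords() const { return keywords; }
  const std::vector<double> &get_fields() const { return fields; }
  int get_nfield() const { return (int) keywords.size(); }
};

void dump(const ThermoLike &th)
{
  // before: iterate over whatever each container happens to hold
  //   for (const auto &key : th.get_keywords()) ...
  //   for (const auto &val : th.get_fields()) ...
  // after: loop explicitly over the exported field count so keywords and
  // fields are read in lockstep, exactly nfield entries each
  const auto &keywords = th.get_keywords();
  const auto &fields = th.get_fields();
  const int nfield = th.get_nfield();
  for (int i = 0; i < nfield; ++i)
    std::printf("%s = %g\n", keywords[i].c_str(), fields[i]);
}

Bounding both loops by the same exported count, instead of relying on the sizes of the two containers separately, is presumably what the commit title means by "more reliable".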
@@ -62,18 +62,21 @@ void DumpYAML::write_header(bigint ndump)
    Thermo *th = output->thermo;
    // output thermo data only on timesteps where it was computed
    if (update->ntimestep == th->get_timestep()) {
+      int nfield = th->get_nfield();
+      const auto &keywords = th->get_keywords();
+      const auto &fields = th->get_fields();

      thermo_data += "thermo:\n - keywords: [ ";
-      for (const auto &key : th->get_keywords()) thermo_data += fmt::format("{}, ", key);
+      for (int i = 0; i < nfield; ++i) thermo_data += fmt::format("{}, ", keywords[i]);
      thermo_data += "]\n - data: [ ";

-      for (const auto &val : th->get_fields()) {
-        if (val.type == multitype::DOUBLE)
-          thermo_data += fmt::format("{}, ", val.data.d);
-        else if (val.type == multitype::INT)
-          thermo_data += fmt::format("{}, ", val.data.i);
-        else if (val.type == multitype::BIGINT)
-          thermo_data += fmt::format("{}, ", val.data.b);
+      for (int i = 0; i < nfield; ++i) {
+        if (fields[i].type == multitype::DOUBLE)
+          thermo_data += fmt::format("{}, ", fields[i].data.d);
+        else if (fields[i].type == multitype::INT)
+          thermo_data += fmt::format("{}, ", fields[i].data.i);
+        else if (fields[i].type == multitype::BIGINT)
+          thermo_data += fmt::format("{}, ", fields[i].data.b);
        else
          thermo_data += ", ";
      }

@@ -195,6 +195,7 @@ DumpNetCDF::DumpNetCDF(LAMMPS *lmp, int narg, char **arg) :
  type_nc_real = NC_FLOAT;

  thermo = false;
+  thermo_warn = true;
  thermovar = nullptr;

  framei = 0;

@@ -223,7 +224,7 @@ void DumpNetCDF::openfile()

  if (thermo && !singlefile_opened) {
    delete[] thermovar;
-    thermovar = new int[output->thermo->get_keywords().size()];
+    thermovar = new int[output->thermo->get_nfield()];
  }

  // now the computes and fixes have been initialized, so we can query

@@ -320,8 +321,10 @@ void DumpNetCDF::openfile()

    // perframe variables
    if (thermo) {
-      const auto &keywords = output->thermo->get_keywords();
-      int nfield = keywords.size();
+      Thermo *th = output->thermo;
+      const auto &keywords = th->get_keywords();
+      const int nfield = th->get_nfield();
+
      for (int i = 0; i < nfield; i++) {
        NCERRX( nc_inq_varid(ncid, keywords[i].c_str(), &thermovar[i]), keywords[i].c_str() );
      }

@@ -433,9 +436,11 @@ void DumpNetCDF::openfile()

    // perframe variables
    if (thermo) {
-      const auto &fields = output->thermo->get_fields();
-      const auto &keywords = output->thermo->get_keywords();
-      int nfield = fields.size();
+      Thermo *th = output->thermo;
+      const auto &fields = th->get_fields();
+      const auto &keywords = th->get_keywords();
+      const int nfield = th->get_nfield();
+
      for (int i = 0; i < nfield; i++) {
        if (fields[i].type == multitype::DOUBLE) {
          NCERRX( nc_def_var(ncid, keywords[i].c_str(), type_nc_real, 1, dims, &thermovar[i]), keywords[i].c_str() );

@@ -600,9 +605,23 @@ void DumpNetCDF::write()
  start[1] = 0;

  if (thermo) {
-    const auto &keywords = output->thermo->get_keywords();
-    const auto &fields = output->thermo->get_fields();
-    int nfield = fields.size();
+    Thermo *th = output->thermo;
+
+    // will output current thermo data only on timesteps where it was computed.
+    // warn (once) about using cached copy from old timestep.
+
+    if (thermo_warn && (update->ntimestep != th->get_timestep())) {
+      thermo_warn = false;
+      if (comm->me == 0) {
+        error->warning(FLERR, "Dump {} output on incompatible timestep with thermo output: {} vs {} \n"
+                       " Dump netcdf always stores thermo data from last thermo output",
+                       id, th->get_timestep(), update->ntimestep);
+      }
+    }
+
+    const auto &keywords = th->get_keywords();
+    const auto &fields = th->get_fields();
+    int nfield = th->get_nfield();
    for (int i = 0; i < nfield; i++) {
      if (filewriter) {
        if (fields[i].type == multitype::DOUBLE) {

@@ -65,6 +65,7 @@ class DumpNetCDF : public DumpCustom {

  int type_nc_real;    // netcdf type to use for real variables: float or double
  bool thermo;         // write thermo output to netcdf file
+  bool thermo_warn;    // warn (once) that thermo output is on incompatible step

  bigint n_buffer;     // size of buffer
  bigint *int_buffer;  // buffer for passing data to netcdf

@@ -192,6 +192,7 @@ DumpNetCDFMPIIO::DumpNetCDFMPIIO(LAMMPS *lmp, int narg, char **arg) :
  type_nc_real = NC_FLOAT;

  thermo = false;
+  thermo_warn = true;
  thermovar = nullptr;

  framei = 0;

@@ -220,7 +221,7 @@ void DumpNetCDFMPIIO::openfile()

  if (thermo && !singlefile_opened) {
    delete[] thermovar;
-    thermovar = new int[output->thermo->get_keywords().size()];
+    thermovar = new int[output->thermo->get_nfield()];
  }

  // now the computes and fixes have been initialized, so we can query

@@ -318,8 +319,10 @@ void DumpNetCDFMPIIO::openfile()

    // perframe variables
    if (thermo) {
-      const auto &keywords = output->thermo->get_keywords();
-      int nfield = keywords.size();
+      Thermo *th = output->thermo;
+      const auto &keywords = th->get_keywords();
+      const int nfield = th->get_nfield();
+
      for (int i = 0; i < nfield; i++) {
        NCERRX( ncmpi_inq_varid(ncid, keywords[i].c_str(), &thermovar[i]), keywords[i].c_str() );
      }

@@ -423,9 +426,11 @@ void DumpNetCDFMPIIO::openfile()

    // perframe variables
    if (thermo) {
-      const auto &fields = output->thermo->get_fields();
-      const auto &keywords = output->thermo->get_keywords();
-      int nfield = fields.size();
+      Thermo *th = output->thermo;
+      const auto &fields = th->get_fields();
+      const auto &keywords = th->get_keywords();
+      const int nfield = th->get_nfield();
+
      for (int i = 0; i < nfield; i++) {
        if (fields[i].type == multitype::DOUBLE) {
          NCERRX( ncmpi_def_var(ncid, keywords[i].c_str(), type_nc_real, 1, dims, &thermovar[i]), keywords[i].c_str() );

@@ -593,9 +598,23 @@ void DumpNetCDFMPIIO::write()
  NCERR( ncmpi_begin_indep_data(ncid) );

  if (thermo) {
-    const auto &keywords = output->thermo->get_keywords();
-    const auto &fields = output->thermo->get_fields();
-    int nfield = fields.size();
+    Thermo *th = output->thermo;
+
+    // will output current thermo data only on timesteps where it was computed.
+    // warn (once) about using cached copy from old timestep.
+
+    if (thermo_warn && (update->ntimestep != th->get_timestep())) {
+      thermo_warn = false;
+      if (comm->me == 0) {
+        error->warning(FLERR, "Dump {} output on incompatible timestep with thermo output: {} vs {} \n"
+                       " Dump netcdf/mpiio always stores thermo data from last thermo output",
+                       id, th->get_timestep(), update->ntimestep);
+      }
+    }
+
+    const auto &keywords = th->get_keywords();
+    const auto &fields = th->get_fields();
+    int nfield = th->get_nfield();
    for (int i = 0; i < nfield; i++) {
      if (filewriter) {
        if (fields[i].type == multitype::DOUBLE) {

@@ -62,6 +62,7 @@ class DumpNetCDFMPIIO : public DumpCustom {

  int type_nc_real;    // netcdf type to use for real variables: float or double
  bool thermo;         // write thermo output to netcdf file
+  bool thermo_warn;    // warn (once) that thermo output is on incompatible step

  bigint n_buffer;     // size of buffer
  bigint *int_buffer;  // buffer for passing data to netcdf