diff --git a/doc/src/compute_reduce.rst b/doc/src/compute_reduce.rst
index 1793830e0a..c2d85753db 100644
--- a/doc/src/compute_reduce.rst
+++ b/doc/src/compute_reduce.rst
@@ -23,7 +23,7 @@ Syntax
        *reduce/region* arg = region-ID
          region-ID = ID of region to use for choosing atoms
 
-* mode = *sum* or *min* or *max* or *ave* or *sumsq* or *avesq*
+* mode = *sum* or *min* or *max* or *ave* or *sumsq* or *avesq* or *sumabs* or *aveabs*
 * one or more inputs can be listed
 * input = x, y, z, vx, vy, vz, fx, fy, fz, c_ID, c_ID[N], f_ID, f_ID[N], v_name
 
@@ -77,7 +77,10 @@ option sums the square of the values in the vector into a global
 total. The *avesq* setting does the same as *sumsq*, then divides the
 sum of squares by the number of values. The last two options can be
 useful for calculating the variance of some quantity, e.g. variance =
-sumsq - ave\^2.
+sumsq - ave\^2. The *sumabs* option sums the absolute values in the
+vector into a global total. The *aveabs* setting does the same as
+*sumabs*, then divides the sum of absolute values by the number of
+values.
 
 Each listed input is operated on independently. For per-atom inputs,
 the group specified with this command means only atoms within the
@@ -189,7 +192,7 @@ value. If multiple inputs are specified, this compute produces a
 global vector of values, the length of which is equal to the number
 of inputs specified.
 
-As discussed below, for the *sum* and *sumsq* modes, the value(s)
+As discussed below, for the *sum*, *sumabs* and *sumsq* modes, the value(s)
 produced by this compute are all "extensive", meaning their value
 scales linearly with the number of atoms involved. If normalized
 values are desired, this compute can be accessed by the
 :doc:`thermo_style custom <thermo_style>` command with
 :doc:`thermo_modify norm yes <thermo_modify>` set as an option. Or it
 can be accessed by a
@@ -208,7 +211,7 @@ compute as input. See the :doc:`Howto output <Howto_output>` doc page
 for an overview of LAMMPS output options.
 
 All the scalar or vector values calculated by this compute are
-"intensive", except when the *sum* or *sumsq* modes are used on
+"intensive", except when the *sum*, *sumabs* or *sumsq* modes are used on
 per-atom or local vectors, in which case the calculated values are
 "extensive".
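(For reference, not part of the patch: the two new modes documented above reduce a vector of N input values v_1, ..., v_N as

    \text{sumabs} = \sum_{i=1}^{N} \lvert v_i \rvert , \qquad
    \text{aveabs} = \frac{1}{N} \sum_{i=1}^{N} \lvert v_i \rvert

i.e. *aveabs* is *sumabs* divided by the number of values, which is why only *sumabs* joins *sum* and *sumsq* in the "extensive" bookkeeping above.)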
diff --git a/src/compute_reduce.cpp b/src/compute_reduce.cpp
index 018f658c61..c8426673ab 100644
--- a/src/compute_reduce.cpp
+++ b/src/compute_reduce.cpp
@@ -54,6 +54,8 @@ ComputeReduce::ComputeReduce(LAMMPS *lmp, int narg, char **arg) :
     mode = SUM;
   else if (strcmp(arg[iarg], "sumsq") == 0)
     mode = SUMSQ;
+  else if (strcmp(arg[iarg], "sumabs") == 0)
+    mode = SUMABS;
   else if (strcmp(arg[iarg], "min") == 0)
     mode = MINN;
   else if (strcmp(arg[iarg], "max") == 0)
@@ -62,6 +64,8 @@ ComputeReduce::ComputeReduce(LAMMPS *lmp, int narg, char **arg) :
     mode = AVE;
   else if (strcmp(arg[iarg], "avesq") == 0)
     mode = AVESQ;
+  else if (strcmp(arg[iarg], "aveabs") == 0)
+    mode = AVEABS;
   else
     error->all(FLERR, "Illegal compute {} operation {}", style, arg[iarg]);
   iarg++;
@@ -253,7 +257,7 @@ ComputeReduce::ComputeReduce(LAMMPS *lmp, int narg, char **arg) :
 
   if (nvalues == 1) {
     scalar_flag = 1;
-    if (mode == SUM || mode == SUMSQ)
+    if (mode == SUM || mode == SUMSQ || mode == SUMABS)
       extscalar = 1;
     else
       extscalar = 0;
@@ -262,7 +266,7 @@ ComputeReduce::ComputeReduce(LAMMPS *lmp, int narg, char **arg) :
   } else {
     vector_flag = 1;
     size_vector = nvalues;
-    if (mode == SUM || mode == SUMSQ)
+    if (mode == SUM || mode == SUMSQ || mode == SUMABS)
       extvector = 1;
     else
       extvector = 0;
@@ -339,13 +343,13 @@ double ComputeReduce::compute_scalar()
 
   double one = compute_one(0, -1);
 
-  if (mode == SUM || mode == SUMSQ) {
+  if (mode == SUM || mode == SUMSQ || mode == SUMABS) {
     MPI_Allreduce(&one, &scalar, 1, MPI_DOUBLE, MPI_SUM, world);
   } else if (mode == MINN) {
     MPI_Allreduce(&one, &scalar, 1, MPI_DOUBLE, MPI_MIN, world);
   } else if (mode == MAXX) {
     MPI_Allreduce(&one, &scalar, 1, MPI_DOUBLE, MPI_MAX, world);
-  } else if (mode == AVE || mode == AVESQ) {
+  } else if (mode == AVE || mode == AVESQ || mode == AVEABS) {
     MPI_Allreduce(&one, &scalar, 1, MPI_DOUBLE, MPI_SUM, world);
     bigint n = count(0);
     if (n) scalar /= n;
@@ -366,7 +370,7 @@ void ComputeReduce::compute_vector()
     indices[m] = index;
   }
 
-  if (mode == SUM || mode == SUMSQ) {
+  if (mode == SUM || mode == SUMSQ || mode == SUMABS) {
     for (int m = 0; m < nvalues; m++)
       MPI_Allreduce(&onevec[m], &vector[m], 1, MPI_DOUBLE, MPI_SUM, world);
 
@@ -412,7 +416,7 @@
       }
     }
 
-  } else if (mode == AVE || mode == AVESQ) {
+  } else if (mode == AVE || mode == AVESQ || mode == AVEABS) {
     for (int m = 0; m < nvalues; m++) {
       MPI_Allreduce(&onevec[m], &vector[m], 1, MPI_DOUBLE, MPI_SUM, world);
       bigint n = count(m);
@@ -646,6 +650,8 @@ void ComputeReduce::combine(double &one, double two, int i)
     one += two;
   else if (mode == SUMSQ || mode == AVESQ)
     one += two * two;
+  else if (mode == SUMABS || mode == AVEABS)
+    one += std::fabs(two);
   else if (mode == MINN) {
     if (two < one) {
       one = two;
diff --git a/src/compute_reduce.h b/src/compute_reduce.h
index a7590ccd52..dc4ee1ef2c 100644
--- a/src/compute_reduce.h
+++ b/src/compute_reduce.h
@@ -26,7 +26,7 @@ namespace LAMMPS_NS {
 
 class ComputeReduce : public Compute {
  public:
-  enum { SUM, SUMSQ, MINN, MAXX, AVE, AVESQ };
+  enum { SUM, SUMSQ, SUMABS, MINN, MAXX, AVE, AVESQ, AVEABS };
   enum { PERATOM, LOCAL };
 
   ComputeReduce(class LAMMPS *, int, char **);
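For a quick sanity check of the new reduction semantics, here is a minimal standalone C++ sketch (illustrative only, not LAMMPS code; the sample values are made up). It mirrors the accumulation added to combine() for SUMABS/AVEABS and the AVE-style division by the value count that compute_scalar() and compute_vector() perform after the MPI_Allreduce:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      // stand-in for the values one rank would reduce over
      std::vector<double> values = {1.5, -2.0, 0.25, -0.75};

      // SUMABS / AVEABS accumulation, as in combine(): one += std::fabs(two);
      double sumabs = 0.0;
      for (double v : values) sumabs += std::fabs(v);

      // AVEABS then divides by the number of values (skipped when the count is
      // zero), matching the "if (n) scalar /= n;" normalization in the AVE branch
      double aveabs = values.empty() ? 0.0 : sumabs / values.size();

      std::printf("sumabs = %g, aveabs = %g\n", sumabs, aveabs);  // 4.5 and 1.125
      return 0;
    }

In the parallel code the per-rank partial sums are combined with MPI_Allreduce (MPI_SUM) first; the division by count() happens only for the AVE, AVESQ, and AVEABS modes.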