Compare commits

...

132 Commits

Author SHA1 Message Date
3027ac9250 patch 16Dec16 2016-12-16 10:30:57 -07:00
fc54ab5cea Merge pull request #301 from akohlmey/corrections-and-bugfixes
Collected corrections and bugfixes
2016-12-16 10:25:29 -07:00
e364b80724 added length keyword to python command 2016-12-16 10:24:25 -07:00
830c9e8661 Merge branch 'USER-DPD_internal_energy' of https://github.com/timattox/lammps_USER-DPD into corrections-and-bugfixes
This closes #303
2016-12-16 11:22:25 -05:00
4907b29ad2 Merge branch 'USER-DPD_bugfixes' of https://github.com/timattox/lammps_USER-DPD into corrections-and-bugfixes
This closes #302
2016-12-16 11:21:15 -05:00
eff7238ff2 USER-DPD: fix_eos*: partition all internal energy into the uMech term only
This makes our results more closely match a vetted serial implementation.
NOTE: This does make the output different from any previous versions.
Patch by Jim Larentzos.  Applied by Tim Mattox.
2016-12-16 10:25:12 -05:00
126fb22e93 USER-DPD: Fix #define typo in pair_multi_lucy.h and pair_multi_lucy_rx.h 2016-12-16 10:08:30 -05:00
0a90492c44 USER-DPD: Update the header files to properly document all error statements
Patch by Jim Larentzos.  Applied by Tim Mattox.
2016-12-15 17:39:15 -05:00
fed629c23e USER-DPD: Bugfix for fix_rx and fix_eos_table_rx to handle restart files.
Patch by Jim Larentzos.  Applied by Tim Mattox.
2016-12-15 17:10:13 -05:00
925481c3f4 USER-DPD: Fix hard-wall force interaction bug, and ensure fraction is >= 0
pair_exp6_rx.cpp patch by Jim Larentzos.  Applied by Tim Mattox.
2016-12-15 16:46:25 -05:00
da2ad5b6e0 update FixIntel code for new neighbor list code 2016-12-14 15:51:12 -05:00
bfcab72268 restore change to make -DLAMMPS_MEMALIGN=64 default when USER-INTEL package is installed (which requires it) 2016-12-14 15:24:55 -05:00
f509f133af patch 13Dec16: neighbor refactor, Stan pppm/disp bug fix, M Brown INTEL package updates 2016-12-13 17:14:28 -07:00
624c57e9da Merge pull request #185 from akohlmey/new-neighbor
New neighbor list code with updates for USER-OMP and USER-DPD
2016-12-13 16:24:41 -07:00
f3b355bcbe Merge pull request #298 from akohlmey/collected-small-fixes
Collected small fixes
2016-12-13 16:23:23 -07:00
ae5764beac added functionality to lib interface 2016-12-13 16:22:17 -07:00
fda43c00fd add deleted file in package to purge list 2016-12-12 13:22:54 -05:00
b34be30be6 Merge pull request #53 from stanmoore1/new-neighbor
New neighbor Kokkos
2016-12-12 13:18:03 -05:00
13b6196b82 Fixing Kokkos compile error 2016-12-12 10:47:39 -07:00
baf55c90f4 Whitespace change 2016-12-12 09:25:41 -07:00
770f5d0bf7 Whitespace change 2016-12-12 09:24:37 -07:00
a31b00965a Updating to master 2016-12-12 09:18:20 -07:00
a5e46e3e6a Merging 2016-12-09 16:20:42 -07:00
31be0da590 Merging pull request 2016-12-09 16:17:35 -07:00
0f3b2544a1 Merge pull request #1 from timattox/new-neighbor
USER-DPD workaround for neighbor list issues
2016-12-09 16:08:31 -07:00
586514e05c Merge branch 'new-neighbor' into new-neighbor 2016-12-09 16:08:08 -07:00
43c459ba56 More changes for Kokkos neighbor 2016-12-09 15:56:55 -07:00
b5c3d2f66c Merge pull request #52 from timattox/new-neighbor
USER-DPD workaround for neighbor list issues
2016-12-09 17:51:35 -05:00
5187cb97e5 USER-DPD: Make fix_shardlow request its own SSA-specific neighbor list,
instead of having pair_dpd_fdt* make the SSA-neighbor list request for it.
Forces an "extra" list to be built, but now skip lists work properly.
Maybe we can detect if skip lists won't be used, and squash the extra list.
2016-12-09 15:42:27 -06:00
eff503e56c Prevent neighbor list copies between SSA and non-SSA neighbor list requests. 2016-12-09 15:39:46 -06:00
cdcebab3bd make the output of the %CPU/OpenMP threads line consistent with compiling in OpenMP support, not having USER-OMP installed 2016-12-09 14:43:56 -05:00
ddf678da51 make fix gcmc command overview consistent
this closes #296
2016-12-09 14:30:27 -05:00
435421301b Small tweaks to Kokkos neighbor 2016-12-09 08:37:01 -07:00
9b48c49f83 Removing unused Kokkos file 2016-12-08 09:18:55 -07:00
d3d5ac17bf Fixed small typos in doc 2016-12-07 19:37:51 -08:00
8318c67816 Kokkos neighbor refactor 2016-12-07 13:00:27 -07:00
7c61dbf5e2 Merge branch 'new-neighbor' of github.com:akohlmey/lammps into new-neighbor 2016-12-07 13:43:04 -05:00
39a12b15d7 Merge branch 'master' into new-neighbor
Resolved Conflicts:
	src/Purge.list
	src/neigh_derive.cpp
2016-12-07 13:40:14 -05:00
fb3f597f41 30Nov16 patch 2016-11-30 14:04:41 -07:00
d14814ae2e Merge pull request #289 from akohlmey/collected-updates-and-bugfixes
Collected updates and bugfixes
2016-11-30 14:02:41 -07:00
beb5a30f67 new compute global/atom command, also bug fix for descending dump sorts 2016-11-30 14:01:27 -07:00
7ddb6670c0 fix typo 2016-11-30 00:12:35 -05:00
789e62388f simplify code 2016-11-29 09:03:53 -05:00
7d098bff90 update format
(cherry picked from commit 2597185afb)
2016-11-29 09:01:36 -05:00
1d970d3cdf dihedral_nharmonic: added writing coefficient by write_data
(cherry picked from commit 618f5c6aa5)
2016-11-29 09:01:36 -05:00
42d430168b fix typo in compute cluster/atom docs
this closes #292
2016-11-29 07:24:09 -05:00
5ff5bc2a6c avoid issues detected by coverity scan 2016-11-28 21:34:35 -05:00
02ae2d218a correct broken link to USER-SMD docs PDF 2016-11-28 11:34:22 -05:00
470908fc93 explicitly disallow dynamic groups with compute rdf 2016-11-24 05:46:15 -05:00
6759630c16 bug fix for dump sort in descending order 2016-11-23 17:08:36 -07:00
87781771ba fix typo and USER-OMP support omission 2016-11-23 09:02:32 -05:00
df46b9aa38 rename compute pressure/grem to compute PRESSURE/GREM 2016-11-22 15:25:59 -05:00
647c6f00ce Merge branch 'grem-feature' of https://github.com/dstelter92/lammps into collected-updates-and-bugfixes 2016-11-22 14:51:19 -05:00
237307eda2 small typo and changes 2016-11-22 12:16:00 -05:00
d58dd4f159 bugfix when parsing mpirun 2016-11-22 12:13:20 -05:00
ae70f1090f added readme for grem examples 2016-11-22 12:05:14 -05:00
59d100ab57 final prep for 22Nov patch 2016-11-22 09:23:02 -07:00
61e71d23ed Merge pull request #288 from akohlmey/moltemplate-1.40
update bundled version of moltemplate to v1.40
2016-11-22 08:51:11 -07:00
b6f2f0e6e9 Merge pull request #287 from rbberger/pylammps/docs
Created PyLammps documentation
2016-11-22 08:50:29 -07:00
ff0441ac16 Merge pull request #286 from akohlmey/small-fixes-and-updates
Collected small fixes and updates
2016-11-22 08:49:46 -07:00
41907d3110 Merge pull request #285 from akohlmey/fix-ipi-update
update for fix ipi from michele ceriotti
2016-11-22 08:48:27 -07:00
b95f255af4 small changes to temper/grem commands 2016-11-22 08:47:44 -07:00
d7b542101a Merge pull request #283 from akohlmey/grem-feature
gREM generalized replica exchange feature for USER-MISC
2016-11-22 08:15:35 -07:00
0ffa50f8e8 tweaked author syntax 2016-11-22 08:15:13 -07:00
7893215964 small comment/whitespace tweak 2016-11-21 12:46:43 -05:00
3dff9f2018 removed extra file 2016-11-21 12:05:30 -05:00
dab232c542 modified temper_grem name to fit conventions, re-ran example to match 2016-11-21 12:02:17 -05:00
9e9d9d5aa5 update bundled version of moltemplate to v1.40 2016-11-21 11:34:42 -05:00
c982b174a2 Merge pull request #49 from epfl-cosmo/fix-ipi
i-PI interface fix
2016-11-19 19:36:13 -05:00
87a5a35bad A tiny bugfix for the reset flag, and a brief explanation of the changes 2016-11-20 00:44:23 +01:00
fd174ce2b1 Merge branch 'fix-ipi-update' of https://github.com/akohlmey/lammps into fix-ipi 2016-11-20 00:04:56 +01:00
b11f376a4f Merge branch 'master' of github.com:lammps/lammps 2016-11-19 23:25:51 +01:00
230b29eae6 correct accelerator flags for dpd styles in pair style overview 2016-11-19 11:47:12 -05:00
2383c31f15 Created PyLammps documentation
Based on material presented during MD Workshop at Temple University in
August 2016.
2016-11-18 23:58:57 -07:00
e175a18bdb be more thorough in initializing optional data in pair style dpd/fdt/energy 2016-11-18 16:18:47 -05:00
a5bde82e37 update .gitignore for recent addition 2016-11-18 15:38:11 -05:00
d787afcca9 also remove generated html files with 'make clean' in docs folder 2016-11-18 15:37:49 -05:00
176cde8ed3 minor cleanups 2016-11-18 15:36:38 -05:00
2862c20815 Merge branch 'master' into grem-feature 2016-11-18 14:51:46 -05:00
78e018829f Merge branch 'grem-feature' of https://github.com/dstelter92/lammps into grem-feature 2016-11-18 14:48:47 -05:00
c78914e7b3 update for fix ipi from michele ceriotti 2016-11-18 09:21:50 -05:00
635f3ce128 synchronize USER-SMD examples with code 2016-11-18 08:09:24 -05:00
81f68e06fd Merge branch 'master' into doc-updates 2016-11-17 20:44:07 -05:00
2a026c9ad8 revised temper_grem example, better file management 2016-11-17 12:53:25 -05:00
4a3091f844 modified temper_grem example with more exchanges 2016-11-17 11:24:29 -05:00
747c95c525 revised documentation, added temper_grem ref to fix_grem 2016-11-17 11:00:49 -05:00
5c64934bc8 added documentation, re-ran temper_grem example 2016-11-17 10:40:10 -05:00
4e62e58d29 Merge pull request #47 from dstelter92/grem-feature
added internal tempering in grem with example
2016-11-17 10:04:43 -05:00
5ac2d9532e Re-run example with debug off 2016-11-17 09:43:44 -05:00
19ac9d2959 turned off dev mode by default in temper_grem 2016-11-17 09:31:07 -05:00
9f313aac75 shorter example 2016-11-16 20:43:41 -05:00
0102c5dadc file cleanup 2016-11-16 20:38:53 -05:00
07e46b797a added internal tempering in grem with example 2016-11-16 20:27:14 -05:00
b45d1e37ef integrate fix grem docs and update to match current conventions 2016-11-16 16:46:00 -05:00
2e7fd513d4 provide fix grem example input for nvt and npt 2016-11-16 16:42:01 -05:00
82364d10e3 Merge branch 'grem-feature' of https://github.com/dstelter92/lammps into grem-feature
Resolved merge conflicts and adapted logic to most recent changes in feature branch

Closes #46
2016-11-16 16:11:53 -05:00
16c8a307e5 removed leftover tex files 2016-11-16 15:39:02 -05:00
94f14ab051 spell check, minor typos 2016-11-16 15:34:32 -05:00
683f514fac simplify multi-replica run by passing per-replica parameters as variables on the command line 2016-11-16 14:22:20 -05:00
f617993944 need to apply fix_modify already in fix grem constructor 2016-11-16 13:52:27 -05:00
4641c9e568 Added basic documentation for grem fix 2016-11-16 13:36:13 -05:00
705f66aaee remove superfluous code 2016-11-16 13:24:41 -05:00
e57ae1ce3f compute scaled kinetic energy tensor without destroying the original data 2016-11-16 12:45:13 -05:00
950442b8b1 added check for nvt vs npt, enabled nvt simulation with fix_grem 2016-11-15 21:53:28 -05:00
1c68e42ecc fix_modify is no longer needed 2016-11-14 13:43:28 -05:00
5f94b31806 add multi-replica example for gREM 2016-11-14 10:12:48 -05:00
fdf5d68f9f allow to extract properties in NH integrator only when they are active 2016-11-14 09:27:33 -05:00
0c25f3b1d6 whitespace cleanup 2016-11-13 23:20:09 -05:00
14c7cf4197 retrieve target temperature and pressure from fix npt. add sanity checks. 2016-11-13 23:18:59 -05:00
26870f223d add example for gREM 2016-11-13 23:18:14 -05:00
09544d0698 bugfix for compute pressure/grem: must make a copy of argument strings 2016-11-13 19:19:52 -05:00
b5130a3b35 avoid NaN for variance from average output 2016-11-13 18:46:55 -05:00
20daf82463 initial import of adapted gREM code by David Stelter and Edyta Malolepsza
The following changes were made:
- the modifications to compute pressure were transferred to a derived class compute pressure/grem
- fix scaleforce was renamed to fix grem
- identifying the grem fix was simplified as fix grem passes an additional argument to compute pressure/grem
- dead code was removed in both files
- checking of arguments was tightened
2016-11-13 18:44:10 -05:00
114926a488 Merge branch 'master' into new-neighbor 2016-10-02 00:26:56 -04:00
5eb9dd0c5d Merge branch 'master' into new-neighbor 2016-09-29 23:14:28 -04:00
ebabc8f0bc Merge remote-tracking branch 'lammps-rw/integration' into update-neighbor 2016-09-09 15:46:20 -04:00
232abf8534 restore locale and enforce grep option squashing 2016-09-09 15:42:14 -04:00
d22caf2658 Merge pull request #29 from timattox/new-neighbor
USER-DPD: remove several files from src that came from src/USER-DPD/
2016-09-09 15:28:56 -04:00
3842aa6095 forward skip lists /omp neighbor list builds to non-omp implementations 2016-09-09 15:23:40 -04:00
32c240978a USER-DPD: remove several files from src that came from src/USER-DPD/
These were accidentally added to git in c9c2ae6.
2016-09-09 15:17:42 -04:00
212c2617f6 delete a couple more files, that don't belong into src/ 2016-09-09 14:56:23 -04:00
40f85c93ba corrected mismatched free() vs. delete[] 2016-09-09 14:10:42 -04:00
2f02d98469 remove USER-DPD files that should not be where they are 2016-09-09 13:53:07 -04:00
4553881fc2 Merge pull request #28 from timattox/new-neighbor
New neighbor, USER-DPD updates
2016-09-09 13:11:55 -04:00
81fcbcd99c USER-DPD: move nstencil_ssa out of core LAMMPS into USER-DPD 2016-09-09 12:19:54 -04:00
82c6eb4675 USER-DPD: Set missing NP_HALF flag in npair_half_bin_newton_ssa.h 2016-09-09 12:19:36 -04:00
8ed3f4226e USER-DPD: move custom binning stuff to a NBinSSA child class.
Removes most SSA specific fields from class NeighList.
2016-09-09 12:19:06 -04:00
9b7a0d7e1c Update gitignore for the new USER-DPD source files. 2016-09-09 12:18:51 -04:00
c9c2ae6c61 new neighbor list changes 2016-09-07 13:42:58 -06:00
0229556b03 Merge branch 'master' of github.com:lammps/lammps 2015-07-03 15:43:29 +02:00
357d4517e8 Merge branch 'master' of github.com:lammps/lammps 2015-04-08 10:46:50 +02:00
a4a97de84f A few GLE fixes 2015-04-08 10:45:49 +02:00
806 changed files with 58854 additions and 17874 deletions

View File

@ -43,7 +43,7 @@ clean-all:
rm -rf $(BUILDDIR)/* utils/txt2html/txt2html.exe
clean:
rm -rf $(RSTDIR)
rm -rf $(RSTDIR) html
html: $(OBJECTS)
@(\

BIN doc/src/Eqs/fix_grem.jpg Normal file (binary image added, not shown; 6.1 KiB)

9
doc/src/Eqs/fix_grem.tex Normal file
View File

@ -0,0 +1,9 @@
\documentclass[12pt]{article}
\begin{document}
$$
T_{eff} = \lambda + \eta (H - H_0)
$$
\end{document}

Four more binary image files added, not shown (70 KiB, 104 KiB, 53 KiB, 111 KiB).

View File

@ -1,7 +1,7 @@
<!-- HTML_ONLY -->
<HEAD>
<TITLE>LAMMPS Users Manual</TITLE>
<META NAME="docnumber" CONTENT="17 Nov 2016 version">
<META NAME="docnumber" CONTENT="16 Dec 2016 version">
<META NAME="author" CONTENT="http://lammps.sandia.gov - Sandia National Laboratories">
<META NAME="copyright" CONTENT="Copyright (2003) Sandia Corporation. This software and manual is distributed under the GNU General Public License.">
</HEAD>
@ -21,7 +21,7 @@
<H1></H1>
LAMMPS Documentation :c,h3
17 Nov 2016 version :c,h4
16 Dec 2016 version :c,h4
Version info: :h4

View File

@ -531,7 +531,8 @@ package"_Section_start.html#start_3.
"dump nc"_dump_nc.html,
"dump nc/mpiio"_dump_nc.html,
"group2ndx"_group2ndx.html,
"ndx2group"_group2ndx.html :tb(c=3,ea=c)
"ndx2group"_group2ndx.html,
"temper/grem"_temper_grem.html :tb(c=3,ea=c)
:line
@ -632,10 +633,10 @@ USER-INTEL, k = KOKKOS, o = USER-OMP, t = OPT.
"rigid/nve (o)"_fix_rigid.html,
"rigid/nvt (o)"_fix_rigid.html,
"rigid/small (o)"_fix_rigid.html,
"rigid/small/nph"_fix_rigid.html,
"rigid/small/npt"_fix_rigid.html,
"rigid/small/nve"_fix_rigid.html,
"rigid/small/nvt"_fix_rigid.html,
"rigid/small/nph (o)"_fix_rigid.html,
"rigid/small/npt (o)"_fix_rigid.html,
"rigid/small/nve (o)"_fix_rigid.html,
"rigid/small/nvt (o)"_fix_rigid.html,
"setforce (k)"_fix_setforce.html,
"shake"_fix_shake.html,
"spring"_fix_spring.html,
@ -687,6 +688,7 @@ package"_Section_start.html#start_3.
"eos/table/rx"_fix_eos_table_rx.html,
"flow/gauss"_fix_flow_gauss.html,
"gle"_fix_gle.html,
"grem"_fix_grem.html,
"imd"_fix_imd.html,
"ipi"_fix_ipi.html,
"langevin/drude"_fix_langevin_drude.html,
@ -765,6 +767,7 @@ KOKKOS, o = USER-OMP, t = OPT.
"erotate/sphere"_compute_erotate_sphere.html,
"erotate/sphere/atom"_compute_erotate_sphere_atom.html,
"event/displace"_compute_event_displace.html,
"global/atom"_compute_global_atom.html,
"group/group"_compute_group_group.html,
"gyration"_compute_gyration.html,
"gyration/chunk"_compute_gyration_chunk.html,
@ -911,8 +914,8 @@ KOKKOS, o = USER-OMP, t = OPT.
"coul/msm"_pair_coul.html,
"coul/streitz"_pair_coul.html,
"coul/wolf (ko)"_pair_coul.html,
"dpd (o)"_pair_dpd.html,
"dpd/tstat (o)"_pair_dpd.html,
"dpd (go)"_pair_dpd.html,
"dpd/tstat (go)"_pair_dpd.html,
"dsmc"_pair_dsmc.html,
"eam (gkot)"_pair_eam.html,
"eam/alloy (gkot)"_pair_eam.html,

View File

@ -1936,18 +1936,22 @@ documentation in the src/library.cpp file for details, including
which quantities can be queried by name:
void *lammps_extract_global(void *, char *)
void lammps_extract_box(void *, double *, double *,
double *, double *, double *, int *, int *)
void *lammps_extract_atom(void *, char *)
void *lammps_extract_compute(void *, char *, int, int)
void *lammps_extract_fix(void *, char *, int, int, int, int)
void *lammps_extract_variable(void *, char *, char *) :pre
int lammps_set_variable(void *, char *, char *)
double lammps_get_thermo(void *, char *) :pre
void lammps_reset_box(void *, double *, double *, double, double, double)
int lammps_set_variable(void *, char *, char *) :pre
double lammps_get_thermo(void *, char *)
int lammps_get_natoms(void *)
void lammps_gather_atoms(void *, double *)
void lammps_scatter_atoms(void *, double *) :pre
void lammps_create_atoms(void *, int, tagint *, int *, double *, double *) :pre
void lammps_create_atoms(void *, int, tagint *, int *, double *, double *,
imageint *, int) :pre
The extract functions return a pointer to various global or per-atom
quantities stored in LAMMPS or to values calculated by a compute, fix,
@ -1957,10 +1961,16 @@ the other extract functions, the underlying storage may be reallocated
as LAMMPS runs, so you need to re-call the function to ensure a
current pointer or returned value(s).
The lammps_reset_box() function resets the size and shape of the
simulation box, e.g. as part of restoring a previously extracted and
saved state of a simulation.
The lammps_set_variable() function can set an existing string-style
variable to a new string value, so that subsequent LAMMPS commands can
access the variable. The lammps_get_thermo() function returns the
current value of a thermo keyword as a double.
access the variable.
The lammps_get_thermo() function returns the current value of a thermo
keyword as a double precision value.
The lammps_get_natoms() function returns the total number of atoms in
the system and can be used by the caller to allocate space for the
@ -1973,10 +1983,13 @@ passed by the caller, to each atom owned by individual processors.
The lammps_create_atoms() function takes a list of N atoms as input
with atom types and coords (required), and optionally atom IDs and
velocities. It uses the coords of each atom to assign it as a new
atom to the processor that owns it. Additional properties for the new
atoms can be assigned via the lammps_scatter_atoms() or
lammps_extract_atom() functions.
velocities and image flags. It uses the coords of each atom to assign
it as a new atom to the processor that owns it. This function is
useful to add atoms to a simulation or (in tandem with
lammps_reset_box()) to restore a previously extracted and saved state
of a simulation. Additional properties for the new atoms can then be
assigned via the lammps_scatter_atoms() or lammps_extract_atom()
functions.
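As a rough illustration of how these calls fit together, the same sequence
can be driven from Python through the bundled ctypes-based wrapper, whose
methods mirror the C functions above. This is a minimal sketch, assuming a
working input script named in.melt; exact method signatures may vary
between LAMMPS versions.

from lammps import lammps
lmp = lammps()
lmp.file("in.melt")                # read and run an input script
natoms = lmp.get_natoms()          # total atom count, e.g. to size buffers
x = lmp.gather_atoms("x", 1, 3)    # gather coords: type 1 = double, 3 per atom
lmp.set_variable("myvar", "10")    # assumes a string-style variable myvar exists
temp = lmp.get_thermo("temp")      # current value of a thermo keyword
lmp.scatter_atoms("x", 1, 3, x)    # push the (possibly modified) coords back :pre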
The examples/COUPLE and python directories have example C++ and C and
Python codes which show how a driver code can link to LAMMPS as a

View File

@ -8,19 +8,26 @@
11. Python interface to LAMMPS :h3
LAMMPS can work together with Python in two ways. First, Python can
LAMMPS can work together with Python in three ways. First, Python can
wrap LAMMPS through the "LAMMPS library
interface"_Section_howto.html#howto_19, so that a Python script can
create one or more instances of LAMMPS and launch one or more
simulations. In Python lingo, this is "extending" Python with LAMMPS.
Second, LAMMPS can use the Python interpreter, so that a LAMMPS input
Second, the low-level Python interface can be used indirectly through the
PyLammps and IPyLammps wrapper classes in Python. These wrappers try to
simplify the usage of LAMMPS in Python by providing an object-based interface
to common LAMMPS functionality. They also reduce the amount of code necessary to
parameterize LAMMPS scripts through Python and make variables and computes
directly accessible. See "PyLammps interface"_#py_9 for more details.
Third, LAMMPS can use the Python interpreter, so that a LAMMPS input
script can invoke Python code, and pass information back-and-forth
between the input script and Python functions you write. The Python
code can also callback to LAMMPS to query or change its attributes.
In Python lingo, this is "embedding" Python in LAMMPS.
This section describes how to do both.
This section describes how to use these three approaches.
11.1 "Overview of running LAMMPS from Python"_#py_1
11.2 "Overview of using Python from a LAMMPS script"_#py_2
@ -29,7 +36,8 @@ This section describes how to do both.
11.5 "Extending Python with MPI to run in parallel"_#py_5
11.6 "Testing the Python-LAMMPS interface"_#py_6
11.7 "Using LAMMPS from Python"_#py_7
11.8 "Example Python scripts that use LAMMPS"_#py_8 :ul
11.8 "Example Python scripts that use LAMMPS"_#py_8
11.9 "PyLammps interface"_#py_9 :ul
If you are not familiar with it, "Python"_http://www.python.org is a
powerful scripting and programming language which can essentially do
@ -824,3 +832,7 @@ different visualization package options. Click to see larger images:
:image(JPG/screenshot_atomeye_small.jpg,JPG/screenshot_atomeye.jpg)
:image(JPG/screenshot_pymol_small.jpg,JPG/screenshot_pymol.jpg)
:image(JPG/screenshot_vmd_small.jpg,JPG/screenshot_vmd.jpg)
11.9 PyLammps interface :link(py_9),h4
Please see the "PyLammps Tutorial"_tutorial_pylammps.html.

View File

@ -51,12 +51,12 @@ relative to the center of mass (COM) velocity of the 2 atoms in the
bond.
The value {engvib} is the vibrational kinetic energy of the two atoms
in the bond, which is simply 1/2 m1 v1^2 + 1/2 m1 v2^2, where v1 and
in the bond, which is simply 1/2 m1 v1^2 + 1/2 m2 v2^2, where v1 and
v2 are the magnitude of the velocity of the 2 atoms along the bond
direction, after the COM velocity has been subtracted from each.
The value {engrot} is the rotational kinetic energy of the two atoms
in the bond, which is simply 1/2 m1 v1^2 + 1/2 m1 v2^2, where v1 and
in the bond, which is simply 1/2 m1 v1^2 + 1/2 m2 v2^2, where v1 and
v2 are the magnitude of the velocity of the 2 atoms perpendicular to
the bond direction, after the COM velocity has been subtracted from
each.
@ -67,7 +67,7 @@ Vcm^2 where Vcm = magnitude of the velocity of the COM.
Note that these 3 kinetic energy terms are simply a partitioning of
the summed kinetic energy of the 2 atoms themselves. I.e. total KE =
1/2 m1 v1^2 + 1/2 m2 v3^2 = engvib + engrot + engtrans, where v1,v2
1/2 m1 v1^2 + 1/2 m2 v2^2 = engvib + engrot + engtrans, where v1,v2
are the magnitude of the velocities of the 2 atoms, without any
adjustment for the COM velocity.

View File

@ -641,7 +641,8 @@ the restarted simulation begins.
[Related commands:]
"fix ave/chunk"_fix_ave_chunk.html
"fix ave/chunk"_fix_ave_chunk.html,
"compute global/atom"_compute_global_atom.html
[Default:]

View File

@ -37,7 +37,7 @@ The neighbor list needed to compute this quantity is constructed each
time the calculation is performed (i.e. each time a snapshot of atoms
is dumped). Thus it can be inefficient to compute/dump this quantity
too frequently or to have multiple compute/dump commands, each of a
{clsuter/atom} style.
{cluster/atom} style.
NOTE: If you have a bonded system, then the settings of
"special_bonds"_special_bonds.html command can remove pairwise

View File

@ -0,0 +1,220 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
compute global/atom command :h3
[Syntax:]
compute ID group-ID style index input1 input2 ... :pre
ID, group-ID are documented in "compute"_compute.html command :ulb,l
global/atom = style name of this compute command :l
index = c_ID, c_ID\[N\], f_ID, f_ID\[N\], v_name :l
c_ID = per-atom vector calculated by a compute with ID
c_ID\[I\] = Ith column of per-atom array calculated by a compute with ID
f_ID = per-atom vector calculated by a fix with ID
f_ID\[I\] = Ith column of per-atom array calculated by a fix with ID
v_name = per-atom vector calculated by an atom-style variable with name :pre
one or more inputs can be listed :l
input = c_ID, c_ID\[N\], f_ID, f_ID\[N\], v_name :l
c_ID = global vector calculated by a compute with ID
c_ID\[I\] = Ith column of global array calculated by a compute with ID, I can include wildcard (see below)
f_ID = global vector calculated by a fix with ID
f_ID\[I\] = Ith column of global array calculated by a fix with ID, I can include wildcard (see below)
v_name = global vector calculated by a vector-style variable with name :pre
:ule
[Examples:]
compute 1 all global/atom c_chunk c_com\[1\] c_com\[2\] c_com\[3\]
compute 1 all global/atom c_chunk c_com\[*\] :pre
[Description:]
Define a calculation that assigns global values to each atom from
vectors or arrays of global values. The specified {index} parameter
is used to determine which global value is assigned to each atom.
The {index} parameter must reference a per-atom vector or array from a
"compute"_compute.html or "fix"_fix.html or the evaluation of an
atom-style "variable"_variable.html. Each {input} value must
reference a global vector or array from a "compute"_compute.html or
"fix"_fix.html or the evaluation of an vector-style
"variable"_variable.html. Details are given below.
The {index} value for an atom is used as an index I (from 1 to N) into
the vector associated with each of the input values. The Ith value
from the input vector becomes one output value for that atom. If the
atom is not in the specified group, or the index I < 1 or I > M, where
M is the actual length of the input vector, then an output value of
0.0 is assigned to the atom.
An example of how this command is useful is in the context of
"chunks", which are static or dynamic subsets of atoms. The "compute
chunk/atom"_compute_chunk_atom.html command assigns unique chunk IDs
to each atom. Its output can be used as the {index} parameter for
this command. Various other computes with "chunk" in their style
name, such as "compute com/chunk"_compute_com_chunk.html or "compute
msd/chunk"_compute_msd_chunk.html, calculate properties for each
chunk. The outputs of these commands are global vectors or arrays,
with one or more values per chunk, and can be used as input values for
this command. This command will then assign the global chunk value to
each atom in the chunk, producing a per-atom vector or per-atom array
as output. The per-atom values can then be output to a dump file or
used by any command that uses per-atom values from a compute as input,
as discussed in "Section 6.15"_Section_howto.html#howto_15.
As a concrete example, these commands will calculate the displacement
of each atom from the center-of-mass of the molecule it is in, and
dump those values to a dump file. In this case, each molecule is a
chunk.
compute cc1 all chunk/atom molecule
compute myChunk all com/chunk cc1
compute prop all property/atom xu yu zu
compute glob all global/atom c_cc1 c_myChunk\[*\]
variable dx atom c_prop\[1\]-c_glob\[1\]
variable dy atom c_prop\[2\]-c_glob\[2\]
variable dz atom c_prop\[3\]-c_glob\[3\]
variable dist atom sqrt(v_dx*v_dx+v_dy*v_dy+v_dz*v_dz)
dump 1 all custom 100 tmp.dump id xu yu zu c_glob\[1\] c_glob\[2\] c_glob\[3\] &
v_dx v_dy v_dz v_dist
dump_modify 1 sort id :pre
You can add these commands to the bench/in.chain script to see how
they work.
:line
Note that for input values from a compute or fix, the bracketed index
I can be specified using a wildcard asterisk with the index to
effectively specify multiple values. This takes the form "*" or "*n"
or "n*" or "m*n". If N = the size of the vector (for {mode} = scalar)
or the number of columns in the array (for {mode} = vector), then an
asterisk with no numeric values means all indices from 1 to N. A
leading asterisk means all indices from 1 to n (inclusive). A
trailing asterisk means all indices from n to N (inclusive). A middle
asterisk means all indices from m to n (inclusive).
Using a wildcard is the same as if the individual columns of the array
had been listed one by one. E.g. these 2 compute global/atom commands
are equivalent, since the "compute com/chunk"_compute_com_chunk.html
command creates a global array with 3 columns:
compute cc1 all chunk/atom molecule
compute com all com/chunk cc1
compute 1 all global/atom c_cc1 c_com\[1\] c_com\[2\] c_com\[3\]
compute 1 all global/atom c_cc1 c_com\[*\] :pre
:line
This section explains the {index} parameter. Note that it must
reference per-atom values, as contrasted with the {input} values which
must reference global values.
Note that all of these options generate floating point values. When
they are used as an index into the specified input vectors, they
are simply rounded down to convert the values to integer indices. The
final values should range from 1 to N (inclusive), since they are used
to access values from N-length vectors.
If {index} begins with "c_", a compute ID must follow which has been
previously defined in the input script. The compute must generate
per-atom quantities. See the individual "compute"_compute.html doc
page for details. If no bracketed integer is appended, the per-atom
vector calculated by the compute is used. If a bracketed integer is
appended, the Ith column of the per-atom array calculated by the
compute is used. Users can also write code for their own compute
styles and "add them to LAMMPS"_Section_modify.html. See the
discussion above for how I can be specified with a wildcard asterisk
to effectively specify multiple values.
If {index} begins with "f_", a fix ID must follow which has been
previously defined in the input script. The fix must generate
per-atom quantities. See the individual "fix"_fix.html doc page for
details. Note that some fixes only produce their values on certain
timesteps, which must be compatible with when compute global/atom
references the values, else an error results. If no bracketed integer
is appended, the per-atom vector calculated by the fix is used. If a
bracketed integer is appended, the Ith column of the per-atom array
calculated by the fix is used. Users can also write code for their
own fix style and "add them to LAMMPS"_Section_modify.html. See the
discussion above for how I can be specified with a wildcard asterisk
to effectively specify multiple values.
If {index} begins with "v_", a variable name must follow which has
been previously defined in the input script. It must be an
"atom-style variable"_variable.html. Atom-style variables can
reference thermodynamic keywords and various per-atom attributes, or
invoke other computes, fixes, or variables when they are evaluated, so
this is a very general means of generating per-atom quantities to use
as {index}.
:line
This section explains the kinds of {input} values that can be used.
Note that inputs reference global values, as contrasted with the
{index} parameter which must reference per-atom values.
If a value begins with "c_", a compute ID must follow which has been
previously defined in the input script. The compute must generate a
global vector or array. See the individual "compute"_compute.html doc
page for details. If no bracketed integer is appended, the vector
calculated by the compute is used. If a bracketed integer is
appended, the Ith column of the array calculated by the compute is
used. Users can also write code for their own compute styles and "add
them to LAMMPS"_Section_modify.html. See the discussion above for how
I can be specified with a wildcard asterisk to effectively specify
multiple values.
If a value begins with "f_", a fix ID must follow which has been
previously defined in the input script. The fix must generate a
global vector or array. See the individual "fix"_fix.html doc page
for details. Note that some fixes only produce their values on
certain timesteps, which must be compatible with when compute
global/atom references the values, else an error results. If no
bracketed integer is appended, the vector calculated by the fix is
used. If a bracketed integer is appended, the Ith column of the array
calculated by the fix is used. Users can also write code for their
own fix style and "add them to LAMMPS"_Section_modify.html. See the
discussion above for how I can be specified with a wildcard asterisk
to effectively specify multiple values.
If a value begins with "v_", a variable name must follow which has
been previously defined in the input script. It must be a
"vector-style variable"_variable.html. Vector-style variables can
reference thermodynamic keywords and various other attributes of
atoms, or invoke other computes, fixes, or variables when they are
evaluated, so this is a very general means of generating a vector of
global quantities which the {index} parameter will reference for
assignment of global values to atoms.
:line
[Output info:]
If a single input is specified, this compute produces a per-atom
vector. If multiple inputs are specified, this compute produces a
per-atom array, where the number of columns is equal to the
number of inputs specified. These values can be used by any command
that uses per-atom vector or array values from a compute as input.
See "Section 6.15"_Section_howto.html#howto_15 for an overview of
LAMMPS output options.
The per-atom vector or array values will be in whatever units the
corresponding input values are in.
[Restrictions:] none
[Related commands:]
"compute"_compute.html, "fix"_fix.html, "variable"_variable.html,
"compute chunk/atom"_compute_chunk_atom.html, "compute
reduce"_compute_reduce.html
[Default:] none

View File

@ -27,7 +27,7 @@ contact radius is used only to prevent particles belonging to
different physical bodies from penetrating each other. It is used by
the contact pair styles, e.g., smd/hertz and smd/tri_surface.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
The value of the contact radius will be 0.0 for particles not in the

View File

@ -24,7 +24,7 @@ compute 1 all smd/damage :pre
Define a computation that calculates the damage status of SPH particles
according to the damage model which is defined via the SMD SPH pair styles, e.g., the maximum plastic strain failure criterion.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth Mach Dynamics in LAMMPS.
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth Mach Dynamics in LAMMPS.
[Output Info:]

View File

@ -32,7 +32,7 @@ configuration. This compute is only really useful for debugging the
hourglass control mechanism which is part of the Total-Lagrangian SPH
pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output Info:]

View File

@ -24,7 +24,7 @@ compute 1 all smd/internal/energy :pre
Define a computation which outputs the per-particle enthalpy, i.e.,
the sum of potential energy and heat.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output Info:]

View File

@ -25,7 +25,7 @@ Define a computation that outputs the equivalent plastic strain per
particle. This command is only meaningful if a material model with
plasticity is defined.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output Info:]

View File

@ -25,7 +25,7 @@ Define a computation that outputs the time rate of the equivalent
plastic strain. This command is only meaningful if a material model
with plasticity is defined.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output Info:]

View File

@ -26,7 +26,7 @@ The mass density is the mass of a particle which is constant during
the course of a simulation, divided by its volume, which can change
due to mechanical deformation.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -25,7 +25,7 @@ Define a computation that calculates the deformation gradient. It is
only meaningful for particles which interact according to the
Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -30,7 +30,7 @@ time step. This calculation is performed automatically in the
relevant SPH pair styles and this compute only serves to make the
stable time increment accessible for output purposes.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -25,7 +25,7 @@ Define a computation that calculates the number of particles inside of
the smoothing kernel radius for particles interacting via the
Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -26,7 +26,7 @@ associated with a particle as a rotated ellipsoid. It is only
meaningful for particles which interact according to the
Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -24,7 +24,7 @@ compute 1 all smd/tlsph/strain :pre
Define a computation that calculates the Green-Lagrange strain tensor
for particles interacting via the Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -24,7 +24,7 @@ compute 1 all smd/tlsph/strain/rate :pre
Define a computation that calculates the rate of the strain tensor for
particles interacting via the Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -24,7 +24,7 @@ compute 1 all smd/tlsph/stress :pre
Define a computation that outputs the Cauchy stress tensor for
particles interacting via the Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -25,7 +25,7 @@ Define a computation that returns the coordinates of the vertices
corresponding to the triangle-elements of a mesh created by the "fix
smd/wall_surface"_fix_smd_wall_surface.html.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -25,7 +25,7 @@ Define a computation that returns the number of neighbor particles
inside of the smoothing kernel radius for particles interacting via
the updated Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -24,7 +24,7 @@ compute 1 all smd/ulsph/strain :pre
Define a computation that outputs the logarithmic strain tensor for
particles interacting via the updated Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -25,7 +25,7 @@ Define a computation that outputs the rate of the logarithmic strain
tensor for particles interacting via the updated Lagrangian SPH pair
style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -23,7 +23,7 @@ compute 1 all smd/ulsph/stress :pre
Define a computation that outputs the Cauchy stress tensor.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -24,7 +24,7 @@ compute 1 all smd/vol :pre
Define a computation that provides the per-particle volume and the sum
of the per-particle volumes of the group for which the fix is defined.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth
Mach Dynamics in LAMMPS.
[Output info:]

View File

@ -21,7 +21,7 @@ type = atom type for inserted atoms (must be 0 if mol keyword used) :l
seed = random # seed (positive integer) :l
T = temperature of the ideal gas reservoir (temperature units) :l
mu = chemical potential of the ideal gas reservoir (energy units) :l
translate = maximum Monte Carlo translation distance (length units) :l
displace = maximum Monte Carlo translation distance (length units) :l
zero or more keyword/value pairs may be appended to args :l
keyword = {mol}, {region}, {maxangle}, {pressure}, {fugacity_coeff}, {full_energy}, {charge}, {group}, {grouptype}, {intra_energy}, or {tfac_insert}
{mol} value = template-ID

111
doc/src/fix_grem.txt Normal file
View File

@ -0,0 +1,111 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
fix grem command :h3
[Syntax:]
fix ID group-ID grem lambda eta H0 thermostat-ID :pre
ID, group-ID are documented in "fix"_fix.html command :ulb,l
grem = style name of this fix command :l
lambda = intercept parameter of linear effective temperature function :l
eta = slope parameter of linear effective temperature function :l
H0 = shift parameter of linear effective temperature function :l
thermostat-ID = ID of Nose-Hoover thermostat or barostat used in simulation :l,ule
[Examples:]
fix fxgREM all grem 400 -0.01 -30000 fxnpt
thermo_modify press fxgREM_press :pre
fix fxgREM all grem 502 -0.15 -80000 fxnvt :pre
[Description:]
This fix implements the molecular dynamics version of the generalized
replica exchange method (gREM) originally developed by "(Kim)"_#Kim,
which uses non-Boltzmann ensembles to sample over first-order phase
transitions. This is done by defining replicas with an
enthalpy-dependent effective temperature
:c,image(Eqs/fix_grem.jpg)
with {eta} negative and steep enough to only intersect the
characteristic microcanonical temperature (Ts) of the system once,
ensuring a unimodal enthalpy distribution in that replica. {Lambda} is
the intercept and affects the generalized ensemble similarly to how
temperature affects a Boltzmann ensemble. {H0} is a reference
enthalpy, and is typically set as the lowest desired sampled enthalpy.
Further explanation can be found in our recent papers
"(Malolepsza)"_#Malolepsza.
This fix requires a Nose-Hoover thermostat fix reference passed to the
grem as {thermostat-ID}. Two distinct temperatures exist in this
generalized ensemble, the effective temperature defined above, and a
kinetic temperature that controls the velocity distribution of
particles as usual. Either constant volume or constant pressure
algorithms can be used.
The fix enforces a generalized ensemble in a single replica
only. Typically, this approach is combined with replica exchange with
replicas differing by {lambda} only for simplicity, but this is not
required. A multi-replica simulation can be run within the LAMMPS
environment using the "temper/grem"_temper_grem.html command. This
utilizes LAMMPS partition mode and requires the number of available
processors be on the order of the number of desired replicas. A
100-replica simulation would require at least 100 processors (1 per
world at minimum). If many replicas are needed on a small number of
processors, multi-replica runs can be run outside of LAMMPS. An
example of this can be found in examples/USER/misc/grem and has no
limit on the number of replicas per processor. However, this is very
inefficient and error prone and should be avoided if possible.
In general, defining the generalized ensembles is unique for every
system. When starting a many-replica simulation without any knowledge
of the underlying microcanonical temperature, there are several tricks
we have utilized to optimize the process. Choosing a less-steep {eta}
yields broader distributions, requiring fewer replicas to map the
microcanonical temperature. While this likely suffers from the same
sampling problems gREM was built to avoid, it provides quick insight
into Ts. Initially using an evenly-spaced {lambda} distribution
identifies regions where small changes in enthalpy lead to large
temperature changes. Replicas are easily added where needed.
:line
[Restart, fix_modify, output, run start/stop, minimize info:]
No information about this fix is written to "binary restart
files"_restart.html.
The "thermo_modify"_thermo_modify.html {press} option is supported
by this fix to add the rescaled kinetic pressure as part of
"thermodynamic output"_thermo_style.html.
[Restrictions:]
This fix is part of the USER-MISC package. It is only enabled if
LAMMPS was built with that package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
[Related commands:]
"temper/grem"_temper_grem.html, "fix nvt"_fix_nh.html, "fix
npt"_fix_nh.html, "thermo_modify"_thermo_modify.html
[Default:] none
:line
:link(Kim)
[(Kim)] Kim, Keyes, Straub, J Chem Phys, 132, 224107 (2010).
:link(Malolepsza)
[(Malolepsza)] Malolepsza, Secor, Keyes, J Phys Chem B 119 (42),
13379-13384 (2015).

View File

@ -10,18 +10,19 @@ fix ipi command :h3
[Syntax:]
fix ID group-ID ipi address port \[unix\] :pre
fix ID group-ID ipi address port \[unix\] \[reset\] :pre
ID, group-ID are documented in "fix"_fix.html command
ipi = style name of this fix command
address = internet address (FQDN or IP), or UNIX socket name
port = port number (ignored for UNIX sockets)
optional keyword = {unix}, if present uses a unix socket :ul
optional keyword = {unix}, if present uses a unix socket
optional keyword = {reset}, if present reset electrostatics at each call :ul
[Examples:]
fix 1 all ipi my.server.com 12345
fix 1 all ipi mysocket 666 unix
fix 1 all ipi mysocket 666 unix reset
[Description:]
@ -57,6 +58,15 @@ input are listed in the same order as in the data file of LAMMPS. The
initial configuration is ignored, as it will be substituted with the
coordinates received from i-PI before forces are ever evaluated.
A note of caution when using potentials that contain long-range
electrostatics, or that contain parameters that depend on box size:
all of these options will be initialized based on the cell size in the
LAMMPS-side initial configuration and kept constant during the run.
This is required, for example, to obtain reproducible and conserved forces.
If the cell varies too wildly, it may be advisable to reinitialize
these interactions at each call. This behavior can be requested by
setting the {reset} switch.
[Restart, fix_modify, output, run start/stop, minimize info:]
There is no restart information associated with this fix, since all

View File

@ -36,7 +36,7 @@ stable maximum time step.
This fix determines the minimum stable time increment across all particles contained in the group for which this
fix is defined. An additional safety factor {s_fact} is applied to the time increment.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth Mach Dynamics in LAMMPS.
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth Mach Dynamics in LAMMPS.
[Restart, fix_modify, output, run start/stop, minimize info:]

View File

@ -32,7 +32,7 @@ fix 1 all smd/integrate_tlsph limit_velocity 1000 :pre
The fix performs explicit time integration for particles which interact according with the Total-Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics in LAMMPS.
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics in LAMMPS.
The {limit_velocity} keyword will control the velocity, scaling the norm of
the velocity vector to max_vel in case it exceeds this velocity limit.

View File

@ -34,7 +34,7 @@ fix 1 all smd/integrate_ulsph limit_velocity 1000 :pre
[Description:]
The fix performs explicit time integration for particles which interact with the updated Lagrangian SPH pair style.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics in LAMMPS.
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics in LAMMPS.
The {adjust_radius} keyword activates dynamic adjustment of the per-particle SPH smoothing kernel radius such that the number of neighbors per particles remains
within the interval {min_nn} to {max_nn}. The parameter {adjust_radius_factor} determines the amount of adjustment per timestep. Typical values are

View File

@ -55,7 +55,7 @@ specified. This style also sets the velocity of each particle to (omega cross
Rperp) where omega is its angular velocity around the rotation axis and
Rperp is a perpendicular vector from the rotation axis to the particle.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics in LAMMPS.
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics in LAMMPS.
[Restart, fix_modify, output, run start/stop, minimize info:]

View File

@ -37,7 +37,7 @@ It is possible to move the triangulated surface via the "smd/move_tri_surf"_fix_
Immediately after a .STL file has been read, the simulation needs to be run for 0 timesteps in order to properly register the new particles
in the system. See the "funnel_flow" example in the USER-SMD examples directory.
See "this PDF guide"_USER/smd/SMD_LAMMPS_userguide.pdf to use Smooth Mach Dynamics in LAMMPS.
See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to use Smooth Mach Dynamics in LAMMPS.
[Restart, fix_modify, output, run start/stop, minimize info:]

View File

@ -48,6 +48,7 @@ Fixes :h1
fix_gld
fix_gle
fix_gravity
fix_grem
fix_halt
fix_heat
fix_imd

View File

@ -172,6 +172,7 @@ fix_gcmc.html
fix_gld.html
fix_gle.html
fix_gravity.html
fix_grem.html
fix_halt.html
fix_heat.html
fix_imd.html
@ -310,6 +311,7 @@ compute_erotate_sphere.html
compute_erotate_sphere_atom.html
compute_event_displace.html
compute_fep.html
compute_global_atom.html
compute_group_group.html
compute_gyration.html
compute_gyration_chunk.html

View File

@ -39,7 +39,7 @@ invocation of the {tlsph} for a solid body would consist of an equation of state
the pressure (the diagonal components of the stress tensor), and a material model to compute shear
stresses (the off-diagonal components of the stress tensor). Damage and failure models can also be added.
Please see the "SMD user guide"_USER/smd/SMD_LAMMPS_userguide.pdf for a complete listing of the possible keywords and material models.
Please see the "SMD user guide"_PDF/SMD_LAMMPS_userguide.pdf for a complete listing of the possible keywords and material models.
:line

View File

@ -43,7 +43,7 @@ stresses (the off-diagonal components of the stress tensor).
Note that the use of *GRADIENT_CORRECTION can lead to severe numerical instabilities. For a general fluid simulation, *NO_GRADIENT_CORRECTION is recommended.
Please see the "SMD user guide"_USER/smd/SMD_LAMMPS_userguide.pdf for a complete listing of the possible keywords and material models.
Please see the "SMD user guide"_PDF/SMD_LAMMPS_userguide.pdf for a complete listing of the possible keywords and material models.
:line

View File

@ -14,7 +14,7 @@ python func keyword args ... :pre
func = name of Python function :ulb,l
one or more keyword/args pairs must be appended :l
keyword = {invoke} or {input} or {return} or {format} or {file} or {here} or {exists}
keyword = {invoke} or {input} or {return} or {format} or {length} or {file} or {here} or {exists}
{invoke} arg = none = invoke the previously defined Python function
{input} args = N i1 i2 ... iN
N = # of inputs to function
@ -29,6 +29,8 @@ keyword = {invoke} or {input} or {return} or {format} or {file} or {here} or {ex
M = N+1 if there is a return value
fstring = each character (i,f,s,p) corresponds in order to an input or return value
'i' = integer, 'f' = floating point, 's' = string, 'p' = SELF
{length} arg = Nlen
Nlen = max length of string returned from Python function
{file} arg = filename
filename = file of Python code, which defines func
{here} arg = inline
@ -165,6 +167,17 @@ equal-style variable as an argument, but only if the output of the
Python function is flagged as a numeric value ("i" or "f") via the
{format} keyword.
If the {return} keyword is used and the {format} keyword specifies the
output as a string, then the default maximum length of that string is
63 characters (64-1 for the string terminator). If you want to return
a longer string, the {length} keyword can be specified with its {Nlen}
value set to a larger number (the code allocates space for Nlen+1 to
include the string terminator). If the Python function generates a
string longer than the default 63 or the specified {Nlen}, it will be
truncated.
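For example, here is a minimal sketch; the function name make_msg and
the variable name longstr are invented for illustration:

variable longstr python make_msg
python make_msg return v_longstr format s length 128 here """
def make_msg():
  return 'a reply string that may be up to 128 characters long'
"""
print "${longstr}" :pre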
:line
Either the {file}, {here}, or {exists} keyword must be used, but only
one of them. These keywords specify what Python code to load into the
Python interpreter. The {file} keyword gives the name of a file,

109
doc/src/temper_grem.txt Normal file
View File

@ -0,0 +1,109 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
temper/grem command :h3
[Syntax:]
temper/grem N M lambda fix-ID thermostat-ID seed1 seed2 index :pre
N = total # of timesteps to run
M = attempt a tempering swap every this many steps
lambda = initial lambda for this ensemble
fix-ID = ID of fix_grem
thermostat-ID = ID of the thermostat that controls kinetic temperature
seed1 = random # seed used to decide on adjacent temperature to partner with
seed2 = random # seed for Boltzmann factor in Metropolis swap
index = which temperature (0 to N-1) I am simulating (optional) :ul
[Examples:]
temper/grem 100000 1000 ${lambda} fxgREM fxnvt 0 58728
temper/grem 40000 100 ${lambda} fxgREM fxnpt 0 32285 ${walkers} :pre
[Description:]
Run a parallel tempering or replica exchange simulation in LAMMPS
partition mode using multiple generalized replicas (ensembles) of a
system defined by "fix grem"_fix_grem.html, which stands for the
generalized replica exchange method (gREM) originally developed by
"(Kim)"_#Kim. It uses non-Boltzmann ensembles to sample over first
order phase transitions. The is done by defining replicas with an
enthalpy dependent effective temperature
Two or more replicas must be used. See the "temper"_temper.html
command for an explanation of how to run replicas on multiple
partitions of one or more processors.
This command is a modification of the "temper"_temper.html command and
has the same dependencies, restraints, and input variables which are
discussed there in greater detail.
Instead of temperature, this command performs replica exchanges in
lambda as per the generalized ensemble enforced by "fix
grem"_fix_grem.html. The desired lambda is specified by {lambda},
which is typically a variable previously set in the input script, so
that each partition is assigned a different lambda. See the
"variable"_variable.html command for more details. For example:
variable lambda world 400 420 440 460
fix fxnvt all nvt temp 300.0 300.0 100.0
fix fxgREM all grem ${lambda} -0.05 -50000 fxnvt
temper 100000 100 ${lambda} fxgREM fxnvt 3847 58382 :pre
would define 4 lambdas with constant kinetic temperature but unique
generalized temperatures, and assign one of them to the "fix
grem"_fix_grem.html used by each replica and to the temper/grem command.
As the gREM simulation runs for {N} timesteps, a swap between adjacent
ensembles will be attempted every {M} timesteps. If {seed1} is 0,
then the swap attempts will alternate between odd and even pairings.
If {seed1} is non-zero then it is used as a seed in a random number
generator to randomly choose an odd or even pairing each time. Each
attempted swap of lambdas is either accepted or rejected based on
a Metropolis criterion, derived for gREM by "(Kim)"_#Kim, which uses
{seed2} in the random number generator.
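For reference, the external lj-6rep example script shipped with the gREM
examples evaluates an equivalent criterion in Python; the following is a
rough sketch of it (names are illustrative; H0 is the reference enthalpy
and lambda + eta*(H - H0) the linear effective temperature):

from math import log
import random

def grem_accept(lam_a, lam_b, H_a, H_b, eta, H0, kB):
    # log of the effective temperature of each configuration at each lambda
    w1 = log(lam_a + eta * (H_a - H0))
    w2 = log(lam_a + eta * (H_b - H0))
    w3 = log(lam_b + eta * (H_a - H0))
    w4 = log(lam_b + eta * (H_b - H0))
    weight = (w4 - w3 + w1 - w2) / eta / kB
    # accept outright for positive weight, otherwise a Metropolis test
    return weight > 0 or weight > log(random.random()) :pre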
File management works identically to the "temper"_temper.html command.
Dump files created by this command contain continuous trajectories and
require post-processing to obtain per-replica information.
The last argument {index} in the temper/grem command is optional and is
used when restarting a run from a set of restart files (one for each
replica) which had previously swapped to new lambdas. This is done
using a variable. For example if the log file listed the following for
a simulation with 5 replicas:
500000 2 4 0 1 3 :pre
then a setting of
variable walkers world 2 4 0 1 3 :pre
would be used to restart the run with a temper/grem command like the example
above with ${walkers} as the last argument. This functionality is
identical to "temper"_temper.html.
:line
[Restrictions:]
This command can only be used if LAMMPS was built with the USER-MISC
package. See the "Making LAMMPS"_Section_start.html#start_3 section
for more info on packages.
This command must be used with "fix grem"_fix_grem.html.
[Related commands:]
"fix grem"_fix_grem.html, "temper"_temper.html, "variable"_variable.html
[Default:] none
:link(Kim)
[(Kim)] Kim, Keyes, Straub, J Chem Phys, 132, 224107 (2010).


@ -0,0 +1,462 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
PyLammps Tutorial :h1
<!-- RST
.. contents::
END_RST -->
Overview :h2
PyLammps is a Python wrapper class which can be instantiated on its own or on
top of an existing lammps Python object. It provides a simpler, Python-like
interface to common LAMMPS functionality. Unlike the original flat C-types
interface, it exposes a discoverable API and does not require knowledge of the
underlying C++ implementation. Finally, the IPyLammps wrapper builds on top of
PyLammps and adds some additional features for integration into IPython
notebooks, e.g. for embedded visualization output from dump/image.
Comparison of lammps and PyLammps interfaces :h3
lammps.lammps :h4
uses C-Types
direct memory access to native C++ data
provides functions to send and receive data to LAMMPS
requires knowledge of how LAMMPS internally works (C pointers, etc) :ul
lammps.PyLammps :h4
higher-level abstraction built on top of original C-Types interface
manipulation of Python objects
communication with LAMMPS is hidden from API user
shorter, more concise Python
better IPython integration, designed for quick prototyping :ul
Quick Start :h2
System-wide Installation :h3
Step 1: Building LAMMPS as a shared library :h4
To use LAMMPS inside of Python it has to be compiled as a shared library. This
library is then loaded by the Python interface. In this example, we use the
Make.py utility to create a Makefile with C++ exceptions, PNG, JPEG and FFMPEG
output support enabled. Finally, we also enable the MOLECULE package and compile
using the generated {auto} Makefile.
cd $LAMMPS_DIR/src :pre
# generate custom Makefile
python2 Make.py -jpg -png -s ffmpeg exceptions -m mpi -a file :pre
# add packages if necessary
make yes-MOLECULE :pre
# compile shared library using Makefile
make mode=shlib auto :pre
Step 2: Installing the LAMMPS Python package :h4
PyLammps is part of the lammps Python package. To install it simply install
that package into your current Python installation.
cd $LAMMPS_DIR/python
python install.py :pre
NOTE: Recompiling the shared library requires reinstalling the Python package.
Installation inside of a virtualenv :h3
You can use virtualenv to create a custom Python environment specifically tuned
for your workflow.
Benefits of using a virtualenv :h4
isolation of your system Python installation from your development installation
installation can happen in your user directory without root access (useful for HPC clusters)
installing packages through pip allows you to get newer versions of packages than e.g., through apt-get or yum package managers (and without root access)
you can even install specific old versions of a package if necessary :ul
[Prerequisite (e.g. on Ubuntu)]
apt-get install python-virtualenv :pre
Creating a virtualenv with lammps installed :h4
# create virtualenv named 'testing'
virtualenv testing :pre
# activate 'testing' environment
source testing/bin/activate :pre
# install LAMMPS package in virtualenv
(testing) cd $LAMMPS_DIR/python
(testing) python install.py :pre
# install other useful packages
(testing) pip install matplotlib jupyter mpi4py :pre
... :pre
# return to original shell
(testing) deactivate :pre
Creating a new instance of PyLammps :h2
To create a PyLammps object you need to first import the class from the lammps
module. By using the default constructor, a new {lammps} instance is created.
from lammps import PyLammps
L = PyLammps() :pre
You can also initialize PyLammps on top of this existing {lammps} object:
from lammps import lammps, PyLammps
lmp = lammps()
L = PyLammps(ptr=lmp) :pre
Commands :h2
Sending a LAMMPS command with the existing library interfaces is done using
the command method of the lammps object instance.
For instance, let's take the following LAMMPS command:
region box block 0 10 0 5 -0.5 0.5 :pre
In the original interface this command can be executed with the following
Python code if {L} was a lammps instance:
L.command("region box block 0 10 0 5 -0.5 0.5") :pre
With the PyLammps interface, any command can be split up into arbitrary parts
separated by whitespace, passed as individual arguments to a region method.
L.region("box block", 0, 10, 0, 5, -0.5, 0.5) :pre
Note that each numeric parameter is passed as a Python floating-point literal. In the
PyLammps interface, each command takes an arbitrary parameter list and transparently
merges it to a single command string, separating individual parameters by whitespace.
The benefit of this approach is avoiding redundant command calls and easier
parameterization. In the original interface, parameterization needed to be done
manually by creating formatted strings.
L.command("region box block %f %f %f %f %f %f" % (xlo, xhi, ylo, yhi, zlo, zhi)) :pre
In contrast, methods of PyLammps accept parameters directly and will convert
them automatically to a final command string.
L.region("box block", xlo, xhi, ylo, yhi, zlo, zhi) :pre
System state :h2
In addition to dispatching commands directly through the PyLammps object, it
also provides several properties which allow you to query the system state.
:dlb
L.system :dt
dictionary describing the system, such as the bounding box or number of atoms :dd
L.system.xlo, L.system.xhi :dt
bounding box limits along x-axis :dd
L.system.ylo, L.system.yhi :dt
bounding box limits along y-axis :dd
L.system.zlo, L.system.zhi :dt
bounding box limits along z-axis :dd
L.communication :dt
configuration of communication subsystem, such as the number of threads or processors :dd
L.communication.nthreads :dt
number of threads used by each LAMMPS process :dd
L.communication.nprocs :dt
number of MPI processes used by LAMMPS :dd
L.fixes :dt
List of fixes in the current system :dd
L.computes :dt
List of active computes in the current system :dd
L.dumps :dt
List of active dumps in the current system :dd
L.groups :dt
List of groups present in the current system :dd
:dle
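For a quick look at these properties (a hypothetical snippet; the actual
values depend on your setup):

print(L.system.natoms)              # total number of atoms
print(L.system.xlo, L.system.xhi)   # bounding box limits along x
print(L.communication.nprocs)       # number of MPI processes :pre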
Working with LAMMPS variables :h2
LAMMPS variables can be both defined and accessed via the PyLammps interface.
To define a variable you can use the "variable"_variable.html command:
L.variable("a index 2") :pre
A dictionary of all variables is returned by L.variables.
You can access an individual variable by retrieving a variable object from the
L.variables dictionary by name:
a = L.variables\['a'\] :pre
The variable value can then be easily read and written by accessing the value
property of this object.
print(a.value)
a.value = 4 :pre
Retrieving the value of an arbitrary LAMMPS expression :h2
LAMMPS expressions can be immediately evaluated by using the eval method. The
passed string parameter can be any expression containing global thermo values,
variables, compute or fix data.
result = L.eval("ke") # kinetic energy
result = L.eval("pe") # potential energy :pre
result = L.eval("v_t/2.0") :pre
Accessing atom data :h2
All atoms in the current simulation can be accessed by using the L.atoms list.
Each element of this list is an object which exposes its properties (id, type,
position, velocity, force, etc.).
# access first atom
L.atoms\[0\].id
L.atoms\[0\].type :pre
# access second atom
L.atoms\[1\].position
L.atoms\[1\].velocity
L.atoms\[1\].force :pre
Some properties can also be set:
# set position in 2D simulation
L.atoms\[0\].position = (1.0, 0.0) :pre
# set position in 3D simulation
L.atoms\[0\].position = (1.0, 0.0, 1.) :pre
Evaluating thermo data :h2
Each simulation run usually produces thermo output based on system state,
computes, fixes or variables. The trajectories of these values can be queried
after a run via the L.runs list. This list contains a growing list of run data.
The first element is the output of the first run, the second element that of
the second run.
L.run(1000)
L.runs\[0\] # data of first 1000 time steps :pre
L.run(1000)
L.runs\[1\] # data of second 1000 time steps :pre
Each run contains a dictionary of all trajectories. Each trajectory is
accessible through its thermo name:
L.runs\[0\].step # list of time steps in first run
L.runs\[0\].ke # list of kinetic energy values in first run :pre
Together with matplotlib, plotting data out of LAMMPS becomes simple:
import matplotlib.pyplot as plt
steps = L.runs\[0\].step
ke = L.runs\[0\].ke
plt.plot(steps, ke) :pre
Error handling with PyLammps :h2
Compiling the shared library with C++ exception support provides a better error
handling experience. Without exceptions the LAMMPS code will terminate the
current Python process with an error message. With exceptions enabled, errors
are captured on the C++ side and rethrown on the Python side. This way you
can handle LAMMPS errors through the Python exception handling mechanism.
IMPORTANT NOTE: Capturing a LAMMPS exception in Python can still mean that the
current LAMMPS process is in an illegal state and must be terminated. It is
advised to save your data and terminate the Python instance as quickly as
possible.
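A minimal sketch of this pattern, assuming the shared library was compiled
with exceptions enabled as described in the Quick Start:

from lammps import lammps

lmp = lammps()
try:
    lmp.command("this_is_not_a_valid_command")
except Exception as e:
    # the C++ error message arrives as a Python exception
    print("LAMMPS error:", e) :pre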
Using PyLammps in IPython notebooks and Jupyter :h2
If the LAMMPS Python package is installed for the same Python interpreter as
IPython, you can use PyLammps directly inside an IPython notebook running in
Jupyter. Jupyter is a powerful integrated development environment (IDE) for
many dynamic languages like Python, Julia and others, which operates inside of
any web browser. Besides auto-completion and syntax highlighting it allows you
to create formatted documents using Markdown, mathematical formulas, graphics and
animations intermixed with executable Python code. It is a great format for
tutorials and showcasing your latest research.
To launch an instance of Jupyter simply run the following command inside your
Python environment (this assumes you followed the Quick Start instructions):
jupyter notebook :pre
IPyLammps Examples :h2
Examples of IPython notebooks can be found in the python/examples/pylammps
subdirectory. To open these notebooks launch {jupyter notebook} inside this
directory and navigate to one of them. If you compiled and installed
a LAMMPS shared library with exceptions, PNG, JPEG and FFMPEG support
you should be able to rerun all of these notebooks.
Validating a dihedral potential :h3
This example showcases how an IPython Notebook can be used to compare a simple
LAMMPS simulation of a harmonic dihedral potential to its analytical solution.
Four atoms are placed in the simulation and the dihedral potential is applied on
them using a datafile. Then one of the atoms is rotated along the central axis by
setting its position from Python, which changes the dihedral angle.
phi = \[d * math.pi / 180 for d in range(360)\] :pre
pos = \[(1.0, math.cos(p), math.sin(p)) for p in phi\] :pre
pe = \[\]
for p in pos:
    L.atoms\[3\].position = p
    L.run(0)
    pe.append(L.eval("pe")) :pre
By evaluating the potential energy for each position we can verify the
trajectory against the analytical formula. To compare both solutions, we plot
both trajectories over each other using matplotlib, which embeds the generated
plot inside the IPython notebook.
:c,image(JPG/pylammps_dihedral.jpg)
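A hypothetical sketch of that comparison, assuming {dihedral_style harmonic}
with E = K\[1 + d*cos(n*phi)\] and placeholder coefficients (the real values
come from the datafile):

import math
import matplotlib.pyplot as plt :pre

K, d, n = 1.0, 1, 1   # placeholder coefficients, not from the example
analytical = \[K * (1 + d * math.cos(n * p)) for p in phi\]
plt.plot(phi, pe, label="simulated")
plt.plot(phi, analytical, label="analytical")
plt.legend()
plt.show() :pre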
Running a Monte Carlo relaxation :h3
This second example shows how to use PyLammps to create a 2D Monte Carlo Relaxation
simulation, computing and plotting energy terms and even embedding video output.
Initially, a 2D system is created in a state with minimal energy.
:c,image(JPG/pylammps_mc_minimum.jpg)
It is then disordered by moving each atom by a random delta.
random.seed(27848)
deltaperturb = 0.2 :pre
for i in range(L.system.natoms):
    x, y = L.atoms\[i\].position
    dx = deltaperturb * random.uniform(-1, 1)
    dy = deltaperturb * random.uniform(-1, 1)
    L.atoms\[i\].position = (x+dx, y+dy) :pre
L.run(0) :pre
:c,image(JPG/pylammps_mc_disordered.jpg)
Finally, the Monte Carlo algorithm is implemented in Python. It continuously
moves random atoms by a random delta and only accepts certain moves.
estart = L.eval("pe")
elast = estart :pre
naccept = 0
energies = \[estart\] :pre
niterations = 3000
deltamove = 0.1
kT = 0.05 :pre
natoms = L.system.natoms :pre
for i in range(niterations):
    iatom = random.randrange(0, natoms)
    current_atom = L.atoms\[iatom\] :pre
    x0, y0 = current_atom.position :pre
    dx = deltamove * random.uniform(-1, 1)
    dy = deltamove * random.uniform(-1, 1) :pre
    current_atom.position = (x0+dx, y0+dy) :pre
    L.run(1, "pre no post no") :pre
    e = L.eval("pe")
    energies.append(e) :pre
    if e <= elast:
        naccept += 1
        elast = e
    elif random.random() <= math.exp(natoms*(elast-e)/kT):
        naccept += 1
        elast = e
    else:
        current_atom.position = (x0, y0) :pre
The energies of each iteration are collected in a Python list and finally plotted using matplotlib.
:c,image(JPG/pylammps_mc_energies_plot.jpg)
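A short sketch of that step, using only names defined above:

print("acceptance ratio:", naccept / float(niterations))
import matplotlib.pyplot as plt
plt.plot(energies)
plt.xlabel("iteration")
plt.ylabel("potential energy")
plt.show() :pre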
The IPython notebook also shows how to use dump commands and embed video files
inside of the IPython notebook.
Using PyLammps and mpi4py (Experimental) :h2
PyLammps can be run in parallel using mpi4py. This Python package can be installed using
pip install mpi4py :pre
The following is a short example which reads in an existing LAMMPS input file and
executes it in parallel. You can find in.melt in the examples/melt folder.
from mpi4py import MPI
from lammps import PyLammps :pre
L = PyLammps()
L.file("in.melt") :pre
if MPI.COMM_WORLD.rank == 0:
    print("Potential energy: ", L.eval("pe")) :pre
MPI.Finalize() :pre
To run this script (melt.py) in parallel using 4 MPI processes we invoke the
following mpirun command:
mpirun -np 4 python melt.py :pre
IMPORTANT NOTE: Any command must be executed by all MPI processes. However, evaluations and queries of the system state are only available on rank 0.
Feedback and Contributing :h2
If you find this Python interface useful, please feel free to provide feedback
and ideas on how to improve it to Richard Berger (richard.berger@temple.edu). We also
want to encourage people to write tutorial style IPython notebooks showcasing LAMMPS usage
and maybe their latest research results.


@ -7,6 +7,7 @@ Tutorials :h1
tutorial_drude
tutorial_github
tutorial_pylammps
body
manifolds


@ -0,0 +1,81 @@
Generalized Replica Exchange Method (gREM) examples
===================================================
Examples:
---------------------------------------------------
lj-single:
This example is the simplest case scenario utilizing the generalized
ensemble defined by fix_grem. It utilizes only 1 replica and requires
the LAMMPS executable to be run as usual:
mpirun -np 4 lmp_mpi -in in.gREM-npt
./lmp_serial -in in.gREM-nvt
While this does not obtain any information about Ts(H), it is most similar to
a microcanonical simulation and "single-replica gREM" can be useful for
studying non-equilibrium processes as well.
lj-6rep:
This example utilizes an external python script to handle swaps between
replicas. Included is run.sh, which requires the path to your LAMMPS
executable. The Python script is fragile as it relies on parsing output files
from the LAMMPS run and moving LAMMPS data files between directories. Use
caution if modifying this example further. If compiled with MPI, multiple
processors can be used as:
./run.sh $NUM_PROCS
A serial run is completed simply as:
./run.sh 1
where the executable provided must be serial if "1" is provided as the number
of procs. While this external replica exchange module is quite slow and
inefficient, it allows for many replicas to be used on a single processor.
While there are only 6 replicas here, this example could be extended to >100
replicas while still using a serial compilation. This is also beneficial for
running on high performance nodes with few cores to complete a full-scale gREM
simulation with a large number of replicas.
A quick note on efficiency: frequent exchanges slow down this script
substantially because LAMMPS is restarted every exchange attempt. The script
works best for large systems with infrequent exchanges.
lj-temper:
This is an example using the internal replica exchange module. While fast
in comparison to the Python version, it requires substantial resources
(at least 1 proc per replica). Instead of stopping LAMMPS every exchange
attempt, all replicas are run concurrently, and exchanges take place
internally. This requires use of LAMMPS partition mode, via the command
line using the -p flag. Input files require world type variables defining
the parameters of each replica. The included example with 4 replicas must
run on at least 4 procs; in that case LAMMPS could be initiated as:
mpirun -np 4 lmp_mpi -p 4x1 -in in.gREM-temper
spawning 4 partitions with 1 processor each. Multiple procs per replica could
be used. In the case of a 16-processor run with 4 replicas, the
following logic could be used:
mpirun -np 16 lmp_mpi -p 4x4 -in in.gREM-temper
Once started, a universe log file will be created as well as log files for
each replica. The universe (log.lammps) contains exchange information, while
the replicas (*/log.lammps.*) contain the thermo output as usual. In this
example, in.gREM-temper moves the log files to their respective folders.
Closing Notes:
---------------------------------------------------
A significant difference between lj-6rep and lj-temper is the format of the
data. In lj-6rep, data is stored as 'replicas', meaning discontinuous
trajectories, as files are moved between directories labeled by the 'lambda'
of the replica. In lj-temper, data is stored as 'walkers' with continuous
trajectories, but discontinuous parameters. The latter is significantly more
efficient, but
requires post-processing to obtain per-replica information.
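As a minimal post-processing sketch (hypothetical, not included with the
examples): the universe log prints lines such as "500000 2 4 0 1 3", the
step followed by the lambda index each walker currently holds, so inverting
that mapping tells you which walker's trajectory to read for each lambda:

lam_of_walker = [2, 4, 0, 1, 3]   # columns 2..N of one log.lammps line
walker_of_lam = [lam_of_walker.index(i) for i in range(len(lam_of_walker))]
print(walker_of_lam)              # walker currently at lambda i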
Any problems/questions should be directed to <dstelter@bu.edu>.

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,15 @@
#!/bin/bash
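# reset each numbered replica directory to its initial state:
# remove old outputs and restore the initial restart file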
for i in $(ls -d [0-9]*)
do
rm -f $i/final*
rm -f $i/log*
rm -f $i/ent*
rm -f $i/output
cp $i/restart.init $i/restart_file
done
echo 1 > lastexchange
cp walker.bkp lastwalker
exit 0


@ -0,0 +1,168 @@
#!/usr/bin/env python2.7
import os, sys
from numpy import *
import numpy.random
### Runs replica exchange with gREM (fix grem) for an unlimited number of replicas on a set number of processors. This script is inefficient, but necessary when running with hundreds of replicas on a relatively small number of procs.
### read number of processors from the command line
nproc = int(sys.argv[1])
### path to simulation directory
path = os.getcwd()
### path to LAMMPS executable
lmp = sys.argv[2]
### LAMMPS input name
inp = sys.argv[3]
### define pressure for simulations (0 if const V)
pressure = 0
### some constants for gREM, must match with LAMMPS input file!
H = -30000
eta = -0.01
#kB = 0.000086173324 # eV (metal)
kB = 0.0019872 # kcal/mol (real)
### define lambdas - script assumes that there are already existing directories with all files necessary to run
lambdas=[400,405,410,415,420,425]
ll = len(lambdas)
### define number of exchanges
starting_ex = int(loadtxt("lastexchange"))
how_many_ex = 5
max_exchange = starting_ex+how_many_ex
### array with walkers
walker = loadtxt("lastwalker")
### initiate array with enthalpies
enthalpy = zeros(ll)
aver_enthalpy = zeros(ll)
for exchange in arange(starting_ex,max_exchange):
    print "run", exchange
    for l in range(ll):
        #print "replica", l
        os.chdir(path+"/%s" % lambdas[l])
        #os.system("cp restart_file restart_file%d" % exchange)
        if (nproc > 1):
            os.system("mpirun -np %d " % (nproc) + lmp + " -in ../" + inp + " -var lambda %g -var eta %g -var enthalpy %g > output" % (lambdas[l], eta, H))
        if (nproc == 1):
            os.system(lmp + " -in ../" + inp + " -var lambda %g -var eta %g -var enthalpy %g > output" % (lambdas[l], eta, H))
        os.system("grep -v '[a-zA-Z]' output | awk '{if(NF==6 && NR>19)print $0}' | awk '{print $3}' >ent")
        enthalpy[l] = os.popen("tail -n 1 ent").read()
        ee = loadtxt("ent")
        aver_enthalpy[l] = mean(ee[-1])
        # os.system("mv dump.dcd dump%d.dcd" % exchange)
        os.system("mv log.lammps log%d.lammps" % exchange)
        os.system("mv final_restart_file final_restart_file%d" % exchange)
        os.system("mv ent ent%d" % exchange)
        os.system("bzip2 log%d.lammps ent%d" % (exchange,exchange))
        os.system("cp final_restart_file%d restart_file" % exchange)
    ### replicas will be exchanged based on enthalpy order, not replica order (thermostat order)
    #entalpy_sorted_indices = enthalpy.argsort()
    aver_entalpy_sorted_indices = aver_enthalpy.argsort()
    ### choose a pair of replicas for an exchange attempt based on enthalpy order
    pp = random.random_integers(0,ll-2)
    first = aver_entalpy_sorted_indices[pp]
    second = aver_entalpy_sorted_indices[pp+1]
    #if (first>second):
    #    tmp = first
    #    first = second
    #    second = tmp
    print "pair1:", first, second
    ### calculate weights for the exchange criterion
    w1 = log(lambdas[first]+eta*(enthalpy[first]-1*H))
    w2 = log(lambdas[first]+eta*(enthalpy[second]-1*H))
    w3 = log(lambdas[second]+eta*(enthalpy[first]-1*H))
    w4 = log(lambdas[second]+eta*(enthalpy[second]-1*H))
    weight = (w4-w3+w1-w2)/eta/kB
    ### generate a random number for the exchange criterion and calc its log
    LOGRANDNUM = log(random.random())
    ### reset the comparison flags
    compare1 = 0
    compare2 = 0
    if (weight>0):
        compare1 = 1
    if (weight>LOGRANDNUM):
        compare2 = 1
    ### exchange restart files if the exchange condition is satisfied
    if (compare1>0 or compare2>0):
        print "exchange1 accepted for pair", first, second, lambdas[first], lambdas[second], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
        os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[first],exchange,path,lambdas[second]))
        os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[second],exchange,path,lambdas[first]))
        ### update walkers
        tmp1=walker[first]
        tmp2=walker[second]
        walker[first]=tmp2
        walker[second]=tmp1
    else:
        print "exchange1 not accepted for pair", first, second, lambdas[first], lambdas[second], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
    ### choose another pair of replicas for an exchange attempt based on enthalpy order,
    ### but make sure this pair is different from the first pair
    if_different = 0
    while if_different<1:
        pp2 = random.random_integers(0,ll-2)
        third = aver_entalpy_sorted_indices[pp2]
        fourth = aver_entalpy_sorted_indices[pp2+1]
        if (third!=first and third!=second and third!=aver_entalpy_sorted_indices[pp-1]):
            if_different = 1
    print "pair2:", third, fourth
    ### calculate weights for the exchange criterion
    w1 = log(lambdas[third]+eta*(enthalpy[third]-1*H))
    w2 = log(lambdas[third]+eta*(enthalpy[fourth]-1*H))
    w3 = log(lambdas[fourth]+eta*(enthalpy[third]-1*H))
    w4 = log(lambdas[fourth]+eta*(enthalpy[fourth]-1*H))
    weight = (w4-w3+w1-w2)/eta/kB
    ### generate a random number for the exchange criterion and calc its log
    LOGRANDNUM = log(random.random())
    ### reset the comparison flags
    compare1 = 0
    compare2 = 0
    if (weight>0):
        compare1 = 1
    if (weight>LOGRANDNUM):
        compare2 = 1
    ### exchange restart files if the exchange condition is satisfied
    if (compare1>0 or compare2>0):
        print "exchange2 accepted for pair", third, fourth, lambdas[third], lambdas[fourth], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
        os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[third],exchange,path,lambdas[fourth]))
        os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[fourth],exchange,path,lambdas[third]))
        ### update walkers
        tmp1=walker[third]
        tmp2=walker[fourth]
        walker[third]=tmp2
        walker[fourth]=tmp1
    else:
        print "exchange2 not accepted for pair", third, fourth, lambdas[third], lambdas[fourth], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
    #print "walkers:", walker
    print "".join(["%d " % x for x in walker])
    sys.stdout.flush()
    lastwalker = open(path + "/lastwalker", "w")
    lastwalker.write("".join(["%d " % w for w in walker]))
    lastwalker.close()
    lastexchange = open(path + "/lastexchange", "w")
    lastexchange.write("%d" % (exchange+1))
    lastexchange.close()


@ -0,0 +1,25 @@
# LJ particles
variable T0 index 300.0
variable press index 0.0
variable lambda index 400.0
variable eta index -0.01
variable enthalpy index -30000.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "restart_file"
thermo 10
thermo_style custom step temp pe etotal press vol
velocity all create ${T0} 12427
timestep 1.0
fix fxnvt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxgREM all grem ${lambda} ${eta} ${enthalpy} fxnvt
thermo_modify press fxgREM_press
run 10000
write_data final_restart_file


@ -0,0 +1,12 @@
#!/bin/sh
NPROCS=1
if [ $# -gt 0 ]; then
  NPROCS=$1
fi
bash ./clean.sh
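# NOTE: the LAMMPS executable path below is an example; change it to point at your own build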
python ./double-re-short.py $NPROCS $HOME/compile/lammps-icms/src/lmp_omp in.gREM > total_output.$NPROCS
exit 0


@ -0,0 +1 @@
0 1 2 3 4 5


@ -0,0 +1,21 @@
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnpt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxgREM all grem 400 -.01 -30000 fxnpt
thermo_modify press fxgREM_press
run 1000
#write_data lj-out.data


@ -0,0 +1,20 @@
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all nvt temp ${T0} ${T0} 1000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
run 1000
#write_data lj-out.data

File diff suppressed because it is too large


@ -0,0 +1,176 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 0 10000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
thermo_modify press fxgREM_press
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.37943 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 -91.741776 11950.115
10 312.30124 -3182.2257 -2717.7013 -203.95075 11950.113
20 314.94567 -3186.456 -2717.9982 -265.56737 11950.108
30 312.229 -3183.7641 -2719.3472 -196.90499 11950.097
40 305.94068 -3180.7085 -2725.6449 -92.562221 11950.083
50 300.42281 -3176.5838 -2729.7277 10.896769 11950.066
60 299.16747 -3174.1939 -2729.205 50.094171 11950.05
70 301.65965 -3176.0918 -2727.396 0.096901939 11950.035
80 304.77876 -3178.2699 -2724.9346 -64.001022 11950.019
90 305.60598 -3178.9517 -2724.386 -93.672879 11950.003
100 303.8005 -3177.5156 -2725.6354 -74.516709 11949.985
110 300.86776 -3175.4773 -2727.9593 -34.22655 11949.965
120 298.70177 -3175.6488 -2731.3526 -19.014898 11949.944
130 298.39686 -3176.3792 -2732.5365 -21.293245 11949.923
140 300.00669 -3177.7032 -2731.466 -40.992937 11949.902
150 301.85665 -3178.1312 -2729.1423 -45.715505 11949.88
160 301.20597 -3177.3218 -2729.3007 -10.104082 11949.857
170 297.01134 -3172.7462 -2730.9643 99.298381 11949.833
180 291.279 -3168.3513 -2735.0958 219.47549 11949.812
190 287.13954 -3165.1287 -2738.0304 309.36947 11949.796
200 286.57735 -3165.2951 -2739.033 323.96954 11949.786
210 289.83941 -3167.8245 -2736.7103 271.77305 11949.783
220 296.12858 -3171.8054 -2731.3366 172.4056 11949.785
230 303.82424 -3176.3108 -2724.3952 56.711479 11949.791
240 309.95738 -3180.9789 -2719.9408 -40.992898 11949.798
250 312.0405 -3182.3473 -2718.2107 -57.591676 11949.805
260 309.65444 -3181.0587 -2720.4712 3.3540332 11949.81
270 304.40001 -3176.5798 -2723.8078 130.77028 11949.816
280 298.65985 -3174.1505 -2729.9166 237.63562 11949.825
290 294.78709 -3170.9701 -2732.4966 326.94924 11949.838
300 294.03216 -3169.9567 -2732.6062 349.85486 11949.859
310 296.44397 -3172.8519 -2731.914 284.80897 11949.886
320 301.41027 -3175.9697 -2727.6447 179.4647 11949.92
330 307.88911 -3181.2615 -2723.2998 24.702414 11949.957
340 314.73138 -3186.0047 -2717.8656 -132.6263 11949.995
350 320.55591 -3187.8509 -2711.0483 -245.88468 11950.031
360 323.50274 -3188.9994 -2707.8136 -314.73676 11950.062
370 321.61539 -3187.1233 -2708.7448 -293.17446 11950.086
380 314.37275 -3181.484 -2713.8784 -169.00448 11950.104
390 303.54884 -3174.1675 -2722.6616 12.923999 11950.119
400 293.40432 -3167.0348 -2730.6181 187.6624 11950.135
410 288.46351 -3165.273 -2736.2054 252.20051 11950.154
420 290.31387 -3168.604 -2736.7841 193.73816 11950.178
430 296.35519 -3173.09 -2732.2841 81.521847 11950.207
440 301.92973 -3175.4344 -2726.3368 -1.8329439 11950.237
450 303.76205 -3176.777 -2724.9539 -35.002096 11950.267
460 301.71619 -3174.2731 -2725.4932 14.977875 11950.296
470 298.92404 -3172.9921 -2728.3652 64.224747 11950.326
480 298.80164 -3172.5329 -2728.0881 82.781347 11950.358
490 302.71589 -3175.3703 -2725.1034 27.223049 11950.39
500 309.10665 -3179.3013 -2719.5285 -65.460658 11950.424
510 314.36408 -3183.2854 -2715.6927 -151.19245 11950.456
520 315.71154 -3183.5328 -2713.9358 -163.19151 11950.485
530 313.31886 -3182.2521 -2716.214 -125.5741 11950.511
540 309.81847 -3178.9358 -2718.1043 -55.55841 11950.534
550 308.29687 -3177.837 -2719.2688 -24.39371 11950.556
560 308.75927 -3176.3265 -2717.0705 0.93689833 11950.578
570 307.52811 -3175.8145 -2718.3897 35.502429 11950.6
580 301.75074 -3173.1208 -2724.2894 136.29625 11950.622
590 292.37743 -3165.5806 -2730.6913 319.75957 11950.648
600 283.57627 -3159.8617 -2738.0635 471.28045 11950.68
610 279.85172 -3157.4557 -2741.1975 530.72699 11950.722
620 283.40879 -3160.5911 -2739.042 455.28104 11950.775
630 292.53718 -3166.3125 -2731.1856 296.63465 11950.838
640 302.81112 -3173.3096 -2722.901 113.80844 11950.907
650 309.83321 -3179.3684 -2718.515 -26.499431 11950.978
660 312.1283 -3182.7335 -2718.4663 -89.363745 11951.049
670 311.16363 -3181.867 -2719.0347 -69.370989 11951.118
680 308.51041 -3180.6869 -2721.801 -25.972987 11951.186
690 304.64393 -3176.8751 -2723.7403 56.592367 11951.254
700 300.24456 -3175.4797 -2728.8887 112.34442 11951.323
710 296.35785 -3172.9705 -2732.1607 168.18009 11951.394
720 293.78145 -3172.1065 -2735.1289 182.81082 11951.468
730 293.25707 -3170.8715 -2734.6738 171.04236 11951.547
740 295.33219 -3172.9109 -2733.6266 91.351362 11951.629
750 299.69136 -3175.2574 -2729.4892 -16.266404 11951.713
760 305.2281 -3177.9836 -2723.9799 -137.30615 11951.796
770 310.59309 -3182.7053 -2720.7216 -272.72961 11951.877
780 314.65573 -3183.4212 -2715.3947 -341.231 11951.952
790 316.48606 -3185.44 -2714.691 -388.53602 11952.02
800 315.15897 -3186.846 -2718.0709 -384.28316 11952.08
810 310.43559 -3183.6648 -2721.9154 -282.61999 11952.133
820 303.22265 -3178.464 -2727.4433 -121.47565 11952.179
830 295.36843 -3175.4771 -2736.1389 33.066504 11952.223
840 288.69698 -3169.5813 -2740.1664 216.10697 11952.268
850 283.82649 -3165.7822 -2743.6118 359.56896 11952.317
860 280.04102 -3162.8228 -2746.283 475.61942 11952.374
870 277.10059 -3159.6212 -2747.4551 572.5432 11952.441
880 275.76549 -3158.2545 -2748.0743 616.43304 11952.52
890 276.82327 -3158.9703 -2747.2166 596.08147 11952.612
900 280.72135 -3162.0637 -2744.5119 506.33695 11952.716
910 287.1035 -3167.4388 -2740.3941 356.68688 11952.831
920 294.28041 -3171.6218 -2733.902 206.06394 11952.953
930 300.36009 -3173.9046 -2727.1418 88.047911 11953.08
940 303.86761 -3175.5599 -2723.5798 7.6846808 11953.209
950 304.42957 -3176.0831 -2723.2672 -25.15496 11953.339
960 303.13982 -3176.0534 -2725.1559 -28.715178 11953.467
970 302.30166 -3176.9758 -2727.325 -43.264668 11953.596
980 303.93331 -3178.9891 -2726.9114 -88.434034 11953.723
990 307.36223 -3180.7316 -2723.5535 -145.46208 11953.849
1000 310.09574 -3181.101 -2719.8571 -180.39125 11953.972
Loop time of 0.307225 on 1 procs for 1000 steps with 500 atoms
Performance: 281.227 ns/day, 0.085 hours/ns, 3254.944 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.25351 | 0.25351 | 0.25351 | 0.0 | 82.52
Bond | 7.1526e-05 | 7.1526e-05 | 7.1526e-05 | 0.0 | 0.02
Neigh | 0.0042093 | 0.0042093 | 0.0042093 | 0.0 | 1.37
Comm | 0.010211 | 0.010211 | 0.010211 | 0.0 | 3.32
Output | 0.0013611 | 0.0013611 | 0.0013611 | 0.0 | 0.44
Modify | 0.033891 | 0.033891 | 0.033891 | 0.0 | 11.03
Other | | 0.003969 | | | 1.29
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1610 ave 1610 max 1610 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 14765 ave 14765 max 14765 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14765
Ave neighs/atom = 29.53
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00


@ -0,0 +1,176 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 2 by 2 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 0 10000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
thermo_modify press fxgREM_press
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.34276 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 -91.741776 11950.115
10 312.30124 -3182.2257 -2717.7013 -203.95075 11950.113
20 314.94567 -3186.456 -2717.9982 -265.56737 11950.108
30 312.229 -3183.7641 -2719.3472 -196.90499 11950.097
40 305.94068 -3180.7085 -2725.6449 -92.562221 11950.083
50 300.42281 -3176.5838 -2729.7277 10.896769 11950.066
60 299.16747 -3174.1939 -2729.205 50.094171 11950.05
70 301.65965 -3176.0918 -2727.396 0.096901939 11950.035
80 304.77876 -3178.2699 -2724.9346 -64.001022 11950.019
90 305.60598 -3178.9517 -2724.386 -93.672879 11950.003
100 303.8005 -3177.5156 -2725.6354 -74.516709 11949.985
110 300.86776 -3175.4773 -2727.9593 -34.22655 11949.965
120 298.70177 -3175.6488 -2731.3526 -19.014898 11949.944
130 298.39686 -3176.3792 -2732.5365 -21.293245 11949.923
140 300.00669 -3177.7032 -2731.466 -40.992937 11949.902
150 301.85665 -3178.1312 -2729.1423 -45.715505 11949.88
160 301.20597 -3177.3218 -2729.3007 -10.104082 11949.857
170 297.01134 -3172.7462 -2730.9643 99.298381 11949.833
180 291.279 -3168.3513 -2735.0958 219.47549 11949.812
190 287.13954 -3165.1287 -2738.0304 309.36947 11949.796
200 286.57735 -3165.2951 -2739.033 323.96954 11949.786
210 289.83941 -3167.8245 -2736.7103 271.77305 11949.783
220 296.12858 -3171.8054 -2731.3366 172.4056 11949.785
230 303.82424 -3176.3108 -2724.3952 56.711479 11949.791
240 309.95738 -3180.9789 -2719.9408 -40.992898 11949.798
250 312.0405 -3182.3473 -2718.2107 -57.591676 11949.805
260 309.65444 -3181.0587 -2720.4712 3.3540332 11949.81
270 304.40001 -3176.5798 -2723.8078 130.77028 11949.816
280 298.65985 -3174.1505 -2729.9166 237.63562 11949.825
290 294.78709 -3170.9701 -2732.4966 326.94924 11949.838
300 294.03216 -3169.9567 -2732.6062 349.85486 11949.859
310 296.44397 -3172.8519 -2731.914 284.80897 11949.886
320 301.41027 -3175.9697 -2727.6447 179.4647 11949.92
330 307.88911 -3181.2615 -2723.2998 24.702414 11949.957
340 314.73138 -3186.0047 -2717.8656 -132.6263 11949.995
350 320.55591 -3187.8509 -2711.0483 -245.88468 11950.031
360 323.50274 -3188.9994 -2707.8136 -314.73676 11950.062
370 321.61539 -3187.1233 -2708.7448 -293.17446 11950.086
380 314.37275 -3181.484 -2713.8784 -169.00448 11950.104
390 303.54884 -3174.1675 -2722.6616 12.923999 11950.119
400 293.40432 -3167.0348 -2730.6181 187.6624 11950.135
410 288.46351 -3165.273 -2736.2054 252.20051 11950.154
420 290.31387 -3168.604 -2736.7841 193.73816 11950.178
430 296.35519 -3173.09 -2732.2841 81.521847 11950.207
440 301.92973 -3175.4344 -2726.3368 -1.8329439 11950.237
450 303.76205 -3176.777 -2724.9539 -35.002096 11950.267
460 301.71619 -3174.2731 -2725.4932 14.977875 11950.296
470 298.92404 -3172.9921 -2728.3652 64.224747 11950.326
480 298.80164 -3172.5329 -2728.0881 82.781347 11950.358
490 302.71589 -3175.3703 -2725.1034 27.223049 11950.39
500 309.10665 -3179.3013 -2719.5285 -65.460658 11950.424
510 314.36408 -3183.2854 -2715.6927 -151.19245 11950.456
520 315.71154 -3183.5328 -2713.9358 -163.19151 11950.485
530 313.31886 -3182.2521 -2716.214 -125.5741 11950.511
540 309.81847 -3178.9358 -2718.1043 -55.55841 11950.534
550 308.29687 -3177.837 -2719.2688 -24.39371 11950.556
560 308.75927 -3176.3265 -2717.0705 0.93689833 11950.578
570 307.52811 -3175.8145 -2718.3897 35.502429 11950.6
580 301.75074 -3173.1208 -2724.2894 136.29625 11950.622
590 292.37743 -3165.5806 -2730.6913 319.75957 11950.648
600 283.57627 -3159.8617 -2738.0635 471.28045 11950.68
610 279.85172 -3157.4557 -2741.1975 530.72699 11950.722
620 283.40879 -3160.5911 -2739.042 455.28104 11950.775
630 292.53718 -3166.3125 -2731.1856 296.63465 11950.838
640 302.81112 -3173.3096 -2722.901 113.80844 11950.907
650 309.83321 -3179.3684 -2718.515 -26.499431 11950.978
660 312.1283 -3182.7335 -2718.4663 -89.363745 11951.049
670 311.16363 -3181.867 -2719.0347 -69.370989 11951.118
680 308.51041 -3180.6869 -2721.801 -25.972987 11951.186
690 304.64393 -3176.8751 -2723.7403 56.592367 11951.254
700 300.24456 -3175.4797 -2728.8887 112.34442 11951.323
710 296.35785 -3172.9705 -2732.1607 168.18009 11951.394
720 293.78145 -3172.1065 -2735.1289 182.81082 11951.468
730 293.25707 -3170.8715 -2734.6738 171.04236 11951.547
740 295.33219 -3172.9109 -2733.6266 91.351362 11951.629
750 299.69136 -3175.2574 -2729.4892 -16.266404 11951.713
760 305.2281 -3177.9836 -2723.9799 -137.30615 11951.796
770 310.59309 -3182.7053 -2720.7216 -272.72961 11951.877
780 314.65573 -3183.4212 -2715.3947 -341.231 11951.952
790 316.48606 -3185.44 -2714.691 -388.53602 11952.02
800 315.15897 -3186.846 -2718.0709 -384.28316 11952.08
810 310.43559 -3183.6648 -2721.9154 -282.61999 11952.133
820 303.22265 -3178.464 -2727.4433 -121.47565 11952.179
830 295.36843 -3175.4771 -2736.1389 33.066504 11952.223
840 288.69698 -3169.5813 -2740.1664 216.10697 11952.268
850 283.82649 -3165.7822 -2743.6118 359.56896 11952.317
860 280.04102 -3162.8228 -2746.283 475.61942 11952.374
870 277.10059 -3159.6212 -2747.4551 572.5432 11952.441
880 275.76549 -3158.2545 -2748.0743 616.43304 11952.52
890 276.82327 -3158.9703 -2747.2166 596.08147 11952.612
900 280.72135 -3162.0637 -2744.5119 506.33695 11952.716
910 287.1035 -3167.4388 -2740.3941 356.68688 11952.831
920 294.28041 -3171.6218 -2733.902 206.06394 11952.953
930 300.36009 -3173.9046 -2727.1418 88.047911 11953.08
940 303.86761 -3175.5599 -2723.5798 7.6846808 11953.209
950 304.42957 -3176.0831 -2723.2672 -25.15496 11953.339
960 303.13982 -3176.0534 -2725.1559 -28.715178 11953.467
970 302.30166 -3176.9758 -2727.325 -43.264668 11953.596
980 303.93331 -3178.9891 -2726.9114 -88.434034 11953.723
990 307.36223 -3180.7316 -2723.5535 -145.46208 11953.849
1000 310.09574 -3181.101 -2719.8571 -180.39125 11953.972
Loop time of 0.154208 on 4 procs for 1000 steps with 500 atoms
Performance: 560.281 ns/day, 0.043 hours/ns, 6484.730 timesteps/s
98.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.072079 | 0.074846 | 0.079666 | 1.1 | 48.54
Bond | 5.7936e-05 | 6.634e-05 | 8.1062e-05 | 0.1 | 0.04
Neigh | 0.0010812 | 0.0012064 | 0.0012748 | 0.2 | 0.78
Comm | 0.032452 | 0.037544 | 0.04076 | 1.6 | 24.35
Output | 0.0018461 | 0.0020589 | 0.0026393 | 0.7 | 1.34
Modify | 0.032085 | 0.032688 | 0.033361 | 0.3 | 21.20
Other | | 0.005799 | | | 3.76
Nlocal: 125 ave 127 max 123 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 870.5 ave 882 max 862 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Neighs: 3691.25 ave 3807 max 3563 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 14765
Ave neighs/atom = 29.53
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00


@ -0,0 +1,173 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all nvt temp ${T0} ${T0} 1000.0
fix fxnvt all nvt temp 300 ${T0} 1000.0
fix fxnvt all nvt temp 300 300 1000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.37943 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 883.58369 11950.115
10 312.30121 -3182.2257 -2717.7013 793.47811 11950.115
20 314.94553 -3186.4559 -2717.9983 738.74091 11950.115
30 312.22861 -3183.7638 -2719.3474 797.47978 11950.115
40 305.93987 -3180.7079 -2725.6455 881.30806 11950.115
50 300.42132 -3176.5828 -2729.7288 967.92042 11950.115
60 299.16487 -3174.1921 -2729.2071 1004.247 11950.115
70 301.65565 -3176.0891 -2727.3992 962.58134 11950.115
80 304.77334 -3178.2663 -2724.939 907.8946 11950.115
90 305.59929 -3178.9472 -2724.3914 879.91629 11950.115
100 303.79263 -3177.5103 -2725.6418 892.67631 11950.115
110 300.85863 -3175.4711 -2727.9667 923.44924 11950.115
120 298.69083 -3175.6415 -2731.3615 931.87518 11950.115
130 298.38415 -3176.3706 -2732.5468 928.88286 11950.115
140 299.99129 -3177.6935 -2731.4792 914.36783 11950.115
150 301.83869 -3178.121 -2729.1588 915.01407 11950.115
160 301.18834 -3177.3117 -2729.3169 947.45228 11950.115
170 296.99406 -3172.7363 -2730.9801 1042.6928 11950.115
180 291.25952 -3168.3407 -2735.1142 1144.5436 11950.115
190 287.1178 -3164.9847 -2737.9187 1223.4003 11950.115
200 286.552 -3165.2799 -2739.0555 1235.6703 11950.115
210 289.81033 -3167.8062 -2736.7353 1194.6672 11950.115
220 296.09616 -3171.7847 -2731.3641 1115.8799 11950.115
230 303.79176 -3176.2893 -2724.4221 1024.6471 11950.115
240 309.9273 -3180.9591 -2719.9657 945.55045 11950.115
250 312.0159 -3182.3307 -2718.2306 934.36956 11950.115
260 309.63264 -3181.0452 -2720.4901 986.77385 11950.115
270 304.38172 -3176.568 -2723.8233 1097.264 11950.115
280 298.64188 -3174.1384 -2729.9313 1186.2239 11950.115
290 294.76686 -3170.9562 -2732.5128 1264.247 11950.115
300 294.00805 -3169.8091 -2732.4944 1287.4001 11950.115
310 296.41801 -3172.834 -2731.9347 1229.5624 11950.115
320 301.38477 -3175.9514 -2727.6644 1140.8664 11950.115
330 307.86584 -3181.2442 -2723.3171 1007.1545 11950.115
340 314.7103 -3185.9891 -2717.8814 871.74528 11950.115
350 320.53954 -3187.8385 -2711.0602 776.85994 11950.115
360 323.49505 -3188.9927 -2707.8184 716.58062 11950.115
370 321.62077 -3187.1246 -2708.7381 731.01909 11950.115
380 314.39049 -3181.4931 -2713.8611 831.21057 11950.115
390 303.57079 -3174.1804 -2722.6419 978.62645 11950.115
400 293.42165 -3167.0452 -2730.6027 1122.3558 11950.115
410 288.46838 -3165.4071 -2736.3322 1171.8087 11950.115
420 290.30766 -3168.5988 -2736.7882 1122.5413 11950.115
430 296.34338 -3173.0824 -2732.2941 1030.2769 11950.115
440 301.92394 -3175.4307 -2726.3417 964.25387 11950.115
450 303.76745 -3176.9122 -2725.0811 934.49176 11950.115
460 301.72985 -3174.2821 -2725.4818 979.07605 11950.115
470 298.93736 -3173.0014 -2728.3548 1020.0482 11950.115
480 298.80912 -3172.803 -2728.3471 1036.6531 11950.115
490 302.72217 -3175.3764 -2725.1001 997.71146 11950.115
500 309.11393 -3179.3088 -2719.5253 925.81108 11950.115
510 314.37612 -3183.2961 -2715.6855 856.23748 11950.115
520 315.72767 -3183.547 -2713.926 847.70543 11950.115
530 313.34173 -3182.2695 -2716.1974 877.30842 11950.115
540 309.84312 -3178.9553 -2718.0871 936.69244 11950.115
550 308.3251 -3177.8582 -2719.248 963.93032 11950.115
560 308.79192 -3176.4834 -2717.1788 989.67643 11950.115
570 307.57194 -3175.8464 -2718.3565 1021.0494 11950.115
580 301.8035 -3173.1582 -2724.2483 1102.4893 11950.115
590 292.43425 -3165.751 -2730.7772 1254.7815 11950.115
600 283.62905 -3159.8987 -2738.022 1381.0608 11950.115
610 279.90122 -3157.49 -2741.1581 1431.0028 11950.115
620 283.4582 -3160.756 -2739.1334 1367.7385 11950.115
630 292.58866 -3166.3469 -2731.1435 1241.1194 11950.115
640 302.86585 -3173.4778 -2722.9878 1089.7342 11950.115
650 309.89252 -3179.4078 -2718.4662 972.6359 11950.115
660 312.19165 -3182.7754 -2718.414 916.62037 11950.115
670 311.2287 -3181.9102 -2718.9811 933.79804 11950.115
680 308.57852 -3180.7312 -2721.7441 969.24936 11950.115
690 304.71609 -3176.9196 -2723.6775 1040.2699 11950.115
700 300.31995 -3175.5245 -2728.8213 1082.845 11950.115
710 296.43537 -3173.0166 -2732.0915 1127.4487 11950.115
720 293.86692 -3172.1582 -2735.0535 1135.0215 11950.115
730 293.35611 -3170.9335 -2734.5885 1122.9143 11950.115
740 295.44861 -3172.9862 -2733.5288 1050.995 11950.115
750 299.82732 -3175.3467 -2729.3763 958.31462 11950.115
760 305.37987 -3178.216 -2723.9866 854.1946 11950.115
770 310.75394 -3182.8127 -2720.5898 737.72668 11950.115
780 314.81395 -3183.7905 -2715.5286 679.74198 11950.115
790 316.63339 -3185.8028 -2714.8346 638.48871 11950.115
800 315.2894 -3186.9345 -2717.9654 641.53256 11950.115
810 310.54289 -3183.7383 -2721.8293 728.51241 11950.115
820 303.31439 -3178.7897 -2727.6326 864.45674 11950.115
830 295.46125 -3175.5387 -2736.0625 997.72969 11950.115
840 288.802 -3169.6502 -2740.0791 1160.6622 11950.115
850 283.94785 -3165.8605 -2743.5096 1289.55 11950.115
860 280.17501 -3163.0381 -2746.299 1392.8854 11950.115
870 277.2456 -3159.8429 -2747.4611 1481.3899 11950.115
880 275.93123 -3158.3584 -2747.9316 1523.5374 11950.115
890 277.0215 -3159.2285 -2747.18 1506.1558 11950.115
900 280.96237 -3162.483 -2744.5728 1428.4183 11950.115
910 287.37962 -3167.6183 -2740.1628 1303.0268 11950.115
920 294.56731 -3171.6765 -2733.5299 1177.748 11950.115
930 300.63273 -3174.0842 -2726.9158 1078.7393 11950.115
940 304.10943 -3175.9847 -2723.645 1007.7154 11950.115
950 304.64845 -3176.6263 -2723.4848 976.37917 11950.115
960 303.36343 -3176.4694 -2725.2393 971.40749 11950.115
970 302.57138 -3177.5541 -2727.5021 954.01115 11950.115
980 304.2593 -3179.2101 -2726.6475 919.74949 11950.115
990 307.69959 -3180.9631 -2723.2833 874.9594 11950.115
1000 310.3971 -3181.9675 -2720.2753 842.81184 11950.115
Loop time of 0.279202 on 1 procs for 1000 steps with 500 atoms
Performance: 309.453 ns/day, 0.078 hours/ns, 3581.633 timesteps/s
99.1% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.24196 | 0.24196 | 0.24196 | 0.0 | 86.66
Bond | 6.628e-05 | 6.628e-05 | 6.628e-05 | 0.0 | 0.02
Neigh | 0.0043204 | 0.0043204 | 0.0043204 | 0.0 | 1.55
Comm | 0.010242 | 0.010242 | 0.010242 | 0.0 | 3.67
Output | 0.0012252 | 0.0012252 | 0.0012252 | 0.0 | 0.44
Modify | 0.017572 | 0.017572 | 0.017572 | 0.0 | 6.29
Other | | 0.003811 | | | 1.37
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1610 ave 1610 max 1610 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 14767 ave 14767 max 14767 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14767
Ave neighs/atom = 29.534
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00


@ -0,0 +1,173 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 2 by 2 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all nvt temp ${T0} ${T0} 1000.0
fix fxnvt all nvt temp 300 ${T0} 1000.0
fix fxnvt all nvt temp 300 300 1000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.34276 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 883.58369 11950.115
10 312.30121 -3182.2257 -2717.7013 793.47811 11950.115
20 314.94553 -3186.4559 -2717.9983 738.74091 11950.115
30 312.22861 -3183.7638 -2719.3474 797.47978 11950.115
40 305.93987 -3180.7079 -2725.6455 881.30806 11950.115
50 300.42132 -3176.5828 -2729.7288 967.92042 11950.115
60 299.16487 -3174.1921 -2729.2071 1004.247 11950.115
70 301.65565 -3176.0891 -2727.3992 962.58134 11950.115
80 304.77334 -3178.2663 -2724.939 907.8946 11950.115
90 305.59929 -3178.9472 -2724.3914 879.91629 11950.115
100 303.79263 -3177.5103 -2725.6418 892.67631 11950.115
110 300.85863 -3175.4711 -2727.9667 923.44924 11950.115
120 298.69083 -3175.6415 -2731.3615 931.87518 11950.115
130 298.38415 -3176.3706 -2732.5468 928.88286 11950.115
140 299.99129 -3177.6935 -2731.4792 914.36783 11950.115
150 301.83869 -3178.121 -2729.1588 915.01407 11950.115
160 301.18834 -3177.3117 -2729.3169 947.45228 11950.115
170 296.99406 -3172.7363 -2730.9801 1042.6928 11950.115
180 291.25952 -3168.3407 -2735.1142 1144.5436 11950.115
190 287.1178 -3164.9847 -2737.9187 1223.4003 11950.115
200 286.552 -3165.2799 -2739.0555 1235.6703 11950.115
210 289.81033 -3167.8062 -2736.7353 1194.6672 11950.115
220 296.09616 -3171.7847 -2731.3641 1115.8799 11950.115
230 303.79176 -3176.2893 -2724.4221 1024.6471 11950.115
240 309.9273 -3180.9591 -2719.9657 945.55045 11950.115
250 312.0159 -3182.3307 -2718.2306 934.36956 11950.115
260 309.63264 -3181.0452 -2720.4901 986.77385 11950.115
270 304.38172 -3176.568 -2723.8233 1097.264 11950.115
280 298.64188 -3174.1384 -2729.9313 1186.2239 11950.115
290 294.76686 -3170.9562 -2732.5128 1264.247 11950.115
300 294.00805 -3169.8091 -2732.4944 1287.4001 11950.115
310 296.41801 -3172.834 -2731.9347 1229.5624 11950.115
320 301.38477 -3175.9514 -2727.6644 1140.8664 11950.115
330 307.86584 -3181.2442 -2723.3171 1007.1545 11950.115
340 314.7103 -3185.9891 -2717.8814 871.74528 11950.115
350 320.53954 -3187.8385 -2711.0602 776.85994 11950.115
360 323.49505 -3188.9927 -2707.8184 716.58062 11950.115
370 321.62077 -3187.1246 -2708.7381 731.01909 11950.115
380 314.39049 -3181.4931 -2713.8611 831.21057 11950.115
390 303.57079 -3174.1804 -2722.6419 978.62645 11950.115
400 293.42165 -3167.0452 -2730.6027 1122.3558 11950.115
410 288.46838 -3165.4071 -2736.3322 1171.8087 11950.115
420 290.30766 -3168.5988 -2736.7882 1122.5413 11950.115
430 296.34338 -3173.0824 -2732.2941 1030.2769 11950.115
440 301.92394 -3175.4307 -2726.3417 964.25387 11950.115
450 303.76745 -3176.9122 -2725.0811 934.49176 11950.115
460 301.72985 -3174.2821 -2725.4818 979.07605 11950.115
470 298.93736 -3173.0014 -2728.3548 1020.0482 11950.115
480 298.80912 -3172.803 -2728.3471 1036.6531 11950.115
490 302.72217 -3175.3764 -2725.1001 997.71146 11950.115
500 309.11393 -3179.3088 -2719.5253 925.81108 11950.115
510 314.37612 -3183.2961 -2715.6855 856.23748 11950.115
520 315.72767 -3183.547 -2713.926 847.70543 11950.115
530 313.34173 -3182.2695 -2716.1974 877.30842 11950.115
540 309.84312 -3178.9553 -2718.0871 936.69244 11950.115
550 308.3251 -3177.8582 -2719.248 963.93032 11950.115
560 308.79192 -3176.4834 -2717.1788 989.67643 11950.115
570 307.57194 -3175.8464 -2718.3565 1021.0494 11950.115
580 301.8035 -3173.1582 -2724.2483 1102.4893 11950.115
590 292.43425 -3165.751 -2730.7772 1254.7815 11950.115
600 283.62905 -3159.8987 -2738.022 1381.0608 11950.115
610 279.90122 -3157.49 -2741.1581 1431.0028 11950.115
620 283.4582 -3160.756 -2739.1334 1367.7385 11950.115
630 292.58866 -3166.3469 -2731.1435 1241.1194 11950.115
640 302.86585 -3173.4778 -2722.9878 1089.7342 11950.115
650 309.89252 -3179.4078 -2718.4662 972.6359 11950.115
660 312.19165 -3182.7754 -2718.414 916.62037 11950.115
670 311.2287 -3181.9102 -2718.9811 933.79804 11950.115
680 308.57852 -3180.7312 -2721.7441 969.24936 11950.115
690 304.71609 -3176.9196 -2723.6775 1040.2699 11950.115
700 300.31995 -3175.5245 -2728.8213 1082.845 11950.115
710 296.43537 -3173.0166 -2732.0915 1127.4487 11950.115
720 293.86692 -3172.1582 -2735.0535 1135.0215 11950.115
730 293.35611 -3170.9335 -2734.5885 1122.9143 11950.115
740 295.44861 -3172.9862 -2733.5288 1050.995 11950.115
750 299.82732 -3175.3467 -2729.3763 958.31462 11950.115
760 305.37987 -3178.216 -2723.9866 854.1946 11950.115
770 310.75394 -3182.8127 -2720.5898 737.72668 11950.115
780 314.81395 -3183.7905 -2715.5286 679.74198 11950.115
790 316.63339 -3185.8028 -2714.8346 638.48871 11950.115
800 315.2894 -3186.9345 -2717.9654 641.53256 11950.115
810 310.54289 -3183.7383 -2721.8293 728.51241 11950.115
820 303.31439 -3178.7897 -2727.6326 864.45674 11950.115
830 295.46125 -3175.5387 -2736.0625 997.72969 11950.115
840 288.802 -3169.6502 -2740.0791 1160.6622 11950.115
850 283.94785 -3165.8605 -2743.5096 1289.55 11950.115
860 280.17501 -3163.0381 -2746.299 1392.8854 11950.115
870 277.2456 -3159.8429 -2747.4611 1481.3899 11950.115
880 275.93123 -3158.3584 -2747.9316 1523.5374 11950.115
890 277.0215 -3159.2285 -2747.18 1506.1558 11950.115
900 280.96237 -3162.483 -2744.5728 1428.4183 11950.115
910 287.37962 -3167.6183 -2740.1628 1303.0268 11950.115
920 294.56731 -3171.6765 -2733.5299 1177.748 11950.115
930 300.63273 -3174.0842 -2726.9158 1078.7393 11950.115
940 304.10943 -3175.9847 -2723.645 1007.7154 11950.115
950 304.64845 -3176.6263 -2723.4848 976.37917 11950.115
960 303.36343 -3176.4694 -2725.2393 971.40749 11950.115
970 302.57138 -3177.5541 -2727.5021 954.01115 11950.115
980 304.2593 -3179.2101 -2726.6475 919.74949 11950.115
990 307.69959 -3180.9631 -2723.2833 874.9594 11950.115
1000 310.3971 -3181.9675 -2720.2753 842.81184 11950.115
Loop time of 0.133894 on 4 procs for 1000 steps with 500 atoms
Performance: 645.285 ns/day, 0.037 hours/ns, 7468.580 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.065271 | 0.071043 | 0.07818 | 1.9 | 53.06
Bond | 5.6505e-05 | 6.5565e-05 | 7.7724e-05 | 0.1 | 0.05
Neigh | 0.0011396 | 0.0012607 | 0.0013669 | 0.2 | 0.94
Comm | 0.033866 | 0.040269 | 0.045386 | 2.6 | 30.08
Output | 0.0019252 | 0.0020776 | 0.0023642 | 0.4 | 1.55
Modify | 0.012141 | 0.013629 | 0.01486 | 0.9 | 10.18
Other | | 0.005549 | | | 4.14
Nlocal: 125 ave 127 max 123 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 871.25 ave 882 max 863 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 3691.75 ave 3808 max 3563 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 14767
Ave neighs/atom = 29.534
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00
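
For post-processing, a thermo block like the one above can be pulled into Python. A minimal sketch, assuming the log was saved as log.lammps and uses the step/temp/pe/etotal/press/vol custom style from this run:

# sketch: collect the numeric thermo rows (Step Temp PotEng TotEng Press Volume)
# from a LAMMPS log file; "log.lammps" is an assumed file name
rows = []
with open("log.lammps") as f:
    capture = False
    for line in f:
        cols = line.split()
        if cols and cols[0] == "Step":     # header that opens a thermo block
            capture = True
            continue
        if line.startswith("Loop time"):   # summary line that closes the block
            capture = False
        if capture and cols:
            try:
                rows.append([float(x) for x in cols])
            except ValueError:
                pass                       # skip interleaved warnings or messages
print("parsed %d thermo rows" % len(rows))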

(8 file diffs suppressed because they are too large)

@@ -0,0 +1,31 @@
# LJ particles
variable lambda world 900 910 920 930
variable rep world 0 1 2 3
#variable walker world 0 1 3 2
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
# LJ particles
log ${rep}/log.lammps.${rep}
print "This is replica: ${rep}"
read_data ${rep}/lj.data
#dump dump all xyz 1000 ${rep}/dump.xyz
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnpt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxgREM all grem ${lambda} -.03 -30000 fxnpt
thermo_modify press fxgREM_press
temper/grem 10000 100 ${lambda} fxgREM fxnpt 10294 98392 #${walker}
#write_data ${rep}/lj-out.data
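
This input assumes four replica directories (0/ to 3/) and a 4-partition run, e.g. mpirun -np 4 lmp -partition 4x1 -in in.gREM. A hedged sketch of the same launch through the Python wrapper (run_grem.py and in.gREM are assumed names, and it presumes the library interface accepts -partition just as the lmp binary does):

# run under 4 MPI ranks: mpirun -np 4 python run_grem.py
from lammps import lammps
lmp = lammps(cmdargs=["-partition", "4x1"])  # one gREM replica per partition
lmp.file("in.gREM")                          # the replica-exchange input above
lmp.close()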

@@ -103,14 +103,14 @@ fix integration_fix tlsph smd/integrate_tlsph
####################################################################################################
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
-compute dt_atom all smd/tlsph_dt
-compute p all smd/plastic_strain
-compute epsdot all smd/plastic_strain_rate
-compute S all smd/tlsph_stress # Cauchy stress tensor
-compute D all smd/tlsph_strain_rate
-compute E all smd/tlsph_strain
-compute nn all smd/tlsph_num_neighs # number of neighbors for each particle
-compute shape all smd/tlsph_shape
+compute dt_atom all smd/tlsph/dt
+compute p all smd/plastic/strain
+compute epsdot all smd/plastic/strain/rate
+compute S all smd/tlsph/stress # Cauchy stress tensor
+compute D all smd/tlsph/strain/rate
+compute E all smd/tlsph/strain
+compute nn all smd/tlsph/num/neighs # number of neighbors for each particle
+compute shape all smd/tlsph/shape
compute damage all smd/damage
dump dump_id all custom 100 dump.LAMMPS id type x y z &
c_S[1] c_S[2] c_S[3] c_S[4] c_S[5] c_S[6] c_S[7] c_nn c_p &
@@ -124,11 +124,11 @@ fix integration_fix_solids solids smd/integrate_tlsph
####################################################################################################
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
-compute eint all smd/internal_energy
-compute contact_radius all smd/contact_radius
-compute S solids smd/tlsph_stress
-compute nn water smd/ulsph_num_neighs
-compute epl solids smd/plastic_strain
+compute eint all smd/internal/energy
+compute contact_radius all smd/contact/radius
+compute S solids smd/tlsph/stress
+compute nn water smd/ulsph/num/neighs
+compute epl solids smd/plastic/strain
compute vol all smd/volume
compute rho all smd/rho

@@ -98,9 +98,9 @@ fix integration_fix all smd/integrate_ulsph adjust_radius 1.01 10 15
####################################################################################################
variable dumpFreq equal 100
compute rho all smd/rho
-compute nn all smd/ulsph_num_neighs # number of neighbors for each particle
-compute contact_radius all smd/contact_radius
-compute surface_coords surface smd/triangle_vertices
+compute nn all smd/ulsph/num/neighs # number of neighbors for each particle
+compute contact_radius all smd/contact/radius
+compute surface_coords surface smd/triangle/vertices
dump dump_id water custom ${dumpFreq} dump.LAMMPS id type x y z vx vy vz &
@@ -116,7 +116,7 @@ dump_modify surf_dump first yes
####################################################################################################
# STATUS OUTPUT
####################################################################################################
-compute eint all smd/internal_energy
+compute eint all smd/internal/energy
compute alleint all reduce sum c_eint
variable etot equal pe+ke+c_alleint+f_gfix # total energy of the system
thermo 100

@@ -88,11 +88,11 @@ fix integration_fix tlsph smd/integrate_tlsph
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
variable dumpFreq equal 30
-compute S all smd/tlsph_stress # Cauchy stress tensor
-compute nn all smd/tlsph_num_neighs # number of neighbors for each particle
-compute cr all smd/contact_radius
-compute p all smd/plastic_strain
-compute eint all smd/internal_energy
+compute S all smd/tlsph/stress # Cauchy stress tensor
+compute nn all smd/tlsph/num/neighs # number of neighbors for each particle
+compute cr all smd/contact/radius
+compute p all smd/plastic/strain
+compute eint all smd/internal/energy
compute alleint all reduce sum c_eint
variable etot equal c_alleint+ke+pe

@@ -49,7 +49,7 @@ variable vol_one equal ${l0}^2 # volume of one particle -- assuming unit
variable skin equal ${h} # Verlet list range
neighbor ${skin} bin
set group all volume ${vol_one}
-set group all smd_mass_density ${rho}
+set group all smd/mass/density ${rho}
set group all diameter ${h} # set SPH kernel radius
####################################################################################################
@@ -83,9 +83,9 @@ fix integration_fix tlsph smd/integrate_tlsph
####################################################################################################
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
-compute S all smd/tlsph_stress # Cauchy stress tensor
-compute E all smd/tlsph_strain # Green-Lagrange strain tensor
-compute nn all smd/tlsph_num_neighs # number of neighbors for each particle
+compute S all smd/tlsph/stress # Cauchy stress tensor
+compute E all smd/tlsph/strain # Green-Lagrange strain tensor
+compute nn all smd/tlsph/num/neighs # number of neighbors for each particle
dump dump_id all custom 10 dump.LAMMPS id type x y z vx vy vz &
c_S[1] c_S[2] c_S[4] c_nn &
c_E[1] c_E[2] c_E[4] &
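
The hunks above all follow one rename pattern: SMD compute and set keywords move from underscore-separated to slash-separated names, while fix styles such as smd/integrate_tlsph keep their underscores. A hedged migration sketch for old input scripts, using only the renames visible in these diffs (a blanket underscore-to-slash substitution would wrongly touch the fix styles):

# usage (assumed): python migrate_smd_names.py in.script1 in.script2 ...
import sys

# only the renames that appear in the diffs above
RENAMES = {
    "smd/tlsph_dt": "smd/tlsph/dt",
    "smd/plastic_strain_rate": "smd/plastic/strain/rate",
    "smd/plastic_strain": "smd/plastic/strain",
    "smd/tlsph_stress": "smd/tlsph/stress",
    "smd/tlsph_strain_rate": "smd/tlsph/strain/rate",
    "smd/tlsph_strain": "smd/tlsph/strain",
    "smd/tlsph_num_neighs": "smd/tlsph/num/neighs",
    "smd/tlsph_shape": "smd/tlsph/shape",
    "smd/internal_energy": "smd/internal/energy",
    "smd/contact_radius": "smd/contact/radius",
    "smd/ulsph_num_neighs": "smd/ulsph/num/neighs",
    "smd/triangle_vertices": "smd/triangle/vertices",
    "smd_mass_density": "smd/mass/density",
}

def migrate(text):
    # longest keys first, so smd/plastic_strain_rate is not clipped by smd/plastic_strain
    for old in sorted(RENAMES, key=len, reverse=True):
        text = text.replace(old, RENAMES[old])
    return text

for path in sys.argv[1:]:
    with open(path) as f:
        src = f.read()
    with open(path, "w") as f:
        f.write(migrate(src))
    print("updated", path)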

@@ -0,0 +1,28 @@
# Compile LAMMPS as shared library
git clone https://github.com/lammps/lammps.git
cd lammps/src
python Make.py -m mpi -png -s ffmpeg exceptions -a file
make -j 4 mode=shlib auto
cd ../..
# Install Python package
virtualenv testing
source testing/bin/activate
(testing) cd lammps/python
(testing) python install.py
(testing) pip install jupyter matplotlib mpi4py
(testing) cd ../../examples
# Launch jupyter and work inside the browser
(testing) jupyter notebook
# Use Ctrl+c to stop jupyter
# finally exit the virtualenv
(testing) deactivate
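
A minimal post-install check, run inside the virtualenv, that the shared library and Python package built above actually load (check_install.py is an assumed file name):

# check_install.py: confirm the lammps Python module can drive liblammps
from lammps import lammps
lmp = lammps()            # loads the shared library via ctypes
lmp.command("units lj")   # issue a trivial command to prove the instance works
print("LAMMPS shared library loaded OK")
lmp.close()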

@@ -0,0 +1,34 @@
Comment line
4 atoms
0 bonds
0 angles
1 dihedrals
0 impropers
1 atom types
0 bond types
0 angle types
1 dihedral types
0 improper types
-5.0 5.0 xlo xhi
-5.0 5.0 ylo yhi
-5.0 5.0 zlo zhi
0.0 0.0 0.0 xy xz yz
Atoms # molecular
1 1 1 -1.00000 1.00000 0.00000
2 1 1 -0.50000 0.00000 0.00000
3 1 1 0.50000 0.00000 0.00000
4 1 1 1.00000 1.00000 0.00000
Dihedral Coeffs
1 80.0 1 2
Dihedrals
1 1 1 2 3 4
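
This 4-atom file can be exercised directly through the library interface. A sketch under stated assumptions: the file is saved as dihedral.data, and the coeffs 80.0 1 2 are read as K, d, n for dihedral_style harmonic; the original test may use a different style:

# print the dihedral energy of the 4-atom configuration above
from lammps import lammps
lmp = lammps()
for cmd in [
    "units lj",
    "atom_style molecular",
    "pair_style zero 2.0",      # no nonbonded forces needed for this check
    "dihedral_style harmonic",  # assumption: matches the '80.0 1 2' coeffs
    "read_data dihedral.data",  # assumed file name for the data file above
    "pair_coeff * *",
    "mass 1 1.0",               # the data file sets no masses
    "variable ed equal edihed",
    "run 0",
]:
    lmp.command(cmd)
print("dihedral energy:", lmp.extract_variable("ed", None, 0))
lmp.close()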

(2 file diffs suppressed because one or more lines are too long)

@@ -0,0 +1,4 @@
from mpi4py import MPI
comm=MPI.COMM_WORLD
print("Hello from rank %d of %d" % (comm.rank, comm.size))

@@ -0,0 +1,33 @@
# 3d Lennard-Jones melt
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 10 0 10 0 10
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 3.0 87287
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify every 20 delay 0 check no
fix 1 all nve
#dump id all atom 50 dump.melt
#dump 2 all image 25 image.*.jpg type type &
# axes yes 0.8 0.02 view 60 -30
#dump_modify 2 pad 3
#dump 3 all movie 25 movie.mpg type type &
# axes yes 0.8 0.02 view 60 -30
#dump_modify 3 pad 3
thermo 50
run 250

@@ -0,0 +1,10 @@
from mpi4py import MPI
from lammps import PyLammps
L = PyLammps()
L.file('in.melt')
if MPI.COMM_WORLD.rank == 0:
pe = L.eval("pe")
print("Potential Energy:", pe)

src/.gitignore

@@ -18,6 +18,8 @@
/*_tally.cpp
/*_rx.h
/*_rx.cpp
+/*_ssa.h
+/*_ssa.cpp
/kokkos.cpp
/kokkos.h
@@ -205,6 +207,8 @@
/compute_pe_tally.h
/compute_plasticity_atom.cpp
/compute_plasticity_atom.h
+/compute_pressure_grem.cpp
+/compute_pressure_grem.h
/compute_rigid_local.cpp
/compute_rigid_local.h
/compute_spec_atom.cpp
@@ -343,6 +347,8 @@
/fix_gle.h
/fix_gpu.cpp
/fix_gpu.h
+/fix_grem.cpp
+/fix_grem.h
/fix_imd.cpp
/fix_imd.h
/fix_ipi.cpp
@@ -775,6 +781,8 @@
/pair_tersoff.h
/pair_tersoff_mod.cpp
/pair_tersoff_mod.h
+/pair_tersoff_mod_c.cpp
+/pair_tersoff_mod_c.h
/pair_tersoff_table.cpp
/pair_tersoff_table.h
/pair_tersoff_zbl.cpp
@@ -873,6 +881,8 @@
/tad.h
/temper.cpp
/temper.h
+/temper_grem.cpp
+/temper_grem.h
/thr_data.cpp
/thr_data.h
/verlet_split.cpp

@@ -105,11 +105,16 @@ action modify_kokkos.cpp
action modify_kokkos.h
action neigh_bond_kokkos.cpp
action neigh_bond_kokkos.h
-action neigh_full_kokkos.h
action neigh_list_kokkos.cpp
action neigh_list_kokkos.h
action neighbor_kokkos.cpp
action neighbor_kokkos.h
+action npair_copy_kokkos.cpp
+action npair_copy_kokkos.h
+action npair_kokkos.cpp
+action npair_kokkos.h
+action nbin_kokkos.cpp
+action nbin_kokkos.h
action math_special_kokkos.cpp
action math_special_kokkos.h
action pair_buck_coul_cut_kokkos.cpp

@@ -125,12 +125,10 @@ void FixQEqReaxKokkos<DeviceType>::init()
neighbor->requests[irequest]->pair = 0;
neighbor->requests[irequest]->full = 1;
neighbor->requests[irequest]->half = 0;
-neighbor->requests[irequest]->full_cluster = 0;
} else { //if (neighflag == HALF || neighflag == HALFTHREAD)
neighbor->requests[irequest]->fix = 1;
neighbor->requests[irequest]->full = 0;
neighbor->requests[irequest]->half = 1;
-neighbor->requests[irequest]->full_cluster = 0;
neighbor->requests[irequest]->ghost = 1;
}
}

@@ -168,7 +168,6 @@ void KokkosLMP::accelerator(int narg, char **arg)
else
neighflag = HALF;
} else if (strcmp(arg[iarg+1],"n2") == 0) neighflag = N2;
-else if (strcmp(arg[iarg+1],"full/cluster") == 0) neighflag = FULLCLUSTER;
else error->all(FLERR,"Illegal package kokkos command");
iarg += 2;
} else if (strcmp(arg[iarg],"binsize") == 0) {
@@ -232,20 +231,6 @@
called by Finish
------------------------------------------------------------------------- */
-int KokkosLMP::neigh_list_kokkos(int m)
-{
-NeighborKokkos *nk = (NeighborKokkos *) neighbor;
-if (nk->lists_host[m] && nk->lists_host[m]->d_numneigh.dimension_0())
-return 1;
-if (nk->lists_device[m] && nk->lists_device[m]->d_numneigh.dimension_0())
-return 1;
-return 0;
-}
-/* ----------------------------------------------------------------------
-called by Finish
-------------------------------------------------------------------------- */
int KokkosLMP::neigh_count(int m)
{
int inum;
@@ -255,28 +240,30 @@ int KokkosLMP::neigh_count(int m)
ArrayTypes<LMPHostType>::t_int_1d h_numneigh;
NeighborKokkos *nk = (NeighborKokkos *) neighbor;
-if (nk->lists_host[m]) {
-inum = nk->lists_host[m]->inum;
+if (nk->lists[m]->execution_space == Host) {
+NeighListKokkos<LMPHostType>* nlistKK = (NeighListKokkos<LMPHostType>*) nk->lists[m];
+inum = nlistKK->inum;
#ifndef KOKKOS_USE_CUDA_UVM
-h_ilist = Kokkos::create_mirror_view(nk->lists_host[m]->d_ilist);
-h_numneigh = Kokkos::create_mirror_view(nk->lists_host[m]->d_numneigh);
+h_ilist = Kokkos::create_mirror_view(nlistKK->d_ilist);
+h_numneigh = Kokkos::create_mirror_view(nlistKK->d_numneigh);
#else
-h_ilist = nk->lists_host[m]->d_ilist;
-h_numneigh = nk->lists_host[m]->d_numneigh;
+h_ilist = nlistKK->d_ilist;
+h_numneigh = nlistKK->d_numneigh;
#endif
-Kokkos::deep_copy(h_ilist,nk->lists_host[m]->d_ilist);
-Kokkos::deep_copy(h_numneigh,nk->lists_host[m]->d_numneigh);
-} else if (nk->lists_device[m]) {
-inum = nk->lists_device[m]->inum;
+Kokkos::deep_copy(h_ilist,nlistKK->d_ilist);
+Kokkos::deep_copy(h_numneigh,nlistKK->d_numneigh);
+} else if (nk->lists[m]->execution_space == Device) {
+NeighListKokkos<LMPDeviceType>* nlistKK = (NeighListKokkos<LMPDeviceType>*) nk->lists[m];
+inum = nlistKK->inum;
#ifndef KOKKOS_USE_CUDA_UVM
-h_ilist = Kokkos::create_mirror_view(nk->lists_device[m]->d_ilist);
-h_numneigh = Kokkos::create_mirror_view(nk->lists_device[m]->d_numneigh);
+h_ilist = Kokkos::create_mirror_view(nlistKK->d_ilist);
+h_numneigh = Kokkos::create_mirror_view(nlistKK->d_numneigh);
#else
-h_ilist = nk->lists_device[m]->d_ilist;
-h_numneigh = nk->lists_device[m]->d_numneigh;
+h_ilist = nlistKK->d_ilist;
+h_numneigh = nlistKK->d_numneigh;
#endif
-Kokkos::deep_copy(h_ilist,nk->lists_device[m]->d_ilist);
-Kokkos::deep_copy(h_numneigh,nk->lists_device[m]->d_numneigh);
+Kokkos::deep_copy(h_ilist,nlistKK->d_ilist);
+Kokkos::deep_copy(h_numneigh,nlistKK->d_numneigh);
}
for (int i = 0; i < inum; i++) nneigh += h_numneigh[h_ilist[i]];
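
The net effect of this hunk: the old code walked two parallel arrays (lists_host and lists_device), while the refactored version keeps a single lists array and branches on its execution_space, casting to the matching NeighListKokkos<LMPHostType> or NeighListKokkos<LMPDeviceType> before mirroring d_ilist and d_numneigh to the host; the per-atom neighbor counting loop itself is unchanged.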

@@ -34,7 +34,6 @@ class KokkosLMP : protected Pointers {
KokkosLMP(class LAMMPS *, int, char **);
~KokkosLMP();
void accelerator(int, char **);
-int neigh_list_kokkos(int);
int neigh_count(int);
private:
static void my_signal_handler(int);

Some files were not shown because too many files have changed in this diff.