Compare commits

...

147 Commits

Author SHA1 Message Date
59d100ab57 final prep for 22Nov patch 2016-11-22 09:23:02 -07:00
61e71d23ed Merge pull request #288 from akohlmey/moltemplate-1.40
update bundled version of moltemplate to v1.40
2016-11-22 08:51:11 -07:00
b6f2f0e6e9 Merge pull request #287 from rbberger/pylammps/docs
Created PyLammps documentation
2016-11-22 08:50:29 -07:00
ff0441ac16 Merge pull request #286 from akohlmey/small-fixes-and-updates
Collected small fixes and updates
2016-11-22 08:49:46 -07:00
41907d3110 Merge pull request #285 from akohlmey/fix-ipi-update
update for fix ipi from michele ceriotti
2016-11-22 08:48:27 -07:00
b95f255af4 small changes to temper/grem commands 2016-11-22 08:47:44 -07:00
d7b542101a Merge pull request #283 from akohlmey/grem-feature
gREM generalized replica exchange feature for USER-MISC
2016-11-22 08:15:35 -07:00
0ffa50f8e8 tweaked author syntax 2016-11-22 08:15:13 -07:00
7893215964 small comment/whitespace tweak 2016-11-21 12:46:43 -05:00
3dff9f2018 removed extra file 2016-11-21 12:05:30 -05:00
dab232c542 modified temper_grem name to fit conventions, re-ran example to match 2016-11-21 12:02:17 -05:00
9e9d9d5aa5 update bundled version of moltemplate to v1.40 2016-11-21 11:34:42 -05:00
c982b174a2 Merge pull request #49 from epfl-cosmo/fix-ipi
i-PI interface fix
2016-11-19 19:36:13 -05:00
87a5a35bad A tiny bugfix for the reset flag, and a brief explanation of the changes 2016-11-20 00:44:23 +01:00
fd174ce2b1 Merge branch 'fix-ipi-update' of https://github.com/akohlmey/lammps into fix-ipi 2016-11-20 00:04:56 +01:00
b11f376a4f Merge branch 'master' of github.com:lammps/lammps 2016-11-19 23:25:51 +01:00
230b29eae6 correct accelerator flags for dpd styles in pair style overview 2016-11-19 11:47:12 -05:00
2383c31f15 Created PyLammps documentation
Based on material presented during MD Workshop at Temple University in
August 2016.
2016-11-18 23:58:57 -07:00
e175a18bdb be more thorough in initializing optional data in pair style dpd/fdt/energy 2016-11-18 16:18:47 -05:00
a5bde82e37 update .gitignore for recent addition 2016-11-18 15:38:11 -05:00
d787afcca9 also remove generated html files with 'make clean' in docs folder 2016-11-18 15:37:49 -05:00
176cde8ed3 minor cleanups 2016-11-18 15:36:38 -05:00
2862c20815 Merge branch 'master' into grem-feature 2016-11-18 14:51:46 -05:00
78e018829f Merge branch 'grem-feature' of https://github.com/dstelter92/lammps into grem-feature 2016-11-18 14:48:47 -05:00
c78914e7b3 update for fix ipi from michele ceriotti 2016-11-18 09:21:50 -05:00
635f3ce128 synchronize USER-SMD examples with code 2016-11-18 08:09:24 -05:00
81f68e06fd Merge branch 'master' into doc-updates 2016-11-17 20:44:07 -05:00
4b51719e67 new 17Nov16 patch and stable 2016-11-17 16:51:35 -07:00
25d7be5f3d compute pressure doc change 2016-11-17 16:11:30 -07:00
2a026c9ad8 revised temper_grem example, better file management 2016-11-17 12:53:25 -05:00
4a3091f844 modified temper_grem example with more exchanges 2016-11-17 11:24:29 -05:00
74c0e4dd5c Merge pull request #278 from akohlmey/pair-agni
Implementation of the AGNI manybody potential
2016-11-17 09:04:31 -07:00
073e8a0524 Merge pull request #276 from akohlmey/doc-updates
Small bugfixes and updates
2016-11-17 09:02:27 -07:00
5320bbf585 Merge pull request #275 from andeplane/IP_VORONOI
Initializing pointers in VORONOI
2016-11-17 09:01:46 -07:00
4448819824 Merge pull request #274 from andeplane/IP_POEMS
Initialize pointers in POEMS
2016-11-17 09:01:37 -07:00
300ac30332 Merge pull request #273 from akohlmey/auto-memalign
turn on -DLAMMPS_MEMALIGN=64 automatically when USER-INTEL is installed
2016-11-17 09:01:27 -07:00
2535e44991 Merge pull request #271 from akohlmey/tersoff-modc
pair style tersoff/mod/c
2016-11-17 09:00:52 -07:00
747c95c525 revised documentation, added temper_grem ref to fix_grem 2016-11-17 11:00:49 -05:00
cdae794383 Merge pull request #242 from andeplane/vashishta_kokkos
Added KOKKOS vashishta
2016-11-17 09:00:03 -07:00
8756a1017d Kokkos updates by Stan 2016-11-17 08:58:22 -07:00
5c64934bc8 added documention, re-ran temper_grem example 2016-11-17 10:40:10 -05:00
4e62e58d29 Merge pull request #47 from dstelter92/grem-feature
added internal tempering in grem with example
2016-11-17 10:04:43 -05:00
5ac2d9532e Re-run example with debug off 2016-11-17 09:43:44 -05:00
19ac9d2959 turned off dev mode by default in temper_grem 2016-11-17 09:31:07 -05:00
9f313aac75 shorter example 2016-11-16 20:43:41 -05:00
0102c5dadc file cleanup 2016-11-16 20:38:53 -05:00
07e46b797a added internal tempering in grem with example 2016-11-16 20:27:14 -05:00
b45d1e37ef integrate fix grem docs and update to match current conventions 2016-11-16 16:46:00 -05:00
2e7fd513d4 provide fix grem example input for nvt and npt 2016-11-16 16:42:01 -05:00
82364d10e3 Merge branch 'grem-feature' of https://github.com/dstelter92/lammps into grem-feature
Resolved merge conflicts and adapted logic to most recent changes in feature branch

Closes #46
2016-11-16 16:11:53 -05:00
16c8a307e5 removed leftover tex files 2016-11-16 15:39:02 -05:00
94f14ab051 spell check, minor typos 2016-11-16 15:34:32 -05:00
22d93fe8fb add restrict to CCFLAGS for makefiles intended for intel compilers 2016-11-16 14:31:10 -05:00
683f514fac simplify multi-replica run by passing per-replica parameters as variables on the command line 2016-11-16 14:22:20 -05:00
f617993944 need to apply fix_modify already in fix grem constructor 2016-11-16 13:52:27 -05:00
4641c9e568 Added basic documentation for grem fix 2016-11-16 13:36:13 -05:00
705f66aaee remove superfluous code 2016-11-16 13:24:41 -05:00
e57ae1ce3f compute scaled kinetic energy tensor without destroying the original data 2016-11-16 12:45:13 -05:00
950442b8b1 added check for nvt vs npt, enabled nvt simulation with fix_grem 2016-11-15 21:53:28 -05:00
1c68e42ecc fix_modify is not longer needed 2016-11-14 13:43:28 -05:00
5f94b31806 add multi-replica example for gREM 2016-11-14 10:12:48 -05:00
fdf5d68f9f allow to extract properties in NH integrator only when they are active 2016-11-14 09:27:33 -05:00
0c25f3b1d6 whitespace cleanup 2016-11-13 23:20:09 -05:00
14c7cf4197 retrieve target temperature and pressure from fix npt. add sanity checks. 2016-11-13 23:18:59 -05:00
26870f223d add example for gREM 2016-11-13 23:18:14 -05:00
09544d0698 bugfix for compute pressure/grem: must make a copy of argument strings 2016-11-13 19:19:52 -05:00
b5130a3b35 avoid NaN for variance from average output 2016-11-13 18:46:55 -05:00
20daf82463 initial import of adapted gREM code by David Stelter and Edyta Malolepsza
The following changes were made:
- the modifications to compute pressure were transferred to a derived class compute pressure/grem
- fix scaleforce was renamed to fix grem
- identifying the grem fix was simplified as fix grem passes an additional argument to compute pressure/grem
- dead code was removed in both files
- checking of arguments was tightened
2016-11-13 18:44:10 -05:00
57124b9b25 update documentation metadata files for recent changes 2016-11-12 09:18:21 -05:00
03b3834fe3 add documentation for pair style agni 2016-11-12 09:07:42 -05:00
d0124eac95 optimized data access and using approximate exponential for USER-OMP version 2016-11-12 08:36:27 -05:00
5685131fe2 add USER-OMP version of pair style agni 2016-11-11 19:08:01 -05:00
22fc92f9d8 use special ev_tally() function suitable for this kind of force compute 2016-11-11 18:32:55 -05:00
b9770766a8 add adatom and vacancy examples for AGNI pair style 2016-11-11 18:19:29 -05:00
9cc0c8badd error exit when requested element is not in potential file 2016-11-11 18:10:57 -05:00
6e1492a86c add potential file for pair style AGNI 2016-11-11 18:10:26 -05:00
9b0987d8c4 first complete implementation of AGNI pair style 2016-11-11 17:32:47 -05:00
e453adaf81 implemented parser for 1 element potential files 2016-11-11 15:53:37 -05:00
8e0fd88697 add example demonstrating the use of fix addtorque 2016-11-11 10:52:50 -05:00
fdcabd7d1d fix addtorque is compatible with dynamic groups 2016-11-11 09:01:18 -05:00
c5c8c50e97 initialize 'nper' 2016-11-11 07:47:42 -05:00
72b0841b28 Merge branch 'doc-updates' of github.com:akohlmey/lammps into doc-updates 2016-11-11 07:44:44 -05:00
801111a7ab dummy framework implementation for AGNI pair style 2016-11-10 15:00:36 -05:00
bfc478c320 simpler variant of the segfault workaround, that does not offend Clang c++. 2016-11-10 14:12:02 -05:00
2b75ee761d avoid segmentation fault, when creating a LAMMPS instance from the library interface. arg[] may be NULL. 2016-11-10 13:28:32 -05:00
352e177fcd Merge branch 'master' into small-fixes-and-updates 2016-11-10 13:26:00 -05:00
c20ee34c7b Initializing pointers in VORONOI 2016-11-10 09:30:07 +01:00
95a7f7160e Initialize pointers in POEMS 2016-11-10 09:26:52 +01:00
1f38e1a771 Merge branch 'master' into doc-updates 2016-11-09 18:18:05 -05:00
9806da69f3 Stan bug fixes for fix reaxc/bonds/kk 2016-11-09 15:47:50 -07:00
fec87c070d simplify compiling USER-INTEL package, by defaulting to -DLAMMPS_MEMALIGN=64 in case it is not set 2016-11-09 16:53:39 -05:00
3d3a99c082 added missing potential for tersoff/mod/c 2016-11-09 16:50:34 -05:00
3e36ec3754 remove unused class member 2016-11-09 16:17:46 -05:00
9ed5c4f0fa Merge branch 'master' into kokkos-vashishta 2016-11-09 15:15:10 -05:00
c55fd502e0 correct typo in formula 2016-11-09 15:04:24 -05:00
71ee2ecaa1 integrate pair style tersoff/mod/c contributed by Ganga P Purja Pun (GMU)
This includes docs, added testing and inclusion of USER-OMP support.
2016-11-09 14:52:39 -05:00
bfea3dce7d Merge pull request #268 from arielzn/born_dsf
pair styles born/coul/dsf and born/coul/dsf/cs added
2016-11-09 11:57:09 -07:00
eef862ee1c Merge pull request #267 from akohlmey/pager-help
use pager for help message, if connected to stdout
2016-11-09 11:53:43 -07:00
0cc2fbf1d6 Merge pull request #266 from andeplane/IP_USER_OMP
Initializing pointers in USER-OMP
2016-11-09 11:52:05 -07:00
ae00666994 Merge pull request #265 from andeplane/IP_DIFF_DPD
Initializing pointers in USER-DIFFRACTION and USER-DPD
2016-11-09 11:51:53 -07:00
51b3b5fb35 Merge pull request #264 from andeplane/IP_SNAP_SRD
Initialize pointers in SNAP and SRD
2016-11-09 11:51:44 -07:00
176f2c3aa1 Merge pull request #263 from andeplane/IP_RIGID_SHOCK
Initialize pointers in RIGID and SHOCK
2016-11-09 11:51:36 -07:00
3f71bfb185 Merge pull request #262 from andeplane/IP_PERI_QEQ_REPLICA
Initializing pointers in PERI, QEQ and REPLICA
2016-11-09 11:51:25 -07:00
cf3ab51679 Merge pull request #261 from andeplane/IP_MISC_MOLECULE
Initialized pointers in MISC and MOLECULE
2016-11-09 11:51:09 -07:00
59922f894b Merge pull request #260 from andeplane/IP_MANYBODY_MC
Initialize pointers in MANYBODY and MC
2016-11-09 11:51:01 -07:00
5e2b9d8bf3 Merge pull request #259 from andeplane/IP_KSPACE
Initialize pointers in KSPACE
2016-11-09 11:50:50 -07:00
2d132cad6b Merge pull request #258 from andeplane/IP_GRANULAR
Initialize pointers in GRANULAR
2016-11-09 11:50:41 -07:00
ef6801f8bf Merge pull request #257 from andeplane/IP_CORESHELL
Initialize pointers in CORESHELL
2016-11-09 11:50:31 -07:00
c81a723642 Merge pull request #256 from andeplane/IP_BODY
Initialized pointers in BODY
2016-11-09 11:50:23 -07:00
f9eb2a99ce Merge pull request #255 from andeplane/IP_ASPHERE
Initialize pointers in ASPHERE
2016-11-09 11:50:11 -07:00
16a02ef27d Merge pull request #254 from andeplane/IP_root
Initialized pointers in src folder
2016-11-09 11:47:21 -07:00
2c801320c2 fixed links in Section_intro.txt 2016-11-09 11:46:10 -07:00
d20b32092e Building correct shortlists and removed rsq test in force loops 2016-11-08 18:57:27 +01:00
9de1a2a08f added input using born/coul/dsf/cs to examples/coreshell 2016-11-08 18:27:44 +01:00
cdb5d47e9f add FLERR argument to force->bounds() in born/coul/dsf 2016-11-08 18:24:54 +01:00
a23b287a7a sync with SVN for creation of stable release 2016-11-08 09:05:50 -07:00
31204aab6a sync with SVN 2016-11-08 08:57:51 -07:00
25e7d074cf documentation added for born/coul/dsf and born/coul/dsf/cs styles 2016-11-08 16:51:54 +01:00
667f4dfe28 pair style born/coul/dsf added with its coreshell version 2016-11-08 11:32:38 +01:00
21694ca3a8 improve help and it through a pager, when screen == stdout 2016-11-07 17:10:12 -05:00
9b910d5511 make name of the actual executable (i.e. arg[0]) accessible 2016-11-07 17:07:40 -05:00
054ab6bff3 Initializing pointers in USER-OMP 2016-11-07 21:07:10 +01:00
616420cda8 Initializing pointers in USER-DIFFRACTION and USER-DPD 2016-11-07 20:51:36 +01:00
fb3ac9afba Initialize pointers in SNAP and SRD 2016-11-07 20:30:38 +01:00
7cd7cda2d4 Initialize pointers in RIGID and SHOCK 2016-11-07 20:22:04 +01:00
db0524278a Initializing pointers in PERI, QEQ and REPLICA 2016-11-07 19:58:27 +01:00
1ff75eaba2 Initialized pointers in MISC and MOLECULE 2016-11-07 17:15:48 +01:00
30dede867a Initialize pointers in MANYBODY and MC 2016-11-07 17:02:32 +01:00
a5c6104d64 Initialize pointers in KSPACE 2016-11-07 16:54:59 +01:00
c5869bdee2 Initialize pointers in GRANULAR 2016-11-07 16:33:50 +01:00
e7a2c6b5d1 Initialize pointers in CORESHELL 2016-11-07 16:26:16 +01:00
06959a9c59 Initialized pointers in BODY 2016-11-07 16:21:08 +01:00
cd65d44d95 Initialize pointers in ASPHERE 2016-11-07 16:15:39 +01:00
45f2e86dd6 NULLed ptrs in files 2016-11-07 16:07:37 +01:00
f8226e8ae5 NULL ptrs in dump_custom and dump_image 2016-11-07 15:56:47 +01:00
b221b15d24 NULLing ptrs in comm_brick and dump*.cpp 2016-11-07 15:50:18 +01:00
3a3d96b877 info styles also prints out pair styles 2016-11-04 18:18:40 -04:00
f333d659c2 Using short neighborlists in neigh full 2016-10-29 22:54:43 +02:00
51e2313fac Using short neighbor lists 2016-10-29 22:35:29 +02:00
e37d2b5c94 Calculating short neighbor lists 2016-10-29 22:20:37 +02:00
3870780894 Merge branch 'master' into kokkos-vashishta 2016-10-28 10:43:05 -04:00
21619f6a2f Recommitting reverted change
git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@15794 f3b2605a-c512-4ea7-a41b-209d697bcdaa
(cherry picked from commit c0b98f5299)
2016-10-25 15:25:39 -04:00
039bda9b61 Added updated vashishta for KOKKOS support
(cherry picked from commit 96089a42547f625e70aa2ac3933d248d2731b731)
2016-10-25 15:07:10 -04:00
6929603eef Added KOKKOS vashishta
(cherry picked from commit 5edc474bf0be574ddba96d00bb63894edf400ddb)
2016-10-25 15:07:10 -04:00
0229556b03 Merge branch 'master' of github.com:lammps/lammps 2015-07-03 15:43:29 +02:00
357d4517e8 Merge branch 'master' of github.com:lammps/lammps 2015-04-08 10:46:50 +02:00
a4a97de84f A few GLE fixes 2015-04-08 10:45:49 +02:00
570 changed files with 40813 additions and 1574 deletions

View File

@ -43,7 +43,7 @@ clean-all:
rm -rf $(BUILDDIR)/* utils/txt2html/txt2html.exe
clean:
rm -rf $(RSTDIR)
rm -rf $(RSTDIR) html
html: $(OBJECTS)
@(\

BIN doc/src/Eqs/fix_grem.jpg (new binary file, not shown; 6.1 KiB)

9
doc/src/Eqs/fix_grem.tex Normal file
View File

@ -0,0 +1,9 @@
\documentclass[12pt]{article}
\begin{document}
$$
T_{eff} = \lambda + \eta (H - H_0)
$$
\end{document}

BIN doc/src/Eqs/pair_agni.jpg (new binary file, not shown; 15 KiB)

Binary file not shown (new file; 4.1 KiB).

View File

@ -0,0 +1,10 @@
\documentclass[12pt]{article}
\pagestyle{empty}
\begin{document}
\begin{eqnarray*}
V_{ij} & = & f_C(r_{ij}) \left[ f_R(r_{ij}) + b_{ij} f_A(r_{ij}) + c_0 \right]
\end{eqnarray*}
\end{document}

Binary file not shown (modified; 4.0 KiB before, 4.2 KiB after).

View File

@ -3,7 +3,7 @@
\begin{document}
$$
P = \frac{N k_B T}{V} + \frac{\sum_{i}^{N} r_i \bullet f_i}{dV}
P = \frac{N k_B T}{V} + \frac{\sum_{i}^{N'} r_i \bullet f_i}{dV}
$$
\end{document}

Binary file not shown (modified; 4.9 KiB before, 5.3 KiB after).

View File

@ -4,7 +4,7 @@
$$
P_{IJ} = \frac{\sum_{k}^{N} m_k v_{k_I} v_{k_J}}{V} +
\frac{\sum_{k}^{N} r_{k_I} f_{k_J}}{V}
\frac{\sum_{k}^{N'} r_{k_I} f_{k_J}}{V}
$$
\end{document}

Binary file not shown (new file; 70 KiB).
Binary file not shown (new file; 104 KiB).
Binary file not shown (new file; 53 KiB).
Binary file not shown (new file; 111 KiB).

View File

@ -1,7 +1,7 @@
<!-- HTML_ONLY -->
<HEAD>
<TITLE>LAMMPS Users Manual</TITLE>
<META NAME="docnumber" CONTENT="27 Oct 2016 version">
<META NAME="docnumber" CONTENT="22 Nov 2016 version">
<META NAME="author" CONTENT="http://lammps.sandia.gov - Sandia National Laboratories">
<META NAME="copyright" CONTENT="Copyright (2003) Sandia Corporation. This software and manual is distributed under the GNU General Public License.">
</HEAD>
@ -21,7 +21,7 @@
<H1></H1>
LAMMPS Documentation :c,h3
27 Oct 2016 version :c,h4
22 Nov 2016 version :c,h4
Version info: :h4

View File

@ -531,7 +531,8 @@ package"_Section_start.html#start_3.
"dump nc"_dump_nc.html,
"dump nc/mpiio"_dump_nc.html,
"group2ndx"_group2ndx.html,
"ndx2group"_group2ndx.html :tb(c=3,ea=c)
"ndx2group"_group2ndx.html,
"temper/grem"_temper_grem.html :tb(c=3,ea=c)
:line
@ -631,7 +632,7 @@ USER-INTEL, k = KOKKOS, o = USER-OMP, t = OPT.
"rigid/npt (o)"_fix_rigid.html,
"rigid/nve (o)"_fix_rigid.html,
"rigid/nvt (o)"_fix_rigid.html,
"rigid/small (o)"_fix_rigid.html,
<"rigid/small (o)"_fix_rigid.html,
"rigid/small/nph"_fix_rigid.html,
"rigid/small/npt"_fix_rigid.html,
"rigid/small/nve"_fix_rigid.html,
@ -687,6 +688,7 @@ package"_Section_start.html#start_3.
"eos/table/rx"_fix_eos_table_rx.html,
"flow/gauss"_fix_flow_gauss.html,
"gle"_fix_gle.html,
"grem"_fix_grem.html,
"imd"_fix_imd.html,
"ipi"_fix_ipi.html,
"langevin/drude"_fix_langevin_drude.html,
@ -886,6 +888,8 @@ KOKKOS, o = USER-OMP, t = OPT.
"body"_pair_body.html,
"bop"_pair_bop.html,
"born (go)"_pair_born.html,
"born/coul/dsf"_pair_born.html,
"born/coul/dsf/cs"_pair_born.html,
"born/coul/long (go)"_pair_born.html,
"born/coul/long/cs"_pair_born.html,
"born/coul/msm (o)"_pair_born.html,
@ -909,8 +913,8 @@ KOKKOS, o = USER-OMP, t = OPT.
"coul/msm"_pair_coul.html,
"coul/streitz"_pair_coul.html,
"coul/wolf (ko)"_pair_coul.html,
"dpd (o)"_pair_dpd.html,
"dpd/tstat (o)"_pair_dpd.html,
"dpd (go)"_pair_dpd.html,
"dpd/tstat (go)"_pair_dpd.html,
"dsmc"_pair_dsmc.html,
"eam (gkot)"_pair_eam.html,
"eam/alloy (gkot)"_pair_eam.html,
@ -979,11 +983,12 @@ KOKKOS, o = USER-OMP, t = OPT.
"table (gko)"_pair_table.html,
"tersoff (gkio)"_pair_tersoff.html,
"tersoff/mod (gko)"_pair_tersoff_mod.html,
"tersoff/mod/c (o)"_pair_tersoff_mod.html,
"tersoff/zbl (gko)"_pair_tersoff_zbl.html,
"tip4p/cut (o)"_pair_coul.html,
"tip4p/long (o)"_pair_coul.html,
"tri/lj"_pair_tri_lj.html,
"vashishta (o)"_pair_vashishta.html,
"vashishta (ko)"_pair_vashishta.html,
"vashishta/table (o)"_pair_vashishta.html,
"yukawa (go)"_pair_yukawa.html,
"yukawa/colloid (go)"_pair_yukawa_colloid.html,
@ -993,6 +998,7 @@ These are additional pair styles in USER packages, which can be used
if "LAMMPS is built with the appropriate
package"_Section_start.html#start_3.
"agni (o)"_pair_agni.html,
"awpmd/cut"_pair_awpmd.html,
"buck/mdf"_pair_mdf.html,
"coul/cut/soft (o)"_pair_lj_soft.html,

View File

@ -366,11 +366,11 @@ complementary modeling tasks.
"DL_POLY"_dlpoly
"Tinker"_tinker :ul
:link(charmm,http://www.scripps.edu/brooks)
:link(amber,http://amber.scripps.edu)
:link(charmm,http://www.charmm.org)
:link(amber,http://ambermd.org)
:link(namd,http://www.ks.uiuc.edu/Research/namd/)
:link(nwchem,http://www.emsl.pnl.gov/docs/nwchem/nwchem.html)
:link(dlpoly,http://www.cse.clrc.ac.uk/msi/software/DL_POLY)
:link(dlpoly,http://www.ccp5.ac.uk/DL_POLY_CLASSIC)
:link(tinker,http://dasher.wustl.edu/tinker)
CHARMM, AMBER, NAMD, NWCHEM, and Tinker are designed primarily for

View File

@ -8,19 +8,26 @@
11. Python interface to LAMMPS :h3
LAMMPS can work together with Python in two ways. First, Python can
LAMMPS can work together with Python in three ways. First, Python can
wrap LAMMPS through the "LAMMPS library
interface"_Section_howto.html#howto_19, so that a Python script can
create one or more instances of LAMMPS and launch one or more
simulations. In Python lingo, this is "extending" Python with LAMMPS.
Second, LAMMPS can use the Python interpreter, so that a LAMMPS input
Second, the low-level Python interface can be used indirectly through the
PyLammps and IPyLammps wrapper classes in Python. These wrappers try to
simplify the usage of LAMMPS in Python by providing an object-based interface
to common LAMMPS functionality. They also reduce the amount of code necessary to
parameterize LAMMPS scripts through Python and make variables and computes
directly accessible. See "PyLammps interface"_#py_9 for more details.
Third, LAMMPS can use the Python interpreter, so that a LAMMPS input
script can invoke Python code, and pass information back-and-forth
between the input script and Python functions you write. The Python
code can also callback to LAMMPS to query or change its attributes.
In Python lingo, this is "embedding" Python in LAMMPS.
This section describes how to do both.
This section describes how to use these three approaches.
11.1 "Overview of running LAMMPS from Python"_#py_1
11.2 "Overview of using Python from a LAMMPS script"_#py_2
@ -29,7 +36,8 @@ This section describes how to do both.
11.5 "Extending Python with MPI to run in parallel"_#py_5
11.6 "Testing the Python-LAMMPS interface"_#py_6
11.7 "Using LAMMPS from Python"_#py_7
11.8 "Example Python scripts that use LAMMPS"_#py_8 :ul
11.8 "Example Python scripts that use LAMMPS"_#py_8
11.9 "PyLammps interface"_#py_9 :ul
If you are not familiar with it, "Python"_http://www.python.org is a
powerful scripting and programming language which can essentially do
@ -824,3 +832,7 @@ different visualization package options. Click to see larger images:
:image(JPG/screenshot_atomeye_small.jpg,JPG/screenshot_atomeye.jpg)
:image(JPG/screenshot_pymol_small.jpg,JPG/screenshot_pymol.jpg)
:image(JPG/screenshot_vmd_small.jpg,JPG/screenshot_vmd.jpg)
11.9 PyLammps interface :link(py_9),h4
Please see the "PyLammps Tutorial"_tutorial_pylammps.html.
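A minimal sketch of the first approach ("extending" Python with LAMMPS), assuming
the lammps Python package has been installed as described later in this section and
that an input script named in.melt exists (the filename is only a placeholder):

from lammps import lammps        # low-level ctypes-based wrapper
lmp = lammps()                   # create a LAMMPS instance
lmp.file("in.melt")              # run an existing input script (placeholder name)
lmp.command("run 100")           # issue one additional command
lmp.close() :pre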

View File

@ -37,12 +37,18 @@ The pressure is computed by the formula
where N is the number of atoms in the system (see discussion of DOF
below), Kb is the Boltzmann constant, T is the temperature, d is the
dimensionality of the system (2 or 3 for 2d/3d), V is the system
volume (or area in 2d), and the second term is the virial, computed
within LAMMPS for all pairwise as well as 2-body, 3-body, and 4-body,
and long-range interactions. "Fixes"_fix.html that impose constraints
(e.g. the "fix shake"_fix_shake.html command) also contribute to the
virial term.
dimensionality of the system (2 or 3 for 2d/3d), and V is the system
volume (or area in 2d). The second term is the virial, equal to
-dU/dV, computed for all pairwise as well as 2-body, 3-body, 4-body,
manybody, and long-range interactions, where r_i and f_i are the
position and force vector of atom i, and the black dot indicates a dot
product. When periodic boundary conditions are used, N' necessarily
includes periodic image (ghost) atoms outside the central box, and the
position and force vectors of ghost atoms are thus included in the
summation. When periodic boundary conditions are not used, N' = N =
the number of atoms in the system. "Fixes"_fix.html that impose
constraints (e.g. the "fix shake"_fix_shake.html command) also
contribute to the virial term.
A symmetric pressure tensor, stored as a 6-element vector, is also
calculated by this compute. The 6 components of the vector are
@ -62,8 +68,9 @@ compute temperature or ke and/or the virial. The {virial} keyword
means include all terms except the kinetic energy {ke}.
Details of how LAMMPS computes the virial efficiently for the entire
system, including the effects of periodic boundary conditions is
discussed in "(Thompson)"_#Thompson.
system, including for manybody potentials and accounting for the
effects of periodic boundary conditions are discussed in
"(Thompson)"_#Thompson.
The temperature and kinetic energy tensor is not calculated by this
compute, but rather by the temperature compute specified with the

111
doc/src/fix_grem.txt Normal file
View File

@ -0,0 +1,111 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
fix grem command :h3
[Syntax:]
fix ID group-ID grem lambda eta H0 thermostat-ID :pre
ID, group-ID are documented in "fix"_fix.html command :ulb,l
grem = style name of this fix command :l
lambda = intercept parameter of linear effective temperature function :l
eta = slope parameter of linear effective temperature function :l
H0 = shift parameter of linear effective temperature function :l
thermostat-ID = ID of Nose-Hoover thermostat or barostat used in simulation :l,ule
[Examples:]
fix fxgREM all grem 400 -0.01 -30000 fxnpt
thermo_modify press fxgREM_press :pre
fix fxgREM all grem 502 -0.15 -80000 fxnvt :pre
[Description:]
This fix implements the molecular dynamics version of the generalized
replica exchange method (gREM) originally developed by "(Kim)"_#Kim,
which uses non-Boltzmann ensembles to sample over first order phase
transitions. This is done by defining replicas with an
enthalpy-dependent effective temperature
:c,image(Eqs/fix_grem.jpg)
with {eta} negative and steep enough to only intersect the
characteristic microcanonical temperature (Ts) of the system once,
ensuring a unimodal enthalpy distribution in that replica. {Lambda} is
the intercept and affects the generalized ensemble similarly to how
temperature affects a Boltzmann ensemble. {H0} is a reference
enthalpy, and is typically set as the lowest desired sampled enthalpy.
Further explanation can be found in our recent papers
"(Malolepsza)"_#Malolepsza.
This fix requires a Nose-Hoover thermostat fix reference passed to the
grem as {thermostat-ID}. Two distinct temperatures exist in this
generalized ensemble, the effective temperature defined above, and a
kinetic temperature that controls the velocity distribution of
particles as usual. Either constant volume or constant pressure
algorithms can be used.
The fix enforces a generalized ensemble in a single replica
only. Typically, this approach is combined with replica exchange, with
replicas differing only in {lambda} for simplicity, but this is not
required. A multi-replica simulation can be run within the LAMMPS
environment using the "temper/grem"_temper_grem.html command. This
utilizes LAMMPS partition mode and requires the number of available
processors be on the order of the number of desired replicas. A
100-replica simulation would require at least 100 processors (1 per
world at minimum). If many replicas are needed on a small number of
processors, multi-replica runs can be run outside of LAMMPS. An
example of this can be found in examples/USER/misc/grem and has no
limit on the number of replicas per processor. However, this is very
inefficient and error prone and should be avoided if possible.
In general, defining the generalized ensembles is unique for every
system. When starting a many-replica simulation without any knowledge
of the underlying microcanonical temperature, there are several tricks
we have utilized to optimize the process. Choosing a less-steep {eta}
yields broader distributions, requiring fewer replicas to map the
microcanonical temperature. While this likely suffers from the same
sampling problems gREM was built to avoid, it provides quick insight
into Ts. Initially using an evenly-spaced {lambda} distribution
identifies regions where small changes in enthalpy lead to large
temperature changes. Replicas are easily added where needed.
:line
[Restart, fix_modify, output, run start/stop, minimize info:]
No information about this fix is written to "binary restart
files"_restart.html.
The "thermo_modify"_thermo_modify.html {press} option is supported
by this fix to add the rescaled kinetic pressure as part of
"thermodynamic output"_thermo_style.html.
[Restrictions:]
This fix is part of the USER-MISC package. It is only enabled if
LAMMPS was built with that package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
[Related commands:]
"temper/grem"_temper_grem.html, "fix nvt"_fix_nh.html, "fix
npt"_fix_nh.html, "thermo_modify"_thermo_modify.html
[Default:] none
:line
:link(Kim)
[(Kim)] Kim, Keyes, Straub, J Chem. Phys, 132, 224107 (2010).
:link(Malolepsza)
[(Malolepsza)] Malolepsza, Secor, Keyes, J Phys Chem B 119 (42),
13379-13384 (2015).

View File

@ -10,18 +10,19 @@ fix ipi command :h3
[Syntax:]
fix ID group-ID ipi address port \[unix\] :pre
fix ID group-ID ipi address port \[unix\] \[reset\] :pre
ID, group-ID are documented in "fix"_fix.html command
ipi = style name of this fix command
address = internet address (FQDN or IP), or UNIX socket name
port = port number (ignored for UNIX sockets)
optional keyword = {unix}, if present uses a unix socket :ul
optional keyword = {unix}, if present uses a unix socket
optional keyword = {reset}, if present reset electrostatics at each call :ul
[Examples:]
fix 1 all ipi my.server.com 12345
fix 1 all ipi mysocket 666 unix
fix 1 all ipi mysocket 666 unix reset
[Description:]
@ -57,6 +58,15 @@ input are listed in the same order as in the data file of LAMMPS. The
initial configuration is ignored, as it will be substituted with the
coordinates received from i-PI before forces are ever evaluated.
A note of caution when using potentials that contain long-range
electrostatics, or that contain parameters that depend on box size:
all of these options will be initialized based on the cell size in the
LAMMPS-side initial configuration and kept constant during the run.
This is required to e.g. obtain reproducible and conserved forces.
If the cell varies too wildly, it may be advisable to reinitialize
these interactions at each call. This behavior can be requested by
setting the {reset} switch.
[Restart, fix_modify, output, run start/stop, minimize info:]
There is no restart information associated with this fix, since all

View File

@ -48,6 +48,7 @@ Fixes :h1
fix_gld
fix_gle
fix_gravity
fix_grem
fix_halt
fix_heat
fix_imd

View File

@ -14,7 +14,7 @@ info args :pre
args = one or more of the following keywords: {out}, {all}, {system}, {communication}, {computes}, {dumps}, {fixes}, {groups}, {regions}, {variables}, {styles}, {time}, or {configuration}
{out} values = {screen}, {log}, {append} filename, {overwrite} filename
{styles} values = {all}, {angle}, {atom}, {bond}, {compute}, {command}, {dump}, {dihedral}, {fix}, {improper}, {integrate}, {kspace}, {minimize}, {region} :ul
{styles} values = {all}, {angle}, {atom}, {bond}, {compute}, {command}, {dump}, {dihedral}, {fix}, {improper}, {integrate}, {kspace}, {minimize}, {pair}, {region} :ul
[Examples:]
@ -70,8 +70,9 @@ The {variables} category prints a list of all currently defined
variables, their names, styles, definition and last computed value, if
available.
The {styles} category prints the list of styles available in LAMMPS. It
supports one of the following options to control what is printed out:
The {styles} category prints the list of styles available in the
current LAMMPS binary. It supports one of the following options
to control which category of styles is printed out:
all
angle
@ -86,6 +87,7 @@ improper
integrate
kspace
minimize
pair
region :ul
The {time} category prints the accumulated CPU and wall time for the

View File

@ -59,6 +59,7 @@ dump_h5md.html
dump_image.html
dump_modify.html
dump_molfile.html
dump_nc.html
echo.html
fix.html
fix_modify.html
@ -152,6 +153,7 @@ fix_colvars.html
fix_controller.html
fix_deform.html
fix_deposit.html
fix_dpd_energy.html
fix_drag.html
fix_drude.html
fix_drude_transform.html
@ -170,6 +172,7 @@ fix_gcmc.html
fix_gld.html
fix_gle.html
fix_gravity.html
fix_grem.html
fix_halt.html
fix_heat.html
fix_imd.html
@ -272,6 +275,7 @@ fix_viscosity.html
fix_viscous.html
fix_wall.html
fix_wall_gran.html
fix_wall_gran_region.html
fix_wall_piston.html
fix_wall_reflect.html
fix_wall_region.html
@ -390,6 +394,7 @@ compute_voronoi_atom.html
compute_xrd.html
pair_adp.html
pair_agni.html
pair_airebo.html
pair_awpmd.html
pair_beck.html
@ -622,3 +627,4 @@ USER/atc/man_unfix_flux.html
USER/atc/man_unfix_nodes.html
USER/atc/man_write_atom_weights.html
USER/atc/man_write_restart.html

128
doc/src/pair_agni.txt Normal file
View File

@ -0,0 +1,128 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
pair_style agni command :h3
pair_style agni/omp command :h3
[Syntax:]
pair_style agni :pre
[Examples:]
pair_style agni
pair_coeff * * Al.agni Al
[Description:]
The {agni} style computes the manybody vectorial force components for
an atom as
:c,image(Eqs/pair_agni.jpg)
{u} labels the individual components, i.e. x, y or z, and {V} is the
corresponding atomic fingerprint. {d} is the Euclidean distance between
any two atomic fingerprints. A total of N_t reference atomic
environments are considered to construct the force field file. {alpha_t}
and {l} are the weight coefficients and length scale parameter of the
non-linear regression model.
The method implements the recently proposed machine learning access to
atomic forces as discussed extensively in the following publications -
"(Botu1)"_#Botu2015adaptive and "(Botu2)"_#Botu2015learning. The premise
of the method is to map the atomic environment numerically into a
fingerprint, and use machine learning methods to create a mapping to the
vectorial atomic forces.
Only a single pair_coeff command is used with the {agni} style which
specifies an AGNI potential file containing the parameters of the
force field for the needed elements. These are mapped to LAMMPS atom
types by specifying N additional arguments after the filename in the
pair_coeff command, where N is the number of LAMMPS atom types:
filename
N element names = mapping of AGNI elements to atom types :ul
See the "pair_coeff"_pair_coeff.html doc page for alternate ways
to specify the path for the force field file.
An AGNI force field is fully specified by the filename which contains the
parameters of the force field, i.e., the reference training environments
used to construct the machine learning force field. Example force field
and input files are provided in the examples/USER/misc/agni directory.
:line
Styles with an {omp} suffix are functionally the same as the corresponding
style without the suffix. They have been optimized to run faster, depending
on your available hardware, as discussed in "Section 5"_Section_accelerate.html
of the manual. The accelerated style takes the same arguments and
should produce the same results, except for round-off and precision
issues.
The accelerated style is part of the USER-OMP package. It is only enabled
if LAMMPS was built with that package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
You can specify the accelerated style explicitly in your input script
by including its suffix, or you can use the "-suffix command-line
switch"_Section_start.html#start_7 when you invoke LAMMPS, or you can
use the "suffix"_suffix.html command in your input script.
See "Section 5"_Section_accelerate.html of the manual for
more instructions on how to use the accelerated styles effectively.
:line
[Mixing, shift, table, tail correction, restart, rRESPA info]:
This pair style does not support the "pair_modify"_pair_modify.html
shift, table, and tail options.
This pair style does not write its information to "binary restart
files"_restart.html, since it is stored in potential files. Thus, you
need to re-specify the pair_style and pair_coeff commands in an input
script that reads a restart file.
This pair style can only be used via the {pair} keyword of the
"run_style respa"_run_style.html command. It does not support the
{inner}, {middle}, {outer} keywords.
:line
[Restrictions:]
Currently, only elemental systems are implemented. Also, the method only
provides access to the forces and not energies or stresses. However, one
can access the energy via thermodynamic integration of the forces as
discussed in "(Botu3)"_#Botu2016construct. This pair style is part
of the USER-MISC package. It is only enabled if LAMMPS was built with
that package. See the "Making LAMMPS"_Section_start.html#start_3 section
for more info.
The AGNI force field files provided with LAMMPS (see the
potentials directory) are parameterized for metal "units"_units.html.
You can use the AGNI potential with any LAMMPS units, but you would need
to create your own AGNI potential file with coefficients listed in the
appropriate units if your simulation doesn't use "metal" units.
[Related commands:]
"pair_coeff"_pair_coeff.html
[Default:] none
:line
:link(Botu2015adaptive)
[(Botu1)] V. Botu and R. Ramprasad, Int. J. Quant. Chem., 115(16), 1074 (2015).
:link(Botu2015learning)
[(Botu2)] V. Botu and R. Ramprasad, Phys. Rev. B, 92(9), 094306 (2015).
:link(Botu2016construct)
[(Botu3)] V. Botu, R. Batra, J. Chapman and R. Ramprasad, https://arxiv.org/abs/1610.02098 (2016).

View File

@ -19,6 +19,8 @@ pair_style born/coul/msm/omp command :h3
pair_style born/coul/wolf command :h3
pair_style born/coul/wolf/gpu command :h3
pair_style born/coul/wolf/omp command :h3
pair_style born/coul/dsf command :h3
pair_style born/coul/dsf/cs command :h3
[Syntax:]
@ -37,7 +39,11 @@ args = list of arguments for a particular style :ul
{born/coul/wolf} args = alpha cutoff (cutoff2)
alpha = damping parameter (inverse distance units)
cutoff = global cutoff for non-Coulombic (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (optional) (distance units) :pre
cutoff2 = global cutoff for Coulombic (optional) (distance units)
{born/coul/dsf} or {born/coul/dsf/cs} args = alpha cutoff (cutoff2)
alpha = damping parameter (inverse distance units)
cutoff = global cutoff for non-Coulombic (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (distance units) :pre
[Examples:]
@ -62,6 +68,10 @@ pair_style born/coul/wolf 0.25 10.0 9.0
pair_coeff * * 6.08 0.317 2.340 24.18 11.51
pair_coeff 1 1 6.08 0.317 2.340 24.18 11.51 :pre
pair_style born/coul/dsf 0.1 10.0 12.0
pair_coeff * * 0.0 1.00 0.00 0.00 0.00
pair_coeff 1 1 480.0 0.25 0.00 1.05 0.50 :pre
[Description:]
The {born} style computes the Born-Mayer-Huggins or Tosi/Fumi
@ -90,10 +100,14 @@ term.
The {born/coul/wolf} style adds a Coulombic term as described for the
Wolf potential in the "coul/wolf"_pair_coul.html pair style.
The {born/coul/dsf} style computes the Coulomb contribution with the
damped shifted force model as in the "coul/dsf"_pair_coul.html style.
Style {born/coul/long/cs} is identical to {born/coul/long} except that
a term is added for the "core/shell model"_Section_howto.html#howto_25
to allow charges on core and shell particles to be separated by r =
0.0.
0.0. The same correction is introduced for {born/coul/dsf/cs} style
which is identical to {born/coul/dsf}.
Note that these potentials are related to the "Buckingham
potential"_pair_buck.html.
@ -116,9 +130,10 @@ The second coefficient, rho, must be greater than zero.
The last coefficient is optional. If not specified, the global A,C,D
cutoff specified in the pair_style command is used.
For {born/coul/long} and {born/coul/wolf} no Coulombic cutoff can be
specified for an individual I,J type pair. All type pairs use the
same global Coulombic cutoff specified in the pair_style command.
For {born/coul/long}, {born/coul/wolf} and {born/coul/dsf} no
Coulombic cutoff can be specified for an individual I,J type pair.
All type pairs use the same global Coulombic cutoff specified in the
pair_style command.
:line

View File

@ -8,19 +8,24 @@
pair_style born/coul/long/cs command :h3
pair_style buck/coul/long/cs command :h3
pair_style born/coul/dsf/cs command :h3
[Syntax:]
pair_style style args :pre
style = {born/coul/long/cs} or {buck/coul/long/cs}
style = {born/coul/long/cs} or {buck/coul/long/cs} or {born/coul/dsf/cs}
args = list of arguments for a particular style :ul
{born/coul/long/cs} args = cutoff (cutoff2)
cutoff = global cutoff for non-Coulombic (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (optional) (distance units)
{buck/coul/long/cs} args = cutoff (cutoff2)
cutoff = global cutoff for Buckingham (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (optional) (distance units) :pre
cutoff2 = global cutoff for Coulombic (optional) (distance units)
{born/coul/dsf/cs} args = alpha cutoff (cutoff2)
alpha = damping parameter (inverse distance units)
cutoff = global cutoff for non-Coulombic (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (distance units) :pre
[Examples:]
@ -32,6 +37,10 @@ pair_style buck/coul/long/cs 10.0 8.0
pair_coeff * * 100.0 1.5 200.0
pair_coeff 1 1 100.0 1.5 200.0 9.0 :pre
pair_style born/coul/dsf/cs 0.1 10.0 12.0
pair_coeff * * 0.0 1.00 0.00 0.00 0.00
pair_coeff 1 1 480.0 0.25 0.00 1.05 0.50 :pre
[Description:]
These pair styles are designed to be used with the adiabatic
@ -39,7 +48,7 @@ core/shell model of "(Mitchell and Finchham)"_#MitchellFinchham. See
"Section 6.25"_Section_howto.html#howto_25 of the manual for an
overview of the model as implemented in LAMMPS.
These pair styles are identical to the "pair_style
The styles with a {coul/long} term are identical to the "pair_style
born/coul/long"_pair_born.html and "pair_style
buck/coul/long"_pair_buck.html styles, except they correctly treat the
special case where the distance between two charged core and shell
@ -63,6 +72,14 @@ where C is an energy-conversion constant, Qi and Qj are the charges on
the core and shell, epsilon is the dielectric constant and r_min is the
minimal distance.
The pair style {born/coul/dsf/cs} is identical to the
"pair_style born/coul/dsf"_pair_born.html style, which uses the
the damped shifted force model as in "coul/dsf"_pair_coul.html
to compute the Coulomb contribution. This approach does not require
a long-range solver, thus the only correction is the addition of a
minimal distance to avoid the possible r = 0.0 case for a
core/shell pair.
[Restrictions:]
These pair styles are part of the CORESHELL package. They are only

View File

@ -15,7 +15,7 @@ pair_style snap :pre
[Examples:]
pair_style snap
pair_coeff * * snap InP.snapcoeff In P InP.snapparam In In P P :pre
pair_coeff * * InP.snapcoeff In P InP.snapparam In In P P :pre
[Description:]
@ -27,9 +27,9 @@ it uses bispectrum components
to characterize the local neighborhood of each atom
in a very general way. The mathematical definition of the
bispectrum calculation used by SNAP is identical
to that used of "compute sna/atom"_compute_sna_atom.html.
to that used by "compute sna/atom"_compute_sna_atom.html.
In SNAP, the total energy is decomposed into a sum over
atom energies. The energy of atom {i} is
atom energies. The energy of atom {i } is
expressed as a weighted sum over bispectrum components.
:c,image(Eqs/pair_snap.jpg)
@ -183,8 +183,7 @@ LAMMPS"_Section_start.html#start_3 section for more info.
:line
:link(Thompson2014)
[(Thompson)] Thompson, Swiler, Trott, Foiles, Tucker, under review, preprint
available at "arXiv:1409.3880"_http://arxiv.org/abs/1409.3880
[(Thompson)] Thompson, Swiler, Trott, Foiles, Tucker, J Comp Phys, 285, 316 (2015).
:link(Bartok2010)
[(Bartok2010)] Bartok, Payne, Risi, Csanyi, Phys Rev Lett, 104, 136403 (2010).

View File

@ -7,32 +7,43 @@
:line
pair_style tersoff/mod command :h3
pair_style tersoff/mod/c command :h3
pair_style tersoff/mod/gpu command :h3
pair_style tersoff/mod/kk command :h3
pair_style tersoff/mod/omp command :h3
pair_style tersoff/mod/c/omp command :h3
[Syntax:]
pair_style tersoff/mod :pre
pair_style tersoff/mod/c :pre
[Examples:]
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si :pre
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si :pre
[Description:]
The {tersoff/mod} style computes a bond-order type interatomic
potential "(Kumagai)"_#Kumagai based on a 3-body Tersoff potential
"(Tersoff_1)"_#Tersoff_1, "(Tersoff_2)"_#Tersoff_2 with modified
cutoff function and angular-dependent term, giving the energy E of a
system of atoms as
The {tersoff/mod} and {tersoff/mod/c} styles compute a bond-order type
interatomic potential "(Kumagai)"_#Kumagai based on a 3-body Tersoff
potential "(Tersoff_1)"_#Tersoff_1, "(Tersoff_2)"_#Tersoff_2 with
modified cutoff function and angular-dependent term, giving the energy
E of a system of atoms as
:c,image(Eqs/pair_tersoff_mod.jpg)
where f_R is a two-body term and f_A includes three-body interactions.
The summations in the formula are over all neighbors J and K of atom I
within a cutoff distance = R + D.
The {tersoff/mod/c} style differs from {tersoff/mod} only in the
formulation of the V_ij term, where it contains an additional c0 term.
:c,image(Eqs/pair_tersoff_mod_c.jpg)
The modified cutoff function f_C proposed by "(Murty)"_#Murty and
having a continuous second-order differential is employed. The
@ -69,10 +80,11 @@ are placeholders for atom types that will be used with other
potentials.
Tersoff/MOD file in the {potentials} directory of the LAMMPS
distribution have a ".tersoff.mod" suffix. Lines that are not blank
or comments (starting with #) define parameters for a triplet of
elements. The parameters in a single entry correspond to coefficients
in the formula above:
distribution have a ".tersoff.mod" suffix. Potential files for the
{tersoff/mod/c} style have the suffix ".tersoff.modc". Lines that are
not blank or comments (starting with #) define parameters for a triplet
of elements. The parameters in a single entry correspond to
coefficients in the formulae above:
element 1 (the center atom in a 3-body interaction)
element 2 (the atom bonded to the center atom)
@ -93,13 +105,15 @@ c1
c2
c3
c4
c5 :ul
c5
c0 (energy units, tersoff/mod/c only) :ul
The n, eta, lambda2, B, lambda1, and A parameters are only used for
two-body interactions. The beta, alpha, c1, c2, c3, c4, c5, h
parameters are only used for three-body interactions. The R and D
parameters are used for both two-body and three-body interactions. The
non-annotated parameters are unitless.
parameters are used for both two-body and three-body interactions.
The c0 term applies to {tersoff/mod/c} only. The non-annotated
parameters are unitless.
The Tersoff/MOD potential file must contain entries for all the elements
listed in the pair_coeff command. It can also contain entries for

View File

@ -8,6 +8,7 @@
pair_style vashishta command :h3
pair_style vashishta/omp command :h3
pair_style vashishta/kk command :h3
pair_style vashishta/table command :h3
pair_style vashishta/table/omp command :h3

View File

@ -6,6 +6,7 @@ Pair Styles :h1
:maxdepth: 1
pair_adp
pair_agni
pair_airebo
pair_awpmd
pair_beck

109
doc/src/temper_grem.txt Normal file
View File

@ -0,0 +1,109 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
temper/grem command :h3
[Syntax:]
temper/grem N M lambda fix-ID thermostat-ID seed1 seed2 index :pre
N = total # of timesteps to run
M = attempt a tempering swap every this many steps
lambda = initial lambda for this ensemble
fix-ID = ID of fix_grem
thermostat-ID = ID of the thermostat that controls kinetic temperature
seed1 = random # seed used to decide on adjacent temperature to partner with
seed2 = random # seed for Boltzmann factor in Metropolis swap
index = which temperature (0 to N-1) I am simulating (optional) :ul
[Examples:]
temper/grem 100000 1000 ${lambda} fxgREM fxnvt 0 58728
temper/grem 40000 100 ${lambda} fxgREM fxnpt 0 32285 ${walkers} :pre
[Description:]
Run a parallel tempering or replica exchange simulation in LAMMPS
partition mode using multiple generalized replicas (ensembles) of a
system defined by "fix grem"_fix_grem.html, which stands for the
generalized replica exchange method (gREM) originally developed by
"(Kim)"_#Kim. It uses non-Boltzmann ensembles to sample over first
order phase transitions. This is done by defining replicas with an
enthalpy-dependent effective temperature
Two or more replicas must be used. See the "temper"_temper.html
command for an explanation of how to run replicas on multiple
partitions of one or more processors.
This command is a modification of the "temper"_temper.html command and
has the same dependencies, restraints, and input variables which are
discussed there in greater detail.
Instead of temperature, this command performs replica exchanges in
lambda as per the generalized ensemble enforced by "fix
grem"_fix_grem.html. The desired lambda is specified by {lambda},
which is typically a variable previously set in the input script, so
that each partition is assigned a different lambda. See the
"variable"_variable.html command for more details. For example:
variable lambda world 400 420 440 460
fix fxnvt all nvt temp 300.0 300.0 100.0
fix fxgREM all grem ${lambda} -0.05 -50000 fxnvt
temper/grem 100000 100 ${lambda} fxgREM fxnvt 3847 58382 :pre
would define 4 lambdas with constant kinetic temperature but unique
generalized temperature, and assign one of them to "fix
grem"_fix_grem.html used by each replica, and to the grem command.
As the gREM simulation runs for {N} timesteps, a swap between adjacent
ensembles will be attempted every {M} timesteps. If {seed1} is 0,
then the swap attempts will alternate between odd and even pairings.
If {seed1} is non-zero then it is used as a seed in a random number
generator to randomly choose an odd or even pairing each time. Each
attempted swap of temperatures is either accepted or rejected based on
a Metropolis criterion, derived for gREM by "(Kim)"_#Kim, which uses
{seed2} in the random number generator.
File management works identically to the "temper"_temper.html command.
Dump files created by this fix contain continuous trajectories and
require post-processing to obtain per-replica information.
The last argument {index} in the grem command is optional and is used
when restarting a run from a set of restart files (one for each
replica) which had previously swapped to new lambda. This is done
using a variable. For example if the log file listed the following for
a simulation with 5 replicas:
500000 2 4 0 1 3 :pre
then a setting of
variable walkers world 2 4 0 1 3 :pre
would be used to restart the run with a grem command like the example
above with ${walkers} as the last argument. This functionality is
identical to "temper"_temper.html.
:line
[Restrictions:]
This command can only be used if LAMMPS was built with the USER-MISC
package. See the "Making LAMMPS"_Section_start.html#start_3 section
for more info on packages.
This command must be used with "fix grem"_fix_grem.html.
[Related commands:]
"fix grem"_fix_grem.html, "temper"_temper.html, "variable"_variable.html
[Default:] none
:link(Kim)
[(Kim)] Kim, Keyes, Straub, J Chem Phys, 132, 224107 (2010).

View File

@ -0,0 +1,462 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
PyLammps Tutorial :h1
<!-- RST
.. contents::
END_RST -->
Overview :h2
PyLammps is a Python wrapper class which can be created on its own or use an
existing lammps Python object. It creates a simpler, Python-like interface to
common LAMMPS functionality. Unlike the original flat C-types interface, it
exposes a discoverable API. It no longer requires knowledge of the underlying
C++ code implementation. Finally, the IPyLammps wrapper builds on top of
PyLammps and adds some additional features for IPython integration into IPython
notebooks, e.g. for embedded visualization output from dump/image.
Comparison of lammps and PyLammps interfaces :h3
lammps.lammps :h4
uses C-Types
direct memory access to native C++ data
provides functions to send and receive data to LAMMPS
requires knowledge of how LAMMPS internally works (C pointers, etc) :ul
lammps.PyLammps :h4
higher-level abstraction built on top of original C-Types interface
manipulation of Python objects
communication with LAMMPS is hidden from API user
shorter, more concise Python
better IPython integration, designed for quick prototyping :ul
Quick Start :h2
System-wide Installation :h3
Step 1: Building LAMMPS as a shared library :h4
To use LAMMPS inside of Python it has to be compiled as a shared library. This
library is then loaded by the Python interface. In this example, we use the
Make.py utility to create a Makefile with C++ exceptions, PNG, JPEG and FFMPEG
output support enabled. Finally, we also enable the MOLECULE package and compile
using the generated {auto} Makefile.
cd $LAMMPS_DIR/src :pre
# generate custom Makefile
python2 Make.py -jpg -png -s ffmpeg exceptions -m mpi -a file :pre
# add packages if necessary
make yes-MOLECULE :pre
# compile shared library using Makefile
make mode=shlib auto :pre
Step 2: Installing the LAMMPS Python package :h4
PyLammps is part of the lammps Python package. To install it simply install
that package into your current Python installation.
cd $LAMMPS_DIR/python
python install.py :pre
NOTE: Recompiling the shared library requires reinstalling the Python package
Installation inside of a virtualenv :h3
You can use virtualenv to create a custom Python environment specifically tuned
for your workflow.
Benefits of using a virtualenv :h4
isolation of your system Python installation from your development installation
installation can happen in your user directory without root access (useful for HPC clusters)
installing packages through pip allows you to get newer versions of packages than e.g., through apt-get or yum package managers (and without root access)
you can even install specific old versions of a package if necessary :ul
[Prerequisite (e.g. on Ubuntu)]
apt-get install python-virtualenv :pre
Creating a virtualenv with lammps installed :h4
# create virtualenv named 'testing'
virtualenv testing :pre
# activate 'testing' environment
source testing/bin/activate :pre
# install LAMMPS package in virtualenv
(testing) cd $LAMMPS_DIR/python
(testing) python install.py :pre
# install other useful packages
(testing) pip install matplotlib jupyter mpi4py :pre
... :pre
# return to original shell
(testing) deactivate :pre
Creating a new instance of PyLammps :h2
To create a PyLammps object you need to first import the class from the lammps
module. By using the default constructor, a new {lammps} instance is created.
from lammps import PyLammps
L = PyLammps() :pre
You can also initialize PyLammps on top of this existing {lammps} object:
from lammps import lammps, PyLammps
lmp = lammps()
L = PyLammps(ptr=lmp) :pre
Commands :h2
Sending a LAMMPS command with the existing library interfaces is done using
the command method of the lammps object instance.
For instance, let's take the following LAMMPS command:
region box block 0 10 0 5 -0.5 0.5 :pre
In the original interface this command can be executed with the following
Python code if {L} was a lammps instance:
L.command("region box block 0 10 0 5 -0.5 0.5") :pre
With the PyLammps interface, any command can be split up into arbitrary parts
separated by whitespace, passed as individual arguments to a region method.
L.region("box block", 0, 10, 0, 5, -0.5, 0.5) :pre
Note that each parameter is passed as a Python literal floating-point number. In the
PyLammps interface, each command takes an arbitrary parameter list and transparently
merges it into a single command string, separating individual parameters by whitespace.
The benefit of this approach is avoiding redundant command calls and easier
parameterization. In the original interface parametrization needed to be done
manually by creating formatted strings.
L.command("region box block %f %f %f %f %f %f" % (xlo, xhi, ylo, yhi, zlo, zhi)) :pre
In contrast, methods of PyLammps accept parameters directly and will convert
them automatically to a final command string.
L.region("box block", xlo, xhi, ylo, yhi, zlo, zhi) :pre
System state :h2
In addition to dispatching commands directly through the PyLammps object, it
also provides several properties which allow you to query the system state.
:dlb
L.system :dt
Is a dictionary describing the system such as the bounding box or number of atoms :dd
L.system.xlo, L.system.xhi :dt
bounding box limits along x-axis :dd
L.system.ylo, L.system.yhi :dt
bounding box limits along y-axis :dd
L.system.zlo, L.system.zhi :dt
bounding box limits along z-axis :dd
L.communication :dt
configuration of communication subsystem, such as the number of threads or processors :dd
L.communication.nthreads :dt
number of threads used by each LAMMPS process :dd
L.communication.nprocs :dt
number of MPI processes used by LAMMPS :dd
L.fixes :dt
List of fixes in the current system :dd
L.computes :dt
List of active computes in the current system :dd
L.dump :dt
List of active dumps in the current system :dd
L.groups :dt
List of groups present in the current system :dd
:dle
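As a short usage sketch (assuming a simulation box and atoms have already been
set up in {L}), these properties can be queried like ordinary Python attributes:
# inspect the parallel setup and the simulation box
print(L.communication.nprocs)
print(L.system.natoms)
print(L.system.xlo, L.system.xhi)
print(L.fixes) :pre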
Working with LAMMPS variables :h2
LAMMPS variables can be both defined and accessed via the PyLammps interface.
To define a variable you can use the "variable"_variable.html command:
L.variable("a index 2") :pre
A dictionary of all variables is returned by L.variables. You can access an
individual variable by retrieving a variable object from the L.variables
dictionary by name:
a = L.variables\['a'\] :pre
The variable value can then be easily read and written by accessing the value
property of this object.
print(a.value)
a.value = 4 :pre
Retrieving the value of an arbitrary LAMMPS expression :h2
LAMMPS expressions can be immediately evaluated by using the eval method. The
passed string parameter can be any expression containing global thermo values,
variables, compute or fix data.
result = L.eval("ke") # kinetic energy
result = L.eval("pe") # potential energy :pre
result = L.eval("v_t/2.0") :pre
Accessing atom data :h2
All atoms in the current simulation can be accessed by using the L.atoms list.
Each element of this list is an object which exposes its properties (id, type,
position, velocity, force, etc.).
# access first atom
L.atoms\[0\].id
L.atoms\[0\].type :pre
# access second atom
L.atoms\[1\].position
L.atoms\[1\].velocity
L.atoms\[1\].force :pre
Some properties can also be set:
# set position in 2D simulation
L.atoms\[0\].position = (1.0, 0.0) :pre
# set position in 3D simulation
L.atoms\[0\].position = (1.0, 0.0, 1.) :pre
Evaluating thermo data :h2
Each simulation run usually produces thermo output based on system state,
computes, fixes or variables. The trajectories of these values can be queried
after a run via the L.runs list. This list grows by one entry per run: the
first element is the output of the first run, the second element that of
the second run.
L.run(1000)
L.runs\[0\] # data of first 1000 time steps :pre
L.run(1000)
L.runs\[1\] # data of second 1000 time steps :pre
Each run contains a dictionary of all trajectories. Each trajectory is
accessible through its thermo name:
L.runs\[0\].step # list of time steps in first run
L.runs\[0\].ke # list of kinetic energy values in first run :pre
Together with matplotlib, plotting data out of LAMMPS becomes simple:
import matplotlib.pyplot as plt
steps = L.runs\[0\].step
ke = L.runs\[0\].ke
plt.plot(steps, ke) :pre
Error handling with PyLammps :h2
Compiling the shared library with C++ exception support provides a better error
handling experience. Without exceptions, the LAMMPS code will terminate the
current Python process with an error message. With C++ exceptions, errors are
caught on the C++ side and re-raised on the Python side, so you can handle
LAMMPS errors through the regular Python exception handling mechanism. A
minimal sketch follows the note below.
IMPORTANT NOTE: Capturing a LAMMPS exception in Python can still mean that the
current LAMMPS process is in an illegal state and must be terminated. It is
advised to save your data and terminate the Python instance as quickly as
possible.
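As a minimal sketch (assuming the shared library was built with exception
support; the exact exception class that is raised may vary between LAMMPS
versions), a LAMMPS error can then be handled like any other Python exception:
from lammps import PyLammps
L = PyLammps()
try:
    # deliberately issue an unknown command to trigger a LAMMPS error
    L.this_is_not_a_valid_command()
except Exception as err:
    # with exception support the error is re-raised here instead of
    # terminating the Python process
    print("LAMMPS reported an error:", err) :pre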
Using PyLammps in IPython notebooks and Jupyter :h2
If the LAMMPS Python package is installed for the same Python interpreter as
IPython, you can use PyLammps directly inside of an IPython notebook inside of
Jupyter. Jupyter is a powerful integrated development environment (IDE) for
many dynamic languages like Python, Julia and others, which operates inside of
any web browser. Besides auto-completion and syntax highlighting it allows you
to create formatted documents using Markdown, mathematical formulas, graphics and
animations intermixed with executable Python code. It is a great format for
tutorials and showcasing your latest research.
To launch an instance of Jupyter simply run the following command inside your
Python environment (this assumes you followed the Quick Start instructions):
jupyter notebook :pre
IPyLammps Examples :h2
Examples of IPython notebooks can be found in the python/examples/pylammps
subdirectory. To open these notebooks launch {jupyter notebook} inside this
directory and navigate to one of them. If you compiled and installed
a LAMMPS shared library with exceptions, PNG, JPEG and FFMPEG support,
you should be able to rerun all of these notebooks.
Validating a dihedral potential :h3
This example showcases how an IPython Notebook can be used to compare a simple
LAMMPS simulation of a harmonic dihedral potential to its analytical solution.
Four atoms are placed in the simulation and the dihedral potential is applied on
them using a data file. Then one of the atoms is rotated around the central axis by
setting its position from Python, which changes the dihedral angle.
phi = \[d * math.pi / 180 for d in range(360)\] :pre
pos = \[(1.0, math.cos(p), math.sin(p)) for p in phi\] :pre
pe = \[\]
for p in pos:
    L.atoms\[3\].position = p
    L.run(0)
    pe.append(L.eval("pe")) :pre
By evaluating the potential energy for each position we can compare the
simulated energy profile with the analytical formula. To compare both solutions,
we plot both curves on top of each other using matplotlib, which embeds the
generated plot inside the IPython notebook, as in the sketch below.
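A minimal plotting sketch of that comparison follows. The harmonic dihedral form
E = K*(1 + d*cos(n*phi)) is assumed, and the values of {K}, {d} and {n} below are
placeholders; the actual parameters are defined in the notebook's data file.
import math
import matplotlib.pyplot as plt :pre
K, d, n = 80.0, -1, 2   # placeholder parameters, not the notebook's values
analytical = \[K * (1.0 + d * math.cos(n * p)) for p in phi\]
degrees = \[p * 180.0 / math.pi for p in phi\] :pre
plt.plot(degrees, pe, label="LAMMPS")
plt.plot(degrees, analytical, "--", label="analytical")
plt.xlabel("dihedral angle (degrees)")
plt.ylabel("potential energy")
plt.legend() :pre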
:c,image(JPG/pylammps_dihedral.jpg)
Running a Monte Carlo relaxation :h3
This second example shows how to use PyLammps to create a 2D Monte Carlo Relaxation
simulation, computing and plotting energy terms and even embedding video output.
Initially, a 2D system is created in a state with minimal energy.
:c,image(JPG/pylammps_mc_minimum.jpg)
It is then disordered by moving each atom by a random delta.
random.seed(27848)
deltaperturb = 0.2 :pre
for i in range(L.system.natoms):
    x, y = L.atoms\[i\].position
    dx = deltaperturb * random.uniform(-1, 1)
    dy = deltaperturb * random.uniform(-1, 1)
    L.atoms\[i\].position = (x+dx, y+dy) :pre
L.run(0) :pre
:c,image(JPG/pylammps_mc_disordered.jpg)
Finally, the Monte Carlo algorithm is implemented in Python. It repeatedly
displaces a randomly chosen atom by a random delta and accepts or rejects the
move based on the resulting change in potential energy.
estart = L.eval("pe")
elast = estart :pre
naccept = 0
energies = \[estart\] :pre
niterations = 3000
deltamove = 0.1
kT = 0.05 :pre
natoms = L.system.natoms :pre
for i in range(niterations):
    iatom = random.randrange(0, natoms)
    current_atom = L.atoms\[iatom\] :pre
    x0, y0 = current_atom.position :pre
    dx = deltamove * random.uniform(-1, 1)
    dy = deltamove * random.uniform(-1, 1) :pre
    current_atom.position = (x0+dx, y0+dy) :pre
    L.run(1, "pre no post no") :pre
    e = L.eval("pe")
    energies.append(e) :pre
    if e <= elast:
        naccept += 1
        elast = e
    elif random.random() <= math.exp(natoms*(elast-e)/kT):
        naccept += 1
        elast = e
    else:
        current_atom.position = (x0, y0) :pre
The energies of each iteration are collected in a Python list and finally plotted using matplotlib.
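A minimal plotting sketch:
import matplotlib.pyplot as plt
plt.plot(energies)
plt.xlabel("iteration")
plt.ylabel("potential energy")
plt.show() :pre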
:c,image(JPG/pylammps_mc_energies_plot.jpg)
The IPython notebook also shows how to use dump commands and embed video files
inside of the IPython notebook.
Using PyLammps and mpi4py (Experimental) :h2
PyLammps can be run in parallel using mpi4py. This Python package can be installed using:
pip install mpi4py :pre
The following is a short example which reads in an existing LAMMPS input file and
executes it in parallel. You can find in.melt in the examples/melt folder.
from mpi4py import MPI
from lammps import PyLammps :pre
L = PyLammps()
L.file("in.melt") :pre
if MPI.COMM_WORLD.rank == 0:
    print("Potential energy: ", L.eval("pe")) :pre
MPI.Finalize() :pre
To run this script (melt.py) in parallel using 4 MPI processes we invoke the
following mpirun command:
mpirun -np 4 python melt.py :pre
IMPORTANT NOTE: Any command must be executed by all MPI processes. However, evaluating expressions and querying the system state are only available on rank 0, as in the sketch below.
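As a tiny sketch of this pattern (extending the melt.py example above; the extra
run length is arbitrary):
L.run(100)                       # commands are executed by every MPI rank
if MPI.COMM_WORLD.rank == 0:
    print("Atoms:", L.system.natoms)   # queries only on rank 0 :pre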
Feedback and Contributing :h2
If you find this Python interface useful, please feel free to provide feedback
and ideas on how to improve it to Richard Berger (richard.berger@temple.edu). We also
want to encourage people to write tutorial-style IPython notebooks showcasing LAMMPS usage
and maybe their latest research results.

View File

@ -7,6 +7,7 @@ Tutorials :h1
tutorial_drude
tutorial_github
tutorial_pylammps
body
manifolds

View File

@ -0,0 +1,64 @@
units lj
lattice fcc 0.8442
boundary f f f
region ball sphere 0.0 0.0 0.0 5.0
region box block -10 10 -10 10 -10 10
region half block -10 0 -10 10 -10 10
# add molecule ids so we can use chunk/atom
fix 0 all property/atom mol ghost yes
create_box 2 box
create_atoms 1 region ball
pair_style lj/cut 4.0
pair_coeff * * 1.0 1.0
mass * 1.0
set group all mol 1
# label half the sphere with a different type for better visualization
set region half type 2
# use a dynamic group (may require a patch to fix addtorque with older versions of LAMMPS)
group ball dynamic all region ball
neigh_modify delay 2 every 1 check yes
minimize 0.0 0.0 1000 10000
reset_timestep 0
velocity all create 1.2 12351235
fix 1 all nve
fix 2 all wall/reflect xlo EDGE xhi EDGE ylo EDGE yhi EDGE zlo EDGE zhi EDGE
compute 1 all chunk/atom molecule
compute 2 ball omega/chunk 1
compute 3 ball inertia/chunk 1
# compute rotational kinetic energy: 1/2 * I * omega**2
variable r_ke_x equal c_2[1][1]*c_2[1][1]*c_3[1][1]*0.5
variable r_ke_y equal c_2[1][2]*c_2[1][2]*c_3[1][2]*0.5
variable r_ke_z equal c_2[1][3]*c_2[1][3]*c_3[1][3]*0.5
# output moments of inertia for x,y,z and angular velocity as well as rotational kinetic energy
thermo_style custom step ke pe c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
thermo 500
# dump 1 all atom 100 dump.lammpstrj
# dump 2 all movie 10 ball.mp4 type mass
# equilibration w/o torque added
run 1000 post no
# start spinning the ball. rotation around z should increase and Erot_z should grow
fix 4 ball addtorque 0.0 0.0 200.0
run 10000 upto post no
# continue without adding more torque. rotation continues at fixed speed
unfix 4
run 5000

View File

@ -0,0 +1,202 @@
LAMMPS (9 Nov 2016)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
units lj
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
boundary f f f
region ball sphere 0.0 0.0 0.0 5.0
region box block -10 10 -10 10 -10 10
region half block -10 0 -10 10 -10 10
# add molecule ids so we can use chunk/atom
fix 0 all property/atom mol ghost yes
create_box 2 box
Created orthogonal box = (-16.796 -16.796 -16.796) to (16.796 16.796 16.796)
1 by 1 by 1 MPI processor grid
create_atoms 1 region ball
Created 2123 atoms
pair_style lj/cut 4.0
pair_coeff * * 1.0 1.0
mass * 1.0
set group all mol 1
2123 settings made for mol
# label half the sphere with a different type for better visualization
set region half type 2
1142 settings made for type
# use a dynamic group (may require a patch to fix addtorque with older versions of LAMMPS)
group ball dynamic all region ball
dynamic group ball defined
neigh_modify delay 2 every 1 check yes
minimize 0.0 0.0 1000 10000
WARNING: Resetting reneighboring criteria during minimization (../min.cpp:168)
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15 -> bins = 16 16 16
Memory usage per processor = 3.68322 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.2285099 0 -6.2285099 -0.38871568
1000 0 -7.3616908 0 -7.3616908 -9.1828951e-16
Loop time of 12.4181 on 1 procs for 1000 steps with 2123 atoms
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
-6.22850993032 -7.36169083402 -7.36169083402
Force two-norm initial, final = 197.762 3.3539e-12
Force max component initial, final = 7.88704 1.52475e-13
Final line search alpha, max atom move = 1 1.52475e-13
Iterations, force evaluations = 1000 1994
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 12.282 | 12.282 | 12.282 | 0.0 | 98.91
Neigh | 0.06315 | 0.06315 | 0.06315 | 0.0 | 0.51
Comm | 0.000443 | 0.000443 | 0.000443 | 0.0 | 0.00
Output | 2.7e-05 | 2.7e-05 | 2.7e-05 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.07231 | | | 0.58
Nlocal: 2123 ave 2123 max 2123 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 270799 ave 270799 max 270799 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 270799
Ave neighs/atom = 127.555
Neighbor list builds = 11
Dangerous builds = 0
reset_timestep 0
velocity all create 1.2 12351235
fix 1 all nve
fix 2 all wall/reflect xlo EDGE xhi EDGE ylo EDGE yhi EDGE zlo EDGE zhi EDGE
compute 1 all chunk/atom molecule
compute 2 ball omega/chunk 1
compute 3 ball inertia/chunk 1
# compute rotational kinetic energy: 1/2 * I * omega**2
variable r_ke_x equal c_2[1][1]*c_2[1][1]*c_3[1][1]*0.5
variable r_ke_y equal c_2[1][2]*c_2[1][2]*c_3[1][2]*0.5
variable r_ke_z equal c_2[1][3]*c_2[1][3]*c_3[1][3]*0.5
# output moments of inertia for x,y,z and angular velocity as well as rotational kinertic energy
thermo_style custom step ke pe c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
thermo 500
# dump 1 all atom 100 dump.lammpstrj
# dump 2 all movie 10 ball.mp4 type mass
# equilibration w/o torque added
run 1000 post no
WARNING: One or more dynamic groups may not be updated at correct point in timestep (../fix_group.cpp:153)
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15 -> bins = 16 16 16
Memory usage per processor = 2.68359 Mbytes
Step KinEng PotEng c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
0 1.7991521 -7.3616908 51125.265 51125.265 51125.265 0.0034331372 -0.0045852283 0.0091015032 0.30129221 0.53743693 2.117541
500 0.8882476 -6.43927 52585.112 52205.936 52112.109 0.0033058719 -0.0043753253 0.0089502237 0.2873458 0.49970143 2.0872595
1000 0.79407357 -6.337372 53754.145 54093.977 53787.624 0.0033214912 -0.0042125031 0.0090171557 0.29651603 0.47995375 2.1867116
Loop time of 4.26119 on 1 procs for 1000 steps with 2123 atoms
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
# start spinning the ball. rotation around z should increase and Erot_z should grow
fix 4 ball addtorque 0.0 0.0 200.0
run 10000 upto post no
WARNING: One or more dynamic groups may not be updated at correct point in timestep (../fix_group.cpp:153)
Memory usage per processor = 2.68359 Mbytes
Step KinEng PotEng c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
1000 0.79407357 -6.337372 53754.145 54093.977 53787.624 0.0033214912 -0.0042125031 0.0090171557 0.29651603 0.47995375 2.1867116
1500 0.68685306 -6.2226287 55026.889 54809.958 55224.858 0.0026096779 -0.0039390202 0.01797948 0.18737807 0.42521238 8.9260406
2000 0.65370325 -6.1832475 54914.897 55655.542 55405.781 0.0029310978 -0.0040761978 0.025816421 0.23589612 0.46236922 18.463634
2500 0.69337585 -6.2170462 54604.66 54800.001 54488.865 0.0028821313 -0.0045216915 0.035781895 0.22679174 0.56021203 34.882245
3000 0.76778067 -6.2850756 53423.198 53620.349 53692.133 0.004088872 -0.004451787 0.044703139 0.44658786 0.53133496 53.64839
3500 0.79707238 -6.3044974 53055.225 53071.129 52927.537 0.0036751739 -0.0037584362 0.054889715 0.3583059 0.3748372 79.732181
4000 0.80746429 -6.3010044 53519.853 53643.284 54058.105 0.003813517 -0.0041637733 0.062983015 0.38916725 0.46500703 107.22047
4500 0.81206394 -6.2884719 53371.354 53883.202 53854.559 0.00385001 -0.003643712 0.072544638 0.39555051 0.35769386 141.71085
5000 0.81648687 -6.2737414 53776.764 54233.367 53762.425 0.0024484228 -0.0043310965 0.080673643 0.16118978 0.50866551 174.94929
5500 0.81888245 -6.2572796 53908.22 53502.342 54717.506 0.0037110524 -0.00327586 0.088836946 0.37120958 0.28707375 215.91536
6000 0.86533749 -6.2804248 53687.533 53571.135 53536.171 0.0025223465 -0.0047753224 0.099646475 0.17078626 0.61081016 265.79156
6500 0.88029206 -6.2719195 53344.67 53291.068 53298.665 0.003937416 -0.0033910578 0.10778737 0.41350774 0.30640427 309.61504
7000 0.9066019 -6.2714707 53928.646 53524.142 54003.175 0.0028500736 -0.0039730774 0.11855069 0.21902903 0.4224485 379.4875
7500 0.94601421 -6.2819912 53534.525 53547.598 53851.344 0.0028610722 -0.0049440438 0.12716472 0.21910969 0.6544472 435.41142
8000 0.9562253 -6.2626222 53486.577 53033.175 53858.803 0.0025501008 -0.0048075887 0.13526164 0.17391198 0.61287549 492.69254
8500 0.99679401 -6.2691139 53566.766 53721.523 53831.283 0.0034137155 -0.0039265 0.14392854 0.3121189 0.41412316 557.56894
9000 1.0371056 -6.2761647 53415.879 53605.078 53985.02 0.0029321914 -0.0046362889 0.1518846 0.2296281 0.57612526 622.6884
9500 1.0598491 -6.26216 53493.003 53049.859 53699.774 0.0032127038 -0.0050624912 0.16002437 0.27606311 0.67980256 687.56651
10000 1.1014855 -6.2654945 53418.49 53251.964 53867.56 0.0037330249 -0.0056278017 0.17103133 0.37220602 0.84330216 787.85924
Loop time of 43.025 on 1 procs for 9000 steps with 2123 atoms
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
# continue without adding more torque. rotation continues at fixed speed
unfix 4
run 5000
WARNING: One or more dynamic groups may not be updated at correct point in timestep (../fix_group.cpp:153)
Memory usage per processor = 2.68359 Mbytes
Step KinEng PotEng c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
10000 1.1014855 -6.2654945 53418.49 53251.964 53867.56 0.0037330249 -0.0056278017 0.17103133 0.37220602 0.84330216 787.85924
10500 1.07259 -6.2363698 52825.233 52896.327 55753.551 0.0035928335 -0.0050843805 0.16344484 0.34094601 0.68370948 744.70621
11000 1.0644214 -6.2282099 52016.795 51950.497 54922.101 0.0047316668 -0.0050149935 0.16196531 0.58229343 0.65328165 720.37919
11500 1.0887339 -6.2525622 52602.789 52903.594 54461.53 0.0044295697 -0.0046710153 0.16698036 0.51606197 0.57713546 759.26022
12000 1.0707466 -6.234719 52785.654 52997.192 54943.066 0.0057389353 -0.0030340721 0.16553451 0.86925773 0.2439353 752.76594
12500 1.0758302 -6.2397291 52375.734 52783.309 55011.986 0.0047029783 -0.0023526884 0.16493895 0.57922337 0.14608158 748.29657
13000 1.0716957 -6.2354221 52711.183 52788.224 55234.737 0.0034033406 -0.00206283 0.16427569 0.30526962 0.11231401 745.29615
13500 1.083667 -6.2475953 52698.902 52203.324 55102.881 0.0032012254 -0.0021366488 0.16381832 0.27002507 0.11916109 739.38261
14000 1.085106 -6.2490655 52767.613 52353.974 55225.438 0.0025647931 -0.0022235227 0.1636534 0.17355699 0.12942041 739.53587
14500 1.0838261 -6.2477856 52292.343 51995.567 54735.836 0.001794954 -0.0029396951 0.16409339 0.084239299 0.22466783 736.92607
15000 1.0827419 -6.2468971 51917.584 51388.833 54481.681 0.0017979486 -0.0025793756 0.16196568 0.083914884 0.17094953 714.60575
Loop time of 22.7848 on 1 procs for 5000 steps with 2123 atoms
Performance: 94800.138 tau/day, 219.445 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 18.891 | 18.891 | 18.891 | 0.0 | 82.91
Neigh | 3.5735 | 3.5735 | 3.5735 | 0.0 | 15.68
Comm | 0.005778 | 0.005778 | 0.005778 | 0.0 | 0.03
Output | 0.001862 | 0.001862 | 0.001862 | 0.0 | 0.01
Modify | 0.27476 | 0.27476 | 0.27476 | 0.0 | 1.21
Other | | 0.0379 | | | 0.17
Nlocal: 2123 ave 2123 max 2123 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 242472 ave 242472 max 242472 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 242472
Ave neighs/atom = 114.212
Neighbor list builds = 560
Dangerous builds = 0
Total wall time: 0:01:22

View File

@ -0,0 +1,202 @@
LAMMPS (9 Nov 2016)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
units lj
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
boundary f f f
region ball sphere 0.0 0.0 0.0 5.0
region box block -10 10 -10 10 -10 10
region half block -10 0 -10 10 -10 10
# add molecule ids so we can use chunk/atom
fix 0 all property/atom mol ghost yes
create_box 2 box
Created orthogonal box = (-16.796 -16.796 -16.796) to (16.796 16.796 16.796)
1 by 2 by 2 MPI processor grid
create_atoms 1 region ball
Created 2123 atoms
pair_style lj/cut 4.0
pair_coeff * * 1.0 1.0
mass * 1.0
set group all mol 1
2123 settings made for mol
# label half the sphere with a different type for better visualization
set region half type 2
1142 settings made for type
# use a dynamic group (may require a patch to fix addtorque with older versions of LAMMPS)
group ball dynamic all region ball
dynamic group ball defined
neigh_modify delay 2 every 1 check yes
minimize 0.0 0.0 1000 10000
WARNING: Resetting reneighboring criteria during minimization (../min.cpp:168)
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15 -> bins = 16 16 16
Memory usage per processor = 3.77014 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -6.2285099 0 -6.2285099 -0.38871568
1000 0 -7.3616908 0 -7.3616908 -9.7399049e-16
Loop time of 8.29581 on 4 procs for 1000 steps with 2123 atoms
89.4% CPU use with 4 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = max iterations
Energy initial, next-to-last, final =
-6.22850993032 -7.36169083402 -7.36169083402
Force two-norm initial, final = 197.762 3.40861e-12
Force max component initial, final = 7.88704 1.60379e-13
Final line search alpha, max atom move = 1 1.60379e-13
Iterations, force evaluations = 1000 1994
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.8462 | 6.8198 | 8.0872 | 34.0 | 82.21
Neigh | 0.021211 | 0.028936 | 0.035891 | 3.8 | 0.35
Comm | 0.10672 | 1.3842 | 2.3694 | 76.2 | 16.69
Output | 2.8e-05 | 3.35e-05 | 5e-05 | 0.2 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.0629 | | | 0.76
Nlocal: 530.75 ave 543 max 514 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Nghost: 1058.25 ave 1075 max 1046 min
Histogram: 1 0 0 2 0 0 0 0 0 1
Neighs: 67699.8 ave 82013 max 55681 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Total # of neighbors = 270799
Ave neighs/atom = 127.555
Neighbor list builds = 11
Dangerous builds = 0
reset_timestep 0
velocity all create 1.2 12351235
fix 1 all nve
fix 2 all wall/reflect xlo EDGE xhi EDGE ylo EDGE yhi EDGE zlo EDGE zhi EDGE
compute 1 all chunk/atom molecule
compute 2 ball omega/chunk 1
compute 3 ball inertia/chunk 1
# compute rotational kinetic energy: 1/2 * I * omega**2
variable r_ke_x equal c_2[1][1]*c_2[1][1]*c_3[1][1]*0.5
variable r_ke_y equal c_2[1][2]*c_2[1][2]*c_3[1][2]*0.5
variable r_ke_z equal c_2[1][3]*c_2[1][3]*c_3[1][3]*0.5
# output moments of inertia for x,y,z and angular velocity as well as rotational kinertic energy
thermo_style custom step ke pe c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
thermo 500
# dump 1 all atom 100 dump.lammpstrj
# dump 2 all movie 10 ball.mp4 type mass
# equilibration w/o torque added
run 1000 post no
WARNING: One or more dynamic groups may not be updated at correct point in timestep (../fix_group.cpp:153)
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.3
ghost atom cutoff = 4.3
binsize = 2.15 -> bins = 16 16 16
Memory usage per processor = 2.77052 Mbytes
Step KinEng PotEng c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
0 1.7991521 -7.3616908 51125.265 51125.265 51125.265 0.0040198435 -0.0024237256 0.0011306245 0.4130702 0.15016629 0.032677011
500 0.90968729 -6.4603004 52559.075 52593.598 52370.876 0.0039073429 -0.0023505431 0.0011032256 0.40121833 0.14529121 0.031870471
1000 0.80269935 -6.3456699 53966.859 54192.03 54298.931 0.0040875347 -0.0023098179 0.00077010445 0.45083753 0.14456425 0.016101285
Loop time of 2.3842 on 4 procs for 1000 steps with 2123 atoms
89.0% CPU use with 4 MPI tasks x 1 OpenMP threads
# start spinning the ball. rotation around z should increase and Erot_z should grow
fix 4 ball addtorque 0.0 0.0 200.0
run 10000 upto post no
WARNING: One or more dynamic groups may not be updated at correct point in timestep (../fix_group.cpp:153)
Memory usage per processor = 2.77052 Mbytes
Step KinEng PotEng c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
1000 0.80269935 -6.3456699 53966.859 54192.03 54298.931 0.0040875347 -0.0023098179 0.00077010445 0.45083753 0.14456425 0.016101285
1500 0.68922642 -6.2266953 54966.109 54681.607 54947.256 0.0038798951 -0.0016325797 0.0099837591 0.41371853 0.072871893 2.7384461
2000 0.64105167 -6.1739656 54782.995 54570.486 54910.316 0.0034645832 -0.0027243304 0.017763588 0.32878936 0.20251043 8.6633395
2500 0.69929409 -6.2287557 54307.47 53952.925 54538.409 0.0035199565 -0.0022538001 0.028279733 0.33643732 0.13703004 21.808363
3000 0.77013056 -6.2945597 53916.743 53801.81 53955.863 0.0039732989 -0.0024543292 0.037182149 0.42559463 0.16204384 37.297319
3500 0.80807105 -6.3246605 53451.163 53387.178 53474.789 0.0043137676 -0.0020556348 0.047270147 0.49732542 0.11279735 59.743821
4000 0.81007199 -6.3142362 53334.317 53243.089 53685.963 0.0038548696 -0.0031009535 0.055811043 0.3962745 0.25599044 83.612467
4500 0.7850954 -6.2735818 53738.002 53682.367 53639.471 0.0033046861 -0.0018472801 0.065975851 0.29343502 0.091594032 116.74129
5000 0.77992741 -6.2508277 53864.644 53804.867 53877.025 0.0038258186 -0.0028703189 0.073848203 0.39420539 0.2216419 146.91071
5500 0.79428302 -6.2467907 54353.329 53987.578 54234.062 0.0034715133 -0.0030161617 0.082746129 0.32751699 0.24556875 185.66819
6000 0.82211943 -6.2549436 54273.545 53195.299 54061.645 0.0030929934 -0.0031282112 0.090458895 0.25960687 0.26027676 221.18813
6500 0.87630771 -6.2870988 54042.229 53505.982 54492.239 0.0026486452 -0.0024783378 0.10046947 0.18956181 0.1643211 275.02546
7000 0.88614639 -6.2727488 53701.993 52682.206 53948.142 0.0035384498 -0.0035905797 0.11030427 0.33619131 0.33959641 328.19439
7500 0.92102182 -6.2787281 53410.068 52577.853 53132.511 0.0033084315 -0.0031776605 0.11973875 0.2923058 0.26545312 380.8902
8000 0.94010525 -6.2697758 53732.562 53384.271 53795.933 0.0046460009 -0.0032755993 0.12803006 0.5799174 0.28639462 440.90328
8500 0.95359399 -6.252319 53444.305 53558.444 53789.691 0.0037919474 -0.0035729209 0.13616177 0.38423423 0.34185722 498.63117
9000 0.98631627 -6.25197 53115.01 54017.327 53955.578 0.0045084495 -0.0034368377 0.14488919 0.53981096 0.31902236 566.34143
9500 1.0328238 -6.2632597 53287.675 53682.978 53769.692 0.0044595175 -0.0025931203 0.15416363 0.5298739 0.1804895 638.95665
10000 1.0741755 -6.2667785 53103.657 53319.569 53601.523 0.0059237675 -0.0019561182 0.16445664 0.93173079 0.10201096 724.85302
Loop time of 21.8398 on 4 procs for 9000 steps with 2123 atoms
90.3% CPU use with 4 MPI tasks x 1 OpenMP threads
# continue without adding more torque. rotation continues at fixed speed
unfix 4
run 5000
WARNING: One or more dynamic groups may not be updated at correct point in timestep (../fix_group.cpp:153)
Memory usage per processor = 2.77052 Mbytes
Step KinEng PotEng c_3[1][1] c_3[1][2] c_3[1][3] c_2[1][1] c_2[1][2] c_2[1][3] v_r_ke_x v_r_ke_y v_r_ke_z
10000 1.0741755 -6.2667785 53103.657 53319.569 53601.523 0.0059237675 -0.0019561182 0.16445664 0.93173079 0.10201096 724.85302
10500 1.0588571 -6.2509381 52374.303 52131.544 55020.367 0.0048843769 -0.0018716797 0.15729531 0.62475047 0.091313217 680.65188
11000 1.0554911 -6.2471863 52803.345 52177.891 55200.756 0.0042073234 -0.0024283269 0.15742315 0.46735107 0.15384055 683.99392
11500 1.0559499 -6.2469955 54031.351 52919.728 54882.35 0.0046703444 -0.0016225764 0.15994353 0.58926908 0.06966232 701.99832
12000 1.0311805 -6.2224471 52812.51 52444.13 55356.101 0.0044986993 -0.0019225732 0.15672327 0.53441759 0.096924293 679.83334
12500 1.0423882 -6.2339087 52000.562 52043.013 55003.272 0.0038688875 -0.0022935053 0.1548654 0.38917977 0.13687746 659.57977
13000 1.0548857 -6.2465445 52196.499 52869.024 54622.553 0.0036650563 -0.0025542156 0.1590498 0.35056832 0.17245921 690.88895
13500 1.0443009 -6.2360149 51921.746 53124.078 54750.325 0.0052756473 -0.0011658069 0.15689119 0.72255483 0.036100621 673.83538
14000 1.0505583 -6.241923 51861.696 52153.234 54321.531 0.0037119391 -0.00045576703 0.15738082 0.35728798 0.0054167284 672.73745
14500 1.040343 -6.2316147 52035.588 51680.479 54443.305 0.0026177168 -0.0014795729 0.15428968 0.1782854 0.056567797 648.01978
15000 1.0404962 -6.2322338 52376.795 51327.453 54677.693 0.0025711787 -0.0021695312 0.15403509 0.17313044 0.12079571 648.66363
Loop time of 11.9695 on 4 procs for 5000 steps with 2123 atoms
Performance: 180458.440 tau/day, 417.728 timesteps/s
89.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 7.0313 | 8.4017 | 9.6932 | 35.5 | 70.19
Neigh | 0.97886 | 1.3633 | 1.7406 | 26.8 | 11.39
Comm | 0.34002 | 2.0227 | 3.7911 | 94.9 | 16.90
Output | 0.001198 | 0.0015522 | 0.002578 | 1.5 | 0.01
Modify | 0.12841 | 0.131 | 0.13336 | 0.5 | 1.09
Other | | 0.04924 | | | 0.41
Nlocal: 530.75 ave 534 max 529 min
Histogram: 1 0 2 0 0 0 0 0 0 1
Nghost: 1038 ave 1048 max 1029 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 60482.5 ave 72547 max 49124 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Total # of neighbors = 241930
Ave neighs/atom = 113.957
Neighbor list builds = 555
Dangerous builds = 0
Total wall time: 0:00:44

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,197 @@
Data File created from VASP POSCAR
181 atoms
1 atom types
0 17.121440767 xlo xhi
0 14.8276026536 ylo yhi
0 39.3197318979 zlo zhi
Masses
1 26.982
Atoms
1 1 0.0 1.64751140595 15.0
2 1 2.85357346116 1.64751140595 15.0
3 1 5.70714692232 1.64751140595 15.0
4 1 8.56072038348 1.64751140595 15.0
5 1 11.4142938446 1.64751140595 15.0
6 1 14.2678673058 1.64751140595 15.0
7 1 1.42678673058 4.11877851488 15.0
8 1 4.28036019174 4.11877851488 15.0
9 1 7.1339336529 4.11877851488 15.0
10 1 9.98750711406 4.11877851488 15.0
11 1 12.8410805752 4.11877851488 15.0
12 1 15.6946540364 4.11877851488 15.0
13 1 0.0 6.59004562381 15.0
14 1 2.85357346116 6.59004562381 15.0
15 1 5.70714692232 6.59004562381 15.0
16 1 8.56072038348 6.59004562381 15.0
17 1 11.4142938446 6.59004562381 15.0
18 1 14.2678673058 6.59004562381 15.0
19 1 1.42678673058 9.06131273274 15.0
20 1 4.28036019174 9.06131273274 15.0
21 1 7.1339336529 9.06131273274 15.0
22 1 9.98750711406 9.06131273274 15.0
23 1 12.8410805752 9.06131273274 15.0
24 1 15.6946540364 9.06131273274 15.0
25 1 0.0 11.5325798417 15.0
26 1 2.85357346116 11.5325798417 15.0
27 1 5.70714692232 11.5325798417 15.0
28 1 8.56072038348 11.5325798417 15.0
29 1 11.4142938446 11.5325798417 15.0
30 1 14.2678673058 11.5325798417 15.0
31 1 1.42678673058 14.0038469506 15.0
32 1 4.28036019174 14.0038469506 15.0
33 1 7.1339336529 14.0038469506 15.0
34 1 9.98750711406 14.0038469506 15.0
35 1 12.8410805752 14.0038469506 15.0
36 1 15.6946540364 14.0038469506 15.0
37 1 0.0 0.0 17.3299329745
38 1 2.85357346116 0.0 17.3299329745
39 1 5.70714692232 0.0 17.3299329745
40 1 8.56072038348 0.0 17.3299329745
41 1 11.4142938446 0.0 17.3299329745
42 1 14.2678673058 0.0 17.3299329745
43 1 1.42678673058 2.47126710893 17.3299329745
44 1 4.28036019174 2.47126710893 17.3299329745
45 1 7.1339336529 2.47126710893 17.3299329745
46 1 9.98750711406 2.47126710893 17.3299329745
47 1 12.8410805752 2.47126710893 17.3299329745
48 1 15.6946540364 2.47126710893 17.3299329745
49 1 0.0 4.94253421786 17.3299329745
50 1 2.85357346116 4.94253421786 17.3299329745
51 1 5.70714692232 4.94253421786 17.3299329745
52 1 8.56072038348 4.94253421786 17.3299329745
53 1 11.4142938446 4.94253421786 17.3299329745
54 1 14.2678673058 4.94253421786 17.3299329745
55 1 1.42678673058 7.41380132679 17.3299329745
56 1 4.28036019174 7.41380132679 17.3299329745
57 1 7.1339336529 7.41380132679 17.3299329745
58 1 9.98750711406 7.41380132679 17.3299329745
59 1 12.8410805752 7.41380132679 17.3299329745
60 1 15.6946540364 7.41380132679 17.3299329745
61 1 0.0 9.88506843572 17.3299329745
62 1 2.85357346116 9.88506843572 17.3299329745
63 1 5.70714692232 9.88506843572 17.3299329745
64 1 8.56072038348 9.88506843572 17.3299329745
65 1 11.4142938446 9.88506843572 17.3299329745
66 1 14.2678673058 9.88506843572 17.3299329745
67 1 1.42678673058 12.3563355446 17.3299329745
68 1 4.28036019174 12.3563355446 17.3299329745
69 1 7.1339336529 12.3563355446 17.3299329745
70 1 9.98750711406 12.3563355446 17.3299329745
71 1 12.8410805752 12.3563355446 17.3299329745
72 1 15.6946540364 12.3563355446 17.3299329745
73 1 1.42678673058 0.823755702976 19.6598659489
74 1 4.28036019174 0.823755702976 19.6598659489
75 1 7.1339336529 0.823755702976 19.6598659489
76 1 9.98750711406 0.823755702976 19.6598659489
77 1 12.8410805752 0.823755702976 19.6598659489
78 1 15.6946540364 0.823755702976 19.6598659489
79 1 0.0 3.29502281191 19.6598659489
80 1 2.85357346116 3.29502281191 19.6598659489
81 1 5.70714692232 3.29502281191 19.6598659489
82 1 8.56072038348 3.29502281191 19.6598659489
83 1 11.4142938446 3.29502281191 19.6598659489
84 1 14.2678673058 3.29502281191 19.6598659489
85 1 1.42678673058 5.76628992084 19.6598659489
86 1 4.28036019174 5.76628992084 19.6598659489
87 1 7.1339336529 5.76628992084 19.6598659489
88 1 9.98750711406 5.76628992084 19.6598659489
89 1 12.8410805752 5.76628992084 19.6598659489
90 1 15.6946540364 5.76628992084 19.6598659489
91 1 0.0 8.23755702976 19.6598659489
92 1 2.85357346116 8.23755702976 19.6598659489
93 1 5.70714692232 8.23755702976 19.6598659489
94 1 8.56072038348 8.23755702976 19.6598659489
95 1 11.4142938446 8.23755702976 19.6598659489
96 1 14.2678673058 8.23755702976 19.6598659489
97 1 1.42678673058 10.7088241387 19.6598659489
98 1 4.28036019174 10.7088241387 19.6598659489
99 1 7.1339336529 10.7088241387 19.6598659489
100 1 9.98750711406 10.7088241387 19.6598659489
101 1 12.8410805752 10.7088241387 19.6598659489
102 1 15.6946540364 10.7088241387 19.6598659489
103 1 0.0 13.1800912476 19.6598659489
104 1 2.85357346116 13.1800912476 19.6598659489
105 1 5.70714692232 13.1800912476 19.6598659489
106 1 8.56072038348 13.1800912476 19.6598659489
107 1 11.4142938446 13.1800912476 19.6598659489
108 1 14.2678673058 13.1800912476 19.6598659489
109 1 0.0 1.64751140595 21.9897989234
110 1 2.85357346116 1.64751140595 21.9897989234
111 1 5.70714692232 1.64751140595 21.9897989234
112 1 8.56072038348 1.64751140595 21.9897989234
113 1 11.4142938446 1.64751140595 21.9897989234
114 1 14.2678673058 1.64751140595 21.9897989234
115 1 1.42678673058 4.11877851488 21.9897989234
116 1 4.28036019174 4.11877851488 21.9897989234
117 1 7.1339336529 4.11877851488 21.9897989234
118 1 9.98750711406 4.11877851488 21.9897989234
119 1 12.8410805752 4.11877851488 21.9897989234
120 1 15.6946540364 4.11877851488 21.9897989234
121 1 0.0 6.59004562381 21.9897989234
122 1 2.85357346116 6.59004562381 21.9897989234
123 1 5.70714692232 6.59004562381 21.9897989234
124 1 8.56072038348 6.59004562381 21.9897989234
125 1 11.4142938446 6.59004562381 21.9897989234
126 1 14.2678673058 6.59004562381 21.9897989234
127 1 1.42678673058 9.06131273274 21.9897989234
128 1 4.28036019174 9.06131273274 21.9897989234
129 1 7.1339336529 9.06131273274 21.9897989234
130 1 9.98750711406 9.06131273274 21.9897989234
131 1 12.8410805752 9.06131273274 21.9897989234
132 1 15.6946540364 9.06131273274 21.9897989234
133 1 0.0 11.5325798417 21.9897989234
134 1 2.85357346116 11.5325798417 21.9897989234
135 1 5.70714692232 11.5325798417 21.9897989234
136 1 8.56072038348 11.5325798417 21.9897989234
137 1 11.4142938446 11.5325798417 21.9897989234
138 1 14.2678673058 11.5325798417 21.9897989234
139 1 1.42678673058 14.0038469506 21.9897989234
140 1 4.28036019174 14.0038469506 21.9897989234
141 1 7.1339336529 14.0038469506 21.9897989234
142 1 9.98750711406 14.0038469506 21.9897989234
143 1 12.8410805752 14.0038469506 21.9897989234
144 1 15.6946540364 14.0038469506 21.9897989234
145 1 0.0 0.0 24.3197318979
146 1 2.85357346116 0.0 24.3197318979
147 1 5.70714692232 0.0 24.3197318979
148 1 8.56072038348 0.0 24.3197318979
149 1 11.4142938446 0.0 24.3197318979
150 1 14.2678673058 0.0 24.3197318979
151 1 1.42678673058 2.47126710893 24.3197318979
152 1 4.28036019174 2.47126710893 24.3197318979
153 1 7.1339336529 2.47126710893 24.3197318979
154 1 9.98750711406 2.47126710893 24.3197318979
155 1 12.8410805752 2.47126710893 24.3197318979
156 1 15.6946540364 2.47126710893 24.3197318979
157 1 0.0 4.94253421786 24.3197318979
158 1 2.85357346116 4.94253421786 24.3197318979
159 1 5.70714692232 4.94253421786 24.3197318979
160 1 8.56072038348 4.94253421786 24.3197318979
161 1 11.4142938446 4.94253421786 24.3197318979
162 1 14.2678673058 4.94253421786 24.3197318979
163 1 1.42678673058 7.41380132679 24.3197318979
164 1 4.28036019174 7.41380132679 24.3197318979
165 1 7.1339336529 7.41380132679 24.3197318979
166 1 9.98750711406 7.41380132679 24.3197318979
167 1 12.8410805752 7.41380132679 24.3197318979
168 1 15.6946540364 7.41380132679 24.3197318979
169 1 0.0 9.88506843572 24.3197318979
170 1 2.85357346116 9.88506843572 24.3197318979
171 1 5.70714692232 9.88506843572 24.3197318979
172 1 8.56072038348 9.88506843572 24.3197318979
173 1 11.4142938446 9.88506843572 24.3197318979
174 1 14.2678673058 9.88506843572 24.3197318979
175 1 1.42678673058 12.3563355446 24.3197318979
176 1 4.28036019174 12.3563355446 24.3197318979
177 1 7.1339336529 12.3563355446 24.3197318979
178 1 9.98750711406 12.3563355446 24.3197318979
179 1 12.8410805752 12.3563355446 24.3197318979
180 1 15.6946540364 12.3563355446 24.3197318979
181 1 7.1339336529 4.11877851488 26.7197318979

View File

@ -0,0 +1,23 @@
processors * * 1
units metal
boundary p p f
read_data adatom.data
pair_style agni
pair_coeff * * Al_prb.agni Al
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000

View File

@ -0,0 +1,23 @@
units metal
boundary p p p
read_data vacancy.data
pair_style agni
pair_coeff * * Al_prb.agni Al
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000

View File

@ -0,0 +1,82 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
processors * * 1
units metal
boundary p p f
read_data adatom.data
orthogonal box = (0 0 0) to (17.1214 14.8276 39.3197)
1 by 1 by 1 MPI processor grid
reading atoms ...
181 atoms
pair_style agni
pair_coeff * * Al_prb.agni Al
Reading potential file Al_prb.agni with DATE: 2016-11-11
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15 -> bins = 5 4 10
Memory usage per processor = 2.37049 Mbytes
Step KinEng Temp
0 11.633413 500
100 4.049974 174.06646
200 5.8983472 253.50889
300 5.3667309 230.66021
400 4.9343935 212.0785
500 5.4054496 232.32432
600 6.1779127 265.52452
700 6.3749266 273.9921
800 6.0701481 260.89283
900 6.4582394 277.57286
1000 6.4047444 275.27366
Loop time of 20.8273 on 1 procs for 1000 steps with 181 atoms
Performance: 2.074 ns/day, 11.571 hours/ns, 48.014 timesteps/s
100.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 20.79 | 20.79 | 20.79 | 0.0 | 99.82
Neigh | 0.022742 | 0.022742 | 0.022742 | 0.0 | 0.11
Comm | 0.0040836 | 0.0040836 | 0.0040836 | 0.0 | 0.02
Output | 0.00011086 | 0.00011086 | 0.00011086 | 0.0 | 0.00
Modify | 0.0089345 | 0.0089345 | 0.0089345 | 0.0 | 0.04
Other | | 0.001819 | | | 0.01
Nlocal: 181 ave 181 max 181 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 562 ave 562 max 562 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 18810 ave 18810 max 18810 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18810
Ave neighs/atom = 103.923
Neighbor list builds = 33
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:20

View File

@ -0,0 +1,82 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
processors * * 1
units metal
boundary p p f
read_data adatom.data
orthogonal box = (0 0 0) to (17.1214 14.8276 39.3197)
2 by 2 by 1 MPI processor grid
reading atoms ...
181 atoms
pair_style agni
pair_coeff * * Al_prb.agni Al
Reading potential file Al_prb.agni with DATE: 2016-11-11
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 500 12345
fix 1 all nvt temp 250 250 0.2
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15 -> bins = 5 4 10
Memory usage per processor = 2.48695 Mbytes
Step KinEng Temp
0 11.633413 500
100 4.049974 174.06646
200 5.8983472 253.50889
300 5.3667309 230.66021
400 4.9343935 212.0785
500 5.4054496 232.32432
600 6.1779127 265.52451
700 6.3749266 273.9921
800 6.0701481 260.89283
900 6.4582394 277.57286
1000 6.4047444 275.27366
Loop time of 5.96868 on 4 procs for 1000 steps with 181 atoms
Performance: 7.238 ns/day, 3.316 hours/ns, 167.541 timesteps/s
99.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.4176 | 5.4602 | 5.5505 | 2.3 | 91.48
Neigh | 0.0056074 | 0.0060464 | 0.0062635 | 0.3 | 0.10
Comm | 0.39544 | 0.48696 | 0.53111 | 7.9 | 8.16
Output | 0.0001545 | 0.00015736 | 0.0001595 | 0.0 | 0.00
Modify | 0.010492 | 0.011565 | 0.012588 | 0.9 | 0.19
Other | | 0.003794 | | | 0.06
Nlocal: 45.25 ave 47 max 42 min
Histogram: 1 0 0 0 0 0 0 0 2 1
Nghost: 374.75 ave 380 max 373 min
Histogram: 3 0 0 0 0 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4702.5 ave 4868 max 4389 min
Histogram: 1 0 0 0 0 0 0 1 1 1
Total # of neighbors = 18810
Ave neighs/atom = 103.923
Neighbor list builds = 33
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:05

View File

@ -0,0 +1,82 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
units metal
boundary p p p
read_data vac.data
orthogonal box = (0 0 0) to (8.07113 8.07113 8.07113)
1 by 1 by 1 MPI processor grid
reading atoms ...
31 atoms
pair_style agni
pair_coeff * * Al_prb.agni Al
Reading potential file Al_prb.agni with DATE: 2016-11-11
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15 -> bins = 2 2 2
Memory usage per processor = 2.40842 Mbytes
Step KinEng TotEng Temp
0 3.8778043 3.8778043 1000
100 2.8126642 2.8126642 725.32391
200 3.7110413 3.7110413 956.9955
300 3.2361084 3.2361084 834.52081
400 3.4625769 3.4625769 892.92201
500 3.4563307 3.4563307 891.31126
600 2.8486344 2.8486344 734.59982
700 3.1183057 3.1183057 804.14208
800 2.9164818 2.9164818 752.09618
900 3.464416 3.464416 893.39629
1000 3.5954546 3.5954546 927.18825
Loop time of 3.86777 on 1 procs for 1000 steps with 31 atoms
Performance: 11.169 ns/day, 2.149 hours/ns, 258.547 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.8463 | 3.8463 | 3.8463 | 0.0 | 99.44
Neigh | 0.011294 | 0.011294 | 0.011294 | 0.0 | 0.29
Comm | 0.0057271 | 0.0057271 | 0.0057271 | 0.0 | 0.15
Output | 0.00014257 | 0.00014257 | 0.00014257 | 0.0 | 0.00
Modify | 0.0029459 | 0.0029459 | 0.0029459 | 0.0 | 0.08
Other | | 0.001395 | | | 0.04
Nlocal: 31 ave 31 max 31 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 878 ave 878 max 878 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 4334 ave 4334 max 4334 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 4334
Ave neighs/atom = 139.806
Neighbor list builds = 51
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:03

View File

@ -0,0 +1,82 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
units metal
boundary p p p
read_data vac.data
orthogonal box = (0 0 0) to (8.07113 8.07113 8.07113)
1 by 2 by 2 MPI processor grid
reading atoms ...
31 atoms
pair_style agni
pair_coeff * * Al_prb.agni Al
Reading potential file Al_prb.agni with DATE: 2016-11-11
neighbor 0.3 bin
neigh_modify delay 2 check yes
timestep 0.0005
velocity all create 1000 12345
fix 1 all nvt temp 900 900 200
fix 5 all momentum 1 linear 1 1 1
thermo 100
thermo_style custom step ke etotal temp
# dump MyDump all custom 250 dump.atoms id type x y z vx vy vz fx fy fz
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 2 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 8.3
ghost atom cutoff = 8.3
binsize = 4.15 -> bins = 2 2 2
Memory usage per processor = 2.3974 Mbytes
Step KinEng TotEng Temp
0 3.8778044 3.8778044 1000
100 2.8126642 2.8126642 725.32391
200 3.7110413 3.7110413 956.99549
300 3.2361084 3.2361084 834.52081
400 3.4625769 3.4625769 892.92201
500 3.4563307 3.4563307 891.31126
600 2.8486343 2.8486343 734.59981
700 3.1183057 3.1183057 804.14208
800 2.9164819 2.9164819 752.09618
900 3.4644161 3.4644161 893.39631
1000 3.5954546 3.5954546 927.18824
Loop time of 1.11007 on 4 procs for 1000 steps with 31 atoms
Performance: 38.916 ns/day, 0.617 hours/ns, 900.843 timesteps/s
99.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.906 | 0.99469 | 1.0291 | 5.1 | 89.61
Neigh | 0.0026186 | 0.0027665 | 0.0028622 | 0.2 | 0.25
Comm | 0.066125 | 0.10079 | 0.1896 | 16.2 | 9.08
Output | 0.00012875 | 0.00014615 | 0.00018787 | 0.2 | 0.01
Modify | 0.0080338 | 0.0083079 | 0.00861 | 0.2 | 0.75
Other | | 0.003372 | | | 0.30
Nlocal: 7.75 ave 8 max 7 min
Histogram: 1 0 0 0 0 0 0 0 0 3
Nghost: 623.5 ave 630 max 616 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 1083.5 ave 1131 max 988 min
Histogram: 1 0 0 0 0 0 0 1 1 1
Total # of neighbors = 4334
Ave neighs/atom = 139.806
Neighbor list builds = 51
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:01

View File

@ -0,0 +1,46 @@
Data File created from VASP POSCAR
31 atoms
1 atom types
0 8.071125 xlo xhi
0 8.071125 ylo yhi
0 8.071125 zlo zhi
Masses
1 26.9815
Atoms
1 1 8.05711986217 3.20498589607 7.09652861184
2 1 8.05262255028 3.62006786258 3.16719841667
3 1 2.08891866821 1.38430927213 3.14852514324
4 1 4.25446836692 3.27689661974 3.35678388118
5 1 7.92524269451 7.20500664579 3.03232792051
6 1 6.04056771113 7.24499020906 1.11223380379
7 1 2.32585852889 5.29910389395 7.31500292009
8 1 2.09613190567 1.27658214906 7.44277603054
9 1 3.96852985867 7.2805082905 3.37568009522
10 1 0.0773420461671 1.29964047903 5.27451616984
11 1 7.96501442334 1.24471347504 1.17853896176
12 1 2.13035246804 5.36148411996 3.3817805118
13 1 2.06211525033 7.25482811482 1.52039033766
14 1 3.99735704234 7.4099829467 7.05753768668
15 1 3.84113228596 5.1855444403 1.41642147402
16 1 0.231862769544 5.38528175164 5.51171817022
17 1 0.12718452785 5.35814065671 1.11669573581
18 1 8.05303937039 7.38861123542 7.41398359808
19 1 1.88506066609 3.17578974033 1.20929473631
20 1 4.33739926831 1.37976783613 5.28141762358
21 1 2.23200994743 3.12419127088 5.36881641316
22 1 6.22871004896 1.34968648416 7.24032447626
23 1 6.08380394159 1.16222146146 3.30535465675
24 1 6.16629028099 5.22806528503 3.7675179617
25 1 4.30194966153 1.14526017671 1.45054175732
26 1 6.24221620153 5.05377575942 7.17573714759
27 1 3.92820642281 2.9627641757 7.71515743722
28 1 4.33686872315 4.73096617728 5.57649231331
29 1 6.05033104136 3.51389714904 1.34127903322
30 1 6.27311587476 7.19257797516 5.46814369382
31 1 1.81274009101 7.47392095028 5.35484578074

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,15 @@
#!/bin/bash
for i in $(ls -d [0-9]*)
do
rm -f $i/final*
rm -f $i/log*
rm -f $i/ent*
rm -f $i/output
cp $i/restart.init $i/restart_file
done
echo 1 > lastexchange
cp walker.bkp lastwalker
exit 0

View File

@ -0,0 +1,169 @@
#!/usr/bin/env python2.7
import os, sys
from numpy import *
import numpy.random
### Runs replica exchange with gREM (fix grem) for an unlimited number of replicas on a set number of processors. This script is inefficient, but necessary when running hundreds of replicas on a relatively small number of procs.
### read number of processors from the command line
nproc = int(sys.argv[1])
### path to simulation directory
path = os.getcwd()
### path to LAMMPS executable
lmp = sys.argv[2]
### LAMMPS input name
inp = sys.argv[3]
### define pressure for simulations (0 if const V)
pressure = 0
### some constants for gREM, must match with LAMMPS input file!
H = -30000
eta = -0.01
#kB = 0.000086173324 # eV (metal)
kB = 0.0019872 # kcal/mol (real)
### define lambdas - script assumes that there are already existing directories with all files necessary to run
lambdas=[400,405,410,415,420,425]
ll = len(lambdas)
### define number of exchanges
starting_ex = int(loadtxt("lastexchange"))
how_many_ex = 5
max_exchange = starting_ex+how_many_ex
### array with walkers
walker = loadtxt("lastwalker")
### initiate array with enthalpies
enthalpy = zeros(ll)
aver_enthalpy = zeros(ll)
for exchange in arange(starting_ex,max_exchange):
print "run", exchange
for l in range(ll):
#print "replica", l
os.chdir(path+"/%s" % lambdas[l])
#os.system("cp restart_file restart_file%d" % exchange)
if (nproc > 1):
os.system("mpirun -np %d " + lmp + " -in ../" + inp + " -var lambda %g -var eta %g -var enthalpy %g > output" % (nproc, lambdas[l], eta, H))
if (nproc == 1):
os.system(lmp + " -in ../" + inp + " -var lambda %g -var eta %g -var enthalpy %g > output" % (lambdas[l], eta, H))
os.system("grep -v '[a-zA-Z]' output | awk '{if(NF==6 && NR>19)print $0}' | awk '{print $3}' >ent")
enthalpy[l] = os.popen("tail -n 1 ent").read()
ee = loadtxt("ent")
aver_enthalpy[l] = mean(ee[-1])
# os.system("mv dump.dcd dump%d.dcd" % exchange)
os.system("mv log.lammps log%d.lammps" % exchange)
# os.system("rm output")
os.system("mv final_restart_file final_restart_file%d" % exchange)
os.system("mv ent ent%d" % exchange)
os.system("bzip2 log%d.lammps ent%d" % (exchange,exchange))
os.system("cp final_restart_file%d restart_file" % exchange)
### replicas will be exchanged based on enthalpy order, not replica order (thermostat order)
#entalpy_sorted_indices = enthalpy.argsort()
aver_entalpy_sorted_indices = aver_enthalpy.argsort()
### choose pair of replicas for exchange attempt based on enthalpy order
pp = random.random_integers(0,ll-2)
first = aver_entalpy_sorted_indices[pp]
second = aver_entalpy_sorted_indices[pp+1]
#if (first>second):
# tmp = first
# first = second
# second = tmp
print "pair1:", first, second
### calculate weights for exchange criterion
w1 = log(lambdas[first]+eta*(enthalpy[first]-1*H))
w2 = log(lambdas[first]+eta*(enthalpy[second]-1*H))
w3 = log(lambdas[second]+eta*(enthalpy[first]-1*H))
w4 = log(lambdas[second]+eta*(enthalpy[second]-1*H))
weight = (w4-w3+w1-w2)/eta/kB
### generate random number for exchange criterion and calc its log
LOGRANDNUM = log(random.random())
### reset the acceptance conditions
compare1 = 0
compare2 = 0
if (weight>0):
compare1 = 1
if (weight>LOGRANDNUM):
compare2 = 1
### exchange restart files if exchange condition is satisfied
if (compare1>0 or compare2>0):
print "exchange1 accepted for pair", first, second, lambdas[first], lambdas[second], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[first],exchange,path,lambdas[second]))
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[second],exchange,path,lambdas[first]))
### update walkers
tmp1=walker[first]
tmp2=walker[second]
walker[first]=tmp2
walker[second]=tmp1
else:
print "exchange1 not accepted for pair", first, second, lambdas[first], lambdas[second], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
### choose again pair of replicas for exchange attempt based on enthalpy order
### but make sure this pair is different than the first pair
if_different = 0
while if_different<1:
pp2 = random.random_integers(0,ll-2)
third = aver_entalpy_sorted_indices[pp2]
fourth = aver_entalpy_sorted_indices[pp2+1]
if (third!=first and third!=second and third!=aver_entalpy_sorted_indices[pp-1]):
if_different = 1
print "pair2:", third, fourth
### calculate weights for exchange criterion
w1 = log(lambdas[third]+eta*(enthalpy[third]-1*H))
w2 = log(lambdas[third]+eta*(enthalpy[fourth]-1*H))
w3 = log(lambdas[fourth]+eta*(enthalpy[third]-1*H))
w4 = log(lambdas[fourth]+eta*(enthalpy[fourth]-1*H))
weight = (w4-w3+w1-w2)/eta/kB
### generate random number for exchange criterion and calc its log
LOGRANDNUM = log(random.random())
### reset the acceptance conditions
compare1 = 0
compare2 = 0
if (weight>0):
compare1 = 1
if (weight>LOGRANDNUM):
compare2 = 1
### exchange restart files if exchange condition is satisfied
if (compare1>0 or compare2>0):
print "exchange2 accepted for pair", third, fourth, lambdas[third], lambdas[fourth], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[third],exchange,path,lambdas[fourth]))
os.system("cp %s/%s/final_restart_file%d %s/%s/restart_file" % (path,lambdas[fourth],exchange,path,lambdas[third]))
### update walkers
tmp1=walker[third]
tmp2=walker[fourth]
walker[third]=tmp2
walker[fourth]=tmp1
else:
print "exchange2 not accepted for pair", third, fourth, lambdas[third], lambdas[fourth], "with compares as", compare1, compare2, "weight as", weight, "and lograndnum", LOGRANDNUM
#print "walkers:", walker
print "".join(["%d " % x for x in walker])
sys.stdout.flush()
lastwalker = open(path + "/lastwalker", "w")
lastwalker.write("".join(["%d " % w for w in walker]))
lastwalker.close()
lastexchange = open(path + "/lastexchange", "w")
lastexchange.write("%d" % (exchange+1))
lastexchange.close()

View File

@ -0,0 +1,25 @@
# LJ particles
variable T0 index 300.0
variable press index 0.0
variable lambda index 400.0
variable eta index -0.01
variable enthalpy index -30000.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "restart_file"
thermo 10
thermo_style custom step temp pe etotal press vol
velocity all create ${T0} 12427
timestep 1.0
fix fxnvt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxgREM all grem ${lambda} ${eta} ${enthalpy} fxnvt
thermo_modify press fxgREM_press
run 10000
write_data final_restart_file

View File

@ -0,0 +1,12 @@
#!/bin/sh
NPROCS=1
if [ $# -gt 0 ]; then
    NPROCS=$1
fi
bash ./clean.sh
python ./double-re-short.py $NPROCS $HOME/compile/lammps-icms/src/lmp_omp in.gREM > total_output.$NPROCS
exit 0

View File

@ -0,0 +1 @@
0 1 2 3 4 5

View File

@ -0,0 +1,21 @@
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnpt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxgREM all grem 400 -.01 -30000 fxnpt
thermo_modify press fxgREM_press
run 1000
#write_data lj-out.data

View File

@ -0,0 +1,20 @@
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all nvt temp ${T0} ${T0} 1000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
run 1000
#write_data lj-out.data

File diff suppressed because it is too large

View File

@ -0,0 +1,176 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 0 10000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
thermo_modify press fxgREM_press
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.37943 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 -91.741776 11950.115
10 312.30124 -3182.2257 -2717.7013 -203.95075 11950.113
20 314.94567 -3186.456 -2717.9982 -265.56737 11950.108
30 312.229 -3183.7641 -2719.3472 -196.90499 11950.097
40 305.94068 -3180.7085 -2725.6449 -92.562221 11950.083
50 300.42281 -3176.5838 -2729.7277 10.896769 11950.066
60 299.16747 -3174.1939 -2729.205 50.094171 11950.05
70 301.65965 -3176.0918 -2727.396 0.096901939 11950.035
80 304.77876 -3178.2699 -2724.9346 -64.001022 11950.019
90 305.60598 -3178.9517 -2724.386 -93.672879 11950.003
100 303.8005 -3177.5156 -2725.6354 -74.516709 11949.985
110 300.86776 -3175.4773 -2727.9593 -34.22655 11949.965
120 298.70177 -3175.6488 -2731.3526 -19.014898 11949.944
130 298.39686 -3176.3792 -2732.5365 -21.293245 11949.923
140 300.00669 -3177.7032 -2731.466 -40.992937 11949.902
150 301.85665 -3178.1312 -2729.1423 -45.715505 11949.88
160 301.20597 -3177.3218 -2729.3007 -10.104082 11949.857
170 297.01134 -3172.7462 -2730.9643 99.298381 11949.833
180 291.279 -3168.3513 -2735.0958 219.47549 11949.812
190 287.13954 -3165.1287 -2738.0304 309.36947 11949.796
200 286.57735 -3165.2951 -2739.033 323.96954 11949.786
210 289.83941 -3167.8245 -2736.7103 271.77305 11949.783
220 296.12858 -3171.8054 -2731.3366 172.4056 11949.785
230 303.82424 -3176.3108 -2724.3952 56.711479 11949.791
240 309.95738 -3180.9789 -2719.9408 -40.992898 11949.798
250 312.0405 -3182.3473 -2718.2107 -57.591676 11949.805
260 309.65444 -3181.0587 -2720.4712 3.3540332 11949.81
270 304.40001 -3176.5798 -2723.8078 130.77028 11949.816
280 298.65985 -3174.1505 -2729.9166 237.63562 11949.825
290 294.78709 -3170.9701 -2732.4966 326.94924 11949.838
300 294.03216 -3169.9567 -2732.6062 349.85486 11949.859
310 296.44397 -3172.8519 -2731.914 284.80897 11949.886
320 301.41027 -3175.9697 -2727.6447 179.4647 11949.92
330 307.88911 -3181.2615 -2723.2998 24.702414 11949.957
340 314.73138 -3186.0047 -2717.8656 -132.6263 11949.995
350 320.55591 -3187.8509 -2711.0483 -245.88468 11950.031
360 323.50274 -3188.9994 -2707.8136 -314.73676 11950.062
370 321.61539 -3187.1233 -2708.7448 -293.17446 11950.086
380 314.37275 -3181.484 -2713.8784 -169.00448 11950.104
390 303.54884 -3174.1675 -2722.6616 12.923999 11950.119
400 293.40432 -3167.0348 -2730.6181 187.6624 11950.135
410 288.46351 -3165.273 -2736.2054 252.20051 11950.154
420 290.31387 -3168.604 -2736.7841 193.73816 11950.178
430 296.35519 -3173.09 -2732.2841 81.521847 11950.207
440 301.92973 -3175.4344 -2726.3368 -1.8329439 11950.237
450 303.76205 -3176.777 -2724.9539 -35.002096 11950.267
460 301.71619 -3174.2731 -2725.4932 14.977875 11950.296
470 298.92404 -3172.9921 -2728.3652 64.224747 11950.326
480 298.80164 -3172.5329 -2728.0881 82.781347 11950.358
490 302.71589 -3175.3703 -2725.1034 27.223049 11950.39
500 309.10665 -3179.3013 -2719.5285 -65.460658 11950.424
510 314.36408 -3183.2854 -2715.6927 -151.19245 11950.456
520 315.71154 -3183.5328 -2713.9358 -163.19151 11950.485
530 313.31886 -3182.2521 -2716.214 -125.5741 11950.511
540 309.81847 -3178.9358 -2718.1043 -55.55841 11950.534
550 308.29687 -3177.837 -2719.2688 -24.39371 11950.556
560 308.75927 -3176.3265 -2717.0705 0.93689833 11950.578
570 307.52811 -3175.8145 -2718.3897 35.502429 11950.6
580 301.75074 -3173.1208 -2724.2894 136.29625 11950.622
590 292.37743 -3165.5806 -2730.6913 319.75957 11950.648
600 283.57627 -3159.8617 -2738.0635 471.28045 11950.68
610 279.85172 -3157.4557 -2741.1975 530.72699 11950.722
620 283.40879 -3160.5911 -2739.042 455.28104 11950.775
630 292.53718 -3166.3125 -2731.1856 296.63465 11950.838
640 302.81112 -3173.3096 -2722.901 113.80844 11950.907
650 309.83321 -3179.3684 -2718.515 -26.499431 11950.978
660 312.1283 -3182.7335 -2718.4663 -89.363745 11951.049
670 311.16363 -3181.867 -2719.0347 -69.370989 11951.118
680 308.51041 -3180.6869 -2721.801 -25.972987 11951.186
690 304.64393 -3176.8751 -2723.7403 56.592367 11951.254
700 300.24456 -3175.4797 -2728.8887 112.34442 11951.323
710 296.35785 -3172.9705 -2732.1607 168.18009 11951.394
720 293.78145 -3172.1065 -2735.1289 182.81082 11951.468
730 293.25707 -3170.8715 -2734.6738 171.04236 11951.547
740 295.33219 -3172.9109 -2733.6266 91.351362 11951.629
750 299.69136 -3175.2574 -2729.4892 -16.266404 11951.713
760 305.2281 -3177.9836 -2723.9799 -137.30615 11951.796
770 310.59309 -3182.7053 -2720.7216 -272.72961 11951.877
780 314.65573 -3183.4212 -2715.3947 -341.231 11951.952
790 316.48606 -3185.44 -2714.691 -388.53602 11952.02
800 315.15897 -3186.846 -2718.0709 -384.28316 11952.08
810 310.43559 -3183.6648 -2721.9154 -282.61999 11952.133
820 303.22265 -3178.464 -2727.4433 -121.47565 11952.179
830 295.36843 -3175.4771 -2736.1389 33.066504 11952.223
840 288.69698 -3169.5813 -2740.1664 216.10697 11952.268
850 283.82649 -3165.7822 -2743.6118 359.56896 11952.317
860 280.04102 -3162.8228 -2746.283 475.61942 11952.374
870 277.10059 -3159.6212 -2747.4551 572.5432 11952.441
880 275.76549 -3158.2545 -2748.0743 616.43304 11952.52
890 276.82327 -3158.9703 -2747.2166 596.08147 11952.612
900 280.72135 -3162.0637 -2744.5119 506.33695 11952.716
910 287.1035 -3167.4388 -2740.3941 356.68688 11952.831
920 294.28041 -3171.6218 -2733.902 206.06394 11952.953
930 300.36009 -3173.9046 -2727.1418 88.047911 11953.08
940 303.86761 -3175.5599 -2723.5798 7.6846808 11953.209
950 304.42957 -3176.0831 -2723.2672 -25.15496 11953.339
960 303.13982 -3176.0534 -2725.1559 -28.715178 11953.467
970 302.30166 -3176.9758 -2727.325 -43.264668 11953.596
980 303.93331 -3178.9891 -2726.9114 -88.434034 11953.723
990 307.36223 -3180.7316 -2723.5535 -145.46208 11953.849
1000 310.09574 -3181.101 -2719.8571 -180.39125 11953.972
Loop time of 0.307225 on 1 procs for 1000 steps with 500 atoms
Performance: 281.227 ns/day, 0.085 hours/ns, 3254.944 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.25351 | 0.25351 | 0.25351 | 0.0 | 82.52
Bond | 7.1526e-05 | 7.1526e-05 | 7.1526e-05 | 0.0 | 0.02
Neigh | 0.0042093 | 0.0042093 | 0.0042093 | 0.0 | 1.37
Comm | 0.010211 | 0.010211 | 0.010211 | 0.0 | 3.32
Output | 0.0013611 | 0.0013611 | 0.0013611 | 0.0 | 0.44
Modify | 0.033891 | 0.033891 | 0.033891 | 0.0 | 11.03
Other | | 0.003969 | | | 1.29
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1610 ave 1610 max 1610 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 14765 ave 14765 max 14765 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14765
Ave neighs/atom = 29.53
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00

View File

@ -0,0 +1,176 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 2 by 2 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso ${press} ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 ${press} 10000.0
fix fxnvt all npt temp 300 300 1000.0 iso 0 0 10000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
thermo_modify press fxgREM_press
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.34276 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 -91.741776 11950.115
10 312.30124 -3182.2257 -2717.7013 -203.95075 11950.113
20 314.94567 -3186.456 -2717.9982 -265.56737 11950.108
30 312.229 -3183.7641 -2719.3472 -196.90499 11950.097
40 305.94068 -3180.7085 -2725.6449 -92.562221 11950.083
50 300.42281 -3176.5838 -2729.7277 10.896769 11950.066
60 299.16747 -3174.1939 -2729.205 50.094171 11950.05
70 301.65965 -3176.0918 -2727.396 0.096901939 11950.035
80 304.77876 -3178.2699 -2724.9346 -64.001022 11950.019
90 305.60598 -3178.9517 -2724.386 -93.672879 11950.003
100 303.8005 -3177.5156 -2725.6354 -74.516709 11949.985
110 300.86776 -3175.4773 -2727.9593 -34.22655 11949.965
120 298.70177 -3175.6488 -2731.3526 -19.014898 11949.944
130 298.39686 -3176.3792 -2732.5365 -21.293245 11949.923
140 300.00669 -3177.7032 -2731.466 -40.992937 11949.902
150 301.85665 -3178.1312 -2729.1423 -45.715505 11949.88
160 301.20597 -3177.3218 -2729.3007 -10.104082 11949.857
170 297.01134 -3172.7462 -2730.9643 99.298381 11949.833
180 291.279 -3168.3513 -2735.0958 219.47549 11949.812
190 287.13954 -3165.1287 -2738.0304 309.36947 11949.796
200 286.57735 -3165.2951 -2739.033 323.96954 11949.786
210 289.83941 -3167.8245 -2736.7103 271.77305 11949.783
220 296.12858 -3171.8054 -2731.3366 172.4056 11949.785
230 303.82424 -3176.3108 -2724.3952 56.711479 11949.791
240 309.95738 -3180.9789 -2719.9408 -40.992898 11949.798
250 312.0405 -3182.3473 -2718.2107 -57.591676 11949.805
260 309.65444 -3181.0587 -2720.4712 3.3540332 11949.81
270 304.40001 -3176.5798 -2723.8078 130.77028 11949.816
280 298.65985 -3174.1505 -2729.9166 237.63562 11949.825
290 294.78709 -3170.9701 -2732.4966 326.94924 11949.838
300 294.03216 -3169.9567 -2732.6062 349.85486 11949.859
310 296.44397 -3172.8519 -2731.914 284.80897 11949.886
320 301.41027 -3175.9697 -2727.6447 179.4647 11949.92
330 307.88911 -3181.2615 -2723.2998 24.702414 11949.957
340 314.73138 -3186.0047 -2717.8656 -132.6263 11949.995
350 320.55591 -3187.8509 -2711.0483 -245.88468 11950.031
360 323.50274 -3188.9994 -2707.8136 -314.73676 11950.062
370 321.61539 -3187.1233 -2708.7448 -293.17446 11950.086
380 314.37275 -3181.484 -2713.8784 -169.00448 11950.104
390 303.54884 -3174.1675 -2722.6616 12.923999 11950.119
400 293.40432 -3167.0348 -2730.6181 187.6624 11950.135
410 288.46351 -3165.273 -2736.2054 252.20051 11950.154
420 290.31387 -3168.604 -2736.7841 193.73816 11950.178
430 296.35519 -3173.09 -2732.2841 81.521847 11950.207
440 301.92973 -3175.4344 -2726.3368 -1.8329439 11950.237
450 303.76205 -3176.777 -2724.9539 -35.002096 11950.267
460 301.71619 -3174.2731 -2725.4932 14.977875 11950.296
470 298.92404 -3172.9921 -2728.3652 64.224747 11950.326
480 298.80164 -3172.5329 -2728.0881 82.781347 11950.358
490 302.71589 -3175.3703 -2725.1034 27.223049 11950.39
500 309.10665 -3179.3013 -2719.5285 -65.460658 11950.424
510 314.36408 -3183.2854 -2715.6927 -151.19245 11950.456
520 315.71154 -3183.5328 -2713.9358 -163.19151 11950.485
530 313.31886 -3182.2521 -2716.214 -125.5741 11950.511
540 309.81847 -3178.9358 -2718.1043 -55.55841 11950.534
550 308.29687 -3177.837 -2719.2688 -24.39371 11950.556
560 308.75927 -3176.3265 -2717.0705 0.93689833 11950.578
570 307.52811 -3175.8145 -2718.3897 35.502429 11950.6
580 301.75074 -3173.1208 -2724.2894 136.29625 11950.622
590 292.37743 -3165.5806 -2730.6913 319.75957 11950.648
600 283.57627 -3159.8617 -2738.0635 471.28045 11950.68
610 279.85172 -3157.4557 -2741.1975 530.72699 11950.722
620 283.40879 -3160.5911 -2739.042 455.28104 11950.775
630 292.53718 -3166.3125 -2731.1856 296.63465 11950.838
640 302.81112 -3173.3096 -2722.901 113.80844 11950.907
650 309.83321 -3179.3684 -2718.515 -26.499431 11950.978
660 312.1283 -3182.7335 -2718.4663 -89.363745 11951.049
670 311.16363 -3181.867 -2719.0347 -69.370989 11951.118
680 308.51041 -3180.6869 -2721.801 -25.972987 11951.186
690 304.64393 -3176.8751 -2723.7403 56.592367 11951.254
700 300.24456 -3175.4797 -2728.8887 112.34442 11951.323
710 296.35785 -3172.9705 -2732.1607 168.18009 11951.394
720 293.78145 -3172.1065 -2735.1289 182.81082 11951.468
730 293.25707 -3170.8715 -2734.6738 171.04236 11951.547
740 295.33219 -3172.9109 -2733.6266 91.351362 11951.629
750 299.69136 -3175.2574 -2729.4892 -16.266404 11951.713
760 305.2281 -3177.9836 -2723.9799 -137.30615 11951.796
770 310.59309 -3182.7053 -2720.7216 -272.72961 11951.877
780 314.65573 -3183.4212 -2715.3947 -341.231 11951.952
790 316.48606 -3185.44 -2714.691 -388.53602 11952.02
800 315.15897 -3186.846 -2718.0709 -384.28316 11952.08
810 310.43559 -3183.6648 -2721.9154 -282.61999 11952.133
820 303.22265 -3178.464 -2727.4433 -121.47565 11952.179
830 295.36843 -3175.4771 -2736.1389 33.066504 11952.223
840 288.69698 -3169.5813 -2740.1664 216.10697 11952.268
850 283.82649 -3165.7822 -2743.6118 359.56896 11952.317
860 280.04102 -3162.8228 -2746.283 475.61942 11952.374
870 277.10059 -3159.6212 -2747.4551 572.5432 11952.441
880 275.76549 -3158.2545 -2748.0743 616.43304 11952.52
890 276.82327 -3158.9703 -2747.2166 596.08147 11952.612
900 280.72135 -3162.0637 -2744.5119 506.33695 11952.716
910 287.1035 -3167.4388 -2740.3941 356.68688 11952.831
920 294.28041 -3171.6218 -2733.902 206.06394 11952.953
930 300.36009 -3173.9046 -2727.1418 88.047911 11953.08
940 303.86761 -3175.5599 -2723.5798 7.6846808 11953.209
950 304.42957 -3176.0831 -2723.2672 -25.15496 11953.339
960 303.13982 -3176.0534 -2725.1559 -28.715178 11953.467
970 302.30166 -3176.9758 -2727.325 -43.264668 11953.596
980 303.93331 -3178.9891 -2726.9114 -88.434034 11953.723
990 307.36223 -3180.7316 -2723.5535 -145.46208 11953.849
1000 310.09574 -3181.101 -2719.8571 -180.39125 11953.972
Loop time of 0.154208 on 4 procs for 1000 steps with 500 atoms
Performance: 560.281 ns/day, 0.043 hours/ns, 6484.730 timesteps/s
98.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.072079 | 0.074846 | 0.079666 | 1.1 | 48.54
Bond | 5.7936e-05 | 6.634e-05 | 8.1062e-05 | 0.1 | 0.04
Neigh | 0.0010812 | 0.0012064 | 0.0012748 | 0.2 | 0.78
Comm | 0.032452 | 0.037544 | 0.04076 | 1.6 | 24.35
Output | 0.0018461 | 0.0020589 | 0.0026393 | 0.7 | 1.34
Modify | 0.032085 | 0.032688 | 0.033361 | 0.3 | 21.20
Other | | 0.005799 | | | 3.76
Nlocal: 125 ave 127 max 123 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 870.5 ave 882 max 862 min
Histogram: 1 1 0 0 0 0 1 0 0 1
Neighs: 3691.25 ave 3807 max 3563 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 14765
Ave neighs/atom = 29.53
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00

View File

@ -0,0 +1,173 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 1 by 1 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all nvt temp ${T0} ${T0} 1000.0
fix fxnvt all nvt temp 300 ${T0} 1000.0
fix fxnvt all nvt temp 300 300 1000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.37943 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 883.58369 11950.115
10 312.30121 -3182.2257 -2717.7013 793.47811 11950.115
20 314.94553 -3186.4559 -2717.9983 738.74091 11950.115
30 312.22861 -3183.7638 -2719.3474 797.47978 11950.115
40 305.93987 -3180.7079 -2725.6455 881.30806 11950.115
50 300.42132 -3176.5828 -2729.7288 967.92042 11950.115
60 299.16487 -3174.1921 -2729.2071 1004.247 11950.115
70 301.65565 -3176.0891 -2727.3992 962.58134 11950.115
80 304.77334 -3178.2663 -2724.939 907.8946 11950.115
90 305.59929 -3178.9472 -2724.3914 879.91629 11950.115
100 303.79263 -3177.5103 -2725.6418 892.67631 11950.115
110 300.85863 -3175.4711 -2727.9667 923.44924 11950.115
120 298.69083 -3175.6415 -2731.3615 931.87518 11950.115
130 298.38415 -3176.3706 -2732.5468 928.88286 11950.115
140 299.99129 -3177.6935 -2731.4792 914.36783 11950.115
150 301.83869 -3178.121 -2729.1588 915.01407 11950.115
160 301.18834 -3177.3117 -2729.3169 947.45228 11950.115
170 296.99406 -3172.7363 -2730.9801 1042.6928 11950.115
180 291.25952 -3168.3407 -2735.1142 1144.5436 11950.115
190 287.1178 -3164.9847 -2737.9187 1223.4003 11950.115
200 286.552 -3165.2799 -2739.0555 1235.6703 11950.115
210 289.81033 -3167.8062 -2736.7353 1194.6672 11950.115
220 296.09616 -3171.7847 -2731.3641 1115.8799 11950.115
230 303.79176 -3176.2893 -2724.4221 1024.6471 11950.115
240 309.9273 -3180.9591 -2719.9657 945.55045 11950.115
250 312.0159 -3182.3307 -2718.2306 934.36956 11950.115
260 309.63264 -3181.0452 -2720.4901 986.77385 11950.115
270 304.38172 -3176.568 -2723.8233 1097.264 11950.115
280 298.64188 -3174.1384 -2729.9313 1186.2239 11950.115
290 294.76686 -3170.9562 -2732.5128 1264.247 11950.115
300 294.00805 -3169.8091 -2732.4944 1287.4001 11950.115
310 296.41801 -3172.834 -2731.9347 1229.5624 11950.115
320 301.38477 -3175.9514 -2727.6644 1140.8664 11950.115
330 307.86584 -3181.2442 -2723.3171 1007.1545 11950.115
340 314.7103 -3185.9891 -2717.8814 871.74528 11950.115
350 320.53954 -3187.8385 -2711.0602 776.85994 11950.115
360 323.49505 -3188.9927 -2707.8184 716.58062 11950.115
370 321.62077 -3187.1246 -2708.7381 731.01909 11950.115
380 314.39049 -3181.4931 -2713.8611 831.21057 11950.115
390 303.57079 -3174.1804 -2722.6419 978.62645 11950.115
400 293.42165 -3167.0452 -2730.6027 1122.3558 11950.115
410 288.46838 -3165.4071 -2736.3322 1171.8087 11950.115
420 290.30766 -3168.5988 -2736.7882 1122.5413 11950.115
430 296.34338 -3173.0824 -2732.2941 1030.2769 11950.115
440 301.92394 -3175.4307 -2726.3417 964.25387 11950.115
450 303.76745 -3176.9122 -2725.0811 934.49176 11950.115
460 301.72985 -3174.2821 -2725.4818 979.07605 11950.115
470 298.93736 -3173.0014 -2728.3548 1020.0482 11950.115
480 298.80912 -3172.803 -2728.3471 1036.6531 11950.115
490 302.72217 -3175.3764 -2725.1001 997.71146 11950.115
500 309.11393 -3179.3088 -2719.5253 925.81108 11950.115
510 314.37612 -3183.2961 -2715.6855 856.23748 11950.115
520 315.72767 -3183.547 -2713.926 847.70543 11950.115
530 313.34173 -3182.2695 -2716.1974 877.30842 11950.115
540 309.84312 -3178.9553 -2718.0871 936.69244 11950.115
550 308.3251 -3177.8582 -2719.248 963.93032 11950.115
560 308.79192 -3176.4834 -2717.1788 989.67643 11950.115
570 307.57194 -3175.8464 -2718.3565 1021.0494 11950.115
580 301.8035 -3173.1582 -2724.2483 1102.4893 11950.115
590 292.43425 -3165.751 -2730.7772 1254.7815 11950.115
600 283.62905 -3159.8987 -2738.022 1381.0608 11950.115
610 279.90122 -3157.49 -2741.1581 1431.0028 11950.115
620 283.4582 -3160.756 -2739.1334 1367.7385 11950.115
630 292.58866 -3166.3469 -2731.1435 1241.1194 11950.115
640 302.86585 -3173.4778 -2722.9878 1089.7342 11950.115
650 309.89252 -3179.4078 -2718.4662 972.6359 11950.115
660 312.19165 -3182.7754 -2718.414 916.62037 11950.115
670 311.2287 -3181.9102 -2718.9811 933.79804 11950.115
680 308.57852 -3180.7312 -2721.7441 969.24936 11950.115
690 304.71609 -3176.9196 -2723.6775 1040.2699 11950.115
700 300.31995 -3175.5245 -2728.8213 1082.845 11950.115
710 296.43537 -3173.0166 -2732.0915 1127.4487 11950.115
720 293.86692 -3172.1582 -2735.0535 1135.0215 11950.115
730 293.35611 -3170.9335 -2734.5885 1122.9143 11950.115
740 295.44861 -3172.9862 -2733.5288 1050.995 11950.115
750 299.82732 -3175.3467 -2729.3763 958.31462 11950.115
760 305.37987 -3178.216 -2723.9866 854.1946 11950.115
770 310.75394 -3182.8127 -2720.5898 737.72668 11950.115
780 314.81395 -3183.7905 -2715.5286 679.74198 11950.115
790 316.63339 -3185.8028 -2714.8346 638.48871 11950.115
800 315.2894 -3186.9345 -2717.9654 641.53256 11950.115
810 310.54289 -3183.7383 -2721.8293 728.51241 11950.115
820 303.31439 -3178.7897 -2727.6326 864.45674 11950.115
830 295.46125 -3175.5387 -2736.0625 997.72969 11950.115
840 288.802 -3169.6502 -2740.0791 1160.6622 11950.115
850 283.94785 -3165.8605 -2743.5096 1289.55 11950.115
860 280.17501 -3163.0381 -2746.299 1392.8854 11950.115
870 277.2456 -3159.8429 -2747.4611 1481.3899 11950.115
880 275.93123 -3158.3584 -2747.9316 1523.5374 11950.115
890 277.0215 -3159.2285 -2747.18 1506.1558 11950.115
900 280.96237 -3162.483 -2744.5728 1428.4183 11950.115
910 287.37962 -3167.6183 -2740.1628 1303.0268 11950.115
920 294.56731 -3171.6765 -2733.5299 1177.748 11950.115
930 300.63273 -3174.0842 -2726.9158 1078.7393 11950.115
940 304.10943 -3175.9847 -2723.645 1007.7154 11950.115
950 304.64845 -3176.6263 -2723.4848 976.37917 11950.115
960 303.36343 -3176.4694 -2725.2393 971.40749 11950.115
970 302.57138 -3177.5541 -2727.5021 954.01115 11950.115
980 304.2593 -3179.2101 -2726.6475 919.74949 11950.115
990 307.69959 -3180.9631 -2723.2833 874.9594 11950.115
1000 310.3971 -3181.9675 -2720.2753 842.81184 11950.115
Loop time of 0.279202 on 1 procs for 1000 steps with 500 atoms
Performance: 309.453 ns/day, 0.078 hours/ns, 3581.633 timesteps/s
99.1% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.24196 | 0.24196 | 0.24196 | 0.0 | 86.66
Bond | 6.628e-05 | 6.628e-05 | 6.628e-05 | 0.0 | 0.02
Neigh | 0.0043204 | 0.0043204 | 0.0043204 | 0.0 | 1.55
Comm | 0.010242 | 0.010242 | 0.010242 | 0.0 | 3.67
Output | 0.0012252 | 0.0012252 | 0.0012252 | 0.0 | 0.44
Modify | 0.017572 | 0.017572 | 0.017572 | 0.0 | 6.29
Other | | 0.003811 | | | 1.37
Nlocal: 500 ave 500 max 500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 1610 ave 1610 max 1610 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 14767 ave 14767 max 14767 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 14767
Ave neighs/atom = 29.534
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00

View File

@ -0,0 +1,173 @@
LAMMPS (9 Nov 2016)
using 1 OpenMP thread(s) per MPI task
# LJ particles
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
read_data "lj.data"
orthogonal box = (1.06874 1.06874 1.06874) to (23.9313 23.9313 23.9313)
1 by 2 by 2 MPI processor grid
reading atoms ...
500 atoms
reading velocities ...
500 velocities
0 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnvt all nvt temp ${T0} ${T0} 1000.0
fix fxnvt all nvt temp 300 ${T0} 1000.0
fix fxnvt all nvt temp 300 300 1000.0
fix fxgREM all grem 400 -.01 -30000 fxnvt
run 1000
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7
ghost atom cutoff = 7
binsize = 3.5 -> bins = 7 7 7
Memory usage per processor = 5.34276 Mbytes
Step Temp PotEng TotEng Press Volume
0 305.69499 -3177.6423 -2722.9442 883.58369 11950.115
10 312.30121 -3182.2257 -2717.7013 793.47811 11950.115
20 314.94553 -3186.4559 -2717.9983 738.74091 11950.115
30 312.22861 -3183.7638 -2719.3474 797.47978 11950.115
40 305.93987 -3180.7079 -2725.6455 881.30806 11950.115
50 300.42132 -3176.5828 -2729.7288 967.92042 11950.115
60 299.16487 -3174.1921 -2729.2071 1004.247 11950.115
70 301.65565 -3176.0891 -2727.3992 962.58134 11950.115
80 304.77334 -3178.2663 -2724.939 907.8946 11950.115
90 305.59929 -3178.9472 -2724.3914 879.91629 11950.115
100 303.79263 -3177.5103 -2725.6418 892.67631 11950.115
110 300.85863 -3175.4711 -2727.9667 923.44924 11950.115
120 298.69083 -3175.6415 -2731.3615 931.87518 11950.115
130 298.38415 -3176.3706 -2732.5468 928.88286 11950.115
140 299.99129 -3177.6935 -2731.4792 914.36783 11950.115
150 301.83869 -3178.121 -2729.1588 915.01407 11950.115
160 301.18834 -3177.3117 -2729.3169 947.45228 11950.115
170 296.99406 -3172.7363 -2730.9801 1042.6928 11950.115
180 291.25952 -3168.3407 -2735.1142 1144.5436 11950.115
190 287.1178 -3164.9847 -2737.9187 1223.4003 11950.115
200 286.552 -3165.2799 -2739.0555 1235.6703 11950.115
210 289.81033 -3167.8062 -2736.7353 1194.6672 11950.115
220 296.09616 -3171.7847 -2731.3641 1115.8799 11950.115
230 303.79176 -3176.2893 -2724.4221 1024.6471 11950.115
240 309.9273 -3180.9591 -2719.9657 945.55045 11950.115
250 312.0159 -3182.3307 -2718.2306 934.36956 11950.115
260 309.63264 -3181.0452 -2720.4901 986.77385 11950.115
270 304.38172 -3176.568 -2723.8233 1097.264 11950.115
280 298.64188 -3174.1384 -2729.9313 1186.2239 11950.115
290 294.76686 -3170.9562 -2732.5128 1264.247 11950.115
300 294.00805 -3169.8091 -2732.4944 1287.4001 11950.115
310 296.41801 -3172.834 -2731.9347 1229.5624 11950.115
320 301.38477 -3175.9514 -2727.6644 1140.8664 11950.115
330 307.86584 -3181.2442 -2723.3171 1007.1545 11950.115
340 314.7103 -3185.9891 -2717.8814 871.74528 11950.115
350 320.53954 -3187.8385 -2711.0602 776.85994 11950.115
360 323.49505 -3188.9927 -2707.8184 716.58062 11950.115
370 321.62077 -3187.1246 -2708.7381 731.01909 11950.115
380 314.39049 -3181.4931 -2713.8611 831.21057 11950.115
390 303.57079 -3174.1804 -2722.6419 978.62645 11950.115
400 293.42165 -3167.0452 -2730.6027 1122.3558 11950.115
410 288.46838 -3165.4071 -2736.3322 1171.8087 11950.115
420 290.30766 -3168.5988 -2736.7882 1122.5413 11950.115
430 296.34338 -3173.0824 -2732.2941 1030.2769 11950.115
440 301.92394 -3175.4307 -2726.3417 964.25387 11950.115
450 303.76745 -3176.9122 -2725.0811 934.49176 11950.115
460 301.72985 -3174.2821 -2725.4818 979.07605 11950.115
470 298.93736 -3173.0014 -2728.3548 1020.0482 11950.115
480 298.80912 -3172.803 -2728.3471 1036.6531 11950.115
490 302.72217 -3175.3764 -2725.1001 997.71146 11950.115
500 309.11393 -3179.3088 -2719.5253 925.81108 11950.115
510 314.37612 -3183.2961 -2715.6855 856.23748 11950.115
520 315.72767 -3183.547 -2713.926 847.70543 11950.115
530 313.34173 -3182.2695 -2716.1974 877.30842 11950.115
540 309.84312 -3178.9553 -2718.0871 936.69244 11950.115
550 308.3251 -3177.8582 -2719.248 963.93032 11950.115
560 308.79192 -3176.4834 -2717.1788 989.67643 11950.115
570 307.57194 -3175.8464 -2718.3565 1021.0494 11950.115
580 301.8035 -3173.1582 -2724.2483 1102.4893 11950.115
590 292.43425 -3165.751 -2730.7772 1254.7815 11950.115
600 283.62905 -3159.8987 -2738.022 1381.0608 11950.115
610 279.90122 -3157.49 -2741.1581 1431.0028 11950.115
620 283.4582 -3160.756 -2739.1334 1367.7385 11950.115
630 292.58866 -3166.3469 -2731.1435 1241.1194 11950.115
640 302.86585 -3173.4778 -2722.9878 1089.7342 11950.115
650 309.89252 -3179.4078 -2718.4662 972.6359 11950.115
660 312.19165 -3182.7754 -2718.414 916.62037 11950.115
670 311.2287 -3181.9102 -2718.9811 933.79804 11950.115
680 308.57852 -3180.7312 -2721.7441 969.24936 11950.115
690 304.71609 -3176.9196 -2723.6775 1040.2699 11950.115
700 300.31995 -3175.5245 -2728.8213 1082.845 11950.115
710 296.43537 -3173.0166 -2732.0915 1127.4487 11950.115
720 293.86692 -3172.1582 -2735.0535 1135.0215 11950.115
730 293.35611 -3170.9335 -2734.5885 1122.9143 11950.115
740 295.44861 -3172.9862 -2733.5288 1050.995 11950.115
750 299.82732 -3175.3467 -2729.3763 958.31462 11950.115
760 305.37987 -3178.216 -2723.9866 854.1946 11950.115
770 310.75394 -3182.8127 -2720.5898 737.72668 11950.115
780 314.81395 -3183.7905 -2715.5286 679.74198 11950.115
790 316.63339 -3185.8028 -2714.8346 638.48871 11950.115
800 315.2894 -3186.9345 -2717.9654 641.53256 11950.115
810 310.54289 -3183.7383 -2721.8293 728.51241 11950.115
820 303.31439 -3178.7897 -2727.6326 864.45674 11950.115
830 295.46125 -3175.5387 -2736.0625 997.72969 11950.115
840 288.802 -3169.6502 -2740.0791 1160.6622 11950.115
850 283.94785 -3165.8605 -2743.5096 1289.55 11950.115
860 280.17501 -3163.0381 -2746.299 1392.8854 11950.115
870 277.2456 -3159.8429 -2747.4611 1481.3899 11950.115
880 275.93123 -3158.3584 -2747.9316 1523.5374 11950.115
890 277.0215 -3159.2285 -2747.18 1506.1558 11950.115
900 280.96237 -3162.483 -2744.5728 1428.4183 11950.115
910 287.37962 -3167.6183 -2740.1628 1303.0268 11950.115
920 294.56731 -3171.6765 -2733.5299 1177.748 11950.115
930 300.63273 -3174.0842 -2726.9158 1078.7393 11950.115
940 304.10943 -3175.9847 -2723.645 1007.7154 11950.115
950 304.64845 -3176.6263 -2723.4848 976.37917 11950.115
960 303.36343 -3176.4694 -2725.2393 971.40749 11950.115
970 302.57138 -3177.5541 -2727.5021 954.01115 11950.115
980 304.2593 -3179.2101 -2726.6475 919.74949 11950.115
990 307.69959 -3180.9631 -2723.2833 874.9594 11950.115
1000 310.3971 -3181.9675 -2720.2753 842.81184 11950.115
Loop time of 0.133894 on 4 procs for 1000 steps with 500 atoms
Performance: 645.285 ns/day, 0.037 hours/ns, 7468.580 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.065271 | 0.071043 | 0.07818 | 1.9 | 53.06
Bond | 5.6505e-05 | 6.5565e-05 | 7.7724e-05 | 0.1 | 0.05
Neigh | 0.0011396 | 0.0012607 | 0.0013669 | 0.2 | 0.94
Comm | 0.033866 | 0.040269 | 0.045386 | 2.6 | 30.08
Output | 0.0019252 | 0.0020776 | 0.0023642 | 0.4 | 1.55
Modify | 0.012141 | 0.013629 | 0.01486 | 0.9 | 10.18
Other | | 0.005549 | | | 4.14
Nlocal: 125 ave 127 max 123 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 871.25 ave 882 max 863 min
Histogram: 2 0 0 0 0 0 1 0 0 1
Neighs: 3691.75 ave 3808 max 3563 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Total # of neighbors = 14767
Ave neighs/atom = 29.534
Ave special neighs/atom = 0
Neighbor list builds = 5
Dangerous builds = 0
#write_data lj-out.data
Total wall time: 0:00:00

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,31 @@
# LJ particles
variable lambda world 900 910 920 930
variable rep world 0 1 2 3
#variable walker world 0 1 3 2
variable T0 equal 300.0
variable press equal 0.0
units real
atom_style full
pair_style lj/cut 5.0
# LJ particles
log ${rep}/log.lammps.${rep}
print "This is replica: ${rep}"
read_data ${rep}/lj.data
#dump dump all xyz 1000 ${rep}/dump.xyz
thermo 10
thermo_style custom step temp pe etotal press vol
timestep 1.0
fix fxnpt all npt temp ${T0} ${T0} 1000.0 iso ${press} ${press} 10000.0
fix fxgREM all grem ${lambda} -.03 -30000 fxnpt
thermo_modify press fxgREM_press
temper/grem 10000 100 ${lambda} fxgREM fxnpt 10294 98392 #${walker}
#write_data ${rep}/lj-out.data

View File

@ -103,14 +103,14 @@ fix integration_fix tlsph smd/integrate_tlsph
####################################################################################################
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
compute dt_atom all smd/tlsph_dt
compute p all smd/plastic_strain
compute epsdot all smd/plastic_strain_rate
compute S all smd/tlsph_stress # Cauchy stress tensor
compute D all smd/tlsph_strain_rate
compute E all smd/tlsph_strain
compute nn all smd/tlsph_num_neighs # number of neighbors for each particle
compute shape all smd/tlsph_shape
compute dt_atom all smd/tlsph/dt
compute p all smd/plastic/strain
compute epsdot all smd/plastic/strain/rate
compute S all smd/tlsph/stress # Cauchy stress tensor
compute D all smd/tlsph/strain/rate
compute E all smd/tlsph/strain
compute nn all smd/tlsph/num/neighs # number of neighbors for each particle
compute shape all smd/tlsph/shape
compute damage all smd/damage
dump dump_id all custom 100 dump.LAMMPS id type x y z &
c_S[1] c_S[2] c_S[3] c_S[4] c_S[5] c_S[6] c_S[7] c_nn c_p &

View File

@ -124,11 +124,11 @@ fix integration_fix_solids solids smd/integrate_tlsph
####################################################################################################
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
compute eint all smd/internal_energy
compute contact_radius all smd/contact_radius
compute S solids smd/tlsph_stress
compute nn water smd/ulsph_num_neighs
compute epl solids smd/plastic_strain
compute eint all smd/internal/energy
compute contact_radius all smd/contact/radius
compute S solids smd/tlsph/stress
compute nn water smd/ulsph/num/neighs
compute epl solids smd/plastic/strain
compute vol all smd/volume
compute rho all smd/rho

View File

@ -98,9 +98,9 @@ fix integration_fix all smd/integrate_ulsph adjust_radius 1.01 10 15
####################################################################################################
variable dumpFreq equal 100
compute rho all smd/rho
compute nn all smd/ulsph_num_neighs # number of neighbors for each particle
compute contact_radius all smd/contact_radius
compute surface_coords surface smd/triangle_vertices
compute nn all smd/ulsph/num/neighs # number of neighbors for each particle
compute contact_radius all smd/contact/radius
compute surface_coords surface smd/triangle/vertices
dump dump_id water custom ${dumpFreq} dump.LAMMPS id type x y z vx vy vz &
@ -116,7 +116,7 @@ dump_modify surf_dump first yes
####################################################################################################
# STATUS OUTPUT
####################################################################################################
compute eint all smd/internal_energy
compute eint all smd/internal/energy
compute alleint all reduce sum c_eint
variable etot equal pe+ke+c_alleint+f_gfix # total energy of the system
thermo 100

View File

@ -88,11 +88,11 @@ fix integration_fix tlsph smd/integrate_tlsph
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
variable dumpFreq equal 30
compute S all smd/tlsph_stress # Cauchy stress tensor
compute nn all smd/tlsph_num_neighs # number of neighbors for each particle
compute cr all smd/contact_radius
compute p all smd/plastic_strain
compute eint all smd/internal_energy
compute S all smd/tlsph/stress # Cauchy stress tensor
compute nn all smd/tlsph/num/neighs # number of neighbors for each particle
compute cr all smd/contact/radius
compute p all smd/plastic/strain
compute eint all smd/internal/energy
compute alleint all reduce sum c_eint
variable etot equal c_alleint+ke+pe

View File

@ -49,7 +49,7 @@ variable vol_one equal ${l0}^2 # volume of one particle -- assuming unit
variable skin equal ${h} # Verlet list range
neighbor ${skin} bin
set group all volume ${vol_one}
set group all smd_mass_density ${rho}
set group all smd/mass/density ${rho}
set group all diameter ${h} # set SPH kernel radius
####################################################################################################
@ -83,9 +83,9 @@ fix integration_fix tlsph smd/integrate_tlsph
####################################################################################################
# SPECIFY TRAJECTORY OUTPUT
####################################################################################################
compute S all smd/tlsph_stress # Cauchy stress tensor
compute E all smd/tlsph_strain # Green-Lagrange strain tensor
compute nn all smd/tlsph_num_neighs # number of neighbors for each particle
compute S all smd/tlsph/stress # Cauchy stress tensor
compute E all smd/tlsph/strain # Green-Lagrange strain tensor
compute nn all smd/tlsph/num/neighs # number of neighbors for each particle
dump dump_id all custom 10 dump.LAMMPS id type x y z vx vy vz &
c_S[1] c_S[2] c_S[4] c_nn &
c_E[1] c_E[2] c_E[4] &

View File

@ -0,0 +1,71 @@
# Testsystem for core-shell model compared to Mitchell and Fincham
# Hendrik Heenen, June 2014
# ------------------------ INITIALIZATION ----------------------------
units metal
dimension 3
boundary p p p
atom_style full
# ----------------------- ATOM DEFINITION ----------------------------
fix csinfo all property/atom i_CSID
read_data data.coreshell fix csinfo NULL CS-Info
group cores type 1 2
group shells type 3 4
neighbor 2.0 bin
comm_modify vel yes
# ------------------------ FORCE FIELDS ------------------------------
pair_style born/coul/dsf/cs 0.1 20.0 20.0 # A, rho, sigma=0, C, D
pair_coeff * * 0.0 1.000 0.00 0.00 0.00
pair_coeff 3 3 487.0 0.23768 0.00 1.05 0.50 #Na-Na
pair_coeff 3 4 145134.0 0.23768 0.00 6.99 8.70 #Na-Cl
pair_coeff 4 4 405774.0 0.23768 0.00 72.40 145.40 #Cl-Cl
bond_style harmonic
bond_coeff 1 63.014 0.0
bond_coeff 2 25.724 0.0
# ------------------------ Equilibration Run -------------------------------
reset_timestep 0
thermo 50
thermo_style custom step etotal pe ke temp press &
epair evdwl ecoul elong ebond fnorm fmax vol
compute CSequ all temp/cs cores shells
# output via chunk method
#compute prop all property/atom i_CSID
#compute cs_chunk all chunk/atom c_prop
#compute cstherm all temp/chunk cs_chunk temp internal com yes cdof 3.0
#fix ave_chunk all ave/time 100 1 100 c_cstherm file chunk.dump mode vector
thermo_modify temp CSequ
# velocity bias option
velocity all create 1427 134 dist gaussian mom yes rot no bias yes temp CSequ
velocity all scale 1427 temp CSequ
fix thermoberendsen all temp/berendsen 1427 1427 0.4
fix nve all nve
fix_modify thermoberendsen temp CSequ
# 2 fmsec timestep
timestep 0.002
run 500
unfix thermoberendsen
# ------------------------ Dynamic Run -------------------------------
run 1000

View File

@ -0,0 +1,185 @@
LAMMPS (27 Oct 2016)
# Testsystem for core-shell model compared to Mitchell and Fincham
# Hendrik Heenen, June 2014
# ------------------------ INITIALIZATION ----------------------------
units metal
dimension 3
boundary p p p
atom_style full
# ----------------------- ATOM DEFINITION ----------------------------
fix csinfo all property/atom i_CSID
read_data data.coreshell fix csinfo NULL CS-Info
orthogonal box = (0 0 0) to (24.096 24.096 24.096)
1 by 1 by 1 MPI processor grid
reading atoms ...
432 atoms
scanning bonds ...
1 = max bonds/atom
reading bonds ...
216 bonds
1 = max # of 1-2 neighbors
0 = max # of 1-3 neighbors
0 = max # of 1-4 neighbors
1 = max # of special neighbors
group cores type 1 2
216 atoms in group cores
group shells type 3 4
216 atoms in group shells
neighbor 2.0 bin
comm_modify vel yes
# ------------------------ FORCE FIELDS ------------------------------
pair_style born/coul/dsf/cs 0.1 20.0 20.0 # A, rho, sigma=0, C, D
pair_coeff * * 0.0 1.000 0.00 0.00 0.00
pair_coeff 3 3 487.0 0.23768 0.00 1.05 0.50 #Na-Na
pair_coeff 3 4 145134.0 0.23768 0.00 6.99 8.70 #Na-Cl
pair_coeff 4 4 405774.0 0.23768 0.00 72.40 145.40 #Cl-Cl
bond_style harmonic
bond_coeff 1 63.014 0.0
bond_coeff 2 25.724 0.0
# ------------------------ Equilibration Run -------------------------------
reset_timestep 0
thermo 50
thermo_style custom step etotal pe ke temp press epair evdwl ecoul elong ebond fnorm fmax vol
compute CSequ all temp/cs cores shells
# output via chunk method
#compute prop all property/atom i_CSID
#compute cs_chunk all chunk/atom c_prop
#compute cstherm all temp/chunk cs_chunk temp internal com yes cdof 3.0
#fix ave_chunk all ave/time 100 1 100 c_cstherm file chunk.dump mode vector
thermo_modify temp CSequ
# velocity bias option
velocity all create 1427 134 dist gaussian mom yes rot no bias yes temp CSequ
Neighbor list info ...
1 neighbor list requests
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 22
ghost atom cutoff = 22
binsize = 11 -> bins = 3 3 3
velocity all scale 1427 temp CSequ
fix thermoberendsen all temp/berendsen 1427 1427 0.4
fix nve all nve
fix_modify thermoberendsen temp CSequ
# 2 fmsec timestep
timestep 0.002
run 500
Memory usage per processor = 7.04355 Mbytes
Step TotEng PotEng KinEng Temp Press E_pair E_vdwl E_coul E_long E_bond Fnorm Fmax Volume
0 -635.80596 -675.46362 39.657659 1427 -21302.622 -675.46362 1.6320365 -677.09565 0 0 1.3517686e-14 2.942091e-15 13990.5
50 -633.9898 -666.02679 32.03699 1152.7858 -4578.5681 -668.50431 37.800204 -706.30452 0 2.4775226 14.568073 4.3012389 13990.5
100 -631.89604 -661.96148 30.065442 1081.8436 -3536.6738 -664.61798 39.18583 -703.80381 0 2.6564973 14.677968 3.9051029 13990.5
150 -630.08723 -662.95879 32.871559 1182.816 -109.19506 -665.76772 46.247821 -712.01554 0 2.8089226 15.270039 2.9328953 13990.5
200 -628.55895 -663.97376 35.414806 1274.3296 -1748.35 -666.58439 41.738552 -708.32294 0 2.6106349 14.148282 3.1047826 13990.5
250 -627.28761 -661.92274 34.635123 1246.2743 -1280.4899 -664.917 43.045475 -707.96247 0 2.9942594 14.248617 2.4694705 13990.5
300 -626.6163 -663.65651 37.040209 1332.8164 -1887.9043 -666.35215 40.84964 -707.20179 0 2.6956373 13.142643 1.9263242 13990.5
350 -625.76781 -664.66441 38.896607 1399.6151 -1839.482 -667.47659 40.999206 -708.47579 0 2.8121749 13.601238 1.9262698 13990.5
400 -625.02586 -661.46042 36.434568 1311.0236 -868.2031 -664.40231 43.21398 -707.61629 0 2.9418875 14.945389 2.7493413 13990.5
450 -624.3278 -660.50844 36.180639 1301.8865 -2203.3944 -663.49896 40.008669 -703.50763 0 2.9905179 14.158866 1.7299899 13990.5
500 -623.56254 -661.33839 37.775849 1359.2869 -810.50736 -664.11652 42.993999 -707.11052 0 2.7781274 13.68709 2.9115277 13990.5
Loop time of 10.7162 on 1 procs for 500 steps with 432 atoms
Performance: 8.063 ns/day, 2.977 hours/ns, 46.658 timesteps/s
99.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 10.478 | 10.478 | 10.478 | 0.0 | 97.78
Bond | 0.0029511 | 0.0029511 | 0.0029511 | 0.0 | 0.03
Neigh | 0.14159 | 0.14159 | 0.14159 | 0.0 | 1.32
Comm | 0.074382 | 0.074382 | 0.074382 | 0.0 | 0.69
Output | 0.00054097 | 0.00054097 | 0.00054097 | 0.0 | 0.01
Modify | 0.010588 | 0.010588 | 0.010588 | 0.0 | 0.10
Other | | 0.007748 | | | 0.07
Nlocal: 432 ave 432 max 432 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9280 ave 9280 max 9280 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 297636 ave 297636 max 297636 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 297636
Ave neighs/atom = 688.972
Ave special neighs/atom = 1
Neighbor list builds = 20
Dangerous builds = 0
unfix thermoberendsen
# ------------------------ Dynamic Run -------------------------------
run 1000
Memory usage per processor = 7.04355 Mbytes
Step TotEng PotEng KinEng Temp Press E_pair E_vdwl E_coul E_long E_bond Fnorm Fmax Volume
500 -623.56254 -661.33839 37.775849 1359.2869 -810.50736 -664.11652 42.993999 -707.11052 0 2.7781274 13.68709 2.9115277 13990.5
550 -623.5004 -660.74472 37.244326 1340.1611 -1413.4326 -663.99669 41.875014 -705.8717 0 3.2519651 15.097948 2.278405 13990.5
600 -623.46963 -659.61729 36.147655 1300.6997 -521.50578 -662.54994 43.956071 -706.50601 0 2.9326492 14.99649 2.6334959 13990.5
650 -623.49291 -661.50698 38.014069 1367.8588 -1230.0925 -664.21074 42.027844 -706.23859 0 2.7037578 13.982308 1.6247207 13990.5
700 -623.4913 -660.11564 36.62434 1317.8522 -727.89052 -663.24921 43.413397 -706.66261 0 3.1335699 15.009937 2.0563966 13990.5
750 -623.50292 -657.95982 34.4569 1239.8613 636.46644 -661.16971 46.539267 -707.70898 0 3.2098934 15.25993 2.1864622 13990.5
800 -623.5176 -659.92032 36.402711 1309.8773 -912.75799 -662.84989 42.668309 -705.5182 0 2.9295708 13.577516 2.0006099 13990.5
850 -623.44098 -660.92727 37.486295 1348.8679 -550.40358 -664.08308 43.667245 -707.75033 0 3.1558098 14.836208 2.279198 13990.5
900 -623.46361 -661.21737 37.753765 1358.4923 1267.8647 -664.52195 47.67284 -712.19479 0 3.3045765 15.058502 1.886141 13990.5
950 -623.50114 -660.58464 37.083492 1334.3739 1754.7359 -663.48186 48.70363 -712.18549 0 2.897226 15.519042 2.2654928 13990.5
1000 -623.50161 -660.02915 36.527539 1314.369 228.76104 -663.31152 45.374099 -708.68562 0 3.2823685 14.783709 2.4201134 13990.5
1050 -623.45985 -660.57417 37.114321 1335.4832 -1490.604 -663.75391 41.258878 -705.01279 0 3.1797391 14.250262 2.3153255 13990.5
1100 -623.51051 -661.20338 37.692871 1356.3011 1791.7899 -664.01042 48.626451 -712.63687 0 2.807039 15.559872 3.184101 13990.5
1150 -623.51067 -663.19545 39.684776 1427.9758 1023.0584 -666.07723 46.5628 -712.64003 0 2.8817804 13.895322 2.3950292 13990.5
1200 -623.49625 -659.6715 36.175253 1301.6927 1600.2805 -662.62259 48.522365 -711.14495 0 2.9510854 15.567834 2.1677651 13990.5
1250 -623.48282 -660.56735 37.084533 1334.4113 -871.67341 -663.86673 42.560699 -706.42743 0 3.2993759 14.569539 2.0093709 13990.5
1300 -623.47744 -663.63125 40.153811 1444.853 1343.7147 -666.39564 47.104842 -713.50048 0 2.7643857 14.186019 1.4599359 13990.5
1350 -623.49121 -661.42731 37.936096 1365.0531 589.73669 -664.46099 45.947687 -710.40867 0 3.0336821 14.801223 2.7486556 13990.5
1400 -623.50803 -660.03912 36.53109 1314.4968 362.97431 -663.24909 45.772904 -709.02199 0 3.2099708 14.566488 1.9170714 13990.5
1450 -623.51243 -659.65548 36.143052 1300.534 2853.0755 -663.0534 51.355353 -714.40875 0 3.3979157 15.890282 2.5251359 13990.5
1500 -623.51621 -661.87741 38.361201 1380.3496 740.04973 -665.00896 46.208742 -711.2177 0 3.1315492 15.168927 2.4710846 13990.5
Loop time of 22.2766 on 1 procs for 1000 steps with 432 atoms
Performance: 7.757 ns/day, 3.094 hours/ns, 44.890 timesteps/s
99.9% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 21.8 | 21.8 | 21.8 | 0.0 | 97.86
Bond | 0.005852 | 0.005852 | 0.005852 | 0.0 | 0.03
Neigh | 0.30423 | 0.30423 | 0.30423 | 0.0 | 1.37
Comm | 0.14388 | 0.14388 | 0.14388 | 0.0 | 0.65
Output | 0.0010855 | 0.0010855 | 0.0010855 | 0.0 | 0.00
Modify | 0.0064189 | 0.0064189 | 0.0064189 | 0.0 | 0.03
Other | | 0.01527 | | | 0.07
Nlocal: 432 ave 432 max 432 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9318 ave 9318 max 9318 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 297131 ave 297131 max 297131 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 297131
Ave neighs/atom = 687.803
Ave special neighs/atom = 1
Neighbor list builds = 44
Dangerous builds = 0
Total wall time: 0:00:33

View File

@ -114,3 +114,35 @@ neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff/Mod model for Si
clear
read_restart restart.equil
pair_style tersoff/mod
pair_coeff * * Si.tersoff.mod Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100
# Test Tersoff/Mod/C model for Si
clear
read_restart restart.equil
pair_style tersoff/mod/c
pair_coeff * * Si.tersoff.modc Si Si Si Si Si Si Si Si
thermo 10
fix 1 all nvt temp $t $t 0.1
fix_modify 1 energy yes
timestep 1.0e-3
neighbor 1.0 bin
neigh_modify every 1 delay 10 check yes
run 100

potentials/Al_prb.agni (new file, 1019 lines)

File diff suppressed because it is too large

View File

@ -0,0 +1,10 @@
# DATE: 2016-11-09 CONTRIBUTOR: Ganga P Purja Pun (George Mason University, Fairfax) CITATION: Unknown
#
# Format:
# element1 element2 element3
# beta alpha h eta
# beta_ters lam2 B R D lam1 A
# n c1 c2 c3 c4 c5 C
Si Si Si 3.00000000 1.80536502 -0.38136087 2.16152496
1 1.39343356 117.78072440 2.87478837 0.33090566 3.18011795 3198.51383127
1.98633876 0.20123243 614230.04310619 996439.09714140 3.33560562 25.20963770 -0.00592042

View File

@ -0,0 +1,28 @@
# Compile LAMMPS as shared library
git clone https://github.com/lammps/lammps.git
cd lammps/src
python Make.py -m mpi -png -s ffmpeg exceptions -a file
make -j 4 mode=shlib auto
cd ../..
# Install Python package
virtualenv testing
source testing/bin/activate
(testing) cd lammps/python
(testing) python install.py
(testing) pip install jupyter matplotlib mpi4py
(testing) cd ../../examples
# Launch jupyter and work inside browser
(testing) jupyter notebook
# Use Ctrl+c to stop jupyter
# finally exit the virtualenv
(testing) deactivate

View File

@ -0,0 +1,34 @@
Comment line
4 atoms
0 bonds
0 angles
1 dihedrals
0 impropers
1 atom types
0 bond types
0 angle types
1 dihedral types
0 improper types
-5.0 5.0 xlo xhi
-5.0 5.0 ylo yhi
-5.0 5.0 zlo zhi
0.0 0.0 0.0 xy xz yz
Atoms # molecular
1 1 1 -1.00000 1.00000 0.00000
2 1 1 -0.50000 0.00000 0.00000
3 1 1 0.50000 0.00000 0.00000
4 1 1 1.00000 1.00000 0.00000
Dihedral Coeffs
1 80.0 1 2
Dihedrals
1 1 1 2 3 4

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,4 @@
from mpi4py import MPI
comm=MPI.COMM_WORLD
print("Hello from rank %d of %d" % (comm.rank, comm.size))

View File

@ -0,0 +1,33 @@
# 3d Lennard-Jones melt
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 10 0 10 0 10
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 3.0 87287
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify every 20 delay 0 check no
fix 1 all nve
#dump id all atom 50 dump.melt
#dump 2 all image 25 image.*.jpg type type &
# axes yes 0.8 0.02 view 60 -30
#dump_modify 2 pad 3
#dump 3 all movie 25 movie.mpg type type &
# axes yes 0.8 0.02 view 60 -30
#dump_modify 3 pad 3
thermo 50
run 250

View File

@ -0,0 +1,10 @@
from mpi4py import MPI
from lammps import PyLammps
L = PyLammps()
L.file('in.melt')
if MPI.COMM_WORLD.rank == 0:
pe = L.eval("pe")
print("Potential Energy:", pe)
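
The same eval-based pattern extends to other equal-style thermo expressions. A small sketch, assuming the same in.melt input as above (pe, temp, and press are standard thermo keywords):

from mpi4py import MPI
from lammps import PyLammps

L = PyLammps()
L.file('in.melt')
# print several thermo quantities on rank 0 only, as in the example above
if MPI.COMM_WORLD.rank == 0:
    for expr in ("pe", "temp", "press"):
        print("%-6s = %g" % (expr, L.eval(expr)))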

src/.gitignore (12 changes)
View File

@ -205,6 +205,8 @@
/compute_pe_tally.h
/compute_plasticity_atom.cpp
/compute_plasticity_atom.h
/compute_pressure_grem.cpp
/compute_pressure_grem.h
/compute_rigid_local.cpp
/compute_rigid_local.h
/compute_spec_atom.cpp
@ -343,6 +345,8 @@
/fix_gle.h
/fix_gpu.cpp
/fix_gpu.h
/fix_grem.cpp
/fix_grem.h
/fix_imd.cpp
/fix_imd.h
/fix_ipi.cpp
@ -539,6 +543,8 @@
/pair_adp.cpp
/pair_adp.h
/pair_agni.cpp
/pair_agni.h
/pair_airebo.cpp
/pair_airebo.h
/pair_airebo_morse.cpp
@ -773,6 +779,8 @@
/pair_tersoff.h
/pair_tersoff_mod.cpp
/pair_tersoff_mod.h
/pair_tersoff_mod_c.cpp
/pair_tersoff_mod_c.h
/pair_tersoff_table.cpp
/pair_tersoff_table.h
/pair_tersoff_zbl.cpp
@ -871,6 +879,8 @@
/tad.h
/temper.cpp
/temper.h
/temper_grem.cpp
/temper_grem.h
/thr_data.cpp
/thr_data.h
/verlet_split.cpp
@ -976,6 +986,8 @@
/fix_ttm_mod.h
/pair_born_coul_long_cs.cpp
/pair_born_coul_long_cs.h
/pair_born_coul_dsf_cs.cpp
/pair_born_coul_dsf_cs.h
/pair_buck_coul_long_cs.cpp
/pair_buck_coul_long_cs.h
/pair_coul_long_cs.cpp

View File

@ -38,7 +38,8 @@ enum{ROTATE,ALL};
/* ---------------------------------------------------------------------- */
ComputeTempAsphere::ComputeTempAsphere(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg)
Compute(lmp, narg, arg),
id_bias(NULL), tbias(NULL), avec(NULL)
{
if (narg < 3) error->all(FLERR,"Illegal compute temp/asphere command");

View File

@ -28,7 +28,7 @@ enum{SPHERE,LINE,TRI}; // also in DumpImage
/* ---------------------------------------------------------------------- */
BodyNparticle::BodyNparticle(LAMMPS *lmp, int narg, char **arg) :
Body(lmp, narg, arg)
Body(lmp, narg, arg), imflag(NULL), imdata(NULL)
{
if (narg != 3) error->all(FLERR,"Invalid body nparticle command");

View File

@ -33,7 +33,7 @@ enum{ID,TYPE,INDEX};
/* ---------------------------------------------------------------------- */
ComputeBodyLocal::ComputeBodyLocal(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg)
Compute(lmp, narg, arg), which(NULL), index(NULL), avec(NULL), bptr(NULL)
{
if (narg < 4) error->all(FLERR,"Illegal compute body/local command");

View File

@ -37,7 +37,7 @@ enum{ROTATE,ALL};
/* ---------------------------------------------------------------------- */
ComputeTempBody::ComputeTempBody(LAMMPS *lmp, int narg, char **arg) :
Compute(lmp, narg, arg)
Compute(lmp, narg, arg), id_bias(NULL), tbias(NULL), avec(NULL)
{
if (narg < 3) error->all(FLERR,"Illegal compute temp/body command");

View File

@ -31,8 +31,10 @@ action () {
action compute_temp_cs.cpp
action compute_temp_cs.h
action pair_born_coul_long_cs.cpp pair_born_coul_long.cpp
action pair_born_coul_dsf_cs.cpp pair_born_coul_dsf.cpp
action pair_buck_coul_long_cs.cpp pair_buck_coul_long.cpp
action pair_born_coul_long_cs.h pair_born_coul_long.h
action pair_born_coul_dsf_cs.h pair_born_coul_dsf.h
action pair_buck_coul_long_cs.h pair_buck_coul_long.h
action pair_coul_long_cs.cpp pair_coul_long.cpp
action pair_coul_long_cs.h pair_coul_long.h

Some files were not shown because too many files have changed in this diff