Compare commits

...

282 Commits

Author SHA1 Message Date
7e78738c73 patch 5Feb18 2018-02-09 14:26:21 -07:00
fa4c7fc664 Merge pull request #781 from wmbrownIntel/user-intel-2018u1p2
USER-INTEL: Adding missing backslash for two Makefiles using Intel co…
2018-02-02 14:51:23 -07:00
401bfc52e1 Merge pull request #798 from akohlmey/always-use-internal-xdr
Replace OS provided XDR support with bundled code
2018-02-02 14:47:40 -07:00
984fda5e78 Merge pull request #797 from akohlmey/multifile-restart-bugfix
Address bug in multifile restart writing with step number included
2018-02-02 14:47:09 -07:00
196b3c81ef Merge pull request #796 from akohlmey/create-bonds-sanity-check
Sanity check on created/read-in bonds/angles/dihedrals/impropers
2018-02-02 14:46:46 -07:00
f4a79b4d8e Merge pull request #794 from akohlmey/tersoff-fixes
Consistent checking of Tersoff potential parameters
2018-02-02 14:46:11 -07:00
7441b062e9 Merge pull request #792 from pastewka/18_dump_nc1
Dumping to multiple files failed with an error message complaining about a missing 'append yes' option.
2018-02-02 14:45:52 -07:00
10d80ba9c3 Merge pull request #791 from akohlmey/charmm-cmap-docs-reference
Update CMAP related docs and rebuild example input decks
2018-02-02 14:44:58 -07:00
5383035828 Merge pull request #787 from akohlmey/user-atc-dep
Make the USER-ATC package depend on MANYBODY
2018-02-02 14:44:36 -07:00
dc4dd1591f Merge pull request #785 from jrgissing/imp_dihed_print
modifications to correctly print modified dihedrals, impropers
2018-02-02 14:44:15 -07:00
e4a1826dee Merge pull request #783 from numericalfreedom/lammps-doxygen
Added double quotes to a word in Section_tools.txt file
2018-02-02 14:41:56 -07:00
160edc9532 Merge pull request #782 from hheenen/user-mofff-contribution
User mofff contribution
2018-02-02 14:41:28 -07:00
553b3ff69a Merge pull request #778 from athomps/fix_gcmc_segfault_fix
Fixed recent segfault in fix gcmc and added mcmoves keyword
2018-02-02 14:40:14 -07:00
2913d3da60 Merge pull request #777 from lammps/fix_prop_atom
Fix bug in FixPropertyAtom
2018-02-02 14:39:42 -07:00
4af14becb5 Merge pull request #710 from rpleite/add-pair-ufm
Add the UFM pair potential - (GPU/OPT/OMP)
2018-02-02 14:38:28 -07:00
85fdf9eaba make links to papers unique across files 2018-02-02 10:10:27 +01:00
2ff278defa fully integrate ufm into documentation 2018-02-02 10:02:44 +01:00
bfcb71a8be add package file to .gitignore 2018-02-02 09:27:09 +01:00
c3d1cee5f9 do not try to use OS-provided (and obsoleted) RPC headers for XDR. always use bundled code 2018-02-02 09:23:02 +01:00
3e0cb9b463 rename examples to match usual naming conventions and add reference output. 2018-02-01 18:56:10 +01:00
b70149e86a include added docpages in toctrees for sphinx 2018-02-01 18:50:25 +01:00
080ce422ae correct incorrect reference 2018-02-01 18:40:28 +01:00
cc54848f7b adapt to current manual conventions 2018-02-01 18:40:17 +01:00
090ce7cecb remove tab char 2018-02-01 18:19:37 +01:00
3bc1c6b59e Address bug in multifile restart writing with step number included 2018-02-01 14:25:55 +01:00
38715d7f85 do not allow illegal combination of atom IDs in data files for bonds/angles/dihedrals/impropers 2018-01-31 06:08:04 -05:00
54a3096278 do not allow single bond/angle/dihedral definitions with illegal atom ID combinations 2018-01-31 05:59:19 -05:00
17d98d0915 make checking for tersoff parameters consistent across all implementations 2018-01-30 04:53:57 -05:00
9cf4ac8b7c DOC: Added netcdf and netcdf/mpiio to dump style supporting atom attributes. 2018-01-28 14:24:28 +01:00
4e4fd5f07c BUG: Dumping to multiple files failed with an error message complaining about a missing 'append yes' option. 2018-01-28 14:23:05 +01:00
9fd1e47968 ignore generated/copied files 2018-01-28 06:24:03 -05:00
6753977837 recreate 1ac7 input deck with up to date tools and LAMMPS binary. 2018-01-28 06:23:09 -05:00
031812b2bf recreate 1gb1 input deck with CMAP included. 2018-01-28 06:22:03 -05:00
cf8dae5ef3 explain position of fix cmap command in input for data and restart files 2018-01-28 06:20:03 -05:00
ba68548e38 clarify ch2lmp commands purpose in tools section 2018-01-28 06:19:33 -05:00
448c9c8d8a add a dependency on PairEAM/MANYBODY for installing USER-ATC 2018-01-23 16:27:06 +01:00
d2da49cdf9 modifications to correctly print modified dihedrals, impropers 2018-01-22 23:56:11 -07:00
e72faf3d7f Added double quotes to a word in Section_tools.txt file in order to activate
Sphinx hyperlink.
2018-01-21 20:55:04 +01:00
3f967e3d84 cmake: add USER-MOFFF 2018-01-21 08:10:38 -07:00
5212e95787 added src/USER-MOFFF/README 2018-01-21 13:34:02 +01:00
f7e2bf239f Added MOF-FF src, doc and example files 2018-01-21 11:40:48 +01:00
a802b750a6 USER-INTEL: Adding missing backslash for two Makefiles using Intel compiler. 2018-01-17 21:59:37 -08:00
9bb7f1ddf6 Fixed recent segfault in fix gcmc and added mcmoves keyword 2018-01-17 13:45:15 -07:00
5e9d257ec2 patch 17Jan18 2018-01-17 13:00:48 -07:00
415a55bc3e Fix bug in fix_property_atom 2018-01-17 10:41:56 -07:00
e1e6825eb2 Merge pull request #776 from numericalfreedom/lammps-tools-doxygen
Small character corrections to the file Developer.dox.lammps
2018-01-17 07:34:18 -07:00
88a2f9fcc6 Merge pull request #775 from wmbrownIntel/user-intel-2018u1
USER-INTEL: Adding compiler flag and small fix to tersoff/intel for 2…
2018-01-17 07:34:08 -07:00
480d7dd6ce Merge pull request #774 from lammps/neigh-tweak
small changes to recent PR to standardize with other npair styles
2018-01-17 07:33:46 -07:00
9b12984378 Small correction in image caption in Developer.dox.lammps 2018-01-17 14:46:44 +01:00
8d29f64236 Small character corrections to the file Developer.dox.lammps 2018-01-17 14:05:30 +01:00
1b91c0eab0 fix typo in docs resulting in broken link 2018-01-16 21:01:25 -05:00
0c8af0704e Tweak to npair_halffull_newtoff.cpp 2018-01-16 16:43:25 -07:00
f1901237be small changes to recent PR to standardize with other npair styles 2018-01-16 15:49:16 -07:00
0cd864134d Merge pull request #765 from stanmoore1/halffull_ghost
Add ghost option to npair_halffull
2018-01-16 15:39:07 -07:00
bf48f3e240 Merge pull request #758 from mkanski/ci-ReaxFF
Modification of ReaxFF
2018-01-16 13:04:20 -07:00
23dda3d51b Merge pull request #770 from numericalfreedom/lammps-tools-doxygen
Tools for LAMMPS documentation for developers with the "doxygen" documentation platform
2018-01-16 13:02:10 -07:00
5d254855eb Merge pull request #772 from akohlmey/improved-variable-error-reporting
More improved variable error reporting
2018-01-16 13:00:42 -07:00
9a70f2d182 Merge pull request #773 from lammps/couple-simple
update of COUPLE/simple examples
2018-01-16 12:59:51 -07:00
b95cf658c7 remove faulty line of code, left in by accident 2018-01-16 14:10:46 -05:00
709ce8a230 Merge branch 'neigh-identical-check' into couple-simple
# Conflicts:
#	src/create_bonds.cpp
#	src/delete_atoms.cpp
2018-01-16 14:09:21 -05:00
2ad823ffd4 correct check for identical requests 2018-01-16 14:08:18 -05:00
4c0cd5f1ad add short bond cutoff warning also to USER-OMP
This adds the warning about a total cutoff shorter than 2x the bond cutoff to the USER-OMP variant of reax/c as well
2018-01-16 13:50:49 -05:00
8d37c89cb6 different bug fix for create_bonds, delete_atoms neigh list issue 2018-01-16 11:32:37 -07:00
21ff4407ab update of COUPLE/simple examples 2018-01-16 10:48:38 -07:00
f2c0c4a7d1 Merge branch 'lammps-tools-doxygen' of https://github.com/numericalfreedom/lammps into lammps-tools-doxygen 2018-01-16 18:39:08 +01:00
1640066132 Merge remote-tracking branch 'lammps-origin/master' into lammps-tools-doxygen 2018-01-16 18:38:12 +01:00
3b1ec14a68 add a warning if the bonded cutoff is large
This should print a warning when 2x the bonded interaction cutoff is larger than the other cutoffs, as was the setting before the performance optimization with the change in 2690075405
2018-01-16 12:01:47 -05:00
01cfb710ff propagate printing variable name with error deeper into the code
this now covers a large set of cases where the variable name can be printed.
it also is complete for the current code, since no more default arguments are required
2018-01-16 11:31:20 -05:00
3de39c70c1 print variable name with errors only for known variables.
this makes errors for "anonymous" evaluations (if statement, immediate variables) less confusing.
2018-01-16 11:29:48 -05:00
86ed55599d The bash shell script doxygen.sh has been revised. 2018-01-16 17:28:58 +01:00
e798cdf31f Merge pull request #771 from stanmoore1/reax_example
Add ReaxFF Example
2018-01-16 09:05:36 -07:00
97dd812647 Merge pull request #769 from rbberger/cmake_compilation_fixes
CMake compilation fixes
2018-01-16 09:04:04 -07:00
e07a6d1e34 Merge pull request #768 from akohlmey/collected-doc-fixes
collected documentation fixes
2018-01-16 09:03:45 -07:00
6e37272c9d Merge pull request #764 from akohlmey/improved-variable-error-reporting
Improved error messages when defining or evaluating variables
2018-01-16 09:02:20 -07:00
6bd6e62767 Merge pull request #763 from stanmoore1/kk_neighlist
Enhance Kokkos Neighbor lists
2018-01-16 09:01:15 -07:00
57dd6c78c1 Merge pull request #761 from stanmoore1/kk_snap
Kokkos version of Pair SNAP and ZBL
2018-01-16 09:00:52 -07:00
9e413bf57a Merge pull request #760 from akohlmey/info-coeffs
New "coeffs" keyword added to "info" command
2018-01-16 09:00:34 -07:00
b374813104 Add reax example 2018-01-16 08:50:30 -07:00
07ddb5e62c USER-INTEL: Adding compiler flag and small fix to tersoff/intel for 2018u1 compilers. 2018-01-15 23:54:48 -08:00
72b479d42e Add missing return value in pair_snap_kokkos 2018-01-15 11:01:07 -07:00
c8b5d83cc7 mention tool/doxygen folder in tools section and refer to README file 2018-01-15 10:43:30 -05:00
a5998179bf Fix issues with Kokkos skip lists 2018-01-15 08:23:39 -07:00
26d6f6d1f1 Tools for LAMMPS documentation with the "doxygen" documentation platform
are provided.

        New directory: tools/doxygen

        New file:      tools/doxygen/Developer.dox.lammps
        New file:      tools/doxygen/Doxyfile.lammps
        New file:      tools/doxygen/doxygen.sh
        New file:      tools/doxygen/README

The Developer.dox.lammps file contains a slightly revised version of the
Developer.pdf file adapted to the LAMMPS "doxygen" documentation.

The Doxyfile.lammps file is a first proposal for a LAMMPS "doxygen"
documentation flavor and can be adjusted to specific requirements.

The "doxygen.sh" shell script generates the LAMMPS "doxygen"
documentation.

Detailed instructions can be found in the README file.
2018-01-15 15:42:31 +01:00
f37f4f0041 support more cases where the variable name is reported on errors 2018-01-14 17:56:03 -05:00
d2983caad4 CMake: limit visibility of COLVARS lepton library headers 2018-01-14 18:01:19 +01:00
2b7c233791 Simplify change to CMake build for USER-COLVARS 2018-01-14 08:44:50 +01:00
9e35e76b8c Fix issues in Kokkos npair_halffull and npair_skip 2018-01-13 21:32:59 -07:00
7a78875911 Fix issues in Kokkos pair snap and zbl 2018-01-13 21:31:11 -07:00
1cfc3118cc CMake: remove BUILD_SHARED_LIBS requirement from PYTHON package 2018-01-13 22:17:23 +01:00
23e8fb0542 Update CMake build for USER-COLVARS 2018-01-13 21:57:24 +01:00
72eb2dab52 Add missing math.h header 2018-01-13 20:41:05 +01:00
f6075c9d2c One last tweak to npair_halffull_newtoff.cpp 2018-01-12 16:12:57 -07:00
24f1889b02 Only include ghosts for newton off, since this style exists as a standalone build, but the newton on method doesn't 2018-01-12 16:10:42 -07:00
dea8d592da Optimize npair_halffull styles; the number of i atoms, whether owned or ghost, must match between the child half list and parent full list 2018-01-12 15:51:54 -07:00
52d3e98f3b Fix issue in npair_skip_kokkos 2018-01-12 15:45:24 -07:00
6e3acce3be Relax restriction in neighbor.cpp 2018-01-12 15:44:11 -07:00
1ec54827d6 Add Kokkos halffull neighbor list capability 2018-01-12 15:41:59 -07:00
61ebf6265a address some formatting/markup issues reported by Nandor Tamaskovics 2018-01-12 12:50:02 -05:00
190cc78034 add dummy doc files for not-yet-implemented features 2018-01-12 12:29:03 -05:00
5863f115dd correctly escape 'a_ ' text, which is not a reference unlike 'a_b ' 2018-01-12 12:26:50 -05:00
75d259f5ee improve PDF reader compatibility for the created PDF file. 2018-01-12 12:00:53 -05:00
3b1b9a2cbf replace non-ASCII quotation marks with ASCII equivalent 2018-01-12 11:30:27 -05:00
17b6a4c3cd Added more permanent citation 2018-01-12 11:47:35 +01:00
1c10c78684 Add ghost option to npair_halffull 2018-01-11 15:15:04 -07:00
26917280be state name of the affected variable in error messages where possible 2018-01-11 15:01:08 -05:00
45674e6cd3 Add Kokkos skip list capability 2018-01-11 10:07:44 -07:00
22d2d1cdf3 Fix issue in pair_snap_kokkos memory_usage 2018-01-11 10:01:34 -07:00
0d7bee40ae Add newline at end of pair_snap_kokkos_impl.h 2018-01-11 09:41:37 -07:00
db1ed32a51 Fix minor issues in pair_snap_kokkos 2018-01-11 09:39:53 -07:00
d7d087ae67 Fix issue with peratom energy/virial in pair_snap_kokkos 2018-01-10 10:20:40 -07:00
92e2df74c1 Fix bug in Kokkos peratom energy/virial with newton on 2018-01-10 10:20:12 -07:00
92742c5373 Introduced the change in reax/c/omp 2018-01-10 18:00:22 +01:00
2047ae76e3 Move compute_bi outside of j-loop 2018-01-09 17:05:57 -07:00
4adbb882b3 Temper SNAP output 2018-01-09 17:05:34 -07:00
275c08453f Add Kokkos versions of pair_style snap and zbl 2018-01-09 17:03:49 -07:00
91107cc1f3 add a "coeffs" keyword to the info command for debugging "All XXX coeffs are not set" errors. 2018-01-09 12:47:33 -05:00
e26c170679 Added example and tabulated close-range correction 2018-01-09 15:08:08 +01:00
1bd9e175e9 Merge pull request #749 from akohlmey/collected-bugfixes-and-updates
Collected bugfixes and updates for the next patch release
2018-01-08 09:20:18 -07:00
9e9cfe5869 Merge pull request #754 from rbberger/cmake_kokkos_bugfix
CMake bugfixes for KOKKOS builds
2018-01-08 09:16:43 -07:00
85ff0c1e46 Merge pull request #750 from stanmoore1/kk_fences
Add Kokkos thread fences to comm pack/unpack routines
2018-01-08 09:16:16 -07:00
cc9b6118b8 Merge pull request #748 from stanmoore1/kk_docs
Update Kokkos docs
2018-01-08 09:15:36 -07:00
09bed0c09a Merge pull request #747 from stanmoore1/kk_reax_hist
Fix broken charge history in fix qeq/reax/kk
2018-01-08 09:15:21 -07:00
1b51efd6b8 Merge pull request #744 from akohlmey/doc-update
Documentation build updates
2018-01-08 09:15:02 -07:00
8888b05b18 Merge pull request #742 from rbberger/fix_python_move
Fix python/move
2018-01-08 09:14:36 -07:00
3bb8294f31 Merge pull request #718 from timattox/USER-DPD_es_RNG
USER-DPD: External State RNG
2018-01-08 09:13:11 -07:00
450c689ae9 Merge pull request #725 from stanmoore1/kk_update
Update the Kokkos library in LAMMPS to v2.5.00
2018-01-08 09:12:51 -07:00
a5d401e164 Fixed header for the force field file 2018-01-08 14:46:32 +01:00
b96100c0b7 Fixed header for the force field file 2018-01-08 14:42:43 +01:00
2690075405 Performance increase for charge-implicit ReaxFF/changed cutoff selection 2018-01-08 14:38:24 +01:00
f77483e437 adapt #include statements to current LAMMPS conventions 2018-01-06 19:44:07 -05:00
11cddd8798 explicitly include math.h 2018-01-06 19:42:40 -05:00
09ca7b32fc Revert "no need to include library.o in the LAMMPS executable"
This reverts commit 4a3a6b4455.
As it turns out, when using the LAMMPS python wrapper from inside
code using the PYTHON package, the library symbols *are* needed.
Thanks to Richard Berger (@rbberger) for pointing this out.
2018-01-06 19:47:33 +01:00
3af389e6cf Revert "no need to include library.o in the LAMMPS executable"
This reverts commit 4a3a6b4455.
As it turns out, when using the LAMMPS python wrapper from inside
code using the PYTHON package, the library symbols *are* needed.
Thanks to Richard Berger (@rbberger) for pointing this out.
2018-01-06 12:58:17 -05:00
46217db8a5 make python functions examples more complete by adding exception handling and initializing variables 2018-01-06 12:56:42 -05:00
d6d7dde653 Add error output if python evaluation failed 2018-01-06 18:54:55 +01:00
6070182f06 correctly account for individually added bonds, angles, and dihedrals 2018-01-06 11:03:48 -05:00
6c058fb56c avoid division by zero in ewald for empty and uncharged systems. require kspace_modify gewald 2018-01-05 20:14:25 -05:00
91993b236d avoid division by zero in PPPM for empty and uncharged systems. require kspace_modify gewald 2018-01-05 19:52:51 -05:00
5ecc3ce366 avoid division by zero when trying to run PPPM on a system without atoms 2018-01-04 20:44:40 -05:00
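A hedged sketch of the requirement mentioned in the three commits above: for an empty or uncharged system the long-range solvers cannot determine g_ewald automatically, so it has to be set explicitly. The value 0.3 is a placeholder, and the remaining system setup is assumed rather than shown.

from lammps import lammps

lmp = lammps()
# ... assumed setup: box, (possibly zero) atoms, and a coul/long pair style ...
lmp.command("kspace_style pppm 1.0e-4")
lmp.command("kspace_modify gewald 0.3")   # explicit value; placeholder number
lmp.close()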
75f1a4f3f0 use filelink.o consistently, since filelink does not work with some compilers, e.g. nvcc 2018-01-04 20:43:53 -05:00
ffc74fca6c Merge branch 'fix_property_atom_doc_update' of https://github.com/Pakketeretet2/lammps into collected-bugfixes-and-updates
this closes PR #755
2018-01-04 19:46:31 -05:00
2896df2140 avoid occasional neighbor lists requested from commands lingering around for too long and thus causing segmentation faults 2018-01-04 17:16:31 -05:00
c333401e72 Use bold font instead of underscores for emphasis. 2017-12-25 13:27:14 +01:00
a9e9a2046b Fixes/clarifies the fix_property_atom docs. 2017-12-25 13:03:18 +01:00
d4f45f4f85 correct set command example in fix property/atom 2017-12-24 17:45:58 -05:00
7d07baa8ad Better load balance fix_qeq_reax_kokkos for half neigh list 2017-12-21 11:07:51 -07:00
b9184ef441 Add special KOKKOS styles individually
This avoids unnecessary dependencies
2017-12-20 04:51:46 -05:00
ff2b61354d Bugfix for building LAMMPS+KOKKOS+DPD with CMake
Resolves issue #753, but there might be more work ahead.
2017-12-20 03:17:40 -05:00
18acc6ae47 remove some dead code 2017-12-19 15:01:11 -05:00
56e633a2cf error out on trying to compile USER-INTEL with -DLAMMPS_BIGBIG 2017-12-19 14:54:35 -05:00
798d68c607 Edits to accelerate_kokkos.txt 2017-12-18 14:50:33 -07:00
46fe0a968b Fix compiler warnings in atom_vec_hybrid_kokkos 2017-12-18 14:09:39 -07:00
00a9672524 Fix issue in fix_qeq_reax_kokkos, can't call child function from base constructor 2017-12-18 13:58:37 -07:00
a2756db66b Update to Kokkos library v2.5.00 2017-12-15 16:42:06 -07:00
da83feb8ca Merge branch 'master' into kk_update 2017-12-15 16:36:21 -07:00
a7bc3ed391 Add missing fences in comm_kokkos 2017-12-15 13:30:17 -07:00
68cf6941e1 Add Kokkos thread fences to pack/unpack routines 2017-12-15 13:01:38 -07:00
73c55ac4d1 Fix up docs after renaming 2017-12-15 14:06:16 -05:00
2a131d1416 Merge remote-tracking branch 'upstream/master' into fix_python_move 2017-12-15 13:56:54 -05:00
bcc5f49d0b Rename fix python to fix python/invoke 2017-12-15 13:53:04 -05:00
950bfb84a9 Clean up after renaming back to fix python/move 2017-12-15 13:27:27 -05:00
4d725c3153 Revert "Rename fix python/move to move/python"
This reverts commit 4d915dba08.
2017-12-15 13:17:12 -05:00
10fa54b2fd correct error messages. the atom style is called "smd" not "tlsph" 2017-12-13 18:44:31 -05:00
8a36cdc6bc correct velocity output for write_data of atom style smd 2017-12-13 18:42:24 -05:00
e5cd068cd5 Merge branch 'patch-3' of https://github.com/abbatux/lammps into collected-bugfixes-and-updates
This closes #745
2017-12-13 17:41:00 -05:00
cec22dda92 change pair style airebo/intel to compile with -DLAMMPS_BIGBIG 2017-12-13 16:20:49 -05:00
9a71efc5d5 fix neb bugfix from Emile Maras
NEB was not working correctly when using multiple procs
per replica and the keywords last/efirst or last/efirst/middle

I have corrected this in the enclosed fix_neb.cpp

I also slightly modified the nudging for this free end so that
it is applied only when the target energy is larger than the
replica energy. If the target energy is lower than the replica
energy, the replica should relax toward the target energy
without adding any nudging.

I also modified the documentation according to this change.
2017-12-13 15:19:46 -05:00
2f857c6eda correct fix neigh/history/omp to properly support -DLAMMPS_BIGBIG compilation 2017-12-13 15:12:14 -05:00
8a93f63de9 Update Kokkos docs 2017-12-13 10:05:53 -07:00
193252275f A few more tweaks to charge history in fix_qeq_reax_kokkos 2017-12-12 09:54:28 -07:00
5968850306 Fix broken charge history in fix_qeq_reax_kokkos 2017-12-12 09:20:09 -07:00
3291a4fe96 Revert "no need to include library.o in the LAMMPS executable"
This reverts commit 4a3a6b4455.
As it turns out, when using the LAMMPS python wrapper from inside
code using the PYTHON package, the library symbols *are* needed.
Thanks to Richard Berger (@rbberger) for pointing this out.
2017-12-11 08:08:49 -05:00
1b07a4edee Fix memory leak in pair python 2017-12-10 17:14:08 -05:00
0edad83b25 Update atom_vec_smd.cpp 2017-12-08 11:29:12 +11:00
81a1c007ed correct file name for doc file 2017-12-07 16:39:41 -05:00
0b51e9b2ff add documentation for fix move/python 2017-12-07 16:36:36 -05:00
4b1bcaa1ae fix spelling in documentation files 2017-12-07 16:01:02 -05:00
ed8680d695 suspend support for sphinxcontrib.images
the sphinxcontrib.images extension was broken with sphinx 16.x.
however, sphinx 15.x breaks with newer versions of the multiprocessing module.
so we suspend the thumbnail processing and lift the version lock to sphinx 15.x

also, the number of parallel sphinx tasks can be overridden with SPHINXEXTRA="-j #".
default is to try to use all local CPU cores.
2017-12-07 15:38:15 -05:00
29df5a536f Merge branch 'master' of https://github.com/lammps/lammps into USER-DPD_es_RNG 2017-12-07 10:57:01 -07:00
d029cb9002 Merge pull request #743 from stanmoore1/kk_yukawa_fix
Fix compile error in pair_yukawa_kokkos
2017-12-07 10:31:04 -07:00
3e99d1a83a Add pair yukawa/kk to Section_commands 2017-12-07 09:06:33 -07:00
c4e83be533 Fix compile error in pair_yukawa_kokkos 2017-12-07 08:42:17 -07:00
d7e5d60f90 Merge pull request #738 from rbberger/pylammps_thermo_style_fix
PyLammps bugfix to support custom thermo output with vector element addressing
2017-12-07 08:25:01 -07:00
5179efd2bb Merge pull request #741 from stanmoore1/kk_compile
Reduce Kokkos compile dependencies
2017-12-07 08:24:22 -07:00
abb2fe5be7 Merge pull request #740 from stanmoore1/improve_reax
Improve ReaxFF
2017-12-07 08:23:47 -07:00
bae45e2493 Merge pull request #737 from akohlmey/collected-bugfixes
Collected bugfixes and small updates for next patch
2017-12-07 08:23:06 -07:00
73d509f339 Merge pull request #735 from jaapkroe/extep
Add ExTeP pair style
2017-12-07 08:22:34 -07:00
fa0c28b717 Merge pull request #731 from Pakketeretet2/pair-yukawa-kk
Added a KOKKOS-enabled Yukawa pair style.
2017-12-07 08:21:20 -07:00
bc3a84b480 Merge pull request #727 from lammps/fix-gcmc-examples
new example scripts for fix gcmc
2017-12-07 08:19:56 -07:00
4d915dba08 Rename fix python/move to move/python 2017-12-05 23:42:57 -05:00
f64544a5fe Move instantiation of MemoryKokkos class 2017-12-05 08:34:24 -07:00
fc742eb2ef Remove outdated elastic constants 2017-12-05 16:23:20 +01:00
1baecc689e Merge pull request #3 from stanmoore1/kk_yukawa
Add pair_yukawa_kokkos to Install.sh and minor cleanup.

These changes look fine. Sorry for the tabs, I must have forgotten about those.
2017-12-04 14:16:53 -05:00
d916416cc5 Add pair_yukawa_kokkos to Install.sh and minor cleanup 2017-12-04 12:02:32 -07:00
2813923f15 Add multiple inheritance to pair_multi_lucy_rx_kokkos 2017-12-04 10:43:41 -07:00
4a3a6b4455 no need to include library.o in the LAMMPS executable 2017-12-03 19:37:42 -05:00
f8891a4451 Rename fix python/integrate to python/move
This is to avoid confusion about what LAMMPS considers to be an
integrator, like Verlet and RESPA.
2017-12-02 01:04:46 -05:00
51688b2504 Improve performance of Python integrator (NVE_Opt version)
Removing the loop over atoms by using NumPy array indexing recovers
performance close to that of plain fix nve.
2017-12-02 00:40:21 -05:00
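An illustrative NumPy-only sketch of the optimization described above (not the actual fix python/move code): the same half-step update written once as a per-atom Python loop and once as whole-array operations; the array sizes and constants are arbitrary.

import numpy as np

def half_step_loop(x, v, f, m, dt):
    # slow: explicit per-atom Python loop
    for i in range(len(x)):
        v[i] += 0.5 * dt * f[i] / m
        x[i] += dt * v[i]

def half_step_vectorized(x, v, f, m, dt):
    # fast: identical update expressed with whole-array NumPy operations
    v += 0.5 * dt * f / m
    x += dt * v

natoms = 100000
x = np.zeros((natoms, 3))
v = np.ones((natoms, 3))
f = np.ones((natoms, 3))
half_step_vectorized(x, v, f, m=1.0, dt=0.005)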
93be2d264e Detect correct integer type in lammps python interface 2017-12-02 00:39:15 -05:00
b9fd1156b2 Completed first version of fix python/integrate
This includes an example of how to implement fix NVE in Python.

The library interface was extended to provide direct access to atom data using
numpy arrays. No data copies are made and numpy operations directly manipulate
memory of the native code.

To keep this numpy dependency optional, all functions are wrapped into the
lammps.numpy sub-object which is only loaded when accessed.
2017-12-02 00:39:15 -05:00
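A sketch of what the described interface could look like from a user script; the accessor name extract_atom on the lazily loaded lmp.numpy sub-object and the input file name are assumptions, only the no-copy behavior is stated in the commit above.

from lammps import lammps

lmp = lammps()
lmp.file("in.melt")                  # assumed input script that sets up a system

x = lmp.numpy.extract_atom("x")      # assumed accessor: positions as a numpy view
v = lmp.numpy.extract_atom("v")      # velocities, also without copying

v[:, 2] = 0.0                        # in-place edits act directly on LAMMPS memory
lmp.close()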
bbfe16782b Adjust .gitignore 2017-12-02 00:37:14 -05:00
1931d2088a Started FixPythonIntegrate 2017-12-02 00:37:14 -05:00
5d9a6c1fe2 Add checks to Kokkos Install.sh so that files aren't unnecessarily recompiled 2017-12-01 16:06:50 -07:00
e7f97728c3 disable disruptive debug output 2017-12-01 17:14:11 -05:00
58ed92d905 one more file that doesn't require to include accelerator_kokkos.h 2017-12-01 16:45:22 -05:00
14aa036f36 add missing header files to USER-INTEL package 2017-12-01 15:53:02 -05:00
42e03da70c Remove redundant #include in input.cpp 2017-12-01 13:16:35 -07:00
5d2e097b27 Need dynamic cast for multiple inheritance 2017-12-01 12:53:40 -07:00
da51a8a0bb Add missing header files to GPU package 2017-12-01 12:13:14 -07:00
80dffb27e2 Add missing header files 2017-12-01 11:33:42 -07:00
5b33f153f4 Add newline to memory_kokkos.h 2017-12-01 10:33:47 -07:00
31eb12920c Use multiple inheritance to remove accelerator_kokkos.h out of region.h and kspace.h 2017-12-01 10:18:22 -07:00
31f2ca1e4c Use multiple inheritance to remove accelerator_kokkos.h out of pair.h 2017-12-01 09:51:40 -07:00
15a3364c2c Make memory_kokkos its own class 2017-12-01 09:13:31 -07:00
c3aa705d04 Improve performance of pair_reaxc, this change is safe because the non-bonded i-loop doesn't include ghost atoms; this optimization is already included in the USER-OMP version 2017-11-30 09:22:32 -07:00
8c2d38c7e9 Remove non-existent function in reaxc_forces_omp 2017-11-30 09:13:03 -07:00
e3b961b622 Improve performance of PairReaxCKokkos 2017-11-30 09:10:39 -07:00
319508bd29 fix scoping ambiguity issue that confuses nvcc 2017-11-30 10:56:58 -05:00
6f7bd78ea2 Fixes issue #701
PyLammps now supports thermo_style entries such as c_msd[4]
2017-11-29 01:06:44 -05:00
5647522906 various small cosmetic adjustments to increase conformance with LAMMPS programming conventions 2017-11-28 12:34:23 -05:00
e4b14213b4 delete old log files. new log files are approved by @athomps 2017-11-28 11:35:49 -05:00
fa6fc947f2 fix issues in FixNeighHistory and FixNeighHistoryOMP reported in https://sourceforge.net/p/lammps/mailman/message/36138319/
This supersedes and closes PR #734
2017-11-28 11:28:15 -05:00
e1189381e0 correct documentation for create_atoms rotate
This closes #736
2017-11-28 11:19:19 -05:00
39d24ab7eb ExTeP potential file header update 2017-11-27 21:25:23 +01:00
5770a20e2c added ExTeP to USER-MISC 2017-11-27 21:16:51 +01:00
83ec9815fe Merge pull request #733 from pastewka/17_dump_nc3
BUG: Proper handling of bigint data in NetCDF dump style. Affects LAMMPS version compiled with LAMMPS_BIGBIG.
2017-11-27 11:07:58 -07:00
90ee52296b Merge remote-tracking branch 'andeplane/fix_ave_chunk_bug' into collected-bugfixes
this closes PR #732
2017-11-27 11:45:09 -05:00
f02eb225c6 Merge branch '17_dump_nc3' of github.com:pastewka/lammps into 17_dump_nc3 2017-11-26 22:39:03 +01:00
a111cf640a BUG: Proper handling of bigint data. Corrects behavior when compiled with LAMMPS_BIGBIG. 2017-11-26 22:35:56 +01:00
e755a8339d BUG: openfile is called multiple times, second call stopped code with 'at keyword requires use of append yes' 2017-11-26 22:31:47 +01:00
f7f6a15ac0 MAINT: Removed superfluous code. 2017-11-26 22:31:47 +01:00
36b7aa73aa MAINT: Use proper data type for bigint when defining NetCDF file. 2017-11-26 22:31:47 +01:00
9a5723123f Set peratom_freq in fix ave/chunk 2017-11-21 23:43:52 -08:00
7d07f062b6 Remove const from variable 2017-11-20 10:21:19 -07:00
f3ed148828 Fix for big endian machines 2017-11-20 10:18:53 -07:00
5ba80662c3 Undid the removal of rad in pair_yukawa. 2017-11-17 15:07:59 -05:00
53c1558271 Updated docs for pair_yukawa 2017-11-17 14:21:54 -05:00
8e5d4fa891 Added a KOKKOS-enabled Yukawa pair style. 2017-11-17 14:12:16 -05:00
ec067bde36 fix cut-n-paste error in create_bond docs 2017-11-13 17:59:16 -05:00
adbc75cae6 add alternate log files with the latest version of LAMMPS 2017-11-13 11:52:01 -05:00
dde94c28a7 new example scripts for fix gcmc 2017-11-13 09:07:25 -07:00
f2dc764d1d Merge pull request #723 from lammps/replicate_bbox
Add bounding box to Replicate command
2017-11-13 09:01:38 -07:00
c4c59b909e Merge pull request #716 from akohlmey/collected-small-changes
Collected small changes and bugfixes for next patch
2017-11-13 09:00:26 -07:00
e2e21f0661 Merge pull request #714 from Pakketeretet2/user-manifold-plane-wiggle-fix
User manifold plane wiggle fix
2017-11-13 08:59:40 -07:00
6abf68f614 Merge pull request #712 from akohlmey/linalg-for-latte
Update lib/linalg for use with lib/latte
2017-11-13 08:59:18 -07:00
a97553a92e Merge pull request #709 from abbatux/patch-1
Update pair_smd_tlsph.cpp
2017-11-13 08:58:29 -07:00
dbd4acc4d6 Merge pull request #708 from abbatux/patch-2
Update smd_material_models.cpp
2017-11-13 08:58:07 -07:00
40e776ebc6 Remove OpenMP from KOKKOS_DEVICES in Kokkos CUDA Makefiles since normally this doesn't improve performance 2017-11-10 09:55:11 -07:00
f043212511 USER-DPD: fix a segfault when using constant temperature (DPD vs. DPDE) 2017-11-07 13:57:29 -06:00
4342bcdafc Fix CUDA issue in USER-DPD/random_external_state.h 2017-11-07 09:17:58 -07:00
2e40c00995 add option to the print command to also print to the global universe screen and logfile 2017-11-07 10:00:57 -05:00
f39c6213e1 Change path to nvcc_wrapper 2017-11-06 13:50:16 -07:00
88474fc5c2 Remove Kokkos tpls directory 2017-11-06 13:48:26 -07:00
16b5315845 Update Kokkos to v2.04.11 2017-11-06 13:47:33 -07:00
e337db4059 Replicate bbox from Chris Knight 2017-11-06 11:31:43 -07:00
ba43465268 Merge branch 'collected-small-changes' of github.com:akohlmey/lammps into collected-small-changes 2017-11-06 13:30:08 -05:00
09c61ca598 correct listing of USER-OMP variants of fix rigid styles 2017-11-06 13:29:59 -05:00
0f971bf07c print name of unknown or incorrect variable in error message 2017-11-06 10:41:32 -05:00
5a8c5eb479 provide last input command line with error->one() output 2017-11-06 10:41:01 -05:00
aa0d6cd75b Merge branch 'collected-small-changes' of github.com:akohlmey/lammps into collected-small-changes 2017-11-04 02:49:28 -04:00
b34000a5e1 make *_gpu_reinit() functions consistent in returning void. correct prototypes in src/GPU, too.
this supersedes and closes #719
2017-11-04 02:49:22 -04:00
279339ebd0 Remove the now unused AIRct_ssa[] array from the NeighList class. 2017-11-03 13:42:36 -05:00
605fe53c07 USER-DPD: make the kokkos and non-kokkos fix_shardlow match more closely
This was accomplished with several key changes:
1) Modified fix_shardlow's control flow to match fix_shardlow_kokkos so
that random numbers are pulled from the RNGs in exactly the same order.

2) Created random_external_state.h, a simplified version of the Kokkos
random number generator that keeps its state variables external to itself.
Thus it can be used both with and without Kokkos enabled, as long as the
caller stores and passes in the required state variable.

3) Replaced all references to random_mars.h and Kokkos_Random.hpp code in
the fix_shardlow* files with calls to the random_external_state.h code,
guaranteeing that fix_shardlow* is using an identical RNG in all cases.

Result: most (56 of 61) of our internal tests now generate the same results
with kokkos turned on or off.  Four cases still differ due to what appear
to be vectorization caused rounding differences, and the fifth case
appears to be something triggered by the kokkos "atom_style hybrid" code.
2017-11-03 12:51:37 -05:00
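A conceptual Python sketch of the external-state idea from point 2 above (the real random_external_state.h is C++ and its constants differ): the generator holds no hidden state, so any caller or backend that steps the same state value reproduces the same sequence.

def es_rand(state):
    # xorshift-style step with illustrative constants; the caller owns the state
    state ^= (state << 13) & 0xFFFFFFFFFFFFFFFF
    state ^= state >> 7
    state ^= (state << 17) & 0xFFFFFFFFFFFFFFFF
    value = (state & 0xFFFFFFFF) / 2.0**32    # uniform in [0, 1)
    return value, state

state = 12345                                  # stored by the caller, not by the RNG
r1, state = es_rand(state)
r2, state = es_rand(state)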
65b77230fd added missing "dihedral" in compute dihedral/local docs 2017-11-02 15:04:04 -04:00
91e4bcca33 BUG: openfile is called multiple times, second call stopped code with 'at keyword requires use of append yes' 2017-11-01 21:28:14 +01:00
7ef17efe2e MAINT: Removed superfluous code. 2017-11-01 21:20:19 +01:00
8a804460f9 MAINT: Use proper data type for bigint when defining NetCDF file. 2017-11-01 21:19:55 +01:00
f6658d10b7 fix incorrect create_atoms example for selecting created atoms with a variable 2017-10-30 23:33:26 -04:00
f4d0aa3393 correct cut-n-paste bug reported by Massimo Pica Ciamarra on lammps-users 2017-10-27 11:25:57 -04:00
99a6c6edb4 Fixed a typo in manifold plane wiggle. 2017-10-26 12:33:16 -04:00
a26ffc7ff7 Reset plane wiggle in master. 2017-10-26 12:32:30 -04:00
b002e071e7 Reset some files back to upstream. 2017-10-26 12:31:30 -04:00
9f44e3e5b0 Merged upstream. 2017-10-26 12:26:34 -04:00
e79cd6c62c Moved some stuff around. 2017-10-26 12:25:55 -04:00
82c6fd609e Merge branch 'master' of https://www.github.com/lammps/lammps 2017-10-26 12:06:57 -04:00
2dbb44f2c6 provide Makefile.lammps template for use with bundled linalg lib 2017-10-24 13:38:04 -04:00
d1630bbe34 add missing BLAS/LAPACK functions used by LATTE to linalg lib 2017-10-24 13:22:20 -04:00
941ee565a1 Added lal_ufm_ext.cpp in lib/gpu 2017-10-24 11:12:51 -02:00
b63acf6843 Added the UFM files (doc/src - lib/gpu - src) 2017-10-24 11:11:10 -02:00
41c25877e8 Update smd_material_models.cpp
The pressure pFinal using ShockEOS() was of the wrong sign when Gamma = 0.
2017-10-24 11:04:11 +11:00
253a17b6d0 Update pair_smd_tlsph.cpp
Correction of a typo in the computation of LAME_LAMBDA.
2017-10-23 12:45:58 +11:00
0448bc9caf Merged stuff. 2017-09-25 18:31:32 -04:00
821 changed files with 116716 additions and 17403 deletions

View File

@ -104,7 +104,7 @@ set(OTHER_PACKAGES KIM PYTHON MSCG MPIIO VORONOI POEMS LATTE
USER-ATC USER-AWPMD USER-CGDNA USER-MESO
USER-CGSDK USER-COLVARS USER-DIFFRACTION USER-DPD USER-DRUDE USER-EFF
USER-FEP USER-H5MD USER-LB USER-MANIFOLD USER-MEAMC USER-MGPT USER-MISC
USER-MOLFILE USER-NETCDF USER-PHONON USER-QTB USER-REAXC USER-SMD
USER-MOFFF USER-MOLFILE USER-NETCDF USER-PHONON USER-QTB USER-REAXC USER-SMD
USER-SMTBQ USER-SPH USER-TALLY USER-UEF USER-VTK USER-QUIP USER-QMMM)
set(ACCEL_PACKAGES USER-OMP KOKKOS OPT USER-INTEL GPU)
foreach(PKG ${DEFAULT_PACKAGES})
@ -170,13 +170,6 @@ if(ENABLE_KSPACE)
endif()
endif()
if(ENABLE_MISC)
option(LAMMPS_XDR "include XDR compatibility files for doing particle dumps in XTC format" OFF)
if(LAMMPS_XDR)
add_definitions(-DLAMMPS_XDR) # for liblammps
endif()
endif()
if(ENABLE_MSCG OR ENABLE_USER-ATC OR ENABLE_USER-AWPMD OR ENABLE_USER-QUIP OR ENABLE_LATTE)
find_package(LAPACK)
if(NOT LAPACK_FOUND)
@ -198,14 +191,13 @@ if(ENABLE_PYTHON)
add_definitions(-DLMP_PYTHON)
include_directories(${PYTHON_INCLUDE_DIR})
list(APPEND LAMMPS_LINK_LIBS ${PYTHON_LIBRARY})
if(NOT PYTHON_INSTDIR)
execute_process(COMMAND ${PYTHON_EXECUTABLE}
-c "import distutils.sysconfig as cg; print(cg.get_python_lib(1,0,prefix='${CMAKE_INSTALL_PREFIX}'))"
OUTPUT_VARIABLE PYTHON_INSTDIR OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/../python/lammps.py DESTINATION ${PYTHON_INSTDIR})
if(NOT BUILD_SHARED_LIBS)
message(FATAL_ERROR "Python package need lammps to be build shared, use -DBUILD_SHARED_LIBS=ON")
if(BUILD_SHARED_LIBS)
if(NOT PYTHON_INSTDIR)
execute_process(COMMAND ${PYTHON_EXECUTABLE}
-c "import distutils.sysconfig as cg; print(cg.get_python_lib(1,0,prefix='${CMAKE_INSTALL_PREFIX}'))"
OUTPUT_VARIABLE PYTHON_INSTDIR OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/../python/lammps.py DESTINATION ${PYTHON_INSTDIR})
endif()
endif()
@ -401,6 +393,10 @@ foreach(SIMPLE_LIB REAX MEAM POEMS USER-ATC USER-AWPMD USER-COLVARS USER-H5MD
target_include_directories(awpmd PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/awpmd/systems/interact ${LAMMPS_LIB_SOURCE_DIR}/awpmd/ivutils/include)
elseif(PKG_LIB STREQUAL h5md)
target_include_directories(h5md PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/h5md/include)
elseif(PKG_LIB STREQUAL colvars)
target_compile_options(colvars PRIVATE -DLEPTON)
target_include_directories(colvars PRIVATE ${LAMMPS_LIB_SOURCE_DIR}/colvars/lepton/include)
target_include_directories(colvars PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/colvars)
else()
target_include_directories(${PKG_LIB} PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/${PKG_LIB})
endif()
@ -480,6 +476,8 @@ if(ENABLE_KOKKOS)
${KOKKOS_PKG_SOURCES_DIR}/neigh_list_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/neigh_bond_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/fix_nh_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/nbin_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/npair_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/domain_kokkos.cpp
${KOKKOS_PKG_SOURCES_DIR}/modify_kokkos.cpp)
set_property(GLOBAL PROPERTY "KOKKOS_PKG_SOURCES" "${KOKKOS_PKG_SOURCES}")
@ -487,6 +485,17 @@ if(ENABLE_KOKKOS)
# detects styles which have KOKKOS version
RegisterStylesExt(${KOKKOS_PKG_SOURCES_DIR} kokkos KOKKOS_PKG_SOURCES)
# register kokkos-only styles
RegisterNBinStyle(${KOKKOS_PKG_SOURCES_DIR}/nbin_kokkos.h)
RegisterNPairStyle(${KOKKOS_PKG_SOURCES_DIR}/npair_kokkos.h)
if(ENABLE_USER-DPD)
get_property(KOKKOS_PKG_SOURCES GLOBAL PROPERTY KOKKOS_PKG_SOURCES)
list(APPEND KOKKOS_PKG_SOURCES ${KOKKOS_PKG_SOURCES_DIR}/npair_ssa_kokkos.cpp)
RegisterNPairStyle(${KOKKOS_PKG_SOURCES_DIR}/npair_ssa_kokkos.h)
set_property(GLOBAL PROPERTY "KOKKOS_PKG_SOURCES" "${KOKKOS_PKG_SOURCES}")
endif()
get_property(KOKKOS_PKG_SOURCES GLOBAL PROPERTY KOKKOS_PKG_SOURCES)
list(APPEND LIB_SOURCES ${KOKKOS_PKG_SOURCES})

View File

@ -11,6 +11,12 @@ function(FindStyleHeaders path style_class file_pattern headers)
set_property(GLOBAL PROPERTY ${headers} "${hlist}")
endfunction(FindStyleHeaders)
function(AddStyleHeader path headers)
get_property(hlist GLOBAL PROPERTY ${headers})
list(APPEND hlist ${path})
set_property(GLOBAL PROPERTY ${headers} "${hlist}")
endfunction(AddStyleHeader)
function(FindStyleHeadersExt path style_class extension headers sources)
get_property(hlist GLOBAL PROPERTY ${headers})
get_property(slist GLOBAL PROPERTY ${sources})
@ -62,6 +68,22 @@ function(GenerateStyleHeader path property style)
CreateStyleHeader("${path}" "style_${style}.h" ${files})
endfunction(GenerateStyleHeader)
function(RegisterNBinStyles search_path)
FindStyleHeaders(${search_path} NBIN_CLASS nbin_ NBIN ) # nbin ) # neighbor
endfunction(RegisterNBinStyles)
function(RegisterNPairStyles search_path)
FindStyleHeaders(${search_path} NPAIR_CLASS npair_ NPAIR ) # npair ) # neighbor
endfunction(RegisterNPairStyles)
function(RegisterNBinStyle path)
AddStyleHeader(${path} NBIN)
endfunction(RegisterNBinStyle)
function(RegisterNPairStyle path)
AddStyleHeader(${path} NPAIR)
endfunction(RegisterNPairStyle)
function(RegisterStyles search_path)
FindStyleHeaders(${search_path} ANGLE_CLASS angle_ ANGLE ) # angle ) # force
FindStyleHeaders(${search_path} ATOM_CLASS atom_vec_ ATOM_VEC ) # atom ) # atom atom_vec_hybrid

View File

@ -20,6 +20,7 @@ ifeq ($(shell which virtualenv >/dev/null 2>&1; echo $$?), 0)
HAS_VIRTUALENV = YES
endif
SPHINXEXTRA = -j $(shell $(PYTHON) -c 'import multiprocessing;print(multiprocessing.cpu_count())')
SOURCES=$(wildcard src/*.txt)
OBJECTS=$(SOURCES:src/%.txt=$(RSTDIR)/%.rst)
@ -55,7 +56,7 @@ html: $(OBJECTS) $(ANCHORCHECK)
@(\
. $(VENV)/bin/activate ;\
cp -r src/* $(RSTDIR)/ ;\
sphinx-build -j 8 -b html -c utils/sphinx-config -d $(BUILDDIR)/doctrees $(RSTDIR) html ;\
sphinx-build $(SPHINXEXTRA) -b html -c utils/sphinx-config -d $(BUILDDIR)/doctrees $(RSTDIR) html ;\
echo "############################################" ;\
doc_anchor_check src/*.txt ;\
echo "############################################" ;\
@ -91,7 +92,7 @@ epub: $(OBJECTS)
@(\
. $(VENV)/bin/activate ;\
cp -r src/* $(RSTDIR)/ ;\
sphinx-build -j 8 -b epub -c utils/sphinx-config -d $(BUILDDIR)/doctrees $(RSTDIR) epub ;\
sphinx-build $(SPHINXEXTRA) -b epub -c utils/sphinx-config -d $(BUILDDIR)/doctrees $(RSTDIR) epub ;\
deactivate ;\
)
@mv epub/LAMMPS.epub .
@ -159,7 +160,7 @@ $(VENV):
@( \
virtualenv -p $(PYTHON) $(VENV); \
. $(VENV)/bin/activate; \
pip install Sphinx==1.5.6; \
pip install Sphinx; \
pip install sphinxcontrib-images; \
deactivate;\
)

New binary image files added (previews not shown): 14 KiB, 5.2 KiB, 4.5 KiB.

doc/src/Eqs/pair_buck6d.jpg: new binary file (6.9 KiB; preview not shown)

View File

@ -0,0 +1,9 @@
\documentclass[12pt]{article}
\begin{document}
\begin{eqnarray*}
E = A e^{-\kappa r} - \frac{C}{r^6} \cdot \frac{1}{1 + D r^{14}} \qquad r < r_c \\
\end{eqnarray*}
\end{document}

New binary image file added (7.1 KiB; preview not shown).

doc/src/Eqs/pair_ufm.jpg: new binary file (17 KiB; preview not shown)

doc/src/Eqs/pair_ufm.tex: new file (14 lines added)
View File

@ -0,0 +1,14 @@
\documentclass[12pt]{article}
\begin{document}
$$
E = -\varepsilon\, \ln{\left[1-\exp{\left(-r^{2}/\sigma^{2}\right)}\right]} \qquad r < r_c
$$
$$
\varepsilon = p\,k_B\,T
$$
\end{document}

View File

@ -1,7 +1,7 @@
<!-- HTML_ONLY -->
<HEAD>
<TITLE>LAMMPS Users Manual</TITLE>
<META NAME="docnumber" CONTENT="23 Oct 2017 version">
<META NAME="docnumber" CONTENT="5 Feb 2018 version">
<META NAME="author" CONTENT="http://lammps.sandia.gov - Sandia National Laboratories">
<META NAME="copyright" CONTENT="Copyright (2003) Sandia Corporation. This software and manual is distributed under the GNU General Public License.">
</HEAD>
@ -21,7 +21,7 @@
<H1></H1>
LAMMPS Documentation :c,h3
23 Oct 2017 version :c,h4
5 Feb 2018 version :c,h4
Version info: :h4

View File

@ -619,8 +619,9 @@ USER-INTEL, k = KOKKOS, o = USER-OMP, t = OPT.
"pour"_fix_pour.html,
"press/berendsen"_fix_press_berendsen.html,
"print"_fix_print.html,
"property/atom"_fix_property_atom.html,
"python"_fix_python.html,
"property/atom (k)"_fix_property_atom.html,
"python/invoke"_fix_python_invoke.html,
"python/move"_fix_python_move.html,
"qeq/comb (o)"_fix_qeq_comb.html,
"qeq/dynamic"_fix_qeq.html,
"qeq/fire"_fix_qeq.html,
@ -637,10 +638,10 @@ USER-INTEL, k = KOKKOS, o = USER-OMP, t = OPT.
"rigid/nve (o)"_fix_rigid.html,
"rigid/nvt (o)"_fix_rigid.html,
"rigid/small (o)"_fix_rigid.html,
"rigid/small/nph (o)"_fix_rigid.html,
"rigid/small/npt (o)"_fix_rigid.html,
"rigid/small/nve (o)"_fix_rigid.html,
"rigid/small/nvt (o)"_fix_rigid.html,
"rigid/small/nph"_fix_rigid.html,
"rigid/small/npt"_fix_rigid.html,
"rigid/small/nve"_fix_rigid.html,
"rigid/small/nvt"_fix_rigid.html,
"setforce (k)"_fix_setforce.html,
"shake"_fix_shake.html,
"spring"_fix_spring.html,
@ -668,7 +669,7 @@ USER-INTEL, k = KOKKOS, o = USER-OMP, t = OPT.
"wall/harmonic"_fix_wall.html,
"wall/lj1043"_fix_wall.html,
"wall/lj126"_fix_wall.html,
"wall/lj93"_fix_wall.html,
"wall/lj93 (k)"_fix_wall.html,
"wall/piston"_fix_wall_piston.html,
"wall/reflect (k)"_fix_wall_reflect.html,
"wall/region"_fix_wall_region.html,
@ -683,14 +684,14 @@ package"_Section_start.html#start_3.
"atc"_fix_atc.html,
"ave/correlate/long"_fix_ave_correlate_long.html,
"colvars"_fix_colvars.html,
"dpd/energy"_fix_dpd_energy.html,
"dpd/energy (k)"_fix_dpd_energy.html,
"drude"_fix_drude.html,
"drude/transform/direct"_fix_drude_transform.html,
"drude/transform/reverse"_fix_drude_transform.html,
"edpd/source"_fix_dpd_source.html,
"eos/cv"_fix_eos_cv.html,
"eos/table"_fix_eos_table.html,
"eos/table/rx"_fix_eos_table_rx.html,
"eos/table/rx (k)"_fix_eos_table_rx.html,
"filter/corotate"_fix_filter_corotate.html,
"flow/gauss"_fix_flow_gauss.html,
"gle"_fix_gle.html,
@ -728,12 +729,12 @@ package"_Section_start.html#start_3.
"qeq/reax (ko)"_fix_qeq_reax.html,
"qmmm"_fix_qmmm.html,
"qtb"_fix_qtb.html,
"reax/c/bonds"_fix_reax_bonds.html,
"reax/c/species"_fix_reaxc_species.html,
"reax/c/bonds (k)"_fix_reax_bonds.html,
"reax/c/species (k)"_fix_reaxc_species.html,
"rhok"_fix_rhok.html,
"rx"_fix_rx.html,
"rx (k)"_fix_rx.html,
"saed/vtk"_fix_saed_vtk.html,
"shardlow"_fix_shardlow.html,
"shardlow (k)"_fix_shardlow.html,
"smd"_fix_smd.html,
"smd/adjust/dt"_fix_smd_adjust_dt.html,
"smd/integrate/tlsph"_fix_smd_integrate_tlsph.html,
@ -906,7 +907,7 @@ KOKKOS, o = USER-OMP, t = OPT.
"none"_pair_none.html,
"zero"_pair_zero.html,
"hybrid"_pair_hybrid.html,
"hybrid/overlay"_pair_hybrid.html,
"hybrid/overlay (k)"_pair_hybrid.html,
"adp (o)"_pair_adp.html,
"airebo (oi)"_pair_airebo.html,
"airebo/morse (oi)"_pair_airebo.html,
@ -1021,11 +1022,12 @@ KOKKOS, o = USER-OMP, t = OPT.
"tip4p/cut (o)"_pair_coul.html,
"tip4p/long (o)"_pair_coul.html,
"tri/lj"_pair_tri_lj.html,
"ufm (got)"_pair_ufm.html,
"vashishta (ko)"_pair_vashishta.html,
"vashishta/table (o)"_pair_vashishta.html,
"yukawa (go)"_pair_yukawa.html,
"yukawa (gok)"_pair_yukawa.html,
"yukawa/colloid (go)"_pair_yukawa_colloid.html,
"zbl (go)"_pair_zbl.html :tb(c=4,ea=c)
"zbl (gok)"_pair_zbl.html :tb(c=4,ea=c)
These are additional pair styles in USER packages, which can be used
if "LAMMPS is built with the appropriate
@ -1038,13 +1040,14 @@ package"_Section_start.html#start_3.
"coul/diel (o)"_pair_coul_diel.html,
"coul/long/soft (o)"_pair_lj_soft.html,
"dpd/fdt"_pair_dpd_fdt.html,
"dpd/fdt/energy"_pair_dpd_fdt.html,
"dpd/fdt/energy (k)"_pair_dpd_fdt.html,
"eam/cd (o)"_pair_eam.html,
"edip (o)"_pair_edip.html,
"edip/multi"_pair_edip.html,
"edpd"_pair_meso.html,
"eff/cut"_pair_eff.html,
"exp6/rx"_pair_exp6_rx.html,
"exp6/rx (k)"_pair_exp6_rx.html,
"extep"_pair_extep.html,
"gauss/cut"_pair_gauss.html,
"kolmogorov/crespi/z"_pair_kolmogorov_crespi_z.html,
"lennard/mdf"_pair_mdf.html,
@ -1070,7 +1073,7 @@ package"_Section_start.html#start_3.
"morse/smooth/linear"_pair_morse.html,
"morse/soft"_pair_morse.html,
"multi/lucy"_pair_multi_lucy.html,
"multi/lucy/rx"_pair_multi_lucy_rx.html,
"multi/lucy/rx (k)"_pair_multi_lucy_rx.html,
"oxdna/coaxstk"_pair_oxdna.html,
"oxdna/excv"_pair_oxdna.html,
"oxdna/hbond"_pair_oxdna.html,
@ -1087,6 +1090,7 @@ package"_Section_start.html#start_3.
"smd/triangulated/surface"_pair_smd_triangulated_surface.html,
"smd/ulsph"_pair_smd_ulsph.html,
"smtbq"_pair_smtbq.html,
"snap (k)"_pair_snap.html,
"sph/heatconduction"_pair_sph_heatconduction.html,
"sph/idealgas"_pair_sph_idealgas.html,
"sph/lj"_pair_sph_lj.html,
@ -1094,7 +1098,7 @@ package"_Section_start.html#start_3.
"sph/taitwater"_pair_sph_taitwater.html,
"sph/taitwater/morris"_pair_sph_taitwater_morris.html,
"srp"_pair_srp.html,
"table/rx"_pair_table_rx.html,
"table/rx (k)"_pair_table_rx.html,
"tdpd"_pair_meso.html,
"tersoff/table (o)"_pair_tersoff.html,
"thole"_pair_thole.html,
@ -1249,7 +1253,7 @@ USER-OMP, t = OPT.
"ewald/disp"_kspace_style.html,
"msm (o)"_kspace_style.html,
"msm/cg (o)"_kspace_style.html,
"pppm (go)"_kspace_style.html,
"pppm (gok)"_kspace_style.html,
"pppm/cg (o)"_kspace_style.html,
"pppm/disp (i)"_kspace_style.html,
"pppm/disp/tip4p"_kspace_style.html,

View File

@ -454,7 +454,7 @@ NOTE: By default, for 2d systems, granular particles are still modeled
as 3d spheres, not 2d discs (circles), meaning their moment of inertia
will be the same as in 3d. If you wish to model granular particles in
2d as 2d discs, see the note on this topic in "Section
6.2"_Section_howto.html#howto_2, where 2d simulations are disussed.
6.2"_Section_howto.html#howto_2, where 2d simulations are discussed.
:line

View File

@ -138,6 +138,7 @@ Package, Description, Doc page, Example, Library
"USER-MESO"_#USER-MESO, mesoscale DPD models, "pair_style edpd"_pair_meso.html, USER/meso, -
"USER-MGPT"_#USER-MGPT, fast MGPT multi-ion potentials, "pair_style mgpt"_pair_mgpt.html, USER/mgpt, -
"USER-MISC"_#USER-MISC, single-file contributions, USER-MISC/README, USER/misc, -
"USER-MOFFF"_#USER-MOFFF, styles for "MOF-FF"_MOFplus force field, "pair_style buck6d/coul/gauss"_pair_buck6d_coul_gauss.html, USER/mofff, -
"USER-MOLFILE"_#USER-MOLFILE, "VMD"_vmd_home molfile plug-ins,"dump molfile"_dump_molfile.html, -, ext
"USER-NETCDF"_#USER-NETCDF, dump output via NetCDF,"dump netcdf"_dump_netcdf.html, -, ext
"USER-OMP"_#USER-OMP, OpenMP-enabled styles,"Section 5.3.4"_accelerate_omp.html, "Benchmarks"_http://lammps.sandia.gov/bench.html, -
@ -243,7 +244,7 @@ COLLOID package :link(COLLOID),h4
[Contents:]
Coarse-grained finite-size colloidal particles. Pair stayle and fix
Coarse-grained finite-size colloidal particles. Pair styles and fix
wall styles for colloidal interactions. Includes the Fast Lubrication
Dynamics (FLD) method for hydrodynamic interactions, which is a
simplified approximation to Stokesian dynamics.
@ -732,9 +733,9 @@ make lib-latte args="-b -m gfortran" # download and build in lib/latte and
:pre
Note that 3 symbolic (soft) links, "includelink" and "liblink" and
"filelink", are created in lib/latte to point into the LATTE home dir.
"filelink.o", are created in lib/latte to point into the LATTE home dir.
When LAMMPS builds in src it will use these links. You should
also check that the Makefile.lammps file you create is apporpriate
also check that the Makefile.lammps file you create is appropriate
for the compiler you use on your system to build LATTE.
You can then install/un-install the package and build LAMMPS in the
@ -984,7 +985,7 @@ MSCG package :link(mscg),h4
[Contents:]
A "fix mscg"_fix_mscg.html command which can parameterize a
Mulit-Scale Coarse-Graining (MSCG) model using the open-source "MS-CG
Multi-Scale Coarse-Graining (MSCG) model using the open-source "MS-CG
library"_mscg_home.
:link(mscg_home,https://github.com/uchicago-voth/MSCG-release)
@ -1781,7 +1782,7 @@ coarse-grained DPD-based models for energetic, reactive molecular
crystalline materials. It includes many pair styles specific to these
systems, including for reactive DPD, where each particle has internal
state for multiple species and a coupled set of chemical reaction ODEs
are integrated each timestep. Highly accurate time intergrators for
are integrated each timestep. Highly accurate time integrators for
isothermal, isoenergetic, isobaric and isenthalpic conditions are
included. These enable long timesteps via the Shardlow splitting
algorithm.
@ -2231,7 +2232,7 @@ Several extensions of the the dissipative particle dynamics (DPD)
method. Specifically, energy-conserving DPD (eDPD) that can model
non-isothermal processes, many-body DPD (mDPD) for simulating
vapor-liquid coexistence, and transport DPD (tDPD) for modeling
advection-diffuion-reaction systems. The equations of motion of these
advection-diffusion-reaction systems. The equations of motion of these
DPD extensions are integrated through a modified velocity-Verlet (MVV)
algorithm.
@ -2259,6 +2260,44 @@ http://lammps.sandia.gov/movies.html#mesodpd :ul
:line
USER-MOFFF package :link(USER-MOFFF),h4
[Contents:]
Pair, angle and improper styles needed to employ the MOF-FF
force field by Schmid and coworkers with LAMMPS.
MOF-FF is a first principles derived force field with the primary aim
to simulate MOFs and related porous framework materials, using spherical
Gaussian charges. It is described in S. Bureekaew et al., Phys. Stat. Sol. B
2013, 250, 1128-1141.
For the usage of MOF-FF see the example in the example directory as
well as the "MOF+"_MOFplus website.
:link(MOFplus,https://www.mofplus.org/content/show/MOF-FF)
[Author:] Hendrik Heenen (Technical U of Munich),
Rochus Schmid (Ruhr-University Bochum).
[Install or un-install:]
make yes-user-mofff
make machine :pre
make no-user-mofff
make machine :pre
[Supporting info:]
src/USER-MOFFF: filenames -> commands
src/USER-MOFFF/README
"pair_style buck6d/coul/gauss"_pair_buck6d_coul_gauss.html
"angle_style class2"_angle_class2.html
"angle_style cosine/buck6d"_angle_cosine_buck6d.html
"improper_style inversion/harmonic"_improper_inversion_harmonic.html
examples/USER/mofff :ul
:line
USER-MOLFILE package :link(USER-MOLFILE),h4
[Contents:]
@ -2495,7 +2534,7 @@ make machine :pre
NOTE: The LAMMPS executable these steps produce is not yet functional
for a QM/MM simulation. You must also build Quantum ESPRESSO and
create a new executable which links LAMMPS and Quanutm ESPRESSO
create a new executable which links LAMMPS and Quantum ESPRESSO
together. These are steps 3 and 4 described in the lib/qmmm/README
file.
@ -2554,7 +2593,7 @@ developed by the Cambridge University group.
:link(quip,https://github.com/libAtoms/QUIP)
To use this package you must have the QUIP libAatoms library available
To use this package you must have the QUIP libAtoms library available
on your system.
[Author:] Albert Bartok (Cambridge University)
@ -2809,7 +2848,7 @@ USER-VTK package :link(USER-VTK),h4
A "dump vtk"_dump_vtk.html command which outputs snapshot info in the
"VTK format"_vtk, enabling visualization by "Paraview"_paraview or
other visuzlization packages.
other visualization packages.
:link(vtk,http://www.vtk.org)
:link(paraview,http://www.paraview.org)

View File

@ -123,7 +123,7 @@ code directly from an input script:
"python"_python.html
"variable python"_variable.html
"fix python"_fix_python.html
"fix python/invoke"_fix_python_invoke.html
"pair_style python"_pair_python.html :ul
The "python"_python.html command which can be used to define and
@ -165,7 +165,7 @@ doc page for its python-style variables for more info, including
examples of Python code you can write for both pure Python operations
and callbacks to LAMMPS.
The "fix python"_fix_python.html command can execute
The "fix python/invoke"_fix_python_invoke.html command can execute
Python code at selected timesteps during a simulation run.
The "pair_style python"_pair_python command allows you to define

View File

@ -79,7 +79,7 @@ This section has the following sub-sections:
Read this first :h5,link(start_2_1)
If you want to avoid building LAMMPS yourself, read the preceeding
If you want to avoid building LAMMPS yourself, read the preceding
section about options available for downloading and installing
executables. Details are discussed on the "download"_download page.
@ -252,14 +252,13 @@ re-compile, after typing "make clean" (which will describe different
clean options).
The LMP_INC variable is used to include options that turn on ifdefs
within the LAMMPS code. The options that are currently recogized are:
within the LAMMPS code. The options that are currently recognized are:
-DLAMMPS_GZIP
-DLAMMPS_JPEG
-DLAMMPS_PNG
-DLAMMPS_FFMPEG
-DLAMMPS_MEMALIGN
-DLAMMPS_XDR
-DLAMMPS_SMALLBIG
-DLAMMPS_BIGBIG
-DLAMMPS_SMALLSMALL
@ -308,11 +307,6 @@ has to be aligned on larger than default byte boundaries (e.g. 16
bytes instead of 8 bytes on x86 type platforms) for optimal
performance.
If you use -DLAMMPS_XDR, the build will include XDR compatibility
files for doing particle dumps in XTC format. This is only necessary
if your platform does have its own XDR files available. See the
Restrictions section of the "dump"_dump.html command for details.
Use at most one of the -DLAMMPS_SMALLBIG, -DLAMMPS_BIGBIG,
-DLAMMPS_SMALLSMALL settings. The default is -DLAMMPS_SMALLBIG. These
settings refer to use of 4-byte (small) vs 8-byte (big) integers
@ -363,7 +357,7 @@ installed on your platform. If MPI is installed on your system in the
usual place (under /usr/local), you also may not need to specify these
3 variables, assuming /usr/local is in your path. On some large
parallel machines which use "modules" for their compile/link
environements, you may simply need to include the correct module in
environments, you may simply need to include the correct module in
your build environment, before building LAMMPS. Or the parallel
machine may have a vendor-provided MPI which the compiler has no
trouble finding.
@ -431,7 +425,7 @@ use the KISS library described above.
You may also need to set the FFT_INC, FFT_PATH, and FFT_LIB variables,
so the compiler and linker can find the needed FFT header and library
files. Note that on some large parallel machines which use "modules"
for their compile/link environements, you may simply need to include
for their compile/link environments, you may simply need to include
the correct module in your build environment. Or the parallel machine
may have a vendor-provided FFT library which the compiler has no
trouble finding. See the src/MAKE/OPTIONS/Makefile.fftw file for an
@ -470,7 +464,7 @@ precision.
The FFT_INC variable also allows for a -DFFT_SINGLE setting that will
use single-precision FFTs with PPPM, which can speed-up long-range
calulations, particularly in parallel or on GPUs. Fourier transform
calculations, particularly in parallel or on GPUs. Fourier transform
and related PPPM operations are somewhat insensitive to floating point
truncation errors and thus do not always need to be performed in
double precision. Using the -DFFT_SINGLE setting trades off a little
@ -483,7 +477,7 @@ with support for single-precision, as explained above. For FFTW3 you
also need to include -lfftw3f with the FFT_LIB setting, in addition to
-lfftw3. For FFTW2, you also need to specify -DFFT_SIZE with the
FFT_INC setting and -lsfftw with the FFT_LIB setting (in place of
-lfftw). Similarly, if FFTW2 has been preinstalled with an explicit
-lfftw). Similarly, if FFTW2 has been pre-installed with an explicit
double-precision library (libdfftw.a and not the default libfftw.a),
then you can specify -DFFT_SIZE (and not -DFFT_SINGLE), and specify
-ldfftw to use double-precision FFTs.
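As a hedged illustration, a single-precision FFTW3 build might use settings similar to these in Makefile.machine (the paths are placeholders; see src/MAKE/OPTIONS/Makefile.fftw for a maintained example):
FFT_INC = -DFFT_FFTW3 -DFFT_SINGLE -I/usr/local/include
FFT_PATH = -L/usr/local/lib
FFT_LIB = -lfftw3 -lfftw3f :pre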
@ -714,7 +708,7 @@ list various make commands that can be used to manage packages.
If you use a command in a LAMMPS input script that is part of a
package, you must have built LAMMPS with that package, else you will
get an error that the style is invalid or the command is unknown.
Every command's doc page specfies if it is part of a package. You can
Every command's doc page specifies if it is part of a package. You can
type
lmp_machine -h :pre
@ -859,7 +853,7 @@ details for each package.
[External libraries:]
Packages in the tables "Section 4"_Section_packages.html with an "ext"
in the last column link to exernal libraries whose source code is not
in the last column link to external libraries whose source code is not
included with LAMMPS. You must first download and install the library
before building LAMMPS with that package installed. E.g. the voronoi
package links to the freely available "Voro++ library"_voro_home2. You
@ -963,7 +957,7 @@ src/MAKE/Makefile.foo and perform the build in the directory
Obj_shared_foo. This is so that each file can be compiled with the
-fPIC flag which is required for inclusion in a shared library. The
build will create the file liblammps_foo.so which another application
can link to dyamically. It will also create a soft link liblammps.so,
can link to dynamically. It will also create a soft link liblammps.so,
which will point to the most recently built shared library. This is
the file the Python wrapper loads by default.
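A minimal sketch of such a build, assuming the conventional mpi Makefile, is:
cd lammps/src
make mode=shlib mpi # builds liblammps_mpi.so and the liblammps.so soft link :pre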
@ -1329,8 +1323,8 @@ LAMMPS is compiled with CUDA=yes.
numa Nm :pre
This option is only relevant when using pthreads with hwloc support.
In this case Nm defines the number of NUMA regions (typicaly sockets)
on a node which will be utilizied by a single MPI rank. By default Nm
In this case Nm defines the number of NUMA regions (typically sockets)
on a node which will be utilized by a single MPI rank. By default Nm
= 1. If this option is used the total number of worker-threads per
MPI rank is threads*numa. Currently it is almost always better to
assign at least one MPI rank per NUMA region, and leave numa set to
@ -1394,7 +1388,7 @@ replica runs on on one or a few processors. Note that with MPI
installed on a machine (e.g. your desktop), you can run on more
(virtual) processors than you have physical processors.
To run multiple independent simulatoins from one input script, using
To run multiple independent simulations from one input script, using
multiple partitions, see "Section 6.4"_Section_howto.html#howto_4
of the manual. World- and universe-style "variables"_variable.html
are useful in this context.
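For illustration, a run that splits 8 processors into 4 partitions of 2 processors each could look like this (the input script name is a placeholder):
mpirun -np 8 lmp_mpi -partition 4x2 -in in.polymer :pre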
@ -1673,7 +1667,7 @@ The first section provides a global loop timing summary. The {loop time}
is the total wall time for the section. The {Performance} line is
provided for convenience to help predict the number of loop
continuations required and for comparing performance with other,
similar MD codes. The {CPU use} line provides the CPU utilzation per
similar MD codes. The {CPU use} line provides the CPU utilization per
MPI task; it should be close to 100% times the number of OpenMP
threads (or 1 if no OpenMP). Lower numbers correspond to delays due
to file I/O or insufficient thread utilization.

View File

@ -48,6 +48,7 @@ own sub-directories with their own Makefiles and/or README files.
"chain"_#chain
"colvars"_#colvars
"createatoms"_#createatoms
"doxygen"_#doxygen
"drude"_#drude
"eam database"_#eamdb
"eam generate"_#eamgn
@ -110,14 +111,21 @@ back-and-forth between the CHARMM MD code and LAMMPS.
They are intended to make it easy to use CHARMM as a builder and as a
post-processor for LAMMPS. Using charmm2lammps.pl, you can convert a
PDB file with associated CHARMM info, including CHARMM force field
data, into its LAMMPS equivalent. Using lammps2pdb.pl you can convert
LAMMPS atom dumps into PDB files.
data, into its LAMMPS equivalent. Support for the CMAP correction of
CHARMM22 and later is available as an option. This tool can also add
solvent water molecules and Na+ or Cl- ions to the system.
Using lammps2pdb.pl you can convert LAMMPS atom dumps into PDB files.
See the README file in the ch2lmp sub-directory for more information.
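A hypothetical invocation looks like the following; the force-field and project names are placeholders, and the exact options (including the CMAP-related ones) are documented in that README:
perl charmm2lammps.pl all36_prot 1abc :pre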
These tools were created by Pieter in't Veld (pjintve at sandia.gov)
and Paul Crozier (pscrozi at sandia.gov) at Sandia.
CMAP support added and tested by Xiaohu Hu (hux2 at ornl.gov) and
Robert A. Latour (latourr at clemson.edu), David Hyde-Volpe, and
Tigran Abramyan, (Clemson University) and
Chris Lorenz (chris.lorenz at kcl.ac.uk), King's College London.
:line
chain tool :h4,link(chain)
@ -172,6 +180,18 @@ The tool is authored by Xiaowang Zhou (Sandia), xzhou at sandia.gov.
:line
doxygen tool :h4,link(doxygen)
The tools/doxygen directory contains a shell script called
doxygen.sh which can generate a call graph and API lists using
the "Doxygen"_http://doxygen.org software.
See the included README file for details.
The tool is authored by Nandor Tamaskovics, numericalfreedom at googlemail.com.
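Assuming Doxygen is installed and on your PATH, the intended usage is presumably just to run the script from its directory (check the included README first):
cd tools/doxygen
sh doxygen.sh :pre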
:line
drude tool :h4,link(drude)
The tools/drude directory contains a Python script called

View File

@ -78,7 +78,7 @@ order of operations compared to LAMMPS without acceleration:
Neighbor lists can be created in a different order :ulb,l
Bins used for sorting atoms can be oriented differently :l
The default stencil order for PPPM is 7. By default, LAMMPS will
calculate other PPPM parameters to fit the desired acuracy with
calculate other PPPM parameters to fit the desired accuracy with
this order :l
The {newton} setting applies to all atoms, not just atoms shared
between MPI tasks :l

View File

@ -11,336 +11,346 @@
5.3.3 KOKKOS package :h5
The KOKKOS package was developed primarily by Christian Trott (Sandia)
with contributions of various styles by others, including Sikandar
Mashayak (UIUC), Stan Moore (Sandia), and Ray Shan (Sandia). The
underlying Kokkos library was written primarily by Carter Edwards,
Kokkos is a templated C++ library that provides abstractions to allow
a single implementation of an application kernel (e.g. a pair style) to run efficiently on
different kinds of hardware, such as GPUs, Intel Xeon Phis, or many-core
CPUs. Kokkos maps the C++ kernel onto different backend languages such as CUDA, OpenMP, or Pthreads.
The Kokkos library also provides data abstractions to adjust (at
compile time) the memory layout of data structures like 2d and
3d arrays to optimize performance on different hardware. For more information on Kokkos, see
"Github"_https://github.com/kokkos/kokkos. Kokkos is part of
"Trilinos"_http://trilinos.sandia.gov/packages/kokkos. The Kokkos library was written primarily by Carter Edwards,
Christian Trott, and Dan Sunderland (all Sandia).
The KOKKOS package contains versions of pair, fix, and atom styles
The LAMMPS KOKKOS package contains versions of pair, fix, and atom styles
that use data structures and macros provided by the Kokkos library,
which is included with LAMMPS in lib/kokkos.
The Kokkos library is part of
"Trilinos"_http://trilinos.sandia.gov/packages/kokkos and can also be
downloaded from "Github"_https://github.com/kokkos/kokkos. Kokkos is a
templated C++ library that provides two key abstractions for an
application like LAMMPS. First, it allows a single implementation of
an application kernel (e.g. a pair style) to run efficiently on
different kinds of hardware, such as a GPU, Intel Phi, or many-core
CPU.
The Kokkos library also provides data abstractions to adjust (at
compile time) the memory layout of basic data structures like 2d and
3d arrays and allow the transparent utilization of special hardware
load and store operations. Such data structures are used in LAMMPS to
store atom coordinates or forces or neighbor lists. The layout is
chosen to optimize performance on different platforms. Again this
functionality is hidden from the developer, and does not affect how
the kernel is coded.
These abstractions are set at build time, when LAMMPS is compiled with
the KOKKOS package installed. All Kokkos operations occur within the
context of an individual MPI task running on a single node of the
machine. The total number of MPI tasks used by LAMMPS (one or
multiple per compute node) is set in the usual manner via the mpirun
or mpiexec commands, and is independent of Kokkos.
which is included with LAMMPS in /lib/kokkos. The KOKKOS package was developed primarily by Christian Trott (Sandia)
and Stan Moore (Sandia) with contributions of various styles by others, including Sikandar
Mashayak (UIUC), Ray Shan (Sandia), and Dan Ibanez (Sandia). For more information on developing using Kokkos abstractions
see the Kokkos programmers' guide at /lib/kokkos/doc/Kokkos_PG.pdf.
Kokkos currently provides support for 3 modes of execution (per MPI
task). These are OpenMP (for many-core CPUs), Cuda (for NVIDIA GPUs),
and OpenMP (for Intel Phi). Note that the KOKKOS package supports
running on the Phi in native mode, not offload mode like the
USER-INTEL package supports. You choose the mode at build time to
task). These are Serial (MPI-only for CPUs and Intel Phi), OpenMP (threading
for many-core CPUs and Intel Phi), and CUDA (for NVIDIA GPUs). You choose the mode at build time to
produce an executable compatible with specific hardware.
Here is a quick overview of how to use the KOKKOS package
for CPU acceleration, assuming one or more 16-core nodes.
More details follow.
use a C++11 compatible compiler
make yes-kokkos
make mpi KOKKOS_DEVICES=OpenMP # build with the KOKKOS package
make kokkos_omp # or Makefile.kokkos_omp already has variable set :pre
mpirun -np 16 lmp_mpi -k on -sf kk -in in.lj # 1 node, 16 MPI tasks/node, no threads
mpirun -np 2 -ppn 1 lmp_mpi -k on t 16 -sf kk -in in.lj # 2 nodes, 1 MPI task/node, 16 threads/task
mpirun -np 2 lmp_mpi -k on t 8 -sf kk -in in.lj # 1 node, 2 MPI tasks/node, 8 threads/task
mpirun -np 32 -ppn 4 lmp_mpi -k on t 4 -sf kk -in in.lj # 8 nodes, 4 MPI tasks/node, 4 threads/task :pre
specify variables and settings in your Makefile.machine that enable OpenMP, GPU, or Phi support
include the KOKKOS package and build LAMMPS
enable the KOKKOS package and its hardware options via the "-k on" command-line switch use KOKKOS styles in your input script :ul
Here is a quick overview of how to use the KOKKOS package for GPUs,
assuming one or more nodes, each with 16 cores and a GPU. More
details follow.
discuss use of NVCC, which Makefiles to examine
use a C++11 compatible compiler
KOKKOS_DEVICES = Cuda, OpenMP
KOKKOS_ARCH = Kepler35
make yes-kokkos
make machine :pre
mpirun -np 1 lmp_cuda -k on t 6 -sf kk -in in.lj # one MPI task, 6 threads on CPU
mpirun -np 4 -ppn 1 lmp_cuda -k on t 6 -sf kk -in in.lj # ditto on 4 nodes :pre
mpirun -np 2 lmp_cuda -k on t 8 g 2 -sf kk -in in.lj # two MPI tasks, 8 threads per CPU
mpirun -np 32 -ppn 2 lmp_cuda -k on t 8 g 2 -sf kk -in in.lj # ditto on 16 nodes :pre
Here is a quick overview of how to use the KOKKOS package
for the Intel Phi:
use a C++11 compatible compiler
KOKKOS_DEVICES = OpenMP
KOKKOS_ARCH = KNC
make yes-kokkos
make machine :pre
host=MIC, Intel Phi with 61 cores (240 threads/phi via 4x hardware threading):
mpirun -np 1 lmp_g++ -k on t 240 -sf kk -in in.lj # 1 MPI task on 1 Phi, 1*240 = 240
mpirun -np 30 lmp_g++ -k on t 8 -sf kk -in in.lj # 30 MPI tasks on 1 Phi, 30*8 = 240
mpirun -np 12 lmp_g++ -k on t 20 -sf kk -in in.lj # 12 MPI tasks on 1 Phi, 12*20 = 240
mpirun -np 96 -ppn 12 lmp_g++ -k on t 20 -sf kk -in in.lj # ditto on 8 Phis :pre
[Required hardware/software:]
Kokkos support within LAMMPS must be built with a C++11 compatible
compiler. If using gcc, version 4.7.2 or later is required.
To build with Kokkos support for CPUs, your compiler must support the
OpenMP interface. You should have one or more multi-core CPUs so that
multiple threads can be launched by each MPI task running on a CPU.
To build with Kokkos support for NVIDIA GPUs, NVIDIA CUDA software
version 7.5 or later must be installed on your system. See the
discussion for the "GPU"_accelerate_gpu.html package for details of
how to check and do this.
NOTE: For good performance of the KOKKOS package on GPUs, you must
have Kepler generation GPUs (or later). The Kokkos library exploits
texture cache options not supported by Telsa generation GPUs (or
older).
To build with Kokkos support for Intel Xeon Phi coprocessors, your
sysmte must be configured to use them in "native" mode, not "offload"
mode like the USER-INTEL package supports.
[Building LAMMPS with the KOKKOS package:]
You must choose at build time whether to build for CPUs (OpenMP),
GPUs, or Phi.
NOTE: Kokkos support within LAMMPS must be built with a C++11 compatible
compiler. This means GCC version 4.7.2 or later, Intel 14.0.4 or later, or
Clang 3.5.2 or later is required.
You can do any of these in one line, using the suitable make command
line flags as described in "Section 4"_Section_packages.html of the
manual. If run from the src directory, these
commands will create src/lmp_kokkos_omp, lmp_kokkos_cuda_mpi, and
lmp_kokkos_phi. Note that the OMP and PHI options use
src/MAKE/Makefile.mpi as the starting Makefile.machine. The CUDA
option uses src/MAKE/OPTIONS/Makefile.kokkos_cuda_mpi.
The recommended method of building the KOKKOS package is to start with the provided Kokkos
Makefiles in /src/MAKE/OPTIONS/. You may need to modify the KOKKOS_ARCH variable in the Makefile
to match your specific hardware. For example:
The latter two steps can be done using the "-k on", "-pk kokkos" and
"-sf kk" "command-line switches"_Section_start.html#start_6
respectively. Or the effect of the "-pk" or "-sf" switches can be
duplicated by adding the "package kokkos"_package.html or "suffix
kk"_suffix.html commands respectively to your input script.
for Sandy Bridge CPUs, set KOKKOS_ARCH=SNB
for Broadwell CPUs, set KOKKOS_ARCH=BDW
for K80 GPUs, set KOKKOS_ARCH=Kepler37
for P100 GPUs and Power8 CPUs, set KOKKOS_ARCH=Pascal60,Power8 :ul
See the [Advanced Kokkos Options] section below for a listing of all KOKKOS_ARCH options.
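For instance, a hand-edited Makefile for a machine with K80 GPUs might contain lines like these (illustrative values only):
KOKKOS_DEVICES = Cuda, OpenMP
KOKKOS_ARCH = Kepler37 :pre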
Or you can follow these steps:
[Compile for CPU-only (MPI only, no threading):]
CPU-only (run all-MPI or with OpenMP threading):
cd lammps/src
make yes-kokkos
make kokkos_omp :pre
CPU-only (only MPI, no threading):
use a C++11 compatible compiler and set KOKKOS_ARCH variable in
/src/MAKE/OPTIONS/Makefile.kokkos_mpi_only as described above. Then do the
following:
cd lammps/src
make yes-kokkos
make kokkos_mpi_only :pre
Intel Xeon Phi (Intel Compiler, Intel MPI):
[Compile for CPU-only (MPI plus OpenMP threading):]
NOTE: To build with Kokkos support for OpenMP threading, your compiler must support the
OpenMP interface. You should have one or more multi-core CPUs so that
multiple threads can be launched by each MPI task running on a CPU.
use a C++11 compatible compiler and set KOKKOS_ARCH variable in
/src/MAKE/OPTIONS/Makefile.kokkos_omp as described above. Then do the
following:
cd lammps/src
make yes-kokkos
make kokkos_omp :pre
[Compile for Intel KNL Xeon Phi (Intel Compiler, OpenMPI):]
use a C++11 compatible compiler and do the following:
cd lammps/src
make yes-kokkos
make kokkos_phi :pre
CPUs and GPUs (with MPICH or OpenMPI):
[Compile for CPUs and GPUs (with OpenMPI or MPICH):]
NOTE: To build with Kokkos support for NVIDIA GPUs, NVIDIA CUDA software
version 7.5 or later must be installed on your system. See the
discussion for the "GPU"_accelerate_gpu.html package for details of
how to check and do this.
use a C++11 compatible compiler and set KOKKOS_ARCH variable in
/src/MAKE/OPTIONS/Makefile.kokkos_cuda_mpi for both GPU and CPU as described
above. Then do the following:
cd lammps/src
make yes-kokkos
make kokkos_cuda_mpi :pre
These examples set the KOKKOS-specific OMP, MIC, CUDA variables on the
make command line which requires a GNU-compatible make command. Try
[Alternative Methods of Compiling:]
Alternatively, the KOKKOS package can be built by specifying Kokkos variables
on the make command line. For example:
make mpi KOKKOS_DEVICES=OpenMP KOKKOS_ARCH=SNB # set the KOKKOS_DEVICES and KOKKOS_ARCH variable explicitly
make kokkos_cuda_mpi KOKKOS_ARCH=Pascal60,Power8 # set the KOKKOS_ARCH variable explicitly :pre
Setting the KOKKOS_DEVICES and KOKKOS_ARCH variables on the
make command line requires a GNU-compatible make command. Try
"gmake" if your system's standard make complains.
NOTE: If you build using make line variables and re-build LAMMPS twice
with different KOKKOS options and the *same* target, e.g. g++ in the
first two examples above, then you *must* perform a "make clean-all"
or "make clean-machine" before each build. This is to force all the
with different KOKKOS options and the *same* target, then you *must* perform a "make clean-all"
or "make clean-machine" before each build. This is to force all the
KOKKOS-dependent files to be re-compiled with the new options.
NOTE: Currently, there are no precision options with the KOKKOS
package. All compilation and computation is performed in double
precision.
[Running LAMMPS with the KOKKOS package:]
There are other allowed options when building with the KOKKOS package.
As above, they can be set either as variables on the make command line
or in Makefile.machine. This is the full list of options, including
those discussed above, Each takes a value shown below. The
default value is listed, which is set in the
lib/kokkos/Makefile.kokkos file.
All Kokkos operations occur within the
context of an individual MPI task running on a single node of the
machine. The total number of MPI tasks used by LAMMPS (one or
multiple per compute node) is set in the usual manner via the mpirun
or mpiexec commands, and is independent of Kokkos. E.g. the mpirun
command in OpenMPI does this via its
-np and -npernode switches. Ditto for MPICH via -np and -ppn.
#Default settings specific options
#Options: force_uvm,use_ldg,rdc
[Running on a multi-core CPU:]
KOKKOS_DEVICES, values = {OpenMP}, {Serial}, {Pthreads}, {Cuda}, default = {OpenMP}
KOKKOS_ARCH, values = {KNC}, {SNB}, {HSW}, {Kepler}, {Kepler30}, {Kepler32}, {Kepler35}, {Kepler37}, {Maxwell}, {Maxwell50}, {Maxwell52}, {Maxwell53}, {ARMv8}, {BGQ}, {Power7}, {Power8}, default = {none}
KOKKOS_DEBUG, values = {yes}, {no}, default = {no}
KOKKOS_USE_TPLS, values = {hwloc}, {librt}, default = {none}
KOKKOS_CUDA_OPTIONS, values = {force_uvm}, {use_ldg}, {rdc} :ul
Here is a quick overview of how to use the KOKKOS package
for CPU acceleration, assuming one or more 16-core nodes.
KOKKOS_DEVICE sets the parallelization method used for Kokkos code
(within LAMMPS). KOKKOS_DEVICES=OpenMP means that OpenMP will be
used. KOKKOS_DEVICES=Pthreads means that pthreads will be used.
KOKKOS_DEVICES=Cuda means an NVIDIA GPU running CUDA will be used.
If KOKKOS_DEVICES=Cuda, then the lo-level Makefile in the src/MAKE
directory must use "nvcc" as its compiler, via its CC setting. For
best performance its CCFLAGS setting should use -O3 and have a
KOKKOS_ARCH setting that matches the compute capability of your NVIDIA
hardware and software installation, e.g. KOKKOS_ARCH=Kepler30. Note
the minimal required compute capability is 2.0, but this will give
significantly reduced performance compared to Kepler generation GPUs
with compute capability 3.x. For the LINK setting, "nvcc" should not
be used; instead use g++ or another compiler suitable for linking C++
applications. Often you will want to use your MPI compiler wrapper
for this setting (i.e. mpicxx). Finally, the lo-level Makefile must
also have a "Compilation rule" for creating *.o files from *.cu files.
See src/Makefile.cuda for an example of a lo-level Makefile with all
of these settings.
KOKKOS_USE_TPLS=hwloc binds threads to hardware cores, so they do not
migrate during a simulation. KOKKOS_USE_TPLS=hwloc should always be
used if running with KOKKOS_DEVICES=Pthreads for pthreads. It is not
necessary for KOKKOS_DEVICES=OpenMP for OpenMP, because OpenMP
provides alternative methods via environment variables for binding
threads to hardware cores. More info on binding threads to cores is
given in "Section 5.3"_Section_accelerate.html#acc_3.
KOKKOS_ARCH=KNC enables compiler switches needed when compiling for an
Intel Phi processor.
KOKKOS_USE_TPLS=librt enables use of a more accurate timer mechanism
on most Unix platforms. This library is not available on all
platforms.
KOKKOS_DEBUG is only useful when developing a Kokkos-enabled style
within LAMMPS. KOKKOS_DEBUG=yes enables printing of run-time
debugging information that can be useful. It also enables runtime
bounds checking on Kokkos data structures.
KOKKOS_CUDA_OPTIONS are additional options for CUDA.
For more information on Kokkos see the Kokkos programmers' guide here:
/lib/kokkos/doc/Kokkos_PG.pdf.
[Run with the KOKKOS package from the command line:]
The mpirun or mpiexec command sets the total number of MPI tasks used
by LAMMPS (one or multiple per compute node) and the number of MPI
tasks used per node. E.g. the mpirun command in MPICH does this via
its -np and -ppn switches. Ditto for OpenMPI via -np and -npernode.
When using KOKKOS built with host=OMP, you need to choose how many
OpenMP threads per MPI task will be used (via the "-k" command-line
switch discussed below). Note that the product of MPI tasks * OpenMP
threads/task should not exceed the physical number of cores (on a
node), otherwise performance will suffer.
When using the KOKKOS package built with device=CUDA, you must use
exactly one MPI task per physical GPU.
When using the KOKKOS package built with host=MIC for Intel Xeon Phi
coprocessor support you need to insure there are one or more MPI tasks
per coprocessor, and choose the number of coprocessor threads to use
per MPI task (via the "-k" command-line switch discussed below). The
product of MPI tasks * coprocessor threads/task should not exceed the
maximum number of threads the coprocessor is designed to run,
otherwise performance will suffer. This value is 240 for current
generation Xeon Phi(TM) chips, which is 60 physical cores * 4
threads/core. Note that with the KOKKOS package you do not need to
specify how many Phi coprocessors there are per node; each
coprocessors is simply treated as running some number of MPI tasks.
mpirun -np 16 lmp_kokkos_mpi_only -k on -sf kk -in in.lj # 1 node, 16 MPI tasks/node, no multi-threading
mpirun -np 2 -ppn 1 lmp_kokkos_omp -k on t 16 -sf kk -in in.lj # 2 nodes, 1 MPI task/node, 16 threads/task
mpirun -np 2 lmp_kokkos_omp -k on t 8 -sf kk -in in.lj # 1 node, 2 MPI tasks/node, 8 threads/task
mpirun -np 32 -ppn 4 lmp_kokkos_omp -k on t 4 -sf kk -in in.lj # 8 nodes, 4 MPI tasks/node, 4 threads/task :pre
To run using the KOKKOS package, use the "-k on", "-sf kk" and "-pk kokkos" "command-line switches"_Section_start.html#start_7 in your mpirun command.
You must use the "-k on" "command-line
switch"_Section_start.html#start_6 to enable the KOKKOS package. It
switch"_Section_start.html#start_7 to enable the KOKKOS package. It
takes additional arguments for hardware settings appropriate to your
system. Those arguments are "documented
here"_Section_start.html#start_6. The two most commonly used
options are:
system. Those arguments are "documented
here"_Section_start.html#start_7. For OpenMP use:
-k on t Nt g Ng :pre
The "t Nt" option applies to host=OMP (even if device=CUDA) and
host=MIC. For host=OMP, it specifies how many OpenMP threads per MPI
task to use with a node. For host=MIC, it specifies how many Xeon Phi
threads per MPI task to use within a node. The default is Nt = 1.
Note that for host=OMP this is effectively MPI-only mode which may be
fine. But for host=MIC you will typically end up using far less than
all the 240 available threads, which could give very poor performance.
The "g Ng" option applies to device=CUDA. It specifies how many GPUs
per compute node to use. The default is 1, so this only needs to be
specified is you have 2 or more GPUs per compute node.
-k on t Nt :pre
The "t Nt" option specifies how many OpenMP threads per MPI
task to use with a node. The default is Nt = 1, which is MPI-only mode.
Note that the product of MPI tasks * OpenMP
threads/task should not exceed the physical number of cores (on a
node), otherwise performance will suffer. If hyperthreading is enabled, then
the product of MPI tasks * OpenMP threads/task should not exceed the
physical number of cores * hardware threads.
The "-k on" switch also issues a "package kokkos" command (with no
additional arguments) which sets various KOKKOS options to default
values, as discussed on the "package"_package.html command doc page.
Use the "-sf kk" "command-line switch"_Section_start.html#start_6,
which will automatically append "kk" to styles that support it. Use
the "-pk kokkos" "command-line switch"_Section_start.html#start_6 if
you wish to change any of the default "package kokkos"_package.html
optionns set by the "-k on" "command-line
switch"_Section_start.html#start_6.
The "-sf kk" "command-line switch"_Section_start.html#start_7
will automatically append the "/kk" suffix to styles that support it.
In this manner no modification to the input script is needed. Alternatively,
one can run with the KOKKOS package by editing the input script as described below.
Note that the default for the "package kokkos"_package.html command is
NOTE: The default for the "package kokkos"_package.html command is
to use "full" neighbor lists and set the Newton flag to "off" for both
pairwise and bonded interactions. This typically gives fastest
performance. If the "newton"_newton.html command is used in the input
script, it can override the Newton flag defaults.
However, when running in MPI-only mode with 1 thread per MPI task, it
pairwise and bonded interactions. However, when running on CPUs, it
will typically be faster to use "half" neighbor lists and set the
Newton flag to "on", just as is the case for non-accelerated pair
styles. You can do this with the "-pk" "command-line
switch"_Section_start.html#start_6.
styles. It can also be faster to use non-threaded communication.
Use the "-pk kokkos" "command-line switch"_Section_start.html#start_7 to
change the default "package kokkos"_package.html
options. See its doc page for details and default settings. Experimenting with
its options can provide a speed-up for specific calculations. For example:
[Or run with the KOKKOS package by editing an input script:]
mpirun -np 16 lmp_kokkos_mpi_only -k on -sf kk -pk kokkos newton on neigh half comm no -in in.lj # Newton on, Half neighbor list, non-threaded comm :pre
The discussion above for the mpirun/mpiexec command and setting
appropriate thread and GPU values for host=OMP or host=MIC or
device=CUDA are the same.
If the "newton"_newton.html command is used in the input
script, it can also override the Newton flag defaults.
[Core and Thread Affinity:]
When using multi-threading, it is important for
performance to bind both MPI tasks to physical cores, and threads to
physical cores, so they do not migrate during a simulation.
If you are not certain MPI tasks are being bound (check the defaults
for your MPI installation), binding can be forced with these flags:
OpenMPI 1.8: mpirun -np 2 --bind-to socket --map-by socket ./lmp_openmpi ...
Mvapich2 2.0: mpiexec -np 2 --bind-to socket --map-by socket ./lmp_mvapich ... :pre
For binding threads with KOKKOS OpenMP, use thread affinity
environment variables to force binding. With OpenMP 3.1 (gcc 4.7 or
later, intel 12 or later) setting the environment variable
OMP_PROC_BIND=true should be sufficient. In general, for best performance
with OpenMP 4.0 or better set OMP_PROC_BIND=spread and OMP_PLACES=threads.
For binding threads with the
KOKKOS pthreads option, compile LAMMPS with the KOKKOS HWLOC=yes option
as described below.
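A sketch of what this can look like in practice for an OpenMP build (the binding flags and thread counts are illustrative):
export OMP_PROC_BIND=spread
export OMP_PLACES=threads
mpirun -np 2 --bind-to socket --map-by socket lmp_kokkos_omp -k on t 8 -sf kk -in in.lj :pre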
[Running on Knight's Landing (KNL) Intel Xeon Phi:]
Here is a quick overview of how to use the KOKKOS package
for the Intel Knight's Landing (KNL) Xeon Phi:
KNL Intel Phi chips have 68 physical cores. Typically 1 to 4 cores
are reserved for the OS, and only 64 or 66 cores are used. Each core
has 4 hyperthreads, so there are effectively N = 256 (4*64) or
N = 264 (4*66) cores to run on. The product of MPI tasks * OpenMP threads/task should not exceed this limit,
otherwise performance will suffer. Note that with the KOKKOS package you do not need to
specify how many KNLs there are per node; each
KNL is simply treated as running some number of MPI tasks.
Examples of mpirun commands that follow these rules are shown below.
Intel KNL node with 68 cores (272 threads/node via 4x hardware threading):
mpirun -np 64 lmp_kokkos_phi -k on t 4 -sf kk -in in.lj # 1 node, 64 MPI tasks/node, 4 threads/task
mpirun -np 66 lmp_kokkos_phi -k on t 4 -sf kk -in in.lj # 1 node, 66 MPI tasks/node, 4 threads/task
mpirun -np 32 lmp_kokkos_phi -k on t 8 -sf kk -in in.lj # 1 node, 32 MPI tasks/node, 8 threads/task
mpirun -np 512 -ppn 64 lmp_kokkos_phi -k on t 4 -sf kk -in in.lj # 8 nodes, 64 MPI tasks/node, 4 threads/task :pre
The -np setting of the mpirun command sets the number of MPI
tasks/node. The "-k on t Nt" command-line switch sets the number of
threads/task as Nt. The product of these two values should be N, i.e.
256 or 264.
NOTE: The default for the "package kokkos"_package.html command is
to use "full" neighbor lists and set the Newton flag to "off" for both
pairwise and bonded interactions. When running on KNL, this
will typically be best for pair-wise potentials. For manybody potentials,
using "half" neighbor lists and setting the
Newton flag to "on" may be faster. It can also be faster to use non-threaded communication.
Use the "-pk kokkos" "command-line switch"_Section_start.html#start_7 to
change the default "package kokkos"_package.html
options. See its doc page for details and default settings. Experimenting with
its options can provide a speed-up for specific calculations. For example:
mpirun -np 64 lmp_kokkos_phi -k on t 4 -sf kk -pk kokkos comm no -in in.lj # Newton off, full neighbor list, non-threaded comm
mpirun -np 64 lmp_kokkos_phi -k on t 4 -sf kk -pk kokkos newton on neigh half comm no -in in.reax # Newton on, half neighbor list, non-threaded comm :pre
NOTE: MPI tasks and threads should be bound to cores as described above for CPUs.
NOTE: To build with Kokkos support for Intel Xeon Phi coprocessors such as Knight's Corner (KNC), your
system must be configured to use them in "native" mode, not "offload"
mode like the USER-INTEL package supports.
[Running on GPUs:]
Use the "-k" "command-line switch"_Section_commands.html#start_7 to
specify the number of GPUs per node. Typically the -np setting
of the mpirun command should set the number of MPI
tasks/node to be equal to the # of physical GPUs on the node.
You can assign multiple MPI tasks to the same GPU with the
KOKKOS package, but this is usually only faster if significant portions
of the input script have not been ported to use Kokkos. Using CUDA MPS
is recommended in this scenario. As above for multi-core CPUs (and no GPU), if N is the number
of physical cores/node, then the number of MPI tasks/node should not exceed N.
-k on g Ng :pre
Here are examples of how to use the KOKKOS package for GPUs,
assuming one or more nodes, each with two GPUs:
mpirun -np 2 lmp_kokkos_cuda_openmpi -k on g 2 -sf kk -in in.lj # 1 node, 2 MPI tasks/node, 2 GPUs/node
mpirun -np 32 -ppn 2 lmp_kokkos_cuda_openmpi -k on g 2 -sf kk -in in.lj # 16 nodes, 2 MPI tasks/node, 2 GPUs/node (32 GPUs total) :pre
NOTE: The default for the "package kokkos"_package.html command is
to use "full" neighbor lists and set the Newton flag to "off" for both
pairwise and bonded interactions, along with threaded communication.
When running on Maxwell or Kepler GPUs, this will typically be best. For Pascal GPUs,
using "half" neighbor lists and setting the
Newton flag to "on" may be faster. For many pair styles, setting the neighbor binsize
equal to the ghost atom cutoff will give speedup.
Use the "-pk kokkos" "command-line switch"_Section_start.html#start_7 to
change the default "package kokkos"_package.html
options. See its doc page for details and default settings. Experimenting with
its options can provide a speed-up for specific calculations. For example:
mpirun -np 2 lmp_kokkos_cuda_openmpi -k on g 2 -sf kk -pk kokkos binsize 2.8 -in in.lj # Set binsize = neighbor ghost cutoff
mpirun -np 2 lmp_kokkos_cuda_openmpi -k on g 2 -sf kk -pk kokkos newton on neigh half binsize 2.8 -in in.lj # Newton on, half neighborlist, set binsize = neighbor ghost cutoff :pre
NOTE: For good performance of the KOKKOS package on GPUs, you must
have Kepler generation GPUs (or later). The Kokkos library exploits
texture cache options not supported by Tesla generation GPUs (or
older).
NOTE: When using a GPU, you will achieve the best performance if your
input script does not use fix or compute styles which are not yet
Kokkos-enabled. This allows data to stay on the GPU for multiple
timesteps, without being copied back to the host CPU. Invoking a
non-Kokkos fix or compute, or performing I/O for
"thermo"_thermo_style.html or "dump"_dump.html output will cause data
to be copied back to the CPU incurring a performance penalty.
NOTE: To get an accurate timing breakdown between time spent in pair,
kspace, etc., you must set the environment variable CUDA_LAUNCH_BLOCKING=1.
However, this will reduce performance and is not recommended for production runs.
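For example, a short profiling run (not a production run) might be launched as:
export CUDA_LAUNCH_BLOCKING=1
mpirun -np 1 lmp_kokkos_cuda_openmpi -k on g 1 -sf kk -in in.lj :pre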
[Run with the KOKKOS package by editing an input script:]
Alternatively the effect of the "-sf" or "-pk" switches can be
duplicated by adding the "package kokkos"_package.html or "suffix
kk"_suffix.html commands to your input script.
The discussion above for building LAMMPS with the KOKKOS package, the
mpirun/mpiexec command, and setting the appropriate number of threads is the same.
You must still use the "-k on" "command-line
switch"_Section_start.html#start_6 to enable the KOKKOS package, and
switch"_Section_start.html#start_7 to enable the KOKKOS package, and
specify its additional arguments for hardware options appropriate to
your system, as documented above.
Use the "suffix kk"_suffix.html command, or you can explicitly add a
You can use the "suffix kk"_suffix.html command, or you can explicitly add a
"kk" suffix to individual styles in your input script, e.g.
pair_style lj/cut/kk 2.5 :pre
You only need to use the "package kokkos"_package.html command if you
wish to change any of its option defaults, as set by the "-k on"
"command-line switch"_Section_start.html#start_6.
"command-line switch"_Section_start.html#start_7.
[Using OpenMP threading and CUDA together (experimental):]
With the KOKKOS package, both OpenMP multi-threading and GPUs can be used
together in a few special cases. In the Makefile, the KOKKOS_DEVICES variable must
include both "Cuda" and "OpenMP", as is the case for /src/MAKE/OPTIONS/Makefile.kokkos_cuda_mpi
KOKKOS_DEVICES=Cuda,OpenMP :pre
The suffix "/kk" is equivalent to "/kk/device", and for Kokkos CUDA,
using the "-sf kk" in the command line gives the default CUDA version everywhere.
However, if the "/kk/host" suffix is added to a specific style in the input
script, the Kokkos OpenMP (CPU) version of that specific style will be used instead.
Set the number of OpenMP threads as "t Nt" and the number of GPUs as "g Ng"
-k on t Nt g Ng :pre
For example, the command to run with 1 GPU and 8 OpenMP threads is then:
mpiexec -np 1 lmp_kokkos_cuda_openmpi -in in.lj -k on g 1 t 8 -sf kk :pre
Conversely, if the "-sf kk/host" is used in the command line and then the
"/kk" or "/kk/device" suffix is added to a specific style in your input script,
then only that specific style will run on the GPU while everything else will
run on the CPU in OpenMP mode. Note that the execution of the CPU and GPU
styles will NOT overlap, except for a special case:
A kspace style and/or molecular topology (bonds, angles, etc.) running on
the host CPU can overlap with a pair style running on the GPU. First compile
with "--default-stream per-thread" added to CCFLAGS in the Kokkos CUDA Makefile.
Then explicitly use the "/kk/host" suffix for kspace and bonds, angles, etc.
in the input file and the "kk" suffix (equal to "kk/device") on the command line.
Also make sure the environment variable CUDA_LAUNCH_BLOCKING is not set to "1"
so CPU/GPU overlap can occur.
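A hypothetical sketch of that setup, with placeholder styles and cutoffs, keeps the long-range part on the host while the pair style picks up the device suffix from "-sf kk" on the command line:
kspace_style pppm/kk/host 1.0e-4
pair_style lj/cut/coul/long 10.0 :pre
mpirun -np 1 lmp_kokkos_cuda_openmpi -in in.script -k on g 1 t 8 -sf kk :pre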
[Speed-ups to expect:]
@ -353,7 +363,7 @@ Generally speaking, the following rules of thumb apply:
When running on CPUs only, with a single thread per MPI task,
performance of a KOKKOS style is somewhere between the standard
(un-accelerated) styles (MPI-only mode), and those provided by the
USER-OMP package. However the difference between all 3 is small (less
than 20%). :ulb,l
When running on CPUs only, with multiple threads per MPI task,
@ -363,7 +373,7 @@ package. :l
When running large number of atoms per GPU, KOKKOS is typically faster
than the GPU package. :l
When running on Intel Xeon Phi, KOKKOS is not as fast as
When running on Intel hardware, KOKKOS is not as fast as
the USER-INTEL package, which is optimized for that hardware. :l
:ule
@ -371,123 +381,78 @@ See the "Benchmark page"_http://lammps.sandia.gov/bench.html of the
LAMMPS web site for performance of the KOKKOS package on different
hardware.
[Guidelines for best performance:]
[Advanced Kokkos options:]
Here are guidline for using the KOKKOS package on the different
hardware configurations listed above.
There are other allowed options when building with the KOKKOS package.
As above, they can be set either as variables on the make command line
or in Makefile.machine. This is the full list of options, including
those discussed above. Each takes a value shown below. The
default value is listed, which is set in the
/lib/kokkos/Makefile.kokkos file.
Many of the guidelines use the "package kokkos"_package.html command
See its doc page for details and default settings. Experimenting with
its options can provide a speed-up for specific calculations.
KOKKOS_DEVICES, values = {Serial}, {OpenMP}, {Pthreads}, {Cuda}, default = {OpenMP}
KOKKOS_ARCH, values = {KNC}, {SNB}, {HSW}, {Kepler30}, {Kepler32}, {Kepler35}, {Kepler37}, {Maxwell50}, {Maxwell52}, {Maxwell53}, {Pascal60}, {Pascal61}, {ARMv80}, {ARMv81}, {ARMv8-ThunderX}, {BGQ}, {Power7}, {Power8}, {Power9}, {KNL}, {BDW}, {SKX}, default = {none}
KOKKOS_DEBUG, values = {yes}, {no}, default = {no}
KOKKOS_USE_TPLS, values = {hwloc}, {librt}, {experimental_memkind}, default = {none}
KOKKOS_CXX_STANDARD, values = {c++11}, {c++1z}, default = {c++11}
KOKKOS_OPTIONS, values = {aggressive_vectorization}, {disable_profiling}, default = {none}
KOKKOS_CUDA_OPTIONS, values = {force_uvm}, {use_ldg}, {rdc}, {enable_lambda}, default = {enable_lambda} :ul
[Running on a multi-core CPU:]
KOKKOS_DEVICES sets the parallelization method used for Kokkos code
(within LAMMPS). KOKKOS_DEVICES=Serial means that no threading will be used.
KOKKOS_DEVICES=OpenMP means that OpenMP threading will be
used. KOKKOS_DEVICES=Pthreads means that pthreads will be used.
KOKKOS_DEVICES=Cuda means an NVIDIA GPU running CUDA will be used.
If N is the number of physical cores/node, then the number of MPI
tasks/node * number of threads/task should not exceed N, and should
typically equal N. Note that the default threads/task is 1, as set by
the "t" keyword of the "-k" "command-line
switch"_Section_start.html#start_6. If you do not change this, no
additional parallelism (beyond MPI) will be invoked on the host
CPU(s).
KOKKOS_ARCH enables compiler switches needed when compiling for a
specific hardware:
You can compare the performance running in different modes:
ARMv80 = ARMv8.0 Compatible CPU
ARMv81 = ARMv8.1 Compatible CPU
ARMv8-ThunderX = ARMv8 Cavium ThunderX CPU
SNB = Intel Sandy/Ivy Bridge CPUs
HSW = Intel Haswell CPUs
BDW = Intel Broadwell Xeon E-class CPUs
SKX = Intel Sky Lake Xeon E-class HPC CPUs (AVX512)
KNC = Intel Knights Corner Xeon Phi
KNL = Intel Knights Landing Xeon Phi
Kepler30 = NVIDIA Kepler generation CC 3.0
Kepler32 = NVIDIA Kepler generation CC 3.2
Kepler35 = NVIDIA Kepler generation CC 3.5
Kepler37 = NVIDIA Kepler generation CC 3.7
Maxwell50 = NVIDIA Maxwell generation CC 5.0
Maxwell52 = NVIDIA Maxwell generation CC 5.2
Maxwell53 = NVIDIA Maxwell generation CC 5.3
Pascal60 = NVIDIA Pascal generation CC 6.0
Pascal61 = NVIDIA Pascal generation CC 6.1
BGQ = IBM Blue Gene/Q CPUs
Power8 = IBM POWER8 CPUs
Power9 = IBM POWER9 CPUs :ul
run with 1 MPI task/node and N threads/task
run with N MPI tasks/node and 1 thread/task
run with settings in between these extremes :ul
KOKKOS_USE_TPLS=hwloc binds threads to hardware cores, so they do not
migrate during a simulation. KOKKOS_USE_TPLS=hwloc should always be
used if running with KOKKOS_DEVICES=Pthreads for pthreads. It is not
necessary for KOKKOS_DEVICES=OpenMP for OpenMP, because OpenMP
provides alternative methods via environment variables for binding
threads to hardware cores. More info on binding threads to cores is
given in "Section 5.3"_Section_accelerate.html#acc_3.
Examples of mpirun commands in these modes are shown above.
KOKKOS_USE_TPLS=librt enables use of a more accurate timer mechanism
on most Unix platforms. This library is not available on all
platforms.
When using KOKKOS to perform multi-threading, it is important for
performance to bind both MPI tasks to physical cores, and threads to
physical cores, so they do not migrate during a simulation.
KOKKOS_DEBUG is only useful when developing a Kokkos-enabled style
within LAMMPS. KOKKOS_DEBUG=yes enables printing of run-time
debugging information that can be useful. It also enables runtime
bounds checking on Kokkos data structures.
If you are not certain MPI tasks are being bound (check the defaults
for your MPI installation), binding can be forced with these flags:
KOKKOS_CXX_STANDARD and KOKKOS_OPTIONS are typically not changed when building LAMMPS.
OpenMPI 1.8: mpirun -np 2 -bind-to socket -map-by socket ./lmp_openmpi ...
Mvapich2 2.0: mpiexec -np 2 -bind-to socket -map-by socket ./lmp_mvapich ... :pre
For binding threads with the KOKKOS OMP option, use thread affinity
environment variables to force binding. With OpenMP 3.1 (gcc 4.7 or
later, intel 12 or later) setting the environment variable
OMP_PROC_BIND=true should be sufficient. For binding threads with the
KOKKOS pthreads option, compile LAMMPS the KOKKOS HWLOC=yes option
(see "this section"_Section_packages.html#KOKKOS of the manual for
details).
[Running on GPUs:]
Insure the -arch setting in the machine makefile you are using,
e.g. src/MAKE/Makefile.cuda, is correct for your GPU hardware/software.
(see "this section"_Section_packages.html#KOKKOS of the manual for
details).
The -np setting of the mpirun command should set the number of MPI
tasks/node to be equal to the # of physical GPUs on the node.
Use the "-k" "command-line switch"_Section_commands.html#start_6 to
specify the number of GPUs per node, and the number of threads per MPI
task. As above for multi-core CPUs (and no GPU), if N is the number
of physical cores/node, then the number of MPI tasks/node * number of
threads/task should not exceed N. With one GPU (and one MPI task) it
may be faster to use less than all the available cores, by setting
threads/task to a smaller value. This is because using all the cores
on a dual-socket node will incur extra cost to copy memory from the
2nd socket to the GPU.
Examples of mpirun commands that follow these rules are shown above.
NOTE: When using a GPU, you will achieve the best performance if your
input script does not use any fix or compute styles which are not yet
Kokkos-enabled. This allows data to stay on the GPU for multiple
timesteps, without being copied back to the host CPU. Invoking a
non-Kokkos fix or compute, or performing I/O for
"thermo"_thermo_style.html or "dump"_dump.html output will cause data
to be copied back to the CPU.
You cannot yet assign multiple MPI tasks to the same GPU with the
KOKKOS package. We plan to support this in the future, similar to the
GPU package in LAMMPS.
You cannot yet use both the host (multi-threaded) and device (GPU)
together to compute pairwise interactions with the KOKKOS package. We
hope to support this in the future, similar to the GPU package in
LAMMPS.
[Running on an Intel Phi:]
Kokkos only uses Intel Phi processors in their "native" mode, i.e.
not hosted by a CPU.
As illustrated above, build LAMMPS with OMP=yes (the default) and
MIC=yes. The latter insures code is correctly compiled for the Intel
Phi. The OMP setting means OpenMP will be used for parallelization on
the Phi, which is currently the best option within Kokkos. In the
future, other options may be added.
Current-generation Intel Phi chips have either 61 or 57 cores. One
core should be excluded for running the OS, leaving 60 or 56 cores.
Each core is hyperthreaded, so there are effectively N = 240 (4*60) or
N = 224 (4*56) cores to run on.
The -np setting of the mpirun command sets the number of MPI
tasks/node. The "-k on t Nt" command-line switch sets the number of
threads/task as Nt. The product of these 2 values should be N, i.e.
240 or 224. Also, the number of threads/task should be a multiple of
4 so that logical threads from more than one MPI task do not run on
the same physical core.
Examples of mpirun commands that follow these rules are shown above.
KOKKOS_CUDA_OPTIONS are additional options for CUDA. The LAMMPS KOKKOS package must be compiled
with the {enable_lambda} option when using GPUs.
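As with the options discussed earlier, these can be set on a GNU make command line; for example, enabling hwloc thread pinning for an OpenMP build might look like (illustrative only):
make kokkos_omp KOKKOS_USE_TPLS=hwloc :pre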
[Restrictions:]
As noted above, if using GPUs, the number of MPI tasks per compute
node should equal the number of GPUs per compute node. In the
future Kokkos will support assigning multiple MPI tasks to a single
GPU.
Currently Kokkos does not support AMD GPUs due to limits in the
available backend programming models. Specifically, Kokkos requires
extensive C++ support from the Kernel language. This is expected to
change in the future.
Currently, there are no precision options with the KOKKOS
package. All compilation and computation is performed in double
precision.

View File

@ -9,6 +9,7 @@
angle_style class2 command :h3
angle_style class2/omp command :h3
angle_style class2/kk command :h3
angle_style class2/p6 command :h3
[Syntax:]
@ -102,11 +103,29 @@ more instructions on how to use the accelerated styles effectively.
:line
The {class2/p6} angle style uses the {class2} potential expanded to sixth order:
:c,image(Eqs/angle_class2_p6.jpg)
For this expanded form, 6 coefficients of the Ea formula need to be set:
theta0 (degrees)
K2 (energy/radian^2)
K3 (energy/radian^3)
K4 (energy/radian^4)
K5 (energy/radian^5)
K6 (energy/radian^6) :ul
The bond-bond and bond-angle terms remain unchanged.
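A hypothetical coefficient line in that order, with placeholder numbers rather than an actual parameterization, would be:
angle_style class2/p6
angle_coeff 1 120.0 50.0 -10.0 5.0 -1.0 0.2 :pre
As noted above, the bond-bond and bond-angle cross terms are set as for the regular {class2} style.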
:line
[Restrictions:]
This angle style can only be used if LAMMPS was built with the CLASS2
package. See the "Making LAMMPS"_Section_start.html#start_3 section
for more info on packages.
package. For the {class2/p6} style LAMMPS needs to be built with the
USER-MOFFF package. See the "Making LAMMPS"_Section_start.html#start_3
section for more info on packages.
[Related commands:]

View File

@ -0,0 +1,65 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
angle_style cosine/buck6d command :h3
[Syntax:]
angle_style cosine/buck6d :pre
[Examples:]
angle_style cosine/buck6d
angle_coeff 1 cosine/buck6d 1.978350 4 180.000000 :pre
[Description:]
The {cosine/buck6d} angle style uses the potential
:c,image(Eqs/angle_cosine_buck6d.jpg)
where K is the energy constant, n is the periodic multiplicity and
Theta0 is the equilibrium angle.
The coefficients must be defined for each angle type via the
"angle_coeff"_angle_coeff.html command as in the example above, or in
the data file or restart files read by the "read_data"_read_data.html
or "read_restart"_read_restart.html commands in the following order:
K (energy)
n
Theta0 (degrees) :ul
Theta0 is specified in degrees, but LAMMPS converts it to radians
internally.
In addition to the cosine term, the {cosine/buck6d} angle style computes
the short range (vdW) interaction belonging to the
"pair_buck6d"_pair_buck6d_coul_gauss.html between the end atoms of
the angle. For this reason this angle style only works in combination
with the "pair_buck6d"_pair_buck6d_coul_gauss.html styles and needs
the "special_bonds"_special_bonds.html 1-3 interactions to be weighted
0.0 to prevent double counting.
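A minimal sketch of a compatible weighting (only the zero 1-3 factor is mandated; the 1-2 and 1-4 factors shown are placeholders):
special_bonds lj/coul 0.0 0.0 1.0 :pre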
:line
[Restrictions:]
{cosine/buck6d} can only be used in combination with the
"pair_buck6d"_pair_buck6d_coul_gauss.html style and with a
"special_bonds"_special_bonds.html 0.0 weighting of 1-3 interactions.
This angle style can only be used if LAMMPS was built with the
USER-MOFFF package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info on packages.
[Related commands:]
"angle_coeff"_angle_coeff.html
[Default:] none

View File

@ -8,6 +8,7 @@ Angle Styles :h1
angle_charmm
angle_class2
angle_cosine
angle_cosine_buck6d
angle_cosine_delta
angle_cosine_periodic
angle_cosine_shift

View File

@ -261,7 +261,7 @@ For images created by the "dump image"_dump_image.html command, if the
polygon consisting of N line segments. Note that the line segments
are drawn between the N vertices, which does not correspond exactly to
the physical extent of the body (because the "pair_style
rounded/polygon"_pair_body_rounded_polygon.cpp defines finite-size
rounded/polygon"_pair_body_rounded_polygon.html defines finite-size
spheres at those points and the line segments between the spheres are
tangent to the spheres). The drawn diameter of each line segment is
determined by the {bflag1} parameter for the {body} keyword. The

View File

@ -28,7 +28,7 @@ compute 1 all aggregate/atom 3.5 :pre
[Description:]
Define a computation that assigns each atom a cluster, fragement,
Define a computation that assigns each atom a cluster, fragment,
or aggregate ID.
A cluster is defined as a set of atoms, each of which is within the
@ -53,7 +53,7 @@ like micelles.
Only atoms in the compute group are clustered and assigned cluster
IDs. Atoms not in the compute group are assigned a cluster ID = 0.
For fragments, only bonds where [both] atoms of the bond are included
in the compute group are assigned to fragments, so that only fragmets
in the compute group are assigned to fragments, so that only fragments
are detected where [all] atoms are in the compute group. Thus atoms
may be included in the compute group, yet still have a fragment ID of 0.

View File

@ -27,8 +27,8 @@ compute 1 all dihedral/local phi :pre
Define a computation that calculates properties of individual dihedral
interactions. The number of datums generated, aggregated across all
processors, equals the number of angles in the system, modified by the
group parameter as explained below.
processors, equals the number of dihedral angles in the system, modified
by the group parameter as explained below.
The value {phi} is the dihedral angle, as defined in the diagram on
the "dihedral_style"_dihedral_style.html doc page.

View File

@ -67,7 +67,7 @@ parameters. This is followed by that number of integers giving the
degree of each order parameter. Because {Q}2 and all odd-degree order
parameters are zero for atoms in cubic crystals (see
"Steinhardt"_#Steinhardt), the default order parameters are {Q}4,
{Q}6, {Q}8, {Q}10, and {Q}12. For the FCC crystal with {nnn}=12, {Q}4
{Q}6, {Q}8, {Q}10, and {Q}12. For the FCC crystal with {nnn} =12, {Q}4
= sqrt(7/3)/8 = 0.19094.... The numerical values of all order
parameters up to {Q}12 for a range of commonly encountered
high-symmetry structures are given in Table I of "Mickel et

View File

@ -25,15 +25,15 @@ compute 2 all pressure/uef my_temp_uef virial :pre
[Description:]
This command is used to compute the pressure tensor in
the reference frame of the applied flow field when
"fix nvt/uef"_fix_nh_uef.html" or
"fix npt/uef"_fix_nh_uef.html" is used.
"fix nvt/uef"_fix_nh_uef.html" or
"fix npt/uef"_fix_nh_uef.html" is used.
It is not necessary to use this command to compute the scalar
value of the pressure. A "compute pressure"_compute_pressure.html
may be used for that purpose.
The keywords and output information are documented in
The keywords and output information are documented in
"compute_pressure"_compute_pressure.html.
[Restrictions:]
@ -46,8 +46,8 @@ This command can only be used when "fix nvt/uef"_fix_nh_uef.html
or "fix npt/uef"_fix_nh_uef.html is active.
The kinetic contribution to the pressure tensor
will be accurate only when
the compute specificed by {temp-ID} is a
will be accurate only when
the compute specified by {temp-ID} is a
"compute temp/uef"_compute_temp_uef.html.
[Related commands:]

View File

@ -224,7 +224,7 @@ block contains six sub-blocks corresponding to the {xx}, {yy}, {zz},
notation. Each of these sub-blocks contains one column for each
bispectrum component, the same as for compute {sna/atom}
For example, if {K}=30 and ntypes=1, the number of columns in the per-atom
For example, if {K} =30 and ntypes=1, the number of columns in the per-atom
arrays generated by {sna/atom}, {snad/atom}, and {snav/atom}
are 30, 90, and 180, respectively. With {quadratic} value=1,
the numbers of columns are 930, 2790, and 5580, respectively.

View File

@ -36,9 +36,9 @@ keyword = {mol} or {basis} or {remap} or {var} or {set} or {units} :l
{set} values = dim name
dim = {x} or {y} or {z}
name = name of variable to set with x, y, or z atom position
{rotate} values = Rx Ry Rz theta
Rx,Ry,Rz = rotation vector for single molecule
{rotate} values = theta Rx Ry Rz
theta = rotation angle for single molecule (degrees)
Rx,Ry,Rz = rotation vector for single molecule
{units} value = {lattice} or {box}
{lattice} = the geometry is defined in lattice units
{box} = the geometry is defined in simulation box units :pre
@ -227,28 +227,30 @@ the sinusoid would appear to be "smoother". Also note the use of the
converts lattice spacings to distance. Click on the image for a
larger version.
dimension 2
variable x equal 100
variable y equal 25
lattice hex 0.8442
region box block 0 $x 0 $y -0.5 0.5
create_box 1 box :pre
variable xx equal 0.0
variable yy equal 0.0
variable xx internal 0.0
variable yy internal 0.0
variable v equal "(0.2*v_y*ylat * cos(v_xx/xlat * 2.0*PI*4.0/v_x) + 0.5*v_y*ylat - v_yy) > 0.0"
create_atoms 1 box var v set x xx set y yy :pre
create_atoms 1 box var v set x xx set y yy
write_dump all atom sinusoid.lammpstrj :pre
:c,image(JPG/sinusoid_small.jpg,JPG/sinusoid.jpg)
The {rotate} keyword can be used with the {single} style, when adding
a single molecule to specify the orientation at which the molecule is
inserted. The axis of rotation is determined by the rotation vector
(Rx,Ry,Rz) that goes through the insertion point. The specified
{theta} determines the angle of rotation around that axis. Note that
the direction of rotation for the atoms around the rotation axis is
consistent with the right-hand rule: if your right-hand's thumb points
along {R}, then your fingers wrap around the axis in the direction of
rotation.
The {rotate} keyword can only be used with the {single} style and
when adding a single molecule. It allows specifying the orientation
at which the molecule is inserted. The axis of rotation is
determined by the rotation vector (Rx,Ry,Rz) that goes through the
insertion point. The specified {theta} determines the angle of
rotation around that axis. Note that the direction of rotation for
the atoms around the rotation axis is consistent with the right-hand
rule: if your right-hand's thumb points along {R}, then your fingers
wrap around the axis in the direction of rotation.
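For illustration, inserting one molecule rotated by 30 degrees about the z axis through the insertion point could be written as follows, where the molecule template ID, coordinates, and random seed are placeholders:
create_atoms 0 single 5.0 5.0 5.0 mol h2omol 45287 rotate 30.0 0.0 0.0 1.0 :pre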
The {units} keyword determines the meaning of the distance units used
to specify the coordinates of the one particle created by the {single}

View File

@ -18,7 +18,7 @@ style = {many} or {single/bond} or {single/angle} or {single/dihedral} :ule,l
group2-ID = ID of second group, bonds will be between atoms in the 2 groups
btype = bond type of created bonds
rmin = minimum distance between pair of atoms to bond together
rmax = minimum distance between pair of atoms to bond together
rmax = maximum distance between pair of atoms to bond together
{single/bond} args = btype batom1 batom2
btype = bond type of new bond
batom1,batom2 = atom IDs for two atoms in bond

View File

@ -45,7 +45,7 @@ args = list of arguments for a particular style :l
{xyz/gz} args = none
{xyz/mpiio} args = none :pre
{custom} or {custom/gz} or {custom/mpiio} args = list of atom attributes :l
{custom} or {custom/gz} or {custom/mpiio} or {netcdf} or {netcdf/mpiio} args = list of atom attributes :l
possible attributes = id, mol, proc, procp1, type, element, mass,
x, y, z, xs, ys, zs, xu, yu, zu,
xsu, ysu, zsu, ix, iy, iz,
@ -649,20 +649,7 @@ LAMMPS"_Section_start.html#start_3 section for more info.
The {xtc} style is part of the MISC package. It is only enabled if
LAMMPS was built with that package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info. This is
because some machines may not support the low-level XDR data format
that XTC files are written with, which will result in a compile-time
error when a low-level include file is not found. Putting this style
in a package makes it easy to exclude from a LAMMPS build for those
machines. However, the MISC package also includes two compatibility
header files and associated functions, which should be a suitable
substitute on machines that do not have the appropriate native header
files. This option can be invoked at build time by adding
-DLAMMPS_XDR to the CCFLAGS variable in the appropriate low-level
Makefile, e.g. src/MAKE/Makefile.foo. This compatibility mode has
been tested successfully on Cray XT3/XT4/XT5 and IBM BlueGene/L
machines and should also work on IBM BG/P, and Windows XP/Vista/7
machines.
LAMMPS"_Section_start.html#start_3 section for more info.
[Related commands:]

View File

@ -120,6 +120,10 @@ quantity being minimized), you MUST enable the
[Restrictions:]
To function as expected this fix command must be issued {before} a
"read_data"_read_data.html command but {after} a
"read_restart"_read_restart.html command.
This fix can only be used if LAMMPS was built with the MOLECULE
package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info on packages.

View File

@ -83,7 +83,7 @@ the following dynamic equation:
:c,image(Eqs/fix_controller1.jpg)
where {c} is the continuous time analog of the control variable,
{e}={pvar}-{setpoint} is the error in the process variable, and
{e} ={pvar}-{setpoint} is the error in the process variable, and
{alpha}, {Kp}, {Ki}, and {Kd} are constants set by the corresponding
keywords described above. The discretized version of this equation is:
@ -106,10 +106,10 @@ the value of {alpha} to reflect this, while leaving {Kp}, {Ki}, and
When choosing the values of the four constants, it is best to first
pick a value and sign for {alpha} that is consistent with the
magnitudes and signs of {pvar} and {cvar}. The magnitude of {Kp}
should then be tested over a large positive range keeping {Ki}={Kd}=0.
should then be tested over a large positive range keeping {Ki} = {Kd} =0.
A good value for {Kp} will produce a fast response in {pvar}, without
overshooting the {setpoint}. For many applications, proportional
feedback is sufficient, and so {Ki}={Kd}=0 can be used. In cases where
feedback is sufficient, and so {Ki} = {Kd} =0 can be used. In cases where
there is a substantial lag time in the response of {pvar} to a change
in {cvar}, this can be counteracted by increasing {Kd}. In situations
where {pvar} plateaus without reaching {setpoint}, this can be

View File

@ -26,16 +26,20 @@ zero or more keyword/value pairs may be appended to args :l
keyword = {mol}, {region}, {maxangle}, {pressure}, {fugacity_coeff}, {full_energy}, {charge}, {group}, {grouptype}, {intra_energy}, {tfac_insert}, or {overlap_cutoff}
{mol} value = template-ID
template-ID = ID of molecule template specified in a separate "molecule"_molecule.html command
{mcmoves} values = Patomtrans Pmoltrans Pmolrotate
Patomtrans = proportion of atom translation MC moves
Pmoltrans = proportion of molecule translation MC moves
Pmolrotate = proportion of molecule rotation MC moves
{rigid} value = fix-ID
fix-ID = ID of "fix rigid/small"_fix_rigid.html command
{shake} value = fix-ID
fix-ID = ID of "fix shake"_fix_shake.html command
{region} value = region-ID
region-ID = ID of region where MC moves are allowed
region-ID = ID of region where GCMC exchanges and MC moves are allowed
{maxangle} value = maximum molecular rotation angle (degrees)
{pressure} value = pressure of the gas reservoir (pressure units)
{fugacity_coeff} value = fugacity coefficient of the gas reservoir (unitless)
{full_energy} = compute the entire system energy when performing MC moves
{full_energy} = compute the entire system energy when performing GCMC exchanges and MC moves
{charge} value = charge of inserted atoms (charge units)
{group} value = group-ID
group-ID = group-ID for inserted atoms (string)
@ -56,34 +60,42 @@ fix 4 my_gas gcmc 1 10 10 1 123456543 300.0 -12.5 1.0 region disk :pre
[Description:]
This fix performs grand canonical Monte Carlo (GCMC) exchanges of
atoms or molecules of the given type with an imaginary ideal gas
atoms or molecules with an imaginary ideal gas
reservoir at the specified T and chemical potential (mu) as discussed
in "(Frenkel)"_#Frenkel. If used with the "fix nvt"_fix_nh.html
in "(Frenkel)"_#Frenkel. It also
attempts Monte Carlo (MC) moves (translations and molecule
rotations) within the simulation cell or
region. If used with the "fix nvt"_fix_nh.html
command, simulations in the grand canonical ensemble (muVT, constant
chemical potential, constant volume, and constant temperature) can be
performed. Specific uses include computing isotherms in microporous
materials, or computing vapor-liquid coexistence curves.
Every N timesteps the fix attempts a number of GCMC exchanges
(insertions or deletions) of gas atoms or molecules of the given type
between the simulation cell and the imaginary reservoir. It also
attempts a number of Monte Carlo moves (translations and molecule
rotations) of gas of the given type within the simulation cell or
region. The average number of attempted GCMC exchanges is X. The
average number of attempted MC moves is M. M should typically be
Every N timesteps the fix attempts both GCMC exchanges
(insertions or deletions) and MC moves of gas atoms or molecules.
On those timesteps, the average number of attempted GCMC exchanges is X,
while the average number of attempted MC moves is M.
For GCMC exchanges of either molecular or atomic gasses,
these exchanges can be either deletions or insertions,
with equal probability.
The possible choices for MC moves are translation of an atom,
translation of a molecule, and rotation of a molecule.
The relative amounts of each are determined by the optional
{mcmoves} keyword (see below).
The default behavior is as follows.
If the {mol} keyword is used, only molecule translations
and molecule rotations are performed with equal probability.
Conversely, if the {mol} keyword is not used, only atom
translations are performed.
M should typically be
chosen to be approximately equal to the expected number of gas atoms
or molecules of the given type within the simulation cell or region,
which will result in roughly one MC translation per atom or molecule
which will result in roughly one MC move per atom or molecule
per MC cycle.
For MC moves of molecular gasses, rotations and translations are each
attempted with 50% probability. For MC moves of atomic gasses,
translations are attempted 100% of the time. For MC exchanges of
either molecular or atomic gasses, deletions and insertions are each
attempted with 50% probability.
All inserted particles are always assigned to two groups: the default
group "all" and the group specified in the fix gcmc command (which can
All inserted particles are always added to two groups: the default
group "all" and the fix group specified in the fix command (which can
also be "all"). In addition, particles are also added to any groups
specified by the {group} and {grouptype} keywords. If inserted
particles are individual atoms, they are assigned the atom type given
@ -92,9 +104,9 @@ effect and must be set to zero. Instead, the type of each atom in the
inserted molecule is specified in the file read by the
"molecule"_molecule.html command.
This fix cannot be used to perform MC insertions of gas atoms or
molecules other than the exchanged type, but MC deletions,
translations, and rotations can be performed on any atom/molecule in
This fix cannot be used to perform GCMC insertions of gas atoms or
molecules other than the exchanged type, but GCMC deletions,
MC translations, and MC rotations can be performed on any atom/molecule in
the fix group. All atoms in the simulation cell can be moved using
regular time integration translations, e.g. via "fix nvt"_fix_nh.html,
resulting in a hybrid GCMC+MD simulation. A smaller-than-usual
@ -150,8 +162,8 @@ about this point.
Individual atoms are inserted, unless the {mol} keyword is used. It
specifies a {template-ID} previously defined using the
"molecule"_molecule.html command, which reads a file that defines the
molecule. The coordinates, atom types, charges, etc, as well as any
bond/angle/etc and special neighbor information for the molecule can
molecule. The coordinates, atom types, charges, etc., as well as any
bonding and special neighbor information for the molecule can
be specified in the molecule file. See the "molecule"_molecule.html
command for details. The only settings required to be in this file
are the coordinates and types of atoms in the molecule.
@ -162,7 +174,7 @@ error when it tries to find bonded neighbors. LAMMPS will warn you if
any of the atoms eligible for deletion have a non-zero molecule ID,
but does not check for this at the time of deletion.
If you wish to insert molecules via the {mol} keyword, that will be
If you wish to insert molecules using the {mol} keyword that will be
treated as rigid bodies, use the {rigid} keyword, specifying as its
value the ID of a separate "fix rigid/small"_fix_rigid.html command
which also appears in your input script.
@ -178,6 +190,15 @@ their bonds or angles constrained via SHAKE, use the {shake} keyword,
specifying as its value the ID of a separate "fix
shake"_fix_shake.html command which also appears in your input script.
Optionally, users may specify the relative amounts of different MC
moves using the {mcmoves} keyword. The values {Patomtrans},
{Pmoltrans}, {Pmolrotate} specify the average proportion of
atom translations, molecule translations, and molecule rotations,
respectively. The values must be non-negative integers or real
numbers, with at least one non-zero value. For example, (10,30,0)
would result in 25% of the MC moves being atomic translations, 75%
molecular translations, and no molecular rotations.
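As an illustrative sketch only (the fix ID, group name, molecule
template name, and numeric arguments below are placeholders, not values
taken from this page), the (10,30,0) proportions discussed above could
be requested as follows:
fix mygcmc gas gcmc 100 100 100 0 29494 298.0 -0.5 0.5 mol co2mol mcmoves 10 30 0 :pre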
Optionally, users may specify the maximum rotation angle for molecular
rotations using the {maxangle} keyword and specifying the angle in
degrees. Rotations are performed by generating a random point on the
@ -188,19 +209,19 @@ to the unit vector defined by the point on the unit sphere. The same
procedure is used for randomly rotating molecules when they are
inserted, except that the maximum angle is 360 degrees.
Note that fix GCMC does not use configurational bias MC or any other
Note that fix gcmc does not use configurational bias MC or any other
kind of sampling of intramolecular degrees of freedom. Inserted
molecules can have different orientations, but they will all have the
same intramolecular configuration, which was specified in the molecule
command input.
For atomic gasses, inserted atoms have the specified atom type, but
deleted atoms are any atoms that have been inserted or that belong to
the user-specified fix group. For molecular gasses, exchanged
deleted atoms are any atoms that have been inserted or that already
belong to the fix group. For molecular gasses, exchanged
molecules use the same atom types as in the template molecule supplied
by the user. In both cases, exchanged atoms/molecules are assigned to
two groups: the default group "all" and the group specified in the fix
gcmc command (which can also be "all").
two groups: the default group "all" and the fix group
(which can also be "all").
The chemical potential is a user-specified input parameter defined
as:
@ -241,13 +262,16 @@ which case the user-specified chemical potential is ignored. The user
may also specify the fugacity coefficient phi using the
{fugacity_coeff} keyword, which defaults to unity.
The {full_energy} option means that fix GCMC will compute the total
potential energy of the entire simulated system. The total system
energy before and after the proposed GCMC move is then used in the
The {full_energy} option means that the fix calculates the total
potential energy of the entire simulated system, instead of just
the energy of the part that is changed. The total system
energy before and after the proposed GCMC exchange or MC move
is then used in the
Metropolis criterion to determine whether or not to accept the
proposed GCMC move. By default, this option is off, in which case only
partial energies are computed to determine the difference in energy
that would be caused by the proposed GCMC move.
proposed change. By default, this option is off,
in which case only
partial energies are computed to determine the energy difference
due to the proposed change.
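For example, adapting the atomic-gas example from the Examples section
above, the option is enabled by simply appending the keyword to the fix
command:
fix 4 my_gas gcmc 1 10 10 1 123456543 300.0 -12.5 1.0 full_energy :pre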
The {full_energy} option is needed for systems with complicated
potential energy calculations, including the following:
@ -263,10 +287,11 @@ In these cases, LAMMPS will automatically apply the {full_energy}
keyword and issue a warning message.
When the {mol} keyword is used, the {full_energy} option also includes
the intramolecular energy of inserted and deleted molecules. If this
the intramolecular energy of inserted and deleted molecules, whereas
this energy is not included when {full_energy} is not used. If this
is not desired, the {intra_energy} keyword can be used to define an
amount of energy that is subtracted from the final energy when a
molecule is inserted, and added to the initial energy when a molecule
molecule is inserted, and subtracted from the initial energy when a molecule
is deleted. For molecules that have a non-zero intramolecular energy,
this will ensure roughly the same behavior whether or not the
{full_energy} option is used.
@ -291,7 +316,8 @@ include: "efield"_fix_efield.html, "gravity"_fix_gravity.html,
"temp/berendsen"_fix_temp_berendsen.html,
"temp/rescale"_fix_temp_rescale.html, and "wall fixes"_fix_wall.html.
For that energy to be included in the total potential energy of the
system (the quantity used when performing GCMC moves), you MUST enable
system (the quantity used when performing GCMC exchange and MC moves),
you MUST enable
the "fix_modify"_fix_modify.html {energy} option for that fix. The
doc pages for individual "fix"_fix.html commands specify if this
should be done.
@ -305,9 +331,14 @@ about simulating non-neutral systems with kspace on.
Use of this fix typically will cause the number of atoms to fluctuate,
therefore, you will want to use the
"compute_modify"_compute_modify.html command to insure that the
"compute_modify dynamic/dof"_compute_modify.html command to insure that the
current number of atoms is used as a normalizing factor each time
temperature is computed. Here is the necessary command:
temperature is computed. A simple example of this is:
compute_modify thermo_temp dynamic yes :pre
A more complicated example is listed earlier on this page
in the context of NVT dynamics.
NOTE: If the density of the cell is initially very small or zero, and
increases to a much larger density after a period of equilibration,
@ -327,17 +358,9 @@ assigning an infinite positive energy to all new configurations that
place any pair of atoms closer than the specified overlap cutoff
distance.
compute_modify thermo_temp dynamic yes :pre
If LJ units are used, note that a value of 0.18292026 is used by this
fix as the reduced value for Planck's constant. This value was
derived from LJ parameters for argon, where h* = h/sqrt(sigma^2 *
epsilon * mass), sigma = 3.429 angstroms, epsilon/k = 121.85 K, and
mass = 39.948 amu.
The {group} keyword assigns all inserted atoms to the
The {group} keyword adds all inserted atoms to the
"group"_group.html of the group-ID value. The {grouptype} keyword
assigns all inserted atoms of the specified type to the
adds all inserted atoms of the specified type to the
"group"_group.html of the group-ID value.
[Restart, fix_modify, output, run start/stop, minimize info:]
@ -384,7 +407,8 @@ Can be run in parallel, but aspects of the GCMC part will not scale
well in parallel. Only usable for 3D simulations.
When using fix gcmc in combination with fix shake or fix rigid,
only gcmc exchange moves are supported.
only GCMC exchange moves are supported, so the argument
{M} must be zero.
Note that very lengthy simulations involving insertions/deletions of
billions of gas molecules may run out of atom or molecule IDs and
@ -409,7 +433,9 @@ the user for each subsequent fix gcmc command.
[Default:]
The option defaults are mol = no, maxangle = 10, overlap_cutoff = 0.0,
fugacity_coeff = 1, and full_energy = no,
fugacity_coeff = 1.0, intra_energy = 0.0, tfac_insert = 1.0.
(Patomtrans, Pmoltrans, Pmolrotate) = (1, 0, 0) for mol = no and
(0, 1, 1) for mol = yes. full_energy = no,
except for the situations where full_energy is required, as
listed above.

View File

@ -64,10 +64,10 @@ not performed once every {N} steps by this command. Instead it is
performed (typically) only a small number of times and the elapsed
times are used to predict when the end-of-the-run will be. Both of
these attributes can be useful when performing benchmark calculations
for a desired length of time with minmimal overhead. For example, if
for a desired length of time with minimal overhead. For example, if
a run is performing 1000s of timesteps/sec, the overhead for syncing
the timer frequently across a large number of processors may be
non-negligble.
non-negligible.
Equal-style variables evaluate to a numeric value. See the
"variable"_variable.html command for a description. They calculate
@ -125,7 +125,7 @@ to the screen and logfile when the halt condition is triggered. If
{message} is set to yes, a one line message with the values that
triggered the halt is printed. If {message} is set to no, no message
is printed; the run simply exits. The latter may be desirable for
post-processing tools that extract thermodyanmic information from log
post-processing tools that extract thermodynamic information from log
files.
[Restart, fix_modify, output, run start/stop, minimize info:]

View File

@ -44,7 +44,7 @@ the velocity for the force evaluation:
where the parameter <font size="4">&lambda;</font> depends on the
specific choice of DPD parameters, and needs to be tuned on a
case-by-case basis. Specification of a {lambda} value is opttional.
case-by-case basis. Specification of a {lambda} value is optional.
If specified, the setting must be from 0.0 to 1.0. If not specified,
a default value of 0.5 is used, which effectively reproduces the
standard velocity-Verlet (VV) scheme. For more details, see

View File

@ -113,9 +113,9 @@ keeping the replicas equally spaced.
:line
The keyword {perp} specifies if and how a perpendicual nudging force
The keyword {perp} specifies if and how a perpendicular nudging force
is computed. It adds a spring force perpendicular to the path in
order to prevent the path from becoming too kinky. It can
order to prevent the path from becoming too strongly kinked. It can
significantly improve the convergence of the NEB calculation when the
resolution is poor, i.e. when few replicas are used; see
"(Maras)"_#Maras1 for details.
@ -138,17 +138,17 @@ By default, no additional forces act on the first and last replicas
during the NEB relaxation, so these replicas simply relax toward their
respective local minima. By using the key word {end}, additional
forces can be applied to the first and/or last replicas, to enable
them to relax toward a MEP while constraining their energy.
them to relax toward a MEP while constraining their energy E to the
target energy ETarget.
The interatomic force Fi for the specified replica becomes:
If ETarget>E, the interatomic force Fi for the specified replica becomes:
Fi = -Grad(V) + (Grad(V) dot T' + (E-ETarget)*Kspring3) T', {when} Grad(V) dot T' < 0
Fi = -Grad(V) + (Grad(V) dot T' + (ETarget-E)*Kspring3) T', {when} Grad(V) dot T' > 0
:pre
where E is the current energy of the replica and ETarget is the target
energy. The "spring" constant on the difference in energies is the
specified {Kspring3} value.
The "spring" constant on the difference in energies is the specified
{Kspring3} value.
When {estyle} is specified as {first}, the force is applied to the
first replica. When {estyle} is specified as {last}, the force is
@ -183,10 +183,9 @@ After converging a NEB calculation using an {estyle} of
have a larger energy than the first replica. If this is not the case,
the path is probably not a MEP.
Finally, note that if the last replica converges toward a local
minimum which has a larger energy than the energy of the first
replica, a NEB calculation using an {estyle} of {last/efirst} or
{last/efirst/middle} cannot reach final convergence.
Finally, note that the last replica may never reach the target energy
if it is stuck in a local minimum which has a larger energy than the
target energy.
[Restart, fix_modify, output, run start/stop, minimize info:]

View File

@ -111,6 +111,10 @@ need to communicate their new values to/from ghost atoms, an operation
that can be invoked from within a "pair style"_pair_style.html or
"fix"_fix.html or "compute"_compute.html that you write.
NOTE: If this fix is defined [after] the simulation box is created,
a 'run 0' command should be issued to properly initialize the storage
created by this fix.
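A minimal sketch of this ordering, using a placeholder data file name
and property name:
read_data data.snapshot
fix 2 all property/atom i_flag
run 0 :pre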
:line
This fix is one of a small number that can be defined in an input
@ -155,7 +159,7 @@ these commands could be used:
fix prop all property/atom mol
variable cluster atom ((id-1)/10)+1
set id * mol v_cluster :pre
set atom * mol v_cluster :pre
The "atom-style variable"_variable.html will create values for atoms
with IDs 31,32,33,...40 that are 4.0,4.1,4.2,...,4.9. When the

View File

@ -6,14 +6,14 @@
:line
fix python command :h3
fix python/invoke command :h3
[Syntax:]
fix ID group-ID python N callback function_name :pre
fix ID group-ID python/invoke N callback function_name :pre
ID, group-ID are ignored by this fix :ulb,l
python = style name of this fix command :l
python/invoke = style name of this fix command :l
N = execute every N steps :l
callback = {post_force} or {end_of_step} :l
{post_force} = callback after force computations on atoms every N time steps
@ -36,8 +36,8 @@ def end_of_step_callback(lammps_ptr):
# access LAMMPS state using Python interface
""" :pre
fix pf all python 50 post_force post_force_callback
fix eos all python 50 end_of_step end_of_step_callback :pre
fix pf all python/invoke 50 post_force post_force_callback
fix eos all python/invoke 50 end_of_step end_of_step_callback :pre
[Description:]

102
doc/src/fix_python_move.txt Normal file
View File

@ -0,0 +1,102 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
fix python/move command :h3
[Syntax:]
fix python/move pymodule.CLASS :pre
pymodule.CLASS = use class [CLASS] in module/file [pymodule] to compute how to move atoms
[Examples:]
fix 1 all python/move py_nve.NVE
fix 1 all python/move py_nve.NVE_OPT :pre
[Description:]
The {python/move} fix style provides a way to define how particles
are moved during an MD run from Python script code that is loaded from
a file into LAMMPS and executed at the various steps where other fixes
can be executed. This Python script must contain specific Python class
definitions.
This makes it possible to implement complex position updates and also
modified time integration methods. Since Python is an interpreted
language, however, this fix can be moderately to significantly slower
than the corresponding C++ code. For specific cases, this performance
penalty can be limited through effective use of NumPy.
:line
The python module file has to start with the following code:
from __future__ import print_function
import lammps
import ctypes
import traceback
import numpy as np
#
class LAMMPSFix(object):
    def __init__(self, ptr, group_name="all"):
        self.lmp = lammps.lammps(ptr=ptr)
        self.group_name = group_name
#
class LAMMPSFixMove(LAMMPSFix):
    def __init__(self, ptr, group_name="all"):
        super(LAMMPSFixMove, self).__init__(ptr, group_name)
#
    def init(self):
        pass
#
    def initial_integrate(self, vflag):
        pass
#
    def final_integrate(self):
        pass
#
    def initial_integrate_respa(self, vflag, ilevel, iloop):
        pass
#
    def final_integrate_respa(self, ilevel, iloop):
        pass
#
    def reset_dt(self):
        pass :pre
Any classes implementing new atom motion functionality have to be
derived from the [LAMMPSFixMove] class, overriding the available
methods as needed.
Examples for how to do this are in the {examples/python} folder.
:line
[Restart, fix_modify, output, run start/stop, minimize info:]
No information about this fix is written to "binary restart
files"_restart.html. None of the "fix_modify"_fix_modify.html options
are relevant to this fix. No global or per-atom quantities are stored
by this fix for access by various "output
commands"_Section_howto.html#howto_15. No parameter of this fix can
be used with the {start/stop} keywords of the "run"_run.html command.
This fix is not invoked during "energy minimization"_minimize.html.
[Restrictions:]
This fix is part of the PYTHON package. It is only enabled if
LAMMPS was built with that package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
[Related commands:]
"fix nve"_fix_nve.html, "fix python/invoke"_fix_python_invoke.html
[Default:] none

View File

@ -7,11 +7,17 @@
:line
fix rigid command :h3
fix rigid/omp command :h3
fix rigid/nve command :h3
fix rigid/nve/omp command :h3
fix rigid/nvt command :h3
fix rigid/nvt/omp command :h3
fix rigid/npt command :h3
fix rigid/npt/omp command :h3
fix rigid/nph command :h3
fix rigid/nph/omp command :h3
fix rigid/small command :h3
fix rigid/small/omp command :h3
fix rigid/nve/small command :h3
fix rigid/nvt/small command :h3
fix rigid/npt/small command :h3
@ -28,7 +34,7 @@ bodystyle = {single} or {molecule} or {group} :l
{molecule} args = none
{custom} args = {i_propname} or {v_varname}
i_propname = an integer property defined via fix property/atom
v_varname = an atom-style or atomfile-style variable
v_varname = an atom-style or atomfile-style variable
{group} args = N groupID1 groupID2 ...
N = # of groups
groupID1, groupID2, ... = list of N group IDs :pre
@ -93,7 +99,7 @@ fix 1 clump rigid custom v_bodyid :pre
fix 0 all property/atom i_bodyid
read_restart data.rigid fix 0 NULL Bodies
fix 1 clump rigid/small custom i_bodyid :pre
[Description:]
Treat one or more sets of atoms as independent rigid bodies. This

View File

@ -58,7 +58,7 @@ required in order to eliminate velocity components along the bonds
In order to formulate individual constraints for SHAKE and RATTLE,
focus on a single molecule whose bonds are constrained. Let Ri and Vi
be the position and velocity of atom {i} at time {n}, for
{i}=1,...,{N}, where {N} is the number of sites of our reference
{i} =1,...,{N}, where {N} is the number of sites of our reference
molecule. The distance vector between sites {i} and {j} is given by
:c,image(Eqs/fix_rattle_rij.jpg)

View File

@ -14,15 +14,12 @@ fix ID group-ID smd/integrate_tlsph keyword values :pre
ID, group-ID are documented in "fix"_fix.html command
smd/integrate_tlsph = style name of this fix command
zero or more keyword/value pairs may be appended :ul
keyword = {limit_velocity} :l
zero or more keyword/value pairs may be appended
keyword = {limit_velocity} :ul
{limit_velocity} value = max_vel
max_vel = maximum allowed velocity :pre
:ule
[Examples:]
fix 1 all smd/integrate_tlsph

View File

@ -14,9 +14,8 @@ fix ID group-ID smd/integrate_ulsph keyword :pre
ID, group-ID are documented in "fix"_fix.html command
smd/integrate_ulsph = style name of this fix command
zero or more keyword/value pairs may be appended :ul
keyword = adjust_radius or limit_velocity
zero or more keyword/value pairs may be appended
keyword = adjust_radius or limit_velocity :ul
adjust_radius values = adjust_radius_factor min_nn max_nn
adjust_radius_factor = factor which scale the smooth/kernel radius
@ -28,7 +27,7 @@ limit_velocity values = max_velocity
[Examples:]
fix 1 all smd/integrate_ulsph adjust_radius 1.02 25 50 :pre
fix 1 all smd/integrate_ulsph adjust_radius 1.02 25 50
fix 1 all smd/integrate_ulsph limit_velocity 1000 :pre
[Description:]
@ -38,7 +37,7 @@ See "this PDF guide"_PDF/SMD_LAMMPS_userguide.pdf to using Smooth Mach Dynamics
The {adjust_radius} keyword activates dynamic adjustment of the per-particle SPH smoothing kernel radius such that the number of neighbors per particles remains
within the interval {min_nn} to {max_nn}. The parameter {adjust_radius_factor} determines the amount of adjustment per timestep. Typical values are
{adjust_radius_factor}=1.02, {min_nn}=15, and {max_nn}=20.
{adjust_radius_factor} =1.02, {min_nn} =15, and {max_nn} =20.
The {limit_velocity} keyword will control the velocity, scaling the norm of
the velocity vector to max_vel in case it exceeds this velocity limit.

View File

@ -0,0 +1,19 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
fix wall/surface/globale command :h3
[Description:]
This feature is not yet implemented.
[Related commands:]
"dump image"_dump_image.html

View File

@ -34,7 +34,7 @@ by performing a nonequilibrium thermodynamic integration between the
solid of interest and an Einstein crystal. A detailed explanation of
how to use this command and choose its parameters for optimal
performance and accuracy is given in the paper by
"Freitas"_#Freitas. The paper also presents a short summary of the
"Freitas"_#Freitas1. The paper also presents a short summary of the
theory of nonequilibrium thermodynamic integrations.
The thermodynamic integration procedure is performed by rescaling the
@ -67,13 +67,13 @@ of lambda is kept equal to zero and the fix has no other effect on the
dynamics of the system.
The process described above is known as nonequilibrium thermodynamic
integration and is has been shown ("Freitas"_#Freitas) to present a
integration and has been shown ("Freitas"_#Freitas1) to offer
much superior efficiency when compared to standard equilibrium
methods. The reason the switching is made in both directions
(potential to Einstein crystal and back) is to eliminate the
dissipated heat due to the nonequilibrium process. Further details
about nonequilibrium thermodynamic integration and its implementation
in LAMMPS is available in "Freitas"_#Freitas.
in LAMMPS is available in "Freitas"_#Freitas1.
The {function} keyword allows the use of two different lambda
paths. Option {1} results in a constant rate of change of lambda with
@ -94,7 +94,7 @@ thermodynamic integration. The use of option {2} is recommended since
it results in better accuracy and less dissipation without any
increase in computational resources cost.
NOTE: As described in "Freitas"_#Freitas, it is important to keep the
NOTE: As described in "Freitas"_#Freitas1, it is important to keep the
center-of-mass fixed during the thermodynamic integration. A nonzero
total velocity will result in divergences during the integration due
to the fact that the atoms are 'attached' to their equilibrium
@ -156,7 +156,7 @@ The keyword default is function = 1.
:line
:link(Freitas)
:link(Freitas1)
[(Freitas)] Freitas, Asta, and de Koning, Computational Materials
Science, 112, 333 (2016).

View File

@ -115,7 +115,8 @@ Fixes :h1
fix_press_berendsen
fix_print
fix_property_atom
fix_python
fix_python_invoke
fix_python_move
fix_qbmsst
fix_qeq
fix_qeq_comb
@ -147,6 +148,7 @@ Fixes :h1
fix_srd
fix_store_force
fix_store_state
fix_surface_global
fix_temp_berendsen
fix_temp_csvr
fix_temp_rescale

View File

@ -0,0 +1,65 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
improper_style inversion/harmonic command :h3
[Syntax:]
improper_style inversion/harmonic :pre
[Examples:]
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 :pre
[Description:]
The {inversion/harmonic} improper style follows the Wilson-Decius
out-of-plane angle definition and uses a harmonic potential:
:c,image(Eqs/improper_inversion_harmonic.jpg)
where K is the force constant and omega is the angle evaluated for
all three axis-plane combinations centered around the atom I. For
the IL axis and the IJK plane omega looks as follows:
:c,image(Eqs/umbrella.jpg)
Note that the {inversion/harmonic} angle term evaluation differs from
the "improper_umbrella"_improper_umbrella.html style due to the cyclic
evaluation of all possible angles omega.
The following coefficients must be defined for each improper type via
the "improper_coeff"_improper_coeff.html command as in the example
above, or in the data file or restart files read by the
"read_data"_read_data.html or "read_restart"_read_restart.html
commands:
K (energy)
omega0 (degrees) :ul
If omega0 = 0 the potential term has a minimum for the planar
structure. Otherwise it has two minima at +/- omega0, with a barrier
in between.
:line
[Restrictions:]
This improper style can only be used if LAMMPS was built with the
USER-MOFFF package. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info on packages.
[Related commands:]
"improper_coeff"_improper_coeff.html
[Default:] none
:line

View File

@ -12,6 +12,7 @@ Improper Styles :h1
improper_fourier
improper_harmonic
improper_hybrid
improper_inversion_harmonic
improper_none
improper_ring
improper_umbrella

View File

@ -12,7 +12,7 @@ info command :h3
info args :pre
args = one or more of the following keywords: {out}, {all}, {system}, {memory}, {communication}, {computes}, {dumps}, {fixes}, {groups}, {regions}, {variables}, {styles}, {time}, or {configuration}
args = one or more of the following keywords: {out}, {all}, {system}, {memory}, {communication}, {computes}, {dumps}, {fixes}, {groups}, {regions}, {variables}, {coeffs}, {styles}, {time}, or {configuration}
{out} values = {screen}, {log}, {append} filename, {overwrite} filename
{styles} values = {all}, {angle}, {atom}, {bond}, {compute}, {command}, {dump}, {dihedral}, {fix}, {improper}, {integrate}, {kspace}, {minimize}, {pair}, {region} :ul
@ -81,6 +81,11 @@ The {variables} category prints a list of all currently defined
variables, their names, styles, definition and last computed value, if
available.
The {coeffs} category prints a list for each defined force style
(pair, bond, angle, dihedral, improper) indicating which of the
corresponding coefficients have been set. This can be very helpful
to debug error messages like "All pair coeffs are not set".
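For example, after defining a pair style, the currently set (and unset)
coefficients can be listed as follows; the pair style and coefficient
values here are only illustrative:
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0
info coeffs :pre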
The {styles} category prints the list of styles available in the
current LAMMPS binary. It supports one of the following options
to control which category of styles is printed out:

View File

@ -1,6 +1,5 @@
#HTMLDOC 1.8.27
-t pdf14 -f "../Manual.pdf" --book --toclevels 4 --no-numbered --toctitle "Table of Contents" --title --textcolor #000000 --linkcolor #0000ff --linkstyle plain --bodycolor #ffffff --size Universal --left 1.00in --right 0.50in --top 0.50in --bottom 0.50in --header .t. --header1 ... --footer ..1 --nup 1 --tocheader .t. --tocfooter ..i --portrait --color --no-pscommands --no-xrxcomments --compression=1 --jpeg=0 --fontsize 11.0 --fontspacing 1.2 --headingfont helvetica --bodyfont times --headfootsize 11.0 --headfootfont helvetica --charset iso-8859-1 --links --embedfonts --pagemode document --pagelayout single --firstpage c1 --pageeffect none --pageduration 10 --effectduration 1.0 --no-encryption --permissions all --owner-password "" --user-password "" --browserwidth 680 --no-strict --no-overflow
#HTMLDOC 1.8.28
-t pdf14 -f "../Manual.pdf" --book --toclevels 4 --no-numbered --toctitle "Table of Contents" --title --textcolor #000000 --linkcolor #0000ff --linkstyle plain --bodycolor #ffffff --size Universal --left 1.00in --right 0.50in --top 0.50in --bottom 0.50in --header .t. --header1 ... --footer ..1 --nup 1 --tocheader .t. --tocfooter ..i --portrait --color --no-pscommands --no-xrxcomments --compression=9 --jpeg=0 --fontsize 11.0 --fontspacing 1.2 --headingfont Sans --bodyfont Serif --headfootsize 11.0 --headfootfont Sans-Bold --charset iso-8859-15 --links --embedfonts --pagemode document --pagelayout single --firstpage c1 --pageeffect none --pageduration 10 --effectduration 1.0 --no-encryption --permissions all --owner-password "" --user-password "" --browserwidth 680 --no-strict --no-overflow
Manual.html
Section_intro.html
Section_start.html
@ -244,7 +243,8 @@ fix_pour.html
fix_press_berendsen.html
fix_print.html
fix_property_atom.html
fix_python.html
fix_python_invoke.html
fix_python_move.html
fix_qbmsst.html
fix_qeq.html
fix_qeq_comb.html
@ -443,6 +443,7 @@ pair_edip.html
pair_eff.html
pair_eim.html
pair_exp6_rx.html
pair_extep.html
pair_gauss.html
pair_gayberne.html
pair_gran.html
@ -511,6 +512,7 @@ pair_tersoff_mod.html
pair_tersoff_zbl.html
pair_thole.html
pair_tri_lj.html
pair_ufm.html
pair_vashishta.html
pair_yukawa.html
pair_yukawa_colloid.html

View File

@ -322,9 +322,9 @@ the fix neb command.
The forward (reverse) energy barrier is the potential energy of the
highest replica minus the energy of the first (last) replica.
Supplementary informations for all replicas can be printed out to the
screen and master log.lammps file by adding the verbose keyword. These
informations include the following. The "path angle" (pathangle) for
Supplementary information for all replicas can be printed out to the
screen and master log.lammps file by adding the verbose keyword. This
information includes the following. The "path angle" (pathangle) for
the replica i which is the angle between the 3N-length vectors (Ri-1 -
Ri) and (Ri+1 - Ri) (where Ri is the atomic coordinates of replica
i). A "path angle" of 180 indicates that replicas i-1, i and i+1 are
@ -339,8 +339,8 @@ energy gradient of image i. ReplicaForce is the two-norm of the
3N-length force vector (including nudging forces) for replica i.
MaxAtomForce is the maximum force component of any atom in replica i.
When a NEB calculation does not converge properly, these suplementary
informations can help understanding what is going wrong. For instance
When a NEB calculation does not converge properly, the supplementary
information can help in understanding what is going wrong. For instance,
when the path angle becomes acute, the definition of the tangent used in
the NEB calculation is questionable and the NEB calculation may diverge
"(Maras)"_#Maras2.

View File

@ -0,0 +1,19 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
pair_style body/rounded/polygon command :h3
[Description:]
Note: This feature is not yet implemented.
[Related commands:]
"pair_style body"_pair_body.html
[Default:] none

View File

@ -0,0 +1,138 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
pair_style buck6d/coul/gauss/dsf :h3
pair_style buck6d/coul/gauss/long :h3
[Syntax:]
pair_style style args :pre
style = {buck6d/coul/gauss/dsf} or {buck6d/coul/gauss/long}
args = list of arguments for a particular style :ul
{buck6d/coul/gauss/dsf} args = smooth cutoff (cutoff2)
smooth = smoothing onset within Buckingham cutoff (ratio)
cutoff = global cutoff for Buckingham (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (optional) (distance units)
{buck6d/coul/gauss/long} args = smooth smooth2 cutoff (cutoff2)
smooth = smoothing onset within Buckingham cutoff (ratio)
smooth2 = smoothing onset within Coulombic cutoff (ratio)
cutoff = global cutoff for Buckingham (and Coulombic if only 1 arg) (distance units)
cutoff2 = global cutoff for Coulombic (optional) (distance units) :pre
[Examples:]
pair_style buck6d/coul/gauss/dsf 0.9000 12.0000
pair_coeff 1 1 1030. 3.061 457.179 4.521 0.608 :pre
pair_style buck6d/coul/gauss/long 0.9000 1.0000 12.0000
pair_coeff 1 1 1030. 3.061 457.179 4.521 0.608 :pre
[Description:]
The {buck6d/coul/gauss} styles evaluate vdW and Coulomb
interactions following the MOF-FF force field after
"(Schmid)"_#Schmid. The vdW term of the {buck6d} styles
computes a dispersion damped Buckingham potential:
:c,image(Eqs/pair_buck6d.jpg)
where A and C are force constants, kappa is an ion-pair dependent
reciprocal length parameter, D is a dispersion correction parameter,
and the cutoff Rc truncates the interaction distance.
The first term in the potential corresponds to the Buckingham
repulsion term and the second term to the dispersion attraction with
a damping correction analogous to the Grimme correction used in DFT.
The latter corrects for artifacts occurring at short distances which
become an issue for soft vdW potentials.
The {buck6d} styles include a smoothing function which is invoked
according to the global smoothing parameter within the specified
cutoff. For example, a parameter of 0.9 invokes the smoothing
within 90% of the cutoff. No smoothing is applied at a value
of 1.0. For the {gauss/dsf} style this smoothing is only applicable
to the dispersion damped Buckingham potential. For the {gauss/long}
styles the smoothing function can also be invoked for the real
space Coulomb interactions, which enforces continuous energies and
forces at the cutoff.
Both styles {buck6d/coul/gauss/dsf} and {buck6d/coul/gauss/long}
evaluate a Coulomb potential using spherical Gaussian type charge
distributions which effectively dampen electrostatic interactions
for high charges at close distances. The electrostatic potential
is thus evaluated as:
:c,image(Eqs/pair_coul_gauss.jpg)
where C is an energy-conversion constant, Qi and Qj are the
charges on the 2 atoms, epsilon is the dielectric constant which
can be set by the "dielectric"_dielectric.html command, alpha is
ion pair dependent damping parameter and erf() is the error-function.
The cutoff Rc truncates the interaction distance.
The style {buck6d/coul/gauss/dsf} computes the Coulomb interaction
via the damped shifted force model described in "(Fennell)"_#Fennell
approximating an Ewald sum similar to the "pair coul/dsf"_pair_coul.html
styles. In {buck6d/coul/gauss/long} an additional damping factor is
applied to the Coulombic term so it can be used in conjunction with the
"kspace_style"_kspace_style.html command and its {ewald} or {pppm}
options. The Coulombic cutoff in this case separates the real and
reciprocal space evaluation of the Ewald sum.
If one cutoff is specified it is used for both the vdW and Coulomb
terms. If two cutoffs are specified, the first is used as the cutoff
for the vdW terms, and the second is the cutoff for the Coulombic term.
The following coefficients must be defined for each pair of atoms
types via the "pair_coeff"_pair_coeff.html command as in the examples
above, or in the data file or restart files read by the
"read_data"_read_data.html or "read_restart"_read_restart.html
commands:
A (energy units)
rho (distance^-1 units)
C (energy-distance^6 units)
D (distance^14 units)
alpha (distance^-1 units)
cutoff (distance units) :ul
The second coefficient, rho, must be greater than zero. The last
coefficient is optional. If not specified, the global vdW cutoff
is used.
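As a sketch, reusing the coefficients from the example above and
appending an assumed per-pair cutoff of 12.0 as the optional last
value:
pair_coeff 1 1 1030. 3.061 457.179 4.521 0.608 12.0 :pre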
:line
[Mixing, shift, table, tail correction, restart, rRESPA info]:
These pair styles do not support mixing. Thus, coefficients for all
I,J pairs must be specified explicitly.
These styles do not support the "pair_modify"_pair_modify.html shift
option for the energy. Instead the smoothing function should be applied
by setting the global smoothing parameter to a value < 1.0.
These styles write their information to "binary restart
files"_restart.html, so pair_style and pair_coeff commands do not need
to be specified in an input script that reads a restart file.
[Restrictions:]
These styles are part of the USER-MOFFF package. They are only enabled
if LAMMPS was built with that package. See the
"Making LAMMPS"_Section_start.html#start_3 section for more info.
[Related commands:]
"pair_coeff"_pair_coeff.html
[Default:] none
:link(Schmid)
[(Schmid)] S. Bureekaew, S. Amirjalayer, M. Tafipolsky, C. Spickermann, T.K. Roy and R. Schmid, Phys. Status Solidi B, 6, 1128 (2013).
:link(Fennell)
[(Fennell)] C. J. Fennell, J. D. Gezelter, J Chem Phys, 124, 234104 (2006).

40
doc/src/pair_extep.txt Normal file
View File

@ -0,0 +1,40 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
pair_style extep command :h3
[Syntax:]
pair_style extep :pre
[Examples:]
pair_style extep
pair_coeff * * BN.extep B N :pre
[Description:]
Style {extep} computes the Extended Tersoff Potential (ExTeP)
interactions as described in "(Los2017)"_#Los2017.
:line
[Restrictions:] none
[Related commands:]
"pair_tersoff" pair_tersoff.html
[Default:] none
:line
:link(Los2017)
[(Los2017)] J. H. Los et al. "Extended Tersoff potential for boron nitride:
Energetics and elastic properties of pristine and defective h-BN",
Phys. Rev. B 96 (184108), 2017.

View File

@ -28,7 +28,7 @@ The parameter <contact_stiffness> has units of pressure and should equal roughly
of the Young's modulus (or bulk modulus in the case of fluids) of the material model associated with the SPH particles.
The parameter {scale_factor} can be used to scale the particles' contact radii. This can be useful to control how close
particles can approach each other. Usually, {scale_factor}=1.0.
particles can approach each other. Usually, {scale_factor} =1.0.
:line

View File

@ -29,7 +29,7 @@ The parameter <contact_stiffness> has units of pressure and should equal roughly
of the Young's modulus (or bulk modulus in the case of fluids) of the material model associated with the SPH particle
The parameter {scale_factor} can be used to scale the particles' contact radii. This can be useful to control how close
particles can approach the triangulated surface. Usually, {scale_factor}=1.0.
particles can approach the triangulated surface. Usually, {scale_factor} =1.0.
:line

View File

@ -12,8 +12,8 @@ pair_style smd/ulsph command :h3
pair_style smd/ulsph args :pre
these keywords must be given :l
keyword = {*DENSITY_SUMMATION} or {*DENSITY_CONTINUITY} and {*VELOCITY_GRADIENT} or {*NO_VELOCITY_GRADIENT} and {*GRADIENT_CORRECTION} or {*NO_GRADIENT_CORRECTION}
these keywords must be given :ul
keyword = {*DENSITY_SUMMATION} or {*DENSITY_CONTINUITY} and {*VELOCITY_GRADIENT} or {*NO_VELOCITY_GRADIENT} and {*GRADIENT_CORRECTION} or {*NO_GRADIENT_CORRECTION} :pre
[Examples:]

View File

@ -7,6 +7,7 @@
:line
pair_style snap command :h3
pair_style snap/kk command :h3
[Syntax:]
@ -171,6 +172,29 @@ This pair style can only be used via the {pair} keyword of the
:line
Styles with a {gpu}, {intel}, {kk}, {omp}, or {opt} suffix are
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed in "Section 5"_Section_accelerate.html
of the manual. The accelerated styles take the same arguments and
should produce the same results, except for round-off and precision
issues.
These accelerated styles are part of the GPU, USER-INTEL, KOKKOS,
USER-OMP and OPT packages, respectively. They are only enabled if
LAMMPS was built with those packages. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the "-suffix command-line
switch"_Section_start.html#start_6 when you invoke LAMMPS, or you can
use the "suffix"_suffix.html command in your input script.
See "Section 5"_Section_accelerate.html of the manual for
more instructions on how to use the accelerated styles effectively.
:line
[Restrictions:]
This style is part of the SNAP package. It is only enabled if

135
doc/src/pair_ufm.txt Normal file
View File

@ -0,0 +1,135 @@
"LAMMPS WWW Site"_lws - "LAMMPS Documentation"_ld - "LAMMPS Commands"_lc :c
:link(lws,http://lammps.sandia.gov)
:link(ld,Manual.html)
:link(lc,Section_commands.html#comm)
:line
pair_style ufm command :h3
pair_style ufm/gpu command :h3
pair_style ufm/omp command :h3
pair_style ufm/opt command :h3
[Syntax:]
pair_style ufm cutoff :pre
cutoff = global cutoff for {ufm} interactions (distance units) :ul
[Examples:]
pair_style ufm 4.0
pair_coeff 1 1 100.0 1.0 2.5
pair_coeff * * 100.0 1.0 :pre
pair_style ufm 4.0
pair_coeff * * 10.0 1.0
variable prefactor equal ramp(10,100)
fix 1 all adapt 1 pair ufm epsilon * * v_prefactor :pre
[Description:]
Style {ufm} computes pairwise interactions using the Uhlenbeck-Ford model (UFM) potential "(Paula Leite2016)"_#PL2 which is given by
:c,image(Eqs/pair_ufm.jpg)
where rc is the cutoff, sigma is a distance scale, and epsilon is an energy scale, i.e., the product of the Boltzmann constant kB, the temperature T, and the Uhlenbeck-Ford p-parameter, which controls
the softness of the interactions "(Paula Leite2017)"_#PL1.
This model is useful as a reference system for fluid-phase free-energy calculations "(Paula Leite2016)"_#PL2.
The following coefficients must be defined for each pair of atom types
via the "pair_coeff"_pair_coeff.html command as in the examples above,
or in the data file or restart files read by the
"read_data"_read_data.html or "read_restart"_read_restart.html
commands, or by mixing as described below:
epsilon (energy units)
sigma (distance units)
cutoff (distance units) :ul
The last coefficient is optional. If not specified, the global {ufm}
cutoff is used.
The "fix adapt"_fix_adapt.html command can be used to vary epsilon and sigma for this pair style over the course of a simulation, in which case
pair_coeff settings for epsilon and sigma must still be specified, but will be
overridden. For example these commands will vary the prefactor epsilon for
all pairwise interactions from 10.0 at the beginning to 100.0 at the end
of a run:
variable prefactor equal ramp(10,100)
fix 1 all adapt 1 pair ufm epsilon * * v_prefactor :pre
NOTE: The thermodynamic integration procedure can be performed with this potential using "fix adapt"_fix_adapt.html. This command will rescale the force on each atom by varying a scale variable, which always starts with value 1.0. The syntax is the same described above, however, changing epsilon to scale. A detailed explanation of how to use this command and perform nonequilibrium thermodynamic integration in LAMMPS is given in the paper by "(Freitas)"_#Freitas2.
:line
Styles with a {gpu}, {intel}, {kk}, {omp}, or {opt} suffix are
functionally the same as the corresponding style without the suffix.
They have been optimized to run faster, depending on your available
hardware, as discussed in "Section 5"_Section_accelerate.html
of the manual. The accelerated styles take the same arguments and
should produce the same results, except for round-off and precision
issues.
These accelerated styles are part of the GPU, USER-INTEL, KOKKOS,
USER-OMP and OPT packages, respectively. They are only enabled if
LAMMPS was built with those packages. See the "Making
LAMMPS"_Section_start.html#start_3 section for more info.
You can specify the accelerated styles explicitly in your input script
by including their suffix, or you can use the "-suffix command-line
switch"_Section_start.html#start_6 when you invoke LAMMPS, or you can
use the "suffix"_suffix.html command in your input script.
See "Section 5"_Section_accelerate.html of the manual for
more instructions on how to use the accelerated styles effectively.
:line
[Mixing, shift, table, tail correction, restart, rRESPA info]:
For atom type pairs I,J and I != J, the A coefficient and cutoff
distance for this pair style can be mixed. A is always mixed via a
{geometric} rule. The cutoff is mixed according to the pair_modify
mix value. The default mix value is {geometric}. See the
"pair_modify" command for details.
This pair style supports the "pair_modify"_pair_modify.html shift option for the energy of the pair interaction.
The "pair_modify"_pair_modify.html table and tail are not relevant for this
pair style.
This pair style does not support the "pair_modify"_pair_modify.html tail option for adding long-range tail corrections to energy and pressure.
This pair style writes its information to "binary restart
files"_restart.html, so pair_style and pair_coeff commands do not need
to be specified in an input script that reads a restart file.
This pair style can only be used via the {pair} keyword of the
"run_style respa"_run_style.html command. It does not support the
{inner}, {middle}, {outer} keywords.
:line
[Restrictions:] none
[Related commands:]
"pair_coeff"_pair_coeff.html, "fix adapt"_fix_adapt.html
[Default:] none
:link(PL1)
[(Paula Leite2017)] Paula Leite, Santos-Florez, and de Koning, Phys Rev E, 96,
32115 (2017).
:link(PL2)
[(Paula Leite2016)] Paula Leite, Freitas, Azevedo, and de Koning, J Chem Phys, 126,
044509 (2016).
:link(Freitas2)
[(Freitas)] Freitas, Asta, and de Koning, Computational Materials Science, 112, 333 (2016).

View File

@ -9,6 +9,7 @@
pair_style yukawa command :h3
pair_style yukawa/gpu command :h3
pair_style yukawa/omp command :h3
pair_style yukawa/kk command :h3
[Syntax:]

View File

@ -8,6 +8,7 @@
pair_style zbl command :h3
pair_style zbl/gpu command :h3
pair_style zbl/kk command :h3
pair_style zbl/omp command :h3
[Syntax:]

View File

@ -11,11 +11,13 @@ Pair Styles :h1
pair_awpmd
pair_beck
pair_body
pair_body_rounded_polygon
pair_bop
pair_born
pair_brownian
pair_buck
pair_buck_long
pair_buck6d_coul_gauss
pair_charmm
pair_class2
pair_colloid
@ -32,6 +34,7 @@ Pair Styles :h1
pair_eff
pair_eim
pair_exp6_rx
pair_extep
pair_gauss
pair_gayberne
pair_gran
@ -100,6 +103,7 @@ Pair Styles :h1
pair_tersoff_zbl
pair_thole
pair_tri_lj
pair_ufm
pair_vashishta
pair_yukawa
pair_yukawa_colloid

View File

@ -14,10 +14,11 @@ print string keyword value :pre
string = text string to print, which may contain variables :ulb,l
zero or more keyword/value pairs may be appended :l
keyword = {file} or {append} or {screen} :l
keyword = {file} or {append} or {screen} or {universe} :l
{file} value = filename
{append} value = filename
{screen} value = {yes} or {no} :pre
{screen} value = {yes} or {no}
{universe} value = {yes} or {no} :pre
:ule
[Examples:]
@ -26,6 +27,7 @@ print "Done with equilibration" file info.dat
print Vol=$v append info.dat screen no
print "The system volume is now $v"
print 'The system volume is now $v'
print "NEB calculation 1 complete" screen no universe yes
print """
System volume = $v
System temperature = $t
@ -49,6 +51,11 @@ it does not exist.
If the {screen} keyword is used, output to the screen and logfile can
be turned on or off as desired.
If the {universe} keyword is used, output to the global screen and
logfile can be turned on or off as desired. In multi-partition
calculations, the {screen} option and the corresponding output only
apply to the screen and logfile of the individual partition.
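For example, in a multi-partition run the following prints a single
(arbitrary) message to the global screen and logfile while suppressing
the per-partition screen output:
print "Loop over partitions complete" screen no universe yes :pre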
If you want the print command to be executed multiple times (with
changing variable values), there are 3 options. First, consider using
the "fix print"_fix_print.html command, which will print a string
@ -74,4 +81,4 @@ thermodynamic properties, global values calculated by a
[Default:]
The option defaults are no file output and screen = yes.
The option defaults are no file output, screen = yes, and universe = no.

View File

@ -406,7 +406,7 @@ cases, LAMMPS has no simple way to check that something illogical is
being attempted.
The same applies to Python functions called during a simulation run at
each time step using "fix python"_fix_python.html.
each time step using "fix python/invoke"_fix_python_invoke.html.
:line
@ -493,6 +493,6 @@ different source files, problems may occur.
[Related commands:]
"shell"_shell.html, "variable"_variable.html, "fix python"_fix_python.html
"shell"_shell.html, "variable"_variable.html, "fix python/invoke"_fix_python_invoke.html
[Default:] none

View File

@ -10,9 +10,11 @@ replicate command :h3
[Syntax:]
replicate nx ny nz :pre
replicate nx ny nz {keyword} :pre
nx,ny,nz = replication factors in each dimension :ul
nx,ny,nz = replication factors in each dimension :ulb
optional {keyword} = {bbox} :l
{bbox} = only check atoms in replicas that overlap with a processor's subdomain :ule
[Examples:]
@ -43,6 +45,12 @@ file that crosses a periodic boundary should be between two atoms with
image flags that differ by 1. This will allow the bond to be
unwrapped appropriately.
The optional keyword {bbox} uses a bounding box to only check atoms
in replicas that overlap with a processor's subdomain when assigning
atoms to processors, and thus can result in substantial speedups for
calculations using a large number of processors. It does require
temporarily using more memory.
[Restrictions:]
A 2d simulation cannot be replicated in the z dimension.
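A minimal standalone sketch of the idea behind the {bbox} keyword; this is not LAMMPS source, and all names and box dimensions are arbitrary. A replica whose shifted bounding box does not overlap a processor's subdomain is rejected as a whole, so the per-atom assignment loop only runs for nearby replicas:

def overlap(lo_a, hi_a, lo_b, hi_b):
    # axis-aligned boxes overlap iff their ranges overlap along every axis
    return all(la <= hb and lb <= ha
               for la, ha, lb, hb in zip(lo_a, hi_a, lo_b, hi_b))

box_lo, box_hi = (0.0, 0.0, 0.0), (10.0, 10.0, 10.0)   # original cell (arbitrary)
sub_lo, sub_hi = (0.0, 0.0, 0.0), (10.0, 10.0, 10.0)   # one proc's subdomain (arbitrary)
nx = ny = nz = 3                                        # replication factors

checked = skipped = 0
for ix in range(nx):
    for iy in range(ny):
        for iz in range(nz):
            shift = (ix * 10.0, iy * 10.0, iz * 10.0)
            rep_lo = tuple(l + s for l, s in zip(box_lo, shift))
            rep_hi = tuple(h + s for h, s in zip(box_hi, shift))
            if overlap(rep_lo, rep_hi, sub_lo, sub_hi):
                checked += 1   # per-atom assignment would run for this replica
            else:
                skipped += 1   # entire replica rejected by the cheap bbox test

print(checked, "replicas need per-atom checks,", skipped, "skipped")
# -> 8 replicas need per-atom checks, 19 skipped (face-touching counts as overlap)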


@ -67,7 +67,8 @@ class RSTMarkup(Markup):
text = text.replace('*', '\\*')
text = text.replace('^', '\\^')
text = text.replace('|', '\\|')
text = re.sub(r'([^"])_', r'\1\\_', text)
text = re.sub(r'([^"])_([ \t\n\r\f])', r'\1\\\\_\2', text)
text = re.sub(r'([^"])_([^ \t\n\r\f])', r'\1\\_\2', text)
return text
def unescape_rst_chars(self, text):
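A minimal standalone sketch of what the two new substitutions do, using plain re; the escape_underscores helper name and the sample string are illustrative only:

import re

def escape_underscores(text):
    # mirrors the two substitutions above: an underscore followed by
    # whitespace gets a doubled backslash in the generated RST, any other
    # underscore a single backslash
    text = re.sub(r'([^"])_([ \t\n\r\f])', r'\1\\\\_\2', text)
    text = re.sub(r'([^"])_([^ \t\n\r\f])', r'\1\\_\2', text)
    return text

print(escape_underscores("style_name and style_ two"))
# prints: style\_name and style\\_ two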
@ -148,15 +149,18 @@ class RSTFormatting(Formatting):
return "\n----------\n\n" + content.strip()
def image(self, content, file, link=None):
if link and (link.lower().endswith('.jpg') or
link.lower().endswith('.jpeg') or
link.lower().endswith('.png') or
link.lower().endswith('.gif')):
converted = ".. thumbnail:: " + self.markup.unescape_rst_chars(link) + "\n"
else:
converted = ".. image:: " + self.markup.unescape_rst_chars(file) + "\n"
if link:
converted += " :target: " + self.markup.unescape_rst_chars(link) + "\n"
# 2017-12-07: commented out to disable thumbnail processing due to dropping
# support for obsolete sphinxcontrib.images extension
#
#if link and (link.lower().endswith('.jpg') or
# link.lower().endswith('.jpeg') or
# link.lower().endswith('.png') or
# link.lower().endswith('.gif')):
# converted = ".. thumbnail:: " + self.markup.unescape_rst_chars(link) + "\n"
#else:
converted = ".. image:: " + self.markup.unescape_rst_chars(file) + "\n"
if link:
converted += " :target: " + self.markup.unescape_rst_chars(link) + "\n"
if "c" in self.current_command_list:
converted += " :align: center\n"
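A minimal standalone sketch of the simplified behavior after the thumbnail branch is disabled; the function name, file names, and exact indentation are illustrative, not the converter's API:

def image_to_rst(file, link=None, centered=False):
    # every image now becomes a plain ".. image::" directive; if a link is
    # given it is attached as ":target:" instead of producing a thumbnail
    out = ".. image:: " + file + "\n"
    if link:
        out += "   :target: " + link + "\n"
    if centered:
        out += "   :align: center\n"
    return out

print(image_to_rst("JPG/snapshot_small.jpg", link="JPG/snapshot.jpg", centered=True))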


@ -31,8 +31,11 @@ import os
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinxcontrib.images',
]
# 2017-12-07: commented out, since this package is broken with Sphinx 1.6.x,
# yet we can no longer use Sphinx 1.5.x, since that breaks with
# newer versions of the multiprocessing module.
# 'sphinxcontrib.images',
images_config = {
'default_image_width' : '25%',


@ -17,33 +17,36 @@ additional wrapper library that interfaces the C interface of the
LAMMPS library to Fortran and also translates the MPI communicator
from Fortran to C.
Once you have built LAMMPS as a library (see examples/COUPLE/README),
you can then build any of the driver codes with compile lines like
these, which include paths to the LAMMPS library interface, MPI (an
installed MPICH in this case), and FFTW (assuming you built LAMMPS as
a library with its PPPM solver).
First build LAMMPS as a library (see examples/COUPLE/README), e.g.
This builds the C++ driver with the LAMMPS library using a C++ compiler:
make mode=shlib mpi
g++ -I/home/sjplimp/lammps/src -c simple.cpp
g++ -L/home/sjplimp/lammps/src simple.o \
-llammps -lfftw -lmpich -lmpl -lpthread -o simpleCC
You can then build any of the driver codes with compile lines like
these, which include paths to the LAMMPS library interface, and
linking with FFTW (only needed if you built LAMMPS as a library with
its PPPM solver).
This builds the C driver with the LAMMPS library using a C compiler:
This builds the C++ driver with the LAMMPS library using the mpiCC
(C++) compiler:
gcc -I/home/sjplimp/lammps/src -c simple.c
gcc -L/home/sjplimp/lammps/src simple.o \
-llammps -lfftw -lmpich -lmpl -lpthread -lstdc++ -o simpleC
mpiCC -I/home/sjplimp/lammps/src -c simple.cpp
mpiCC -L/home/sjplimp/lammps/src simple.o -llammps -lfftw -o simpleCC
This builds the C driver with the LAMMPS library using the mpicc (C)
compiler:
mpicc -I/home/sjplimp/lammps/src -c simple.c
mpicc -L/home/sjplimp/lammps/src simple.o -llammps -lfftw -o simpleC
This builds the Fortran wrapper and driver with the LAMMPS library
using a Fortran and C compiler, using the wrapper in the fortran
directory:
using the mpicc (C) and mpifort (Fortran) compilers, using the wrapper
in the fortran directory:
cp ../fortran/libfwrapper.c .
gcc -I/home/sjplimp/lammps/src -c libfwrapper.c
gfortran -I/home/sjplimp/lammps/src -c simple.f90
gfortran -L/home/sjplimp/lammps/src simple.o libfwrapper.o \
-llammps -lfftw -lfmpich -lmpich -lpthread -lstdc++ -o simpleF
mpicc -I/home/sjplimp/lammps/src -c libfwrapper.c
mpifort -c simple.f90
mpifort -L/home/sjplimp/lammps/src simple.o libfwrapper.o \
-llammps -lfftw -o simpleF
You then run simpleCC, simpleC, or simpleF on a parallel machine
on some number of processors Q with 2 arguments:


@ -145,7 +145,7 @@ int main(int narg, char **arg)
for (i = 0; i < natoms; i++) type[i] = 1;
lammps_command(lmp,"delete_atoms group all");
lammps_create_atoms(lmp,natoms,NULL,type,x,v);
lammps_create_atoms(lmp,natoms,NULL,type,x,v,NULL,0);
lammps_command(lmp,"run 10");
}


@ -109,11 +109,11 @@ int main(int narg, char **arg)
int natoms = static_cast<int> (lmp->atom->natoms);
x = new double[3*natoms];
v = new double[3*natoms];
lammps_gather_atoms(lmp,"x",1,3,x);
lammps_gather_atoms(lmp,"v",1,3,v);
lammps_gather_atoms(lmp,(char *) "x",1,3,x);
lammps_gather_atoms(lmp,(char *) "v",1,3,v);
double epsilon = 0.1;
x[0] += epsilon;
lammps_scatter_atoms(lmp,"x",1,3,x);
lammps_scatter_atoms(lmp,(char *) "x",1,3,x);
// these 2 lines are the same
@ -124,21 +124,22 @@ int main(int narg, char **arg)
// extract force on single atom two different ways
if (lammps == 1) {
double **f = (double **) lammps_extract_atom(lmp,"f");
double **f = (double **) lammps_extract_atom(lmp,(char *) "f");
printf("Force on 1 atom via extract_atom: %g\n",f[0][0]);
double *fx = (double *) lammps_extract_variable(lmp,"fx","all");
double *fx = (double *)
lammps_extract_variable(lmp,(char *) "fx",(char *) "all");
printf("Force on 1 atom via extract_variable: %g\n",fx[0]);
}
// use commands_string() and commands_list() to invoke more commands
char *strtwo = "run 10\nrun 20";
char *strtwo = (char *) "run 10\nrun 20";
if (lammps == 1) lammps_commands_string(lmp,strtwo);
char *cmds[2];
cmds[0] = "run 10";
cmds[1] = "run 20";
cmds[0] = (char *) "run 10";
cmds[1] = (char *) "run 20";
if (lammps == 1) lammps_commands_list(lmp,2,cmds);
// delete all atoms


@ -115,9 +115,12 @@ PROGRAM f_driver
CALL lammps_get_natoms(ptr,natoms)
ALLOCATE(x(3*natoms))
CALL lammps_gather_atoms(ptr,'x',1,3,x);
x(1) = x(1) + epsilon
CALL lammps_scatter_atoms(ptr,'x',1,3,x);
! these calls are commented out, b/c libfwrapper.c
! needs to be updated to use gather_atoms and scatter_atoms
!CALL lammps_gather_atoms(ptr,'x',1,3,x);
!x(1) = x(1) + epsilon
!CALL lammps_scatter_atoms(ptr,'x',1,3,x);
DEALLOCATE(x)


@ -0,0 +1,116 @@
info: BN sample with r_BN=1.45
100 atoms
2 atom types
0.0 21.75000000 xlo xhi
0.0 12.55736835 ylo yhi
0.0 50.00000000 zlo zhi
Masses
1 10.811
2 14.0067
Atoms
1 1 0.00000000 0.00000000 0.00000000
2 2 1.45000000 0.00000000 0.00000000
3 1 2.17500000 1.25573684 0.00000000
4 2 3.62500000 1.25573684 0.00000000
5 1 0.00000000 2.51147367 0.00000000
6 2 1.45000000 2.51147367 0.00000000
7 1 2.17500000 3.76721051 0.00000000
8 2 3.62500000 3.76721051 0.00000000
9 1 0.00000000 5.02294734 0.00000000
10 2 1.45000000 5.02294734 0.00000000
11 1 2.17500000 6.27868418 0.00000000
12 2 3.62500000 6.27868418 0.00000000
13 1 0.00000000 7.53442101 0.00000000
14 2 1.45000000 7.53442101 0.00000000
15 1 2.17500000 8.79015785 0.00000000
16 2 3.62500000 8.79015785 0.00000000
17 1 0.00000000 10.04589468 0.00000000
18 2 1.45000000 10.04589468 0.00000000
19 1 2.17500000 11.30163152 0.00000000
20 2 3.62500000 11.30163152 0.00000000
21 1 4.35000000 0.00000000 0.00000000
22 2 5.80000000 0.00000000 0.00000000
23 1 6.52500000 1.25573684 0.00000000
24 2 7.97500000 1.25573684 0.00000000
25 1 4.35000000 2.51147367 0.00000000
26 2 5.80000000 2.51147367 0.00000000
27 1 6.52500000 3.76721051 0.00000000
28 2 7.97500000 3.76721051 0.00000000
29 1 4.35000000 5.02294734 0.00000000
30 2 5.80000000 5.02294734 0.00000000
31 1 6.52500000 6.27868418 0.00000000
32 2 7.97500000 6.27868418 0.00000000
33 1 4.35000000 7.53442101 0.00000000
34 2 5.80000000 7.53442101 0.00000000
35 1 6.52500000 8.79015785 0.00000000
36 2 7.97500000 8.79015785 0.00000000
37 1 4.35000000 10.04589468 0.00000000
38 2 5.80000000 10.04589468 0.00000000
39 1 6.52500000 11.30163152 0.00000000
40 2 7.97500000 11.30163152 0.00000000
41 1 8.70000000 0.00000000 0.00000000
42 2 10.15000000 0.00000000 0.00000000
43 1 10.87500000 1.25573684 0.00000000
44 2 12.32500000 1.25573684 0.00000000
45 1 8.70000000 2.51147367 0.00000000
46 2 10.15000000 2.51147367 0.00000000
47 1 10.87500000 3.76721051 0.00000000
48 2 12.32500000 3.76721051 0.00000000
49 1 8.70000000 5.02294734 0.00000000
50 2 10.15000000 5.02294734 0.00000000
51 1 10.87500000 6.27868418 0.00000000
52 2 12.32500000 6.27868418 0.00000000
53 1 8.70000000 7.53442101 0.00000000
54 2 10.15000000 7.53442101 0.00000000
55 1 10.87500000 8.79015785 0.00000000
56 2 12.32500000 8.79015785 0.00000000
57 1 8.70000000 10.04589468 0.00000000
58 2 10.15000000 10.04589468 0.00000000
59 1 10.87500000 11.30163152 0.00000000
60 2 12.32500000 11.30163152 0.00000000
61 1 13.05000000 0.00000000 0.00000000
62 2 14.50000000 0.00000000 0.00000000
63 1 15.22500000 1.25573684 0.00000000
64 2 16.67500000 1.25573684 0.00000000
65 1 13.05000000 2.51147367 0.00000000
66 2 14.50000000 2.51147367 0.00000000
67 1 15.22500000 3.76721051 0.00000000
68 2 16.67500000 3.76721051 0.00000000
69 1 13.05000000 5.02294734 0.00000000
70 2 14.50000000 5.02294734 0.00000000
71 1 15.22500000 6.27868418 0.00000000
72 2 16.67500000 6.27868418 0.00000000
73 1 13.05000000 7.53442101 0.00000000
74 2 14.50000000 7.53442101 0.00000000
75 1 15.22500000 8.79015785 0.00000000
76 2 16.67500000 8.79015785 0.00000000
77 1 13.05000000 10.04589468 0.00000000
78 2 14.50000000 10.04589468 0.00000000
79 1 15.22500000 11.30163152 0.00000000
80 2 16.67500000 11.30163152 0.00000000
81 1 17.40000000 0.00000000 0.00000000
82 2 18.85000000 0.00000000 0.00000000
83 1 19.57500000 1.25573684 0.00000000
84 2 21.02500000 1.25573684 0.00000000
85 1 17.40000000 2.51147367 0.00000000
86 2 18.85000000 2.51147367 0.00000000
87 1 19.57500000 3.76721051 0.00000000
88 2 21.02500000 3.76721051 0.00000000
89 1 17.40000000 5.02294734 0.00000000
90 2 18.85000000 5.02294734 0.00000000
91 1 19.57500000 6.27868418 0.00000000
92 2 21.02500000 6.27868418 0.00000000
93 1 17.40000000 7.53442101 0.00000000
94 2 18.85000000 7.53442101 0.00000000
95 1 19.57500000 8.79015785 0.00000000
96 2 21.02500000 8.79015785 0.00000000
97 1 17.40000000 10.04589468 0.00000000
98 2 18.85000000 10.04589468 0.00000000
99 1 19.57500000 11.30163152 0.00000000
100 2 21.02500000 11.30163152 0.00000000


@ -0,0 +1,29 @@
# Initialization
units metal
boundary p p p
atom_style atomic
processors * * 1
# System and atom definition
read_data BN.data # read lammps data file
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
# Potential
pair_style extep
pair_coeff * * ../../../../potentials/BN.extep B N
# Output
thermo 10
thermo_style custom step time etotal pe temp lx ly lz pxx pyy pzz
thermo_modify line one format float %14.8g
# Setup NPT MD run
timestep 0.0001 # ps
velocity all create 300.0 12345
fix thermos all npt temp 300 300 1.0 x 0 0 1.0 y 0 0 1.0
run 1000


@ -0,0 +1,180 @@
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# Initialization
units metal
boundary p p p
atom_style atomic
processors * * 1
# System and atom definition
read_data BN.data # read lammps data file
orthogonal box = (0 0 0) to (21.75 12.5574 50)
1 by 1 by 1 MPI processor grid
reading atoms ...
100 atoms
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
# Potential
pair_style extep
pair_coeff * * ../../../../potentials/BN.extep B N
Reading potential file ../../../../potentials/BN.extep with DATE: 2017-11-28
# Output
thermo 10
thermo_style custom step time etotal pe temp lx ly lz pxx pyy pzz
thermo_modify line one format float %14.8g
# Setup NPT MD run
timestep 0.0001 # ps
velocity all create 300.0 12345
fix thermos all npt temp 300 300 1.0 x 0 0 1.0 y 0 0 1.0
run 1000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.2
ghost atom cutoff = 4.2
binsize = 2.1, bins = 11 6 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair extep, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.97 | 2.97 | 2.97 Mbytes
Step Time TotEng PotEng Temp Lx Ly Lz Pxx Pyy Pzz
0 0 -665.11189 -668.95092 300 21.75 12.557368 50 -1638.8315 -1636.7368 321.73163
10 0.001 -665.11194 -668.81065 289.03491 21.749944 12.557333 50 -1391.3771 -1841.1723 316.66669
20 0.002 -665.1121 -668.4273 259.06599 21.749789 12.557222 50 -1137.0171 -1980.5977 301.79466
30 0.003 -665.11237 -667.90117 217.93027 21.749552 12.557029 50 -912.51949 -2055.822 278.00774
40 0.004 -665.11278 -667.36471 175.97662 21.74925 12.556752 50 -755.38643 -2078.0669 246.62816
50 0.005 -665.11333 -666.94254 142.94321 21.748894 12.556389 50 -694.93153 -2062.1349 209.26356
60 0.006 -665.11405 -666.71476 125.08741 21.748487 12.55594 50 -744.6962 -2019.9093 167.70563
70 0.007 -665.11494 -666.69555 123.51632 21.748026 12.555408 50 -898.67863 -1956.2845 123.88845
80 0.008 -665.116 -666.83408 134.25892 21.7475 12.554796 50 -1132.5952 -1868.738 79.87581
90 0.009 -665.1172 -667.03647 149.98053 21.746893 12.554106 50 -1409.6896 -1750.4875 37.821017
100 0.01 -665.11853 -667.20002 162.65705 21.746185 12.553344 50 -1689.1599 -1595.9411 -0.14399002
110 0.011 -665.11997 -667.24752 166.25742 21.745356 12.552516 50 -1934.6334 -1406.3665 -32.091026
120 0.012 -665.12148 -667.15088 158.58671 21.744389 12.55163 50 -2120.4014 -1193.6117 -56.50543
130 0.013 -665.12306 -666.93754 141.7922 21.743271 12.550694 50 -2234.0841 -980.32815 -72.45885
140 0.014 -665.1247 -666.67903 121.4631 21.741993 12.549719 50 -2275.5656 -796.26701 -79.693692
150 0.015 -665.1264 -666.46562 104.65306 21.740553 12.54871 50 -2253.08 -671.5409 -78.603431
160 0.016 -665.1282 -666.37541 97.462619 21.738952 12.547674 50 -2178.0108 -628.83531 -70.130423
170 0.017 -665.13011 -666.44775 102.96665 21.737195 12.546611 50 -2060.2073 -677.02227 -55.623931
180 0.018 -665.13215 -666.67004 120.17784 21.735292 12.54552 50 -1905.36 -808.22824 -36.699042
190 0.019 -665.13431 -666.98201 144.38814 21.733253 12.544396 50 -1715.2526 -999.2481 -15.117617
200 0.02 -665.13656 -667.29591 168.74214 21.731091 12.543231 50 -1490.6934 -1216.735 7.3107732
210 0.021 -665.13885 -667.52511 186.47391 21.728823 12.542015 50 -1235.9283 -1424.4324 28.822782
220 0.022 -665.14112 -667.61153 193.0492 21.726467 12.540741 50 -962.70697 -1590.2885 47.801678
230 0.023 -665.14332 -667.54317 187.53534 21.724043 12.539402 50 -692.12856 -1691.6537 62.881768
240 0.024 -665.1454 -667.35665 172.79772 21.72157 12.537993 50 -453.02755 -1717.6064 73.041858
250 0.025 -665.14735 -667.12424 154.48373 21.719064 12.536514 50 -276.81709 -1668.3598 77.670868
260 0.026 -665.14918 -666.92939 139.11409 21.716539 12.534967 50 -190.03656 -1552.4049 76.59734
270 0.027 -665.15091 -666.83859 131.88391 21.714 12.533357 50 -206.85537 -1382.4915 70.085105
280 0.028 -665.15258 -666.87889 134.90214 21.711446 12.53169 50 -324.01795 -1171.7578 58.801327
290 0.029 -665.15421 -667.02881 146.49028 21.708869 12.529975 50 -520.0146 -931.26466 43.758636
300 0.03 -665.1558 -667.22646 161.81084 21.706255 12.528222 50 -758.87113 -669.74523 26.225956
310 0.031 -665.15734 -667.39183 174.61368 21.703587 12.526442 50 -997.42782 -395.56111 7.601897
320 0.032 -665.15878 -667.45546 179.47345 21.700849 12.524646 50 -1193.9402 -119.86797 -10.744258
330 0.033 -665.16008 -667.38312 173.71901 21.698026 12.522846 50 -1315.6446 140.7451 -27.638433
340 0.034 -665.16118 -667.18792 158.37888 21.695112 12.521051 50 -1343.5396 363.95099 -42.231049
350 0.035 -665.16207 -666.92571 137.81938 21.692103 12.519271 50 -1273.6625 524.73453 -54.046178
360 0.036 -665.16274 -666.67543 118.20885 21.689004 12.517514 50 -1115.1514 601.37143 -62.932702
370 0.037 -665.1632 -666.5115 105.36258 21.685827 12.515781 50 -886.11568 582.42087 -68.942158
380 0.038 -665.16348 -666.47849 102.76116 21.682589 12.514072 50 -608.71321 472.04732 -72.193259
390 0.039 -665.1636 -666.57728 110.47178 21.679308 12.512382 50 -304.85697 291.41908 -72.787214
400 0.04 -665.16356 -666.76741 125.33244 21.676006 12.510704 50 6.3732307 75.407852 -70.806087
410 0.041 -665.16336 -666.98363 142.24457 21.672705 12.50903 50 309.23046 -134.40319 -66.378966
420 0.042 -665.16298 -667.15939 156.00935 21.669426 12.507351 50 590.16982 -298.16702 -59.767469
430 0.043 -665.16239 -667.24843 163.01313 21.66619 12.50566 50 836.19535 -385.22443 -51.420249
440 0.044 -665.16157 -667.23746 162.2204 21.663014 12.503955 50 1033.943 -378.7816 -41.969885
450 0.045 -665.1605 -667.14707 155.24066 21.659911 12.502234 50 1170.3399 -277.11556 -32.175503
460 0.046 -665.15917 -667.0218 145.55489 21.656891 12.500503 50 1234.9026 -91.620499 -22.833423
470 0.047 -665.15761 -666.91366 137.22578 21.65396 12.498768 50 1222.9519 157.31306 -14.680548
480 0.048 -665.15585 -666.86462 133.53159 21.651114 12.497041 50 1138.5551 445.2926 -8.3071781
490 0.049 -665.15393 -666.89359 135.9458 21.64835 12.495333 50 996.00682 748.51842 -4.0872169
500 0.05 -665.15188 -666.99142 143.75058 21.645657 12.493655 50 819.08561 1046.9785 -2.1306918
510 0.051 -665.14975 -667.12519 154.36991 21.643022 12.49202 50 637.99022 1325.7112 -2.2650822
520 0.052 -665.14756 -667.25 164.29491 21.640432 12.49044 50 484.54509 1574.1916 -4.0528391
530 0.053 -665.14531 -667.32459 170.29969 21.637878 12.488923 50 386.77357 1784.4858 -6.8479114
540 0.054 -665.143 -667.32552 170.55254 21.635352 12.48748 50 364.14599 1949.2189 -9.8841824
550 0.055 -665.14064 -667.25527 165.24765 21.632853 12.486117 50 424.6565 2060.4607 -12.37851
560 0.056 -665.13822 -667.14127 156.52756 21.630385 12.484837 50 564.3912 2110.2547 -13.62742
570 0.057 -665.13576 -667.0259 147.70502 21.627958 12.483643 50 769.54354 2092.8157 -13.082914
580 0.058 -665.13327 -666.95107 142.05154 21.625586 12.482535 50 1020.1218 2007.6508 -10.405617
590 0.059 -665.13079 -666.94279 141.59877 21.623287 12.481508 50 1294.1274 1862.3568 -5.5031153
600 0.06 -665.12832 -667.00189 146.40928 21.621079 12.480557 50 1570.9478 1673.8456 1.4410957
610 0.061 -665.12591 -667.10417 154.59072 21.618982 12.479674 50 1833.1388 1467.2639 9.9561573
620 0.062 -665.12355 -667.20973 163.02368 21.617015 12.478851 50 2066.4951 1272.6732 19.310607
630 0.063 -665.12128 -667.27744 168.49239 21.615193 12.47808 50 2259.0193 1120.2758 28.59477
640 0.064 -665.11911 -667.27898 168.7823 21.613531 12.477355 50 2399.792 1035.3525 36.8539
650 0.065 -665.11707 -667.20773 163.37438 21.612037 12.476673 50 2478.6675 1034.0481 43.239368
660 0.066 -665.11518 -667.0802 153.55598 21.610718 12.476033 50 2487.2505 1120.8274 47.131883
670 0.067 -665.11345 -666.93026 141.97434 21.609573 12.475439 50 2420.9786 1288.0136 48.201717
680 0.068 -665.11191 -666.79864 131.80955 21.608598 12.474897 50 2281.6131 1517.4002 46.399066
690 0.069 -665.11056 -666.72065 125.82027 21.607784 12.474418 50 2079.2055 1783.5346 41.895586
700 0.07 -665.10941 -666.71578 125.5291 21.607116 12.474011 50 1832.7039 2057.9076 35.011051
710 0.071 -665.10848 -666.78203 130.77932 21.606577 12.473687 50 1568.7275 2313.0601 26.153491
720 0.072 -665.10776 -666.89681 139.80468 21.606148 12.473458 50 1318.5189 2525.6808 15.783637
730 0.073 -665.10727 -667.0243 149.80574 21.605812 12.47333 50 1113.5537 2678.1859 4.3967762
740 0.074 -665.10701 -667.12698 157.85016 21.605555 12.473311 50 980.633 2758.9123 -7.4930622
750 0.075 -665.10697 -667.17729 161.78497 21.605368 12.473404 50 937.45086 2761.5936 -19.376492
760 0.076 -665.10714 -667.1654 160.84249 21.605247 12.473609 50 989.5724 2684.9256 -30.776106
770 0.077 -665.1075 -667.10061 155.75086 21.605196 12.473922 50 1129.4775 2532.7048 -41.263677
780 0.078 -665.10803 -667.00654 148.35835 21.605226 12.474338 50 1337.8663 2314.4556 -50.455407
790 0.079 -665.10869 -666.91242 140.9515 21.605349 12.474848 50 1586.9099 2045.9808 -57.988114
800 0.08 -665.10946 -666.84375 135.52533 21.605585 12.475441 50 1844.7038 1749.1281 -63.495405
810 0.081 -665.11032 -666.81538 133.24173 21.60595 12.476105 50 2079.9601 1450.3113 -66.60795
820 0.082 -665.11127 -666.82877 134.21424 21.606461 12.476828 50 2266.0059 1177.7937 -66.990929
830 0.083 -665.1123 -666.87353 137.6312 21.607131 12.477599 50 2383.4351 958.19752 -64.411861
840 0.084 -665.11343 -666.93214 142.12323 21.607968 12.478409 50 2421.1969 812.91475 -58.816538
850 0.085 -665.11467 -666.98597 146.2321 21.608975 12.479253 50 2376.3483 755.06052 -50.389393
860 0.086 -665.11603 -667.02075 148.84448 21.610149 12.480128 50 2252.9811 787.43069 -39.585062
870 0.087 -665.1175 -667.03045 149.48743 21.611481 12.481034 50 2060.884 901.76342 -27.129117
880 0.088 -665.11907 -667.01838 148.42091 21.612958 12.481978 50 1814.3354 1079.4855 -13.988401
890 0.089 -665.12073 -666.99552 146.50471 21.614562 12.482966 50 1531.1565 1293.9709 -1.305884
900 0.09 -665.12247 -666.97639 144.87389 21.616275 12.484007 50 1231.9005 1514.0741 9.7083525
910 0.091 -665.12426 -666.97371 144.52455 21.618074 12.485109 50 938.90089 1708.364 17.929974
920 0.092 -665.12609 -666.99389 145.95889 21.61994 12.486281 50 674.90767 1849.2415 22.497207
930 0.093 -665.12794 -667.03498 149.02559 21.621853 12.487528 50 461.18604 1916.1468 22.971745
940 0.094 -665.12977 -667.08777 153.00718 21.6238 12.488852 50 315.19601 1897.3867 19.43758
950 0.095 -665.13156 -667.13925 156.8903 21.62577 12.490254 50 248.20946 1790.5667 12.504818
960 0.096 -665.13326 -667.17668 159.68273 21.627757 12.491728 50 263.35912 1601.9528 3.2123256
970 0.097 -665.13485 -667.19079 160.6611 21.629764 12.493267 50 354.58496 1345.1489 -7.1487162
980 0.098 -665.13628 -667.17758 159.5175 21.631796 12.494862 50 506.7626 1039.346 -17.249179
990 0.099 -665.13753 -667.13942 156.43758 21.633864 12.496499 50 697.06054 707.26671 -25.92737
1000 0.1 -665.13859 -667.0853 152.12472 21.635982 12.498164 50 897.38498 372.94791 -32.344697
Loop time of 0.463574 on 1 procs for 1000 steps with 100 atoms
Performance: 18.638 ns/day, 1.288 hours/ns, 2157.152 timesteps/s
99.0% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.44776 | 0.44776 | 0.44776 | 0.0 | 96.59
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.0023057 | 0.0023057 | 0.0023057 | 0.0 | 0.50
Output | 0.0015752 | 0.0015752 | 0.0015752 | 0.0 | 0.34
Modify | 0.010602 | 0.010602 | 0.010602 | 0.0 | 2.29
Other | | 0.001331 | | | 0.29
Nlocal: 100 ave 100 max 100 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 360 ave 360 max 360 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1800 ave 1800 max 1800 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1800
Ave neighs/atom = 18
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,180 @@
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# Initialization
units metal
boundary p p p
atom_style atomic
processors * * 1
# System and atom definition
read_data BN.data # read lammps data file
orthogonal box = (0 0 0) to (21.75 12.5574 50)
2 by 2 by 1 MPI processor grid
reading atoms ...
100 atoms
# Neighbor update settings
neighbor 2.0 bin
neigh_modify every 1
neigh_modify delay 0
neigh_modify check yes
# Potential
pair_style extep
pair_coeff * * ../../../../potentials/BN.extep B N
Reading potential file ../../../../potentials/BN.extep with DATE: 2017-11-28
# Output
thermo 10
thermo_style custom step time etotal pe temp lx ly lz pxx pyy pzz
thermo_modify line one format float %14.8g
# Setup NPT MD run
timestep 0.0001 # ps
velocity all create 300.0 12345
fix thermos all npt temp 300 300 1.0 x 0 0 1.0 y 0 0 1.0
run 1000
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.2
ghost atom cutoff = 4.2
binsize = 2.1, bins = 11 6 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair extep, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 2.943 | 2.943 | 2.943 Mbytes
Step Time TotEng PotEng Temp Lx Ly Lz Pxx Pyy Pzz
0 0 -665.11189 -668.95092 300 21.75 12.557368 50 -1638.8315 -1636.7368 321.73163
10 0.001 -665.11194 -668.81065 289.03491 21.749944 12.557333 50 -1391.3771 -1841.1723 316.66669
20 0.002 -665.1121 -668.4273 259.06599 21.749789 12.557222 50 -1137.0171 -1980.5977 301.79466
30 0.003 -665.11237 -667.90117 217.93027 21.749552 12.557029 50 -912.51949 -2055.822 278.00774
40 0.004 -665.11278 -667.36471 175.97662 21.74925 12.556752 50 -755.38643 -2078.0669 246.62816
50 0.005 -665.11333 -666.94254 142.94321 21.748894 12.556389 50 -694.93153 -2062.1349 209.26356
60 0.006 -665.11405 -666.71476 125.08741 21.748487 12.55594 50 -744.6962 -2019.9093 167.70563
70 0.007 -665.11494 -666.69555 123.51632 21.748026 12.555408 50 -898.67863 -1956.2845 123.88845
80 0.008 -665.116 -666.83408 134.25892 21.7475 12.554796 50 -1132.5952 -1868.738 79.87581
90 0.009 -665.1172 -667.03647 149.98053 21.746893 12.554106 50 -1409.6896 -1750.4875 37.821017
100 0.01 -665.11853 -667.20002 162.65705 21.746185 12.553344 50 -1689.1599 -1595.9411 -0.14399002
110 0.011 -665.11997 -667.24752 166.25742 21.745356 12.552516 50 -1934.6334 -1406.3665 -32.091026
120 0.012 -665.12148 -667.15088 158.58671 21.744389 12.55163 50 -2120.4014 -1193.6117 -56.50543
130 0.013 -665.12306 -666.93754 141.7922 21.743271 12.550694 50 -2234.0841 -980.32815 -72.45885
140 0.014 -665.1247 -666.67903 121.4631 21.741993 12.549719 50 -2275.5656 -796.26701 -79.693692
150 0.015 -665.1264 -666.46562 104.65306 21.740553 12.54871 50 -2253.08 -671.5409 -78.603431
160 0.016 -665.1282 -666.37541 97.462619 21.738952 12.547674 50 -2178.0108 -628.83531 -70.130423
170 0.017 -665.13011 -666.44775 102.96665 21.737195 12.546611 50 -2060.2073 -677.02227 -55.623931
180 0.018 -665.13215 -666.67004 120.17784 21.735292 12.54552 50 -1905.36 -808.22824 -36.699042
190 0.019 -665.13431 -666.98201 144.38814 21.733253 12.544396 50 -1715.2526 -999.2481 -15.117617
200 0.02 -665.13656 -667.29591 168.74214 21.731091 12.543231 50 -1490.6934 -1216.735 7.3107732
210 0.021 -665.13885 -667.52511 186.47391 21.728823 12.542015 50 -1235.9283 -1424.4324 28.822782
220 0.022 -665.14112 -667.61153 193.0492 21.726467 12.540741 50 -962.70697 -1590.2885 47.801678
230 0.023 -665.14332 -667.54317 187.53534 21.724043 12.539402 50 -692.12856 -1691.6537 62.881768
240 0.024 -665.1454 -667.35665 172.79772 21.72157 12.537993 50 -453.02755 -1717.6064 73.041858
250 0.025 -665.14735 -667.12424 154.48373 21.719064 12.536514 50 -276.81709 -1668.3598 77.670868
260 0.026 -665.14918 -666.92939 139.11409 21.716539 12.534967 50 -190.03656 -1552.4049 76.59734
270 0.027 -665.15091 -666.83859 131.88391 21.714 12.533357 50 -206.85537 -1382.4915 70.085105
280 0.028 -665.15258 -666.87889 134.90214 21.711446 12.53169 50 -324.01795 -1171.7578 58.801327
290 0.029 -665.15421 -667.02881 146.49028 21.708869 12.529975 50 -520.0146 -931.26466 43.758636
300 0.03 -665.1558 -667.22646 161.81084 21.706255 12.528222 50 -758.87113 -669.74523 26.225956
310 0.031 -665.15734 -667.39183 174.61368 21.703587 12.526442 50 -997.42782 -395.56111 7.601897
320 0.032 -665.15878 -667.45546 179.47345 21.700849 12.524646 50 -1193.9402 -119.86797 -10.744258
330 0.033 -665.16008 -667.38312 173.71901 21.698026 12.522846 50 -1315.6446 140.7451 -27.638433
340 0.034 -665.16118 -667.18792 158.37888 21.695112 12.521051 50 -1343.5396 363.95099 -42.231049
350 0.035 -665.16207 -666.92571 137.81938 21.692103 12.519271 50 -1273.6625 524.73453 -54.046178
360 0.036 -665.16274 -666.67543 118.20885 21.689004 12.517514 50 -1115.1514 601.37143 -62.932702
370 0.037 -665.1632 -666.5115 105.36258 21.685827 12.515781 50 -886.11568 582.42087 -68.942158
380 0.038 -665.16348 -666.47849 102.76116 21.682589 12.514072 50 -608.71321 472.04732 -72.193259
390 0.039 -665.1636 -666.57728 110.47178 21.679308 12.512382 50 -304.85697 291.41908 -72.787214
400 0.04 -665.16356 -666.76741 125.33244 21.676006 12.510704 50 6.3732307 75.407852 -70.806087
410 0.041 -665.16336 -666.98363 142.24457 21.672705 12.50903 50 309.23046 -134.40319 -66.378966
420 0.042 -665.16298 -667.15939 156.00935 21.669426 12.507351 50 590.16982 -298.16702 -59.767469
430 0.043 -665.16239 -667.24843 163.01313 21.66619 12.50566 50 836.19535 -385.22443 -51.420249
440 0.044 -665.16157 -667.23746 162.2204 21.663014 12.503955 50 1033.943 -378.7816 -41.969885
450 0.045 -665.1605 -667.14707 155.24066 21.659911 12.502234 50 1170.3399 -277.11556 -32.175503
460 0.046 -665.15917 -667.0218 145.55489 21.656891 12.500503 50 1234.9026 -91.620499 -22.833423
470 0.047 -665.15761 -666.91366 137.22578 21.65396 12.498768 50 1222.9519 157.31306 -14.680548
480 0.048 -665.15585 -666.86462 133.53159 21.651114 12.497041 50 1138.5551 445.2926 -8.3071781
490 0.049 -665.15393 -666.89359 135.9458 21.64835 12.495333 50 996.00682 748.51842 -4.0872169
500 0.05 -665.15188 -666.99142 143.75058 21.645657 12.493655 50 819.08561 1046.9785 -2.1306918
510 0.051 -665.14975 -667.12519 154.36991 21.643022 12.49202 50 637.99022 1325.7112 -2.2650822
520 0.052 -665.14756 -667.25 164.29491 21.640432 12.49044 50 484.54509 1574.1916 -4.0528391
530 0.053 -665.14531 -667.32459 170.29969 21.637878 12.488923 50 386.77357 1784.4858 -6.8479114
540 0.054 -665.143 -667.32552 170.55254 21.635352 12.48748 50 364.14599 1949.2189 -9.8841824
550 0.055 -665.14064 -667.25527 165.24765 21.632853 12.486117 50 424.6565 2060.4607 -12.37851
560 0.056 -665.13822 -667.14127 156.52756 21.630385 12.484837 50 564.3912 2110.2547 -13.62742
570 0.057 -665.13576 -667.0259 147.70502 21.627958 12.483643 50 769.54354 2092.8157 -13.082914
580 0.058 -665.13327 -666.95107 142.05154 21.625586 12.482535 50 1020.1218 2007.6508 -10.405617
590 0.059 -665.13079 -666.94279 141.59877 21.623287 12.481508 50 1294.1274 1862.3568 -5.5031153
600 0.06 -665.12832 -667.00189 146.40928 21.621079 12.480557 50 1570.9478 1673.8456 1.4410957
610 0.061 -665.12591 -667.10417 154.59072 21.618982 12.479674 50 1833.1388 1467.2639 9.9561573
620 0.062 -665.12355 -667.20973 163.02368 21.617015 12.478851 50 2066.4951 1272.6732 19.310607
630 0.063 -665.12128 -667.27744 168.49239 21.615193 12.47808 50 2259.0193 1120.2758 28.59477
640 0.064 -665.11911 -667.27898 168.7823 21.613531 12.477355 50 2399.792 1035.3525 36.8539
650 0.065 -665.11707 -667.20773 163.37438 21.612037 12.476673 50 2478.6675 1034.0481 43.239368
660 0.066 -665.11518 -667.0802 153.55598 21.610718 12.476033 50 2487.2505 1120.8274 47.131883
670 0.067 -665.11345 -666.93026 141.97434 21.609573 12.475439 50 2420.9786 1288.0136 48.201717
680 0.068 -665.11191 -666.79864 131.80955 21.608598 12.474897 50 2281.6131 1517.4002 46.399066
690 0.069 -665.11056 -666.72065 125.82027 21.607784 12.474418 50 2079.2055 1783.5346 41.895586
700 0.07 -665.10941 -666.71578 125.5291 21.607116 12.474011 50 1832.7039 2057.9076 35.011051
710 0.071 -665.10848 -666.78203 130.77932 21.606577 12.473687 50 1568.7275 2313.0601 26.153491
720 0.072 -665.10776 -666.89681 139.80468 21.606148 12.473458 50 1318.5189 2525.6808 15.783637
730 0.073 -665.10727 -667.0243 149.80574 21.605812 12.47333 50 1113.5537 2678.1859 4.3967762
740 0.074 -665.10701 -667.12698 157.85016 21.605555 12.473311 50 980.633 2758.9123 -7.4930622
750 0.075 -665.10697 -667.17729 161.78497 21.605368 12.473404 50 937.45086 2761.5936 -19.376492
760 0.076 -665.10714 -667.1654 160.84249 21.605247 12.473609 50 989.5724 2684.9256 -30.776106
770 0.077 -665.1075 -667.10061 155.75086 21.605196 12.473922 50 1129.4775 2532.7048 -41.263677
780 0.078 -665.10803 -667.00654 148.35835 21.605226 12.474338 50 1337.8663 2314.4556 -50.455407
790 0.079 -665.10869 -666.91242 140.9515 21.605349 12.474848 50 1586.9099 2045.9808 -57.988114
800 0.08 -665.10946 -666.84375 135.52533 21.605585 12.475441 50 1844.7038 1749.1281 -63.495405
810 0.081 -665.11032 -666.81538 133.24173 21.60595 12.476105 50 2079.9601 1450.3113 -66.60795
820 0.082 -665.11127 -666.82877 134.21424 21.606461 12.476828 50 2266.0059 1177.7937 -66.990929
830 0.083 -665.1123 -666.87353 137.6312 21.607131 12.477599 50 2383.4351 958.19752 -64.411861
840 0.084 -665.11343 -666.93214 142.12323 21.607968 12.478409 50 2421.1969 812.91475 -58.816538
850 0.085 -665.11467 -666.98597 146.2321 21.608975 12.479253 50 2376.3483 755.06052 -50.389393
860 0.086 -665.11603 -667.02075 148.84448 21.610149 12.480128 50 2252.9811 787.43069 -39.585062
870 0.087 -665.1175 -667.03045 149.48743 21.611481 12.481034 50 2060.884 901.76342 -27.129117
880 0.088 -665.11907 -667.01838 148.42091 21.612958 12.481978 50 1814.3354 1079.4855 -13.988401
890 0.089 -665.12073 -666.99552 146.50471 21.614562 12.482966 50 1531.1565 1293.9709 -1.305884
900 0.09 -665.12247 -666.97639 144.87389 21.616275 12.484007 50 1231.9005 1514.0741 9.7083525
910 0.091 -665.12426 -666.97371 144.52455 21.618074 12.485109 50 938.90089 1708.364 17.929974
920 0.092 -665.12609 -666.99389 145.95889 21.61994 12.486281 50 674.90767 1849.2415 22.497207
930 0.093 -665.12794 -667.03498 149.02559 21.621853 12.487528 50 461.18604 1916.1468 22.971745
940 0.094 -665.12977 -667.08777 153.00718 21.6238 12.488852 50 315.19601 1897.3867 19.43758
950 0.095 -665.13156 -667.13925 156.8903 21.62577 12.490254 50 248.20946 1790.5667 12.504818
960 0.096 -665.13326 -667.17668 159.68273 21.627757 12.491728 50 263.35912 1601.9528 3.2123256
970 0.097 -665.13485 -667.19079 160.6611 21.629764 12.493267 50 354.58496 1345.1489 -7.1487162
980 0.098 -665.13628 -667.17758 159.5175 21.631796 12.494862 50 506.7626 1039.346 -17.249179
990 0.099 -665.13753 -667.13942 156.43758 21.633864 12.496499 50 697.06054 707.26671 -25.92737
1000 0.1 -665.13859 -667.0853 152.12472 21.635982 12.498164 50 897.38498 372.94791 -32.344697
Loop time of 0.174508 on 4 procs for 1000 steps with 100 atoms
Performance: 49.511 ns/day, 0.485 hours/ns, 5730.393 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.12409 | 0.12834 | 0.13408 | 1.1 | 73.54
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.016369 | 0.021358 | 0.025324 | 2.7 | 12.24
Output | 0.0023892 | 0.0025101 | 0.0028272 | 0.4 | 1.44
Modify | 0.01733 | 0.018302 | 0.018958 | 0.5 | 10.49
Other | | 0.003995 | | | 2.29
Nlocal: 25 ave 26 max 24 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 179 ave 180 max 178 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 450 ave 468 max 432 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Total # of neighbors = 1800
Ave neighs/atom = 18
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00

File diff suppressed because it is too large.


@ -0,0 +1,93 @@
clear
units real
boundary p p p
atom_style full
read_data hkust1.data
neighbor 2.0 bin
# ------------------------ MOF-FF FORCE FIELD ------------------------------
pair_style buck6d/coul/gauss/dsf 0.9000 12.0000
pair_coeff 1 1 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW
pair_coeff 1 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 1 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 1 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 1 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 1 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 2 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 2 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 2 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 2 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 2 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 3 3 3680 4 32.805 0.10690769 0.9771554 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 3 4 14157.243 3.1914894 489.18197 2.5231391 0.45538909 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 3 5 6320.6076 3.6144578 103.50278 0.44181916 0.75109952 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 3 6 6157.8178 3.4883721 124.7792 0.72632262 0.73006542 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 4 4 54464 2.6548673 5679.4311 33.208515 0.34105936 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 4 5 24315.863 2.9411765 1371.5617 7.9168726 0.42457748 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 4 6 23689.598 2.8571429 1590.0769 11.87959 0.42066711 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 5 5 10856 3.2967033 308.7755 1.6022517 0.63272774 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 5 6 10576.399 3.1914894 365.45138 2.5231391 0.61999948 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 6 6 10304 3.0927835 429.89352 3.9170177 0.60800971 # buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
bond_style hybrid class2 morse
bond_coeff 5 morse 50.000000 1.451345 1.914000 # morse->(cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
bond_coeff 4 class2 2.536000 75.465060 -192.435903 286.248406 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2)|CuPW
bond_coeff 3 class2 1.094000 390.634200 -996.117210 1481.724350 # mm3->(c3_c2h1@ph,h1_c1@ph)|benzene
bond_coeff 6 class2 1.278000 585.591600 -1493.258580 2221.222138 # mm3->(c3_c1o2@co2,o2_c1cu1@co2)|CuPW
bond_coeff 1 class2 1.394000 509.335200 -1298.804760 1931.972080 # mm3->(c3_c2h1@ph,c3_c2h1@ph)|benzene
bond_coeff 2 class2 1.485000 360.635220 -919.619811 1367.934469 # mm3->(c3_c1o2@co2,c3_c3@ph)|CuPW
angle_style hybrid class2/p6 cosine/buck6d
angle_coeff 2 class2/p6 117.711000 57.408120 -46.049402 10.553745 -7.558563 13.610890 # mm3->(c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
angle_coeff 2 class2/p6 bb 0.0 1.0 1.0
angle_coeff 2 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 6 class2/p6 126.814000 13.740540 -11.021849 2.526022 -1.809130 3.257744 # mm3->(c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
angle_coeff 6 class2/p6 bb 0.0 1.0 1.0
angle_coeff 6 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 7 class2/p6 123.490000 111.075360 -89.098091 20.419778 -14.624589 26.334856 # mm3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 7 class2/p6 bb 14.244120 1.278000 1.278000
angle_coeff 7 class2/p6 ba 3.309240 3.309240 1.278000 1.278000
angle_coeff 1 class2/p6 127.050000 53.307540 -42.760159 9.799907 -7.018666 12.638684 # mm3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
angle_coeff 1 class2/p6 bb 71.796120 1.394000 1.394000
angle_coeff 1 class2/p6 ba 6.762360 6.762360 1.394000 1.394000
angle_coeff 4 class2/p6 84.336000 29.351520 -23.544055 5.395900 -3.864529 6.958951 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
angle_coeff 4 class2/p6 bb 0.0 1.0 1.0
angle_coeff 4 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 3 class2/p6 120.350000 36.185820 -29.026127 6.652298 -4.764358 8.579296 # mm3->(c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
angle_coeff 3 class2/p6 bb 93.378120 1.394000 1.094000
angle_coeff 3 class2/p6 ba -25.179000 53.523360 1.394000 1.094000
angle_coeff 8 class2/p6 115.098000 79.493700 -63.765149 14.613896 -10.466432 18.847160 # mm3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 8 class2/p6 bb 0.0 1.0 1.0
angle_coeff 8 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 5 cosine/buck6d 1.978350 4 180.000000 # fourier->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_style opls
dihedral_coeff 3 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 9 0.000000 4.528000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 8 0.000000 0.000000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
dihedral_coeff 5 0.000000 1.741000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
dihedral_coeff 2 0.000000 6.316000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
dihedral_coeff 1 0.000000 4.379000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
dihedral_coeff 6 0.000000 0.000000 0.000000 0.609000 # cos4->(o2_c1cu1@co2,cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_coeff 4 0.000000 0.000000 0.000000 0.000000 # cos3->(h1_c1@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 10 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 7 0.000000 0.000000 0.000000 0.000000 # cos3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 # harm->(c3_c3@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c1o2@co2)|CuPW
improper_coeff 3 41.005800 0.000000 # harm->(c3_c1o2@co2,c3_c3@ph,o2_c1cu1@co2,o2_c1cu1@co2)|CuPW
improper_coeff 2 4.100580 0.000000 # harm->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
special_bonds lj 0.0 0.0 1.0 coul 1.0 1.0 1.0
# ------------------------ MOF-FF FORCE FIELD END --------------------------
run 0


@ -0,0 +1,94 @@
clear
units real
boundary p p p
atom_style full
read_data hkust1.data
neighbor 2.0 bin
# ------------------------ MOF-FF FORCE FIELD ------------------------------
kspace_style ewald 1.0e-6
pair_style buck6d/coul/gauss/long 0.9 0.9 12.0000
pair_coeff 1 1 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW
pair_coeff 1 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 1 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 1 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 1 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 1 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 2 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 2 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 2 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 2 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 2 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 3 3 3680 4 32.805 0.10690769 0.9771554 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 3 4 14157.243 3.1914894 489.18197 2.5231391 0.45538909 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 3 5 6320.6076 3.6144578 103.50278 0.44181916 0.75109952 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 3 6 6157.8178 3.4883721 124.7792 0.72632262 0.73006542 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 4 4 54464 2.6548673 5679.4311 33.208515 0.34105936 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 4 5 24315.863 2.9411765 1371.5617 7.9168726 0.42457748 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 4 6 23689.598 2.8571429 1590.0769 11.87959 0.42066711 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 5 5 10856 3.2967033 308.7755 1.6022517 0.63272774 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 5 6 10576.399 3.1914894 365.45138 2.5231391 0.61999948 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 6 6 10304 3.0927835 429.89352 3.9170177 0.60800971 # buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
bond_style hybrid class2 morse
bond_coeff 5 morse 50.000000 1.451345 1.914000 # morse->(cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
bond_coeff 4 class2 2.536000 75.465060 -192.435903 286.248406 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2)|CuPW
bond_coeff 3 class2 1.094000 390.634200 -996.117210 1481.724350 # mm3->(c3_c2h1@ph,h1_c1@ph)|benzene
bond_coeff 6 class2 1.278000 585.591600 -1493.258580 2221.222138 # mm3->(c3_c1o2@co2,o2_c1cu1@co2)|CuPW
bond_coeff 1 class2 1.394000 509.335200 -1298.804760 1931.972080 # mm3->(c3_c2h1@ph,c3_c2h1@ph)|benzene
bond_coeff 2 class2 1.485000 360.635220 -919.619811 1367.934469 # mm3->(c3_c1o2@co2,c3_c3@ph)|CuPW
angle_style hybrid class2/p6 cosine/buck6d
angle_coeff 2 class2/p6 117.711000 57.408120 -46.049402 10.553745 -7.558563 13.610890 # mm3->(c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
angle_coeff 2 class2/p6 bb 0.0 1.0 1.0
angle_coeff 2 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 6 class2/p6 126.814000 13.740540 -11.021849 2.526022 -1.809130 3.257744 # mm3->(c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
angle_coeff 6 class2/p6 bb 0.0 1.0 1.0
angle_coeff 6 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 7 class2/p6 123.490000 111.075360 -89.098091 20.419778 -14.624589 26.334856 # mm3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 7 class2/p6 bb 14.244120 1.278000 1.278000
angle_coeff 7 class2/p6 ba 3.309240 3.309240 1.278000 1.278000
angle_coeff 1 class2/p6 127.050000 53.307540 -42.760159 9.799907 -7.018666 12.638684 # mm3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
angle_coeff 1 class2/p6 bb 71.796120 1.394000 1.394000
angle_coeff 1 class2/p6 ba 6.762360 6.762360 1.394000 1.394000
angle_coeff 4 class2/p6 84.336000 29.351520 -23.544055 5.395900 -3.864529 6.958951 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
angle_coeff 4 class2/p6 bb 0.0 1.0 1.0
angle_coeff 4 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 3 class2/p6 120.350000 36.185820 -29.026127 6.652298 -4.764358 8.579296 # mm3->(c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
angle_coeff 3 class2/p6 bb 93.378120 1.394000 1.094000
angle_coeff 3 class2/p6 ba -25.179000 53.523360 1.394000 1.094000
angle_coeff 8 class2/p6 115.098000 79.493700 -63.765149 14.613896 -10.466432 18.847160 # mm3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 8 class2/p6 bb 0.0 1.0 1.0
angle_coeff 8 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 5 cosine/buck6d 1.978350 4 180.000000 #1.0 # fourier->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_style opls
dihedral_coeff 3 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 9 0.000000 4.528000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 8 0.000000 0.000000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
dihedral_coeff 5 0.000000 1.741000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
dihedral_coeff 2 0.000000 6.316000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
dihedral_coeff 1 0.000000 4.379000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
dihedral_coeff 6 0.000000 0.000000 0.000000 0.609000 # cos4->(o2_c1cu1@co2,cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_coeff 4 0.000000 0.000000 0.000000 0.000000 # cos3->(h1_c1@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 10 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 7 0.000000 0.000000 0.000000 0.000000 # cos3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 # harm->(c3_c3@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c1o2@co2)|CuPW
improper_coeff 3 41.005800 0.000000 # harm->(c3_c1o2@co2,c3_c3@ph,o2_c1cu1@co2,o2_c1cu1@co2)|CuPW
improper_coeff 2 4.100580 0.000000 # harm->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
special_bonds lj 0.0 0.0 1.0 coul 1.0 1.0 1.0
# ------------------------ MOF-FF FORCE FIELD END --------------------------
run 0


@ -0,0 +1,166 @@
LAMMPS (17 Jan 2018)
clear
units real
boundary p p p
atom_style full
read_data hkust1.data
triclinic box = (0 0 0) to (26.4408 26.4408 26.4408) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
reading atoms ...
624 atoms
scanning bonds ...
5 = max bonds/atom
scanning angles ...
10 = max angles/atom
scanning dihedrals ...
32 = max dihedrals/atom
scanning impropers ...
2 = max impropers/atom
reading bonds ...
792 bonds
reading angles ...
1536 angles
reading dihedrals ...
2688 dihedrals
reading impropers ...
288 impropers
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
20 = max # of 1-4 neighbors
17 = max # of special neighbors
neighbor 2.0 bin
# ------------------------ MOF-FF FORCE FIELD ------------------------------
pair_style buck6d/coul/gauss/dsf 0.9000 12.0000
pair_coeff 1 1 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW
pair_coeff 1 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 1 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 1 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 1 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 1 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 2 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 2 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 2 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 2 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 2 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 3 3 3680 4 32.805 0.10690769 0.9771554 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 3 4 14157.243 3.1914894 489.18197 2.5231391 0.45538909 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 3 5 6320.6076 3.6144578 103.50278 0.44181916 0.75109952 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 3 6 6157.8178 3.4883721 124.7792 0.72632262 0.73006542 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 4 4 54464 2.6548673 5679.4311 33.208515 0.34105936 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 4 5 24315.863 2.9411765 1371.5617 7.9168726 0.42457748 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 4 6 23689.598 2.8571429 1590.0769 11.87959 0.42066711 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 5 5 10856 3.2967033 308.7755 1.6022517 0.63272774 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 5 6 10576.399 3.1914894 365.45138 2.5231391 0.61999948 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 6 6 10304 3.0927835 429.89352 3.9170177 0.60800971 # buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
bond_style hybrid class2 morse
bond_coeff 5 morse 50.000000 1.451345 1.914000 # morse->(cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
bond_coeff 4 class2 2.536000 75.465060 -192.435903 286.248406 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2)|CuPW
bond_coeff 3 class2 1.094000 390.634200 -996.117210 1481.724350 # mm3->(c3_c2h1@ph,h1_c1@ph)|benzene
bond_coeff 6 class2 1.278000 585.591600 -1493.258580 2221.222138 # mm3->(c3_c1o2@co2,o2_c1cu1@co2)|CuPW
bond_coeff 1 class2 1.394000 509.335200 -1298.804760 1931.972080 # mm3->(c3_c2h1@ph,c3_c2h1@ph)|benzene
bond_coeff 2 class2 1.485000 360.635220 -919.619811 1367.934469 # mm3->(c3_c1o2@co2,c3_c3@ph)|CuPW
angle_style hybrid class2/p6 cosine/buck6d
angle_coeff 2 class2/p6 117.711000 57.408120 -46.049402 10.553745 -7.558563 13.610890 # mm3->(c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
angle_coeff 2 class2/p6 bb 0.0 1.0 1.0
angle_coeff 2 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 6 class2/p6 126.814000 13.740540 -11.021849 2.526022 -1.809130 3.257744 # mm3->(c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
angle_coeff 6 class2/p6 bb 0.0 1.0 1.0
angle_coeff 6 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 7 class2/p6 123.490000 111.075360 -89.098091 20.419778 -14.624589 26.334856 # mm3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 7 class2/p6 bb 14.244120 1.278000 1.278000
angle_coeff 7 class2/p6 ba 3.309240 3.309240 1.278000 1.278000
angle_coeff 1 class2/p6 127.050000 53.307540 -42.760159 9.799907 -7.018666 12.638684 # mm3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
angle_coeff 1 class2/p6 bb 71.796120 1.394000 1.394000
angle_coeff 1 class2/p6 ba 6.762360 6.762360 1.394000 1.394000
angle_coeff 4 class2/p6 84.336000 29.351520 -23.544055 5.395900 -3.864529 6.958951 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
angle_coeff 4 class2/p6 bb 0.0 1.0 1.0
angle_coeff 4 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 3 class2/p6 120.350000 36.185820 -29.026127 6.652298 -4.764358 8.579296 # mm3->(c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
angle_coeff 3 class2/p6 bb 93.378120 1.394000 1.094000
angle_coeff 3 class2/p6 ba -25.179000 53.523360 1.394000 1.094000
angle_coeff 8 class2/p6 115.098000 79.493700 -63.765149 14.613896 -10.466432 18.847160 # mm3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 8 class2/p6 bb 0.0 1.0 1.0
angle_coeff 8 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 5 cosine/buck6d 1.978350 4 180.000000 # fourier->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_style opls
dihedral_coeff 3 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 9 0.000000 4.528000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 8 0.000000 0.000000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
dihedral_coeff 5 0.000000 1.741000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
dihedral_coeff 2 0.000000 6.316000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
dihedral_coeff 1 0.000000 4.379000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
dihedral_coeff 6 0.000000 0.000000 0.000000 0.609000 # cos4->(o2_c1cu1@co2,cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_coeff 4 0.000000 0.000000 0.000000 0.000000 # cos3->(h1_c1@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 10 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 7 0.000000 0.000000 0.000000 0.000000 # cos3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 # harm->(c3_c3@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c1o2@co2)|CuPW
improper_coeff 3 41.005800 0.000000 # harm->(c3_c1o2@co2,c3_c3@ph,o2_c1cu1@co2,o2_c1cu1@co2)|CuPW
improper_coeff 2 4.100580 0.000000 # harm->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
special_bonds lj 0.0 0.0 1.0 coul 1.0 1.0 1.0
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
17 = max # of special neighbors
# ------------------------ MOF-FF FORCE FIELD END --------------------------
run 0
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 14
ghost atom cutoff = 14
binsize = 7, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair buck6d/coul/gauss/dsf, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/3d/newton/tri
bin: standard
WARNING: Inconsistent image flags (../domain.cpp:786)
Per MPI rank memory allocation (min/avg/max) = 21.23 | 21.23 | 21.23 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -11833.81 343.7619 -11490.048 -5.8508834
Loop time of 9.53674e-07 on 1 procs for 0 steps with 624 atoms
0.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Bond | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 624 ave 624 max 624 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4464 ave 4464 max 4464 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 115368 ave 115368 max 115368 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 115368
Ave neighs/atom = 184.885
Ave special neighs/atom = 7.46154
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,166 @@
LAMMPS (17 Jan 2018)
clear
units real
boundary p p p
atom_style full
read_data hkust1.data
triclinic box = (0 0 0) to (26.4408 26.4408 26.4408) with tilt (0 0 0)
1 by 2 by 2 MPI processor grid
reading atoms ...
624 atoms
scanning bonds ...
5 = max bonds/atom
scanning angles ...
10 = max angles/atom
scanning dihedrals ...
32 = max dihedrals/atom
scanning impropers ...
2 = max impropers/atom
reading bonds ...
792 bonds
reading angles ...
1536 angles
reading dihedrals ...
2688 dihedrals
reading impropers ...
288 impropers
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
20 = max # of 1-4 neighbors
17 = max # of special neighbors
neighbor 2.0 bin
# ------------------------ MOF-FF FORCE FIELD ------------------------------
pair_style buck6d/coul/gauss/dsf 0.9000 12.0000
pair_coeff 1 1 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW
pair_coeff 1 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 1 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 1 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 1 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 1 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 2 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 2 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 2 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 2 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 2 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 3 3 3680 4 32.805 0.10690769 0.9771554 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 3 4 14157.243 3.1914894 489.18197 2.5231391 0.45538909 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 3 5 6320.6076 3.6144578 103.50278 0.44181916 0.75109952 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 3 6 6157.8178 3.4883721 124.7792 0.72632262 0.73006542 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 4 4 54464 2.6548673 5679.4311 33.208515 0.34105936 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 4 5 24315.863 2.9411765 1371.5617 7.9168726 0.42457748 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 4 6 23689.598 2.8571429 1590.0769 11.87959 0.42066711 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 5 5 10856 3.2967033 308.7755 1.6022517 0.63272774 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 5 6 10576.399 3.1914894 365.45138 2.5231391 0.61999948 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 6 6 10304 3.0927835 429.89352 3.9170177 0.60800971 # buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
bond_style hybrid class2 morse
bond_coeff 5 morse 50.000000 1.451345 1.914000 # morse->(cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
bond_coeff 4 class2 2.536000 75.465060 -192.435903 286.248406 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2)|CuPW
bond_coeff 3 class2 1.094000 390.634200 -996.117210 1481.724350 # mm3->(c3_c2h1@ph,h1_c1@ph)|benzene
bond_coeff 6 class2 1.278000 585.591600 -1493.258580 2221.222138 # mm3->(c3_c1o2@co2,o2_c1cu1@co2)|CuPW
bond_coeff 1 class2 1.394000 509.335200 -1298.804760 1931.972080 # mm3->(c3_c2h1@ph,c3_c2h1@ph)|benzene
bond_coeff 2 class2 1.485000 360.635220 -919.619811 1367.934469 # mm3->(c3_c1o2@co2,c3_c3@ph)|CuPW
angle_style hybrid class2/p6 cosine/buck6d
angle_coeff 2 class2/p6 117.711000 57.408120 -46.049402 10.553745 -7.558563 13.610890 # mm3->(c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
angle_coeff 2 class2/p6 bb 0.0 1.0 1.0
angle_coeff 2 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 6 class2/p6 126.814000 13.740540 -11.021849 2.526022 -1.809130 3.257744 # mm3->(c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
angle_coeff 6 class2/p6 bb 0.0 1.0 1.0
angle_coeff 6 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 7 class2/p6 123.490000 111.075360 -89.098091 20.419778 -14.624589 26.334856 # mm3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 7 class2/p6 bb 14.244120 1.278000 1.278000
angle_coeff 7 class2/p6 ba 3.309240 3.309240 1.278000 1.278000
angle_coeff 1 class2/p6 127.050000 53.307540 -42.760159 9.799907 -7.018666 12.638684 # mm3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
angle_coeff 1 class2/p6 bb 71.796120 1.394000 1.394000
angle_coeff 1 class2/p6 ba 6.762360 6.762360 1.394000 1.394000
angle_coeff 4 class2/p6 84.336000 29.351520 -23.544055 5.395900 -3.864529 6.958951 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
angle_coeff 4 class2/p6 bb 0.0 1.0 1.0
angle_coeff 4 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 3 class2/p6 120.350000 36.185820 -29.026127 6.652298 -4.764358 8.579296 # mm3->(c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
angle_coeff 3 class2/p6 bb 93.378120 1.394000 1.094000
angle_coeff 3 class2/p6 ba -25.179000 53.523360 1.394000 1.094000
angle_coeff 8 class2/p6 115.098000 79.493700 -63.765149 14.613896 -10.466432 18.847160 # mm3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 8 class2/p6 bb 0.0 1.0 1.0
angle_coeff 8 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 5 cosine/buck6d 1.978350 4 180.000000 # fourier->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_style opls
dihedral_coeff 3 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 9 0.000000 4.528000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 8 0.000000 0.000000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
dihedral_coeff 5 0.000000 1.741000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
dihedral_coeff 2 0.000000 6.316000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
dihedral_coeff 1 0.000000 4.379000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
dihedral_coeff 6 0.000000 0.000000 0.000000 0.609000 # cos4->(o2_c1cu1@co2,cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_coeff 4 0.000000 0.000000 0.000000 0.000000 # cos3->(h1_c1@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 10 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 7 0.000000 0.000000 0.000000 0.000000 # cos3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 # harm->(c3_c3@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c1o2@co2)|CuPW
improper_coeff 3 41.005800 0.000000 # harm->(c3_c1o2@co2,c3_c3@ph,o2_c1cu1@co2,o2_c1cu1@co2)|CuPW
improper_coeff 2 4.100580 0.000000 # harm->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
special_bonds lj 0.0 0.0 1.0 coul 1.0 1.0 1.0
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
17 = max # of special neighbors
# ------------------------ MOF-FF FORCE FIELD END --------------------------
run 0
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 14
ghost atom cutoff = 14
binsize = 7, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair buck6d/coul/gauss/dsf, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/3d/newton/tri
bin: standard
WARNING: Inconsistent image flags (../domain.cpp:786)
Per MPI rank memory allocation (min/avg/max) = 20.68 | 20.68 | 20.68 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -11833.81 343.7619 -11490.048 -5.8508834
Loop time of 2.20537e-06 on 4 procs for 0 steps with 624 atoms
0.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Bond | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 2.205e-06 | | |100.00
Nlocal: 156 ave 156 max 156 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 2718 ave 2718 max 2718 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 28842 ave 28870 max 28814 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Total # of neighbors = 115368
Ave neighs/atom = 184.885
Ave special neighs/atom = 7.46154
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,175 @@
LAMMPS (17 Jan 2018)
clear
units real
boundary p p p
atom_style full
read_data hkust1.data
triclinic box = (0 0 0) to (26.4408 26.4408 26.4408) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
reading atoms ...
624 atoms
scanning bonds ...
5 = max bonds/atom
scanning angles ...
10 = max angles/atom
scanning dihedrals ...
32 = max dihedrals/atom
scanning impropers ...
2 = max impropers/atom
reading bonds ...
792 bonds
reading angles ...
1536 angles
reading dihedrals ...
2688 dihedrals
reading impropers ...
288 impropers
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
20 = max # of 1-4 neighbors
17 = max # of special neighbors
neighbor 2.0 bin
# ------------------------ MOF-FF FORCE FIELD ------------------------------
kspace_style ewald 1.0e-6
pair_style buck6d/coul/gauss/long 0.9 0.9 12.0000
pair_coeff 1 1 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW
pair_coeff 1 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 1 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 1 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 1 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 1 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 2 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 2 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 2 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 2 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 2 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 3 3 3680 4 32.805 0.10690769 0.9771554 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 3 4 14157.243 3.1914894 489.18197 2.5231391 0.45538909 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 3 5 6320.6076 3.6144578 103.50278 0.44181916 0.75109952 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 3 6 6157.8178 3.4883721 124.7792 0.72632262 0.73006542 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 4 4 54464 2.6548673 5679.4311 33.208515 0.34105936 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 4 5 24315.863 2.9411765 1371.5617 7.9168726 0.42457748 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 4 6 23689.598 2.8571429 1590.0769 11.87959 0.42066711 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 5 5 10856 3.2967033 308.7755 1.6022517 0.63272774 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 5 6 10576.399 3.1914894 365.45138 2.5231391 0.61999948 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 6 6 10304 3.0927835 429.89352 3.9170177 0.60800971 # buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
bond_style hybrid class2 morse
bond_coeff 5 morse 50.000000 1.451345 1.914000 # morse->(cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
bond_coeff 4 class2 2.536000 75.465060 -192.435903 286.248406 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2)|CuPW
bond_coeff 3 class2 1.094000 390.634200 -996.117210 1481.724350 # mm3->(c3_c2h1@ph,h1_c1@ph)|benzene
bond_coeff 6 class2 1.278000 585.591600 -1493.258580 2221.222138 # mm3->(c3_c1o2@co2,o2_c1cu1@co2)|CuPW
bond_coeff 1 class2 1.394000 509.335200 -1298.804760 1931.972080 # mm3->(c3_c2h1@ph,c3_c2h1@ph)|benzene
bond_coeff 2 class2 1.485000 360.635220 -919.619811 1367.934469 # mm3->(c3_c1o2@co2,c3_c3@ph)|CuPW
angle_style hybrid class2/p6 cosine/buck6d
angle_coeff 2 class2/p6 117.711000 57.408120 -46.049402 10.553745 -7.558563 13.610890 # mm3->(c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
angle_coeff 2 class2/p6 bb 0.0 1.0 1.0
angle_coeff 2 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 6 class2/p6 126.814000 13.740540 -11.021849 2.526022 -1.809130 3.257744 # mm3->(c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
angle_coeff 6 class2/p6 bb 0.0 1.0 1.0
angle_coeff 6 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 7 class2/p6 123.490000 111.075360 -89.098091 20.419778 -14.624589 26.334856 # mm3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 7 class2/p6 bb 14.244120 1.278000 1.278000
angle_coeff 7 class2/p6 ba 3.309240 3.309240 1.278000 1.278000
angle_coeff 1 class2/p6 127.050000 53.307540 -42.760159 9.799907 -7.018666 12.638684 # mm3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
angle_coeff 1 class2/p6 bb 71.796120 1.394000 1.394000
angle_coeff 1 class2/p6 ba 6.762360 6.762360 1.394000 1.394000
angle_coeff 4 class2/p6 84.336000 29.351520 -23.544055 5.395900 -3.864529 6.958951 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
angle_coeff 4 class2/p6 bb 0.0 1.0 1.0
angle_coeff 4 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 3 class2/p6 120.350000 36.185820 -29.026127 6.652298 -4.764358 8.579296 # mm3->(c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
angle_coeff 3 class2/p6 bb 93.378120 1.394000 1.094000
angle_coeff 3 class2/p6 ba -25.179000 53.523360 1.394000 1.094000
angle_coeff 8 class2/p6 115.098000 79.493700 -63.765149 14.613896 -10.466432 18.847160 # mm3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 8 class2/p6 bb 0.0 1.0 1.0
angle_coeff 8 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 5 cosine/buck6d 1.978350 4 180.000000 #1.0 # fourier->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_style opls
dihedral_coeff 3 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 9 0.000000 4.528000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 8 0.000000 0.000000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
dihedral_coeff 5 0.000000 1.741000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
dihedral_coeff 2 0.000000 6.316000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
dihedral_coeff 1 0.000000 4.379000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
dihedral_coeff 6 0.000000 0.000000 0.000000 0.609000 # cos4->(o2_c1cu1@co2,cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_coeff 4 0.000000 0.000000 0.000000 0.000000 # cos3->(h1_c1@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 10 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 7 0.000000 0.000000 0.000000 0.000000 # cos3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 # harm->(c3_c3@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c1o2@co2)|CuPW
improper_coeff 3 41.005800 0.000000 # harm->(c3_c1o2@co2,c3_c3@ph,o2_c1cu1@co2,o2_c1cu1@co2)|CuPW
improper_coeff 2 4.100580 0.000000 # harm->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
special_bonds lj 0.0 0.0 1.0 coul 1.0 1.0 1.0
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
17 = max # of special neighbors
# ------------------------ MOF-FF FORCE FIELD END --------------------------
run 0
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Ewald initialization ...
WARNING: Using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.267593
estimated absolute RMS force accuracy = 0.000333665
estimated relative force accuracy = 1.00482e-06
KSpace vectors: actual max1d max3d = 1054 8 2456
kxmax kymax kzmax = 8 8 8
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 14
ghost atom cutoff = 14
binsize = 7, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair buck6d/coul/gauss/long, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/3d/newton/tri
bin: standard
WARNING: Inconsistent image flags (../domain.cpp:786)
Per MPI rank memory allocation (min/avg/max) = 34.64 | 34.64 | 34.64 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -16541.109 343.7619 -16197.347 -629.64956
Loop time of 9.53674e-07 on 1 procs for 0 steps with 624 atoms
0.0% CPU use with 1 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Bond | 0 | 0 | 0 | 0.0 | 0.00
Kspace | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 9.537e-07 | | |100.00
Nlocal: 624 ave 624 max 624 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 4464 ave 4464 max 4464 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 115368 ave 115368 max 115368 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 115368
Ave neighs/atom = 184.885
Ave special neighs/atom = 7.46154
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00


@ -0,0 +1,175 @@
LAMMPS (17 Jan 2018)
clear
units real
boundary p p p
atom_style full
read_data hkust1.data
triclinic box = (0 0 0) to (26.4408 26.4408 26.4408) with tilt (0 0 0)
1 by 2 by 2 MPI processor grid
reading atoms ...
624 atoms
scanning bonds ...
5 = max bonds/atom
scanning angles ...
10 = max angles/atom
scanning dihedrals ...
32 = max dihedrals/atom
scanning impropers ...
2 = max impropers/atom
reading bonds ...
792 bonds
reading angles ...
1536 angles
reading dihedrals ...
2688 dihedrals
reading impropers ...
288 impropers
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
20 = max # of 1-4 neighbors
17 = max # of special neighbors
neighbor 2.0 bin
# ------------------------ MOF-FF FORCE FIELD ------------------------------
kspace_style ewald 1.0e-6
pair_style buck6d/coul/gauss/long 0.9 0.9 12.0000
pair_coeff 1 1 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW
pair_coeff 1 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 1 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 1 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 1 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 1 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c3@ph)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 2 2 10304 3.0612245 457.17971 4.5218516 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene
pair_coeff 2 3 6157.8178 3.4682081 129.19572 0.78772886 0.73006542 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 2 4 23689.598 2.8436019 1636.052 12.696549 0.42066711 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 2 5 10576.399 3.1746032 377.27092 2.7176691 0.61999948 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 2 6 10304 3.0769231 443.36171 4.2093581 0.60800971 # buck6d->(c3_c2h1@ph)|benzene/gaussian->(c3_c2h1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 3 3 3680 4 32.805 0.10690769 0.9771554 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene
pair_coeff 3 4 14157.243 3.1914894 489.18197 2.5231391 0.45538909 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 3 5 6320.6076 3.6144578 103.50278 0.44181916 0.75109952 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 3 6 6157.8178 3.4883721 124.7792 0.72632262 0.73006542 # buck6d->(h1_c1@ph)|benzene/gaussian->(h1_c1@ph)|benzene <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 4 4 54464 2.6548673 5679.4311 33.208515 0.34105936 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW
pair_coeff 4 5 24315.863 2.9411765 1371.5617 7.9168726 0.42457748 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 4 6 23689.598 2.8571429 1590.0769 11.87959 0.42066711 # buck6d->(cu5_cu1o4@cu2)|CuPW/gaussian->(cu5_cu1o4@cu2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 5 5 10856 3.2967033 308.7755 1.6022517 0.63272774 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW
pair_coeff 5 6 10576.399 3.1914894 365.45138 2.5231391 0.61999948 # buck6d->(o2_c1cu1@co2)|CuPW/gaussian->(o2_c1cu1@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
pair_coeff 6 6 10304 3.0927835 429.89352 3.9170177 0.60800971 # buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW <--> buck6d->(c3_c1o2@co2)|CuPW/gaussian->(c3_c1o2@co2)|CuPW
bond_style hybrid class2 morse
bond_coeff 5 morse 50.000000 1.451345 1.914000 # morse->(cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
bond_coeff 4 class2 2.536000 75.465060 -192.435903 286.248406 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2)|CuPW
bond_coeff 3 class2 1.094000 390.634200 -996.117210 1481.724350 # mm3->(c3_c2h1@ph,h1_c1@ph)|benzene
bond_coeff 6 class2 1.278000 585.591600 -1493.258580 2221.222138 # mm3->(c3_c1o2@co2,o2_c1cu1@co2)|CuPW
bond_coeff 1 class2 1.394000 509.335200 -1298.804760 1931.972080 # mm3->(c3_c2h1@ph,c3_c2h1@ph)|benzene
bond_coeff 2 class2 1.485000 360.635220 -919.619811 1367.934469 # mm3->(c3_c1o2@co2,c3_c3@ph)|CuPW
angle_style hybrid class2/p6 cosine/buck6d
angle_coeff 2 class2/p6 117.711000 57.408120 -46.049402 10.553745 -7.558563 13.610890 # mm3->(c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
angle_coeff 2 class2/p6 bb 0.0 1.0 1.0
angle_coeff 2 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 6 class2/p6 126.814000 13.740540 -11.021849 2.526022 -1.809130 3.257744 # mm3->(c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
angle_coeff 6 class2/p6 bb 0.0 1.0 1.0
angle_coeff 6 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 7 class2/p6 123.490000 111.075360 -89.098091 20.419778 -14.624589 26.334856 # mm3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 7 class2/p6 bb 14.244120 1.278000 1.278000
angle_coeff 7 class2/p6 ba 3.309240 3.309240 1.278000 1.278000
angle_coeff 1 class2/p6 127.050000 53.307540 -42.760159 9.799907 -7.018666 12.638684 # mm3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
angle_coeff 1 class2/p6 bb 71.796120 1.394000 1.394000
angle_coeff 1 class2/p6 ba 6.762360 6.762360 1.394000 1.394000
angle_coeff 4 class2/p6 84.336000 29.351520 -23.544055 5.395900 -3.864529 6.958951 # mm3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
angle_coeff 4 class2/p6 bb 0.0 1.0 1.0
angle_coeff 4 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 3 class2/p6 120.350000 36.185820 -29.026127 6.652298 -4.764358 8.579296 # mm3->(c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
angle_coeff 3 class2/p6 bb 93.378120 1.394000 1.094000
angle_coeff 3 class2/p6 ba -25.179000 53.523360 1.394000 1.094000
angle_coeff 8 class2/p6 115.098000 79.493700 -63.765149 14.613896 -10.466432 18.847160 # mm3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2)|CuPW
angle_coeff 8 class2/p6 bb 0.0 1.0 1.0
angle_coeff 8 class2/p6 ba 0.0 0.0 1.0 1.0
angle_coeff 5 cosine/buck6d 1.978350 4 180.000000 #1.0 # fourier->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_style opls
dihedral_coeff 3 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 9 0.000000 4.528000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 8 0.000000 0.000000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
dihedral_coeff 5 0.000000 1.741000 0.000000 0.000000 # cos3->(o2_c1cu1@co2,c3_c1o2@co2,c3_c3@ph,c3_c2h1@ph)|CuPW
dihedral_coeff 2 0.000000 6.316000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
dihedral_coeff 1 0.000000 4.379000 0.000000 0.000000 # cos3->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph)|benzene
dihedral_coeff 6 0.000000 0.000000 0.000000 0.609000 # cos4->(o2_c1cu1@co2,cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2)|CuPW
dihedral_coeff 4 0.000000 0.000000 0.000000 0.000000 # cos3->(h1_c1@ph,c3_c2h1@ph,c3_c3@ph,c3_c1o2@co2)|CuPW
dihedral_coeff 10 0.000000 0.000000 0.000000 0.000000 # cos3->(c3_c3@ph,c3_c1o2@co2,o2_c1cu1@co2,cu5_cu1o4@cu2)|CuPW
dihedral_coeff 7 0.000000 0.000000 0.000000 0.000000 # cos3->(cu5_cu1o4@cu2,cu5_cu1o4@cu2,o2_c1cu1@co2,c3_c1o2@co2)|CuPW
improper_style inversion/harmonic
improper_coeff 1 18.776340 0.000000 # harm->(c3_c3@ph,c3_c2h1@ph,c3_c2h1@ph,c3_c1o2@co2)|CuPW
improper_coeff 3 41.005800 0.000000 # harm->(c3_c1o2@co2,c3_c3@ph,o2_c1cu1@co2,o2_c1cu1@co2)|CuPW
improper_coeff 2 4.100580 0.000000 # harm->(c3_c2h1@ph,c3_c2h1@ph,c3_c2h1@ph,h1_c1@ph)|benzene
special_bonds lj 0.0 0.0 1.0 coul 1.0 1.0 1.0
5 = max # of 1-2 neighbors
8 = max # of 1-3 neighbors
17 = max # of special neighbors
# ------------------------ MOF-FF FORCE FIELD END --------------------------
run 0
WARNING: No fixes defined, atoms won't move (../verlet.cpp:55)
Ewald initialization ...
WARNING: Using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.267593
estimated absolute RMS force accuracy = 0.000333665
estimated relative force accuracy = 1.00482e-06
KSpace vectors: actual max1d max3d = 1054 8 2456
kxmax kymax kzmax = 8 8 8
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 14
ghost atom cutoff = 14
binsize = 7, bins = 4 4 4
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair buck6d/coul/gauss/long, perpetual
attributes: half, newton on
pair build: half/bin/newton/tri
stencil: half/bin/3d/newton/tri
bin: standard
WARNING: Inconsistent image flags (../domain.cpp:786)
Per MPI rank memory allocation (min/avg/max) = 34.1 | 34.1 | 34.1 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0 -16541.109 343.7619 -16197.347 -629.64956
Loop time of 6.13928e-06 on 4 procs for 0 steps with 624 atoms
0.0% CPU use with 4 MPI tasks x no OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0 | 0 | 0 | 0.0 | 0.00
Bond | 0 | 0 | 0 | 0.0 | 0.00
Kspace | 0 | 0 | 0 | 0.0 | 0.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0 | 0 | 0 | 0.0 | 0.00
Output | 0 | 0 | 0 | 0.0 | 0.00
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 6.139e-06 | | |100.00
Nlocal: 156 ave 156 max 156 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 2718 ave 2718 max 2718 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 28842 ave 28870 max 28814 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Total # of neighbors = 115368
Ave neighs/atom = 184.885
Ave special neighs/atom = 7.46154
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:00:00


@ -67,6 +67,15 @@ variable tfac equal 5.0/3.0 # (3 trans + 2 rot)/(3 trans)
fix mygcmc all gcmc 100 100 0 0 54341 ${temp} ${mu} ${disp} mol &
co2mol tfac_insert ${tfac} group co2 rigid myrigidnvt
# atom counts
variable carbon atom "type==1"
variable oxygen atom "type==2"
group carbon dynamic all var carbon
group oxygen dynamic all var oxygen
variable nC equal count(carbon)
variable nO equal count(oxygen)
# output
variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+0.1)
@ -74,7 +83,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+0.1)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+0.1)
variable racc equal f_mygcmc[8]/(f_mygcmc[7]+0.1)
compute_modify thermo_temp dynamic/dof yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc v_nC v_nO
thermo 1000
# run
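
The hunk above tracks per-species atom counts during the GCMC run: an atom-style variable flags atoms of one type, a dynamic group is rebuilt from that variable, and an equal-style count() variable exposes the group size to the thermo output. Below is a minimal standalone sketch of the same pattern; the two-type LJ test system, the type numbering, and the rebuild interval are illustrative assumptions, not part of this patch.

# sketch: count atoms of one type on the fly (assumed two-type LJ system)
units           lj
atom_style      atomic
lattice         fcc 0.8442
region          box block 0 5 0 5 0 5
create_box      2 box
create_atoms    1 box
set             type 1 type/fraction 2 0.3 12345   # relabel ~30% of the atoms as type 2
mass            * 1.0
pair_style      lj/cut 2.5
pair_coeff      * * 1.0 1.0 2.5
variable        two atom "type==2"                 # 1 for type-2 atoms, 0 otherwise
group           two dynamic all var two every 1    # group membership follows the variable
variable        n2 equal count(two)                # current number of type-2 atoms
thermo_style    custom step temp pe atoms v_n2
thermo          100
run             0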


@ -72,6 +72,15 @@ variable tfac equal 5.0/3.0 # (3 trans + 2 rot)/(3 trans)
fix mygcmc all gcmc 100 100 0 0 54341 ${temp} ${mu} ${disp} mol &
h2omol tfac_insert ${tfac} group h2o shake wshake
# atom counts
variable oxygen atom "type==1"
variable hydrogen atom "type==2"
group oxygen dynamic all var oxygen
group hydrogen dynamic all var hydrogen
variable nO equal count(oxygen)
variable nH equal count(hydrogen)
# output
variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+0.1)
@ -79,7 +88,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+0.1)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+0.1)
variable racc equal f_mygcmc[8]/(f_mygcmc[7]+0.1)
compute_modify thermo_temp dynamic/dof yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc v_nO v_nH
thermo 1000
# run


@ -33,6 +33,12 @@ mass * 1.0
fix mygcmc all gcmc 1 100 100 1 29494 ${temp} ${mu} ${disp}
# atom count
variable type1 atom "type==1"
group type1 dynamic all var type1
variable n1 equal count(type1)
# averaging
variable rho equal density
@ -40,10 +46,11 @@ variable p equal press
variable nugget equal 1.0e-8
variable lambda equal 1.0
variable muex equal ${mu}-${temp}*ln(density*${lambda}+${nugget})
fix ave all ave/time 10 100 1000 v_rho v_p v_muex ave one file rho_vs_p.dat
fix ave all ave/time 10 100 1000 v_rho v_p v_muex v_n1 ave one file rho_vs_p.dat
variable rhoav equal f_ave[1]
variable pav equal f_ave[2]
variable muexav equal f_ave[3]
variable n1av equal f_ave[4]
# output
@ -51,7 +58,7 @@ variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+${nugget})
variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+${nugget})
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+${nugget})
compute_modify thermo_temp dynamic yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav v_n1av
thermo 1000
# run
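
The hunk above also feeds the count into fix ave/time, so a time-averaged particle number is written next to density and pressure and can be read back through an f_ID[index] reference. A short sketch of that averaging step, assumed to continue the two-type LJ system from the previous sketch:

# sketch: time-average equal-style variables and reuse the running averages
variable        p equal press
fix             avn all ave/time 10 100 1000 v_p v_n2 ave one file n2_vs_p.dat
variable        pav  equal f_avn[1]                # running average of the pressure
variable        n2av equal f_avn[2]                # running average of the type-2 count
thermo_style    custom step temp press v_n2 v_pav v_n2av
thermo          1000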


@ -1,4 +1,4 @@
LAMMPS (6 Jul 2017)
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# GCMC for CO2 molecular fluid, rigid/small/nvt dynamics
# Rigid CO2 TraPPE model
@ -46,6 +46,7 @@ Read molecule co2mol:
0 impropers with 0 types
create_atoms 0 box mol co2mol 464563 units box
Created 24 atoms
Time spent = 0.00196958 secs
# rigid CO2 TraPPE model
@ -87,6 +88,17 @@ fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 ${disp} mol
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol co2mol tfac_insert ${tfac} group co2 rigid myrigidnvt
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol co2mol tfac_insert 1.66666666666667 group co2 rigid myrigidnvt
# atom counts
variable carbon atom "type==1"
variable oxygen atom "type==2"
group carbon dynamic all var carbon
dynamic group carbon defined
group oxygen dynamic all var oxygen
dynamic group oxygen defined
variable nC equal count(carbon)
variable nO equal count(oxygen)
# output
variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+0.1)
@ -94,7 +106,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+0.1)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+0.1)
variable racc equal f_mygcmc[8]/(f_mygcmc[7]+0.1)
compute_modify thermo_temp dynamic/dof yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc v_nC v_nO
thermo 1000
# run
@ -124,45 +136,45 @@ Neighbor list info ...
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 15.62 | 15.62 | 15.62 Mbytes
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc
0 364.27579 4238.8631 -9.6809388 13.391989 0.5846359 24 0 0 0 0
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc v_nC v_nO
0 364.27579 4238.8631 -9.6809388 13.391989 0.5846359 24 0 0 0 0 8 16
WARNING: Using kspace solver on system with no charge (../kspace.cpp:289)
1000 420.43475 1722.4052 -9.6956123 15.456579 0.5846359 24 0.20879341 0.20713005 0 0
2000 302.29516 -547.83641 -22.017674 14.11699 0.73079488 30 0.1742478 0.1678018 0 0
3000 316.6934 -1080.2672 -8.2218891 10.069364 0.51155641 21 0.13544917 0.13720634 0 0
4000 246.81618 -679.83642 -14.577244 10.29997 0.65771539 27 0.1568939 0.15860229 0 0
5000 260.22849 -896.29914 -16.097593 10.859684 0.65771539 27 0.13138744 0.13547049 0 0
6000 291.70796 -1521.99 -22.303136 13.622574 0.73079488 30 0.12615476 0.12717694 0 0
7000 236.02638 -599.92186 -27.580831 13.367447 0.87695385 36 0.119703 0.12145398 0 0
8000 321.45341 688.10577 -10.09204 11.817696 0.5846359 24 0.10917411 0.11032646 0 0
9000 502.85382 -302.31056 -0.22330142 0.99927447 0.073079488 3 0.1254105 0.12905828 0 0
10000 249.98239 -510.0091 -32.815145 15.399767 0.95003334 39 0.1274504 0.12875623 0 0
11000 247.59424 -1129.0274 -25.320205 12.792544 0.80387436 33 0.11739076 0.11916784 0 0
12000 0 -20.39554 -0.14872889 -0 0 0 0.1254933 0.12920375 0 0
13000 1272.2738 -474.79484 -0.29450485 8.8489483 0.14615898 6 0.13767133 0.14112496 0 0
14000 516.54246 -36.296516 -5.0012009 11.291243 0.36539744 15 0.15632744 0.15955377 0 0
15000 307.09233 1951.9301 -14.820362 12.815375 0.65771539 27 0.15393544 0.15716192 0 0
16000 198.31989 -559.48443 -30.459487 11.231925 0.87695385 36 0.1482565 0.15025652 0 0
17000 246.99311 657.85683 -18.579206 11.53442 0.73079488 30 0.14143958 0.14375423 0 0
18000 467.13468 167.03738 -1.0945268 5.569759 0.21923846 9 0.13847359 0.14098533 0 0
19000 359.54027 -1413.5407 -12.156233 13.217895 0.5846359 24 0.15169146 0.15294205 0 0
20000 227.79597 -1204.5652 -23.24144 10.637925 0.73079488 30 0.14917199 0.15022946 0 0
Loop time of 20.153 on 1 procs for 20000 steps with 30 atoms
1000 420.43475 1722.4052 -9.6956123 15.456579 0.5846359 24 0.20879341 0.20713005 0 0 8 16
2000 302.29516 -547.83641 -22.017674 14.11699 0.73079488 30 0.1742478 0.1678018 0 0 10 20
3000 316.6934 -1080.2672 -8.2218891 10.069364 0.51155641 21 0.13544917 0.13720634 0 0 7 14
4000 246.81618 -679.83642 -14.577244 10.29997 0.65771539 27 0.1568939 0.15860229 0 0 9 18
5000 260.22849 -896.29914 -16.097593 10.859684 0.65771539 27 0.13138744 0.13547049 0 0 9 18
6000 291.70796 -1521.99 -22.303136 13.622574 0.73079488 30 0.12615476 0.12717694 0 0 10 20
7000 236.02638 -599.92186 -27.580831 13.367447 0.87695385 36 0.119703 0.12145398 0 0 12 24
8000 321.45341 688.10577 -10.09204 11.817696 0.5846359 24 0.10917411 0.11032646 0 0 8 16
9000 502.85382 -302.31056 -0.22330142 0.99927447 0.073079488 3 0.1254105 0.12905828 0 0 1 2
10000 249.98239 -510.0091 -32.815145 15.399767 0.95003334 39 0.1274504 0.12875623 0 0 13 26
11000 247.59424 -1129.0274 -25.320205 12.792544 0.80387436 33 0.11739076 0.11916784 0 0 11 22
12000 0 -20.39554 -0.14872889 -0 0 0 0.1254933 0.12920375 0 0 0 0
13000 1272.2738 -474.79484 -0.29450485 8.8489483 0.14615898 6 0.13767133 0.14112496 0 0 2 4
14000 516.54246 -36.296516 -5.0012009 11.291243 0.36539744 15 0.15632744 0.15955377 0 0 5 10
15000 307.09233 1951.9301 -14.820362 12.815375 0.65771539 27 0.15393544 0.15716192 0 0 9 18
16000 198.31989 -559.48443 -30.459487 11.231925 0.87695385 36 0.1482565 0.15025652 0 0 12 24
17000 246.99311 657.85683 -18.579206 11.53442 0.73079488 30 0.14143958 0.14375423 0 0 10 20
18000 467.13468 167.03738 -1.0945268 5.569759 0.21923846 9 0.13847359 0.14098533 0 0 3 6
19000 359.54027 -1413.5407 -12.156233 13.217895 0.5846359 24 0.15169146 0.15294205 0 0 8 16
20000 227.79597 -1204.5652 -23.24144 10.637925 0.73079488 30 0.14917199 0.15022946 0 0 10 20
Loop time of 20.6928 on 1 procs for 20000 steps with 30 atoms
Performance: 85.744 ns/day, 0.280 hours/ns, 992.408 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 83.507 ns/day, 0.287 hours/ns, 966.519 timesteps/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.5352 | 2.5352 | 2.5352 | 0.0 | 12.58
Bond | 0.026112 | 0.026112 | 0.026112 | 0.0 | 0.13
Kspace | 0.25 | 0.25 | 0.25 | 0.0 | 1.24
Neigh | 0.10364 | 0.10364 | 0.10364 | 0.0 | 0.51
Comm | 0.22907 | 0.22907 | 0.22907 | 0.0 | 1.14
Output | 0.0013065 | 0.0013065 | 0.0013065 | 0.0 | 0.01
Modify | 16.957 | 16.957 | 16.957 | 0.0 | 84.14
Other | | 0.05061 | | | 0.25
Pair | 2.5462 | 2.5462 | 2.5462 | 0.0 | 12.30
Bond | 0.029783 | 0.029783 | 0.029783 | 0.0 | 0.14
Kspace | 0.26167 | 0.26167 | 0.26167 | 0.0 | 1.26
Neigh | 0.10705 | 0.10705 | 0.10705 | 0.0 | 0.52
Comm | 0.23409 | 0.23409 | 0.23409 | 0.0 | 1.13
Output | 0.0013416 | 0.0013416 | 0.0013416 | 0.0 | 0.01
Modify | 17.458 | 17.458 | 17.458 | 0.0 | 84.37
Other | | 0.05433 | | | 0.26
Nlocal: 30 ave 30 max 30 min
Histogram: 1 0 0 0 0 0 0 0 0 0


@ -1,4 +1,4 @@
LAMMPS (6 Jul 2017)
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# GCMC for CO2 molecular fluid, rigid/small/nvt dynamics
# Rigid CO2 TraPPE model
@ -46,6 +46,7 @@ Read molecule co2mol:
0 impropers with 0 types
create_atoms 0 box mol co2mol 464563 units box
Created 24 atoms
Time spent = 0.00261331 secs
# rigid CO2 TraPPE model
@ -87,6 +88,17 @@ fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 ${disp} mol
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol co2mol tfac_insert ${tfac} group co2 rigid myrigidnvt
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol co2mol tfac_insert 1.66666666666667 group co2 rigid myrigidnvt
# atom counts
variable carbon atom "type==1"
variable oxygen atom "type==2"
group carbon dynamic all var carbon
dynamic group carbon defined
group oxygen dynamic all var oxygen
dynamic group oxygen defined
variable nC equal count(carbon)
variable nO equal count(oxygen)
# output
variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+0.1)
@ -94,7 +106,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+0.1)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+0.1)
variable racc equal f_mygcmc[8]/(f_mygcmc[7]+0.1)
compute_modify thermo_temp dynamic/dof yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc v_nC v_nO
thermo 1000
# run
@ -124,45 +136,45 @@ Neighbor list info ...
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 15.41 | 15.41 | 15.41 Mbytes
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc
0 386.52184 23582.465 -3.2433417 14.209828 0.5846359 24 0 0 0 0
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc v_nC v_nO
0 386.52184 23582.465 -3.2433417 14.209828 0.5846359 24 0 0 0 0 8 16
WARNING: Using kspace solver on system with no charge (../kspace.cpp:289)
1000 335.66829 -3.7743052 -4.6268612 7.3374649 0.36539744 15 0.20601899 0.20787963 0 0
2000 459.73529 238.91592 -0.42937831 5.4815343 0.21923846 9 0.30392058 0.30105616 0 0
3000 255.47773 -479.67802 -36.850434 15.738299 0.95003334 39 0.22220744 0.2197582 0 0
4000 182.70803 -1059.2262 -43.044833 12.163134 1.0231128 42 0.16781689 0.16716177 0 0
5000 234.00907 -1821.0444 -46.04795 15.578317 1.0231128 42 0.13498091 0.13704201 0 0
6000 163.42759 -774.67294 -49.686261 11.691518 1.0961923 45 0.11401677 0.11296973 0 0
7000 171.64616 -355.23516 -49.323434 12.27947 1.0961923 45 0.098302308 0.098552065 0 0
8000 251.29791 -905.47863 -37.841209 15.480807 0.95003334 39 0.086856972 0.08638658 0 0
9000 143.69498 -849.95393 -49.073188 10.279858 1.0961923 45 0.078261061 0.077955243 0 0
10000 239.35727 -1158.1879 -43.562047 15.934355 1.0231128 42 0.070789792 0.070807529 0 0
11000 169.51213 -1574.7885 -51.125228 12.126803 1.0961923 45 0.065008734 0.06498871 0 0
12000 181.39739 160.11631 -46.850937 12.977068 1.0961923 45 0.059648717 0.059514803 0 0
13000 164.14601 -1107.7629 -50.726722 11.742914 1.0961923 45 0.055207333 0.055097701 0 0
14000 287.26285 418.51463 -41.664766 19.123497 1.0231128 42 0.051346789 0.051222285 0 0
15000 256.94593 -532.36615 -41.651618 17.105257 1.0231128 42 0.047870301 0.047861685 0 0
16000 166.92132 151.15933 -39.957018 11.11219 1.0231128 42 0.045205599 0.045042211 0 0
17000 163.22452 -1299.8119 -42.677558 10.866089 1.0231128 42 0.043122086 0.042993687 0 0
18000 158.01154 475.77329 -48.803162 11.304057 1.0961923 45 0.041016683 0.040647229 0 0
19000 138.49297 -1585.1508 -47.517099 9.9077098 1.0961923 45 0.038929287 0.038436764 0 0
20000 173.84439 -1362.6301 -53.002743 12.436731 1.0961923 45 0.036973919 0.036523816 0 0
Loop time of 31.8386 on 4 procs for 20000 steps with 45 atoms
1000 335.66829 -3.7743052 -4.6268612 7.3374649 0.36539744 15 0.20601899 0.20787963 0 0 5 10
2000 459.73529 238.91592 -0.42937831 5.4815343 0.21923846 9 0.30392058 0.30105616 0 0 3 6
3000 255.47773 -479.67802 -36.850434 15.738299 0.95003334 39 0.22220744 0.2197582 0 0 13 26
4000 182.70803 -1059.2262 -43.044833 12.163134 1.0231128 42 0.16781689 0.16716177 0 0 14 28
5000 234.00907 -1821.0444 -46.04795 15.578317 1.0231128 42 0.13498091 0.13704201 0 0 14 28
6000 163.42759 -774.67294 -49.686261 11.691518 1.0961923 45 0.11401677 0.11296973 0 0 15 30
7000 171.64616 -355.23516 -49.323434 12.27947 1.0961923 45 0.098302308 0.098552065 0 0 15 30
8000 251.29791 -905.47863 -37.841209 15.480807 0.95003334 39 0.086856972 0.08638658 0 0 13 26
9000 143.69498 -849.95393 -49.073188 10.279858 1.0961923 45 0.078261061 0.077955243 0 0 15 30
10000 239.35727 -1158.1879 -43.562047 15.934355 1.0231128 42 0.070789792 0.070807529 0 0 14 28
11000 169.51213 -1574.7885 -51.125228 12.126803 1.0961923 45 0.065008734 0.06498871 0 0 15 30
12000 181.39739 160.11631 -46.850937 12.977068 1.0961923 45 0.059648717 0.059514803 0 0 15 30
13000 164.14601 -1107.7629 -50.726722 11.742914 1.0961923 45 0.055207333 0.055097701 0 0 15 30
14000 287.26285 418.51463 -41.664766 19.123497 1.0231128 42 0.051346789 0.051222285 0 0 14 28
15000 256.94593 -532.36615 -41.651618 17.105257 1.0231128 42 0.047870301 0.047861685 0 0 14 28
16000 166.92132 151.15933 -39.957018 11.11219 1.0231128 42 0.045205599 0.045042211 0 0 14 28
17000 163.22452 -1299.8119 -42.677558 10.866089 1.0231128 42 0.043122086 0.042993687 0 0 14 28
18000 158.01154 475.77329 -48.803162 11.304057 1.0961923 45 0.041016683 0.040647229 0 0 15 30
19000 138.49297 -1585.1508 -47.517099 9.9077098 1.0961923 45 0.038929287 0.038436764 0 0 15 30
20000 173.84439 -1362.6301 -53.002743 12.436731 1.0961923 45 0.036973919 0.036523816 0 0 15 30
Loop time of 32.4481 on 4 procs for 20000 steps with 45 atoms
Performance: 54.274 ns/day, 0.442 hours/ns, 628.168 timesteps/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 53.254 ns/day, 0.451 hours/ns, 616.369 timesteps/s
98.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.1546 | 1.6687 | 2.1338 | 29.5 | 5.24
Bond | 0.019769 | 0.020369 | 0.02132 | 0.4 | 0.06
Kspace | 0.53392 | 0.99911 | 1.5116 | 37.8 | 3.14
Neigh | 0.06737 | 0.067842 | 0.068412 | 0.2 | 0.21
Comm | 1.9408 | 1.9582 | 1.9733 | 1.1 | 6.15
Output | 0.0019503 | 0.0020472 | 0.0022476 | 0.3 | 0.01
Modify | 26.974 | 26.99 | 27.001 | 0.2 | 84.77
Other | | 0.1322 | | | 0.42
Pair | 1.1687 | 1.6702 | 2.1864 | 30.8 | 5.15
Bond | 0.018828 | 0.020035 | 0.020975 | 0.6 | 0.06
Kspace | 0.57506 | 1.0931 | 1.5898 | 37.7 | 3.37
Neigh | 0.068863 | 0.069524 | 0.070128 | 0.2 | 0.21
Comm | 2.0735 | 2.0865 | 2.0979 | 0.7 | 6.43
Output | 0.0025017 | 0.0025966 | 0.0027781 | 0.2 | 0.01
Modify | 27.335 | 27.344 | 27.363 | 0.2 | 84.27
Other | | 0.1621 | | | 0.50
Nlocal: 11.25 ave 14 max 8 min
Histogram: 1 0 0 0 0 1 1 0 0 1
@ -177,4 +189,4 @@ Ave special neighs/atom = 2
Neighbor list builds = 20394
Dangerous builds = 0
Total wall time: 0:00:31
Total wall time: 0:00:32


@ -1,4 +1,4 @@
LAMMPS (6 Jul 2017)
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# fix gcmc example with fix shake
@ -51,6 +51,7 @@ Read molecule h2omol:
0 impropers with 0 types
create_atoms 0 box mol h2omol 464563 units box
Created 24 atoms
Time spent = 0.00201297 secs
# rigid SPC/E water model
@ -100,9 +101,9 @@ Per MPI rank memory allocation (min/avg/max) = 11.88 | 11.88 | 11.88 Mbytes
Step Temp E_pair E_mol TotEng Press
0 338 -4.1890564 9.2628112e-06 18.98377 739.06991
100 338 -30.182886 0.85607237 -6.1539961 -2535.3207
Loop time of 0.0525794 on 1 procs for 100 steps with 24 atoms
Loop time of 0.0507543 on 1 procs for 100 steps with 24 atoms
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = max iterations
@ -116,14 +117,14 @@ Minimization stats:
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.044199 | 0.044199 | 0.044199 | 0.0 | 84.06
Bond | 0.00049019 | 0.00049019 | 0.00049019 | 0.0 | 0.93
Kspace | 0.0031631 | 0.0031631 | 0.0031631 | 0.0 | 6.02
Neigh | 0.00046444 | 0.00046444 | 0.00046444 | 0.0 | 0.88
Comm | 0.0034101 | 0.0034101 | 0.0034101 | 0.0 | 6.49
Output | 1.9073e-05 | 1.9073e-05 | 1.9073e-05 | 0.0 | 0.04
Pair | 0.042597 | 0.042597 | 0.042597 | 0.0 | 83.93
Bond | 0.00047708 | 0.00047708 | 0.00047708 | 0.0 | 0.94
Kspace | 0.0031135 | 0.0031135 | 0.0031135 | 0.0 | 6.13
Neigh | 0.00045919 | 0.00045919 | 0.00045919 | 0.0 | 0.90
Comm | 0.0032997 | 0.0032997 | 0.0032997 | 0.0 | 6.50
Output | 1.359e-05 | 1.359e-05 | 1.359e-05 | 0.0 | 0.03
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.0008333 | | | 1.58
Other | | 0.0007946 | | | 1.57
Nlocal: 24 ave 24 max 24 min
Histogram: 1 0 0 0 0 0 0 0 0 0
@ -164,22 +165,22 @@ Per MPI rank memory allocation (min/avg/max) = 11.63 | 11.63 | 11.63 Mbytes
Step Temp E_pair E_mol TotEng Press
0 518.26667 -30.182886 0 -7.0100684 993.1985
1000 326.9865 -62.258445 0 -47.638175 -5.3440813
Loop time of 0.14263 on 1 procs for 1000 steps with 24 atoms
Loop time of 0.141449 on 1 procs for 1000 steps with 24 atoms
Performance: 605.764 ns/day, 0.040 hours/ns, 7011.155 timesteps/s
99.5% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 610.819 ns/day, 0.039 hours/ns, 7069.663 timesteps/s
99.7% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.10849 | 0.10849 | 0.10849 | 0.0 | 76.07
Bond | 0.00015426 | 0.00015426 | 0.00015426 | 0.0 | 0.11
Kspace | 0.01205 | 0.01205 | 0.01205 | 0.0 | 8.45
Neigh | 0.0046577 | 0.0046577 | 0.0046577 | 0.0 | 3.27
Comm | 0.011531 | 0.011531 | 0.011531 | 0.0 | 8.08
Output | 1.6212e-05 | 1.6212e-05 | 1.6212e-05 | 0.0 | 0.01
Modify | 0.0037699 | 0.0037699 | 0.0037699 | 0.0 | 2.64
Other | | 0.001957 | | | 1.37
Pair | 0.10788 | 0.10788 | 0.10788 | 0.0 | 76.27
Bond | 0.00018954 | 0.00018954 | 0.00018954 | 0.0 | 0.13
Kspace | 0.011867 | 0.011867 | 0.011867 | 0.0 | 8.39
Neigh | 0.0045254 | 0.0045254 | 0.0045254 | 0.0 | 3.20
Comm | 0.011277 | 0.011277 | 0.011277 | 0.0 | 7.97
Output | 1.5497e-05 | 1.5497e-05 | 1.5497e-05 | 0.0 | 0.01
Modify | 0.00383 | 0.00383 | 0.00383 | 0.0 | 2.71
Other | | 0.001868 | | | 1.32
Nlocal: 24 ave 24 max 24 min
Histogram: 1 0 0 0 0 0 0 0 0 0
@@ -201,6 +202,17 @@ fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 ${disp} mol
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol h2omol tfac_insert ${tfac} group h2o shake wshake
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol h2omol tfac_insert 1.66666666666667 group h2o shake wshake
# atom counts
variable oxygen atom "type==1"
variable hydrogen atom "type==2"
group oxygen dynamic all var oxygen
dynamic group oxygen defined
group hydrogen dynamic all var hydrogen
dynamic group hydrogen defined
variable nO equal count(oxygen)
variable nH equal count(hydrogen)
# output
variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+0.1)
@@ -208,7 +220,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+0.1)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+0.1)
variable racc equal f_mygcmc[8]/(f_mygcmc[7]+0.1)
compute_modify thermo_temp dynamic/dof yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc v_nO v_nH
thermo 1000
# run
@@ -226,44 +238,44 @@ WARNING: Fix gcmc using full_energy option (../fix_gcmc.cpp:445)
0 atoms in group FixGCMC:rotation_gas_atoms:mygcmc
WARNING: Neighbor exclusions used with KSpace solver may give inconsistent Coulombic energies (../neighbor.cpp:472)
Per MPI rank memory allocation (min/avg/max) = 11.63 | 11.63 | 11.63 Mbytes
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc
1000 326.9865 -4.3509713 -62.258445 14.62027 0.23910963 24 0 0 0 0
2000 116.99793 -5344.1527 -286.61595 17.088682 0.74721761 75 0.048183096 0.013941446 0 0
3000 106.86746 -3920.4926 -361.60598 18.794545 0.89666113 90 0.035637919 0.012768883 0 0
4000 75.002668 540.46846 -414.8511 14.531966 0.98632724 99 0.025963651 0.0093451705 0 0
5000 79.924788 -2131.1173 -437.21216 15.962121 1.0162159 102 0.019879728 0.0070418993 0 0
6000 95.552773 -3647.0233 -438.24276 19.083253 1.0162159 102 0.015753613 0.0056885133 0 0
7000 79.501736 -2071.5369 -440.77351 15.877631 1.0162159 102 0.01326216 0.0046915318 0 0
8000 62.567091 -3102.9616 -442.21884 12.495541 1.0162159 102 0.011305503 0.0040437885 0 0
9000 68.324047 -3812.7866 -440.46835 13.645287 1.0162159 102 0.0099549538 0.0035157329 0 0
10000 83.857631 -2158.2659 -444.8183 16.747566 1.0162159 102 0.0088200922 0.0031354281 0 0
11000 68.350984 -2084.0789 -440.14081 13.650667 1.0162159 102 0.0081331455 0.0030247424 0 0
12000 76.867315 -1585.6723 -443.36199 15.3515 1.0162159 102 0.0073845932 0.0027532534 0 0
13000 59.74266 -2211.0211 -446.07791 11.931462 1.0162159 102 0.0067756276 0.0025213898 0 0
14000 81.154979 -907.0176 -441.53368 16.207808 1.0162159 102 0.0062527642 0.0023280719 0 0
15000 66.814346 -2804.5134 -455.80704 13.7421 1.0461046 105 0.0059590528 0.0021576214 0 0
16000 71.42983 -3930.4004 -458.43218 14.691394 1.0461046 105 0.0055547473 0.0020163729 0 0
17000 89.624855 -3569.8136 -455.18164 18.433672 1.0461046 105 0.0052173265 0.0018867687 0 0
18000 63.519962 -1882.8157 -456.58939 13.064525 1.0461046 105 0.0049082049 0.0017765986 0 0
19000 71.872698 -2243.5046 -454.93359 14.782481 1.0461046 105 0.0046439115 0.0016748361 0 0
20000 73.660765 -2285.3173 -476.35473 15.589381 1.0759934 108 0.0045124933 0.0015837653 0 0
21000 95.675868 987.92089 -475.46736 20.248603 1.0759934 108 0.004285814 0.0015049513 0 0
Loop time of 226.155 on 1 procs for 20000 steps with 108 atoms
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc v_nO v_nH
1000 326.9865 -4.3509713 -62.258445 14.62027 0.23910963 24 0 0 0 0 8 16
2000 116.99793 -5344.1527 -286.61595 17.088682 0.74721761 75 0.048183096 0.013941446 0 0 25 50
3000 106.86746 -3920.4926 -361.60598 18.794545 0.89666113 90 0.035637919 0.012768883 0 0 30 60
4000 75.002668 540.46846 -414.8511 14.531966 0.98632724 99 0.025963651 0.0093451705 0 0 33 66
5000 79.924788 -2131.1173 -437.21216 15.962121 1.0162159 102 0.019879728 0.0070418993 0 0 34 68
6000 95.552773 -3647.0233 -438.24276 19.083253 1.0162159 102 0.015753613 0.0056885133 0 0 34 68
7000 79.501736 -2071.5369 -440.77351 15.877631 1.0162159 102 0.01326216 0.0046915318 0 0 34 68
8000 62.567091 -3102.9616 -442.21884 12.495541 1.0162159 102 0.011305503 0.0040437885 0 0 34 68
9000 68.324047 -3812.7866 -440.46835 13.645287 1.0162159 102 0.0099549538 0.0035157329 0 0 34 68
10000 83.857631 -2158.2659 -444.8183 16.747566 1.0162159 102 0.0088200922 0.0031354281 0 0 34 68
11000 68.350984 -2084.0789 -440.14081 13.650667 1.0162159 102 0.0081331455 0.0030247424 0 0 34 68
12000 76.867315 -1585.6723 -443.36199 15.3515 1.0162159 102 0.0073845932 0.0027532534 0 0 34 68
13000 59.74266 -2211.0211 -446.07791 11.931462 1.0162159 102 0.0067756276 0.0025213898 0 0 34 68
14000 81.154979 -907.0176 -441.53368 16.207808 1.0162159 102 0.0062527642 0.0023280719 0 0 34 68
15000 66.814346 -2804.5134 -455.80704 13.7421 1.0461046 105 0.0059590528 0.0021576214 0 0 35 70
16000 71.42983 -3930.4004 -458.43218 14.691394 1.0461046 105 0.0055547473 0.0020163729 0 0 35 70
17000 89.624855 -3569.8136 -455.18164 18.433672 1.0461046 105 0.0052173265 0.0018867687 0 0 35 70
18000 63.519962 -1882.8157 -456.58939 13.064525 1.0461046 105 0.0049082049 0.0017765986 0 0 35 70
19000 71.872698 -2243.5046 -454.93359 14.782481 1.0461046 105 0.0046439115 0.0016748361 0 0 35 70
20000 73.660765 -2285.3173 -476.35473 15.589381 1.0759934 108 0.0045124933 0.0015837653 0 0 36 72
21000 95.675868 987.92089 -475.46736 20.248603 1.0759934 108 0.004285814 0.0015049513 0 0 36 72
Loop time of 220.662 on 1 procs for 20000 steps with 108 atoms
Performance: 7.641 ns/day, 3.141 hours/ns, 88.435 timesteps/s
99.2% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 7.831 ns/day, 3.065 hours/ns, 90.637 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 38.053 | 38.053 | 38.053 | 0.0 | 16.83
Bond | 0.089673 | 0.089673 | 0.089673 | 0.0 | 0.04
Kspace | 0.92778 | 0.92778 | 0.92778 | 0.0 | 0.41
Neigh | 1.2619 | 1.2619 | 1.2619 | 0.0 | 0.56
Comm | 0.97483 | 0.97483 | 0.97483 | 0.0 | 0.43
Output | 0.0013306 | 0.0013306 | 0.0013306 | 0.0 | 0.00
Modify | 184.68 | 184.68 | 184.68 | 0.0 | 81.66
Other | | 0.171 | | | 0.08
Pair | 37.459 | 37.459 | 37.459 | 0.0 | 16.98
Bond | 0.087067 | 0.087067 | 0.087067 | 0.0 | 0.04
Kspace | 0.90234 | 0.90234 | 0.90234 | 0.0 | 0.41
Neigh | 1.2299 | 1.2299 | 1.2299 | 0.0 | 0.56
Comm | 0.95437 | 0.95437 | 0.95437 | 0.0 | 0.43
Output | 0.0010636 | 0.0010636 | 0.0010636 | 0.0 | 0.00
Modify | 179.85 | 179.85 | 179.85 | 0.0 | 81.51
Other | | 0.1754 | | | 0.08
Nlocal: 108 ave 108 max 108 min
Histogram: 1 0 0 0 0 0 0 0 0 0
@@ -278,4 +290,4 @@ Ave special neighs/atom = 2
Neighbor list builds = 20439
Dangerous builds = 0
Total wall time: 0:03:46
Total wall time: 0:03:40

View File

@@ -1,4 +1,4 @@
LAMMPS (6 Jul 2017)
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# fix gcmc example with fix shake
@@ -51,6 +51,7 @@ Read molecule h2omol:
0 impropers with 0 types
create_atoms 0 box mol h2omol 464563 units box
Created 24 atoms
Time spent = 0.00174451 secs
# rigid SPC/E water model
@@ -100,9 +101,9 @@ Per MPI rank memory allocation (min/avg/max) = 11.85 | 11.85 | 11.85 Mbytes
Step Temp E_pair E_mol TotEng Press
0 338 -4.9610706 9.2628112e-06 18.211756 730.90791
100 338 -15.742442 0.14954269 7.579918 -637.49568
Loop time of 0.0828406 on 4 procs for 100 steps with 24 atoms
Loop time of 0.0566185 on 4 procs for 100 steps with 24 atoms
98.7% CPU use with 4 MPI tasks x 1 OpenMP threads
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
Minimization stats:
Stopping criterion = max iterations
@@ -116,14 +117,14 @@ Minimization stats:
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.012844 | 0.025471 | 0.047008 | 8.1 | 30.75
Bond | 0.00038934 | 0.00046468 | 0.00054336 | 0.0 | 0.56
Kspace | 0.0061138 | 0.027556 | 0.04014 | 7.8 | 33.26
Pair | 0.0085177 | 0.016083 | 0.026787 | 5.3 | 28.41
Bond | 0.00022459 | 0.00031394 | 0.00037575 | 0.0 | 0.55
Kspace | 0.0049062 | 0.014122 | 0.02044 | 5.0 | 24.94
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.023276 | 0.023636 | 0.023804 | 0.1 | 28.53
Output | 3.171e-05 | 3.3557e-05 | 3.8147e-05 | 0.0 | 0.04
Comm | 0.018515 | 0.02086 | 0.023246 | 1.2 | 36.84
Output | 2.4796e-05 | 2.6047e-05 | 2.9802e-05 | 0.0 | 0.05
Modify | 0 | 0 | 0 | 0.0 | 0.00
Other | | 0.00568 | | | 6.86
Other | | 0.005213 | | | 9.21
Nlocal: 6 ave 8 max 3 min
Histogram: 1 0 0 0 1 0 0 0 0 2
@@ -164,22 +165,22 @@ Per MPI rank memory allocation (min/avg/max) = 11.6 | 11.6 | 11.6 Mbytes
Step Temp E_pair E_mol TotEng Press
0 518.26667 -15.742442 0 7.4303753 -613.0781
1000 369.81793 -54.202686 0 -37.667331 294.98823
Loop time of 0.199641 on 4 procs for 1000 steps with 24 atoms
Loop time of 0.154891 on 4 procs for 1000 steps with 24 atoms
Performance: 432.777 ns/day, 0.055 hours/ns, 5008.996 timesteps/s
98.5% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 557.810 ns/day, 0.043 hours/ns, 6456.135 timesteps/s
98.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.017161 | 0.034988 | 0.05833 | 8.0 | 17.53
Bond | 0.00017357 | 0.00021374 | 0.00027347 | 0.0 | 0.11
Kspace | 0.018025 | 0.044624 | 0.065613 | 8.4 | 22.35
Neigh | 0.0029755 | 0.0033154 | 0.0036366 | 0.6 | 1.66
Comm | 0.059933 | 0.06537 | 0.070709 | 1.5 | 32.74
Output | 3.4571e-05 | 3.6657e-05 | 4.22e-05 | 0.0 | 0.02
Modify | 0.043458 | 0.045628 | 0.04767 | 0.9 | 22.86
Other | | 0.005465 | | | 2.74
Pair | 0.0154 | 0.028993 | 0.040525 | 5.5 | 18.72
Bond | 0.00016999 | 0.0001902 | 0.00023293 | 0.0 | 0.12
Kspace | 0.019093 | 0.028112 | 0.038976 | 4.3 | 18.15
Neigh | 0.0020263 | 0.0022184 | 0.002408 | 0.4 | 1.43
Comm | 0.04947 | 0.053627 | 0.058009 | 1.4 | 34.62
Output | 2.5749e-05 | 2.7537e-05 | 3.2187e-05 | 0.0 | 0.02
Modify | 0.035275 | 0.036815 | 0.038425 | 0.7 | 23.77
Other | | 0.004909 | | | 3.17
Nlocal: 6 ave 8 max 3 min
Histogram: 1 0 0 0 0 0 1 0 1 1
@@ -201,6 +202,17 @@ fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 ${disp} mol
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol h2omol tfac_insert ${tfac} group h2o shake wshake
fix mygcmc all gcmc 100 100 0 0 54341 338.0 -8.1 0.5 mol h2omol tfac_insert 1.66666666666667 group h2o shake wshake
# atom counts
variable oxygen atom "type==1"
variable hydrogen atom "type==2"
group oxygen dynamic all var oxygen
dynamic group oxygen defined
group hydrogen dynamic all var hydrogen
dynamic group hydrogen defined
variable nO equal count(oxygen)
variable nH equal count(hydrogen)
# output
variable tacc equal f_mygcmc[2]/(f_mygcmc[1]+0.1)
@@ -208,7 +220,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+0.1)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+0.1)
variable racc equal f_mygcmc[8]/(f_mygcmc[7]+0.1)
compute_modify thermo_temp dynamic/dof yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_racc v_nO v_nH
thermo 1000
# run
@@ -226,44 +238,44 @@ WARNING: Fix gcmc using full_energy option (../fix_gcmc.cpp:445)
0 atoms in group FixGCMC:rotation_gas_atoms:mygcmc
WARNING: Neighbor exclusions used with KSpace solver may give inconsistent Coulombic energies (../neighbor.cpp:472)
Per MPI rank memory allocation (min/avg/max) = 11.6 | 11.6 | 11.6 Mbytes
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc
1000 369.81793 295.32434 -54.202686 16.535355 0.23910963 24 0 0 0 0
2000 84.544466 -2810.9047 -344.81664 14.364627 0.86677242 87 0.052198354 0.0099581757 0 0
3000 75.188527 -3688.256 -425.02228 14.567977 0.98632724 99 0.030546787 0.0049111089 0 0
4000 75.019396 -5669.3063 -427.69454 14.535207 0.98632724 99 0.019972039 0.0033375609 0 0
5000 90.415371 -2141.7596 -434.65925 17.518218 0.98632724 99 0.014909796 0.002514964 0 0
6000 78.212628 -943.75125 -428.80584 15.153904 0.98632724 99 0.01181521 0.0020316119 0 0
7000 71.754139 -2028.5122 -435.2139 13.902555 0.98632724 99 0.0099466198 0.0016755471 0 0
8000 84.446231 -1969.1657 -428.27313 16.361681 0.98632724 99 0.0084791272 0.0014442102 0 0
9000 70.952348 -2476.9812 -446.33824 14.170197 1.0162159 102 0.0077150892 0.0012556189 0 0
10000 71.418543 -1875.7083 -443.7214 14.263302 1.0162159 102 0.0068355714 0.0011197957 0 0
11000 86.094994 -4508.7581 -444.82687 17.194399 1.0162159 102 0.0061494515 0.0010082475 0 0
12000 81.906091 -1547.8105 -442.36719 16.357815 1.0162159 102 0.0055834729 0.00091775114 0 0
13000 57.221548 -4607.6222 -448.30939 11.42796 1.0162159 102 0.0051230355 0.00084046326 0 0
14000 61.288344 -2518.1779 -445.70636 12.240157 1.0162159 102 0.0047276997 0.00077602396 0 0
15000 85.787669 -2407.7111 -443.3834 17.133022 1.0162159 102 0.0043983485 0.00071920715 0 0
16000 74.845939 -3288.3403 -445.8247 14.947802 1.0162159 102 0.0042321884 0.00080654918 0 0
17000 73.835431 -1926.9566 -445.67476 14.745989 1.0162159 102 0.0039751059 0.00075470749 0 0
18000 72.634985 -3997.552 -447.2351 14.506243 1.0162159 102 0.0037395847 0.00071063946 0 0
19000 96.776472 -714.44132 -453.65552 19.904587 1.0461046 105 0.0036487876 0.00066993446 0 0
20000 75.470786 183.16972 -464.04688 15.522521 1.0461046 105 0.0034630763 0.00063350614 0 0
21000 65.658309 -773.41266 -466.27068 13.504331 1.0461046 105 0.003289113 0.00060198052 0 0
Loop time of 93.8859 on 4 procs for 20000 steps with 105 atoms
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_racc v_nO v_nH
1000 369.81793 295.32434 -54.202686 16.535355 0.23910963 24 0 0 0 0 8 16
2000 84.544466 -2810.9047 -344.81664 14.364627 0.86677242 87 0.052198354 0.0099581757 0 0 29 58
3000 75.188527 -3688.256 -425.02228 14.567977 0.98632724 99 0.030546787 0.0049111089 0 0 33 66
4000 75.019396 -5669.3063 -427.69454 14.535207 0.98632724 99 0.019972039 0.0033375609 0 0 33 66
5000 90.415371 -2141.7596 -434.65925 17.518218 0.98632724 99 0.014909796 0.002514964 0 0 33 66
6000 78.212628 -943.75125 -428.80584 15.153904 0.98632724 99 0.01181521 0.0020316119 0 0 33 66
7000 71.754139 -2028.5122 -435.2139 13.902555 0.98632724 99 0.0099466198 0.0016755471 0 0 33 66
8000 84.446231 -1969.1657 -428.27313 16.361681 0.98632724 99 0.0084791272 0.0014442102 0 0 33 66
9000 70.952348 -2476.9812 -446.33824 14.170197 1.0162159 102 0.0077150892 0.0012556189 0 0 34 68
10000 71.418543 -1875.7083 -443.7214 14.263302 1.0162159 102 0.0068355714 0.0011197957 0 0 34 68
11000 86.094994 -4508.7581 -444.82687 17.194399 1.0162159 102 0.0061494515 0.0010082475 0 0 34 68
12000 81.906091 -1547.8105 -442.36719 16.357815 1.0162159 102 0.0055834729 0.00091775114 0 0 34 68
13000 57.221548 -4607.6222 -448.30939 11.42796 1.0162159 102 0.0051230355 0.00084046326 0 0 34 68
14000 61.288344 -2518.1779 -445.70636 12.240157 1.0162159 102 0.0047276997 0.00077602396 0 0 34 68
15000 85.787669 -2407.7111 -443.3834 17.133022 1.0162159 102 0.0043983485 0.00071920715 0 0 34 68
16000 74.845939 -3288.3403 -445.8247 14.947802 1.0162159 102 0.0042321884 0.00080654918 0 0 34 68
17000 73.835431 -1926.9566 -445.67476 14.745989 1.0162159 102 0.0039751059 0.00075470749 0 0 34 68
18000 72.634985 -3997.552 -447.2351 14.506243 1.0162159 102 0.0037395847 0.00071063946 0 0 34 68
19000 96.776472 -714.44132 -453.65552 19.904587 1.0461046 105 0.0036487876 0.00066993446 0 0 35 70
20000 75.470786 183.16972 -464.04688 15.522521 1.0461046 105 0.0034630763 0.00063350614 0 0 35 70
21000 65.658309 -773.41266 -466.27068 13.504331 1.0461046 105 0.003289113 0.00060198052 0 0 35 70
Loop time of 84.4085 on 4 procs for 20000 steps with 105 atoms
Performance: 18.405 ns/day, 1.304 hours/ns, 213.024 timesteps/s
Performance: 20.472 ns/day, 1.172 hours/ns, 236.943 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 6.7882 | 10.264 | 14.758 | 93.2 | 10.93
Bond | 0.028286 | 0.034218 | 0.039038 | 2.5 | 0.04
Kspace | 0.57255 | 5.2227 | 8.8493 | 133.8 | 5.56
Neigh | 0.3635 | 0.36915 | 0.37473 | 0.9 | 0.39
Comm | 2.9961 | 3.2542 | 3.509 | 11.4 | 3.47
Output | 0.0011675 | 0.0012342 | 0.001375 | 0.2 | 0.00
Modify | 74.428 | 74.499 | 74.571 | 0.7 | 79.35
Other | | 0.2411 | | | 0.26
Pair | 6.3571 | 9.7574 | 13.984 | 90.7 | 11.56
Bond | 0.026374 | 0.031321 | 0.035482 | 2.1 | 0.04
Kspace | 0.57402 | 4.7894 | 8.1754 | 129.0 | 5.67
Neigh | 0.34952 | 0.34987 | 0.35021 | 0.1 | 0.41
Comm | 2.4028 | 2.4228 | 2.4372 | 0.9 | 2.87
Output | 0.0012269 | 0.0012826 | 0.0014355 | 0.2 | 0.00
Modify | 66.819 | 66.828 | 66.837 | 0.1 | 79.17
Other | | 0.2281 | | | 0.27
Nlocal: 26.25 ave 31 max 22 min
Histogram: 1 0 1 0 0 0 1 0 0 1
@@ -278,4 +290,4 @@ Ave special neighs/atom = 2
Neighbor list builds = 20428
Dangerous builds = 0
Total wall time: 0:01:34
Total wall time: 0:01:24

View File

@@ -1,4 +1,4 @@
LAMMPS (6 Jul 2017)
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# GCMC for LJ simple fluid, no dynamics
# T = 2.0
@@ -43,6 +43,13 @@ fix mygcmc all gcmc 1 100 100 1 29494 2.0 ${mu} ${disp}
fix mygcmc all gcmc 1 100 100 1 29494 2.0 -1.25 ${disp}
fix mygcmc all gcmc 1 100 100 1 29494 2.0 -1.25 1.0
# atom count
variable type1 atom "type==1"
group type1 dynamic all var type1
dynamic group type1 defined
variable n1 equal count(type1)
# averaging
variable rho equal density
@@ -54,10 +61,11 @@ variable muex equal -1.25-${temp}*ln(density*${lambda}+${nugget})
variable muex equal -1.25-2.0*ln(density*${lambda}+${nugget})
variable muex equal -1.25-2.0*ln(density*1+${nugget})
variable muex equal -1.25-2.0*ln(density*1+1e-08)
fix ave all ave/time 10 100 1000 v_rho v_p v_muex ave one file rho_vs_p.dat
fix ave all ave/time 10 100 1000 v_rho v_p v_muex v_n1 ave one file rho_vs_p.dat
variable rhoav equal f_ave[1]
variable pav equal f_ave[2]
variable muexav equal f_ave[3]
variable n1av equal f_ave[4]
# output
@@ -68,7 +76,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+1e-08)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+${nugget})
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+1e-08)
compute_modify thermo_temp dynamic yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav v_n1av
thermo 1000
# run
@@ -87,32 +95,32 @@ Neighbor list info ...
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 0.433 | 0.433 | 0.433 Mbytes
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav
0 0 0 0 -0 0 0 0 0 0 0 0 0
1000 2.4038954 2.1735585 -2.7041368 3.5476844 0.496 62 0.064790036 0.06313096 0.1081294 0.54304 1.4513524 -0.025479219
2000 2.0461168 1.1913842 -2.9880181 3.0212194 0.512 64 0.067416408 0.066335853 0.11306166 0.52736 1.3274665 0.034690004
3000 1.7930436 1.3788681 -3.2212667 2.6505861 0.552 69 0.067733191 0.066877836 0.1133516 0.5344 1.3834744 0.0070582537
4000 1.981449 1.2541054 -2.8222868 2.9217977 0.472 59 0.068546991 0.067856412 0.11442807 0.52504 1.3815629 0.043309657
5000 2.0946818 1.0701629 -3.5213291 3.0977688 0.568 71 0.06813743 0.067567891 0.11342906 0.53824 1.4049567 -0.0054539777
6000 1.9793484 0.68224187 -3.410211 2.9247088 0.536 67 0.067797628 0.067420108 0.11295333 0.5384 1.401683 -0.0066894359
7000 2.1885798 1.6745012 -3.185499 3.2345922 0.544 68 0.068630201 0.068261832 0.11403705 0.5244 1.449239 0.045987399
8000 2.2175324 1.5897263 -3.078898 3.2759002 0.528 66 0.068180395 0.067899629 0.11332691 0.53928 1.5488388 -0.01075766
9000 1.8610779 1.0396231 -2.923262 2.7465908 0.496 62 0.068346453 0.068028117 0.1134132 0.52912 1.4352871 0.027082544
10000 2.1079271 1.1746643 -2.9112062 3.1091925 0.48 60 0.068352878 0.068054948 0.11335434 0.5316 1.4462327 0.018503094
Loop time of 20.6892 on 1 procs for 10000 steps with 60 atoms
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav v_n1av
0 0 0 0 -0 0 0 0 0 0 0 0 0 0
1000 2.4038954 2.1735585 -2.7041368 3.5476844 0.496 62 0.064790036 0.06313096 0.1081294 0.54304 1.4513524 -0.025479219 64.98
2000 2.0461168 1.1913842 -2.9880181 3.0212194 0.512 64 0.067416408 0.066335853 0.11306166 0.52736 1.3274665 0.034690004 62.97
3000 1.7930436 1.3788681 -3.2212667 2.6505861 0.552 69 0.067733191 0.066877836 0.1133516 0.5344 1.3834744 0.0070582537 63.5
4000 1.981449 1.2541054 -2.8222868 2.9217977 0.472 59 0.068546991 0.067856412 0.11442807 0.52504 1.3815629 0.043309657 62.17
5000 2.0946818 1.0701629 -3.5213291 3.0977688 0.568 71 0.06813743 0.067567891 0.11342906 0.53824 1.4049567 -0.0054539777 64.15
6000 1.9793484 0.68224187 -3.410211 2.9247088 0.536 67 0.067797628 0.067420108 0.11295333 0.5384 1.401683 -0.0066894359 64.37
7000 2.1885798 1.6745012 -3.185499 3.2345922 0.544 68 0.068630201 0.068261832 0.11403705 0.5244 1.449239 0.045987399 62.33
8000 2.2175324 1.5897263 -3.078898 3.2759002 0.528 66 0.068180395 0.067899629 0.11332691 0.53928 1.5488388 -0.01075766 64.29
9000 1.8610779 1.0396231 -2.923262 2.7465908 0.496 62 0.068346453 0.068028117 0.1134132 0.52912 1.4352871 0.027082544 62.87
10000 2.1079271 1.1746643 -2.9112062 3.1091925 0.48 60 0.068352878 0.068054948 0.11335434 0.5316 1.4462327 0.018503094 63.2
Loop time of 20.4081 on 1 procs for 10000 steps with 60 atoms
Performance: 208804.611 tau/day, 483.344 timesteps/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 211680.375 tau/day, 490.001 timesteps/s
98.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.47227 | 0.47227 | 0.47227 | 0.0 | 2.28
Neigh | 1.1729 | 1.1729 | 1.1729 | 0.0 | 5.67
Comm | 0.17133 | 0.17133 | 0.17133 | 0.0 | 0.83
Output | 0.00028253 | 0.00028253 | 0.00028253 | 0.0 | 0.00
Modify | 18.852 | 18.852 | 18.852 | 0.0 | 91.12
Other | | 0.02063 | | | 0.10
Pair | 0.46484 | 0.46484 | 0.46484 | 0.0 | 2.28
Neigh | 1.1447 | 1.1447 | 1.1447 | 0.0 | 5.61
Comm | 0.1696 | 0.1696 | 0.1696 | 0.0 | 0.83
Output | 0.000319 | 0.000319 | 0.000319 | 0.0 | 0.00
Modify | 18.607 | 18.607 | 18.607 | 0.0 | 91.17
Other | | 0.02194 | | | 0.11
Nlocal: 60 ave 60 max 60 min
Histogram: 1 0 0 0 0 0 0 0 0 0

View File

@@ -1,4 +1,4 @@
LAMMPS (6 Jul 2017)
LAMMPS (23 Oct 2017)
using 1 OpenMP thread(s) per MPI task
# GCMC for LJ simple fluid, no dynamics
# T = 2.0
@@ -43,6 +43,13 @@ fix mygcmc all gcmc 1 100 100 1 29494 2.0 ${mu} ${disp}
fix mygcmc all gcmc 1 100 100 1 29494 2.0 -1.25 ${disp}
fix mygcmc all gcmc 1 100 100 1 29494 2.0 -1.25 1.0
# atom count
variable type1 atom "type==1"
group type1 dynamic all var type1
dynamic group type1 defined
variable n1 equal count(type1)
# averaging
variable rho equal density
@@ -54,10 +61,11 @@ variable muex equal -1.25-${temp}*ln(density*${lambda}+${nugget})
variable muex equal -1.25-2.0*ln(density*${lambda}+${nugget})
variable muex equal -1.25-2.0*ln(density*1+${nugget})
variable muex equal -1.25-2.0*ln(density*1+1e-08)
fix ave all ave/time 10 100 1000 v_rho v_p v_muex ave one file rho_vs_p.dat
fix ave all ave/time 10 100 1000 v_rho v_p v_muex v_n1 ave one file rho_vs_p.dat
variable rhoav equal f_ave[1]
variable pav equal f_ave[2]
variable muexav equal f_ave[3]
variable n1av equal f_ave[4]
# output
@@ -68,7 +76,7 @@ variable iacc equal f_mygcmc[4]/(f_mygcmc[3]+1e-08)
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+${nugget})
variable dacc equal f_mygcmc[6]/(f_mygcmc[5]+1e-08)
compute_modify thermo_temp dynamic yes
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav
thermo_style custom step temp press pe ke density atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav v_n1av
thermo 1000
# run
@@ -87,32 +95,32 @@ Neighbor list info ...
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 0.4477 | 0.4477 | 0.4477 Mbytes
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav
0 0 0 0 -0 0 0 0 0 0 0 0 0
1000 1.956397 1.7699101 -2.7889468 2.8864874 0.488 61 0.068894746 0.067229075 0.1141726 0.53288 1.3832798 0.013392866
2000 2.040943 0.56060899 -2.8001647 3.0077055 0.456 57 0.069858594 0.068831934 0.11629114 0.5232 1.3587174 0.049995794
3000 2.0004866 1.5736515 -3.3098044 2.9572411 0.552 69 0.069594029 0.068727791 0.11592543 0.53096 1.4129434 0.020022578
4000 2.1127942 2.642809 -2.8865084 3.1211733 0.528 66 0.070268697 0.069533235 0.11693806 0.52424 1.3444615 0.046884078
5000 2.3663648 1.354269 -3.1917346 3.4957662 0.528 66 0.070519633 0.069960064 0.11710321 0.52688 1.3595814 0.036270867
6000 1.9224136 0.82756699 -3.1965 2.839257 0.52 65 0.06985018 0.069474645 0.11628632 0.536 1.47062 0.00141549
7000 2.0266192 1.5593811 -2.9972341 2.9931606 0.52 65 0.070244693 0.069880791 0.11666541 0.52528 1.3246332 0.040754793
8000 1.7790467 1.8680568 -2.8028819 2.6275151 0.52 65 0.070454494 0.070172368 0.11736806 0.524 1.4213649 0.047985191
9000 1.7968847 1.3195587 -3.261001 2.6550983 0.536 67 0.069952011 0.069618327 0.11650087 0.53904 1.4624201 -0.01069837
10000 2.1566109 1.1015729 -3.4999837 3.1880335 0.552 69 0.069603309 0.069284134 0.11625548 0.53128 1.3587249 0.02075238
Loop time of 24.9916 on 4 procs for 10000 steps with 69 atoms
Step Temp Press PotEng KinEng Density Atoms v_iacc v_dacc v_tacc v_rhoav v_pav v_muexav v_n1av
0 0 0 0 -0 0 0 0 0 0 0 0 0 0
1000 1.956397 1.7699101 -2.7889468 2.8864874 0.488 61 0.068894746 0.067229075 0.1141726 0.53288 1.3832798 0.013392866 63.44
2000 2.040943 0.56060899 -2.8001647 3.0077055 0.456 57 0.069858594 0.068831934 0.11629114 0.5232 1.3587174 0.049995794 62.19
3000 2.0004866 1.5736515 -3.3098044 2.9572411 0.552 69 0.069594029 0.068727791 0.11592543 0.53096 1.4129434 0.020022578 63.23
4000 2.1127942 2.642809 -2.8865084 3.1211733 0.528 66 0.070268697 0.069533235 0.11693806 0.52424 1.3444615 0.046884078 62.57
5000 2.3663648 1.354269 -3.1917346 3.4957662 0.528 66 0.070519633 0.069960064 0.11710321 0.52688 1.3595814 0.036270867 62.56
6000 1.9224136 0.82756699 -3.1965 2.839257 0.52 65 0.06985018 0.069474645 0.11628632 0.536 1.47062 0.00141549 63.76
7000 2.0266192 1.5593811 -2.9972341 2.9931606 0.52 65 0.070244693 0.069880791 0.11666541 0.52528 1.3246332 0.040754793 62.2
8000 1.7790467 1.8680568 -2.8028819 2.6275151 0.52 65 0.070454494 0.070172368 0.11736806 0.524 1.4213649 0.047985191 62.03
9000 1.7968847 1.3195587 -3.261001 2.6550983 0.536 67 0.069952011 0.069618327 0.11650087 0.53904 1.4624201 -0.01069837 64.36
10000 2.1566109 1.1015729 -3.4999837 3.1880335 0.552 69 0.069603309 0.069284134 0.11625548 0.53128 1.3587249 0.02075238 63.24
Loop time of 23.8213 on 4 procs for 10000 steps with 69 atoms
Performance: 172857.936 tau/day, 400.134 timesteps/s
98.2% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 181350.388 tau/day, 419.793 timesteps/s
97.6% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.11696 | 0.12516 | 0.1321 | 1.7 | 0.50
Neigh | 0.34874 | 0.35644 | 0.36545 | 1.2 | 1.43
Comm | 0.48531 | 0.51366 | 0.54755 | 3.8 | 2.06
Output | 0.0005362 | 0.00069767 | 0.00076008 | 0.0 | 0.00
Modify | 23.956 | 23.972 | 23.988 | 0.3 | 95.92
Other | | 0.02376 | | | 0.10
Pair | 0.10935 | 0.11844 | 0.12741 | 2.1 | 0.50
Neigh | 0.33 | 0.33945 | 0.35091 | 1.6 | 1.42
Comm | 0.49249 | 0.51745 | 0.53856 | 2.7 | 2.17
Output | 0.00053334 | 0.0007208 | 0.0007906 | 0.0 | 0.00
Modify | 22.82 | 22.822 | 22.825 | 0.0 | 95.81
Other | | 0.02289 | | | 0.10
Nlocal: 17.25 ave 23 max 10 min
Histogram: 1 0 0 0 0 0 2 0 0 1
@@ -125,4 +133,4 @@ Total # of neighbors = 2823
Ave neighs/atom = 40.913
Neighbor list builds = 10000
Dangerous builds = 0
Total wall time: 0:00:24
Total wall time: 0:00:23

View File

@@ -0,0 +1,16 @@
This folder contains several LAMMPS input scripts and a Python module
file py_nve.py that demonstrate the use of the fix style python/move
to reimplement the NVE integrator in Python.

in.fix_python_move_nve_melt:
This is a version of the melt example which replaces the default NVE integrator
with a Python implementation. Fix python/move is used to create an
instance of the py_nve.NVE class, which implements the required interface.
It demonstrates how to access LAMMPS data as numpy arrays. This gives direct
access to memory owned by the C++ code, allows easy manipulation through numpy
operations, and avoids unnecessary copies.

in.fix_python_move_nve_melt_opt:
This version of the melt example uses NVE_Opt instead of NVE. While this Python
implementation is still much slower than the native version, it shows that
simple code transformations can lead to speedups.
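For orientation, the numpy access pattern that both classes rely on is sketched below. This is only a sketch, not part of the example files: it assumes a lammps object named lmp has already been created from the pointer that fix python/move passes in (as py_nve.py does), and the 0.001 factor is a placeholder timestep chosen purely for illustration.

  nlocal = lmp.extract_global("nlocal", 0)
  x = lmp.numpy.extract_atom_darray("x", nlocal, dim=3)   # (nlocal, 3) view, no copy
  v = lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
  x += 0.001 * v   # in-place numpy operations write straight into the C++ arrays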

View File

@@ -8,17 +8,21 @@ def loop(N,cut0,thresh,lmpptr):
from lammps import lammps
lmp = lammps(ptr=lmpptr)
natoms = lmp.get_natoms()
for i in range(N):
cut = cut0 + i*0.1
lmp.set_variable("cut",cut) # set a variable in LAMMPS
try:
for i in range(N):
cut = cut0 + i*0.1
lmp.command("pair_style lj/cut ${cut}") # LAMMPS command
#lmp.command("pair_style lj/cut %d" % cut) # LAMMPS command option
lmp.set_variable("cut",cut) # set a variable in LAMMPS
lmp.command("pair_style lj/cut ${cut}") # LAMMPS command
#lmp.command("pair_style lj/cut %d" % cut) # LAMMPS command option
lmp.command("pair_coeff * * 1.0 1.0") # ditto
lmp.command("run 10") # ditto
pe = lmp.extract_compute("thermo_pe",0,0) # extract total PE from LAMMPS
print("PE",pe/natoms,thresh)
if pe/natoms < thresh: return
except Exception as e:
print("LOOP error:", e)
lmp.command("pair_coeff * * 1.0 1.0") # ditto
lmp.command("run 10") # ditto
pe = lmp.extract_compute("thermo_pe",0,0) # extract total PE from LAMMPS
print("PE",pe/natoms,thresh)
if pe/natoms < thresh: return

View File

@@ -33,8 +33,8 @@ def post_force_callback(lmp, v):
"""
fix 1 all nve
fix 2 all python 50 end_of_step end_of_step_callback
fix 3 all python 50 post_force post_force_callback
fix 2 all python/invoke 50 end_of_step end_of_step_callback
fix 3 all python/invoke 50 post_force post_force_callback
#dump id all atom 50 dump.melt

View File

@@ -0,0 +1,23 @@
# 3d Lennard-Jones melt
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 10 0 10 0 10
create_box 1 box
create_atoms 1 box
mass * 1.0
velocity all create 3.0 87287
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify every 20 delay 0 check no
fix 1 all python/move py_nve.NVE
thermo 50
run 250

View File

@@ -0,0 +1,23 @@
# 3d Lennard-Jones melt
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 10 0 10 0 10
create_box 1 box
create_atoms 1 box
mass * 1.0
velocity all create 3.0 87287
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify every 20 delay 0 check no
fix 1 all python/move py_nve.NVE_Opt
thermo 50
run 250

View File

@@ -28,6 +28,7 @@ python simple here """
from __future__ import print_function
def simple():
    foo = 0
    print("Inside simple function")
    try:
        foo += 1

examples/python/py_nve.py
View File

@@ -0,0 +1,118 @@
from __future__ import print_function
import lammps
import ctypes
import traceback
import numpy as np


class LAMMPSFix(object):
    def __init__(self, ptr, group_name="all"):
        self.lmp = lammps.lammps(ptr=ptr)
        self.group_name = group_name


class LAMMPSFixMove(LAMMPSFix):
    def __init__(self, ptr, group_name="all"):
        super(LAMMPSFixMove, self).__init__(ptr, group_name)

    def init(self):
        pass

    def initial_integrate(self, vflag):
        pass

    def final_integrate(self):
        pass

    def initial_integrate_respa(self, vflag, ilevel, iloop):
        pass

    def final_integrate_respa(self, ilevel, iloop):
        pass

    def reset_dt(self):
        pass


class NVE(LAMMPSFixMove):
    """ Python implementation of fix/nve """

    def __init__(self, ptr, group_name="all"):
        super(NVE, self).__init__(ptr)
        assert(self.group_name == "all")

    def init(self):
        dt = self.lmp.extract_global("dt", 1)
        ftm2v = self.lmp.extract_global("ftm2v", 1)
        self.ntypes = self.lmp.extract_global("ntypes", 0)
        self.dtv = dt
        self.dtf = 0.5 * dt * ftm2v

    def initial_integrate(self, vflag):
        nlocal = self.lmp.extract_global("nlocal", 0)
        mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        x = self.lmp.numpy.extract_atom_darray("x", nlocal, dim=3)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)

        for i in range(x.shape[0]):
            dtfm = self.dtf / mass[int(atype[i])]
            v[i,:] += dtfm * f[i,:]
            x[i,:] += self.dtv * v[i,:]

    def final_integrate(self):
        nlocal = self.lmp.extract_global("nlocal", 0)
        mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)

        for i in range(v.shape[0]):
            dtfm = self.dtf / mass[int(atype[i])]
            v[i,:] += dtfm * f[i,:]


class NVE_Opt(LAMMPSFixMove):
    """ Performance-optimized Python implementation of fix/nve """

    def __init__(self, ptr, group_name="all"):
        super(NVE_Opt, self).__init__(ptr)
        assert(self.group_name == "all")

    def init(self):
        dt = self.lmp.extract_global("dt", 1)
        ftm2v = self.lmp.extract_global("ftm2v", 1)
        self.ntypes = self.lmp.extract_global("ntypes", 0)
        self.dtv = dt
        self.dtf = 0.5 * dt * ftm2v
        self.mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)

    def initial_integrate(self, vflag):
        nlocal = self.lmp.extract_global("nlocal", 0)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        x = self.lmp.numpy.extract_atom_darray("x", nlocal, dim=3)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)
        dtf = self.dtf
        dtv = self.dtv
        mass = self.mass
        dtfm = dtf / np.take(mass, atype)
        dtfm.reshape((nlocal, 1))

        for d in range(x.shape[1]):
            v[:,d] += dtfm[:,0] * f[:,d]
            x[:,d] += dtv * v[:,d]

    def final_integrate(self):
        nlocal = self.lmp.extract_global("nlocal", 0)
        mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)
        dtf = self.dtf
        dtv = self.dtv
        mass = self.mass
        dtfm = dtf / np.take(mass, atype)
        dtfm.reshape((nlocal, 1))

        for d in range(v.shape[1]):
            v[:,d] += dtfm[:,0] * f[:,d]

Some files were not shown because too many files have changed in this diff.