Compare commits

1132 Commits

Author SHA1 Message Date
c29474c533 Merge pull request #2443 from akohlmey/next_lammps_version
Step version strings for next release
2020-10-22 18:11:33 -04:00
c7a10050c2 Merge pull request #2439 from akohlmey/collected-small-changes
Collected small changes for the next release
2020-10-22 17:23:49 -04:00
9bc3ccd49a fix misplaced "#endif" 2020-10-22 15:45:24 -04:00
5ff0dbb431 step version strings for next patch release (and stable release candidate) 2020-10-22 15:36:58 -04:00
fcd442d751 simplify code. print warnings only on MPI rank 0. update parameter names 2020-10-22 15:35:45 -04:00
68b533d1b8 correct order of functions 2020-10-22 14:59:55 -04:00
b64585c631 Merge branch 'collected-small-changes' of github.com:akohlmey/lammps into collected-small-changes 2020-10-22 12:11:58 -04:00
bcf5abccb5 reorder functions in library.h and library.cpp to match order in manual 2020-10-22 12:11:42 -04:00
bc43865f45 need to look for Java Development components, not just JNI parts 2020-10-22 01:55:42 -04:00
a14b38ce21 avoid bell 2020-10-21 22:23:45 -04:00
bb32cbc223 add a few more tests for the lammps-shell 2020-10-21 22:07:38 -04:00
ac6f85f8be make expansion of internal command unambiguous 2020-10-21 21:14:53 -04:00
0ab8803d19 Merge pull request #2437 from akohlmey/correct-urls
Make references to LAMMPS and other websites consistent
2020-10-21 20:34:14 -04:00
e79e53c540 Merge pull request #2438 from akohlmey/swig-support-fixes
Small updates to SWIG tool for increased portability and correctness
2020-10-21 19:57:23 -04:00
386bea76cb describe more clearly what is provided 2020-10-21 17:25:42 -04:00
6b2a98b537 add a few more manual updates of outdated, mistyped, or no longer available URLs 2020-10-21 17:19:33 -04:00
f8bf4989f4 update URLs from http:// to https:// 2020-10-21 17:04:23 -04:00
a914a3aac2 update outdated URLs 2020-10-21 17:03:46 -04:00
f23d831a90 update cslib homepage URLs to use https:// 2020-10-21 16:53:30 -04:00
adf74b3a22 use https://lammps.sandia.gov based URLs consistently 2020-10-21 16:45:30 -04:00
9cdf6f1e8b more portability changes 2020-10-21 14:22:06 -04:00
d7a0971578 make python support more portable, require python3 2020-10-21 14:14:46 -04:00
53028b24d6 improve portability for building SWIG Tcl wrappers 2020-10-21 14:06:25 -04:00
1753c9f361 Merge pull request #2434 from akohlmey/swig-wrappers
Provide an interface file for creating script wrappers for LAMMPS with SWIG
2020-10-21 12:56:11 -04:00
2ed11f55d6 fix URLs to documentation files 2020-10-21 11:12:13 -04:00
0a30398d1f get rid of obsolete README note. We have long ago fixed the aliasing issue in Coulomb tabulation 2020-10-21 11:10:26 -04:00
de8149154a fix broken internal links in the manual 2020-10-21 10:56:52 -04:00
cc90596183 fix link to documentation 2020-10-21 10:49:01 -04:00
de40b3e637 remove trailing whitespace 2020-10-20 21:24:02 -04:00
7549edd844 Merge branch 'master' into swig-wrappers 2020-10-20 21:22:53 -04:00
a8c0142621 update docs for building SWIG Tcl wrapper 2020-10-20 21:22:39 -04:00
4b447fb3d8 Merge pull request #2429 from rbberger/python_docs
Minor updates to the Python docs
2020-10-20 21:12:44 -04:00
955c7c1efb Merge pull request #2433 from athomps/eamfs-doc
Clarified order of rho_alpha_beta density functions
2020-10-20 20:13:35 -04:00
ad50db1feb use a different/cleaner approach to build an extended Tcl shell 2020-10-20 18:53:29 -04:00
1b358603c2 simplify 2020-10-20 18:52:53 -04:00
0547425482 a few more small fixes 2020-10-20 18:30:32 -04:00
6259b277df replace explicit clean-all target with added properties 2020-10-20 18:30:19 -04:00
fe7628f954 add check that the extension module is compiled before running example scripts 2020-10-20 18:29:43 -04:00
7db669df40 add support to also build an extended tcl shell as opposed to dynamically loading the extension 2020-10-20 18:29:16 -04:00
515f1d9ead make module search path commands consistent 2020-10-20 17:38:03 -04:00
faf67662d5 fix typo 2020-10-20 16:47:56 -04:00
b9a57899a6 whitespace fix 2020-10-20 16:46:21 -04:00
eeaff55df7 SWIG support documentation update 2020-10-20 16:37:30 -04:00
bc1d325930 demonstrate data access functions 2020-10-20 15:57:02 -04:00
75ac1857ab Clarified order of rho_alpha_beta density functions 2020-10-20 11:47:41 -06:00
3ad397dd60 implement some more data access functions and handle passing string buffers 2020-10-20 11:43:03 -04:00
6c826823fe update SWIG support readme 2020-10-20 11:42:23 -04:00
7297ed3115 apply stack-protector fix required by newer versions of MinGW, e.g. on Fedora 33 2020-10-19 21:59:27 -04:00
cd444eefe2 Merge branch 'master' into swig-wrappers 2020-10-19 21:58:23 -04:00
7af95dd396 Merge pull request #2432 from akohlmey/collected-small-changes
Collected small changes for the stable release
2020-10-19 19:43:39 -04:00
5ac910a748 remove redundant file 2020-10-19 15:46:49 -04:00
56cecf2c3a small tweaks: avoid overflow and add missing newline 2020-10-19 15:46:33 -04:00
4b2fd65585 fix issue where a variable was used before it was assigned. 2020-10-19 15:46:03 -04:00
2c54b7e505 update USER-SMD examples: rename files, shorten runs and add logfiles 2020-10-19 15:45:26 -04:00
ef9920bd37 Merge branch 'correct_user_smd_examples' of github.com:dboemer/lammps into collected-small-changes 2020-10-19 13:52:18 -04:00
4795e01d54 Support being called from a graphical shell through filename association.
This adds a hack to the LAMMPS shell that hides the first argument from
the LAMMPS class if it does not start with a '-' and instead tries to
open it as an input file.
2020-10-19 11:40:28 -04:00
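For illustration, a minimal sketch (in Python; the actual LAMMPS shell is C++, and all names here are hypothetical) of the argument handling described in the commit above:

```python
import sys

# Sketch: hide a leading non-option argument from the argument list that
# is handed to the LAMMPS class and treat it as the input file that was
# dropped onto the shell via filename association.
args = sys.argv[1:]
input_file = None
if args and not args[0].startswith("-"):
    input_file = args.pop(0)
# 'args' would go to the LAMMPS class; 'input_file' would be opened as input.
```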
f67d9bb523 Correct USER-SMD examples and remove tabs 2020-10-19 14:56:40 +02:00
c891b9dca6 add simple main function for building a static Tcl shell executable 2020-10-19 01:01:27 -04:00
8c5ac3d49c add README file to SWIG tool 2020-10-19 01:01:06 -04:00
5a70ee8a40 make names of custom data functions consistent 2020-10-19 01:00:50 -04:00
f74203fb33 start adding documentation about SWIG wrapping to the manual 2020-10-19 01:00:02 -04:00
97767d0a3c add some support functions to cast pointers and get values from pointers 2020-10-18 12:40:17 -04:00
eab5d68a09 move SWIG support to tools/swig folder 2020-10-18 08:19:20 -04:00
0f71c02059 suppress developer warnings 2020-10-18 08:12:29 -04:00
0685df3e64 add "clean-all" target to remove all content generated by build or example run scripts 2020-10-18 08:12:08 -04:00
1595c7ed48 copy example run scripts to the swig folder in the build tree 2020-10-18 08:11:20 -04:00
c6690eed6c add ruby to the fold 2020-10-18 02:16:06 -04:00
23cfdaac11 simplify 2020-10-18 01:56:18 -04:00
c376b8ffa5 add java interface 2020-10-18 01:49:19 -04:00
42116fc444 expand examples to include some LAMMPS commands 2020-10-18 01:06:34 -04:00
e1b00a67f8 include enumerator constants into swig interface file 2020-10-18 01:06:17 -04:00
1ed735e311 add some minimal usage examples 2020-10-18 00:45:21 -04:00
58ceab93ec include enumerators 2020-10-18 00:45:04 -04:00
fcbbc51a33 add minimally tested swig wrapper for Lua, Perl5, Python, and Tcl 2020-10-17 23:51:36 -04:00
722b9c8cf3 small tweak 2020-10-16 22:46:33 -04:00
a04c8c8a3b fix typo 2020-10-16 22:46:20 -04:00
cd605d46bc Add missing code-block language 2020-10-16 18:22:29 -04:00
b3b650acf2 Update Python_execute.rst 2020-10-16 18:22:29 -04:00
91a1e9abb4 Correct table in Python_examples.rst 2020-10-16 18:22:28 -04:00
4534e78df1 Add extra space in Python_run.rst 2020-10-16 17:20:38 -04:00
853b5ea576 Remove redundant links and wording 2020-10-16 17:13:31 -04:00
7420b7018a Merge pull request #2427 from akohlmey/lammps-shell-tweaks
A few more tweaks for the LAMMPS Shell
2020-10-16 16:43:42 -04:00
4d31ae7279 Better system folder detection, add URL to "help" command, avoid buffer overflow. 2020-10-16 15:17:57 -04:00
eaa75417f2 Merge pull request #2425 from akohlmey/more-doc-updates
More updates/reorganization of the programmer part of the manual
2020-10-16 15:02:21 -04:00
4335f3d2ab Add LAMMPS icon in docs 2020-10-16 12:18:25 -04:00
f547cb4262 clarify tasks for single() for Bond and Angle classes 2020-10-16 10:48:00 -04:00
f2ba37e35d try switching the current working directory when dropped into a Windows system folder. 2020-10-16 10:47:23 -04:00
f5c457334a make headers more compact 2020-10-16 02:43:20 -04:00
dd7a3a3a54 move the entries about using the Python module into its own (sub) toctree 2020-10-15 20:29:24 -04:00
08d892d0d9 whitespace 2020-10-15 19:56:31 -04:00
5d3f10611a Adjust Python deprecation notice 2020-10-15 18:25:58 -04:00
99f9a16a25 Update Python docs 2020-10-15 18:19:57 -04:00
4a946f5388 rename sections to User Guide and Programmer Guide 2020-10-15 12:00:36 -04:00
f15dc4a0a8 add more details and derived class to the class diagram and use a different layout engine 2020-10-15 12:00:10 -04:00
bc16709150 tweak 2020-10-14 23:59:06 -04:00
dc8e6bc5ab mention example using the new Fortran module in examples/COUPLE/simple 2020-10-14 23:29:03 -04:00
88644caaec update header texts 2020-10-14 23:28:28 -04:00
937f8b3f61 reorder sections in developer guide 2020-10-14 22:57:41 -04:00
d41cce7660 update LAMMPS class diagram 2020-10-14 22:57:21 -04:00
a7a4ea76a2 update LAMMPS logo with raytraced version 2020-10-14 22:57:02 -04:00
89842fc7c4 Merge pull request #2418 from akohlmey/collected-small-changes
Collected small changes and bugfixes for the stable release
2020-10-14 17:45:10 -04:00
28641bcbc7 Merge pull request #2402 from ndtrung81/gpu-dpd
Allowed dpd/tstat/gpu and dpd/gpu to work together in pair style hybrid
2020-10-14 17:18:02 -04:00
61c33e5622 update documentation conventions for recent changes to the manual. 2020-10-14 17:01:58 -04:00
3926cbe5f0 Merge branch 'patch-3' of github.com:evoyiatzis/lammps into collected-small-changes 2020-10-14 15:18:03 -04:00
865b159d16 update singularity definitions for readline 6/7 to have a pkg-config file 2020-10-14 15:06:57 -04:00
234f27514f avoid CMake failure when reporting packages without any of them enabled 2020-10-14 14:59:24 -04:00
65eb4b5527 explicitly enable C++11 and use pkg-config to get readline flags 2020-10-14 14:33:12 -04:00
d322e29f64 mention that C++11 also needs to be enabled with Ubuntu 16.04 2020-10-14 14:30:03 -04:00
45939602a5 Update fix_widom.rst
the shake keyword is not applicable to widom insertions
2020-10-14 20:17:35 +02:00
18091f15e0 Merge branch 'master' into collected-small-changes 2020-10-14 13:11:30 -04:00
7f804512b7 rename to be consistent with example log files 2020-10-14 13:11:13 -04:00
d27c7cd2fc Merge pull request #2421 from akohlmey/remove-obsolete-files
Remove outdated or obsolete files
2020-10-14 13:09:52 -04:00
60ff1380d7 update log files for bench/POTENTIALS 2020-10-14 11:04:51 -04:00
50986d5f27 Merge pull request #2416 from akohlmey/lammps-shell-improvements
Bug fixes and improvements for the LAMMPS Shell tool
2020-10-14 10:42:30 -04:00
1e2c8ce2e6 correct data file for USER-EFF 2020-10-14 08:15:59 -04:00
3894212712 correctly use REBO potential 2020-10-14 08:04:57 -04:00
bb6cfbeec1 mention AMD GPUs 2020-10-14 08:04:37 -04:00
a002194e26 remove outdated "bench/KEPLER" folder 2020-10-14 07:57:32 -04:00
07387d5e6c remove outdated "examples/accelerate" folder 2020-10-14 07:56:58 -04:00
587be7a21a Remove makefiles for obsolete machines and configurations or that are redundant 2020-10-14 07:54:07 -04:00
89d60afb91 make certain alpha_final is initialized 2020-10-14 04:51:51 -04:00
00e1edfae9 add a preset for selecting gcc compilers (for cases where they are not the default) 2020-10-14 04:51:34 -04:00
2026636f72 overview description for the unit test folders 2020-10-13 22:00:10 -04:00
acbb54d35f tweak testing and add a test run for history expansion 2020-10-13 21:32:48 -04:00
f003a6c536 implement a test prototype for the LAMMPS shell 2020-10-13 20:36:24 -04:00
290763a844 add a test_mode where the isatty() check is bypassed and history is not read
this is enabled when the LAMMPS_SHELL_TESTING environment variable is set
2020-10-13 17:09:11 -04:00
114dd48779 change behavior with respect to OpenMP versus the regular LAMMPS executable
since the shell is not meant to be run with MPI, we can use all threads by default.
2020-10-13 17:08:23 -04:00
8ab5f3c71a adjust recently added/expanded tests so they skip tests when packages are missing 2020-10-13 15:42:21 -04:00
d0ac698a41 must not assume that size_one == nfield in DumpCustom since DumpVTK changes it
this changes all loops over format strings and output types to use nfield
2020-10-13 15:28:02 -04:00
e23774ed56 include "pointers.h" to avoid include file name conflicts when compiling with USER-VTK enabled 2020-10-13 15:26:38 -04:00
8920830909 utils.h is not needed (globally included via pointers.h) 2020-10-13 15:25:46 -04:00
338b05d9dd set only build type specific compiler flags, so using -DCMAKE_CXX_FLAGS will work as expected 2020-10-13 12:00:03 -04:00
60a296fea8 small tweaks 2020-10-12 23:51:00 -04:00
49afab09a5 Building LAMMPS for Windows with the MinGW cross compiler is supported with CMake only 2020-10-12 23:49:40 -04:00
61381ad821 Use User's Guide and Programmer's Guide. Normalize header level formatting 2020-10-12 23:31:03 -04:00
ed885847a3 expose document and test more atom style property flags 2020-10-12 20:24:21 -04:00
a684f896de rearrange doxygen docs for lammps_extract_settings() and lammps_extract_global()
this replaces two large tables with multiple small tables that group
parameters by similar context
2020-10-12 18:59:35 -04:00
9bdae2b5c0 move installed packages summary to a different location further down in the output 2020-10-12 18:04:19 -04:00
1d7fa39bc2 don't print a message for each generated style/package header file 2020-10-12 17:10:38 -04:00
a97e632bb0 make enabled packages report more compact 2020-10-12 17:10:04 -04:00
ec4eab5061 CMake change requested by @junghans 2020-10-12 15:21:08 -04:00
24cec51e85 add tests for using python invoke and python style variables 2020-10-12 06:17:16 -04:00
8c5da70823 handle the case where the variable type is invalid and thus a null pointer is returned 2020-10-12 06:16:28 -04:00
6cda1e16ae add test program template for testing the PYTHON package 2020-10-12 02:40:02 -04:00
c01ff34785 we must initialize the number of types with any "extra" values
so far, the "extra" value will only be applied if the "types" line is
present in the header. if not, the "extra" value is ignored. now will
first apply this and then lets it be overwritten in case a "types" line
is present in the header.
2020-10-11 23:26:35 -04:00
a6df6cf84c fix format 2020-10-11 23:18:03 -04:00
a82c0a58af improve error message 2020-10-11 23:17:46 -04:00
bdb4334210 add tip about using history as starting point for a new input 2020-10-11 16:13:13 -04:00
ee98ecc7bb document save_history command and a few more small tweaks 2020-10-11 15:47:38 -04:00
556d48f1dc implement a "save_history" command into the LAMMPS shell 2020-10-11 15:30:56 -04:00
f6683fefea avoid segfault on the last word of the command history 2020-10-11 14:05:37 -04:00
9950f0d259 small LAMMPS shell updates
- implement a "source" command (same as "include")
- synchronize documentation in manual and README
- updates to readline customizations
2020-10-11 13:53:15 -04:00
44549ba81d replace doc page with page or other appropriate text 2020-10-11 12:19:38 -04:00
4908a53612 correct example to include '=on' for package selection with CMake 2020-10-11 12:19:21 -04:00
3396897438 document both the CMake and the traditional procedure to enable MPIIO 2020-10-11 12:18:26 -04:00
f63ff4cdc9 LAMMPS shell customization info and some usage tips 2020-10-11 12:00:16 -04:00
c0f58243cb use correct header levels for LAMMPS shell docs 2020-10-10 23:40:34 -04:00
b3d9f648ef Merge pull request #2414 from akohlmey/next_lammps_version
Update version strings for next patch release
2020-10-09 20:12:56 -04:00
0460dc3dfb Merge pull request #2413 from akohlmey/progguide-updates
More Programmer Guide and Library Interface updates for next release
2020-10-09 19:30:37 -04:00
51489c1509 dedup anchor 2020-10-09 17:29:31 -04:00
97964604c6 no point in trying a static link of executables 2020-10-09 13:29:13 -04:00
362fe700a5 remove static libraries for cross-compiler. not used with .dll version. 2020-10-09 13:26:52 -04:00
43f2c14c8b print warning when reading data files with non-zero image flags for non-periodic boundaries 2020-10-09 13:24:53 -04:00
57e0e64ffe No MPI communication after MPI_Finalize() and no MPI_Finalize() unless we are initialized. 2020-10-09 12:47:13 -04:00
078f931393 add comment to explain using the LAMMPS_LIB_MPI define in example 2020-10-09 11:50:53 -04:00
84c9fcf6e9 add legacy warning to F77 style Fortran wrapper 2020-10-09 11:15:15 -04:00
74e1d0f8cf move Tools compilation script code from CMakeLists.txt to separate Tools.cmake file 2020-10-09 11:15:00 -04:00
5457accb3d make the legacy fortran wrapper work (again) 2020-10-09 07:52:36 -04:00
ca3d10fa39 need to define LAMMPS_LIB_MPI 2020-10-09 07:37:25 -04:00
bafba4235c include versionadded tags 2020-10-09 07:26:01 -04:00
188e1c3828 correct versionadded dates 2020-10-09 07:22:16 -04:00
38bf231361 silence sphinx warning 2020-10-09 07:19:02 -04:00
471acb2ef8 add unit tests for added APIs 2020-10-09 07:16:34 -04:00
7a9b4ef0d4 include FFTW in MinGW cross-compiler libraries. Make static/dynamic explicit 2020-10-09 02:27:31 -04:00
eb3992a69a enable static linkage for executables when cross-compiling with MinGW 2020-10-09 02:26:46 -04:00
bfcb78d5d7 fix bug with string passing 2020-10-09 01:42:48 -04:00
5a207247b6 update version strings for next patch release 2020-10-09 01:12:41 -04:00
6d67fa004c tweak formulation 2020-10-08 23:35:49 -04:00
90d511bc86 add wrappers for new library functions 2020-10-08 22:32:33 -04:00
49ebc5ac40 update versionadded date 2020-10-08 22:32:08 -04:00
5c6bfa6ab5 Merge branch 'master' into progguide-axel 2020-10-08 21:51:22 -04:00
a581b4b85e Merge pull request #2412 from lammps/progguide-updates
Updates to the Programmer Guide part of the Manual
2020-10-08 21:50:37 -04:00
4f4bc427ae implement LAMMPS_LIB_NO_MPI to LAMMPS_LIB_MPI change 2020-10-08 21:50:15 -04:00
9f0002550e Merge pull request #2404 from akohlmey/fortran-portable-string
Make f2c_string utility function of the new Fortran module portable across Fortran compilers
2020-10-08 20:58:51 -04:00
4995ab3264 Merge pull request #2403 from akohlmey/fortran2-updates
Update of fortran2 interface for recent changes in the C-library interface
2020-10-08 20:23:44 -04:00
4a856148ad Merge pull request #2389 from kateryna-goloviznina/coul_tt
Pair style for charge-dipole damping with Tang-Toennies function
2020-10-08 19:47:36 -04:00
677e8afdc5 update section headlines 2020-10-08 10:32:02 -04:00
a89741f7f6 more 'doc page' to 'page' changes 2020-10-08 08:32:15 -04:00
fdbcd202fa Consolidate the redundant instructions for building the manual into one location. 2020-10-08 08:31:58 -04:00
60864f021f replace "doc page" with "page" or equivalent 2020-10-07 22:29:26 -04:00
e683585bba reformat reax/c control file parameter descriptions 2020-10-07 22:28:34 -04:00
353158f7e7 improve errors vs. exceptions note 2020-10-07 21:56:14 -04:00
20e1697dba add ":class: note" to admonitions where it was missing for consistent formatting 2020-10-07 20:24:56 -04:00
08f8e7ed28 update titles as suggested by steve and move a few entries around 2020-10-07 20:07:30 -04:00
d48b0ebd7b print OS/Compiler info when starting LAMMPS shell 2020-10-07 18:37:27 -04:00
0891a8ce7d better MPI version detection. 2020-10-07 18:37:09 -04:00
ac6e99ae81 implement "mem" command into LAMMPS shell to print current and max memory use 2020-10-07 18:36:43 -04:00
50bfb9142d add library API to print OS/Compiler info (for use with LAMMPS shell) 2020-10-07 18:35:59 -04:00
57cc8d6290 fix incorrectly formatted "notes" 2020-10-07 18:35:05 -04:00
1098ca8749 add some more explanation about exceptions from the library interface perspective 2020-10-07 13:08:49 -04:00
e637c150ba add versionadded tags 2020-10-07 13:08:25 -04:00
395906b387 whitespace 2020-10-07 13:07:57 -04:00
5fa1cd7e86 small corrections 2020-10-07 05:33:55 -04:00
9ea38c545d update global properties tests 2020-10-06 22:43:42 -04:00
355c68f304 Merge branch 'master' into progguide-axel 2020-10-06 21:40:43 -04:00
bf4773c3f7 Merge pull request #2407 from akohlmey/collected-small-fixes
Collected small changes and bugfixes for the next patch release
2020-10-06 20:17:45 -04:00
f0f933a711 allow forward slash in strings for if statements 2020-10-06 18:44:26 -04:00
15f9987c32 add custom commands "cd" and "pwd" to the LAMMPS shell 2020-10-06 18:39:22 -04:00
fb1dab6098 Merge branch 'master' into collected-small-fixes 2020-10-06 18:00:33 -04:00
1ee9e4eabd Merge branch 'master' into progguide-axel 2020-10-06 17:11:47 -04:00
9274912a81 add note about no longer supporting PyPar and remove instructions related to it. 2020-10-06 16:19:43 -04:00
2f857176c8 stay compatible with cmake older than version 3.14 2020-10-06 13:45:46 -04:00
f442d811be Merge commit 'dbf51af7d1565c62902eef22a7aa6f6eb552432b' of github.com:rbberger/lammps into collected-small-fixes 2020-10-06 12:29:36 -04:00
1afd9b1c83 update Python installation docs 2020-10-06 12:15:56 -04:00
887603d856 Merge pull request #2411 from rbberger/fix_wall_gran_region
Corrections for fix wall/gran and wall/gran/region
2020-10-06 11:51:58 -04:00
7dd244a828 Correct fix wall/gran and wall/gran/region docs 2020-10-06 10:45:23 -04:00
18023ad9dc Update fix_wall_gran_region.cpp
Don't store the atom tag, just whether there is a contact.

Fixes #2306
2020-10-06 10:39:14 -04:00
5d08f629ab fix typo 2020-10-06 05:30:09 -04:00
b5db29bae4 revise python in LAMMPS docs 2020-10-05 22:48:57 -04:00
f2ba00ea9c update label in graph 2020-10-05 21:50:54 -04:00
549e4df506 Merge branch 'progguide-richard' into progguide-axel 2020-10-05 21:04:29 -04:00
80a054471d Merge branch 'progguide-updates' into progguide-richard 2020-10-05 19:31:03 -04:00
5244f49234 Fix broken page 2020-10-05 18:04:30 -04:00
caf434811a Add example to Python_config.rst 2020-10-05 17:54:48 -04:00
b8c66b099d Fix typo 2020-10-05 17:42:43 -04:00
93ed9b4266 Add PyLammps parts 2020-10-05 17:34:50 -04:00
22cca93603 Add tests for neighbor list API 2020-10-05 17:34:08 -04:00
02b10380bc Create ctypes only neighbor list API variant
This moves the lammps.get_neighlist() method to lammps.numpy.get_neighlist().
lammps.get_neighlist() now returns a NeighList object, while the NumPy variant
returns a NumPyNeighList object. The main difference between the two is that while
the ctypes variant returns neighlist elements as

atom idx (int), numneighs (int), neighbors (POINTER(c_int))

the NumPy variant returns

atom idx (int), neighbors (numpy.array)
2020-10-05 16:21:37 -04:00
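Going by the commit description above, a usage sketch of the two variants (the input script and neighbor list index are placeholders):

```python
from lammps import lammps

lmp = lammps()
lmp.file("in.melt")   # placeholder input that defines a pair style

# ctypes variant: lammps.get_neighlist() returns a NeighList object
nl = lmp.get_neighlist(0)
for iatom, numneigh, neighbors in nl:   # neighbors is a POINTER(c_int)
    if numneigh > 0:
        first = neighbors[0]

# NumPy variant: lammps.numpy.get_neighlist() returns a NumPyNeighList object
nlnp = lmp.numpy.get_neighlist(0)
for iatom, neighbors in nlnp:           # neighbors is a numpy.array
    nneigh = len(neighbors)
```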
68147306e7 Add Python_neighbor.rst 2020-10-05 16:15:40 -04:00
c06348c507 Update Python docs 2020-10-05 15:07:57 -04:00
0fb91ec755 Merge pull request #2408 from akohlmey/lammps-shell
Add a LAMMPS shell tool as alternative to the LAMMPS executable for interactive use
2020-10-05 14:42:23 -04:00
d9f7ddfd6d Merge pull request #2409 from mrcdr/math_eigen_impl
PR to relate "math_eigen_impl.h" with an author
2020-10-05 14:07:09 -04:00
8f808a5e6e Update Python_usage.rst and Python_module.rst 2020-10-05 14:06:09 -04:00
ed17eabe17 Add missing code highlights 2020-10-05 14:04:44 -04:00
884295eb5f Add example showing Python exception handling 2020-10-05 14:04:14 -04:00
6672568fca Change links 2020-10-05 14:03:50 -04:00
4f1ed775e9 Add missing docstrings 2020-10-05 14:03:19 -04:00
6933233c56 add tests for the new library APIs 2020-10-05 13:43:11 -04:00
113db3ac73 fold group specific query functions into the more generic ID query functions 2020-10-05 13:41:12 -04:00
68cca6d31a make warning about lack of exception handling more specific 2020-10-05 12:51:42 -04:00
bd206dca10 simplify based on suggestions from @junghans 2020-10-05 11:44:19 -04:00
b6d333535c correct variable name 2020-10-05 11:43:43 -04:00
47e08f63ac Merge pull request #2406 from jrgissing/bond/react-rmsd_constraint_bugfix
bond/react: rmsd constraint bugfix
2020-10-05 10:43:21 -04:00
e08d905504 make a small change to math_eigen_impl.h 2020-10-05 22:32:15 +09:00
3e74e03bbb update all_on/all_off presets to really include all packages 2020-10-04 16:46:29 -04:00
d500ffa784 implement handler for CTRL-C on windows. 2020-10-04 16:42:29 -04:00
f3b33ea0c6 address spelling issues 2020-10-04 14:10:35 -04:00
d1e76068e8 add new APIs to library docs 2020-10-04 14:10:18 -04:00
ffcd905bf1 complete LAMMPS shell docs integration. Warn if exceptions are missing but don't fail 2020-10-04 14:03:34 -04:00
e360219a8a add readme and makefile for LAMMPS shell 2020-10-04 12:28:01 -04:00
e71806196f ignore LAMMPS shell history files 2020-10-04 12:27:40 -04:00
cc14bae240 add LAMMPS shell to README 2020-10-04 12:22:37 -04:00
f6a7cbd2e8 fix typo 2020-10-04 11:31:15 -04:00
2f67f299e5 correct error messages and make use of utils and fmtlib 2020-10-04 11:26:22 -04:00
ecb5078ba7 add signal handler to smoothly interrupt ongoing runs. non-windows only. 2020-10-04 11:25:37 -04:00
f1ed6a9782 change word break characters to make direct variable expansion completion work 2020-10-04 11:25:05 -04:00
9353004e3e add APIs to determine if LAMMPS is running and to interrupt a run cleanly 2020-10-04 11:23:49 -04:00
2eb07f7427 add expansions of IDs and references to IDs 2020-10-04 06:02:14 -04:00
9dfb715296 add support for variable names to id introspection in library interface 2020-10-04 06:01:26 -04:00
b1cc9949e4 include added introspection into docs 2020-10-03 23:38:22 -04:00
1988e4c24c add introspection for various kinds of IDs 2020-10-03 23:38:02 -04:00
c08089c1a7 add missing pkg-config file for readline 2020-10-03 22:35:58 -04:00
4520eef1b0 include readline support to be ready for LAMMPS-Shell 2020-10-03 22:05:52 -04:00
7f3db6f8e3 improve wording 2020-10-03 22:04:54 -04:00
c5514c0a36 windows portability fix 2020-10-03 22:02:13 -04:00
4e8feff52c add support for building the LAMMPS shell in CMake 2020-10-03 22:01:57 -04:00
acf53ff55e add a few more context aware matcher functions 2020-10-03 12:26:40 -04:00
a2f7aae6db add API to query group names 2020-10-03 12:26:17 -04:00
01c85a3032 correct embedded docs 2020-10-03 12:25:56 -04:00
b231fa8a16 add completion for force style names 2020-10-03 11:32:56 -04:00
4cf30ceda1 silence compiler warnings 2020-10-03 09:52:00 -04:00
af8f8d3c11 avoid segfault 2020-10-03 09:51:42 -04:00
024c9238f9 fix issue spotted by compiler warning 2020-10-03 09:51:28 -04:00
22cbac0b01 initial version of LAMMPS shell. without build system support (yet). 2020-10-03 09:49:35 -04:00
946a49662f bond/react: rmsd constraint bugfix
previously, eligible reactions could have been temporarily prevented, at the edge of the box, in serial
2020-10-02 18:43:35 -04:00
fb1bd3e962 Correct links 2020-10-02 18:01:52 -04:00
d91d8de76d Refactor LammpsNumpyWrapper to numpy_wrapper
LammpsNumpyWrapper was a class that was defined inside the
lammps.numpy property when it was first accessed. This made it hard
to document the methods of this class.

This commit extracts this utility class into the lammps module and
renames it to 'numpy_wrapper'.
2020-10-02 17:28:25 -04:00
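A brief sketch of what the refactoring above means for callers, assuming the extract_atom_darray(name, nelem, dim) signature of the numpy wrapper at the time:

```python
from lammps import lammps

lmp = lammps()
# access is unchanged: the methods still live behind the .numpy property
x = lmp.numpy.extract_atom_darray("x", lmp.get_natoms(), 3)
# but since numpy_wrapper is now a module-level class, its methods can be
# documented and inspected like any other class:
help(type(lmp.numpy))
```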
0089a35d95 Remove dead code 2020-10-02 17:27:08 -04:00
7355977819 Add missing exception handling for functions that could cause errors 2020-10-02 17:26:14 -04:00
d9cbb354d2 Move lammps exception code into its own property 2020-10-02 17:22:01 -04:00
0475397229 Update Python_usage.rst 2020-10-02 17:19:51 -04:00
60891fe6f9 Add Python_config.rst 2020-10-02 17:19:04 -04:00
b57c8f6b77 Add tests for LAMMPS exceptions in Python 2020-10-02 16:53:41 -04:00
52c6353f86 Fix typo 2020-10-02 15:09:29 -04:00
a0d0f96e52 make f2c_string utility function portable across fortran compilers 2020-10-02 10:36:17 -04:00
1f417f8eb4 mention Fortran interface in examples/COUPLE/fortran2 now that it is up-to-date again 2020-10-02 00:28:13 -04:00
086a71ba46 incorporate LAMMPS fortran2 interface updates and apply a few corrections 2020-10-02 00:07:01 -04:00
2c7a686220 update fortran2 module 2020-10-01 23:20:52 -04:00
51d55aa036 Merge pull request #2400 from akohlmey/code-analysis-fixes
A few more static code analysis fixes
2020-10-01 18:32:50 -04:00
2ea61b21f8 fix small memory leaks 2020-10-01 18:11:19 -04:00
48c45767f9 remove dead code 2020-10-01 18:11:08 -04:00
c5fc3f2c78 Update Python_usage.rst 2020-10-01 18:01:07 -04:00
533c453a08 Update Python docs 2020-10-01 15:00:08 -04:00
243a81e9d8 must initialize eflag_atom and vflag_atom to zero in constructor 2020-10-01 14:49:23 -04:00
9dc42262ec one more edit 2020-10-01 11:34:27 -06:00
343b24dd5e more edits to C library chapter 2020-10-01 10:50:33 -06:00
b94df09570 Merge pull request #2398 from lammps/progguide-updates
Documentation updates plus a few small bugfixes
2020-10-01 11:11:28 -04:00
774ac8b2d9 avoid overflow of per-local-atom computation 2020-10-01 09:49:28 -04:00
4a40a70870 avoid overflow of timestep related computations 2020-10-01 09:48:54 -04:00
8c56f60939 use bigint instead of int for timestep related variable functions to avoid overflows 2020-10-01 09:35:45 -04:00
ea3af3c2bc resolve delete/free() mismatch 2020-10-01 09:34:38 -04:00
9efcaf2d61 update documentation for Windows installer 2020-10-01 00:57:53 -04:00
fb33a71720 look for liblammps.dll in windows binary dir only if that directory exists 2020-10-01 00:34:03 -04:00
507c2cb2a8 Update and reorganize Python docs 2020-09-30 22:20:01 -04:00
a79a7b2704 fix typo reported on lammps-users 2020-09-30 21:24:55 -04:00
77fb2ee311 add list of documented functions at the top of the C-Library doc pages 2020-09-30 21:16:48 -04:00
dc13a61b40 fix spelling 2020-09-30 20:00:53 -04:00
a9b27caf17 Merge branch 'master' into progguide-axel 2020-09-30 19:57:20 -04:00
38d954e038 Update the text to mention returning the handle instead of passing it as an argument. 2020-09-30 19:54:07 -04:00
0d9a2808b4 Merge branch 'progguide-updates' of github.com:lammps/lammps into progguide-axel 2020-09-30 19:50:00 -04:00
6928e79883 Merge pull request #2395 from rbberger/collected-small-fixes
Fixes issues found via Static Code Analysis and from bug reports
2020-09-30 19:23:52 -04:00
4b71e1cbda initial library doc page edits 2020-09-30 15:37:11 -06:00
cbc529881c support hybrid atom styles in fix propel/self 2020-09-30 14:07:46 -04:00
23279836c9 Invalidate pointer after deallocation 2020-09-30 09:10:40 -04:00
d84300a3ac do not fail when looking for gzip or ffmpeg executables when cross compiling to windows 2020-09-30 00:05:15 -04:00
555fba1e4b add special case for windows installer package where the .dll file is in the bin folder 2020-09-29 18:46:32 -04:00
e4cfb91169 discuss that USER-MESONT package potentials are not bundled and provide URL 2020-09-29 16:57:19 -04:00
0ed935ab76 silence compiler warning on windows 2020-09-29 11:19:12 -04:00
54c4b95c4d small format tweak 2020-09-28 14:09:03 -04:00
b1f6a6799c Merge remote-tracking branch 'github/master' into coul_tt 2020-09-28 13:56:00 -04:00
3b1d07225d correct strmatch() semantics and restore strcmp() in extract function 2020-09-28 13:49:38 -04:00
c217c8df24 shorten example and do not print CPU time output to avoid false positives when comparing 2020-09-28 13:49:08 -04:00
8bc67f17cc fully integrate pair style coul/tt into manual 2020-09-28 13:46:53 -04:00
e0e4e516fc Merge pull request #2396 from akohlmey/openmp-compat-auto
Autodetect OpenMP compatibility setting for known compiler signatures
2020-09-28 13:14:13 -04:00
25bda86ad0 fix typo 2020-09-28 12:52:32 -04:00
67803fe17f Minor cleanup for dpd/tstat in the GPU library 2020-09-28 11:41:39 -05:00
d8b55ecd65 Merge pull request #2397 from akohlmey/remove-cilk-array-notation
Remove specializations using cilk array notation from USER-INTEL
2020-09-28 11:56:02 -04:00
405c1b98c5 remove inactive code and documentation for dump image persp option 2020-09-28 08:19:26 -04:00
d868271b01 remove unneeded escapes from input examples 2020-09-28 07:52:38 -04:00
407f445753 remove specializations using cilk array notation 2020-09-28 07:32:11 -04:00
c626208c80 avoid failure with Intel compilers 2020-09-27 19:05:58 -04:00
9d39587d37 expand c-library properties example 2020-09-27 19:05:40 -04:00
fdbdd26585 include intel fortran compiler into intel cmake preset 2020-09-26 22:30:21 -04:00
b11b420b08 small doc update for singularity 2020-09-26 22:30:05 -04:00
d23f7d14c9 typo 2020-09-25 22:20:31 -04:00
7e68746ebe use safe reader function 2020-09-25 22:16:19 -04:00
adef7d4e18 need to check for nthreads < 1 only if OpenMP is enabled 2020-09-25 22:14:21 -04:00
909960514a avoid unneeded division 2020-09-25 22:10:51 -04:00
6967522a4f fix logic error 2020-09-25 22:08:28 -04:00
172bb6050a use safe read function 2020-09-25 22:08:22 -04:00
a963e85846 remove dead code 2020-09-25 21:50:53 -04:00
66c4627775 Merge branch 'collected-small-fixes' of github.com:rbberger/lammps into collected-small-changes 2020-09-25 18:14:50 -04:00
2ffa50f2f8 fix memory leak 2020-09-25 18:13:52 -04:00
ab08a27731 correct data layout for creating integrate style 2020-09-25 18:00:47 -04:00
bd60174fd3 fix argument dimensions 2020-09-25 18:00:19 -04:00
c0384eb264 update code owners 2020-09-25 17:52:51 -04:00
c496b18a9d avoid memory leak 2020-09-25 17:50:02 -04:00
d1321a0860 fix argument dimensions 2020-09-25 17:49:52 -04:00
e378532003 avoid 32-bit integer overflow 2020-09-25 17:31:49 -04:00
5e110fe707 update docs for autodetected OpenMP compatibility 2020-09-25 16:45:39 -04:00
5489c64aef remove trailing whitespace 2020-09-25 16:32:54 -04:00
2e67aa6b47 autodetect OpenMP 4.0 semantic compatibility if not explicitly set 2020-09-25 16:31:45 -04:00
cc5ef652e4 move Using Python with LAMMPS to programmer guide and integrate python module docs 2020-09-25 15:13:05 -04:00
0ffb2ce09c move Tutorials to bottom of Howto section 2020-09-25 15:12:31 -04:00
5e6d222d29 fix up a few internal references 2020-09-25 14:04:44 -04:00
495438105f mass renaming of programmer guide files 2020-09-25 13:59:29 -04:00
9a523fef65 add bibliography 2020-09-25 13:48:22 -04:00
089e55cc37 fix typo 2020-09-25 13:48:05 -04:00
26ad664079 Avoid string duplication and remove memory leak 2020-09-25 13:44:28 -04:00
e1b00e96ed bug fixed 2020-09-25 17:10:54 +02:00
064f735272 source files updated, examples changed 2020-09-25 16:08:20 +02:00
7a35f786fb tweak button size setting for active state, too. 2020-09-24 16:56:05 -04:00
28812b1ea7 Merge pull request #2381 from akohlmey/collected-small-changes
Collected small changes
2020-09-24 16:17:28 -04:00
08270c3498 update container descriptions 2020-09-24 02:17:26 -04:00
02c1480546 update README in examples/COUPLE/simple 2020-09-24 02:17:11 -04:00
ac78f8f1e1 update examples/COUPLE/simple sources to work with the current version of LAMMPS 2020-09-24 01:30:34 -04:00
b350dce573 improve error messages for variables and python commands 2020-09-23 19:02:55 -04:00
2f8a5ddd1f xindy doesn't seem to be required (and is not available on CentOS 8) 2020-09-23 17:13:09 -04:00
85fe94a6dd Merge pull request #2388 from jrgissing/molecule_fragments_check
molecule: findfragments correction
2020-09-23 15:54:37 -04:00
790f636523 rephrase 2020-09-23 15:45:29 -04:00
b6d98707ec molecule: revert overzealous error checking 2020-09-23 11:23:10 -04:00
2b30661b07 molecule: correctly update docs 2020-09-23 10:46:23 -04:00
1be766c8cd molecule fragment: let's add more checks 2020-09-23 10:37:33 -04:00
ed3975054a fix typo 2020-09-23 10:30:19 -04:00
e29f838121 also add error checks 2020-09-23 10:28:48 -04:00
88b8461e0f the right molecule fragment fix 2020-09-23 10:19:38 -04:00
343932f220 document -DLAMMPS_TRAP_FPE 2020-09-23 09:38:07 -04:00
ab4dc9c343 Merge pull request #2378 from junghans/cmake_install_fix
cmake: do not install mpi.h without library
2020-09-22 21:37:57 -04:00
20027cc7ec Merge pull request #2390 from Vsevak/hip_pppm_fix
Fix atomicAdd in PPPM/GPU for the HIP backend
2020-09-22 18:04:12 -04:00
9df1bb4db0 remove obsolete makefiles. Makefile.xk7 is on death watch but Blue Waters still has such nodes 2020-09-22 17:47:14 -04:00
459d51bbea Do not disable atomics for HIP
atomics are disabled for NVIDIA 1.0 cards, which should not affect ROCm HIP
2020-09-22 22:34:24 +03:00
7b4304577a add -std=c++11 flag to compilation of all .cpp files on Linux 2020-09-22 10:58:54 -04:00
2d7b6a9008 remove dead code 2020-09-22 10:12:59 -04:00
b878403aee update a few more singularity container definitions for doc build requirements 2020-09-22 06:01:34 -04:00
1eebdcbd03 fix obvious bugs 2020-09-22 05:34:29 -04:00
b876f451f8 make compiler choice overridable 2020-09-22 05:34:12 -04:00
f186eb7005 be more selective about which elements not to display
we only want to skip the redundant User-Documentation and Programmer-Documentation captions
2020-09-21 22:41:19 -04:00
54aacc47ea add section about citing LAMMPS 2020-09-21 22:40:36 -04:00
cc8b042abf cmake: export LAMMPS_LIB_NO_MPI for serial version 2020-09-21 12:54:56 -06:00
fbcb888ec0 cmake: do not install mpi.h 2020-09-21 12:41:34 -06:00
51b900b043 Merge remote-tracking branch 'origin/master' into cmake_install_fix 2020-09-21 12:39:51 -06:00
36fdba32e7 Merge pull request #2382 from junghans/cmake_install_fix_2
cmake: install missing utils.h
2020-09-21 13:29:29 -04:00
19bc3d3643 Docs updated 2020-09-21 18:05:57 +02:00
7eab154786 one-line bug when checking for molecule fragments 2020-09-21 11:01:04 -04:00
8099e2a5d9 Docs updated 2020-09-21 16:16:38 +02:00
2206de7c68 Examples added, bug fixed 2020-09-21 16:14:33 +02:00
a1407de09d have make yes/no package terminate with error status for non-existing packages
this closes #2387
2020-09-21 07:09:13 -04:00
c7c7d2f474 Merge pull request #2377 from akohlmey/move-version-to-lammps
Move the members `version` and `num_ver` from Universe class to LAMMPS class
2020-09-21 02:10:08 -04:00
29a7d598ac update formatting style to be more consistent 2020-09-21 02:04:58 -04:00
0a11cc5eb9 make code valgrind clean 2020-09-21 01:55:07 -04:00
486131f4c5 fix segmentation fault bug in USER-PTM. closes #2386 2020-09-21 01:54:36 -04:00
0951fea738 Merge branch 'collected-small-changes' of github.com:akohlmey/lammps into collected-small-changes 2020-09-20 19:20:42 -04:00
acb5fd2ebf Simplify GenerateBinaryHeader utility
The function implementation was incomplete for handling the case
of multiple files. It would generate wrong C code in case more than
one file was specified. Since we don't have this use case, it's
safe to keep only the single input file implementation.

Closes issue #2294
2020-09-20 18:24:43 -04:00
7d07d04989 Use correct library for unit test if LAMMPS_MACHINE_NAME is set 2020-09-20 17:52:24 -04:00
54b9e60dd0 reformat 2020-09-20 14:06:40 -04:00
4cbf821af3 modernize example 2020-09-20 14:03:13 -04:00
774bffe623 relax result precision requirement somewhat 2020-09-20 13:57:04 -04:00
f2bb835e23 cmake: install missing utils.h 2020-09-20 10:44:54 -06:00
dfd447f570 Add missing tools for PDF index build to containers 2020-09-19 23:07:24 -04:00
61ce73273b Add missing -u flag for unbuffered PyLammps tests
Addresses issue #2380
2020-09-19 23:07:13 -04:00
92f8398bed check more thoroughly for prerequisites to translate the PDF manual. update doc/README 2020-09-19 18:16:52 -04:00
705a9a0a8b cmake: do not install mpi.h without library 2020-09-19 15:12:36 -06:00
f72fb7290b fix links 2020-09-19 16:28:20 -04:00
67aeb7a5e5 Merge branch 'move-version-to-lammps' into collected-small-changes 2020-09-19 16:17:01 -04:00
6b0346c299 try using an unencrypted download or wget if the normal download of polyfill.js fails 2020-09-19 16:14:08 -04:00
c91ee210e4 simplify variable lookup in set command 2020-09-19 16:04:09 -04:00
9c0808e95a update unit tests for move of version string location 2020-09-19 15:00:03 -04:00
4c51a8ae57 move version and num_ver from Universe to LAMMPS and make num_ver an int 2020-09-19 11:37:08 -04:00
89506efd23 document Pointers class and LAMMPS_NS namespace 2020-09-19 11:16:44 -04:00
73c65d43e1 update list of main folders and descriptions 2020-09-19 02:53:36 -04:00
9030c987e6 tweak formatting and style setting to have the desired structure and layout 2020-09-19 00:54:54 -04:00
6dda562501 skip over uninitialized and internal variables. correctly identify all kinds of boolean values 2020-09-18 23:40:31 -04:00
9cdde97863 correct test logic 2020-09-18 22:37:54 -04:00
5ce6259632 correct reference to function 2020-09-18 22:35:56 -04:00
56d9222156 Merge pull request #2374 from akohlmey/next_patch_version
Step version strings for the next patch release
2020-09-18 21:45:34 -04:00
897a7d0ed4 only try to broadcast if we have a proper communicator 2020-09-18 21:17:31 -04:00
f5fda95da6 Merge branch 'master' into next_patch_version 2020-09-18 21:15:12 -04:00
094a1bea91 Merge pull request #2369 from akohlmey/collected-small-changes
Collected small changes and fixes for the next patch release
2020-09-18 20:20:41 -04:00
51957a6219 fix merge conflict 2020-09-18 19:49:36 -04:00
a0afb9b554 Merge branch 'master' into collected-small-changes 2020-09-18 18:38:47 -04:00
7580176020 Merge pull request #2376 from giacomofiorin/colvars-update
Update Colvars to version 2020-09-17
2020-09-18 18:38:17 -04:00
b520a6287b Revert parts of "Restore PDF index"
We need to update the container image(s) for building the
pdf documentation on ci.lammps.org by adding `latexmk`
and `xindy` before we can re-enable this change to the makefile.

This partially reverts commit 3c3b76a781.
2020-09-18 18:37:26 -04:00
494498df03 update docs on building the PDF version of the manual 2020-09-18 18:27:23 -04:00
906e000e4c Merge pull request #2368 from jrgissing/bond/react-update_all_charges_default
Bond/react update all charges by default
2020-09-18 18:17:54 -04:00
33a2857b33 Fix whitespace 2020-09-18 17:56:12 -04:00
3c3b76a781 Restore PDF index 2020-09-18 17:51:51 -04:00
07c9298558 Fix some broken internal hyperlinks in Colvars PDF 2020-09-18 17:24:12 -04:00
d4cd67ce74 tweak format some more and refactor manual sectioning to be logically correct
now if only there was a way to make the titles of the parts show in the
navigation sidebar.
2020-09-18 17:15:42 -04:00
7deb773440 Update Colvars to version 2020-09-17
Small changes

Implement global map of components (@HanatoK)
https://github.com/Colvars/colvars/pull/363

Format code examples with colored background (@giacomofiorin)
https://github.com/Colvars/colvars/pull/361

replaced jacobi() with an open-source version (@jewettaij)
https://github.com/Colvars/colvars/pull/360
2020-09-18 17:09:18 -04:00
e1397b9a88 Merge branch 'master' into collected-small-changes 2020-09-18 16:11:47 -04:00
34ba8ec83c Merge pull request #2359 from rbberger/python_interface_guide
Library interface updates and Python documentation
2020-09-18 16:11:08 -04:00
635b7c04a5 Update docs and docstrings 2020-09-18 14:49:25 -04:00
de811db221 address spelling issues 2020-09-18 14:13:35 -04:00
ae9abf0f7d correctly update docs 2020-09-18 14:10:32 -04:00
9a6dfbc0ee add some cross referencing 2020-09-18 13:45:33 -04:00
449513703b Merge remote-tracking branch 'origin/master' into python_interface_guide 2020-09-18 12:35:17 -04:00
e0b22a51da Doc updates 2020-09-18 12:35:07 -04:00
1afb355d09 Limit int types to LAMMPS_INT and LAMMPS_INT64
Keep LAMMPS_TAGINT and LAMMPS_BIGINT internal to LAMMPS.
An external caller now only needs to distinguish between 32- and 64-bit
integers, doubles and C strings.
2020-09-18 12:05:37 -04:00
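A sketch of what this reduction means for an external caller, assuming the lammps Python module exposes the matching datatype constants:

```python
from lammps import LAMMPS_INT, LAMMPS_INT64, LAMMPS_DOUBLE, LAMMPS_STRING

# external code only needs to branch on these kinds of data:
def describe(dtype):
    names = {LAMMPS_INT: "32-bit integer",
             LAMMPS_INT64: "64-bit integer",
             LAMMPS_DOUBLE: "double",
             LAMMPS_STRING: "C string"}
    return names.get(dtype, "unknown")
```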
edbfdde372 document ubuf union in developer guide 2020-09-18 11:55:06 -04:00
2c17652ad6 Created another static instance for dpd/tstat in the GPU library 2020-09-18 10:28:52 -05:00
1264184e7b tweak some website layout and style settings
- make previous/next buttons smaller and show on top and bottom of page
- make tab header items smaller and more rounded
- reduce padding on a few selected items to make pages more compact

remove obsolete lammps.css file variant
2020-09-18 10:55:50 -04:00
df4ab6c39e Merge pull request #2375 from akohlmey/fmtlib-update
Update bundled fmtlib to version 7.0.3
2020-09-18 10:30:18 -04:00
dc89f0879c update specific pull request templates as they are available through the GitHub CLI now 2020-09-18 06:54:31 -04:00
3a2457bb3d update bundled fmtlib to version 7.0.3 2020-09-18 06:14:46 -04:00
2b1e249483 add comments to mark LAMMPS specific customizations 2020-09-18 06:12:42 -04:00
1ef36fd20a step version strings for next patch release 2020-09-17 22:16:42 -04:00
0e81803c8b some embedded documentation tweaks 2020-09-17 22:00:58 -04:00
90abe12a3e update LAMMPS version reference 2020-09-17 21:04:43 -04:00
445e57923f add one-time deprecation warning to lammps_open() and lammps_open_no_mpi() 2020-09-17 21:01:38 -04:00
6e22229424 Merge remote-tracking branch 'github/master' into collected-small-changes 2020-09-17 20:27:55 -04:00
94ad6821f7 Merge pull request #2373 from stanmoore1/kk_wkar
Add workaround for performance regression in Kokkos Package
2020-09-17 20:24:39 -04:00
d8b6ebdfc8 Add workaround for performance regression 2020-09-17 16:33:48 -06:00
42aca25a58 Update docs, fix typos 2020-09-17 18:10:08 -04:00
b4a1c9c24c Update examples 2020-09-17 17:08:13 -04:00
cf1ae7afa6 Use wrapper lammps.extract_atom_datatype 2020-09-17 17:04:40 -04:00
6729a42cfa Merge branch 'master' into collected-small-changes 2020-09-17 17:01:44 -04:00
3b96375888 Merge pull request #2371 from arghdos/kokkos_hip_porting
Additional Kokkos/HIP porting work
2020-09-17 16:58:46 -04:00
be72ce2534 Simplify Python examples to use numpy.extract_atom 2020-09-17 16:51:46 -04:00
ab6b69d6bd Add documentation and better autodetect 2020-09-17 16:39:19 -04:00
c06d5f7fb7 Fix type cast 2020-09-17 16:37:25 -04:00
b81ad54baa Simplify extract_atom and extract_global in Python interface
Both extract methods now can auto-detect the datatype of both global
and per-atom properties. Callers can still enforce different types
if needed by specifying the now optional dtype argument.

The numpy wrapper now has a new extract_atom method, which
replaces the extract_atom_darray and extract_atom_iarray methods and
autodetects both type and size. All parameters can still be forced
to use different values if needed.
2020-09-17 16:16:27 -04:00
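A short usage sketch of the auto-detection described above (the input script is a placeholder; the forced-type constant is assumed to come from the lammps module):

```python
from lammps import lammps, LAMMPS_DOUBLE_2D

lmp = lammps()
lmp.file("in.melt")                     # placeholder input

x = lmp.numpy.extract_atom("x")         # type and size auto-detected
step = lmp.extract_global("ntimestep")  # datatype auto-detected
# the type can still be forced via the now-optional dtype argument:
x2 = lmp.numpy.extract_atom("x", dtype=LAMMPS_DOUBLE_2D)
```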
3275419872 Allow Colvars to include headers from the LAMMPS source directory 2020-09-17 14:37:26 -04:00
d88810f13a Add datatype introspection methods to library
Adds lammps_extract_atom_datatype and lammps_extract_global_datatype functions
to allow extracting type information of properties.
2020-09-17 13:43:43 -04:00
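And a sketch of querying those datatypes from Python, assuming the matching extract_atom_datatype/extract_global_datatype wrappers (the former is referenced in a commit further up):

```python
from lammps import lammps

lmp = lammps()
lmp.file("in.melt")                             # placeholder input

adt = lmp.extract_atom_datatype("x")            # per-atom property datatype
gdt = lmp.extract_global_datatype("ntimestep")  # global property datatype
# both return integer codes matching the module's datatype constants
```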
4c5aedeaa0 fix typo 2020-09-17 12:50:53 -04:00
91eaaba1be improve portability of docs makefile
- detect if /bin/bash is not found and provide a suitable error message
- use $(MAKE) and $(MFLAGS) variables to handle cases where GNU make is not called "make"
2020-09-17 12:40:09 -04:00
706a898163 Merge remote-tracking branch 'origin/master' into python_interface_guide 2020-09-17 12:08:12 -04:00
9fa43b8b12 Skip Numpy tests if not installed 2020-09-17 12:03:36 -04:00
7bf177a1c7 Fast forward porting work to master
Change-Id: Ieb428e4a001efadf880dbe2c64c2a685cebdd4ae
2020-09-17 10:45:04 -05:00
0522e3d7b6 a few more unittest tweaks 2020-09-17 11:11:44 -04:00
00641f9f24 relax epsilon a little for passing cross-platform tests 2020-09-17 11:01:59 -04:00
5dacfc47ca Change return type of lammps.get_natoms to int
Closes PR #2370
2020-09-17 10:57:49 -04:00
62a63d0cd8 silence compiler warnings 2020-09-17 10:27:06 -04:00
f8f2a94f60 coul/tt pair style added to USER-DRUDE 2020-09-17 15:14:27 +02:00
e2fc70da62 Merge branch 'master' into collected-small-changes 2020-09-17 06:51:21 -04:00
e924fc6d6e Merge pull request #2347 from jewettaij/math_eigen
Replace eigensolver code in LAMMPS with math_eigen.h and updated docs
2020-09-17 00:28:12 -04:00
e839fe0d30 Merge branch 'master' into collected-small-changes 2020-09-16 23:53:29 -04:00
b96512171c set -DCOLVARS_LAMMPS when compiling colvars library 2020-09-16 23:52:06 -04:00
4e304177a1 Merge pull request #2356 from giacomofiorin/colvars-update
Update Colvars to version 2020-07-07
2020-09-16 23:37:02 -04:00
b9919b72d7 make MSM test w/o PBC run faster 2020-09-16 23:36:17 -04:00
429cc0cacf add missing updates for symbolic constants in Atom class 2020-09-16 23:19:10 -04:00
6e9a39cf22 remove trailing whitespace 2020-09-16 22:57:49 -04:00
c14fd3131a Merge pull request #2367 from ndtrung81/rigid-langevin
Fixed a bug in computing the langevin torques applied to rigid bodies
2020-09-16 22:36:04 -04:00
61b83901f5 bond/react: correct new custom_charges logic 2020-09-16 21:28:48 -04:00
55bc1baf97 bond/react: correct recent enum update 2020-09-16 20:33:45 -04:00
b176cdf28c remove trailing whitespace 2020-09-16 18:18:53 -04:00
a8a9fb6eb8 adapt unit test for Jacobi class 2020-09-16 18:17:23 -04:00
12582edfb7 Add numpy.extract_atom_darray test 2020-09-16 18:04:45 -04:00
96f4178d92 add URLs to upstream projects for eigensolvers 2020-09-16 17:53:27 -04:00
942ed3afeb update copyright headers to match LAMMPS' conventions 2020-09-16 17:35:58 -04:00
85d36ad6c4 Condense export of debug macro 2020-09-16 17:28:28 -04:00
55a054a44a bond/react: default charge update docs 2020-09-16 17:09:48 -04:00
6faf436850 bond/react: update all charges by default, even with edge atoms
custom charges has a new keyword and is now done with molecule fragments (not backwards compatible)
2020-09-16 17:02:49 -04:00
dfcdb61d87 Used existing helper functions to make code more concise 2020-09-16 15:40:04 -05:00
e6703019bc Update examples to use Python API constants 2020-09-16 16:10:43 -04:00
c82df186b5 Disable zstd tests if library is too old 2020-09-16 15:18:28 -04:00
ccc743e13e Add test for thermo data 2020-09-16 15:14:05 -04:00
bd542efa52 include math_eigen.h, remove it from .gitignore 2020-09-16 14:12:14 -04:00
0931cdd109 relax epsilon for rigid body integrators 2020-09-16 14:07:28 -04:00
fd9e39bf1a update docs 2020-09-16 14:07:06 -04:00
10991ee638 separate the MathEigen implementation into its own header file 2020-09-16 14:06:58 -04:00
8ccd3c03fa Correct test name 2020-09-16 12:54:36 -04:00
2270d8f4ec Add PyLammps.atoms test and fix bug with -echo screen/both 2020-09-16 12:48:20 -04:00
9cdd35e625 Make sure Python tests run unbuffered so PyLammps works 2020-09-16 11:28:38 -04:00
f7a939dec2 update atom_style tester for new jacobi implementation 2020-09-16 10:25:03 -04:00
ca24806f4c Add tests 2020-09-16 10:11:00 -04:00
981d60f1bd update rigid tests for new eigensolver 2020-09-15 22:47:52 -04:00
48ad860889 only do Zstd tests if it is enabled/found. 2020-09-15 22:47:10 -04:00
f5998692d0 Merge branch 'master' into math_eigen 2020-09-15 21:37:05 -04:00
d611b85d7d Merge pull request #2360 from akohlmey/symbolic-atom-constants
Replace numeric with symbolic constants in Atom and AtomVec classes
2020-09-15 21:27:47 -04:00
38f0a2e24c Fix doc build errors 2020-09-15 19:50:28 -04:00
cf7d2109a3 Merge branch 'library-progguide' into symbolic-atom-constants 2020-09-15 19:50:18 -04:00
4b25eb1a34 small tweaks 2020-09-15 19:08:40 -04:00
88cee4ff09 the library interface is for calling from C, so switch nullptr in comments/docs back to NULL 2020-09-15 19:07:07 -04:00
a3765eb75b Merge branch 'master' into library-progguide 2020-09-15 18:58:29 -04:00
7611efeea2 Update python docs 2020-09-15 18:03:26 -04:00
0f6a34775c Merge branch 'master' into symbolic-atom-constants 2020-09-15 17:44:31 -04:00
cebf6d33ba Merge pull request #2358 from eagunn/iss2345
Replace instances of NULL with nullptr
2020-09-15 17:41:24 -04:00
ae68becf4a Fixed a bug in computing the langevin torques applied to rigid bodies 2020-09-15 15:27:24 -05:00
60712e3f90 Add docstrings 2020-09-15 16:22:43 -04:00
f481a403bc Merge pull request #2366 from akohlmey/msm-fix
Apply alternate bugfix to KSpace style MSM that avoids grid level overflow
2020-09-15 16:22:19 -04:00
556dc67fdc Correct filename of generated image 2020-09-15 15:20:38 -04:00
6cfad41be4 Add background_color argument to IPyLammps.image 2020-09-15 15:19:39 -04:00
e5e449795a Add tests for has_style and available_styles 2020-09-15 15:16:19 -04:00
618b08dcfc Merge branch 'master' into symbolic-atom-constants 2020-09-15 14:58:13 -04:00
3dd3c6a2a5 redo fix for class member shadowing while avoiding overflowing levels 2020-09-15 14:48:55 -04:00
16749566f8 Revert "fix issue with local variable shadowing a global variable"
This reverts commit 3368eb79d8.
2020-09-15 14:42:00 -04:00
2165f6ed38 Merge pull request #2363 from akohlmey/consistent-memory-usage
Make memory usage reporting consistent
2020-09-15 14:21:07 -04:00
baa49984c2 update comments 2020-09-15 14:19:40 -04:00
326d9f398c Merge branch 'master' into iss2345 2020-09-15 14:06:16 -04:00
5cbb54b844 Merge branch 'iss2345' of github.com:eagunn/lammps into iss2345 2020-09-15 14:04:16 -04:00
b5832191d8 Merge pull request #2364 from akohlmey/force-test-bugfix
Force style unit test template bugfix
2020-09-15 13:40:13 -04:00
5da50d64c9 reformulate 2020-09-15 12:32:43 -04:00
2f3c916a57 add unit test for lammps_memory_usage() 2020-09-15 12:29:03 -04:00
3576464268 Merge branch 'master' into consistent-memory-usage 2020-09-15 12:25:16 -04:00
9490370eb0 Use consistent naming in PyLAMMPS classes 2020-09-15 12:22:36 -04:00
2c30de65d2 Fix typos in docstring 2020-09-15 12:16:47 -04:00
165444004e Merge pull request #2362 from akohlmey/library-progguide
Updates to C-Library interface programmer guide and reference
2020-09-15 11:51:02 -04:00
db59abf1b9 fix bug in lammps_has_style() 2020-09-15 11:34:24 -04:00
4a48ce76b8 Merge remote-tracking branch 'origin/master' into python_interface_guide 2020-09-15 11:34:16 -04:00
c6bc21febf Merge pull request #2361 from akohlmey/collected-small-changes
Collected small changes for the next patch release
2020-09-15 11:28:25 -04:00
2f3c73217b detect if dt has been changed from default and print warning if reset by units command 2020-09-15 10:50:44 -04:00
b20b234ebe Merge remote-tracking branch 'origin/master' into python_interface_guide 2020-09-15 10:45:28 -04:00
e30864431f add test for missing quartic bond style 2020-09-15 10:12:18 -04:00
2794108837 don't update per-atom pair data if not available 2020-09-15 10:12:07 -04:00
238730b13d update epsilon 2020-09-15 09:55:58 -04:00
c6b6369d2d fix bug in input script templates that would have the timestep setting overwritten by a units command 2020-09-15 09:46:57 -04:00
bc31486fd1 start tests for lammps_extract_global() 2020-09-15 09:27:07 -04:00
bb76215ef0 add library interface to retrieve memory usage info 2020-09-15 01:59:58 -04:00
4b64be77e0 consolidate memory allocation information into a single function (in Info) 2020-09-15 01:59:58 -04:00
0183e999c9 consistently return "memory_usage" as double. 2020-09-15 01:59:58 -04:00
39681acfa4 add cross reference 2020-09-14 21:03:39 -04:00
d27d4912af test lammps_extract_settings() 2020-09-14 20:53:19 -04:00
1e54a9fcf1 complete tests for extracting and resetting the box 2020-09-14 20:27:29 -04:00
5c7f67201b make naming more consistent 2020-09-14 20:27:10 -04:00
93bd851ab0 fix typo 2020-09-14 17:39:31 -04:00
df9f0e244b add false positive 2020-09-14 17:18:14 -04:00
5634dd4394 correct formatting of examples 2020-09-14 17:15:53 -04:00
b1e0990c46 update lammps config settings calls example 2020-09-14 17:11:27 -04:00
f70717710e fix for zstd support. add styles to list at top and document .zst extension requirement 2020-09-14 17:07:09 -04:00
a1b2f82107 test for exceptions and add example 2020-09-14 17:01:28 -04:00
4185608e92 complete testing for LAMMPS configuration queries 2020-09-14 14:56:56 -04:00
b455812434 more tests for properties and configuration 2020-09-14 14:25:17 -04:00
f5e9804557 cosmetic 2020-09-14 14:24:34 -04:00
6315b277c8 Revert commit 14dfd3876a which walked back
the NULL->nullptr changes for lib/awpmd.

Then, hand-applied a minor cleanup to comments, a la Axel's changes to the
rest of the codebase.
2020-09-14 12:05:46 -06:00
430d151660 Revert commit 05d41cd50d which walked back
the NULL->nullptr changes for lib/poems.

Then, hand-applied a minor cleanup to comments, a la Axel's changes to the
rest of the codebase.
2020-09-14 12:05:46 -06:00
58a17532bf fix up some comments 2020-09-14 12:05:45 -06:00
ec1fe4e445 update molfile plugin headers with latest versions from VMD 2020-09-14 12:05:45 -06:00
a57a5c76a5 T2345: After examining comments changed by batch conversion of NULL
to nullptr, I've corrected a small number where it seemed clear
that the previous NULL should have been either "NULL" (where the
code is looking for a string with that value), or null char / null byte
(where the comment is talking about the char/byte that terminates
a c-style string).

Also changed 6 places where the batch change had changed 'NULL ptr'
to 'nullptr ptr'. Now they simply say 'nullptr'.
2020-09-14 12:05:45 -06:00
96f0e7e0d0 T2345: Walking back nullptr changes in #defines in STUBS/mpi.h 2020-09-14 12:05:45 -06:00
96b01f2383 T2345: Walking back nullptr changes to lib/awpmd 2020-09-14 12:05:45 -06:00
ed57554e18 T2345: Replace instances of NULL with nullptr
revert lib/poems to remove nullptr changes.
Walking back changes to lib as needed.
2020-09-14 12:05:44 -06:00
f524fa758d T2345: Replace instances of NULL with nullptr
Per guidance from Axel, revert lib/kokkos and lib/colvars
to remove nullptr changes.
2020-09-14 12:05:44 -06:00
9a5d8fd18b Manually resolve two rebase conflicts. 2020-09-14 12:04:51 -06:00
50b80e078d Merge pull request #2354 from stanmoore1/qeq_refactor
small fixes to qeq/reax refactoring for base and accelerated variants
2020-09-14 12:18:32 -04:00
ac040dc85f update prototype to allow constant strings 2020-09-14 12:17:49 -04:00
f4601235c9 add tests for lammps configuration introspection via library interface 2020-09-14 12:17:24 -04:00
155e7de859 fix bug in lammps_has_style() 2020-09-14 12:16:53 -04:00
a5740d75cf add missing option 2020-09-14 09:27:49 -04:00
3655919c7c tweak minimize summary output format 2020-09-14 09:09:22 -04:00
cc86214fe2 take back an overly eager change 2020-09-13 17:20:29 -04:00
2438f38afc skip test if atom style is missing 2020-09-13 17:12:21 -04:00
96a5585d65 use more const char pointers 2020-09-13 17:12:11 -04:00
a2fc33b419 update c-library tests 2020-09-13 17:02:32 -04:00
de49325c0f add properties example 2020-09-13 17:00:08 -04:00
fcb38354dc import adapted example input deck 2020-09-13 16:47:29 -04:00
39255da6ae use common main function that allows handling of common command line flags. 2020-09-13 16:46:59 -04:00
5a0623595b make char * argument of lammps_get_thermo() const 2020-09-13 16:46:13 -04:00
b702298e83 fix typo 2020-09-12 19:06:58 -04:00
da0cdb0de4 update formatting of keyword summary 2020-09-12 19:06:30 -04:00
eda04dac98 update conventions and guidelines about C++ standard and requirements 2020-09-12 15:13:23 -04:00
93a0d4c096 fix up some comments 2020-09-12 14:26:34 -04:00
bc850bcd97 update molfile plugin headers with latest versions from VMD 2020-09-12 14:04:23 -04:00
1597cd5dcb T2345: After examining comments changed by batch conversion of NULL
to nullptr, I've corrected a small number where it seemed clear
that the previous NULL should have been either "NULL" (where the
code is looking for a string with that value), or null char / null byte
(where the comment is talking about the char/byte that terminates
a c-style string).

Also changed 6 places where the batch change had changed 'NULL ptr'
to 'nullptr ptr'. Now they simply say 'nullptr'.
2020-09-12 09:34:39 -06:00
9f0354f816 T2345: Walking back nullptr changes in #defines in STUBS/mpi.h 2020-09-12 09:34:39 -06:00
14dfd3876a T2345: Walking back nullptr changes to lib/awpmd 2020-09-12 09:34:39 -06:00
05d41cd50d T2345: Replace instances of NULL with nullptr
revert lib/poems to remove nullptr changes.
Walking back changes to lib as needed.
2020-09-12 09:34:39 -06:00
13be8085e5 T2345: Replace instances of NULL with nullptr
Per guidance from Axel, revert lib/kokkos and lib/colvars
to remove nullptr changes.
2020-09-12 09:34:39 -06:00
f1ef7d85a8 T2345: Replace instances of NULL with nullptr
The following changes have been applied to src and lib folders:
regex replace: ([^"_])NULL ⇒ \1nullptr (8968 chgs in src, 1153 in lib)
Manually find/change: (void \*) nullptr ⇒ nullptr (1 case)
regex find: ".*?nullptr.*?"
  Manually ~14 cases back to "NULL" in src, ~2 in lib
  regex finds a few false positive where nullptr appears between two
  strings in a function call
2020-09-12 09:34:38 -06:00
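The regex rule quoted in commit f1ef7d85a8 can be illustrated with a short, self-contained C++ sketch (the tooling actually used for the batch change is not recorded here; this only demonstrates the replacement rule and why quoted "NULL" survives it):

```cpp
// Demonstrates the batch rule ([^"_])NULL -> \1nullptr: NULL is kept
// when preceded by '"' (string literal) or '_' (part of an identifier).
#include <iostream>
#include <regex>
#include <string>

int main() {
  std::string code = "if (ptr == NULL) strcmp(s, \"NULL\");";
  std::regex rule("([^\"_])NULL");
  std::cout << std::regex_replace(code, rule, "$1nullptr") << "\n";
  // prints: if (ptr == nullptr) strcmp(s, "NULL");
}
```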
69962b4de5 add test for MSM kspace without periodic boundaries 2020-09-12 11:10:38 -04:00
3368eb79d8 fix issue with local variable shadowing a global variable 2020-09-12 11:10:12 -04:00
d7500c3e5a MSM output format tweaks 2020-09-12 11:09:33 -04:00
16d2fef5ad add check that shrinkwrap boundaries are not used when reading a data file while an MSM kspace style is defined 2020-09-12 11:06:20 -04:00
4a8d6016e4 Update docs 2020-09-11 22:49:44 -06:00
828ce84df5 add check for number of local atom indices overflowing neighbor lists for molecular systems 2020-09-11 22:09:09 -04:00
bc1b876a3d make fix widom errors and test output consistent with the fix name capitalization 2020-09-11 19:29:17 -04:00
47b3039b28 update unit tests, too 2020-09-11 19:24:32 -04:00
9d0c3bc665 replace some numeric constants in Atom and AtomVec classes with enumerators 2020-09-11 19:07:55 -04:00
612f1d7ce3 fix typo 2020-09-11 17:49:06 -04:00
7b869e8ee8 Start with more detailed description of PyLammps 2020-09-11 17:09:38 -04:00
77d475d121 Add more tests 2020-09-11 15:48:23 -04:00
58833ce11e Add API tests 2020-09-11 15:21:36 -04:00
cb253b2b1a Add HTML report option for Python 2020-09-11 14:03:13 -04:00
1855f92694 Use include and omit 2020-09-11 12:44:41 -04:00
377163d940 Add target to generate Python coverage XML 2020-09-11 11:36:22 -04:00
101d39142e Merge pull request #2357 from akohlmey/lib-get-mpi-comm
Add ability to retrieve the MPI communicator from LAMMPS instance via library/python interface
2020-09-11 11:20:12 -04:00
97000fa016 Merge remote-tracking branch 'origin/master' into python_interface_guide 2020-09-11 09:48:28 -04:00
ded57ba749 move test to avoid issues with indentation from having tabs directly follow a list-table 2020-09-10 16:16:58 -04:00
4452109b60 address spelling issue 2020-09-10 16:15:03 -04:00
0d5f68bc30 make "new in" and "changed in" leads boldface 2020-09-10 16:14:53 -04:00
3d37fd05f4 disable caching - for now - to avoid bad side effects from sphinx-tabs 2020-09-10 16:14:18 -04:00
e813e2d30a add minimal unit test for lammps_get_mpi_comm() API 2020-09-09 22:12:47 -04:00
bd72ef7996 add API to library/python interface to extract the MPI communicator 2020-09-09 22:03:11 -04:00
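A minimal sketch of using that API from C++, assuming (as the Fortran-compatible design suggests) the call returns an integer handle that MPI_Comm_f2c() converts to an MPI_Comm:

```cpp
// Hedged sketch: obtain the MPI communicator of a LAMMPS instance.
// Assumes lammps_get_mpi_comm() returns a Fortran-style integer handle.
#include "library.h"
#include <mpi.h>
#include <cstdio>

void report_ranks(void *handle) {
  int f_comm = lammps_get_mpi_comm(handle);
  MPI_Comm comm = MPI_Comm_f2c(f_comm);
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  std::printf("LAMMPS instance runs on rank %d of %d\n", rank, size);
}
```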
b99e005ca1 Update example output files 2020-09-09 18:26:34 -04:00
d43cb34c70 Support debugging flag for Colvars via CMake 2020-09-09 18:19:29 -04:00
85c394453c Update Colvars to version 2020-07-07
This update contains several fixes and small new features or usability
improvements.  Descriptions and authorship information can be accessed from
the pull requests listed below.

Skip the zero-step also when multiple run commands are executed in sequence (@giacomofiorin)
https://github.com/Colvars/colvars/pull/357

Do not accumulate data at step 0 (@giacomofiorin)
https://github.com/Colvars/colvars/pull/345

Support for symmetry permutations of atoms in RMSD (@jhenin)
https://github.com/Colvars/colvars/pull/344

Detect new grid parameters (@jhenin)
https://github.com/Colvars/colvars/pull/341

Only access the output streams in non-threaded regions (@giacomofiorin)
https://github.com/Colvars/colvars/pull/338

Fix incomplete setting of default colvarsRestartFrequency (@giacomofiorin)
https://github.com/Colvars/colvars/pull/334

Fix typo (@e-kwsm)
https://github.com/Colvars/colvars/pull/333

Convert the input keyword to lowercase in read_state_data_key (@HanatoK)
https://github.com/Colvars/colvars/pull/332

Implement reflecting b.c. for ext Lagrangian (@jhenin)
https://github.com/Colvars/colvars/pull/329
2020-09-09 18:19:21 -04:00
ed14793c69 moved eigensolver documentation into pg_dev_utils.rst 2020-09-09 14:59:00 -07:00
3bacf97468 Merge branch 'master' into math_eigen 2020-09-09 14:56:28 -07:00
2ab0878c9e reduced compile time by removing unnecessary instantiations from math_eigen.cpp 2020-09-09 14:43:28 -07:00
cdd9d693ad Merge pull request #2349 from akohlmey/more-progguide-updates
More Programmer docs updates and related code refactoring
2020-09-09 14:34:41 -04:00
e238201be5 add a couple more versionadded directives 2020-09-09 13:21:19 -04:00
790b2693ba Merge pull request #2353 from junghans/cmake_clean_up
cmake: consistent usage of find variable
2020-09-09 11:04:13 -04:00
a51bebc9e4 Merge pull request #2340 from rbberger/container_update
Singularity Container definitions update
2020-09-09 09:53:48 -04:00
f4c4c62349 adjust test to updated API 2020-09-09 09:27:34 -04:00
03d43bda3c make "New in" and "Changed in" statements boldface to stand out more 2020-09-09 09:25:58 -04:00
2b854ec64f reformulate 2020-09-09 09:25:34 -04:00
a1cf6c087c no need to use a deprecated argument in a newly added API 2020-09-09 09:06:28 -04:00
2a42fe66ad discuss change to get LAMMPS handle as return value instead of argument 2020-09-09 09:06:00 -04:00
56b1300fd3 reverted the default behavior of "jacobi3()". By default it now sorts the eigenvalues again. 2020-09-08 21:58:54 -07:00
dad749b62b make vget()/vgot() inline functions again for optimal performance. 2020-09-08 20:52:51 -04:00
6ade5dd740 changed the default sorting options in "jacobi3()" to be more similar to the original "jacobi()" function from "math_extra.h". This should not have any effect on LAMMPS behavior 2020-09-08 17:05:08 -07:00
c818a00523 remove unwanted docs 2020-09-08 16:53:26 -04:00
ea0a9f00e8 swap order of classes in toctree 2020-09-08 16:45:16 -04:00
ca1b5ed936 fix minor issues 2020-09-08 16:43:10 -04:00
daf8a2313a split developer guide into multiple files 2020-09-08 16:38:06 -04:00
ee25ed3897 update/correct documentation for memory pool classes 2020-09-08 16:36:48 -04:00
9f4a991fc5 move over doxygen comments for a few LAMMPS classes 2020-09-08 16:36:29 -04:00
1273179d03 Fix compile error 2020-09-08 13:42:25 -06:00
9e47452382 recover compilation for platforms using malloc() instead of posix_memalign() 2020-09-08 15:36:21 -04:00
7a9d31328e remove obsolete comment 2020-09-08 15:29:42 -04:00
4f8e627479 QEq refactor 2020-09-08 13:16:45 -06:00
9f7e309f07 small updates to MyPage and convert MyPoolChunk similar to MyPage 2020-09-08 15:13:16 -04:00
a3cc7581b1 rename test for memory pools 2020-09-08 15:12:08 -04:00
f72e5b6d13 Typo 2020-09-08 12:52:32 -06:00
b008ba5ecb Merge remote-tracking branch 'origin/master' into cmake_clean_up 2020-09-08 12:51:49 -06:00
9f8b8529b1 Merge pull request #2348 from akohlmey/use-tabs-with-html
Enable sphinx-tabs extension with the html version of the documentation
2020-09-08 12:02:43 -04:00
22fac9affc more doc update 2020-09-08 09:49:07 -06:00
8fb13f4fd1 cmake: make doc more consistent 2020-09-08 09:43:21 -06:00
5df3deb8e6 update/refactor a few more build documentation files for using tabs 2020-09-08 11:40:21 -04:00
09b8b117fc cmake: consistent usage of find variable 2020-09-08 09:35:45 -06:00
88cc673f78 more use of tabs in Build_extras.rst 2020-09-08 10:11:37 -04:00
dd20776915 fix spelling issues 2020-09-07 21:17:56 -04:00
9b01d5bf95 revise and add tabs to some sections 2020-09-07 21:08:47 -04:00
9afba50d01 update for zstd support 2020-09-07 21:00:45 -04:00
f02db43f63 add discussion and minimal code example for MyPage class. 2020-09-07 16:10:48 -04:00
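For context, the usage pattern that discussion documents looks roughly like the sketch below (chunk and page sizes are illustrative; treat this as an outline of the MyPage interface, not the documented example itself):

```cpp
// Hedged sketch of the MyPage memory-pool pattern: request a chunk,
// fill it, then commit only the number of items actually written.
#include "my_page.h"
using namespace LAMMPS_NS;

void build_lists(int nlocal) {
  MyPage<int> ipage;
  ipage.init(64, 1024);         // max chunk size and page size (illustrative)
  for (int i = 0; i < nlocal; ++i) {
    int *chunk = ipage.vget();  // storage for up to 64 ints
    int n = 0;
    chunk[n++] = i;             // ... store up to 64 values ...
    ipage.vgot(n);              // commit the n values written
  }
  ipage.reset();                // recycle all pages for reuse
}
```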
0ee7c5f434 add more MyPage test variants 2020-09-07 15:27:13 -04:00
ab5c81f7d6 refactor some more to reduce redundant code. 2020-09-07 08:57:37 -04:00
bbb81a8dd0 add unit tester for MyPage class 2020-09-07 08:33:05 -04:00
3089205a54 tweak MyPage docs 2020-09-07 08:32:34 -04:00
0447616aaf address spelling issues 2020-09-07 08:03:41 -04:00
f8ebcc90fb define OneCoeff struct in my_page.h as HyperOneCoeff to resolve compilation issues 2020-09-07 07:57:23 -04:00
f329d56495 updated pg_developer.rst 2020-09-06 20:12:38 -07:00
a57a1404f3 bug fix: fix unit tests, improve doc readability, and prevent accidentally allocating memory on the heap. (Note: All of these changes are related to "jacobi3()". That function instantiates Jacobi without allocating memory on the heap, and this created some headaches. The original code at https://github.com/jewettaij/jacobi_pd does not have this feature, and the unit tests there do not test for these kinds of errors. Hopefully this commit fixes everything.) 2020-09-06 20:05:47 -07:00
2270d86519 whitespace cleanup and try to recover legacy build 2020-09-06 22:51:58 -04:00
4ca36d9526 add doxygen docs and convert MyPage template class from header only to header plus implementation 2020-09-06 22:26:17 -04:00
485a169ef2 move merge sort to utils namespace 2020-09-06 17:38:53 -04:00
19ce59ba38 use tabs for CMake vs. GNU make in LAMMPS library linking page 2020-09-06 16:50:28 -04:00
d77f08d5fb tweak formatting to look better in the PDF file 2020-09-06 16:49:58 -04:00
fabf762fa8 reverting to previous version of utils.h and utils.cpp 2020-09-06 11:09:36 -07:00
eb6f7f9740 Merge branch 'master' into math_eigen 2020-09-06 10:59:20 -07:00
d97e8d8cbf fixed linker problems in "utils.h" and "utils.cpp" 2020-09-06 10:12:07 -07:00
b29522273a changed the "trim_and_count_words()" function declaration in "utils.h" to make sphinx happy. Fixed some new spelling mistakes. 2020-09-06 09:40:28 -07:00
9f24f056ff reformat system sizes information and make explanations more concise 2020-09-06 07:12:43 -04:00
77e22c90ac complete using tab/tabs directives in Build_settings 2020-09-06 06:11:12 -04:00
5536c1e6ee make more compact 2020-09-06 06:10:39 -04:00
2ee2009356 removed assert() statements from "superpose3d.h" 2020-09-06 02:30:20 -07:00
c16321e9d3 purged Greek characters from "pg_developer.rst"
I replaced the 'Σ' character that was causing problems with PDF generation with the word "sum". Hopefully PDF documentation generation works now.
2020-09-05 21:44:32 -07:00
77789f9ed8 small tweaks 2020-09-06 00:07:53 -04:00
7b6d07a812 load sphinx extension for content in tabs with html and for transforming them to admonitions for non-html builders 2020-09-05 23:54:10 -04:00
4ad6d99ca0 updated documentation files: "pg_developer.rst", "math_eigen.h", and "false_positives.txt" 2020-09-05 20:08:45 -07:00
7cb774e6bc some corrections before rewriting the doxygen comments to comply with LAMMPS doxygen style. 2020-09-05 16:37:15 -07:00
4b66414bdf fixed a type error in math_eigen.h 2020-09-05 10:38:14 -07:00
58fa72ddc5 replaced all numerical recipes jacobi() code with code from "math_eigen.h". moved "math_eigen.h" into the main "src" directory. 2020-09-05 10:19:19 -07:00
57f82abae3 updated all code in LAMMPS to use the open-source "math_eigen.h" instead of "math_extra.h". Code in "lib" now uses its own abbreviated local version of the "math_eigen.h" file (which is named "jacobi_pd.h"), since it is not clear how code from "lib/" can access the code in "src/" 2020-09-05 01:39:27 -07:00
d995ed0d87 Merge pull request #2344 from akohlmey/use-improved-sphinx-fortran
Use modernized version of sphinx-fortran
2020-09-04 18:46:34 -04:00
75a119d534 divert installation of sphinx-fortran to github branch with parallel read capable version 2020-09-04 17:40:26 -04:00
64728678f1 Merge branch 'python_interface_guide' of github.com:rbberger/lammps into python_interface_guide 2020-09-04 17:15:58 -04:00
96db122377 Add Python coverage collection if enabled 2020-09-04 17:15:51 -04:00
176f7c064d Merge branch 'master' into python_interface_guide 2020-09-04 15:51:56 -04:00
e6592d2324 Merge pull request #2336 from akohlmey/collected-small-changes
Collected small changes for the next patch release
2020-09-04 15:22:31 -04:00
6a40eba2ae Add Ubuntu 20.04 Intel OpenCL container 2020-09-04 15:03:53 -04:00
da0e60c173 Apply changes to remaining containers 2020-09-04 14:43:03 -04:00
f6abdf97c4 Remove -j in distribution container files 2020-09-04 14:28:03 -04:00
41a1c96e06 Update other GPU containers 2020-09-04 14:19:41 -04:00
3bdfbdcf09 Update Ubuntu 18.04 GPU container and add Ubuntu 20.04 2020-09-04 14:06:38 -04:00
ceed9284c1 Merge branch 'master' into collected-small-changes 2020-09-04 13:32:47 -04:00
de777ce994 Merge pull request #2341 from akohlmey/doc-updates
Revised links and related descriptions in the manual
2020-09-04 13:29:58 -04:00
41a3e7c712 update a few links in the programmer guide 2020-09-04 11:59:04 -04:00
60d25c2eea Update Ubuntu 20.04 container 2020-09-04 11:54:08 -04:00
5c22d1197e Merge branch 'master' into doc-updates 2020-09-04 11:50:28 -04:00
d0d3cee28d a few more cosmetic changes 2020-09-04 11:50:02 -04:00
598819229b Fix missing OpenKIM models for testing due to KIM-API packaging 2020-09-04 11:47:38 -04:00
64d3be27a6 Add zstd to containers 2020-09-04 11:46:40 -04:00
90c13b1b14 update build files for plumed 2.6.1 release 2020-09-04 11:44:39 -04:00
7ea4177626 Set Plumed version to 2.6.1 2020-09-04 11:41:44 -04:00
2b1e4749dd first chunk of revised updates to the manual addressing link issues. 2020-09-04 11:39:38 -04:00
c8327e66b3 Merge pull request #2338 from akohlmey/include-cleanup
Update list of included headers for latest updates of the convention
2020-09-04 10:14:12 -04:00
f19ce32bf5 a few more small tweaks 2020-09-04 09:42:11 -04:00
255bf2f8fd update iwyu custom mapping file 2020-09-04 09:41:57 -04:00
f541647d44 some more IWYU tweaks 2020-09-04 09:23:20 -04:00
d6ba95bed6 minor tweaks 2020-09-04 08:05:59 -04:00
2d802411e9 remove pointless stringstream abuse 2020-09-04 08:03:53 -04:00
91a87b745f final second round IWYU cleanup 2020-09-04 08:01:11 -04:00
63e5ca53ef add iwyu mappings for gtest and gmock 2020-09-04 08:00:38 -04:00
bb7447363c more include cleanup and IWYU tweaks 2020-09-04 07:22:39 -04:00
7f6fc8a003 updated math_eigen.h and moved it into the main "src" directory 2020-09-04 04:10:47 -07:00
fc3d9fee5e more USER-OMP updates 2020-09-04 01:32:52 -04:00
d0691d0a5d add mapping for Eigen3 includes 2020-09-04 01:32:15 -04:00
2e0af2fdce some more second pass cleanup 2020-09-04 00:46:23 -04:00
508b49c976 add missing comma 2020-09-04 00:45:41 -04:00
87dfdb8723 add comments/warnings to docs of fix property/atom and fix store/state about restarting 2020-09-04 00:14:01 -04:00
cb618e19d1 small tweak to address formal issues 2020-09-03 18:30:35 -04:00
18cea11100 Merge pull request #66 from junghans/include-cleanup
cmake: use static iwyu-extra-map.imp
2020-09-03 18:27:00 -04:00
5a7189e95f update note for write_data command on missing coefficient data. 2020-09-03 18:24:12 -04:00
ce78be864a move date2num() convenience function from Universe class to utils namespace 2020-09-03 17:47:45 -04:00
390c6ba106 rechecking some corefiles with IWYU after the many updates 2020-09-03 15:28:07 -04:00
93d0358ebc cmake: use static iwyu-extra-map.imp 2020-09-03 11:36:02 -06:00
05c7cfe59f more include updates for USER-OMP 2020-09-03 11:16:32 -04:00
f67bd8eb5d more KOKKOS, USER-OMP, and USER-INTEL include file cleanup 2020-09-03 10:54:06 -04:00
27203304d6 USER-INTEL include handling cleanup 2020-09-03 09:47:48 -04:00
88ddfa4eb0 more IWYU cleanup in packages 2020-09-03 05:20:02 -04:00
55704368d0 include updates for commonly used packages 2020-09-03 01:45:05 -04:00
c7c4297cfc update include handling for commands 2020-09-03 00:51:58 -04:00
c2cf19bdd4 fixes for file readers 2020-09-03 00:32:57 -04:00
baa79bd1c4 add another iwyu-mapping entry 2020-09-03 00:22:28 -04:00
9925122cea more IWYU updates 2020-09-03 00:22:09 -04:00
a4a1f7e848 more IWYU cleanup 2020-09-02 23:49:17 -04:00
047c14d17c tweak iwyu target. generate file with custom mappings 2020-09-02 23:41:16 -04:00
45f80f4f06 more include file updates found by iwyu 2020-09-02 22:52:22 -04:00
7b8fe97a88 remove trailing whitespace 2020-09-02 22:51:42 -04:00
71d111ccd9 get rid of 'using namespace std;' 2020-09-02 22:51:30 -04:00
203e42073a fix compilation error 2020-09-02 22:51:14 -04:00
f88f4cd60e more include file updates 2020-09-02 21:44:04 -04:00
4ba6a4f958 have clang-style output for easier parsing in compilation mode in emacs 2020-09-02 21:43:20 -04:00
79584c842d add false positive 2020-09-02 18:44:12 -04:00
b741a07a34 document IWYU analysis custom build target 2020-09-02 18:43:03 -04:00
5e429b8212 update include conventions and refer to the new CMake build target 2020-09-02 18:42:41 -04:00
4a10111ced add iwyu target to CMake configuration 2020-09-02 18:42:19 -04:00
d5027c125c rename fmt() macro to logfmt() to avoid clashes with fmtlib 2020-09-02 17:58:22 -04:00
1421817276 remove redundant includes 2020-09-02 17:54:29 -04:00
6113169a47 IWYU based include statement cleanup 2020-09-02 17:39:34 -04:00
a38873f3f6 Add venv example to pg_python.rst 2020-09-02 17:34:14 -04:00
2cd0e9edc4 Merge pull request #2335 from rbberger/doc_updates
Update documentation pages headings and update style check tool to detect missing index entries.
2020-09-02 15:46:10 -04:00
a72ebb2ab2 update link and formatting of install via git page in manual. 2020-09-02 15:41:17 -04:00
862bc99772 add links to references of the two parts of the manual 2020-09-02 15:28:27 -04:00
8def5844c8 Skip old styles in check-styles 2020-09-02 14:06:35 -04:00
ac7129b3a9 Revert "Add missing index entries"
These are intentionally left out from docs
2020-09-02 14:02:53 -04:00
0f9c46235c Add missing index entries 2020-09-02 13:56:17 -04:00
515746063f Fix copy&paste error 2020-09-02 13:55:27 -04:00
e519919cae Fix typo 2020-09-02 13:48:11 -04:00
28c90a6dea Update index for kspace_style commands 2020-09-02 13:40:24 -04:00
12aa689e97 fix typo in cfg/zstd tester tool 2020-09-02 13:38:18 -04:00
c812bf7a91 Update headers in improper_*.rst files 2020-09-02 13:03:03 -04:00
a0c9c213fd Update headers in dihedral_*.rst files 2020-09-02 12:57:44 -04:00
d01c985feb Update headers in compute_*.rst files 2020-09-02 12:56:24 -04:00
f4d2523ce0 Update headers in bond_*.rst files 2020-09-02 12:55:18 -04:00
4e27c47f96 Undo lost headers 2020-09-02 12:54:08 -04:00
88e09fe648 Update headers in angle_*.rst files 2020-09-02 12:43:48 -04:00
286d3866e3 Add style index check in check-styles.py 2020-09-02 12:42:18 -04:00
549bdd080c Add kspace_style to fixup_headers.py 2020-09-02 12:37:48 -04:00
84e922eff6 Add other styles to fixup_header.py 2020-09-02 11:55:53 -04:00
8468f4ab2e fix missing comparison and thus reduce memory use in serial case 2020-09-02 11:12:15 -04:00
a4380a548d make more readable 2020-09-02 11:11:48 -04:00
0c88074525 Apply header fix to all remaining fix styles docs 2020-09-02 11:05:33 -04:00
98808fb5ff Skip doc files that don't have common file structure 2020-09-02 11:04:08 -04:00
8d1a117b75 Apply header fix to all remaining pair styles docs 2020-09-02 10:59:01 -04:00
09bc686f71 Avoid extra newline 2020-09-02 10:55:04 -04:00
cc594c0613 include fmt/format.h into pointers.h 2020-09-02 10:52:10 -04:00
e5e2abe446 update include file policy file and policy for pointers.h special case 2020-09-02 10:51:55 -04:00
e479033109 Avoid breaking already fixed headers 2020-09-02 10:51:47 -04:00
3c60a9e08e Update fix_nh.rst 2020-09-02 10:31:21 -04:00
49a6bf5e30 Update pair_class2.rst 2020-09-02 10:30:31 -04:00
3a22f26e87 Update pair_cs.rst 2020-09-02 10:29:46 -04:00
4d6be195ec Add utility to fixup doc headers 2020-09-02 10:29:29 -04:00
e5f1f58aab Update pair_born.rst 2020-09-02 10:26:37 -04:00
a6a49d80b2 Update pair_sdk.rst 2020-09-02 10:19:20 -04:00
7ae98ffae1 Update fix_rigid.rst 2020-09-02 09:50:16 -04:00
dce7a39021 Update pair_buck.rst 2020-09-02 09:47:32 -04:00
77f803b366 Update pair_charmm.rst 2020-09-02 09:14:20 -04:00
a9cf25db4c Update pair_eam.rst 2020-09-02 09:13:59 -04:00
7e733913d7 Update pair_fep_soft.rst 2020-09-02 09:03:00 -04:00
45100528ad Update pair_coul.rst 2020-09-02 08:54:53 -04:00
c50a82af78 apply consistent naming scheme for tester source files and executables 2020-09-01 20:16:39 -04:00
2ffb5ddd5a add kspace dependency to coul/streitz/long test 2020-09-01 17:34:08 -04:00
80519cd109 Merge pull request #2331 from rbberger/zstd_support
Update COMPRESS package
2020-09-01 15:01:04 -04:00
e7dcb79ac5 Merge pull request #1472 from tomswinburne/master
Add fix pafi
2020-09-01 14:36:46 -04:00
35cb41147a update date 2020-09-01 14:16:06 -04:00
60b98f6b91 Require libzstd>=1.4 for Zstd dump styles 2020-09-01 13:52:56 -04:00
45c9478f5a Merge pull request #2333 from akohlmey/iss1109
Fix memory leaks and out-of-range memory access in USER-BOCS package
2020-09-01 13:37:56 -04:00
31bd76efa5 Merge remote-tracking branch 'origin/master' into zstd_support 2020-09-01 12:58:41 -04:00
40ea03234f Update force->inumeric to utils::inumeric 2020-09-01 12:58:27 -04:00
9d3e3526a0 Add false positives and fix spelling in docs 2020-09-01 12:30:35 -04:00
f899a0f484 Reorder init to suppress warnings 2020-09-01 12:25:38 -04:00
a2b5c379f2 Update docs for COMPRESS styles 2020-09-01 12:17:08 -04:00
3db1a6d690 Use PkgConfig to find Zstd 2020-09-01 11:14:36 -04:00
ae3a7d8901 minor changes to example 2020-09-01 17:02:24 +02:00
8e0c4d764e Change include for fmt 2020-09-01 10:58:13 -04:00
75b3bbd657 modernize 2020-09-01 10:09:34 -04:00
95aab99c2a remove redundant entries 2020-09-01 09:57:44 -04:00
2802db4493 correct application of the BEGIN_CAPTURE macro 2020-09-01 09:41:26 -04:00
3b765956d2 Manually resolve merge conflict
Merge branch 'iss1109' of https://github.com/eagunn/lammps into iss1109
2020-09-01 06:31:35 -06:00
4f03acc560 Fix merge conflicts caused by code's do-si-do around Axel's additions
to the utils namespace.
2020-09-01 06:30:12 -06:00
2536f28e1a Per comments from Axel, continued the evolution of the output messages.
- Converted Compute message with imputed value to fmt::format style.
- Condensed most error-> output messages to oneliners where fmt::format
is called within the call to the error method. For a couple, the
formatting string/value list were complex enough that I continued using
a temporary string variable to make the code easier to read.
2020-09-01 05:49:29 -06:00
52748f57da Delete extraneous, unused file/folder that should never have gotten
into the repo.
2020-09-01 05:49:29 -06:00
9ef725ea52 Move destruction of splines data structure to FixBocs destructor
from ComputePressure destructor.
2020-09-01 05:49:29 -06:00
e11a3c4799 Remove "test" example files. 2020-09-01 05:49:29 -06:00
4dca927693 Switch dynamically created arrays/matrices to use memory->create and
->destroy, LAMMPS-standard for dynamic memory management.
2020-09-01 05:49:29 -06:00
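The LAMMPS-standard idiom referred to in this commit pairs memory->create() with memory->destroy(); a minimal sketch (array name and dimensions are illustrative):

```cpp
// Hedged sketch of the memory->create()/memory->destroy() idiom for
// dynamic matrices; the tag string is only used for error reporting.
#include "memory.h"
using namespace LAMMPS_NS;

void build_splines(Memory *memory, int n) {
  double **coeffs = nullptr;
  memory->create(coeffs, n, 4, "fix_bocs:coeffs");  // n x 4 matrix
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < 4; ++j) coeffs[i][j] = 0.0;
  // ... fill and use the spline coefficients ...
  memory->destroy(coeffs);
}
```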
fe89edc828 Remove c_str() calls from message strings. Message class now accepts
actual strings.
2020-09-01 05:49:29 -06:00
0aa66319fe Change the build_xxx_splines methods to accept a const reference parameter
to the data vector rather than a vector parameter. Vectors are, as all
types are, passed by value in C++. Using a vector directly as a parameter
causes the data to be copied. Using a reference to the vector neatly
avoids that problem.

Also tucked fmt/format.h into its rightful place in the include list
since Axel says there's no absolute rule about it being an exception.
2020-09-01 05:49:29 -06:00
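The reasoning in this commit is easy to show in two lines; a minimal sketch (function names are placeholders, not the actual build_xxx_splines methods):

```cpp
// Passing std::vector by value copies all of its elements; a const
// reference lets the callee read the caller's data without a copy.
#include <numeric>
#include <vector>

double sum_by_value(std::vector<double> data) {       // copies the vector
  return std::accumulate(data.begin(), data.end(), 0.0);
}

double sum_by_ref(const std::vector<double> &data) {  // no copy
  return std::accumulate(data.begin(), data.end(), 0.0);
}
```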
076c834734 Manually reconciled conflict from:
Corrected two issues in code identified by valgrind:
1) memory leaks (the subject of the original #1109) and
2) an invalid memory access (reading from beyond the end of an array).

Also:
- replaced several dynamically-allocated (calloc and free) local arrays with std::vector.
- reorganized include statements per new conventions, and
- updated messages to use strings and fmt.

Added folders and files used in manual testing and verification to examples folder.

No changes in analysis results are expected or intended. This is pure refactoring except for a bit of error message cleanup.
2020-09-01 05:48:49 -06:00
aa12f4f78a Per request from Axel, revert a revert that went one commit too far:
"Per suggestion from Axel, reformat code to what I believe to be"

This reverts commit b72c1c0eda.
2020-09-01 05:10:44 -06:00
fbffcf3321 spelling 2020-09-01 11:34:11 +02:00
c18eccad39 force -> utils:: 2020-09-01 11:25:25 +02:00
2f182b4606 force-> -> utils:: 2020-09-01 11:24:06 +02:00
c6de769696 Merge branch 'master' of github.com:tomswinburne/lammps 2020-09-01 11:12:29 +02:00
ac080228dc whitespace style fix 2020-09-01 11:12:05 +02:00
0541996919 Merge branch 'master' into master 2020-09-01 10:59:03 +02:00
d601acd0ca merge 2020-09-01 10:57:43 +02:00
2eb6a8a8a4 update python interface 2020-09-01 10:55:45 +02:00
db9de111be updated python interface 2020-09-01 10:54:18 +02:00
fe833e6c87 Add missing #ifdef 2020-08-31 19:49:17 -04:00
0925fc822d Make Zstd support optional 2020-08-31 18:21:58 -04:00
3865de8705 Add lost line 2020-08-31 16:43:06 -04:00
2effd2f707 Merge remote-tracking branch 'origin/master' into zstd_support 2020-08-31 15:38:56 -04:00
c5a2e50bf5 Add Zstd variants of dump local and xyz 2020-08-31 15:38:20 -04:00
a0f82a4b18 Add compression_level parameter to dump xyz/gz and local/gz 2020-08-31 15:00:57 -04:00
443a817152 Add tests for dump local/gz and xyz/gz 2020-08-31 14:54:10 -04:00
24f5807623 Merge pull request #2319 from akohlmey/move-convenience-functions
Move some more convenience functions to utils namespace
2020-08-31 14:29:42 -04:00
99b83333c9 Add dump cfg/zstd 2020-08-31 14:20:52 -04:00
a922355e19 Add compression_level parameter to dump cfg/gz 2020-08-31 14:20:36 -04:00
54ed23448b instantiate bounds() also with long long to make Windows happy. 2020-08-31 14:14:51 -04:00
5faca3aef0 Add tests for dump cfg/gz 2020-08-31 13:36:42 -04:00
675cf20e93 Merge remote-tracking branch 'github/master' into move-convenience-functions 2020-08-31 12:58:21 -04:00
22ccde1d13 modernize utils::bounds() implementation and remove duplicate definition. 2020-08-31 12:58:00 -04:00
9a4ec23c7f Merge pull request #2327 from akohlmey/remove-doxygen-tool
Remove doxygen tool folder and references to it in the manual
2020-08-31 12:49:24 -04:00
7fca3b7a65 revert accidental change 2020-08-31 12:46:22 -04:00
1ff34d4b13 avoid doxygen lookup ambiguity 2020-08-31 12:46:05 -04:00
67a5db03c7 Merge pull request #2326 from ndtrung81/gpu-update-makefiles
update makefiles in lib/gpu for more recent architectures
2020-08-31 12:30:28 -04:00
0c7b9a7c63 update utils tester for API change in bounds()/boundsbig() 2020-08-31 11:43:25 -04:00
dd03c7232a Merge remote-tracking branch 'github/master' into move-convenience-functions 2020-08-31 11:41:18 -04:00
a4d3b21a43 Merge pull request #2322 from Vsevak/cmake_cuda
Fix CMake building for CUDA 11
2020-08-31 11:33:52 -04:00
cf11945e21 get rid of utils::boundsbig() by making it a template function with two specializations 2020-08-31 11:25:43 -04:00
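The technique named here, one template with two instantiations replacing a pair of near-duplicate functions, looks roughly like this (the parsing logic is a stand-in, not the utils::bounds() implementation):

```cpp
// Hedged sketch: a single template replaces bounds()/boundsbig(),
// instantiated for a regular and a big integer type.
#include <cstdlib>
#include <string>

template <typename TYPE>
void bounds(const std::string &str, TYPE nmin, TYPE nmax, TYPE &nlo, TYPE &nhi) {
  if (str == "*") { nlo = nmin; nhi = nmax; }  // wildcard covers full range
  else nlo = nhi = static_cast<TYPE>(std::strtoll(str.c_str(), nullptr, 10));
}

// explicit instantiations, e.g. for 32-bit and 64-bit index types
template void bounds<int>(const std::string &, int, int, int &, int &);
template void bounds<long long>(const std::string &, long long, long long,
                                long long &, long long &);
```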
8034a83ee8 Merge pull request #2325 from akohlmey/fortran-interface
New Fortran interface to LAMMPS
2020-08-31 10:27:10 -04:00
3c71d300e5 Merge pull request #2320 from akohlmey/programmer-guide
Add programmer guide part 2
2020-08-31 10:06:04 -04:00
b72c1c0eda Per suggestion from Axel, reformat code to what I believe to be
LAMMPS standards. I used the .clang-format file from the unit-test
folder but changed all spacing settings to 2 from 4.
2020-08-31 06:31:48 -06:00
0c067700e6 Per comments from Axel, continued the evolution of the output messages.
- Converted Compute message with imputed value to fmt::format style.
- Condensed most error-> output messages to oneliners where fmt::format
is called within the call to the error method. For a couple, the
formatting string/value list were complex enough that I continued using
a temporary string variable to make the code easier to read.
2020-08-31 06:31:48 -06:00
f0b304efcb Delete extraneous, unused file/folder that should never have gotten
into the repo.
2020-08-31 06:31:48 -06:00
69b42ea9ae Move destruction of splines data structure to FixBocs destructor
from ComputePressure destructor.
2020-08-31 06:31:48 -06:00
a4790fdf00 Remove "test" example files. 2020-08-31 06:31:47 -06:00
c75c3451f6 Switch dynamically created arrays/matrices to use memory->create and
->destroy, LAMMPS-standard for dynamic memory management.
2020-08-31 06:31:47 -06:00
e8306a2535 Remove c_str() calls from message strings. Message class now accepts
actual strings.
2020-08-31 06:31:47 -06:00
3760ccd672 Change the build_xxx_splines methods to accept a const reference parameter
to the data vector rather than a vector parameter. Vectors are, as all
types are, passed by value in C++. Using a vector directly as a parameter
causes the data to be copied. Using a reference to the vector neatly
avoids that problem.

Also tucked fmt/format.h into its rightful place in the include list
since Axel says there's no absolute rule about it being an exception.
2020-08-31 06:31:47 -06:00
b241294e84 Corrected two issues in code identified by valgrind:
1) memory leaks (the subject of the original #1109) and
2) an invalid memory access (reading from beyond the end of an array).

Also:
- replaced several dynamically-allocated (calloc and free) local arrays with std::vector.
- reorganized include statements per new conventions, and
- updated messages to use strings and fmt.

Added folders and files used in manual testing and verification to examples folder.

No changes in analysis results are expected or intended. This is pure refactoring except for a bit of error message cleanup.
2020-08-31 06:31:47 -06:00
c7360fb808 include example use of PotentialFileReader class 2020-08-31 07:45:52 -04:00
c84033677c add documentation for potential file reader 2020-08-31 07:31:28 -04:00
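A minimal sketch of the PotentialFileReader pattern those docs cover (file name, potential name, and parameter count are illustrative, and the constructor arguments are assumed from how the class is used in pair styles):

```cpp
// Hedged sketch: read tokenized coefficients from a potential file.
#include "potential_file_reader.h"
#include "tokenizer.h"
using namespace LAMMPS_NS;

void read_coeffs(LAMMPS *lmp) {
  PotentialFileReader reader(lmp, "Cu.eam", "EAM potential");
  reader.skip_line();                             // skip header/comment line
  ValueTokenizer values = reader.next_values(3);  // next line with 3 values
  double a = values.next_double();
  double b = values.next_double();
  double c = values.next_double();
  (void)a; (void)b; (void)c;                      // ... store coefficients ...
}
```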
33f2cbc713 add docs for TextFileReader class to developer guide 2020-08-31 06:57:16 -04:00
39a9974f3d add ValueTokenizer example and use captions with code-blocks 2020-08-30 23:57:02 -04:00
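The kind of ValueTokenizer usage that example illustrates is, in outline (input string and value types chosen for illustration):

```cpp
// Hedged sketch: ValueTokenizer splits a string into typed values.
#include "tokenizer.h"
#include <cstdio>
#include <string>
using namespace LAMMPS_NS;

int main() {
  ValueTokenizer values("12 0.5 fene");
  int n             = values.next_int();
  double k          = values.next_double();
  std::string style = values.next_string();
  std::printf("%d %g %s\n", n, k, style.c_str());
}
```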
569b6f95a3 no need to use ValueTokenizer. Make code more compact. 2020-08-30 23:32:17 -04:00
e8e57b0628 remove doxygen tool folder and references to it in the input as it is no longer compatible and needed 2020-08-30 19:59:52 -04:00
ecb1f266b5 More updates 2020-08-30 15:10:29 -05:00
7a5f193c02 Updated several Makefile's in lib/gpu for newer compute capabilities 2020-08-30 15:06:44 -05:00
4484699ab6 fix spelling 2020-08-30 15:52:42 -04:00
65d2ee7464 add work-in-progress marker to fortran library wrapper 2020-08-30 15:23:02 -04:00
199cfeba78 more tweaks. doxygen translation has no more warnings now. 2020-08-30 14:32:53 -04:00
cd0cdf0b74 silence compiler warning 2020-08-30 14:28:29 -04:00
f3ed1dea4d minor tweaks 2020-08-30 14:28:19 -04:00
8d45b724a6 fix bug in conventional build makefile preventing the correct Install.py in the lib folder to be run 2020-08-30 14:12:53 -04:00
4b0999e167 complete documentation of tokenizer classes 2020-08-30 14:12:21 -04:00
5623009962 arch 3.0 is dropped in CUDA 11 2020-08-30 13:12:42 +03:00
96ee132e85 start documenting tokenizer classes 2020-08-30 01:50:37 -04:00
8d2c16ad66 remove trailing whitespace 2020-08-30 00:31:04 -04:00
f123246189 minor doc updates 2020-08-30 00:25:01 -04:00
ceeaf1e988 update and correct documentation for utils functions 2020-08-29 23:51:46 -04:00
9c404e02fd update include file conventions 2020-08-29 22:56:37 -04:00
83a9e5e724 handle spelling issues with new doc text 2020-08-29 22:55:05 -04:00
e51a5ad8f4 import doxygen docs for new utils functions 2020-08-29 22:46:25 -04:00
4396dbd9a3 move Force::open_potential() to utils::open_potential() 2020-08-29 22:37:14 -04:00
798226967f Merge branch 'move-convenience-functions' of github.com:akohlmey/lammps into move-convenience-functions 2020-08-29 22:22:45 -04:00
6b7f4c500f document changes to LAMMPS_POTENTIALS environment variable 2020-08-29 22:22:27 -04:00
5a22f4d7f2 support that LAMMPS_POTENTIALS is a real path variable with multiple entries, not just a single folder 2020-08-29 22:22:03 -04:00
b06ba74d18 support that LAMMPS_POTENTIALS is a real path variable with multiple entries, not just a single folder 2020-08-29 22:09:07 -04:00
05ff352021 add utils::open_potential() function to utils namespace 2020-08-29 22:08:16 -04:00
7413dc783e add tokenizer tests for splitting path environment variables 2020-08-29 22:07:22 -04:00
8601e608ca add unit tests for utils::bounds() and utils::boundsbig() 2020-08-29 22:00:07 -04:00
96d8d12a88 replace calls to force->*numeric() with the corresponding utils::*numeric()
this also removes the now obsolete functions from Force
2020-08-29 20:31:02 -04:00
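A before/after sketch of this migration (argument positions are assumed from how these helpers appear elsewhere in the tree; FLERR carries the file/line context used in error messages):

```cpp
// Hedged sketch of the force->*numeric() -> utils::*numeric() migration.
#include "pointers.h"  // for the FLERR macro
#include "utils.h"
using namespace LAMMPS_NS;

void parse_args(LAMMPS *lmp, char **arg) {
  // before: double cutoff = force->numeric(FLERR, arg[0]);
  double cutoff = utils::numeric(FLERR, arg[0], false, lmp);
  int nevery    = utils::inumeric(FLERR, arg[1], false, lmp);
  (void)cutoff; (void)nevery;
}
```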
27b4e93bf5 silence compiler warnings 2020-08-29 18:32:21 -04:00
741a1d1fc9 move Input::expand_args() to utils::expand_args() 2020-08-29 18:32:10 -04:00
fcd0b9f78f move Force::bounds() and Force::boundsbig() to utils. 2020-08-29 18:23:03 -04:00
60dfb6f77f still need to disable variable tracking to silence unwanted warnings on ubuntu 2020-08-28 22:03:27 -04:00
31c91a8928 fix typo 2020-08-28 21:22:06 -04:00
cec18b6aef add docs for the new fortran interface 2020-08-28 20:57:19 -04:00
e44707d5e1 add unittest support for the fortran interface to LAMMPS 2020-08-28 20:56:52 -04:00
3e92647abb add new "official" fortran interface to LAMMPS in new folder "fortran" 2020-08-28 20:56:15 -04:00
2e1b4498bd update false positives 2020-08-28 20:30:00 -04:00
7b6924329f make doxygen docs and code consistent 2020-08-28 20:25:38 -04:00
cb1a2601e1 add more existing programmer guide docs 2020-08-28 20:25:09 -04:00
3be064748d Merge pull request #2318 from akohlmey/programmer-guide
Programmer guide part 1
2020-08-28 20:14:32 -04:00
02ea7af1f7 let dummy tests pass 2020-08-28 18:49:39 -04:00
52f7f3629d Merge branch 'master' into programmer-guide 2020-08-28 18:49:15 -04:00
876c53a275 Merge pull request #2310 from lammps/library-refactor
LAMMPS C library interface and python module refactor
2020-08-28 18:40:16 -04:00
2a365c17e0 update utils namespace documentation including doxygen parts 2020-08-28 18:21:18 -04:00
cb09844182 Add placeholders for future tests 2020-08-28 17:52:15 -04:00
e64a977ae9 include documentation of utility functions in utils:: to developer guide 2020-08-28 17:14:49 -04:00
a1bf772df2 move modifying and extending LAMMPS section to programmer guide 2020-08-28 17:14:29 -04:00
8fcd72405a port over more of the framework of the programmer guide and remove programming details from user guide 2020-08-28 16:32:23 -04:00
2e2763d0f1 update .gitignore for recent additions 2020-08-28 15:34:53 -04:00
17ec3a4fe8 Fix typo 2020-08-28 15:15:01 -04:00
9412d6f6fc Add missing argtypes declarations in lammps.py 2020-08-28 15:12:36 -04:00
ded657120d Merge pull request #2312 from akohlmey/developer-guide
Integrate text from developer.tex file to the manual
2020-08-28 15:01:27 -04:00
6bcc263b41 Ensure LAMMPS pointer is of type c_void_p
Fixes segfaults caused by API change. The API change in
lammps_open and lammps_open_no_mpi makes them return the LAMMPS pointer
via their return value. However due to how ctypes operates, even
if restype is specified to be c_void_p, the function returns an integer.

Without the proper type of the pointer, calling functions without arglists would default
to using 32bit integers to pass an argument, which cuts away parts of the 64bit pointer.
Subsequently, resolving the truncated pointer in the library causes segfaults.

This commit fixes the root cause. But it also highlights the need of specifying
the arglists of all library functions.
2020-08-28 14:21:03 -04:00
caeb0af0d1 Add missing argtypes for lammps_extract_global 2020-08-28 13:54:06 -04:00
167f12a4a4 Add python test for extract_compute of peratom vector 2020-08-28 13:53:36 -04:00
e8cfa185ac update src/.gitignore for new files 2020-08-28 12:31:43 -04:00
899144c0f8 Merge pull request #2314 from jrgissing/bond/react-RMSD_constraint
Bond/react: add RMSD constraint
2020-08-28 12:29:11 -04:00
9f2eba981a a few more tweaks and spellcheck to make this ready for merging 2020-08-28 11:44:31 -04:00
ef50a67169 bond/react: make rmsd constraint lowercase 2020-08-28 11:06:27 -04:00
f8495975d3 transfer the rest of the Developer guide and remove the .tex versions and references to it 2020-08-28 10:52:45 -04:00
2686df3760 Update math_eigen.h
whitespace cleanup
2020-08-28 10:46:22 -04:00
f0788bfe86 transfer of developer.tex almost complete 2020-08-28 09:54:01 -04:00
e777badfa8 Merge pull request #2317 from evoyiatzis/patch-1
removal of a few duplicated lines in the drude polarizer tool
2020-08-28 09:25:01 -04:00
d361f26ca2 removal of a few duplicated lines
The "Velocities" section is searched for in the input file twice; the second search is redundant.
2020-08-28 11:28:47 +02:00
b16746e46e Merge pull request #2313 from ndtrung81/gpu-bugfix
Fixed bugs in the CPU kernel for beck, beck/omp and the GPU kernel of gauss/gpu
2020-08-27 19:56:50 -04:00
992b981cee Fixes segfault due to uninitialized pointers 2020-08-27 19:02:02 -04:00
b1cca2cf74 update reference data for molecular test with pair style beck 2020-08-27 19:00:20 -04:00
463774319e add missing parenthesis 2020-08-27 17:57:44 -04:00
9c84fe8830 Add missing lammps_extract_compute.argtypes 2020-08-27 17:47:49 -04:00
d7e2be1c81 Start Python interface tests for numpy extensions 2020-08-27 17:47:49 -04:00
0b8136a38b Add extract_compute, extract_fix, and extract_variable to lammps.numpy 2020-08-27 17:47:49 -04:00
a216d3f5f5 Fix typo 2020-08-27 17:47:49 -04:00
26d09ea648 Use datatype constants instead of magic numbers 2020-08-27 17:47:49 -04:00
b1fae04751 use consistent style when referencing packages 2020-08-27 17:44:49 -04:00
7b4891a9a3 document the behavior of the gauss pair style to ignore special bond factors 2020-08-27 17:41:43 -04:00
fa13e23f7a add atomic test for gauss pair style 2020-08-27 17:41:25 -04:00
fdb726629f revert "fix" for pair style beck/gpu and correct beck and beck/omp instead
we should consistently apply factor_lj to both force and energy, or to neither
2020-08-27 17:14:05 -04:00
a4f1be8fe3 Merge branch 'master' into gpu-bugfix 2020-08-27 17:05:26 -04:00
a54f99f774 Merge pull request #2315 from lammps/peri-doc-page
fix two typos on pair peri doc page
2020-08-27 17:00:06 -04:00
de4bf77757 two typos on pair peri doc page 2020-08-27 13:32:27 -06:00
32aede7769 minor doc clarification 2020-08-27 14:53:09 -04:00
ab90493e5b bond/react: RMSD, remove debugging statements 2020-08-27 14:45:42 -04:00
0d8baff7a9 Rename LAMMPS_DBLE2D to LAMMPS_DOUBLE2D 2020-08-27 14:03:17 -04:00
2f629db3d4 Refactor Zstd dump styles 2020-08-27 13:43:36 -04:00
5c0edeb679 namespace clarify 2020-08-27 12:43:28 -04:00
977a89e537 add separators 2020-08-27 12:33:03 -04:00
794e74e064 return of the lost enum 2020-08-27 12:22:48 -04:00
91554d6287 move RMSD files to new reaction package 2020-08-27 12:13:07 -04:00
ce2b128cf9 Merge branch 'lammps-master' into bond/react-RMSD_constraint 2020-08-27 12:04:09 -04:00
583a09c808 Merge branch 'master' of https://github.com/lammps/lammps into lammps-master 2020-08-27 12:02:08 -04:00
bc49e854ba bond/react RMSD constraint: manual rebase 2020-08-27 11:53:19 -04:00
e4e15157f8 Fixed bugs in the GPU kernels of pair styles beck/gpu and gauss/gpu involving factor_lj 2020-08-26 23:41:00 -05:00
f314b7e54f update format from enumerate to itemize and start next subsection 2020-08-26 23:05:59 -04:00
6a68743e54 transfer a chunk of text from Developer.tex to the manual. 2020-08-26 22:46:55 -04:00
ced78a72eb Add dump custom/zstd and tests 2020-08-26 19:59:28 -04:00
e9fd8b3ec6 Migrate changes to dump custom/gz and add tests 2020-08-26 19:44:55 -04:00
096cef40a8 remove redundant use of \brief 2020-08-26 19:44:23 -04:00
55829752c9 Merge branch 'master' into library-refactor 2020-08-26 19:08:09 -04:00
2f1086dfe4 Merge branch 'python-refactor' into library-refactor 2020-08-26 19:07:44 -04:00
85764b3774 replace a few more C++-style comments with old-fashioned C-style comments 2020-08-26 19:07:29 -04:00
1ad82d7cdb add separator comment lines 2020-08-26 19:07:08 -04:00
dbf51af7d1 Fixes segfault due to uninitialized pointers 2020-08-26 18:41:32 -04:00
78a1b92503 Add dump atom zstd tests 2020-08-26 16:35:35 -04:00
007a43a5b2 Add more atom/gz tests 2020-08-26 16:20:02 -04:00
32aa35009b Start tests for compressed dump styles 2020-08-26 16:04:19 -04:00
046fd3d14b Verify valid compression level ranges 2020-08-26 16:03:18 -04:00
d00807ee9a Merge pull request #2311 from stanmoore1/kk_update_3.2
Update Kokkos library in LAMMPS to v3.2
2020-08-26 15:35:25 -04:00
aeb3e20385 Merge pull request #2307 from Vsevak/zbl_reduce_declaration
Fix NaN in GPU-accelerated Tersoff styles with OpenCL backend
2020-08-26 13:56:49 -04:00
629ead6348 Merge pull request #2309 from lammps/doxygen-integration
Add doxygen and graphviz processing into the documentation build
2020-08-26 13:05:00 -04:00
50b8b1bf60 Fix Kokkos HIP compile error 2020-08-26 11:45:24 -05:00
94db627ba5 fix formatting issue 2020-08-26 12:09:43 -04:00
b4403de026 add false positive 2020-08-26 12:09:35 -04:00
d40b658644 cmake: update kokkos version/checksum 2020-08-26 10:05:14 -06:00
8137ada848 Merge branch 'collected-small-changes' into doxygen-integration 2020-08-26 11:51:45 -04:00
bdc952ac88 Merge branch 'master' into collected-small-changes 2020-08-26 11:51:05 -04:00
f89a0f9fe3 must not try to delete computes if they have not been created and their ids not yet set 2020-08-26 11:50:20 -04:00
c8af729701 Merge branch 'master' into collected-small-changes 2020-08-26 11:49:29 -04:00
4ad68c98a0 Merge pull request #2196 from jibril-b-coulibaly/mindlin_rescale
Implement force history in Mindlin granular pair styles
2020-08-26 11:42:06 -04:00
e2fd95d8ed Merge pull request #2240 from jrgissing/bond/react-reset_mol_ids
support molecule ID resets in fix bond/react
2020-08-26 10:56:21 -04:00
ed63edc9da lammps_has_error() and lammps_get_last_error_message() are always available, but are dummies when exceptions are not enabled 2020-08-26 09:10:59 -04:00
48d2a48a1f import updated python module from progguide branch 2020-08-26 09:01:59 -04:00
7ab6def2ca update list of Kokkos GPU archs in manual as well 2020-08-26 03:49:59 -04:00
abbbb0ab06 remove trailing whitespace 2020-08-26 03:42:50 -04:00
aa6dec84ed update list of Kokkos supported architectures 2020-08-26 03:42:42 -04:00
e0439ac94f Add compression_level parameter to dump atom gz 2020-08-26 00:14:32 -04:00
5cb8e73655 Add checksum and compression_level as dump atom/zstd options 2020-08-25 23:59:17 -04:00
0d8454ac25 First version of Zstd dump atom 2020-08-25 23:27:42 -04:00
b8f59fd37c Update from master 2020-08-25 20:29:03 -06:00
4d90c2b74b Update Kokkos library in LAMMPS to v3.2 2020-08-25 20:21:48 -06:00
1c10aa6a4d GIFs are only supported in HTML 2020-08-25 17:15:34 -04:00
c256f2331f JPG folder is no longer needed in HTML output 2020-08-25 16:59:39 -04:00
ae5c0bd8d1 Remove targets to JPG images 2020-08-25 16:44:50 -04:00
3a638440a4 Add missing PDF folder 2020-08-25 16:12:09 -04:00
80e07c69f0 add part of developer guide as to showcase the integration of the programmer guide 2020-08-25 15:07:09 -04:00
df8fb26272 Refactor changes to documentation build
- Added CMake variables for readability
- Moved Mathjax files to _static to avoid special copy logic
- Moved JPG/lammps-logo.png to _static to avoid special copy logic
- Removed dead CMake code
2020-08-25 13:41:31 -04:00
14b66d1f84 tweak test tolerance of reax/c tests for running on ubuntu 18.04 2020-08-25 12:15:19 -04:00
f965786e74 refactor C library interface and add doxygen decorations 2020-08-25 11:45:07 -04:00
69cffb2d04 import test infrastructure for c, c++ and python library usage 2020-08-25 11:03:31 -04:00
024e4c5f21 make formatting and doxygen decorations for utils functions consistent 2020-08-24 20:55:13 -04:00
3f685c346f add doxygen integration with conventional and cmake doc build 2020-08-24 17:58:37 -04:00
8cbc3e421f clean up 2020-08-24 17:49:38 +02:00
090ef2d1e8 merge error fix 2020-08-24 17:10:33 +02:00
9bdb7b5b98 gatherscatter 2020-08-24 17:09:16 +02:00
28d59ce594 final steps 2020-08-24 17:04:35 +02:00
2ee6e9077a Merge remote-tracking branch 'upstream/master' into 2020 2020-08-24 17:02:53 +02:00
e8fb7c7ece big update 2020-08-24 16:54:16 +02:00
0842911cd8 Rename local buffers 'red_acc' in lal_tersoff*
Rename __local red_acc in lal_tersoff
2020-08-23 22:22:28 +03:00
10080079e3 ISO compliance 2020-08-23 11:44:48 -04:00
63abb2dff9 fix broken reset_mol_ids command 2020-08-23 11:32:54 -04:00
01dd80f35e bond/react: actually make reset_mol_ids the default 2020-08-23 11:21:43 -04:00
450fd12d31 Changes needed for Kokkos v3.2 2020-08-21 14:45:12 -05:00
6fc2ab07ef reset_mol_ids: unique created computes 2020-08-21 14:52:39 -04:00
921b6d8135 relative threshold for contact frame update based on tangential critical force 2020-08-21 13:20:53 -05:00
ee6ef98b9b remove trailing whitespace 2020-08-21 00:43:40 -04:00
e742ae7475 fix RST syntax and spelling issues in granular pair style docs 2020-08-21 00:31:55 -04:00
900830a4a1 Merge branch 'master' into mindlin_rescale 2020-08-21 00:24:07 -04:00
c9773fc288 Merge branch 'master' into bond/react-reset_mol_ids 2020-08-21 00:19:52 -04:00
e4ab49c2e5 bond/react: bond-type-checking docs 2020-08-18 18:12:01 -04:00
df497e4853 bond/react: clarify bond-type-checking error 2020-08-18 17:53:07 -04:00
9d486d734b update bond/react for reset_mol_ids->create_computes 2020-08-11 17:29:27 -04:00
3c69ebc669 reset_mol_ids: add create_computes 2020-08-11 17:12:36 -04:00
f6d91b3b2c move domain/comm commands 2020-08-11 15:02:37 -04:00
5ebac27fd5 safety for division by zero in scaling of the projection 2020-08-10 15:15:47 -05:00
2de98999c1 bug fix: formula for frame-of-reference rotation for granular tangential history 2020-08-10 14:51:00 -05:00
bf724332d4 implement tangential force history in mindlin/force and mindlin_rescale/force 2020-08-10 10:53:30 -05:00
cf83ce6745 reset_mol_ids->reset() version 2020-07-25 00:52:39 -06:00
295d75f230 guess we should reset_mol_ids *after* updating bonds 2020-07-18 21:14:36 -06:00
845b918501 probably better reset_mol_id doc version 2020-07-18 20:59:17 -06:00
edd3fb7108 reset_mol_ids: documented verbose option 2020-07-18 20:51:14 -06:00
fe6efe8861 need header file! 2020-07-18 14:29:39 -06:00
9ec5708f2f Update reset_mol_ids.cpp 2020-07-18 14:21:10 -06:00
e00b0e96f6 bond/react: prevent reset_mol_ids printing 2020-07-18 14:00:46 -06:00
a2547701e6 fix verbose reset_mol_ids 2020-07-18 13:59:30 -06:00
6272b7d2bf add (undocumented) verbosity option to reset_mol_ids 2020-07-18 13:52:13 -06:00
da91f81d40 bond/react:doc clarification 2020-07-18 13:42:47 -06:00
371a5c5b61 bond/react: reset_mol_ids docs 2020-07-18 12:44:34 -06:00
57f639c0e5 bond/react:reset_mol_ids keyword 2020-07-18 12:42:47 -06:00
8108063f47 small bug fix for gather() 2020-07-15 12:21:03 +02:00
6c5d928358 small bug fix for gather() 2020-07-15 11:45:23 +02:00
a6cd4a935e Replace accumulated displacement by accumulated force for tangential force in styles mindlin and mindlin_rescale. Change documentation accordingly 2020-06-27 01:17:46 -05:00
b75d9b8224 bond/react: RMSD constraint docs 2020-06-26 23:43:08 -06:00
6a68715d7b bond/react:RMSD constraint 2020-06-26 23:33:40 -06:00
fa17ba7a8f Merge pull request #86 from lammps/master
rebase
2020-06-26 22:26:40 -06:00
6ec3aac3f0 extra return value 2020-06-25 13:32:13 +02:00
36bac80978 up-to-date 2020-05-01 18:42:00 +02:00
215ad7e0b9 allreduce 2020-05-01 17:52:31 +02:00
b96058eac3 gather/scatter custom 2020-05-01 16:53:07 +02:00
a64f70f0b5 removed broken soft link 2020-05-01 16:24:42 +02:00
2571e187d0 updated README, removed broken soft link to potential 2020-05-01 16:23:51 +02:00
64fdaaec3c citation update 2020-05-01 16:19:03 +02:00
2ff463cc6e fixed typo and checked make html 2020-05-01 16:11:25 +02:00
012c344622 updated documentation 2020-05-01 16:10:45 +02:00
6eb9923c68 added documentation 2020-05-01 15:58:32 +02:00
4e0c835e9e fixed dimension in error message 2020-05-01 15:57:16 +02:00
95e81c7e47 allowed 0/1 or no/yes 2020-05-01 14:52:48 +02:00
7c620f7514 removed redundant region check, made com/overdamped "yes" or "no" instead of 0/1 2020-05-01 14:36:06 +02:00
bad3becac3 set com and overdamped flags as keywords with defaults 1,0 2020-05-01 14:28:36 +02:00
79f074eb6a updated potential path in example 2020-05-01 14:27:33 +02:00
feb6e10734 error messages 2020-05-01 13:53:24 +02:00
e708479782 error messages for fix pafi 2020-05-01 13:28:36 +02:00
910d91bc93 some more tweaks to the example 2020-04-29 17:15:46 -04:00
6cd1b45a49 recover compilation with -DLAMMPS_BIGBIG 2020-04-29 17:12:58 -04:00
1ad85980fb fix up include files 2020-04-29 17:12:43 -04:00
d3b88ab3e1 fix documentation translation issues 2020-04-29 17:12:30 -04:00
42d36e360e cosmetic change 2020-04-29 17:02:18 -04:00
db631c941d fix minor memory leak 2020-04-29 16:51:43 -04:00
cc6b55515b add integration into manual and add dummy documentation 2020-04-29 16:51:30 -04:00
46739329c4 update PAFI example: remove unused data, replace potential with existing one, update logs 2020-04-29 16:41:08 -04:00
595bb30e14 Merge branch 'master' into user-pafi 2020-04-29 15:35:29 -04:00
3ab797984b verified cmake compatibility 2020-04-27 18:21:42 +02:00
97ac67840b corrections to headers 2020-04-27 17:40:09 +02:00
2fb71cfb62 Merge pull request #80 from jrgissing/master
import Andrew's PR into create_atoms branch
2020-04-22 22:36:20 -06:00
08b4159c6b Merge pull request #79 from lammps/master
rebase
2020-04-22 22:32:01 -06:00
251517127d Merge pull request #78 from lammps/master
rebase
2020-04-22 22:13:33 -06:00
f63893bd7d citation fix 2020-04-21 17:19:48 +02:00
1c1ff9623f check for compute at fix_init() 2020-04-21 15:07:15 +02:00
1670196bc1 c++ style headers 2020-04-21 15:00:00 +02:00
cbe6e0dcb2 small updates 2020-04-21 14:32:52 +02:00
1cd5db683f typo.. 2020-04-20 19:59:26 +02:00
0f8669c51d updated extract_atom 2020-04-20 19:55:29 +02:00
4da1ba749f removed changes to atom.cpp 2020-04-20 19:42:04 +02:00
1709bc3f83 cleaned makefile 2020-04-20 19:12:37 +02:00
b1bd0251a9 better example file 2020-04-20 18:32:44 +02:00
8fa2092580 example folder 2020-04-20 10:20:53 +02:00
946cb27267 ignore 2020-04-20 10:20:17 +02:00
0ea2eabbb2 start of clean up 2020-04-17 18:11:59 +02:00
6904e1e6ef Merge pull request #2 from tomswinburne/newfix
update
2020-04-17 16:39:28 +02:00
bddcc3519c update 2020-04-17 14:55:24 +02:00
5b31077cef Merge pull request #1 from tomswinburne/newfix
Newfix
2020-04-17 14:49:57 +02:00
b8d6de2bdc binary example 2020-04-14 21:21:22 +02:00
6cbd61d929 working with library interface 2020-04-14 21:11:32 +02:00
276608738c working new version 2020-04-14 15:11:09 +02:00
430f2ae6aa getting there 2020-04-13 19:28:43 +02:00
39799c62fc update fork 2020-04-13 10:48:20 +02:00
581f65bacd makefile 2020-04-13 10:39:54 +02:00
789a697d69 Merge pull request #77 from mrcdr/math_eigen_lanczos
modified the "math_eigen.h" file
2020-04-04 20:59:24 -06:00
94e5e8de76 modified the "math_eigen.h" file to track the commit 9cdfcac069 2020-04-03 01:23:30 +09:00
9cdfcac069 Merge pull request #76 from jewettaij/bond-react_create_atoms
libraries for matrix algebra and point cloud registration
2020-03-28 23:33:44 -06:00
8d0cb2a70a removed the "superpose3d" namespace, and renamed "namespace math_eigen" to "namespace MathEigen" (to imitate the style used in "math_extra.h"). 2020-03-23 14:19:04 -07:00
e795d9ff6a added the "math_eigen.h" and "superpose3d.h" files 2020-03-12 17:33:50 -07:00
0fb341f7eb Merge pull request #73 from lammps/master
rebase
2020-03-05 22:46:07 -07:00
b3425afef5 Merge pull request #72 from lammps/master
rebase
2020-03-03 23:12:46 -07:00
4b11e43660 Merge pull request #68 from lammps/master
rebase
2020-02-17 21:07:30 -07:00
453649453e Merge pull request #65 from jrgissing/master
rebase
2020-01-24 21:25:59 -07:00
49d9089a63 USER-PAFI examples 2019-05-22 13:50:07 +02:00
646d833027 USER-PAFI 2019-05-22 10:29:29 +02:00
4369 changed files with 142410 additions and 179590 deletions

.github/CODEOWNERS

@@ -114,6 +114,7 @@ src/info.* @akohlmey @rbberger
src/timer.* @akohlmey
src/min* @sjplimp @stanmoore1
src/utils.* @akohlmey @rbberger
src/math_eigen_impl.h @jewettaij
# tools
tools/msi2lmp/* @akohlmey
@@ -134,6 +135,9 @@ cmake/presets/*.cmake @junghans @rbberger @akohlmey
# python
python/* @rbberger
# fortran
fortran/* @akohlmey
# docs
doc/utils/*/* @rbberger
doc/Makefile @rbberger


@@ -67,6 +67,7 @@ How quickly your contribution will be integrated depends largely on how much eff
Here is a checklist of steps you need to follow to submit a single file or user package for our consideration. Following these steps will save both you and us time. See existing files in packages in the source directory for examples. If you are uncertain, please ask on the lammps-users mailing list.
* C++ source code must be compatible with the C++11 standard. Packages may require a later standard, if justified.
* All source files you provide must compile with the most current version of LAMMPS with multiple configurations. In particular you need to test compiling LAMMPS from scratch with `-DLAMMPS_BIGBIG` set in addition to the default `-DLAMMPS_SMALLBIG` setting. Your code will need to work correctly in serial and in parallel using MPI.
* For consistency with the rest of LAMMPS, and especially if you want your contribution(s) to be added to the main LAMMPS code or one of its standard packages, it needs to be written in a style compatible with other LAMMPS source files. This means: 2-character indentation per level, no tabs, no trailing whitespace, no lines over 80 characters. I/O is done via the C-style stdio library, style class header files should not import any system headers, STL containers should be avoided in headers, and forward declarations used where possible or needed. All added code should be placed into the LAMMPS_NS namespace or a sub-namespace; global or static variables should be avoided, as they conflict with the modular nature of LAMMPS and the C++ class structure. There MUST NOT be any "using namespace XXX;" statements in headers. In the implementation file (<name>.cpp) system includes should be placed in angular brackets (<>) and for C library functions the C++ style header files should be included (<cstdio> instead of <stdio.h>, or <cstring> instead of <string.h>). This is all so the developers can more easily understand, integrate, and maintain your contribution and reduce conflicts with other parts of LAMMPS. This basically means that the code accesses data structures, performs its operations, and is formatted similarly to other LAMMPS source files, including the use of the error class for error and warning messages.
* Source files, style names, and documentation files should follow this naming convention: style names should be lowercase, with words separated by a forward slash; for a new fix style 'foo/bar', the class should be named FixFooBar, the source files should be named 'fix_foo_bar.h' and 'fix_foo_bar.cpp', and the corresponding documentation should be in a file 'fix_foo_bar.rst' (see the sketch after this list).
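To make the naming and style conventions concrete, here is a minimal sketch of what the header of the hypothetical fix style 'foo/bar' named above could look like. The FixStyle registration macro, include guard, and constructor signature are modeled on existing fix styles and are illustrative assumptions, not additional requirements:

#ifdef FIX_CLASS
// register the style name with the LAMMPS style factory (pattern used by existing fix headers)
FixStyle(foo/bar,FixFooBar)
#else

#ifndef LMP_FIX_FOO_BAR_H
#define LMP_FIX_FOO_BAR_H

#include "fix.h"    // base class header; note: no system headers in a style header

namespace LAMMPS_NS {

class FixFooBar : public Fix {
 public:
  FixFooBar(class LAMMPS *, int, char **);
  int setmask();

 private:
  double example_setting;    // hypothetical per-style parameter
};

}    // namespace LAMMPS_NS

#endif
#endif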


@ -2,7 +2,7 @@
<!--Briefly describe the new feature(s), enhancement(s), or bugfix(es) included in this pull request.-->
**Related Issues**
**Related Issue(s)**
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->


@ -9,34 +9,37 @@ assignees: ''
**Summary**
<!--Briefly describe the bug or bugs that are eliminated by this pull request.-->
<!--Briefly describe the bug(s) that are eliminated by this pull request.-->
**Related Issue(s)**
<!--If this request addresses or is related to an existing (open) GitHub issue, e.g. a bug report, mention the issue number here following a pound sign (aka hashmark), e.g. `#222`.-->
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->
**Author(s)**
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request-->
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request. If this pull request adds new files to the distribution, please also provide a suitable "long-lived" e-mail address (ideally something that can outlive your institution's e-mail, in case you change jobs) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this contributed code.-->
**Licensing**
By submitting this pull request, I implicitly accept that my submission is subject to the same licensing terms as the files that are modified.
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).
**Backward Compatibility**
<!--Please state whether any changes in the pull request break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
**Detailed Description**
<!--Provide any relevant details about how the fixed bug can be reproduced, how the changes are implemented, how correctness was verified, how other features - if any - in LAMMPS are affected-->
## Post Submission Checklist
**Post Submission Checklist**
<!--Please check the fields below as they are completed *after* the pull request is submitted-->
- [ ] The code in this pull request is complete
<!--Please check the fields below as they are completed **after** the pull request has been submitted. Delete lines that don't apply-->
- [ ] The feature or features in this pull request are complete
- [ ] Licensing information is complete
- [ ] Corresponding author information is complete
- [ ] The source code follows the LAMMPS formatting guidelines
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.
## Further Information, Files, and Links
<!--Put any additional information here, attach relevant text or image files, and URLs to external sites (e.g. to download input decks for testing)-->


@ -13,23 +13,31 @@ assignees: ''
**Related Issue(s)**
<!--If this request addresses or is related to an existing (open) GitHub issue, e.g. a bug report, mention the issue number here following a pound sign (aka hashmark), e.g. `#222`.-->
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->
**Author(s)**
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request-->
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request. If this pull request adds new files to the distribution, please also provide a suitable "long-lived" e-mail address (ideally something that can outlive your institution's e-mail, in case you change jobs) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this contributed code.-->
**Licensing**
By submitting this pull request, I implicitly accept that my submission is subject to the same licensing terms as the files that are modified.
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).
**Backward Compatibility**
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
**Detailed Description**
<!--Provide any relevant details about the included changes.-->
<!--Provide any relevant details about how the changes are implemented, how correctness was verified, how other features - if any - in LAMMPS are affected-->
## Post Submission Checklist
**Post Submission Checklist**
<!--Please check the fields below as they are completed *after* the pull request is submitted-->
- [ ] The pull request is complete
- [ ] The source code follows the LAMMPS formatting guidelines
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.


@ -11,32 +11,29 @@ assignees: ''
<!--Briefly describe the new feature(s) included in this pull request.-->
**Related Issues**
**Related Issue(s)**
<!--If this addresses an existing (open) GitHub issue, e.g. a feature request, mention the issue number here following a pound sign (aka hashmark), e.g. `#331`.-->
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->
**Author(s)**
<!--Please state name and affiliation of the author or authors that should be credited with the features added in this pull request. Please provide a suitable "long-lived" e-mail address (e.g. from gmail, yahoo, outlook, etc.) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this code, now and in the future-->
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request. If this pull request adds new files to the distribution, please also provide a suitable "long-lived" e-mail address (ideally something that can outlive your institution's e-mail, in case you change jobs) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this contributed code.-->
**Licensing**
<!--Please add *yes* or *no* to the following two statements (please contact @lammps/core if you have questions about this)-->
My contribution may be licensed as GPL v2 (default LAMMPS license):
My contribution may be licensed as LGPL (for use as a library with proprietary software):
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).
**Backward Compatibility**
<!--Please state if any of the changes in this pull request will affect backward compatibility for inputs, and - if yes - explain what has been changed and why-->
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
**Implementation Notes**
<!--Provide any relevant details about how the new features are implemented, how correctness was verified, what platforms (OS, compiler, MPI, hardware, number of processors, accelerator(s)) it was tested on-->
<!--Provide any relevant details about how the new feature(s) are implemented, how correctness was verified, how other features - if any - in LAMMPS are affected-->
## Post Submission Checklist
**Post Submission Checklist**
<!--Please check the fields below as they are completed *after* the pull request has been submitted-->
<!--Please check the fields below as they are completed **after** the pull request has been submitted. Delete lines that don't apply-->
- [ ] The feature or features in this pull request are complete
- [ ] Licensing information is complete
@ -46,10 +43,11 @@ My contribution may be licensed as LGPL (for use as a library with proprietary s
- [ ] The added/updated documentation is integrated and tested with the documentation build system
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.
- [ ] A package specific README file has been included or updated
- [ ] One or more example input decks are included
## Further Information, Files, and Links
**Further Information, Files, and Links**
<!--Put any additional information here, attach relevant text or image files, and URLs to external sites (e.g. DOIs or webpages)-->


@ -11,17 +11,21 @@ assignees: ''
<!--Briefly describe what kind of updates or enhancements for a package or feature are included. If you are not the original author of the package or feature, please mention whether your contribution was created independently or in collaboration/cooperation with the original author.-->
**Related Issue(s)**
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->
**Author(s)**
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request-->
**Licensing**
By submitting this pull request, I implicitly accept that my submission is subject to the same licensing terms as the original package or feature(s) that are updated or amended by this pull request.
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).
**Backward Compatibility**
<!--Please state whether any changes in the pull request break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
**Implementation Notes**
@ -29,11 +33,19 @@ By submitting this pull request I implicitly accept, that my submission is subje
**Post Submission Checklist**
<!--Please check the fields below as they are completed-->
<!--Please check the fields below as they are completed **after** the pull request has been submitted. Delete lines that don't apply-->
- [ ] The feature or features in this pull request are complete
- [ ] Suitable updates to the existing docs are included
- [ ] One or more example input decks are included
- [ ] Licensing information is complete
- [ ] Corresponding author information is complete
- [ ] The source code follows the LAMMPS formatting guidelines
- [ ] Suitable updates to the existing docs are included
- [ ] The updated documentation is integrated and tested with the documentation build system
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been updated or added to the unittest tree.
- [ ] A package specific README file has been updated
- [ ] One or more example input decks are included
**Further Information, Files, and Links**

.gitignore vendored

@ -37,6 +37,7 @@ vgcore.*
ehthumbs.db
Thumbs.db
.clang-format
.lammps_history
#cmake
/build*

README

@ -25,25 +25,29 @@ The LAMMPS distribution includes the following files and directories:
README this file
LICENSE the GNU General Public License (GPL)
bench benchmark problems
cmake CMake build system
cmake CMake build files
doc documentation
examples simple test problems
lib libraries LAMMPS can be linked with
fortran Fortran wrapper for LAMMPS
lib additional provided or external libraries
potentials interatomic potential files
python Python wrapper on LAMMPS as a library
python Python wrappers for LAMMPS
src source files
tools pre- and post-processing tools
Point your browser at any of these files to get started:
http://lammps.sandia.gov/doc/Manual.html the LAMMPS manual
http://lammps.sandia.gov/doc/Intro.html hi-level introduction
https://lammps.sandia.gov/doc/Intro.html high-level introduction
http://lammps.sandia.gov/doc/Build.html how to build LAMMPS
http://lammps.sandia.gov/doc/Run_head.html how to run LAMMPS
http://lammps.sandia.gov/doc/Developer.pdf LAMMPS developer guide
https://lammps.sandia.gov/doc/Manual.html LAMMPS user manual
https://lammps.sandia.gov/doc/Intro.html high-level introduction
https://lammps.sandia.gov/doc/Build.html how to build LAMMPS
https://lammps.sandia.gov/doc/Run_head.html how to run LAMMPS
https://lammps.sandia.gov/doc/Commands_all.html table of available commands
https://lammps.sandia.gov/doc/pg_library.html LAMMPS programmer guide
https://lammps.sandia.gov/doc/Modify.html how to modify and extend LAMMPS
https://lammps.sandia.gov/doc/pg_developer.html LAMMPS developer guide
You can also create these doc pages locally:
% cd doc
% make html # creates HTML pages in doc/html
% make pdf # creates Manual.pdf and Developer.pdf
% make pdf # creates Manual.pdf


@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,50 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
CUDA_HOME = /home/projects/cuda/6.0.37
NVCC = nvcc
# Kepler CUDA
CUDA_ARCH = -arch=sm_35
# Tesla CUDA
#CUDA_ARCH = -arch=sm_21
# newer CUDA
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_DOUBLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64
CUDA_OPTS = -DUNIX -O3 -Xptxas -v --use_fast_math
CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 # -xHost -no-prec-div -ansi-alias
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile


@ -1,50 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
CUDA_HOME = /home/projects/cuda/6.0.37
NVCC = nvcc
# Kepler CUDA
CUDA_ARCH = -arch=sm_35
# Tesla CUDA
#CUDA_ARCH = -arch=sm_21
# newer CUDA
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_SINGLE_DOUBLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64
CUDA_OPTS = -DUNIX -O3 -Xptxas -v --use_fast_math
CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 # -xHost -no-prec-div -ansi-alias
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile


@ -1,50 +0,0 @@
# /* ----------------------------------------------------------------------
# Generic Linux Makefile for CUDA
# - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */
# which file will be copied to Makefile.lammps
EXTRAMAKE = Makefile.lammps.standard
CUDA_HOME = /home/projects/cuda/6.0.37
NVCC = nvcc
# Kepler CUDA
CUDA_ARCH = -arch=sm_35
# Tesla CUDA
#CUDA_ARCH = -arch=sm_21
# newer CUDA
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE
# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL
LMP_INC = -DLAMMPS_SMALLBIG
# precision for GPU calculations
# -D_SINGLE_SINGLE # Single precision for all calculations
# -D_DOUBLE_DOUBLE # Double precision for all calculations
# -D_SINGLE_DOUBLE # Accumulation of forces, etc. in double
CUDA_PRECISION = -D_SINGLE_SINGLE
CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64
CUDA_OPTS = -DUNIX -O3 -Xptxas -v --use_fast_math
CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 # -xHost -no-prec-div -ansi-alias
BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh
CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini
include Nvidia.makefile


@ -1,109 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O3 -openmp -DLAMMPS_MEMALIGN=64 -no-offload \
-xHost -fno-alias -ansi-alias -restrict -override-limits
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O -openmp
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC = -DLAMMPS_GZIP -DLAMMPS_JPEG
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,113 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = nvcc
CCFLAGS = -O3 -arch=sm_35
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = mpicxx
LINKFLAGS = -O
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
OMP = yes
CUDA = yes
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cu
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,110 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
OMP = yes
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O3 -openmp -restrict -ansi-alias
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O -openmp
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW
SHELL = /bin/sh
# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler
CC = icc
CCFLAGS = -O -restrict
SHFLAGS = -fPIC
DEPFLAGS = -M
LINK = icc
LINKFLAGS = -O
LIB = -lstdc++
SIZE = size
ARCHIVE = ar
ARFLAGS = -rc
SHLIBFLAGS = -shared
# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"
# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)
LMP_INC =
# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library
MPI_INC = -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH = -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB = -lmpi
# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library
FFT_INC =
FFT_PATH =
FFT_LIB =
# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library
JPG_INC =
JPG_PATH =
JPG_LIB = -ljpeg
# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section
include Makefile.package.settings
include Makefile.package
EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)
# Path to src files
vpath %.cpp ..
vpath %.h ..
# Link target
$(EXE): $(OBJ)
$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
$(SIZE) $(EXE)
# Library targets
lib: $(OBJ)
$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)
shlib: $(OBJ)
$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
$(OBJ) $(EXTRA_LIB) $(LIB)
# Compilation rules
%.o:%.cpp
$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<
%.d:%.cpp
$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@
# Individual dependencies
DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)


@ -1,68 +0,0 @@
These are build, input, and run scripts used to run the LJ benchmark
in the top-level bench directory using all the various accelerator
packages currently available in LAMMPS. The results of running these
benchmarks on a GPU cluster with Kepler GPUs are shown in the "GPU
(Kepler)" section of the Benchmark page of the LAMMPS WWW site:
lammps.sandia.gov/bench.
The specifics of the benchmark machine are as follows:
It is a small GPU cluster at Sandia National Labs called "shannon". It
has 32 nodes, each with two 8-core Sandy Bridge Xeon CPUs (E5-2670,
2.6GHz, HT deactivated), for a total of 512 cores. Twenty-four of the
nodes have two NVIDIA Kepler GPUs (K20x, 2688 cores at 732 MHz). LAMMPS
was compiled with the Intel icc compiler, using module
openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37.
------------------------------------------------------------------------
You can, of course, build LAMMPS yourself with any of the accelerator
packages installed for your platform.
The build.py script will build LAMMPS for the various accelerator
packages using the Makefile.* files in this dir, which you can edit if
necessary for your platform. You must set the "lmpdir" variable at
the top of build.py to the home directory of LAMMPS as installed on
your system. Note that the build.py script hardcodes the arch setting
for the USER-CUDA package, which should be matched to the GPUs on your
system, e.g. sm_35 for Kepler GPUs. For the GPU package, this setting
is in the Makefile.gpu.* files, as is the CUDA_HOME variable which
should point to where NVIDIA Cuda software is installed on your
system.
Once the Makefiles are in place, typing, for example,
python build.py cpu gpu
will build executables for the CPU (no accelerators), and 3 variants
(double, mixed, single precision) of the GPU package. See the list of
possible targets at the top of the build.py script.
Note that the build.py script will un-install all packages in your
LAMMPS directory, then only install the ones needed for the benchmark.
The Makefile.* files in this dir are copied into lammps/src/MAKE, as a
dummy Makefile.foo, so they will not conflict with makefiles that may
already be there. The build.py script also builds the auxiliary GPU
and USER-CUDA libraries as needed.
LAMMPS executables that are generated by build.py are copied into this
directory when the script finishes each build.
------------------------------------------------------------------------
The in.* files can be run with any of the accelerator packages,
if you specify the appropriate command-line switches. These
include switches to set the problem size and number of timesteps
to run.
The run*.sh scripts have sample mpirun commands for running the input
scripts on a single node or on multiple nodes for the strong and weak
scaling results shown on the benchmark web page. These scripts are
provided for illustration purposes, to show what command-line
arguments are used with each accelerator package.
Note that we generate these run scripts, either for interactive or
batch submission, via Python scripts, which often produce a long list
of runs to exercise a combination of options. To perform a quick
benchmark calculation on your platform, you will typically only want
to run a few commands out of any of the run*.sh scripts.


@ -1,187 +0,0 @@
#!/usr/local/bin/python
# Syntax: build.py target1 target2 ...
# targets:
# cpu, opt, omp,
# gpu/double, gpu/mixed, gpu/single,
# cuda/double, cuda/mixed, cuda/single,
# intel/cpu, intel/phi,
# kokkos/omp, kokkos/phi, kokkos/cuda
# gpu = gpu/double + gpu/mixed + gpu/single
# cuda = cuda/double + cuda/mixed + cuda/single
# intel = intel/cpu + intel/phi
# kokkos = kokkos/omp + kokkos/phi + kokkos/cuda
# all = cpu + opt + omp + gpu + cuda + intel + kokkos
# create executables for different packages
# MUST set lmpdir to path of LAMMPS home directory
import sys,commands,os
lmpdir = "~/lammps"
# build LAMMPS
# copy makefile into src/MAKE as Makefile.foo, then remove it
def build_lammps(makefile,pkg):
print "Building LAMMPS with %s and %s packages ..." % (makefile,pkg)
commands.getoutput("cp %s %s/src/MAKE/Makefile.foo" % (makefile,lmpdir))
cwd = os.getcwd()
os.chdir(os.path.expanduser(lmpdir + "/src"))
str = "make clean-foo"
txt = commands.getoutput(str)
str = "make no-all"
txt = commands.getoutput(str)
for package in pkg:
str = "make yes-%s" % package
txt = commands.getoutput(str)
print txt
str = "make -j 16 foo"
txt = commands.getoutput(str)
os.remove("MAKE/Makefile.foo")
os.chdir(cwd)
# build GPU library in LAMMPS
# copy makefile into lib/gpu as Makefile.foo, then remove it
def build_gpu(makefile):
print "Building GPU lib with %s ..." % makefile
commands.getoutput("cp %s %s/lib/gpu/Makefile.foo" % (makefile,lmpdir))
cwd = os.getcwd()
os.chdir(os.path.expanduser(lmpdir + "/lib/gpu"))
str = "make -f Makefile.foo clean"
txt = commands.getoutput(str)
str = "make -j 16 -f Makefile.foo"
txt = commands.getoutput(str)
os.remove("Makefile.foo")
os.chdir(cwd)
# build CUDA library in LAMMPS
# set precision and arch explicitly as options to make in lib/cuda
def build_cuda(precision,arch):
print "Building USER-CUDA lib with %s and arch sm_%d ..." % (precision,arch)
cwd = os.getcwd()
os.chdir(os.path.expanduser(lmpdir + "/lib/cuda"))
str = "make clean"
txt = commands.getoutput(str)
if precision == "double": pflag = 2
elif precision == "mixed": pflag = 4
elif precision == "single": pflag = 1
str = "make -j 16 precision=%d arch=%s" % (pflag,arch)
txt = commands.getoutput(str)
os.chdir(cwd)
# main program
# convert target keywords into target flags
cpu = opt = omp = 0
gpu = gpu_double = gpu_mixed = gpu_single = 0
cuda = cuda_double = cuda_mixed = cuda_single = 0
intel = intel_cpu = intel_phi = 0
kokkos = kokkos_omp = kokkos_phi = kokkos_cuda = 0
targets = sys.argv[1:]
for target in targets:
if target == "cpu": cpu = 1
elif target == "opt": opt = 1
elif target == "omp": omp = 1
elif target == "gpu/double": gpu_double = 1
elif target == "gpu/mixed": gpu_mixed = 1
elif target == "gpu/single": gpu_single = 1
elif target == "gpu": gpu = 1
elif target == "cuda/double": cuda_double = 1
elif target == "cuda/mixed": cuda_mixed = 1
elif target == "cuda/single": cuda_single = 1
elif target == "cuda": cuda = 1
elif target == "intel/cpu": intel_cpu = 1
elif target == "intel/phi": intel_phi = 1
elif target == "intel": intel = 1
elif target == "kokkos/omp": kokkos_omp = 1
elif target == "kokkos/phi": kokkos_phi = 1
elif target == "kokkos/cuda": kokkos_cuda = 1
elif target == "kokkos": kokkos = 1
elif target == "all": cpu = omp = gpu = cuda = intel = kokkos = 1
else: print "Target",target,"is unknown"
if gpu: gpu_double = gpu_mixed = gpu_single = 1
if cuda: cuda_double = cuda_mixed = cuda_single = 1
if intel: intel_cpu = intel_phi = 1
if kokkos: kokkos_omp = kokkos_phi = kokkos_cuda = 1
# CPU
if cpu:
build_lammps(makefile = "Makefile.cpu", pkg = [])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cpu" % lmpdir)
# OPT
if opt:
build_lammps(makefile = "Makefile.opt", pkg = ["opt"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_opt" % lmpdir)
# OMP
if omp:
build_lammps(makefile = "Makefile.omp", pkg = ["user-omp"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_omp" % lmpdir)
# GPU, 3 precisions
if gpu_double:
build_gpu(makefile = "Makefile.gpu.double")
build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_double" % lmpdir)
if gpu_mixed:
build_gpu(makefile = "Makefile.gpu.mixed")
build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_mixed" % lmpdir)
if gpu_single:
build_gpu(makefile = "Makefile.gpu.single")
build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_single" % lmpdir)
# CUDA, 3 precisions
if cuda_double:
build_cuda(precision = "double", arch = 35)
build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_double" % lmpdir)
if cuda_mixed:
build_cuda(precision = "mixed", arch = 35)
build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_mixed" % lmpdir)
if cuda_single:
build_cuda(precision = "single", arch = 35)
build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_single" % lmpdir)
# INTEL, CPU and Phi
if intel_cpu:
build_lammps(makefile = "Makefile.intel.cpu", pkg = ["user-intel"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_intel_cpu" % lmpdir)
if intel_phi:
build_lammps(makefile = "Makefile.intel.phi", pkg = ["user-intel","user-omp"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_intel_phi" % lmpdir)
# KOKKOS, all variants
if kokkos_omp:
build_lammps(makefile = "Makefile.kokkos.omp", pkg = ["kokkos"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_omp" % lmpdir)
if kokkos_phi:
build_lammps(makefile = "Makefile.kokkos.phi", pkg = ["kokkos"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_phi" % lmpdir)
if kokkos_cuda:
build_lammps(makefile = "Makefile.kokkos.cuda", pkg = ["kokkos"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_cuda" % lmpdir)
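Note that build.py above is Python 2 code: it uses print statements and the "commands" module, which was removed in Python 3. As a minimal sketch, the central build helper could be rewritten for Python 3 with the subprocess module along these lines (directory layout and make targets taken from the script above; an illustrative assumption, not part of the original benchmark scripts):

import os
import subprocess

lmpdir = os.path.expanduser("~/lammps")   # LAMMPS home directory, as in build.py

def build_lammps(makefile, pkg):
    # copy the machine makefile into src/MAKE as Makefile.foo,
    # select the requested packages, build, then clean up
    src = os.path.join(lmpdir, "src")
    subprocess.run(["cp", makefile, os.path.join(src, "MAKE", "Makefile.foo")], check=True)
    subprocess.run(["make", "clean-foo"], cwd=src)    # may fail harmlessly on a fresh tree
    subprocess.run(["make", "no-all"], cwd=src)
    for package in pkg:
        result = subprocess.run(["make", "yes-%s" % package], cwd=src,
                                capture_output=True, text=True)
        print(result.stdout)
    subprocess.run(["make", "-j", "16", "foo"], cwd=src, check=True)
    os.remove(os.path.join(src, "MAKE", "Makefile.foo"))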


@ -1,22 +0,0 @@
# 3d Lennard-Jones melt
units lj
atom_style atomic
lattice fcc 0.8442
region box block 0 $x 0 $y 0 $z
create_box 1 box
create_atoms 1 box
mass 1 1.0
velocity all create 1.44 87287 loop geom
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.3 bin
neigh_modify delay 0 every 20 check no
fix 1 all nve
run $t


@ -1,29 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.1
mpirun -np 2 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.2
mpirun -np 4 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.4
mpirun -np 6 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.6
mpirun -np 8 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.8
mpirun -np 10 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.10
mpirun -np 12 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.12
mpirun -np 14 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.14
mpirun -np 16 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.16


@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -N 1 lmp_cuda_double -c on -sf cuda -pk cuda 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.double.128K.1
mpirun -N 2 lmp_cuda_double -c on -sf cuda -pk cuda 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.double.128K.2
mpirun -N 1 lmp_cuda_mixed -c on -sf cuda -pk cuda 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.mixed.128K.1
mpirun -N 2 lmp_cuda_mixed -c on -sf cuda -pk cuda 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.mixed.128K.2
mpirun -N 1 lmp_cuda_single -c on -sf cuda -pk cuda 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.single.128K.1
mpirun -N 2 lmp_cuda_single -c on -sf cuda -pk cuda 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.single.128K.2


@ -1,155 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.1.1
mpirun -np 2 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.2.1
mpirun -np 2 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.2.2
mpirun -np 4 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.4.1
mpirun -np 4 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.4.2
mpirun -np 6 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.6.1
mpirun -np 6 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.6.2
mpirun -np 8 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.8.1
mpirun -np 8 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.8.2
mpirun -np 10 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.10.1
mpirun -np 10 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.10.2
mpirun -np 12 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.12.1
mpirun -np 12 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.12.2
mpirun -np 14 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.14.1
mpirun -np 14 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.14.2
mpirun -np 16 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.16.1
mpirun -np 16 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.16.2
mpirun -np 1 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.1.1
mpirun -np 2 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.2.1
mpirun -np 2 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.2.2
mpirun -np 4 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.4.1
mpirun -np 4 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.4.2
mpirun -np 6 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.6.1
mpirun -np 6 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.6.2
mpirun -np 8 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.8.1
mpirun -np 8 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.8.2
mpirun -np 10 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.10.1
mpirun -np 10 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.10.2
mpirun -np 12 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.12.1
mpirun -np 12 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.12.2
mpirun -np 14 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.14.1
mpirun -np 14 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.14.2
mpirun -np 16 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.16.1
mpirun -np 16 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.16.2
mpirun -np 1 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.1.1
mpirun -np 2 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.2.1
mpirun -np 2 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.2.2
mpirun -np 4 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.4.1
mpirun -np 4 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.4.2
mpirun -np 6 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.6.1
mpirun -np 6 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.6.2
mpirun -np 8 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.8.1
mpirun -np 8 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.8.2
mpirun -np 10 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.10.1
mpirun -np 10 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.10.2
mpirun -np 12 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.12.1
mpirun -np 12 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.12.2
mpirun -np 14 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.14.1
mpirun -np 14 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.14.2
mpirun -np 16 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.16.1
mpirun -np 16 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.16.2
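
The script above repeats one mpirun invocation per combination of MPI rank count, GPU count, and precision binary (single, mixed, double). A hypothetical loop form of the same sweep (not part of the original suite; binary names, package flags, and log-name stamps are copied from the commands above):

#!/bin/bash
# Hypothetical rewrite of the sweep above; reproduces the same runs and log names.
for prec in single mixed double; do
  for np in 1 2 4 6 8 10 12 14 16; do
    for ngpu in 1 2; do
      # the script above runs only the 1-GPU case for a single MPI rank
      [ "$np" -eq 1 ] && [ "$ngpu" -eq 2 ] && continue
      mpirun -np "$np" "lmp_gpu_$prec" -sf gpu -pk gpu "$ngpu" \
        -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
      mv log.lammps "log.10Sep14.lj.gpu.$prec.128K.$np.$ngpu"
    done
  done
done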

@@ -1,83 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.1
mpirun -np 2 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.2
mpirun -np 4 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.4
mpirun -np 6 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.6
mpirun -np 8 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.8
mpirun -np 10 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.10
mpirun -np 12 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.12
mpirun -np 14 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.14
mpirun -np 16 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.16
mpirun -np 1 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.1
mpirun -np 2 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.2
mpirun -np 4 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.4
mpirun -np 6 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.6
mpirun -np 8 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.8
mpirun -np 10 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.10
mpirun -np 12 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.12
mpirun -np 14 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.14
mpirun -np 16 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.16
mpirun -np 1 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.1
mpirun -np 2 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.2
mpirun -np 4 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.4
mpirun -np 6 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.6
mpirun -np 8 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.8
mpirun -np 10 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.10
mpirun -np 12 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.12
mpirun -np 14 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.14
mpirun -np 16 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.16

@@ -1,74 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 1 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.1
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 2 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.2
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 3 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.3
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 4 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.4
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 5 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.5
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 6 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.6
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 7 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.7
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 8 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.8
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 9 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.9
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 10 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.10
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 11 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.11
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 12 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.12
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 13 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.13
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 14 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.14
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 15 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.15
mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 16 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.16
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 1 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.1
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 2 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.2
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 3 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.3
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 4 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.4
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 5 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.5
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 6 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.6
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 7 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.7
mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 8 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.8

@@ -1,17 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 16 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.1.16
mpirun -np 2 -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 8 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.2.8
mpirun -np 4 -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 4 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.4.4
mpirun -np 8 -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 2 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.8.2
mpirun -np 16 -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 1 -sf kk -pk kokkos neigh half newton on comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.16.1

@@ -1,17 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 lmp_omp -sf omp -pk omp 16 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.1.16
mpirun -np 2 lmp_omp -sf omp -pk omp 8 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.2.8
mpirun -np 4 lmp_omp -sf omp -pk omp 4 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.4.4
mpirun -np 8 lmp_omp -sf omp -pk omp 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.8.2
mpirun -np 16 lmp_omp -sf omp -pk omp 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.16.1
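
This script, like the Kokkos/OpenMP one above, pairs MPI ranks with OpenMP threads so that ranks times threads equals 16. A hypothetical loop form of the same sweep, reusing the binary and log stamp from the commands above:

#!/bin/bash
# Hypothetical rewrite: ranks * threads = 16 for every run.
for np in 1 2 4 8 16; do
  nt=$((16 / np))
  mpirun -np "$np" lmp_omp -sf omp -pk omp "$nt" \
    -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
  mv log.lammps "log.10Sep14.lj.omp.128K.$np.$nt"
done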

@@ -1,29 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00
mpirun -np 1 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.1
mpirun -np 2 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.2
mpirun -np 4 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.4
mpirun -np 6 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.6
mpirun -np 8 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.8
mpirun -np 10 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.10
mpirun -np 12 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.12
mpirun -np 14 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.14
mpirun -np 16 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.16

@@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -N 16 --time=12:00:00
mpirun -npernode 16 lmp_cpu -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cpu.2048K.16.16
mpirun -npernode 16 lmp_omp -sf omp -pk omp 1 -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.omp.2048K.16.1.16
mpirun -npernode 2 lmp_cuda -c on -sf cuda -pk cuda 2 -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cuda.2048K.2.16
mpirun -npernode 14 lmp_gpu -sf gpu -pk gpu 2 -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.gpu.2048K.2.14.16
mpirun -npernode 2 lmp_kokkos_cuda -k on g 2 t 1 -sf kk -pk kokkos comm device -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.cuda.2048K.2.1.16
mpirun -np 256 -bind-to core -map-by core -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 1 -sf kk -pk kokkos comm device -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.omp.2048K.16.1.16

@@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -N 16 --time=12:00:00
mpirun -npernode 16 lmp_cpu -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cpu.512K.16.16
mpirun -npernode 16 lmp_omp -sf omp -pk omp 1 -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.omp.512K.16.1.16
mpirun -npernode 2 lmp_cuda -c on -sf cuda -pk cuda 2 -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cuda.512K.2.16
mpirun -npernode 14 lmp_gpu -sf gpu -pk gpu 2 -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.gpu.512K.2.14.16
mpirun -npernode 2 lmp_kokkos_cuda -k on g 2 t 1 -sf kk -pk kokkos comm device -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.cuda.512K.2.1.16
mpirun -np 256 -bind-to core -map-by core -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 1 -sf kk -pk kokkos comm device -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.omp.512K.16.1.16

bench/POTENTIALS/CH.rebo Symbolic link
@@ -0,0 +1 @@
../../potentials/CH.rebo

File diff suppressed because it is too large
@@ -11,7 +11,7 @@ neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style rebo
pair_coeff * * CH.airebo C H
pair_coeff * * CH.rebo C H
velocity all create 300.0 761341

@@ -1,87 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# AIREBO polyethylene benchmark
units metal
atom_style atomic
read_data data.airebo
orthogonal box = (-2.1 -2.1 0) to (2.1 2.1 25.579)
1 by 1 by 1 MPI processor grid
reading atoms ...
60 atoms
replicate 17 16 2
orthogonal box = (-2.1 -2.1 0) to (69.3 65.1 51.158)
1 by 1 by 1 MPI processor grid
32640 atoms
Time spent = 0.00154901 secs
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style airebo 3.0 1 1
pair_coeff * * CH.airebo C H
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 10.7
ghost atom cutoff = 10.7
binsize = 5.35, bins = 14 13 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair airebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 106.4 | 106.4 | 106.4 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -139299.7 0 -138034.03 7998.7287
10 161.33916 -138711.85 0 -138031.17 33242.273
20 208.59505 -138911.77 0 -138031.73 -3199.2371
30 139.73485 -138617.76 0 -138028.23 10890.529
40 142.15332 -138628.03 0 -138028.3 14614.022
50 114.21945 -138509.87 0 -138027.98 24700.885
60 164.9432 -138725.08 0 -138029.19 35135.722
70 162.14928 -138714.86 0 -138030.77 5666.4609
80 157.17575 -138694.81 0 -138031.7 19838.161
90 196.16354 -138859.65 0 -138032.05 -7942.9718
100 178.30378 -138783.8 0 -138031.55 31012.15
Loop time of 60.9424 on 1 procs for 100 steps with 32640 atoms
Performance: 0.071 ns/day, 338.569 hours/ns, 1.641 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 55.299 | 55.299 | 55.299 | 0.0 | 90.74
Neigh | 5.5777 | 5.5777 | 5.5777 | 0.0 | 9.15
Comm | 0.027658 | 0.027658 | 0.027658 | 0.0 | 0.05
Output | 0.0011463 | 0.0011463 | 0.0011463 | 0.0 | 0.00
Modify | 0.024684 | 0.024684 | 0.024684 | 0.0 | 0.04
Other | | 0.012 | | | 0.02
Nlocal: 32640 ave 32640 max 32640 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 48190 ave 48190 max 48190 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 2.22179e+07 ave 2.22179e+07 max 2.22179e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 22217870
Ave neighs/atom = 680.695
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:01:02
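
The three performance figures in this log follow from the loop time alone: 100 steps at a 0.0005 ps timestep simulate 0.05 ps, i.e. 0.00005 ns, and dividing by the 60.9424 s loop time yields the quoted rates. A quick sanity check with bc, assuming GNU bc is available (bc does not accept e-notation, hence the decimal forms); the same arithmetic applies to every log in this set:

echo "100 * 0.0005 / 1000 * 86400 / 60.9424" | bc -l   # ns/day      ~ 0.0709  (log: 0.071)
echo "60.9424 / 3600 / 0.00005" | bc -l                # hours/ns    ~ 338.57  (log: 338.569)
echo "100 / 60.9424" | bc -l                           # timesteps/s ~ 1.641   (log: 1.641)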

@@ -1,87 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# AIREBO polyethylene benchmark
units metal
atom_style atomic
read_data data.airebo
orthogonal box = (-2.1 -2.1 0) to (2.1 2.1 25.579)
1 by 1 by 4 MPI processor grid
reading atoms ...
60 atoms
replicate 17 16 2
orthogonal box = (-2.1 -2.1 0) to (69.3 65.1 51.158)
2 by 2 by 1 MPI processor grid
32640 atoms
Time spent = 0.00070262 secs
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style airebo 3.0 1 1
pair_coeff * * CH.airebo C H
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 10.7
ghost atom cutoff = 10.7
binsize = 5.35, bins = 14 13 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair airebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 29.37 | 29.75 | 30.13 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -139299.7 0 -138034.03 7998.7287
10 161.33916 -138711.85 0 -138031.17 33242.273
20 208.59505 -138911.77 0 -138031.73 -3199.2371
30 139.73485 -138617.76 0 -138028.23 10890.529
40 142.15332 -138628.03 0 -138028.3 14614.022
50 114.21945 -138509.87 0 -138027.98 24700.885
60 164.9432 -138725.08 0 -138029.19 35135.722
70 162.14928 -138714.86 0 -138030.77 5666.4609
80 157.17575 -138694.81 0 -138031.7 19838.161
90 196.16354 -138859.65 0 -138032.05 -7942.9718
100 178.30378 -138783.8 0 -138031.55 31012.15
Loop time of 16.768 on 4 procs for 100 steps with 32640 atoms
Performance: 0.258 ns/day, 93.156 hours/ns, 5.964 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 14.538 | 14.589 | 14.654 | 1.3 | 87.00
Neigh | 1.8853 | 1.8992 | 1.9159 | 0.8 | 11.33
Comm | 0.18073 | 0.25896 | 0.31361 | 10.6 | 1.54
Output | 0.00050807 | 0.0040419 | 0.0077746 | 5.6 | 0.02
Modify | 0.0094635 | 0.0096973 | 0.0099616 | 0.2 | 0.06
Other | | 0.007481 | | | 0.04
Nlocal: 8160 ave 8174 max 8146 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 22614.5 ave 22629 max 22601 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 5.55447e+06 ave 5.56557e+06 max 5.54193e+06 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Total # of neighbors = 22217870
Ave neighs/atom = 680.695
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:17

@@ -1,93 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell
units real
atom_style charge
read_data data.reax
orthogonal box = (0 0 0) to (9.49107 9.49107 6.99123)
1 by 1 by 1 MPI processor grid
reading atoms ...
58 atoms
replicate 7 8 10
orthogonal box = (0 0 0) to (66.4375 75.9285 69.9123)
1 by 1 by 1 MPI processor grid
32480 atoms
Time spent = 0.00162625 secs
velocity all create 300.0 9999
pair_style reax/c NULL
pair_coeff * * ffield.reax C H O N
timestep 0.1
fix 1 all nve
fix 2 all qeq/reax 1 0.0 10.0 1.0e-6 reax/c
thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 12
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair reax/c, perpetual
attributes: half, newton off, ghost
pair build: half/bin/newtoff/ghost
stencil: half/ghost/bin/3d/newtoff
bin: standard
(2) fix qeq/reax, perpetual, copy from (1)
attributes: half, newton off, ghost
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 1727 | 1727 | 1727 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 29044.119 -3232140.8 22804.879 -29365.593 6302.5637 -3203096.6
10 299.37479 28983.59 -3232075.2 21746.778 -23987.41 7610.2967 -3203091.6
20 295.5855 28616.735 -3231710.1 18178.568 -10871.882 10603.247 -3203093.3
30 289.48845 28026.457 -3231123.2 12146.362 4985.5572 13364.455 -3203096.8
40 282.66404 27365.76 -3230467.5 4284.2794 18132.771 14133.719 -3203101.7
50 274.97005 26620.876 -3229730.4 -3719.11 25519.692 12551.708 -3203109.5
60 266.11301 25763.393 -3228883.8 -9271.4049 27307.216 9753.2509 -3203120.4
70 259.3263 25106.346 -3228237.2 -11150.726 24238.382 6578.5306 -3203130.8
80 260.33956 25204.444 -3228344.2 -9576.6006 16737.65 3454.5747 -3203139.7
90 269.90199 26130.219 -3229275.5 -5906.376 5246.1572 467.31789 -3203145.3
100 280.76717 27182.117 -3230330.6 -1363.8281 -8133.2509 -1689.7711 -3203148.5
Loop time of 437.886 on 1 procs for 100 steps with 32480 atoms
Performance: 0.002 ns/day, 12163.512 hours/ns, 0.228 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 350.29 | 350.29 | 350.29 | 0.0 | 80.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.026264 | 0.026264 | 0.026264 | 0.0 | 0.01
Output | 0.0024614 | 0.0024614 | 0.0024614 | 0.0 | 0.00
Modify | 87.55 | 87.55 | 87.55 | 0.0 | 19.99
Other | | 0.01296 | | | 0.00
Nlocal: 32480 ave 32480 max 32480 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 45128 ave 45128 max 45128 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.27781e+07 ave 1.27781e+07 max 1.27781e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 12778082
Ave neighs/atom = 393.414
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:07:24

@@ -1,93 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell
units real
atom_style charge
read_data data.reax
orthogonal box = (0 0 0) to (9.49107 9.49107 6.99123)
2 by 2 by 1 MPI processor grid
reading atoms ...
58 atoms
replicate 7 8 10
orthogonal box = (0 0 0) to (66.4375 75.9285 69.9123)
1 by 2 by 2 MPI processor grid
32480 atoms
Time spent = 0.000803709 secs
velocity all create 300.0 9999
pair_style reax/c NULL
pair_coeff * * ffield.reax C H O N
timestep 0.1
fix 1 all nve
fix 2 all qeq/reax 1 0.0 10.0 1.0e-6 reax/c
thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 12
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair reax/c, perpetual
attributes: half, newton off, ghost
pair build: half/bin/newtoff/ghost
stencil: half/ghost/bin/3d/newtoff
bin: standard
(2) fix qeq/reax, perpetual, copy from (1)
attributes: half, newton off, ghost
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 647 | 647 | 647 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 29044.119 -3232140.8 22804.879 -29365.593 6302.5638 -3203096.6
10 299.37479 28983.59 -3232075.2 21746.773 -23987.409 7610.2911 -3203091.6
20 295.58552 28616.737 -3231710.1 18178.576 -10871.874 10603.421 -3203093.3
30 289.48843 28026.455 -3231123.3 12146.158 4985.3436 13364.461 -3203096.8
40 282.66409 27365.764 -3230467.5 4284.5077 18133.151 14133.949 -3203101.7
50 274.97008 26620.879 -3229730.3 -3718.536 25520.328 12552.195 -3203109.5
60 266.11301 25763.393 -3228883.7 -9271.0381 27307.591 9753.5339 -3203120.4
70 259.32631 25106.348 -3228237.1 -11150.314 24238.962 6578.8636 -3203130.8
80 260.33966 25204.453 -3228344.1 -9575.5709 16738.467 3455.2525 -3203139.7
90 269.90213 26130.231 -3229275.5 -5906.0456 5246.2122 467.43473 -3203145.2
100 280.76727 27182.127 -3230330.6 -1363.1733 -8132.8726 -1689.3275 -3203148.4
Loop time of 128.275 on 4 procs for 100 steps with 32480 atoms
Performance: 0.007 ns/day, 3563.196 hours/ns, 0.780 timesteps/s
99.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 92.314 | 97.932 | 103.53 | 44.1 | 76.34
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.039458 | 5.6409 | 11.263 | 184.0 | 4.40
Output | 0.00086117 | 0.0010868 | 0.0016167 | 0.9 | 0.00
Modify | 24.687 | 24.688 | 24.69 | 0.0 | 19.25
Other | | 0.01323 | | | 0.01
Nlocal: 8120 ave 8120 max 8120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 21992 ave 21992 max 21992 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 3.48274e+06 ave 3.48274e+06 max 3.48274e+06 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 13930976
Ave neighs/atom = 428.909
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:02:10

@@ -1,87 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# REBO polyethylene benchmark
units metal
atom_style atomic
read_data data.rebo
orthogonal box = (-2.1 -2.1 0) to (2.1 2.1 25.579)
1 by 1 by 1 MPI processor grid
reading atoms ...
60 atoms
replicate 17 16 2
orthogonal box = (-2.1 -2.1 0) to (69.3 65.1 51.158)
1 by 1 by 1 MPI processor grid
32640 atoms
Time spent = 0.00151849 secs
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style rebo
pair_coeff * * CH.airebo C H
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.5
ghost atom cutoff = 6.5
binsize = 3.25, bins = 22 21 16
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair rebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 34.21 | 34.21 | 34.21 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -138442.48 0 -137176.8 2464.5258
10 179.38052 -137930.92 0 -137174.12 15656.95
20 206.87725 -138046.63 0 -137173.83 -24042.005
30 150.80048 -137807.07 0 -137170.86 -16524.069
40 173.25027 -137902 0 -137171.07 -5716.7297
50 151.80552 -137812.01 0 -137171.55 3481.1096
60 199.08762 -138013.46 0 -137173.53 17882.906
70 217.8592 -138093.51 0 -137174.38 -12269.648
80 202.37612 -138029.05 0 -137175.24 -7622.1573
90 194.905 -137996.68 0 -137174.4 -32267.297
100 185.17966 -137954.16 0 -137172.9 -6902.1493
Loop time of 5.17257 on 1 procs for 100 steps with 32640 atoms
Performance: 0.835 ns/day, 28.737 hours/ns, 19.333 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3427 | 3.3427 | 3.3427 | 0.0 | 64.62
Neigh | 1.7764 | 1.7764 | 1.7764 | 0.0 | 34.34
Comm | 0.017914 | 0.017914 | 0.017914 | 0.0 | 0.35
Output | 0.0011199 | 0.0011199 | 0.0011199 | 0.0 | 0.02
Modify | 0.024357 | 0.024357 | 0.024357 | 0.0 | 0.47
Other | | 0.01004 | | | 0.19
Nlocal: 32640 ave 32640 max 32640 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 26460 ave 26460 max 26460 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 4.90213e+06 ave 4.90213e+06 max 4.90213e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 4902132
Ave neighs/atom = 150.188
Neighbor list builds = 9
Dangerous builds = 0
Total wall time: 0:00:05

@@ -1,87 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# REBO polyethylene benchmark
units metal
atom_style atomic
read_data data.rebo
orthogonal box = (-2.1 -2.1 0) to (2.1 2.1 25.579)
1 by 1 by 4 MPI processor grid
reading atoms ...
60 atoms
replicate 17 16 2
orthogonal box = (-2.1 -2.1 0) to (69.3 65.1 51.158)
2 by 2 by 1 MPI processor grid
32640 atoms
Time spent = 0.000838995 secs
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style rebo
pair_coeff * * CH.airebo C H
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.5
ghost atom cutoff = 6.5
binsize = 3.25, bins = 22 21 16
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair rebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 11.75 | 11.94 | 12.13 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -138442.48 0 -137176.8 2464.5258
10 179.38052 -137930.92 0 -137174.12 15656.95
20 206.87725 -138046.63 0 -137173.83 -24042.005
30 150.80048 -137807.07 0 -137170.86 -16524.069
40 173.25027 -137902 0 -137171.07 -5716.7297
50 151.80552 -137812.01 0 -137171.55 3481.1096
60 199.08762 -138013.46 0 -137173.53 17882.906
70 217.8592 -138093.51 0 -137174.38 -12269.648
80 202.37612 -138029.05 0 -137175.24 -7622.1573
90 194.905 -137996.68 0 -137174.4 -32267.297
100 185.17966 -137954.16 0 -137172.9 -6902.1493
Loop time of 1.52214 on 4 procs for 100 steps with 32640 atoms
Performance: 2.838 ns/day, 8.456 hours/ns, 65.697 timesteps/s
98.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.88531 | 0.90632 | 0.92546 | 1.6 | 59.54
Neigh | 0.53954 | 0.54258 | 0.54621 | 0.3 | 35.65
Comm | 0.035654 | 0.058364 | 0.079543 | 7.0 | 3.83
Output | 0.00048494 | 0.00065351 | 0.0011017 | 0.0 | 0.04
Modify | 0.0090034 | 0.0090633 | 0.0091114 | 0.0 | 0.60
Other | | 0.005168 | | | 0.34
Nlocal: 8160 ave 8163 max 8157 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Nghost: 11605.8 ave 11615 max 11593 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 1.22553e+06 ave 1.22734e+06 max 1.22455e+06 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 4902132
Ave neighs/atom = 150.188
Neighbor list builds = 9
Dangerous builds = 0
Total wall time: 0:00:01

@@ -1,132 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# SPC/E water box benchmark
units real
atom_style full
read_data data.spce
orthogonal box = (0.02645 0.02645 0.02641) to (35.5328 35.5328 35.4736)
1 by 1 by 1 MPI processor grid
reading atoms ...
4500 atoms
scanning bonds ...
2 = max bonds/atom
scanning angles ...
1 = max angles/atom
reading bonds ...
3000 bonds
reading angles ...
1500 angles
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
replicate 2 4 1
orthogonal box = (0.02645 0.02645 0.02641) to (71.0392 142.052 35.4736)
1 by 1 by 1 MPI processor grid
36000 atoms
24000 bonds
12000 angles
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
Time spent = 0.0105426 secs
pair_style lj/cut/coul/long 9.8 9.8
kspace_style pppm 1.0e-4
pair_coeff 1 1 0.15535 3.166
pair_coeff * 2 0.0000 0.0000
bond_style harmonic
angle_style harmonic
dihedral_style none
improper_style none
bond_coeff 1 1000.00 1.000
angle_coeff 1 100.0 109.47
special_bonds lj/coul 0.0 0.0 0.5
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
neighbor 2.0 bin
neigh_modify every 1 delay 10 check yes
fix 1 all shake 0.0001 20 0 b 1 a 1
0 = # of size 2 clusters
0 = # of size 3 clusters
0 = # of size 4 clusters
12000 = # of frozen angles
fix 2 all nvt temp 300.0 300.0 100.0
velocity all create 300 432567 dist uniform
timestep 2.0
thermo_style one
thermo 50
run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.268801
grid = 36 64 24
stencil order = 5
estimated absolute RMS force accuracy = 0.0331015
estimated relative force accuracy = 9.96841e-05
using double precision FFTs
3d grid and FFT values/proc = 91977 55296
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 11.8
ghost atom cutoff = 11.8
binsize = 5.9, bins = 13 25 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut/coul/long, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 105.1 | 105.1 | 105.1 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -133281.51 0 -111820.57 516.17807
50 264.98553 -136986.74 0 -118030.61 -440.29256
100 274.45966 -136364.57 0 -116730.69 -128.61949
Loop time of 20.172 on 1 procs for 100 steps with 36000 atoms
Performance: 0.857 ns/day, 28.017 hours/ns, 4.957 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 15.818 | 15.818 | 15.818 | 0.0 | 78.41
Bond | 7.8201e-05 | 7.8201e-05 | 7.8201e-05 | 0.0 | 0.00
Kspace | 1.966 | 1.966 | 1.966 | 0.0 | 9.75
Neigh | 2.0639 | 2.0639 | 2.0639 | 0.0 | 10.23
Comm | 0.043918 | 0.043918 | 0.043918 | 0.0 | 0.22
Output | 0.00025153 | 0.00025153 | 0.00025153 | 0.0 | 0.00
Modify | 0.27056 | 0.27056 | 0.27056 | 0.0 | 1.34
Other | | 0.009522 | | | 0.05
Nlocal: 36000 ave 36000 max 36000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 56963 ave 56963 max 56963 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.24625e+07 ave 1.24625e+07 max 1.24625e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 12462451
Ave neighs/atom = 346.179
Ave special neighs/atom = 2
Neighbor list builds = 9
Dangerous builds = 6
Total wall time: 0:00:20

@@ -1,132 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# SPC/E water box benchmark
units real
atom_style full
read_data data.spce
orthogonal box = (0.02645 0.02645 0.02641) to (35.5328 35.5328 35.4736)
2 by 2 by 1 MPI processor grid
reading atoms ...
4500 atoms
scanning bonds ...
2 = max bonds/atom
scanning angles ...
1 = max angles/atom
reading bonds ...
3000 bonds
reading angles ...
1500 angles
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
replicate 2 4 1
orthogonal box = (0.02645 0.02645 0.02641) to (71.0392 142.052 35.4736)
1 by 4 by 1 MPI processor grid
36000 atoms
24000 bonds
12000 angles
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
Time spent = 0.00535488 secs
pair_style lj/cut/coul/long 9.8 9.8
kspace_style pppm 1.0e-4
pair_coeff 1 1 0.15535 3.166
pair_coeff * 2 0.0000 0.0000
bond_style harmonic
angle_style harmonic
dihedral_style none
improper_style none
bond_coeff 1 1000.00 1.000
angle_coeff 1 100.0 109.47
special_bonds lj/coul 0.0 0.0 0.5
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
neighbor 2.0 bin
neigh_modify every 1 delay 10 check yes
fix 1 all shake 0.0001 20 0 b 1 a 1
0 = # of size 2 clusters
0 = # of size 3 clusters
0 = # of size 4 clusters
12000 = # of frozen angles
fix 2 all nvt temp 300.0 300.0 100.0
velocity all create 300 432567 dist uniform
timestep 2.0
thermo_style one
thermo 50
run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.268801
grid = 36 64 24
stencil order = 5
estimated absolute RMS force accuracy = 0.0331015
estimated relative force accuracy = 9.96841e-05
using double precision FFTs
3d grid and FFT values/proc = 27993 13824
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 11.8
ghost atom cutoff = 11.8
binsize = 5.9, bins = 13 25 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut/coul/long, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 37.74 | 37.74 | 37.74 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -133281.51 0 -111820.57 516.17807
50 264.98553 -136986.74 0 -118030.61 -440.29256
100 274.45966 -136364.57 0 -116730.69 -128.61949
Loop time of 5.43807 on 4 procs for 100 steps with 36000 atoms
Performance: 3.178 ns/day, 7.553 hours/ns, 18.389 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.0016 | 4.0968 | 4.1706 | 3.3 | 75.34
Bond | 7.1049e-05 | 8.5771e-05 | 0.00010586 | 0.0 | 0.00
Kspace | 0.56386 | 0.63614 | 0.73036 | 8.3 | 11.70
Neigh | 0.52575 | 0.52587 | 0.52594 | 0.0 | 9.67
Comm | 0.045847 | 0.047308 | 0.048331 | 0.4 | 0.87
Output | 9.1314e-05 | 0.00012183 | 0.00021172 | 0.0 | 0.00
Modify | 0.12561 | 0.1258 | 0.12605 | 0.1 | 2.31
Other | | 0.005944 | | | 0.11
Nlocal: 9000 ave 9002 max 8998 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 24134.2 ave 24184 max 24062 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Neighs: 3.11561e+06 ave 3.11676e+06 max 3.11446e+06 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 12462451
Ave neighs/atom = 346.179
Ave special neighs/atom = 2
Neighbor list builds = 9
Dangerous builds = 6
Total wall time: 0:00:05
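
Comparing this 4-rank SPC/E log with the serial one above (loop times 20.172 s and 5.43807 s) gives the strong-scaling efficiency of the run; the same check works for any serial/parallel log pair in this set. A quick calculation with bc, assuming GNU bc is available:

echo "20.172 / 5.43807" | bc -l        # speedup on 4 ranks  ~ 3.71
echo "20.172 / (4 * 5.43807)" | bc -l  # parallel efficiency ~ 0.93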

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in ADP
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
Lattice spacing in x,y,z = 3.5200000 3.5200000 3.5200000
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (70.400000 70.400000 70.400000)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00184107 secs
create_atoms CPU = 0.002 seconds
pair_style adp
pair_coeff * * Ni.adp Ni
Reading adp potential file Ni.adp with DATE: 2011-06-20
velocity all create 1600.0 376847 loop geom
@@ -41,35 +41,35 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 27.56 | 27.56 | 27.56 Mbytes
Per MPI rank memory allocation (min/avg/max) = 27.57 | 27.57 | 27.57 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.105
100 793.05485 -139023.13 0 -135742.9 32175.694
Loop time of 11.9854 on 1 procs for 100 steps with 32000 atoms
Loop time of 11.0841 on 1 procs for 100 steps with 32000 atoms
Performance: 3.604 ns/day, 6.659 hours/ns, 8.344 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 3.897 ns/day, 6.158 hours/ns, 9.022 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 11.447 | 11.447 | 11.447 | 0.0 | 95.51
Neigh | 0.48465 | 0.48465 | 0.48465 | 0.0 | 4.04
Comm | 0.019317 | 0.019317 | 0.019317 | 0.0 | 0.16
Output | 0.00011063 | 0.00011063 | 0.00011063 | 0.0 | 0.00
Modify | 0.025319 | 0.025319 | 0.025319 | 0.0 | 0.21
Other | | 0.009125 | | | 0.08
Pair | 10.597 | 10.597 | 10.597 | 0.0 | 95.60
Neigh | 0.43765 | 0.43765 | 0.43765 | 0.0 | 3.95
Comm | 0.018561 | 0.018561 | 0.018561 | 0.0 | 0.17
Output | 0.0001123 | 0.0001123 | 0.0001123 | 0.0 | 0.00
Modify | 0.023261 | 0.023261 | 0.023261 | 0.0 | 0.21
Other | | 0.00792 | | | 0.07
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 19911 ave 19911 max 19911 min
Nghost: 19911.0 ave 19911 max 19911 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.33704e+06 ave 1.33704e+06 max 1.33704e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1337035
Ave neighs/atom = 41.7823
Ave neighs/atom = 41.782344
Neighbor list builds = 13
Dangerous builds = 0
Total wall time: 0:00:12
Total wall time: 0:00:11

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in ADP
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
Lattice spacing in x,y,z = 3.5200000 3.5200000 3.5200000
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (70.400000 70.400000 70.400000)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000586033 secs
create_atoms CPU = 0.001 seconds
pair_style adp
pair_coeff * * Ni.adp Ni
Reading adp potential file Ni.adp with DATE: 2011-06-20
velocity all create 1600.0 376847 loop geom
@@ -45,30 +45,30 @@ Per MPI rank memory allocation (min/avg/max) = 12.45 | 12.45 | 12.45 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.105
100 793.05485 -139023.13 0 -135742.9 32175.694
Loop time of 3.49752 on 4 procs for 100 steps with 32000 atoms
Loop time of 3.54402 on 4 procs for 100 steps with 32000 atoms
Performance: 12.352 ns/day, 1.943 hours/ns, 28.592 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 12.190 ns/day, 1.969 hours/ns, 28.217 timesteps/s
97.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3203 | 3.3261 | 3.3317 | 0.3 | 95.10
Neigh | 0.12544 | 0.12594 | 0.12634 | 0.1 | 3.60
Comm | 0.024059 | 0.03001 | 0.035574 | 2.9 | 0.86
Output | 4.8161e-05 | 6.8128e-05 | 0.00011802 | 0.0 | 0.00
Modify | 0.010666 | 0.010841 | 0.011109 | 0.2 | 0.31
Other | | 0.00457 | | | 0.13
Pair | 3.2768 | 3.3041 | 3.339 | 1.2 | 93.23
Neigh | 0.11542 | 0.11601 | 0.11661 | 0.1 | 3.27
Comm | 0.068218 | 0.10201 | 0.13103 | 7.0 | 2.88
Output | 4.4823e-05 | 8.0943e-05 | 0.000175 | 0.0 | 0.00
Modify | 0.010904 | 0.011064 | 0.011172 | 0.1 | 0.31
Other | | 0.01075 | | | 0.30
Nlocal: 8000 ave 8044 max 7960 min
Nlocal: 8000.00 ave 8044 max 7960 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Nghost: 9131 ave 9171 max 9087 min
Nghost: 9131.00 ave 9171 max 9087 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 334259 ave 336108 max 332347 min
Neighs: 334259.0 ave 336108 max 332347 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 1337035
Ave neighs/atom = 41.7823
Ave neighs/atom = 41.782344
Neighbor list builds = 13
Dangerous builds = 0

@@ -0,0 +1,90 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# AIREBO polyethylene benchmark
units metal
atom_style atomic
read_data data.airebo
Reading data file ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (2.1000000 2.1000000 25.579000)
1 by 1 by 1 MPI processor grid
reading atoms ...
60 atoms
read_data CPU = 0.000 seconds
replicate 17 16 2
Replicating atoms ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (69.300000 65.100000 51.158000)
1 by 1 by 1 MPI processor grid
32640 atoms
replicate CPU = 0.002 seconds
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style airebo 3.0 1 1
pair_coeff * * CH.airebo C H
Reading airebo potential file CH.airebo with DATE: 2011-10-25
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 10.7
ghost atom cutoff = 10.7
binsize = 5.35, bins = 14 13 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair airebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 106.4 | 106.4 | 106.4 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -139300.72 0 -138035.04 7988.6646
10 161.34683 -138712.9 0 -138032.19 33228.921
20 208.59504 -138912.79 0 -138032.74 -3211.8806
30 139.7513 -138618.85 0 -138029.25 10878.143
40 142.14562 -138629.02 0 -138029.32 14601.302
50 114.23401 -138510.95 0 -138029 24691.124
60 164.92002 -138726 0 -138030.21 35125.541
70 162.15256 -138715.9 0 -138031.79 5658.7946
80 157.16184 -138695.77 0 -138032.72 19824.698
90 196.15907 -138860.65 0 -138033.07 -7950.8463
100 178.31875 -138784.89 0 -138032.57 30997.671
Loop time of 58.0757 on 1 procs for 100 steps with 32640 atoms
Performance: 0.074 ns/day, 322.643 hours/ns, 1.722 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 53.513 | 53.513 | 53.513 | 0.0 | 92.14
Neigh | 4.5013 | 4.5013 | 4.5013 | 0.0 | 7.75
Comm | 0.026609 | 0.026609 | 0.026609 | 0.0 | 0.05
Output | 0.0010192 | 0.0010192 | 0.0010192 | 0.0 | 0.00
Modify | 0.02275 | 0.02275 | 0.02275 | 0.0 | 0.04
Other | | 0.01074 | | | 0.02
Nlocal: 32640.0 ave 32640 max 32640 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 48190.0 ave 48190 max 48190 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 2.22178e+07 ave 2.22178e+07 max 2.22178e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 22217840
Ave neighs/atom = 680.69363
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:59

@@ -0,0 +1,90 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# AIREBO polyethylene benchmark
units metal
atom_style atomic
read_data data.airebo
Reading data file ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (2.1000000 2.1000000 25.579000)
1 by 1 by 4 MPI processor grid
reading atoms ...
60 atoms
read_data CPU = 0.000 seconds
replicate 17 16 2
Replicating atoms ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (69.300000 65.100000 51.158000)
2 by 2 by 1 MPI processor grid
32640 atoms
replicate CPU = 0.001 seconds
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style airebo 3.0 1 1
pair_coeff * * CH.airebo C H
Reading airebo potential file CH.airebo with DATE: 2011-10-25
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 10.7
ghost atom cutoff = 10.7
binsize = 5.35, bins = 14 13 10
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair airebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 29.43 | 29.81 | 30.19 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -139300.72 0 -138035.04 7988.6646
10 161.34683 -138712.9 0 -138032.19 33228.921
20 208.59504 -138912.79 0 -138032.74 -3211.8806
30 139.7513 -138618.85 0 -138029.25 10878.143
40 142.14562 -138629.02 0 -138029.32 14601.302
50 114.23401 -138510.95 0 -138029 24691.124
60 164.92002 -138726 0 -138030.21 35125.541
70 162.15256 -138715.9 0 -138031.79 5658.7946
80 157.16184 -138695.77 0 -138032.72 19824.698
90 196.15907 -138860.65 0 -138033.07 -7950.8463
100 178.31875 -138784.89 0 -138032.57 30997.671
Loop time of 17.206 on 4 procs for 100 steps with 32640 atoms
Performance: 0.251 ns/day, 95.589 hours/ns, 5.812 timesteps/s
97.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 14.655 | 14.727 | 14.848 | 1.9 | 85.59
Neigh | 1.5571 | 1.6135 | 1.6871 | 3.7 | 9.38
Comm | 0.7741 | 0.83422 | 0.90385 | 5.8 | 4.85
Output | 0.00047541 | 0.0027475 | 0.009517 | 7.5 | 0.02
Modify | 0.0091925 | 0.009367 | 0.0096078 | 0.2 | 0.05
Other | | 0.01908 | | | 0.11
Nlocal: 8160.00 ave 8174 max 8146 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 22614.5 ave 22629 max 22601 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 5.55446e+06 ave 5.56556e+06 max 5.54192e+06 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Total # of neighbors = 22217840
Ave neighs/atom = 680.69363
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:17

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk CdTe via BOP
@@ -7,19 +6,18 @@ units metal
atom_style atomic
lattice custom 6.82884 basis 0.0 0.0 0.0 basis 0.25 0.25 0.25 basis 0.0 0.5 0.5 basis 0.25 0.75 0.75 basis 0.5 0.0 0.5 basis 0.75 0.25 0.75 basis 0.5 0.5 0.0 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 6.82884 6.82884 6.82884
Lattice spacing in x,y,z = 6.8288400 6.8288400 6.8288400
region box block 0 20 0 20 0 10
create_box 2 box
Created orthogonal box = (0 0 0) to (136.577 136.577 68.2884)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (136.57680 136.57680 68.288400)
1 by 1 by 1 MPI processor grid
create_atoms 1 box basis 2 2 basis 4 2 basis 6 2 basis 8 2
Created 32000 atoms
Time spent = 0.00191426 secs
create_atoms CPU = 0.002 seconds
pair_style bop
pair_coeff * * CdTe.bop.table Cd Te
Reading potential file CdTe.bop.table with DATE: 2012-06-25
Reading potential file CdTe.bop.table with DATE: 2012-06-25
Reading bop potential file CdTe.bop.table with DATE: 2012-06-25
mass 1 112.4
mass 2 127.6
@@ -51,32 +49,32 @@ Per MPI rank memory allocation (min/avg/max) = 19.39 | 19.39 | 19.39 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -69539.487 0 -65403.292 3473.2595
100 572.16481 -67769.936 0 -65403.35 1838.6993
Loop time of 24.1696 on 1 procs for 100 steps with 32000 atoms
Loop time of 36.0284 on 1 procs for 100 steps with 32000 atoms
Performance: 0.357 ns/day, 67.138 hours/ns, 4.137 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 0.240 ns/day, 100.079 hours/ns, 2.776 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 23.355 | 23.355 | 23.355 | 0.0 | 96.63
Neigh | 0.7545 | 0.7545 | 0.7545 | 0.0 | 3.12
Comm | 0.026978 | 0.026978 | 0.026978 | 0.0 | 0.11
Output | 0.0001111 | 0.0001111 | 0.0001111 | 0.0 | 0.00
Modify | 0.024145 | 0.024145 | 0.024145 | 0.0 | 0.10
Other | | 0.009326 | | | 0.04
Pair | 35.306 | 35.306 | 35.306 | 0.0 | 97.99
Neigh | 0.66375 | 0.66375 | 0.66375 | 0.0 | 1.84
Comm | 0.027954 | 0.027954 | 0.027954 | 0.0 | 0.08
Output | 9.9182e-05 | 9.9182e-05 | 9.9182e-05 | 0.0 | 0.00
Modify | 0.022574 | 0.022574 | 0.022574 | 0.0 | 0.06
Other | | 0.008374 | | | 0.02
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 35071 ave 35071 max 35071 min
Nghost: 35071.0 ave 35071 max 35071 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 141288 ave 141288 max 141288 min
FullNghs: 141288.0 ave 141288 max 141288 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 141288
Ave neighs/atom = 4.41525
Ave neighs/atom = 4.4152500
Neighbor list builds = 14
Dangerous builds = 0
Total wall time: 0:00:24
Total wall time: 0:00:36

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk CdTe via BOP
@@ -7,19 +6,18 @@ units metal
atom_style atomic
lattice custom 6.82884 basis 0.0 0.0 0.0 basis 0.25 0.25 0.25 basis 0.0 0.5 0.5 basis 0.25 0.75 0.75 basis 0.5 0.0 0.5 basis 0.75 0.25 0.75 basis 0.5 0.5 0.0 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 6.82884 6.82884 6.82884
Lattice spacing in x,y,z = 6.8288400 6.8288400 6.8288400
region box block 0 20 0 20 0 10
create_box 2 box
Created orthogonal box = (0 0 0) to (136.577 136.577 68.2884)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (136.57680 136.57680 68.288400)
2 by 2 by 1 MPI processor grid
create_atoms 1 box basis 2 2 basis 4 2 basis 6 2 basis 8 2
Created 32000 atoms
Time spent = 0.000597477 secs
create_atoms CPU = 0.001 seconds
pair_style bop
pair_coeff * * CdTe.bop.table Cd Te
Reading potential file CdTe.bop.table with DATE: 2012-06-25
Reading potential file CdTe.bop.table with DATE: 2012-06-25
Reading bop potential file CdTe.bop.table with DATE: 2012-06-25
mass 1 112.4
mass 2 127.6
@@ -47,36 +45,36 @@ Neighbor list info ...
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 8.497 | 8.497 | 8.497 Mbytes
Per MPI rank memory allocation (min/avg/max) = 8.495 | 8.495 | 8.495 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -69539.487 0 -65403.292 3473.2595
100 572.16481 -67769.936 0 -65403.35 1838.6993
Loop time of 6.50033 on 4 procs for 100 steps with 32000 atoms
Loop time of 10.2579 on 4 procs for 100 steps with 32000 atoms
Performance: 1.329 ns/day, 18.056 hours/ns, 15.384 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 0.842 ns/day, 28.494 hours/ns, 9.749 timesteps/s
97.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.7879 | 5.975 | 6.1607 | 5.4 | 91.92
Neigh | 0.27603 | 0.27621 | 0.27647 | 0.0 | 4.25
Comm | 0.049869 | 0.23531 | 0.42241 | 27.2 | 3.62
Output | 4.9829e-05 | 5.9724e-05 | 8.5592e-05 | 0.0 | 0.00
Modify | 0.0089927 | 0.0090921 | 0.0092406 | 0.1 | 0.14
Other | | 0.004665 | | | 0.07
Pair | 9.0899 | 9.3839 | 9.6808 | 6.9 | 91.48
Neigh | 0.24734 | 0.2533 | 0.25828 | 0.8 | 2.47
Comm | 0.30495 | 0.60685 | 0.89832 | 27.5 | 5.92
Output | 4.673e-05 | 7.695e-05 | 0.00016189 | 0.0 | 0.00
Modify | 0.0092409 | 0.00937 | 0.0094445 | 0.1 | 0.09
Other | | 0.004455 | | | 0.04
Nlocal: 8000 ave 8006 max 7994 min
Nlocal: 8000.00 ave 8006 max 7994 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 15171 ave 15177 max 15165 min
Nghost: 15171.0 ave 15177 max 15165 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 35322 ave 35412 max 35267 min
FullNghs: 35322.0 ave 35412 max 35267 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Total # of neighbors = 141288
Ave neighs/atom = 4.41525
Ave neighs/atom = 4.4152500
Neighbor list builds = 14
Dangerous builds = 0
Total wall time: 0:00:06
Total wall time: 0:00:10


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# SiO2 for COMB potential
@@ -7,10 +6,12 @@ units metal
atom_style charge
read_data data.comb
triclinic box = (0 0 0) to (74.58 74.58 83.064) with tilt (0 0 0)
Reading data file ...
triclinic box = (0.0000000 0.0000000 0.0000000) to (74.580000 74.580000 83.064000) with tilt (0.0000000 0.0000000 0.0000000)
1 by 1 by 1 MPI processor grid
reading atoms ...
32400 atoms
read_data CPU = 0.022 seconds
mass 1 28.0855
group type1 type 1
@@ -63,32 +64,32 @@ Step Temp TotEng PotEng E_vdwl E_coul c_q1 c_q2 Press Volume
80 272.98301 -6.803583 -6.8388677 4.6404093 -11.479277 2.8932784 -1.4466392 -9896.1704 462016.62
90 305.77651 -6.8036184 -6.8431419 4.6512736 -11.494415 2.8953109 -1.4476554 -15675.983 462016.62
100 331.58255 -6.8036753 -6.8465344 4.662727 -11.509261 2.897273 -1.4486365 -21675.515 462016.62
Loop time of 517.206 on 1 procs for 100 steps with 32400 atoms
Loop time of 426.185 on 1 procs for 100 steps with 32400 atoms
Performance: 0.003 ns/day, 7183.417 hours/ns, 0.193 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 0.004 ns/day, 5919.239 hours/ns, 0.235 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 114.18 | 114.18 | 114.18 | 0.0 | 22.08
Neigh | 0.47558 | 0.47558 | 0.47558 | 0.0 | 0.09
Comm | 0.030611 | 0.030611 | 0.030611 | 0.0 | 0.01
Output | 0.0024922 | 0.0024922 | 0.0024922 | 0.0 | 0.00
Modify | 402.51 | 402.51 | 402.51 | 0.0 | 77.82
Other | | 0.006137 | | | 0.00
Pair | 87.4 | 87.4 | 87.4 | 0.0 | 20.51
Neigh | 0.3908 | 0.3908 | 0.3908 | 0.0 | 0.09
Comm | 0.029936 | 0.029936 | 0.029936 | 0.0 | 0.01
Output | 0.0024605 | 0.0024605 | 0.0024605 | 0.0 | 0.00
Modify | 338.36 | 338.36 | 338.36 | 0.0 | 79.39
Other | | 0.005751 | | | 0.00
Nlocal: 32400 ave 32400 max 32400 min
Nlocal: 32400.0 ave 32400 max 32400 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 42518 ave 42518 max 42518 min
Nghost: 42518.0 ave 42518 max 42518 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1.85317e+07 ave 1.85317e+07 max 1.85317e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 18531740
Ave neighs/atom = 571.967
Ave neighs/atom = 571.96728
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:09:18
Total wall time: 0:07:40


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# SiO2 for COMB potential
@@ -7,10 +6,12 @@ units metal
atom_style charge
read_data data.comb
triclinic box = (0 0 0) to (74.58 74.58 83.064) with tilt (0 0 0)
Reading data file ...
triclinic box = (0.0000000 0.0000000 0.0000000) to (74.580000 74.580000 83.064000) with tilt (0.0000000 0.0000000 0.0000000)
1 by 2 by 2 MPI processor grid
reading atoms ...
32400 atoms
read_data CPU = 0.031 seconds
mass 1 28.0855
group type1 type 1
@@ -50,7 +51,7 @@ Neighbor list info ...
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 32.11 | 32.11 | 32.11 Mbytes
Per MPI rank memory allocation (min/avg/max) = 32.28 | 32.28 | 32.29 Mbytes
Step Temp TotEng PotEng E_vdwl E_coul c_q1 c_q2 Press Volume
0 300 -6.8032038 -6.8419806 4.6274455 -11.469426 2.8875895 -1.4437947 13386.415 462016.62
10 273.21913 -6.8032489 -6.8385642 4.6221303 -11.460695 2.8872353 -1.4436176 13076.442 462016.62
@@ -63,32 +64,32 @@ Step Temp TotEng PotEng E_vdwl E_coul c_q1 c_q2 Press Volume
80 272.98301 -6.803583 -6.8388677 4.6404093 -11.479277 2.8932784 -1.4466392 -9896.1704 462016.62
90 305.77651 -6.8036184 -6.8431419 4.6512736 -11.494415 2.8953109 -1.4476554 -15675.983 462016.62
100 331.58255 -6.8036753 -6.8465344 4.662727 -11.509261 2.897273 -1.4486365 -21675.515 462016.62
Loop time of 131.437 on 4 procs for 100 steps with 32400 atoms
Loop time of 116.902 on 4 procs for 100 steps with 32400 atoms
Performance: 0.013 ns/day, 1825.518 hours/ns, 0.761 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 0.015 ns/day, 1623.637 hours/ns, 0.855 timesteps/s
97.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 28.847 | 28.874 | 28.913 | 0.5 | 21.97
Neigh | 0.10981 | 0.11084 | 0.11145 | 0.2 | 0.08
Comm | 0.28924 | 0.32866 | 0.3556 | 4.5 | 0.25
Output | 0.0010426 | 0.0011656 | 0.0015302 | 0.6 | 0.00
Modify | 102.12 | 102.12 | 102.12 | 0.0 | 77.69
Other | | 0.003455 | | | 0.00
Pair | 22.866 | 23.181 | 23.375 | 4.0 | 19.83
Neigh | 0.093812 | 0.094818 | 0.095301 | 0.2 | 0.08
Comm | 0.94054 | 1.1329 | 1.4505 | 18.1 | 0.97
Output | 0.0011141 | 0.001422 | 0.0023448 | 1.4 | 0.00
Modify | 92.485 | 92.488 | 92.494 | 0.0 | 79.12
Other | | 0.003673 | | | 0.00
Nlocal: 8100 ave 8110 max 8090 min
Nlocal: 8100.00 ave 8110 max 8090 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 20725.2 ave 20772 max 20694 min
Histogram: 1 1 0 0 1 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4.63294e+06 ave 4.63866e+06 max 4.62736e+06 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Total # of neighbors = 18531740
Ave neighs/atom = 571.967
Ave neighs/atom = 571.96728
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:02:21
Total wall time: 0:02:06


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# DPD benchmark
@@ -8,14 +7,14 @@ atom_style atomic
comm_modify mode single vel yes
lattice fcc 3.0
Lattice spacing in x,y,z = 1.10064 1.10064 1.10064
Lattice spacing in x,y,z = 1.1006424 1.1006424 1.1006424
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (22.0128 22.0128 22.0128)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (22.012848 22.012848 22.012848)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.0018332 secs
create_atoms CPU = 0.002 seconds
mass 1 1.0
velocity all create 1.0 87287 loop geom
@@ -46,30 +45,30 @@ Per MPI rank memory allocation (min/avg/max) = 11.32 | 11.32 | 11.32 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 3.6872574 0 5.1872105 28.880274
100 1.0246036 4.5727353 0 6.1095927 23.859969
Loop time of 3.09286 on 1 procs for 100 steps with 32000 atoms
Loop time of 2.63541 on 1 procs for 100 steps with 32000 atoms
Performance: 111741.340 tau/day, 32.333 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 131137.146 tau/day, 37.945 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.5326 | 1.5326 | 1.5326 | 0.0 | 49.55
Neigh | 1.4771 | 1.4771 | 1.4771 | 0.0 | 47.76
Comm | 0.044292 | 0.044292 | 0.044292 | 0.0 | 1.43
Output | 0.00011039 | 0.00011039 | 0.00011039 | 0.0 | 0.00
Modify | 0.022322 | 0.022322 | 0.022322 | 0.0 | 0.72
Other | | 0.01648 | | | 0.53
Pair | 1.1841 | 1.1841 | 1.1841 | 0.0 | 44.93
Neigh | 1.3737 | 1.3737 | 1.3737 | 0.0 | 52.12
Comm | 0.04266 | 0.04266 | 0.04266 | 0.0 | 1.62
Output | 9.5844e-05 | 9.5844e-05 | 9.5844e-05 | 0.0 | 0.00
Modify | 0.020128 | 0.020128 | 0.020128 | 0.0 | 0.76
Other | | 0.01468 | | | 0.56
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 14981 ave 14981 max 14981 min
Nghost: 14981.0 ave 14981 max 14981 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 660587 ave 660587 max 660587 min
Neighs: 660587.0 ave 660587 max 660587 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 660587
Ave neighs/atom = 20.6433
Ave neighs/atom = 20.643344
Neighbor list builds = 50
Dangerous builds = 0
Total wall time: 0:00:03
Total wall time: 0:00:02


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# DPD benchmark
@@ -8,14 +7,14 @@ atom_style atomic
comm_modify mode single vel yes
lattice fcc 3.0
Lattice spacing in x,y,z = 1.10064 1.10064 1.10064
Lattice spacing in x,y,z = 1.1006424 1.1006424 1.1006424
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (22.0128 22.0128 22.0128)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (22.012848 22.012848 22.012848)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000589132 secs
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.0 87287 loop geom
@@ -42,34 +41,34 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.874 | 3.874 | 3.874 Mbytes
Per MPI rank memory allocation (min/avg/max) = 3.875 | 3.875 | 3.875 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 3.6872574 0 5.1872105 28.911346
100 1.0219182 4.5817845 0 6.1146139 23.803115
Loop time of 0.83904 on 4 procs for 100 steps with 32000 atoms
Loop time of 0.882096 on 4 procs for 100 steps with 32000 atoms
Performance: 411899.440 tau/day, 119.184 timesteps/s
99.3% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 391793.935 tau/day, 113.366 timesteps/s
93.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.39605 | 0.40101 | 0.40702 | 0.6 | 47.79
Neigh | 0.38186 | 0.38494 | 0.38738 | 0.4 | 45.88
Comm | 0.032073 | 0.039688 | 0.045953 | 2.9 | 4.73
Output | 4.4823e-05 | 5.4002e-05 | 7.844e-05 | 0.0 | 0.01
Modify | 0.0056572 | 0.0056887 | 0.0057547 | 0.1 | 0.68
Other | | 0.007655 | | | 0.91
Pair | 0.31428 | 0.33654 | 0.37754 | 4.4 | 38.15
Neigh | 0.36308 | 0.3849 | 0.41542 | 3.1 | 43.63
Comm | 0.07276 | 0.14322 | 0.1842 | 11.3 | 16.24
Output | 4.22e-05 | 5.2989e-05 | 8.2493e-05 | 0.0 | 0.01
Modify | 0.0057678 | 0.0060433 | 0.0065472 | 0.4 | 0.69
Other | | 0.01134 | | | 1.29
Nlocal: 8000 ave 8014 max 7986 min
Nlocal: 8000.00 ave 8014 max 7986 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Nghost: 6744 ave 6764 max 6726 min
Nghost: 6744.00 ave 6764 max 6726 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Neighs: 165107 ave 166433 max 163419 min
Neighs: 165107.0 ave 166433 max 163419 min
Histogram: 1 0 1 0 0 0 0 0 0 2
Total # of neighbors = 660428
Ave neighs/atom = 20.6384
Ave neighs/atom = 20.638375
Neighbor list builds = 50
Dangerous builds = 0
Total wall time: 0:00:00


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Cu in EAM
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice fcc 3.615
Lattice spacing in x,y,z = 3.615 3.615 3.615
Lattice spacing in x,y,z = 3.6150000 3.6150000 3.6150000
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (72.3 72.3 72.3)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (72.300000 72.300000 72.300000)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00185037 secs
create_atoms CPU = 0.002 seconds
pair_style eam
pair_coeff 1 1 Cu_u3.eam
Reading eam potential file Cu_u3.eam with DATE: 2007-06-11
velocity all create 1600.0 376847 loop geom
@@ -45,30 +45,30 @@ Per MPI rank memory allocation (min/avg/max) = 16.83 | 16.83 | 16.83 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -113280 0 -106662.09 18703.573
100 801.832 -109957.3 0 -106640.77 51322.821
Loop time of 3.92295 on 1 procs for 100 steps with 32000 atoms
Loop time of 3.70399 on 1 procs for 100 steps with 32000 atoms
Performance: 11.012 ns/day, 2.179 hours/ns, 25.491 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 11.663 ns/day, 2.058 hours/ns, 26.998 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3913 | 3.3913 | 3.3913 | 0.0 | 86.45
Neigh | 0.48107 | 0.48107 | 0.48107 | 0.0 | 12.26
Comm | 0.01729 | 0.01729 | 0.01729 | 0.0 | 0.44
Output | 0.00011253 | 0.00011253 | 0.00011253 | 0.0 | 0.00
Modify | 0.024349 | 0.024349 | 0.024349 | 0.0 | 0.62
Other | | 0.008847 | | | 0.23
Pair | 3.2216 | 3.2216 | 3.2216 | 0.0 | 86.98
Neigh | 0.43766 | 0.43766 | 0.43766 | 0.0 | 11.82
Comm | 0.015404 | 0.015404 | 0.015404 | 0.0 | 0.42
Output | 0.000103 | 0.000103 | 0.000103 | 0.0 | 0.00
Modify | 0.021604 | 0.021604 | 0.021604 | 0.0 | 0.58
Other | | 0.007627 | | | 0.21
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 19909 ave 19909 max 19909 min
Nghost: 19909.0 ave 19909 max 19909 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.20778e+06 ave 1.20778e+06 max 1.20778e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1207784
Ave neighs/atom = 37.7433
Ave neighs/atom = 37.743250
Neighbor list builds = 13
Dangerous builds = 0
Total wall time: 0:00:03


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Cu in EAM
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice fcc 3.615
Lattice spacing in x,y,z = 3.615 3.615 3.615
Lattice spacing in x,y,z = 3.6150000 3.6150000 3.6150000
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (72.3 72.3 72.3)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (72.300000 72.300000 72.300000)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000595331 secs
create_atoms CPU = 0.001 seconds
pair_style eam
pair_coeff 1 1 Cu_u3.eam
Reading eam potential file Cu_u3.eam with DATE: 2007-06-11
velocity all create 1600.0 376847 loop geom
@@ -41,34 +41,34 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.381 | 7.381 | 7.381 Mbytes
Per MPI rank memory allocation (min/avg/max) = 7.382 | 7.382 | 7.382 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -113280 0 -106662.09 18703.573
100 801.832 -109957.3 0 -106640.77 51322.821
Loop time of 1.04497 on 4 procs for 100 steps with 32000 atoms
Loop time of 1.01466 on 4 procs for 100 steps with 32000 atoms
Performance: 41.341 ns/day, 0.581 hours/ns, 95.697 timesteps/s
99.4% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 42.576 ns/day, 0.564 hours/ns, 98.555 timesteps/s
99.3% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.88513 | 0.88724 | 0.89191 | 0.3 | 84.91
Neigh | 0.12418 | 0.12458 | 0.12511 | 0.1 | 11.92
Comm | 0.015654 | 0.020543 | 0.022984 | 2.0 | 1.97
Output | 4.8637e-05 | 5.8711e-05 | 8.6546e-05 | 0.0 | 0.01
Modify | 0.0085199 | 0.0085896 | 0.0086446 | 0.1 | 0.82
Other | | 0.003959 | | | 0.38
Pair | 0.86683 | 0.86797 | 0.86877 | 0.1 | 85.54
Neigh | 0.11567 | 0.11681 | 0.11992 | 0.5 | 11.51
Comm | 0.015399 | 0.017346 | 0.018526 | 0.9 | 1.71
Output | 4.6253e-05 | 8.1241e-05 | 0.00017262 | 0.0 | 0.01
Modify | 0.0085337 | 0.0085824 | 0.0086181 | 0.0 | 0.85
Other | | 0.003876 | | | 0.38
Nlocal: 8000 ave 8008 max 7993 min
Nlocal: 8000.00 ave 8008 max 7993 min
Histogram: 2 0 0 0 0 0 0 0 1 1
Nghost: 9130.25 ave 9138 max 9122 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 301946 ave 302392 max 301360 min
Neighs: 301946.0 ave 302392 max 301360 min
Histogram: 1 0 0 0 1 0 0 0 1 1
Total # of neighbors = 1207784
Ave neighs/atom = 37.7433
Ave neighs/atom = 37.743250
Neighbor list builds = 13
Dangerous builds = 0
Total wall time: 0:00:01


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# eFF benchmark of H plasma
@@ -7,12 +6,14 @@ units electron
atom_style electron
read_data data.eff
orthogonal box = (0 0 0) to (41.9118 41.9118 41.9118)
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (41.911791 41.911791 41.911791)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
read_data CPU = 0.040 seconds
pair_style eff/cut 12
pair_coeff * *
@@ -42,7 +43,7 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 370.9 | 370.9 | 370.9 Mbytes
Per MPI rank memory allocation (min/avg/max) = 371.5 | 371.5 | 371.5 Mbytes
Step TotEng PotEng KinEng Temp Press
0 4046.5854 796.63785 3249.9475 42763.133 4.4764483e+12
5 4046.5854 796.95799 3249.6274 42758.92 4.4728546e+12
@@ -65,33 +66,33 @@ Step TotEng PotEng KinEng Temp Press
90 4046.5857 864.14162 3182.4441 41874.916 4.3868277e+12
95 4046.5857 871.30234 3175.2834 41780.695 4.3805068e+12
100 4046.5858 878.76023 3167.8255 41682.563 4.3740731e+12
Loop time of 323.031 on 1 procs for 100 steps with 32000 atoms
Loop time of 344.943 on 1 procs for 100 steps with 32000 atoms
Performance: 26.747 fs/day, 0.897 hours/fs, 0.310 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 25.048 fs/day, 0.958 hours/fs, 0.290 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 322.78 | 322.78 | 322.78 | 0.0 | 99.92
Pair | 344.71 | 344.71 | 344.71 | 0.0 | 99.93
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.1876 | 0.1876 | 0.1876 | 0.0 | 0.06
Output | 0.0027025 | 0.0027025 | 0.0027025 | 0.0 | 0.00
Modify | 0.032475 | 0.032475 | 0.032475 | 0.0 | 0.01
Other | | 0.02538 | | | 0.01
Comm | 0.1763 | 0.1763 | 0.1763 | 0.0 | 0.05
Output | 0.0024362 | 0.0024362 | 0.0024362 | 0.0 | 0.00
Modify | 0.030869 | 0.030869 | 0.030869 | 0.0 | 0.01
Other | | 0.02272 | | | 0.01
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 114349 ave 114349 max 114349 min
Nghost: 114349.0 ave 114349 max 114349 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 8.10572e+07 ave 8.10572e+07 max 8.10572e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 81057159
Ave neighs/atom = 2533.04
Ave neighs/atom = 2533.0362
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:05:27
Total wall time: 0:05:49


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# eFF benchmark of H plasma
@@ -7,12 +6,14 @@ units electron
atom_style electron
read_data data.eff
orthogonal box = (0 0 0) to (41.9118 41.9118 41.9118)
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (41.911791 41.911791 41.911791)
1 by 2 by 2 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
read_data CPU = 0.064 seconds
pair_style eff/cut 12
pair_coeff * *
@@ -42,7 +43,7 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 101.4 | 104.8 | 108.3 Mbytes
Per MPI rank memory allocation (min/avg/max) = 102.1 | 105.5 | 109.0 Mbytes
Step TotEng PotEng KinEng Temp Press
0 4046.5854 796.63785 3249.9475 42763.133 4.4764483e+12
5 4046.5854 796.95799 3249.6274 42758.92 4.4728546e+12
@@ -65,33 +66,33 @@ Step TotEng PotEng KinEng Temp Press
90 4046.5857 864.14162 3182.4441 41874.916 4.3868277e+12
95 4046.5857 871.30234 3175.2834 41780.695 4.3805068e+12
100 4046.5858 878.76023 3167.8255 41682.563 4.3740731e+12
Loop time of 90.1636 on 4 procs for 100 steps with 32000 atoms
Loop time of 100.431 on 4 procs for 100 steps with 32000 atoms
Performance: 95.826 fs/day, 0.250 hours/fs, 1.109 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 86.029 fs/day, 0.279 hours/fs, 0.996 timesteps/s
95.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 83.772 | 86.516 | 89.593 | 29.5 | 95.95
Pair | 89.149 | 93.787 | 97.971 | 41.9 | 93.38
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.51677 | 3.5934 | 6.3368 | 144.6 | 3.99
Output | 0.0012872 | 0.0018208 | 0.0024981 | 1.0 | 0.00
Modify | 0.017231 | 0.018405 | 0.01983 | 0.8 | 0.02
Other | | 0.03431 | | | 0.04
Comm | 2.4073 | 6.5821 | 11.21 | 157.8 | 6.55
Output | 0.0014203 | 0.0094504 | 0.019111 | 8.3 | 0.01
Modify | 0.016678 | 0.016999 | 0.017425 | 0.2 | 0.02
Other | | 0.03524 | | | 0.04
Nlocal: 8000 ave 8112 max 7875 min
Nlocal: 8000.00 ave 8112 max 7875 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Nghost: 65589 ave 66004 max 65177 min
Nghost: 65589.0 ave 66004 max 65177 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 2.02643e+07 ave 2.11126e+07 max 1.94058e+07 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Total # of neighbors = 81057159
Ave neighs/atom = 2533.04
Ave neighs/atom = 2533.0362
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:01:31
Total wall time: 0:01:42


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# EIM benchmark
# if run long enough (e.g. 1M steps), the unstable CsCl form of a NaCl single
@@ -9,12 +8,14 @@ units metal
atom_style atomic
read_data data.eim
orthogonal box = (-0.5 -0.5 -0.5) to (71.58 143.66 71.58)
Reading data file ...
orthogonal box = (-0.5 -0.5 -0.5) to (71.580002 143.66000 71.580002)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
read_data CPU = 0.024 seconds
pair_style eim
pair_coeff * * Na Cl ffield.eim Na Cl
@@ -44,34 +45,34 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 17.72 | 17.72 | 17.72 Mbytes
Per MPI rank memory allocation (min/avg/max) = 17.74 | 17.74 | 17.74 Mbytes
Step PotEng Pxx Pyy Pzz Temp
0 -90567.58 -117883.6 -118039.81 -117894.07 1400
100 -91997.012 -4104.7052 -4138.276 -4145.8936 944.10136
Loop time of 11.4536 on 1 procs for 100 steps with 32000 atoms
100 -91997.39 -4127.237 -4160.9799 -4169.0581 944.09785
Loop time of 10.3731 on 1 procs for 100 steps with 32000 atoms
Performance: 0.377 ns/day, 63.631 hours/ns, 8.731 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 0.416 ns/day, 57.628 hours/ns, 9.640 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 9.8277 | 9.8277 | 9.8277 | 0.0 | 85.80
Neigh | 1.484 | 1.484 | 1.484 | 0.0 | 12.96
Comm | 0.028584 | 0.028584 | 0.028584 | 0.0 | 0.25
Output | 0.00023127 | 0.00023127 | 0.00023127 | 0.0 | 0.00
Modify | 0.09791 | 0.09791 | 0.09791 | 0.0 | 0.85
Other | | 0.0152 | | | 0.13
Pair | 8.8937 | 8.8937 | 8.8937 | 0.0 | 85.74
Neigh | 1.344 | 1.344 | 1.344 | 0.0 | 12.96
Comm | 0.028207 | 0.028207 | 0.028207 | 0.0 | 0.27
Output | 0.00020099 | 0.00020099 | 0.00020099 | 0.0 | 0.00
Modify | 0.093584 | 0.093584 | 0.093584 | 0.0 | 0.90
Other | | 0.0134 | | | 0.13
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 21505 ave 21505 max 21505 min
Nghost: 21505.0 ave 21505 max 21505 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.5839e+06 ave 1.5839e+06 max 1.5839e+06 min
Neighs: 1.58387e+06 ave 1.58387e+06 max 1.58387e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1583901
Ave neighs/atom = 49.4969
Total # of neighbors = 1583871
Ave neighs/atom = 49.495969
Neighbor list builds = 37
Dangerous builds = 12
Total wall time: 0:00:11
Total wall time: 0:00:10


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# EIM benchmark
# if run long enough (e.g. 1M steps), the unstable CsCl form of a NaCl single
@@ -9,12 +8,14 @@ units metal
atom_style atomic
read_data data.eim
orthogonal box = (-0.5 -0.5 -0.5) to (71.58 143.66 71.58)
Reading data file ...
orthogonal box = (-0.5 -0.5 -0.5) to (71.580002 143.66000 71.580002)
1 by 4 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
read_data CPU = 0.023 seconds
pair_style eim
pair_coeff * * Na Cl ffield.eim Na Cl
@@ -44,34 +45,34 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.285 | 7.285 | 7.285 Mbytes
Per MPI rank memory allocation (min/avg/max) = 7.460 | 7.460 | 7.460 Mbytes
Step PotEng Pxx Pyy Pzz Temp
0 -90567.58 -117883.6 -118039.81 -117894.07 1400
100 -91997.012 -4104.7052 -4138.276 -4145.8936 944.10136
Loop time of 3.12061 on 4 procs for 100 steps with 32000 atoms
100 -91997.39 -4127.237 -4160.9799 -4169.0581 944.09785
Loop time of 3.14457 on 4 procs for 100 steps with 32000 atoms
Performance: 1.384 ns/day, 17.337 hours/ns, 32.045 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 1.374 ns/day, 17.470 hours/ns, 31.801 timesteps/s
95.8% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.6504 | 2.6583 | 2.6685 | 0.5 | 85.18
Neigh | 0.36996 | 0.37847 | 0.39396 | 1.5 | 12.13
Comm | 0.037041 | 0.040586 | 0.04504 | 1.4 | 1.30
Output | 7.081e-05 | 8.75e-05 | 0.00012994 | 0.0 | 0.00
Modify | 0.029286 | 0.035978 | 0.047942 | 3.9 | 1.15
Other | | 0.007206 | | | 0.23
Pair | 2.6017 | 2.6264 | 2.6758 | 1.8 | 83.52
Neigh | 0.34384 | 0.35308 | 0.36784 | 1.6 | 11.23
Comm | 0.039635 | 0.099661 | 0.15326 | 15.0 | 3.17
Output | 6.485e-05 | 9.656e-05 | 0.0001905 | 0.0 | 0.00
Modify | 0.035666 | 0.055446 | 0.098401 | 10.6 | 1.76
Other | | 0.009939 | | | 0.32
Nlocal: 8000 ave 8000 max 8000 min
Nlocal: 8000.00 ave 8000 max 8000 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 9460.25 ave 9469 max 9449 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Neighs: 395975 ave 397239 max 394616 min
Neighs: 395968.0 ave 397233 max 394606 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Total # of neighbors = 1583901
Ave neighs/atom = 49.4969
Total # of neighbors = 1583871
Ave neighs/atom = 49.495969
Neighbor list builds = 37
Dangerous builds = 12
Total wall time: 0:00:03


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# FENE beadspring benchmark
@@ -8,7 +7,8 @@ atom_style bond
special_bonds fene
read_data data.fene
orthogonal box = (-16.796 -16.796 -16.796) to (16.796 16.796 16.796)
Reading data file ...
orthogonal box = (-16.796000 -16.796000 -16.796000) to (16.796000 16.796000 16.796000)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
@@ -18,8 +18,13 @@ read_data data.fene
1 = max bonds/atom
reading bonds ...
31680 bonds
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 1.0 1.0
special bond factors coul: 0.0 1.0 1.0
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.003 seconds
read_data CPU = 0.054 seconds
neighbor 0.4 bin
neigh_modify delay 5 every 1
@@ -49,36 +54,37 @@ Neighbor list info ...
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 13.18 | 13.18 | 13.18 Mbytes
WARNING: Communication cutoff 1.52 is shorter than a bond length based estimate of 1.855. This may lead to errors. (src/comm.cpp:667)
Per MPI rank memory allocation (min/avg/max) = 13.20 | 13.20 | 13.20 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.97029772 0.44484087 20.494523 22.394765 4.6721833
100 0.9729966 0.4361122 20.507698 22.40326 4.6548819
Loop time of 0.66285 on 1 procs for 100 steps with 32000 atoms
Loop time of 0.648089 on 1 procs for 100 steps with 32000 atoms
Performance: 156415.445 tau/day, 150.864 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 159978.044 tau/day, 154.300 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.13075 | 0.13075 | 0.13075 | 0.0 | 19.73
Bond | 0.046363 | 0.046363 | 0.046363 | 0.0 | 6.99
Neigh | 0.3172 | 0.3172 | 0.3172 | 0.0 | 47.85
Comm | 0.016553 | 0.016553 | 0.016553 | 0.0 | 2.50
Output | 0.00010395 | 0.00010395 | 0.00010395 | 0.0 | 0.02
Modify | 0.14515 | 0.14515 | 0.14515 | 0.0 | 21.90
Other | | 0.006728 | | | 1.02
Pair | 0.12174 | 0.12174 | 0.12174 | 0.0 | 18.78
Bond | 0.050688 | 0.050688 | 0.050688 | 0.0 | 7.82
Neigh | 0.33136 | 0.33136 | 0.33136 | 0.0 | 51.13
Comm | 0.014753 | 0.014753 | 0.014753 | 0.0 | 2.28
Output | 9.8467e-05 | 9.8467e-05 | 9.8467e-05 | 0.0 | 0.02
Modify | 0.12378 | 0.12378 | 0.12378 | 0.0 | 19.10
Other | | 0.005668 | | | 0.87
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9493 ave 9493 max 9493 min
Nghost: 9493.00 ave 9493 max 9493 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 155873 ave 155873 max 155873 min
Neighs: 155873.0 ave 155873 max 155873 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 155873
Ave neighs/atom = 4.87103
Ave special neighs/atom = 1.98
Ave neighs/atom = 4.8710312
Ave special neighs/atom = 1.9800000
Neighbor list builds = 20
Dangerous builds = 20
Total wall time: 0:00:00


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# FENE beadspring benchmark
@@ -8,7 +7,8 @@ atom_style bond
special_bonds fene
read_data data.fene
orthogonal box = (-16.796 -16.796 -16.796) to (16.796 16.796 16.796)
Reading data file ...
orthogonal box = (-16.796000 -16.796000 -16.796000) to (16.796000 16.796000 16.796000)
1 by 2 by 2 MPI processor grid
reading atoms ...
32000 atoms
@@ -18,8 +18,13 @@ read_data data.fene
1 = max bonds/atom
reading bonds ...
31680 bonds
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 1.0 1.0
special bond factors coul: 0.0 1.0 1.0
2 = max # of 1-2 neighbors
2 = max # of special neighbors
special bonds CPU = 0.001 seconds
read_data CPU = 0.048 seconds
neighbor 0.4 bin
neigh_modify delay 5 every 1
@@ -49,36 +54,37 @@ Neighbor list info ...
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.605 | 4.605 | 4.606 Mbytes
WARNING: Communication cutoff 1.52 is shorter than a bond length based estimate of 1.855. This may lead to errors. (src/comm.cpp:667)
Per MPI rank memory allocation (min/avg/max) = 4.779 | 4.780 | 4.780 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.97029772 0.44484087 20.494523 22.394765 4.6721833
100 0.9736748 0.44378481 20.502389 22.40664 4.7809557
Loop time of 0.184782 on 4 procs for 100 steps with 32000 atoms
Loop time of 0.179123 on 4 procs for 100 steps with 32000 atoms
Performance: 561093.346 tau/day, 541.178 timesteps/s
98.4% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 578819.228 tau/day, 558.275 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.033747 | 0.034391 | 0.035036 | 0.3 | 18.61
Bond | 0.012475 | 0.012579 | 0.012812 | 0.1 | 6.81
Neigh | 0.083916 | 0.083953 | 0.084022 | 0.0 | 45.43
Comm | 0.012409 | 0.01363 | 0.014534 | 0.7 | 7.38
Output | 4.1246e-05 | 5.9545e-05 | 0.00010443 | 0.0 | 0.03
Modify | 0.036675 | 0.037876 | 0.038357 | 0.4 | 20.50
Other | | 0.002294 | | | 1.24
Pair | 0.031898 | 0.032311 | 0.032864 | 0.2 | 18.04
Bond | 0.01335 | 0.013471 | 0.013588 | 0.1 | 7.52
Neigh | 0.087105 | 0.087195 | 0.087282 | 0.0 | 48.68
Comm | 0.010541 | 0.011533 | 0.012463 | 0.7 | 6.44
Output | 3.8624e-05 | 5.6028e-05 | 0.00010157 | 0.0 | 0.03
Modify | 0.031766 | 0.03233 | 0.033015 | 0.3 | 18.05
Other | | 0.002227 | | | 1.24
Nlocal: 8000 ave 8023 max 7978 min
Nlocal: 8000.00 ave 8023 max 7978 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 4158.75 ave 4175 max 4145 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 38940 ave 39184 max 38640 min
Neighs: 38940.0 ave 39184 max 38640 min
Histogram: 1 0 0 0 0 1 1 0 0 1
Total # of neighbors = 155760
Ave neighs/atom = 4.8675
Ave special neighs/atom = 1.98
Ave neighs/atom = 4.8675000
Ave special neighs/atom = 1.9800000
Neighbor list builds = 20
Dangerous builds = 20
Total wall time: 0:00:00


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# Gay-Berne benchmark
# biaxial ellipsoid mesogens in isotropic phase
@@ -18,13 +17,15 @@ atom_style ellipsoid
#set group all quat/random 982381
read_data data.gb
orthogonal box = (2.19575 2.19575 2.19575) to (50.8124 50.8124 50.8124)
Reading data file ...
orthogonal box = (2.1957493 2.1957493 2.1957493) to (50.812373 50.812373 50.812373)
1 by 1 by 1 MPI processor grid
reading atoms ...
32768 atoms
reading velocities ...
32768 velocities
32768 ellipsoids
read_data CPU = 0.097 seconds
compute rot all temp/asphere
group spheroid type 1
@@ -63,41 +64,41 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 28.91 | 28.91 | 28.91 Mbytes
Per MPI rank memory allocation (min/avg/max) = 28.98 | 28.98 | 28.98 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.4 0.50438568 0 4.1042758 6.7818168 114909.09
20 2.7357818 0.26045557 0 4.364003 6.8299368 111715.16
40 2.9201296 0.22570735 0 4.605768 7.0767907 109473.23
60 2.9820039 0.19733812 0 4.6702075 7.1507065 108393.77
80 3.0148529 0.15114819 0 4.6732895 7.1699502 107672.24
100 3.0206703 0.10567623 0 4.6365433 7.154345 107184.83
Loop time of 43.7894 on 1 procs for 100 steps with 32768 atoms
20 2.7357797 0.26044978 0 4.363994 6.8299173 111715.2
40 2.9201268 0.2257049 0 4.6057615 7.0767796 109473.26
60 2.9820022 0.19733756 0 4.6702044 7.1507023 108393.79
80 3.014852 0.15114765 0 4.6732876 7.1699472 107672.25
100 3.0206698 0.105676 0 4.6365424 7.1543436 107184.84
Loop time of 57.1053 on 1 procs for 100 steps with 32768 atoms
Performance: 394.616 tau/day, 2.284 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 302.599 tau/day, 1.751 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 42.881 | 42.881 | 42.881 | 0.0 | 97.93
Neigh | 0.35071 | 0.35071 | 0.35071 | 0.0 | 0.80
Comm | 0.065153 | 0.065153 | 0.065153 | 0.0 | 0.15
Output | 0.00054383 | 0.00054383 | 0.00054383 | 0.0 | 0.00
Modify | 0.47852 | 0.47852 | 0.47852 | 0.0 | 1.09
Other | | 0.01337 | | | 0.03
Pair | 56.246 | 56.246 | 56.246 | 0.0 | 98.50
Neigh | 0.31058 | 0.31058 | 0.31058 | 0.0 | 0.54
Comm | 0.066039 | 0.066039 | 0.066039 | 0.0 | 0.12
Output | 0.00048757 | 0.00048757 | 0.00048757 | 0.0 | 0.00
Modify | 0.46972 | 0.46972 | 0.46972 | 0.0 | 0.82
Other | | 0.01198 | | | 0.02
Nlocal: 32768 ave 32768 max 32768 min
Nlocal: 32768.0 ave 32768 max 32768 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 25669 ave 25669 max 25669 min
Nghost: 25669.0 ave 25669 max 25669 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 2.30433e+06 ave 2.30433e+06 max 2.30433e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 2304332
Ave neighs/atom = 70.3226
Total # of neighbors = 2304331
Ave neighs/atom = 70.322601
Neighbor list builds = 6
Dangerous builds = 3
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:44
Total wall time: 0:00:57


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# Gay-Berne benchmark
# biaxial ellipsoid mesogens in isotropic phase
@@ -18,13 +17,15 @@ atom_style ellipsoid
#set group all quat/random 982381
read_data data.gb
orthogonal box = (2.19575 2.19575 2.19575) to (50.8124 50.8124 50.8124)
Reading data file ...
orthogonal box = (2.1957493 2.1957493 2.1957493) to (50.812373 50.812373 50.812373)
1 by 2 by 2 MPI processor grid
reading atoms ...
32768 atoms
reading velocities ...
32768 velocities
32768 ellipsoids
read_data CPU = 0.079 seconds
compute rot all temp/asphere
group spheroid type 1
@@ -63,41 +64,41 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 11.78 | 11.78 | 11.78 Mbytes
Per MPI rank memory allocation (min/avg/max) = 11.99 | 11.99 | 12.00 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.4 0.50438568 0 4.1042758 6.7818168 114909.09
20 2.7357818 0.26045557 0 4.364003 6.8299368 111715.16
40 2.9201296 0.22570735 0 4.605768 7.0767907 109473.23
60 2.9820039 0.19733812 0 4.6702075 7.1507065 108393.77
80 3.0148529 0.15114819 0 4.6732895 7.1699502 107672.24
100 3.0206703 0.10567623 0 4.6365433 7.154345 107184.83
Loop time of 11.3124 on 4 procs for 100 steps with 32768 atoms
20 2.7357797 0.26044978 0 4.363994 6.8299173 111715.2
40 2.9201268 0.2257049 0 4.6057615 7.0767796 109473.26
60 2.9820022 0.19733756 0 4.6702044 7.1507023 108393.79
80 3.014852 0.15114765 0 4.6732876 7.1699472 107672.25
100 3.0206698 0.105676 0 4.6365424 7.1543436 107184.84
Loop time of 14.9338 on 4 procs for 100 steps with 32768 atoms
Performance: 1527.522 tau/day, 8.840 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 1157.109 tau/day, 6.696 timesteps/s
99.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 10.778 | 10.849 | 10.934 | 2.0 | 95.90
Neigh | 0.088265 | 0.08871 | 0.089238 | 0.1 | 0.78
Comm | 0.1384 | 0.22518 | 0.29662 | 14.1 | 1.99
Output | 0.00020599 | 0.00024837 | 0.00036836 | 0.0 | 0.00
Modify | 0.13828 | 0.13899 | 0.13984 | 0.2 | 1.23
Other | | 0.01053 | | | 0.09
Pair | 14.317 | 14.457 | 14.545 | 2.5 | 96.81
Neigh | 0.080048 | 0.080928 | 0.082009 | 0.3 | 0.54
Comm | 0.15948 | 0.24734 | 0.38914 | 18.9 | 1.66
Output | 0.00018859 | 0.00034791 | 0.00082254 | 0.0 | 0.00
Modify | 0.137 | 0.13804 | 0.13981 | 0.3 | 0.92
Other | | 0.01041 | | | 0.07
Nlocal: 8192 ave 8215 max 8166 min
Nlocal: 8192.00 ave 8215 max 8166 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Nghost: 11972.5 ave 11984 max 11959 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 576083 ave 579616 max 572161 min
Neighs: 576083.0 ave 579616 max 572161 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Total # of neighbors = 2304332
Ave neighs/atom = 70.3226
Total # of neighbors = 2304331
Ave neighs/atom = 70.322601
Neighbor list builds = 6
Dangerous builds = 3
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:11
Total wall time: 0:00:15


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# granular chute flow
@@ -10,12 +9,14 @@ newton off
comm_modify vel yes
read_data data.granular
orthogonal box = (0 0 0) to (40 20 37.2886)
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (40.000000 20.000000 37.288600)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
read_data CPU = 0.050 seconds
pair_style gran/hooke/history 200000.0 NULL 50.0 NULL 0.5 0
pair_coeff * *
@@ -52,34 +53,34 @@ Neighbor list info ...
pair build: half/size/bin/newtoff
stencil: half/bin/3d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 23.36 | 23.36 | 23.36 Mbytes
Per MPI rank memory allocation (min/avg/max) = 23.37 | 23.37 | 23.37 Mbytes
Step Atoms KinEng c_1 Volume
0 32000 784139.13 1601.1263 29833.783
100 32000 784292.08 1571.0968 29834.707
Loop time of 0.292816 on 1 procs for 100 steps with 32000 atoms
Loop time of 0.274779 on 1 procs for 100 steps with 32000 atoms
Performance: 2950.657 tau/day, 341.511 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 3144.341 tau/day, 363.928 timesteps/s
99.4% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.17449 | 0.17449 | 0.17449 | 0.0 | 59.59
Neigh | 0.031927 | 0.031927 | 0.031927 | 0.0 | 10.90
Comm | 0.010195 | 0.010195 | 0.010195 | 0.0 | 3.48
Output | 0.00019121 | 0.00019121 | 0.00019121 | 0.0 | 0.07
Modify | 0.064463 | 0.064463 | 0.064463 | 0.0 | 22.01
Other | | 0.01155 | | | 3.94
Pair | 0.16956 | 0.16956 | 0.16956 | 0.0 | 61.71
Neigh | 0.027646 | 0.027646 | 0.027646 | 0.0 | 10.06
Comm | 0.010068 | 0.010068 | 0.010068 | 0.0 | 3.66
Output | 0.00017285 | 0.00017285 | 0.00017285 | 0.0 | 0.06
Modify | 0.056372 | 0.056372 | 0.056372 | 0.0 | 20.52
Other | | 0.01096 | | | 3.99
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 5463 ave 5463 max 5463 min
Nghost: 5463.00 ave 5463 max 5463 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 115133 ave 115133 max 115133 min
Neighs: 115133.0 ave 115133 max 115133 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 115133
Ave neighs/atom = 3.59791
Ave neighs/atom = 3.5979062
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:00


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# granular chute flow
@@ -10,12 +9,14 @@ newton off
comm_modify vel yes
read_data data.granular
orthogonal box = (0 0 0) to (40 20 37.2886)
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (40.000000 20.000000 37.288600)
2 by 1 by 2 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
read_data CPU = 0.052 seconds
pair_style gran/hooke/history 200000.0 NULL 50.0 NULL 0.5 0
pair_coeff * *
@@ -52,34 +53,34 @@ Neighbor list info ...
pair build: half/size/bin/newtoff
stencil: half/bin/3d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 10.41 | 10.42 | 10.42 Mbytes
Per MPI rank memory allocation (min/avg/max) = 10.59 | 10.59 | 10.60 Mbytes
Step Atoms KinEng c_1 Volume
0 32000 784139.13 1601.1263 29833.783
100 32000 784292.08 1571.0968 29834.707
Loop time of 0.0903978 on 4 procs for 100 steps with 32000 atoms
Loop time of 0.0952788 on 4 procs for 100 steps with 32000 atoms
Performance: 9557.751 tau/day, 1106.221 timesteps/s
98.3% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 9068.124 tau/day, 1049.551 timesteps/s
95.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.046331 | 0.049088 | 0.052195 | 1.2 | 54.30
Neigh | 0.0090401 | 0.0091327 | 0.0091863 | 0.1 | 10.10
Comm | 0.0073855 | 0.0080023 | 0.0086699 | 0.6 | 8.85
Output | 7.1049e-05 | 0.00010067 | 0.00012088 | 0.0 | 0.11
Modify | 0.017226 | 0.017449 | 0.01803 | 0.3 | 19.30
Other | | 0.006625 | | | 7.33
Pair | 0.044316 | 0.047274 | 0.049681 | 1.0 | 49.62
Neigh | 0.0079038 | 0.0079354 | 0.0079608 | 0.0 | 8.33
Comm | 0.0082569 | 0.0089372 | 0.0094819 | 0.5 | 9.38
Output | 6.9857e-05 | 9.3222e-05 | 0.00010514 | 0.0 | 0.10
Modify | 0.015689 | 0.016034 | 0.016789 | 0.4 | 16.83
Other | | 0.015 | | | 15.75
Nlocal: 8000 ave 8008 max 7992 min
Nlocal: 8000.00 ave 8008 max 7992 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 2439 ave 2450 max 2428 min
Nghost: 2439.00 ave 2450 max 2428 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 29500.5 ave 30488 max 28513 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Total # of neighbors = 118002
Ave neighs/atom = 3.68756
Ave neighs/atom = 3.6875625
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:00


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt
@@ -7,14 +6,14 @@ units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (33.591924 33.591924 33.591924)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00183916 secs
create_atoms CPU = 0.002 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
@@ -44,30 +43,30 @@ Per MPI rank memory allocation (min/avg/max) = 15.82 | 15.82 | 15.82 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6134356 -5.0197073
100 0.75745998 -5.7584998 0 -4.6223453 0.20729996
Loop time of 1.721 on 1 procs for 100 steps with 32000 atoms
Loop time of 1.59245 on 1 procs for 100 steps with 32000 atoms
Performance: 25101.720 tau/day, 58.106 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 27127.959 tau/day, 62.796 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.2551 | 1.2551 | 1.2551 | 0.0 | 72.93
Neigh | 0.41825 | 0.41825 | 0.41825 | 0.0 | 24.30
Comm | 0.015347 | 0.015347 | 0.015347 | 0.0 | 0.89
Output | 0.00010729 | 0.00010729 | 0.00010729 | 0.0 | 0.01
Modify | 0.023436 | 0.023436 | 0.023436 | 0.0 | 1.36
Other | | 0.008766 | | | 0.51
Pair | 1.1654 | 1.1654 | 1.1654 | 0.0 | 73.18
Neigh | 0.38321 | 0.38321 | 0.38321 | 0.0 | 24.06
Comm | 0.014476 | 0.014476 | 0.014476 | 0.0 | 0.91
Output | 9.5844e-05 | 9.5844e-05 | 9.5844e-05 | 0.0 | 0.01
Modify | 0.021453 | 0.021453 | 0.021453 | 0.0 | 1.35
Other | | 0.007799 | | | 0.49
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 19669 ave 19669 max 19669 min
Nghost: 19669.0 ave 19669 max 19669 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.20318e+06 ave 1.20318e+06 max 1.20318e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1203176
Ave neighs/atom = 37.5992
Ave neighs/atom = 37.599250
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:01


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt
@@ -7,14 +6,14 @@ units lj
atom_style atomic
lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
Lattice spacing in x,y,z = 1.6795962 1.6795962 1.6795962
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (33.591924 33.591924 33.591924)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000587225 secs
create_atoms CPU = 0.001 seconds
mass 1 1.0
velocity all create 1.44 87287 loop geom
@@ -40,34 +39,34 @@ Neighbor list info ...
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.88 | 6.88 | 6.88 Mbytes
Per MPI rank memory allocation (min/avg/max) = 6.881 | 6.881 | 6.881 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6134356 -5.0197073
100 0.75745998 -5.7584998 0 -4.6223453 0.20729996
Loop time of 0.469936 on 4 procs for 100 steps with 32000 atoms
Loop time of 0.452443 on 4 procs for 100 steps with 32000 atoms
Performance: 91927.316 tau/day, 212.795 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 95481.741 tau/day, 221.023 timesteps/s
98.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.32713 | 0.32917 | 0.33317 | 0.4 | 70.05
Neigh | 0.10836 | 0.10931 | 0.11007 | 0.2 | 23.26
Comm | 0.015526 | 0.020355 | 0.022399 | 2.0 | 4.33
Output | 4.2439e-05 | 5.8353e-05 | 0.00010061 | 0.0 | 0.01
Modify | 0.0071156 | 0.0072448 | 0.007309 | 0.1 | 1.54
Other | | 0.003793 | | | 0.81
Pair | 0.31149 | 0.3132 | 0.31493 | 0.2 | 69.22
Neigh | 0.1006 | 0.10164 | 0.10385 | 0.4 | 22.47
Comm | 0.02195 | 0.025904 | 0.028603 | 1.6 | 5.73
Output | 4.3631e-05 | 7.534e-05 | 0.00015879 | 0.0 | 0.02
Modify | 0.0067751 | 0.0073788 | 0.0088398 | 1.0 | 1.63
Other | | 0.004243 | | | 0.94
Nlocal: 8000 ave 8041 max 7958 min
Nlocal: 8000.00 ave 8041 max 7958 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 9011 ave 9065 max 8961 min
Nghost: 9011.00 ave 9065 max 8961 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Neighs: 300794 ave 304843 max 297317 min
Neighs: 300794.0 ave 304843 max 297317 min
Histogram: 1 0 0 1 1 0 0 0 0 1
Total # of neighbors = 1203176
Ave neighs/atom = 37.5992
Ave neighs/atom = 37.599250
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:00


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in MEAM
@@ -7,17 +6,19 @@ units metal
atom_style atomic
lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
Lattice spacing in x,y,z = 3.5200000 3.5200000 3.5200000
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (70.400000 70.400000 70.400000)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00184226 secs
create_atoms CPU = 0.002 seconds
pair_style meam/c
pair_coeff * * library.meam Ni4 Ni.meam Ni4
Reading potential file library.meam with DATE: 2012-06-29
Reading potential file Ni.meam with DATE: 2007-06-11
velocity all create 1600.0 376847 loop geom
@@ -47,37 +48,37 @@ Neighbor list info ...
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 55.91 | 55.91 | 55.91 Mbytes
Per MPI rank memory allocation (min/avg/max) = 55.92 | 55.92 | 55.92 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.18
50 885.10702 -139411.51 0 -135750.54 32425.431
100 895.50973 -139454.3 0 -135750.3 31804.185
Loop time of 22.9343 on 1 procs for 100 steps with 32000 atoms
Loop time of 21.655 on 1 procs for 100 steps with 32000 atoms
Performance: 1.884 ns/day, 12.741 hours/ns, 4.360 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 1.995 ns/day, 12.031 hours/ns, 4.618 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 22.397 | 22.397 | 22.397 | 0.0 | 97.66
Neigh | 0.48781 | 0.48781 | 0.48781 | 0.0 | 2.13
Comm | 0.013967 | 0.013967 | 0.013967 | 0.0 | 0.06
Output | 0.00022793 | 0.00022793 | 0.00022793 | 0.0 | 0.00
Modify | 0.025412 | 0.025412 | 0.025412 | 0.0 | 0.11
Other | | 0.009448 | | | 0.04
Pair | 21.181 | 21.181 | 21.181 | 0.0 | 97.81
Neigh | 0.42787 | 0.42787 | 0.42787 | 0.0 | 1.98
Comm | 0.013557 | 0.013557 | 0.013557 | 0.0 | 0.06
Output | 0.00020766 | 0.00020766 | 0.00020766 | 0.0 | 0.00
Modify | 0.023456 | 0.023456 | 0.023456 | 0.0 | 0.11
Other | | 0.008504 | | | 0.04
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 13576 ave 13576 max 13576 min
Nghost: 13576.0 ave 13576 max 13576 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 780360 ave 780360 max 780360 min
Neighs: 780360.0 ave 780360 max 780360 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1.56072e+06 ave 1.56072e+06 max 1.56072e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 1560720
Ave neighs/atom = 48.7725
Ave neighs/atom = 48.772500
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:23
Total wall time: 0:00:21


@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in MEAM
@@ -7,17 +6,19 @@ units metal
atom_style atomic
lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
Lattice spacing in x,y,z = 3.5200000 3.5200000 3.5200000
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (70.400000 70.400000 70.400000)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00058651 secs
create_atoms CPU = 0.001 seconds
pair_style meam/c
pair_coeff * * library.meam Ni4 Ni.meam Ni4
Reading potential file library.meam with DATE: 2012-06-29
Reading potential file Ni.meam with DATE: 2007-06-11
velocity all create 1600.0 376847 loop geom
@@ -47,37 +48,37 @@ Neighbor list info ...
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 17.41 | 17.41 | 17.41 Mbytes
Per MPI rank memory allocation (min/avg/max) = 17.42 | 17.42 | 17.42 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.18
50 885.10702 -139411.51 0 -135750.54 32425.431
100 895.50973 -139454.3 0 -135750.3 31804.185
Loop time of 6.45947 on 4 procs for 100 steps with 32000 atoms
Loop time of 6.34746 on 4 procs for 100 steps with 32000 atoms
Performance: 6.688 ns/day, 3.589 hours/ns, 15.481 timesteps/s
Performance: 6.806 ns/day, 3.526 hours/ns, 15.754 timesteps/s
98.0% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 6.22 | 6.2385 | 6.265 | 0.7 | 96.58
Neigh | 0.12657 | 0.12691 | 0.12721 | 0.1 | 1.96
Comm | 0.052339 | 0.07915 | 0.097897 | 5.9 | 1.23
Output | 9.7752e-05 | 0.0001151 | 0.00016594 | 0.0 | 0.00
Modify | 0.010194 | 0.010291 | 0.010442 | 0.1 | 0.16
Other | | 0.004529 | | | 0.07
Pair | 6.0585 | 6.1109 | 6.1535 | 1.4 | 96.27
Neigh | 0.11286 | 0.11651 | 0.12455 | 1.4 | 1.84
Comm | 0.058046 | 0.099641 | 0.15569 | 11.7 | 1.57
Output | 9.0122e-05 | 0.00016046 | 0.0003624 | 0.0 | 0.00
Modify | 0.010822 | 0.011674 | 0.014224 | 1.4 | 0.18
Other | | 0.008601 | | | 0.14
Nlocal: 8000 ave 8045 max 7947 min
Nlocal: 8000.00 ave 8045 max 7947 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Nghost: 6066.75 ave 6120 max 6021 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 195090 ave 196403 max 193697 min
Neighs: 195090.0 ave 196403 max 193697 min
Histogram: 1 0 0 1 0 0 0 1 0 1
FullNghs: 390180 ave 392616 max 387490 min
FullNghs: 390180.0 ave 392616 max 387490 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Total # of neighbors = 1560720
Ave neighs/atom = 48.7725
Ave neighs/atom = 48.772500
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:06

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# Crack growth in notched 3D Peridynamic block
@@ -58,11 +57,11 @@ region plate block 0 0.01975 0 0.01575 ${myzmin} ${myzmax} units bo
region plate block 0 0.01975 0 0.01575 0 ${myzmax} units box
region plate block 0 0.01975 0 0.01575 0 0.01225 units box
create_box 3 plate
Created orthogonal box = (0 0 0) to (0.01975 0.01575 0.01225)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (0.01975 0.01575 0.01225)
1 by 1 by 1 MPI processor grid
create_atoms 1 region plate
Created 32000 atoms
Time spent = 0.00362897 secs
create_atoms CPU = 0.004 seconds
pair_style peri/pmb
@@ -101,8 +100,10 @@ region topright block 0.009875 0.01975 0.01075 0.01575 ${myzmin} ${myzmax}
region topright block 0.009875 0.01975 0.01075 0.01575 0 ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 0 0.01225 units box
set region topleft type 2
Setting atom values ...
5000 settings made for type
set region topright type 3
Setting atom values ...
5000 settings made for type
pair_coeff 1 1 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
@@ -129,11 +130,13 @@ pair_coeff 1 3 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
set group all density ${mydensity}
set group all density 2440
Setting atom values ...
32000 settings made for density
variable myvolume equal ($h)^3
variable myvolume equal (0.0005)^3
set group all volume ${myvolume}
set group all volume 1.25e-10
Setting atom values ...
32000 settings made for volume
velocity all set 0.0 0.0 0.0 sum no units box
@@ -184,23 +187,23 @@ Step Temp E_pair E_mol TotEng Press Volume
60 9.8975313e+26 5.7284448e+08 0 1.2287455e+09 1.2048543e+14 3.6292128e-06
80 9.3888573e+26 4.0928092e+08 0 1.0314725e+09 1.1429321e+14 3.6292128e-06
100 8.3930314e+26 3.8522361e+08 0 9.4142265e+08 1.0217075e+14 3.6292128e-06
Loop time of 11.0398 on 1 procs for 100 steps with 32000 atoms
Loop time of 10.1036 on 1 procs for 100 steps with 32000 atoms
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 11.005 | 11.005 | 11.005 | 0.0 | 99.68
Pair | 10.07 | 10.07 | 10.07 | 0.0 | 99.67
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 6.6042e-05 | 6.6042e-05 | 6.6042e-05 | 0.0 | 0.00
Output | 0.00057292 | 0.00057292 | 0.00057292 | 0.0 | 0.01
Modify | 0.0256 | 0.0256 | 0.0256 | 0.0 | 0.23
Other | | 0.008592 | | | 0.08
Comm | 6.6519e-05 | 6.6519e-05 | 6.6519e-05 | 0.0 | 0.00
Output | 0.00051737 | 0.00051737 | 0.00051737 | 0.0 | 0.01
Modify | 0.024288 | 0.024288 | 0.024288 | 0.0 | 0.24
Other | | 0.008486 | | | 0.08
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Nghost: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 6.74442e+06 ave 6.74442e+06 max 6.74442e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
@@ -208,10 +211,10 @@ FullNghs: 1.34888e+07 ave 1.34888e+07 max 1.34888e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 13488836
Ave neighs/atom = 421.526
Ave neighs/atom = 421.52612
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:11
Total wall time: 0:00:10

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# Crack growth in notched 3D Peridynamic block
@@ -58,11 +57,11 @@ region plate block 0 0.01975 0 0.01575 ${myzmin} ${myzmax} units bo
region plate block 0 0.01975 0 0.01575 0 ${myzmax} units box
region plate block 0 0.01975 0 0.01575 0 0.01225 units box
create_box 3 plate
Created orthogonal box = (0 0 0) to (0.01975 0.01575 0.01225)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (0.01975 0.01575 0.01225)
2 by 2 by 1 MPI processor grid
create_atoms 1 region plate
Created 32000 atoms
Time spent = 0.0011344 secs
create_atoms CPU = 0.001 seconds
pair_style peri/pmb
@@ -101,8 +100,10 @@ region topright block 0.009875 0.01975 0.01075 0.01575 ${myzmin} ${myzmax}
region topright block 0.009875 0.01975 0.01075 0.01575 0 ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 0 0.01225 units box
set region topleft type 2
Setting atom values ...
5000 settings made for type
set region topright type 3
Setting atom values ...
5000 settings made for type
pair_coeff 1 1 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
@@ -129,11 +130,13 @@ pair_coeff 1 3 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
set group all density ${mydensity}
set group all density 2440
Setting atom values ...
32000 settings made for density
variable myvolume equal ($h)^3
variable myvolume equal (0.0005)^3
set group all volume ${myvolume}
set group all volume 1.25e-10
Setting atom values ...
32000 settings made for volume
velocity all set 0.0 0.0 0.0 sum no units box
@@ -176,7 +179,7 @@ Neighbor list info ...
Peridynamic bonds:
total # of bonds = 3457032
bonds/atom = 108.032
Per MPI rank memory allocation (min/avg/max) = 47.63 | 48.11 | 48.78 Mbytes
Per MPI rank memory allocation (min/avg/max) = 47.70 | 48.18 | 48.85 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.0134233e+27 0 0 1.3342785e+09 2.4509971e+14 3.6292128e-06
20 1.7695805e+27 1.6163291e+08 0 1.3343188e+09 2.1541601e+14 3.6292128e-06
@@ -184,34 +187,34 @@ Step Temp E_pair E_mol TotEng Press Volume
60 9.8975313e+26 5.7284448e+08 0 1.2287455e+09 1.2048543e+14 3.6292128e-06
80 9.3888573e+26 4.0928092e+08 0 1.0314725e+09 1.1429321e+14 3.6292128e-06
100 8.3930314e+26 3.8522361e+08 0 9.4142265e+08 1.0217075e+14 3.6292128e-06
Loop time of 2.8928 on 4 procs for 100 steps with 32000 atoms
Loop time of 2.82804 on 4 procs for 100 steps with 32000 atoms
99.0% CPU use with 4 MPI tasks x 1 OpenMP threads
97.9% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.7472 | 2.7951 | 2.8585 | 2.9 | 96.62
Pair | 2.6021 | 2.6599 | 2.7081 | 2.4 | 94.05
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.019592 | 0.083156 | 0.13278 | 17.0 | 2.87
Output | 0.00022125 | 0.00034326 | 0.00058961 | 0.0 | 0.01
Modify | 0.0083542 | 0.0089623 | 0.0095983 | 0.5 | 0.31
Other | | 0.005276 | | | 0.18
Comm | 0.10341 | 0.15313 | 0.21057 | 10.3 | 5.41
Output | 0.00020409 | 0.00041658 | 0.00093699 | 0.0 | 0.01
Modify | 0.008944 | 0.0092288 | 0.0095088 | 0.3 | 0.33
Other | | 0.005395 | | | 0.19
Nlocal: 8000 ave 8000 max 8000 min
Nlocal: 8000.00 ave 8000 max 8000 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 5125 ave 5125 max 5125 min
Nghost: 5125.00 ave 5125 max 5125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 1.6861e+06 ave 1.77502e+06 max 1.60625e+06 min
Neighs: 1.68610e+06 ave 1.77502e+06 max 1.60625e+06 min
Histogram: 2 0 0 0 0 0 0 0 1 1
FullNghs: 3.37221e+06 ave 3.41832e+06 max 3.3261e+06 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Total # of neighbors = 13488836
Ave neighs/atom = 421.526
Ave neighs/atom = 421.52612
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:00:03
Total wall time: 0:00:02

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# Rhodopsin model
@@ -17,7 +16,8 @@ pair_modify mix arithmetic
kspace_style pppm 1e-4
read_data data.protein
orthogonal box = (-27.5 -38.5 -36.3646) to (27.5 38.5 36.3615)
Reading data file ...
orthogonal box = (-27.500000 -38.500000 -36.364600) to (27.500000 38.500000 36.361500)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
@@ -39,16 +39,22 @@ read_data data.protein
56829 dihedrals
reading impropers ...
1034 impropers
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.0
special bond factors coul: 0.0 0.0 0.0
4 = max # of 1-2 neighbors
12 = max # of 1-3 neighbors
24 = max # of 1-4 neighbors
26 = max # of special neighbors
special bonds CPU = 0.011 seconds
read_data CPU = 0.125 seconds
fix 1 all shake 0.0001 5 0 m 1.0 a 232
1617 = # of size 2 clusters
3633 = # of size 3 clusters
747 = # of size 4 clusters
4233 = # of frozen angles
find clusters CPU = 0.006 seconds
fix 2 all npt temp 300.0 300.0 100.0 z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1
special_bonds charmm
@@ -58,13 +64,13 @@ timestep 2.0
run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.248835
using 12-bit tables for long-range coulomb (src/kspace.cpp:328)
G vector (1/distance) = 0.24883488
grid = 25 32 32
stencil order = 5
estimated absolute RMS force accuracy = 0.0355478
estimated relative force accuracy = 0.000107051
using double precision FFTs
estimated absolute RMS force accuracy = 0.035547797
estimated relative force accuracy = 0.00010705113
using double precision KISS FFT
3d grid and FFT values/proc = 41070 25600
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
@@ -78,46 +84,46 @@ Neighbor list info ...
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 140 | 140 | 140 Mbytes
Per MPI rank memory allocation (min/avg/max) = 140.0 | 140.0 | 140.0 Mbytes
---------------- Step 0 ----- CPU = 0.0000 (sec) ----------------
TotEng = -25356.2064 KinEng = 21444.8313 Temp = 299.0397
PotEng = -46801.0377 E_bond = 2537.9940 E_angle = 10921.3742
E_dihed = 5211.7865 E_impro = 213.5116 E_vdwl = -2307.8634
E_coul = 207025.8927 E_long = -270403.7333 Press = -149.3301
Volume = 307995.0335
---------------- Step 100 ----- CPU = 23.7567 (sec) ----------------
TotEng = -25290.7386 KinEng = 21591.9096 Temp = 301.0906
PotEng = -46882.6482 E_bond = 2567.9789 E_angle = 10781.9556
E_dihed = 5198.7493 E_impro = 216.7863 E_vdwl = -1902.6458
E_coul = 206659.5007 E_long = -270404.9733 Press = 6.7898
Volume = 308133.9933
Loop time of 23.7568 on 1 procs for 100 steps with 32000 atoms
---------------- Step 100 ----- CPU = 20.0022 (sec) ----------------
TotEng = -25290.7304 KinEng = 21591.9084 Temp = 301.0906
PotEng = -46882.6388 E_bond = 2567.9807 E_angle = 10781.9571
E_dihed = 5198.7492 E_impro = 216.7864 E_vdwl = -1902.6618
E_coul = 206659.5226 E_long = -270404.9730 Press = 6.7406
Volume = 308134.2285
Loop time of 20.0022 on 1 procs for 100 steps with 32000 atoms
Performance: 0.727 ns/day, 32.995 hours/ns, 4.209 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 0.864 ns/day, 27.781 hours/ns, 4.999 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 17.905 | 17.905 | 17.905 | 0.0 | 75.37
Bond | 0.73417 | 0.73417 | 0.73417 | 0.0 | 3.09
Kspace | 1.4676 | 1.4676 | 1.4676 | 0.0 | 6.18
Neigh | 2.9907 | 2.9907 | 2.9907 | 0.0 | 12.59
Comm | 0.037427 | 0.037427 | 0.037427 | 0.0 | 0.16
Output | 0.00011754 | 0.00011754 | 0.00011754 | 0.0 | 0.00
Modify | 0.60985 | 0.60985 | 0.60985 | 0.0 | 2.57
Other | | 0.01201 | | | 0.05
Pair | 15 | 15 | 15 | 0.0 | 74.99
Bond | 0.65091 | 0.65091 | 0.65091 | 0.0 | 3.25
Kspace | 1.2144 | 1.2144 | 1.2144 | 0.0 | 6.07
Neigh | 2.6096 | 2.6096 | 2.6096 | 0.0 | 13.05
Comm | 0.035203 | 0.035203 | 0.035203 | 0.0 | 0.18
Output | 0.00010848 | 0.00010848 | 0.00010848 | 0.0 | 0.00
Modify | 0.48116 | 0.48116 | 0.48116 | 0.0 | 2.41
Other | | 0.01032 | | | 0.05
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 47958 ave 47958 max 47958 min
Nghost: 47958.0 ave 47958 max 47958 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.20281e+07 ave 1.20281e+07 max 1.20281e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 12028098
Ave neighs/atom = 375.878
Ave special neighs/atom = 7.43187
Total # of neighbors = 12028093
Ave neighs/atom = 375.87791
Ave special neighs/atom = 7.4318750
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:24
Total wall time: 0:00:20

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# Rhodopsin model
@@ -17,7 +16,8 @@ pair_modify mix arithmetic
kspace_style pppm 1e-4
read_data data.protein
orthogonal box = (-27.5 -38.5 -36.3646) to (27.5 38.5 36.3615)
Reading data file ...
orthogonal box = (-27.500000 -38.500000 -36.364600) to (27.500000 38.500000 36.361500)
1 by 2 by 2 MPI processor grid
reading atoms ...
32000 atoms
@@ -39,16 +39,22 @@ read_data data.protein
56829 dihedrals
reading impropers ...
1034 impropers
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.0
special bond factors coul: 0.0 0.0 0.0
4 = max # of 1-2 neighbors
12 = max # of 1-3 neighbors
24 = max # of 1-4 neighbors
26 = max # of special neighbors
special bonds CPU = 0.005 seconds
read_data CPU = 0.210 seconds
fix 1 all shake 0.0001 5 0 m 1.0 a 232
1617 = # of size 2 clusters
3633 = # of size 3 clusters
747 = # of size 4 clusters
4233 = # of frozen angles
find clusters CPU = 0.003 seconds
fix 2 all npt temp 300.0 300.0 100.0 z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1
special_bonds charmm
@@ -58,13 +64,13 @@ timestep 2.0
run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.248835
using 12-bit tables for long-range coulomb (src/kspace.cpp:328)
G vector (1/distance) = 0.24883488
grid = 25 32 32
stencil order = 5
estimated absolute RMS force accuracy = 0.0355478
estimated relative force accuracy = 0.000107051
using double precision FFTs
estimated absolute RMS force accuracy = 0.035547797
estimated relative force accuracy = 0.00010705113
using double precision KISS FFT
3d grid and FFT values/proc = 13230 6400
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
@@ -78,46 +84,46 @@ Neighbor list info ...
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 49.07 | 49.17 | 49.46 Mbytes
Per MPI rank memory allocation (min/avg/max) = 49.25 | 49.35 | 49.64 Mbytes
---------------- Step 0 ----- CPU = 0.0000 (sec) ----------------
TotEng = -25356.2064 KinEng = 21444.8313 Temp = 299.0397
PotEng = -46801.0377 E_bond = 2537.9940 E_angle = 10921.3742
E_dihed = 5211.7865 E_impro = 213.5116 E_vdwl = -2307.8634
E_coul = 207025.8927 E_long = -270403.7333 Press = -149.3301
Volume = 307995.0335
---------------- Step 100 ----- CPU = 6.3997 (sec) ----------------
TotEng = -25290.7386 KinEng = 21591.9096 Temp = 301.0906
PotEng = -46882.6483 E_bond = 2567.9789 E_angle = 10781.9556
E_dihed = 5198.7493 E_impro = 216.7863 E_vdwl = -1902.6458
E_coul = 206659.5007 E_long = -270404.9733 Press = 6.7898
Volume = 308133.9933
Loop time of 6.39977 on 4 procs for 100 steps with 32000 atoms
---------------- Step 100 ----- CPU = 5.5375 (sec) ----------------
TotEng = -25290.7303 KinEng = 21591.9085 Temp = 301.0906
PotEng = -46882.6388 E_bond = 2567.9807 E_angle = 10781.9571
E_dihed = 5198.7492 E_impro = 216.7864 E_vdwl = -1902.6618
E_coul = 206659.5225 E_long = -270404.9730 Press = 6.7406
Volume = 308134.2285
Loop time of 5.53765 on 4 procs for 100 steps with 32000 atoms
Performance: 2.700 ns/day, 8.889 hours/ns, 15.626 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 3.120 ns/day, 7.691 hours/ns, 18.058 timesteps/s
99.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.4434 | 4.5321 | 4.6846 | 4.3 | 70.82
Bond | 0.17894 | 0.18568 | 0.19951 | 1.9 | 2.90
Kspace | 0.4651 | 0.61064 | 0.69123 | 11.1 | 9.54
Neigh | 0.7739 | 0.77394 | 0.774 | 0.0 | 12.09
Comm | 0.057676 | 0.069183 | 0.07901 | 3.0 | 1.08
Output | 5.6505e-05 | 6.6578e-05 | 9.4414e-05 | 0.0 | 0.00
Modify | 0.21444 | 0.21866 | 0.22524 | 0.9 | 3.42
Other | | 0.009451 | | | 0.15
Pair | 3.8921 | 3.9427 | 4.0762 | 3.9 | 71.20
Bond | 0.16218 | 0.16829 | 0.17972 | 1.7 | 3.04
Kspace | 0.35196 | 0.48475 | 0.53996 | 11.1 | 8.75
Neigh | 0.69975 | 0.69981 | 0.69988 | 0.0 | 12.64
Comm | 0.04908 | 0.049445 | 0.049767 | 0.1 | 0.89
Output | 5.1737e-05 | 8.5056e-05 | 0.00018382 | 0.0 | 0.00
Modify | 0.18393 | 0.18474 | 0.18528 | 0.1 | 3.34
Other | | 0.007858 | | | 0.14
Nlocal: 8000 ave 8143 max 7933 min
Nlocal: 8000.00 ave 8143 max 7933 min
Histogram: 1 2 0 0 0 0 0 0 0 1
Nghost: 22733.5 ave 22769 max 22693 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Neighs: 3.00702e+06 ave 3.0975e+06 max 2.96492e+06 min
Histogram: 1 2 0 0 0 0 0 0 0 1
Total # of neighbors = 12028098
Ave neighs/atom = 375.878
Ave special neighs/atom = 7.43187
Total # of neighbors = 12028093
Ave neighs/atom = 375.87791
Ave special neighs/atom = 7.4318750
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:06
Total wall time: 0:00:05
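(A check added here, not part of the log output: the Performance line is just simulated time per day of wall time. With the 2.0 fs timestep set above, 100 steps in a 5.53765 s loop give
\[
\text{ns/day} \;=\; \frac{N_\text{steps}\,\Delta t}{t_\text{loop}} \times 86400\,\tfrac{\text{s}}{\text{day}}
\;=\; \frac{100 \times 2.0\,\text{fs}}{5.53765\,\text{s}} \times 86400
\;\approx\; 3.120\;\text{ns/day},
\]
and its reciprocal, \(24/3.120 \approx 7.691\), recovers the reported hours/ns.)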

View File

@@ -0,0 +1,96 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell
units real
atom_style charge
read_data data.reax
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (9.4910650 9.4910650 6.9912300)
1 by 1 by 1 MPI processor grid
reading atoms ...
58 atoms
read_data CPU = 0.000 seconds
replicate 7 8 10
Replicating atoms ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (66.437455 75.928520 69.912300)
1 by 1 by 1 MPI processor grid
32480 atoms
replicate CPU = 0.002 seconds
velocity all create 300.0 9999
pair_style reax/c NULL
pair_coeff * * ffield.reax C H O N
WARNING: Changed valency_val to valency_boc for X (src/USER-REAXC/reaxc_ffield.cpp:315)
timestep 0.1
fix 1 all nve
fix 2 all qeq/reax 1 0.0 10.0 1.0e-6 reax/c
thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 12
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair reax/c, perpetual
attributes: half, newton off, ghost
pair build: half/bin/newtoff/ghost
stencil: half/ghost/bin/3d/newtoff
bin: standard
(2) fix qeq/reax, perpetual, copy from (1)
attributes: half, newton off, ghost
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 1727.0 | 1727.0 | 1727.0 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 29044.119 -3232140.8 22804.879 -29365.593 6302.5638 -3203096.6
10 299.37479 28983.59 -3232075.2 21746.783 -23987.396 7610.3039 -3203091.6
20 295.58549 28616.733 -3231710.1 18178.443 -10872.027 10603.19 -3203093.3
30 289.48845 28026.456 -3231123.3 12146.101 4985.2572 13364.258 -3203096.8
40 282.66408 27365.763 -3230467.5 4284.1187 18132.512 14133.51 -3203101.7
50 274.97007 26620.878 -3229730.4 -3718.933 25520.016 12551.903 -3203109.5
60 266.11301 25763.393 -3228883.8 -9271.3498 27307.451 9753.2362 -3203120.4
70 259.32635 25106.351 -3228237.2 -11150.623 24238.509 6578.531 -3203130.8
80 260.33969 25204.456 -3228344.2 -9576.4144 16737.758 3454.6426 -3203139.7
90 269.9021 26130.229 -3229275.5 -5905.8652 5246.3236 467.53439 -3203145.2
100 280.76723 27182.123 -3230330.6 -1363.6002 -8133.2093 -1689.6535 -3203148.5
Loop time of 213.234 on 1 procs for 100 steps with 32480 atoms
Performance: 0.004 ns/day, 5923.154 hours/ns, 0.469 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 157 | 157 | 157 | 0.0 | 73.63
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.024997 | 0.024997 | 0.024997 | 0.0 | 0.01
Output | 0.0022025 | 0.0022025 | 0.0022025 | 0.0 | 0.00
Modify | 56.19 | 56.19 | 56.19 | 0.0 | 26.35
Other | | 0.01211 | | | 0.01
Nlocal: 32480.0 ave 32480 max 32480 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 45128.0 ave 45128 max 45128 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.27781e+07 ave 1.27781e+07 max 1.27781e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 12778082
Ave neighs/atom = 393.41385
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:03:37

View File

@@ -0,0 +1,96 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell
units real
atom_style charge
read_data data.reax
Reading data file ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (9.4910650 9.4910650 6.9912300)
2 by 2 by 1 MPI processor grid
reading atoms ...
58 atoms
read_data CPU = 0.000 seconds
replicate 7 8 10
Replicating atoms ...
orthogonal box = (0.0000000 0.0000000 0.0000000) to (66.437455 75.928520 69.912300)
1 by 2 by 2 MPI processor grid
32480 atoms
replicate CPU = 0.001 seconds
velocity all create 300.0 9999
pair_style reax/c NULL
pair_coeff * * ffield.reax C H O N
WARNING: Changed valency_val to valency_boc for X (src/USER-REAXC/reaxc_ffield.cpp:315)
timestep 0.1
fix 1 all nve
fix 2 all qeq/reax 1 0.0 10.0 1.0e-6 reax/c
thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 12
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair reax/c, perpetual
attributes: half, newton off, ghost
pair build: half/bin/newtoff/ghost
stencil: half/ghost/bin/3d/newtoff
bin: standard
(2) fix qeq/reax, perpetual, copy from (1)
attributes: half, newton off, ghost
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 647.0 | 647.0 | 647.0 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 29044.119 -3232140.8 22804.879 -29365.593 6302.5638 -3203096.6
10 299.37479 28983.59 -3232075.2 21746.771 -23987.411 7610.2901 -3203091.6
20 295.58551 28616.736 -3231710.1 18178.439 -10871.954 10603.303 -3203093.3
30 289.48844 28026.456 -3231123.2 12146.289 4985.5678 13364.519 -3203096.8
40 282.66406 27365.762 -3230467.4 4284.8179 18133.406 14134.156 -3203101.7
50 274.97009 26620.88 -3229730.3 -3718.6796 25520.338 12552.205 -3203109.5
60 266.11302 25763.394 -3228883.8 -9271.5644 27307.146 9753.1034 -3203120.4
70 259.32636 25106.352 -3228237.1 -11150.66 24238.705 6578.7141 -3203130.8
80 260.33966 25204.454 -3228344.1 -9576.2474 16737.753 3454.7607 -3203139.7
90 269.90207 26130.226 -3229275.5 -5905.8809 5246.1687 467.42114 -3203145.2
100 280.76722 27182.122 -3230330.6 -1363.4752 -8133.2096 -1689.5922 -3203148.5
Loop time of 69.1187 on 4 procs for 100 steps with 32480 atoms
Performance: 0.013 ns/day, 1919.965 hours/ns, 1.447 timesteps/s
97.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 45.964 | 48.533 | 51.423 | 29.3 | 70.22
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.19604 | 3.0913 | 5.6647 | 116.3 | 4.47
Output | 0.00074649 | 0.0011722 | 0.0023553 | 2.0 | 0.00
Modify | 17.48 | 17.485 | 17.489 | 0.1 | 25.30
Other | | 0.008528 | | | 0.01
Nlocal: 8120.00 ave 8120 max 8120 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 21992.0 ave 21992 max 21992 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 3.48274e+06 ave 3.48274e+06 max 3.48274e+06 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Total # of neighbors = 13930976
Ave neighs/atom = 428.90936
Neighbor list builds = 0
Dangerous builds = 0
Please see the log.cite file for references relevant to this simulation
Total wall time: 0:01:10

View File

@@ -0,0 +1,90 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# REBO polyethelene benchmark
units metal
atom_style atomic
read_data data.rebo
Reading data file ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (2.1000000 2.1000000 25.579000)
1 by 1 by 1 MPI processor grid
reading atoms ...
60 atoms
read_data CPU = 0.000 seconds
replicate 17 16 2
Replicating atoms ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (69.300000 65.100000 51.158000)
1 by 1 by 1 MPI processor grid
32640 atoms
replicate CPU = 0.002 seconds
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style rebo
pair_coeff * * CH.rebo C H
Reading rebo potential file CH.rebo with DATE: 2018-7-3
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.5
ghost atom cutoff = 6.5
binsize = 3.25, bins = 22 21 16
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair rebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 34.21 | 34.21 | 34.21 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -138442.83 0 -137177.16 2463.0748
10 179.37985 -137931.27 0 -137174.48 15655.936
20 206.87654 -138046.99 0 -137174.19 -24042.627
30 150.80122 -137807.43 0 -137171.21 -16524.118
40 173.24945 -137902.35 0 -137171.42 -5716.9119
50 151.80455 -137812.36 0 -137171.91 3480.4584
60 199.08777 -138013.82 0 -137173.88 17881.372
70 217.85748 -138093.86 0 -137174.73 -12270.999
80 202.37482 -138029.39 0 -137175.59 -7622.7319
90 194.90628 -137997.05 0 -137174.75 -32267.471
100 185.17818 -137954.51 0 -137173.26 -6901.7499
Loop time of 4.83649 on 1 procs for 100 steps with 32640 atoms
Performance: 0.893 ns/day, 26.869 hours/ns, 20.676 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3248 | 3.3248 | 3.3248 | 0.0 | 68.74
Neigh | 1.4583 | 1.4583 | 1.4583 | 0.0 | 30.15
Comm | 0.01934 | 0.01934 | 0.01934 | 0.0 | 0.40
Output | 0.0011871 | 0.0011871 | 0.0011871 | 0.0 | 0.02
Modify | 0.023516 | 0.023516 | 0.023516 | 0.0 | 0.49
Other | | 0.009316 | | | 0.19
Nlocal: 32640.0 ave 32640 max 32640 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 26460.0 ave 26460 max 26460 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 4.90213e+06 ave 4.90213e+06 max 4.90213e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 4902134
Ave neighs/atom = 150.18793
Neighbor list builds = 9
Dangerous builds = 0
Total wall time: 0:00:05

View File

@@ -0,0 +1,90 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# REBO polyethelene benchmark
units metal
atom_style atomic
read_data data.rebo
Reading data file ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (2.1000000 2.1000000 25.579000)
1 by 1 by 4 MPI processor grid
reading atoms ...
60 atoms
read_data CPU = 0.000 seconds
replicate 17 16 2
Replicating atoms ...
orthogonal box = (-2.1000000 -2.1000000 0.0000000) to (69.300000 65.100000 51.158000)
2 by 2 by 1 MPI processor grid
32640 atoms
replicate CPU = 0.001 seconds
neighbor 0.5 bin
neigh_modify delay 5 every 1
pair_style rebo
pair_coeff * * CH.rebo C H
Reading rebo potential file CH.rebo with DATE: 2018-7-3
velocity all create 300.0 761341
fix 1 all nve
timestep 0.0005
thermo 10
run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 6.5
ghost atom cutoff = 6.5
binsize = 3.25, bins = 22 21 16
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair rebo, perpetual
attributes: full, newton on, ghost
pair build: full/bin/ghost
stencil: full/ghost/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 11.80 | 12.00 | 12.19 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -138442.83 0 -137177.16 2463.0748
10 179.37985 -137931.27 0 -137174.48 15655.936
20 206.87654 -138046.99 0 -137174.19 -24042.627
30 150.80122 -137807.43 0 -137171.21 -16524.118
40 173.24945 -137902.35 0 -137171.42 -5716.9119
50 151.80455 -137812.36 0 -137171.91 3480.4584
60 199.08777 -138013.82 0 -137173.88 17881.372
70 217.85748 -138093.86 0 -137174.73 -12270.999
80 202.37482 -138029.39 0 -137175.59 -7622.7319
90 194.90628 -137997.05 0 -137174.75 -32267.471
100 185.17818 -137954.51 0 -137173.26 -6901.7499
Loop time of 1.74701 on 4 procs for 100 steps with 32640 atoms
Performance: 2.473 ns/day, 9.706 hours/ns, 57.241 timesteps/s
94.1% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.89836 | 0.96998 | 1.054 | 6.0 | 55.52
Neigh | 0.453 | 0.47091 | 0.50316 | 2.8 | 26.96
Comm | 0.15706 | 0.27291 | 0.36547 | 14.5 | 15.62
Output | 0.00047016 | 0.00073808 | 0.0015287 | 0.0 | 0.04
Modify | 0.0093558 | 0.010209 | 0.011958 | 1.0 | 0.58
Other | | 0.02227 | | | 1.27
Nlocal: 8160.00 ave 8163 max 8157 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Nghost: 11605.8 ave 11615 max 11593 min
Histogram: 1 0 0 0 0 0 2 0 0 1
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 1.22553e+06 ave 1.22735e+06 max 1.22455e+06 min
Histogram: 2 0 0 1 0 0 0 0 0 1
Total # of neighbors = 4902134
Ave neighs/atom = 150.18793
Neighbor list builds = 9
Dangerous builds = 0
Total wall time: 0:00:01

View File

@@ -0,0 +1,147 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# SPC/E water box benchmark
units real
atom_style full
read_data data.spce
Reading data file ...
orthogonal box = (0.02645 0.02645 0.02641) to (35.532800 35.532800 35.473600)
1 by 1 by 1 MPI processor grid
reading atoms ...
4500 atoms
scanning bonds ...
2 = max bonds/atom
scanning angles ...
1 = max angles/atom
reading bonds ...
3000 bonds
reading angles ...
1500 angles
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.0
special bond factors coul: 0.0 0.0 0.0
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.001 seconds
read_data CPU = 0.009 seconds
replicate 2 4 1
Replicating atoms ...
orthogonal box = (0.02645 0.02645 0.02641) to (71.039150 142.05185 35.473600)
1 by 1 by 1 MPI processor grid
36000 atoms
24000 bonds
12000 angles
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.0
special bond factors coul: 0.0 0.0 0.0
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.005 seconds
replicate CPU = 0.012 seconds
pair_style lj/cut/coul/long 9.8 9.8
kspace_style pppm 1.0e-4
pair_coeff 1 1 0.15535 3.166
pair_coeff * 2 0.0000 0.0000
bond_style harmonic
angle_style harmonic
dihedral_style none
improper_style none
bond_coeff 1 1000.00 1.000
angle_coeff 1 100.0 109.47
special_bonds lj/coul 0.0 0.0 0.5
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.5
special bond factors coul: 0.0 0.0 0.5
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.005 seconds
neighbor 2.0 bin
neigh_modify every 1 delay 10 check yes
fix 1 all shake 0.0001 20 0 b 1 a 1
0 = # of size 2 clusters
0 = # of size 3 clusters
0 = # of size 4 clusters
12000 = # of frozen angles
find clusters CPU = 0.005 seconds
fix 2 all nvt temp 300.0 300.0 100.0
velocity all create 300 432567 dist uniform
timestep 2.0
thermo_style one
thermo 50
run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (src/kspace.cpp:328)
G vector (1/distance) = 0.2688011
grid = 36 64 24
stencil order = 5
estimated absolute RMS force accuracy = 0.033101471
estimated relative force accuracy = 9.9684097e-05
using double precision KISS FFT
3d grid and FFT values/proc = 91977 55296
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 11.8
ghost atom cutoff = 11.8
binsize = 5.9, bins = 13 25 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut/coul/long, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 105.4 | 105.4 | 105.4 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -133281.51 0 -111820.57 516.17807
50 264.98553 -136986.74 0 -118030.61 -440.29256
100 274.45966 -136364.57 0 -116730.69 -128.61948
Loop time of 18.5133 on 1 procs for 100 steps with 36000 atoms
Performance: 0.933 ns/day, 25.713 hours/ns, 5.402 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 14.557 | 14.557 | 14.557 | 0.0 | 78.63
Bond | 7.1764e-05 | 7.1764e-05 | 7.1764e-05 | 0.0 | 0.00
Kspace | 1.7651 | 1.7651 | 1.7651 | 0.0 | 9.53
Neigh | 1.8703 | 1.8703 | 1.8703 | 0.0 | 10.10
Comm | 0.042219 | 0.042219 | 0.042219 | 0.0 | 0.23
Output | 0.00021219 | 0.00021219 | 0.00021219 | 0.0 | 0.00
Modify | 0.26983 | 0.26983 | 0.26983 | 0.0 | 1.46
Other | | 0.008397 | | | 0.05
Nlocal: 36000.0 ave 36000 max 36000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 56963.0 ave 56963 max 56963 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.24625e+07 ave 1.24625e+07 max 1.24625e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 12462451
Ave neighs/atom = 346.17919
Ave special neighs/atom = 2.0000000
Neighbor list builds = 9
Dangerous builds = 6
Total wall time: 0:00:19

View File

@@ -0,0 +1,147 @@
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# SPC/E water box benchmark
units real
atom_style full
read_data data.spce
Reading data file ...
orthogonal box = (0.02645 0.02645 0.02641) to (35.532800 35.532800 35.473600)
2 by 2 by 1 MPI processor grid
reading atoms ...
4500 atoms
scanning bonds ...
2 = max bonds/atom
scanning angles ...
1 = max angles/atom
reading bonds ...
3000 bonds
reading angles ...
1500 angles
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.0
special bond factors coul: 0.0 0.0 0.0
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.001 seconds
read_data CPU = 0.008 seconds
replicate 2 4 1
Replicating atoms ...
orthogonal box = (0.02645 0.02645 0.02641) to (71.039150 142.05185 35.473600)
1 by 4 by 1 MPI processor grid
36000 atoms
24000 bonds
12000 angles
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.0
special bond factors coul: 0.0 0.0 0.0
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.002 seconds
replicate CPU = 0.005 seconds
pair_style lj/cut/coul/long 9.8 9.8
kspace_style pppm 1.0e-4
pair_coeff 1 1 0.15535 3.166
pair_coeff * 2 0.0000 0.0000
bond_style harmonic
angle_style harmonic
dihedral_style none
improper_style none
bond_coeff 1 1000.00 1.000
angle_coeff 1 100.0 109.47
special_bonds lj/coul 0.0 0.0 0.5
Finding 1-2 1-3 1-4 neighbors ...
special bond factors lj: 0.0 0.0 0.5
special bond factors coul: 0.0 0.0 0.5
2 = max # of 1-2 neighbors
1 = max # of 1-3 neighbors
1 = max # of 1-4 neighbors
2 = max # of special neighbors
special bonds CPU = 0.004 seconds
neighbor 2.0 bin
neigh_modify every 1 delay 10 check yes
fix 1 all shake 0.0001 20 0 b 1 a 1
0 = # of size 2 clusters
0 = # of size 3 clusters
0 = # of size 4 clusters
12000 = # of frozen angles
find clusters CPU = 0.003 seconds
fix 2 all nvt temp 300.0 300.0 100.0
velocity all create 300 432567 dist uniform
timestep 2.0
thermo_style one
thermo 50
run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (src/kspace.cpp:328)
G vector (1/distance) = 0.2688011
grid = 36 64 24
stencil order = 5
estimated absolute RMS force accuracy = 0.033101471
estimated relative force accuracy = 9.9684097e-05
using double precision KISS FFT
3d grid and FFT values/proc = 27993 13824
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 11.8
ghost atom cutoff = 11.8
binsize = 5.9, bins = 13 25 7
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut/coul/long, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 37.90 | 37.90 | 37.90 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -133281.51 0 -111820.57 516.17807
50 264.98553 -136986.74 0 -118030.61 -440.29255
100 274.45966 -136364.57 0 -116730.69 -128.61954
Loop time of 5.44355 on 4 procs for 100 steps with 36000 atoms
Performance: 3.174 ns/day, 7.560 hours/ns, 18.370 timesteps/s
95.4% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.82 | 3.8744 | 3.9155 | 1.8 | 71.17
Bond | 7.8917e-05 | 8.6784e-05 | 9.4891e-05 | 0.0 | 0.00
Kspace | 0.79192 | 0.83671 | 0.88328 | 3.5 | 15.37
Neigh | 0.51754 | 0.5178 | 0.51789 | 0.0 | 9.51
Comm | 0.069774 | 0.078783 | 0.088247 | 2.3 | 1.45
Output | 9.0361e-05 | 0.00015712 | 0.00035048 | 0.0 | 0.00
Modify | 0.12822 | 0.13016 | 0.13486 | 0.8 | 2.39
Other | | 0.005435 | | | 0.10
Nlocal: 9000.00 ave 9002 max 8998 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 24134.2 ave 24184 max 24062 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Neighs: 3.11561e+06 ave 3.11676e+06 max 3.11446e+06 min
Histogram: 1 0 0 1 0 0 1 0 0 1
Total # of neighbors = 12462451
Ave neighs/atom = 346.17919
Ave special neighs/atom = 2.0000000
Neighbor list builds = 9
Dangerous builds = 6
Total wall time: 0:00:05

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Si via Stillinger-Weber
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice diamond 5.431
Lattice spacing in x,y,z = 5.431 5.431 5.431
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region box block 0 20 0 20 0 10
create_box 1 box
Created orthogonal box = (0 0 0) to (108.62 108.62 54.31)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (108.62000 108.62000 54.310000)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00191712 secs
create_atoms CPU = 0.002 seconds
pair_style sw
pair_coeff * * Si.sw Si
Reading sw potential file Si.sw with DATE: 2007-06-11
mass 1 28.06
velocity all create 1000.0 376847 loop geom
@@ -46,32 +46,32 @@ Per MPI rank memory allocation (min/avg/max) = 12.52 | 12.52 | 12.52 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -138771.2 0 -134635 6866.6499
100 508.80533 -136736.12 0 -134631.6 6361.7858
Loop time of 5.66634 on 1 procs for 100 steps with 32000 atoms
Loop time of 3.9309 on 1 procs for 100 steps with 32000 atoms
Performance: 1.525 ns/day, 15.740 hours/ns, 17.648 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 2.198 ns/day, 10.919 hours/ns, 25.439 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.5464 | 5.5464 | 5.5464 | 0.0 | 97.88
Neigh | 0.075834 | 0.075834 | 0.075834 | 0.0 | 1.34
Comm | 0.0092049 | 0.0092049 | 0.0092049 | 0.0 | 0.16
Output | 0.00010991 | 0.00010991 | 0.00010991 | 0.0 | 0.00
Modify | 0.024666 | 0.024666 | 0.024666 | 0.0 | 0.44
Other | | 0.01014 | | | 0.18
Pair | 3.8206 | 3.8206 | 3.8206 | 0.0 | 97.19
Neigh | 0.067368 | 0.067368 | 0.067368 | 0.0 | 1.71
Comm | 0.0091503 | 0.0091503 | 0.0091503 | 0.0 | 0.23
Output | 0.0001049 | 0.0001049 | 0.0001049 | 0.0 | 0.00
Modify | 0.023839 | 0.023839 | 0.023839 | 0.0 | 0.61
Other | | 0.009882 | | | 0.25
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 12495 ave 12495 max 12495 min
Nghost: 12495.0 ave 12495 max 12495 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 894818 ave 894818 max 894818 min
FullNghs: 894818.0 ave 894818 max 894818 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 894818
Ave neighs/atom = 27.9631
Ave neighs/atom = 27.963062
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:05
Total wall time: 0:00:03

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Si via Stillinger-Weber
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice diamond 5.431
Lattice spacing in x,y,z = 5.431 5.431 5.431
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region box block 0 20 0 20 0 10
create_box 1 box
Created orthogonal box = (0 0 0) to (108.62 108.62 54.31)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (108.62000 108.62000 54.310000)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000604153 secs
create_atoms CPU = 0.001 seconds
pair_style sw
pair_coeff * * Si.sw Si
Reading sw potential file Si.sw with DATE: 2007-06-11
mass 1 28.06
velocity all create 1000.0 376847 loop geom
@@ -46,32 +46,32 @@ Per MPI rank memory allocation (min/avg/max) = 4.104 | 4.104 | 4.104 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -138771.2 0 -134635 6866.6499
100 508.80533 -136736.12 0 -134631.6 6361.7858
Loop time of 1.47105 on 4 procs for 100 steps with 32000 atoms
Loop time of 1.04386 on 4 procs for 100 steps with 32000 atoms
Performance: 5.873 ns/day, 4.086 hours/ns, 67.978 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 8.277 ns/day, 2.900 hours/ns, 95.798 timesteps/s
98.7% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.3788 | 1.3929 | 1.4053 | 0.8 | 94.69
Neigh | 0.019134 | 0.019502 | 0.019816 | 0.2 | 1.33
Comm | 0.024183 | 0.035734 | 0.049122 | 4.7 | 2.43
Output | 5.1975e-05 | 6.6102e-05 | 0.00010204 | 0.0 | 0.00
Modify | 0.0063825 | 0.0064374 | 0.0064764 | 0.0 | 0.44
Other | | 0.01638 | | | 1.11
Pair | 0.96496 | 0.97632 | 0.9978 | 1.3 | 93.53
Neigh | 0.01732 | 0.017998 | 0.019718 | 0.7 | 1.72
Comm | 0.012035 | 0.036398 | 0.049588 | 7.7 | 3.49
Output | 4.5061e-05 | 5.5015e-05 | 7.7248e-05 | 0.0 | 0.01
Modify | 0.0070148 | 0.0070775 | 0.0071096 | 0.0 | 0.68
Other | | 0.006012 | | | 0.58
Nlocal: 8000 ave 8015 max 7978 min
Nlocal: 8000.00 ave 8015 max 7978 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Nghost: 4995 ave 5017 max 4980 min
Nghost: 4995.00 ave 5017 max 4980 min
Histogram: 1 0 1 1 0 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 223704 ave 224108 max 223131 min
FullNghs: 223704.0 ave 224108 max 223131 min
Histogram: 1 0 0 0 0 0 1 1 0 1
Total # of neighbors = 894818
Ave neighs/atom = 27.9631
Ave neighs/atom = 27.963062
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:01

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Si via Tersoff
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice diamond 5.431
Lattice spacing in x,y,z = 5.431 5.431 5.431
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region box block 0 20 0 20 0 10
create_box 1 box
Created orthogonal box = (0 0 0) to (108.62 108.62 54.31)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (108.62000 108.62000 54.310000)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.0019412 secs
create_atoms CPU = 0.002 seconds
pair_style tersoff
pair_coeff * * Si.tersoff Si
Reading tersoff potential file Si.tersoff with DATE: 2007-10-25
mass 1 28.06
velocity all create 1000.0 376847 loop geom
@@ -42,36 +42,36 @@ Neighbor list info ...
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 11.1 | 11.1 | 11.1 Mbytes
Per MPI rank memory allocation (min/avg/max) = 11.11 | 11.11 | 11.11 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -148173.19 0 -144036.99 7019.4434
100 430.57813 -145815.61 0 -144034.65 -14550.734
Loop time of 8.53088 on 1 procs for 100 steps with 32000 atoms
Loop time of 4.71424 on 1 procs for 100 steps with 32000 atoms
Performance: 1.013 ns/day, 23.697 hours/ns, 11.722 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads
Performance: 1.833 ns/day, 13.095 hours/ns, 21.212 timesteps/s
99.9% CPU use with 1 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 8.4236 | 8.4236 | 8.4236 | 0.0 | 98.74
Neigh | 0.065852 | 0.065852 | 0.065852 | 0.0 | 0.77
Comm | 0.0078607 | 0.0078607 | 0.0078607 | 0.0 | 0.09
Output | 0.00010872 | 0.00010872 | 0.00010872 | 0.0 | 0.00
Modify | 0.023968 | 0.023968 | 0.023968 | 0.0 | 0.28
Other | | 0.009521 | | | 0.11
Pair | 4.612 | 4.612 | 4.612 | 0.0 | 97.83
Neigh | 0.060618 | 0.060618 | 0.060618 | 0.0 | 1.29
Comm | 0.008847 | 0.008847 | 0.008847 | 0.0 | 0.19
Output | 9.9659e-05 | 9.9659e-05 | 9.9659e-05 | 0.0 | 0.00
Modify | 0.023234 | 0.023234 | 0.023234 | 0.0 | 0.49
Other | | 0.00941 | | | 0.20
Nlocal: 32000 ave 32000 max 32000 min
Nlocal: 32000.0 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 11537 ave 11537 max 11537 min
Nghost: 11537.0 ave 11537 max 11537 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 530500 ave 530500 max 530500 min
FullNghs: 530500.0 ave 530500 max 530500 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Total # of neighbors = 530500
Ave neighs/atom = 16.5781
Ave neighs/atom = 16.578125
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:08
Total wall time: 0:00:04

View File

@@ -1,5 +1,4 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
LAMMPS (9 Oct 2020)
using 1 OpenMP thread(s) per MPI task
# bulk Si via Tersoff
@@ -7,17 +6,18 @@ units metal
atom_style atomic
lattice diamond 5.431
Lattice spacing in x,y,z = 5.431 5.431 5.431
Lattice spacing in x,y,z = 5.4310000 5.4310000 5.4310000
region box block 0 20 0 20 0 10
create_box 1 box
Created orthogonal box = (0 0 0) to (108.62 108.62 54.31)
Created orthogonal box = (0.0000000 0.0000000 0.0000000) to (108.62000 108.62000 54.310000)
2 by 2 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000605822 secs
create_atoms CPU = 0.001 seconds
pair_style tersoff
pair_coeff * * Si.tersoff Si
Reading tersoff potential file Si.tersoff with DATE: 2007-10-25
mass 1 28.06
velocity all create 1000.0 376847 loop geom
@@ -42,36 +42,36 @@ Neighbor list info ...
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.753 | 3.753 | 3.753 Mbytes
Per MPI rank memory allocation (min/avg/max) = 3.754 | 3.754 | 3.754 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -148173.19 0 -144036.99 7019.4434
100 430.57813 -145815.61 0 -144034.65 -14550.734
Loop time of 2.16161 on 4 procs for 100 steps with 32000 atoms
Loop time of 1.32053 on 4 procs for 100 steps with 32000 atoms
Performance: 3.997 ns/day, 6.004 hours/ns, 46.262 timesteps/s
99.4% CPU use with 4 MPI tasks x 1 OpenMP threads
Performance: 6.543 ns/day, 3.668 hours/ns, 75.727 timesteps/s
97.5% CPU use with 4 MPI tasks x 1 OpenMP threads
MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.1122 | 2.1165 | 2.122 | 0.2 | 97.91
Neigh | 0.016894 | 0.016915 | 0.016955 | 0.0 | 0.78
Comm | 0.012348 | 0.017858 | 0.022105 | 2.7 | 0.83
Output | 4.7684e-05 | 6.2048e-05 | 9.9421e-05 | 0.0 | 0.00
Modify | 0.0064063 | 0.0064579 | 0.0065169 | 0.0 | 0.30
Other | | 0.003793 | | | 0.18
Pair | 1.1729 | 1.2118 | 1.2453 | 2.3 | 91.77
Neigh | 0.015989 | 0.016319 | 0.016708 | 0.3 | 1.24
Comm | 0.046884 | 0.078767 | 0.11602 | 8.9 | 5.96
Output | 3.9816e-05 | 7.0453e-05 | 0.00015831 | 0.0 | 0.01
Modify | 0.0070612 | 0.0071967 | 0.0073555 | 0.1 | 0.54
Other | | 0.006331 | | | 0.48
Nlocal: 8000 ave 8005 max 7993 min
Nlocal: 8000.00 ave 8005 max 7993 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Nghost: 4580.25 ave 4593 max 4567 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Neighs: 0 ave 0 max 0 min
Neighs: 0.00000 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 132625 ave 132785 max 132562 min
FullNghs: 132625.0 ave 132785 max 132562 min
Histogram: 2 1 0 0 0 0 0 0 0 1
Total # of neighbors = 530500
Ave neighs/atom = 16.5781
Ave neighs/atom = 16.578125
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:02
Total wall time: 0:00:01

View File

@@ -90,6 +90,7 @@ if(BUILD_SHARED_LIBS) # for all pkg libs, mpi_stubs and linalg
endif()
option(BUILD_TOOLS "Build and install LAMMPS tools (msi2lmp, binary2txt, chain)" OFF)
option(BUILD_LAMMPS_SHELL "Build and install the LAMMPS shell" OFF)
include(GNUInstallDirs)
file(GLOB ALL_SOURCES ${LAMMPS_SOURCE_DIR}/[^.]*.cpp)
@@ -157,11 +158,11 @@ else()
file(GLOB MPI_SOURCES ${LAMMPS_SOURCE_DIR}/STUBS/mpi.c)
add_library(mpi_stubs STATIC ${MPI_SOURCES})
set_target_properties(mpi_stubs PROPERTIES OUTPUT_NAME lammps_mpi_stubs${LAMMPS_MACHINE})
target_include_directories(mpi_stubs PUBLIC $<BUILD_INTERFACE:${LAMMPS_SOURCE_DIR}/STUBS> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/lammps/mpi>)
install(FILES ${LAMMPS_SOURCE_DIR}/STUBS/mpi.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/lammps/mpi)
target_include_directories(mpi_stubs PUBLIC $<BUILD_INTERFACE:${LAMMPS_SOURCE_DIR}/STUBS>)
if(BUILD_SHARED_LIBS)
target_link_libraries(lammps PRIVATE mpi_stubs)
target_include_directories(lammps INTERFACE $<BUILD_INTERFACE:${LAMMPS_SOURCE_DIR}/STUBS> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/lammps/mpi>)
target_include_directories(lammps INTERFACE $<BUILD_INTERFACE:${LAMMPS_SOURCE_DIR}/STUBS>)
target_compile_definitions(lammps INTERFACE $<INSTALL_INTERFACE:LAMMPS_LIB_NO_MPI>)
else()
target_link_libraries(lammps PUBLIC mpi_stubs)
endif()
@@ -218,10 +219,9 @@ if(BUILD_OMP)
message(FATAL_ERROR "Cannot find the 'omp.h' header file required for full OpenMP support")
endif()
if (((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 8.99.9)) OR
((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 9.99.9)) OR
((CMAKE_CXX_COMPILER_ID STREQUAL "Intel") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 18.99.9))
)
if (((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.0)) OR
((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.0)) OR
((CMAKE_CXX_COMPILER_ID STREQUAL "Intel") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19.0)))
# GCC 9.x and later plus Clang 10.x and later implement strict OpenMP 4.0 semantics for consts.
# Intel 18.0 was tested to support both, so we switch to OpenMP 4+ from 19.x onward to be safe.
target_compile_definitions(lammps PRIVATE -DLAMMPS_OMP_COMPAT=4)
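For context (an added illustration, not LAMMPS source): OpenMP 3.1 predetermined const-qualified variables as shared, so compilers following that dialect reject an explicit shared() clause for them, while compilers with strict OpenMP 4.0 semantics (GCC 9+, Clang 10+) drop the predetermination and require the clause under default(none). A minimal C++ sketch of the incompatibility that the LAMMPS_OMP_COMPAT=4 definition lets the sources switch on:

// hypothetical stand-alone example; compile with -fopenmp, with or
// without -DLAMMPS_OMP_COMPAT=4, to see the two dialects
#include <cstdio>

int main() {
  const int n = 4;  // predetermined "shared" in OpenMP 3.1, but not in 4.0
#if defined(LAMMPS_OMP_COMPAT) && (LAMMPS_OMP_COMPAT >= 4)
  // GCC 9+/Clang 10+: default(none) requires n to be listed explicitly
  #pragma omp parallel for default(none) shared(n)
#else
  // OpenMP 3.1 compilers reject shared(n) here, since n is predetermined shared
  #pragma omp parallel for default(none)
#endif
  for (int i = 0; i < n; ++i) std::printf("iteration %d\n", i);
  return 0;
}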
@@ -249,6 +249,26 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
endif()
endif()
#######################################
# add custom target for IWYU analysis
#######################################
set(ENABLE_IWYU OFF CACHE BOOL "Add 'iwyu' build target to call the include-what-you-use tool")
mark_as_advanced(ENABLE_IWYU)
if(ENABLE_IWYU)
find_program(IWYU_EXE NAMES include-what-you-use iwyu)
find_program(IWYU_TOOL NAMES iwyu_tool iwyu-tool iwyu_tool.py)
if (IWYU_EXE AND IWYU_TOOL)
add_custom_target(
iwyu
${IWYU_TOOL} -o clang -p ${CMAKE_CURRENT_BINARY_DIR} -- -Xiwyu --mapping_file=${CMAKE_CURRENT_SOURCE_DIR}/iwyu/iwyu-extra-map.imp
COMMENT "Running IWYU")
add_dependencies(iwyu lammps)
else()
message(FATAL_ERROR "To use IWYU you need the include-what-you-use/iwyu executable"
"and the iwyu-tool/iwyu_tool script installed in your PATH")
endif()
endif()
set(ENABLE_SANITIZER "none" CACHE STRING "Select a code sanitizer option (none (default), address, leak, thread, undefined)")
mark_as_advanced(ENABLE_SANITIZER)
set(ENABLE_SANITIZER_VALUES none address leak thread undefined)
@@ -293,14 +313,13 @@ if(PKG_MSCG OR PKG_USER-ATC OR PKG_USER-AWPMD OR PKG_USER-QUIP OR PKG_LATTE)
endif()
endif()
find_package(JPEG QUIET)
option(WITH_JPEG "Enable JPEG support" ${JPEG_FOUND})
if(WITH_JPEG)
find_package(JPEG REQUIRED)
target_compile_definitions(lammps PRIVATE -DLAMMPS_JPEG)
if(CMAKE_VERSION VERSION_LESS 3.12)
target_include_directories(lammps PRIVATE ${JPEG_INCLUDE_DIR})
target_include_directories(lammps PRIVATE ${JPEG_INCLUDE_DIRS})
target_link_libraries(lammps PRIVATE ${JPEG_LIBRARIES})
else()
target_link_libraries(lammps PRIVATE JPEG::JPEG)
@@ -325,20 +344,22 @@ find_program(GZIP_EXECUTABLE gzip)
find_package_handle_standard_args(GZIP REQUIRED_VARS GZIP_EXECUTABLE)
option(WITH_GZIP "Enable GZIP support" ${GZIP_FOUND})
if(WITH_GZIP)
if(NOT GZIP_FOUND)
if(GZIP_FOUND OR ((CMAKE_SYSTEM_NAME STREQUAL Windows) AND CMAKE_CROSSCOMPILING))
target_compile_definitions(lammps PRIVATE -DLAMMPS_GZIP)
else()
message(FATAL_ERROR "gzip executable not found")
endif()
target_compile_definitions(lammps PRIVATE -DLAMMPS_GZIP)
endif()
find_program(FFMPEG_EXECUTABLE ffmpeg)
find_package_handle_standard_args(FFMPEG REQUIRED_VARS FFMPEG_EXECUTABLE)
option(WITH_FFMPEG "Enable FFMPEG support" ${FFMPEG_FOUND})
if(WITH_FFMPEG)
if(NOT FFMPEG_FOUND)
if(FFMPEG_FOUND OR ((CMAKE_SYSTEM_NAME STREQUAL Windows) AND CMAKE_CROSSCOMPILING))
target_compile_definitions(lammps PRIVATE -DLAMMPS_FFMPEG)
else()
message(FATAL_ERROR "ffmpeg executable not found")
endif()
target_compile_definitions(lammps PRIVATE -DLAMMPS_FFMPEG)
endif()
if(BUILD_SHARED_LIBS)
@@ -358,6 +379,13 @@ foreach(PKG_WITH_INCL KSPACE PYTHON VORONOI USER-COLVARS USER-MOLFILE USER-NETCD
endif()
endforeach()
# optionally enable building script wrappers using swig
option(WITH_SWIG "Build scripting language wrappers with SWIG" OFF)
if(WITH_SWIG)
get_filename_component(LAMMPS_SWIG_DIR ${LAMMPS_SOURCE_DIR}/../tools/swig ABSOLUTE)
add_subdirectory(${LAMMPS_SWIG_DIR} swig)
endif()
set(CMAKE_TUNE_FLAGS "${CMAKE_TUNE_DEFAULT}" CACHE STRING "Compiler and machine specific optimization flags (compilation only)")
separate_arguments(CMAKE_TUNE_FLAGS)
foreach(_FLAG ${CMAKE_TUNE_FLAGS})
@@ -556,14 +584,14 @@ if (${_index} GREATER -1)
endif()
set(LAMMPS_CXX_HEADERS angle.h atom.h bond.h citeme.h comm.h compute.h dihedral.h domain.h error.h fix.h force.h group.h improper.h
input.h info.h kspace.h lammps.h lattice.h library.h lmppython.h lmptype.h memory.h modify.h neighbor.h neigh_list.h output.h
pair.h pointers.h region.h timer.h universe.h update.h variable.h)
pair.h pointers.h region.h timer.h universe.h update.h utils.h variable.h)
if(LAMMPS_EXCEPTIONS)
list(APPEND LAMMPS_CXX_HEADERS exceptions.h)
endif()
set_target_properties(lammps PROPERTIES OUTPUT_NAME lammps${LAMMPS_MACHINE})
set_target_properties(lammps PROPERTIES SOVERSION ${SOVERSION})
target_include_directories(lammps PUBLIC $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
target_include_directories(lammps PUBLIC $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/lammps>)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps)
foreach(_HEADER ${LAMMPS_CXX_HEADERS})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER} COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LAMMPS_SOURCE_DIR}/${_HEADER} ${CMAKE_CURRENT_BINARY_DIR}/includes/lammps/${_HEADER} DEPENDS ${LAMMPS_SOURCE_DIR}/${_HEADER})
@@ -592,36 +620,7 @@ if(BUILD_SHARED_LIBS)
endif()
install(FILES ${LAMMPS_DOC_DIR}/lammps.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 RENAME ${LAMMPS_BINARY}.1)
if(BUILD_TOOLS)
add_executable(binary2txt ${LAMMPS_TOOLS_DIR}/binary2txt.cpp)
target_compile_definitions(binary2txt PRIVATE -DLAMMPS_${LAMMPS_SIZES})
install(TARGETS binary2txt DESTINATION ${CMAKE_INSTALL_BINDIR})
include(CheckGeneratorSupport)
if(CMAKE_GENERATOR_SUPPORT_FORTRAN)
include(CheckLanguage)
check_language(Fortran)
if(CMAKE_Fortran_COMPILER)
enable_language(Fortran)
add_executable(chain.x ${LAMMPS_TOOLS_DIR}/chain.f)
target_link_libraries(chain.x PRIVATE ${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES})
install(TARGETS chain.x DESTINATION ${CMAKE_INSTALL_BINDIR})
else()
message(WARNING "No suitable Fortran compiler found, skipping building 'chain.x'")
endif()
else()
message(WARNING "CMake build doesn't support fortran, skipping building 'chain.x'")
endif()
enable_language(C)
get_filename_component(MSI2LMP_SOURCE_DIR ${LAMMPS_TOOLS_DIR}/msi2lmp/src ABSOLUTE)
file(GLOB MSI2LMP_SOURCES ${MSI2LMP_SOURCE_DIR}/[^.]*.c)
add_executable(msi2lmp ${MSI2LMP_SOURCES})
target_link_libraries(msi2lmp PRIVATE ${MATH_LIBRARIES})
install(TARGETS msi2lmp DESTINATION ${CMAKE_INSTALL_BINDIR})
install(FILES ${LAMMPS_DOC_DIR}/msi2lmp.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
endif()
include(Tools)
include(Documentation)
###############################################################################
@@ -704,23 +703,31 @@ include(Testing)
include(CodeCoverage)
include(CodingStandard)
###############################################################################
# Print package summary
###############################################################################
foreach(PKG ${STANDARD_PACKAGES} ${SUFFIX_PACKAGES})
if(PKG_${PKG})
message(STATUS "Building package: ${PKG}")
endif()
endforeach()
get_target_property(DEFINES lammps COMPILE_DEFINITIONS)
include(FeatureSummary)
feature_summary(DESCRIPTION "The following tools and libraries have been found and configured:" WHAT PACKAGES_FOUND)
message(STATUS "<<< Build configuration >>>
Operating System: ${CMAKE_SYSTEM_NAME}
Build type: ${CMAKE_BUILD_TYPE}
Install path: ${CMAKE_INSTALL_PREFIX}
Generator: ${CMAKE_GENERATOR} using ${CMAKE_MAKE_PROGRAM}
-- <<< Compilers and Flags: >>>
Generator: ${CMAKE_GENERATOR} using ${CMAKE_MAKE_PROGRAM}")
###############################################################################
# Print package summary
###############################################################################
set(ENABLED_PACKAGES)
foreach(PKG ${STANDARD_PACKAGES} ${SUFFIX_PACKAGES})
if(PKG_${PKG})
list(APPEND ENABLED_PACKAGES ${PKG})
endif()
endforeach()
if(ENABLED_PACKAGES)
list(SORT ENABLED_PACKAGES)
else()
set(ENABLED_PACKAGES "<None>")
endif()
message(STATUS "Enabled packages: ${ENABLED_PACKAGES}")
message(STATUS "<<< Compilers and Flags: >>>
-- C++ Compiler: ${CMAKE_CXX_COMPILER}
Type: ${CMAKE_CXX_COMPILER_ID}
Version: ${CMAKE_CXX_COMPILER_VERSION}
@@ -816,3 +823,15 @@ endif()
if(BUILD_DOC)
message(STATUS "<<< Building HTML Manual >>>")
endif()
if(BUILD_TOOLS)
message(STATUS "<<< Building Tools >>>")
endif()
if(BUILD_LAMMPS_SHELL)
message(STATUS "<<< Building LAMMPS Shell >>>")
endif()
if(ENABLE_TESTING)
message(STATUS "<<< Building Unit Tests >>>")
if(ENABLE_COVERAGE)
message(STATUS "Collecting code coverage data")
endif()
endif()

View File

@@ -3,11 +3,16 @@
#
# Requires latest gcovr (for GCC 8.1 support):
# pip install git+https://github.com/gcovr/gcovr.git
#
# For Python coverage the coverage package needs to be installed
###############################################################################
if(ENABLE_COVERAGE)
find_program(GCOVR_BINARY gcovr)
find_package_handle_standard_args(GCOVR DEFAULT_MSG GCOVR_BINARY)
find_program(COVERAGE_BINARY coverage)
find_package_handle_standard_args(COVERAGE DEFAULT_MSG COVERAGE_BINARY)
if(GCOVR_FOUND)
get_filename_component(ABSOLUTE_LAMMPS_SOURCE_DIR ${LAMMPS_SOURCE_DIR} ABSOLUTE)
@@ -46,4 +51,30 @@ if(ENABLE_COVERAGE)
)
add_dependencies(reset_coverage clean_coverage_html)
endif()
if(COVERAGE_FOUND)
set(PYTHON_COVERAGE_HTML_DIR ${CMAKE_BINARY_DIR}/python_coverage_html)
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/unittest/python/.coverage
COMMAND ${COVERAGE_BINARY} combine
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/unittest/python
COMMENT "Combine Python coverage files..."
)
add_custom_target(
gen_python_coverage_html
COMMAND ${COVERAGE_BINARY} html -d ${PYTHON_COVERAGE_HTML_DIR}
DEPENDS ${CMAKE_BINARY_DIR}/unittest/python/.coverage
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/unittest/python
COMMENT "Generating HTML Python coverage report..."
)
add_custom_target(
gen_python_coverage_xml
COMMAND ${COVERAGE_BINARY} xml -o ${CMAKE_BINARY_DIR}/python_coverage.xml
DEPENDS ${CMAKE_BINARY_DIR}/unittest/python/.coverage
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/unittest/python
COMMENT "Generating XML Python coverage report..."
)
endif()
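# Usage sketch (assumes the python unit tests were executed under 'coverage'
# so that .coverage data files exist in unittest/python):
#   cmake --build . --target gen_python_coverage_html
#   cmake --build . --target gen_python_coverage_xml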
endif()

View File

@@ -15,75 +15,93 @@ if(BUILD_DOC)
endif()
set(VIRTUALENV ${Python3_EXECUTABLE} -m virtualenv -p ${Python3_EXECUTABLE})
endif()
find_package(Doxygen 1.8.10 REQUIRED)
file(GLOB DOC_SOURCES ${LAMMPS_DOC_DIR}/src/[^.]*.rst)
add_custom_command(
OUTPUT docenv
COMMAND ${VIRTUALENV} docenv
)
set(DOCENV_BINARY_DIR ${CMAKE_BINARY_DIR}/docenv/bin)
set(DOCENV_REQUIREMENTS_FILE ${LAMMPS_DOC_DIR}/utils/requirements.txt)
set(SPHINX_CONFIG_DIR ${LAMMPS_DOC_DIR}/utils/sphinx-config)
set(SPHINX_CONFIG_FILE_TEMPLATE ${SPHINX_CONFIG_DIR}/conf.py.in)
set(SPHINX_STATIC_DIR ${SPHINX_CONFIG_DIR}/_static)
# configuration and static files are copied to binary dir to avoid collisions with parallel builds
set(DOC_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/doc)
set(DOC_BUILD_CONFIG_FILE ${DOC_BUILD_DIR}/conf.py)
set(DOC_BUILD_STATIC_DIR ${DOC_BUILD_DIR}/_static)
set(DOXYGEN_BUILD_DIR ${DOC_BUILD_DIR}/doxygen)
set(DOXYGEN_XML_DIR ${DOXYGEN_BUILD_DIR}/xml)
# copy entire configuration folder to doc build directory
# files in _static are automatically copied during sphinx-build, so no need to copy them individually
file(COPY ${SPHINX_CONFIG_DIR}/ DESTINATION ${DOC_BUILD_DIR})
# configure paths in conf.py, since relative paths change when file is copied
configure_file(${SPHINX_CONFIG_FILE_TEMPLATE} ${DOC_BUILD_CONFIG_FILE})
add_custom_command(
OUTPUT requirements.txt
DEPENDS docenv
COMMAND ${CMAKE_COMMAND} -E copy ${LAMMPS_DOC_DIR}/utils/requirements.txt requirements.txt
OUTPUT ${DOC_BUILD_DIR}/requirements.txt
DEPENDS docenv ${DOCENV_REQUIREMENTS_FILE}
COMMAND ${CMAKE_COMMAND} -E copy ${DOCENV_REQUIREMENTS_FILE} ${DOC_BUILD_DIR}/requirements.txt
COMMAND ${DOCENV_BINARY_DIR}/pip install --upgrade pip
COMMAND ${DOCENV_BINARY_DIR}/pip install --upgrade ${LAMMPS_DOC_DIR}/utils/converters
COMMAND ${DOCENV_BINARY_DIR}/pip install --use-feature=2020-resolver -r requirements.txt --upgrade
COMMAND ${DOCENV_BINARY_DIR}/pip install --use-feature=2020-resolver -r ${DOC_BUILD_DIR}/requirements.txt --upgrade
)
# download mathjax distribution and unpack to folder "mathjax"
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/mathjax/es5)
if(NOT EXISTS ${DOC_BUILD_STATIC_DIR}/mathjax/es5)
file(DOWNLOAD "https://github.com/mathjax/MathJax/archive/3.0.5.tar.gz"
"${CMAKE_CURRENT_BINARY_DIR}/mathjax.tar.gz"
EXPECTED_MD5 5d9d3799cce77a1a95eee6be04eb68e7)
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xzf mathjax.tar.gz WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
file(GLOB MATHJAX_VERSION_DIR ${CMAKE_CURRENT_BINARY_DIR}/MathJax-*)
execute_process(COMMAND ${CMAKE_COMMAND} -E rename ${MATHJAX_VERSION_DIR} ${CMAKE_CURRENT_BINARY_DIR}/mathjax)
execute_process(COMMAND ${CMAKE_COMMAND} -E rename ${MATHJAX_VERSION_DIR} ${DOC_BUILD_STATIC_DIR}/mathjax)
endif()
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html/_static/mathjax)
file(COPY ${CMAKE_CURRENT_BINARY_DIR}/mathjax/es5 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/html/_static/mathjax/)
# for increased browser compatibility
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/html/_static/polyfill.js)
if(NOT EXISTS ${DOC_BUILD_STATIC_DIR}/polyfill.js)
file(DOWNLOAD "https://polyfill.io/v3/polyfill.min.js?features=es6"
"${CMAKE_CURRENT_BINARY_DIR}/html/_static/polyfill.js")
"${DOC_BUILD_STATIC_DIR}/polyfill.js")
endif()
# note, this may run in parallel with other tasks, so we must not use multiple processes here
# set up doxygen and add targets to run it
file(MAKE_DIRECTORY ${DOXYGEN_BUILD_DIR})
file(COPY ${LAMMPS_DOC_DIR}/doxygen/lammps-logo.png DESTINATION ${DOXYGEN_BUILD_DIR})
configure_file(${LAMMPS_DOC_DIR}/doxygen/Doxyfile.in ${DOXYGEN_BUILD_DIR}/Doxyfile)
get_target_property(LAMMPS_SOURCES lammps SOURCES)
add_custom_command(
OUTPUT html
DEPENDS ${DOC_SOURCES} docenv requirements.txt
COMMAND ${DOCENV_BINARY_DIR}/sphinx-build -b html -c ${LAMMPS_DOC_DIR}/utils/sphinx-config -d ${CMAKE_BINARY_DIR}/doctrees ${LAMMPS_DOC_DIR}/src html
COMMAND ${CMAKE_COMMAND} -E create_symlink Manual.html ${CMAKE_CURRENT_BINARY_DIR}/html/index.html
OUTPUT ${DOXYGEN_XML_DIR}/index.xml
DEPENDS ${DOC_SOURCES} ${LAMMPS_SOURCES}
COMMAND Doxygen::doxygen ${DOXYGEN_BUILD_DIR}/Doxyfile WORKING_DIRECTORY ${DOXYGEN_BUILD_DIR}
COMMAND ${CMAKE_COMMAND} -E touch ${DOXYGEN_XML_DIR}/run.stamp
)
# copy selected image files to html output tree
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/html/JPG)
set(HTML_EXTRA_IMAGES balance_nonuniform.jpg balance_rcb.jpg
balance_uniform.jpg bow_tutorial_01.png bow_tutorial_02.png
bow_tutorial_03.png bow_tutorial_04.png bow_tutorial_05.png
dump1.jpg dump2.jpg examples_mdpd.gif gran_funnel.png gran_mixer.png
hop1.jpg hop2.jpg saed_ewald_intersect.jpg saed_mesh.jpg
screenshot_atomeye.jpg screenshot_gl.jpg screenshot_pymol.jpg
screenshot_vmd.jpg sinusoid.jpg xrd_mesh.jpg)
set(HTML_IMAGE_TARGETS "")
foreach(_IMG ${HTML_EXTRA_IMAGES})
string(PREPEND _IMG JPG/)
list(APPEND HTML_IMAGE_TARGETS "${CMAKE_CURRENT_BINARY_DIR}/html/${_IMG}")
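# run.stamp is touched whenever doxygen has rerun (see the command above); its
# presence forces a full sphinx environment rebuild via -E so that pages
# embedding doxygen XML are regenerated, and the html command below removes
# the stamp again after sphinx has run.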
if(EXISTS ${DOXYGEN_XML_DIR}/run.stamp)
set(SPHINX_EXTRA_OPTS "-E")
else()
set(SPHINX_EXTRA_OPTS "")
endif()
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/${_IMG}
DEPENDS ${LAMMPS_DOC_DIR}/src/${_IMG} ${CMAKE_CURRENT_BINARY_DIR}/html/JPG
COMMAND ${CMAKE_COMMAND} -E copy ${LAMMPS_DOC_DIR}/src/${_IMG} ${CMAKE_BINARY_DIR}/html/${_IMG}
OUTPUT html
DEPENDS ${DOC_SOURCES} docenv ${DOC_BUILD_DIR}/requirements.txt ${DOXYGEN_XML_DIR}/index.xml ${DOC_BUILD_CONFIG_FILE}
COMMAND ${DOCENV_BINARY_DIR}/sphinx-build ${SPHINX_EXTRA_OPTS} -b html -c ${DOC_BUILD_DIR} -d ${DOC_BUILD_DIR}/doctrees ${LAMMPS_DOC_DIR}/src ${DOC_BUILD_DIR}/html
COMMAND ${CMAKE_COMMAND} -E create_symlink Manual.html ${DOC_BUILD_DIR}/html/index.html
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LAMMPS_DOC_DIR}/src/PDF ${DOC_BUILD_DIR}/html/PDF
COMMAND ${CMAKE_COMMAND} -E remove -f ${DOXYGEN_XML_DIR}/run.stamp
)
endforeach()
add_custom_target(
doc ALL
DEPENDS html ${CMAKE_CURRENT_BINARY_DIR}/html/_static/mathjax/es5 ${HTML_IMAGE_TARGETS}
DEPENDS html ${DOC_BUILD_STATIC_DIR}/mathjax/es5
SOURCES ${LAMMPS_DOC_DIR}/utils/requirements.txt ${DOC_SOURCES}
)
install(DIRECTORY ${CMAKE_BINARY_DIR}/html DESTINATION ${CMAKE_INSTALL_DOCDIR})
install(DIRECTORY ${DOC_BUILD_DIR}/html DESTINATION ${CMAKE_INSTALL_DOCDIR})
endif()

View File

@@ -1,3 +1,3 @@
# utility script to call GenerateBinaryHeader function
include(${SOURCE_DIR}/Modules/LAMMPSUtils.cmake)
GenerateBinaryHeader(${VARNAME} ${HEADER_FILE} ${SOURCE_FILES})
GenerateBinaryHeader(${VARNAME} ${HEADER_FILE} ${SOURCE_FILE})

View File

@@ -71,19 +71,15 @@ macro(pkg_depends PKG1 PKG2)
endmacro()
# CMake-only replacement for bin2c and xxd
function(GenerateBinaryHeader varname outfile files)
function(GenerateBinaryHeader varname outfile infile)
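# varname: C identifier for the generated array, outfile: header file to be
# written, infile: the single binary file to embed (the removed loop below
# shows the old variadic form that accepted multiple input files)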
message("Creating ${outfile}...")
file(WRITE ${outfile} "// CMake generated file\n")
math(EXPR ARG_END "${ARGC}-1")
foreach(IDX RANGE 2 ${ARG_END})
list(GET ARGV ${IDX} filename)
file(READ ${filename} content HEX)
file(READ ${infile} content HEX)
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," content "${content}")
string(REGEX REPLACE ",$" "" content "${content}")
file(APPEND ${outfile} "const unsigned char ${varname}[] = { ${content} };\n")
file(APPEND ${outfile} "const unsigned int ${varname}_size = sizeof(${varname});\n")
endforeach()
endfunction(GenerateBinaryHeader)
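# Illustrative call with hypothetical file names: writes kernel_cubin.h
# declaring 'const unsigned char kernel[]' filled with the bytes of kernel.cubin:
# GenerateBinaryHeader(kernel ${CMAKE_BINARY_DIR}/kernel_cubin.h ${CMAKE_BINARY_DIR}/kernel.cubin)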
# fetch missing potential files

View File

@@ -1,2 +1,10 @@
find_package(ZLIB REQUIRED)
target_link_libraries(lammps PRIVATE ZLIB::ZLIB)
find_package(PkgConfig REQUIRED)
pkg_check_modules(Zstd IMPORTED_TARGET libzstd>=1.4)
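# IMPORTED_TARGET publishes the pkg-config result as PkgConfig::Zstd, which
# carries the include directories and link flags; zstd support remains
# optional because REQUIRED is not passed.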
if(Zstd_FOUND)
target_compile_definitions(lammps PRIVATE -DLAMMPS_ZSTD)
target_link_libraries(lammps PRIVATE PkgConfig::Zstd)
endif()

View File

@@ -75,7 +75,7 @@ if(GPU_API STREQUAL "CUDA")
endif()
# Kepler (GPU Arch 3.5) is supported by CUDA 5 to CUDA 11
if((CUDA_VERSION VERSION_GREATER_EQUAL "5.0") AND (CUDA_VERSION VERSION_LESS "12.0"))
string(APPEND GPU_CUDA_GENCODE " -gencode arch=compute_30,code=[sm_30,compute_30] -gencode arch=compute_35,code=[sm_35,compute_35]")
string(APPEND GPU_CUDA_GENCODE " -gencode arch=compute_35,code=[sm_35,compute_35]")
endif()
# Maxwell (GPU Arch 5.x) is supported by CUDA 6 and later
if(CUDA_VERSION VERSION_GREATER_EQUAL "6.0")
@@ -309,7 +309,7 @@ elseif(GPU_API STREQUAL "HIP")
endif()
add_custom_command(OUTPUT ${CUBIN_H_FILE}
COMMAND ${CMAKE_COMMAND} -D SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -D VARNAME=${CU_NAME} -D HEADER_FILE=${CUBIN_H_FILE} -D SOURCE_FILES=${CUBIN_FILE} -P ${CMAKE_CURRENT_SOURCE_DIR}/Modules/GenerateBinaryHeader.cmake
COMMAND ${CMAKE_COMMAND} -D SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -D VARNAME=${CU_NAME} -D HEADER_FILE=${CUBIN_H_FILE} -D SOURCE_FILE=${CUBIN_FILE} -P ${CMAKE_CURRENT_SOURCE_DIR}/Modules/GenerateBinaryHeader.cmake
DEPENDS ${CUBIN_FILE}
COMMENT "Generating ${CU_NAME}_cubin.h")

View File

@@ -35,8 +35,8 @@ if(DOWNLOAD_KOKKOS)
list(APPEND KOKKOS_LIB_BUILD_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}")
include(ExternalProject)
ExternalProject_Add(kokkos_build
URL https://github.com/kokkos/kokkos/archive/3.1.01.tar.gz
URL_MD5 3ccb2100f7fc316891e7dad3bc33fa37
URL https://github.com/kokkos/kokkos/archive/3.2.00.tar.gz
URL_MD5 81569170fe232e5e64ab074f7cca5e50
CMAKE_ARGS ${KOKKOS_LIB_BUILD_ARGS}
BUILD_BYPRODUCTS <INSTALL_DIR>/lib/libkokkoscore.a
)
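# The tarball URL/MD5 above and the find_package() minimum version below must
# be bumped together whenever the bundled Kokkos is updated; e.g. a shared
# set(KOKKOS_VERSION 3.2.00) consumed in both places would keep them from
# drifting apart (illustrative suggestion only).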
@@ -50,7 +50,7 @@ if(DOWNLOAD_KOKKOS)
target_link_libraries(lammps PRIVATE LAMMPS::KOKKOS)
add_dependencies(LAMMPS::KOKKOS kokkos_build)
elseif(EXTERNAL_KOKKOS)
find_package(Kokkos 3.1.01 REQUIRED CONFIG)
find_package(Kokkos 3.2.00 REQUIRED CONFIG)
target_link_libraries(lammps PRIVATE Kokkos::kokkos)
else()
set(LAMMPS_LIB_KOKKOS_SRC_DIR ${LAMMPS_LIB_SOURCE_DIR}/kokkos)

View File

@@ -19,14 +19,14 @@ if(FFT STREQUAL "FFTW3")
find_package(${FFTW} REQUIRED)
target_compile_definitions(lammps PRIVATE -DFFT_FFTW3)
target_link_libraries(lammps PRIVATE ${FFTW}::${FFTW})
if(FFTW3_OMP_LIBRARY OR FFTW3F_OMP_LIBRARY)
if(FFTW3_OMP_LIBRARIES OR FFTW3F_OMP_LIBRARIES)
option(FFT_FFTW_THREADS "Use threaded FFTW library" ON)
else()
option(FFT_FFTW_THREADS "Use threaded FFT library" OFF)
endif()
if(FFT_FFTW_THREADS)
if(FFTW3_OMP_LIBRARY OR FFTW3F_OMP_LIBRARY)
if(FFTW3_OMP_LIBRARIES OR FFTW3F_OMP_LIBRARIES)
target_compile_definitions(lammps PRIVATE -DFFT_FFTW_THREADS)
target_link_libraries(lammps PRIVATE ${FFTW}::${FFTW}_OMP)
else()

View File

@@ -38,7 +38,7 @@ if(DOWNLOAD_MSCG)
else()
find_package(MSCG)
if(NOT MSCG_FOUND)
message(FATAL_ERROR "MSCG not found, help CMake to find it by setting MSCG_LIBRARY and MSCG_INCLUDE_DIRS, or set DOWNLOAD_MSCG=ON to download it")
message(FATAL_ERROR "MSCG not found, help CMake to find it by setting MSCG_LIBRARY and MSCG_INCLUDE_DIR, or set DOWNLOAD_MSCG=ON to download it")
endif()
target_link_libraries(lammps PRIVATE MSCG::MSCG)
endif()

View File

@@ -1,7 +1,7 @@
if(CMAKE_VERSION VERSION_LESS 3.12)
find_package(PythonLibs REQUIRED) # Deprecated since version 3.12
target_include_directories(lammps PRIVATE ${PYTHON_INCLUDE_DIR})
target_link_libraries(lammps PRIVATE ${PYTHON_LIBRARY})
target_include_directories(lammps PRIVATE ${PYTHON_INCLUDE_DIRS})
target_link_libraries(lammps PRIVATE ${PYTHON_LIBRARIES})
else()
find_package(Python REQUIRED COMPONENTS Development)
target_link_libraries(lammps PRIVATE Python::Python)
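# Python::Python is the imported target provided by FindPython (CMake >= 3.12)
# for the embeddable libpython; only the Development component is requested
# because no interpreter needs to run at build time.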

View File

@@ -2,6 +2,8 @@ set(COLVARS_SOURCE_DIR ${LAMMPS_LIB_SOURCE_DIR}/colvars)
file(GLOB COLVARS_SOURCES ${COLVARS_SOURCE_DIR}/[^.]*.cpp)
option(COLVARS_DEBUG "Debugging messages for Colvars (quite verbose)" OFF)
# Build Lepton by default
option(COLVARS_LEPTON "Build and link the Lepton library" ON)
@@ -16,11 +18,18 @@ if(COLVARS_LEPTON)
endif()
add_library(colvars STATIC ${COLVARS_SOURCES})
target_compile_definitions(colvars PRIVATE -DLAMMPS_${LAMMPS_SIZES})
target_compile_definitions(colvars PRIVATE -DCOLVARS_LAMMPS)
set_target_properties(colvars PROPERTIES OUTPUT_NAME lammps_colvars${LAMMPS_MACHINE})
target_include_directories(colvars PUBLIC ${LAMMPS_LIB_SOURCE_DIR}/colvars)
# The line below is needed to locate math_eigen_impl.h
target_include_directories(colvars PRIVATE ${LAMMPS_SOURCE_DIR})
target_link_libraries(lammps PRIVATE colvars)
if(COLVARS_DEBUG)
# Need to export the macro publicly to also affect the proxy
target_compile_definitions(colvars PUBLIC -DCOLVARS_DEBUG)
endif()
if(COLVARS_LEPTON)
target_link_libraries(lammps PRIVATE lepton)
target_compile_definitions(colvars PRIVATE -DLEPTON)

View File

@@ -1,4 +1,5 @@
set(MOLFILE_INCLUDE_DIRS "${LAMMPS_LIB_SOURCE_DIR}/molfile" CACHE STRING "Path to VMD molfile plugin headers")
set(MOLFILE_INCLUDE_DIR "${LAMMPS_LIB_SOURCE_DIR}/molfile" CACHE STRING "Path to VMD molfile plugin headers")
set(MOLFILE_INCLUDE_DIRS "${MOLFILE_INCLUDE_DIR}")
add_library(molfile INTERFACE)
target_include_directories(molfile INTERFACE ${MOLFILE_INCLUDE_DIRS})
# no need to link with -ldl on windows

View File

@@ -55,8 +55,8 @@ if(DOWNLOAD_PLUMED)
endif()
include(ExternalProject)
ExternalProject_Add(plumed_build
URL https://github.com/plumed/plumed2/releases/download/v2.6.0/plumed-src-2.6.0.tgz
URL_MD5 204d2edae58d9b10ba3ad460cad64191
URL https://github.com/plumed/plumed2/releases/download/v2.6.1/plumed-src-2.6.1.tgz
URL_MD5 89a9a450fc6025299fe16af235957163
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND <SOURCE_DIR>/configure --prefix=<INSTALL_DIR>
${CONFIGURE_REQUEST_PIC}

View File

@@ -59,7 +59,7 @@ function(CreateStyleHeader path filename)
set(temp "${temp}#include \"${FNAME}\"\n")
endforeach()
endif()
message(STATUS "Generating ${filename}...")
file(WRITE "${path}/${filename}.tmp" "${temp}" )
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${path}/${filename}.tmp" "${path}/${filename}")
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "${path}/${filename}")
@@ -142,6 +141,7 @@ function(RegisterStylesExt search_path extension sources)
endfunction(RegisterStylesExt)
function(GenerateStyleHeaders output_path)
message(STATUS "Generating style headers...")
GenerateStyleHeader(${output_path} ANGLE angle ) # force
GenerateStyleHeader(${output_path} ATOM_VEC atom ) # atom atom_vec_hybrid
GenerateStyleHeader(${output_path} BODY body ) # atom_vec_body
@@ -232,7 +232,6 @@ function(CreatePackagesHeader path filename)
set(temp "${temp}#include \"${DNAME}/${FNAME}\"\n")
endforeach()
endif()
message(STATUS "Generating ${filename}...")
file(WRITE "${path}/${filename}.tmp" "${temp}" )
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${path}/${filename}.tmp" "${path}/${filename}")
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "${path}/${filename}")
@@ -244,6 +243,7 @@ function(GeneratePackagesHeader path property style)
endfunction(GeneratePackagesHeader)
function(GeneratePackagesHeaders output_path)
message(STATUS "Generating package headers...")
GeneratePackagesHeader(${output_path} PKGANGLE angle ) # force
GeneratePackagesHeader(${output_path} PKGATOM_VEC atom ) # atom atom_vec_hybrid
GeneratePackagesHeader(${output_path} PKGBODY body ) # atom_vec_body

cmake/Modules/Tools.cmake (new file)
View File

@@ -0,0 +1,46 @@
if(BUILD_TOOLS)
add_executable(binary2txt ${LAMMPS_TOOLS_DIR}/binary2txt.cpp)
target_compile_definitions(binary2txt PRIVATE -DLAMMPS_${LAMMPS_SIZES})
install(TARGETS binary2txt DESTINATION ${CMAKE_INSTALL_BINDIR})
include(CheckGeneratorSupport)
if(CMAKE_GENERATOR_SUPPORT_FORTRAN)
include(CheckLanguage)
check_language(Fortran)
if(CMAKE_Fortran_COMPILER)
enable_language(Fortran)
add_executable(chain.x ${LAMMPS_TOOLS_DIR}/chain.f)
target_link_libraries(chain.x PRIVATE ${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES})
install(TARGETS chain.x DESTINATION ${CMAKE_INSTALL_BINDIR})
else()
message(WARNING "No suitable Fortran compiler found, skipping build of 'chain.x'")
endif()
else()
message(WARNING "CMake build doesn't support fortran, skipping build of 'chain.x'")
endif()
enable_language(C)
get_filename_component(MSI2LMP_SOURCE_DIR ${LAMMPS_TOOLS_DIR}/msi2lmp/src ABSOLUTE)
file(GLOB MSI2LMP_SOURCES ${MSI2LMP_SOURCE_DIR}/[^.]*.c)
add_executable(msi2lmp ${MSI2LMP_SOURCES})
target_link_libraries(msi2lmp PRIVATE ${MATH_LIBRARIES})
install(TARGETS msi2lmp DESTINATION ${CMAKE_INSTALL_BINDIR})
install(FILES ${LAMMPS_DOC_DIR}/msi2lmp.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
endif()
if(BUILD_LAMMPS_SHELL)
find_package(PkgConfig REQUIRED)
pkg_check_modules(READLINE IMPORTED_TARGET REQUIRED readline)
if(NOT LAMMPS_EXCEPTIONS)
message(WARNING "The LAMMPS shell needs LAMMPS_EXCEPTIONS enabled for full functionality")
endif()
add_executable(lammps-shell ${LAMMPS_TOOLS_DIR}/lammps-shell/lammps-shell.cpp)
# workaround for broken readline pkg-config file on FreeBSD
if(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
target_include_directories(lammps-shell PRIVATE /usr/local/include)
endif()
target_link_libraries(lammps-shell PRIVATE lammps PkgConfig::READLINE)
install(TARGETS lammps-shell EXPORT LAMMPS_Targets DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()
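# Usage sketch: configure with -D BUILD_TOOLS=on (and optionally
# -D BUILD_LAMMPS_SHELL=on, which requires readline found via pkg-config and
# works best with -D LAMMPS_EXCEPTIONS=on) to build and install binary2txt,
# chain.x, msi2lmp, and the LAMMPS shell.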

Some files were not shown because too many files have changed in this diff.