Merge branch 'lammps:master' into master
.gitattributes (vendored, new file, 5 changes)
@@ -0,0 +1,5 @@
.gitattributes export-ignore
.gitignore export-ignore
.github export-ignore
.lgtm.yml export-ignore
SECURITY.md export-ignore

.github/CODEOWNERS (vendored, 77 changes)
@@ -10,44 +10,54 @@ lib/molfile/* @akohlmey
lib/qmmm/* @akohlmey
lib/vtk/* @rbberger
lib/kim/* @ellio167
lib/mesont/* @iafoss

# whole packages
src/COMPRESS/* @akohlmey
src/COMPRESS/* @rbberger
src/GPU/* @ndtrung81
src/KOKKOS/* @stanmoore1
src/KIM/* @ellio167
src/LATTE/* @cnegre
src/MESSAGE/* @sjplimp
src/MLIAP/* @athomps
src/SNAP/* @athomps
src/SPIN/* @julient31
src/USER-CGDNA/* @ohenrich
src/USER-CGSDK/* @akohlmey
src/USER-COLVARS/* @giacomofiorin
src/USER-INTEL/* @wmbrownintel
src/USER-MANIFOLD/* @Pakketeretet2
src/USER-MEAMC/* @martok
src/USER-MOFFF/* @hheenen
src/USER-MOLFILE/* @akohlmey
src/USER-NETCDF/* @pastewka
src/USER-PLUMED/* @gtribello
src/USER-PHONON/* @lingtikong
src/USER-PTM/* @pmla
src/USER-OMP/* @akohlmey
src/USER-QMMM/* @akohlmey
src/USER-REAXC/* @hasanmetin
src/USER-SCAFACOS/* @rhalver
src/USER-TALLY/* @akohlmey
src/USER-UEF/* @danicholson
src/USER-VTK/* @rbberger

src/BROWNIAN/* @samueljmcameron
src/CG-DNA/* @ohenrich
src/CG-SDK/* @akohlmey
src/COLVARS/* @giacomofiorin
src/DIELECTRIC/* @ndtrung81
src/FEP/* @agiliopadua
src/ML-HDNNP/* @singraber
src/INTEL/* @wmbrownintel
src/MANIFOLD/* @Pakketeretet2
src/MDI/* @taylor-a-barnes
src/MEAM/* @martok
src/MESONT/* @iafoss
src/MOFFF/* @hheenen
src/MOLFILE/* @akohlmey
src/NETCDF/* @pastewka
src/ML-PACE/* @yury-lysogorskiy
src/PLUMED/* @gtribello
src/PHONON/* @lingtikong
src/PTM/* @pmla
src/OPENMP/* @akohlmey
src/QMMM/* @akohlmey
src/REAXFF/* @hasanmetin @stanmoore1
src/REACTION/* @jrgissing
src/SCAFACOS/* @rhalver
src/TALLY/* @akohlmey
src/UEF/* @danicholson
src/VTK/* @rbberger

# individual files in packages
src/GPU/pair_vashishta_gpu.* @andeplane
src/KOKKOS/pair_vashishta_kokkos.* @andeplane
src/MANYBODY/pair_vashishta_table.* @andeplane
src/MANYBODY/pair_atm.* @sergeylishchuk
src/USER-REACTION/fix_bond_react.* @jrgissing
src/USER-MISC/*_grem.* @dstelter92
src/USER-MISC/compute_stress_mop*.* @RomainVermorel
src/REPLICA/*_grem.* @dstelter92
src/EXTRA-COMPUTE/compute_stress_mop*.* @RomainVermorel
src/MISC/*_tracker.* @jtclemm

# core LAMMPS classes
src/lammps.* @sjplimp
@@ -71,8 +81,9 @@ src/kspace.* @sjplimp
src/lmptyp.h @sjplimp
src/library.* @sjplimp
src/main.cpp @sjplimp
src/min_*.* @sjplimp
src/memory.* @sjplimp
src/modify.* @sjplimp
src/modify.* @sjplimp @stanmoore1
src/molecule.* @sjplimp
src/my_page.h @sjplimp
src/my_pool_chunk.h @sjplimp
@@ -99,7 +110,6 @@ src/thermo.* @sjplimp
src/universe.* @sjplimp
src/update.* @sjplimp
src/variable.* @sjplimp
src/verlet.* @sjplimp
src/velocity.* @sjplimp
src/write_data.* @sjplimp
src/write_restart.* @sjplimp
@@ -111,26 +121,39 @@ src/fix_nh.* @athomps
src/info.* @akohlmey @rbberger
src/timer.* @akohlmey
src/min* @sjplimp @stanmoore1
src/utils.* @akohlmey @rbberger
src/verlet.* @sjplimp @stanmoore1
src/math_eigen_impl.h @jewettaij

# tools
tools/msi2lmp/* @akohlmey
tools/emacs/* @HaoZeke
tools/singularity/* @akohlmey @rbberger
tools/coding_standard/* @rbberger
tools/valgrind/* @akohlmey
tools/swig/* @akohlmey
tools/offline/* @rbberger

# tests
unittest/* @akohlmey @rbberger

# cmake
cmake/* @junghans @rbberger
cmake/Modules/Packages/USER-COLVARS.cmake @junghans @rbberger @giacomofiorin
cmake/Modules/Packages/COLVARS.cmake @junghans @rbberger @giacomofiorin
cmake/Modules/Packages/KIM.cmake @junghans @rbberger @ellio167
cmake/presets/*.cmake @akohlmey

# python
python/* @rbberger

# fortran
fortran/* @akohlmey

# docs
doc/utils/*/* @rbberger
doc/Makefile @rbberger
doc/README @rbberger
examples/plugin/* @akohlmey

# for releases
src/version.h @sjplimp

.github/CONTRIBUTING.md (vendored, 57 changes)
@@ -5,8 +5,9 @@ Thank you for considering contributing to the LAMMPS software project.
The following is a set of guidelines as well as explanations of policies and work flows for contributing to the LAMMPS molecular dynamics software project. These guidelines focus on submitting issues or pull requests on the LAMMPS GitHub project.

Thus please also have a look at:
* [The Section on submitting new features for inclusion in LAMMPS of the Manual](https://lammps.sandia.gov/doc/Modify_contribute.html)
* [The LAMMPS GitHub Tutorial in the Manual](http://lammps.sandia.gov/doc/Howto_github.html)
* [The guide for submitting new features in the LAMMPS manual](https://lammps.sandia.gov/doc/Modify_contribute.html)
* [The guide on programming style and requirements in the LAMMPS manual](https://lammps.sandia.gov/doc/Modify_contribute.html)
* [The GitHub tutorial in the LAMMPS manual](http://lammps.sandia.gov/doc/Howto_github.html)

## Table of Contents

@@ -26,11 +27,11 @@ __

## I don't want to read this whole thing, I just have a question!

> **Note:** Please do not file an issue to ask a general question about LAMMPS, its features, how to use specific commands, or how to perform simulations or analysis in LAMMPS. Instead post your question to the ['lammps-users' mailing list](https://lammps.sandia.gov/mail.html). You do not need to be subscribed to post to the list (but a mailing list subscription avoids having your post delayed until it is approved by a mailing list moderator). Most posts to the mailing list receive a response in less than 24 hours. Before posting to the mailing list, please read the [mailing list guidelines](https://lammps.sandia.gov/guidelines.html). Following those guidelines will help greatly in getting a helpful response. Always mention which LAMMPS version you are using.
> **Note:** Please do not file an issue to ask a general question about LAMMPS, its features, how to use specific commands, or how to perform simulations or analysis in LAMMPS. Instead post your question to either the ['lammps-users' mailing list](https://lammps.sandia.gov/mail.html) or the [LAMMPS Material Science Discourse forum](https://matsci.org/lammps). You do not need to be subscribed to post to the list (but a mailing list subscription avoids having your post delayed until it is approved by a mailing list moderator). Most posts to the mailing list receive a response in less than 24 hours. Before posting to the mailing list, please read the [mailing list guidelines](https://lammps.sandia.gov/guidelines.html). Following those guidelines will help greatly in getting a helpful response. Always mention which LAMMPS version you are using. The LAMMPS forum was recently created as part of a larger effort to build a materials science community and to have discussions that go beyond using LAMMPS. Thus the forum may also be used for discussions that would be off-topic for the mailing list; those just have to be posted to a more general category.

## How Can I Contribute?

There are several ways you can actively contribute to the LAMMPS project: you can discuss compiling and using LAMMPS, and solving LAMMPS related problems, with other LAMMPS users on the lammps-users mailing list, you can report bugs or suggest enhancements by creating issues on GitHub (or posting them to the lammps-users mailing list), and you can contribute by submitting pull requests on GitHub or e-mail your code
There are several ways you can actively contribute to the LAMMPS project: you can discuss compiling and using LAMMPS, and solving LAMMPS related problems, with other LAMMPS users on the lammps-users mailing list or the forum, you can report bugs or suggest enhancements by creating issues on GitHub (or posting them to the lammps-users mailing list or the LAMMPS Materials Science Discourse forum), and you can contribute by submitting pull requests on GitHub or e-mail your code
to one of the [LAMMPS core developers](https://lammps.sandia.gov/authors.html). As you may see from the aforementioned developer page, the LAMMPS software package includes the efforts of a very large number of contributors beyond the principal authors and maintainers.

### Discussing How To Use LAMMPS
@@ -42,6 +43,8 @@ Anyone can browse/search previous questions/answers in the archives. You do not

If you post a message and you are a subscriber, your message will appear immediately. If you are not a subscriber, your message will be moderated, which typically takes one business day. Either way, when someone replies, the reply will usually be sent to both your personal email address and the mailing list. When replying to people that responded to your post on the list, please always include the mailing list in your replies (i.e. use "Reply All" and **not** "Reply"). Responses will appear on the list in a few minutes, but it can take a few hours for postings and replies to show up in the SourceForge archive. Sending replies also to the mailing list is important, so that responses are archived and people with a similar issue can search for possible solutions in the mailing list archive.

The LAMMPS Materials Science Discourse forum was created recently to facilitate discussion not just about LAMMPS and as part of a larger effort towards building a materials science community. The forum contains a read-only sub-category with the continually updated mailing list archive, so you won't miss anything by joining only the forum and not the mailing list.

### Reporting Bugs

While developers writing code for LAMMPS are careful to test their code, LAMMPS is such a large and complex piece of software that it is impossible to test all combinations of features under all normal and not so normal circumstances. Thus bugs do happen, and if you suspect that you have encountered one, please try to document it and report it as an [Issue](https://github.com/lammps/lammps/issues) on the LAMMPS GitHub project web page. However, before reporting a bug, you need to check whether this is something that may have already been corrected. The [Latest Features and Bug Fixes in LAMMPS](https://lammps.sandia.gov/bug.html) web page lists all significant changes to LAMMPS over the years. It also tells you what the current latest development version of LAMMPS is, and you should test whether your issue still applies to that version.
@@ -60,34 +63,12 @@ To be able to submit an issue on GitHub, you have to register for an account (fo

### Contributing Code

We encourage users to submit new features or modifications for LAMMPS to the core developers so they can be added to the LAMMPS distribution. The preferred way to manage and coordinate this is by submitting a pull request at the LAMMPS project on GitHub. For any larger modifications or programming project, you are encouraged to contact the LAMMPS developers ahead of time, in order to discuss implementation strategies and coding guidelines that will make it easier to integrate your contribution and result in less work for everybody involved. You are also encouraged to search through the list of open issues on GitHub and submit a new issue for a planned feature, so you would not duplicate the work of others (and possibly get scooped by them) or have your work duplicated by others.
We encourage users to submit new features or modifications for LAMMPS. Instructions, guidelines, requirements, and recommendations are in the following sections of the LAMMPS manual:
* [The guide for submitting new features in the LAMMPS manual](https://lammps.sandia.gov/doc/Modify_contribute.html)
* [The guide on programming style and requirements in the LAMMPS manual](https://lammps.sandia.gov/doc/Modify_contribute.html)
* [The GitHub tutorial in the LAMMPS manual](http://lammps.sandia.gov/doc/Howto_github.html)

How quickly your contribution will be integrated depends largely on how much effort it takes to integrate and test it, how many changes to the core code base it requires, and how much interest it holds for the larger LAMMPS community. Please see below for a checklist of typical requirements. Once you have prepared everything, see [this tutorial](https://lammps.sandia.gov/doc/Howto_github.html) for instructions on how to submit your changes or new files through a GitHub pull request.

Here is a checklist of steps you need to follow to submit a single file or user package for our consideration. Following these steps will save both you and us time. See existing files in packages in the source directory for examples. If you are uncertain, please ask on the lammps-users mailing list.

* All source files you provide must compile with the most current version of LAMMPS in multiple configurations. In particular you need to test compiling LAMMPS from scratch with `-DLAMMPS_BIGBIG` set in addition to the default `-DLAMMPS_SMALLBIG` setting. Your code will need to work correctly in serial and in parallel using MPI (see the sketch after this list).
* For consistency with the rest of LAMMPS, and especially if you want your contribution(s) to be added to the main LAMMPS code or one of its standard packages, it needs to be written in a style compatible with other LAMMPS source files. This means: 2-character indentation per level, no tabs, no trailing whitespace, no lines over 80 characters. I/O is done via the C-style stdio library, style class header files should not import any system headers, STL containers should be avoided in headers, and forward declarations should be used where possible or needed. All added code should be placed into the LAMMPS_NS namespace or a sub-namespace; global or static variables should be avoided, as they conflict with the modular nature of LAMMPS and the C++ class structure. There MUST NOT be any "using namespace XXX;" statements in headers. In the implementation file (`<name>.cpp`) system includes should be placed in angle brackets (`<>`) and for C library functions the C++ style header files should be included (`<cstdio>` instead of `<stdio.h>`, or `<cstring>` instead of `<string.h>`). All of this is so that the developers can more easily understand, integrate, and maintain your contribution and reduce conflicts with other parts of LAMMPS. This basically means that the code accesses data structures, performs its operations, and is formatted similarly to other LAMMPS source files, including the use of the error class for error and warning messages.
* Source files, style names, and documentation files should follow this naming convention: style names should be lowercase, with words separated by a forward slash; for a new fix style 'foo/bar', the class should be named FixFooBar, the source files should be named 'fix_foo_bar.h' and 'fix_foo_bar.cpp', and the corresponding documentation should be in a file 'fix_foo_bar.rst'.
* If you want your contribution to be added as a user-contributed feature, and it is a single file (actually a `<name>.cpp` and `<name>.h` file), it can be rapidly added to the USER-MISC directory. Include the one-line entry to add to the USER-MISC/README file in that directory, along with the 2 source files. You can do this multiple times if you wish to contribute several individual features.
* If you want your contribution to be added as a user contribution and it consists of several related features, it is probably best to make it a user package directory with a name like USER-FOO. In addition to your new files, the directory should contain a README text file. The README should contain your name and contact information and a brief description of what your new package does. If your files depend on other LAMMPS style files also being installed (e.g. because your file is a derived class from the other LAMMPS class), then an Install.sh file is also needed to check for those dependencies. See other README and Install.sh files in other USER directories as examples. Send us a tarball of this USER-FOO directory.
* Your new source files need to have the LAMMPS copyright, GPL notice, and your name and email address at the top, like other user-contributed LAMMPS source files. They need to create a class that is inside the LAMMPS namespace. If the file is for one of the USER packages, including USER-MISC, then we are not as picky about the coding style (see above). I.e. the files do not need to be in the same stylistic format and syntax as other LAMMPS files, though that would be nice for developers as well as users who try to read your code.
* You **must** also create or extend a documentation file for each new command or style you are adding to LAMMPS. For simplicity and convenience, the documentation of groups of closely related commands or styles may be combined into a single file. This will be one file for a single-file feature. For a package, it might be several files. These are files in the [reStructuredText](https://docutils.sourceforge.io/rst.html) markup language, that are then converted to HTML and PDF. The tools for this conversion are included in the source distribution, and the translation can be as simple as doing "make html pdf" in the doc folder. Thus the documentation source files must be in the same format and style as other `<name>.rst` files in the lammps/doc/src directory for similar commands and styles; use one or more of them as a starting point. An introduction to reStructuredText can be found at [https://docutils.sourceforge.io/docs/user/rst/quickstart.html](https://docutils.sourceforge.io/docs/user/rst/quickstart.html). The text files can include mathematical expressions and symbols in ".. math::" sections or ":math:" expressions, or figures (see doc/JPG for examples), or even additional PDF files with further details (see doc/PDF for examples). The doc page should also include literature citations as appropriate; see the bottom of doc/fix_nh.rst for examples and the earlier part of the same file for how to format the cite itself. The "Restrictions" section of the doc page should indicate that your command is only available if LAMMPS is built with the appropriate USER-MISC or USER-FOO package. See other user package doc files for examples of how to do this. The prerequisites for building the HTML format files are Python 3.x and virtualenv. Please run at least `make html`, `make pdf`, and `make spelling`, and carefully inspect and proofread the resulting HTML format doc page as well as the output produced to the screen. Make sure that all spelling errors are fixed or the necessary false positives are added to the `doc/utils/sphinx-config/false_positives.txt` file. For new styles, those usually also need to be added to lists on the respective overview pages. This can also be checked with `make style_check`.
* For a new package (or even a single command) you should include one or more example scripts demonstrating its use. These should run in no more than a couple of minutes, even on a single processor, and not require large data files as input. See directories under examples/USER for examples of input scripts other users provided for their packages. These example inputs are also required for validating memory accesses and testing for memory leaks with valgrind.
* If there is a paper of yours describing your feature (either the algorithm/science behind the feature itself, or its initial usage, or its implementation in LAMMPS), you can add the citation to the *.cpp source file. See src/USER-EFF/atom_vec_electron.cpp for an example. A LaTeX citation is stored in a variable at the top of the file and a single line of code that references the variable is added to the constructor of the class. Whenever a user invokes your feature from their input script, this will cause LAMMPS to output the citation to a log.cite file and prompt the user to examine the file. Note that you should only use this for a paper you or your group authored. E.g. adding a cite in the code for a paper by Nose and Hoover if you write a fix that implements their integrator is not the intended usage. That kind of citation should just be in the doc page you provide.
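
As an illustration of the build and documentation checks above, here is a minimal sketch of how this verification could look with the CMake build system. It is a sketch under stated assumptions: `LAMMPS_SIZES` is the CMake-level counterpart of the `-DLAMMPS_SMALLBIG`/`-DLAMMPS_BIGBIG` defines, and `examples/melt/in.melt` merely stands in for an input exercising your contribution.

```bash
# Sketch only: verify a contribution with both integer-size settings,
# in serial and in parallel with MPI (run from the LAMMPS source tree).
for size in smallbig bigbig; do
  cmake -S cmake -B build-${size} -D LAMMPS_SIZES=${size}     # configure
  cmake --build build-${size} --parallel 4                    # compile from scratch
  ./build-${size}/lmp -in examples/melt/in.melt               # serial run
  mpirun -np 4 ./build-${size}/lmp -in examples/melt/in.melt  # parallel run
done

# Build and proofread the documentation as described above:
cd doc
make html pdf     # translate the .rst sources to HTML and PDF
make spelling     # report spelling errors / update false_positives.txt
make style_check  # verify new styles appear on the overview pages
```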

Finally, as a general rule-of-thumb: the clearer and more self-explanatory you make your documentation and README files, and the easier you make it for people to get started, e.g. by providing example scripts, the more likely it is that users will try out your new feature.

If the new features/files are broadly useful we may add them as core files to LAMMPS or as part of a standard package. Otherwise we will add them as a user-contributed file or package. Examples of user packages are in src sub-directories that start with USER. The USER-MISC package is simply a collection of (mostly) unrelated single files, which is the simplest way to have your contribution quickly added to the LAMMPS distribution. You can see a list of both the standard and user packages by typing "make package" in the LAMMPS src directory.
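
For example, a quick way to inspect and install packages from the src directory (the package name below is illustrative; use the exact target names printed by `make package`):

```bash
cd src
make package        # list standard and user packages and the related make commands
make yes-user-misc  # example: install the USER-MISC package before compiling
```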

Note that by providing us files to release, you are agreeing to make them open-source, i.e. we can release them under the terms of the GPL, used as a license for the rest of LAMMPS. See Section 1.4 for details.

With user packages and files, all we are really providing (aside from the fame and fortune that accompanies having your name in the source code and on the Authors page of the LAMMPS WWW site) is a means for you to distribute your work to the LAMMPS user community, and a mechanism for others to easily try out your new feature. This may help you find bugs or make contact with new collaborators. Note that you are also implicitly agreeing to support your code, which means answering questions, fixing bugs, and maintaining it if LAMMPS changes in some way that breaks it (an unusual event).

To be able to submit a pull request on GitHub, you have to register for an account (for GitHub in general). If you do not want to do that, or have other reservations or difficulties about submitting a pull request, you can, as an alternative, contact one or more of the core LAMMPS developers, ask if one of them would be interested in manually merging your code into LAMMPS, and send them your source code. Since the effort to merge a pull request is a small fraction of the effort of integrating source code manually (which would usually be done by converting the contribution into a pull request), your chances of having your new code included quickly are best with a pull request.

If you prefer to submit patches or full files, you should first make certain that your code works correctly with the latest patch-level version of LAMMPS and contains all bug fixes from it. Then create a gzipped tar file of all changed or added files, or a corresponding patch file created with 'diff -u' or 'diff -c', and compress it with gzip. Please only use gzip compression, as this works well on all platforms.
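
A minimal sketch of the two options; every file and directory name below is a placeholder:

```bash
# Option 1: a gzip-compressed patch against an unmodified copy of the
# latest patch-level LAMMPS source tree
diff -u lammps-pristine/src/fix_foo_bar.cpp lammps/src/fix_foo_bar.cpp > my-changes.patch
gzip my-changes.patch

# Option 2: a gzipped tar file of all changed or added files
tar -czf user-foo.tar.gz src/USER-FOO/ doc/src/fix_foo_bar.rst
```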

## GitHub Workflows

@@ -97,17 +78,17 @@ This section briefly summarizes the steps that will happen **after** you have su

After submitting an issue, one or more of the LAMMPS developers will review it and categorize it by assigning labels. Confirmed bug reports will be labeled `bug`; if the bug report also contains a suggestion for how to fix it, it will be labeled `bugfix`; if the issue is a feature request, it will be labeled `enhancement`. Other labels may be attached as well, depending on which parts of the LAMMPS code are affected. If the assessment is that the issue does not warrant any changes, the `wontfix` label will be applied, and if the submission is incorrect or something that should not be submitted as an issue, the `invalid` label will be applied. In both of the last two cases, the issue will then be closed without further action.

For feature requests, what happens next is that developers may comment on the viability or relevance of the request, discuss it, and make suggestions for how to implement it. If a LAMMPS developer or user is planning to implement the feature, the issue will be assigned to that developer. Developers that are not yet listed as LAMMPS project collaborators will receive an invitation to be added to the LAMMPS project as a collaborator so they can be assigned. If the requested feature or enhancement is implemented, it will usually be submitted as a pull request, which will contain a reference to the issue number. And once the pull request is reviewed and accepted for inclusion into LAMMPS, the issue will be closed. For details on how pull requests are processed, please see below.
For feature requests, what happens next is that developers may comment on the viability or relevance of the request, discuss it, and make suggestions for how to implement it. If a LAMMPS developer or user is planning to implement the feature, the issue will be assigned to that developer. Developers that are not yet listed as LAMMPS project collaborators will receive an invitation to be added to the LAMMPS project as a collaborator so they can be assigned. If the requested feature or enhancement is implemented, it will be submitted as a pull request, which will contain a reference to the issue number. And once the pull request is reviewed and accepted for inclusion into LAMMPS, the issue will be closed. For details on how pull requests are processed, please see below. Feature requests may be labeled with `volunteer_needed` if none of the LAMMPS developers has the time and the required knowledge to implement the feature.

For bug reports, the next step is that one of the core LAMMPS developers will self-assign to the issue and try to confirm the bug. If confirmed, the `bug` label and potentially other labels are added to classify the issue and its impact on LAMMPS. Before confirming, further questions may be asked, or requests made for additional input files or details about the steps required to reproduce the issue. Any bugfix is likely to be submitted as a pull request (more about that below), and since most bugs require only local changes, the bugfix may be included in a pull request specifically set up to collect such local bugfixes or small enhancements. Once the bugfix is included in the master branch, the issue will be closed.
For bug reports, the next step is that one of the core LAMMPS developers will self-assign to the issue and try to confirm the bug. If confirmed, the `bug` label and potentially other labels are added to classify the issue and its impact on LAMMPS. Otherwise the `unconfirmed` label will be applied, and a comment added about what was tried to confirm the bug. Before confirming, further questions may be asked, or requests made for additional input files or details about the steps required to reproduce the issue. Any bugfix will be submitted as a pull request (more about that below), and since most bugs require only local changes, the bugfix may be included in a pull request specifically set up to collect such local bugfixes or small enhancements. Once the bugfix is included in the master branch, the issue will be closed.

### Pull Requests

For submitting pull requests, there is a [detailed tutorial](https://lammps.sandia.gov/doc/Howto_github.html) in the LAMMPS manual. Thus only a brief breakdown of the steps is presented here. Please note that the LAMMPS developers are still reviewing and trying to improve the process. If you are unsure about something, do not hesitate to post a question on the lammps-users mailing list or contact one of the core LAMMPS developers.
Immediately after the submission, the LAMMPS continuous integration server at ci.lammps.org will download your submitted branch and perform a simple compilation test, i.e. it will test whether your submitted code can be compiled under various conditions. It will also check whether your included documentation translates cleanly. Whether these tests succeed or fail will be recorded. If a test fails, please inspect the corresponding output on the CI server and take the necessary steps, if needed, so that the code can compile cleanly again. The tests will be re-run each time the pull request is updated with a push to the remote branch on GitHub.
Next a LAMMPS core developer will self-assign and do an overall technical assessment of the submission. If you are not yet registered as a LAMMPS collaborator, you will receive an invitation for that. As part of the assessment, the pull request will be categorized with labels. There are two special labels: `needs_work` (indicates that work from the submitter of the pull request is needed) and `work_in_progress` (indicates that the assigned LAMMPS developer will make changes, if not done by the contributor who made the submission).
Pull requests are the **only** way that changes get made to the LAMMPS distribution. The LAMMPS core developers themselves also submit pull requests for their own changes and discuss them on GitHub, so if you submit a pull request it will be treated in a similar fashion. When you submit a pull request you may opt to submit a "Draft" pull request. That means your changes are visible and will be subject to testing, but reviewers will not be (auto-)assigned, and comments will take into account that this is not complete. On the other hand, this is a perfect way to ask the LAMMPS developers for comments on non-obvious changes and get feedback and possible suggestions for improvements or recommendations about what to avoid.
Immediately after the submission, the LAMMPS continuous integration server at ci.lammps.org will download your submitted branch and perform a number of tests: it will test whether your code compiles cleanly under various conditions, check whether your included documentation translates cleanly, and run some unit tests and other checks. Whether these tests succeed or fail will be recorded. If a test fails, please inspect the corresponding output on the CI server and take the necessary steps, if needed, so that the code can compile cleanly again. The tests will be re-run each time the pull request is updated with a push to the remote branch on GitHub. If you are unsure about what you need to change, ask a question in the discussion area of the pull request.
Next a LAMMPS core developer will self-assign and do an overall technical assessment of the submission. If you submitted a draft pull request, this will not happen unless you mark it "ready for review". If you are not yet invited as a LAMMPS collaborator, and your contribution seems significant, you may also receive an invitation for collaboration on the LAMMPS repository. As part of the assessment, the pull request will be categorized with labels. There are two special labels: `needs_work` (indicates that work from the submitter of the pull request is needed) and `work_in_progress` (indicates that the assigned LAMMPS developer will make changes, if not done by the contributor who made the submission).
You may also receive comments and suggestions on the overall submission or specific details, and on occasion specific requests for changes as part of the review. If permitted, additional changes may also be pushed into your pull request branch, or a pull request may be filed in your LAMMPS fork on GitHub to include those changes.
The LAMMPS developer may then decide to assign the pull request to another developer (e.g. when that developer is more knowledgeable about the submitted feature or enhancement, or has written the modified code). It may also happen that additional developers are requested to provide a review and approve the changes. For submissions that may change the general behavior of LAMMPS, or where a possibility of unwanted side effects exists, additional tests may be requested by the assigned developer.
If the assigned developer is satisfied and considers the submission ready for inclusion into LAMMPS, the pull request will receive approvals and be merged into the master branch by one of the core LAMMPS developers. After the pull request is merged, you may delete the feature branch used for the pull request in your personal LAMMPS fork.
Since the learning curve for git is quite steep for efficiently managing remote repositories, local and remote branches, pull requests and more, do not hesitate to ask questions if you are not sure about how to do certain steps that are asked of you. Even if the changes asked of you do not make sense to you, they may be important for the LAMMPS developers. Please also note that these all are guidelines and nothing is set in stone. So depending on the nature of the contribution, the workflow may be adjusted.
If the assigned developer is satisfied and considers the submission ready for inclusion into LAMMPS, the pull request will receive approvals and be merged into the master branch by one of the core LAMMPS developers. After the pull request is merged, you may delete the feature branch used for the pull request in your personal LAMMPS fork. The minimum requirement to merge a pull request is that all automated tests have to pass and at least one LAMMPS developer has approved integrating the submitted code. Since the approver will not be the person merging a pull request, at least two LAMMPS developers will have looked at your contribution.
Since the learning curve for git is quite steep for efficiently managing remote repositories, local and remote branches, pull requests and more, do not hesitate to ask questions if you are not sure about how to do certain steps that are asked of you. Even if the changes asked of you do not make sense to you, they may be important for the LAMMPS developers. Please also note that these all are guidelines and nothing is set in stone. So depending on the nature of the contribution, the workflow may be adjusted.
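
For orientation, a minimal sketch of the usual fork-based sequence is shown below; `<your-user>` and the branch name are placeholders, and the tutorial linked above covers the details:

```bash
git clone git@github.com:<your-user>/lammps.git   # clone your personal fork
cd lammps
git remote add upstream https://github.com/lammps/lammps.git
git checkout -b my-new-feature                    # do all work on a feature branch
# ... edit files, commit ...
git push -u origin my-new-feature                 # then open the pull request on GitHub
```
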
.github/PULL_REQUEST_TEMPLATE.md (vendored, 3 changes)
@@ -2,7 +2,7 @@

<!--Briefly describe the new feature(s), enhancement(s), or bugfix(es) included in this pull request.-->

**Related Issues**
**Related Issue(s)**

<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->

@@ -34,6 +34,7 @@ By submitting this pull request, I agree, that my contribution will be included
- [ ] The added/updated documentation is integrated and tested with the documentation build system
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.
- [ ] A package specific README file has been included or updated
- [ ] One or more example input decks are included

.github/PULL_REQUEST_TEMPLATE/bug_fix.md (vendored, 25 changes)
@@ -9,34 +9,37 @@ assignees: ''

**Summary**

<!--Briefly describe the bug or bugs, that are eliminated by this pull request.-->
<!--Briefly describe the bug(s) that are eliminated by this pull request.-->

**Related Issue(s)**

<!--If this request addresses or is related to an existing (open) GitHub issue, e.g. a bug report, mention the issue number here following a pound sign (aka hashmark), e.g. `#222`.-->
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->

**Author(s)**

<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request-->
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request. If this pull request adds new files to the distribution, please also provide a suitable "long-lived" e-mail address (ideally something that can outlive your institution's e-mail, in case you change jobs) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this contributed code.-->

**Licensing**

By submitting this pull request I implicitly accept, that my submission is subject to the same licensing terms as the files that are modified.
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).

**Backward Compatibility**

<!--Please state whether any changes in the pull request break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->

**Detailed Description**

<!--Provide any relevant details about how the fixed bug can be reproduced, how the changes are implemented, how correctness was verified, how other features - if any - in LAMMPS are affected-->

## Post Submission Checklist
**Post Submission Checklist**

<!--Please check the fields below as they are completed *after* the pull request is submitted-->
- [ ] The code in this pull request is complete
<!--Please check the fields below as they are completed **after** the pull request has been submitted. Delete lines that don't apply-->

- [ ] The feature or features in this pull request are complete
- [ ] Licensing information is complete
- [ ] Corresponding author information is complete
- [ ] The source code follows the LAMMPS formatting guidelines
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.

## Further Information, Files, and Links

<!--Put any additional information here, attach relevant text or image files, and URLs to external sites (e.g. to download input decks for testing)-->

@@ -13,23 +13,31 @@ assignees: ''

**Related Issue(s)**

<!--If this request addresses or is related to an existing (open) GitHub issue, e.g. a bug report, mention the issue number here following a pound sign (aka hashmark), e.g. `#222`.-->
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->

**Author(s)**

<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request-->
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request. If this pull request adds new files to the distribution, please also provide a suitable "long-lived" e-mail address (ideally something that can outlive your institution's e-mail, in case you change jobs) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this contributed code.-->

**Licensing**

By submitting this pull request I implicitly accept, that my submission is subject to the same licensing terms as the files that are modified.
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).

**Backward Compatibility**

<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->

**Detailed Description**

<!--Provide any relevant details about the included changes.-->
<!--Provide any relevant details about how the changes are implemented, how correctness was verified, how other features - if any - in LAMMPS are affected-->

## Post Submission Checklist
**Post Submission Checklist**

<!--Please check the fields below as they are completed *after* the pull request is submitted-->
- [ ] The pull request is complete
- [ ] The source code follows the LAMMPS formatting guidelines
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.

.github/PULL_REQUEST_TEMPLATE/new_feature.md (vendored, 22 changes)
@@ -11,32 +11,29 @@ assignees: ''

<!--Briefly describe the new feature(s) included in this pull request.-->

**Related Issues**
**Related Issue(s)**

<!--If this addresses an existing (open) GitHub issue, e.g. a feature request, mention the issue number here following a pound sign (aka hashmark), e.g. `#331`.-->
<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->

**Author(s)**

<!--Please state name and affiliation of the author or authors that should be credited with the features added in this pull request. Please provide a suitable "long-lived" e-mail address (e.g. from gmail, yahoo, outlook, etc.) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this code, now and in the future-->
<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request. If this pull request adds new files to the distribution, please also provide a suitable "long-lived" e-mail address (ideally something that can outlive your institution's e-mail, in case you change jobs) for the *corresponding* author, i.e. the person the LAMMPS developers can contact directly with questions and requests related to maintenance and support of this contributed code.-->

**Licensing**

<!--Please add *yes* or *no* to the following two statements (please contact @lammps/core if you have questions about this)-->

My contribution may be licensed as GPL v2 (default LAMMPS license):
My contribution may be licensed as LGPL (for use as a library with proprietary software):
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).

**Backward Compatibility**

<!--Please state if any of the changes in this pull request will affect backward compatibility for inputs, and - if yes - explain what has been changed and why-->
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->

**Implementation Notes**

<!--Provide any relevant details about how the new features are implemented, how correctness was verified, what platforms (OS, compiler, MPI, hardware, number of processors, accelerator(s)) it was tested on-->
<!--Provide any relevant details about how the new feature(s) are implemented, how correctness was verified, how other features - if any - in LAMMPS are affected-->

## Post Submission Checklist
**Post Submission Checklist**

<!--Please check the fields below as they are completed *after* the pull request has been submitted-->
<!--Please check the fields below as they are completed **after** the pull request has been submitted. Delete lines that don't apply-->

- [ ] The feature or features in this pull request are complete
- [ ] Licensing information is complete
@@ -46,10 +43,11 @@ My contribution may be licensed as LGPL (for use as a library with proprietary s
- [ ] The added/updated documentation is integrated and tested with the documentation build system
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been added to the unittest tree.
- [ ] A package specific README file has been included or updated
- [ ] One or more example input decks are included

## Further Information, Files, and Links
**Further Information, Files, and Links**

<!--Put any additional information here, attach relevant text or image files, and URLs to external sites (e.g. DOIs or webpages)-->

@@ -11,17 +11,21 @@ assignees: ''

<!--Briefly describe what kind of updates or enhancements for a package or feature are included. If you are not the original author of the package or feature, please mention whether your contribution was created independently or in collaboration/cooperation with the original author.-->

**Related Issue(s)**

<!--If this addresses an open GitHub issue for this project, please mention the issue number here, and describe the relation. Use the phrases `fixes #221` or `closes #135`, when you want an issue to be automatically closed when the pull request is merged-->

**Author(s)**

<!--Please state name and affiliation of the author or authors that should be credited with the changes in this pull request-->

**Licensing**

By submitting this pull request I implicitly accept, that my submission is subject to the same licensing terms as the original package or feature(s) that are updated or amended by this pull request.
By submitting this pull request, I agree that my contribution will be included in LAMMPS and redistributed under either the GNU General Public License version 2 (GPL v2) or the GNU Lesser General Public License version 2.1 (LGPL v2.1).

**Backward Compatibility**

<!--Please state whether any changes in the pull request break backward compatibility for inputs, and - if yes - explain what has been changed and why-->
<!--Please state whether any changes in the pull request will break backward compatibility for inputs, and - if yes - explain what has been changed and why-->

**Implementation Notes**

@@ -29,11 +33,19 @@ By submitting this pull request I implicitly accept, that my submission is subje

**Post Submission Checklist**

<!--Please check the fields below as they are completed-->
<!--Please check the fields below as they are completed **after** the pull request has been submitted. Delete lines that don't apply-->

- [ ] The feature or features in this pull request are complete
- [ ] Suitable updates to the existing docs are included
- [ ] One or more example input decks are included
- [ ] Licensing information is complete
- [ ] Corresponding author information is complete
- [ ] The source code follows the LAMMPS formatting guidelines
- [ ] Suitable updates to the existing docs are included
- [ ] The updated documentation is integrated and tested with the documentation build system
- [ ] The feature has been verified to work with the conventional build system
- [ ] The feature has been verified to work with the CMake based build system
- [ ] Suitable tests have been updated or added to the unittest tree.
- [ ] A package specific README file has been updated
- [ ] One or more example input decks are included

**Further Information, Files, and Links**

.github/codecov.yml (vendored, new file, 29 changes)
@@ -0,0 +1,29 @@
comment: false
coverage:
  notify:
    slack:
      default:
        url: "secret:HWZbvgtc6OD7F3v3PfrK3/rzCJvScbh69Fi1CkLwuHK0+wIBIHVR+Q5i7q6F9Ln4OChbiRGtYAEUUsT8/jmBu4qDpIi8mx746codc0z/Z3aafLd24pBrCEPLvdCfIZxqPnw3TuUgGhwmMDZf0+thg8YNUr/MbOZ7Li2L6+ZbYuA="
        threshold: 10%
        only_pulls: false
        branches:
          - "master"
        flags:
          - "unit"
        paths:
          - "src"
  status:
    project:
      default:
        branches:
          - "master"
        paths:
          - "src"
        informational: true
    patch:
      default:
        branches:
          - "master"
        paths:
          - "src"
        informational: true

.github/codeql/cpp.yml (vendored, new file, 4 changes)
@@ -0,0 +1,4 @@
paths:
  - src
  - lib
  - tools

.github/codeql/python.yml (vendored, new file, 5 changes)
@@ -0,0 +1,5 @@
paths:
  - python/lammps

queries:
  - uses: security-and-quality

.github/workflows/codeql-analysis.yml (vendored, new file, 49 changes)
@@ -0,0 +1,49 @@
# GitHub action to run static code analysis on C++ and Python code
name: "CodeQL Code Analysis"

on:
  push:
    branches: [develop]

jobs:
  analyze:
    name: Analyze
    if: ${{ github.repository == 'lammps/lammps' }}
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: ['cpp', 'python']

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          config-file: ./.github/codeql/${{ matrix.language }}.yml

      - name: Create Build Environment
        if: ${{ matrix.language == 'cpp' }}
        run: mkdir build

      - name: Building LAMMPS via CMake
        if: ${{ matrix.language == 'cpp' }}
        shell: bash
        working-directory: build
        run: |
          cmake -C ../cmake/presets/most.cmake ../cmake
          cmake --build . --parallel 2

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1

.github/workflows/compile-msvc.yml (vendored, new file, 46 changes)
@@ -0,0 +1,46 @@
# GitHub action to build LAMMPS on Windows with Visual C++
name: "Native Windows Compilation and Unit Tests"

on:
  push:
    branches: [develop]

jobs:
  build:
    name: Windows Compilation Test
    if: ${{ github.repository == 'lammps/lammps' }}
    runs-on: windows-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Select Python version
        uses: actions/setup-python@v2
        with:
          python-version: '3.10'

      - name: Building LAMMPS via CMake
        shell: bash
        run: |
          python3 -m pip install numpy
          cmake -C cmake/presets/windows.cmake \
                -D PKG_PYTHON=on \
                -S cmake -B build \
                -D BUILD_SHARED_LIBS=on \
                -D LAMMPS_EXCEPTIONS=on \
                -D ENABLE_TESTING=on
          cmake --build build --config Release

      - name: Run LAMMPS executable
        shell: bash
        run: |
          ./build/Release/lmp.exe -h
          ./build/Release/lmp.exe -in bench/in.lj

      - name: Run Unit Tests
        working-directory: build
        shell: bash
        run: ctest -V -C Release

.github/workflows/unittest-macos.yml (vendored, new file, 54 changes)
@@ -0,0 +1,54 @@
# GitHub action to build LAMMPS on MacOS and run unit tests
name: "Unittest for MacOS"

on:
  push:
    branches: [develop]

jobs:
  build:
    name: MacOS Unit Test
    if: ${{ github.repository == 'lammps/lammps' }}
    runs-on: macos-latest
    env:
      CCACHE_DIR: ${{ github.workspace }}/.ccache

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Install ccache
        run: brew install ccache

      - name: Create Build Environment
        run: mkdir build

      - name: Set up ccache
        uses: actions/cache@v2
        with:
          path: ${{ env.CCACHE_DIR }}
          key: macos-ccache-${{ github.sha }}
          restore-keys: macos-ccache-

      - name: Building LAMMPS via CMake
        shell: bash
        working-directory: build
        run: |
          ccache -z
          cmake -C ../cmake/presets/clang.cmake \
                -C ../cmake/presets/most.cmake \
                -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \
                -D CMAKE_C_COMPILER_LAUNCHER=ccache \
                -D ENABLE_TESTING=on \
                -D BUILD_SHARED_LIBS=on \
                -D LAMMPS_EXCEPTIONS=on \
                ../cmake
          cmake --build . --parallel 2
          ccache -s

      - name: Run Tests
        working-directory: build
        shell: bash
        run: ctest -V

.gitignore (vendored, 11 changes)
@@ -12,6 +12,7 @@
*.sif
*.dll
*.pyc
a.out
__pycache__

Obj_*
@@ -36,12 +37,20 @@ vgcore.*
.Trashes
ehthumbs.db
Thumbs.db
.clang-format
.lammps_history
.vs

#cmake
/build*
/CMakeCache.txt
/CMakeFiles/
/Testing
/Makefile
/Testing
/cmake_install.cmake
/lmp
out/Debug
out/RelWithDebInfo
out/Release
out/x86
out/x64

.lgtm.yml (new file, 14 changes)
@@ -0,0 +1,14 @@
extraction:
  cpp:
    configure:
      command:
        - "mkdir build"
        - "cd build"
        - "cmake -G Ninja -C ../cmake/presets/most.cmake ../cmake"
    index:
      build_command:
        - "cd build"
        - "ninja"
  python:
    python_setup:
      version: 3

LICENSE (7 changes)
@@ -1,6 +1,6 @@
GNU GENERAL PUBLIC LICENSE

Version 2, June 1991
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
@@ -301,9 +301,8 @@ one line to give the program's name and an idea of what it does.
Copyright (C) yyyy name of author

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of

42 README
@ -14,36 +14,40 @@ LAMMPS is a classical molecular dynamics simulation code designed to
run efficiently on parallel computers. It was developed at Sandia
National Laboratories, a US Department of Energy facility, with
funding from the DOE. It is an open-source code, distributed freely
under the terms of the GNU Public License (GPL).
under the terms of the GNU Public License (GPL) version 2.

The primary author of the code is Steve Plimpton, who can be emailed
at sjplimp@sandia.gov. The LAMMPS WWW Site at lammps.sandia.gov has
at sjplimp@sandia.gov. The LAMMPS WWW Site at www.lammps.org has
more information about the code and its uses.

The LAMMPS distribution includes the following files and directories:

README                     this file
LICENSE                    the GNU General Public License (GPL)
bench                      benchmark problems
cmake                      CMake build system
doc                        documentation
examples                   simple test problems
lib                        libraries LAMMPS can be linked with
potentials                 interatomic potential files
python                     Python wrapper on LAMMPS as a library
src                        source files
tools                      pre- and post-processing tools
README                     this file
LICENSE                    the GNU General Public License (GPL)
bench                      benchmark problems
cmake                      CMake build files
doc                        documentation
examples                   simple test problems
fortran                    Fortran wrapper for LAMMPS
lib                        additional provided or external libraries
potentials                 interatomic potential files
python                     Python wrappers for LAMMPS
src                        source files
tools                      pre- and post-processing tools

Point your browser at any of these files to get started:

http://lammps.sandia.gov/doc/Manual.html    the LAMMPS manual
http://lammps.sandia.gov/doc/Intro.html     hi-level introduction
http://lammps.sandia.gov/doc/Build.html     how to build LAMMPS
http://lammps.sandia.gov/doc/Run_head.html  how to run LAMMPS
http://lammps.sandia.gov/doc/Developer.pdf  LAMMPS developer guide
https://docs.lammps.org/Manual.html         LAMMPS manual
https://docs.lammps.org/Intro.html          hi-level introduction
https://docs.lammps.org/Build.html          how to build LAMMPS
https://docs.lammps.org/Run_head.html       how to run LAMMPS
https://docs.lammps.org/Commands_all.html   Table of available commands
https://docs.lammps.org/Library.html        LAMMPS library interfaces
https://docs.lammps.org/Modify.html         how to modify and extend LAMMPS
https://docs.lammps.org/Developer.html      LAMMPS developer info

You can also create these doc pages locally:

% cd doc
% make html         # creates HTML pages in doc/html
% make pdf          # creates Manual.pdf and Developer.pdf
% make pdf          # creates Manual.pdf
39 SECURITY.md Normal file
@ -0,0 +1,39 @@
# Security Policy

LAMMPS is designed as a user-level application to conduct computer
simulations for research using classical mechanics. As such, LAMMPS
depends to some degree on users providing correctly formatted input, and
LAMMPS needs to read and write files based on uncontrolled user input.
As a parallel application for use in high-performance computing
environments, performance-critical steps are also done without checking
data.

LAMMPS is also interfaced to a number of external libraries, including
libraries with experimental research software, that are not validated
and tested by the LAMMPS developers, so it is easy to import bad
behavior from calling functions in one of those libraries.

Thus it is quite easy to crash LAMMPS through malicious input and to
perform all kinds of filesystem manipulations. Because of that, LAMMPS
should **NEVER** be compiled or **run** as superuser, either from a
"root" or "administrator" account directly or indirectly via "sudo" or
"su".

Therefore, what could be seen as a security vulnerability is usually
either a user mistake or a bug in the code. Bugs can be reported in
the LAMMPS project
[issue tracker on GitHub](https://github.com/lammps/lammps/issues).

To mitigate issues with homoglyphs or bidirectional reordering in
unicode, which have been demonstrated as a vector to obfuscate and hide
malicious changes to source code, all LAMMPS submissions are checked
for unicode characters and only all-ASCII source code is accepted.
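
As an illustration (a minimal sketch, not the actual CI tooling), a
check along these lines with GNU grep will flag any non-ASCII bytes in
the source tree before submission:

    grep -rnP '[^\x00-\x7F]' src/    # -P (PCRE) requires GNU grep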

# Version Updates

LAMMPS follows a continuous release development model. We aim to keep
all release versions (stable or patch) fully functional and employ a
variety of automatic testing procedures to detect, before releases are
made, failures of existing functionality caused by adding new features.
Bugfixes and updates are therefore integrated only into the current
development branch, and thus into the next (patch) release, so users
are advised to update regularly.
@ -1,51 +0,0 @@
These are input scripts used to run versions of several of the
benchmarks in the top-level bench directory using the GPU accelerator
package. The results of running these scripts on two different machines
(a desktop with 2 Tesla GPUs and the ORNL Titan supercomputer) are shown
on the "GPU (Fermi)" section of the Benchmark page of the LAMMPS WWW
site: lammps.sandia.gov/bench.

Examples of how to run these scripts are shown below. This assumes
you have built 3 executables with the GPU package installed, e.g.

lmp_linux_single
lmp_linux_mixed
lmp_linux_double

------------------------------------------------------------------------

To run on just CPUs (without using the GPU styles),
do something like the following:

mpirun -np 1 lmp_linux_double -v x 8 -v y 8 -v z 8 -v t 100 < in.lj
mpirun -np 12 lmp_linux_double -v x 16 -v y 16 -v z 16 -v t 100 < in.eam

The "xyz" settings determine the problem size. The "t" setting
determines the number of timesteps.

These mpirun commands run on a single node. To run on multiple
nodes, scale up the "-np" setting.

------------------------------------------------------------------------

To run with the GPU package, do something like the following:

mpirun -np 12 lmp_linux_single -sf gpu -v x 32 -v y 32 -v z 64 -v t 100 < in.lj
mpirun -np 8 lmp_linux_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 64 -v t 100 < in.eam

The "xyz" settings determine the problem size. The "t" setting
determines the number of timesteps. The "np" setting determines how
many MPI tasks (per node) the problem will run on. The numeric
argument to the "-pk" setting is the number of GPUs (per node); 1 GPU
is the default. Note that you can use more MPI tasks than GPUs (per
node) with the GPU package.

These mpirun commands run on a single node. To run on multiple nodes,
scale up the "-np" setting, and control the number of MPI tasks per
node via a "-ppn" setting.
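
For example, to run the mixed-precision executable on 2 nodes with 12
MPI tasks per node (a sketch with hypothetical node counts; the exact
option name depends on your MPI, e.g. OpenMPI uses -npernode instead
of -ppn):

mpirun -np 24 -ppn 12 lmp_linux_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 64 -v t 100 < in.eam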

------------------------------------------------------------------------

If the script has "titan" in its name, it was run on the Titan
supercomputer at ORNL.
@ -1,24 +0,0 @@
# bulk Cu lattice

units           metal
atom_style      atomic

lattice         fcc 3.615
region          box block 0 $x 0 $y 0 $z
create_box      1 box
create_atoms    1 box

pair_style      eam
pair_coeff      1 1 Cu_u3.eam

velocity        all create 1600.0 376847 loop geom

neighbor        1.0 bin
neigh_modify    every 1 delay 5 check yes

fix             1 all nve

timestep        0.005
thermo          50

run             $t
@ -1,37 +0,0 @@
# bulk Cu lattice

newton          off
package         gpu force/neigh 0 0 1
processors      * * * grid numa

variable        x index 1
variable        y index 1
variable        z index 1

variable        xx equal 20*$x
variable        yy equal 20*$y
variable        zz equal 20*$z

units           metal
atom_style      atomic

lattice         fcc 3.615
region          box block 0 ${xx} 0 ${yy} 0 ${zz}
create_box      1 box
create_atoms    1 box

pair_style      eam/gpu
pair_coeff      1 1 Cu_u3.eam

velocity        all create 1600.0 376847 loop geom

neighbor        1.0 bin
neigh_modify    every 1 delay 5 check yes

fix             1 all nve

timestep        0.005
thermo          50

run             15
run             100
@ -1,22 +0,0 @@
# 3d Lennard-Jones melt

units           lj
atom_style      atomic

lattice         fcc 0.8442
region          box block 0 $x 0 $y 0 $z
create_box      1 box
create_atoms    1 box
mass            1 1.0

velocity        all create 1.44 87287 loop geom

pair_style      lj/cut 2.5
pair_coeff      1 1 1.0 1.0 2.5

neighbor        0.3 bin
neigh_modify    delay 0 every 20 check no

fix             1 all nve

run             $t
@ -1,35 +0,0 @@
# 3d Lennard-Jones melt

newton          off
package         gpu force/neigh 0 0 1
processors      * * * grid numa

variable        x index 1
variable        y index 1
variable        z index 1

variable        xx equal 20*$x
variable        yy equal 20*$y
variable        zz equal 20*$z

units           lj
atom_style      atomic

lattice         fcc 0.8442
region          box block 0 ${xx} 0 ${yy} 0 ${zz}
create_box      1 box
create_atoms    1 box
mass            1 1.0

velocity        all create 1.44 87287 loop geom

pair_style      lj/cut/gpu 2.5
pair_coeff      1 1 1.0 1.0 2.5

neighbor        0.3 bin
neigh_modify    delay 0 every 20 check no

fix             1 all nve

run             15
run             100
@ -1,30 +0,0 @@
# Rhodopsin model

units           real
neigh_modify    delay 5 every 1

atom_style      full
atom_modify     map hash
bond_style      harmonic
angle_style     charmm
dihedral_style  charmm
improper_style  harmonic
pair_style      lj/charmm/coul/long 8.0 10.0
pair_modify     mix arithmetic
kspace_style    pppm 1e-4

read_data       data.rhodo

replicate       $x $y $z

fix             1 all shake 0.0001 5 0 m 1.0 a 232
fix             2 all npt temp 300.0 300.0 100.0 &
                z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1

special_bonds   charmm

thermo          50
thermo_style    multi
timestep        2.0

run             $t
@ -1,39 +0,0 @@
# Rhodopsin model

newton          off
package         gpu force/neigh 0 0 1
processors      * * * grid numa

variable        x index 1
variable        y index 1
variable        z index 1

units           real
neigh_modify    delay 5 every 1

atom_style      full
atom_modify     map hash
bond_style      harmonic
angle_style     charmm
dihedral_style  charmm
improper_style  harmonic
pair_style      lj/charmm/coul/long/gpu 8.0 ${cutoff}
pair_modify     mix arithmetic
kspace_style    pppm/gpu 1e-4

read_data       data.rhodo

replicate       $x $y $z

fix             1 all shake 0.0001 5 0 m 1.0 a 232
fix             2 all npt temp 300.0 300.0 100.0 &
                z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1

special_bonds   charmm

thermo          50
# thermo_style    multi
timestep        2.0

run             15
run             100
@ -1,42 +0,0 @@
# Rhodopsin model

newton          off
package         gpu force/neigh 0 0 1
partition       yes 1 processors * * * grid twolevel ${grid} * * * &
                part 1 2 multiple
partition       yes 2 processors * * * part 1 2 multiple

variable        x index 1
variable        y index 1
variable        z index 1

units           real
neigh_modify    delay 5 every 1

atom_style      full
atom_modify     map hash
bond_style      harmonic
angle_style     charmm
dihedral_style  charmm
improper_style  harmonic
pair_style      lj/charmm/coul/long/gpu 8.0 ${cutoff}
pair_modify     mix arithmetic
kspace_style    pppm/gpu 1e-4

read_data       data.rhodo

replicate       $x $y $z

fix             1 all shake 0.0001 5 0 m 1.0 a 232
fix             2 all npt temp 300.0 300.0 100.0 &
                z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1

special_bonds   charmm

thermo          50
# thermo_style    multi
timestep        2.0

run_style       verlet/split
run             15
run             100
@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW

SHELL = /bin/sh

# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler

CC =            icc
CCFLAGS =       -O
SHFLAGS =       -fPIC
DEPFLAGS =      -M

LINK =          icc
LINKFLAGS =     -O
LIB =           -lstdc++
SIZE =          size

ARCHIVE =       ar
ARFLAGS =       -rc
SHLIBFLAGS =    -shared

# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"

# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)

LMP_INC =

# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library

MPI_INC =       -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH =      -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB =       -lmpi

# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library

FFT_INC =
FFT_PATH =
FFT_LIB =

# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library

JPG_INC =
JPG_PATH =
JPG_LIB =       -ljpeg

# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section

include Makefile.package.settings
include Makefile.package

EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)

# Path to src files

vpath %.cpp ..
vpath %.h ..

# Link target

$(EXE): $(OBJ)
	$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
	$(SIZE) $(EXE)

# Library targets

lib: $(OBJ)
	$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)

shlib: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
	$(OBJ) $(EXTRA_LIB) $(LIB)

# Compilation rules

%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.d:%.cpp
	$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@

# Individual dependencies

DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)
@ -1,50 +0,0 @@
# /* ----------------------------------------------------------------------
#    Generic Linux Makefile for CUDA
#    - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */

# which file will be copied to Makefile.lammps

EXTRAMAKE = Makefile.lammps.standard

CUDA_HOME = /home/projects/cuda/6.0.37
NVCC = nvcc

# Kepler CUDA
CUDA_ARCH = -arch=sm_35
# Tesla CUDA
#CUDA_ARCH = -arch=sm_21
# newer CUDA
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE

# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL

LMP_INC = -DLAMMPS_SMALLBIG

# precision for GPU calculations
# -D_SINGLE_SINGLE  # Single precision for all calculations
# -D_DOUBLE_DOUBLE  # Double precision for all calculations
# -D_SINGLE_DOUBLE  # Accumulation of forces, etc. in double

CUDA_PRECISION = -D_DOUBLE_DOUBLE

CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64
CUDA_OPTS = -DUNIX -O3 -Xptxas -v --use_fast_math

CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 # -xHost -no-prec-div -ansi-alias

BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh

CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini

include Nvidia.makefile
@ -1,50 +0,0 @@
# /* ----------------------------------------------------------------------
#    Generic Linux Makefile for CUDA
#    - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */

# which file will be copied to Makefile.lammps

EXTRAMAKE = Makefile.lammps.standard

CUDA_HOME = /home/projects/cuda/6.0.37
NVCC = nvcc

# Kepler CUDA
CUDA_ARCH = -arch=sm_35
# Tesla CUDA
#CUDA_ARCH = -arch=sm_21
# newer CUDA
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE

# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL

LMP_INC = -DLAMMPS_SMALLBIG

# precision for GPU calculations
# -D_SINGLE_SINGLE  # Single precision for all calculations
# -D_DOUBLE_DOUBLE  # Double precision for all calculations
# -D_SINGLE_DOUBLE  # Accumulation of forces, etc. in double

CUDA_PRECISION = -D_SINGLE_DOUBLE

CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64
CUDA_OPTS = -DUNIX -O3 -Xptxas -v --use_fast_math

CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 # -xHost -no-prec-div -ansi-alias

BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh

CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini

include Nvidia.makefile
@ -1,50 +0,0 @@
# /* ----------------------------------------------------------------------
#    Generic Linux Makefile for CUDA
#    - Change CUDA_ARCH for your GPU
# ------------------------------------------------------------------------- */

# which file will be copied to Makefile.lammps

EXTRAMAKE = Makefile.lammps.standard

CUDA_HOME = /home/projects/cuda/6.0.37
NVCC = nvcc

# Kepler CUDA
CUDA_ARCH = -arch=sm_35
# Tesla CUDA
#CUDA_ARCH = -arch=sm_21
# newer CUDA
#CUDA_ARCH = -arch=sm_13
# older CUDA
#CUDA_ARCH = -arch=sm_10 -DCUDA_PRE_THREE

# this setting should match LAMMPS Makefile
# one of LAMMPS_SMALLBIG (default), LAMMPS_BIGBIG and LAMMPS_SMALLSMALL

LMP_INC = -DLAMMPS_SMALLBIG

# precision for GPU calculations
# -D_SINGLE_SINGLE  # Single precision for all calculations
# -D_DOUBLE_DOUBLE  # Double precision for all calculations
# -D_SINGLE_DOUBLE  # Accumulation of forces, etc. in double

CUDA_PRECISION = -D_SINGLE_SINGLE

CUDA_INCLUDE = -I$(CUDA_HOME)/include
CUDA_LIB = -L$(CUDA_HOME)/lib64
CUDA_OPTS = -DUNIX -O3 -Xptxas -v --use_fast_math

CUDR_CPP = mpic++ -DMPI_GERYON -DUCL_NO_EXIT -DMPICH_IGNORE_CXX_SEEK
CUDR_OPTS = -O2 # -xHost -no-prec-div -ansi-alias

BIN_DIR = ./
OBJ_DIR = ./
LIB_DIR = ./
AR = ar
BSH = /bin/sh

CUDPP_OPT = -DUSE_CUDPP -Icudpp_mini

include Nvidia.makefile
@ -1,109 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW

SHELL = /bin/sh

# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler

CC =            icc
CCFLAGS =       -O3 -openmp -DLAMMPS_MEMALIGN=64 -no-offload \
                -xHost -fno-alias -ansi-alias -restrict -override-limits
SHFLAGS =       -fPIC
DEPFLAGS =      -M

LINK =          icc
LINKFLAGS =     -O -openmp
LIB =           -lstdc++
SIZE =          size

ARCHIVE =       ar
ARFLAGS =       -rc
SHLIBFLAGS =    -shared

# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"

# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)

LMP_INC =       -DLAMMPS_GZIP -DLAMMPS_JPEG

# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library

MPI_INC =       -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH =      -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB =       -lmpi

# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library

FFT_INC =
FFT_PATH =
FFT_LIB =

# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library

JPG_INC =
JPG_PATH =
JPG_LIB =       -ljpeg

# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section

include Makefile.package.settings
include Makefile.package

EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)

# Path to src files

vpath %.cpp ..
vpath %.h ..

# Link target

$(EXE): $(OBJ)
	$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
	$(SIZE) $(EXE)

# Library targets

lib: $(OBJ)
	$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)

shlib: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
	$(OBJ) $(EXTRA_LIB) $(LIB)

# Compilation rules

%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.d:%.cpp
	$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@

# Individual dependencies

DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)
@ -1,113 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW

SHELL = /bin/sh

# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler

CC =            nvcc
CCFLAGS =       -O3 -arch=sm_35
SHFLAGS =       -fPIC
DEPFLAGS =      -M

LINK =          mpicxx
LINKFLAGS =     -O
LIB =           -lstdc++
SIZE =          size

ARCHIVE =       ar
ARFLAGS =       -rc
SHLIBFLAGS =    -shared

OMP =           yes
CUDA =          yes

# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"

# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)

LMP_INC =

# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library

MPI_INC =       -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH =      -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB =       -lmpi

# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library

FFT_INC =
FFT_PATH =
FFT_LIB =

# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library

JPG_INC =
JPG_PATH =
JPG_LIB =       -ljpeg

# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section

include Makefile.package.settings
include Makefile.package

EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)

# Path to src files

vpath %.cpp ..
vpath %.h ..

# Link target

$(EXE): $(OBJ)
	$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
	$(SIZE) $(EXE)

# Library targets

lib: $(OBJ)
	$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)

shlib: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
	$(OBJ) $(EXTRA_LIB) $(LIB)

# Compilation rules
%.o:%.cu
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.d:%.cpp
	$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@

# Individual dependencies

DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)
@ -1,110 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW

SHELL = /bin/sh

# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler

CC =            icc
CCFLAGS =       -O
SHFLAGS =       -fPIC
DEPFLAGS =      -M

LINK =          icc
LINKFLAGS =     -O
LIB =           -lstdc++
SIZE =          size

ARCHIVE =       ar
ARFLAGS =       -rc
SHLIBFLAGS =    -shared

OMP =           yes

# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"

# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)

LMP_INC =

# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library

MPI_INC =       -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH =      -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB =       -lmpi

# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library

FFT_INC =
FFT_PATH =
FFT_LIB =

# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library

JPG_INC =
JPG_PATH =
JPG_LIB =       -ljpeg

# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section

include Makefile.package.settings
include Makefile.package

EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)

# Path to src files

vpath %.cpp ..
vpath %.h ..

# Link target

$(EXE): $(OBJ)
	$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
	$(SIZE) $(EXE)

# Library targets

lib: $(OBJ)
	$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)

shlib: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
	$(OBJ) $(EXTRA_LIB) $(LIB)

# Compilation rules

%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.d:%.cpp
	$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@

# Individual dependencies

DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)
@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW

SHELL = /bin/sh

# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler

CC =            icc
CCFLAGS =       -O3 -openmp -restrict -ansi-alias
SHFLAGS =       -fPIC
DEPFLAGS =      -M

LINK =          icc
LINKFLAGS =     -O -openmp
LIB =           -lstdc++
SIZE =          size

ARCHIVE =       ar
ARFLAGS =       -rc
SHLIBFLAGS =    -shared

# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"

# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)

LMP_INC =

# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library

MPI_INC =       -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH =      -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB =       -lmpi

# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library

FFT_INC =
FFT_PATH =
FFT_LIB =

# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library

JPG_INC =
JPG_PATH =
JPG_LIB =       -ljpeg

# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section

include Makefile.package.settings
include Makefile.package

EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)

# Path to src files

vpath %.cpp ..
vpath %.h ..

# Link target

$(EXE): $(OBJ)
	$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
	$(SIZE) $(EXE)

# Library targets

lib: $(OBJ)
	$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)

shlib: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
	$(OBJ) $(EXTRA_LIB) $(LIB)

# Compilation rules

%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.d:%.cpp
	$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@

# Individual dependencies

DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)
@ -1,108 +0,0 @@
# linux = Shannon Linux box, Intel icc, OpenMPI, KISS FFTW

SHELL = /bin/sh

# ---------------------------------------------------------------------
# compiler/linker settings
# specify flags and libraries needed for your compiler

CC =            icc
CCFLAGS =       -O -restrict
SHFLAGS =       -fPIC
DEPFLAGS =      -M

LINK =          icc
LINKFLAGS =     -O
LIB =           -lstdc++
SIZE =          size

ARCHIVE =       ar
ARFLAGS =       -rc
SHLIBFLAGS =    -shared

# ---------------------------------------------------------------------
# LAMMPS-specific settings
# specify settings for LAMMPS features you will use
# if you change any -D setting, do full re-compile after "make clean"

# LAMMPS ifdef settings, OPTIONAL
# see possible settings in doc/Section_start.html#2_2 (step 4)

LMP_INC =

# MPI library, REQUIRED
# see discussion in doc/Section_start.html#2_2 (step 5)
# can point to dummy MPI library in src/STUBS as in Makefile.serial
# INC = path for mpi.h, MPI compiler settings
# PATH = path for MPI library
# LIB = name of MPI library

MPI_INC =       -I/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/include/
MPI_PATH =      -L/home/projects/openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37/lib
MPI_LIB =       -lmpi

# FFT library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 6)
# can be left blank to use provided KISS FFT library
# INC = -DFFT setting, e.g. -DFFT_FFTW, FFT compiler settings
# PATH = path for FFT library
# LIB = name of FFT library

FFT_INC =
FFT_PATH =
FFT_LIB =

# JPEG and/or PNG library, OPTIONAL
# see discussion in doc/Section_start.html#2_2 (step 7)
# only needed if -DLAMMPS_JPEG or -DLAMMPS_PNG listed with LMP_INC
# INC = path(s) for jpeglib.h and/or png.h
# PATH = path(s) for JPEG library and/or PNG library
# LIB = name(s) of JPEG library and/or PNG library

JPG_INC =
JPG_PATH =
JPG_LIB =       -ljpeg

# ---------------------------------------------------------------------
# build rules and dependencies
# no need to edit this section

include Makefile.package.settings
include Makefile.package

EXTRA_INC = $(LMP_INC) $(PKG_INC) $(MPI_INC) $(FFT_INC) $(JPG_INC) $(PKG_SYSINC)
EXTRA_PATH = $(PKG_PATH) $(MPI_PATH) $(FFT_PATH) $(JPG_PATH) $(PKG_SYSPATH)
EXTRA_LIB = $(PKG_LIB) $(MPI_LIB) $(FFT_LIB) $(JPG_LIB) $(PKG_SYSLIB)

# Path to src files

vpath %.cpp ..
vpath %.h ..

# Link target

$(EXE): $(OBJ)
	$(LINK) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(EXTRA_LIB) $(LIB) -o $(EXE)
	$(SIZE) $(EXE)

# Library targets

lib: $(OBJ)
	$(ARCHIVE) $(ARFLAGS) $(EXE) $(OBJ)

shlib: $(OBJ)
	$(CC) $(CCFLAGS) $(SHFLAGS) $(SHLIBFLAGS) $(EXTRA_PATH) -o $(EXE) \
	$(OBJ) $(EXTRA_LIB) $(LIB)

# Compilation rules

%.o:%.cpp
	$(CC) $(CCFLAGS) $(SHFLAGS) $(EXTRA_INC) -c $<

%.d:%.cpp
	$(CC) $(CCFLAGS) $(EXTRA_INC) $(DEPFLAGS) $< > $@

# Individual dependencies

DEPENDS = $(OBJ:.o=.d)
sinclude $(DEPENDS)
@ -1,68 +0,0 @@
These are build, input, and run scripts used to run the LJ benchmark
in the top-level bench directory using all the various accelerator
packages currently available in LAMMPS. The results of running these
benchmarks on a GPU cluster with Kepler GPUs are shown on the "GPU
(Kepler)" section of the Benchmark page of the LAMMPS WWW site:
lammps.sandia.gov/bench.

The specifics of the benchmark machine are as follows:

It is a small GPU cluster at Sandia National Labs called "shannon". It
has 32 nodes, each with two 8-core Sandy Bridge Xeon CPUs (E5-2670,
2.6 GHz, HT deactivated), for a total of 512 cores. Twenty-four of the
nodes have two NVIDIA Kepler GPUs (K20x, 2688 cores at 732 MHz). LAMMPS
was compiled with the Intel icc compiler, using module
openmpi/1.8.1/intel/13.1.SP1.106/cuda/6.0.37.

------------------------------------------------------------------------

You can, of course, build LAMMPS yourself with any of the accelerator
packages installed for your platform.

The build.py script will build LAMMPS for the various accelerator
packages using the Makefile.* files in this dir, which you can edit if
necessary for your platform. You must set the "lmpdir" variable at
the top of build.py to the home directory of LAMMPS as installed on
your system. Note that the build.py script hardcodes the arch setting
for the USER-CUDA package, which should be matched to the GPUs on your
system, e.g. sm_35 for Kepler GPUs. For the GPU package, this setting
is in the Makefile.gpu.* files, as is the CUDA_HOME variable, which
should point to where the NVIDIA CUDA software is installed on your
system.
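
As a sketch (the target arch and CUDA path here are hypothetical),
editing those two settings for a different GPU could look like:

sed -i -e 's/-arch=sm_35/-arch=sm_60/' Makefile.gpu.*
sed -i -e 's|^CUDA_HOME .*|CUDA_HOME = /usr/local/cuda|' Makefile.gpu.*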

Once the Makefiles are in place, typing, for example,

python build.py cpu gpu

will build executables for the CPU (no accelerators) and 3 variants
(double, mixed, single precision) of the GPU package. See the list of
possible targets at the top of the build.py script.

Note that the build.py script will un-install all packages in your
LAMMPS directory, then install only the ones needed for the benchmark.
The Makefile.* files in this dir are copied into lammps/src/MAKE as a
dummy Makefile.foo, so they will not conflict with makefiles that may
already be there. The build.py script also builds the auxiliary GPU
and USER-CUDA libraries as needed, as sketched below.
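
Manually, the equivalent package setup and build inside lammps/src is
roughly the following sketch (using the same dummy "foo" makefile name
that build.py uses):

cd lammps/src
make no-all
make yes-gpu
make -j 16 foo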

LAMMPS executables that are generated by build.py are copied into this
directory when the script finishes each build.

------------------------------------------------------------------------

The in.* files can be run with any of the accelerator packages,
if you specify the appropriate command-line switches. These
include switches to set the problem size and number of timesteps
to run.
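
For example (a sketch; task and thread counts are hypothetical):

mpirun -np 16 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mpirun -np 16 lmp_omp -sf omp -pk omp 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mpirun -np 2 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mpirun -np 1 lmp_kokkos_omp -k on t 16 -sf kk -v x 32 -v y 32 -v z 32 -v t 100 < in.lj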

The run*.sh scripts have sample mpirun commands for running the input
scripts on a single node or on multiple nodes for the strong and weak
scaling results shown on the benchmark web page. These scripts are
provided for illustration purposes, to show what command-line
arguments are used with each accelerator package.

Note that we generate these run scripts, either for interactive or
batch submission, via Python scripts, which often produce a long list
of runs to exercise a combination of options. To perform a quick
benchmark calculation on your platform, you will typically only want
to run a few commands out of any of the run*.sh scripts.
@ -1,187 +0,0 @@
|
||||
#!/usr/local/bin/python
|
||||
|
||||
# Syntax: build.py target1 target2 ...
|
||||
# targets:
|
||||
# cpu, opt, omp,
|
||||
# gpu/double, gpu/mixed, gpu/single,
|
||||
# cuda/double, cuda/mixed, cuda/single,
|
||||
# intel/cpu, intel/phi,
|
||||
# kokkos/omp, kokkos/phi, kokkos/cuda
|
||||
# gpu = gpu/double + gpu/mixed + gpu/single
|
||||
# cuda = cuda/double + cuda/mixed + cuda/single
|
||||
# intel = intel/cpu + intel/phi
|
||||
# kokkos = kokkos/omp + kokkos/phi + kokkos/cuda
|
||||
# all = cpu + opt + omp + gpu + cuda + intel + kokkos
|
||||
|
||||
# create exectuables for different packages
|
||||
# MUST set lmpdir to path of LAMMPS home directory
|
||||
|
||||
import sys,commands,os
|
||||
|
||||
lmpdir = "~/lammps"
|
||||
|
||||
# build LAMMPS
|
||||
# copy makefile into src/MAKE as Makefile.foo, then remove it
|
||||
|
||||
def build_lammps(makefile,pkg):
|
||||
print "Building LAMMPS with %s and %s packages ..." % (makefile,pkg)
|
||||
commands.getoutput("cp %s %s/src/MAKE/Makefile.foo" % (makefile,lmpdir))
|
||||
cwd = os.getcwd()
|
||||
os.chdir(os.path.expanduser(lmpdir + "/src"))
|
||||
str = "make clean-foo"
|
||||
txt = commands.getoutput(str)
|
||||
str = "make no-all"
|
||||
txt = commands.getoutput(str)
|
||||
for package in pkg:
|
||||
str = "make yes-%s" % package
|
||||
txt = commands.getoutput(str)
|
||||
print txt
|
||||
str = "make -j 16 foo"
|
||||
txt = commands.getoutput(str)
|
||||
os.remove("MAKE/Makefile.foo")
|
||||
os.chdir(cwd)
|
||||
|
||||
# build GPU library in LAMMPS
|
||||
# copy makefile into lib/gpu as Makefile.foo, then remove it
|
||||
|
||||
def build_gpu(makefile):
|
||||
print "Building GPU lib with %s ..." % makefile
|
||||
commands.getoutput("cp %s %s/lib/gpu/Makefile.foo" % (makefile,lmpdir))
|
||||
cwd = os.getcwd()
|
||||
os.chdir(os.path.expanduser(lmpdir + "/lib/gpu"))
|
||||
str = "make -f Makefile.foo clean"
|
||||
txt = commands.getoutput(str)
|
||||
str = "make -j 16 -f Makefile.foo"
|
||||
txt = commands.getoutput(str)
|
||||
os.remove("Makefile.foo")
|
||||
os.chdir(cwd)
|
||||
|
||||
# build CUDA library in LAMMPS
|
||||
# set precision and arch explicitly as options to make in lib/cuda
|
||||
|
||||
def build_cuda(precision,arch):
|
||||
print "Building USER-CUDA lib with %s and arch sm_%d ..." % (precision,arch)
|
||||
cwd = os.getcwd()
|
||||
    os.chdir(os.path.expanduser(lmpdir + "/lib/cuda"))
    str = "make clean"
    txt = commands.getoutput(str)
    if precision == "double": pflag = 2
    elif precision == "mixed": pflag = 4
    elif precision == "single": pflag = 1
    str = "make -j 16 precision=%d arch=%s" % (pflag,arch)
    txt = commands.getoutput(str)

    os.chdir(cwd)

# main program
# convert target keywords into target flags

cpu = opt = omp = 0
gpu = gpu_double = gpu_mixed = gpu_single = 0
cuda = cuda_double = cuda_mixed = cuda_single = 0
intel = intel_cpu = intel_phi = 0
kokkos = kokkos_omp = kokkos_phi = kokkos_cuda = 0

targets = sys.argv[1:]
for target in targets:
    if target == "cpu": cpu = 1
    elif target == "opt": opt = 1
    elif target == "omp": omp = 1
    elif target == "gpu/double": gpu_double = 1
    elif target == "gpu/mixed": gpu_mixed = 1
    elif target == "gpu/single": gpu_single = 1
    elif target == "gpu": gpu = 1
    elif target == "cuda/double": cuda_double = 1
    elif target == "cuda/mixed": cuda_mixed = 1
    elif target == "cuda/single": cuda_single = 1
    elif target == "cuda": cuda = 1
    elif target == "intel/cpu": intel_cpu = 1
    elif target == "intel/phi": intel_phi = 1
    elif target == "intel": intel = 1
    elif target == "kokkos/omp": kokkos_omp = 1
    elif target == "kokkos/phi": kokkos_phi = 1
    elif target == "kokkos/cuda": kokkos_cuda = 1
    elif target == "kokkos": kokkos = 1
    elif target == "all": cpu = omp = gpu = cuda = intel = kokkos = 1
    else: print "Target",target,"is unknown"

if gpu: gpu_double = gpu_mixed = gpu_single = 1
if cuda: cuda_double = cuda_mixed = cuda_single = 1
if intel: intel_cpu = intel_phi = 1
if kokkos: kokkos_omp = kokkos_phi = kokkos_cuda = 1

# CPU

if cpu:
    build_lammps(makefile = "Makefile.cpu", pkg = [])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_cpu" % lmpdir)

# OPT

if opt:
    build_lammps(makefile = "Makefile.opt", pkg = ["opt"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_opt" % lmpdir)

# OMP

if omp:
    build_lammps(makefile = "Makefile.omp", pkg = ["user-omp"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_omp" % lmpdir)

# GPU, 3 precisions

if gpu_double:
    build_gpu(makefile = "Makefile.gpu.double")
    build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_double" % lmpdir)

if gpu_mixed:
    build_gpu(makefile = "Makefile.gpu.mixed")
    build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_mixed" % lmpdir)

if gpu_single:
    build_gpu(makefile = "Makefile.gpu.single")
    build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_single" % lmpdir)

# CUDA, 3 precisions

if cuda_double:
    build_cuda(precision = "double", arch = 35)
    build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_double" % lmpdir)

if cuda_mixed:
    build_cuda(precision = "mixed", arch = 35)
    build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_mixed" % lmpdir)

if cuda_single:
    build_cuda(precision = "single", arch = 35)
    build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_single" % lmpdir)

# INTEL, CPU and Phi

if intel_cpu:
    build_lammps(makefile = "Makefile.intel.cpu", pkg = ["user-intel"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_intel_cpu" % lmpdir)

if intel_phi:
    build_lammps(makefile = "Makefile.intel.phi", pkg = ["user-intel","user-omp"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_intel_phi" % lmpdir)

# KOKKOS, all variants

if kokkos_omp:
    build_lammps(makefile = "Makefile.kokkos.omp", pkg = ["kokkos"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_omp" % lmpdir)

if kokkos_phi:
    build_lammps(makefile = "Makefile.kokkos.phi", pkg = ["kokkos"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_phi" % lmpdir)

if kokkos_cuda:
    build_lammps(makefile = "Makefile.kokkos.cuda", pkg = ["kokkos"])
    print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_cuda" % lmpdir)
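For reference, a minimal usage sketch for the build driver above, assuming the script is saved as build.py (a hypothetical name) and that lmpdir, build_lammps(), build_gpu(), and build_cuda() are defined earlier in the same file:

# usage sketch (hypothetical file name; the script is Python 2):
#   python build.py cpu omp gpu/double
#   python build.py all
# each target builds one executable and moves it into the current
# directory, e.g. ./lmp_cpu or ./lmp_gpu_double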
@ -1,22 +0,0 @@
# 3d Lennard-Jones melt

units           lj
atom_style      atomic

lattice         fcc 0.8442
region          box block 0 $x 0 $y 0 $z
create_box      1 box
create_atoms    1 box
mass            1 1.0

velocity        all create 1.44 87287 loop geom

pair_style      lj/cut 2.5
pair_coeff      1 1 1.0 1.0 2.5

neighbor        0.3 bin
neigh_modify    delay 0 every 20 check no

fix             1 all nve

run             $t
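The $x, $y, $z, and $t variables in this deck are not defined in the file itself; they are supplied on the LAMMPS command line with the -v flag, exactly as the benchmark scripts that follow do, e.g.:

mpirun -np 4 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj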
@ -1,29 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np 1 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.1

mpirun -np 2 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.2

mpirun -np 4 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.4

mpirun -np 6 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.6

mpirun -np 8 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.8

mpirun -np 10 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.10

mpirun -np 12 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.12

mpirun -np 14 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.14

mpirun -np 16 lmp_cpu -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cpu.128K.16
@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -N 1 lmp_cuda_double -c on -sf cuda -pk cuda 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.double.128K.1

mpirun -N 2 lmp_cuda_double -c on -sf cuda -pk cuda 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.double.128K.2

mpirun -N 1 lmp_cuda_mixed -c on -sf cuda -pk cuda 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.mixed.128K.1

mpirun -N 2 lmp_cuda_mixed -c on -sf cuda -pk cuda 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.mixed.128K.2

mpirun -N 1 lmp_cuda_single -c on -sf cuda -pk cuda 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.single.128K.1

mpirun -N 2 lmp_cuda_single -c on -sf cuda -pk cuda 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.cuda.single.128K.2
@ -1,155 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np 1 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.1.1

mpirun -np 2 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.2.1

mpirun -np 2 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.2.2

mpirun -np 4 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.4.1

mpirun -np 4 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.4.2

mpirun -np 6 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.6.1

mpirun -np 6 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.6.2

mpirun -np 8 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.8.1

mpirun -np 8 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.8.2

mpirun -np 10 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.10.1

mpirun -np 10 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.10.2

mpirun -np 12 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.12.1

mpirun -np 12 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.12.2

mpirun -np 14 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.14.1

mpirun -np 14 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.14.2

mpirun -np 16 lmp_gpu_single -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.16.1

mpirun -np 16 lmp_gpu_single -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.single.128K.16.2

mpirun -np 1 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.1.1

mpirun -np 2 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.2.1

mpirun -np 2 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.2.2

mpirun -np 4 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.4.1

mpirun -np 4 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.4.2

mpirun -np 6 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.6.1

mpirun -np 6 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.6.2

mpirun -np 8 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.8.1

mpirun -np 8 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.8.2

mpirun -np 10 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.10.1

mpirun -np 10 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.10.2

mpirun -np 12 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.12.1

mpirun -np 12 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.12.2

mpirun -np 14 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.14.1

mpirun -np 14 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.14.2

mpirun -np 16 lmp_gpu_mixed -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.16.1

mpirun -np 16 lmp_gpu_mixed -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.mixed.128K.16.2

mpirun -np 1 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.1.1

mpirun -np 2 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.2.1

mpirun -np 2 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.2.2

mpirun -np 4 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.4.1

mpirun -np 4 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.4.2

mpirun -np 6 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.6.1

mpirun -np 6 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.6.2

mpirun -np 8 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.8.1

mpirun -np 8 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.8.2

mpirun -np 10 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.10.1

mpirun -np 10 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.10.2

mpirun -np 12 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.12.1

mpirun -np 12 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.12.2

mpirun -np 14 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.14.1

mpirun -np 14 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.14.2

mpirun -np 16 lmp_gpu_double -sf gpu -pk gpu 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.16.1

mpirun -np 16 lmp_gpu_double -sf gpu -pk gpu 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.gpu.double.128K.16.2
@ -1,83 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np 1 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.1

mpirun -np 2 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.2

mpirun -np 4 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.4

mpirun -np 6 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.6

mpirun -np 8 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.8

mpirun -np 10 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.10

mpirun -np 12 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.12

mpirun -np 14 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.14

mpirun -np 16 lmp_intel_cpu -sf intel -pk intel 1 prec single -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.single.128K.16

mpirun -np 1 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.1

mpirun -np 2 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.2

mpirun -np 4 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.4

mpirun -np 6 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.6

mpirun -np 8 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.8

mpirun -np 10 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.10

mpirun -np 12 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.12

mpirun -np 14 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.14

mpirun -np 16 lmp_intel_cpu -sf intel -pk intel 1 prec mixed -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.mixed.128K.16

mpirun -np 1 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.1

mpirun -np 2 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.2

mpirun -np 4 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.4

mpirun -np 6 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.6

mpirun -np 8 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.8

mpirun -np 10 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.10

mpirun -np 12 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.12

mpirun -np 14 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.14

mpirun -np 16 lmp_intel_cpu -sf intel -pk intel 1 prec double -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.intel.cpu.double.128K.16
@ -1,74 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 1 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.1

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 2 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.2

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 3 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.3

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 4 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.4

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 5 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.5

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 6 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.6

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 7 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.7

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 8 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.8

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 9 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.9

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 10 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.10

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 11 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.11

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 12 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.12

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 13 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.13

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 14 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.14

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 15 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.15

mpirun -np 1 lmp_kokkos_cuda -k on g 1 t 16 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.1.16

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 1 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.1

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 2 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.2

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 3 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.3

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 4 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.4

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 5 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.5

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 6 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.6

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 7 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.7

mpirun -np 2 lmp_kokkos_cuda -k on g 2 t 8 -sf kk -pk kokkos binsize 2.8 comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.cuda.128K.2.8
@ -1,17 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np full -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 16 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.1.16

mpirun -np full -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 8 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.2.8

mpirun -np full -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 4 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.4.4

mpirun -np full -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 2 -sf kk -pk kokkos neigh full newton off comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.8.2

mpirun -np half -bind-to socket -map-by socket -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 1 -sf kk -pk kokkos neigh half newton on comm device -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.kokkos.omp.128K.16.1
@ -1,17 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np 1 lmp_omp -sf omp -pk omp 16 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.1.16

mpirun -np 2 lmp_omp -sf omp -pk omp 8 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.2.8

mpirun -np 4 lmp_omp -sf omp -pk omp 4 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.4.4

mpirun -np 8 lmp_omp -sf omp -pk omp 2 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.8.2

mpirun -np 16 lmp_omp -sf omp -pk omp 1 -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.omp.128K.16.1
@ -1,29 +0,0 @@
#!/bin/bash
#SBATCH -N 1 --time=12:00:00

mpirun -np 1 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.1

mpirun -np 2 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.2

mpirun -np 4 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.4

mpirun -np 6 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.6

mpirun -np 8 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.8

mpirun -np 10 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.10

mpirun -np 12 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.12

mpirun -np 14 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.14

mpirun -np 16 lmp_opt -sf opt -v x 32 -v y 32 -v z 32 -v t 100 < in.lj
mv log.lammps log.10Sep14.lj.opt.128K.16
@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -N 16 --time=12:00:00

mpirun -npernode 16 lmp_cpu -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cpu.2048K.16.16

mpirun -npernode 16 lmp_omp -sf omp -pk omp 1 -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.omp.2048K.16.1.16

mpirun -npernode 2 lmp_cuda -c on -sf cuda -pk cuda 2 -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cuda.2048K.2.16

mpirun -npernode 14 lmp_gpu -sf gpu -pk gpu 2 -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.gpu.2048K.2.14.16

mpirun -npernode 2 lmp_kokkos_cuda -k on g 2 t 1 -sf kk -pk kokkos comm device -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.cuda.2048K.2.1.16

mpirun -np 256 -bind-to core -map-by core -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 1 -sf kk -pk kokkos comm device -v x 64 -v y 64 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.omp.2048K.16.1.16
@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -N 16 --time=12:00:00

mpirun -npernode 16 lmp_cpu -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cpu.512K.16.16

mpirun -npernode 16 lmp_omp -sf omp -pk omp 1 -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.omp.512K.16.1.16

mpirun -npernode 2 lmp_cuda -c on -sf cuda -pk cuda 2 -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.cuda.512K.2.16

mpirun -npernode 14 lmp_gpu -sf gpu -pk gpu 2 -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.gpu.512K.2.14.16

mpirun -npernode 2 lmp_kokkos_cuda -k on g 2 t 1 -sf kk -pk kokkos comm device -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.cuda.512K.2.1.16

mpirun -np 256 -bind-to core -map-by core -x KMP_AFFINITY=scatter lmp_kokkos_omp -k on t 1 -sf kk -pk kokkos comm device -v x 128 -v y 128 -v z 128 -v t 100 < in.lj
mv log.lammps log.28Jun14.lj.kokkos.omp.512K.16.1.16
1
bench/POTENTIALS/CH.rebo
Symbolic link
@ -0,0 +1 @@
../../potentials/CH.rebo
@ -1,2 +0,0 @@
rc = 4.0
delr = 0.1
1
bench/POTENTIALS/Ni.meam
Symbolic link
@ -0,0 +1 @@
../../potentials/Ni.meam
@ -1,7 +1,7 @@
These are input scripts used to run benchmark tests for many of the
interatomic potentials in LAMMPS.  The results of running these
scripts on different machines are shown on the Potentials section of
the Benchmark page of the LAMMPS WWW site (lammps.sandia.gov/bench).
the Benchmark page of the LAMMPS WWW site (https://www.lammps.org/bench.html).

Examples are shown below of how to run these scripts.  Log files for
running them on 1 and 4 processors of a Linux box are included in the
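For illustration, running one of these decks typically looks like the following, assuming an MPI-enabled LAMMPS executable (the name lmp_mpi here is hypothetical):

lmp_mpi -in in.meam
mpirun -np 4 lmp_mpi -in in.meam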
@ -1,17 +0,0 @@
# Stillinger-Weber parameters for various elements and mixtures
# multiple entries can be added to this file, LAMMPS reads the ones it needs
# these entries are in LAMMPS "metal" units:
#   epsilon = eV; sigma = Angstroms
#   other quantities are unitless

# format of a single entry (one or more lines):
#   element 1, element 2, element 3,
#   epsilon, sigma, a, lambda, gamma, costheta0, A, B, p, q, tol

# Here are the original parameters in metal units, for Silicon from:
#
# Stillinger and Weber, Phys. Rev. B, v. 31, p. 5262, (1985)
#

Si Si Si 2.1683 2.0951 1.80 21.0 1.20 -0.333333333333
         7.049556277 0.6022245584 4.0 0.0 0.0
1
bench/POTENTIALS/Si.sw
Symbolic link
@ -0,0 +1 @@
../../potentials/Si.sw
@ -1,16 +0,0 @@
# Tersoff parameters for various elements and mixtures
# multiple entries can be added to this file, LAMMPS reads the ones it needs
# these entries are in LAMMPS "metal" units:
#   A,B = eV; lambda1,lambda2,lambda3 = 1/Angstroms; R,D = Angstroms
#   other quantities are unitless

# This is the Si parameterization from a particular Tersoff paper:
# J. Tersoff, PRB, 37, 6991 (1988)
# See the SiCGe.tersoff file for different Si variants.

# format of a single entry (one or more lines):
#   element 1, element 2, element 3,
#   m, gamma, lambda3, c, d, costheta0, n, beta, lambda2, B, R, D, lambda1, A

Si Si Si 3.0 1.0 1.3258 4.8381 2.0417 0.0000 22.956
         0.33675 1.3258 95.373 3.0 0.2 3.2394 3264.7
1
bench/POTENTIALS/Si.tersoff
Symbolic link
@ -0,0 +1 @@
../../potentials/Si.tersoff
File diff suppressed because it is too large
@ -1,24 +0,0 @@
# bulk Ni in MEAM

units           metal
atom_style      atomic

lattice         fcc 3.52
region          box block 0 20 0 20 0 20
create_box      1 box
create_atoms    1 box

pair_style      meam/c
pair_coeff      * * library.meam Ni4 Ni.meam Ni4

velocity        all create 1600.0 376847 loop geom

neighbor        1.0 bin
neigh_modify    delay 5 every 1

fix             1 all nve

timestep        0.005
thermo          50

run             100
@ -1,22 +0,0 @@
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell

units           real
atom_style      charge

read_data       data.reax

#replicate      7 8 10
replicate       7 8 5

velocity        all create 300.0 9999

pair_style      reax
pair_coeff      * * ffield.reax 1 2 3 4

timestep        0.1
fix             2 all nve

thermo          10
thermo_style    custom step temp ke pe pxx pyy pzz etotal

run             100
@ -11,7 +11,7 @@ neighbor        0.5 bin
neigh_modify    delay 5 every 1

pair_style      rebo
pair_coeff      * * CH.airebo C H
pair_coeff      * * CH.rebo C H

velocity        all create 300.0 761341

@ -1,162 +0,0 @@
# meam data from vax files fcc,bcc,dia 11/4/92
# elt lat z ielement atwt
# alpha b0 b1 b2 b3 alat esub asub
# t0 t1 t2 t3 rozero ibar

'Sn5' 'dia' 4. 50 118.
5.09 5.00 16.0 04.0 5.0 6.483 3.14 1.00
1.0 2.00 5.756 -0.30 1. 0
'Sn' 'dia' 4. 50 118.
5.09 5.42 8.0 5.0 6.0 6.483 3.14 1.12
1.0 3.0 5.707 +0.30 1. 0
'Cu' 'fcc' 12. 29 63.54
5.10570729 3.634 2.20 6 2.20 3.62 3.54 1.07
1.0 3.13803254 2.49438711 2.95269237 1. 0
'Ag' 'fcc' 12. 47 107.870
5.89222008 4.456 2.20 6 2.20 4.08 2.85 1.06
1.0 5.54097609 2.45015783 1.28843988 1. 0
'Au' 'fcc' 12. 79 196.967
6.34090112 5.449 2.20 6 2.20 4.07 3.93 1.04
1.0 1.58956328 1.50776392 2.60609758 1. 0
'Ni1' 'fcc' 12. 28 58.71
4.99 2.45 2.20 6 2.20 3.52 4.45 1.10
1.0 3.57 1.60 3.70 1.0 0
'Ni2' 'fcc' 12. 28 58.71
4.99 2.45 2.20 6 2.20 3.52 4.45 1.10
1.0 3.57 1.60 3.70 1.0 3
'Ni3' 'fcc' 12. 28 58.71
4.99 2.45 1.50 6 1.50 3.52 4.45 1.10
1.0 3.57 1.60 3.70 1.0 3
'Ni4' 'fcc' 12. 28 58.71
4.99 2.45 1.50 6 1.50 3.52 4.45 1.10
1.0 3.57 1.60 3.70 1.0 0
'Ni' 'fcc' 12. 28 58.71
4.99 2.64 1.50 4.50 1.50 3.52 4.45 1.10
1.0 1.692 4.987 3.683 1.0 1
'Nix' 'fcc' 12. 28 58.71
4.99 2.64 1.50 4.50 1.50 3.52 4.45 1.10
1.0 0.00 0.000 3.683 1.0 1
'Ni' 'fcc' 12. 28 58.71
4.99 3.25 0.80 4 1.50 3.52 4.45 1.07
1.0 -4.052 13.14 3.786 1.0 1
'Pd' 'fcc' 12. 46 106.4
6.43230473 4.975 2.20 6 2.20 3.89 3.91 1.01
1.0 2.33573516 1.38343023 4.47989049 1. 0
'Pt' 'fcc' 12. 78 195.09
6.44221724 4.673 2.20 6 2.20 3.92 5.77 1.04
1.0 2.73335406 -1.3759593 3.29322278 1. 0
'Al' 'fcc' 12. 13 26.9815
4.61 2.21 2.20 6.0 2.20 4.05 3.58 1.07
1.0 -1.78 -2.21 8.01 0.6 0
'Al' 'fcc' 12. 13 26.9815
4.69 1.56 4.00 5.5 0.60 4.05 3.36 1.09
1.0 -0.251 -3.450 8.298 0.6 1
'Al' 'fcc' 12. 13 26.9815
4.69 1.58 1.00 6.0 0.60 4.05 3.36 1.09
1.0 -0.808 -2.614 8.298 0.6 1
'Pb' 'fcc' 12. 82 207.19
6.0564428 5.306 2.20 6 2.20 4.95 2.04 1.01
1.0 2.74022352 3.06323991 1.2 1. 0
'Rh' 'fcc' 12. 45 102.905
6.0045385 1.131 1.00 2 1.00 3.8 5.75 1.05
1.0 2.9900 4.60231784 4.8 1. 0
'Ir' 'fcc' 12. 77 192.2
6.52315787 1.13 1.00 2 1.00 3.84 6.93 1.05
1.0 1.50000 8.09942666 4.8 1. 0
'Li' 'bcc' 8. 3 6.939
2.97244804 1.425 1.00 1.00169907 1.00 3.509 1.65 0.87
1.0 0.26395017 0.44431129 -0.2 1. 0
'Na' 'bcc' 8. 11 22.9898
3.64280541 2.313 1.00 1.00173951 1.00 4.291 1.13 0.9
1.0 3.55398839 0.68807569 -0.2 1. 0
'K' 'bcc' 8. 19 39.102
3.90128376 2.687 1.00 1.00186667 1.00 5.344 0.94 0.92
1.0 5.09756981 0.69413264 -0.2 1. 0
'V' 'bcc' 8. 23 50.942
4.83265262 4.113 1.00 1.00095022 1.00 3.04 5.3 1
1.0 4.20161301 4.09946561 -1 1. 0
'Nb' 'bcc' 8. 41 92.906
4.79306197 4.374 1.00 1.00101441 1.00 3.301 7.47 1
1.0 3.75762849 3.82514598 -1 1. 0
'Ta' 'bcc' 8. 73 180.948
4.89528669 3.709 1.00 1.00099783 1.00 3.303 8.09 0.99
1.0 6.08617812 3.35255804 -2.9 1. 0
'Cr' 'bcc' 8. 24 51.996
5.12169218 3.224 1.00 1.00048646 1.00 2.885 4.1 0.94
1.0 -0.207535 12.2600006 -1.9 1. 0
'Mo' 'bcc' 8. 42 95.94
5.84872871 4.481 1.00 1.00065204 1.00 3.15 6.81 0.99
1.0 3.47727181 9.48582009 -2.9 1. 0
'W' 'bcc' 8. 74 183.85
5.62777409 3.978 1.00 1.00065894 1.00 3.165 8.66 0.98
1.0 3.16353338 8.24586928 -2.7 1. 0
'WL' 'bcc' 8 74 183.85
5.6831 6.54 1 1 1 3.1639 8.66 0.4
1 -0.6 0.3 -8.7 1 3
'Fe' 'bcc' 8. 26 55.847
5.07292627 2.935 1.00 1.00080073 1.00 2.866 4.29 0.89
1.0 5.13579244 4.12042448 -2.7 1. 0
'Si' 'dia' 4. 14 28.086
4.87 4.8 4.8 4.8 4.8 5.431 4.63 1.
1.0 3.30 5.105 -0.80 1. 1
'Si97' 'dia' 4. 14 28.086
4.87 4.4 5.5 5.5 5.5 5.431 4.63 1.
1.0 3.13 4.47 -1.80 2.05 0
'Si92' 'dia' 4. 14 28.086
4.87 4.4 5.5 5.5 5.5 5.431 4.63 1.
1.0 3.13 4.47 -1.80 2.35 0
'Six' 'dia' 4 14 28.086
4.87 4.4 5.5 5.5 5.5 5.431 4.63 1.0
1.0 2.05 4.47 -1.8 2.05 0
'Sixb' 'dia' 4 14 28.086
4.87 4.4 5.5 5.5 5.5 5.431 4.63 1.0
1.0 2.05 4.47 -1.8 2.5 0
'Mg' 'hcp' 12. 12 24.305
5.45 2.70 0.0 0.35 3.0 3.20 1.55 1.11
1.0 8.00 04.1 -02.0 1.0 0
'C' 'dia' 4. 6 12.0111
4.38 4.10 4.200 5.00 3.00 3.567 7.37 1.000
1.0 5.0 9.34 -1.00 2.25 1
'C' 'dia' 4. 6 12.0111
4.38 5.20 3.87 4.00 4.50 3.567 7.37 1.278
1.0 15. 2.09 -6.00 2.5 1
'C' 'dia' 4. 6 12.0111
4.38 4.50 4.00 3.50 4.80 3.567 7.37 1.00
1.0 10.5 1.54 -8.75 3.2 1
'C' 'dia' 4. 6 12.0111
4.38 3.30 2.80 1.50 3.20 3.567 7.37 1.00
1.0 10.3 1.54 -8.80 2.5 1
'C' 'dia' 4. 6 12.0111
4.38 4.60 3.45 4.00 4.20 3.567 7.37 1.061
1.0 15.0 1.74 -8.00 2.5 1
'C' 'dia' 4. 6 12.0111
4.38 4.50 4.00 3.50 4.80 3.567 7.37 1.00
1.0 10.5 1.54 -8.75 3.2 1
'h' 'dim' 1. 1 1.0079
2.96 2.70 3.5 3.4 3.4 0.74 2.235 2.27
1.0 0.19 0.00 0.00 20.00 0
'h' 'dim' 1. 1 1.0079
2.96 2.00 4.0 4.0 0.0 0.74 2.235 1.00
1.0 -0.60 -0.80 -0.0 01.0 1
'H' 'dim' 1. 1 1.0079
2.96 2.96 3.0 3.0 3.0 0.74 2.235 2.50
1.0 0.20 -0.10 0.0 0.5 0
'H' 'dim' 1. 1 1.0079
2.96 2.0 3.0 4.0 0.0 0.74 2.225 1.00
1.0 -0.5 -1.00 0.0 0.15 1
'H' 'dim' 1. 1 1.0079
2.96 2.00 2.0 2.0 2.0 0.74 2.235 1.00
1.0 -0.60 -0.80 -0.0 01.0 2
'Hni' 'dim' 1. 1 1.0079
2.96 2.96 3.0 3.0 3.0 0.74 2.235 2.50
1.0 0.2 -0.1 0.0 0.5 0
'Hni' 'dim' 1. 1 1.0079
2.96 2.96 3.0 2.0 3.0 0.74 2.235 36.4
1.0 0.2 6.0 0.0 22.8 0
'Vac' 'fcc' 12. 1 1.
0 0 0.0 0 0.0 1E+08 0 1
0 0 0 0 1. 0
'zz' 'zzz' 99. 1 1.
0 0 0.0 0 0.0 0. 0. 0.
0 0 0 0 1. 0

1
bench/POTENTIALS/library.meam
Symbolic link
@ -0,0 +1 @@
../../potentials/library.meam
@ -1,75 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
  using 1 OpenMP thread(s) per MPI task
# bulk Ni in ADP

units           metal
atom_style      atomic

lattice         fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
region          box block 0 20 0 20 0 20
create_box      1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
  1 by 1 by 1 MPI processor grid
create_atoms    1 box
Created 32000 atoms
  Time spent = 0.00184107 secs

pair_style      adp
pair_coeff      * * Ni.adp Ni

velocity        all create 1600.0 376847 loop geom

neighbor        1.0 bin
neigh_modify    delay 5 every 1

fix             1 all nve

timestep        0.005

run             100
Neighbor list info ...
  update every 1 steps, delay 5 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 6.168
  ghost atom cutoff = 6.168
  binsize = 3.084, bins = 23 23 23
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair adp, perpetual
      attributes: half, newton on
      pair build: half/bin/atomonly/newton
      stencil: half/bin/3d/newton
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 27.56 | 27.56 | 27.56 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.105
100 793.05485 -139023.13 0 -135742.9 32175.694
Loop time of 11.9854 on 1 procs for 100 steps with 32000 atoms

Performance: 3.604 ns/day, 6.659 hours/ns, 8.344 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 11.447 | 11.447 | 11.447 | 0.0 | 95.51
Neigh | 0.48465 | 0.48465 | 0.48465 | 0.0 | 4.04
Comm | 0.019317 | 0.019317 | 0.019317 | 0.0 | 0.16
Output | 0.00011063 | 0.00011063 | 0.00011063 | 0.0 | 0.00
Modify | 0.025319 | 0.025319 | 0.025319 | 0.0 | 0.21
Other | | 0.009125 | | | 0.08

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 19911 ave 19911 max 19911 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.33704e+06 ave 1.33704e+06 max 1.33704e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 1337035
Ave neighs/atom = 41.7823
Neighbor list builds = 13
Dangerous builds = 0

Total wall time: 0:00:12
@ -1,75 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
  using 1 OpenMP thread(s) per MPI task
# bulk Ni in ADP

units           metal
atom_style      atomic

lattice         fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
region          box block 0 20 0 20 0 20
create_box      1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
  1 by 2 by 2 MPI processor grid
create_atoms    1 box
Created 32000 atoms
  Time spent = 0.000586033 secs

pair_style      adp
pair_coeff      * * Ni.adp Ni

velocity        all create 1600.0 376847 loop geom

neighbor        1.0 bin
neigh_modify    delay 5 every 1

fix             1 all nve

timestep        0.005

run             100
Neighbor list info ...
  update every 1 steps, delay 5 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 6.168
  ghost atom cutoff = 6.168
  binsize = 3.084, bins = 23 23 23
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair adp, perpetual
      attributes: half, newton on
      pair build: half/bin/atomonly/newton
      stencil: half/bin/3d/newton
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 12.45 | 12.45 | 12.45 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.105
100 793.05485 -139023.13 0 -135742.9 32175.694
Loop time of 3.49752 on 4 procs for 100 steps with 32000 atoms

Performance: 12.352 ns/day, 1.943 hours/ns, 28.592 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3203 | 3.3261 | 3.3317 | 0.3 | 95.10
Neigh | 0.12544 | 0.12594 | 0.12634 | 0.1 | 3.60
Comm | 0.024059 | 0.03001 | 0.035574 | 2.9 | 0.86
Output | 4.8161e-05 | 6.8128e-05 | 0.00011802 | 0.0 | 0.00
Modify | 0.010666 | 0.010841 | 0.011109 | 0.2 | 0.31
Other | | 0.00457 | | | 0.13

Nlocal: 8000 ave 8044 max 7960 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Nghost: 9131 ave 9171 max 9087 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 334259 ave 336108 max 332347 min
Histogram: 1 0 0 1 0 0 1 0 0 1

Total # of neighbors = 1337035
Ave neighs/atom = 41.7823
Neighbor list builds = 13
Dangerous builds = 0

Total wall time: 0:00:03
@ -1,87 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
  using 1 OpenMP thread(s) per MPI task
# AIREBO polyethelene benchmark

units           metal
atom_style      atomic

read_data       data.airebo
  orthogonal box = (-2.1 -2.1 0) to (2.1 2.1 25.579)
  1 by 1 by 1 MPI processor grid
  reading atoms ...
  60 atoms

replicate       17 16 2
  orthogonal box = (-2.1 -2.1 0) to (69.3 65.1 51.158)
  1 by 1 by 1 MPI processor grid
  32640 atoms
  Time spent = 0.00154901 secs

neighbor        0.5 bin
neigh_modify    delay 5 every 1

pair_style      airebo 3.0 1 1
pair_coeff      * * CH.airebo C H

velocity        all create 300.0 761341

fix             1 all nve
timestep        0.0005

thermo          10
run             100
Neighbor list info ...
  update every 1 steps, delay 5 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 10.7
  ghost atom cutoff = 10.7
  binsize = 5.35, bins = 14 13 10
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair airebo, perpetual
      attributes: full, newton on, ghost
      pair build: full/bin/ghost
      stencil: full/ghost/bin/3d
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 106.4 | 106.4 | 106.4 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -139299.7 0 -138034.03 7998.7287
10 161.33916 -138711.85 0 -138031.17 33242.273
20 208.59505 -138911.77 0 -138031.73 -3199.2371
30 139.73485 -138617.76 0 -138028.23 10890.529
40 142.15332 -138628.03 0 -138028.3 14614.022
50 114.21945 -138509.87 0 -138027.98 24700.885
60 164.9432 -138725.08 0 -138029.19 35135.722
70 162.14928 -138714.86 0 -138030.77 5666.4609
80 157.17575 -138694.81 0 -138031.7 19838.161
90 196.16354 -138859.65 0 -138032.05 -7942.9718
100 178.30378 -138783.8 0 -138031.55 31012.15
Loop time of 60.9424 on 1 procs for 100 steps with 32640 atoms

Performance: 0.071 ns/day, 338.569 hours/ns, 1.641 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 55.299 | 55.299 | 55.299 | 0.0 | 90.74
Neigh | 5.5777 | 5.5777 | 5.5777 | 0.0 | 9.15
Comm | 0.027658 | 0.027658 | 0.027658 | 0.0 | 0.05
Output | 0.0011463 | 0.0011463 | 0.0011463 | 0.0 | 0.00
Modify | 0.024684 | 0.024684 | 0.024684 | 0.0 | 0.04
Other | | 0.012 | | | 0.02

Nlocal: 32640 ave 32640 max 32640 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 48190 ave 48190 max 48190 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 2.22179e+07 ave 2.22179e+07 max 2.22179e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 22217870
Ave neighs/atom = 680.695
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:01:02
@ -1,87 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
  using 1 OpenMP thread(s) per MPI task
# AIREBO polyethelene benchmark

units           metal
atom_style      atomic

read_data       data.airebo
  orthogonal box = (-2.1 -2.1 0) to (2.1 2.1 25.579)
  1 by 1 by 4 MPI processor grid
  reading atoms ...
  60 atoms

replicate       17 16 2
  orthogonal box = (-2.1 -2.1 0) to (69.3 65.1 51.158)
  2 by 2 by 1 MPI processor grid
  32640 atoms
  Time spent = 0.00070262 secs

neighbor        0.5 bin
neigh_modify    delay 5 every 1

pair_style      airebo 3.0 1 1
pair_coeff      * * CH.airebo C H

velocity        all create 300.0 761341

fix             1 all nve
timestep        0.0005

thermo          10
run             100
Neighbor list info ...
  update every 1 steps, delay 5 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 10.7
  ghost atom cutoff = 10.7
  binsize = 5.35, bins = 14 13 10
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair airebo, perpetual
      attributes: full, newton on, ghost
      pair build: full/bin/ghost
      stencil: full/ghost/bin/3d
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 29.37 | 29.75 | 30.13 Mbytes
Step Temp E_pair E_mol TotEng Press
0 300 -139299.7 0 -138034.03 7998.7287
10 161.33916 -138711.85 0 -138031.17 33242.273
20 208.59505 -138911.77 0 -138031.73 -3199.2371
30 139.73485 -138617.76 0 -138028.23 10890.529
40 142.15332 -138628.03 0 -138028.3 14614.022
50 114.21945 -138509.87 0 -138027.98 24700.885
60 164.9432 -138725.08 0 -138029.19 35135.722
70 162.14928 -138714.86 0 -138030.77 5666.4609
80 157.17575 -138694.81 0 -138031.7 19838.161
90 196.16354 -138859.65 0 -138032.05 -7942.9718
100 178.30378 -138783.8 0 -138031.55 31012.15
Loop time of 16.768 on 4 procs for 100 steps with 32640 atoms

Performance: 0.258 ns/day, 93.156 hours/ns, 5.964 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 14.538 | 14.589 | 14.654 | 1.3 | 87.00
Neigh | 1.8853 | 1.8992 | 1.9159 | 0.8 | 11.33
Comm | 0.18073 | 0.25896 | 0.31361 | 10.6 | 1.54
Output | 0.00050807 | 0.0040419 | 0.0077746 | 5.6 | 0.02
Modify | 0.0094635 | 0.0096973 | 0.0099616 | 0.2 | 0.06
Other | | 0.007481 | | | 0.04

Nlocal: 8160 ave 8174 max 8146 min
Histogram: 1 0 1 0 0 0 0 1 0 1
Nghost: 22614.5 ave 22629 max 22601 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 5.55447e+06 ave 5.56557e+06 max 5.54193e+06 min
Histogram: 1 0 0 1 0 0 0 1 0 1

Total # of neighbors = 22217870
Ave neighs/atom = 680.695
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:17
@ -1,82 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
  using 1 OpenMP thread(s) per MPI task
# bulk CdTe via BOP

units           metal
atom_style      atomic

lattice         custom 6.82884 basis 0.0 0.0 0.0 basis 0.25 0.25 0.25 basis 0.0 0.5 0.5 basis 0.25 0.75 0.75 basis 0.5 0.0 0.5 basis 0.75 0.25 0.75 basis 0.5 0.5 0.0 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 6.82884 6.82884 6.82884
region          box block 0 20 0 20 0 10
create_box      2 box
Created orthogonal box = (0 0 0) to (136.577 136.577 68.2884)
  1 by 1 by 1 MPI processor grid
create_atoms    1 box basis 2 2 basis 4 2 basis 6 2 basis 8 2
Created 32000 atoms
  Time spent = 0.00191426 secs

pair_style      bop
pair_coeff      * * CdTe.bop.table Cd Te
Reading potential file CdTe.bop.table with DATE: 2012-06-25
Reading potential file CdTe.bop.table with DATE: 2012-06-25
mass            1 112.4
mass            2 127.6

comm_modify     cutoff 14.7

velocity        all create 1000.0 376847 loop geom

neighbor        0.1 bin
neigh_modify    delay 5 every 1

fix             1 all nve

timestep        0.001

run             100
Neighbor list info ...
  update every 1 steps, delay 5 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 5
  ghost atom cutoff = 14.7
  binsize = 2.5, bins = 55 55 28
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair bop, perpetual
      attributes: full, newton on, ghost
      pair build: full/bin/ghost
      stencil: full/ghost/bin/3d
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 19.39 | 19.39 | 19.39 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -69539.487 0 -65403.292 3473.2595
100 572.16481 -67769.936 0 -65403.35 1838.6993
Loop time of 24.1696 on 1 procs for 100 steps with 32000 atoms

Performance: 0.357 ns/day, 67.138 hours/ns, 4.137 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 23.355 | 23.355 | 23.355 | 0.0 | 96.63
Neigh | 0.7545 | 0.7545 | 0.7545 | 0.0 | 3.12
Comm | 0.026978 | 0.026978 | 0.026978 | 0.0 | 0.11
Output | 0.0001111 | 0.0001111 | 0.0001111 | 0.0 | 0.00
Modify | 0.024145 | 0.024145 | 0.024145 | 0.0 | 0.10
Other | | 0.009326 | | | 0.04

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 35071 ave 35071 max 35071 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 141288 ave 141288 max 141288 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 141288
Ave neighs/atom = 4.41525
Neighbor list builds = 14
Dangerous builds = 0
Total wall time: 0:00:24
@ -1,82 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
  using 1 OpenMP thread(s) per MPI task
# bulk CdTe via BOP

units           metal
atom_style      atomic

lattice         custom 6.82884 basis 0.0 0.0 0.0 basis 0.25 0.25 0.25 basis 0.0 0.5 0.5 basis 0.25 0.75 0.75 basis 0.5 0.0 0.5 basis 0.75 0.25 0.75 basis 0.5 0.5 0.0 basis 0.75 0.75 0.25
Lattice spacing in x,y,z = 6.82884 6.82884 6.82884
region          box block 0 20 0 20 0 10
create_box      2 box
Created orthogonal box = (0 0 0) to (136.577 136.577 68.2884)
  2 by 2 by 1 MPI processor grid
create_atoms    1 box basis 2 2 basis 4 2 basis 6 2 basis 8 2
Created 32000 atoms
  Time spent = 0.000597477 secs

pair_style      bop
pair_coeff      * * CdTe.bop.table Cd Te
Reading potential file CdTe.bop.table with DATE: 2012-06-25
Reading potential file CdTe.bop.table with DATE: 2012-06-25
mass            1 112.4
mass            2 127.6

comm_modify     cutoff 14.7

velocity        all create 1000.0 376847 loop geom

neighbor        0.1 bin
neigh_modify    delay 5 every 1

fix             1 all nve

timestep        0.001

run             100
Neighbor list info ...
  update every 1 steps, delay 5 steps, check yes
  max neighbors/atom: 2000, page size: 100000
  master list distance cutoff = 5
  ghost atom cutoff = 14.7
  binsize = 2.5, bins = 55 55 28
  1 neighbor lists, perpetual/occasional/extra = 1 0 0
  (1) pair bop, perpetual
      attributes: full, newton on, ghost
      pair build: full/bin/ghost
      stencil: full/ghost/bin/3d
      bin: standard
Per MPI rank memory allocation (min/avg/max) = 8.497 | 8.497 | 8.497 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1000 -69539.487 0 -65403.292 3473.2595
100 572.16481 -67769.936 0 -65403.35 1838.6993
Loop time of 6.50033 on 4 procs for 100 steps with 32000 atoms

Performance: 1.329 ns/day, 18.056 hours/ns, 15.384 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 5.7879 | 5.975 | 6.1607 | 5.4 | 91.92
Neigh | 0.27603 | 0.27621 | 0.27647 | 0.0 | 4.25
Comm | 0.049869 | 0.23531 | 0.42241 | 27.2 | 3.62
Output | 4.9829e-05 | 5.9724e-05 | 8.5592e-05 | 0.0 | 0.00
Modify | 0.0089927 | 0.0090921 | 0.0092406 | 0.1 | 0.14
Other | | 0.004665 | | | 0.07

Nlocal: 8000 ave 8006 max 7994 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 15171 ave 15177 max 15165 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 35322 ave 35412 max 35267 min
Histogram: 1 0 1 1 0 0 0 0 0 1

Total # of neighbors = 141288
Ave neighs/atom = 4.41525
Neighbor list builds = 14
Dangerous builds = 0
Total wall time: 0:00:06
@ -1,94 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# SiO2 for COMB potential

units metal
atom_style charge

read_data data.comb
triclinic box = (0 0 0) to (74.58 74.58 83.064) with tilt (0 0 0)
1 by 1 by 1 MPI processor grid
reading atoms ...
32400 atoms

mass 1 28.0855
group type1 type 1
10800 atoms in group type1
compute charge1 type1 property/atom q
compute q1 type1 reduce ave c_charge1
mass 2 16.00
group type2 type 2
21600 atoms in group type2
compute charge2 type2 property/atom q
compute q2 type2 reduce ave c_charge2

pair_style comb
pair_coeff * * ffield.comb Si O
neighbor 0.5 bin
neigh_modify every 10 delay 0 check yes

timestep 0.0002

thermo_style custom step temp etotal pe evdwl ecoul c_q1 c_q2 press vol
thermo_modify norm yes
velocity all create 300.0 3482028
fix 1 all nvt temp 300.0 300.0 0.1
fix 2 all qeq/comb 10 0.001 file fq.out

thermo 10
run 100
Neighbor list info ...
update every 10 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12.5
ghost atom cutoff = 12.5
binsize = 6.25, bins = 12 12 14
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair comb, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 106.2 | 106.2 | 106.2 Mbytes
Step Temp TotEng PotEng E_vdwl E_coul c_q1 c_q2 Press Volume
0 300 -6.8032038 -6.8419806 4.6274455 -11.469426 2.8875895 -1.4437947 13386.415 462016.62
10 273.21913 -6.8032489 -6.8385642 4.6221303 -11.460695 2.8872353 -1.4436176 13076.442 462016.62
20 242.0051 -6.803367 -6.8346477 4.6208311 -11.455479 2.8870176 -1.4435087 12080.423 462016.62
30 214.5618 -6.8034588 -6.8311922 4.620067 -11.451259 2.8870575 -1.4435287 10307.876 462016.62
40 198.14521 -6.8035174 -6.8291289 4.6202931 -11.449422 2.8874526 -1.4437263 7765.732 462016.62
50 197.15561 -6.8035468 -6.8290303 4.6219602 -11.450991 2.8883366 -1.4441683 4432.7134 462016.62
60 212.04532 -6.8035584 -6.8309666 4.6260476 -11.457014 2.8896425 -1.4448212 324.71226 462016.62
70 239.37999 -6.8035665 -6.8345078 4.6322984 -11.466806 2.8912723 -1.4456361 -4497.0492 462016.62
80 272.98301 -6.803583 -6.8388677 4.6404093 -11.479277 2.8932784 -1.4466392 -9896.1704 462016.62
90 305.77651 -6.8036184 -6.8431419 4.6512736 -11.494415 2.8953109 -1.4476554 -15675.983 462016.62
100 331.58255 -6.8036753 -6.8465344 4.662727 -11.509261 2.897273 -1.4486365 -21675.515 462016.62
Loop time of 517.206 on 1 procs for 100 steps with 32400 atoms

Performance: 0.003 ns/day, 7183.417 hours/ns, 0.193 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 114.18 | 114.18 | 114.18 | 0.0 | 22.08
Neigh | 0.47558 | 0.47558 | 0.47558 | 0.0 | 0.09
Comm | 0.030611 | 0.030611 | 0.030611 | 0.0 | 0.01
Output | 0.0024922 | 0.0024922 | 0.0024922 | 0.0 | 0.00
Modify | 402.51 | 402.51 | 402.51 | 0.0 | 77.82
Other | | 0.006137 | | | 0.00

Nlocal: 32400 ave 32400 max 32400 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 42518 ave 42518 max 42518 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1.85317e+07 ave 1.85317e+07 max 1.85317e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 18531740
Ave neighs/atom = 571.967
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:09:18
@ -1,94 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# SiO2 for COMB potential

units metal
atom_style charge

read_data data.comb
triclinic box = (0 0 0) to (74.58 74.58 83.064) with tilt (0 0 0)
1 by 2 by 2 MPI processor grid
reading atoms ...
32400 atoms

mass 1 28.0855
group type1 type 1
10800 atoms in group type1
compute charge1 type1 property/atom q
compute q1 type1 reduce ave c_charge1
mass 2 16.00
group type2 type 2
21600 atoms in group type2
compute charge2 type2 property/atom q
compute q2 type2 reduce ave c_charge2

pair_style comb
pair_coeff * * ffield.comb Si O
neighbor 0.5 bin
neigh_modify every 10 delay 0 check yes

timestep 0.0002

thermo_style custom step temp etotal pe evdwl ecoul c_q1 c_q2 press vol
thermo_modify norm yes
velocity all create 300.0 3482028
fix 1 all nvt temp 300.0 300.0 0.1
fix 2 all qeq/comb 10 0.001 file fq.out

thermo 10
run 100
Neighbor list info ...
update every 10 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12.5
ghost atom cutoff = 12.5
binsize = 6.25, bins = 12 12 14
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair comb, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Per MPI rank memory allocation (min/avg/max) = 32.11 | 32.11 | 32.11 Mbytes
Step Temp TotEng PotEng E_vdwl E_coul c_q1 c_q2 Press Volume
0 300 -6.8032038 -6.8419806 4.6274455 -11.469426 2.8875895 -1.4437947 13386.415 462016.62
10 273.21913 -6.8032489 -6.8385642 4.6221303 -11.460695 2.8872353 -1.4436176 13076.442 462016.62
20 242.0051 -6.803367 -6.8346477 4.6208311 -11.455479 2.8870176 -1.4435087 12080.423 462016.62
30 214.5618 -6.8034588 -6.8311922 4.620067 -11.451259 2.8870575 -1.4435287 10307.876 462016.62
40 198.14521 -6.8035174 -6.8291289 4.6202931 -11.449422 2.8874526 -1.4437263 7765.732 462016.62
50 197.15561 -6.8035468 -6.8290303 4.6219602 -11.450991 2.8883366 -1.4441683 4432.7134 462016.62
60 212.04532 -6.8035584 -6.8309666 4.6260476 -11.457014 2.8896425 -1.4448212 324.71226 462016.62
70 239.37999 -6.8035665 -6.8345078 4.6322984 -11.466806 2.8912723 -1.4456361 -4497.0492 462016.62
80 272.98301 -6.803583 -6.8388677 4.6404093 -11.479277 2.8932784 -1.4466392 -9896.1704 462016.62
90 305.77651 -6.8036184 -6.8431419 4.6512736 -11.494415 2.8953109 -1.4476554 -15675.983 462016.62
100 331.58255 -6.8036753 -6.8465344 4.662727 -11.509261 2.897273 -1.4486365 -21675.515 462016.62
Loop time of 131.437 on 4 procs for 100 steps with 32400 atoms

Performance: 0.013 ns/day, 1825.518 hours/ns, 0.761 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 28.847 | 28.874 | 28.913 | 0.5 | 21.97
Neigh | 0.10981 | 0.11084 | 0.11145 | 0.2 | 0.08
Comm | 0.28924 | 0.32866 | 0.3556 | 4.5 | 0.25
Output | 0.0010426 | 0.0011656 | 0.0015302 | 0.6 | 0.00
Modify | 102.12 | 102.12 | 102.12 | 0.0 | 77.69
Other | | 0.003455 | | | 0.00

Nlocal: 8100 ave 8110 max 8090 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 20725.2 ave 20772 max 20694 min
Histogram: 1 1 0 0 1 0 0 0 0 1
Neighs: 0 ave 0 max 0 min
Histogram: 4 0 0 0 0 0 0 0 0 0
FullNghs: 4.63294e+06 ave 4.63866e+06 max 4.62736e+06 min
Histogram: 1 0 0 0 1 1 0 0 0 1

Total # of neighbors = 18531740
Ave neighs/atom = 571.967
Neighbor list builds = 1
Dangerous builds = 0
Total wall time: 0:02:21
@ -1,75 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# DPD benchmark

units lj
atom_style atomic
comm_modify mode single vel yes

lattice fcc 3.0
Lattice spacing in x,y,z = 1.10064 1.10064 1.10064
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (22.0128 22.0128 22.0128)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.0018332 secs
mass 1 1.0

velocity all create 1.0 87287 loop geom

pair_style dpd 1.0 1.0 928948
pair_coeff 1 1 25.0 4.5

neighbor 0.5 bin
neigh_modify delay 0 every 1

fix 1 all nve
timestep 0.04

run 100
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.5
ghost atom cutoff = 1.5
binsize = 0.75, bins = 30 30 30
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair dpd, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 11.32 | 11.32 | 11.32 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 3.6872574 0 5.1872105 28.880274
100 1.0246036 4.5727353 0 6.1095927 23.859969
Loop time of 3.09286 on 1 procs for 100 steps with 32000 atoms

Performance: 111741.340 tau/day, 32.333 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.5326 | 1.5326 | 1.5326 | 0.0 | 49.55
Neigh | 1.4771 | 1.4771 | 1.4771 | 0.0 | 47.76
Comm | 0.044292 | 0.044292 | 0.044292 | 0.0 | 1.43
Output | 0.00011039 | 0.00011039 | 0.00011039 | 0.0 | 0.00
Modify | 0.022322 | 0.022322 | 0.022322 | 0.0 | 0.72
Other | | 0.01648 | | | 0.53

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 14981 ave 14981 max 14981 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 660587 ave 660587 max 660587 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 660587
Ave neighs/atom = 20.6433
Neighbor list builds = 50
Dangerous builds = 0
Total wall time: 0:00:03
@ -1,75 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# DPD benchmark

units lj
atom_style atomic
comm_modify mode single vel yes

lattice fcc 3.0
Lattice spacing in x,y,z = 1.10064 1.10064 1.10064
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (22.0128 22.0128 22.0128)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000589132 secs
mass 1 1.0

velocity all create 1.0 87287 loop geom

pair_style dpd 1.0 1.0 928948
pair_coeff 1 1 25.0 4.5

neighbor 0.5 bin
neigh_modify delay 0 every 1

fix 1 all nve
timestep 0.04

run 100
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.5
ghost atom cutoff = 1.5
binsize = 0.75, bins = 30 30 30
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair dpd, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 3.874 | 3.874 | 3.874 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1 3.6872574 0 5.1872105 28.911346
100 1.0219182 4.5817845 0 6.1146139 23.803115
Loop time of 0.83904 on 4 procs for 100 steps with 32000 atoms

Performance: 411899.440 tau/day, 119.184 timesteps/s
99.3% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.39605 | 0.40101 | 0.40702 | 0.6 | 47.79
Neigh | 0.38186 | 0.38494 | 0.38738 | 0.4 | 45.88
Comm | 0.032073 | 0.039688 | 0.045953 | 2.9 | 4.73
Output | 4.4823e-05 | 5.4002e-05 | 7.844e-05 | 0.0 | 0.01
Modify | 0.0056572 | 0.0056887 | 0.0057547 | 0.1 | 0.68
Other | | 0.007655 | | | 0.91

Nlocal: 8000 ave 8014 max 7986 min
Histogram: 1 1 0 0 0 0 0 0 1 1
Nghost: 6744 ave 6764 max 6726 min
Histogram: 1 0 0 1 0 1 0 0 0 1
Neighs: 165107 ave 166433 max 163419 min
Histogram: 1 0 1 0 0 0 0 0 0 2

Total # of neighbors = 660428
Ave neighs/atom = 20.6384
Neighbor list builds = 50
Dangerous builds = 0
Total wall time: 0:00:00
@ -1,74 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# bulk Cu in EAM

units metal
atom_style atomic

lattice fcc 3.615
Lattice spacing in x,y,z = 3.615 3.615 3.615
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (72.3 72.3 72.3)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00185037 secs

pair_style eam
pair_coeff 1 1 Cu_u3.eam

velocity all create 1600.0 376847 loop geom

neighbor 1.0 bin
neigh_modify delay 5 every 1

fix 1 all nve

timestep 0.005

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.95
ghost atom cutoff = 5.95
binsize = 2.975, bins = 25 25 25
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair eam, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 16.83 | 16.83 | 16.83 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -113280 0 -106662.09 18703.573
100 801.832 -109957.3 0 -106640.77 51322.821
Loop time of 3.92295 on 1 procs for 100 steps with 32000 atoms

Performance: 11.012 ns/day, 2.179 hours/ns, 25.491 timesteps/s
99.6% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 3.3913 | 3.3913 | 3.3913 | 0.0 | 86.45
Neigh | 0.48107 | 0.48107 | 0.48107 | 0.0 | 12.26
Comm | 0.01729 | 0.01729 | 0.01729 | 0.0 | 0.44
Output | 0.00011253 | 0.00011253 | 0.00011253 | 0.0 | 0.00
Modify | 0.024349 | 0.024349 | 0.024349 | 0.0 | 0.62
Other | | 0.008847 | | | 0.23

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 19909 ave 19909 max 19909 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.20778e+06 ave 1.20778e+06 max 1.20778e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 1207784
Ave neighs/atom = 37.7433
Neighbor list builds = 13
Dangerous builds = 0
Total wall time: 0:00:03
@ -1,74 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# bulk Cu in EAM

units metal
atom_style atomic

lattice fcc 3.615
Lattice spacing in x,y,z = 3.615 3.615 3.615
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (72.3 72.3 72.3)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000595331 secs

pair_style eam
pair_coeff 1 1 Cu_u3.eam

velocity all create 1600.0 376847 loop geom

neighbor 1.0 bin
neigh_modify delay 5 every 1

fix 1 all nve

timestep 0.005

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5.95
ghost atom cutoff = 5.95
binsize = 2.975, bins = 25 25 25
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair eam, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.381 | 7.381 | 7.381 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -113280 0 -106662.09 18703.573
100 801.832 -109957.3 0 -106640.77 51322.821
Loop time of 1.04497 on 4 procs for 100 steps with 32000 atoms

Performance: 41.341 ns/day, 0.581 hours/ns, 95.697 timesteps/s
99.4% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.88513 | 0.88724 | 0.89191 | 0.3 | 84.91
Neigh | 0.12418 | 0.12458 | 0.12511 | 0.1 | 11.92
Comm | 0.015654 | 0.020543 | 0.022984 | 2.0 | 1.97
Output | 4.8637e-05 | 5.8711e-05 | 8.6546e-05 | 0.0 | 0.01
Modify | 0.0085199 | 0.0085896 | 0.0086446 | 0.1 | 0.82
Other | | 0.003959 | | | 0.38

Nlocal: 8000 ave 8008 max 7993 min
Histogram: 2 0 0 0 0 0 0 0 1 1
Nghost: 9130.25 ave 9138 max 9122 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 301946 ave 302392 max 301360 min
Histogram: 1 0 0 0 1 0 0 0 1 1

Total # of neighbors = 1207784
Ave neighs/atom = 37.7433
Neighbor list builds = 13
Dangerous builds = 0
Total wall time: 0:00:01
@ -1,97 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# eFF benchmark of H plasma

units electron
atom_style electron

read_data data.eff
orthogonal box = (0 0 0) to (41.9118 41.9118 41.9118)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities

pair_style eff/cut 12
pair_coeff * *

neigh_modify one 6000 page 60000

comm_modify vel yes

compute effTemp all temp/eff

thermo 5
thermo_style custom step etotal pe ke temp press
thermo_modify temp effTemp

fix 1 all nve/eff

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 6000, page size: 60000
master list distance cutoff = 14
ghost atom cutoff = 14
binsize = 7, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair eff/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 370.9 | 370.9 | 370.9 Mbytes
Step TotEng PotEng KinEng Temp Press
0 4046.5854 796.63785 3249.9475 42763.133 4.4764483e+12
5 4046.5854 796.95799 3249.6274 42758.92 4.4728546e+12
10 4046.5854 797.71165 3248.8737 42749.004 4.4690821e+12
15 4046.5854 798.8949 3247.6905 42733.435 4.4651331e+12
20 4046.5854 800.50332 3246.0821 42712.271 4.4610102e+12
25 4046.5854 802.53206 3244.0534 42685.577 4.456716e+12
30 4046.5855 804.97579 3241.6097 42653.422 4.4522535e+12
35 4046.5855 807.82873 3238.7567 42615.883 4.4476257e+12
40 4046.5855 811.08467 3235.5008 42573.041 4.4428357e+12
45 4046.5855 814.73696 3231.8485 42524.984 4.437887e+12
50 4046.5855 818.77851 3227.807 42471.806 4.432783e+12
55 4046.5855 823.20183 3223.3837 42413.603 4.4275273e+12
60 4046.5856 827.99901 3218.5866 42350.482 4.4221238e+12
65 4046.5856 833.16176 3213.4238 42282.55 4.4165764e+12
70 4046.5856 838.68137 3207.9042 42209.923 4.4108891e+12
75 4046.5856 844.54877 3202.0369 42132.719 4.4050662e+12
80 4046.5857 850.75454 3195.8311 42051.064 4.399112e+12
85 4046.5857 857.28886 3189.2968 41965.085 4.393031e+12
90 4046.5857 864.14162 3182.4441 41874.916 4.3868277e+12
95 4046.5857 871.30234 3175.2834 41780.695 4.3805068e+12
100 4046.5858 878.76023 3167.8255 41682.563 4.3740731e+12
Loop time of 323.031 on 1 procs for 100 steps with 32000 atoms

Performance: 26.747 fs/day, 0.897 hours/fs, 0.310 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 322.78 | 322.78 | 322.78 | 0.0 | 99.92
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.1876 | 0.1876 | 0.1876 | 0.0 | 0.06
Output | 0.0027025 | 0.0027025 | 0.0027025 | 0.0 | 0.00
Modify | 0.032475 | 0.032475 | 0.032475 | 0.0 | 0.01
Other | | 0.02538 | | | 0.01

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 114349 ave 114349 max 114349 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 8.10572e+07 ave 8.10572e+07 max 8.10572e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 81057159
Ave neighs/atom = 2533.04
Neighbor list builds = 0
Dangerous builds = 0

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:05:27
@ -1,97 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# eFF benchmark of H plasma

units electron
atom_style electron

read_data data.eff
orthogonal box = (0 0 0) to (41.9118 41.9118 41.9118)
1 by 2 by 2 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities

pair_style eff/cut 12
pair_coeff * *

neigh_modify one 6000 page 60000

comm_modify vel yes

compute effTemp all temp/eff

thermo 5
thermo_style custom step etotal pe ke temp press
thermo_modify temp effTemp

fix 1 all nve/eff

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 6000, page size: 60000
master list distance cutoff = 14
ghost atom cutoff = 14
binsize = 7, bins = 6 6 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair eff/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 101.4 | 104.8 | 108.3 Mbytes
Step TotEng PotEng KinEng Temp Press
0 4046.5854 796.63785 3249.9475 42763.133 4.4764483e+12
5 4046.5854 796.95799 3249.6274 42758.92 4.4728546e+12
10 4046.5854 797.71165 3248.8737 42749.004 4.4690821e+12
15 4046.5854 798.8949 3247.6905 42733.435 4.4651331e+12
20 4046.5854 800.50332 3246.0821 42712.271 4.4610102e+12
25 4046.5854 802.53206 3244.0534 42685.577 4.456716e+12
30 4046.5855 804.97579 3241.6097 42653.422 4.4522535e+12
35 4046.5855 807.82873 3238.7567 42615.883 4.4476257e+12
40 4046.5855 811.08467 3235.5008 42573.041 4.4428357e+12
45 4046.5855 814.73696 3231.8485 42524.984 4.437887e+12
50 4046.5855 818.77851 3227.807 42471.806 4.432783e+12
55 4046.5855 823.20183 3223.3837 42413.603 4.4275273e+12
60 4046.5856 827.99901 3218.5866 42350.482 4.4221238e+12
65 4046.5856 833.16176 3213.4238 42282.55 4.4165764e+12
70 4046.5856 838.68137 3207.9042 42209.923 4.4108891e+12
75 4046.5856 844.54877 3202.0369 42132.719 4.4050662e+12
80 4046.5857 850.75454 3195.8311 42051.064 4.399112e+12
85 4046.5857 857.28886 3189.2968 41965.085 4.393031e+12
90 4046.5857 864.14162 3182.4441 41874.916 4.3868277e+12
95 4046.5857 871.30234 3175.2834 41780.695 4.3805068e+12
100 4046.5858 878.76023 3167.8255 41682.563 4.3740731e+12
Loop time of 90.1636 on 4 procs for 100 steps with 32000 atoms

Performance: 95.826 fs/day, 0.250 hours/fs, 1.109 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 83.772 | 86.516 | 89.593 | 29.5 | 95.95
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.51677 | 3.5934 | 6.3368 | 144.6 | 3.99
Output | 0.0012872 | 0.0018208 | 0.0024981 | 1.0 | 0.00
Modify | 0.017231 | 0.018405 | 0.01983 | 0.8 | 0.02
Other | | 0.03431 | | | 0.04

Nlocal: 8000 ave 8112 max 7875 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Nghost: 65589 ave 66004 max 65177 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 2.02643e+07 ave 2.11126e+07 max 1.94058e+07 min
Histogram: 2 0 0 0 0 0 0 0 0 2

Total # of neighbors = 81057159
Ave neighs/atom = 2533.04
Neighbor list builds = 0
Dangerous builds = 0

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:01:31
@ -1,77 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# EIM benchmark
# if run long enough (e.g. 1M steps), the unstable CsCl form of a NaCl single
# crystal can be annealed to the correct NaCl type of NaCl polycrystals

units metal
atom_style atomic

read_data data.eim
orthogonal box = (-0.5 -0.5 -0.5) to (71.58 143.66 71.58)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities

pair_style eim
pair_coeff * * Na Cl ffield.eim Na Cl

neighbor 0.3 bin
neigh_modify delay 0 every 1

timestep 0.0005
thermo_style custom step pe pxx pyy pzz temp

velocity all create 1400.0 43454 dist gaussian mom yes

fix int all npt temp 1400.0 1400.0 0.1 aniso 0.0 0.0 0.1
# anneal in much longer run
#fix int all npt temp 1400.0 300.0 0.1 aniso 0.0 0.0 0.1

run 100
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7.906
ghost atom cutoff = 7.906
binsize = 3.953, bins = 19 37 19
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair eim, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 17.72 | 17.72 | 17.72 Mbytes
Step PotEng Pxx Pyy Pzz Temp
0 -90567.58 -117883.6 -118039.81 -117894.07 1400
100 -91997.012 -4104.7052 -4138.276 -4145.8936 944.10136
Loop time of 11.4536 on 1 procs for 100 steps with 32000 atoms

Performance: 0.377 ns/day, 63.631 hours/ns, 8.731 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 9.8277 | 9.8277 | 9.8277 | 0.0 | 85.80
Neigh | 1.484 | 1.484 | 1.484 | 0.0 | 12.96
Comm | 0.028584 | 0.028584 | 0.028584 | 0.0 | 0.25
Output | 0.00023127 | 0.00023127 | 0.00023127 | 0.0 | 0.00
Modify | 0.09791 | 0.09791 | 0.09791 | 0.0 | 0.85
Other | | 0.0152 | | | 0.13

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 21505 ave 21505 max 21505 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.5839e+06 ave 1.5839e+06 max 1.5839e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 1583901
Ave neighs/atom = 49.4969
Neighbor list builds = 37
Dangerous builds = 12
Total wall time: 0:00:11
@ -1,77 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# EIM benchmark
# if run long enough (e.g. 1M steps), the unstable CsCl form of a NaCl single
# crystal can be annealed to the correct NaCl type of NaCl polycrystals

units metal
atom_style atomic

read_data data.eim
orthogonal box = (-0.5 -0.5 -0.5) to (71.58 143.66 71.58)
1 by 4 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities

pair_style eim
pair_coeff * * Na Cl ffield.eim Na Cl

neighbor 0.3 bin
neigh_modify delay 0 every 1

timestep 0.0005
thermo_style custom step pe pxx pyy pzz temp

velocity all create 1400.0 43454 dist gaussian mom yes

fix int all npt temp 1400.0 1400.0 0.1 aniso 0.0 0.0 0.1
# anneal in much longer run
#fix int all npt temp 1400.0 300.0 0.1 aniso 0.0 0.0 0.1

run 100
Neighbor list info ...
update every 1 steps, delay 0 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 7.906
ghost atom cutoff = 7.906
binsize = 3.953, bins = 19 37 19
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair eim, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 7.285 | 7.285 | 7.285 Mbytes
Step PotEng Pxx Pyy Pzz Temp
0 -90567.58 -117883.6 -118039.81 -117894.07 1400
100 -91997.012 -4104.7052 -4138.276 -4145.8936 944.10136
Loop time of 3.12061 on 4 procs for 100 steps with 32000 atoms

Performance: 1.384 ns/day, 17.337 hours/ns, 32.045 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.6504 | 2.6583 | 2.6685 | 0.5 | 85.18
Neigh | 0.36996 | 0.37847 | 0.39396 | 1.5 | 12.13
Comm | 0.037041 | 0.040586 | 0.04504 | 1.4 | 1.30
Output | 7.081e-05 | 8.75e-05 | 0.00012994 | 0.0 | 0.00
Modify | 0.029286 | 0.035978 | 0.047942 | 3.9 | 1.15
Other | | 0.007206 | | | 0.23

Nlocal: 8000 ave 8000 max 8000 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 9460.25 ave 9469 max 9449 min
Histogram: 1 0 0 0 0 1 0 1 0 1
Neighs: 395975 ave 397239 max 394616 min
Histogram: 1 0 0 1 0 0 0 1 0 1

Total # of neighbors = 1583901
Ave neighs/atom = 49.4969
Neighbor list builds = 37
Dangerous builds = 12
Total wall time: 0:00:03
@ -1,84 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# FENE beadspring benchmark

units lj
atom_style bond
special_bonds fene

read_data data.fene
orthogonal box = (-16.796 -16.796 -16.796) to (16.796 16.796 16.796)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
scanning bonds ...
1 = max bonds/atom
reading bonds ...
31680 bonds
2 = max # of 1-2 neighbors
2 = max # of special neighbors

neighbor 0.4 bin
neigh_modify delay 5 every 1

bond_style fene
bond_coeff 1 30.0 1.5 1.0 1.0

pair_style lj/cut 1.12
pair_modify shift yes
pair_coeff 1 1 1.0 1.0 1.12

fix 1 all nve
fix 2 all langevin 1.0 1.0 10.0 904297

timestep 0.012

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.52
ghost atom cutoff = 1.52
binsize = 0.76, bins = 45 45 45
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 13.18 | 13.18 | 13.18 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.97029772 0.44484087 20.494523 22.394765 4.6721833
100 0.9729966 0.4361122 20.507698 22.40326 4.6548819
Loop time of 0.66285 on 1 procs for 100 steps with 32000 atoms

Performance: 156415.445 tau/day, 150.864 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.13075 | 0.13075 | 0.13075 | 0.0 | 19.73
Bond | 0.046363 | 0.046363 | 0.046363 | 0.0 | 6.99
Neigh | 0.3172 | 0.3172 | 0.3172 | 0.0 | 47.85
Comm | 0.016553 | 0.016553 | 0.016553 | 0.0 | 2.50
Output | 0.00010395 | 0.00010395 | 0.00010395 | 0.0 | 0.02
Modify | 0.14515 | 0.14515 | 0.14515 | 0.0 | 21.90
Other | | 0.006728 | | | 1.02

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 9493 ave 9493 max 9493 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 155873 ave 155873 max 155873 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 155873
Ave neighs/atom = 4.87103
Ave special neighs/atom = 1.98
Neighbor list builds = 20
Dangerous builds = 20
Total wall time: 0:00:00
@ -1,84 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# FENE beadspring benchmark

units lj
atom_style bond
special_bonds fene

read_data data.fene
orthogonal box = (-16.796 -16.796 -16.796) to (16.796 16.796 16.796)
1 by 2 by 2 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
scanning bonds ...
1 = max bonds/atom
reading bonds ...
31680 bonds
2 = max # of 1-2 neighbors
2 = max # of special neighbors

neighbor 0.4 bin
neigh_modify delay 5 every 1

bond_style fene
bond_coeff 1 30.0 1.5 1.0 1.0

pair_style lj/cut 1.12
pair_modify shift yes
pair_coeff 1 1 1.0 1.0 1.12

fix 1 all nve
fix 2 all langevin 1.0 1.0 10.0 904297

timestep 0.012

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.52
ghost atom cutoff = 1.52
binsize = 0.76, bins = 45 45 45
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 4.605 | 4.605 | 4.606 Mbytes
Step Temp E_pair E_mol TotEng Press
0 0.97029772 0.44484087 20.494523 22.394765 4.6721833
100 0.9736748 0.44378481 20.502389 22.40664 4.7809557
Loop time of 0.184782 on 4 procs for 100 steps with 32000 atoms

Performance: 561093.346 tau/day, 541.178 timesteps/s
98.4% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.033747 | 0.034391 | 0.035036 | 0.3 | 18.61
Bond | 0.012475 | 0.012579 | 0.012812 | 0.1 | 6.81
Neigh | 0.083916 | 0.083953 | 0.084022 | 0.0 | 45.43
Comm | 0.012409 | 0.01363 | 0.014534 | 0.7 | 7.38
Output | 4.1246e-05 | 5.9545e-05 | 0.00010443 | 0.0 | 0.03
Modify | 0.036675 | 0.037876 | 0.038357 | 0.4 | 20.50
Other | | 0.002294 | | | 1.24

Nlocal: 8000 ave 8023 max 7978 min
Histogram: 1 0 0 0 1 1 0 0 0 1
Nghost: 4158.75 ave 4175 max 4145 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 38940 ave 39184 max 38640 min
Histogram: 1 0 0 0 0 1 1 0 0 1

Total # of neighbors = 155760
Ave neighs/atom = 4.8675
Ave special neighs/atom = 1.98
Neighbor list builds = 20
Dangerous builds = 20
Total wall time: 0:00:00
@ -1,103 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# Gay-Berne benchmark
# biaxial ellipsoid mesogens in isotropic phase
# shape: 2 1.5 1
# cutoff 4.0 with skin 0.8
# NPT, T=2.4, P=8.0

units lj
atom_style ellipsoid

# creation
#lattice sc 0.22
#region box block 0 32 0 32 0 32
#create_box 1 box
#create_atoms 1 box
#set group all quat/random 982381

read_data data.gb
orthogonal box = (2.19575 2.19575 2.19575) to (50.8124 50.8124 50.8124)
1 by 1 by 1 MPI processor grid
reading atoms ...
32768 atoms
reading velocities ...
32768 velocities
32768 ellipsoids

compute rot all temp/asphere
group spheroid type 1
32768 atoms in group spheroid
variable dof equal count(spheroid)+3
compute_modify rot extra ${dof}
compute_modify rot extra 32771

velocity all create 2.4 41787 loop geom

pair_style gayberne 1.0 3.0 1.0 4.0
pair_coeff 1 1 1.0 1.0 1.0 0.5 0.2 1.0 0.5 0.2

neighbor 0.8 bin

timestep 0.002
thermo 20

# equilibration
#fix 1 all npt/asphere temp 2.4 2.4 0.1 iso 5.0 8.0 0.1
#compute_modify 1_temp extra ${dof}
#run 100
#write_restart tmp.restart

fix 1 all npt/asphere temp 2.4 2.4 0.2 iso 8.0 8.0 0.2
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.8
ghost atom cutoff = 4.8
binsize = 2.4, bins = 21 21 21
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair gayberne, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 28.91 | 28.91 | 28.91 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.4 0.50438568 0 4.1042758 6.7818168 114909.09
20 2.7357818 0.26045557 0 4.364003 6.8299368 111715.16
40 2.9201296 0.22570735 0 4.605768 7.0767907 109473.23
60 2.9820039 0.19733812 0 4.6702075 7.1507065 108393.77
80 3.0148529 0.15114819 0 4.6732895 7.1699502 107672.24
100 3.0206703 0.10567623 0 4.6365433 7.154345 107184.83
Loop time of 43.7894 on 1 procs for 100 steps with 32768 atoms

Performance: 394.616 tau/day, 2.284 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 42.881 | 42.881 | 42.881 | 0.0 | 97.93
Neigh | 0.35071 | 0.35071 | 0.35071 | 0.0 | 0.80
Comm | 0.065153 | 0.065153 | 0.065153 | 0.0 | 0.15
Output | 0.00054383 | 0.00054383 | 0.00054383 | 0.0 | 0.00
Modify | 0.47852 | 0.47852 | 0.47852 | 0.0 | 1.09
Other | | 0.01337 | | | 0.03

Nlocal: 32768 ave 32768 max 32768 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 25669 ave 25669 max 25669 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 2.30433e+06 ave 2.30433e+06 max 2.30433e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 2304332
Ave neighs/atom = 70.3226
Neighbor list builds = 6
Dangerous builds = 3

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:00:44
@ -1,103 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# Gay-Berne benchmark
# biaxial ellipsoid mesogens in isotropic phase
# shape: 2 1.5 1
# cutoff 4.0 with skin 0.8
# NPT, T=2.4, P=8.0

units lj
atom_style ellipsoid

# creation
#lattice sc 0.22
#region box block 0 32 0 32 0 32
#create_box 1 box
#create_atoms 1 box
#set group all quat/random 982381

read_data data.gb
orthogonal box = (2.19575 2.19575 2.19575) to (50.8124 50.8124 50.8124)
1 by 2 by 2 MPI processor grid
reading atoms ...
32768 atoms
reading velocities ...
32768 velocities
32768 ellipsoids

compute rot all temp/asphere
group spheroid type 1
32768 atoms in group spheroid
variable dof equal count(spheroid)+3
compute_modify rot extra ${dof}
compute_modify rot extra 32771

velocity all create 2.4 41787 loop geom

pair_style gayberne 1.0 3.0 1.0 4.0
pair_coeff 1 1 1.0 1.0 1.0 0.5 0.2 1.0 0.5 0.2

neighbor 0.8 bin

timestep 0.002
thermo 20

# equilibration
#fix 1 all npt/asphere temp 2.4 2.4 0.1 iso 5.0 8.0 0.1
#compute_modify 1_temp extra ${dof}
#run 100
#write_restart tmp.restart

fix 1 all npt/asphere temp 2.4 2.4 0.2 iso 8.0 8.0 0.2
run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 4.8
ghost atom cutoff = 4.8
binsize = 2.4, bins = 21 21 21
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair gayberne, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 11.78 | 11.78 | 11.78 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.4 0.50438568 0 4.1042758 6.7818168 114909.09
20 2.7357818 0.26045557 0 4.364003 6.8299368 111715.16
40 2.9201296 0.22570735 0 4.605768 7.0767907 109473.23
60 2.9820039 0.19733812 0 4.6702075 7.1507065 108393.77
80 3.0148529 0.15114819 0 4.6732895 7.1699502 107672.24
100 3.0206703 0.10567623 0 4.6365433 7.154345 107184.83
Loop time of 11.3124 on 4 procs for 100 steps with 32768 atoms

Performance: 1527.522 tau/day, 8.840 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 10.778 | 10.849 | 10.934 | 2.0 | 95.90
Neigh | 0.088265 | 0.08871 | 0.089238 | 0.1 | 0.78
Comm | 0.1384 | 0.22518 | 0.29662 | 14.1 | 1.99
Output | 0.00020599 | 0.00024837 | 0.00036836 | 0.0 | 0.00
Modify | 0.13828 | 0.13899 | 0.13984 | 0.2 | 1.23
Other | | 0.01053 | | | 0.09

Nlocal: 8192 ave 8215 max 8166 min
Histogram: 1 1 0 0 0 0 0 0 0 2
Nghost: 11972.5 ave 11984 max 11959 min
Histogram: 1 0 0 0 1 0 1 0 0 1
Neighs: 576083 ave 579616 max 572161 min
Histogram: 1 1 0 0 0 0 0 0 0 2

Total # of neighbors = 2304332
Ave neighs/atom = 70.3226
Neighbor list builds = 6
Dangerous builds = 3

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:00:11
@ -1,85 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# granular chute flow

units lj
atom_style sphere
boundary p p fs
newton off
comm_modify vel yes

read_data data.granular
orthogonal box = (0 0 0) to (40 20 37.2886)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities

pair_style gran/hooke/history 200000.0 NULL 50.0 NULL 0.5 0
pair_coeff * *

neighbor 0.1 bin
neigh_modify delay 5 every 1

timestep 0.0001

group bottom type 2
912 atoms in group bottom
group active subtract all bottom
31088 atoms in group active
neigh_modify exclude group bottom bottom

fix 1 all gravity 1.0 chute 26.0
fix 2 bottom freeze
fix 3 active nve/sphere

compute 1 all erotate/sphere
thermo_style custom step atoms ke c_1 vol
thermo_modify norm no

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.1
ghost atom cutoff = 1.1
binsize = 0.55, bins = 73 37 68
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair gran/hooke/history, perpetual
attributes: half, newton off, size, history
pair build: half/size/bin/newtoff
stencil: half/bin/3d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 23.36 | 23.36 | 23.36 Mbytes
Step Atoms KinEng c_1 Volume
0 32000 784139.13 1601.1263 29833.783
100 32000 784292.08 1571.0968 29834.707
Loop time of 0.292816 on 1 procs for 100 steps with 32000 atoms

Performance: 2950.657 tau/day, 341.511 timesteps/s
99.3% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.17449 | 0.17449 | 0.17449 | 0.0 | 59.59
Neigh | 0.031927 | 0.031927 | 0.031927 | 0.0 | 10.90
Comm | 0.010195 | 0.010195 | 0.010195 | 0.0 | 3.48
Output | 0.00019121 | 0.00019121 | 0.00019121 | 0.0 | 0.07
Modify | 0.064463 | 0.064463 | 0.064463 | 0.0 | 22.01
Other | | 0.01155 | | | 3.94

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 5463 ave 5463 max 5463 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 115133 ave 115133 max 115133 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 115133
Ave neighs/atom = 3.59791
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:00
@ -1,85 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# granular chute flow

units lj
atom_style sphere
boundary p p fs
newton off
comm_modify vel yes

read_data data.granular
orthogonal box = (0 0 0) to (40 20 37.2886)
2 by 1 by 2 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities

pair_style gran/hooke/history 200000.0 NULL 50.0 NULL 0.5 0
pair_coeff * *

neighbor 0.1 bin
neigh_modify delay 5 every 1

timestep 0.0001

group bottom type 2
912 atoms in group bottom
group active subtract all bottom
31088 atoms in group active
neigh_modify exclude group bottom bottom

fix 1 all gravity 1.0 chute 26.0
fix 2 bottom freeze
fix 3 active nve/sphere

compute 1 all erotate/sphere
thermo_style custom step atoms ke c_1 vol
thermo_modify norm no

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 1.1
ghost atom cutoff = 1.1
binsize = 0.55, bins = 73 37 68
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair gran/hooke/history, perpetual
attributes: half, newton off, size, history
pair build: half/size/bin/newtoff
stencil: half/bin/3d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 10.41 | 10.42 | 10.42 Mbytes
Step Atoms KinEng c_1 Volume
0 32000 784139.13 1601.1263 29833.783
100 32000 784292.08 1571.0968 29834.707
Loop time of 0.0903978 on 4 procs for 100 steps with 32000 atoms

Performance: 9557.751 tau/day, 1106.221 timesteps/s
98.3% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.046331 | 0.049088 | 0.052195 | 1.2 | 54.30
Neigh | 0.0090401 | 0.0091327 | 0.0091863 | 0.1 | 10.10
Comm | 0.0073855 | 0.0080023 | 0.0086699 | 0.6 | 8.85
Output | 7.1049e-05 | 0.00010067 | 0.00012088 | 0.0 | 0.11
Modify | 0.017226 | 0.017449 | 0.01803 | 0.3 | 19.30
Other | | 0.006625 | | | 7.33

Nlocal: 8000 ave 8008 max 7992 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 2439 ave 2450 max 2428 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 29500.5 ave 30488 max 28513 min
Histogram: 2 0 0 0 0 0 0 0 0 2

Total # of neighbors = 118002
Ave neighs/atom = 3.68756
Neighbor list builds = 2
Dangerous builds = 0
Total wall time: 0:00:00
@ -1,73 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt

units lj
atom_style atomic

lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00183916 secs
mass 1 1.0

velocity all create 1.44 87287 loop geom

pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5

neighbor 0.3 bin
neigh_modify delay 5 every 1

fix 1 all nve

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 24 24 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 15.82 | 15.82 | 15.82 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6134356 -5.0197073
100 0.75745998 -5.7584998 0 -4.6223453 0.20729996
Loop time of 1.721 on 1 procs for 100 steps with 32000 atoms

Performance: 25101.720 tau/day, 58.106 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 1.2551 | 1.2551 | 1.2551 | 0.0 | 72.93
Neigh | 0.41825 | 0.41825 | 0.41825 | 0.0 | 24.30
Comm | 0.015347 | 0.015347 | 0.015347 | 0.0 | 0.89
Output | 0.00010729 | 0.00010729 | 0.00010729 | 0.0 | 0.01
Modify | 0.023436 | 0.023436 | 0.023436 | 0.0 | 1.36
Other | | 0.008766 | | | 0.51

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 19669 ave 19669 max 19669 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.20318e+06 ave 1.20318e+06 max 1.20318e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 1203176
Ave neighs/atom = 37.5992
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:01
@ -1,73 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# 3d Lennard-Jones melt

units lj
atom_style atomic

lattice fcc 0.8442
Lattice spacing in x,y,z = 1.6796 1.6796 1.6796
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (33.5919 33.5919 33.5919)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000587225 secs
mass 1 1.0

velocity all create 1.44 87287 loop geom

pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5

neighbor 0.3 bin
neigh_modify delay 5 every 1

fix 1 all nve

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 2.8
ghost atom cutoff = 2.8
binsize = 1.4, bins = 24 24 24
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/cut, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 6.88 | 6.88 | 6.88 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1.44 -6.7733681 0 -4.6134356 -5.0197073
100 0.75745998 -5.7584998 0 -4.6223453 0.20729996
Loop time of 0.469936 on 4 procs for 100 steps with 32000 atoms

Performance: 91927.316 tau/day, 212.795 timesteps/s
99.1% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 0.32713 | 0.32917 | 0.33317 | 0.4 | 70.05
Neigh | 0.10836 | 0.10931 | 0.11007 | 0.2 | 23.26
Comm | 0.015526 | 0.020355 | 0.022399 | 2.0 | 4.33
Output | 4.2439e-05 | 5.8353e-05 | 0.00010061 | 0.0 | 0.01
Modify | 0.0071156 | 0.0072448 | 0.007309 | 0.1 | 1.54
Other | | 0.003793 | | | 0.81

Nlocal: 8000 ave 8041 max 7958 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 9011 ave 9065 max 8961 min
Histogram: 1 1 0 0 0 0 0 1 0 1
Neighs: 300794 ave 304843 max 297317 min
Histogram: 1 0 0 1 1 0 0 0 0 1

Total # of neighbors = 1203176
Ave neighs/atom = 37.5992
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:00
@ -1,84 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in MEAM

units metal
atom_style atomic

lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00186539 secs

pair_style meam
WARNING: The pair_style meam command is unsupported. Please use pair_style meam/c instead (../pair_meam.cpp:51)
pair_coeff * * library.meam Ni4 Ni.meam Ni4

velocity all create 1600.0 376847 loop geom

neighbor 1.0 bin
neigh_modify delay 5 every 1

fix 1 all nve

timestep 0.005
thermo 50

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5
ghost atom cutoff = 5
binsize = 2.5, bins = 29 29 29
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 55.91 | 55.91 | 55.91 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.18
50 885.10702 -139411.51 0 -135750.54 32425.433
100 895.5097 -139454.3 0 -135750.3 31804.187
Loop time of 30.6278 on 1 procs for 100 steps with 32000 atoms

Performance: 1.410 ns/day, 17.015 hours/ns, 3.265 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 30.088 | 30.088 | 30.088 | 0.0 | 98.24
Neigh | 0.48914 | 0.48914 | 0.48914 | 0.0 | 1.60
Comm | 0.015916 | 0.015916 | 0.015916 | 0.0 | 0.05
Output | 0.00022554 | 0.00022554 | 0.00022554 | 0.0 | 0.00
Modify | 0.025481 | 0.025481 | 0.025481 | 0.0 | 0.08
Other | | 0.009055 | | | 0.03

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 13576 ave 13576 max 13576 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 780360 ave 780360 max 780360 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1.56072e+06 ave 1.56072e+06 max 1.56072e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 1560720
Ave neighs/atom = 48.7725
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:30
@ -1,84 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in MEAM

units metal
atom_style atomic

lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.000587463 secs

pair_style meam
WARNING: The pair_style meam command is unsupported. Please use pair_style meam/c instead (../pair_meam.cpp:51)
pair_coeff * * library.meam Ni4 Ni.meam Ni4

velocity all create 1600.0 376847 loop geom

neighbor 1.0 bin
neigh_modify delay 5 every 1

fix 1 all nve

timestep 0.005
thermo 50

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5
ghost atom cutoff = 5
binsize = 2.5, bins = 29 29 29
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 17.41 | 17.41 | 17.41 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.18
50 885.10702 -139411.51 0 -135750.54 32425.433
100 895.5097 -139454.3 0 -135750.3 31804.187
Loop time of 8.21941 on 4 procs for 100 steps with 32000 atoms

Performance: 5.256 ns/day, 4.566 hours/ns, 12.166 timesteps/s
99.2% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 8.0277 | 8.0384 | 8.0504 | 0.3 | 97.80
Neigh | 0.12555 | 0.12645 | 0.12713 | 0.2 | 1.54
Comm | 0.024279 | 0.036776 | 0.048389 | 4.5 | 0.45
Output | 9.4414e-05 | 0.00011903 | 0.00018597 | 0.0 | 0.00
Modify | 0.01252 | 0.012608 | 0.012795 | 0.1 | 0.15
Other | | 0.005028 | | | 0.06

Nlocal: 8000 ave 8045 max 7947 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Nghost: 6066.75 ave 6120 max 6021 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 195090 ave 196403 max 193697 min
Histogram: 1 0 0 1 0 0 0 1 0 1
FullNghs: 390180 ave 392616 max 387490 min
Histogram: 1 0 0 1 0 0 0 1 0 1

Total # of neighbors = 1560720
Ave neighs/atom = 48.7725
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:08
@ -1,83 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in MEAM

units metal
atom_style atomic

lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
1 by 1 by 1 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00184226 secs

pair_style meam/c
pair_coeff * * library.meam Ni4 Ni.meam Ni4

velocity all create 1600.0 376847 loop geom

neighbor 1.0 bin
neigh_modify delay 5 every 1

fix 1 all nve

timestep 0.005
thermo 50

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5
ghost atom cutoff = 5
binsize = 2.5, bins = 29 29 29
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam/c, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 55.91 | 55.91 | 55.91 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.18
50 885.10702 -139411.51 0 -135750.54 32425.431
100 895.50973 -139454.3 0 -135750.3 31804.185
Loop time of 22.9343 on 1 procs for 100 steps with 32000 atoms

Performance: 1.884 ns/day, 12.741 hours/ns, 4.360 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 22.397 | 22.397 | 22.397 | 0.0 | 97.66
Neigh | 0.48781 | 0.48781 | 0.48781 | 0.0 | 2.13
Comm | 0.013967 | 0.013967 | 0.013967 | 0.0 | 0.06
Output | 0.00022793 | 0.00022793 | 0.00022793 | 0.0 | 0.00
Modify | 0.025412 | 0.025412 | 0.025412 | 0.0 | 0.11
Other | | 0.009448 | | | 0.04

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 13576 ave 13576 max 13576 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 780360 ave 780360 max 780360 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1.56072e+06 ave 1.56072e+06 max 1.56072e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 1560720
Ave neighs/atom = 48.7725
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:23
@ -1,83 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# bulk Ni in MEAM

units metal
atom_style atomic

lattice fcc 3.52
Lattice spacing in x,y,z = 3.52 3.52 3.52
region box block 0 20 0 20 0 20
create_box 1 box
Created orthogonal box = (0 0 0) to (70.4 70.4 70.4)
1 by 2 by 2 MPI processor grid
create_atoms 1 box
Created 32000 atoms
Time spent = 0.00058651 secs

pair_style meam/c
pair_coeff * * library.meam Ni4 Ni.meam Ni4

velocity all create 1600.0 376847 loop geom

neighbor 1.0 bin
neigh_modify delay 5 every 1

fix 1 all nve

timestep 0.005
thermo 50

run 100
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 5
ghost atom cutoff = 5
binsize = 2.5, bins = 29 29 29
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair meam/c, perpetual
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
(2) pair meam/c, perpetual, half/full from (1)
attributes: half, newton on
pair build: halffull/newton
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 17.41 | 17.41 | 17.41 Mbytes
Step Temp E_pair E_mol TotEng Press
0 1600 -142400 0 -135782.09 20259.18
50 885.10702 -139411.51 0 -135750.54 32425.431
100 895.50973 -139454.3 0 -135750.3 31804.185
Loop time of 6.45947 on 4 procs for 100 steps with 32000 atoms

Performance: 6.688 ns/day, 3.589 hours/ns, 15.481 timesteps/s
98.0% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 6.22 | 6.2385 | 6.265 | 0.7 | 96.58
Neigh | 0.12657 | 0.12691 | 0.12721 | 0.1 | 1.96
Comm | 0.052339 | 0.07915 | 0.097897 | 5.9 | 1.23
Output | 9.7752e-05 | 0.0001151 | 0.00016594 | 0.0 | 0.00
Modify | 0.010194 | 0.010291 | 0.010442 | 0.1 | 0.16
Other | | 0.004529 | | | 0.07

Nlocal: 8000 ave 8045 max 7947 min
Histogram: 1 0 0 1 0 0 0 1 0 1
Nghost: 6066.75 ave 6120 max 6021 min
Histogram: 1 0 1 0 0 0 1 0 0 1
Neighs: 195090 ave 196403 max 193697 min
Histogram: 1 0 0 1 0 0 0 1 0 1
FullNghs: 390180 ave 392616 max 387490 min
Histogram: 1 0 0 1 0 0 0 1 0 1

Total # of neighbors = 1560720
Ave neighs/atom = 48.7725
Neighbor list builds = 8
Dangerous builds = 0
Total wall time: 0:00:06
@ -1,217 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# Crack growth in notched 3D Peridynamic block

# Mesh spacing
variable h equal 5.00e-4
# Peridynamic horizon
variable delta equal 3.0*${h}
variable delta equal 3.0*0.0005
# Height of plate (meters)
variable height equal 31.5*${h}
variable height equal 31.5*0.0005
# Width of plate (meters)
variable width equal 39.5*${h}
variable width equal 39.5*0.0005
# Thickness of plate (meters)
variable depth equal 24.5*${h}
variable depth equal 24.5*0.0005
# Height of notch
variable crackheight equal 10*${h}
variable crackheight equal 10*0.0005
# Density of plate
variable mydensity equal 2440.0
# Elastic modulus of material
variable myE equal 72.0e9
# Strain energy release rate at branching
variable myG equal 135.0
# constant, but define it as a variable here
variable pi equal 3.14159265358979323846

units si
boundary s s s
atom_style peri
atom_modify map array
variable myskin equal 2.0*${h}
variable myskin equal 2.0*0.0005
neighbor ${myskin} bin
neighbor 0.001 bin

lattice sc $h
lattice sc 0.0005
Lattice spacing in x,y,z = 0.0005 0.0005 0.0005
variable myxmin equal 0.0
variable myxmax equal ${width}
variable myxmax equal 0.01975
variable myymin equal 0.0
variable myymax equal ${height}
variable myymax equal 0.01575
variable myzmin equal 0.0
variable myzmax equal ${depth}
variable myzmax equal 0.01225
region plate block ${myxmin} ${myxmax} ${myymin} ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 ${myxmax} ${myymin} ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 0.01975 ${myymin} ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 0.01975 0 ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 0.01975 0 0.01575 ${myzmin} ${myzmax} units box
region plate block 0 0.01975 0 0.01575 0 ${myzmax} units box
region plate block 0 0.01975 0 0.01575 0 0.01225 units box
create_box 3 plate
Created orthogonal box = (0 0 0) to (0.01975 0.01575 0.01225)
1 by 1 by 1 MPI processor grid
create_atoms 1 region plate
Created 32000 atoms
Time spent = 0.00362897 secs


pair_style peri/pmb
variable myk equal (2.0/3.0)*${myE}
variable myk equal (2.0/3.0)*72000000000

variable myc equal ((18.0*${myk})/(${pi}*(${delta}^4)))
variable myc equal ((18.0*48000000000)/(${pi}*(${delta}^4)))
variable myc equal ((18.0*48000000000)/(3.14159265358979*(${delta}^4)))
variable myc equal ((18.0*48000000000)/(3.14159265358979*(0.0015^4)))
variable mydelta equal (${delta}+(${delta}/100.0))
variable mydelta equal (0.0015+(${delta}/100.0))
variable mydelta equal (0.0015+(0.0015/100.0))
variable mys0 equal sqrt((5.0*${myG})/(9.0*${myk}*${delta}))
variable mys0 equal sqrt((5.0*135)/(9.0*${myk}*${delta}))
variable mys0 equal sqrt((5.0*135)/(9.0*48000000000*${delta}))
variable mys0 equal sqrt((5.0*135)/(9.0*48000000000*0.0015))

variable tmpvar1 equal ${myymax}-${crackheight}
variable tmpvar1 equal 0.01575-${crackheight}
variable tmpvar1 equal 0.01575-0.005
variable tmpvar2 equal 0.5*${width}
variable tmpvar2 equal 0.5*0.01975

region topleft block 0.0 ${tmpvar2} ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 ${myymax} ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 0.01575 ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 0.01575 0 ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 0.01575 0 0.01225 units box
region topright block ${tmpvar2} ${myxmax} ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 ${myxmax} ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 0 ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 0 0.01225 units box
set region topleft type 2
5000 settings made for type
set region topright type 3
5000 settings made for type
pair_coeff 1 1 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 2 2 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 2 2 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 2 2 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 2 2 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 3 3 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 3 3 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 3 3 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 3 3 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 2 3 ${myc} 0.0 ${mys0} 0.0
pair_coeff 2 3 5.43248872420337e+22 0.0 ${mys0} 0.0
pair_coeff 2 3 5.43248872420337e+22 0.0 0.00102062072615966 0.0
pair_coeff 1 2 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 2 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 1 2 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 2 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 1 3 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
set group all density ${mydensity}
set group all density 2440
32000 settings made for density
variable myvolume equal ($h)^3
variable myvolume equal (0.0005)^3
set group all volume ${myvolume}
set group all volume 1.25e-10
32000 settings made for volume

velocity all set 0.0 0.0 0.0 sum no units box

fix F1 all nve

compute C1 all damage/atom

velocity all ramp vx -10.0 10.0 x ${myxmin} ${myxmax} units box
velocity all ramp vx -10.0 10.0 x 0 ${myxmax} units box
velocity all ramp vx -10.0 10.0 x 0 0.01975 units box

variable mystep equal 0.8*sqrt((2.0*${mydensity})/(512*(${myc}/$h)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(${myc}/$h)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(5.43248872420337e+22/$h)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(5.43248872420337e+22/0.0005)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(5.43248872420337e+22/0.0005)*1.25e-10))
timestep ${mystep}
timestep 2.11931492396226e-08
thermo 20

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0.002515
ghost atom cutoff = 0.002515
binsize = 0.0012575, bins = 16 13 10
2 neighbor lists, perpetual/occasional/extra = 1 1 0
(1) pair peri/pmb, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
(2) fix PERI_NEIGH, occasional
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Peridynamic bonds:
total # of bonds = 3457032
bonds/atom = 108.032
Per MPI rank memory allocation (min/avg/max) = 133.6 | 133.6 | 133.6 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.0134233e+27 0 0 1.3342785e+09 2.4509971e+14 3.6292128e-06
20 1.7695805e+27 1.6163291e+08 0 1.3343188e+09 2.1541601e+14 3.6292128e-06
40 1.3041477e+27 4.6848143e+08 0 1.332729e+09 1.5875756e+14 3.6292128e-06
60 9.8975313e+26 5.7284448e+08 0 1.2287455e+09 1.2048543e+14 3.6292128e-06
80 9.3888573e+26 4.0928092e+08 0 1.0314725e+09 1.1429321e+14 3.6292128e-06
100 8.3930314e+26 3.8522361e+08 0 9.4142265e+08 1.0217075e+14 3.6292128e-06
Loop time of 11.0398 on 1 procs for 100 steps with 32000 atoms

99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 11.005 | 11.005 | 11.005 | 0.0 | 99.68
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 6.6042e-05 | 6.6042e-05 | 6.6042e-05 | 0.0 | 0.00
Output | 0.00057292 | 0.00057292 | 0.00057292 | 0.0 | 0.01
Modify | 0.0256 | 0.0256 | 0.0256 | 0.0 | 0.23
Other | | 0.008592 | | | 0.08

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 0 ave 0 max 0 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 6.74442e+06 ave 6.74442e+06 max 6.74442e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0
FullNghs: 1.34888e+07 ave 1.34888e+07 max 1.34888e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 13488836
Ave neighs/atom = 421.526
Neighbor list builds = 0
Dangerous builds = 0

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:00:11
@ -1,217 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# Crack growth in notched 3D Peridynamic block

# Mesh spacing
variable h equal 5.00e-4
# Peridynamic horizon
variable delta equal 3.0*${h}
variable delta equal 3.0*0.0005
# Height of plate (meters)
variable height equal 31.5*${h}
variable height equal 31.5*0.0005
# Width of plate (meters)
variable width equal 39.5*${h}
variable width equal 39.5*0.0005
# Thickness of plate (meters)
variable depth equal 24.5*${h}
variable depth equal 24.5*0.0005
# Height of notch
variable crackheight equal 10*${h}
variable crackheight equal 10*0.0005
# Density of plate
variable mydensity equal 2440.0
# Elastic modulus of material
variable myE equal 72.0e9
# Strain energy release rate at branching
variable myG equal 135.0
# constant, but define it as a variable here
variable pi equal 3.14159265358979323846

units si
boundary s s s
atom_style peri
atom_modify map array
variable myskin equal 2.0*${h}
variable myskin equal 2.0*0.0005
neighbor ${myskin} bin
neighbor 0.001 bin

lattice sc $h
lattice sc 0.0005
Lattice spacing in x,y,z = 0.0005 0.0005 0.0005
variable myxmin equal 0.0
variable myxmax equal ${width}
variable myxmax equal 0.01975
variable myymin equal 0.0
variable myymax equal ${height}
variable myymax equal 0.01575
variable myzmin equal 0.0
variable myzmax equal ${depth}
variable myzmax equal 0.01225
region plate block ${myxmin} ${myxmax} ${myymin} ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 ${myxmax} ${myymin} ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 0.01975 ${myymin} ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 0.01975 0 ${myymax} ${myzmin} ${myzmax} units box
region plate block 0 0.01975 0 0.01575 ${myzmin} ${myzmax} units box
region plate block 0 0.01975 0 0.01575 0 ${myzmax} units box
region plate block 0 0.01975 0 0.01575 0 0.01225 units box
create_box 3 plate
Created orthogonal box = (0 0 0) to (0.01975 0.01575 0.01225)
2 by 2 by 1 MPI processor grid
create_atoms 1 region plate
Created 32000 atoms
Time spent = 0.0011344 secs


pair_style peri/pmb
variable myk equal (2.0/3.0)*${myE}
variable myk equal (2.0/3.0)*72000000000

variable myc equal ((18.0*${myk})/(${pi}*(${delta}^4)))
variable myc equal ((18.0*48000000000)/(${pi}*(${delta}^4)))
variable myc equal ((18.0*48000000000)/(3.14159265358979*(${delta}^4)))
variable myc equal ((18.0*48000000000)/(3.14159265358979*(0.0015^4)))
variable mydelta equal (${delta}+(${delta}/100.0))
variable mydelta equal (0.0015+(${delta}/100.0))
variable mydelta equal (0.0015+(0.0015/100.0))
variable mys0 equal sqrt((5.0*${myG})/(9.0*${myk}*${delta}))
variable mys0 equal sqrt((5.0*135)/(9.0*${myk}*${delta}))
variable mys0 equal sqrt((5.0*135)/(9.0*48000000000*${delta}))
variable mys0 equal sqrt((5.0*135)/(9.0*48000000000*0.0015))

variable tmpvar1 equal ${myymax}-${crackheight}
variable tmpvar1 equal 0.01575-${crackheight}
variable tmpvar1 equal 0.01575-0.005
variable tmpvar2 equal 0.5*${width}
variable tmpvar2 equal 0.5*0.01975

region topleft block 0.0 ${tmpvar2} ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 ${myymax} ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 0.01575 ${myzmin} ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 0.01575 0 ${myzmax} units box
region topleft block 0.0 0.009875 0.01075 0.01575 0 0.01225 units box
region topright block ${tmpvar2} ${myxmax} ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 ${myxmax} ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 ${tmpvar1} ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 ${myymax} ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 ${myzmin} ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 0 ${myzmax} units box
region topright block 0.009875 0.01975 0.01075 0.01575 0 0.01225 units box
set region topleft type 2
5000 settings made for type
set region topright type 3
5000 settings made for type
pair_coeff 1 1 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 1 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 2 2 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 2 2 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 2 2 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 2 2 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 3 3 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 3 3 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 3 3 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 3 3 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 2 3 ${myc} 0.0 ${mys0} 0.0
pair_coeff 2 3 5.43248872420337e+22 0.0 ${mys0} 0.0
pair_coeff 2 3 5.43248872420337e+22 0.0 0.00102062072615966 0.0
pair_coeff 1 2 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 2 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 1 2 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 2 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
pair_coeff 1 3 ${myc} ${mydelta} ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 ${mydelta} ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 0.001515 ${mys0} 0.0
pair_coeff 1 3 5.43248872420337e+22 0.001515 0.00102062072615966 0.0
set group all density ${mydensity}
set group all density 2440
32000 settings made for density
variable myvolume equal ($h)^3
variable myvolume equal (0.0005)^3
set group all volume ${myvolume}
set group all volume 1.25e-10
32000 settings made for volume

velocity all set 0.0 0.0 0.0 sum no units box

fix F1 all nve

compute C1 all damage/atom

velocity all ramp vx -10.0 10.0 x ${myxmin} ${myxmax} units box
velocity all ramp vx -10.0 10.0 x 0 ${myxmax} units box
velocity all ramp vx -10.0 10.0 x 0 0.01975 units box

variable mystep equal 0.8*sqrt((2.0*${mydensity})/(512*(${myc}/$h)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(${myc}/$h)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(5.43248872420337e+22/$h)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(5.43248872420337e+22/0.0005)*${myvolume}))
variable mystep equal 0.8*sqrt((2.0*2440)/(512*(5.43248872420337e+22/0.0005)*1.25e-10))
timestep ${mystep}
timestep 2.11931492396226e-08
thermo 20

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 0.002515
ghost atom cutoff = 0.002515
binsize = 0.0012575, bins = 16 13 10
2 neighbor lists, perpetual/occasional/extra = 1 1 0
(1) pair peri/pmb, perpetual
attributes: half, newton on
pair build: half/bin/atomonly/newton
stencil: half/bin/3d/newton
bin: standard
(2) fix PERI_NEIGH, occasional
attributes: full, newton on
pair build: full/bin/atomonly
stencil: full/bin/3d
bin: standard
Peridynamic bonds:
total # of bonds = 3457032
bonds/atom = 108.032
Per MPI rank memory allocation (min/avg/max) = 47.63 | 48.11 | 48.78 Mbytes
Step Temp E_pair E_mol TotEng Press Volume
0 2.0134233e+27 0 0 1.3342785e+09 2.4509971e+14 3.6292128e-06
20 1.7695805e+27 1.6163291e+08 0 1.3343188e+09 2.1541601e+14 3.6292128e-06
40 1.3041477e+27 4.6848143e+08 0 1.332729e+09 1.5875756e+14 3.6292128e-06
60 9.8975313e+26 5.7284448e+08 0 1.2287455e+09 1.2048543e+14 3.6292128e-06
80 9.3888573e+26 4.0928092e+08 0 1.0314725e+09 1.1429321e+14 3.6292128e-06
100 8.3930314e+26 3.8522361e+08 0 9.4142265e+08 1.0217075e+14 3.6292128e-06
Loop time of 2.8928 on 4 procs for 100 steps with 32000 atoms

99.0% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 2.7472 | 2.7951 | 2.8585 | 2.9 | 96.62
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.019592 | 0.083156 | 0.13278 | 17.0 | 2.87
Output | 0.00022125 | 0.00034326 | 0.00058961 | 0.0 | 0.01
Modify | 0.0083542 | 0.0089623 | 0.0095983 | 0.5 | 0.31
Other | | 0.005276 | | | 0.18

Nlocal: 8000 ave 8000 max 8000 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Nghost: 5125 ave 5125 max 5125 min
Histogram: 4 0 0 0 0 0 0 0 0 0
Neighs: 1.6861e+06 ave 1.77502e+06 max 1.60625e+06 min
Histogram: 2 0 0 0 0 0 0 0 1 1
FullNghs: 3.37221e+06 ave 3.41832e+06 max 3.3261e+06 min
Histogram: 2 0 0 0 0 0 0 0 0 2

Total # of neighbors = 13488836
Ave neighs/atom = 421.526
Neighbor list builds = 0
Dangerous builds = 0

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:00:03
@ -1,123 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# Rhodopsin model

units real
neighbor 2.0 bin
neigh_modify delay 5 every 1

atom_style full
bond_style harmonic
angle_style charmm
dihedral_style charmm
improper_style harmonic
pair_style lj/charmm/coul/long 8.0 10.0
pair_modify mix arithmetic
kspace_style pppm 1e-4

read_data data.protein
orthogonal box = (-27.5 -38.5 -36.3646) to (27.5 38.5 36.3615)
1 by 1 by 1 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
scanning bonds ...
4 = max bonds/atom
scanning angles ...
8 = max angles/atom
scanning dihedrals ...
18 = max dihedrals/atom
scanning impropers ...
2 = max impropers/atom
reading bonds ...
27723 bonds
reading angles ...
40467 angles
reading dihedrals ...
56829 dihedrals
reading impropers ...
1034 impropers
4 = max # of 1-2 neighbors
12 = max # of 1-3 neighbors
24 = max # of 1-4 neighbors
26 = max # of special neighbors

fix 1 all shake 0.0001 5 0 m 1.0 a 232
1617 = # of size 2 clusters
3633 = # of size 3 clusters
747 = # of size 4 clusters
4233 = # of frozen angles
fix 2 all npt temp 300.0 300.0 100.0 z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1

special_bonds charmm

thermo_style multi
timestep 2.0

run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.248835
grid = 25 32 32
stencil order = 5
estimated absolute RMS force accuracy = 0.0355478
estimated relative force accuracy = 0.000107051
using double precision FFTs
3d grid and FFT values/proc = 41070 25600
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 10 13 13
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/charmm/coul/long, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 140 | 140 | 140 Mbytes
---------------- Step 0 ----- CPU = 0.0000 (sec) ----------------
TotEng = -25356.2064 KinEng = 21444.8313 Temp = 299.0397
PotEng = -46801.0377 E_bond = 2537.9940 E_angle = 10921.3742
E_dihed = 5211.7865 E_impro = 213.5116 E_vdwl = -2307.8634
E_coul = 207025.8927 E_long = -270403.7333 Press = -149.3301
Volume = 307995.0335
---------------- Step 100 ----- CPU = 23.7567 (sec) ----------------
TotEng = -25290.7386 KinEng = 21591.9096 Temp = 301.0906
PotEng = -46882.6482 E_bond = 2567.9789 E_angle = 10781.9556
E_dihed = 5198.7493 E_impro = 216.7863 E_vdwl = -1902.6458
E_coul = 206659.5007 E_long = -270404.9733 Press = 6.7898
Volume = 308133.9933
Loop time of 23.7568 on 1 procs for 100 steps with 32000 atoms

Performance: 0.727 ns/day, 32.995 hours/ns, 4.209 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 17.905 | 17.905 | 17.905 | 0.0 | 75.37
Bond | 0.73417 | 0.73417 | 0.73417 | 0.0 | 3.09
Kspace | 1.4676 | 1.4676 | 1.4676 | 0.0 | 6.18
Neigh | 2.9907 | 2.9907 | 2.9907 | 0.0 | 12.59
Comm | 0.037427 | 0.037427 | 0.037427 | 0.0 | 0.16
Output | 0.00011754 | 0.00011754 | 0.00011754 | 0.0 | 0.00
Modify | 0.60985 | 0.60985 | 0.60985 | 0.0 | 2.57
Other | | 0.01201 | | | 0.05

Nlocal: 32000 ave 32000 max 32000 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 47958 ave 47958 max 47958 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.20281e+07 ave 1.20281e+07 max 1.20281e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 12028098
Ave neighs/atom = 375.878
Ave special neighs/atom = 7.43187
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:24
@ -1,123 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# Rhodopsin model

units real
neighbor 2.0 bin
neigh_modify delay 5 every 1

atom_style full
bond_style harmonic
angle_style charmm
dihedral_style charmm
improper_style harmonic
pair_style lj/charmm/coul/long 8.0 10.0
pair_modify mix arithmetic
kspace_style pppm 1e-4

read_data data.protein
orthogonal box = (-27.5 -38.5 -36.3646) to (27.5 38.5 36.3615)
1 by 2 by 2 MPI processor grid
reading atoms ...
32000 atoms
reading velocities ...
32000 velocities
scanning bonds ...
4 = max bonds/atom
scanning angles ...
8 = max angles/atom
scanning dihedrals ...
18 = max dihedrals/atom
scanning impropers ...
2 = max impropers/atom
reading bonds ...
27723 bonds
reading angles ...
40467 angles
reading dihedrals ...
56829 dihedrals
reading impropers ...
1034 impropers
4 = max # of 1-2 neighbors
12 = max # of 1-3 neighbors
24 = max # of 1-4 neighbors
26 = max # of special neighbors

fix 1 all shake 0.0001 5 0 m 1.0 a 232
1617 = # of size 2 clusters
3633 = # of size 3 clusters
747 = # of size 4 clusters
4233 = # of frozen angles
fix 2 all npt temp 300.0 300.0 100.0 z 0.0 0.0 1000.0 mtk no pchain 0 tchain 1

special_bonds charmm

thermo_style multi
timestep 2.0

run 100
PPPM initialization ...
using 12-bit tables for long-range coulomb (../kspace.cpp:321)
G vector (1/distance) = 0.248835
grid = 25 32 32
stencil order = 5
estimated absolute RMS force accuracy = 0.0355478
estimated relative force accuracy = 0.000107051
using double precision FFTs
3d grid and FFT values/proc = 13230 6400
Neighbor list info ...
update every 1 steps, delay 5 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 10 13 13
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair lj/charmm/coul/long, perpetual
attributes: half, newton on
pair build: half/bin/newton
stencil: half/bin/3d/newton
bin: standard
Per MPI rank memory allocation (min/avg/max) = 49.07 | 49.17 | 49.46 Mbytes
---------------- Step 0 ----- CPU = 0.0000 (sec) ----------------
TotEng = -25356.2064 KinEng = 21444.8313 Temp = 299.0397
PotEng = -46801.0377 E_bond = 2537.9940 E_angle = 10921.3742
E_dihed = 5211.7865 E_impro = 213.5116 E_vdwl = -2307.8634
E_coul = 207025.8927 E_long = -270403.7333 Press = -149.3301
Volume = 307995.0335
---------------- Step 100 ----- CPU = 6.3997 (sec) ----------------
TotEng = -25290.7386 KinEng = 21591.9096 Temp = 301.0906
PotEng = -46882.6483 E_bond = 2567.9789 E_angle = 10781.9556
E_dihed = 5198.7493 E_impro = 216.7863 E_vdwl = -1902.6458
E_coul = 206659.5007 E_long = -270404.9733 Press = 6.7898
Volume = 308133.9933
Loop time of 6.39977 on 4 procs for 100 steps with 32000 atoms

Performance: 2.700 ns/day, 8.889 hours/ns, 15.626 timesteps/s
98.8% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 4.4434 | 4.5321 | 4.6846 | 4.3 | 70.82
Bond | 0.17894 | 0.18568 | 0.19951 | 1.9 | 2.90
Kspace | 0.4651 | 0.61064 | 0.69123 | 11.1 | 9.54
Neigh | 0.7739 | 0.77394 | 0.774 | 0.0 | 12.09
Comm | 0.057676 | 0.069183 | 0.07901 | 3.0 | 1.08
Output | 5.6505e-05 | 6.6578e-05 | 9.4414e-05 | 0.0 | 0.00
Modify | 0.21444 | 0.21866 | 0.22524 | 0.9 | 3.42
Other | | 0.009451 | | | 0.15

Nlocal: 8000 ave 8143 max 7933 min
Histogram: 1 2 0 0 0 0 0 0 0 1
Nghost: 22733.5 ave 22769 max 22693 min
Histogram: 1 0 0 0 0 2 0 0 0 1
Neighs: 3.00702e+06 ave 3.0975e+06 max 2.96492e+06 min
Histogram: 1 2 0 0 0 0 0 0 0 1

Total # of neighbors = 12028098
Ave neighs/atom = 375.878
Ave special neighs/atom = 7.43187
Neighbor list builds = 11
Dangerous builds = 0
Total wall time: 0:00:06
@ -1,86 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell

units real
atom_style charge

read_data data.reax
orthogonal box = (0 0 0) to (9.49107 9.49107 6.99123)
1 by 1 by 1 MPI processor grid
reading atoms ...
58 atoms

#replicate 7 8 10
replicate 7 8 5
orthogonal box = (0 0 0) to (66.4375 75.9285 34.9562)
1 by 1 by 1 MPI processor grid
16240 atoms
Time spent = 0.000834942 secs

velocity all create 300.0 9999

pair_style reax
WARNING: The pair_style reax command is unsupported. Please switch to pair_style reax/c instead (../pair_reax.cpp:49)
pair_coeff * * ffield.reax 1 2 3 4

timestep 0.1
fix 2 all nve

thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair reax, perpetual
attributes: half, newton off
pair build: half/bin/newtoff
stencil: half/bin/3d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 115.8 | 115.8 | 115.8 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 14521.612 -1616144.2 22296.712 -29858.677 5721.0921 -1601622.5
10 298.98728 14472.591 -1616093.9 21955.847 -24067.096 7389.148 -1601621.3
20 294.76158 14268.045 -1615890.1 19179.258 -10513.494 10789.925 -1601622
30 288.56967 13968.323 -1615591.2 13854.377 5833.02 13949.731 -1601622.9
40 282.06725 13653.571 -1615278.2 6259.9845 19406.33 14947.939 -1601624.6
50 274.84112 13303.787 -1614931.9 -2009.6832 26964.336 13346.855 -1601628.2
60 266.20153 12885.585 -1614519.7 -8441.1641 28485.532 10195.429 -1601634.1
70 259.17085 12545.262 -1614184.2 -11426.993 24941.516 6572.2953 -1601638.9
80 259.73004 12572.33 -1614216.7 -10867.598 16928.461 3033.9021 -1601644.3
90 269.2352 13032.431 -1614679 -7962.3129 4931.5317 -280.22164 -1601646.6
100 280.67181 13586.024 -1615234.3 -3606.1519 -8769.8482 -2527.5887 -1601648.3
Loop time of 358.104 on 1 procs for 100 steps with 16240 atoms

Performance: 0.002 ns/day, 9947.338 hours/ns, 0.279 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 358.07 | 358.07 | 358.07 | 0.0 | 99.99
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.01623 | 0.01623 | 0.01623 | 0.0 | 0.00
Output | 0.0013328 | 0.0013328 | 0.0013328 | 0.0 | 0.00
Modify | 0.012679 | 0.012679 | 0.012679 | 0.0 | 0.00
Other | | 0.006895 | | | 0.00

Nlocal: 16240 ave 16240 max 16240 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 32428 ave 32428 max 32428 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 6.69975e+06 ave 6.69975e+06 max 6.69975e+06 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 6699752
Ave neighs/atom = 412.546
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:06:02
@ -1,86 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell

units real
atom_style charge

read_data data.reax
orthogonal box = (0 0 0) to (9.49107 9.49107 6.99123)
2 by 2 by 1 MPI processor grid
reading atoms ...
58 atoms

#replicate 7 8 10
replicate 7 8 5
orthogonal box = (0 0 0) to (66.4375 75.9285 34.9562)
2 by 2 by 1 MPI processor grid
16240 atoms
Time spent = 0.000491619 secs

velocity all create 300.0 9999

pair_style reax
WARNING: The pair_style reax command is unsupported. Please switch to pair_style reax/c instead (../pair_reax.cpp:49)
pair_coeff * * ffield.reax 1 2 3 4

timestep 0.1
fix 2 all nve

thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 6
1 neighbor lists, perpetual/occasional/extra = 1 0 0
(1) pair reax, perpetual
attributes: half, newton off
pair build: half/bin/newtoff
stencil: half/bin/3d/newtoff
bin: standard
Per MPI rank memory allocation (min/avg/max) = 35.58 | 35.68 | 35.77 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 14521.612 -1616144.1 22296.712 -29858.677 5721.0922 -1601622.5
10 298.98728 14472.591 -1616093.8 21955.847 -24067.094 7389.149 -1601621.3
20 294.76158 14268.044 -1615890 19179.258 -10513.494 10789.925 -1601622
30 288.56967 13968.323 -1615591.2 13854.38 5833.0219 13949.731 -1601622.9
40 282.06725 13653.571 -1615278.2 6259.981 19406.327 14947.938 -1601624.7
50 274.84112 13303.787 -1614931.9 -2009.6844 26964.334 13346.855 -1601628.1
60 266.20153 12885.585 -1614519.8 -8441.1628 28485.533 10195.428 -1601634.2
70 259.17085 12545.262 -1614184.2 -11426.992 24941.517 6572.295 -1601639
80 259.73004 12572.33 -1614216.8 -10867.596 16928.464 3033.9024 -1601644.5
90 269.2352 13032.431 -1614679 -7962.3097 4931.5336 -280.21988 -1601646.5
100 280.67181 13586.024 -1615234.3 -3606.1482 -8769.8463 -2527.5874 -1601648.3
Loop time of 97.054 on 4 procs for 100 steps with 16240 atoms

Performance: 0.009 ns/day, 2695.944 hours/ns, 1.030 timesteps/s
99.0% CPU use with 4 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 94.83 | 95.764 | 96.883 | 9.2 | 98.67
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.16123 | 1.2801 | 2.2132 | 79.9 | 1.32
Output | 0.00056076 | 0.00066662 | 0.00095987 | 0.0 | 0.00
Modify | 0.0048354 | 0.0049006 | 0.0049515 | 0.1 | 0.01
Other | | 0.004639 | | | 0.00

Nlocal: 4060 ave 4080 max 4040 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Nghost: 14972 ave 14992 max 14952 min
Histogram: 2 0 0 0 0 0 0 0 0 2
Neighs: 1.8135e+06 ave 1.82186e+06 max 1.80514e+06 min
Histogram: 2 0 0 0 0 0 0 0 0 2

Total # of neighbors = 7253988
Ave neighs/atom = 446.674
Neighbor list builds = 0
Dangerous builds = 0
Total wall time: 0:01:38
@ -1,93 +0,0 @@
LAMMPS (16 Mar 2018)
OMP_NUM_THREADS environment is not set. Defaulting to 1 thread. (../comm.cpp:90)
using 1 OpenMP thread(s) per MPI task
# ReaxFF benchmark: simulation of PETN crystal, replicated unit cell

units real
atom_style charge

read_data data.reax
orthogonal box = (0 0 0) to (9.49107 9.49107 6.99123)
1 by 1 by 1 MPI processor grid
reading atoms ...
58 atoms

replicate 7 8 10
orthogonal box = (0 0 0) to (66.4375 75.9285 69.9123)
1 by 1 by 1 MPI processor grid
32480 atoms
Time spent = 0.00162625 secs

velocity all create 300.0 9999

pair_style reax/c NULL
pair_coeff * * ffield.reax C H O N

timestep 0.1
fix 1 all nve
fix 2 all qeq/reax 1 0.0 10.0 1.0e-6 reax/c

thermo 10
thermo_style custom step temp ke pe pxx pyy pzz etotal

run 100
Neighbor list info ...
update every 1 steps, delay 10 steps, check yes
max neighbors/atom: 2000, page size: 100000
master list distance cutoff = 12
ghost atom cutoff = 12
binsize = 6, bins = 12 13 12
2 neighbor lists, perpetual/occasional/extra = 2 0 0
(1) pair reax/c, perpetual
attributes: half, newton off, ghost
pair build: half/bin/newtoff/ghost
stencil: half/ghost/bin/3d/newtoff
bin: standard
(2) fix qeq/reax, perpetual, copy from (1)
attributes: half, newton off, ghost
pair build: copy
stencil: none
bin: none
Per MPI rank memory allocation (min/avg/max) = 1727 | 1727 | 1727 Mbytes
Step Temp KinEng PotEng Pxx Pyy Pzz TotEng
0 300 29044.119 -3232140.8 22804.879 -29365.593 6302.5637 -3203096.6
10 299.37479 28983.59 -3232075.2 21746.778 -23987.41 7610.2967 -3203091.6
20 295.5855 28616.735 -3231710.1 18178.568 -10871.882 10603.247 -3203093.3
30 289.48845 28026.457 -3231123.2 12146.362 4985.5572 13364.455 -3203096.8
40 282.66404 27365.76 -3230467.5 4284.2794 18132.771 14133.719 -3203101.7
50 274.97005 26620.876 -3229730.4 -3719.11 25519.692 12551.708 -3203109.5
60 266.11301 25763.393 -3228883.8 -9271.4049 27307.216 9753.2509 -3203120.4
70 259.3263 25106.346 -3228237.2 -11150.726 24238.382 6578.5306 -3203130.8
80 260.33956 25204.444 -3228344.2 -9576.6006 16737.65 3454.5747 -3203139.7
90 269.90199 26130.219 -3229275.5 -5906.376 5246.1572 467.31789 -3203145.3
100 280.76717 27182.117 -3230330.6 -1363.8281 -8133.2509 -1689.7711 -3203148.5
Loop time of 437.886 on 1 procs for 100 steps with 32480 atoms

Performance: 0.002 ns/day, 12163.512 hours/ns, 0.228 timesteps/s
99.8% CPU use with 1 MPI tasks x 1 OpenMP threads

MPI task timing breakdown:
Section | min time | avg time | max time |%varavg| %total
---------------------------------------------------------------
Pair | 350.29 | 350.29 | 350.29 | 0.0 | 80.00
Neigh | 0 | 0 | 0 | 0.0 | 0.00
Comm | 0.026264 | 0.026264 | 0.026264 | 0.0 | 0.01
Output | 0.0024614 | 0.0024614 | 0.0024614 | 0.0 | 0.00
Modify | 87.55 | 87.55 | 87.55 | 0.0 | 19.99
Other | | 0.01296 | | | 0.00

Nlocal: 32480 ave 32480 max 32480 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Nghost: 45128 ave 45128 max 45128 min
Histogram: 1 0 0 0 0 0 0 0 0 0
Neighs: 1.27781e+07 ave 1.27781e+07 max 1.27781e+07 min
Histogram: 1 0 0 0 0 0 0 0 0 0

Total # of neighbors = 12778082
Ave neighs/atom = 393.414
Neighbor list builds = 0
Dangerous builds = 0

Please see the log.cite file for references relevant to this simulation

Total wall time: 0:07:24
Some files were not shown because too many files have changed in this diff