Allowed to skip a list of input scripts as specified in the config file

Author: Trung Nguyen
Date:   2024-07-08 15:52:49 -05:00
Parent: 4746fe74ed
Commit: c7d729e3d6

3 changed files with 322 additions and 292 deletions

File 1 of 3 (tolerance config YAML):

@@ -28,6 +28,11 @@
   Press:
     abs: 1e-2
     rel: 1e-4
+skip:
+  [ in.rigid.poems3,
+    in.rigid.poems4
+  ]
 nugget: 1.0
 epsilon: 1e-16
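Note: a minimal sketch of how this skip list is consumed once the YAML is loaded, mirroring the check added to the runner later in this commit (the file name and the use of yaml.safe_load are assumptions for illustration; the script itself uses yaml.load with an explicit Loader):

    import yaml

    # load the test configuration (file name assumed for illustration)
    with open("config.yaml", "r") as f:
        config = yaml.safe_load(f)

    # 'skip' is optional, so guard the lookup before membership tests
    skip_list = config.get("skip", [])
    for input_script in ["in.rigid.poems3", "in.rigid.poems5"]:
        if input_script in skip_list:
            print(f"skipping {input_script}")
        else:
            print(f"running {input_script}")

With the hunk above, in.rigid.poems3 would be skipped and in.rigid.poems5 would run.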

File 2 of 3 (tolerance config YAML):

@@ -20,5 +20,14 @@
   E_vdwl:
     abs: 1e-3
     rel: 1e-7
+overrides:
+  in.rigid.tnr:
+    Temp:
+      abs: 1e-3
+      rel: 1e-5
+    Press:
+      abs: 1e-2
+      rel: 1e-4
 nugget: 1.0
 epsilon: 1e-16
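Note: a condensed sketch of the override resolution this section enables, mirroring the comparison loop later in this commit (the sample tolerance values are hypothetical):

    # global tolerances plus a per-input override, as loaded from the YAML above
    tolerance = {"Temp": {"abs": 1e-4, "rel": 1e-6}}   # config['tolerance']
    overrides = {"Temp": {"abs": 1e-3, "rel": 1e-5}}   # config['overrides']['in.rigid.tnr']

    quantity = "Temp"
    if quantity in tolerance or quantity in overrides:
        abs_tol = float(tolerance[quantity]["abs"])
        rel_tol = float(tolerance[quantity]["rel"])
        # a per-input override replaces the global values
        if quantity in overrides:
            abs_tol = float(overrides[quantity]["abs"])
            rel_tol = float(overrides[quantity]["rel"])
    print(abs_tol, rel_tol)   # 0.001 1e-05: the override wins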

File 3 of 3 (Python regression-test runner):

@@ -1,5 +1,8 @@
 #!/usr/bin/env python3
 '''
-UPDATE: Feb 8, 2024:
+pip install numpy pyyaml junit_xml
+UPDATE: July 5, 2024:
 Launching the LAMMPS binary under testing using a configuration defined in a yaml file (e.g. config.yaml).
 Comparing the output thermo with that in the existing log file (with the same nprocs)
+  + data in the log files are extracted and converted into yaml data structure
@@ -8,8 +11,7 @@ UPDATE: Feb 8, 2024:
   + launch tests with mpirun with multiple procs
   + specify what LAMMPS binary version to test (e.g., testing separate builds)
   + simplify the build configuration (no need to build the Python module)
-NOTE: Need to allow to tolerances specified for invidual input scripts,
-      or each config.yaml is for a set of example folders
+  + specify tolerances for individual quantities for any input script to override the global values

 Example usage:
    1) Simple use (using the provided tools/regression-tests/config.yaml and the examples/ folder at the top level)
@@ -28,7 +30,8 @@ import fnmatch
 import subprocess
 from argparse import ArgumentParser
-# need "pip install pyyaml numpy"
+import logging
+# need "pip install numpy pyyaml"
 import yaml
 import numpy as np
@@ -145,44 +148,44 @@ def extract_data_to_yaml(inputFileName):
     return thermo

 '''
-    return the list of installed packages
+    return a tuple of the list of installed packages, OS, GitInfo and compile_flags
 '''
 def get_lammps_build_configuration(lmp_binary):
     cmd_str = lmp_binary + " -h"
     p = subprocess.run(cmd_str, shell=True, text=True, capture_output=True)
     output = p.stdout.split('\n')

     packages = ""
     reading = False
     row = 0
     for l in output:
         if l != "":
             if l == "Installed packages:":
                 reading = True
                 n = row
             if "List of individual style options" in l:
                 reading = False
             if reading == True and row > n:
                 packages += l.strip() + " "
             if "OS:" in l:
                 operating_system = l
             if "Git info" in l:
                 GitInfo = l
         row += 1
     packages = packages.strip()

     row = 0
     compile_flags = ""
     for l in output:
         if l != "":
             if "-DLAMMPS" in l:
                 compile_flags += " " + l.strip()
         row += 1

     return packages.split(" "), operating_system, GitInfo, compile_flags

 '''
     launch LAMMPS using the configuration defined in the dictionary config with an input file
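Note: a usage sketch for the updated return signature (the binary path is hypothetical):

    packages, operating_system, git_info, compile_flags = get_lammps_build_configuration("/path/to/lmp")
    if 'RIGID' not in packages:
        print("RIGID is not installed; the rigid example folder will not be appended")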
@@ -191,16 +194,16 @@ def get_lammps_build_configuration(lmp_binary):
     - wrap subprocess with try/catch to handle exceptions
 '''
 def execute(lmp_binary, config, input_file_name, generate_ref_yaml=False):
     cmd_str = config['mpiexec'] + " " + config['mpiexec_numproc_flag'] + " " + config['nprocs'] + " "
     cmd_str += lmp_binary + " -in " + input_file_name + " " + config['args']
     print(f"Executing: {cmd_str}")
     p = subprocess.run(cmd_str, shell=True, text=True, capture_output=True)

     #output = p.stdout.split('\n')
     output = p.stdout

     # process output to handle failed runs

     return cmd_str, output

 '''
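Note: for illustration, with config values such as mpiexec: mpirun, mpiexec_numproc_flag: -np, nprocs: "4", and args: "-log none" (all hypothetical), execute() assembles and runs a command line like:

    mpirun -np 4 /path/to/lmp -in in.melt -log none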
@@ -210,36 +213,36 @@ def execute(lmp_binary, config, input_file_name, generate_ref_yaml=False):
 '''
 def generate_markers(inputFileName, outputFileName):
     # read in the script
     with open(inputFileName, 'r') as file:
         data = file.read()
     lines = data.splitlines()
     out = []
     for line in lines:
         s = line.split()
         if len(s) > 0:
             if s[0] == "run":
                 out.append("    #REG:ADD thermo 10")
                 out.append("    #REG:ADD thermo_style yaml")
         out.append(line)

     # write data to the new script
     with open(outputFileName, 'w') as file:
         for line in out:
             file.write(line + "\n")

 def has_markers(input):
     with open(input) as f:
         if '#REG' in f.read():
             return True
     return False

 '''
-    Iterate over a list of input files using the testing configuration
-    return total number of tests, and the number of tests with failures
+    Iterate over a list of input files using the given lmp_binary and the testing configuration
+    return test results, as a list of TestResult instances
 '''
-def iterate(input_list, config, results, removeAnnotatedInput=False):
+def iterate(lmp_binary, input_list, config, results, removeAnnotatedInput=False):
     EPSILON = np.float64(config['epsilon'])
     nugget = float(config['nugget'])
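Note: as a concrete illustration of generate_markers, a hypothetical input fragment containing a run command would be annotated as follows; the #REG:ADD markers are comments here, which the test framework can later expand into the corresponding thermo commands (the expansion itself happens elsewhere):

    # before annotation
    velocity all create 3.0 87287
    run 250

    # after annotation
    velocity all create 3.0 87287
        #REG:ADD thermo 10
        #REG:ADD thermo_style yaml
    run 250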
@@ -253,6 +256,11 @@ def iterate(input_list, config, results, removeAnnotatedInput=False):
     # iterate over the input scripts
     for input in input_list:

+        # skip the input file if listed
+        if 'skip' in config:
+            if input in config['skip']:
+                continue
+
         str_t = "\nRunning " + input + f" ({test_id+1}/{num_tests})"
         result = TestResult(name=input, output="", time="", status="passed")
@@ -335,8 +343,10 @@ def iterate(input_list, config, results, removeAnnotatedInput=False):
         num_runs = len(thermo)
         if num_runs == 0:
-            print(f"ERROR: Failed with the running with {input_test}. The run terminated with the following output:\n")
-            print(f"{output}")
+            print(f"ERROR: Failed with {input_test}. Check the log file for the run output.\n")
+            #print(f"{output}")
+            logger.info(f"The run terminated with the following output:\n")
+            logger.info(f"\n{output}")
             result.status = "error"
             results.append(result)
             continue
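Note: logger here is the module-level instance configured in the __main__ block further down in this diff (logging.basicConfig(filename='run.log', level=logging.INFO)), so the full output of a failed run now goes to run.log rather than cluttering stdout.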
@@ -351,7 +361,8 @@ def iterate(input_list, config, results, removeAnnotatedInput=False):
         # comparing output vs reference values
         width = 20
         if verbose == True:
-            print("Quantities".ljust(width) + "Output".center(width) + "Reference".center(width) + "Abs Diff Check".center(width) + "Rel Diff Check".center(width))
+            print("Quantities".ljust(width) + "Output".center(width) + "Reference".center(width) +
+                  "Abs Diff Check".center(width) + "Rel Diff Check".center(width))

         # check if overrides for this input script are specified
         overrides = {}
@@ -378,64 +389,67 @@ def iterate(input_list, config, results, removeAnnotatedInput=False):
             # iterate over the fields
             for i in range(num_fields):
                 quantity = thermo[irun]['keywords'][i]
                 val = thermo[irun]['data'][thermo_step][i]
                 ref = thermo_ref[irun]['data'][thermo_step][i]
                 abs_diff = abs(float(val) - float(ref))

                 if abs(float(ref)) > EPSILON:
                     rel_diff = abs(float(val) - float(ref))/abs(float(ref))
                 else:
                     rel_diff = abs(float(val) - float(ref))/abs(float(ref)+nugget)

                 abs_diff_check = "PASSED"
                 rel_diff_check = "PASSED"

                 if quantity in config['tolerance'] or quantity in overrides:
                     if quantity in config['tolerance']:
                         abs_tol = float(config['tolerance'][quantity]['abs'])
                         rel_tol = float(config['tolerance'][quantity]['rel'])
                     # overrides the global tolerance values if specified
                     if quantity in overrides:
                         abs_tol = float(overrides[quantity]['abs'])
                         rel_tol = float(overrides[quantity]['rel'])

                     num_checks = num_checks + 2
                     if abs_diff > abs_tol:
                         abs_diff_check = "FAILED"
                         reason = f"Run {irun}: {quantity}: actual ({abs_diff:0.2e}) > expected ({abs_tol:0.2e})"
                         failed_abs_output.append(f"{reason}")
                         num_abs_failed = num_abs_failed + 1
                     if rel_diff > rel_tol:
                         rel_diff_check = "FAILED"
                         reason = f"Run {irun}: {quantity}: actual ({rel_diff:0.2e}) > expected ({rel_tol:0.2e})"
                         failed_rel_output.append(f"{reason}")
                         num_rel_failed = num_rel_failed + 1
                 else:
                     # N/A means that tolerances are not defined in the config file
                     abs_diff_check = "N/A"
                     rel_diff_check = "N/A"

                 if verbose == True and abs_diff_check != "N/A" and rel_diff_check != "N/A":
-                    print(f"{thermo[irun]['keywords'][i].ljust(width)} {str(val).rjust(20)} {str(ref).rjust(20)} {abs_diff_check.rjust(20)} {rel_diff_check.rjust(20)}")
+                    print(f"{thermo[irun]['keywords'][i].ljust(width)} {str(val).rjust(20)} {str(ref).rjust(20)} "
+                          f"{abs_diff_check.rjust(20)} {rel_diff_check.rjust(20)}")

         if num_abs_failed > 0:
             print(f"{num_abs_failed} absolute diff checks failed with the specified tolerances.")
             result.status = "failed"
-            for i in failed_abs_output:
-                print(f"- {i}")
+            if verbose == True:
+                for i in failed_abs_output:
+                    print(f"- {i}")
         if num_rel_failed > 0:
             print(f"{num_rel_failed} relative diff checks failed with the specified tolerances.")
             result.status = "failed"
-            for i in failed_rel_output:
-                print(f"- {i}")
+            if verbose == True:
+                for i in failed_rel_output:
+                    print(f"- {i}")
         if num_abs_failed == 0 and num_rel_failed == 0:
             print(f"All {num_checks} checks passed.")
             result.status = "passed"
             num_passed = num_passed + 1

         results.append(result)
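Note: a worked example of the two checks using the Temp override from the second config file above (abs 1e-3, rel 1e-5); the thermo values are hypothetical:

    val, ref, nugget = 300.0005, 300.0, 1.0
    abs_diff = abs(val - ref)        # 5.0e-04 <= 1e-3  -> abs check PASSED
    rel_diff = abs_diff / abs(ref)   # 1.7e-06 <= 1e-5  -> rel check PASSED
    # if ref were 0.0 (below EPSILON), the nugget guards the division:
    rel_diff_zero = abs(val - 0.0) / abs(0.0 + nugget)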
@@ -444,8 +458,8 @@ def iterate(input_list, config, results, removeAnnotatedInput=False):
         # remove the annotated input script
         if removeAnnotatedInput == True:
             cmd_str = "rm " + input_test
             os.system(cmd_str)

     return num_passed
@@ -455,197 +469,199 @@ def iterate(input_list, config, results, removeAnnotatedInput=False):
 '''
 if __name__ == "__main__":
+    logger = logging.getLogger(__name__)
+    logging.basicConfig(filename='run.log', level=logging.INFO)
+
     # default values
     lmp_binary = ""
     configFileName = "config.yaml"
     example_subfolders = []
     genref = False
     verbose = False
     output_file = "output.xml"

     # parse the arguments
     parser = ArgumentParser()
     parser.add_argument("--lmp-bin", dest="lmp_binary", default="", help="LAMMPS binary")
     parser.add_argument("--config-file", dest="config_file", default="config.yaml",
                         help="Configuration YAML file")
     parser.add_argument("--example-folders", dest="example_folders", default="", help="Example subfolders")
     parser.add_argument("--gen-ref", dest="genref", action='store_true', default=False,
                         help="Generating reference data")
     parser.add_argument("--verbose", dest="verbose", action='store_true', default=False,
                         help="Verbose output")
     parser.add_argument("--output", dest="output", default="output.xml", help="Output file")

     args = parser.parse_args()

     lmp_binary = os.path.abspath(args.lmp_binary)
     configFileName = args.config_file
     output_file = args.output
     if args.example_folders != "":
         example_subfolders = args.example_folders.split(';')
         print("Example folders:")
         print(example_subfolders)
     genref = args.genref
     verbose = args.verbose

     # read in the configuration of the tests
     with open(configFileName, 'r') as f:
         config = yaml.load(f, Loader=Loader)
         absolute_path = os.path.abspath(configFileName)
         print(f"Regression tests with settings defined in {absolute_path}")

     # check if lmp_binary is specified in the config yaml
     if lmp_binary == "":
         if config['lmp_binary'] == "":
             print("Needs a valid LAMMPS binary")
             quit()
         else:
             lmp_binary = os.path.abspath(config['lmp_binary'])

     # print out the binary info
     packages, operating_system, GitInfo, compile_flags = get_lammps_build_configuration(lmp_binary)
     print("LAMMPS build info:")
     print(f"- {operating_system}")
     print(f"- {GitInfo}")
     print(f"- Active compile flags: {compile_flags}")
     print(f"- List of installed packages: {packages}")

     # Using in-place input scripts
     inplace_input = True
     test_cases = []

     # if the example folders are not specified from the command-line argument --example-folders
     if len(example_subfolders) == 0:
         example_subfolders.append('../../examples/melt')
         example_subfolders.append('../../examples/flow')
         example_subfolders.append('../../examples/indent')
         example_subfolders.append('../../examples/shear')
         example_subfolders.append('../../examples/steinhardt')
         # prd log file parsing issue
         # neb log file parsing issue
         # snap log files obsolete?

         # append the example subfolders depending on the installed packages
         if 'ASPHERE' in packages:
             #example_subfolders.append('../../examples/ASPHERE/ellipsoid')
             example_subfolders.append('../../examples/ellipse')
         if 'CORESHELL' in packages:
             example_subfolders.append('../../examples/coreshell')
         if 'MOLECULE' in packages:
             example_subfolders.append('../../examples/micelle')
             # peptide thermo_style as multi
             #example_subfolders.append('../../examples/peptide')
         if 'GRANULAR' in packages:
             example_subfolders.append('../../examples/granular')
             example_subfolders.append('../../examples/pour')
         if 'AMOEBA' in packages:
             example_subfolders.append('../../examples/amoeba')
         if 'BODY' in packages:
             example_subfolders.append('../../examples/body')
         if 'BPM' in packages:
             example_subfolders.append('../../examples/bpm/impact')
             example_subfolders.append('../../examples/bpm/pour')
         if 'COLLOID' in packages:
             example_subfolders.append('../../examples/colloid')
         if 'CRACK' in packages:
             example_subfolders.append('../../examples/crack')
         if 'DIELECTRIC' in packages:
             example_subfolders.append('../../examples/PACKAGES/dielectric')
         if 'DIPOLE' in packages:
             example_subfolders.append('../../examples/dipole')
         if 'DPD-BASIC' in packages:
             example_subfolders.append('../../examples/PACKAGES/dpd-basic/dpd')
             example_subfolders.append('../../examples/PACKAGES/dpd-basic/dpdext')
             example_subfolders.append('../../examples/PACKAGES/dpd-basic/dpd_tstat')
             example_subfolders.append('../../examples/PACKAGES/dpd-basic/dpdext_tstat')
         if 'MANYBODY' in packages:
             example_subfolders.append('../../examples/tersoff')
             example_subfolders.append('../../examples/vashishta')
             example_subfolders.append('../../examples/threebody')
         if 'RIGID' in packages:
             example_subfolders.append('../../examples/rigid')
         if 'SRD' in packages:
             example_subfolders.append('../../examples/srd')

     all_results = []

     if inplace_input == True:
         # save current working dir
         p = subprocess.run("pwd", shell=True, text=True, capture_output=True)
         pwd = p.stdout.split('\n')[0]
         pwd = os.path.abspath(pwd)
         print("Working directory: " + pwd)

         # change dir to a folder under examples/, need to use os.chdir()
         # TODO: loop through the subfolders under examples/, depending on the installed packages
         total_tests = 0
         passed_tests = 0
         for directory in example_subfolders:
             p = subprocess.run("pwd", shell=True, text=True, capture_output=True)
             print("\nEntering " + directory)
             os.chdir(directory)

             cmd_str = "ls in.*"
             p = subprocess.run(cmd_str, shell=True, text=True, capture_output=True)
             input_list = p.stdout.split('\n')
             input_list.remove('')
             print(f"List of input scripts: {input_list}")
             total_tests += len(input_list)

             # iterate through the input scripts
             results = []
-            num_passed = iterate(input_list, config, results)
+            num_passed = iterate(lmp_binary, input_list, config, results)
             passed_tests += num_passed
             all_results.extend(results)

             # get back to the working dir
             os.chdir(pwd)
     else:
         # or using the input scripts in the working directory -- for debugging purposes
         input_list = ['in.lj', 'in.rhodo', 'in.eam']
         total_tests = len(input_list)

         results = []
-        passed_tests = iterate(input_list, config, results)
+        passed_tests = iterate(lmp_binary, input_list, config, results)

     print("Summary:")
     print(f" - {passed_tests} passed / {total_tests} tests")
     print(f" - Details are given in {output_file}.")

     # generate a JUnit XML file
     with open(output_file, 'w') as f:
         test_cases = []
         for result in all_results:
             #print(f"{result.name}: {result.status}")
             case = TestCase(name=result.name, classname=result.name)
             if result.status == "failed":
                 case.add_failure_info(message="Actual values did not match expected ones.")
             if result.status == "skipped":
                 case.add_skipped_info(message="Test was skipped.")
             if result.status == "error":
                 case.add_skipped_info(message="Test run had errors.")
             test_cases.append(case)

         current_timestamp = datetime.datetime.now()
         ts = TestSuite(f"{configFileName}", test_cases, timestamp=current_timestamp)
         TestSuite.to_file(f, [ts], prettyprint=True)
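Note: putting the pieces together, a typical invocation against this configuration might look like the following (the script and binary paths are hypothetical; the flag names come from the argparse setup above):

    python3 run_tests.py --lmp-bin=/path/to/lmp --config-file=config.yaml --output=output.xml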