From 3240dd0bcbc6a9d9adc7218e61a737b1546e92d8 Mon Sep 17 00:00:00 2001
From: Jake Koester
Date: Mon, 29 Jul 2024 22:13:31 +0000
Subject: [PATCH] move some regression tests and utils into the repo

---
 test/regression_tests/.gitignore              |   2 +
 test/regression_tests/run_regression_tests.py | 295 +++++++++++++++
 .../taylor_bar/fem/compare.exodiff            |  57 +++
 .../taylor_bar/fem/gold_results.exo           |   3 +
 .../taylor_bar/fem/input.yaml                 |  48 +++
 .../regression_tests/taylor_bar/fem/test.yaml |  23 ++
 .../taylor_bar/mesh/cylinder0p02.exo          |   3 +
 .../taylor_bar/mesh/gmsh_to_exo_cylinder.py   | 135 +++++++
 test/utils/__init__.py                        |   0
 test/utils/regression_test/.gitignore         |   2 +
 test/utils/regression_test/__init__.py        |   2 +
 test/utils/regression_test/regression_test.py | 343 ++++++++++++++++++
 .../regression_test/test_files/.gitignore     |   2 +
 .../regression_test/test_files/bad_gold.exo   |   3 +
 .../regression_test/test_files/bad_input.yaml |  49 +++
 .../test_files/compare.exodiff                |  62 ++++
 .../regression_test/test_files/good_gold.exo  |   3 +
 .../regression_test/test_files/input.yaml     |  49 +++
 .../regression_test/test_files/mesh_1x1x5.exo |   3 +
 .../regression_test/test_regression_test.py   | 113 ++++++
 test/utils/unit_test_all_modules.py           |  87 +++++
 21 files changed, 1284 insertions(+)
 create mode 100644 test/regression_tests/.gitignore
 create mode 100755 test/regression_tests/run_regression_tests.py
 create mode 100644 test/regression_tests/taylor_bar/fem/compare.exodiff
 create mode 100644 test/regression_tests/taylor_bar/fem/gold_results.exo
 create mode 100644 test/regression_tests/taylor_bar/fem/input.yaml
 create mode 100644 test/regression_tests/taylor_bar/fem/test.yaml
 create mode 100644 test/regression_tests/taylor_bar/mesh/cylinder0p02.exo
 create mode 100644 test/regression_tests/taylor_bar/mesh/gmsh_to_exo_cylinder.py
 create mode 100644 test/utils/__init__.py
 create mode 100644 test/utils/regression_test/.gitignore
 create mode 100644 test/utils/regression_test/__init__.py
 create mode 100644 test/utils/regression_test/regression_test.py
 create mode 100644 test/utils/regression_test/test_files/.gitignore
 create mode 100644 test/utils/regression_test/test_files/bad_gold.exo
 create mode 100644 test/utils/regression_test/test_files/bad_input.yaml
 create mode 100644 test/utils/regression_test/test_files/compare.exodiff
 create mode 100644 test/utils/regression_test/test_files/good_gold.exo
 create mode 100644 test/utils/regression_test/test_files/input.yaml
 create mode 100644 test/utils/regression_test/test_files/mesh_1x1x5.exo
 create mode 100755 test/utils/regression_test/test_regression_test.py
 create mode 100644 test/utils/unit_test_all_modules.py

diff --git a/test/regression_tests/.gitignore b/test/regression_tests/.gitignore
new file mode 100644
index 00000000..9cdcd1ae
--- /dev/null
+++ b/test/regression_tests/.gitignore
@@ -0,0 +1,2 @@
+*.log
+results.exo
diff --git a/test/regression_tests/run_regression_tests.py b/test/regression_tests/run_regression_tests.py
new file mode 100755
index 00000000..066ce371
--- /dev/null
+++ b/test/regression_tests/run_regression_tests.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python3
+
+import argparse
+import glob
+import os
+import sys
+import time
+
+import yaml
+
+# Add the parent directory to the system path to import custom modules
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
+from utils.regression_test import ExodiffCheck, PeakMemoryCheck, RegressionTest
+
+
+def get_inputs_from_yaml_node(yaml_node, test_name_prefix, build_dir):
+    """
+    Extracts test inputs from a YAML node.
+    """
+    inputs = {
+        "test_name": f"{test_name_prefix}_{yaml_node['hardware']}_np_{yaml_node['num_processors']}",
+        "input_file": yaml_node["input_file"],
+        "peak_memory": yaml_node.get("peak_memory_check", {}).get("value"),
+        "peak_memory_percent_tolerance": yaml_node.get("peak_memory_check", {}).get(
+            "percent_tolerance"
+        ),
+        "exodiff": [
+            {
+                "compare_file": exodiff["compare_file"],
+                "results_file": exodiff["results_file"],
+                "gold_file": exodiff["gold_file"],
+            }
+            for exodiff in yaml_node["exodiff"]
+        ],
+        "executable_path": os.path.join(
+            build_dir,
+            "Release_gpu" if yaml_node["hardware"] == "gpu" else "Release",
+            "aperi-mech",
+        ),
+        "num_processors": yaml_node["num_processors"],
+    }
+    return inputs
+
+
+def expand_wildcards(file_patterns):
+    """
+    Expands wildcard patterns to a list of matching files.
+    """
+    expanded_files = []
+    for pattern in file_patterns:
+        expanded_files.extend(glob.glob(pattern))
+    return expanded_files
+
+
+def should_run_test(test_config, cpu_only, serial_only, parallel_only, gpu_only):
+    """
+    Determines if a test should be run based on the provided flags.
+    """
+    if (
+        (cpu_only and test_config["hardware"] != "cpu")
+        or (serial_only and test_config["num_processors"] != 1)
+        or (parallel_only and test_config["num_processors"] == 1)
+        or (gpu_only and test_config["hardware"] != "gpu")
+    ):
+        return False
+    return True
+
+
+def execute_test(test_config, dirpath, build_dir, keep_results):
+    """
+    Executes a single test and returns whether it passed.
+    """
+    print(f"    Running test {test_config['hardware']}_{test_config['num_processors']}")
+    inputs = get_inputs_from_yaml_node(
+        test_config, os.path.basename(dirpath), build_dir
+    )
+    regression_test = RegressionTest(
+        inputs["test_name"],
+        inputs["executable_path"],
+        inputs["num_processors"],
+        [inputs["input_file"]],
+    )
+    return_code, stats = regression_test.run()
+
+    if return_code != 0:
+        print("\033[91m    FAIL\033[0m")
+        return False
+
+    all_exodiff_passed = all(
+        ExodiffCheck(
+            f"{inputs['test_name']}_exodiff_{i}",
+            "exodiff",
+            exodiff["compare_file"],
+            exodiff["results_file"],
+            exodiff["gold_file"],
+            [],
+        ).run()
+        == 0
+        for i, exodiff in enumerate(inputs["exodiff"])
+    )
+
+    memcheck_passed = True
+    if inputs["peak_memory"] is not None:
+        peak_memory_check = PeakMemoryCheck(
+            f"{inputs['test_name']}_peak_memory",
+            stats["peak_memory"],
+            inputs["peak_memory"],
+            inputs["peak_memory_percent_tolerance"],
+        )
+        memcheck_passed = peak_memory_check.run() == 0
+
+    if all_exodiff_passed and memcheck_passed:
+        print("\033[92m    PASS\033[0m")
+        if (
+            not keep_results
+            and "cleanup" in test_config
+            and "remove" in test_config["cleanup"]
+        ):
+            cleanup_test_results(test_config)
+        return True
+    else:
+        print("\033[91m    FAIL\033[0m")
+        return False
+
+
+def cleanup_test_results(test_config):
+    """
+    Cleans up test results based on the cleanup configuration.
+    """
+    for item in test_config["cleanup"]["remove"]:
+        for file in expand_wildcards([item]):
+            os.remove(file)
+
+
+def run_regression_tests_from_directory(
+    root_dir,
+    build_dir,
+    cpu_only=False,
+    serial_only=False,
+    parallel_only=False,
+    gpu_only=False,
+    keep_results=False,
+):
+    """
+    Runs regression tests from the specified directory.
+ """ + passing_tests = 0 + total_tests = 0 + current_dir = os.getcwd() + + for dirpath, _, filenames in os.walk(root_dir): + if "test.yaml" in filenames: + os.chdir(dirpath) + print("-----------------------------------") + print(f"Running tests in {dirpath}") + + with open("test.yaml", "r") as file: + yaml_node = yaml.safe_load(file) + test_configs = yaml_node["tests"] + + for test_config in test_configs: + if not should_run_test( + test_config, cpu_only, serial_only, parallel_only, gpu_only + ): + continue + + if execute_test(test_config, dirpath, build_dir, keep_results): + passing_tests += 1 + total_tests += 1 + + print("-----------------------------------\n") + os.chdir(current_dir) + + return passing_tests, total_tests + + +def clean_logs(root_dir): + """ + Cleans log files from the specified directory. + """ + for dirpath, _, filenames in os.walk(root_dir): + if "test.yaml" in filenames: + print("-----------------------------------") + print(f"Cleaning logs in {dirpath}") + for log_file in glob.glob(f"{dirpath}/*.log"): + os.remove(log_file) + print("-----------------------------------\n") + + +def clean_results(root_dir): + """ + Cleans result files from the specified directory. + """ + for dirpath, _, filenames in os.walk(root_dir): + if "test.yaml" in filenames: + print("-----------------------------------") + print(f"Cleaning results in {dirpath}") + yaml_file = os.path.join(dirpath, "test.yaml") + with open(yaml_file, "r") as file: + yaml_node = yaml.safe_load(file) + test_configs = yaml_node["tests"] + for test_config in test_configs: + if "cleanup" in test_config and "remove" in test_config["cleanup"]: + for item in test_config["cleanup"]["remove"]: + for file in expand_wildcards([item]): + os.remove(os.path.join(dirpath, file)) + print("-----------------------------------\n") + + +def parse_arguments(): + """ + Parses command-line arguments. + """ + parser = argparse.ArgumentParser(description="Run regression tests.") + parser.add_argument( + "--directory", + help="Directory root containing the tests. Will recursively search for test.yaml files.", + default=".", + ) + parser.add_argument( + "--build_dir", + help="Directory containing the build", + default="/home/azureuser/projects/aperi-mech/build/", + ) + parser.add_argument( + "--clean_logs", + help="Clean the log files from the tests and exit", + action="store_true", + ) + parser.add_argument( + "--clean_results", + help="Clean the results files from the tests and exit", + action="store_true", + ) + parser.add_argument( + "--keep_results", + help="Keep the results files even if the test passes. 
+        action="store_true",
+    )
+    parser.add_argument(
+        "--no_preclean",
+        help="Do not clean the logs and results before running the tests",
+        action="store_true",
+    )
+    parser.add_argument("--cpu", help="Only run CPU tests", action="store_true")
+    parser.add_argument("--serial", help="Only run serial tests", action="store_true")
+    parser.add_argument(
+        "--parallel", help="Only run parallel tests", action="store_true"
+    )
+    parser.add_argument("--gpu", help="Only run GPU tests", action="store_true")
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_arguments()
+    build_dir = os.path.abspath(args.build_dir)
+    directory = os.path.abspath(args.directory)
+
+    # Clean logs and/or results if specified
+    if args.clean_logs or args.clean_results:
+        if args.clean_logs:
+            clean_logs(directory)
+        if args.clean_results:
+            clean_results(directory)
+        sys.exit(0)
+
+    # Pre-clean logs and results unless specified otherwise
+    if not args.no_preclean:
+        print("Cleaning logs and results before running the tests")
+        clean_logs(directory)
+        clean_results(directory)
+
+    # Run regression tests and measure time
+    start_time = time.perf_counter()
+    passing_tests, total_tests = run_regression_tests_from_directory(
+        directory,
+        build_dir,
+        args.cpu,
+        args.serial,
+        args.parallel,
+        args.gpu,
+        args.keep_results,
+    )
+    end_time = time.perf_counter()
+    print(f"Total time: {end_time - start_time:.4e} seconds")
+
+    failing_tests = total_tests - passing_tests
+    if failing_tests > 0:
+        print(f"{failing_tests} tests failed.")
+        print(f"{passing_tests} tests passed.")
+        sys.exit(1)
+    else:
+        print(f"All {passing_tests} tests passed.")
+        sys.exit(0)
diff --git a/test/regression_tests/taylor_bar/fem/compare.exodiff b/test/regression_tests/taylor_bar/fem/compare.exodiff
new file mode 100644
index 00000000..a67a8341
--- /dev/null
+++ b/test/regression_tests/taylor_bar/fem/compare.exodiff
@@ -0,0 +1,57 @@
+
+# *****************************************************************
+# EXODIFF (Version: 3.33) Modified: 2024-03-13
+# Authors: Richard Drake, rrdrake@sandia.gov
+#          Greg Sjaardema, gdsjaar@sandia.gov
+# Run on 2024/07/13 00:07:11 UTC
+# *****************************************************************
+
+# FILE 1: /home/azureuser/projects/aperi-mech_test/cylindrical_taylor_bar/regression/fem/gold_results.exo
+# Title: IOSS Default Output Title
+# Dim = 3, Nodes = 285, Elements = 904, Faces = 0, Edges = 0
+# Element Blocks = 1, Face Blocks = 0, Edge Blocks = 0, Nodesets = 1, Sidesets = 0, Assemblies = 0
+# Vars: Global = 0, Nodal = 18, Element = 1, Face = 0, Edge = 0, Nodeset = 0, Sideset = 0, Times = 2
+
+
+# ==============================================================
+# NOTE: All node and element ids are reported as global ids.
+
+# NOTES: - The min/max values are reporting the min/max in absolute value.
+#        - Time values (t) are 1-offset time step numbers.
+#        - Element block numbers are the block ids.
+#        - Node(n) and element(e) numbers are 1-offset.
+
+COORDINATES absolute 1.e-6 # min separation not calculated
+
+TIME STEPS relative 1.e-6 floor 0.0 # min: 0 @ t1 max: 0.0001 @ t2
+
+# No GLOBAL VARIABLES
+
+NODAL VARIABLES relative 1.e-6 floor 0.0
+    displacement_x # min: 0 @ t1,n1 max: 0.0037303132 @ t2,n16
+    displacement_y # min: 0 @ t1,n1 max: 0.0034884944 @ t2,n127
+    displacement_z # min: 0 @ t1,n1 max: 0.037275041 @ t2,n210
+    acceleration_x # min: 0 @ t1,n1 max: 21336627 @ t2,n229
+    acceleration_y # min: 0 @ t1,n1 max: 33554769 @ t2,n29
+    acceleration_z # min: 0 @ t1,n1 max: 10711345 @ t2,n48
+    force_x # min: 0 @ t1,n1 max: 226751.51 @ t2,n276
+    force_y # min: 0 @ t1,n1 max: 233764.41 @ t2,n259
+    force_z # min: 0 @ t1,n1 max: 351701.62 @ t2,n233
+    mass_x # min: 0.001125266 @ t1,n3 max: 0.041543172 @ t1,n259
+    mass_y # min: 0.001125266 @ t1,n3 max: 0.041543172 @ t1,n259
+    mass_z # min: 0.001125266 @ t1,n3 max: 0.041543172 @ t1,n259
+    mass_from_elements_x # min: 0.001125266 @ t1,n3 max: 0.041543172 @ t1,n259
+    mass_from_elements_y # min: 0.001125266 @ t1,n3 max: 0.041543172 @ t1,n259
+    mass_from_elements_z # min: 0.001125266 @ t1,n3 max: 0.041543172 @ t1,n259
+    velocity_x # min: 0 @ t1,n1 max: 96.091629 @ t2,n200
+    velocity_y # min: 0 @ t1,n1 max: 123.97411 @ t2,n32
+    velocity_z # min: 0 @ t2,n2 max: 373 @ t1,n1
+
+# No NODESET VARIABLES
+
+# No SIDESET VARIABLES
+
+# No EDGE BLOCK VARIABLES
+
+# No FACE BLOCK VARIABLES
+
diff --git a/test/regression_tests/taylor_bar/fem/gold_results.exo b/test/regression_tests/taylor_bar/fem/gold_results.exo
new file mode 100644
index 00000000..31db65b1
--- /dev/null
+++ b/test/regression_tests/taylor_bar/fem/gold_results.exo
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c2d430a9f1362ff2481a59c86ebe8aac63aec278a37488eea6c405ceed1e5dc
+size 129756
diff --git a/test/regression_tests/taylor_bar/fem/input.yaml b/test/regression_tests/taylor_bar/fem/input.yaml
new file mode 100644
index 00000000..dca91600
--- /dev/null
+++ b/test/regression_tests/taylor_bar/fem/input.yaml
@@ -0,0 +1,48 @@
+procedures:
+  - explicit_dynamics_procedure:
+      geometry:
+        mesh: ../mesh/cylinder0p02.exo
+        parts:
+          - part:
+              set: block_1
+              formulation:
+                integration_scheme:
+                  gauss_quadrature:
+                    integration_order: 1
+                approximation_space:
+                  finite_element: ~
+              material:
+                elastic:
+                  density: 2700.0 # kg / m^3
+                  # youngs_modulus: 78.2e9 # Pa, 78.2 GPa
+                  youngs_modulus: 78.2e8 # just scaling down a bit since there is no plasticity
+                  poissons_ratio: 0.3
+      initial_conditions:
+        - velocity:
+            sets:
+              - block_1
+            components:
+              Z: -373.0 # m/s
+      time_stepper:
+        direct_time_stepper:
+          time_increment: 0.000001
+          time_end: 0.0001
+      output:
+        file: results.exo
+        time_start: 0
+        time_end: 1
+        time_increment: 0.0001
+      boundary_conditions:
+        - displacement:
+            sets:
+              - nodeset_1
+            components:
+              Z: 0.0
+            time_function:
+              ramp_function:
+                abscissa_values:
+                  - 0
+                  - 1
+                ordinate_values:
+                  - 0
+                  - 1
diff --git a/test/regression_tests/taylor_bar/fem/test.yaml b/test/regression_tests/taylor_bar/fem/test.yaml
new file mode 100644
index 00000000..85cb2014
--- /dev/null
+++ b/test/regression_tests/taylor_bar/fem/test.yaml
@@ -0,0 +1,23 @@
+# Define common settings
+defaults: &defaults
+  num_processors: 1
+  input_file: input.yaml
+  cleanup:
+    remove: [results.exo, "*.log"]
+  exodiff:
+    - compare_file: compare.exodiff
+      gold_file: gold_results.exo
+      results_file: results.exo
+
+# List of tests using the defaults
+tests:
+  - <<: *defaults
+    hardware: cpu
+    peak_memory_check:
+      value: 40.0 # Gold peak memory usage in MB
+      percent_tolerance: 10
+  - <<: *defaults
+    hardware: gpu
+  - <<: *defaults
+    num_processors: 4 # Override the default num_processors for this test
+    hardware: cpu
diff --git a/test/regression_tests/taylor_bar/mesh/cylinder0p02.exo b/test/regression_tests/taylor_bar/mesh/cylinder0p02.exo
new file mode 100644
index 00000000..2df7242b
--- /dev/null
+++ b/test/regression_tests/taylor_bar/mesh/cylinder0p02.exo
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8600db4c6994c7572b3c2ef8731d9a00355c829abd20d8a7291f716d47502699
+size 23924
diff --git a/test/regression_tests/taylor_bar/mesh/gmsh_to_exo_cylinder.py b/test/regression_tests/taylor_bar/mesh/gmsh_to_exo_cylinder.py
new file mode 100644
index 00000000..7cff54d6
--- /dev/null
+++ b/test/regression_tests/taylor_bar/mesh/gmsh_to_exo_cylinder.py
@@ -0,0 +1,135 @@
+import os
+
+import exodus
+import gmsh
+import meshio
+import numpy as np
+
+
+def create_gmsh_cylinder(height, radius, mesh_size, out_file_base):
+    # Initialize gmsh
+    gmsh.initialize()
+
+    # Create a new model
+    gmsh.model.add("Cylinder")
+
+    # Add a cylinder
+    cylinder_tag = gmsh.model.occ.addCylinder(0, 0, 0, 0, 0, height, radius)
+
+    # Synchronize the CAD kernel with the Gmsh model
+    gmsh.model.occ.synchronize()
+
+    # Set the mesh size for the points at the bottom and top faces
+    # Get the points of the bottom and top circle
+    bottom_circle = gmsh.model.getEntitiesInBoundingBox(
+        -radius, -radius, 0, radius, radius, 0, 0
+    )
+    top_circle = gmsh.model.getEntitiesInBoundingBox(
+        -radius, -radius, height, radius, radius, height, 0
+    )
+
+    # Combine the points and filter for dimension 0 (points)
+    points = [pt for pt in bottom_circle + top_circle if pt[0] == 0]
+
+    # Set mesh size at the points
+    for point in points:
+        gmsh.model.mesh.setSize([point], mesh_size)
+
+    # Define a physical group for the whole cylinder
+    gmsh.model.addPhysicalGroup(3, [cylinder_tag], tag=1)
+    gmsh.model.setPhysicalName(3, 1, "Cylinder")
+
+    # Generate a 3D mesh
+    gmsh.model.mesh.generate(3)
+
+    # Save the mesh to a file
+    gmsh.write(out_file_base + ".msh")
+    # gmsh.write(out_file_base + ".key")
+
+    # Finalize the gmsh API
+    gmsh.finalize()
+
+
+def convert_gmsh_to_exo(out_file_base, cleanup=True):
+    # Convert the Gmsh mesh to ExodusII format using meshio
+    mesh = meshio.read(out_file_base + ".msh")
+    meshio.write(out_file_base + "_meshio.e", mesh)
+    if cleanup:
+        os.remove(out_file_base + ".msh")
+
+
+def add_nodeset_and_fix_exo(out_file_base, cleanup=True):
+    in_file = out_file_base + "_meshio.e"
+    out_file = out_file_base + ".exo"
+
+    # Open the existing ExodusII file
+    # Struggled to add nodesets to the meshio file, so just grab the nodes and connectivity and create a new file
+    exo_in = exodus.exodus(in_file, mode="r")
+
+    # Get the coordinates of all nodes
+    points = np.array(exo_in.get_coords()).T  # Transpose to get (n_nodes, 3)
+
+    # Leading face nodes
+    nodeset_nodes = np.where(points[:, 2] == 0)[0] + 1  # Convert to 1-based indexing
+
+    # Element connectivity
+    elements = np.array(exo_in.get_elem_connectivity(0)[0])
+    num_elements = exo_in.num_elems()
+
+    # If the output file already exists, remove it
+    if os.path.exists(out_file):
+        os.remove(out_file)
+
+    # Create an ExodusII file and write the mesh data
+    exo_out = exodus.exodus(
+        out_file,
+        mode="w",
+        array_type="numpy",
+        title="Cylinder Mesh",
+        numDims=3,
+        numNodes=points.shape[0],
+        numElems=num_elements,
+        numBlocks=1,
+        numNodeSets=1,
+    )
+
+    # Write coordinates
+    exo_out.put_coords(points[:, 0], points[:, 1], points[:, 2])
+
+    # Write element block info
+    exo_out.put_elem_blk_info(1, "TET4", num_elements, 4, 0)
+    exo_out.put_elem_connectivity(1, elements.flatten())
+
+    # Write nodeset info
+    exo_out.put_set_params("EX_NODE_SET", 1, nodeset_nodes.shape[0], 0)
+    exo_out.put_node_set(1, nodeset_nodes)
+
+    # Add names to the element block and nodeset
+    exo_out.put_names("EX_ELEM_BLOCK", ["block_1"])
+    exo_out.put_names("EX_NODE_SET", ["nodeset_1"])
+
+    # Close the ExodusII files
+    exo_out.close()
+    exo_in.close()
+
+    # Remove the meshio file
+    if cleanup:
+        os.remove(in_file)
+
+    print(f"ExodusII file '{out_file}' created successfully.")
+
+
+if __name__ == "__main__":
+
+    # Define the parameters
+    mesh_size = 0.02  # Define the desired mesh size
+    height = 0.2346
+    radius = 0.0391
+
+    out_file_base = "cylinder" + str(mesh_size).replace(".", "p")
+
+    create_gmsh_cylinder(height, radius, mesh_size, out_file_base)
+
+    convert_gmsh_to_exo(out_file_base)
+
+    add_nodeset_and_fix_exo(out_file_base)
diff --git a/test/utils/__init__.py b/test/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/utils/regression_test/.gitignore b/test/utils/regression_test/.gitignore
new file mode 100644
index 00000000..5e0eccbe
--- /dev/null
+++ b/test/utils/regression_test/.gitignore
@@ -0,0 +1,2 @@
+*.log
+__pycache__
diff --git a/test/utils/regression_test/__init__.py b/test/utils/regression_test/__init__.py
new file mode 100644
index 00000000..1f0b0b88
--- /dev/null
+++ b/test/utils/regression_test/__init__.py
@@ -0,0 +1,2 @@
+__all__ = ["ExodiffCheck", "PeakMemoryCheck", "RegressionTest"]
+from .regression_test import ExodiffCheck, PeakMemoryCheck, RegressionTest
diff --git a/test/utils/regression_test/regression_test.py b/test/utils/regression_test/regression_test.py
new file mode 100644
index 00000000..3641432c
--- /dev/null
+++ b/test/utils/regression_test/regression_test.py
@@ -0,0 +1,343 @@
+import argparse
+import datetime
+import os
+import shutil
+
+# trunk-ignore(bandit/B404)
+import subprocess
+import sys
+import time
+
+import psutil
+
+
+def _log_output(log_file, message):
+    with open(log_file, "a") as f:
+        f.write(message)
+
+
+def _run_executable(
+    command_pre, executable_path, command_args, log_file, check_memory=False
+):
+    return_code = 1
+    stats = {"peak_memory": 0}
+
+    executable_path = os.path.abspath(executable_path)
+    if not os.path.exists(executable_path):
+        error_message = f"Executable not found at path: {executable_path}"
+        _log_output(log_file, error_message)
+        print(error_message)
+        return return_code, stats
+
+    command = command_pre + [executable_path] + command_args
+    try:
+        if check_memory:
+            # trunk-ignore(bandit/B603)
+            process = subprocess.Popen(
+                command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+            )
+            ps_process = psutil.Process(process.pid)
+            peak_memory = 0
+
+            while process.poll() is None:
+                try:
+                    total_memory = ps_process.memory_info().rss
+                    for child in ps_process.children(recursive=True):
+                        total_memory += child.memory_info().rss
+                    peak_memory = max(peak_memory, total_memory)
+                except psutil.NoSuchProcess:
+                    pass
+                time.sleep(0.01)
+
+            stdout, stderr = process.communicate()
+            return_code = process.wait()
+
+            peak_memory_mb = peak_memory / (1024 * 1024)
+            stats["peak_memory"] = peak_memory_mb
+            _log_output(log_file, f"Peak memory usage: {peak_memory_mb:.2f} MB\n")
+        else:
+            # trunk-ignore(bandit/B603)
+            result = subprocess.run(
+                command, capture_output=True, text=True, check=False
+            )
+            stdout, stderr = result.stdout, result.stderr
+            return_code = result.returncode
+
+        # Decode stdout and stderr if they are in bytes
+        if isinstance(stdout, bytes):
+            stdout = stdout.decode("utf-8")
+        if isinstance(stderr, bytes):
+            stderr = stderr.decode("utf-8")
+
+        if return_code == 0:
+            _log_output(log_file, "Executable ran successfully.\nPASSED\n")
+        else:
+            error_message = (
+                f"Executable returned non-zero exit code: {return_code}\n"
+                f"Command: {' '.join(command)}\nFAILED\n"
+            )
+            _log_output(log_file, error_message)
+            print(error_message)
+
+        if stdout:
+            _log_output(log_file, "Standard output:\n" + stdout)
+        if stderr:
+            _log_output(log_file, "Standard error:\n" + stderr)
+
+    except FileNotFoundError as e:
+        error_message = f"File not found: {e}"
+        _log_output(log_file, error_message)
+        print(error_message)
+    except psutil.Error as e:
+        error_message = f"psutil error: {e}"
+        _log_output(log_file, error_message)
+        print(error_message)
+    except Exception as e:
+        error_message = f"An unexpected error occurred: {e}"
+        _log_output(log_file, error_message)
+        print(error_message)
+
+    return return_code, stats
+
+
+def _remove_file(filename):
+    try:
+        os.remove(filename)
+    except FileNotFoundError:
+        pass
+    # Make sure the file was removed
+    if os.path.exists(filename):
+        print(f"Failed to remove {filename}")
+        sys.exit(1)
+
+
+def _get_date_time():
+    now = datetime.datetime.now()
+    return now.strftime("%Y-%m-%d_%H-%M-%S")
+
+
+def _move_log_files(input_log_file, test_name):
+    # Move log_file to a unique name with the date and time
+    date_time = _get_date_time()
+    log_file_base = input_log_file.split(".")[0]
+    log_file = log_file_base + "_" + test_name + "_" + date_time + ".log"
+    os.rename(input_log_file, log_file)
+
+
+def _print_pass_fail(test_name, return_code, executable_time, extra_message=None):
+    GREEN = "\033[92m"  # Green text
+    RED = "\033[91m"  # Red text
+    RESET = "\033[0m"  # Reset color
+    TEST_NAME_WIDTH = 30
+    TIME_WIDTH = 12
+
+    status = f"{GREEN}PASS{RESET}" if return_code == 0 else f"{RED}FAIL{RESET}"
+    time_formatted = f"{executable_time:.4e}"
+    message = f"message: {extra_message}" if extra_message else ""
+
+    print(
+        f"    {status}: time(s): {time_formatted:>{TIME_WIDTH}} test: {test_name:<{TEST_NAME_WIDTH}}{message}"
+    )
+
+    if return_code != 0:
+        print(f"    Return code: {return_code}")
+
+
+class RegressionTest:
+
+    def __init__(self, test_name, executable_path, num_procs, exe_args):
+        self.test_name = test_name
+        self.log_file = "regression_test.log"
+        self.executable_path = executable_path
+        self.num_procs = num_procs
+        self.exe_args = exe_args
+        self.executable_time = 0
+        self.peak_memory = 0
+
+    def run(self):
+        _remove_file(self.log_file)
+        return_code, stats = self._run()
+        _print_pass_fail(self.test_name, return_code, self.executable_time)
+        _move_log_files(self.log_file, self.test_name)
+        return return_code, stats
+
+    def _run(self):
+        command_pre = ["mpirun", "-n", str(self.num_procs)]
+        # Time the executable
+        start_time = time.perf_counter()
+        return_code, stats = _run_executable(
+            command_pre,
+            self.executable_path,
+            self.exe_args,
+            self.log_file,
+            check_memory=True,
+        )
+        self.peak_memory = stats["peak_memory"]
+        end_time = time.perf_counter()
+        self.executable_time = end_time - start_time
+        return return_code, stats
+
+
+class PeakMemoryCheck:
+
+    def __init__(self, test_name, peak_memory, gold_peak_memory, tolerance_percent):
+        self.test_name = test_name
+        self.peak_memory = peak_memory
+        self.gold_peak_memory = gold_peak_memory
+        self.tolerance_percent = tolerance_percent / 100.0
+
+    def run(self):
+        # Check if the peak memory is within the tolerance
+        upper_limit = self.gold_peak_memory * (1.0 + self.tolerance_percent)
+        message = f"Peak memory value: {self.peak_memory:.2f} MB, Gold value: {self.gold_peak_memory:.2f} MB, Upper limit {upper_limit:.2f} MB"
+        return_code = 0
+        if self.peak_memory > upper_limit:
+            print(
+                f"    Peak memory ({self.peak_memory:.2f} MB) exceeded the gold peak memory ({self.gold_peak_memory:.2f} MB) by more than {self.tolerance_percent * 100.0}%"
+            )
+            return_code = 1
+        _print_pass_fail(self.test_name, return_code, 0, message)
+
+        return return_code
+
+
+class ExodiffCheck:
+
+    def __init__(
+        self,
+        test_name,
+        exodiff_path,
+        exodiff_file,
+        exodiff_results_file,
+        exodiff_gold_results_file,
+        exodiff_args,
+    ):
+        self.test_name = test_name
+        self.log_file = "exodiff_check.log"
+
+        # Check if exodiff_path is an absolute path or an executable name
+        if os.path.isabs(exodiff_path) and os.path.exists(exodiff_path):
+            self.exodiff_path = exodiff_path
+        else:
+            self.exodiff_path = shutil.which(exodiff_path)
+            if self.exodiff_path is None:
+                raise FileNotFoundError(f"Executable {exodiff_path} not found in PATH.")
+
+        self.exodiff_file = exodiff_file
+        self.exodiff_results_file = exodiff_results_file
+        self.exodiff_gold_results_file = exodiff_gold_results_file
+        self.exodiff_args = exodiff_args
+        self.executable_time = 0
+
+    def run(self):
+        _remove_file(self.log_file)
+        return_code = self._run()
+        _print_pass_fail(self.test_name, return_code, self.executable_time)
+        _move_log_files(self.log_file, self.test_name)
+        return return_code
+
+    def _run(self):
+        command_pre = []
+        # Time the executable
+        start_time = time.perf_counter()
+        return_code, _stats = _run_executable(
+            command_pre,
+            self.exodiff_path,
+            [
+                "-f",
+                self.exodiff_file,
+                self.exodiff_results_file,
+                self.exodiff_gold_results_file,
+            ]
+            + self.exodiff_args,
+            self.log_file,
+            check_memory=False,
+        )
+        end_time = time.perf_counter()
+        self.executable_time = end_time - start_time
+        return return_code
+
+
+def _parse_arguments():
+    # Define command line arguments
+    parser = argparse.ArgumentParser(
+        description="Run an executable and check its return value."
+    )
+    parser.add_argument("--name", help="Name of the test", default="test")
+    parser.add_argument(
+        "--num_procs", type=int, help="Number of processors for running the executable", default=1
+    )
+    parser.add_argument(
+        "--executable_path", help="Path to the executable", default="aperi-mech"
+    )
+    parser.add_argument("--exodiff_path", help="Path to exodiff", default="exodiff")
+    parser.add_argument(
+        "--exodiff_file", help="Path to exodiff file.", default="compare.exodiff"
+    )
+    parser.add_argument(
+        "--exodiff_gold_file",
+        help="Path to exodiff gold file.",
+        default="gold_results.exo",
+    )
+    parser.add_argument(
+        "--exodiff_results_file",
+        help="Path to exodiff results file.",
+        default="results.exo",
+    )
+    parser.add_argument(
+        "--exe_args", nargs="*", default=[], help="Additional arguments to pass to the executable"
+    )
+    parser.add_argument(
+        "--exodiff_args", nargs="*", default=[], help="Additional arguments to pass to exodiff"
+    )
+    parser.add_argument(
+        "--tolerance_percent",
+        help="Tolerance for peak memory check in percent",
+        type=float, default=10,
+    )
+    parser.add_argument(
+        "--peak_memory",
+        help="Peak memory usage in MB. If it is 0, the peak memory check will be skipped.",
+        type=float, default=0,
+    )
+
+    # Parse command line arguments
+    return parser.parse_args()
+
+
+def main():
+    # TODO(jake): CLI is not really used so may have issues. Need to test.
+    args = _parse_arguments()
+    regression_test = RegressionTest(
+        args.name + "_regression_test",
+        args.executable_path,
+        args.num_procs,
+        args.exe_args,
+    )
+    return_code, stats = regression_test.run()
+    if return_code == 0:
+        exodiff_test = ExodiffCheck(
+            args.name + "_exodiff_check",
+            args.exodiff_path,
+            args.exodiff_file,
+            args.exodiff_results_file,
+            args.exodiff_gold_file,
+            args.exodiff_args,
+        )
+        return_code = exodiff_test.run()
+
+    if return_code == 0 and args.peak_memory != 0:
+        memory_test = PeakMemoryCheck(
+            args.name + "_peak_memory_check",
+            stats["peak_memory"],
+            args.peak_memory,
+            args.tolerance_percent,
+        )
+        return_code = memory_test.run()
+
+    return return_code
+
+
+if __name__ == "__main__":
+    return_code = main()
+    exit(return_code)
diff --git a/test/utils/regression_test/test_files/.gitignore b/test/utils/regression_test/test_files/.gitignore
new file mode 100644
index 00000000..9cdcd1ae
--- /dev/null
+++ b/test/utils/regression_test/test_files/.gitignore
@@ -0,0 +1,2 @@
+*.log
+results.exo
diff --git a/test/utils/regression_test/test_files/bad_gold.exo b/test/utils/regression_test/test_files/bad_gold.exo
new file mode 100644
index 00000000..ef902a7e
--- /dev/null
+++ b/test/utils/regression_test/test_files/bad_gold.exo
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3392e9d194b04a69a0ea1253fcc589f104ef95502885b279532a97fe97110bd5
+size 5954480
diff --git a/test/utils/regression_test/test_files/bad_input.yaml b/test/utils/regression_test/test_files/bad_input.yaml
new file mode 100644
index 00000000..8deaf89e
--- /dev/null
+++ b/test/utils/regression_test/test_files/bad_input.yaml
@@ -0,0 +1,49 @@
+procedures:
+  - explicit_dynamics_procedure:
+      geometry:
+        mesh: non_existent_mesh.exo
+        parts:
+          - part:
+              set: block_1
+              formulation:
+                integration_scheme:
+                  strain_smoothing: ~
+                approximation_space:
+                  reproducing_kernel:
+                    kernel_radius_scale_factor: 0.03
+              material:
+                elastic:
+                  density: 2700.0 # kg / m^3
+                  #youngs_modulus: 78.2e9 # Pa, 78.2 GPa
+                  youngs_modulus: 38.1e9 # just scaling down a bit since there is no plasticity
+                  poissons_ratio: 0.3
+      initial_conditions:
+        - velocity:
+            sets:
+              - block_1
+            components:
+              Z: -373.0 # m/s
+      time_stepper:
+        direct_time_stepper:
+          time_increment: 0.00001
+          # time_end: 0.00001
+          time_end: 0.0005
+      output:
+        file: results.exo
+        time_start: 0
+        time_end: 1
+        time_increment: 0.00001
+      boundary_conditions:
+        - displacement:
+            sets:
+              - surface_1
+            components:
+              Z: 0.0
+            time_function:
+              ramp_function:
+                abscissa_values:
+                  - 0
+                  - 1
+                ordinate_values:
+                  - 0
+                  - 1
diff --git a/test/utils/regression_test/test_files/compare.exodiff b/test/utils/regression_test/test_files/compare.exodiff
new file mode 100644
index 00000000..1d7a3168
--- /dev/null
+++ b/test/utils/regression_test/test_files/compare.exodiff
@@ -0,0 +1,62 @@
+
+# *****************************************************************
+# EXODIFF (Version: 3.26) Modified: 2023-05-01
+# Authors: Richard Drake, rrdrake@sandia.gov
+#          Greg Sjaardema, gdsjaar@sandia.gov
+# Run on 2024/02/12 16:02:56 MST
+# *****************************************************************
+
+# FILE 1: /Users/jake/projects/aperi-mech_test/small_taylor_bar/gold_results.exo
+# Title: IOSS Default Output Title
+# Dim = 3, Nodes = 325, Elements = 1152, Faces = 0, Edges = 0
+# Element Blocks = 1, Face Blocks = 0, Edge Blocks = 0, Nodesets = 0, Sidesets = 1
+# Vars: Global = 0, Nodal = 15, Element = 0, Face = 0, Edge = 0, Nodeset = 0, Sideset = 0, Times = 2
+
+
+# ==============================================================
+# NOTE: All node and element ids are reported as global ids.
+
+# NOTES: - The min/max values are reporting the min/max in absolute value.
+#        - Time values (t) are 1-offset time step numbers.
+#        - Element block numbers are the block ids.
+#        - Node(n) and element(e) numbers are 1-offset.
+
+COORDINATES absolute 1.e-6 # min separation not calculated
+
+TIME STEPS relative 1.e-6 floor 0.0 # min: 0 @ t1 max: 0.0005 @ t2
+
+# No GLOBAL VARIABLES
+
+NODAL VARIABLES relative 1.e-6 floor 0.0
+    displacement_x # min: 0 @ t1,n1 max: 0.0057758469 @ t2,n1
+    displacement_y # min: 0 @ t1,n1 max: 0.0057758469 @ t2,n1
+    displacement_z # min: 0 @ t1,n1 max: 0.07500381 @ t2,n325
+    acceleration_x # min: 0 @ t1,n1 max: 4262230.4 @ t2,n196
+    acceleration_y # min: 0 @ t1,n1 max: 4262230.4 @ t2,n180
+    acceleration_z # min: 0 @ t1,n1 max: 10355228 @ t2,n196
+    force_x # min: 0 @ t1,n1 max: 5788537.5 @ t2,n167
+    force_y # min: 0 @ t1,n1 max: 5788537.5 @ t2,n159
+    force_z # min: 0 @ t1,n1 max: 35633295 @ t2,n13
+    kernel_radius
+    mass_x # min: 0.21015166 @ t1,n5 max: 2.5218199 @ t1,n32
+    mass_y # min: 0.21015166 @ t1,n5 max: 2.5218199 @ t1,n32
+    mass_z # min: 0.21015166 @ t1,n5 max: 2.5218199 @ t1,n32
+    mass_from_elements_x # min: 0.21015166 @ t1,n5 max: 2.5218199 @ t1,n32
+    mass_from_elements_y # min: 0.21015166 @ t1,n5 max: 2.5218199 @ t1,n32
+    mass_from_elements_z # min: 0.21015166 @ t1,n5 max: 2.5218199 @ t1,n32
+    num_neighbors
+    velocity_x # min: 0 @ t1,n1 max: 117.84956 @ t2,n200
+    velocity_y # min: 0 @ t1,n1 max: 117.84956 @ t2,n200
+    velocity_z # min: 0 @ t2,n1 max: 565.14804 @ t2,n301
+
+ELEMENT VARIABLES relative 1.e-6 floor 0.0
+    num_neighbors # min: 12 @ t1,b1,e1 max: 16 @ t1,b1,e7
+
+# No NODESET VARIABLES
+
+# No SIDESET VARIABLES
+
+# No EDGE BLOCK VARIABLES
+
+# No FACE BLOCK VARIABLES
+
diff --git a/test/utils/regression_test/test_files/good_gold.exo b/test/utils/regression_test/test_files/good_gold.exo
new file mode 100644
index 00000000..c7d01e1d
--- /dev/null
+++ b/test/utils/regression_test/test_files/good_gold.exo
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b5e694312e3111e515da7e98c0d4710e9655d0440b61a5135f230996d09021c
+size 5954480
diff --git a/test/utils/regression_test/test_files/input.yaml b/test/utils/regression_test/test_files/input.yaml
new file mode 100644
index 00000000..65a65d48
--- /dev/null
+++ b/test/utils/regression_test/test_files/input.yaml
@@ -0,0 +1,49 @@
+procedures:
+  - explicit_dynamics_procedure:
+      geometry:
+        mesh: mesh_1x1x5.exo
+        parts:
+          - part:
+              set: block_1
+              formulation:
+                integration_scheme:
+                  strain_smoothing: ~
+                approximation_space:
+                  reproducing_kernel:
+                    kernel_radius_scale_factor: 0.03
+              material:
+                elastic:
+                  density: 2700.0 # kg / m^3
+                  #youngs_modulus: 78.2e9 # Pa, 78.2 GPa
+                  youngs_modulus: 38.1e9 # just scaling down a bit since there is no plasticity
+                  poissons_ratio: 0.3
+      initial_conditions:
+        - velocity:
+            sets:
+              - block_1
+            components:
+              Z: -373.0 # m/s
+      time_stepper:
+        direct_time_stepper:
+          time_increment: 0.00001
+          # time_end: 0.00001
+          time_end: 0.0005
+      output:
+        file: results.exo
+        time_start: 0
+        time_end: 1
+        time_increment: 0.00001
+      boundary_conditions:
+        - displacement:
+            sets:
+              - surface_1
+            components:
+              Z: 0.0
+            time_function:
+              ramp_function:
+                abscissa_values:
+                  - 0
+                  - 1
+                ordinate_values:
+                  - 0
+                  - 1
diff --git a/test/utils/regression_test/test_files/mesh_1x1x5.exo b/test/utils/regression_test/test_files/mesh_1x1x5.exo
new file mode 100644
index 00000000..fffb31f7
--- /dev/null
+++ b/test/utils/regression_test/test_files/mesh_1x1x5.exo
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ce260bfc78ad4f902e4a7d7385917e1a1da56800ec3f0a9a3536c5581dc8fac
+size 6240
diff --git a/test/utils/regression_test/test_regression_test.py b/test/utils/regression_test/test_regression_test.py
new file mode 100755
index 00000000..95cc4407
--- /dev/null
+++ b/test/utils/regression_test/test_regression_test.py
@@ -0,0 +1,113 @@
+import os
+import shutil
+import unittest
+
+from regression_test import ExodiffCheck, RegressionTest
+
+
+class TestRegressionTest(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        # Find the exodiff executable
+        cls.exodiff = None
+        for path in os.environ["PATH"].split(os.pathsep):
+            exodiff = os.path.join(path, "exodiff")
+            if os.path.exists(exodiff):
+                cls.exodiff = exodiff
+                break
+
+        if cls.exodiff is None:
+            error_msg = "exodiff not found in PATH"
+            error_msg += "\nPlease make sure the environment is set up correctly:"
+            error_msg += "\n  - Activate the correct spack environment. e.g. 'spack env activate '"
+            raise FileNotFoundError(error_msg)
+
+        # Make sure aperi-mech is in the PATH
+        cls.aperi_mech = shutil.which("aperi-mech")
+        if cls.aperi_mech is None:
+            error_msg = "aperi-mech not found in PATH"
+            error_msg += "\nPlease make sure the environment is set up correctly:"
+            error_msg += "\n  - Make sure 'aperi-mech' is in the PATH. e.g. 'which aperi-mech' to check. If not, add it to the PATH: 'export PATH=$PATH:/path/to/aperi-mech'"
+            raise FileNotFoundError(error_msg)
+
+        # Get the current directory
+        cls.current_dir = os.path.dirname(os.path.abspath(__file__))
+
+        # Clean up any old files
+        for root, _dirs, files in os.walk(cls.current_dir):
+            for f in files:
+                if f.startswith("regression_test_") or f.startswith("exodiff_check_"):
+                    if f.endswith(".log"):
+                        os.remove(os.path.join(root, f))
+            if "test_output.log" in files:
+                os.remove(os.path.join(root, "test_output.log"))
+
+    def setUp(self):
+        self.original_dir = os.getcwd()
+        os.chdir(os.path.join(self.__class__.current_dir, "test_files"))
+
+    def tearDown(self):
+        # Clean up any old files, but keep the log files
+        if os.path.exists("results.exo"):
+            os.remove("results.exo")
+        # Change back to the original directory
+        os.chdir(self.original_dir)
+
+    def test_run_regression_test_fail(self):
+        # Setup
+        test = RegressionTest(
+            "fail_run", self.__class__.aperi_mech, 1, ["bad_input.yaml"]
+        )
+
+        # Run the executable and verify the return code
+        result, _stats = test.run()
+        self.assertNotEqual(result, 0)
+
+    def test_run_exodiff_check_fail(self):
+        # Setup
+        test = RegressionTest(
+            "fail_exodiff", self.__class__.aperi_mech, 1, ["input.yaml"]
+        )
+        exodiff_check = ExodiffCheck(
+            "fail_exodiff",
+            self.__class__.exodiff,
+            "compare.exodiff",
+            "results.exo",
+            "bad_gold.exo",
+            [],
+        )
+
+        # Run the executable and verify the return code
+        result, _stats = test.run()
+        self.assertEqual(result, 0)
+
+        # Run exodiff and verify the return code
+        result = exodiff_check.run()
+        self.assertNotEqual(result, 0)
+
+    def test_run_exodiff_check_success(self):
+        # Setup
+        test = RegressionTest(
+            "success_test", self.__class__.aperi_mech, 1, ["input.yaml"]
+        )
+        exodiff_check = ExodiffCheck(
+            "success_test",
+            self.__class__.exodiff,
+            "compare.exodiff",
+            "results.exo",
+            "good_gold.exo",
+            [],
+        )
+
+        # Run the executable and verify the return code
+        result, _stats = test.run()
+        self.assertEqual(result, 0)
+
+        # Run exodiff and verify the return code
+        result = exodiff_check.run()
+        self.assertEqual(result, 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/utils/unit_test_all_modules.py b/test/utils/unit_test_all_modules.py
new file mode 100644
index 00000000..a8b7557a
--- /dev/null
+++ b/test/utils/unit_test_all_modules.py
@@ -0,0 +1,87 @@
+import os
+import sys
+import unittest
+
+
+class CustomTextTestResult(unittest.TextTestResult):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.passed_tests = []
+        self.failed_tests = []
+
+    def startTest(self, test):
+        super().startTest(test)
+
+    def addSuccess(self, test):
+        self.passed_tests.append(str(test))
+        super().addSuccess(test)
+
+    def addFailure(self, test, err):
+        self.failed_tests.append(str(test))
+        super().addFailure(test, err)
+
+
+class CustomTextTestRunner(unittest.TextTestRunner):
+    def _makeResult(self):
+        return CustomTextTestResult(self.stream, self.descriptions, self.verbosity)
+
+
+def run_tests(test_dirs):
+    fail = False
+    all_passed_tests = []
+    all_failed_tests = []
+
+    for test_dir in test_dirs:
+        print(f"Running tests in {test_dir}")
+        try:
+            # Add the test directory to the system path
+            sys.path.append(os.path.abspath(test_dir))
+            # Discover and run tests
+            loader = unittest.TestLoader()
+            suite = loader.discover(test_dir)
+            runner = CustomTextTestRunner()
+            result = runner.run(suite)
+            all_passed_tests.extend(result.passed_tests)
+            all_failed_tests.extend(result.failed_tests)
+            if not result.wasSuccessful():
+                fail = True
+        except Exception as e:
+            print(f"An error occurred while running tests in {test_dir}: {e}")
+            fail = True
+        finally:
+            # Remove the test directory from the system path
+            sys.path.pop()
+
+    return fail, all_passed_tests, all_failed_tests
+
+
+if __name__ == "__main__":
+    test_dirs = ["regression_test"]
+    fail, all_passed_tests, all_failed_tests = run_tests(test_dirs)
+
+    print("\n############## TEST SUMMARY ##############")
+    print("Passed tests:")
+    for test_name in all_passed_tests:
+        print(f"  - {test_name}")
+
+    print("Failed tests:")
+    for test_name in all_failed_tests:
+        print(f"  - {test_name}")
+
+    print(
+        "Note: Some tests above check for failure, so a test that has 'FAIL' in the output may actually be passing."
+    )
+    print("------------------------------------------")
+    if fail:
+        print("Overall result: \033[91mFAIL\033[0m")
+        print("##########################################")
+        sys.exit(1)
+    elif len(all_passed_tests) == 0:
+        print("No tests were run.")
+        print("Overall result: \033[91mFAIL\033[0m")
+        print("##########################################")
+        sys.exit(1)
+    else:
+        print("Overall result: \033[92mPASS\033[0m")
+        print("##########################################")
+        sys.exit(0)
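
Usage note (an illustrative sketch, not part of the patch): the utilities in
test/utils/regression_test compose as below. The file names here (input.yaml,
compare.exodiff, gold_results.exo) are placeholders for whatever a given test
directory provides, and aperi-mech and exodiff are assumed to be on PATH.

    from utils.regression_test import ExodiffCheck, RegressionTest

    # Launch the solver under mpirun and collect timing/peak-memory stats
    test = RegressionTest("demo", "aperi-mech", 1, ["input.yaml"])
    return_code, stats = test.run()

    # If the run succeeded, diff the new results against the gold file
    if return_code == 0:
        check = ExodiffCheck(
            "demo_exodiff",
            "exodiff",
            "compare.exodiff",
            "results.exo",
            "gold_results.exo",
            [],
        )
        return_code = check.run()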