diff --git a/.github/workflows/.github-ci.yml b/.github/workflows/.github-ci.yml
new file mode 100644
index 000000000..1e5681d9f
--- /dev/null
+++ b/.github/workflows/.github-ci.yml
@@ -0,0 +1,48 @@
+# yamllint disable
+---
+#---------------------------------------------------------------------------------------------------
+env:
+  TEST_TIMING_OPTION: ""  # Set the option if local test should be timed or not. Default is off.
+  PYTHON_PACKAGE_MANAGER: "conda"  # Python package manager to create the python environments
+name: github_ci
+
+on: [push]
+
+jobs:
+  run_tests:
+    runs-on: ubuntu-latest
+    container:
+      image: ghcr.io/4c-multiphysics/4c:latest
+      options: --user root --env OMPI_ALLOW_RUN_AS_ROOT=1 --env OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
+    defaults:
+      run:
+        shell: bash -l {0}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: conda-incubator/setup-miniconda@v3
+      - name: build
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y rsync
+          ln -s /home/user/4C/build/4C config/4C
+          ln -s /home/user/4C/build/post_ensight config/post_ensight
+          ln -s /home/user/4C/build/post_processor config/post_processor
+          # /usr/bin/mpirun --bind-to none --use-hwthread-cpus -np 2 config/4C /home/user/4C/tests/input_files/solid_runtime_hex8.dat out
+          $PYTHON_PACKAGE_MANAGER env create -f environment.yml
+          $PYTHON_PACKAGE_MANAGER activate queens
+          pip install -e .[develop]
+          $PYTHON_PACKAGE_MANAGER env export > pipeline_conda_environment.yml
+          $PYTHON_PACKAGE_MANAGER list
+      - name: codechecks
+        run: |
+          $PYTHON_PACKAGE_MANAGER activate queens
+          isort --check-only queens tests > track_isort.txt 2>&1
+          black --check queens tests > track_black.txt 2>&1
+          pylint --rcfile=.pylintrc_ci queens tests | tee track_pylint.txt 2>&1
+          pylint queens tests --rcfile=.pylintrc --output-format=json:pylint_warnings.json --fail-under 0
+          python .gitlab/pipeline_utils/code_quality_creator.py pylint_warnings.json
+          pydocstyle --match-dir='^(?!.*test).*$' queens > track_pydocstyle.txt 2>&1
+      - name: tests
+        run: |
+          $PYTHON_PACKAGE_MANAGER activate queens
+          pytest -v -m "unit_tests or integration_tests or integration_tests_fourc" -o log_cli=true --log-cli-level=INFO --cov --cov-report=term --cov-report=html:html_coverage_report --cov-report=xml:xml_coverage_report.xml $TEST_TIMING_OPTION --color=yes --junitxml=test_junit.xml
diff --git a/tests/input_files/queens/bmfia_rpvi_park_gp_template.yml b/tests/input_files/queens/bmfia_rpvi_park_gp_template.yml
index 6325e56f4..3b52bd03c 100644
--- a/tests/input_files/queens/bmfia_rpvi_park_gp_template.yml
+++ b/tests/input_files/queens/bmfia_rpvi_park_gp_template.yml
@@ -67,7 +67,7 @@ mf_approx:
   stochastic_optimizer_name: gp_optimizer
 bmfia_interface:
   type: bmfia_interface
-  num_processors_multi_processing: 4
+  num_processors_multi_processing: 2
   probabilistic_mapping_type: per_coordinate
 bmfia_iterator:
   type: bmfia
diff --git a/tests/input_files/queens/bmfia_smc_park.yml b/tests/input_files/queens/bmfia_smc_park.yml
index f4ded8b6f..1aa1bb573 100644
--- a/tests/input_files/queens/bmfia_smc_park.yml
+++ b/tests/input_files/queens/bmfia_smc_park.yml
@@ -42,7 +42,7 @@ mf_approx:
   mean_function_type: identity_multi_fidelity
 bmfia_interface:
   type: bmfia_interface
-  num_processors_multi_processing: 4
+  num_processors_multi_processing: 2
   probabilistic_mapping_type: per_coordinate
 optimizer:
   type: adam
diff --git a/tests/input_files/third_party/fourc/coarse_plate_dirichlet_template.dat b/tests/input_files/third_party/fourc/coarse_plate_dirichlet_template.dat
index 44769c5e3..99b250b9f 100644
--- a/tests/input_files/third_party/fourc/coarse_plate_dirichlet_template.dat
+++ b/tests/input_files/third_party/fourc/coarse_plate_dirichlet_template.dat
@@ -11,8 +11,8 @@ created by pre_exodus
 DIM 3
 -----------------------------------------------------DISCRETISATION
 NUMSTRUCDIS 1
---------------------------------------------------------PROBLEM TYP
-PROBLEMTYP Structure
+--------------------------------------------------------PROBLEM TYPE
+PROBLEMTYPE Structure
 RESTART 0
 SHAPEFCT Polynomial
 -----------------------------------------------------------------IO
@@ -23,7 +23,7 @@ STRUCT_STRAIN Yes
 FILESTEPS 1000
 ----------------------------------------------------------STRUCTURAL DYNAMIC
 LINEAR_SOLVER 1
-DYNAMICTYP Statics
+DYNAMICTYPE Statics
 RESULTSEVRY 1
 RESTARTEVRY 0
 NLNSOL fullnewton
@@ -484,5 +484,3 @@ NODE 121 COORD -1.2500000000000000e+01 -1.2500000000000000e+01 0.00000000
 ---------------------------------------------------CELLSCATRA ELEMENTS
 ---------------------------------------------------ELECTROMAGNETIC ELEMENTS
 -------------------------------------------------------ARTERY ELEMENTS
----------------------------------------------------------------END
-// END
diff --git a/tests/input_files/third_party/fourc/solid_runtime_hex8.dat b/tests/input_files/third_party/fourc/solid_runtime_hex8.dat
index d75230a02..d9d5d6794 100644
--- a/tests/input_files/third_party/fourc/solid_runtime_hex8.dat
+++ b/tests/input_files/third_party/fourc/solid_runtime_hex8.dat
@@ -2,9 +2,9 @@
 This is a simple test that tests the extrapolation of stressees from
 Gauss points to nodes for a hex8 discretization
-4C git SHA1: b23a601ba839ee3749c44a8d4436fb7c509f9d69
--------------------------------------------------------------------PROBLEM TYP
-PROBLEMTYP Structure
+tested date: 19.12.2024
+-------------------------------------------------------------------PROBLEM TYPE
+PROBLEMTYPE Structure
 ----------------------------------------------------------------------------IO
 OUTPUT_BIN yes
 STRUCT_DISP yes
@@ -29,7 +29,7 @@ NAME Structure_Solver
 SOLVER Superlu
 ------------------------------------------------------------STRUCTURAL DYNAMIC
 INT_STRATEGY Standard
-DYNAMICTYP Statics
+DYNAMICTYPE Statics
 PRESTRESSTOLDISP 1e-9
 RESULTSEVRY 1
 RESTARTEVRY 1
@@ -85,4 +85,3 @@ NODE 12 COORD 2.0 1.0 1.0
 ------------------------------------------------------------STRUCTURE ELEMENTS
 1 SOLID HEX8 1 5 6 2 3 7 8 4 MAT 1 KINEM nonlinear
 2 SOLID HEX8 5 9 10 6 7 11 12 8 MAT 1 KINEM nonlinear
----------------------------------------------------------------------------END
diff --git a/tests/integration_tests/fourc/test_fourc_mc.py b/tests/integration_tests/fourc/test_fourc_mc.py
index a3c162e96..6108f722a 100644
--- a/tests/integration_tests/fourc/test_fourc_mc.py
+++ b/tests/integration_tests/fourc/test_fourc_mc.py
@@ -14,6 +14,8 @@
 #
 """Test 4C run."""
 
+import logging
+
 import numpy as np
 
 from queens.data_processor.data_processor_pvd import DataProcessorPvd
@@ -24,7 +26,10 @@
 from queens.models.simulation_model import SimulationModel
 from queens.parameters.parameters import Parameters
 from queens.schedulers.local_scheduler import LocalScheduler
-from queens.utils.io_utils import load_result
+from queens.utils.config_directories import experiment_directory
+from queens.utils.io_utils import load_result, read_file
+
+_logger = logging.getLogger(__name__)
 
 
 def test_fourc_mc(
@@ -59,6 +64,7 @@
         input_templates=fourc_input_file_template,
         executable=fourc_executable,
         data_processor=data_processor,
+        mpi_cmd="/usr/bin/mpirun --bind-to none --use-hwthread-cpus",
     )
     model = SimulationModel(scheduler=scheduler, driver=driver)
     iterator = MonteCarloIterator(
@@ -76,7 +82,18 @@
     # Load results
     results = load_result(global_settings.result_file(".pickle"))
 
-    # assert statements
-    np.testing.assert_array_almost_equal(
-        results["raw_output_data"]["result"], fourc_example_expected_output, decimal=6
-    )
+    try:
+        # assert statements
+        np.testing.assert_array_almost_equal(
+            results["raw_output_data"]["result"], fourc_example_expected_output, decimal=6
+        )
+    except (AssertionError, KeyError) as error:
+        experiment_dir = experiment_directory(global_settings.experiment_name)
+        job_dir = experiment_dir / "0"
+        _logger.info(list(job_dir.iterdir()))
+        output_dir = job_dir / "output"
+        _logger.info(list(output_dir.iterdir()))
+
+        _logger.info(read_file(output_dir / "test_fourc_mc_0.err"))
+        _logger.info(read_file(output_dir / "test_fourc_mc_0.log"))
+        raise error
diff --git a/tests/integration_tests/fourc/test_fourc_mc_random_field_ensight.py b/tests/integration_tests/fourc/test_fourc_mc_random_field_ensight.py
index 82aabf72c..dd0855ddd 100644
--- a/tests/integration_tests/fourc/test_fourc_mc_random_field_ensight.py
+++ b/tests/integration_tests/fourc/test_fourc_mc_random_field_ensight.py
@@ -14,6 +14,8 @@
 #
 """Test 4C with RF materials."""
 
+import logging
+
 import numpy as np
 import pytest
 
@@ -26,7 +28,10 @@
 from queens.parameters.fields.kl_field import KarhunenLoeveRandomField
 from queens.parameters.parameters import Parameters
 from queens.schedulers.local_scheduler import LocalScheduler
-from queens.utils.io_utils import load_result
+from queens.utils.config_directories import experiment_directory
+from queens.utils.io_utils import load_result, read_file
+
+_logger = logging.getLogger(__name__)
 
 
 class DummyKLField(KarhunenLoeveRandomField):
@@ -128,9 +133,20 @@ def test_write_random_material_to_dat(
     # Load results
     results = load_result(global_settings.result_file(".pickle"))
 
-    # Check if we got the expected results
-    np.testing.assert_array_almost_equal(results["mean"], expected_mean, decimal=8)
-    np.testing.assert_array_almost_equal(results["var"], expected_var, decimal=8)
+    try:
+        # Check if we got the expected results
+        np.testing.assert_array_almost_equal(results["mean"], expected_mean, decimal=8)
+        np.testing.assert_array_almost_equal(results["var"], expected_var, decimal=8)
+    except (AssertionError, KeyError) as error:
+        experiment_dir = experiment_directory(global_settings.experiment_name)
+        job_dir = experiment_dir / "0"
+        _logger.info(list(job_dir.iterdir()))
+        output_dir = job_dir / "output"
+        _logger.info(list(output_dir.iterdir()))
+
+        _logger.info(read_file(output_dir / "test_write_random_material_to_dat_0.err"))
+        _logger.info(read_file(output_dir / "test_write_random_material_to_dat_0.log"))
+        raise error
 
 
 @pytest.fixture(name="expected_mean")
diff --git a/tests/integration_tests/python/test_bmfia_park.py b/tests/integration_tests/python/test_bmfia_park.py
index a2e72c4d6..21ff0cc77 100644
--- a/tests/integration_tests/python/test_bmfia_park.py
+++ b/tests/integration_tests/python/test_bmfia_park.py
@@ -74,7 +74,7 @@ def test_bmfia_smc_park(
         coordinate_labels=["x3", "x4"],
     )
     mf_interface = BmfiaInterface(
-        num_processors_multi_processing=4,
+        num_processors_multi_processing=2,
         probabilistic_mapping_type="per_coordinate",
     )
     stochastic_optimizer = Adam(
@@ -181,7 +181,7 @@ def test_bmfia_rpvi_gp_park(
         coordinate_labels=["x3", "x4"],
     )
     mf_interface = BmfiaInterface(
-        num_processors_multi_processing=4,
+        num_processors_multi_processing=2,
         probabilistic_mapping_type="per_coordinate",
     )
     stochastic_optimizer = Adam(
diff --git a/tests/integration_tests/python/test_constrained_gp_ip.py b/tests/integration_tests/python/test_constrained_gp_ip.py
index a2ed39b1a..ed985303f 100644
--- a/tests/integration_tests/python/test_constrained_gp_ip.py
+++ b/tests/integration_tests/python/test_constrained_gp_ip.py
@@ -79,9 +79,9 @@ def fixture_likelihood_model(parameters, global_settings):
 def fixture_expected_mean():
     """Expected mean values."""
     expected_mean = {
-        "GPMAP-I": [0.30465568, 0.52168328],
-        "CGPMAP-II": [0.29862195, 0.74123874],
-        "CFBGP": [0.29330584, 0.96121542],
+        "GPMAP-I": [0.301425, 0.653193],
+        "CGPMAP-II": [0.301557, 0.64682],
+        "CFBGP": [0.301444, 0.653865],
     }
     return expected_mean
 
@@ -90,9 +90,9 @@ def fixture_expected_mean():
 def fixture_expected_std():
     """Expected standard deviation values."""
     expected_std = {
-        "GPMAP-I": [0.00105374, 0.03230814],
-        "CGPMAP-II": [0.00197814, 0.04068283],
-        "CFBGP": [0.00156066, 0.02839873],
+        "GPMAP-I": [0.00086233, 0.02220657],
+        "CGPMAP-II": [0.00087329, 0.02323444],
+        "CFBGP": [0.001561, 0.028399],
     }
     return expected_std
 
@@ -106,15 +106,12 @@ def test_constrained_gp_ip_park(
     global_settings,
 ):
     """Test for constrained GP with IP park."""
-    num_steps = 3
+    num_steps = 4
     num_new_samples = 4
     num_initial_samples = int(num_new_samples * 2)
     quantile = 0.90
     seed = 41
 
-    if approx_type == "CFBGP":
-        num_steps = 2
-
     logpdf_gp_model = LogpdfGPModel(
         approx_type=approx_type,
         num_hyper=10,
diff --git a/tests/unit_tests/interfaces/test_bmfia_interface.py b/tests/unit_tests/interfaces/test_bmfia_interface.py
index a8c8a0045..d5cd61308 100644
--- a/tests/unit_tests/interfaces/test_bmfia_interface.py
+++ b/tests/unit_tests/interfaces/test_bmfia_interface.py
@@ -194,7 +194,7 @@ def test_init():
 def test__init__():
     """Test the instantiation of the interface object."""
     instantiate_probabilistic_mappings = BmfiaInterface.instantiate_per_coordinate
-    num_processors_multi_processing = 3
+    num_processors_multi_processing = 2
     evaluate_method = BmfiaInterface.evaluate_per_coordinate
     evaluate_and_gradient_method = BmfiaInterface.evaluate_and_gradient_per_coordinate
     update_mappings_method = BmfiaInterface.update_mappings_per_coordinate
@@ -245,7 +245,7 @@ def test_build_approximation(
         return_value=dummy_plot_instance,
     )
 
-    default_bmfia_interface.num_processors_multi_processing = 3
+    default_bmfia_interface.num_processors_multi_processing = 2
 
     # Test with wrong input dimensions (2D Tensor) --> ValueError
     with pytest.raises(IndexError):
@@ -371,7 +371,7 @@ def test_train_probabilistic_mappings_in_parallel(
 
     # test with valid configuration
     num_coords = Z_LF_train.T.shape[0]
-    num_processors_multi_processing = 3
+    num_processors_multi_processing = 2
     return_state_list = BmfiaInterface.train_probabilistic_mappings_in_parallel(
         num_coords, num_processors_multi_processing, default_probabilistic_obj_lst
     )
diff --git a/tests/unit_tests/models/test_model.py b/tests/unit_tests/models/test_model.py
index c432d4e5a..151aea457 100644
--- a/tests/unit_tests/models/test_model.py
+++ b/tests/unit_tests/models/test_model.py
@@ -41,15 +41,15 @@ def fixture_model():
 def test_init(model):
     """Test init."""
     assert model.response is None
-    assert model.evaluate_and_gradient_bool is False
+    assert not model.evaluate_and_gradient_bool
 
 
 def test_evaluate_and_gradient(model):
     """Test evaluate_and_gradient method."""
-    assert model.evaluate_and_gradient_bool is False
+    assert not model.evaluate_and_gradient_bool
 
     def model_eval(self, x):
-        assert self.evaluate_and_gradient_bool is True
+        assert self.evaluate_and_gradient_bool
         return {"result": np.sum(x**2, axis=1, keepdims=True)}
 
     model.grad = Mock(
@@ -81,4 +81,4 @@ def model_eval(self, x):
         model.grad.call_args.kwargs["upstream_gradient"], upstream_[:, np.newaxis]
     )
 
-    assert model.evaluate_and_gradient_bool is False
+    assert not model.evaluate_and_gradient_bool