From 9d1625fdcc38539f07052839b1f0b6845acb3534 Mon Sep 17 00:00:00 2001 From: "Lea J. Haeusel" Date: Mon, 30 Dec 2024 16:47:35 +0100 Subject: [PATCH] Introduce jobscript exit code check (#3) * feat: introduce jobscript exit code check * fix: incorporate feedback * fix: Improve SubprocessError construction --- queens/data_processor/__init__.py | 4 + queens/drivers/fourc_driver.py | 2 +- queens/drivers/jobscript_driver.py | 153 ++++++--- tests/integration_tests/fourc/conftest.py | 2 +- tests/unit_tests/conftest.py | 6 + .../drivers/test_jobscript_driver.py | 294 +++++++++++++++--- tests/unit_tests/utils/test_rsync.py | 6 - 7 files changed, 362 insertions(+), 105 deletions(-) diff --git a/queens/data_processor/__init__.py b/queens/data_processor/__init__.py index d53caa82..1e5e9f70 100644 --- a/queens/data_processor/__init__.py +++ b/queens/data_processor/__init__.py @@ -23,10 +23,14 @@ DataProcessorEnsightInterfaceDiscrepancy, ) from queens.data_processor.data_processor_numpy import DataProcessorNumpy +from queens.data_processor.data_processor_pvd import DataProcessorPvd +from queens.data_processor.data_processor_txt import DataProcessorTxt VALID_TYPES = { "csv": DataProcessorCsv, "ensight": DataProcessorEnsight, "ensight_interface_discrepancy": DataProcessorEnsightInterfaceDiscrepancy, "numpy": DataProcessorNumpy, + "pvd": DataProcessorPvd, + "txt": DataProcessorTxt, } diff --git a/queens/drivers/fourc_driver.py b/queens/drivers/fourc_driver.py index e5e574c1..04060e34 100644 --- a/queens/drivers/fourc_driver.py +++ b/queens/drivers/fourc_driver.py @@ -38,7 +38,7 @@ def __init__( files_to_copy=None, data_processor=None, gradient_data_processor=None, - post_processor=None, + post_processor="", post_options="", mpi_cmd="/usr/bin/mpirun --bind-to none", ): diff --git a/queens/drivers/jobscript_driver.py b/queens/drivers/jobscript_driver.py index 83bf930a..956d534c 100644 --- a/queens/drivers/jobscript_driver.py +++ b/queens/drivers/jobscript_driver.py @@ -14,11 +14,13 @@ # """Driver to run a jobscript.""" + import logging from dataclasses import dataclass from pathlib import Path from queens.drivers.driver import Driver +from queens.utils.exceptions import SubprocessError from queens.utils.injector import inject, inject_in_template from queens.utils.io_utils import read_file from queens.utils.logger_settings import log_init_args @@ -50,7 +52,7 @@ def to_dict(self): """Create a job options dict. Returns: - dict: dictionary with all the data + dict: Dict containing all the data. """ dictionary = self.__dict__.copy() dictionary.update(dictionary.pop("input_files")) @@ -60,10 +62,10 @@ def add_data_and_to_dict(self, additional_data): """Add additional options to the job options dict. Args: - additional_data (dict): Additional data to combine with the job options + additional_data (dict): Additional data to combine with the job options. Returns: - _type_: _description_ + dict: Dict combining the job options and the additional data. """ return self.to_dict() | additional_data @@ -72,12 +74,14 @@ class JobscriptDriver(Driver): """Driver to run an executable with a jobscript. 
Attributes: - input_templates (Path): read in simulation input template as string - data_processor (obj): instance of data processor class - gradient_data_processor (obj): instance of data processor class for gradient data - jobscript_template (str): read in jobscript template as string - jobscript_options (dict): Dictionary containing jobscript options - jobscript_file_name (str): Jobscript file name (default: 'jobscript.sh') + input_templates (Path): Read in simulation input template as string. + data_processor (obj): Instance of data processor class. + gradient_data_processor (obj): Instance of data processor class for gradient data. + jobscript_template (str): Read-in jobscript template. + jobscript_options (dict): Dictionary containing jobscript options. + jobscript_file_name (str): Jobscript file name (default: 'jobscript.sh'). + raise_error_on_jobscript_failure (bool): Whether to raise an error for a non-zero jobscript + exit code. """ @log_init_args @@ -92,47 +96,48 @@ def __init__( gradient_data_processor=None, jobscript_file_name="jobscript.sh", extra_options=None, + raise_error_on_jobscript_failure=True, ): """Initialize JobscriptDriver object. Args: - parameters (Parameters): Parameters object - input_templates (str, Path, dict): path(s) to simulation input template - jobscript_template (str, Path): path to jobscript template or read in jobscript template - executable (str, Path): path to main executable of respective software - files_to_copy (list, opt): files or directories to copy to experiment_dir - data_processor (obj, opt): instance of data processor class - gradient_data_processor (obj, opt): instance of data processor class for gradient data - jobscript_file_name (str): Jobscript file name (default: 'jobscript.sh') - extra_options (dict): Extra options to inject into jobscript template + parameters (Parameters): Parameters object. + input_templates (str, Path, dict): Path(s) to simulation input template. + jobscript_template (str, Path): Path to jobscript template or read-in jobscript + template. + executable (str, Path): Path to main executable of respective software. + files_to_copy (list, opt): Files or directories to copy to experiment_dir. + data_processor (obj, opt): Instance of data processor class. + gradient_data_processor (obj, opt): Instance of data processor class for gradient data. + jobscript_file_name (str, opt): Jobscript file name (default: 'jobscript.sh'). + extra_options (dict, opt): Extra options to inject into jobscript template. + raise_error_on_jobscript_failure (bool, opt): Whether to raise an error for a non-zero + jobscript exit code. """ super().__init__(parameters=parameters, files_to_copy=files_to_copy) self.input_templates = self.create_input_templates_dict(input_templates) + self.jobscript_template = self.get_read_in_jobscript_template(jobscript_template) self.files_to_copy.extend(self.input_templates.values()) self.data_processor = data_processor self.gradient_data_processor = gradient_data_processor - if Path(jobscript_template).is_file(): - self.jobscript_template = read_file(jobscript_template) - else: - self.jobscript_template = jobscript_template - if extra_options is None: extra_options = {} self.jobscript_options = extra_options self.jobscript_options["executable"] = executable self.jobscript_file_name = jobscript_file_name + self.raise_error_on_jobscript_failure = raise_error_on_jobscript_failure @staticmethod def create_input_templates_dict(input_templates): """Cast input templates into a dict. 
Args: - input_templates (str, Path, dict): Input template(s) + input_templates (str, Path, dict): Input template(s). Returns: - dict: containing input file names and template paths + dict: Dict containing input file names and template paths. """ if not isinstance(input_templates, dict): input_templates = {"input_file": input_templates} @@ -143,18 +148,57 @@ def create_input_templates_dict(input_templates): } return input_templates_dict + @staticmethod + def get_read_in_jobscript_template(jobscript_template): + """Get the jobscript template contents. + + If the provided jobscript template is a Path or a string of a + path and a valid file, the corresponding file is read. + + Args: + jobscript_template (str, Path): Path to jobscript template or read-in jobscript + template. + + Returns: + str: Read-in jobscript template + """ + if isinstance(jobscript_template, str): + # Catch an exception due to a long string + try: + if Path(jobscript_template).is_file(): + jobscript_template = read_file(jobscript_template) + except OSError: + _logger.debug( + "The provided jobscript template string is not a regular file so we assume " + "that it holds the read-in jobscript template. The jobscript template reads:\n" + "%s", + {jobscript_template}, + ) + + elif isinstance(jobscript_template, Path): + if jobscript_template.is_file(): + jobscript_template = read_file(jobscript_template) + else: + raise FileNotFoundError( + f"The provided jobscript template path {jobscript_template} is not a file." + ) + else: + raise TypeError("The jobscript template needs to be a string or a Path.") + + return jobscript_template + def run(self, sample, job_id, num_procs, experiment_dir, experiment_name): """Run the driver. Args: - sample (dict): Dict containing sample - job_id (int): Job ID - num_procs (int): number of processors - experiment_name (str): name of QUEENS experiment. + sample (dict): Dict containing sample. + job_id (int): Job ID. + num_procs (int): Number of processors. experiment_dir (Path): Path to QUEENS experiment directory. + experiment_name (str): Name of QUEENS experiment. Returns: - Result and potentially the gradient + Result and potentially the gradient. """ job_dir, output_dir, output_file, input_files, log_file, error_file = self._manage_paths( job_id, experiment_dir, experiment_name @@ -204,17 +248,17 @@ def _manage_paths(self, job_id, experiment_dir, experiment_name): """Manage paths for driver run. Args: - job_id (int): Job id. + job_id (int): Job ID. experiment_dir (Path): Path to QUEENS experiment directory. - experiment_name (str): name of QUEENS experiment. + experiment_name (str): Name of QUEENS experiment. Returns: - job_dir (Path): Path to job directory - output_dir (Path): Path to output directory - output_file (Path): Path to output file(s) - input_files (dict): Dict with name and path of the input file(s) - log_file (Path): Path to log file - error_file (Path): Path to error file + job_dir (Path): Path to job directory. + output_dir (Path): Path to output directory. + output_file (Path): Path to output file(s). + input_files (dict): Dict with name and path of the input file(s). + log_file (Path): Path to log file. + error_file (Path): Path to error file. 
""" job_dir = experiment_dir / str(job_id) output_dir = job_dir / "output" @@ -235,18 +279,17 @@ def _manage_paths(self, job_id, experiment_dir, experiment_name): return job_dir, output_dir, output_file, input_files, log_file, error_file - @staticmethod - def _run_executable(job_id, execute_cmd, log_file, error_file, verbose=False): + def _run_executable(self, job_id, execute_cmd, log_file, error_file, verbose=False): """Run executable. Args: - job_id (int): Job id - execute_cmd (str): Executed command - log_file (Path): Path to log file - error_file (Path): Path to error file - verbose (bool, opt): flag for additional streaming to terminal + job_id (int): Job ID. + execute_cmd (str): Executed command. + log_file (Path): Path to log file. + error_file (Path): Path to error file. + verbose (bool, opt): Flag for additional streaming to terminal. """ - run_subprocess_with_logging( + process_returncode, _, stdout, stderr = run_subprocess_with_logging( execute_cmd, terminate_expression="PROC.*ERROR", logger_name=__name__ + f"_{job_id}", @@ -255,16 +298,24 @@ def _run_executable(job_id, execute_cmd, log_file, error_file, verbose=False): streaming=verbose, raise_error_on_subprocess_failure=False, ) + if self.raise_error_on_jobscript_failure and process_returncode: + raise SubprocessError.construct_error_from_command( + command=execute_cmd, + command_output=stdout, + error_message=stderr, + additional_message=f"The jobscript with job ID {job_id} has failed with exit code " + f"{process_returncode}.", + ) def _get_results(self, output_dir): """Get results from driver run. Args: - output_dir (Path): Path to output directory + output_dir (Path): Path to output directory. Returns: - result (np.array): Result from the driver run - gradient (np.array, None): Gradient from the driver run (potentially None) + result (np.array): Result from the driver run. + gradient (np.array, None): Gradient from the driver run (potentially None). """ result = None if self.data_processor: @@ -281,9 +332,9 @@ def prepare_input_files(self, sample_dict, experiment_dir, input_files): """Prepare and parse data to input files. Args: - sample_dict (dict): Dict containing sample + sample_dict (dict): Dict containing sample. experiment_dir (Path): Path to QUEENS experiment directory. - input_files (dict): Dict with name and path of the input file(s) + input_files (dict): Dict with name and path of the input file(s). """ for input_template_name, input_template_path in self.input_templates.items(): inject( diff --git a/tests/integration_tests/fourc/conftest.py b/tests/integration_tests/fourc/conftest.py index cabdc0cf..71fbd77c 100644 --- a/tests/integration_tests/fourc/conftest.py +++ b/tests/integration_tests/fourc/conftest.py @@ -93,7 +93,7 @@ def fixture_setup_symbolic_links_fourc(fourc_link_paths, fourc_build_paths_for_g "existing file! 
\n" "You can create the necessary symbolic links on Linux via:\n" "-------------------------------------------------------------------------\n" - "ln -s /config/fourc\n" + "ln -s /config/4C\n" "ln -s /config/post_ensight\n" "ln -s /config/post_processor\n" "-------------------------------------------------------------------------\n" diff --git a/tests/unit_tests/conftest.py b/tests/unit_tests/conftest.py index 3207e349..536633a1 100644 --- a/tests/unit_tests/conftest.py +++ b/tests/unit_tests/conftest.py @@ -30,6 +30,12 @@ def fixture_dummy_simulation_model(): return model +@pytest.fixture(name="files_to_copy") +def fixture_files_to_copy(): + """Files to copy.""" + return ["fileA", "fileB"] + + @pytest.fixture(name="get_patched_bmfia_iterator") def fixture_get_patched_bmfia_iterator(global_settings): """Function that returns a dummy BMFIA iterator for testing.""" diff --git a/tests/unit_tests/drivers/test_jobscript_driver.py b/tests/unit_tests/drivers/test_jobscript_driver.py index a36ba95d..69c69375 100644 --- a/tests/unit_tests/drivers/test_jobscript_driver.py +++ b/tests/unit_tests/drivers/test_jobscript_driver.py @@ -14,13 +14,18 @@ # """Unit tests for the jobscript driver.""" +import os +from contextlib import nullcontext as does_not_raise + import numpy as np import pytest import yaml +from queens.data_processor import DataProcessorNumpy, DataProcessorTxt from queens.distributions import FreeVariable from queens.drivers.jobscript_driver import JobOptions, JobscriptDriver from queens.parameters import Parameters +from queens.utils.exceptions import SubprocessError @pytest.fixture(name="parameters") @@ -30,6 +35,90 @@ def fixture_parameters(): return parameters +def create_template(list_of_keys, template_path): + """Create dict from list of keys and write it to template path.""" + template_dict = {f: "{{ " + f + " }}" for f in list_of_keys} + # yaml format is used to ease read in of input file + template_path.write_text(yaml.safe_dump(template_dict)) + + +@pytest.fixture(name="input_template") +def fixture_input_template(tmp_path, parameters): + """Generate an input template.""" + input_template = tmp_path / "input_template.yaml" + create_template(parameters.names, input_template) + + return input_template + + +@pytest.fixture(name="input_templates") +def fixture_input_templates(tmp_path, job_options, parameters): + """Generate two input templates.""" + # only add the second parameter + input_template_1 = tmp_path / "input_template_1.yaml" + create_template(list(job_options.to_dict().keys()) + parameters.names[-1:], input_template_1) + + # add both parameters to the template + input_template_2 = tmp_path / "input_template_2.yaml" + create_template(parameters.names + ["input_1", "experiment_name"], input_template_2) + + return input_template_1, input_template_2 + + +@pytest.fixture(name="jobscript_template") +def fixture_jobscript_template(): + """Dummy jobscript template.""" + return 'echo "This is a dummy jobscript"' + + +@pytest.fixture(name="jobscript_template_path") +def fixture_jobscript_template_path(tmp_path, jobscript_template): + """Generate a dummy jobscript template.""" + jobscript_template_path = tmp_path / "dummy_jobscript_template.sh" + jobscript_template_path.write_text(jobscript_template) + + return jobscript_template_path + + +@pytest.fixture(name="executable") +def fixture_executable(tmp_path): + """Generate a dummy executable.""" + executable = tmp_path / "dummy_executable" + executable.write_text("This is a dummy file.") + + # Make the dummy file executable + 
os.chmod(executable, 0o755) + + return executable + + +@pytest.fixture(name="data_processor") +def fixture_data_processor(): + """Dummy data processor.""" + return DataProcessorNumpy( + file_name_identifier="dummy.npy", + file_options_dict={}, + ) + + +@pytest.fixture(name="gradient_data_processor") +def fixture_gradient_data_processor(): + """Dummy gradient data processor.""" + return DataProcessorTxt(file_name_identifier="dummy.txt", file_options_dict={}) + + +@pytest.fixture(name="jobscript_file_name") +def fixture_jobscript_file_name(): + """Jobscript file name.""" + return "dummy_jobscript.sh" + + +@pytest.fixture(name="extra_options") +def fixture_extra_options(): + """Extra options for JobOptions.""" + return {"option_1": 1, "option_2": "dummy"} + + @pytest.fixture(name="job_id") def fixture_job_id(): """Fixture for the job id.""" @@ -42,6 +131,16 @@ def fixture_experiment_name(): return "test_experiment" +@pytest.fixture(name="injected_input_files") +def fixture_injected_input_files(tmp_path, job_id, experiment_name): + """Fixture for the create input files.""" + input_file_1 = tmp_path / str(job_id) / f"{experiment_name}_input_1_{job_id}.yaml" + input_file_2 = tmp_path / str(job_id) / f"{experiment_name}_input_2_{job_id}.yaml" + injected_input_files = {"input_1": input_file_1, "input_2": input_file_2} + + return injected_input_files + + @pytest.fixture(name="job_options") def fixture_job_options(tmp_path, job_id, experiment_name, injected_input_files): """Job options to be injected.""" @@ -62,72 +161,101 @@ def fixture_job_options(tmp_path, job_id, experiment_name, injected_input_files) return job_options -@pytest.fixture(name="input_templates") -def fixture_input_templates(tmp_path, job_options, parameters): - """Generate input templates.""" - - def templatify(list_of_keys, template_path): - """Create dict from list of keys.""" - template_dict = {f: "{{ " + f + " }}" for f in list_of_keys} - # yaml format is used to ease read in of input file - template_path.write_text(yaml.safe_dump(template_dict)) - - # only add the second parameter - input_template_1 = tmp_path / "input_template_1.yaml" - templatify(list(job_options.to_dict().keys()) + parameters.names[-1:], input_template_1) - - # add both parameters to the template - input_template_2 = tmp_path / "input_template_2.yaml" - templatify(parameters.names + ["input_1", "experiment_name"], input_template_2) - - return input_template_1, input_template_2 - - -@pytest.fixture(name="injected_input_files") -def fixture_injected_input_files(tmp_path, job_id, experiment_name): - """Fixture for the create input files.""" - input_file_1 = tmp_path / str(job_id) / f"{experiment_name}_input_1_{job_id}.yaml" - input_file_2 = tmp_path / str(job_id) / f"{experiment_name}_input_2_{job_id}.yaml" - injected_input_files = {"input_1": input_file_1, "input_2": input_file_2} - - return injected_input_files - - @pytest.fixture(name="jobscript_driver") -def fixture_jobscript_driver(parameters, input_templates): +def fixture_jobscript_driver(parameters, input_templates, executable): """Jobscript driver object.""" input_template_1, input_template_2 = input_templates driver = JobscriptDriver( parameters=parameters, jobscript_template="", - executable="", + executable=executable, input_templates={"input_1": input_template_1, "input_2": input_template_2}, ) return driver -def test_jobscript_driver_multiple_input_files( - jobscript_driver, job_options, injected_input_files, parameters +@pytest.fixture(name="args_init") +def fixture_args_init( + parameters, + 
jobscript_template, + executable, + input_template, + files_to_copy, + data_processor, + gradient_data_processor, + jobscript_file_name, + extra_options, ): + """Arguments to initialize a JobscriptDriver. + + These arguments are meant for initialization with the default + constructor. + """ + args_init = { + "parameters": parameters, + "jobscript_template": jobscript_template, + "executable": executable, + "input_templates": input_template, + "files_to_copy": files_to_copy, + "data_processor": data_processor, + "gradient_data_processor": gradient_data_processor, + "jobscript_file_name": jobscript_file_name, + "extra_options": extra_options.copy(), + } + return args_init + + +def assert_jobscript_driver_attributes(jobscript_driver, args_init, extra_options): + """Assert that the jobscript driver attributes are set correctly.""" + extra_options.update({"executable": args_init["executable"]}) + + assert jobscript_driver.parameters == args_init["parameters"] + assert jobscript_driver.input_templates == {"input_file": args_init["input_templates"]} + assert jobscript_driver.jobscript_template == args_init["jobscript_template"] + assert jobscript_driver.files_to_copy == args_init["files_to_copy"] + assert jobscript_driver.data_processor == args_init["data_processor"] + assert jobscript_driver.gradient_data_processor == args_init["gradient_data_processor"] + assert jobscript_driver.jobscript_file_name == args_init["jobscript_file_name"] + assert jobscript_driver.jobscript_options == extra_options + + +def test_init_from_jobscript_template_str(args_init, extra_options): + """Test initialization of the JobscriptDriver. + + For this initialization, the jobscript template is provided in the + form of a string describing the jobscript template contents. + """ + driver = JobscriptDriver(**args_init) + assert_jobscript_driver_attributes(driver, args_init, extra_options) + + +def test_init_from_jobscript_template_path(args_init, jobscript_template_path, extra_options): + """Test initialization of the JobscriptDriver. + + For this initialization, the jobscript template is provided in the + form of a string describing the path to a file. 
+ """ + args_init_from_jobscript_template_path = args_init.copy() + args_init_from_jobscript_template_path["jobscript_template"] = jobscript_template_path + driver = JobscriptDriver(**args_init_from_jobscript_template_path) + assert_jobscript_driver_attributes(driver, args_init, extra_options) + + +def test_multiple_input_files(jobscript_driver, job_options, injected_input_files, parameters): """Test if multiple input files are correctly generated.""" # Samples to be injected sample_dict = parameters.sample_as_dict(np.array([1, 2])) - - # Arguments to call the run method of the driver - job_id = job_options.job_id - num_procs = job_options.num_procs - experiment_dir = job_options.experiment_dir - experiment_name = job_options.experiment_name + sample = np.array(list(sample_dict.values())) # Run the driver jobscript_driver.run( - sample=np.array(list(sample_dict.values())), - job_id=job_id, - num_procs=num_procs, - experiment_dir=experiment_dir, - experiment_name=experiment_name, + sample=sample, + job_id=job_options.job_id, + num_procs=job_options.num_procs, + experiment_dir=job_options.experiment_dir, + experiment_name=job_options.experiment_name, ) # Join all options @@ -137,3 +265,77 @@ def test_jobscript_driver_multiple_input_files( for input_file in injected_input_files.values(): for key, value in yaml.safe_load(input_file.read_text()).items(): assert value == str(injectable_options[key]) + + +@pytest.mark.parametrize( + "raise_error_on_jobscript_failure, expectation", + [ + (False, does_not_raise()), + (True, pytest.raises(SubprocessError)), + ], +) +def test_error_in_jobscript_template( + parameters, input_template, job_options, raise_error_on_jobscript_failure, expectation +): + """Test for an error when the jobscript template has an error.""" + jobscript_driver = JobscriptDriver( + parameters=parameters, + input_templates=input_template, + jobscript_template="This jobscript should fail.", + executable="", + raise_error_on_jobscript_failure=raise_error_on_jobscript_failure, + ) + sample_dict = parameters.sample_as_dict(np.array([1, 2])) + sample = np.array(list(sample_dict.values())) + + with expectation: + jobscript_driver.run( + sample=sample, + job_id=job_options.job_id, + num_procs=job_options.num_procs, + experiment_dir=job_options.experiment_dir, + experiment_name=job_options.experiment_name, + ) + + +@pytest.mark.parametrize( + "raise_error_on_jobscript_failure, expectation", + [ + (False, does_not_raise()), + (True, pytest.raises(SubprocessError)), + ], +) +def test_nonzero_exit_code( + parameters, input_template, job_options, raise_error_on_jobscript_failure, expectation +): + """Test for an error when the jobscript exits with a code other than 0.""" + jobscript_driver = JobscriptDriver( + parameters=parameters, + input_templates=input_template, + jobscript_template="exit 1", + executable="", + raise_error_on_jobscript_failure=raise_error_on_jobscript_failure, + ) + sample_dict = parameters.sample_as_dict(np.array([1, 2])) + sample = np.array(list(sample_dict.values())) + + with expectation: + jobscript_driver.run( + sample=sample, + job_id=job_options.job_id, + num_procs=job_options.num_procs, + experiment_dir=job_options.experiment_dir, + experiment_name=job_options.experiment_name, + ) + + +def test_long_jobscript_template_str(parameters, input_template): + """Test that a long jobscript template string does not raise an error.""" + long_str = "dummy" * 100 + jobscript_driver = JobscriptDriver( + parameters=parameters, + input_templates=input_template, + 
jobscript_template=long_str, + executable="", + ) + assert jobscript_driver.jobscript_template == long_str diff --git a/tests/unit_tests/utils/test_rsync.py b/tests/unit_tests/utils/test_rsync.py index d30efebd..4ed1c7fe 100644 --- a/tests/unit_tests/utils/test_rsync.py +++ b/tests/unit_tests/utils/test_rsync.py @@ -21,12 +21,6 @@ from queens.utils.rsync import rsync -@pytest.fixture(name="files_to_copy") -def fixture_files_to_copy(): - """Files to copy.""" - return ["fileA", "fileB"] - - @pytest.fixture(name="_create_source_files") def fixture_create_source_files(source_path, files_to_copy): """Create source files."""
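
Usage note (illustrative, not applied by git): the two data processors newly registered in queens/data_processor/__init__.py are importable from the package root and resolvable through the VALID_TYPES registry. A minimal sketch; the file name passed to DataProcessorTxt is a placeholder mirroring the unit-test fixtures:

from queens.data_processor import VALID_TYPES, DataProcessorPvd, DataProcessorTxt

# The new string keys map to the new classes.
assert VALID_TYPES["pvd"] is DataProcessorPvd
assert VALID_TYPES["txt"] is DataProcessorTxt

# Construction mirrors the unit-test fixtures; "results.txt" is a placeholder.
txt_processor = DataProcessorTxt(file_name_identifier="results.txt", file_options_dict={})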
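
Usage note (illustrative): with the new get_read_in_jobscript_template static method, the jobscript_template argument accepts three forms. A minimal sketch, assuming jobscript_template.sh exists in the working directory:

from pathlib import Path

from queens.drivers.jobscript_driver import JobscriptDriver

# 1) String holding the path to an existing file: the file content is returned.
template = JobscriptDriver.get_read_in_jobscript_template("jobscript_template.sh")

# 2) Path object: the file content is returned; a missing file raises FileNotFoundError.
template = JobscriptDriver.get_read_in_jobscript_template(Path("jobscript_template.sh"))

# 3) String holding the jobscript itself (including strings too long to be a valid
#    path, which only trigger a debug log): returned unchanged.
template = JobscriptDriver.get_read_in_jobscript_template('echo "hello from the jobscript"')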
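
Usage note (illustrative): the new raise_error_on_jobscript_failure option, which defaults to True, turns a non-zero jobscript exit code into a SubprocessError instead of a merely logged failure. A minimal sketch of the expected behaviour; the Parameters/FreeVariable setup, the input template, and all paths below are assumptions for illustration only:

from pathlib import Path

import numpy as np

from queens.distributions import FreeVariable
from queens.drivers.jobscript_driver import JobscriptDriver
from queens.parameters import Parameters
from queens.utils.exceptions import SubprocessError

# Assumed parameter setup with two free scalars, mirroring the unit tests.
parameters = Parameters(x1=FreeVariable(dimension=1), x2=FreeVariable(dimension=1))

driver = JobscriptDriver(
    parameters=parameters,
    input_templates="input_template.yaml",  # placeholder template containing {{ x1 }} / {{ x2 }}
    jobscript_template="exit 1",            # jobscript that deliberately fails
    executable="",
    raise_error_on_jobscript_failure=True,  # new option introduced by this patch
)

try:
    driver.run(
        sample=np.array([1.0, 2.0]),
        job_id=1,
        num_procs=1,
        experiment_dir=Path("/tmp/queens_demo"),  # placeholder experiment directory
        experiment_name="demo_experiment",
    )
except SubprocessError:
    # With raise_error_on_jobscript_failure=False the non-zero exit code is only
    # logged and no exception is raised.
    pass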