
Commit

Add black formatting (#267)
smarr authored Sep 29, 2024
2 parents 865f654 + bb6f930 commit 0144014
Showing 61 changed files with 538 additions and 446 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/ci.yml
@@ -44,6 +44,12 @@ jobs:
pylint rebench
if: matrix.python-version == '3.12'

- name: Install and run black
run: |
pip install black
black --check rebench
if: matrix.python-version == '3.12'

- name: Upload coverage results to Coveralls
run: coveralls
env:
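
The new step makes CI fail whenever black would reformat anything under rebench/. A rough local equivalent of that gate, using black's Python API, is sketched below; it is illustrative only: it ignores the extend-exclude list added to pyproject.toml (which the real `black --check rebench` invocation respects), and it assumes black.format_str, black.Mode, and black.InvalidInput are available as in current black releases.

import pathlib
import black

would_change = []
for path in pathlib.Path("rebench").rglob("*.py"):
    source = path.read_text()
    try:
        formatted = black.format_str(source, mode=black.Mode())
    except black.InvalidInput:
        continue  # skip files black cannot parse
    if formatted != source:
        would_change.append(path)

# Mirror `black --check`: report and exit non-zero when changes would be made.
print("%d file(s) would be reformatted" % len(would_change))
raise SystemExit(1 if would_change else 0)
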
1 change: 1 addition & 0 deletions .pylintrc
@@ -65,6 +65,7 @@ disable=raw-checker-failed,
logging-not-lazy,
fixme,
too-many-arguments,
too-many-positional-arguments,
too-few-public-methods,
too-many-instance-attributes,
too-many-locals,
6 changes: 3 additions & 3 deletions docs/conf.py
@@ -1,8 +1,8 @@
from recommonmark.parser import CommonMarkParser

source_parsers = {
'.md': CommonMarkParser,
".md": CommonMarkParser,
}

source_suffix = ['.md']
html_theme = 'gitbook'
source_suffix = [".md"]
html_theme = "gitbook"
84 changes: 84 additions & 0 deletions pyproject.toml
@@ -0,0 +1,84 @@
[tool.black]
extend-exclude = '''
/rebench/interop/jmh_adapter.py
| /rebench/interop/savina_log_adapter.py
| /rebench/interop/perf_adapter.py
| /rebench/interop/adapter.py
| /rebench/interop/test_adapter.py
| /rebench/interop/test_vm_adapter.py
| /rebench/environment.py
| /rebench/interop/plain_seconds_log_adapter.py
| /rebench/interop/multivariate_adapter.py
| /rebench/model/build_cmd.py
| /rebench/interop/validation_log_adapter.py
| /rebench/interop/rebench_log_adapter.py
| /rebench/model/benchmark.py
| /rebench/denoise_client.py
| /rebench/model/benchmark_suite.py
| /rebench/model/executor.py
| /rebench/model/exp_run_details.py
| /rebench/model/profile_data.py
| /rebench/interop/time_adapter.py
| /rebench/model/profiler.py
| /rebench/model/reporting.py
| /rebench/output.py
| /rebench/interop/perf_parser.py
| /rebench/model/data_point.py
| /rebench/model/measurement.py
| /rebench/model/experiment.py
| /rebench/configurator.py
| /rebench/subprocess_kill.py
| /rebench/denoise.py
| /rebench/model/termination_check.py
| /rebench/tests/bugs/issue_27_invalid_run_not_handled_test.py
| /rebench/tests/bugs/issue_112_invocations_setting_ignored_test.py
| /rebench/statistics.py
| /rebench/tests/bugs/issue_54_extra_args_test.py
| /rebench/tests/bugs/issue_117_input_size_setting_ignored_test.py
| /rebench/rebenchdb.py
| /rebench/tests/bugs/issue_111_report_ignores_warmup_test.py
| /rebench/tests/bugs/issue_4_run_equality_and_params_test.py
| /rebench/tests/environment_test.py
| /rebench/tests/features/issue_15_warm_up_support_test.py
| /rebench/tests/features/ignore_timeouts_test.py
| /rebench/model/run_id.py
| /rebench/subprocess_with_timeout.py
| /rebench/tests/features/issue_169_config_composition_test.py
| /rebench/tests/features/issue_16_multiple_data_points_test.py
| /rebench/tests/features/issue_209_adapter.py
| /rebench/tests/features/issue_110_setup_run_test.py
| /rebench/tests/configurator_test.py
| /rebench/tests/features/issue_19_one_data_point_test.py
| /rebench/tests/features/issue_216_test.py
| /rebench/tests/features/issue_32_jmh_support_test.py
| /rebench/tests/executor_test.py
| /rebench/tests/features/issue_209_adapter2.py
| /rebench/tests/features/issue_42_vm.py
| /rebench/tests/features/issue_57_binary_on_path_test.py
| /rebench/reporter.py
| /rebench/tests/features/issue_34_accept_faulty_runs_test.py
| /rebench/tests/features/issue_209_custom_adapter_test.py
| /rebench/tests/interop/plain_seconds_log_adapter_test.py
| /rebench/tests/features/issue_31_multivariate_data_points_test.py
| /rebench/tests/interop/time_adapter_test.py
| /rebench/tests/features/issue_59_build_suite_test.py
| /rebench/tests/features/issue_81_unicode_test.py
| /rebench/tests/features/issue_58_build_vm_test.py
| /rebench/tests/mock_http_server.py
| /rebench/persistence.py
| /rebench/tests/rebenchdb_test.py
| /rebench/tests/rebench_test_case.py
| /rebench/executor.py
| /rebench/tests/model/runs_config_test.py
| /setup.py
| /rebench/tests/subprocess_timeout_test.py
| /rebench/tests/stats_test.py
| /rebench/tests/reporter_test.py
| /rebench/tests/features/issue_42_test.py
| /rebench/rebench.py
| /rebench/tests/interop/rebench_log_adapter_test.py
| /rebench/tests/perf/perf_parser_test.py
| /rebench/tests/perf/issue_166_profiling_test.py
| /rebench/ui.py
| /rebench/tests/persistency_test.py
'''
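
The extend-exclude value is one regular expression written in verbose form: whitespace and newlines are ignored, the entries are alternatives joined with |, and black skips any file whose project-relative path matches while still formatting everything else it finds. A minimal sketch of that matching idea with Python's re module (illustrative of the idea, not black's actual implementation; the paths in the example are placeholders, with dots escaped for clarity):

import re

extend_exclude = re.compile(
    r"""
      /rebench/configurator\.py
    | /rebench/denoise\.py
    """,
    re.VERBOSE,
)

# A listed path matches and is skipped; an unlisted (hypothetical) one is formatted.
print(bool(extend_exclude.search("/rebench/configurator.py")))        # True
print(bool(extend_exclude.search("/rebench/newly_added_module.py")))  # False
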
42 changes: 21 additions & 21 deletions rebench/configurator.py
@@ -33,8 +33,8 @@
from .ui import escape_braces

# Disable most logging for pykwalify
logging.getLogger('pykwalify').setLevel(logging.CRITICAL)
logging.getLogger('pykwalify').addHandler(logging.NullHandler())
logging.getLogger("pykwalify").setLevel(logging.CRITICAL)
logging.getLogger("pykwalify").addHandler(logging.NullHandler())


class _ExecutorFilter(object):
@@ -124,7 +124,7 @@ def load_config(file_name):
and return the configuration.
"""
try:
with open(file_name, 'r') as conf_file: # pylint: disable=unspecified-encoding
with open(file_name, "r") as conf_file: # pylint: disable=unspecified-encoding
data = yaml.safe_load(conf_file)
validator = Core(
source_data=data,
@@ -135,8 +135,8 @@ def load_config(file_name):

# add file name and directory to config to be able to use it when loading
# for instance gauge adapters
data['__file__'] = file_name
data['__dir__'] = dirname(abspath(file_name))
data["__file__"] = file_name
data["__dir__"] = dirname(abspath(file_name))
except SchemaError as err:
errors = [escape_braces(val_err) for val_err in validator.validation_errors]
raise UIError(
@@ -155,9 +155,9 @@ def load_config(file_name):


def validate_gauge_adapters(raw_config):
benchmark_suites = raw_config.get('benchmark_suites', {})
benchmark_suites = raw_config.get("benchmark_suites", {})
for suite_name, suite in benchmark_suites.items():
adapter = suite['gauge_adapter']
adapter = suite["gauge_adapter"]
if not isinstance(adapter, (dict, str)):
raise UIError(("Gauge adapter for suite %s must be a string or a dictionary," +
"but is %s.\n") % (suite_name, type(adapter).__name__), None)
@@ -175,12 +175,12 @@ def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=No
exp_name=None, data_file=None, build_log=None, run_filter=None):
self._raw_config_for_debugging = raw_config # kept around for debugging only

self.build_log = build_log or raw_config.get('build_log', 'build.log')
self.data_file = data_file or raw_config.get('default_data_file', 'rebench.data')
self._exp_name = exp_name or raw_config.get('default_experiment', 'all')
self.artifact_review = raw_config.get('artifact_review', False)
self.config_dir = raw_config.get('__dir__', None)
self.config_file = raw_config.get('__file__', None)
self.build_log = build_log or raw_config.get("build_log", "build.log")
self.data_file = data_file or raw_config.get("default_data_file", "rebench.data")
self._exp_name = exp_name or raw_config.get("default_experiment", "all")
self.artifact_review = raw_config.get("artifact_review", False)
self.config_dir = raw_config.get("__dir__", None)
self.config_file = raw_config.get("__file__", None)

self._rebench_db_connector = None

@@ -199,17 +199,17 @@ def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=No
raw_config.get('reporting', {}), Reporting.empty(cli_reporter), cli_options, ui)

# Construct ReBenchDB config
rdb_cfg = raw_config.get('reporting', None)
rdb_cfg = raw_config.get("reporting", None)
if rdb_cfg:
rdb_cfg = rdb_cfg.get('rebenchdb', None)
rdb_cfg = rdb_cfg.get("rebenchdb", None)
if rdb_cfg:
self.rebench_db = rdb_cfg
else:
self.rebench_db = {}
if cli_options:
if cli_options.db_server:
self.rebench_db['db_url'] = cli_options.db_server
self.rebench_db['send_to_rebench_db'] = cli_options.send_to_rebench_db
self.rebench_db["db_url"] = cli_options.db_server
self.rebench_db["send_to_rebench_db"] = cli_options.send_to_rebench_db

self.options = cli_options
self.ui = ui
@@ -220,10 +220,10 @@ def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=No

self.run_filter = _RunFilter(run_filter)

self._executors = raw_config.get('executors', {})
self._suites_config = raw_config.get('benchmark_suites', {})
self._executors = raw_config.get("executors", {})
self._suites_config = raw_config.get("benchmark_suites", {})

experiments = raw_config.get('experiments', {})
experiments = raw_config.get("experiments", {})
self._experiments = self._compile_experiments(experiments)

@property
@@ -333,7 +333,7 @@ def get_runs(self):
def _compile_experiments(self, experiments):
results = {}

if self._exp_name == 'all':
if self._exp_name == "all":
for exp_name in experiments:
results[exp_name] = self._compile_experiment(exp_name, experiments[exp_name])
else:
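
The changes shown for this file are purely string-quote normalization: single-quoted literals are replaced by the double-quoted style black prefers. Since both quoting styles denote the same string value in Python, the rewrite cannot change behaviour; a one-line illustration (the key 'executors' is taken from the lines above):

# Both spellings compare equal, so raw_config.get('executors', {}) and
# raw_config.get("executors", {}) are interchangeable.
assert 'executors' == "executors"
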
24 changes: 12 additions & 12 deletions rebench/denoise.py
@@ -27,8 +27,8 @@ def __init__(self):

def get_which(self):
if not self._which_path:
if os.path.isfile('/usr/bin/which'):
self._which_path = '/usr/bin/which'
if os.path.isfile("/usr/bin/which"):
self._which_path = "/usr/bin/which"
else:
raise UIError("The basic `which` command was not found." +
" In many systems it is available at /usr/bin/which." +
@@ -61,7 +61,7 @@ def _absolute_path_for_command(self, command, arguments_for_successful_exe):

def has_cset(self):
if self._cset_path is None:
self._cset_path = self._absolute_path_for_command('cset', ['--help'])
self._cset_path = self._absolute_path_for_command("cset", ["--help"])

return self._cset_path is not None and self._cset_path is not False

@@ -73,7 +73,7 @@ def set_cset(self, cset_path):

def has_denoise(self):
if self._denoise_path is None:
self._denoise_path = self._absolute_path_for_command('rebench-denoise', ['--version'])
self._denoise_path = self._absolute_path_for_command("rebench-denoise", ["--version"])

return self._denoise_path is not None and self._denoise_path is not False

@@ -95,7 +95,7 @@ def get_denoise_python_path(self):

# find the element in active_python_path that has the start of the current file path
for path in active_python_path:
if current_file.startswith(path) and 'rebench' in path.lower():
if current_file.startswith(path) and "rebench" in path.lower():
self._denoise_python_path = path
return path

@@ -294,7 +294,7 @@ def _exec(num_cores, use_nice, use_shielding, args):
min_cores = _shield_lower_bound(num_cores)
max_cores = _shield_upper_bound(num_cores)
core_spec = "%d-%d" % (min_cores, max_cores)
env['REBENCH_DENOISE_CORE_SET'] = core_spec
env["REBENCH_DENOISE_CORE_SET"] = core_spec

os.execvpe(cmd, cmdline, env)

@@ -324,7 +324,7 @@ def _test(num_cores):
print("Test on %d cores" % core_cnt)

core_spec = "%d-%d" % (lower, upper)
env_spec = os.environ.get('REBENCH_DENOISE_CORE_SET', None)
env_spec = os.environ.get("REBENCH_DENOISE_CORE_SET", None)
if core_spec != env_spec:
print("Core Spec set by denoise was: ", env_spec)
print("Locally determined one was: ", core_spec)
@@ -382,15 +382,15 @@ def main_func():
num_cores = int(args.num_cores) if args.num_cores else None
result = {}

if args.command == 'minimize' and num_cores is not None:
if args.command == "minimize" and num_cores is not None:
result = _minimize_noise(num_cores, args.use_nice, args.use_shielding, args.for_profiling)
elif args.command == 'restore' and num_cores is not None:
elif args.command == "restore" and num_cores is not None:
result = _restore_standard_settings(num_cores, args.use_shielding)
elif args.command == 'exec':
elif args.command == "exec":
_exec(num_cores, args.use_nice, args.use_shielding, remaining_args)
elif args.command == 'kill':
elif args.command == "kill":
_kill(remaining_args[0])
elif args.command == 'test' and num_cores is not None:
elif args.command == "test" and num_cores is not None:
_test(num_cores)
else:
arg_parser.print_help()