ci: Switch from pydocstyle to ruff to check docstrings
leahaeusel committed Jan 21, 2025
1 parent c06bb33 commit b70cc81
Showing 26 changed files with 120 additions and 71 deletions.
7 changes: 5 additions & 2 deletions .github/workflows/tests_local.yml
@@ -53,12 +53,15 @@ jobs:
echo "::group::Run pylint..."
pylint --rcfile=.pylintrc_ci queens tests
# pylint queens tests --rcfile=.pylintrc --output-format=json:pylint_warnings.json --fail-under 0
echo "::endgroup::"
echo "::group::Run ruff..."
ruff check
echo "::endgroup::"
# echo "::group::Create code quality report..."
# pylint queens tests --rcfile=.pylintrc --output-format=json:pylint_warnings.json --fail-under 0
# python .gitlab/pipeline_utils/code_quality_creator.py pylint_warnings.json
-# pydocstyle --match-dir='^(?!.*test).*$' queens
# echo "::endgroup::"
echo "::group::Check compatibility with licenses of dependencies..."
5 changes: 3 additions & 2 deletions .gitlab/pipeline_utils/update_changelog.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -17,7 +17,7 @@
import sys
from pathlib import Path

-import gitlab
+import gitlab  # pylint: disable=import-error

CHANGE_KEY = "change:"

@@ -28,6 +28,7 @@ def create_section(name, link, date, message, header_symbol, newline=True):
Args:
name (str): Name of the tag
link (str): Link of the tag
+date (obj): Date of the tag
message (str): Tag message
header_symbol (str): Symbol of the section
newline (bool): Add newline after the section name
13 changes: 7 additions & 6 deletions .pre-commit-config.yaml
@@ -1,5 +1,12 @@
default_stages: [pre-commit, pre-merge-commit, pre-push, manual]
repos:
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.9.2 # Ruff version
+    hooks:
+      # Run the linter.
+      # Needs to be placed before the ruff formatter, black, isort and other formatters
+      - id: ruff
+        args: [ --fix ]
- repo: local
hooks:
- id: trailing-whitespace
@@ -24,12 +31,6 @@ repos:
language: python
types: ["python"]
pass_filenames: true
-  - id: pydocstyle
-    name: pydocstyle
-    entry: pydocstyle
-    language: python
-    types: ["python"]
-    additional_dependencies: ["toml"]
- id: docformatter
name: docformatter
entry: docformatter
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -52,7 +52,7 @@ will lead to failing CI pipelines and will therefore not be merged.
The code checks are conducted with [Pylint](https://pylint.org/),
[isort](https://github.com/PyCQA/isort), and [Black](https://github.com/psf/black).
Compliance with [Google style docstrings](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings)
-is checked with [pydocstyle](https://github.com/PyCQA/pydocstyle).
+is checked with [ruff](https://github.com/astral-sh/ruff).
Complete and meaningful docstrings are required as they are used to generate the
[documentation](#reading-and-writing-documentation).
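For reference, here is a minimal sketch of a docstring that satisfies the google-convention D rules this commit enables; the function itself is hypothetical and not part of QUEENS:

```python
def create_link(name, url):
    """Create a markdown link for the changelog.

    Args:
        name (str): Link text
        url (str): Target of the link

    Returns:
        str: The rendered markdown link
    """
    return f"[{name}]({url})"
```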

2 changes: 1 addition & 1 deletion dev-requirements.in
@@ -17,6 +17,6 @@ pydata-sphinx-theme
pandoc
pip-tools
commitizen>=3.12.0
-pydocstyle>=6.3.0
docformatter>=1.5.1
yamllint>=1.19.0
+ruff
8 changes: 3 additions & 5 deletions dev-requirements.txt
@@ -165,8 +165,6 @@ prompt-toolkit==3.0.36
# via questionary
pydata-sphinx-theme==0.16.0
# via -r dev-requirements.in
-pydocstyle==6.3.0
-    # via -r dev-requirements.in
pygments==2.18.0
# via
# -c requirements.txt
@@ -214,6 +212,8 @@ ruamel-yaml==0.18.6
# via pre-commit-hooks
ruamel-yaml-clib==0.2.12
# via ruamel-yaml
+ruff==0.9.2
+    # via -r dev-requirements.in
semantic-version==2.10.0
# via liccheck
six==1.16.0
@@ -222,9 +222,7 @@ six==1.16.0
# bleach
# python-dateutil
snowballstemmer==2.2.0
-    # via
-    #   pydocstyle
-    #   sphinx
+    # via sphinx
soupsieve==2.6
# via beautifulsoup4
sphinx==8.1.3
8 changes: 5 additions & 3 deletions pyproject.toml
@@ -58,10 +58,12 @@ force_grid_wrap = 0
multi_line_output = 3
include_trailing_comma = true

-[tool.pydocstyle]
+[tool.ruff.lint]
+select = ["D"] # pydocstyle rules
+ignore = ["D104"] # Missing docstring in public package
+
+[tool.ruff.lint.pydocstyle]
convention = "google"
-match-dir = '(?!tests).*'
-match = '(?!__init__).*\.py'

[tool.pytest.ini_options]
addopts = '-m "not benchmark and not lnm_cluster and not imcs_cluster" --doctest-modules --doctest-continue-on-failure --doctest-ignore-import-errors'
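A note on this config translation, offered as an observation rather than documented intent: pydocstyle's `match = '(?!__init__).*\.py'` skipped `__init__.py` files entirely, whereas the ruff setup checks them but ignores D104, the rule an undocumented public package would otherwise trigger; the old `match-dir` exclusion of tests has no counterpart in this hunk. A sketch of the case D104 covers, using a hypothetical package path:

```python
# queens/some_package/__init__.py  (hypothetical path)

# An empty __init__.py defines a public package without a module docstring.
# With select = ["D"] alone, ruff would report
# "D104 Missing docstring in public package" for this file; listing "D104"
# under `ignore` silences exactly this case while keeping the other D rules.
```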
11 changes: 8 additions & 3 deletions queens/data_processor/data_processor_ensight.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -176,7 +176,11 @@ def _check_field_specification_dict(file_options_dict):
file_options_dict (dict): Dictionary containing the field description for the
physical fields of interest that should be read-in.
"""
required_keys_lst = ["target_time_lst", "physical_field_dict", "geometric_target"]
required_keys_lst = [
"target_time_lst",
"physical_field_dict",
"geometric_target",
]
if not set(required_keys_lst).issubset(set(file_options_dict.keys())):
raise KeyError(
"The option 'file_options_dict' within the data_processor section must at least "
@@ -445,6 +449,7 @@ def _vtk_from_ensight(self, raw_data, target_time):
"""Load a vtk-object from the ensight file.
Args:
raw_data (obj): Raw data from file
+target_time (float): Time the field should be evaluated on
Returns:
@@ -488,7 +493,7 @@ def read_geometry_coordinates(external_geometry):
This method uses the QUEENS external geometry module.
Args:
-external_geometry_obj (queens.fourc_dat_geometry)
+external_geometry (queens.fourc_dat_geometry): QUEENS external geometry object
Returns:
dict: set with 4C topology
3 changes: 2 additions & 1 deletion queens/data_processor/data_processor_ensight_interface.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -442,6 +442,7 @@ def deformed_grid(self, raw_data, time):
such that the final result is the deformed grid at the specified time.
Args:
raw_data (obj): Raw data from file
+time (float): Time value for data processing
Returns:
33 changes: 26 additions & 7 deletions queens/external_geometry/fourc_dat_geometry.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -477,7 +477,10 @@ def _sort_node_coordinates(self):
self.node_coordinates["coordinates"] = [
coord
for _, coord in sorted(
-zip(self.node_coordinates["node_mesh"], self.node_coordinates["coordinates"]),
+zip(
+    self.node_coordinates["node_mesh"],
+    self.node_coordinates["coordinates"],
+),
key=lambda pair: pair[0],
)
]
@@ -713,7 +716,9 @@ def _write_elementwise_materials(self, line, random_field_lst):
material_fields[0]["name"] + "_" + str(i) for i in range(len(self.element_centers))
]
self._write_coords_to_dict(
material_fields[0]["name"], material_field_placeholders, np.array(self.element_centers)
material_fields[0]["name"],
material_field_placeholders,
np.array(self.element_centers),
)

# check if the current material number is equal to base material and rewrite the base
@@ -899,7 +904,15 @@ def _write_design_point_dirichlet_conditions(self, random_field_lst, line):
)

# write the new fields to the dat file --------------------------------------------
-for topo_node, random_field_1, random_field_2, random_field_3, f1, f2, f3 in zip(
+for (
+    topo_node,
+    random_field_1,
+    random_field_2,
+    random_field_3,
+    f1,
+    f2,
+    f3,
+) in zip(
node_set["topo_dnodes"],
realized_random_field_1,
realized_random_field_2,
@@ -1016,7 +1029,9 @@ def _assign_random_dirichlet_fields_per_geo_set(self, fields_dirich_on_geo_set):
fun_3 = dirich_field["funct_for_field"] * np.ones(set_shape)

self._write_coords_to_dict(
dirich_field["name"], placeholders, np.array(self.node_coordinates["coordinates"])
dirich_field["name"],
placeholders,
np.array(self.node_coordinates["coordinates"]),
)

return (
@@ -1153,7 +1168,7 @@ def _get_my_topology(self, geo_set_name_type):
I.e., its node mappings, based on the type of the geometric set.
Args:
-geo_set_name_type:
+geo_set_name_type (str): Name of the geometric set type.
Returns:
my_topology (lst): List with desired geometric topology
Expand Down Expand Up @@ -1203,7 +1218,11 @@ def _create_new_node_sets(self, random_fields_lst):
my_topology_lst = self._get_my_topology(topology_type)
nodes_mesh_lst.extend(
[
{"node_mesh": topo["node_mesh"], "topo_dnodes": [], "name": topology_name}
{
"node_mesh": topo["node_mesh"],
"topo_dnodes": [],
"name": topology_name,
}
for topo in my_topology_lst
if topo["topology_name"] == topology_name
]
4 changes: 3 additions & 1 deletion queens/interfaces/bmfmc_interface.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -61,6 +61,8 @@ def evaluate(self, samples, support="y", full_cov=False, gradient_bool=False):
*support=f* the Gaussian process predicts w.r.t. the latent function
*f*. For the choice of *support=y* we predict w.r.t. the
simulation/experimental output *y*
+full_cov (bool): Boolean that specifies whether the entire posterior covariance matrix
+    should be returned or only the posterior variance
gradient_bool (bool): Flag to determine whether the gradient of the function at
the evaluation point is expected (*True*) or not (*False*)
13 changes: 9 additions & 4 deletions queens/iterators/bmfia_iterator.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -218,7 +218,9 @@ def core_run(self):

# ----- Set the feature strategy of the probabilistic mapping (select gammas)
self.Z_train = self.set_feature_strategy(
-self.Y_LF_train, self.X_train, self.coords_experimental_data[: self.Y_LF_train.shape[0]]
+self.Y_LF_train,
+self.X_train,
+self.coords_experimental_data[: self.Y_LF_train.shape[0]],
)

return self.Z_train, self.Y_HF_train
@@ -257,7 +259,9 @@ def expand_training_data(self, additional_x_train, additional_y_lf_train=None):
_logger.info("Training data was successfully expanded!")

self.Z_train = self.set_feature_strategy(
-self.Y_LF_train, self.X_train, self.coords_experimental_data[: self.Y_LF_train.shape[0]]
+self.Y_LF_train,
+self.X_train,
+self.coords_experimental_data[: self.Y_LF_train.shape[0]],
)

return self.Z_train, self.Y_HF_train
@@ -397,6 +401,7 @@ def _get_coord_features(self, _, y_lf_mat, coords_mat):
and colum-wise variable dimensions.
y_lf_mat (np.array): Low-fidelity output matrix with row-wise model realizations.
Columns are different dimensions of the output.
+coords_mat (np.array): Coordinates matrix.
Returns:
z_mat (np.array): Extended low-fidelity matrix containing
@@ -454,7 +459,7 @@ def _get_no_features(self, _x_mat, y_lf_mat, __):
def _get_time_features(self, _, y_lf_mat, __):
"""Get the low-fidelity feature matrix with time features.
-Args:
Args:
y_lf_mat (np.array): Low-fidelity output matrix with row-wise model realizations.
Columns are different dimensions of the output.
4 changes: 1 addition & 3 deletions queens/iterators/hmc_iterator.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -131,8 +131,6 @@ def __init__(
def init_mcmc_method(self):
"""Init the PyMC MCMC Model.
-Args:
Returns:
step (obj): The MCMC Method within the PyMC Model
"""
4 changes: 1 addition & 3 deletions queens/iterators/metropolis_hastings_pymc_iterator.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -168,8 +168,6 @@ def eval_log_likelihood_grad(self, samples):
def init_mcmc_method(self):
"""Init the PyMC MCMC Model.
-Args:
Returns:
step (obj): The MCMC Method within the PyMC Model
"""
10 changes: 7 additions & 3 deletions queens/iterators/pymc_iterator.py
@@ -1,6 +1,6 @@
#
# SPDX-License-Identifier: LGPL-3.0-or-later
-# Copyright (c) 2024, QUEENS contributors.
+# Copyright (c) 2025, QUEENS contributors.
#
# This file is part of QUEENS.
#
@@ -157,7 +157,7 @@ def eval_log_prior(self, samples):
def eval_log_prior(self, samples):
"""Evaluate natural logarithm of prior at samples of chains.
-Args:
Args:
samples (np.array): Samples to evaluate the prior at
Returns:
@@ -328,7 +328,11 @@ def post_run(self):
# process output takes a dict as input with key 'mean'
results_dict = az.convert_to_inference_data(inference_data_dict)
results = process_outputs(
{"sample_stats": sample_stats, "result": self.chains, "inference_data": results_dict},
{
"sample_stats": sample_stats,
"result": self.chains,
"inference_data": results_dict,
},
self.result_description,
)
if self.result_description["write_results"]:
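The docstring edits in the iterator files above share one pattern: the google-convention D rules reject malformed docstring sections, such as a duplicated or empty `Args:` header, and undocumented parameters. A before/after sketch modeled on the `eval_log_prior` hunk, simplified to a plain function:

```python
# Before: the duplicated "Args:" header keeps ruff from parsing the
# parameter section correctly, so the D rules flag the docstring.
def eval_log_prior(samples):
    """Evaluate natural logarithm of prior at samples of chains.

    Args:
        Args:
            samples (np.array): Samples to evaluate the prior at
    """


# After: a single, populated "Args:" section satisfies the D rules.
def eval_log_prior(samples):
    """Evaluate natural logarithm of prior at samples of chains.

    Args:
        samples (np.array): Samples to evaluate the prior at
    """
```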
[Diffs for the remaining changed files were not loaded.]
