Skip to content

Commit

Permalink
Use step parameter when logging metrics with NeptuneLogger (#19126)
Browse files Browse the repository at this point in the history
  • Loading branch information
Raalsky authored Dec 14, 2023
1 parent 11bac94 commit 37952fe
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 7 deletions.
3 changes: 3 additions & 0 deletions src/lightning/pytorch/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- The `LightningModule.load_from_checkpoint()` function now calls `.configure_model()` on the model if it is overridden, to ensure all layers can be loaded from the checkpoint ([#19036](https://github.com/Lightning-AI/lightning/pull/19036))


- Restored usage of `step` parameter when logging metrics with `NeptuneLogger` ([#19126](https://github.com/Lightning-AI/pytorch-lightning/pull/19126))


- Changed the `TransformerEnginePrecision(dtype=)` argument to `weights_dtype` and made it required ([#19082](https://github.com/Lightning-AI/lightning/pull/19082))


Expand Down
6 changes: 2 additions & 4 deletions src/lightning/pytorch/loggers/neptune.py
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,7 @@ def log_metrics( # type: ignore[override]
Args:
metrics: Dictionary with metric names as keys and measured quantities as values.
step: Step number at which the metrics should be recorded, currently ignored.
            step: Step number at which the metrics should be recorded.
"""
if rank_zero_only.rank != 0:
Expand All @@ -456,9 +456,7 @@ def log_metrics( # type: ignore[override]
metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR)

for key, val in metrics.items():
# `step` is ignored because Neptune expects strictly increasing step values which
# Lightning does not always guarantee.
self.run[key].append(val)
self.run[key].append(val, step=step)

@override
@rank_zero_only
Expand Down
2 changes: 1 addition & 1 deletion tests/tests_pytorch/loggers/test_all.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ def test_logger_with_prefix_all(mlflow_mock, wandb_mock, comet_mock, neptune_moc
logger.log_metrics({"test": 1.0}, step=0)
assert logger.experiment.__getitem__.call_count == 1
logger.experiment.__getitem__.assert_called_with("tmp/test")
logger.experiment.__getitem__().append.assert_called_once_with(1.0)
logger.experiment.__getitem__().append.assert_called_once_with(1.0, step=0)

# TensorBoard
if _TENSORBOARD_AVAILABLE:
Expand Down
4 changes: 2 additions & 2 deletions tests/tests_pytorch/loggers/test_neptune.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ def on_validation_epoch_end(self):
logger, run_instance_mock, _ = _get_logger_with_mocks(api_key="test", project="project")
_fit_and_test(logger=logger, model=LoggingModel(), tmp_path=tmp_path)
run_instance_mock.__getitem__.assert_any_call("training/some/key")
run_instance_mock.__getitem__.return_value.append.assert_has_calls([call(42)])
run_instance_mock.__getitem__.return_value.append.assert_has_calls([call(42, step=2)])


def test_log_hyperparams(neptune_mock):
Expand Down Expand Up @@ -204,7 +204,7 @@ def test_log_metrics(neptune_mock):
assert run_instance_mock.__getitem__.call_count == 2
run_instance_mock.__getitem__.assert_any_call(metrics_foo_key)
run_instance_mock.__getitem__.assert_any_call(metrics_bar_key)
run_attr_mock.append.assert_has_calls([call(42), call(555)])
run_attr_mock.append.assert_has_calls([call(42, step=None), call(555, step=None)])


def test_log_model_summary(neptune_mock):
Expand Down

0 comments on commit 37952fe

Please sign in to comment.