diff --git a/src/spikeinterface/core/sortinganalyzer.py b/src/spikeinterface/core/sortinganalyzer.py
index 55cbe6070a..e6eb2c23b9 100644
--- a/src/spikeinterface/core/sortinganalyzer.py
+++ b/src/spikeinterface/core/sortinganalyzer.py
@@ -2033,19 +2033,18 @@ def load(cls, sorting_analyzer):
         return None

     def load_run_info(self):
+        run_info = None
         if self.format == "binary_folder":
             extension_folder = self._get_binary_extension_folder()
             run_info_file = extension_folder / "run_info.json"
             if run_info_file.is_file():
                 with open(str(run_info_file), "r") as f:
                     run_info = json.load(f)
-            else:
-                warnings.warn(f"Found no run_info file for {self.extension_name}, extension should be re-computed.")
-                run_info = None

         elif self.format == "zarr":
             extension_group = self._get_zarr_extension_group(mode="r")
             run_info = extension_group.attrs.get("run_info", None)
+
+        if run_info is None:
+            warnings.warn(f"Found no run_info file for {self.extension_name}, extension should be re-computed.")
         self.run_info = run_info
diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py
index 6e7bcf21b8..4cf88ffc35 100644
--- a/src/spikeinterface/postprocessing/template_metrics.py
+++ b/src/spikeinterface/postprocessing/template_metrics.py
@@ -335,13 +335,18 @@ def _run(self, verbose=False):
         )

         existing_metrics = []
-        tm_extension = self.sorting_analyzer.get_extension("template_metrics")
-        if (
-            delete_existing_metrics is False
-            and tm_extension is not None
-            and tm_extension.data.get("metrics") is not None
-        ):
-            existing_metrics = tm_extension.params["metric_names"]
+
+        # Check if we need to propagate any old metrics. If so, we'll do that.
+        # Otherwise, we'll avoid attempting to load an empty template_metrics.
+        if set(self.params["metrics_to_compute"]) != set(self.params["metric_names"]):
+
+            tm_extension = self.sorting_analyzer.get_extension("template_metrics")
+            if (
+                delete_existing_metrics is False
+                and tm_extension is not None
+                and tm_extension.data.get("metrics") is not None
+            ):
+                existing_metrics = tm_extension.params["metric_names"]

         # append the metrics which were previously computed
         for metric_name in set(existing_metrics).difference(metrics_to_compute):
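Note on the load_run_info hunk: in the old code only the "binary_folder" branch warned when run_info.json was missing, while a zarr store with no "run_info" attribute stayed silent. The refactor initializes run_info to None and emits the warning once, after both branches. A minimal standalone sketch of that control flow (toy function and toy inputs, not the real SortingAnalyzer class):

import warnings

def load_run_info_sketch(fmt, binary_run_info, zarr_attrs, extension_name="template_metrics"):
    # run_info starts as None; each backend only fills it in on success.
    run_info = None
    if fmt == "binary_folder":
        if binary_run_info is not None:  # stands in for "run_info.json exists and was parsed"
            run_info = binary_run_info
    elif fmt == "zarr":
        run_info = zarr_attrs.get("run_info", None)

    # Single warning point shared by both formats (previously only binary_folder warned).
    if run_info is None:
        warnings.warn(f"Found no run_info file for {extension_name}, extension should be re-computed.")
    return run_info

load_run_info_sketch("zarr", None, {})            # now warns
load_run_info_sketch("binary_folder", None, {})   # warned before and still warns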
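Usage sketch for the template_metrics guard (assumptions, not part of the diff: a recent spikeinterface exposing generate_ground_truth_recording, create_sorting_analyzer and the delete_existing_metrics parameter; adapt names if your version differs). On a first compute, metrics_to_compute equals metric_names, so the old-metrics lookup is skipped and no empty extension data is loaded; on a second compute with kept metrics the two sets differ, so the previously computed values are loaded and propagated:

import spikeinterface.full as si

recording, sorting = si.generate_ground_truth_recording(durations=[10.0], num_units=5, seed=0)
analyzer = si.create_sorting_analyzer(sorting, recording)
analyzer.compute(["random_spikes", "templates"])  # template_metrics requires templates

# First run: nothing was computed before, metrics_to_compute == metric_names,
# so the guard skips get_extension("template_metrics") entirely.
analyzer.compute("template_metrics", metric_names=["peak_to_valley", "half_width"])

# Second run, keeping the old metrics: metric_names becomes the union of old and
# new names while metrics_to_compute holds only the new one, the sets differ,
# and the existing peak_to_valley / half_width values are carried over.
analyzer.compute("template_metrics", metric_names=["num_positive_peaks"], delete_existing_metrics=False)
print(analyzer.get_extension("template_metrics").get_data().columns.tolist())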