diff --git a/avalanche/benchmarks/scenarios/dataset_scenario.py b/avalanche/benchmarks/scenarios/dataset_scenario.py
index 32fb446d3..b5af85058 100644
--- a/avalanche/benchmarks/scenarios/dataset_scenario.py
+++ b/avalanche/benchmarks/scenarios/dataset_scenario.py
@@ -210,7 +210,7 @@ def split_validation_class_balanced(
     # shuffle exp_indices
     exp_indices_t = torch.as_tensor(exp_indices)[torch.randperm(len(exp_indices))]
     # shuffle the targets as well
-    exp_targets = targets_as_tensor[exp_indices]
+    exp_targets = targets_as_tensor[exp_indices_t]
 
     train_exp_indices: list[int] = []
     valid_exp_indices: list[int] = []
diff --git a/tests/benchmarks/scenarios/test_dataset_scenario.py b/tests/benchmarks/scenarios/test_dataset_scenario.py
index 1d2303429..d5c11eabd 100644
--- a/tests/benchmarks/scenarios/test_dataset_scenario.py
+++ b/tests/benchmarks/scenarios/test_dataset_scenario.py
@@ -120,6 +120,7 @@ def test_split_dataset_class_balanced(self):
         for cid in exp.classes_in_this_experience:
             train_cnt = (torch.as_tensor(train_d.targets) == cid).sum()
             valid_cnt = (torch.as_tensor(valid_d.targets) == cid).sum()
+            # print(train_cnt, valid_cnt)
             assert abs(train_cnt - valid_cnt) <= 1
 
         ratio = 0.123
diff --git a/tests/benchmarks/utils/test_avalanche_dataset.py b/tests/benchmarks/utils/test_avalanche_dataset.py
index 92bf5f849..8138e1063 100644
--- a/tests/benchmarks/utils/test_avalanche_dataset.py
+++ b/tests/benchmarks/utils/test_avalanche_dataset.py
@@ -148,7 +148,7 @@ def test_subset_subset_merge(self):
             self.assertTrue(torch.equal(x_curr, x_true))
 
             t_curr = torch.tensor(
-                [curr_dataset.task_labels[idx] for idx in range(d_sz)]
+                [curr_dataset.targets_task_labels[idx] for idx in range(d_sz)]
             )
             t_true = torch.stack([dadata[idx] for idx in true_indices], dim=0)
             self.assertTrue(torch.equal(t_curr, t_true))
diff --git a/tests/training/test_online_strategies.py b/tests/training/test_online_strategies.py
index 9768e67f4..520a837d2 100644
--- a/tests/training/test_online_strategies.py
+++ b/tests/training/test_online_strategies.py
@@ -45,7 +45,7 @@ def test_naive(self):
         benchmark_streams = benchmark.streams.values()
 
         # With task boundaries
-        model, optimizer, criterion, my_nc_benchmark = self.init_sit()
+        model, optimizer, criterion, _ = self.init_sit()
         strategy = OnlineNaive(
             model,
             optimizer,
@@ -113,7 +113,7 @@ def run_strategy_no_boundaries(self, benchmark, cl_strategy):
         cl_strategy.evaluator.loggers = [TextLogger(sys.stdout)]
 
         results = []
-        cl_strategy.train(benchmark.train_stream, num_workers=0)
+        cl_strategy.train(benchmark.train_online_stream, num_workers=0)
         print("Training completed")
 
         assert cl_strategy.clock.train_exp_counter > 0
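
The core change is the first hunk: in `split_validation_class_balanced`, the targets must be gathered with the same shuffled index tensor (`exp_indices_t`), not the original `exp_indices`, or the labels no longer line up with the shuffled samples. The sketch below is an illustrative toy example of that mismatch (the tensor values are made up, not taken from the benchmark), not part of the patch itself:

```python
import torch

# Toy targets and indices standing in for one experience.
targets_as_tensor = torch.tensor([0, 0, 1, 1, 2, 2])
exp_indices = [0, 1, 2, 3, 4, 5]

# Shuffle the experience indices, as the splitter does.
exp_indices_t = torch.as_tensor(exp_indices)[torch.randperm(len(exp_indices))]

# Buggy: gathering with the *unshuffled* list keeps targets in the original
# order, so targets[i] is generally not the label of sample exp_indices_t[i].
wrong_targets = targets_as_tensor[exp_indices]

# Fixed: gather with the shuffled tensor so targets stay aligned with the
# shuffled indices, which the class-balanced split relies on.
exp_targets = targets_as_tensor[exp_indices_t]

assert torch.equal(exp_targets, targets_as_tensor[exp_indices_t])
```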