Skip to content

Commit

Permalink
Update tests
Browse files Browse the repository at this point in the history
  • Loading branch information
sharkovsky committed Dec 27, 2022
1 parent 568390c commit ec0741b
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 6 deletions.
1 change: 1 addition & 0 deletions tests/test_fedbiosklearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,6 +203,7 @@ def test_sklearntrainingplanpartialfit_02_training_routine(self):
train_data_loader = test_data_loader = NPDataLoader(dataset=test_x, target=test_y, batch_size=1)
training_plan.set_data_loaders(train_data_loader=train_data_loader, test_data_loader=test_data_loader)
training_plan._training_args['epochs'] = 1
training_plan._training_args['batch_maxnum'] = None
with patch.object(training_plan, '_train_over_batch', return_value=0.) as mocked_train:
training_plan._training_routine(history_monitor=None)
self.assertEqual(mocked_train.call_count, 4)
Expand Down
15 changes: 10 additions & 5 deletions tests/test_torchnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -462,7 +462,7 @@ def test_torch_nn_04_logging_progress_computation(self):
mock_dataset = MagicMock(pec=Dataset)

tp.training_data_loader = MagicMock(spec=DataLoader(mock_dataset), batch_size=batch_size)
tp._training_args = {'batch_size': batch_size, 'optimizer_args': {}}
tp._training_args = {'batch_size': batch_size, 'optimizer_args': {}, 'epochs': 1, 'batch_maxnum': None}
mocked_loss_result = MagicMock(spec=torch.Tensor, return_value = torch.Tensor([0.]))
mocked_loss_result.item.return_value = 0.
tp.training_step = lambda x, y: mocked_loss_result
Expand Down Expand Up @@ -527,7 +527,7 @@ def setup_tp(tp, num_samples, batch_size, num_updates):
(MagicMock(spec=torch.Tensor), MagicMock(spec=torch.Tensor)), num_batches_per_epoch))
tp.training_data_loader.__len__.return_value = num_batches_per_epoch
tp._num_updates = num_updates
tp._training_args = {'batch_size': batch_size}
tp._training_args = {'batch_size': batch_size, 'batch_maxnum': None}
return tp

# Case where we do 1 single epoch with 1 batch
Expand Down Expand Up @@ -584,8 +584,7 @@ def set_training_plan(model, aggregator_name:str, loss_value: float = .0):
"""
tp = TorchTrainingPlan()
tp._set_device = MagicMock()
tp._batch_maxnum = 0


tp._model = copy.deepcopy(model)
tp._log_interval = 1
tp.training_data_loader = MagicMock()
Expand Down Expand Up @@ -620,6 +619,10 @@ def training_step(instance, data, target):
tp._optimizer_args = {"lr" : 1e-3}
tp._optimizer = torch.optim.Adam(tp._model.parameters(), **tp._optimizer_args)
tp._dp_controller = FakeDPController()
tp._training_args = {'batch_size': batch_size,
'optimizer_args': tp._optimizer_args,
'epochs': 1,
'batch_maxnum': None}
return tp

model = torch.nn.Linear(3, 1)
Expand Down Expand Up @@ -748,6 +751,7 @@ def test_data_loader_returns_tensors(self, patch_tensor_backward):
tp = TorchTrainingPlan()
tp._model = torch.nn.Module()
tp._optimizer = MagicMock(spec=torch.optim.Adam)
tp._training_args = {'batch_size': batch_size, 'epochs': 1, 'batch_maxnum': None}

tp.training_data_loader = MagicMock(spec=DataLoader(MagicMock(spec=Dataset)), batch_size=2, dataset=[1, 2])
gen_load_data_as_tuples = TestTorchNNTrainingRoutineDataloaderTypes.iterate_once(
Expand All @@ -771,6 +775,7 @@ def test_data_loader_returns_tuples(self, patch_tensor_backward):
tp = TorchTrainingPlan()
tp._model = torch.nn.Module()
tp._optimizer = MagicMock(spec=torch.optim.Adam)
tp._training_args = {'batch_size': batch_size, 'epochs': 1, 'batch_maxnum': None}

mock_dataset = MagicMock(spec=Dataset())
tp.training_data_loader = MagicMock(spec=DataLoader(mock_dataset), batch_size=3)
Expand All @@ -797,7 +802,7 @@ def test_data_loader_returns_dicts(self, patch_tensor_backward):
tp = TorchTrainingPlan()
tp._model = torch.nn.Module()
tp._optimizer = MagicMock(spec=torch.optim.Adam)
tp._training_args = {'batch_size': batch_size}
tp._training_args = {'batch_size': batch_size, 'epochs': 1, 'batch_maxnum': None}

# Set training data loader ---------------------------------------------------------------------------
mock_dataset = MagicMock(spec=Dataset())
Expand Down
12 changes: 11 additions & 1 deletion tests/test_training_args.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,15 @@ def setUp(self):
def tearDown(self):
pass

def assertSchemeEquality(self, scheme1, scheme2):
    """Assert that two training-args schemes are structurally equivalent.

    Checks that both schemes declare the same set of keys, and that for
    each key the number of validation rules, the ``required`` flag, and
    the ``default`` value all match. Rules are compared by count only,
    presumably because rule callables do not compare equal by value —
    TODO confirm against the scheme producer.

    Args:
        scheme1: mapping of key -> {'rules': list, 'required': bool, 'default': Any}
        scheme2: second scheme mapping with the same structure

    Raises:
        AssertionError: if the schemes differ in any compared attribute.
    """
    self.assertEqual(set(scheme1.keys()), set(scheme2.keys()))
    for key in scheme1.keys():
        rules1 = scheme1[key]['rules']
        # Bug fix: this previously read scheme1 again (copy-paste error),
        # so the rule-count check below compared a list against itself
        # and could never fail.
        rules2 = scheme2[key]['rules']
        self.assertEqual(len(rules1), len(rules2))
        self.assertEqual(scheme1[key]['required'], scheme2[key]['required'])
        self.assertEqual(scheme1[key]['default'], scheme2[key]['default'])

def test_training_args_01_init(self):
"""
simple initialisation tests
Expand Down Expand Up @@ -49,8 +58,9 @@ def test_training_args_02_scheme(self):
"""
play with schemes
"""

t = TrainingArgs()
self.assertEqual(t.scheme(), TrainingArgs.default_scheme())
self.assertSchemeEquality(t.scheme(), TrainingArgs.default_scheme())

my_added_rule = {
'fun': {
Expand Down

0 comments on commit ec0741b

Please sign in to comment.