diff --git a/.github/verify_labels.py b/.github/verify_labels.py
index 6b555eb55..08ba8aa4a 100644
--- a/.github/verify_labels.py
+++ b/.github/verify_labels.py
@@ -51,7 +51,11 @@ def query_repo(cmd: str, *, accept) -> Any:
-    response = requests.get(f"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}", headers=dict(Accept=accept))
+    response = requests.get(
+        f"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}",
+        headers={"Accept": accept},
+        timeout=5,
+    )
     return response.json()
diff --git a/api/tests/conftest.py b/api/tests/conftest.py
index aa7980b77..c9729203a 100644
--- a/api/tests/conftest.py
+++ b/api/tests/conftest.py
@@ -9,7 +9,7 @@
 @pytest.fixture(scope="session")
 def mock_classification_image(tmpdir_factory):
     url = "https://m.media-amazon.com/images/I/517Nh08xqkL._AC_SX425_.jpg"
-    return requests.get(url).content
+    return requests.get(url, timeout=5).content
 
 
 @pytest_asyncio.fixture(scope="function")
diff --git a/holocron/optim/wrapper.py b/holocron/optim/wrapper.py
index 777654015..bc137ed21 100644
--- a/holocron/optim/wrapper.py
+++ b/holocron/optim/wrapper.py
@@ -67,8 +67,8 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
         # Update last key of class dict
         self.__setstate__({"base_state_dict": self.base_optimizer.state_dict()})
 
-    def zero_grad(self):
-        self.base_optimizer.zero_grad()
+    def zero_grad(self, set_to_none: bool = True) -> None:
+        self.base_optimizer.zero_grad(set_to_none)
 
     def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         """Performs a single optimization step.
@@ -200,8 +200,8 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
         # Update last key of class dict
         self.__setstate__({"base_state_dict": self.base_optimizer.state_dict()})
 
-    def zero_grad(self):
-        self.base_optimizer.zero_grad()
+    def zero_grad(self, set_to_none: bool = True) -> None:
+        self.base_optimizer.zero_grad(set_to_none)
 
     def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         """Performs a single optimization step.
diff --git a/pyproject.toml b/pyproject.toml
index f14629daa..9c2468672 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,8 +29,8 @@ classifiers = [
 ]
 dynamic = ["version"]
 dependencies = [
-    "torch>=1.9.1,<2.0.0",
-    "torchvision>=0.10.1,<1.0.0",
+    "torch>=2.0.0,<3.0.0",
+    "torchvision>=0.15.0,<1.0.0",
     "tqdm>=4.1.0",
     "numpy>=1.17.2,<2.0.0",
     "fastprogress>=1.0.0,<2.0.0",
@@ -48,6 +48,7 @@ test = [
     # cf. https://github.com/frgfm/Holocron/security/dependabot/4
     "pytest>=7.2.0",
     "coverage[toml]>=4.5.4",
+    "onnx>=1.13.0,<2.0.0",
 ]
 training = [
     "wandb>=0.10.31,<1.0.0",
diff --git a/tests/test_trainer.py b/tests/test_trainer.py
index 2b3b61c04..93eb82a62 100644
--- a/tests/test_trainer.py
+++ b/tests/test_trainer.py
@@ -132,20 +132,20 @@ def _test_trainer(
     learner.gradient_acc = 2
     learner._reset_opt(lr)
     train_iter = iter(learner.train_loader)
-    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad)
+    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad and p.grad is not None)
     x, target = next(train_iter)
     x, target = learner.to_cuda(x, target)
     loss = learner._get_loss(x, target)
     learner._backprop_step(loss)
     assert torch.equal(learner.model.state_dict()[ref_param], model_w)
-    assert all(torch.any(p.grad != 0) for p in learner.model.parameters() if p.requires_grad)
+    assert all(torch.any(p.grad != 0) for p in learner.model.parameters() if p.requires_grad and p.grad is not None)
     # With accumulation of 2, the update step is performed every 2 batches
     x, target = next(train_iter)
     x, target = learner.to_cuda(x, target)
     loss = learner._get_loss(x, target)
     learner._backprop_step(loss)
     assert not torch.equal(learner.model.state_dict()[ref_param], model_w)
-    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad)
+    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad and p.grad is not None)
 
 
 def test_classification_trainer(tmpdir_factory):
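A note on the requests changes in .github/verify_labels.py and api/tests/conftest.py: requests.get applies no timeout by default, so a stalled connection can hang a CI script or test session indefinitely, and the added timeout=5 bounds both the connect and read phases. Below is a minimal sketch of the pattern; the URL is purely illustrative and not part of the diff.

import requests

# Without a timeout, a stalled server can block this call forever.
# A single number bounds both connect and read; a (connect, read)
# tuple sets them separately.
try:
    response = requests.get("https://example.com", timeout=5)
    response = requests.get("https://example.com", timeout=(3, 10))
except requests.exceptions.Timeout:
    ...  # fail fast or retry instead of hanging the job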
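The zero_grad changes in holocron/optim/wrapper.py mirror the torch>=2.0 bump in pyproject.toml: since PyTorch 2.0, Optimizer.zero_grad defaults to set_to_none=True, which drops .grad to None instead of refilling it with zeros. That is also why the assertions in tests/test_trainer.py now guard with p.grad is not None. A standalone sketch of the behavior difference (not Holocron code):

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

model(torch.randn(8, 4)).sum().backward()
opt.zero_grad(set_to_none=False)
# Gradients are kept as zero-filled tensors: torch.all(p.grad == 0) holds.
print([p.grad is None for p in model.parameters()])  # [False, False]

model(torch.randn(8, 4)).sum().backward()
opt.zero_grad()  # set_to_none=True is the default on torch>=2.0
# Gradients are now None, so torch.all(p.grad == 0) would raise a TypeError
# without the is-not-None guard.
print([p.grad is None for p in model.parameters()])  # [True, True]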
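For context on the trainer test: the accumulation-of-2 behavior it asserts follows the usual gradient-accumulation pattern sketched below. This is a hedged, self-contained illustration with made-up names, not Holocron's _backprop_step internals: gradients build up across acc batches, and the optimizer steps, then clears them, only on every acc-th batch, so weights move on the second call and gradients read as cleared right after.

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
acc = 2  # accumulate gradients over 2 batches

for idx in range(4):
    x, target = torch.randn(8, 4), torch.randn(8, 2)
    loss = torch.nn.functional.mse_loss(model(x), target) / acc  # scale to match one full batch
    loss.backward()  # grads accumulate across iterations
    if (idx + 1) % acc == 0:
        optimizer.step()       # weights only move every acc batches
        optimizer.zero_grad()  # grads become None (set_to_none=True default)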