chore: Bumps torch & torchvision versions (#275)
* chore: Bumps torch & torchvision versions

* style: Fixes bandit

* style: Fixes typing

* chore: Adds onnx to test

* test: Fixes tests
frgfm authored Sep 15, 2023
1 parent 6722472 commit 505b8b6
Showing 5 changed files with 16 additions and 11 deletions.
6 changes: 5 additions & 1 deletion .github/verify_labels.py
@@ -51,7 +51,11 @@


 def query_repo(cmd: str, *, accept) -> Any:
-    response = requests.get(f"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}", headers=dict(Accept=accept))
+    response = requests.get(
+        f"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}",
+        headers={"Accept": accept},
+        timeout=5,
+    )
     return response.json()


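The "style: Fixes bandit" item refers to bandit rule B113, which flags any requests call made without an explicit timeout. The sketch below restates the pattern applied above in runnable form; the GH_ORG/GH_REPO values and the example query are illustrative assumptions rather than content taken from the file.

# Minimal sketch of the pattern applied above: every requests call carries an
# explicit timeout so it cannot hang indefinitely (bandit rule B113).
# GH_ORG / GH_REPO values and the example query are assumptions for illustration.
from typing import Any

import requests

GH_ORG = "frgfm"
GH_REPO = "Holocron"


def query_repo(cmd: str, *, accept: str) -> Any:
    response = requests.get(
        f"https://api.github.com/repos/{GH_ORG}/{GH_REPO}/{cmd}",
        headers={"Accept": accept},
        timeout=5,  # seconds; fail fast instead of blocking the CI job
    )
    return response.json()


# Hypothetical usage: fetch the labels attached to pull request #275
labels = query_repo("issues/275/labels", accept="application/vnd.github.v3+json")
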
2 changes: 1 addition & 1 deletion api/tests/conftest.py
@@ -9,7 +9,7 @@
 @pytest.fixture(scope="session")
 def mock_classification_image(tmpdir_factory):
     url = "https://m.media-amazon.com/images/I/517Nh08xqkL._AC_SX425_.jpg"
-    return requests.get(url).content
+    return requests.get(url, timeout=5).content


 @pytest_asyncio.fixture(scope="function")
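
The fixture above downloads raw JPEG bytes once per test session, now with the same explicit timeout. As a hedged illustration only, a consuming test could decode those bytes to confirm the download is a valid image; Pillow and the test name are assumptions, not part of the repository.

# Hypothetical consumer of the session-scoped fixture; Pillow is an assumed
# dependency used only for this illustration.
from io import BytesIO

from PIL import Image


def test_mock_image_is_decodable(mock_classification_image: bytes) -> None:
    img = Image.open(BytesIO(mock_classification_image))
    assert img.size[0] > 0 and img.size[1] > 0
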
8 changes: 4 additions & 4 deletions holocron/optim/wrapper.py
@@ -67,8 +67,8 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
         # Update last key of class dict
         self.__setstate__({"base_state_dict": self.base_optimizer.state_dict()})

-    def zero_grad(self):
-        self.base_optimizer.zero_grad()
+    def zero_grad(self, set_to_none: bool = True) -> None:
+        self.base_optimizer.zero_grad(set_to_none)

     def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         """Performs a single optimization step.
@@ -200,8 +200,8 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
         # Update last key of class dict
         self.__setstate__({"base_state_dict": self.base_optimizer.state_dict()})

-    def zero_grad(self):
-        self.base_optimizer.zero_grad()
+    def zero_grad(self, set_to_none: bool = True) -> None:
+        self.base_optimizer.zero_grad(set_to_none)

     def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         """Performs a single optimization step.
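
The "style: Fixes typing" change aligns the wrapper optimizers with the torch>=2.0 signature of Optimizer.zero_grad, which accepts set_to_none and defaults it to True. Below is a minimal sketch of the delegation pattern, assuming only the public torch.optim API; the class name is illustrative.

# Minimal sketch of the delegation pattern used by the wrappers above,
# assuming only the public torch.optim.Optimizer API; class name is illustrative.
from typing import Callable, Optional

import torch


class OptimizerWrapper:
    def __init__(self, base_optimizer: torch.optim.Optimizer) -> None:
        self.base_optimizer = base_optimizer

    def zero_grad(self, set_to_none: bool = True) -> None:
        # Forward the flag so behaviour matches torch>=2.0, where gradients are
        # reset to None by default instead of being zero-filled in place.
        self.base_optimizer.zero_grad(set_to_none)

    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        return self.base_optimizer.step(closure)
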
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -29,8 +29,8 @@ classifiers = [
 ]
 dynamic = ["version"]
 dependencies = [
-    "torch>=1.9.1,<2.0.0",
-    "torchvision>=0.10.1,<1.0.0",
+    "torch>=2.0.0,<3.0.0",
+    "torchvision>=0.15.0,<1.0.0",
     "tqdm>=4.1.0",
     "numpy>=1.17.2,<2.0.0",
     "fastprogress>=1.0.0,<2.0.0",
@@ -48,6 +48,7 @@ test = [
     # cf. https://github.com/frgfm/Holocron/security/dependabot/4
     "pytest>=7.2.0",
     "coverage[toml]>=4.5.4",
+    "onnx>=1.13.0,<2.0.0",
 ]
 training = [
     "wandb>=0.10.31,<1.0.0",
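
With torch moved to the >=2.0,<3.0 range and onnx added to the test extra, the suite can validate ONNX exports against the installed runtime. The following is a hedged smoke-test sketch; the toy model and file layout are assumptions, not Holocron's actual tests.

# Hedged sketch of an ONNX export smoke test enabled by the new onnx test
# dependency; the toy model and file layout are assumptions for illustration.
import onnx
import torch
from torch import nn


def test_onnx_export(tmp_path):
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 10))
    model.eval()
    dummy = torch.rand(1, 3, 32, 32)
    onnx_path = str(tmp_path / "model.onnx")
    torch.onnx.export(model, dummy, onnx_path, input_names=["input"], output_names=["logits"])
    # onnx.checker validates the structure of the exported graph
    onnx.checker.check_model(onnx.load(onnx_path))
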
6 changes: 3 additions & 3 deletions tests/test_trainer.py
@@ -132,20 +132,20 @@ def _test_trainer(
     learner.gradient_acc = 2
     learner._reset_opt(lr)
     train_iter = iter(learner.train_loader)
-    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad)
+    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad and p.grad is not None)
     x, target = next(train_iter)
     x, target = learner.to_cuda(x, target)
     loss = learner._get_loss(x, target)
     learner._backprop_step(loss)
     assert torch.equal(learner.model.state_dict()[ref_param], model_w)
-    assert all(torch.any(p.grad != 0) for p in learner.model.parameters() if p.requires_grad)
+    assert all(torch.any(p.grad != 0) for p in learner.model.parameters() if p.requires_grad and p.grad is not None)
     # With accumulation of 2, the update step is performed every 2 batches
     x, target = next(train_iter)
     x, target = learner.to_cuda(x, target)
     loss = learner._get_loss(x, target)
     learner._backprop_step(loss)
     assert not torch.equal(learner.model.state_dict()[ref_param], model_w)
-    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad)
+    assert all(torch.all(p.grad == 0) for p in learner.model.parameters() if p.requires_grad and p.grad is not None)


 def test_classification_trainer(tmpdir_factory):
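
The added "p.grad is not None" guard is needed because, with the wrappers now forwarding set_to_none=True (the torch>=2.0 default), gradients are None rather than zero tensors after zero_grad, and comparing None against 0 would raise. A small sketch of that behaviour, assuming only the public torch API:

# Small demonstration of why the grad-is-not-None guard matters; only the
# public torch API is assumed here.
import torch
from torch import nn

model = nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

model(torch.rand(8, 4)).sum().backward()
assert all(p.grad is not None for p in model.parameters())

opt.zero_grad(set_to_none=True)  # default behaviour in torch>=2.0
assert all(p.grad is None for p in model.parameters())

# Hence the test only inspects gradients that actually exist:
assert all(torch.all(p.grad == 0) for p in model.parameters() if p.requires_grad and p.grad is not None)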
