Commit cc1ea82
[fix] cleaning
ctr26 committed Sep 14, 2024
1 parent 98e545c commit cc1ea82
Showing 8 changed files with 13 additions and 495 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -69,7 +69,7 @@ If you don't have a Kaggle account you must create one and then follow the next
You can find instructions in their [documentation](https://github.com/Kaggle/kaggle-api#api-credentials)
4. After that, add your username and API key to a file called `kaggle.json` at `~/.kaggle/kaggle.json` in your home directory, then restrict its permissions with `chmod 600 ~/.kaggle/kaggle.json` (see the sketch after this diff)
5. Don't forget to accept the conditions for the "2018 Data Science Bowl" on the Kaggle website.
-Otherwise you would not be able to pull this data from the command line.
+   Otherwise you would not be able to pull this data from the command line.

### 4. Developer Installation:

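As referenced in step 4 above, a minimal Python sketch of the same credential setup; `write_kaggle_credentials` is a hypothetical helper (not part of this repository or the Kaggle API) and the username/key values are placeholders:

import json
import os
from pathlib import Path

def write_kaggle_credentials(username: str, key: str) -> Path:
    # Create ~/.kaggle/kaggle.json and restrict it to the owner,
    # the same effect as `chmod 600 ~/.kaggle/kaggle.json`.
    cred_dir = Path.home() / ".kaggle"
    cred_dir.mkdir(exist_ok=True)
    cred_file = cred_dir / "kaggle.json"
    cred_file.write_text(json.dumps({"username": username, "key": key}))
    os.chmod(cred_file, 0o600)
    return cred_file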
25 changes: 0 additions & 25 deletions bioimage_embed/datasets/__init__.py

This file was deleted.

102 changes: 0 additions & 102 deletions bioimage_embed/hydra.py

This file was deleted.

7 changes: 7 additions & 0 deletions bioimage_embed/inference.py
@@ -0,0 +1,7 @@
+# TODO
+
+"""
+The package is currently missing inference functionality.
+"""
7 changes: 4 additions & 3 deletions bioimage_embed/lightning/pyro.py
@@ -7,6 +7,10 @@


class LitAutoEncoderPyro(pl.LightningModule):
+    """
+    WIP: unsupported.
+    """
+
    def __init__(self, model, batch_size=1, learning_rate=1e-3):
        super().__init__()
        # self.autoencoder = AutoEncoder(batch_size, 1)
@@ -59,6 +63,3 @@ def pyro_training_step(self, train_batch, batch_idx):

    def training_step(self, train_batch, batch_idx):
        return self.torch_training_step(train_batch, batch_idx)
-
-    def training_step(self, train_batch, batch_idx):
-        return self.pyro_training_step(train_batch, batch_idx)
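The deletion above fixes a real bug rather than just noise: in a Python class body a second `def training_step` does not overload the first, it silently replaces it, so the class was unintentionally routing training through the Pyro path. A self-contained illustration (names are illustrative):

class Example:
    def step(self):
        return "first"

    def step(self):  # silently replaces the definition above
        return "second"

print(Example().step())  # prints "second"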
12 changes: 0 additions & 12 deletions bioimage_embed/lightning/torch.py
@@ -143,16 +143,6 @@ def eval_step(self, batch, batch_idx):
"""
return self.predict_step(batch, batch_idx)

-    # def lr_scheduler_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
-    #     # Implement your own logic for updating the lr scheduler
-    #     # This method will be called at each training step
-    #     # Update the lr scheduler based on the provided arguments
-    #     # You can access the lr scheduler using `self.lr_schedulers()`
-
-    #     # Example:
-    #     for lr_scheduler in self.lr_schedulers():
-    #         lr_scheduler.step()

    def timm_optimizers(self, model):
        optimizer = optim.create_optimizer(self.args, model.parameters())
        lr_scheduler = scheduler.create_scheduler(self.args, optimizer)[0]
@@ -168,8 +158,6 @@ def timm_to_lightning(self, optimizer, lr_scheduler):
        }

    def configure_optimizers(self):
-        # optimizer = optim.create_optimizer(self.args, self.model.parameters())
-        # lr_scheduler = scheduler.create_scheduler(self.args, optimizer)[0]
        optimizer, lr_scheduler = self.timm_optimizers(self.model)
        return self.timm_to_lightning(optimizer, lr_scheduler)
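For context: `configure_optimizers` is the Lightning hook that supplies the optimizer and scheduler, and timm's `create_scheduler` returns a `(scheduler, num_epochs)` tuple, hence the `[0]` above. A plausible shape for what `timm_to_lightning` returns, inferred from the visible closing brace and the contract Lightning accepts; the exact keys are an assumption, not taken from this diff:

# Assumed reconstruction, not the repository's actual method body.
def timm_to_lightning(self, optimizer, lr_scheduler):
    return {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": lr_scheduler, "interval": "epoch"},
    }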

