Commit 7315c0d

[ref] cleaning up PR
ctr26 committed Oct 1, 2024
1 parent 5f7a83f commit 7315c0d
Showing 16 changed files with 8 additions and 2,080 deletions.
40 changes: 0 additions & 40 deletions bioimage_embed/lightning/tests/test_channel_aware.py

This file was deleted.

37 changes: 0 additions & 37 deletions bioimage_embed/lightning/torch.py
@@ -18,14 +18,6 @@
     variational_loss -> loss - recon_loss
     """
-
-# x_recon -> output of the model
-# z -> latent space
-# data -> input to the model
-# target -> target for supervised learning
-# recon_loss -> reconstruction loss
-# loss -> total loss
-# variational_loss -> loss - recon_loss


 class AutoEncoder(pl.LightningModule):
     args = argparse.Namespace(
@@ -66,8 +58,6 @@ def __init__(self, model, args=SimpleNamespace()):
         # TODO update all models to use this for export to onxx
         # self.example_input_array = torch.randn(1, *self.model.input_dim)
         # self.model.train()
-        # keep a handle on metrics logged by the model
-        self.metrics = {}

     def forward(self, x: torch.Tensor) -> ModelOutput:
         """
@@ -153,31 +143,6 @@ def eval_step(self, batch, batch_idx):
         """
         return self.predict_step(batch, batch_idx)

-    def test_step(self, batch, batch_idx):
-        # x, y = batch
-        model_output = self.eval_step(batch, batch_idx)
-        self.log_dict(
-            {
-                "loss/test": model_output.loss,
-                "mse/test": F.mse_loss(model_output.recon_x, model_output.data),
-                "recon_loss/test": model_output.recon_loss,
-                "variational_loss/test": model_output.loss - model_output.recon_loss,
-            }
-        )
-        return model_output.loss
-
-    # Fangless function to be overloaded later
-    def batch_to_xy(self, batch):
-        x, y = batch
-        return x, y
-
-    def eval_step(self, batch, batch_idx):
-        """
-        This function should be overloaded in the child class to implement the evaluation logic.
-        """
-        model_output = self.predict_step(batch, batch_idx)
-        return model_output
-
     # def lr_scheduler_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
     #     # Implement your own logic for updating the lr scheduler
     #     # This method will be called at each training step
@@ -227,8 +192,6 @@ def log_tensorboard(self, model_output, x):
             self.global_step,
         )

-class AE(AutoEncoder):
-    pass

 class AE(AutoEncoder):
     pass
97 changes: 0 additions & 97 deletions bioimage_embed/models/o2vae_shapeembed_integration.diff

This file was deleted.

12 changes: 1 addition & 11 deletions bioimage_embed/shapes/lightning.py
@@ -41,8 +41,8 @@ def eval_step(self, batch, batch_idx):
             [
                 loss_ops.diagonal_loss(),
                 loss_ops.symmetry_loss(),
-                loss_ops.non_negative_loss(),
                 # loss_ops.triangle_inequality(),
+                loss_ops.non_negative_loss(),
                 # loss_ops.clockwise_order_loss(),
             ]
         )
@@ -68,16 +68,6 @@ def __init__(self, model, args=SimpleNamespace()):
         super().__init__(model, args)


-class MaskEmbed(MaskEmbedMixin, AutoEncoderUnsupervised):
-    def __init__(self, model, args=SimpleNamespace()):
-        super().__init__(model, args)
-
-
-class MaskEmbedSupervised(MaskEmbedMixin, AutoEncoderSupervised):
-    def __init__(self, model, args=SimpleNamespace()):
-        super().__init__(model, args)
-
-
 class FixedOutput(nn.Module):
     def __init__(self, tensor):
         super().__init__()
32 changes: 7 additions & 25 deletions bioimage_embed/tests/test_cli.py
@@ -3,40 +3,23 @@
 from pathlib import Path
 from typer.testing import CliRunner

-# def test_main_creates_config():
-#     # Arrange
-#     config_path = "test_conf"
-#     job_name = "test_app"
-
 runner = CliRunner()


 @pytest.fixture
 def config_dir():
     return "test_conf"

-# # Act
-# main(config_path=config_path, job_name=job_name)

-# # Assert
-# assert os.path.exists(config_path), "Config directory was not created"
-# assert os.path.isfile(os.path.join(config_path, "config.yaml")), "Config file was not created"
+@pytest.fixture
+def config_file():
+    return "config.yaml"
+
-# # Clean up
-# os.remove(os.path.join(config_path, "config.yaml"))
-# os.rmdir(config_path)

-# @pytest.mark.parametrize("config_path, job_name", [
-#     ("conf", "test_app"),
-#     ("another_conf", "another_job")
-# ])
-# def test_hydra_initializes(config_path, job_name):
-#     # Act
-#     main(config_path=config_path, job_name=job_name)
+@pytest.fixture
+def config_path(config_dir, config_file):
+    return Path(config_dir).joinpath(config_file)

-# # Assert
-# # Here you can assert specifics about the cfg object if needed.
-# # Since main does not return anything, you might need to adjust
-# # the main function to return the cfg for more thorough testing.

 @pytest.fixture
 def config_directory_setup(config_dir, config_file, config_path):
@@ -84,7 +67,6 @@ def test_get_default_config(cfg):
     # cfg.recipe.max_epochs = 1

-

 # def test_cli():
 #     # This test checks if the CLI correctly handles the dataset target input
 #     result = runner.invoke(app, ["bie_train", "--dataset-target", "bioimage_embed.datasets.FakeImageFolder"])
34 changes: 0 additions & 34 deletions bioimage_embed/tests/test_lightning.py
@@ -15,17 +15,6 @@
 from torch.utils.data import DataLoader, TensorDataset
 from bioimage_embed.lightning.dataloader import StratifiedSampler

-# LitAutoEncoderTorch:3
-# RGBLitAutoEncoderTorch:3
-# GrayscaleLitAutoEncoderTorch:1
-# ChannelAwareLitAutoEncoderTorch:1,3,5
-
-model_channel_map = {
-    LitAutoEncoderTorch: [3],
-    RGBLitAutoEncoderTorch: [3],
-    GrayscaleLitAutoEncoderTorch: [1],
-    ChannelAwareLitAutoEncoderTorch: [1, 3, 5],
-}

 torch.manual_seed(42)

@@ -117,10 +106,6 @@ def input_dim(image_dim, channel_dim):
 def data(input_dim):
     return torch.rand(*input_dim)

-@pytest.fixture(params=_model_classes)
-def model_class(request):
-    return request.param
-

 @pytest.fixture()
 def dataset(samples, input_dim, transform, classes=2):
@@ -134,31 +119,12 @@ def dataset(samples, input_dim, transform, classes=2):
         transform=transform,
     )

-# @pytest.fixture()
-# def lit_model(model):
-#     return LitAutoEncoderTorch(model)
-
-
-@pytest.fixture()
-def model_and_batch(model_name, batch_size):
-    # Define combinations to ignore
-    ignored_combinations = [
-        ('ModelA', 1),
-        ('ModelB', 2),
-        # Add more combinations as needed
-    ]
-
-    if (model_name, batch_size) in ignored_combinations:
-        pytest.skip(f"Ignoring combination of {model_name} and batch size {batch_size}")
-
-    return model_name, batch_size
-
 @pytest.fixture(params=[AESupervised, AEUnsupervised])
 def lit_model_wrapper(request):
     return request.param



 # @pytest.mark.skip(reason="Dictionaries not allowed")
 # def test_export_onxx(data, lit_model):
 #     return lit_model.to_onnx("model.onnx", data)
2 changes: 0 additions & 2 deletions scripts/shapeembed/__init__.py

This file was deleted.

42 changes: 0 additions & 42 deletions scripts/shapeembed/common_helpers.py

This file was deleted.

