feat: save merged config file
     - merged config file: model/dataset/runner config

 refs: #22
ssaru committed Dec 21, 2020
1 parent db48dfe commit d7af62a
Showing 8 changed files with 161 additions and 121 deletions.
2 changes: 1 addition & 1 deletion conf/conv/model/model.yml
@@ -6,7 +6,7 @@ model:
height: 32
channels: &in_channels 3
classes: &out_feature 10
mode: &mode stochastic # stochastic or deterministic
mode: &mode deterministic # stochastic or deterministic

feature_layers:
conv:
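For context, the mode switch above selects how weights are binarized in the forward pass. Below is a minimal sketch of the two modes as described in the BinaryConnect paper; the binarize helper name is illustrative and not taken from this repository.

import torch

def binarize(weights: torch.Tensor, mode: str = "deterministic") -> torch.Tensor:
    if mode == "deterministic":
        # Deterministic mode: binarize by sign, mapping w >= 0 to +1 and w < 0 to -1.
        return torch.where(weights >= 0, torch.ones_like(weights), -torch.ones_like(weights))
    if mode == "stochastic":
        # Stochastic mode: binarize to +1 with probability p = hard_sigmoid(w) = clip((w + 1) / 2, 0, 1).
        p = torch.clamp((weights + 1.0) / 2.0, 0.0, 1.0)
        return torch.where(torch.rand_like(weights) < p, torch.ones_like(weights), -torch.ones_like(weights))
    raise ValueError(f"unknown mode: {mode}")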
13 changes: 7 additions & 6 deletions conf/conv/runner/runner.yml
@@ -1,4 +1,5 @@
experiment_title: &exp_title binarized_conv_stochastic
project: &project BinaryConnect
experiments: &title binarized_conv(deterministic)

runner:
type: Runner
@@ -14,14 +15,14 @@ runner:
params:
# initial points
# [0.5, 0.25, 0.1, 0.05, 0.025, 0.01]
lr: 0.25
lr: 0.1

scheduler: # ExponentialLR in paper
type: ExponentialLR
params:
# initial points
# [0.98, 0.97, 0.96, 0.95, 0.94, 0.93, 0.92]
gamma: .96
gamma: .5

# type: StepLR
# params:
@@ -70,12 +71,12 @@ runner:
earlystopping:
type: EarlyStopping
params:
# monitor: val_acc
monitor: valid/accuracy
mode: max
patience: 500
verbose: True

experiments:
name: binaryconnect_conv
project_name: *exp_title
name: *title
project_name: *project
output_dir: output/runs
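The &project and &title anchors introduced here are plain YAML anchors, so they are resolved when the file is parsed. A quick sketch (not from this repository) showing that the aliases are already expanded once the config is loaded with OmegaConf:

from omegaconf import OmegaConf

# Anchors and aliases (&project / *project, &title / *title) are resolved by the
# YAML parser during load, so the resulting DictConfig holds the literal strings.
cfg = OmegaConf.load("conf/conv/runner/runner.yml")
print(OmegaConf.to_yaml(cfg))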
6 changes: 5 additions & 1 deletion conf/mlp/runner/runner.yml
@@ -1,3 +1,6 @@
project: &project BinaryConnect
experiments: &title binarized_conv(deterministic)

runner:
type: Runner

@@ -47,5 +50,6 @@ runner:
verbose: True

experiments:
name: binaryconnect_mlp
name: *title
project_name: *project
output_dir: output/runs
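Both runner configs now carry the shared project/experiments anchors, which ties into the commit's goal of saving a single merged config file. A hedged sketch of how the per-component configs could be merged and written back out with OmegaConf; the dataset path and output filename are assumptions, not taken from this diff:

from omegaconf import OmegaConf

# Load the per-component configs (the dataset path is an assumed example).
model_cfg = OmegaConf.load("conf/mlp/model/model.yml")
dataset_cfg = OmegaConf.load("conf/mlp/dataset/dataset.yml")
runner_cfg = OmegaConf.load("conf/mlp/runner/runner.yml")

# Merge into one DictConfig and save it as a single file.
merged = OmegaConf.merge(model_cfg, dataset_cfg, runner_cfg)
OmegaConf.save(config=merged, f="output/merged_config.yml")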
2 changes: 1 addition & 1 deletion requirements.txt
@@ -5,5 +5,5 @@ omegaconf==2.0.1rc11
type-docopt==0.1.0
isort==5.4.0
black==19.10b0
wandb==0.9.4
wandb==0.10.8
torchsummary==1.5.1
9 changes: 7 additions & 2 deletions src/nn/binarized_conv2d.py
@@ -1,7 +1,7 @@
from typing import Optional, Tuple, Union

import torch

from torch.nn.modules.utils import _pair
from src.ops.binarized_conv2d import binarized_conv2d


@@ -30,13 +30,18 @@ def __init__(
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
padding: Union[int, Tuple[int, int]] = (0, 0),
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
bias: Optional[torch.Tensor] = None,
padding_mode: str = "zeros",
mode: str = "deterministic",
) -> torch.nn.Conv2d:
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)

super().__init__(
in_channels,
out_channels,
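The new calls to _pair normalize scalar-or-tuple arguments before they reach the parent Conv2d constructor. A short illustration of the helper's behavior:

from torch.nn.modules.utils import _pair

print(_pair(3))       # (3, 3) - a scalar is broadcast to both spatial dims
print(_pair((1, 2)))  # (1, 2) - an existing tuple is passed through unchanged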
126 changes: 103 additions & 23 deletions src/runner/runner.py
@@ -7,32 +7,39 @@
from pytorch_lightning.core import LightningModule
from pytorch_lightning.metrics.functional import accuracy
from torch import optim
from torch.optim.lr_scheduler import ExponentialLR, StepLR, ReduceLROnPlateau
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau, StepLR

from src.utils import load_class


class Runner(LightningModule):
class TrainingContainer(LightningModule):
def __init__(self, model: nn.Module, config: DictConfig):
"""Model Container for Training
Args:
model (nn.Module): model to train
config (DictConfig): dataset/model/runner configuration as an OmegaConf DictConfig
"""
super().__init__()
self.model = model
self.save_hyperparameters(config)
self.config = config.runner
print(self.hparams)
print(self.config)

def forward(self, x):
return self.model(x)

def configure_optimizers(self):
opt_args = dict(self.config.optimizer.params)
opt_args.update({"params": self.model.parameters()})

opt = load_class(module=optim, name=self.config.optimizer.type, args=opt_args)

scheduler_args = dict(self.config.scheduler.params)
scheduler_args.update({"optimizer": opt})
scheduler = load_class(
module=optim.lr_scheduler, name=self.config.scheduler.type, args=scheduler_args
module=optim.lr_scheduler,
name=self.config.scheduler.type,
args=scheduler_args,
)

result = {"optimizer": opt, "lr_scheduler": scheduler}
@@ -41,7 +48,7 @@ def configure_optimizers(self):

return result
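load_class comes from src/utils.py and its body is not shown in this diff; judging from the call sites, it presumably looks a class up by name on the given module and instantiates it with the supplied arguments, roughly like the sketch below (an assumption, not the repository's actual implementation):

def load_class(module, name: str, args: dict):
    # Resolve the class by name on the module and instantiate it with args,
    # e.g. load_class(module=optim, name="SGD", args={"params": ..., "lr": 0.1}).
    return getattr(module, name)(**args)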

def _comm_step(self, x, y):
def shared_step(self, x, y):
y_hat = self(x)
loss = self.model.loss(y_hat, y)

@@ -52,56 +59,129 @@ def _comm_step(self, x, y):

def training_step(self, batch, batch_idx):
x, y = batch
_, loss, acc = self._comm_step(x, y)
_, loss, acc = self.shared_step(x, y)

self.log(
name="train/accuracy",
value=acc,
on_step=True,
on_epoch=False,
prog_bar=True,
logger=False,
)

self.log(
name="train_acc", value=acc, on_step=True, on_epoch=False, prog_bar=True, logger=False
name="train_accuracy",
value=acc,
on_step=True,
on_epoch=False,
prog_bar=True,
logger=False,
)

return {"loss": loss, "train_acc": acc}
return {
"loss": loss,
"train/accuracy": acc,
"train_loss": loss,
"train_accuracy": acc,
}
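Each metric is now logged under both a slash-namespaced key (train/accuracy), which W&B uses to group charts into dashboard sections, and a flat key (train_accuracy). The two self.log calls above could equally be collapsed into one log_dict call; this is an equivalent sketch, not code from the commit:

self.log_dict(
    {"train/accuracy": acc, "train_accuracy": acc},
    on_step=True,
    on_epoch=False,
    prog_bar=True,
    logger=False,
)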

def training_epoch_end(self, training_step_outputs):
acc, loss = 0, 0
num_of_outputs = len(training_step_outputs)

for log_dict in training_step_outputs:
acc += log_dict["train_acc"]
acc += log_dict["train/accuracy"]
loss += log_dict["loss"]

acc /= num_of_outputs
loss /= num_of_outputs

self.log(name="loss", value=loss, on_step=False, on_epoch=True, prog_bar=False, logger=True)
self.log(
name="train_acc", value=acc, on_step=False, on_epoch=True, prog_bar=False, logger=True
name="train/loss",
value=loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
self.log(
name="train_loss",
value=loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
self.log(
name="train/accuracy",
value=acc,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
self.log(
name="train_accuracy",
value=acc,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)

def validation_step(self, batch, batch_idx):
x, y = batch
_, loss, acc = self._comm_step(x, y)
_, loss, acc = self.shared_step(x, y)

return {"val_loss": loss, "val_acc": acc}
return {
"valid/loss": loss,
"valid/accuracy": acc,
"valid_loss": loss,
"valid_accuracy": acc,
}

def validation_epoch_end(self, validation_step_outputs):
acc, loss = 0, 0
num_of_outputs = len(validation_step_outputs)

for log_dict in validation_step_outputs:
acc += log_dict["val_acc"]
loss += log_dict["val_loss"]
acc += log_dict["valid/accuracy"]
loss += log_dict["valid/loss"]

acc = acc / num_of_outputs
loss = loss / num_of_outputs

self.log(
name="val_loss", value=loss, on_step=False, on_epoch=True, prog_bar=False, logger=True
name="valid/loss",
value=loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
self.log(
name="val_acc", value=acc, on_step=False, on_epoch=True, prog_bar=False, logger=True
name="valid/accuracy",
value=acc,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)

def test_step(self, batch, batch_idx):
result = self.validation_step(batch, batch_idx)
result.rename_keys({"val_loss": "loss", "val_acc": "acc"})

return result
self.log(
name="valid_loss",
value=loss,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
self.log(
name="valid_accuracy",
value=acc,
on_step=False,
on_epoch=True,
prog_bar=False,
logger=True,
)
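The valid/accuracy key logged here matches the monitor name the EarlyStopping block in conf/conv/runner/runner.yml now uses. A hedged sketch of how that callback would be wired to a Trainer (illustrative only, not part of this commit):

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping

# The monitor key must match one of the metric names logged above.
early_stopping = EarlyStopping(monitor="valid/accuracy", mode="max", patience=500, verbose=True)
trainer = Trainer(callbacks=[early_stopping])
# trainer.fit(TrainingContainer(model, config), ...)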
