pytorchSimpleModels.py
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, Dataset
class Pytorch_FullyConnectedClassifier(pl.LightningModule):
    def __init__(self, layerDimensions: list[tuple[int, int]], lr):
        super().__init__()
        # Build one Linear + ReLU block per (inDim, outDim) pair, then apply
        # LogSoftmax over the class dimension so NLLLoss can be used directly.
        layers = []
        for inDim, outDim in layerDimensions:
            layers.append(torch.nn.Linear(inDim, outDim))
            layers.append(torch.nn.ReLU())
        layers.append(torch.nn.LogSoftmax(dim=1))
        self.seq = torch.nn.Sequential(*layers)
        self.lr = lr

    def forward(self, x):
        return self.seq(x)

    def loss(self, y, y_gt):
        # NLLLoss expects log-probabilities, which the LogSoftmax provides.
        loss = torch.nn.NLLLoss()
        return loss(y, y_gt.long())

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=self.lr)

    def training_step(self, trainBatch, batch_idx):
        x, y = trainBatch
        return self.loss(self.forward(x), y)
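
# Illustrative usage sketch (not part of the original file): layerDimensions
# is a list of (in_features, out_features) pairs, so an assumed
# 784 -> 128 -> 10 digit classifier would be constructed like this:
#   clf = Pytorch_FullyConnectedClassifier([(784, 128), (128, 10)], lr=0.01)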

class Pytorch_FullyConnectedRegressor(pl.LightningModule):
    def __init__(self, layerDimensions: list[tuple[int, int]], lr):
        super().__init__()
        # Build one Linear + ReLU block per (inDim, outDim) pair. Note that
        # the final layer is also followed by ReLU, so outputs are non-negative.
        layers = []
        for inDim, outDim in layerDimensions:
            layers.append(torch.nn.Linear(inDim, outDim))
            layers.append(torch.nn.ReLU())
        self.seq = torch.nn.Sequential(*layers)
        self.lr = lr

    def forward(self, x):
        # Promote 1-D inputs to a (batch, 1) column, then squeeze the trailing
        # output dimension so predictions match the target shape.
        return self.seq(x if len(x.shape) > 1 else x[:, None]).squeeze()

    def loss(self, y, y_gt):
        loss = torch.nn.MSELoss()
        return loss(y, y_gt)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=self.lr)

    def training_step(self, trainBatch, batch_idx):
        x, y = trainBatch
        return self.loss(self.forward(x), y)
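
# Illustrative usage sketch (not part of the original file): an assumed
# 1 -> 16 -> 1 regressor; the trailing ReLU clamps predictions to >= 0.
#   reg = Pytorch_FullyConnectedRegressor([(1, 16), (16, 1)], lr=0.01)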

class Pytorch_LinearRegression(pl.LightningModule):
    def __init__(self, inputDim, lr):
        super().__init__()
        self.lr = lr
        self.lin = torch.nn.Linear(inputDim, 1)
        # Start from zero weights so training begins at a known baseline.
        torch.nn.init.constant_(self.lin.weight, 0.0)

    def forward(self, x):
        return self.lin(x if len(x.shape) > 1 else x[:, None]).squeeze()

    def loss(self, y, y_gt):
        loss = torch.nn.MSELoss()
        return loss(y, y_gt)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=self.lr)

    def training_step(self, trainBatch, batch_idx):
        x, y = trainBatch
        return self.loss(self.forward(x), y)

class SimpleDataset(Dataset):
    def __init__(self, x, y):
        self.x = torch.Tensor(x)
        self.y = torch.Tensor(y)

    def __getitem__(self, index):
        return (self.x[index, ...], self.y[index])

    def __len__(self):
        return self.x.shape[0]

class Pytorch_Simple_DataModule(pl.LightningDataModule):
    def __init__(self, x, y, batchSize):
        super().__init__()  # required so Lightning can manage the data module
        self.d = SimpleDataset(x, y)
        self.batchSize = batchSize

    def train_dataloader(self):
        return DataLoader(self.d, batch_size=self.batchSize)
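
# Minimal end-to-end sketch (illustrative addition, not part of the original
# file): fit the linear-regression model on synthetic 1-D data. Assumes numpy
# is installed alongside torch and pytorch_lightning; the data values and
# hyperparameters below are chosen purely for the example.
if __name__ == "__main__":
    import numpy as np

    xs = np.linspace(0.0, 1.0, 128, dtype=np.float32)
    ys = 2.0 * xs + 1.0  # ground truth: y = 2x + 1

    model = Pytorch_LinearRegression(inputDim=1, lr=0.1)
    data = Pytorch_Simple_DataModule(xs, ys, batchSize=16)

    trainer = pl.Trainer(max_epochs=20)
    trainer.fit(model, datamodule=data)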