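"""eval.py: evaluate trained CutPaste / ProjectionNet models on the MVTec AD dataset.

For every requested defect type the script loads the matching checkpoint from
--model_dir, embeds the test images, fits a Gaussian density model on embeddings of
the defect-free training images, and reports the ROC AUC of the resulting anomaly
scores. Optionally it saves ROC and t-SNE plots and writes all AUCs to a CSV file.
"""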
from sklearn.metrics import roc_curve, auc
from sklearn.manifold import TSNE
from sklearn.utils import shuffle
from torchvision import transforms
from torch.utils.data import DataLoader
import torch
from dataset import MVTecAT
from cutpaste import CutPaste, cut_paste_collate_fn
from model import ProjectionNet
import matplotlib.pyplot as plt
import argparse
from pathlib import Path
from collections import defaultdict
from density import GaussianDensitySklearn, GaussianDensityTorch
import pandas as pd
from utils import str2bool
# module-level cache so repeated eval_model calls (e.g. during training) can reuse the test dataset
test_data_eval = None
test_transform = None
cached_type = None
def get_train_embeds(model, size, defect_type, transform, device):
    """Embed the defect-free training split; these embeddings are used to fit the density model."""
    train_data = MVTecAT("Data", defect_type, size, transform=transform, mode="train")

    dataloader_train = DataLoader(train_data, batch_size=64,
                                  shuffle=False, num_workers=0)

    train_embed = []
    with torch.no_grad():
        for x in dataloader_train:
            embed, logit = model(x.to(device))
            train_embed.append(embed.cpu())
    train_embed = torch.cat(train_embed)
    return train_embed
def eval_model(modelname, defect_type, device="cpu", save_plots=False, size=256,
               show_training_data=True, model=None, train_embed=None, head_layer=8,
               density=GaussianDensityTorch()):
    """Evaluate one checkpoint on one MVTec defect type and return the ROC AUC of the anomaly scores."""
    # create test dataset (cached across calls for the same defect type)
    global test_data_eval, test_transform, cached_type
    # TODO: cache is only nice during training. do we need it?
    if test_data_eval is None or cached_type != defect_type:
        cached_type = defect_type
        test_transform = transforms.Compose([])
        test_transform.transforms.append(transforms.Resize((size, size)))
        test_transform.transforms.append(transforms.ToTensor())
        test_transform.transforms.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                              std=[0.229, 0.224, 0.225]))
        test_data_eval = MVTecAT("Data", defect_type, size, transform=test_transform, mode="test")

    dataloader_test = DataLoader(test_data_eval, batch_size=64,
                                 shuffle=False, num_workers=0)

    # create the model and load the checkpoint if no model instance was passed in
    if model is None:
        print(f"loading model {modelname}")
        head_layers = [512] * head_layer + [128]
        print(head_layers)
        weights = torch.load(modelname)
        classes = weights["out.weight"].shape[0]
        model = ProjectionNet(pretrained=False, head_layers=head_layers, num_classes=classes)
        model.load_state_dict(weights)
        model.to(device)
        model.eval()

    # get embeddings for the test data
    labels = []
    embeds = []
    with torch.no_grad():
        for x, label in dataloader_test:
            embed, logit = model(x.to(device))

            embeds.append(embed.cpu())
            labels.append(label.cpu())
    labels = torch.cat(labels)
    embeds = torch.cat(embeds)

    if train_embed is None:
        train_embed = get_train_embeds(model, size, defect_type, test_transform, device)

    # L2-normalize the embeddings
    embeds = torch.nn.functional.normalize(embeds, p=2, dim=1)
    train_embed = torch.nn.functional.normalize(train_embed, p=2, dim=1)

    # create eval plot dir
    if save_plots:
        eval_dir = Path("eval") / modelname
        eval_dir.mkdir(parents=True, exist_ok=True)

        # plot t-SNE, optionally together with some of the training data
        show_training_data = False  # hard-coded off: the t-SNE plot currently shows only test embeddings
        if show_training_data:
            # augmentation setting
            # TODO: do all of this in a separate function that we can call in training and evaluation.
            #       it is very ugly to just copy the code
            min_scale = 0.5

            # create training dataset and dataloader
            after_cutpaste_transform = transforms.Compose([])
            after_cutpaste_transform.transforms.append(transforms.ToTensor())
            after_cutpaste_transform.transforms.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                            std=[0.229, 0.224, 0.225]))

            train_transform = transforms.Compose([])
            # train_transform.transforms.append(transforms.RandomResizedCrop(size, scale=(min_scale, 1)))
            # train_transform.transforms.append(transforms.GaussianBlur(int(size / 10), sigma=(0.1, 2.0)))
            train_transform.transforms.append(CutPaste(transform=after_cutpaste_transform))
            # train_transform.transforms.append(transforms.ToTensor())

            train_data = MVTecAT("Data", defect_type, transform=train_transform, size=size)
            dataloader_train = DataLoader(train_data, batch_size=32,
                                          shuffle=True, num_workers=8, collate_fn=cut_paste_collate_fn,
                                          persistent_workers=True)

            # run inference on the training data
            train_labels = []
            train_embeds = []
            with torch.no_grad():
                for x1, x2 in dataloader_train:
                    x = torch.cat([x1, x2], dim=0)
                    embed, logit = model(x.to(device))

                    # generate labels: 0 = normal, 1 = cut-paste augmented
                    y = torch.tensor([0, 1])
                    y = y.repeat_interleave(x1.size(0))

                    train_embeds.append(embed.cpu())
                    train_labels.append(y)
                    # only use the first batch to keep the plot readable
                    break
            train_labels = torch.cat(train_labels)
            train_embeds = torch.cat(train_embeds)

            # for t-SNE we encode training data as 2 and augmented data as 3
            tsne_labels = torch.cat([labels, train_labels + 2])
            tsne_embeds = torch.cat([embeds, train_embeds])
        else:
            tsne_labels = labels
            tsne_embeds = embeds
        plot_tsne(tsne_labels, tsne_embeds, eval_dir / "tsne.png")
    else:
        eval_dir = Path("unused")

    # fit the density model on the training embeddings and score the test embeddings
    print(f"using density estimation {density.__class__.__name__}")
    density.fit(train_embed)
    distances = density.predict(embeds)
    # TODO: set a threshold on the Mahalanobis distances and use "real" probabilities
    roc_auc = plot_roc(labels, distances, eval_dir / "roc_plot.png", modelname=modelname, save_plots=save_plots)

    return roc_auc
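# Sketch of programmatic use (the checkpoint path below is hypothetical; the CLI entry
# point instead globs for files matching "model-<defect_type>*" in --model_dir):
#
#   roc_auc = eval_model("models/model-bottle.tch", "bottle", device="cuda",
#                        save_plots=False, density=GaussianDensityTorch())
#   print(f"bottle AUC: {roc_auc}")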
def plot_roc(labels, scores, filename, modelname="", save_plots=False):
    fpr, tpr, _ = roc_curve(labels, scores)
    roc_auc = auc(fpr, tpr)

    # plot the ROC curve
    if save_plots:
        plt.figure()
        lw = 2
        plt.plot(fpr, tpr, color='darkorange',
                 lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title(f'Receiver operating characteristic {modelname}')
        plt.legend(loc="lower right")
        # plt.show()
        plt.savefig(filename)
        plt.close()
    return roc_auc
def plot_tsne(labels, embeds, filename):
    """Project the embeddings to 2D with t-SNE and save a scatter plot colored by label."""
    tsne = TSNE(n_components=2, verbose=1, perplexity=30, n_iter=500)
    embeds, labels = shuffle(embeds, labels)
    tsne_results = tsne.fit_transform(embeds)

    fig, ax = plt.subplots(1)
    # labels: 0 = normal test, 1 = defective test, 2 = training, 3 = augmented training
    colormap = ["b", "r", "c", "y"]
    ax.scatter(tsne_results[:, 0], tsne_results[:, 1], color=[colormap[l] for l in labels])
    fig.savefig(filename)
    plt.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='eval models')
    parser.add_argument('--type', default="all",
                        help='MVTec defect type(s) to evaluate, separated by "," (default: "all": evaluate all defect types)')
    parser.add_argument('--model_dir', default="models",
                        help='directory containing the models to evaluate (default: models)')
    parser.add_argument('--cuda', default=False, type=str2bool,
                        help='use cuda for model predictions (default: False)')
    parser.add_argument('--head_layer', default=8, type=int,
                        help='number of layers in the projection head (default: 8)')
    parser.add_argument('--density', default="torch", choices=["torch", "sklearn"],
                        help='density implementation to use. See `density.py` for both implementations. (default: torch)')
    parser.add_argument('--save_plots', default=True, type=str2bool,
                        help='save t-SNE and ROC plots')

    args = parser.parse_args()
    print(args)

    all_types = ['bottle',
                 'cable',
                 'capsule',
                 'carpet',
                 'grid',
                 'hazelnut',
                 'leather',
                 'metal_nut',
                 'pill',
                 'screw',
                 'tile',
                 'toothbrush',
                 'transistor',
                 'wood',
                 'zipper']

    if args.type == "all":
        types = all_types
    else:
        types = args.type.split(",")

    device = "cuda" if args.cuda else "cpu"

    density_mapping = {
        "torch": GaussianDensityTorch,
        "sklearn": GaussianDensitySklearn
    }
    density = density_mapping[args.density]

    # find the checkpoints, keeping model names and defect types aligned
    # (types without a matching "model-<type>*" file are skipped)
    model_names = []
    eval_types = []
    for data_type in types:
        matches = sorted(Path(args.model_dir).glob(f"model-{data_type}*"))
        if matches:
            model_names.append(matches[0])
            eval_types.append(data_type)
    if len(model_names) < len(types):
        print("warning: not all types present in folder")

    obj = defaultdict(list)
    for model_name, data_type in zip(model_names, eval_types):
        print(f"evaluating {data_type}")

        roc_auc = eval_model(model_name, data_type, save_plots=args.save_plots, device=device,
                             head_layer=args.head_layer, density=density())
        print(f"{data_type} AUC: {roc_auc}")
        obj["defect_type"].append(data_type)
        obj["roc_auc"].append(roc_auc)

    # save the results as a pandas dataframe
    eval_dir = Path("eval") / args.model_dir
    eval_dir.mkdir(parents=True, exist_ok=True)

    df = pd.DataFrame(obj)
    df.to_csv(str(eval_dir) + "_perf.csv")
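# Example invocations (assuming trained checkpoints exist in ./models):
#   python eval.py                                   # evaluate every defect type on the CPU
#   python eval.py --type bottle,cable --cuda True   # evaluate a subset on the GPU
# Per-type ROC AUCs are written to eval/<model_dir>_perf.csv.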