import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-5):
super(LayerNorm, self).__init__()
self.hidden_size = hidden_size
self.eps = eps
self.weight = nn.Parameter(torch.Tensor(hidden_size))
self.bias = nn.Parameter(torch.Tensor(hidden_size))
self.reset_parameters()
def forward(self, inputs):
"""Layer Normalization.
This module applies Layer Normalization, with rescaling and shift,
only on the last dimension. See Lecture 07 (I), slide 23.
Parameters
----------
inputs (`torch.FloatTensor` of shape `(*dims, hidden_size)`)
The input tensor. This tensor can have an arbitrary number N of
dimensions, as long as `inputs.shape[N-1] == hidden_size`. The
leading N - 1 dimensions `dims` can be arbitrary.
Returns
-------
outputs (`torch.FloatTensor` of shape `(*dims, hidden_size)`)
The output tensor, having the same shape as `inputs`.
"""
mean = torch.mean(inputs, dim=-1, keepdim=True)
var = torch.var(inputs, dim=-1, unbiased=False, keepdim=True)
std = torch.sqrt(var + self.eps)
y_hat = (inputs - mean)/std
y = y_hat * self.weight + self.bias
return y
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
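
# Note (not in the original file): this LayerNorm should behave like
# torch.nn.LayerNorm(hidden_size, eps=1e-5) applied over the last dimension.
# A hedged sanity check, with an illustrative hidden size of 64:
#   x = torch.randn(2, 10, 64)
#   assert torch.allclose(LayerNorm(64)(x), nn.LayerNorm(64)(x), atol=1e-5)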
class MultiHeadedAttention(nn.Module):
def __init__(self, head_size, num_heads, sequence_length):
super(MultiHeadedAttention, self).__init__()
self.head_size = head_size
self.num_heads = num_heads
self.sequence_length = sequence_length
self.nl_size = self.head_size*self.num_heads
self.q_linear = nn.Linear(self.nl_size, self.nl_size, bias=True)
self.k_linear = nn.Linear(self.nl_size, self.nl_size, bias=True)
self.v_linear = nn.Linear(self.nl_size, self.nl_size, bias=True)
self.final_linear = nn.Linear(self.nl_size, self.nl_size, bias=True)
def get_attention_weights(self, queries, keys):
"""Compute the attention weights.
This computes the attention weights for all the sequences and all the
heads in the batch. For a single sequence and a single head (for
simplicity), if Q are the queries (matrix of size `(sequence_length, head_size)`),
and K are the keys (matrix of size `(sequence_length, head_size)`), then
the attention weights are computed as
weights = softmax(Q * K^{T} / sqrt(head_size))
Here "*" is the matrix multiplication. See Lecture 06, slides 19-24.
Parameters
----------
queries (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, head_size)`)
Tensor containing the queries for all the positions in the sequences
and all the heads.
keys (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, head_size)`)
Tensor containing the keys for all the positions in the sequences
and all the heads.
Returns
-------
attention_weights (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, sequence_length)`)
Tensor containing the attention weights for all the heads and all
the sequences in the batch.
"""
return F.softmax(torch.matmul(queries, keys.transpose(-2, -1))/math.sqrt(self.head_size), dim=-1)
def apply_attention(self, queries, keys, values):
"""Apply the attention.
This computes the output of the attention, for all the sequences and
all the heads in the batch. For a single sequence and a single head
(for simplicity), if Q are the queries (matrix of size `(sequence_length, head_size)`),
K are the keys (matrix of size `(sequence_length, head_size)`), and V are
        the values (matrix of size `(sequence_length, head_size)`), then the output
of the attention is given by
weights = softmax(Q * K^{T} / sqrt(head_size))
attended_values = weights * V
outputs = concat(attended_values)
Here "*" is the matrix multiplication, and "concat" is the operation
that concatenates the attended values of all the heads (see the
`merge_heads` function). See Lecture 06, slides 19-24.
Parameters
----------
queries (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, head_size)`)
Tensor containing the queries for all the positions in the sequences
and all the heads.
keys (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, head_size)`)
Tensor containing the keys for all the positions in the sequences
and all the heads.
values (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, head_size)`)
Tensor containing the values for all the positions in the sequences
and all the heads.
Returns
-------
outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_heads * head_size)`)
Tensor containing the concatenated outputs of the attention for all
the sequences in the batch, and all positions in each sequence.
"""
weights = self.get_attention_weights(queries, keys)
attended_values = torch.matmul(weights, values)
output = self.merge_heads(attended_values)
return output
def split_heads(self, tensor):
"""Split the head vectors.
This function splits the head vectors that have been concatenated (e.g.
through the `merge_heads` function) into a separate dimension. This
function also transposes the `sequence_length` and `num_heads` axes.
It only reshapes and transposes the input tensor, and it does not
apply any further transformation to the tensor. The function `split_heads`
is the inverse of the function `merge_heads`.
Parameters
----------
tensor (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_heads * dim)`)
Input tensor containing the concatenated head vectors (each having
a size `dim`, which can be arbitrary).
Returns
-------
output (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, dim)`)
Reshaped and transposed tensor containing the separated head
vectors. Here `dim` is the same dimension as the one in the
definition of the input `tensor` above.
"""
out = tensor.reshape(tensor.shape[0], tensor.shape[1], self.num_heads, -1).transpose(1, 2)
return out
def merge_heads(self, tensor):
"""Merge the head vectors.
This function concatenates the head vectors in a single vector. This
function also transposes the `sequence_length` and the newly created
"merged" dimension. It only reshapes and transposes the input tensor,
and it does not apply any further transformation to the tensor. The
function `merge_heads` is the inverse of the function `split_heads`.
Parameters
----------
tensor (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, dim)`)
Input tensor containing the separated head vectors (each having
a size `dim`, which can be arbitrary).
Returns
-------
output (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_heads * dim)`)
Reshaped and transposed tensor containing the concatenated head
vectors. Here `dim` is the same dimension as the one in the
definition of the input `tensor` above.
"""
out = tensor.permute(0, 2, 1, 3).reshape(tensor.shape[0], tensor.shape[2], -1)
return out
def forward(self, hidden_states):
"""Multi-headed attention.
This applies the multi-headed attention on the input tensors `hidden_states`.
For a single sequence (for simplicity), if X are the hidden states from
the previous layer (a matrix of size `(sequence_length, num_heads * head_size)`
containing the concatenated head vectors), then the output of multi-headed
attention is given by
Q = X * W_{Q} + b_{Q} # Queries
K = X * W_{K} + b_{K} # Keys
V = X * W_{V} + b_{V} # Values
Y = attention(Q, K, V) # Attended values (concatenated for all heads)
outputs = Y * W_{Y} + b_{Y} # Linear projection
Here "*" is the matrix multiplication.
Parameters
----------
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_heads * head_size)`)
Input tensor containing the concatenated head vectors for all the
sequences in the batch, and all positions in each sequence. This
is, for example, the tensor returned by the previous layer.
Returns
-------
output (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_heads * head_size)`)
Tensor containing the output of multi-headed attention for all the
sequences in the batch, and all positions in each sequence.
"""
Q = self.split_heads(self.q_linear(hidden_states))
K = self.split_heads(self.k_linear(hidden_states))
V = self.split_heads(self.v_linear(hidden_states))
Y = self.final_linear(self.apply_attention(Q, K, V))
return Y
class PostNormAttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads,sequence_length, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
            num_heads - Number of heads to use in the Multi-Head Attention block
            sequence_length - Length of the sequence
            dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = LayerNorm(embed_dim)
self.attn = MultiHeadedAttention(embed_dim//num_heads, num_heads,sequence_length)
self.layer_norm_2 = LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
attention_outputs = self.attn(x)
attention_outputs = self.layer_norm_1(x + attention_outputs)
outputs=self.linear(attention_outputs)
outputs = self.layer_norm_2(outputs+attention_outputs)
return outputs
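
# Post-norm applies LayerNorm after each residual connection, i.e.
# norm(x + sublayer(x)); the pre-norm block below normalizes before each
# sublayer, i.e. x + sublayer(norm(x)), which is the variant used by ViT.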
class PreNormAttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads,sequence_length, dropout=0.0):
"""A decoder layer.
This module combines a Multi-headed Attention module and an MLP to
create a layer of the transformer, with normalization and skip-connections.
See Lecture 06, slide 33.
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
sequence_length - Length of the sequence
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = LayerNorm(embed_dim)
self.attn = MultiHeadedAttention(embed_dim//num_heads, num_heads,sequence_length)
self.layer_norm_2 = LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
norm_outputs = self.layer_norm_1(x)
attention_outputs = self.attn(norm_outputs)
out_int = x + attention_outputs
ln2_out = self.layer_norm_2(out_int)
lin_out = self.linear(ln2_out)
output = out_int + lin_out
return output
class VisionTransformer(nn.Module):
def __init__(self, embed_dim=256, hidden_dim=512, num_channels=3, num_heads=8, num_layers=4, num_classes=10, patch_size=4, num_patches=64,block='prenorm', dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
block - Type of attention block
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
        # Add the CLS token to the sequence
self.sequence_length= 1+ num_patches
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
if block =='prenorm':
self.transformer = nn.Sequential(*[PreNormAttentionBlock(embed_dim, hidden_dim, num_heads,self.sequence_length, dropout=dropout) for _ in range(num_layers)])
else:
self.transformer = nn.Sequential(*[PostNormAttentionBlock(embed_dim, hidden_dim, num_heads,self.sequence_length, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,self.sequence_length,embed_dim))
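    # The learnable CLS token above is prepended to the patch sequence in forward(),
    # and its final representation is fed to mlp_head; pos_embedding holds one learned
    # position vector for each of the 1 + num_patches positions.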
def get_patches(self, image, patch_size, flatten_channels=True):
"""
Inputs:
image - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
        flatten_channels - If True, the patches are returned in a flattened format,
                           as feature vectors instead of an image grid.
        Output: torch.Tensor of shape [B, num_patches, C*patch_size*patch_size]
                when flatten_channels is True.
"""
B, C, H, W = image.shape
x = image.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5)
x = x.flatten(1,2)
if flatten_channels:
x = x.flatten(2,4)
return x
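    # Worked example (illustrative numbers, not prescribed by the assignment):
    # a 32x32 RGB image with patch_size=4 gives 8*8 = 64 patches, each flattened
    # to 3*4*4 = 48 values, i.e. an output of shape (B, 64, 48).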
def forward(self, x):
"""ViT
This is a small version of Vision Transformer
Parameters
----------
x - (`torch.LongTensor` of shape `(batch_size, channels, height , width)`)
The input tensor containing the iamges.
Returns
-------
output (`torch.FloatTensor` of shape `(batch_size, num_classes)`)
A tensor containing the output from the mlp_head.
"""
# Preprocess input
x = self.get_patches(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply dropout, then the transformer blocks
x = self.dropout(x)
z = self.transformer(x)
        # Take the CLS token representation and feed it to mlp_head
return self.mlp_head(z[:, 0, :])
def loss(self,preds,labels):
        '''Loss function.
        This function computes the cross-entropy loss between the model
        predictions (logits) and the true labels.
        Parameters:
        preds - predictions (logits) from the model, of shape (batch_size, num_classes)
        labels - true labels of the dataset, of shape (batch_size,)
        '''
return F.cross_entropy(preds, labels)
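
# --- Hedged usage sketch (not part of the original solution file) ---
# A minimal smoke test, assuming CIFAR-10-like inputs (3 x 32 x 32) with the
# default patch_size=4 and num_patches=64; the batch size, random data, and
# dropout value below are illustrative assumptions, not prescribed values.
if __name__ == "__main__":
    torch.manual_seed(0)
    model = VisionTransformer(
        embed_dim=256, hidden_dim=512, num_channels=3, num_heads=8,
        num_layers=4, num_classes=10, patch_size=4, num_patches=64,
        block='prenorm', dropout=0.1,
    )
    images = torch.randn(8, 3, 32, 32)    # fake batch of 8 RGB images
    labels = torch.randint(0, 10, (8,))   # fake class labels
    logits = model(images)                # expected shape: (8, 10)
    print(logits.shape, model.loss(logits, labels).item())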