Commit 2356468 (0 parents), showing 23 changed files with 1,967 additions and 0 deletions.
@@ -0,0 +1,131 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import os\n",
    "import cv2\n",
    "\n",
    "import sys\n",
    "sys.path.append('../')\n",
    "sys.path.append('../../')\n",
    "sys.path.append('../../../')\n",
    "\n",
    "from Scripts.Utils.Imports import *\n",
    "from Scripts.Models.AttentionModel import *\n",
    "import Scripts.cifar10_DataLoader as DL\n",
    "import Modules.Utils.ModelWrapperGenerator as MW\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# if using a GPU\n",
    "device = torch.device(GetLowestGPU(pick_from=[0]))\n",
    "# if using the CPU\n",
    "# device = torch.device(\"cpu\")\n",
    "\n",
    "# helper functions to move arrays on and off the device\n",
    "def to_torch(x):\n",
    "    return torch.from_numpy(x).float().to(device)\n",
    "def to_numpy(x):\n",
    "    return x.detach().cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p_train = np.random.permutation(50000)\n",
    "p_val = np.random.permutation(10000)\n",
    "\n",
    "path = 'data'\n",
    "train_dataset = DL.DataLoader(p_train, path, train=True)\n",
    "val_dataset = DL.DataLoader(p_val, path, train=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "lr = 0.001\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "def Radar_BCE(pred, true):\n",
    "    return nn.functional.binary_cross_entropy(pred, true)\n",
    "\n",
    "cnn = CNN_with_Attention(input_features=3, output_features=10)\n",
    "cnn.to(device)\n",
    "opt = torch.optim.Adam(cnn.parameters(), lr=lr)\n",
    "\n",
    "model = MW.ModelWrapper(\n",
    "    model=cnn,\n",
    "    optimizer=opt,\n",
    "    loss=criterion,\n",
    "    save_name='../Weights/attention_demo3',\n",
    "    device=device)\n",
    "\n",
    "# train\n",
    "epochs = 100\n",
    "batch_size = 64\n",
    "workers = 4\n",
    "model.fit(\n",
    "    train_dataset=train_dataset,\n",
    "    batch_size=batch_size,\n",
    "    epochs=epochs,\n",
    "    # verbose=1,\n",
    "    validation_dataset=val_dataset,\n",
    "    workers=workers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
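Once fit() returns, a quick sanity check can be run on the in-memory network with plain PyTorch. This sketch is not part of the commit; it assumes val_dataset follows the torch Dataset protocol and yields (image, label) pairs, and that CNN_with_Attention outputs raw class logits:

from torch.utils.data import DataLoader as TorchLoader

# assumption: val_dataset yields (image, label) pairs
loader = TorchLoader(val_dataset, batch_size=256, shuffle=False)
cnn.eval()
correct = total = 0
with torch.no_grad():
    for images, labels in loader:
        preds = cnn(images.to(device)).argmax(dim=1)  # top-1 class per image
        correct += (preds == labels.to(device)).sum().item()
        total += labels.shape[0]
print(f'validation accuracy: {correct / total:.3f}')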
@@ -0,0 +1,174 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append('../')\n",
    "sys.path.append('../../')\n",
    "sys.path.append('../../../')\n",
    "\n",
    "from Modules.Utils.Imports import *\n",
    "# from Packages.PCGrad.PCGrad import PCGrad\n",
    "import Projects.Attention.Scripts.Architectures as Architectures\n",
    "import Modules.Utils.ModelWrapperGenerator as MW\n",
    "import Projects.Attention.Scripts.DataLoader_org as DL\n",
    "import Projects.Attention.Scripts.BBoxPlotter_YOLO_Attention as BBP\n",
    "import Projects.Attention.Scripts.IoU as IoU\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "import numpy as np\n",
    "import torchvision\n",
    "from torchvision import transforms, datasets, models\n",
    "import os\n",
    "import cv2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Device set to cuda:0\n"
     ]
    }
   ],
"source": [ | ||
"# If use GPU\n", | ||
"device = torch.device(GetLowestGPU(pick_from=[0]))\n", | ||
"# If use CPU\n", | ||
"# device = torch.device(\"cpu\")\n", | ||
"\n", | ||
"# helper functions\n", | ||
"def to_torch(x):\n", | ||
" return torch.from_numpy(x).float().to(device)\n", | ||
"def to_numpy(x):\n", | ||
" return x.detach().cpu().numpy()" | ||
] | ||
}, | ||
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Image Preprocessing\n",
    "transform = transforms.Compose([\n",
    "    transforms.Resize(224),\n",
    "    transforms.ToTensor()])\n",
    "\n",
    "# CIFAR-10 Dataset\n",
    "train_dataset = datasets.CIFAR10(root='./data/',\n",
    "                                 train=True,\n",
    "                                 transform=transform,\n",
    "                                 download=True)\n",
    "\n",
    "test_dataset = datasets.CIFAR10(root='./data/',\n",
    "                                train=False,\n",
    "                                transform=transform)\n",
    "\n",
    "# Data Loader (Input Pipeline)\n",
    "train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n",
    "                                           batch_size=1,\n",
    "                                           shuffle=True)\n",
    "\n",
    "test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n",
    "                                          batch_size=1,\n",
    "                                          shuffle=False)\n",
    "\n",
    "classes = ('plane', 'car', 'bird', 'cat',\n",
    "           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save each training image/label pair as its own .npy file\n",
    "os.makedirs('data/cifar10_train', exist_ok=True)\n",
    "\n",
    "for i, (images, labels) in enumerate(train_loader):\n",
    "    images = to_numpy(images)\n",
    "    labels = to_numpy(labels)\n",
    "\n",
    "    data = {}\n",
    "    data['images'] = images\n",
    "    data['labels'] = labels\n",
    "\n",
    "    save_name = 'data/cifar10_train/image_' + str(i) + '.npy'\n",
    "    np.save(save_name, data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save each test image/label pair as its own .npy file\n",
    "os.makedirs('data/cifar10_test', exist_ok=True)\n",
    "\n",
    "for i, (images, labels) in enumerate(test_loader):\n",
    "    images = to_numpy(images)\n",
    "    labels = to_numpy(labels)\n",
    "\n",
    "    data = {}\n",
    "    data['images'] = images\n",
    "    data['labels'] = labels\n",
    "\n",
    "    save_name = 'data/cifar10_test/image_' + str(i) + '.npy'\n",
    "    np.save(save_name, data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
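For reference, each file written above holds a pickled Python dict (np.save wraps it in a 0-d object array), so reading one back needs allow_pickle=True plus .item(). A minimal sketch, not part of the commit:

import numpy as np

# unpack the 0-d object array back into a dict
data = np.load('data/cifar10_train/image_0.npy', allow_pickle=True).item()
images = data['images']  # shape (1, 3, 224, 224), float32 in [0, 1]
labels = data['labels']  # shape (1,), integer class index
print(images.shape, labels)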
@@ -0,0 +1,69 @@
############## Code adapted from
####### https://github.com/liudaizong/Residual-Attention-Network/blob/master/model/basic_layers.py

import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
import numpy as np
from Scripts.Layers.ResidualBlock import ResidualBlock

class AttentionModule(nn.Module):
    def __init__(self, in_channels, out_channels, size1, size2, size3):
        super(AttentionModule, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        # trunk branch: plain feature processing
        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        # soft mask branch: three-level bottom-up (max-pool) /
        # top-down (bilinear upsampling) pathway with skip connections
        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax1_blocks = ResidualBlock(in_channels, out_channels)
        self.skip1_connection_residual_block = ResidualBlock(in_channels, out_channels)
        self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax2_blocks = ResidualBlock(in_channels, out_channels)
        self.skip2_connection_residual_block = ResidualBlock(in_channels, out_channels)
        self.mpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax3_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))
        self.interpolation3 = nn.UpsamplingBilinear2d(size=size3)
        self.softmax4_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation2 = nn.UpsamplingBilinear2d(size=size2)
        self.softmax5_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation1 = nn.UpsamplingBilinear2d(size=size1)
        self.softmax6_blocks = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.Sigmoid())
        self.last_blocks = ResidualBlock(in_channels, out_channels)

    def forward(self, x):
        x = self.first_residual_blocks(x)
        # trunk branch
        out_trunk = self.trunk_branches(x)
        self.out_trunk = out_trunk.clone()
        # mask branch: downsample three times, keeping skip connections
        out_mpool1 = self.mpool1(x)
        out_softmax1 = self.softmax1_blocks(out_mpool1)
        out_skip1_connection = self.skip1_connection_residual_block(out_softmax1)
        out_mpool2 = self.mpool2(out_softmax1)
        out_softmax2 = self.softmax2_blocks(out_mpool2)
        out_skip2_connection = self.skip2_connection_residual_block(out_softmax2)
        out_mpool3 = self.mpool3(out_softmax2)
        out_softmax3 = self.softmax3_blocks(out_mpool3)
        # upsample back, fusing the skip connections at each level
        out_interp3 = self.interpolation3(out_softmax3)
        out = out_interp3 + out_skip2_connection
        out_softmax4 = self.softmax4_blocks(out)
        out_interp2 = self.interpolation2(out_softmax4)
        out = out_interp2 + out_skip1_connection
        out_softmax5 = self.softmax5_blocks(out)
        out_interp1 = self.interpolation1(out_softmax5)
        out_softmax6 = self.softmax6_blocks(out_interp1)
        self.out_mask = out_softmax6.clone()
        # attention residual learning: (1 + mask) * trunk
        out = (1 + out_softmax6) * out_trunk
        out_last = self.last_blocks(out)

        return out_last
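A quick shape check for the module, as a sketch rather than part of the commit: the three size arguments must equal the spatial resolutions seen before each max-pool, and ResidualBlock is assumed to preserve spatial size, as in the upstream repository. For a 56x56 input the pools produce 28, 14, and 7, so the upsampling targets are (14, 14), (28, 28), and (56, 56):

import torch

# hypothetical sizes for a 56x56, 64-channel feature map
module = AttentionModule(64, 64, size1=(56, 56), size2=(28, 28), size3=(14, 14))
x = torch.randn(2, 64, 56, 56)
out = module(x)
print(out.shape)              # torch.Size([2, 64, 56, 56])
print(module.out_mask.shape)  # sigmoid mask saved during forward, same shape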