Commit

Add files via upload
shibaidi authored Dec 21, 2020
0 parents commit 9a97a5a
Showing 1 changed file with 325 additions and 0 deletions.
325 changes: 325 additions & 0 deletions Untitled1.ipynb
@@ -0,0 +1,325 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch import nn as nn\n",
"from torch import optim as optim\n",
"from torch.utils import data as Data\n",
"from torchvision.datasets import __file__\n",
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
"# ''' \n",
"# 运行环境:PyTorch1.8.0 & Python3.8.1 & Cuda11.1 & Cudnn8.0\n",
"# CPU:i7-10070F RAM:16G 2600 GPU:RTX3070 \n",
"# '''\n",
"class attention1D_layer(nn.Module):\n",
"# ''' \n",
"# 注意力机制模块\n",
"# input x= [batch_size,channels(信号通道数),points(采样点数目)] 多通道一维信号\n",
"# return out = [b*x,b] \n",
"# 其中:b*x 通道权重乘输入信号 , b为基于神经得到的各采样点权重\n",
" \n",
"# ''' \n",
" def __init__(self,features):\n",
" super(attention1D_layer,self).__init__()\n",
" self.features = features\n",
" # 一维全局平均池化获得全部通道数均值\n",
" self.GobalPooling = nn.AdaptiveAvgPool1d(1)\n",
" \n",
" # 使用一个带Leakly的线性激活函数的BP网络获取采样点权重值 [channels →channels/4 → channels]\n",
" self.Linear1 = nn.Sequential(\n",
" nn.Linear(features,int(features/4)),\n",
" nn.ReLU6())\n",
" self.Linear2 = nn.Sequential(\n",
" nn.Linear(int(features/4),features),\n",
" nn.ReLU6())\n",
" \n",
" #前向传播函数\n",
" def forward(self,x): #传播前,进行维度交换 [batch_size,channels,points]→[batch_size,points,channels]\n",
" b = self.GobalPooling(x.transpose(1,2))\n",
" #全局池化后释放最后一个维度通道数(channels)\n",
" b = b.squeeze(-1)\n",
" #送入BP网络获得b\n",
" b = self.Linear2(self.Linear1(b))\n",
" b = torch.mean(b,0).reshape(1,1,self.features)\n",
" #b = nn.Softmax(b)\n",
" return b*x,b.squeeze(0).squeeze(0)\n",
" \n",
"class Inception_layer(nn.Module):\n",
"# '''\n",
"# 多尺度卷积模块\n",
"# 使用4种模块卷积核处理数据来获取不同大小的感受野 \n",
"# 参考论文:https://arxiv.org/abs/1512.00567 及 InceptionE InceptionV3 InceptionV4\n",
"# '''\n",
" def __init__(self,in_features,out_features):\n",
" super(Inception_layer,self).__init__()\n",
" self.in_features = in_features\n",
" self.Conv1x3 = nn.Sequential(\n",
" nn.Conv1d(in_features,10,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(10,out_features,kernel_size=3,stride=1,padding=1),\n",
" nn.ReLU() \n",
" )\n",
" self.Conv1x5 = nn.Sequential(\n",
" nn.Conv1d(in_features,10,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(10,out_features,kernel_size=5,stride=1,padding=2),\n",
" nn.ReLU(),\n",
" )\n",
" self.Conv3x5 = nn.Sequential(\n",
" nn.Conv1d(in_features,10,kernel_size=3,stride=1,padding=1),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(10,out_features,kernel_size=5,stride=1,padding=2),\n",
" nn.ReLU()\n",
" )\n",
" self.short_cut = nn.Sequential(\n",
" nn.Conv1d(in_features,out_features,kernel_size=1,stride=1,padding=0)\n",
" )\n",
" def forward(self,x):\n",
" part_1 = self.Conv1x3(x)\n",
" part_2 = self.Conv3x5(x)\n",
" part_3 = self.Conv1x5(x)\n",
" x = self.short_cut(x)\n",
" # 返回四种尺度信号词义\n",
" return part_1,part_2,part_3,x"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"class ResPart(nn.Module):\n",
"# '''\n",
"# 恒等连接结构 : by conv1→conv3→conv1\n",
"# 考虑输入通道数是否需要调整来满足残差相接: \n",
"# F(x) = adjust(x) + f(x)\n",
"# 参考论文结构:https://arxiv.org/pdf/1512.03385.pdf\n",
"# '''\n",
" def __init__(self,in_features,out_features):\n",
" self.in_features = in_features\n",
" self.out_features = out_features\n",
" super(ResPart,self).__init__()\n",
" self.in_features = in_features\n",
" self.out_features = out_features\n",
" if in_features != out_features:\n",
" self.conv = nn.Sequential(\n",
" nn.Conv1d(in_features,out_features,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(out_features,out_features,kernel_size=3,stride=1,padding=1),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(out_features,out_features,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1)\n",
" )\n",
" self.adjust_x = nn.Conv1d(in_features,out_features,kernel_size=1,stride=1,padding=0)\n",
" else:\n",
" self.conv = nn.Sequential(\n",
" nn.Conv1d(in_features,out_features,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(out_features,out_features,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1),\n",
" nn.Conv1d(out_features,out_features,kernel_size=1,stride=1,padding=0),\n",
" nn.ReLU(),\n",
" nn.Dropout(0.1)\n",
" )\n",
" def forward(self,x):\n",
" if self.in_features != self.out_features:\n",
" short_cut = self.adjust_x(x)\n",
" x = self.conv(x)\n",
" return short_cut.add(x)\n",
" else:\n",
" return self.conv(x).add(x) "
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"class sbd_mode(nn.Module):\n",
" def __init__(self,in_channels,goal_channel=1):\n",
" super(sbd_mode,self).__init__()\n",
" self.in_channels = in_channels\n",
" self.goal_channel = goal_channel\n",
" self.ResBody_layer1 = ResPart(in_channels,100)\n",
" self.InCeption_layer = Inception_layer(100,100)\n",
" self.ResBody_layer2 = ResPart(400,100)\n",
" self.attention_layer = attention1D_layer(100) \n",
" self.conv = nn.Conv1d(100,self.goal_channel,kernel_size=1)\n",
" self.fully = nn.Sequential(\n",
" nn.Linear(100,200),\n",
" nn.ReLU(),\n",
" nn.Linear(200,100))\n",
" \n",
" \n",
" #\n",
" def forward(self,x):\n",
" x = self.ResBody_layer1(x)\n",
" #print(x.shape)\n",
" p1,p2,p3,x = self.InCeption_layer(x)\n",
" x = torch.cat((p1,p2,p3,x),axis=1)\n",
" #print(x.shape)\n",
" x = self.ResBody_layer2(x)\n",
" x,b = self.attention_layer(x)\n",
" x = self.conv(x)\n",
" x = x.squeeze(1)\n",
" return self.fully(x)\n",
"model = sbd_mode(in_channels=3).to(device) "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"sbd_mode(\n",
" (ResBody_layer1): ResPart(\n",
" (conv): Sequential(\n",
" (0): Conv1d(3, 100, kernel_size=(1,), stride=(1,))\n",
" (1): ReLU()\n",
" (2): Dropout(p=0.1, inplace=False)\n",
" (3): Conv1d(100, 100, kernel_size=(3,), stride=(1,), padding=(1,))\n",
" (4): ReLU()\n",
" (5): Dropout(p=0.1, inplace=False)\n",
" (6): Conv1d(100, 100, kernel_size=(1,), stride=(1,))\n",
" (7): ReLU()\n",
" (8): Dropout(p=0.1, inplace=False)\n",
" )\n",
" (adjust_x): Conv1d(3, 100, kernel_size=(1,), stride=(1,))\n",
" )\n",
" (InCeption_layer): Inception_layer(\n",
" (Conv1x3): Sequential(\n",
" (0): Conv1d(100, 10, kernel_size=(1,), stride=(1,))\n",
" (1): ReLU()\n",
" (2): Dropout(p=0.1, inplace=False)\n",
" (3): Conv1d(10, 100, kernel_size=(3,), stride=(1,), padding=(1,))\n",
" (4): ReLU()\n",
" )\n",
" (Conv1x5): Sequential(\n",
" (0): Conv1d(100, 10, kernel_size=(1,), stride=(1,))\n",
" (1): ReLU()\n",
" (2): Dropout(p=0.1, inplace=False)\n",
" (3): Conv1d(10, 100, kernel_size=(5,), stride=(1,), padding=(2,))\n",
" (4): ReLU()\n",
" )\n",
" (Conv3x5): Sequential(\n",
" (0): Conv1d(100, 10, kernel_size=(3,), stride=(1,), padding=(1,))\n",
" (1): ReLU()\n",
" (2): Dropout(p=0.1, inplace=False)\n",
" (3): Conv1d(10, 100, kernel_size=(5,), stride=(1,), padding=(2,))\n",
" (4): ReLU()\n",
" )\n",
" (short_cut): Sequential(\n",
" (0): Conv1d(100, 100, kernel_size=(1,), stride=(1,))\n",
" )\n",
" )\n",
" (ResBody_layer2): ResPart(\n",
" (conv): Sequential(\n",
" (0): Conv1d(400, 100, kernel_size=(1,), stride=(1,))\n",
" (1): ReLU()\n",
" (2): Dropout(p=0.1, inplace=False)\n",
" (3): Conv1d(100, 100, kernel_size=(3,), stride=(1,), padding=(1,))\n",
" (4): ReLU()\n",
" (5): Dropout(p=0.1, inplace=False)\n",
" (6): Conv1d(100, 100, kernel_size=(1,), stride=(1,))\n",
" (7): ReLU()\n",
" (8): Dropout(p=0.1, inplace=False)\n",
" )\n",
" (adjust_x): Conv1d(400, 100, kernel_size=(1,), stride=(1,))\n",
" )\n",
" (attention_layer): attention1D_layer(\n",
" (GobalPooling): AdaptiveAvgPool1d(output_size=1)\n",
" (Linear1): Sequential(\n",
" (0): Linear(in_features=100, out_features=25, bias=True)\n",
" (1): ReLU6()\n",
" )\n",
" (Linear2): Sequential(\n",
" (0): Linear(in_features=25, out_features=100, bias=True)\n",
" (1): ReLU6()\n",
" )\n",
" )\n",
" (conv): Conv1d(100, 1, kernel_size=(1,), stride=(1,))\n",
" (fully): Sequential(\n",
" (0): Linear(in_features=100, out_features=200, bias=True)\n",
" (1): ReLU()\n",
" (2): Linear(in_features=200, out_features=100, bias=True)\n",
" )\n",
")\n"
]
}
],
"source": [
"print(model)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## train \n",
"import hiddenlayer as h1\n",
"optimizer = optim.Adam(model.parameters())\n",
"loss_func = nn.MSELoss()\n",
"history1 = h1.History()\n",
"canvas1 = h1.Canvas()\n",
"for epoch in range(500):\n",
" all_loss = 0\n",
" for step,(b_x,b_y) in enumerate(data_gen):\n",
" b_x = b_x.to(device)\n",
" b_y = b_y.to(device)\n",
" pre = model(b_x)\n",
" loss = loss_func(pre,b_y)\n",
" optimizer.zero_grad()\n",
" loss.backward()\n",
" optimizer.step()\n",
" all_loss += loss.item()\n",
" epo_loss = all_loss / 10\n",
" history1.log(epoch,train_loss=epo_loss)\n",
" \n",
" with canvas1:\n",
" canvas1.draw_plot(history1['train_loss']) "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
