
Commit

Merge pull request #1 from ILLIXR/eye-tracking
ILLIXR eye-tracking
yingj4 authored May 19, 2020
2 parents 7844557 + 7470e93 commit 6083929
Showing 5 changed files with 126 additions and 7 deletions.
3 changes: 1 addition & 2 deletions .gitignore
@@ -1,6 +1,5 @@
/logs/
/0try/
/_pycache_/
/Semantic_Segmentation_Dataset/
/test/
/__pycache__/
*.zip
Empty file.
91 changes: 88 additions & 3 deletions densenet.py
@@ -10,6 +10,12 @@
It is a simplified version of DenseNet with U-NET architecture.
2D implementation
"""

"""
For timing measurement, Ying added a few lines of code in this file
"""
import time

import torch
import math
import torch.nn as nn
@@ -34,22 +40,47 @@ def __init__(self,input_channels,output_channels,down_size,dropout=False,prob=0)
self.bn = torch.nn.BatchNorm2d(num_features=output_channels)

def forward(self, x):
#time0 = time.time_ns()
if self.down_size != None:
x = self.max_pool(x)
#time1 = time.time_ns()
if self.dropout:
x1 = self.relu(self.dropout1(self.conv1(x)))
#time2 = time.time_ns()
x21 = torch.cat((x,x1),dim=1)
#time3 = time.time_ns()
x22 = self.relu(self.dropout2(self.conv22(self.conv21(x21))))
#time4 = time.time_ns()
x31 = torch.cat((x21,x22),dim=1)
#time5 = time.time_ns()
out = self.relu(self.dropout3(self.conv32(self.conv31(x31))))
else:
x1 = self.relu(self.conv1(x))
#time2 = time.time_ns()
x21 = torch.cat((x,x1),dim=1)
#time3 = time.time_ns()
x22 = self.relu(self.conv22(self.conv21(x21)))
#time4 = time.time_ns()
x31 = torch.cat((x21,x22),dim=1)
#time5= time.time_ns()
out = self.relu(self.conv32(self.conv31(x31)))
return self.bn(out)

#time6 = time.time_ns()
dbBn = self.bn(out)
#time7 = time.time_ns()

'''
deltaTime1 = time1 - time0
deltaTime2 = time2 - time1
deltaTime3 = time3 - time2
deltaTime4 = time4 - time3
deltaTime5 = time5 - time4
deltaTime6 = time6 - time5
deltaTime7 = time7 - time6
print("DownBlock " + str(deltaTime1) + ' ' + str(deltaTime2) + ' ' + str(deltaTime3) + ' ' + str(deltaTime4) + ' ' + str(deltaTime5) + ' ' + str(deltaTime6) + ' ' + str(deltaTime7))
'''
return dbBn


class DenseNet2D_up_block_concat(nn.Module):
@@ -67,16 +98,35 @@ def __init__(self,skip_channels,input_channels,output_channels,up_stride,dropout
self.dropout2 = nn.Dropout(p=prob)

def forward(self,prev_feature_map,x):
#time0 = time.time_ns()
x = nn.functional.interpolate(x,scale_factor=self.up_stride,mode='nearest')
#time1 = time.time_ns()
x = torch.cat((x,prev_feature_map),dim=1)
#time2 = time.time_ns()
if self.dropout:
x1 = self.relu(self.dropout1(self.conv12(self.conv11(x))))
#time3 = time.time_ns()
x21 = torch.cat((x,x1),dim=1)
#time4 = time.time_ns()
out = self.relu(self.dropout2(self.conv22(self.conv21(x21))))
else:
x1 = self.relu(self.conv12(self.conv11(x)))
#time3 = time.time_ns()
x21 = torch.cat((x,x1),dim=1)
#time4 = time.time_ns()
out = self.relu(self.conv22(self.conv21(x21)))
#time5 = time.time_ns()

'''
deltaTime1 = time1 - time0
deltaTime2 = time2 - time1
deltaTime3 = time3 - time2
deltaTime4 = time4 - time3
deltaTime5 = time5 - time4
print("UpBlock " + str(deltaTime1) + ' ' + str(deltaTime2) + ' ' + str(deltaTime3) + ' ' + str(deltaTime4) + ' ' + str(deltaTime5))
'''

return out

class DenseNet2D(nn.Module):
@@ -126,19 +176,54 @@ def _initialize_weights(self):
m.bias.data.zero_()

def forward(self,x):
#time0 = time.time_ns()
self.x1 = self.down_block1(x)
#time1 = time.time_ns()
self.x2 = self.down_block2(self.x1)
#time2 = time.time_ns()
self.x3 = self.down_block3(self.x2)
#time3 = time.time_ns()
self.x4 = self.down_block4(self.x3)
#time4 = time.time_ns()
self.x5 = self.down_block5(self.x4)
#time5 = time.time_ns()
self.x6 = self.up_block1(self.x4,self.x5)
#time6 = time.time_ns()
self.x7 = self.up_block2(self.x3,self.x6)
#time7 = time.time_ns()
self.x8 = self.up_block3(self.x2,self.x7)
#time8 = time.time_ns()
self.x9 = self.up_block4(self.x1,self.x8)

#time9 = time.time_ns()
if self.dropout:
out = self.out_conv1(self.dropout1(self.x9))
else:
out = self.out_conv1(self.x9)


#time10 = time.time_ns()

#deltaTime10 = time10 - time9
'''
deltaTime1 = time1 - time0;
deltaTime2 = time2 - time1;
deltaTime3 = time3 - time2;
deltaTime4 = time4 - time3;
deltaTime5 = time5 - time4;
deltaTime6 = time6 - time5;
deltaTime7 = time7 - time6;
deltaTime8 = time8 - time7;
deltaTime9 = time9 - time8;
deltaTime10 = time10 - time9;
print(str(deltaTime1) + ' ' + str(deltaTime2) + ' ' + str(deltaTime3) + ' ' + str(deltaTime4) + ' ' + str(deltaTime5) + ' ' + str(deltaTime6) + ' ' + str(deltaTime7) + ' ' + str(deltaTime8) + ' ' + str(deltaTime9) + ' ' + str(deltaTime10))
print("FinalConv2d " + str(deltaTime10))
'''

#deltaTime = time10 - time0
#print(str(deltaTime))

return out

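The timing probes in densenet.py above are left commented out and track stages through manually numbered variables (time0 ... time10), with a print of the raw deltas at the end of each forward pass. A minimal sketch of the same idea, assuming a small helper class (StageTimer, not part of this commit) that records time.time_ns() deltas between named stages:

import time

class StageTimer:
    """Records time.time_ns() deltas between successive named stages."""
    def __init__(self, tag):
        self.tag = tag
        self.last = time.time_ns()
        self.deltas = []

    def mark(self, name):
        now = time.time_ns()
        self.deltas.append((name, now - self.last))
        self.last = now

    def report(self):
        # Mirrors the commented print statements: tag followed by the raw deltas
        print(self.tag + ' ' + ' '.join(str(d) for _, d in self.deltas))

# Hypothetical use inside DenseNet2D_down_block.forward:
#   timer = StageTimer("DownBlock")
#   if self.down_size != None:
#       x = self.max_pool(x)
#   timer.mark("max_pool")
#   ...
#   timer.report()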
2 changes: 1 addition & 1 deletion opt.py
@@ -15,7 +15,7 @@ def parse_args():
parser.add_argument('--lr', type=float,default= 1e-3,help='Learning rate')
parser.add_argument('--save', help='save folder name',default='0try')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--load', type=str, default=None, help='load checkpoint file name')
parser.add_argument('--load', type=str, default='best_model.pkl', help='load checkpoint file name')
parser.add_argument('--resume', action='store_true', help='resume train from load chkpoint')
parser.add_argument('--test', action='store_true', help='test only')
parser.add_argument('--savemodel',action='store_true',help='checkpoint save the model')
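The single change to opt.py makes best_model.pkl the default value of --load, so test.py can find a checkpoint without an explicit flag. A minimal sketch of how that default is consumed, assuming the torch.load / load_state_dict flow shown in the test.py diff below; the standalone scaffolding here is not part of the commit:

import argparse
import os
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--load', type=str, default='best_model.pkl',
                    help='load checkpoint file name')
args = parser.parse_args()

# test.py exits when the checkpoint is missing, as in the diff below
if not os.path.exists(args.load):
    print("model path not found !!!")
    raise SystemExit(1)

state_dict = torch.load(args.load)
# model.load_state_dict(state_dict)  # applied to the model chosen via args.model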
37 changes: 36 additions & 1 deletion test.py
@@ -18,6 +18,33 @@
from utils import get_predictions
#%%

"""
The new code for profiling starts here
@author: Ying
"""

"""
#The following lines of code are useful when profiling with PyProf
import torch.cuda.profiler as profiler
from apex import pyprof
pyprof.nvtx.init()
#torch.autograd.profiler.emit_nvtx()
"""
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

"""
#The following line of code is useful when we profile with cProfile
import cProfile
"""

"""
The new code for profiling ends here
@author: Ying
"""

if __name__ == '__main__':

args = parse_args()
@@ -35,10 +62,12 @@
model = model_dict[args.model]
model = model.to(device)
filename = args.load

if not os.path.exists(filename):
print("model path not found !!!")
exit(1)



model.load_state_dict(torch.load(filename))
model = model.to(device)
model.eval()
@@ -53,6 +82,8 @@
os.makedirs('test/labels/',exist_ok=True)
os.makedirs('test/output/',exist_ok=True)
os.makedirs('test/mask/',exist_ok=True)

count = 0

with torch.no_grad():
for i, batchdata in tqdm(enumerate(testloader),total=len(testloader)):
@@ -73,5 +104,9 @@
img_orig = np.array(img_orig)
combine = np.hstack([img_orig,pred_img])
plt.imsave('test/mask/{}.jpg'.format(index[j]),combine)

count = count + 1
if (count == 20):
break

os.rename('test',args.save)

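The commented notes near the top of test.py mention two profiling routes: PyProf (via apex and NVTX ranges) and cProfile. A rough sketch of the cProfile route only, with the inference loop stubbed out as run_inference (a hypothetical name, not in the commit):

import cProfile
import pstats

def run_inference():
    # body of the `with torch.no_grad():` loop from test.py would go here
    pass

profiler = cProfile.Profile()
profiler.enable()
run_inference()
profiler.disable()

pstats.Stats(profiler).sort_stats('cumulative').print_stats(20)  # top 20 entries by cumulative time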