Commit

1st

Akella17 committed Jul 4, 2018
0 parents commit 059f3dd
Showing 90 changed files with 485 additions and 0 deletions.
14 changes: 14 additions & 0 deletions PSNR.py
@@ -0,0 +1,14 @@

import numpy as np
import math

def psnr(target, ref):
    # assumes RGB images with float pixel values in [0, 1] (peak value 1.0)
    target_data = np.array(target, dtype=np.float64)
    ref_data = np.array(ref, dtype=np.float64)

    diff = ref_data - target_data
    diff = diff.flatten('C')
    rmse = math.sqrt(np.mean(diff ** 2.))
    return 20 * math.log10(1.0 / rmse)
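
Note that psnr() computes 20·log10(1.0 / RMSE), so it assumes both inputs are the same size and use float pixel values in [0, 1]. A minimal usage sketch under that assumption (the file paths are hypothetical, chosen only for illustration):

    import numpy as np
    from PIL import Image
    import PSNR

    # hypothetical paths; any pair of equally-sized images in [0, 1] works
    sr = np.asarray(Image.open('output/lenna-EnhanceNet.png'), dtype=np.float64) / 255.0
    hr = np.asarray(Image.open('groundtruth/lenna.png'), dtype=np.float64) / 255.0
    print('PSNR: %.2f dB' % PSNR.psnr(sr, hr))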
Binary file added __pycache__/PSNR.cpython-36.pyc
Binary file not shown.
Binary file added __pycache__/backup_enhance_method.cpython-36.pyc
Binary file not shown.
Binary file added __pycache__/enhance_method.cpython-36.pyc
Binary file not shown.
Binary file added __pycache__/enhance_method_1.cpython-36.pyc
Binary file not shown.
Binary file added __pycache__/nnlib.cpython-36.pyc
Binary file not shown.
44 changes: 44 additions & 0 deletions enhancenet.py
@@ -0,0 +1,44 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from nnlib import *
import PSNR

PER_CHANNEL_MEANS = np.array([0.47614917, 0.45001204, 0.40904046])
fns = sorted([fn for fn in os.listdir('input/')])
if not os.path.exists('output'):
    os.makedirs('output')
for fn in fns:
    fne = ''.join(fn.split('.')[:-1])
    if os.path.isfile('output/%s-EnhanceNet.png' % fne):
        print('skipping %s' % fn)
        continue
    imgs = loadimg('input/'+fn)
    if imgs is None:
        continue
    imgs = np.expand_dims(imgs, axis=0)
    imgsize = np.shape(imgs)[1:]
    print('processing %s' % fn)
    xs = tf.placeholder(tf.float32, [1, imgsize[0], imgsize[1], imgsize[2]])
    rblock = [resi, [[conv], [relu], [conv]]]
    ys_est = NN('generator',
                [xs,
                 [conv], [relu],
                 rblock, rblock, rblock, rblock, rblock,
                 rblock, rblock, rblock, rblock, rblock,
                 [upsample], [conv], [relu],
                 [upsample], [conv], [relu],
                 [conv], [relu],
                 [conv, 3]])
    ys_res = tf.image.resize_images(xs, [4*imgsize[0], 4*imgsize[1]],
                                    method=tf.image.ResizeMethod.BICUBIC)
    ys_est += ys_res + PER_CHANNEL_MEANS
    sess = tf.InteractiveSession()
    tf.train.Saver().restore(sess, os.getcwd()+'/weights')
    output = sess.run([ys_est, ys_res+PER_CHANNEL_MEANS],
                      feed_dict={xs: imgs-PER_CHANNEL_MEANS})

    #print(PSNR.psnr(output[0][0][::4,::4,:], imgs))
    saveimg(output[0][0], 'output/%s-EnhanceNet.png' % fne)
    saveimg(output[1][0], 'output/%s-Bicubic.png' % fne)
    sess.close()
    tf.reset_default_graph()
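
The generator above is assembled from nnlib helpers (NN, conv, relu, resi, upsample) whose definitions are not part of this commit. Purely for orientation — assuming conv is a 3x3 convolution with 64 feature maps and resi wraps its inner layers with an identity skip connection, as in the EnhanceNet architecture — one rblock entry corresponds roughly to this TF 1.x sketch:

    import tensorflow as tf

    def residual_block_sketch(x, channels=64):
        # hypothetical expansion of rblock = [resi, [[conv], [relu], [conv]]]
        y = tf.layers.conv2d(x, channels, 3, padding='same')  # [conv]
        y = tf.nn.relu(y)                                      # [relu]
        y = tf.layers.conv2d(y, channels, 3, padding='same')  # [conv]
        return x + y                                           # resi: identity skip connection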
1 change: 1 addition & 0 deletions eval.ipynb
@@ -0,0 +1 @@
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"eval.ipynb","version":"0.3.2","views":{},"default_view":{},"provenance":[],"collapsed_sections":[]},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"metadata":{"id":"oMKxe3FlTO9Z","colab_type":"code","colab":{"autoexec":{"startup":false,"wait_interval":0},"base_uri":"https://localhost:8080/","height":105},"outputId":"fcb4e363-52af-4c25-81cf-27e0a48f117b","executionInfo":{"status":"ok","timestamp":1525861235345,"user_tz":-330,"elapsed":20596,"user":{"displayName":"Ravi Tej Akella","photoUrl":"//lh4.googleusercontent.com/-CxyhmYRONEI/AAAAAAAAAAI/AAAAAAAAR2A/PCmQzRxsHmo/s50-c-k-no/photo.jpg","userId":"106703338527093367257"}}},"cell_type":"code","source":["!apt-get install -y -qq software-properties-common python-software-properties module-init-tools\n","!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null\n","!apt-get update -qq 2>&1 > /dev/null\n","!apt-get -y install -qq google-drive-ocamlfuse fuse\n","from google.colab import auth\n","auth.authenticate_user()\n","from oauth2client.client import GoogleCredentials\n","creds = GoogleCredentials.get_application_default()\n","import getpass\n","!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL\n","vcode = getpass.getpass()\n","!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}"],"execution_count":1,"outputs":[{"output_type":"stream","text":["Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id=32555940559.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force\r\n","··········\n","Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id=32555940559.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force\n","Please enter the verification code: Access token retrieved correctly.\n"],"name":"stdout"}]},{"metadata":{"id":"HDA8JfxgTUGk","colab_type":"code","colab":{"autoexec":{"startup":false,"wait_interval":0}}},"cell_type":"code","source":["!mkdir -p drive\n","!google-drive-ocamlfuse drive"],"execution_count":0,"outputs":[]},{"metadata":{"id":"0EjTxU_36dbq","colab_type":"code","colab":{"autoexec":{"startup":false,"wait_interval":0},"base_uri":"https://localhost:8080/","height":394},"outputId":"af8ad33b-ea04-41d9-b2f8-b99a7eb679fa","executionInfo":{"status":"ok","timestamp":1525861371344,"user_tz":-330,"elapsed":122732,"user":{"displayName":"Ravi Tej Akella","photoUrl":"//lh4.googleusercontent.com/-CxyhmYRONEI/AAAAAAAAAAI/AAAAAAAAR2A/PCmQzRxsHmo/s50-c-k-no/photo.jpg","userId":"106703338527093367257"}}},"cell_type":"code","source":["import os\n","#!ls\n","os.chdir(\"/content/drive/App/Enhancenet\")\n","!python enhancenet.py\n"],"execution_count":3,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n"," from ._conv import register_converters as _register_converters\n","processing baboon.bmp\n","processing barbara.bmp\n","processing bridge.bmp\n","processing coastguard.bmp\n","processing comic.bmp\n","processing face.bmp\n","processing flowers.bmp\n","processing foreman.bmp\n","processing lenna.bmp\n","processing man.bmp\n","processing monarch.bmp\n","processing pepper.bmp\n","processing sun_aaalbzqrimafwbiv.jpg\n","processing sun_aaaulhwrhqgejnyt.jpg\n","processing sun_aacphuqehdodwawg.jpg\n","processing sun_aacyknxirsfolpon.jpg\n","processing sun_aakfpvtynorwesef.jpg\n","processing sun_aamvxnvouicstkjb.jpg\n","processing zebra.bmp\n"],"name":"stdout"}]},{"metadata":{"id":"cPmg_utaXcE8","colab_type":"code","colab":{"autoexec":{"startup":false,"wait_interval":0},"base_uri":"https://localhost:8080/","height":88},"outputId":"75b198d6-926b-4aa6-945d-2eb244c5b32b","executionInfo":{"status":"ok","timestamp":1525833657681,"user_tz":-330,"elapsed":81298,"user":{"displayName":"Ravi Tej Akella","photoUrl":"//lh4.googleusercontent.com/-CxyhmYRONEI/AAAAAAAAAAI/AAAAAAAAR2A/PCmQzRxsHmo/s50-c-k-no/photo.jpg","userId":"106703338527093367257"}}},"cell_type":"code","source":["'''%matplotlib inline\n","import matplotlib.pyplot as plt\n","import os\n","os.chdir(\"/content/drive/App/EnhanceNet\")\n","!python mod_enhancenet.py\n","# upscales HR image by 4x zoom'''"],"execution_count":4,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n"," from ._conv import register_converters as _register_converters\n","(3, 256, 256, 3)\n"],"name":"stdout"}]},{"metadata":{"id":"ivXqnhdVGlb_","colab_type":"code","colab":{"autoexec":{"startup":false,"wait_interval":0},"base_uri":"https://localhost:8080/","height":173},"outputId":"9e45b039-0319-4f36-b780-3ee32edb32ee","executionInfo":{"status":"ok","timestamp":1525820031786,"user_tz":-330,"elapsed":25797,"user":{"displayName":"Ravi Tej Akella","photoUrl":"//lh4.googleusercontent.com/-CxyhmYRONEI/AAAAAAAAAAI/AAAAAAAAR2A/PCmQzRxsHmo/s50-c-k-no/photo.jpg","userId":"106703338527093367257"}}},"cell_type":"code","source":["'''%matplotlib inline\n","import matplotlib.pyplot as plt\n","import os\n","os.chdir(\"/content/drive/App/EnhanceNet\")\n","!python modified_enhancenet.py\n","# upscales LR image by 4x zoom\n","######@@@@@@@@@@@@@ OOM problem in enhance graph for 256x256 input'''"],"execution_count":4,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n"," from ._conv import register_converters as _register_converters\n","2018-05-08 22:54:15.749260: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 528.50MiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.\n","2018-05-08 22:54:15.768065: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.05GiB. 
The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.\n","2018-05-08 22:54:15.769560: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 288.56MiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.\n","2018-05-08 22:54:15.783967: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 144.00MiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.\n","2018-05-08 22:54:15.789339: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 1.12GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.\n","2018-05-08 22:54:15.817047: W tensorflow/core/common_runtime/bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 603.03MiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.\n"],"name":"stdout"}]},{"metadata":{"id":"DUD_-NI8xdDk","colab_type":"code","colab":{"autoexec":{"startup":false,"wait_interval":0}}},"cell_type":"code","source":[""],"execution_count":0,"outputs":[]}]}
Binary file added input/baboon.bmp
Binary file not shown.
Binary file added input/barbara.bmp
Binary file not shown.
Binary file added input/bridge.bmp
Binary file not shown.
Binary file added input/coastguard.bmp
Binary file not shown.
Binary file added input/comic.bmp
Binary file not shown.
Binary file added input/face.bmp
Binary file not shown.
Binary file added input/flowers.bmp
Binary file not shown.
Binary file added input/foreman.bmp
Binary file not shown.
Binary file added input/lenna.bmp
Binary file not shown.
Binary file added input/man.bmp
Binary file not shown.
Binary file added input/monarch.bmp
Binary file not shown.
Binary file added input/pepper.bmp
Binary file not shown.
Binary file added input/sun_aaalbzqrimafwbiv.jpg
Binary file not shown.
Binary file added input/sun_aaaulhwrhqgejnyt.jpg
Binary file not shown.
Binary file added input/sun_aacphuqehdodwawg.jpg
Binary file not shown.
Binary file added input/sun_aacyknxirsfolpon.jpg
Binary file not shown.
Binary file added input/sun_aakfpvtynorwesef.jpg
Binary file not shown.
Binary file added input/sun_aamvxnvouicstkjb.jpg
Binary file not shown.
Binary file added input/zebra.bmp
Binary file not shown.
119 changes: 119 additions & 0 deletions mod_enhancenet.py
@@ -0,0 +1,119 @@
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from nnlib import *
#%matplotlib inline

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import os
os.chdir("/content/drive/App/EnhanceNet")

#import cv2
from PIL import Image
image_files = os.listdir("/content/drive/App/CVAE/natural_images")
trainX = []
# print(len(image_files))
for i, file in enumerate(image_files):
    if i % 100 == 0 and file.endswith(".jpg"):
        img = Image.open("/content/drive/App/CVAE/natural_images/"+file).convert('RGB')
        w, h = img.size
        # ar = cv2.cvtColor(cv2.imread("/content/drive/App/CVAE/natural_images/"+file),cv2.COLOR_BGR2RGB)
        trainX.append(np.array(img)/255)
trainX = np.asarray(trainX)
#trainX = trainX/255
#plt.axis("off")
#plt.imshow(trainX[0])

from sklearn.model_selection import train_test_split
x_train, x_test = train_test_split(trainX, test_size=0.1, random_state=2)

###############################################################################################
import tensorflow.contrib.layers as lays

def autoencoder(inputs):
    with tf.variable_scope("CAE", reuse=tf.AUTO_REUSE):
        net = lays.conv2d(inputs, 3, [3, 3], stride=2, padding='SAME')
        net = lays.conv2d(net, 3, [3, 3], stride=2, padding='SAME')
        net0 = lays.conv2d(net, 3, [3, 3], stride=2, padding='SAME')
        #net = lays.conv2d(net, 3, [3, 3], stride=2, padding='SAME')

        upsampled256 = tf.image.resize_bicubic(net, (256, 256))
        upsampled128 = tf.image.resize_bicubic(net, (128, 128))
        upsampled64 = tf.image.resize_bicubic(net, (64, 64))

        net1 = lays.conv2d_transpose(net0, 3, [3, 3], stride=2, padding='SAME')
        net1 = tf.add(net1, upsampled64)
        net2 = lays.conv2d_transpose(net1, 3, [3, 3], stride=2, padding='SAME')
        net2 = tf.add(net2, upsampled128)
        net3 = lays.conv2d_transpose(net2, 3, [3, 3], stride=2, padding='SAME', activation_fn=tf.nn.sigmoid)
        net3 = tf.add(net3, upsampled256)
        return [net3, net1]

ae_inputs = tf.placeholder(tf.float32, (None, 256, 256, 3))
ae_outputs, net1 = autoencoder(ae_inputs) #tf.image.resize_bicubic(temp, (256,256))
#loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs))
#train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
init = tf.global_variables_initializer()
#################################
'''all_vars = tf.global_variables()
model_one_vars = [k for k in all_vars if k.name.startswith("CAE")]
print(len(all_vars))
print(len(model_one_vars))'''
#################################
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    saver.restore(sess, "/content/drive/App/EnhanceNet/models/model.ckpt")
    #batch_img = x_test
    recon_LR_img, recon_HR_img = sess.run([net1, ae_outputs], feed_dict={ae_inputs: x_test})
###############################################################################################
timepas = recon_HR_img
print(timepas.shape)
PER_CHANNEL_MEANS = np.array([0.47614917, 0.45001204, 0.40904046])
for i, image in enumerate(recon_HR_img[:5]):
    imgs = np.expand_dims(image, axis=0)
    imgsize = np.shape(imgs)[1:]
    #print('processing %s' % fn)
    xs = tf.placeholder(tf.float32, [1, imgsize[0], imgsize[1], imgsize[2]])
    rblock = [resi, [[conv], [relu], [conv]]]
    ys_est = NN('generator',
                [xs,
                 [conv], [relu],
                 rblock, rblock, rblock, rblock, rblock,
                 rblock, rblock, rblock, rblock, rblock,
                 [upsample], [conv], [relu],
                 [upsample], [conv], [relu],
                 [conv], [relu],
                 [conv, 3]])
    ys_res = tf.image.resize_images(xs, [4*imgsize[0], 4*imgsize[1]],
                                    method=tf.image.ResizeMethod.BICUBIC)
    ys_est += ys_res + PER_CHANNEL_MEANS
    sess = tf.InteractiveSession()
    ###################################################################
    all_vars = tf.global_variables()
    model_one_vars = [k for k in all_vars if k.name.startswith("CAE")]
    set_1 = set(model_one_vars)
    temp_set = [k for k in all_vars if k.name.startswith("generator")]
    model_two_vars = [o for o in all_vars if o not in set_1]
    '''print(len(all_vars))
    print(len(model_one_vars))
    print(len(temp_set))
    print(len(model_two_vars))'''
    #for i in model_one_vars :
    #    print(i)

    ###################################################################
    tf.train.Saver(model_two_vars).restore(sess, os.getcwd()+'/weights')
    output = sess.run([ys_est, ys_res+PER_CHANNEL_MEANS],
                      feed_dict={xs: imgs-PER_CHANNEL_MEANS})
    plt.axis("off")
    plt.imshow(output[0][0])
    saveimg(output[0][0], 'output/IMG-%d-EnhanceNet.png' % i)
    saveimg(recon_HR_img[i], 'output/IMG-%d-HR.png' % i)
    saveimg(x_test[i], 'output/IMG-%d-LR.png' % i)
    #halter = input("halted")
    #saveimg(output[1][0], 'output/%s-Bicubic.png' % fne)
    sess.close()
    tf.reset_default_graph()
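
One detail worth noting in the script above: the CAE weights live in models/model.ckpt while the EnhanceNet generator weights live in ./weights, so each restore uses a tf.train.Saver limited to one variable set, selected by variable-scope prefix. A minimal sketch of that partial-restore pattern — the dense layers are stand-ins, only the scope names and checkpoint paths come from the code above, and both checkpoints are assumed to exist:

    import tensorflow as tf

    with tf.variable_scope("CAE"):
        cae_out = tf.layers.dense(tf.placeholder(tf.float32, [None, 8]), 8)
    with tf.variable_scope("generator"):
        gen_out = tf.layers.dense(tf.placeholder(tf.float32, [None, 8]), 8)

    cae_vars = [v for v in tf.global_variables() if v.name.startswith("CAE")]
    gen_vars = [v for v in tf.global_variables() if v.name.startswith("generator")]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # each Saver only touches its own var_list, so the two checkpoints restore independently
        tf.train.Saver(cae_vars).restore(sess, "/content/drive/App/EnhanceNet/models/model.ckpt")
        tf.train.Saver(gen_vars).restore(sess, "./weights")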

2 changes: 2 additions & 0 deletions models/checkpoint
@@ -0,0 +1,2 @@
model_checkpoint_path: "/content/drive/App/EnhanceNet/models/model.ckpt"
all_model_checkpoint_paths: "/content/drive/App/EnhanceNet/models/model.ckpt"
2 changes: 2 additions & 0 deletions models/last_model/checkpoint
@@ -0,0 +1,2 @@
model_checkpoint_path: "/content/drive/App/EnhanceNet/models/model.ckpt"
all_model_checkpoint_paths: "/content/drive/App/EnhanceNet/models/model.ckpt"
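
The checkpoint files above are TensorFlow's plain-text checkpoint indexes: model_checkpoint_path points at the model.ckpt prefix whose tensors are stored in the .data-00000-of-00001/.index/.meta files added below. A hedged sketch of how such a checkpoint is typically consumed (directory path taken from this listing; assumes the files are present on disk):

    import tensorflow as tf

    # reads the "checkpoint" file and resolves the model.ckpt prefix
    ckpt_path = tf.train.latest_checkpoint("/content/drive/App/EnhanceNet/models")
    saver = tf.train.import_meta_graph(ckpt_path + ".meta")  # rebuild the saved graph
    with tf.Session() as sess:
        saver.restore(sess, ckpt_path)                       # load weights from .data/.index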
Binary file added models/last_model/model.ckpt.data-00000-of-00001
Binary file not shown.
Binary file added models/last_model/model.ckpt.index
Binary file not shown.
Binary file added models/last_model/model.ckpt.meta
Binary file not shown.
Binary file added models/model.ckpt.data-00000-of-00001
Binary file not shown.
Binary file added models/model.ckpt.index
Binary file not shown.
Binary file added models/model.ckpt.meta
Binary file not shown.
0 comments on commit 059f3dd