activation.py (forked from NifTK/NiftyNet)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function

import tensorflow as tf

from niftynet.layer.base_layer import TrainableLayer
from niftynet.utilities.util_common import look_up_operations


def prelu(f_in, channelwise_params):
    # parametric ReLU: identity for x >= 0, learnable per-channel slope for x < 0
    pos = tf.nn.relu(f_in)
    neg = channelwise_params * (f_in - tf.abs(f_in)) * 0.5
    return pos + neg


def selu(x, name):
    # scaled exponential linear unit (self-normalising activation)
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))


def leaky_relu(x, name):
    # leaky ReLU with a fixed negative slope of 2 * half_alpha = 0.02:
    # the expression equals x for x >= 0 and 0.02 * x for x < 0
    half_alpha = 0.01
    return (0.5 + half_alpha) * x + (0.5 - half_alpha) * abs(x)


# configuration strings accepted by ActiLayer, mapped to the corresponding ops
SUPPORTED_OP = {'relu': tf.nn.relu,
                'relu6': tf.nn.relu6,
                'elu': tf.nn.elu,
                'softplus': tf.nn.softplus,
                'softsign': tf.nn.softsign,
                'sigmoid': tf.nn.sigmoid,
                'tanh': tf.nn.tanh,
                'prelu': prelu,
                'selu': selu,
                'leakyrelu': leaky_relu,
                'dropout': tf.nn.dropout}
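
# Usage note (not part of the original file): ActiLayer lower-cases the
# requested name before the lookup, so a configuration value such as
# 'LeakyReLU' resolves to the 'leakyrelu' entry above, and names outside this
# table are rejected by look_up_operations. A hypothetical lookup, assuming
# look_up_operations simply returns the matching dictionary entry:
#
#     relu_op = look_up_operations('relu', SUPPORTED_OP)   # -> tf.nn.relu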


class ActiLayer(TrainableLayer):
    """
    Apply an element-wise non-linear activation function.

    'prelu' uses trainable parameters, which are initialised to zeros.
    Dropout is also supported, via the keep_prob argument of layer_op.
    """

    def __init__(self, func, regularizer=None, name='activation'):
        self.func = func.lower()
        self.layer_name = '{}_{}'.format(self.func, name)
        super(ActiLayer, self).__init__(name=self.layer_name)

        # initialiser and regulariser for the trainable prelu 'alpha' variables
        self.initializers = {'alpha': tf.constant_initializer(0.0)}
        self.regularizers = {'alpha': regularizer}

    def layer_op(self, input_tensor, keep_prob=None):
        func_ = look_up_operations(self.func, SUPPORTED_OP)
        if self.func == 'prelu':
            # one trainable slope per feature channel (last dimension)
            alphas = tf.get_variable(
                'alpha', input_tensor.get_shape()[-1],
                initializer=self.initializers['alpha'],
                regularizer=self.regularizers['alpha'])
            output_tensor = func_(input_tensor, alphas)
        elif self.func == 'dropout':
            assert keep_prob > 0.0
            assert keep_prob <= 1.0
            output_tensor = func_(input_tensor,
                                  keep_prob=keep_prob,
                                  name='dropout')
        else:
            output_tensor = func_(input_tensor, name='acti')
        return output_tensor
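

# Minimal usage sketch (not part of the original module). It assumes a
# TensorFlow 1.x graph and the usual NiftyNet convention that calling a
# TrainableLayer instance dispatches to layer_op; the tensor names below are
# illustrative only.
if __name__ == '__main__':
    # a batch of 3-D feature maps with 8 channels
    images = tf.placeholder(tf.float32, shape=(None, 16, 16, 16, 8))

    # trainable PReLU: one 'alpha' per channel, initialised to zero
    prelu_out = ActiLayer(func='prelu', name='acti')(images)

    # dropout uses the same interface, with keep_prob supplied at call time
    dropout_out = ActiLayer(func='dropout')(prelu_out, keep_prob=0.5)

    print(prelu_out.get_shape(), dropout_out.get_shape())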