Compatible with tf2+
kerlomz committed Nov 15, 2020
1 parent 4f04a92 commit f2f0a31
Showing 13 changed files with 273 additions and 228 deletions.
12 changes: 6 additions & 6 deletions app.py
@@ -74,18 +74,18 @@ def __init__(self, parent: tk.Tk):
         self.edit_var = tk.DoubleVar()
         self.label_from_var = tk.StringVar()
 
-        self.memory_usage_menu = tk.Menu(self.menubar, tearoff=False)
-        self.memory_usage_menu.add_radiobutton(label="50%", variable=self.edit_var, value=0.5)
-        self.memory_usage_menu.add_radiobutton(label="60%", variable=self.edit_var, value=0.6)
-        self.memory_usage_menu.add_radiobutton(label="70%", variable=self.edit_var, value=0.7)
-        self.memory_usage_menu.add_radiobutton(label="80%", variable=self.edit_var, value=0.8)
+        # self.memory_usage_menu = tk.Menu(self.menubar, tearoff=False)
+        # self.memory_usage_menu.add_radiobutton(label="50%", variable=self.edit_var, value=0.5)
+        # self.memory_usage_menu.add_radiobutton(label="60%", variable=self.edit_var, value=0.6)
+        # self.memory_usage_menu.add_radiobutton(label="70%", variable=self.edit_var, value=0.7)
+        # self.memory_usage_menu.add_radiobutton(label="80%", variable=self.edit_var, value=0.8)
 
         self.label_from_menu = tk.Menu(self.menubar, tearoff=False)
         self.label_from_menu.add_radiobutton(label="FileName", variable=self.label_from_var, value='FileName')
         self.label_from_menu.add_radiobutton(label="TXT", variable=self.label_from_var, value='TXT')
 
         self.menubar.add_cascade(label="System", menu=self.system_menu)
-        self.system_menu.add_cascade(label="Memory Usage", menu=self.memory_usage_menu)
+        # self.system_menu.add_cascade(label="Memory Usage", menu=self.memory_usage_menu)
 
         self.data_menu.add_command(label="Data Augmentation", command=lambda: self.popup_data_augmentation())
         self.data_menu.add_command(label="Pretreatment", command=lambda: self.popup_pretreatment())
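Note on the hunk above: the GPU "Memory Usage" menu is commented out rather than ported. Assuming it previously drove TF1's per_process_gpu_memory_fraction, the TF2-native replacement configures devices up front rather than per session; a minimal sketch under that assumption (the 4096 MB cap is illustrative):

import tensorflow as tf

# Must run before any op touches the GPU.
for gpu in tf.config.list_physical_devices('GPU'):
    # Grow allocation on demand instead of reserving a fixed fraction.
    tf.config.experimental.set_memory_growth(gpu, True)
    # Or enforce a hard cap (pick one; the two settings are mutually exclusive):
    # tf.config.experimental.set_virtual_device_configuration(
    #     gpu, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])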
14 changes: 7 additions & 7 deletions core.py
@@ -137,7 +137,7 @@ def _build_train_op(self):
         """Build the training op."""
 
         # Step counter
-        self.global_step = tf.train.get_or_create_global_step()
+        self.global_step = tf.compat.v1.train.get_or_create_global_step()
 
         # Loss function
         if self.model_conf.loss_func == LossFunction.CTC:
@@ -175,7 +175,7 @@ def _build_train_op(self):
                 amsbound=True
             )
         elif self.model_conf.neu_optimizer == Optimizer.Adam:
-            self.optimizer = tf.train.AdamOptimizer(
+            self.optimizer = tf.compat.v1.train.AdamOptimizer(
                 learning_rate=self.lrn_rate
             )
         elif self.model_conf.neu_optimizer == Optimizer.RAdam:
@@ -185,26 +185,26 @@
                 min_lr=1e-6
             )
         elif self.model_conf.neu_optimizer == Optimizer.Momentum:
-            self.optimizer = tf.train.MomentumOptimizer(
+            self.optimizer = tf.compat.v1.train.MomentumOptimizer(
                 learning_rate=self.lrn_rate,
                 use_nesterov=True,
                 momentum=0.9,
             )
         elif self.model_conf.neu_optimizer == Optimizer.SGD:
-            self.optimizer = tf.train.GradientDescentOptimizer(
+            self.optimizer = tf.compat.v1.train.GradientDescentOptimizer(
                 learning_rate=self.lrn_rate,
             )
         elif self.model_conf.neu_optimizer == Optimizer.AdaGrad:
-            self.optimizer = tf.train.AdagradOptimizer(
+            self.optimizer = tf.compat.v1.train.AdagradOptimizer(
                 learning_rate=self.lrn_rate,
             )
         elif self.model_conf.neu_optimizer == Optimizer.RMSProp:
-            self.optimizer = tf.train.RMSPropOptimizer(
+            self.optimizer = tf.compat.v1.train.RMSPropOptimizer(
                 learning_rate=self.lrn_rate,
             )
 
         # Update BN ops (moving_mean, moving_variance)
-        update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
+        update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
 
         # Fuse train_op with update_ops
         with tf.control_dependencies(update_ops):
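The pattern behind every hunk in core.py: keep the TF1 graph-mode training code, but route it through the tf.compat.v1 shim so it runs on a TF2 install. A self-contained sketch of the same optimizer/UPDATE_OPS wiring (the toy model, shapes, and 1e-3 learning rate are illustrative, not the project's):

import tensorflow as tf

tf.compat.v1.disable_v2_behavior()  # keep TF1 graph semantics on a TF2 runtime

# Toy stand-in for the real model; shapes and sizes are illustrative.
x = tf.compat.v1.placeholder(tf.float32, [None, 4])
y = tf.compat.v1.placeholder(tf.float32, [None, 1])
h = tf.compat.v1.layers.batch_normalization(x, momentum=0.9, training=True)
pred = tf.compat.v1.layers.dense(h, 1)
loss = tf.reduce_mean(tf.square(pred - y))

global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3)

# BN's moving_mean/moving_variance updates live in UPDATE_OPS and run only
# if made a dependency of the train op; this is exactly the wiring core.py adds.
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss, global_step=global_step)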
2 changes: 1 addition & 1 deletion decoder.py
@@ -15,7 +15,7 @@ def __init__(self, model_conf: ModelConfig):
 
     def ctc(self, inputs, sequence_length):
         """Decode outputs trained with CTC loss."""
-        ctc_decode, _ = tf.nn.ctc_beam_search_decoder_v2(inputs, sequence_length, beam_width=1)
+        ctc_decode, _ = tf.compat.v1.nn.ctc_beam_search_decoder_v2(inputs, sequence_length, beam_width=1)
         decoded_sequences = tf.sparse.to_dense(ctc_decode[0], default_value=self.category_num, name='dense_decoded')
         return decoded_sequences
 
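In TF2, tf.nn.ctc_beam_search_decoder already is the v2 implementation, so the compat-prefixed call above is equivalent. A standalone sketch of the decode step (batch size, time steps, and class count are made up; with beam_width=1 this is effectively a greedy decode):

import tensorflow as tf

max_time, batch, num_classes = 10, 2, 5  # illustrative shapes
logits = tf.random.normal([max_time, batch, num_classes])  # time-major, as CTC expects
seq_len = tf.fill([batch], max_time)

decoded, log_probs = tf.nn.ctc_beam_search_decoder(logits, seq_len, beam_width=1)
# decoded[0] is a SparseTensor; pad with an out-of-vocabulary sentinel,
# mirroring default_value=self.category_num in decoder.py.
dense_decoded = tf.sparse.to_dense(decoded[0], default_value=num_classes)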
2 changes: 1 addition & 1 deletion loss.py
@@ -31,7 +31,7 @@ def cross_entropy(labels, logits):
     def ctc(labels, logits, sequence_length):
         """CTC loss function."""
 
-        return tf.nn.ctc_loss_v2(
+        return tf.compat.v1.nn.ctc_loss_v2(
             labels=labels,
             logits=logits,
             logit_length=sequence_length,
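Likewise, tf.compat.v1.nn.ctc_loss_v2 is the same function TF2 exposes as tf.nn.ctc_loss. The hunk cuts off before the remaining arguments, so this standalone sketch fills them in with assumed values (dense padded labels, blank as the last class index):

import tensorflow as tf

max_time, batch, num_classes = 10, 2, 5  # illustrative shapes
logits = tf.random.normal([max_time, batch, num_classes])  # time-major by default
labels = tf.constant([[1, 2, 3, 0], [2, 1, 0, 0]], dtype=tf.int32)  # padded dense labels
label_length = tf.constant([3, 2], dtype=tf.int32)
logit_length = tf.fill([batch], max_time)

loss = tf.reduce_mean(tf.nn.ctc_loss(
    labels=labels,
    logits=logits,
    label_length=label_length,
    logit_length=logit_length,
    blank_index=num_classes - 1,  # assumption; the project may use a different blank
))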
2 changes: 1 addition & 1 deletion network/CNN.py
@@ -54,7 +54,7 @@ def block(self, inputs, filters, kernel_size, strides, dilation_rate=(1, 1)):
             kernel_initializer=self.utils.msra_initializer(kernel_size, filters),
             padding='SAME',
         )(inputs)
-        inputs = tf.layers.batch_normalization(
+        inputs = tf.compat.v1.layers.batch_normalization(
             inputs,
             reuse=False,
             momentum=0.9,
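tf.compat.v1.layers.batch_normalization preserves the TF1 contract: its moving-average updates go into the UPDATE_OPS collection, which core.py wires into the train op above. Had the block been ported to TF2-native Keras layers instead, the layer would manage its own updates; a hedged sketch of that alternative:

import tensorflow as tf

x = tf.random.normal([8, 32, 32, 3])  # illustrative NHWC batch

# TF2-native equivalent of the compat call above; momentum matches the diff.
bn = tf.keras.layers.BatchNormalization(momentum=0.9)
y = bn(x, training=True)  # updates moving stats itself; no UPDATE_OPS wiring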
28 changes: 14 additions & 14 deletions network/utils.py
@@ -81,7 +81,7 @@ def cnn_layer(self, index, inputs, filters, kernel_size, strides):
             padding='same',
             name='cnn-{}'.format(index + 1),
         )(inputs)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             fused=True,
             renorm_clipping={
@@ -114,7 +114,7 @@ def dense_building_block(self, input_tensor, growth_rate, name, dropout_rate=None):
             Output tensor for the block.
         """
         # 1x1 Convolution (Bottleneck layer)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             input_tensor,
             reuse=False,
             momentum=0.9,
@@ -133,7 +133,7 @@ def dense_building_block(self, input_tensor, growth_rate, name, dropout_rate=None):
             x = tf.keras.layers.Dropout(dropout_rate)(x)
 
         # 3x3 Convolution
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
@@ -180,7 +180,7 @@ def transition_block(self, input_tensor, reduction, name):
         # Returns
             output tensor for the block.
         """
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             input_tensor,
             reuse=False,
             momentum=0.9,
@@ -228,7 +228,7 @@ def residual_building_block(self, input_tensor, kernel_size, filters, stage, block):
             kernel_initializer='he_normal',
             padding='same',
             name=conv_name_base + '2a')(input_tensor)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
@@ -243,7 +243,7 @@ def residual_building_block(self, input_tensor, kernel_size, filters, stage, block):
             padding='same',
             kernel_initializer='he_normal',
             name=conv_name_base + '2b')(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
@@ -258,7 +258,7 @@ def residual_building_block(self, input_tensor, kernel_size, filters, stage, block):
             kernel_initializer='he_normal',
             padding='same',
             name=conv_name_base + '2c')(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
@@ -273,7 +273,7 @@ def residual_building_block(self, input_tensor, kernel_size, filters, stage, block):
             kernel_initializer='he_normal',
             padding='same',
             name=conv_name_base + '1')(input_tensor)
-        shortcut = tf.layers.batch_normalization(
+        shortcut = tf.compat.v1.layers.batch_normalization(
             shortcut,
             reuse=False,
             momentum=0.9,
@@ -310,7 +310,7 @@ def identity_block(self, input_tensor, kernel_size, filters, stage, block):
             padding='same',
             name=conv_name_base + '2a'
         )(input_tensor)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             axis=bn_axis,
             reuse=False,
@@ -327,7 +327,7 @@ def identity_block(self, input_tensor, kernel_size, filters, stage, block):
             kernel_initializer='he_normal',
             name=conv_name_base + '2b'
         )(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             axis=bn_axis,
             reuse=False,
@@ -343,7 +343,7 @@ def identity_block(self, input_tensor, kernel_size, filters, stage, block):
             padding='same',
             kernel_initializer='he_normal',
             name=conv_name_base + '2c')(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             axis=bn_axis,
             reuse=False,
@@ -383,7 +383,7 @@ def inverted_res_block(self, input_tensor, expansion, stride, filters, block_id):
             activation=None,
             name=prefix + 'expand'
         )(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
@@ -403,7 +403,7 @@ def inverted_res_block(self, input_tensor, expansion, stride, filters, block_id):
             padding='same',
             name=prefix + 'depthwise'
         )(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
@@ -421,7 +421,7 @@ def inverted_res_block(self, input_tensor, expansion, stride, filters, block_id):
             activation=None,
             name=prefix + 'project'
         )(x)
-        x = tf.layers.batch_normalization(
+        x = tf.compat.v1.layers.batch_normalization(
             x,
             reuse=False,
             momentum=0.9,
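All 14 hunks in this file are the same one-line substitution, tf.layers.batch_normalization to tf.compat.v1.layers.batch_normalization, while the surrounding Conv2D/Dropout calls stay on tf.keras.layers. For contrast, the identity-block pattern written entirely in TF2-native Keras might look like this (an illustrative sketch, not the project's code):

import tensorflow as tf
from tensorflow.keras import layers

def identity_block_tf2(x, filters, kernel_size=3):
    # Three conv+BN+ReLU stages with a residual add, mirroring identity_block.
    shortcut = x
    y = layers.Conv2D(filters, 1, padding='same', kernel_initializer='he_normal')(x)
    y = layers.BatchNormalization(momentum=0.9)(y)
    y = layers.Activation('relu')(y)
    y = layers.Conv2D(filters, kernel_size, padding='same', kernel_initializer='he_normal')(y)
    y = layers.BatchNormalization(momentum=0.9)(y)
    y = layers.Activation('relu')(y)
    y = layers.Conv2D(filters, 1, padding='same', kernel_initializer='he_normal')(y)
    y = layers.BatchNormalization(momentum=0.9)(y)
    return layers.Activation('relu')(layers.add([shortcut, y]))

The residual add requires the input channel count to match filters, e.g. identity_block_tf2(tf.random.normal([1, 32, 32, 64]), 64).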