From dd216a1a164a64fb874b8bb42ff75f211698db51 Mon Sep 17 00:00:00 2001
From: dpinthinker
Date: Sun, 16 Sep 2018 16:12:51 +0800
Subject: [PATCH] Translate Chinese comments to English (#3)

* Translate Chinese comments to English
* Translate Chinese comments to English
* Translate Chinese comments to English
* Translate Chinese comments to English
* Translate Chinese comments to English
* Translate Chinese comments to English
* Translate Chinese comments to English
* Update translation
* Update translation
* Translate Chinese comments to English
* Update translation
* Update translation
* Update translation
* Update translation
* Update translations
---
 .../code/en/basic/example/tensorflow_autograd.py    |  4 ++--
 .../code/en/basic/example/tensorflow_eager.py       |  8 ++++----
 .../en/basic/example/tensorflow_manual_grad.py      | 16 ++++++++--------
 source/_static/code/en/basic/graph/1plus1.py        | 12 ++++++------
 source/_static/code/en/basic/graph/AmatmulB.py      |  4 ++--
 source/_static/code/en/basic/graph/aplusb.py        |  8 ++++----
 source/_static/code/en/basic/graph/variable.py      |  8 ++++----
 .../en/basic/graph/variable_with_initializer.py     |  4 ++--
 8 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/source/_static/code/en/basic/example/tensorflow_autograd.py b/source/_static/code/en/basic/example/tensorflow_autograd.py
index 59c0f274..b7001a56 100644
--- a/source/_static/code/en/basic/example/tensorflow_autograd.py
+++ b/source/_static/code/en/basic/example/tensorflow_autograd.py
@@ -17,7 +17,7 @@
 y_pred = a * X_ + b
 loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))
 
-# 反向传播,利用TensorFlow的梯度下降优化器自动计算并更新变量(模型参数)的梯度
+# Backpropagation: use TensorFlow's GradientDescentOptimizer to automatically compute and update the gradients of the variables (model parameters)
 train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_).minimize(loss)
 
 num_epoch = 10000
@@ -26,4 +26,4 @@
     tf.global_variables_initializer().run()
     for e in range(num_epoch):
         sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
-    print(sess.run([a, b]))
\ No newline at end of file
+    print(sess.run([a, b]))
diff --git a/source/_static/code/en/basic/example/tensorflow_eager.py b/source/_static/code/en/basic/example/tensorflow_eager.py
index 27e366c7..04e30c49 100644
--- a/source/_static/code/en/basic/example/tensorflow_eager.py
+++ b/source/_static/code/en/basic/example/tensorflow_eager.py
@@ -18,15 +18,15 @@
 num_epoch = 10000
 learning_rate = 1e-3
 for e in range(num_epoch):
-    # 前向传播
+    # Forward propagation
     y_pred = a * X + b
     loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))  # loss = 0.5 * np.sum(np.square(a * X + b - y))
 
-    # 反向传播,手动计算变量(模型参数)的梯度
+    # Backpropagation: manually compute the gradients of the variables (model parameters)
     grad_a = tf.reduce_sum((y_pred - y) * X)
     grad_b = tf.reduce_sum(y_pred - y)
 
-    # 更新参数
+    # Update the parameters
     a, b = a - learning_rate * grad_a, b - learning_rate * grad_b
 
-print(a, b)
\ No newline at end of file
+print(a, b)
diff --git a/source/_static/code/en/basic/example/tensorflow_manual_grad.py b/source/_static/code/en/basic/example/tensorflow_manual_grad.py
index e043e981..23d5b2b1 100644
--- a/source/_static/code/en/basic/example/tensorflow_manual_grad.py
+++ b/source/_static/code/en/basic/example/tensorflow_manual_grad.py
@@ -8,7 +8,7 @@
 
 import tensorflow as tf
 
-# 定义数据流图
+# Define the data flow graph
 learning_rate_ = tf.placeholder(dtype=tf.float32)
 X_ = tf.placeholder(dtype=tf.float32, shape=[5])
 y_ = tf.placeholder(dtype=tf.float32, shape=[5])
@@ -18,26 +18,26 @@
 y_pred = a * X_ + b
 loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))
 
-# 反向传播,手动计算变量(模型参数)的梯度
+# Backpropagation: manually compute the gradients of the variables (model parameters)
 grad_a = tf.reduce_sum((y_pred - y_) * X_)
 grad_b = tf.reduce_sum(y_pred - y_)
 
-# 梯度下降法,手动更新参数
+# Gradient descent: manually update the parameters
 new_a = a - learning_rate_ * grad_a
 new_b = b - learning_rate_ * grad_b
 update_a = tf.assign(a, new_a)
 update_b = tf.assign(b, new_b)
 train_op = [update_a, update_b]
 
-# 数据流图定义到此结束
-# 注意,直到目前,我们都没有进行任何实质的数据计算,仅仅是定义了一个数据图
+# End of the definition of the data flow graph
+# Note that so far we have not performed any actual data computation, but only defined a data flow graph
 
 num_epoch = 10000
 learning_rate = 1e-3
 with tf.Session() as sess:
-    # 初始化变量a和b
+    # Initialize the variables a and b
     tf.global_variables_initializer().run()
-    # 循环将数据送入上面建立的数据流图中进行计算和更新变量
+    # Iteratively feed data into the data flow graph defined above to compute and update the variables
     for e in range(num_epoch):
         sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
-    print(sess.run([a, b]))
\ No newline at end of file
+    print(sess.run([a, b]))
diff --git a/source/_static/code/en/basic/graph/1plus1.py b/source/_static/code/en/basic/graph/1plus1.py
index 6950fc58..9b27a6a3 100644
--- a/source/_static/code/en/basic/graph/1plus1.py
+++ b/source/_static/code/en/basic/graph/1plus1.py
@@ -1,10 +1,10 @@
 import tensorflow as tf
 
-# 定义一个“计算图”
-a = tf.constant(1)  # 定义一个常量Tensor(张量)
+# Define a "Computation Graph"
+a = tf.constant(1)  # Define a constant Tensor
 b = tf.constant(1)
-c = a + b  # 等价于 c = tf.add(a, b),c是张量a和张量b通过Add这一Operation(操作)所形成的新张量
+c = a + b  # Equivalent to c = tf.add(a, b); c is a new Tensor created from Tensors a and b by the Operation "Add"
 
-sess = tf.Session()  # 实例化一个Session(会话)
-c_ = sess.run(c)  # 通过Session的run()方法对计算图里的节点(张量)进行实际的计算
-print(c_)
\ No newline at end of file
+sess = tf.Session()  # Instantiate a Session
+c_ = sess.run(c)  # Use the run() method of the Session to actually compute the nodes (Tensors) in the Computation Graph
+print(c_)
diff --git a/source/_static/code/en/basic/graph/AmatmulB.py b/source/_static/code/en/basic/graph/AmatmulB.py
index 501c70c0..6dac5969 100644
--- a/source/_static/code/en/basic/graph/AmatmulB.py
+++ b/source/_static/code/en/basic/graph/AmatmulB.py
@@ -1,9 +1,9 @@
 import tensorflow as tf
 
-A = tf.ones(shape=[2, 3])  # tf.ones(shape)定义了一个形状为shape的全1矩阵
+A = tf.ones(shape=[2, 3])  # tf.ones(shape) defines an all-one matrix of the given shape
 B = tf.ones(shape=[3, 2])
 C = tf.matmul(A, B)
 
 sess = tf.Session()
 C_ = sess.run(C)
-print(C_)
\ No newline at end of file
+print(C_)
diff --git a/source/_static/code/en/basic/graph/aplusb.py b/source/_static/code/en/basic/graph/aplusb.py
index ce0bc0c5..6052eaa7 100644
--- a/source/_static/code/en/basic/graph/aplusb.py
+++ b/source/_static/code/en/basic/graph/aplusb.py
@@ -1,12 +1,12 @@
 import tensorflow as tf
 
-a = tf.placeholder(dtype=tf.int32)  # 定义一个占位符Tensor
+a = tf.placeholder(dtype=tf.int32)  # Define a placeholder Tensor
 b = tf.placeholder(dtype=tf.int32)
 c = a + b
 
-a_ = input("a = ")  # 从终端读入一个整数并放入变量a_
+a_ = input("a = ")  # Read an integer from the terminal and store it in the variable a_
 b_ = input("b = ")
 
 sess = tf.Session()
-c_ = sess.run(c, feed_dict={a: a_, b: b_})  # feed_dict参数传入为了计算c所需要的张量的值
-print("a + b = %d" % c_)
\ No newline at end of file
+c_ = sess.run(c, feed_dict={a: a_, b: b_})  # The feed_dict parameter passes in the values of the Tensors needed to compute c
+print("a + b = %d" % c_)
diff --git a/source/_static/code/en/basic/graph/variable.py b/source/_static/code/en/basic/graph/variable.py
index e513d902..8daf6152 100644
--- a/source/_static/code/en/basic/graph/variable.py
+++ b/source/_static/code/en/basic/graph/variable.py
@@ -1,13 +1,13 @@
 import tensorflow as tf
 
 a = tf.get_variable(name='a', shape=[])
-initializer = tf.assign(a, 0)  # tf.assign(x, y)返回一个“将张量y的值赋给变量x”的操作
-a_plus_1 = a + 1  # 等价于 a + tf.constant(1)
+initializer = tf.assign(a, 0)  # tf.assign(x, y) returns an operation that "assigns the value of Tensor y to the variable x"
+a_plus_1 = a + 1  # Equivalent to a + tf.constant(1)
 plus_one_op = tf.assign(a, a_plus_1)
 
 sess = tf.Session()
 sess.run(initializer)
 for i in range(5):
-    sess.run(plus_one_op)  # 对变量a执行加一操作
-    a_ = sess.run(a)  # 获得变量a的值并存入a_
+    sess.run(plus_one_op)  # Perform the plus-one operation on the variable a
+    a_ = sess.run(a)  # Get the value of the variable a and store it in a_
     print(a_)
diff --git a/source/_static/code/en/basic/graph/variable_with_initializer.py b/source/_static/code/en/basic/graph/variable_with_initializer.py
index fd0513b1..0e4e4fd6 100644
--- a/source/_static/code/en/basic/graph/variable_with_initializer.py
+++ b/source/_static/code/en/basic/graph/variable_with_initializer.py
@@ -1,11 +1,11 @@
 import tensorflow as tf
 
-a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer)  # 指定初始化器为全0初始化
+a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer)  # Specify the initializer as an all-zero initializer
 a_plus_1 = a + 1
 plus_one_op = tf.assign(a, a_plus_1)
 
 sess = tf.Session()
-sess.run(tf.global_variables_initializer())  # 初始化所有变量
+sess.run(tf.global_variables_initializer())  # Initialize all variables
 for i in range(5):
     sess.run(plus_one_op)
     a_ = sess.run(a)
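
Reviewer note: the example hunks above use X and y without showing their definition, because the data preamble falls outside the changed lines. For context, here is a minimal sketch of the kind of preamble these scripts assume: five (X, y) pairs matching the shape=[5] placeholders. The concrete values below are illustrative assumptions, not taken from this patch.

    import numpy as np

    # Hypothetical raw data: any five points fit the shape=[5] placeholders above
    X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
    y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

    # Min-max normalization to [0, 1], so plain gradient descent with
    # learning_rate = 1e-3 (as used in the scripts) converges reasonably
    X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
    y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())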