Translate Chinese comments to English (#3)
* Translate Chinese comments to English

* Translate Chinese comments to English

* Translate Chinese comments to English

* Translate Chinese comments to English

* Translate Chinese comments to English

* Translate Chinese comments to English

* Translate Chinese comments to English

* Update translation

* Update translation

* Translate Chinese comments to English

* Update translation

* Update translation

* Update translation

* Update translation

* Update translations
dpinthinker authored and snowkylin committed Sep 16, 2018
1 parent 69493cd commit dd216a1
Showing 8 changed files with 32 additions and 32 deletions.
4 changes: 2 additions & 2 deletions source/_static/code/en/basic/example/tensorflow_autograd.py
@@ -17,7 +17,7 @@
y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

- # 反向传播,利用TensorFlow的梯度下降优化器自动计算并更新变量(模型参数)的梯度
+ # Back propagation: use TensorFlow's GradientDescentOptimizer to automatically compute the gradients of the variables (model parameters) and update them
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_).minimize(loss)

num_epoch = 10000
@@ -26,4 +26,4 @@
    tf.global_variables_initializer().run()
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))
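As an aside, minimize() bundles gradient computation and the update step; the TF 1.x optimizer API also exposes the two halves separately. A minimal sketch of the explicit form, assuming the same loss and learning_rate_ placeholder as in this file:

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_)
grads_and_vars = optimizer.compute_gradients(loss)    # automatic differentiation of loss w.r.t. a and b
train_op = optimizer.apply_gradients(grads_and_vars)  # applies a <- a - lr * grad_a, and likewise for b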
8 changes: 4 additions & 4 deletions source/_static/code/en/basic/example/tensorflow_eager.py
@@ -18,15 +18,15 @@
num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
-     # 前向传播
+     # Forward propagation
    y_pred = a * X + b
    loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y)) # loss = 0.5 * np.sum(np.square(a * X + b - y))

-     # 反向传播,手动计算变量(模型参数)的梯度
+     # Back propagation: manually compute the gradients of the variables (model parameters)
    grad_a = tf.reduce_sum((y_pred - y) * X)
    grad_b = tf.reduce_sum(y_pred - y)

-     # 更新参数
+     # Update the parameters
    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

print(a, b)
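For reference, the hand-derived gradients above come from differentiating loss = 0.5 * sum((a*X + b - y)^2): d(loss)/da = sum((y_pred - y) * X) and d(loss)/db = sum(y_pred - y). A minimal NumPy sketch, with hypothetical toy data, that checks the analytic grad_a against a finite-difference estimate:

import numpy as np

X = np.array([1., 2., 3., 4., 5.])  # hypothetical toy data
y = np.array([1., 3., 5., 7., 9.])
a, b = 0.5, 0.1

y_pred = a * X + b
loss = 0.5 * np.sum(np.square(y_pred - y))
grad_a = np.sum((y_pred - y) * X)  # analytic gradient, same formula as in the snippet

eps = 1e-6
loss_eps = 0.5 * np.sum(np.square((a + eps) * X + b - y))
print(grad_a, (loss_eps - loss) / eps)  # the two numbers should agree closely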
16 changes: 8 additions & 8 deletions source/_static/code/en/basic/example/tensorflow_manual_grad.py
@@ -8,7 +8,7 @@

import tensorflow as tf

- # 定义数据流图
+ # Define the data flow graph
learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
@@ -18,26 +18,26 @@
y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

- # 反向传播,手动计算变量(模型参数)的梯度
+ # Back propagation: manually compute the gradients of the variables (model parameters)
grad_a = tf.reduce_sum((y_pred - y_) * X_)
grad_b = tf.reduce_sum(y_pred - y_)

- # 梯度下降法,手动更新参数
+ # Gradient descent: manually update the parameters
new_a = a - learning_rate_ * grad_a
new_b = b - learning_rate_ * grad_b
update_a = tf.assign(a, new_a)
update_b = tf.assign(b, new_b)

train_op = [update_a, update_b]
- # 数据流图定义到此结束
- # 注意,直到目前,我们都没有进行任何实质的数据计算,仅仅是定义了一个数据图
+ # End of the definition of the data flow graph
+ # Note that up to this point we haven't performed any actual data computation; we have merely defined a data flow graph

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
-     # 初始化变量a和b
+     # Initialize variables a and b
    tf.global_variables_initializer().run()
-     # 循环将数据送入上面建立的数据流图中进行计算和更新变量
+     # Loop: feed the data into the data flow graph built above to compute and update the variables
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))
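A middle ground between these hand-written gradient formulas and the optimizer's minimize() is tf.gradients, which performs the differentiation symbolically but leaves the update rule manual. A minimal sketch, assuming the same graph as above:

grad_a, grad_b = tf.gradients(loss, [a, b])  # autodiff replaces the hand-derived formulas
train_op = [tf.assign(a, a - learning_rate_ * grad_a),
            tf.assign(b, b - learning_rate_ * grad_b)]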
12 changes: 6 additions & 6 deletions source/_static/code/en/basic/graph/1plus1.py
@@ -1,10 +1,10 @@
import tensorflow as tf

- # 定义一个“计算图”
- a = tf.constant(1) # 定义一个常量Tensor(张量)
+ # Define a "computation graph"
+ a = tf.constant(1) # Define a constant Tensor
b = tf.constant(1)
- c = a + b # 等价于 c = tf.add(a, b),c是张量a和张量b通过Add这一Operation(操作)所形成的新张量
+ c = a + b # Equivalent to c = tf.add(a, b); c is a new Tensor produced from Tensors a and b by the Add Operation

- sess = tf.Session() # 实例化一个Session(会话)
- c_ = sess.run(c) # 通过Session的run()方法对计算图里的节点(张量)进行实际的计算
- print(c_)
+ sess = tf.Session() # Instantiate a Session
+ c_ = sess.run(c) # The Session's run() method performs the actual computation on the nodes (Tensors) in the computation graph
+ print(c_)
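Running the script should print 2. As a side note, run() also accepts a list of nodes and evaluates them in one pass; a minimal sketch, assuming the same session:

a_, b_, c_ = sess.run([a, b, c])
print(a_, b_, c_)  # expected output: 1 1 2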
4 changes: 2 additions & 2 deletions source/_static/code/en/basic/graph/AmatmulB.py
@@ -1,9 +1,9 @@
import tensorflow as tf

- A = tf.ones(shape=[2, 3]) # tf.ones(shape)定义了一个形状为shape的全1矩阵
+ A = tf.ones(shape=[2, 3]) # tf.ones(shape) defines an all-ones matrix with the given shape
B = tf.ones(shape=[3, 2])
C = tf.matmul(A, B)

sess = tf.Session()
C_ = sess.run(C)
print(C_)
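Each entry of C sums three 1*1 products, so C_ should be a 2x2 matrix of threes. A quick check, assuming the same session:

print(C_.shape)  # (2, 2), from a [2, 3] x [3, 2] matmul
print(C_)        # expected: [[3. 3.] [3. 3.]]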
8 changes: 4 additions & 4 deletions source/_static/code/en/basic/graph/aplusb.py
@@ -1,12 +1,12 @@
import tensorflow as tf

- a = tf.placeholder(dtype=tf.int32) # 定义一个占位符Tensor
+ a = tf.placeholder(dtype=tf.int32) # Define a placeholder Tensor
b = tf.placeholder(dtype=tf.int32)
c = a + b

- a_ = input("a = ") # 从终端读入一个整数并放入变量a_
+ a_ = input("a = ") # Read an integer from the terminal and store it in variable a_
b_ = input("b = ")

sess = tf.Session()
- c_ = sess.run(c, feed_dict={a: a_, b: b_}) # feed_dict参数传入为了计算c所需要的张量的值
- print("a + b = %d" % c_)
+ c_ = sess.run(c, feed_dict={a: a_, b: b_}) # The feed_dict parameter supplies the values of the Tensors needed to compute c
+ print("a + b = %d" % c_)
8 changes: 4 additions & 4 deletions source/_static/code/en/basic/graph/variable.py
@@ -1,13 +1,13 @@
import tensorflow as tf

a = tf.get_variable(name='a', shape=[])
- initializer = tf.assign(a, 0) # tf.assign(x, y)返回一个“将张量y的值赋给变量x”的操作
- a_plus_1 = a + 1 # 等价于 a + tf.constant(1)
+ initializer = tf.assign(a, 0) # tf.assign(x, y) returns an operation that assigns the value of Tensor y to variable x
+ a_plus_1 = a + 1 # Equivalent to a + tf.constant(1)
plus_one_op = tf.assign(a, a_plus_1)

sess = tf.Session()
sess.run(initializer)
for i in range(5):
-     sess.run(plus_one_op) # 对变量a执行加一操作
-     a_ = sess.run(a) # 获得变量a的值并存入a_
+     sess.run(plus_one_op) # Apply the plus-one operation to variable a
+     a_ = sess.run(a) # Get the value of variable a and store it in a_
    print(a_)
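Since tf.assign returns a tensor holding the variable's post-assignment value, the two run() calls in the loop can be folded into one. A minimal sketch:

for i in range(5):
    a_ = sess.run(plus_one_op)  # the assign op itself yields the updated value of a
    print(a_)  # expected: 1.0, 2.0, 3.0, 4.0, 5.0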
@@ -1,11 +1,11 @@
import tensorflow as tf

- a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer) # 指定初始化器为全0初始化
+ a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer) # Specify an all-zeros initializer
a_plus_1 = a + 1
plus_one_op = tf.assign(a, a_plus_1)

sess = tf.Session()
- sess.run(tf.global_variables_initializer()) # 初始化所有变量
+ sess.run(tf.global_variables_initializer()) # Initialize all the variables
for i in range(5):
    sess.run(plus_one_op)
    a_ = sess.run(a)
