53. Basic TensorFlow Operations

import tensorflow as tf
import numpy as np

# Generate 100 random 2-D points as the training inputs.
x_data = np.float32(np.random.rand(2, 100))
print(x_data)
# Ground-truth targets: y = 0.1 * x1 + 0.2 * x2 + 0.3.
y_data = np.dot([0.100, 0.200], x_data) + 0.300
print(y_data)

# Model parameters: a 1x2 weight matrix W and a scalar bias b, both trainable variables.
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Mean squared error between predictions and targets, minimized by gradient descent.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Initialize the variables, then train for 200 steps, logging progress every 20 steps.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in np.arange(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))
The output is as follows:

>>> import testTensorflow
[[ 0.19973956  0.16739862  0.15858267  0.04722507  0.19596119  0.81711286
   0.92377388  0.35614383  0.39491668  0.67010045  0.42327231  0.83889592
   0.00850873  0.26917413  0.97868949  0.13200051  0.90181822  0.50647962
   0.761379    0.10195658  0.3984865   0.83295298  0.36956981  0.20502378
   0.93639976  0.20199312  0.21077876  0.5948227   0.29240388  0.50293821
   0.58044183  0.71276587  0.32259133  0.76028866  0.5752154   0.55170006
   0.31895116  0.31566954  0.83059841  0.02788422  0.1739264   0.84005469
   0.21437983  0.06674702  0.93111533  0.59845936  0.09614947  0.96966016
   0.61548668  0.23930366  0.52561933  0.13136983  0.03776945  0.46551761
   0.59285414  0.27717781  0.29892424  0.26322085  0.82392263  0.98384923
   0.27857226  0.52306014  0.65995163  0.06507403  0.36095095  0.75242752
   0.8814683   0.58934295  0.45939791  0.44734281  0.76442283  0.8815344
   0.7056703   0.27994496  0.0329699   0.43244225  0.1495771   0.81560552
   0.6201663   0.53501105  0.25517786  0.41227514  0.00588593  0.92267019
   0.63735855  0.43193081  0.11244099  0.09052325  0.8034566   0.09081198
   0.47067729  0.36029741  0.79698795  0.20955186  0.81617475  0.11639476
   0.6875121   0.77578318  0.00680351  0.95461601]
 [ 0.67086971  0.72803783  0.48773968  0.46353742  0.09106466  0.77719897
   0.63074362  0.40372008  0.88138324  0.54678655  0.08489656  0.49613088
   0.24366626  0.89506823  0.11740237  0.40266839  0.76421058  0.3193655
   0.54588401  0.48711354  0.55574679  0.33889616  0.76450878  0.34872222
   0.5065375   0.039746    0.08313783  0.19343667  0.64963359  0.25033969
   0.25755394  0.73925525  0.78360468  0.67241889  0.07166966  0.99968213
   0.83292675  0.10939927  0.53977299  0.78841841  0.32458925  0.57405293
   0.16409875  0.05016828  0.02617516  0.0646539   0.45070267  0.82357401
   0.25270018  0.27731678  0.8181566   0.97194064  0.18956329  0.66432667
   0.91821116  0.31203365  0.97170323  0.12718523  0.06367094  0.74980855
   0.50792503  0.01701127  0.13227516  0.17003129  0.59326243  0.7097097
   0.6589005   0.43706962  0.91880661  0.46747798  0.09852902  0.13755837
   0.00790515  0.28889963  0.89563406  0.11847007  0.67797345  0.02893432
   0.33689809  0.92307913  0.18034695  0.8641994   0.48432577  0.64221871
   0.24944213  0.22134747  0.20596626  0.71872956  0.83808893  0.73436451
   0.23837468  0.1081854   0.94581962  0.67415166  0.14609784  0.1863541
   0.65377831  0.36235628  0.01571035  0.29108971]]
[ 0.4541479   0.46234743  0.4134062   0.39742999  0.33780905  0.53715108
  0.51852611  0.4163584   0.51576832  0.47636735  0.35930654  0.48311577
  0.34958413  0.50593106  0.42134942  0.39373373  0.54302394  0.41452106
  0.4853147   0.40761836  0.45099801  0.45107453  0.48985874  0.39024682
  0.49494748  0.32814851  0.33770544  0.3981696   0.4591671   0.40036176
  0.40955497  0.51912764  0.48898007  0.51051264  0.37185547  0.55510643
  0.49848047  0.35344681  0.49101444  0.4604721   0.38231049  0.49881606
  0.35425773  0.31670836  0.39834656  0.37277672  0.39975548  0.56168082
  0.4120887   0.37939372  0.51619325  0.50752511  0.3416896   0.47941709
  0.54292765  0.39012451  0.52423307  0.35175913  0.39512645  0.54834663
  0.42944223  0.35570827  0.3924502   0.34051366  0.45474758  0.51718469
  0.51992693  0.44634822  0.52970111  0.43822988  0.39614809  0.41566511
  0.37214806  0.38577442  0.4824238   0.36693824  0.4505524   0.38734742
  0.42939625  0.53811693  0.36158718  0.51406739  0.39745375  0.52071076
  0.41362428  0.38746257  0.35243735  0.45279824  0.54796345  0.4559541
  0.39474266  0.35766682  0.56886272  0.45578552  0.41083704  0.3489103
  0.49950687  0.45004957  0.30382242  0.45367954]
0 [[ 0.87464035 -0.60235059]] [ 0.56324285]
20 [[ 0.21375839  0.03494079]] [ 0.32297963]
40 [[ 0.117922    0.16860159]] [ 0.3062222]
60 [[ 0.10254548  0.1938384 ]] [ 0.30170473]
80 [[ 0.1002738  0.1987415]] [ 0.30047071]
100 [[ 0.09999874  0.19973046]] [ 0.30013064]
120 [[ 0.09998582  0.19993921]] [ 0.30003637]
140 [[ 0.09999356  0.19998558]] [ 0.30001014]
160 [[ 0.09999776  0.19999643]] [ 0.30000284]
180 [[ 0.09999929  0.19999908]] [ 0.30000082]
200 [[ 0.09999978  0.19999975]] [ 0.30000022]
>>> 
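
The trace shows W converging to the true weights [0.1, 0.2] and b to the true bias 0.3. To also see how small the loss has become, it can be evaluated in the same session after the loop finishes (an extra couple of lines, not part of the original script):

print(sess.run(loss))   # very close to 0 once W and b have converged
sess.close()            # release the session's resources when done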

2. Operators

import tensorflow as tf

# Create a constant op that produces a 1x2 matrix. The op is added as a node
# to the default graph.
#
# The value returned by the constructor represents the output of the constant op.
matrix1 = tf.constant([[3., 3.]])

# Create another constant op that produces a 2x1 matrix.
matrix2 = tf.constant([[2.], [2.]])

# Create a matmul op that takes 'matrix1' and 'matrix2' as its inputs.
# The returned value 'product' represents the result of the matrix multiplication.
product = tf.matmul(matrix1, matrix2)

# Launch the graph in a session.
sess = tf.Session()

# Call the session's 'run()' method to execute the matmul op, passing 'product'
# as the argument. 'product' is the output of the matmul op; passing it in tells
# the method that we want to fetch that output back.
#
# The execution is fully automatic: the session feeds every op the inputs it
# needs, and ops are generally executed in parallel.
#
# The call 'run(product)' triggers the execution of the three ops in the graph
# (the two constant ops and the matmul op).
#
# The returned 'result' is a numpy `ndarray` object.
result = sess.run(product)
print(result)
sess.close()

# A Session must be closed when you are done with it so that its resources are
# released. Instead of calling close() explicitly, you can use a "with" block to
# close the session automatically:
#with tf.Session() as sess:
#    result = sess.run([product])
#    print(result)

The output is as follows:

>>> import testOP
[[ 12.]]
>>> 
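
As the comments note, run() hands back a plain NumPy array. A quick way to confirm the type and shape (a small sketch, not in the original script):

import numpy as np

with tf.Session() as sess:
    result = sess.run(product)
    print(isinstance(result, np.ndarray))   # True
    print(result.shape)                     # (1, 1): [1x2] x [2x1] -> [1x1]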

3. Variables

import tensorflow as tf

# Create a variable, initialized to the scalar value 0.
state = tf.Variable(0, name="counter")

# Create ops whose effect is to increase 'state' by 1.
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)

# After the graph is launched, variables must first be initialized by running an
# 'init' op, so that op has to be added to the graph first.
init_op = tf.global_variables_initializer()

# Launch the graph and run the ops.
with tf.Session() as sess:
    # Run the 'init' op.
    sess.run(init_op)
    # Print the initial value of 'state'.
    print(sess.run(state))
    # Run the update op to increase 'state', and print it after each update.
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))

The output is as follows:

>>> import testVariables
0
1
2
3
>>> 
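
The add-then-assign pair above can also be collapsed into a single op with tf.assign_add; a minimal sketch of that alternative (not part of the original script):

import tensorflow as tf

state = tf.Variable(0, name="counter")
update = tf.assign_add(state, tf.constant(1))   # increments 'state' in one op

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update))   # prints 1, 2, 3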

4. Feed

import tensorflow as tf

# Placeholders receive concrete values at run time through feed_dict.
# Note: tf.mul was renamed to tf.multiply in TensorFlow 1.0.
input1 = tf.placeholder(tf.float32, [None])
input2 = tf.placeholder(tf.float32, [None])
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    print(sess.run([output], feed_dict={input1: [7.], input2: [2.]}))
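
Fed with [7.] and [2.], this prints a list holding a one-element array whose value is 14.0. Because the placeholders are declared with shape [None], the same graph also accepts longer vectors; a minimal sketch (the values below are only illustrative):

with tf.Session() as sess:
    # Element-wise product of two three-element vectors fed at run time.
    print(sess.run(output, feed_dict={input1: [1., 2., 3.], input2: [4., 5., 6.]}))
    # -> [ 4. 10. 18.]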

5. InteractiveSession

import tensorflow as tf

# Enter an interactive TensorFlow session, which installs itself as the default session.
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])

# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()

# Add a subtract op that subtracts 'a' from 'x', then run it and print the result.
# Note: tf.sub was renamed to tf.subtract in TensorFlow 1.0.
sub = tf.subtract(x, a)
print(sub.eval())

The output is as follows:

>>> import testInteractive

[-2. -1.]

>>>
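
With an InteractiveSession installed as the default session, Tensor.eval() is shorthand for running the tensor in that session; the explicit form below is equivalent (a minimal sketch, not part of the original script):

print(sess.run(sub))   # same result as sub.eval() while 'sess' is the default session
sess.close()           # interactive sessions should also be closed when done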

6. Fetch

import tensorflow as tf

input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
intermed = tf.add(input2, input3)
mul = tf.multiply(input1, intermed)

# Pass a list of tensors to run() to fetch several outputs in a single execution.
with tf.Session() as sess:
    result = sess.run([mul, intermed])
    print(result)

The output is as follows:

>>> import testFetch

[21.0, 7.0]

>>>
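
The fetches argument does not have to be a list: TensorFlow 1.x Session.run also accepts a dictionary of tensors, and every requested tensor is computed in a single pass over the graph (a minimal sketch; the key names are just for illustration):

with tf.Session() as sess:
    # 'intermed' is evaluated only once even though 'mul' depends on it.
    fetched = sess.run({'product': mul, 'sum': intermed})
    print(fetched)   # {'product': 21.0, 'sum': 7.0}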