TensorFlow: Building a Simple Small Network

In [19]:

import tensorflow as tf
import numpy as np
# # A simple network on constant inputs
# # Define the input parameters
# X=tf.constant(value=[[0.7,0.5]])
# W1=tf.Variable(tf.truncated_normal(shape=[2,3],mean=0,stddev=2))
# W2=tf.Variable(tf.truncated_normal(shape=[3,1],mean=0,stddev=2))
# # Define the forward pass
# a=tf.matmul(X,W1)
# y=tf.matmul(a,W2)
# # Initialize the variables
# init=tf.global_variables_initializer()
# with tf.Session() as sess:
#     sess.run(init)
#     print("Y",sess.run(y))
# multiply is element-wise multiplication; matmul is matrix multiplication (see the sketch after this cell)
# Define the network variables
# Input placeholder
x_input=tf.placeholder(dtype=tf.float32,shape=[1,2],name="x_input")
# Weight variables
W1=tf.Variable(tf.random_normal(shape=[2,3],mean=0,stddev=2))
W2=tf.Variable(tf.random_normal(shape=[3,1],mean=0,stddev=2))
# Define the computation graph (forward pass)
a=tf.matmul(x_input,W1)
Y=tf.matmul(a,W2)
# Initialize the global variables
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    result=sess.run(Y,feed_dict={x_input:[[0.7,0.5]]})
    print("result", result)
result [[-4.5023837]]
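
To make the multiply/matmul note above concrete, here is a minimal sketch (not part of the original notebook) comparing the two ops on the same pair of 2x2 matrices:

import tensorflow as tf

m1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
m2 = tf.constant([[5.0, 6.0], [7.0, 8.0]])
with tf.Session() as sess:
    print(sess.run(tf.multiply(m1, m2)))  # element-wise: [[ 5. 12.] [21. 32.]]
    print(sess.run(tf.matmul(m1, m2)))    # matrix product: [[19. 22.] [43. 50.]]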
# Running the program in PyCharm
import tensorflow as tf
import numpy as np
BATCH_SIZE = 8 # number of samples fed to the network at a time (one batch); don't feed too much data at once
SEED = 23455 # fixed seed so the same random numbers are generated on every run
rdm = np.random.RandomState(SEED)
X = rdm.rand(32, 2)
i = 0
Y_ = np.zeros((32, 1), dtype=int)  # labels as a column vector
for (x0, x1) in X:
    y = int(x0 + x1 < 1)  # label is 1 when x0 + x1 < 1, else 0
    Y_[i] = y
    i = i + 1
print("X:\n", X)
print("Y_:\n", Y_)
# 1. Define the network's inputs, parameters, and outputs, and the forward pass.
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# 2. Define the loss function and the backpropagation method.
loss = tf.reduce_mean(tf.square(y - y_))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss) # any one of the three optimizers will do (alternatives sketched below)
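# The note above mentions three optimization methods but only shows gradient
# descent; the other two usually paired with this example (an assumption, not
# from the source) would be:
# train_step = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
# train_step = tf.train.AdamOptimizer(0.001).minimize(loss)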
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    # Print the current (untrained) parameter values.
    print("w1:\n", sess.run(w1))
    print("w2:\n", sess.run(w2))
    print("\n")
    STEPS = 3000
    for i in range(STEPS):  # 0-2999
        start = (i * BATCH_SIZE) % 32
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        if i % 500 == 0:
            total_loss = sess.run(loss, feed_dict={x: X, y_: Y_})
            print("After %d training step(s), loss on all data is %g" % (i, total_loss))
    print("\n")
    print("w1:\n", sess.run(w1))
    print("w2:\n", sess.run(w2))
# A fairly complete network

import tensorflow as tf
import numpy as np

batch_size = 8
seed = 23455

# Generate some fake data
rng = np.random.RandomState(seed)
X = rng.rand(32, 2)
print(X)
# Label each sample 1 if x0 + x1 < 1, else 0
Y = [[int(x0 + x1 < 1)] for (x0, x1) in X]
print(Y)

# Define the network inputs (tf.float32, so they match the float32 weights below)
x_input = tf.placeholder(shape=[None, 2], dtype=tf.float32, name="input")
y_output = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="output")

# Define the variables
W1 = tf.Variable(tf.random_normal(shape=[2, 3], stddev=1, seed=1))
W2 = tf.Variable(tf.random_normal(shape=[3, 1], stddev=1, seed=1))

# Define the forward pass
a = tf.matmul(x_input, W1)
y = tf.matmul(a, W2)

# Define the loss function and training step
loss = tf.reduce_mean(tf.square(y - y_output))
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    print("W1:\n", sess.run(W1))
    # Feed the data to the network in batches
    for i in range(2000):
        data_start = (i * batch_size) % 32
        data_end = data_start + batch_size
        # Run one training step on the current batch
        sess.run(train_step, feed_dict={x_input: X[data_start:data_end],
                                        y_output: Y[data_start:data_end]})
        # Print the loss at regular intervals
        if i % 500 == 0:
            Loss = sess.run(loss, feed_dict={x_input: X, y_output: Y})
            print("loss", Loss)
    print("W1", sess.run(W1))
    print("W2", sess.run(W2))