TensorFlow in Practice, Part 4: A Code Walkthrough of AlexNet in TensorFlow

Overview of the overall flow:

Start from the main function. train() first constructs a network object, then initializes the variables and starts training; training pins the graph to a device and runs a fixed number of iterations, and when it finishes, the input queue threads are shut down.

Next comes the network class. Its inference method defines the structure of the whole network: the shape of every layer and the order in which the layers connect. __init__ initializes the weights and biases. The remaining two methods are optimizer, which builds the optimizer, and softmax_loss, which defines the loss function.

The two functions at the top of the file, read_and_decode and get_batch, read the existing tfrecords file and assemble batches of training data, respectively.

That's the whole picture. Now let's walk through the code in detail.
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 11:08:21 2017
@author: root
"""
import os

import cv2
import tensorflow as tf
def read_and_decode(filename):
    # Read an existing tfrecords file and return a single (image, label) pair.
    # string_input_producer builds a queue from the list of file names.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns (key, serialized record)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [227, 227, 3])
    # Leave the pixels unnormalized here: network.inference rescales its input
    # itself, and normalizing in both places (as the original did) squashes
    # every image to roughly the same values.
    img = tf.cast(img, tf.float32)
    label = tf.cast(features['label'], tf.int32)
    print img, label  # debug output
    return img, label
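As a side note, this reader only works if the records were written with the same 'label' and 'img_raw' feature keys. The encoder script is not shown in this post, so the following writer side is only a sketch of what it presumably looks like; samples is an assumed iterable of (227x227x3 uint8 numpy array, int) pairs:

# Hypothetical writer side, matching the keys parsed by read_and_decode
writer = tf.python_io.TFRecordWriter("data.tfrecords")
for img, label in samples:
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
        'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
    }))
    writer.write(example.SerializeToString())
writer.close()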
def get_batch(image, label, batch_size, crop_size):
    # Data augmentation
    distorted_image = tf.random_crop(image, [crop_size, crop_size, 3])  # random crop
    distorted_image = tf.image.random_flip_up_down(distorted_image)     # random vertical flip
    # Assemble the batch. capacity bounds the buffer that shuffle_batch samples
    # from, so to shuffle well across the whole training set it should be large
    # (e.g. num_threads=16, capacity=50000, min_after_dequeue=10000). It must
    # always exceed min_after_dequeue; the original capacity=2 with
    # min_after_dequeue=10 can never be satisfied.
    images, label_batch = tf.train.shuffle_batch(
        [distorted_image, label], batch_size=batch_size,
        num_threads=2, capacity=2000, min_after_dequeue=1000)
    # Debug summary:
    # tf.image_summary('images', images)
    print "in get batch"
    print images, label_batch
    return images, tf.reshape(label_batch, [batch_size])
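How big should capacity be? The shuffle_batch documentation suggests leaving headroom above min_after_dequeue so the reader threads can keep the buffer full. A minimal sketch of that rule, with batch_size assumed to be 32:

min_after_dequeue = 1000                       # elements kept buffered for shuffling
batch_size = 32                                # assumed; use whatever train() passes in
capacity = min_after_dequeue + 3 * batch_size  # recommended headroom rule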
class network(object):
    def inference(self, images):
        # Reshape the flat input into NHWC form: [batch, in_height, in_width, in_channels]
        images = tf.reshape(images, shape=[-1, 227, 227, 3])
        images = (tf.cast(images, tf.float32) / 255. - 0.5) * 2  # normalize pixels to [-1, 1]
        # Layer 1: conv + bias, ReLU, max-pool. 227 -> 55 -> 27
        conv1 = tf.nn.bias_add(tf.nn.conv2d(images, self.weights['conv1'], strides=[1, 4, 4, 1], padding='VALID'),
                               self.biases['conv1'])
        relu1 = tf.nn.relu(conv1)
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        # Layer 2: 27 -> 27 (SAME conv) -> 13
        conv2 = tf.nn.bias_add(tf.nn.conv2d(pool1, self.weights['conv2'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv2'])
        relu2 = tf.nn.relu(conv2)
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        # Layers 3-5: SAME convolutions that keep the 13x13 map
        conv3 = tf.nn.bias_add(tf.nn.conv2d(pool2, self.weights['conv3'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv3'])
        relu3 = tf.nn.relu(conv3)
        conv4 = tf.nn.bias_add(tf.nn.conv2d(relu3, self.weights['conv4'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv4'])
        relu4 = tf.nn.relu(conv4)
        conv5 = tf.nn.bias_add(tf.nn.conv2d(relu4, self.weights['conv5'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv5'])
        relu5 = tf.nn.relu(conv5)
        pool5 = tf.nn.max_pool(relu5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')  # 13 -> 6
        # Fully connected layers: flatten the 6x6x256 feature map into a vector first
        flatten = tf.reshape(pool5, [-1, self.weights['fc1'].get_shape().as_list()[0]])
        drop1 = tf.nn.dropout(flatten, 0.5)  # dropout belongs in the training graph only
        fc1 = tf.matmul(drop1, self.weights['fc1']) + self.biases['fc1']
        fc_relu1 = tf.nn.relu(fc1)
        fc2 = tf.matmul(fc_relu1, self.weights['fc2']) + self.biases['fc2']
        fc_relu2 = tf.nn.relu(fc2)
        fc3 = tf.matmul(fc_relu2, self.weights['fc3']) + self.biases['fc3']
        return fc3  # raw logits; the softmax lives inside the loss
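Where does the 6*6*256 input size of fc1 come from? For VALID padding the output side length is (in - kernel) // stride + 1, while SAME convolutions with stride 1 keep the size. A standalone check (not part of the class) that traces the input through the layers:

def out_size(size, k, s):
    # side length after a VALID conv/pool with kernel k and stride s
    return (size - k) // s + 1

print out_size(227, 11, 4)  # conv1 -> 55
print out_size(55, 3, 2)    # pool1 -> 27 (conv2 is SAME, stays 27)
print out_size(27, 3, 2)    # pool2 -> 13 (conv3-5 are SAME, stay 13)
print out_size(13, 3, 2)    # pool5 -> 6, hence fc1 takes 6*6*256 inputs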
    def __init__(self):
        # Initialize the weights and biases
        with tf.variable_scope("weights"):
            self.weights = {
                # 227x227x3 -> 55x55x96 -> 27x27x96
                'conv1': tf.get_variable('conv1', [11, 11, 3, 96], initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # 27x27x96 -> 27x27x256 -> 13x13x256
                'conv2': tf.get_variable('conv2', [5, 5, 96, 256], initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # 13x13x256 -> 13x13x384
                'conv3': tf.get_variable('conv3', [3, 3, 256, 384], initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # 13x13x384 -> 13x13x384
                'conv4': tf.get_variable('conv4', [3, 3, 384, 384], initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # 13x13x384 -> 13x13x256 -> 6x6x256
                'conv5': tf.get_variable('conv5', [3, 3, 384, 256], initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                'fc1': tf.get_variable('fc1', [6 * 6 * 256, 4096], initializer=tf.contrib.layers.xavier_initializer()),
                'fc2': tf.get_variable('fc2', [4096, 4096], initializer=tf.contrib.layers.xavier_initializer()),
                # 4096 -> 2 classes
                'fc3': tf.get_variable('fc3', [4096, 2], initializer=tf.contrib.layers.xavier_initializer()),
            }
        with tf.variable_scope("biases"):
            self.biases = {
                'conv1': tf.get_variable('conv1', [96], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'conv2': tf.get_variable('conv2', [256], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'conv3': tf.get_variable('conv3', [384], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'conv4': tf.get_variable('conv4', [384], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'conv5': tf.get_variable('conv5', [256], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'fc1': tf.get_variable('fc1', [4096], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'fc2': tf.get_variable('fc2', [4096], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
                'fc3': tf.get_variable('fc3', [2], initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),
            }
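xavier_initializer (Glorot initialization) scales the random weights so the activation variance stays roughly constant from layer to layer. Just to show what it computes, here is a manual equivalent of the uniform variant for the fc1 matrix; this is illustrative only, not part of the model:

fan_in, fan_out = 6 * 6 * 256, 4096
limit = (6.0 / (fan_in + fan_out)) ** 0.5    # Glorot uniform bound
fc1_manual = tf.random_uniform([fan_in, fan_out], -limit, limit)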
    def inference_test(self, images):
        # Evaluation graph: identical to inference but without dropout.
        # (The original version of this method was left over from an older
        # 39x39 network and no longer matched the weight shapes above.)
        images = tf.reshape(images, shape=[-1, 227, 227, 3])  # [batch, in_height, in_width, in_channels]
        images = (tf.cast(images, tf.float32) / 255. - 0.5) * 2  # normalize pixels to [-1, 1]
        conv1 = tf.nn.bias_add(tf.nn.conv2d(images, self.weights['conv1'], strides=[1, 4, 4, 1], padding='VALID'),
                               self.biases['conv1'])
        pool1 = tf.nn.max_pool(tf.nn.relu(conv1), ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        conv2 = tf.nn.bias_add(tf.nn.conv2d(pool1, self.weights['conv2'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv2'])
        pool2 = tf.nn.max_pool(tf.nn.relu(conv2), ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        conv3 = tf.nn.bias_add(tf.nn.conv2d(pool2, self.weights['conv3'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv3'])
        conv4 = tf.nn.bias_add(tf.nn.conv2d(tf.nn.relu(conv3), self.weights['conv4'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv4'])
        conv5 = tf.nn.bias_add(tf.nn.conv2d(tf.nn.relu(conv4), self.weights['conv5'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv5'])
        pool5 = tf.nn.max_pool(tf.nn.relu(conv5), ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        # Flatten the 6x6x256 feature map, then the fully connected layers (no dropout)
        flatten = tf.reshape(pool5, [-1, self.weights['fc1'].get_shape().as_list()[0]])
        fc_relu1 = tf.nn.relu(tf.matmul(flatten, self.weights['fc1']) + self.biases['fc1'])
        fc_relu2 = tf.nn.relu(tf.matmul(fc_relu1, self.weights['fc2']) + self.biases['fc2'])
        fc3 = tf.matmul(fc_relu2, self.weights['fc3']) + self.biases['fc3']
        return fc3
    # Softmax cross-entropy loss
    def softmax_loss(self, predicts, labels):
        # softmax_cross_entropy_with_logits applies the softmax itself, so it
        # must receive the raw logits; the original ran tf.nn.softmax first,
        # which computes the wrong (much flatter) loss.
        labels = tf.one_hot(labels, self.weights['fc3'].get_shape().as_list()[1])
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predicts, labels=labels))
        self.cost = loss
        return self.cost
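A quick way to convince yourself of the logits convention: the built-in op agrees with the hand-written cross-entropy on raw logits, and it would not if you fed it softmax outputs. A small standalone check, assuming a TF 1.x session:

logits = tf.constant([[2.0, -1.0, 0.3]])
labels = tf.constant([[1.0, 0.0, 0.0]])
manual = -tf.reduce_sum(labels * tf.nn.log_softmax(logits), axis=1)
builtin = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
with tf.Session() as s:
    print s.run([manual, builtin])  # the two values match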
    # Gradient descent
    def optimizer(self, loss, lr=0.01):
        train_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)
        return train_optimizer
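Plain SGD with a fixed learning rate is the simplest possible choice; swapping in a momentum (or Adam) optimizer changes only this one line. A sketch of the momentum variant, written as it would appear inside the class, with 0.9 as an assumed value:

    def momentum_optimizer(self, loss, lr=0.01, momentum=0.9):
        # Same role as optimizer() above, with momentum accumulation added
        return tf.train.MomentumOptimizer(lr, momentum).minimize(loss)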
def train():
    # Read single examples, then let get_batch assemble shuffled, augmented
    # batches. (The original fed the single example straight into the network,
    # so get_batch was never actually used.)
    image, label = read_and_decode("/home/zenggq/data/imagedata/data.tfrecords")
    batch_image, batch_label = get_batch(image, label, batch_size=32, crop_size=227)  # batch_size is an example value
    # Build the training graph. Device placement has to wrap graph
    # construction; wrapping session.run, as the original did, has no effect.
    with tf.device("/gpu:1"):
        net = network()
        inf = net.inference(batch_image)
        loss = net.softmax_loss(inf, batch_label)
        opti = net.optimizer(loss)
    init = tf.initialize_all_variables()
    saver = tf.train.Saver(max_to_keep=None)
    if not os.path.isdir("model"):
        os.makedirs("model")  # make sure the checkpoint directory exists
    # allow_soft_placement lets ops without a GPU kernel fall back to the CPU
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        session.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # Resume from an existing checkpoint if there is one
        ckpt = tf.train.get_checkpoint_state("model")
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(session, ckpt.model_checkpoint_path)
        max_iter = 9000
        iter = 0
        while iter < max_iter:
            loss_np, _ = session.run([loss, opti])
            if iter % 50 == 0:
                print 'trainloss:', loss_np
                saver.save(session, os.path.join("model", 'model.ckpt'))  # the original restored but never saved
            iter += 1
        # The queue threads must be stopped, or the session exits with an error
        coord.request_stop()
        coord.join(threads)

if __name__ == '__main__':
    # Main entry point: train the network
    train()
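One loose end: cv2 is imported but never used, which suggests an evaluation path that the post doesn't show. Purely as a sketch, scoring a single image with inference_test could look like the helper below; it is hypothetical, and assumes a fresh process (so the variable scopes aren't already taken) and the checkpoint directory used by train():

def predict_image(path):
    # Hypothetical helper, not part of the original script
    img = cv2.imread(path)                # BGR uint8
    img = cv2.resize(img, (227, 227))
    net = network()
    logits = net.inference_test(tf.constant(img[None]))  # add a batch dimension
    pred = tf.argmax(logits, 1)
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state("model")
        tf.train.Saver().restore(sess, ckpt.model_checkpoint_path)
        return sess.run(pred)[0]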