# 1. Load data --- features and targets
# 2. Randomly initialize weight and bias --- variable ops
# 3. Predict ---> matrix multiplication
# 4. Predicted vs. true values --- loss --- mean squared error
# 5. Build an optimizer to minimize the loss --- set the learning rate
# 6. Repeatedly run the optimization op
import tensorflow as tf
8
9
class MyLinearRegression(object):
    """Fit y = 0.7 * x + 0.8 on synthetic data with TF1 graph-mode SGD.

    Pipeline: build data -> linear model (randomly initialized w, b) ->
    mean-squared-error loss -> gradient-descent train op -> run session.
    """

    def __init__(self):
        # Step size for gradient descent.  A tiny rate (e.g. 1e-11) makes
        # the updates negligibly small so training barely progresses; a
        # huge rate (e.g. 100) makes the loss diverge (exploding gradients).
        self.learning_rate = 0.1

    def build_data(self):
        """Build the feature and target tensors.

        :return: (x, y) tensors of shape [100, 1], where y = 0.7 * x + 0.8
        """
        # Name scope so the graph is readable in TensorBoard.
        with tf.variable_scope("build_data"):
            # Random features, shape [100, 1].
            x = tf.random_normal(
                shape=[100, 1],
                mean=0.0,
                stddev=1.0,
                name="x"
            )

            # x [100, 1] @ [[0.7]] + 0.8 -> y [100, 1].
            # Adding a scalar to a matrix broadcasts over every element.
            y = tf.matmul(x, [[0.7]]) + 0.8

            return x, y

    def get_weight(self, shape):
        """Create the weight variable op.

        :param shape: shape of the weight tensor
        :return: weight variable, randomly initialized
        """
        with tf.variable_scope("get_weight"):
            weight = tf.Variable(
                initial_value=tf.random_normal(
                    shape=shape,
                    mean=0.0,
                    stddev=1.0
                ),
                name="w"
            )
            return weight

    def get_bias(self, shape):
        """Create the bias variable op.

        :param shape: shape of the bias tensor ([] for a scalar)
        :return: bias variable, randomly initialized
        """
        with tf.variable_scope("get_bias"):
            bias = tf.Variable(
                initial_value=tf.random_normal(
                    shape=shape,
                    mean=0.0,
                    stddev=1.0
                ),
                name="b"
            )
            return bias

    def linear_model(self, x):
        """Build the linear relation y_predict = x @ w + b.

        :param x: feature tensor of shape [100, 1]
        :return: prediction tensor of shape [100, 1]
        """
        with tf.variable_scope("linear_model"):
            # Kept on self so train() can print their current values.
            # (1) initialize the weight, shape [1, 1]
            self.weight = self.get_weight(shape=[1, 1])
            # (2) initialize the bias, scalar
            self.bias = self.get_bias(shape=[])
            # (3) compute the prediction
            y_predict = tf.matmul(x, self.weight) + self.bias

            return y_predict

    def losses(self, y_true, y_predict):
        """Compute the mean-squared-error loss.

        :param y_true: true values, shape [100, 1]
        :param y_predict: predicted values, shape [100, 1]
        :return: scalar MSE loss tensor
        """
        with tf.variable_scope("losses"):
            loss = tf.reduce_mean(tf.square(y_true - y_predict))
            return loss

    def sgd(self, loss):
        """Build the SGD training op that minimizes the loss.

        :param loss: scalar MSE loss tensor
        :return: training op
        """
        with tf.variable_scope("sgd"):
            # GradientDescentOptimizer implements (stochastic) gradient descent.
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.learning_rate
            )
            # Minimizing moves w and b in the direction that reduces the loss.
            train_op = optimizer.minimize(loss)

            return train_op

    def train(self):
        """Assemble the graph and run 500 optimization steps.

        :return: None
        """
        with tf.variable_scope("train"):
            # 1. load data
            x, y = self.build_data()

            # 2. build the linear model
            y_predict = self.linear_model(x)

            # 3. compute the MSE loss
            loss = self.losses(y, y_predict)

            # 4. build the SGD train op
            train_op = self.sgd(loss)

        # Open a session to run the train op.
        with tf.Session() as ss:
            # Explicitly initialize w and b.
            ss.run(tf.global_variables_initializer())

            # Serialize the graph for TensorBoard; close the writer so the
            # events file is flushed even if a run raises.
            writer = tf.summary.FileWriter("./tmp/", graph=ss.graph)
            try:
                # Repeatedly run the train op.
                for i in range(500):
                    ss.run(train_op)

                    # .eval() uses the default session established by the
                    # enclosing `with tf.Session()` block.
                    print("第%d次的损失为%f,权重为%f,偏置为%f" % (
                        i + 1,
                        loss.eval(),
                        self.weight.eval(),
                        self.bias.eval()
                    ))
            finally:
                writer.close()
156
157
# Guard the entry point so importing this module does not kick off training.
if __name__ == "__main__":
    # 1. instantiate the model
    lr = MyLinearRegression()
    # 2. run training
    lr.train()
# Originally posted 2019-12-29 20:19 by 一棵树0108 (blog scrape footer).