NEURAL NETWORKS: notes on Part 3 of the official PyTorch 60-Minute Blitz tutorial

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

1. Define the network

class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 3x3 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 6 * 6, 120)  # 6*6 is the spatial size after two conv+pool stages on a 32x32 input
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the pooling window is square, you can pass a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        # all dimensions except the batch dimension
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
print(net)
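
As a side note, the hand-written num_flat_features helper can be avoided entirely. A sketch (not part of the original tutorial code) of the same flattening step written with torch.flatten, assuming everything else in Net stays unchanged:

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = torch.flatten(x, 1)  # flatten everything except the batch dimension
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)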

You only have to define the forward function; the backward function (where gradients are computed) is automatically defined for you by autograd. You can use any Tensor operation in the forward function.
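
To see what "automatically defined" means, here is a minimal sketch (not from the tutorial) of autograd differentiating an arbitrary chain of Tensor operations:

x = torch.randn(3, requires_grad=True)
y = (x * 2).sin().sum()  # any chain of Tensor ops
y.backward()             # backward is derived automatically
print(x.grad)            # equals 2 * torch.cos(2 * x)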

The learnable parameters of the model are returned by net.parameters():

params = list(net.parameters())
print(params)
print(len(params))
print(params[0].size())  # conv1's .weight

# randn(1, 1, 32, 32) gives: batch_size=1, 1 channel (grayscale image), image size 32x32
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)

# Zero the gradient buffers of all parameters, then backprop with random gradients
# (out is not a scalar, so backward needs a gradient argument of the same shape)
net.zero_grad()
out.backward(torch.randn(1, 10))

2. Loss Function

output = net(input)
target = torch.randn(10)  # a dummy target, for example
target = target.view(1, -1)  # make it the same shape as output
criterion = nn.MSELoss()  # a simple loss: nn.MSELoss computes the mean squared error between the output and the target

loss = criterion(output, target)
print(loss)

print(loss.grad_fn)  # MSELoss
print(loss.grad_fn.next_functions[0][0])  # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU

The ordering of this chain can be hard to follow at first; the sketch below walks the graph, and the official autograd docs cover it in more detail.
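
A small helper sketch (my own, not from the tutorial) that recursively prints the whole autograd graph behind loss:

def print_graph(fn, depth=0):
    # each node is a backward Function; leaf parameters show up as AccumulateGrad
    if fn is None:
        return
    print('  ' * depth + type(fn).__name__)
    for next_fn, _ in fn.next_functions:
        print_graph(next_fn, depth + 1)

print_graph(loss.grad_fn)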

3. Backprop

net.zero_grad()     # zeroes the gradient buffers of all parameters

print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)

loss.backward()

print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
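
Note that .grad accumulates across backward() calls, which is why the buffers must be zeroed each step. A sketch illustrating this, assuming the same input and target as above:

net.zero_grad()
loss = criterion(net(input), target)
loss.backward()
g1 = net.conv1.bias.grad.clone()

# a second forward/backward pass adds to the existing .grad
loss = criterion(net(input), target)
loss.backward()
print(torch.allclose(net.conv1.bias.grad, 2 * g1))  # True: the gradients accumulated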

4. Update the weights

# The simplest update rule is plain SGD: weight = weight - learning_rate * gradient
learning_rate = 0.01
for p in net.parameters():
    p.data.sub_(p.grad.data * learning_rate)
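
As an aside, mutating .data is legacy style; a sketch of the same update written with torch.no_grad(), the more common modern idiom:

with torch.no_grad():
    for p in net.parameters():
        p -= learning_rate * p.grad  # in-place update, excluded from autograd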

However, when training neural networks you usually want to use different update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc. To enable this, PyTorch provides a small package, torch.optim, that implements all of these methods. Using it is very simple:

# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)

# in your training loop:
optimizer.zero_grad()   # zero the gradient buffers
output = net(input)
loss = criterion(output, target)  # criterion is the nn.MSELoss() defined in Section 2 (Loss Function)
loss.backward()
optimizer.step()    # Does the update
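
Putting the pieces together, a minimal sketch of a complete training loop; trainloader here is a hypothetical DataLoader yielding (inputs, targets) batches:

for epoch in range(2):                    # loop over the dataset twice
    for inputs, targets in trainloader:   # trainloader is hypothetical
        optimizer.zero_grad()             # clear accumulated gradients
        outputs = net(inputs)             # forward pass
        loss = criterion(outputs, targets)
        loss.backward()                   # backprop
        optimizer.step()                  # apply the update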