Getting Started with PyTorch: Image Classification with LeNet5 and CIFAR10

In the earlier post Getting Started with PyTorch: Building the LeNet5 Neural Network with PyTorch, we implemented the simple LeNet5 network in PyTorch. This post builds on that and walks through the full pipeline of defining, training, and testing an image-classification model with LeNet5 and CIFAR10. The code (with detailed comments) follows:

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np


## Build the network: LeNet5 with the input layer changed to 3 channels (CIFAR10 images are RGB)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        self.fc1 = nn.Linear(in_features=16*5*5, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16*5*5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
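
# Why fc1 expects 16*5*5 features (a quick sanity-check sketch, not part of the
# original pipeline): CIFAR10 inputs are 3x32x32; conv1 (kernel 5) -> 6x28x28,
# pool -> 6x14x14, conv2 (kernel 5) -> 16x10x10, pool -> 16x5x5 = 400 features.
# Uncomment to verify:
# print(Net()(torch.randn(1, 3, 32, 32)).size())  # torch.Size([1, 10])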


## Normalization: torchvision datasets return PIL images; ToTensor converts them to (C,H,W) tensors with values in [0,1], and Normalize then maps each channel to [-1,1] via (x - mean) / std
normalization = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
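# Worked example of the mapping above: (0 - 0.5) / 0.5 = -1 and (1 - 0.5) / 0.5 = +1,
# so pixel values in [0,1] end up in [-1,1]. Quick illustrative check:
# print(transforms.Normalize((0.5,)*3, (0.5,)*3)(torch.zeros(3, 2, 2)))  # all -1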


## Load the CIFAR10 dataset
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, transform=normalization, download=True)
train_set_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=4, shuffle=True, num_workers=0) # on Windows, num_workers=0 is recommended
test_set = torchvision.datasets.CIFAR10(root='./data', train=False, transform=normalization, download=True)
test_set_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=4, shuffle=False, num_workers=0) # on Windows, num_workers=0 is recommended
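# CIFAR10 has 50,000 training and 10,000 test images (32x32 RGB, 10 classes), so with
# batch_size=4 each epoch contains 12,500 mini-batches. Uncomment to verify:
# print(len(train_set), len(test_set), len(train_set_loader))  # 50000 10000 12500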


# ## Display some images from CIFAR10
# def imshow(img):
#     # print(img.size())
#     img = img / 2 + 0.5 # unnormalize: [-1,1] => [0,1]
#     img = img.numpy()
#     plt.imshow(np.transpose(img, (1, 2, 0))) # tensor (C,H,W) => matplotlib (H,W,C)
#     plt.show()

# classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# data_iter = iter(train_set_loader)
# images, labels = next(data_iter) # images and labels are both tensors
# # print(images.size())
# # print(labels.size())
# imshow(torchvision.utils.make_grid(images)) # make_grid tiles the batch into one image
# print(' '.join('%s' % classes[labels[j]] for j in range(len(labels))))


## Define the network, the loss function, and the optimizer
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(params=net.parameters(), lr=0.001, momentum=0.9) # SGD with momentum
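# PyTorch's SGD with momentum keeps a velocity buffer v per parameter and updates
#     v = momentum * v + grad
#     p = p - lr * v
# so with momentum=0.9 past gradients decay geometrically rather than being discarded.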


## Train the network
print('Training Started')
for epoch in range(5): # one epoch is a full pass over the training set
    running_loss = 0.0 # accumulates the loss so training progress can be logged
    for i, data in enumerate(train_set_loader):
        # get a mini-batch of inputs and labels
        inputs, labels = data
        # zero the gradient buffers
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # log the average loss every 2000 mini-batches
        running_loss += loss.item()
        if i % 2000 == 1999: # print every 2000 mini-batches
            print('[%3d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')


## Save the model parameters
torch.save(net.state_dict(), './data/LeNet5.pt')
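# Note: state_dict() stores only the learned tensors, not the network structure, so
# loading it back (as in the test section below) still requires the Net class definition.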


## Test the model: reload the saved parameters into a fresh network and measure accuracy on the test set
print('Testing Started')
net_new = Net()
net_new.load_state_dict(torch.load('./data/LeNet5.pt'))
correct = 0
total = 0
with torch.no_grad():
    for data in test_set_loader:
        images, labels = data
        _, predictions = torch.max(net_new(images), 1)
        total += labels.size(0)
        correct += (predictions==labels).sum().item()
print('Accuracy: %d/%d = %.2f%%' % (correct, total, correct/total*100) )
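
## Optional: per-class accuracy (a small extra sketch; the tuple below assumes the
## class order torchvision uses for CIFAR10, the same as in the display section above)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for images, labels in test_set_loader:
        _, predictions = torch.max(net_new(images), 1)
        for label, prediction in zip(labels, predictions):
            class_total[label] += 1
            class_correct[label] += (prediction == label).item()
for i in range(10):
    print('%5s: %.2f%%' % (classes[i], 100 * class_correct[i] / class_total[i]))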

"""Explore:
使用GPU后会发现速度并没有增加很多,原因是LeNet这个模型非常小。
如果将模型宽度增大(增加2个卷积层的卷积核数量),GPU对模型的加速效果会是怎么样的呢?
"""
