Building a Convolutional Neural Network from Raw Image Data (PyTorch)

Date: 2022-06-26
Preface

When getting started with machine learning, we usually practice on the datasets bundled with the framework, which actually skips the most important step in machine learning: data preprocessing. This article goes from the raw data (image files) to the design of a convolutional neural network, implementing MNIST classification step by step. It uses PyTorch, Facebook's deep learning framework. The MNIST dataset is the "Hello World" of machine learning: images of handwritten digits (0-9).

Data download: reply "MNIST" to the official account to get the download link.

# Import the required packages
import torch  # version 1.1.0
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import shutil
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
Processing the raw data

There are 10 folders, each containing 10,000 images. We split this raw dataset into a training set, a validation set, and a test set, each laid out as one folder per class. To make it a bit smarter, we let the program do the split itself according to a configurable ratio:

def path_init(src_path, dst_path, rate=(0.6, 0.2, 0.2)):
    """
    Split the raw data into train / validation / test by ratio.
    :param src_path: path to the raw data, expected to look like:
    - src_path
        - class_1
        - class_2
        ...
    :param dst_path: destination path
    :param rate: split ratios; they must sum to 1
    :return:
    """
    # The following lines create a folder tree of this shape:
    """
    - img_data
        - train
            - class_1
            - class_2
            ...
        - validation
            - class_1
            - class_2
            ...
        - test
            - class_1
            - class_2
            ...
    """
    try:
        class_names = os.listdir(src_path)  # bare folder names of all classes in the raw data
        dst_path = dst_path + '/' + 'MNIST100000_init'
        os.mkdir(dst_path)  # create the destination folder
        three_paths = [dst_path + '/' +
                       i for i in ['train', 'validation', 'test']]  # paths of the three split folders
        for three_path in three_paths:
            os.mkdir(three_path)
            for class_name in class_names:
                os.mkdir(three_path + '/' + class_name)
        # -----------------------------

        dst_train = dst_path + '/' + 'train'
        dst_validation = dst_path + '/' + 'validation'
        dst_test = dst_path + '/' + 'test'

        class_names_list = [src_path + '/' +
                            class_name for class_name in class_names]  # paths of all classes in the raw data

        for class_li in class_names_list:
            imgs = os.listdir(class_li)  # file names of every image in this class, without the path
            # full paths of every image in this class, filtered by extension
            imgs_list = [class_li + '/' +
                         img for img in imgs if img.endswith("png")]
            print(len(imgs_list))
            img_num = len(imgs_list)  # number of images in this class
            # sizes of the three splits
            train_num = int(rate[0] * img_num)
            validation_num = int(rate[1] * img_num)
            # test_num = int(rate[2] * img_num)

            for img in imgs_list[0:train_num]:
                # copy into the training set
                dst = dst_train + '/' + \
                    img.split('/')[-2] + '/' + img.split('/')[-1]
                # print(img, " ", dst)
                shutil.copy(src=img, dst=dst)
            print("training set size:", len(imgs_list[0:train_num]))

            for img in imgs_list[train_num:train_num + validation_num]:
                # copy into the validation set
                dst = dst_validation + '/' + \
                    img.split('/')[-2] + '/' + img.split('/')[-1]
                # print(img, " ", dst)
                shutil.copy(src=img, dst=dst)
            print("validation set size:", len(imgs_list[train_num:train_num + validation_num]))

            for img in imgs_list[train_num + validation_num:]:
                # copy into the test set
                dst = dst_test + '/' + \
                    img.split('/')[-2] + '/' + img.split('/')[-1]
                # print(img, " ", dst)
                shutil.copy(src=img, dst=dst)
            print("test set size:", len(imgs_list[train_num + validation_num:]))

    except OSError:
        print("Destination folder already exists or source folder is missing, please check!")


# Example usage
src_path = './data/MNIST100000'
dst_path = './data'
path_init(src_path, dst_path, rate=(0.6, 0.2, 0.2))
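As a quick sanity check (a minimal sketch, not part of the original article), we can count the files each split received; with 10,000 images per class and rate=(0.6, 0.2, 0.2), each class should end up with 6000/2000/2000 images:

def count_split(split_dir):
    # Count images per class folder under one split directory.
    counts = {c: len(os.listdir(os.path.join(split_dir, c)))
              for c in sorted(os.listdir(split_dir))}
    print(split_dir, "total:", sum(counts.values()), counts)

for split in ['train', 'validation', 'test']:
    count_split('./data/MNIST100000_init/' + split)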
Creating your own dataset class from the raw data

To build your own dataset class, inherit from torch.utils.data.Dataset and override the __getitem__ and __len__ methods. You can refer to how the framework's own MNIST dataset class is written: https://pytorch.org/docs/stable/_modules/torchvision/datasets/mnist.html#MNIST

# Create a dataset class: inherit from Dataset
class My_DataSet(Dataset):
    def __init__(self, img_dir, transform=None):
        super(My_DataSet, self).__init__()
        self.img_dir = img_dir
        class_dir = [self.img_dir + '/' + i for i in os.listdir(self.img_dir)]  # paths of the 10 digit folders
        img_list = []
        for num in range(len(class_dir)):
            img_list += [class_dir[num] + '/' + img_name for img_name in os.listdir(class_dir[num]) if img_name.endswith("png")]
        self.img_list = img_list  # paths of all images
        self.transform = transform

    def __getitem__(self, index):
        label = self.img_list[index].split("/")[-2]  # the class is encoded in the parent folder name
        img = np.array(Image.open(self.img_list[index]))

        if self.transform is not None:
            img = self.transform(img)
        return img, int(label)  # the label parsed from the path is a string, so convert it

    def __len__(self):
        return len(self.img_list)

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.1307, ), (0.3081, ))])  # normalization values published by the dataset provider
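If you want to verify those normalization constants yourself, here is a minimal sketch (an addition, assuming the train split built above; it loads every training image into memory) that estimates the mean and standard deviation of the pixels after scaling to [0, 1]:

raw_train = My_DataSet("./data/MNIST100000_init/train/",
                       transform=transforms.ToTensor())
pixels = torch.cat([img.view(-1) for img, _ in raw_train])  # flatten all training images into one long vector
print(pixels.mean().item(), pixels.std().item())  # should come out close to 0.1307 / 0.3081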
Simple visualization

# Here we skip the transform step for visualization
for data, label in DataLoader(My_DataSet("./data/MNIST100000_init/train/"), batch_size=32, shuffle=True):
    break

fig, ax = plt.subplots(4, 8, figsize=(6, 6))
for i, axi in enumerate(ax.flat):
    axi.imshow(data[i], cmap='binary')
    axi.set(xticks=[], yticks=[])
    axi.set_title(str(label[i].item()))
[Visualization result: a 4x8 grid of sample digits with their labels]

Building the training and test data loaders

BATCH_SIZE = 512  # needs roughly 2 GB of GPU memory
EPOCHS = 20  # total number of training epochs
DEVICE = torch.device("cuda" if torch.cuda.is_available()
                      else "cpu")  # let torch decide whether a GPU is available; a GPU is recommended since it is much faster

train_dataSet = My_DataSet("./data/MNIST100000_init/train/", transform=transform)
train_dataSet_loader = DataLoader(train_dataSet, batch_size=BATCH_SIZE, shuffle=True)
test_dataSet = My_DataSet("./data/MNIST100000_init/test/", transform=transform)
test_dataSet_loader = DataLoader(test_dataSet, batch_size=BATCH_SIZE, shuffle=True)
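As an aside, torchvision already ships datasets.ImageFolder, which handles exactly this one-folder-per-class layout; the following is a rough sketch of how it could replace the custom class (an alternative under stated assumptions, not what this article uses). ImageFolder's default loader returns RGB PIL images and assigns class indices by sorted folder name, which for the folders '0'-'9' happens to match the digit itself:

folder_train = datasets.ImageFolder(
    "./data/MNIST100000_init/train/",
    transform=transforms.Compose([
        transforms.Grayscale(),  # the default loader yields RGB, so collapse back to 1 channel
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ]))
folder_train_loader = DataLoader(folder_train, batch_size=BATCH_SIZE, shuffle=True)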
Building the convolutional neural network

class ConvNet(nn.Module):
    def __init__(self):
        super().__init__()
        # input: 1 channel, 28x28
        self.conv1 = nn.Conv2d(1, 10, 5)  # -> 10, 24x24  (single input channel, 10 filters, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, 3)  # -> 20, 10x10 (20 filters, kernel_size=3)
        self.fc1 = nn.Linear(20 * 10 * 10, 500)  # 20 feature maps of 10x10 each
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        in_size = x.size(0)
        out = self.conv1(x)  # 24x24
        out = F.relu(out)
        out = F.max_pool2d(out, 2, 2)  # 12x12
        out = self.conv2(out)  # 10x10
        out = F.relu(out)
        out = out.view(in_size, -1)
        out = self.fc1(out)
        out = F.relu(out)
        out = self.fc2(out)
        out = F.log_softmax(out, dim=1)
        return out

model = ConvNet().to(DEVICE)
optimizer = optim.Adam(model.parameters())
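The 20*10*10 input size of fc1 is easy to get wrong when you change a kernel size, so here is a small sanity check (an addition, not from the original article) that pushes a dummy batch through the network:

with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28, device=DEVICE)  # one fake 28x28 grayscale image
    print(model(dummy).shape)  # expected: torch.Size([1, 10])
    print(sum(p.numel() for p in model.parameters()))  # total trainable parameter count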
# Wrap the training procedure in a function
def train(model, device, train_loader, optimizer, epoch):
    model.train()  # training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()  # clear the gradients from the previous step
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 30 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

# Wrap the testing procedure in a function
def test(model, device, test_loader):
    model.eval()  # evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up the loss over the batch
            pred = output.max(1, keepdim=True)[1]  # index of the highest log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Start training
for epoch in range(1, EPOCHS + 1):
    train(model, DEVICE, train_dataSet_loader, optimizer, epoch)
    test(model, DEVICE, test_dataSet_loader)
# Partial training output
Train Epoch: 1 [14848/60000 (25%)]    Loss: 0.328185
Train Epoch: 1 [30208/60000 (50%)]    Loss: 0.207250
Train Epoch: 1 [45568/60000 (75%)]    Loss: 0.140810
Test set: Average loss: 0.1036, Accuracy: 19401/20000 (97%)
...
Train Epoch: 20 [14848/60000 (25%)]    Loss: 0.000147
Train Epoch: 20 [30208/60000 (50%)]    Loss: 0.000108
Train Epoch: 20 [45568/60000 (75%)]    Loss: 0.000108

Test set: Average loss: 0.0224, Accuracy: 19922/20000 (100%)
The test accuracy has essentially reached 100% (19922/20000, about 99.6%; the printout rounds up), so the model works quite well. Remember to save it for later use.

# Save the whole model
torch.save(model, './data/model.pkl')

# Save only the model parameters (the recommended approach)
torch.save(model.state_dict(), './data/model_only_weight.pkl')
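Since the state_dict route is the recommended one, here is a minimal loading sketch to match (assuming the ConvNet class is still defined and the file saved above exists):

model_sd = ConvNet().to(DEVICE)  # rebuild the architecture first
model_sd.load_state_dict(torch.load('./data/model_only_weight.pkl'))  # then load only the weights
model_sd.eval()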
Prediction

def predict(model, data, label):
    model.eval()
    with torch.no_grad():
        data, label = data.to(DEVICE), label.to(DEVICE)
        output = model(data)
        pred = output.max(-1)[1]
        # divide by the actual batch size, since the last batch can be smaller than BATCH_SIZE
        print(pred.eq(label.view_as(pred)).sum().item() / label.size(0))


val_dataSet = My_DataSet("./data/MNIST100000_init/validation/", transform=transform)
val_dataSet_loader = DataLoader(val_dataSet, batch_size=BATCH_SIZE, shuffle=True)
for data, label in val_dataSet_loader:
    break
model_load = torch.load('./data/model.pkl')
predict(model_load, data, label)
# Prediction result
# 0.994140625
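To classify a single png straight from disk, the same transform pipeline applies; a minimal sketch (the file path below is hypothetical, substitute any image from the validation folders):

img = np.array(Image.open('./data/MNIST100000_init/validation/7/some_digit.png'))  # hypothetical path
x = transform(img).unsqueeze(0).to(DEVICE)  # add the batch dimension: 1x1x28x28
with torch.no_grad():
    print(model_load(x).max(-1)[1].item())  # predicted digit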
Summary

This article went all the way from raw data (images) to the design of a convolutional neural network, training an MNIST classifier step by step, and along the way practiced building a custom dataset class. The next time you face a new problem, you will know how to build your own dataset class instead of having no idea where to start!