Python Deep Learning 6: A Convolutional Neural Network in PyTorch

2021-08-16 专注的阿熊

import torch

import torch.nn as nn

import torch.optim as optim

from torchvision import datasets, transforms

from torch.utils.data import DataLoader

# Download the training set

train_dataset = datasets.MNIST(root='./',

                               train=True,

                               transform=transforms.ToTensor(),

                               download=True)

# Download the test set

test_dataset = datasets.MNIST(root='./',

                              train=False,

                              transform=transforms.ToTensor(),

                              download=True)

# Batch size

batch_size = 64

# Load the training set into a DataLoader

train_loader = DataLoader(dataset=train_dataset,

                          batch_size=batch_size,

                          shuffle=True)

# Load the test set into a DataLoader

test_loader = DataLoader(dataset=test_dataset,

                         batch_size=batch_size,

                         shuffle=True)
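
The post passes transforms.ToTensor() alone, which only scales pixel values into [0, 1]. As an optional sketch (not part of the original code), the inputs could additionally be standardized by composing a Normalize transform; 0.1307 and 0.3081 are the commonly quoted MNIST mean and standard deviation:

# Optional: standardize the inputs as well as converting them to tensors.
# 0.1307 / 0.3081 are the commonly quoted MNIST mean/std (assumed values).
mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
# Usage: datasets.MNIST(root='./', train=True, transform=mnist_transform, download=True)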

for i, data in enumerate(train_loader):

    # Get one batch of data and the corresponding labels

    inputs, labels = data

    print(inputs.shape)   # torch.Size([64, 1, 28, 28])

    print(labels.shape)   # torch.Size([64])

    break

# Define the network structure

class Net(nn.Module):

    def __init__(self):

        super(Net, self).__init__()

        # Convolutional layer 1

        # Conv2d args: 1) input channels: 1 for grayscale images, 3 for color; 2) output channels: 32 feature maps are produced here; 3) 5*5 convolution kernel; 4) stride 1; 5) padding 2 (a 3*3 kernel needs padding 1 and a 5*5 kernel needs padding 2 to keep the spatial size unchanged)

        # ReLU activation, then 2*2 max pooling with stride 2

        self.conv1 = nn.Sequential(nn.Conv2d(1, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2, 2))

        # Convolutional layer 2: 32 input feature maps, 64 output feature maps

        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2, 2))

        # Fully connected layer 1: input 64*7*7 (the 28*28 image is halved by each of the two pooling layers, 28 -> 14 -> 7), output 1000

        self.fc1 = nn.Sequential(nn.Linear(64 * 7 * 7, 1000), nn.Dropout(p=0.4), nn.ReLU())

        # Fully connected layer 2: outputs the 10 class scores and converts them to probabilities with Softmax

        self.fc2 = nn.Sequential(nn.Linear(1000, 10), nn.Softmax(dim=1))

    def forward(self, x):

        # The convolutional layers expect 4-D input

        # batch size 64, 1 grayscale channel, 28*28 images

        # ([64, 1, 28, 28])

        x = self.conv1(x)

        x = self.conv2(x)

        # The fully connected layers work on 2-D data, so flatten the feature maps first

        x = x.view(x.size()[0], -1)

        x = self.fc1(x)

        x = self.fc2(x)

        return x
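
A quick way to confirm the 64 * 7 * 7 input size of fc1 is to push a dummy batch through the convolutional part. This is a minimal sanity-check sketch, not part of the original post:

# Optional sanity check: verify the shapes described in the comments above.
_net = Net()
_dummy = torch.zeros(64, 1, 28, 28)        # fake batch: 64 grayscale 28*28 images
_features = _net.conv2(_net.conv1(_dummy))
print(_features.shape)                     # torch.Size([64, 64, 7, 7]) -> 64*7*7 = 3136 values per sample
print(_net(_dummy).shape)                  # torch.Size([64, 10])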

LR = 0.0003  # learning rate

# Instantiate the model

model = Net()

# Define the loss function (note: nn.CrossEntropyLoss applies log-softmax internally, so the Softmax in fc2 is technically redundant, though the model still trains)

entropy_loss = nn.CrossEntropyLoss()

# Define the optimizer

optimizer = optim.Adam(model.parameters(), LR)
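
The code above runs on the CPU. Assuming a CUDA-capable GPU is available (an assumption; the post does not rely on one), the model and each batch can be moved to it, conventionally before the optimizer is constructed. A minimal sketch:

# Optional GPU support (assumes CUDA is available, otherwise falls back to the CPU).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# Inside train() and test(), each batch would then be moved as well:
#     inputs, labels = inputs.to(device), labels.to(device)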

def train():

    model.train()

    for i, data in enumerate(train_loader):

        # Get the data and the corresponding labels

        inputs, labels = data

        # Get the model's predictions, shape (64, 10)

        out = model(inputs)

        # Cross-entropy loss: out has shape (batch, C), labels has shape (batch); see the standalone shape example after this function

        loss = entropy_loss(out, labels)

        # Zero the gradients

        optimizer.zero_grad()

        # Compute the gradients (backpropagation)

        loss.backward()

        # Update the weights

        optimizer.step()
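
As mentioned in the comment above, nn.CrossEntropyLoss takes raw scores of shape (batch, C) and integer class labels of shape (batch,). A tiny standalone illustration with arbitrary values:

# Shape illustration for CrossEntropyLoss (standalone example, arbitrary values).
logits = torch.randn(4, 10)              # 4 samples, 10 class scores each
targets = torch.tensor([3, 0, 9, 1])     # 4 integer class labels
print(nn.CrossEntropyLoss()(logits, targets))   # a single scalar loss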

def test():

    model.eval()

    correct = 0

    for i, data in enumerate(test_loader):

        # Get the data and the corresponding labels

        inputs, labels = data

        # Get the model's predictions

        out = model(inputs)

        # Get the maximum value and its index, i.e. the predicted class

        _, predicted = torch.max(out, 1)

        # Count the correct predictions

        correct += (predicted == labels).sum()

    print("Test acc: {0}".format(correct.item() / len(test_dataset)))

    correct = 0

    for i, data in enumerate(train_loader):

        # Get the data and the corresponding labels

        inputs, labels = data

        # Get the model's predictions

        out = model(inputs)

        # Get the maximum value and its index, i.e. the predicted class

        _, predicted = torch.max(out, 1)

        # Count the correct predictions

        correct += (predicted == labels).sum()

    print("Train acc: {0}".format(correct.item() / len(train_dataset)))

for epoch in range(0, 10):

    print('epoch:', epoch)

    train()

    test()
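
After training, the learned weights can be saved for later use; a minimal sketch (the file name mnist_cnn.pt is just a placeholder chosen here):

# Save the trained weights (the file name is an arbitrary example).
torch.save(model.state_dict(), 'mnist_cnn.pt')
# To reload later:
#     model = Net()
#     model.load_state_dict(torch.load('mnist_cnn.pt'))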
