① Deep learning with PyTorch:
- Prepare the dataset (Prepare dataset)
- Design the model that computes the output (Design model)
- Construct the loss function and optimizer (Construct loss and optimizer)
- Write the training cycle (Training cycle): forward, backward, update

The complete MNIST CNN example below follows these four steps.
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# prepare dataset

batch_size = 64
# 0.1307 and 0.3081 are the mean and standard deviation of the MNIST training set
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
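# Optional sanity check: each batch from train_loader yields image tensors of shape
# [batch_size, 1, 28, 28] and label tensors of shape [batch_size].
# images, labels = next(iter(train_loader))
# print(images.shape, labels.shape)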


# design model using class


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # after both conv + pool stages, flatten (n, 20, 4, 4) to (n, 320) for the fully connected layer
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)  # -1 is inferred automatically; here it works out to 320
        # print("x.shape", x.shape)
        x = self.fc(x)
        return x


model = Net()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
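# Note: CrossEntropyLoss combines LogSoftmax and NLLLoss, so forward() returns raw
# logits and no softmax layer is needed at the output of the network.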


# training cycle: forward, backward, update


def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # report the average loss over the last 300 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # torch.max returns (values, indices); the indices are the predicted classes
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %%' % (100 * correct / total))
    return correct / total


if __name__ == '__main__':
    epoch_list = []
    acc_list = []

    for epoch in range(10):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)

    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()
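Where the 320 in torch.nn.Linear(320, 10) comes from can be verified by pushing a dummy batch through the convolutional part of the network. A minimal shape-tracing sketch (standalone, with illustrative variable names not taken from the code above):

import torch
import torch.nn.functional as F

# 28x28 -> conv1(5x5) -> 24x24 -> pool(2) -> 12x12 -> conv2(5x5) -> 8x8 -> pool(2) -> 4x4
conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
pooling = torch.nn.MaxPool2d(2)

dummy = torch.zeros(1, 1, 28, 28)       # one fake MNIST image
out = F.relu(pooling(conv1(dummy)))
out = F.relu(pooling(conv2(out)))
print(out.shape)                        # torch.Size([1, 20, 4, 4])
print(out.view(1, -1).shape)            # torch.Size([1, 320]), hence Linear(320, 10)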