import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
# Floating-point dtype used for all model inputs below.
dtype = torch.float32
# All computation runs on CPU (device is defined but not otherwise used in this chunk).
device = torch.device("cpu")
class MnistDataset(torch.utils.data.Dataset):
    """In-memory MNIST dataset over pre-loaded image and label arrays.

    Applies ``transform`` to each image lazily, at access time.
    """

    def __init__(self, transform, data, label):
        super().__init__()
        self.transform = transform
        self.images = data
        self.labels = label

    def __getitem__(self, idx):
        # Transform the raw image on access and pair it with its label.
        return self.transform(self.images[idx]), self.labels[idx]

    def __len__(self):
        return len(self.images)
# Normalization pipeline: to-tensor then map single-channel pixels to [-1, 1].
# NOTE: renamed from `transforms` — the original assignment shadowed the
# imported `torchvision.transforms` module.
transform_pipeline = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)

# Pre-extracted MNIST arrays (images + integer labels) from .npy files.
train_data = np.load("train_img.npy")
train_label = np.load("train_label.npy")
test_data = np.load("test_img.npy")
test_label = np.load("test_label.npy")

trainset = MnistDataset(transform=transform_pipeline, data=train_data, label=train_label)
# DataLoader batches, shuffles, and (optionally) parallelizes loading,
# yielding an iterable of (images, labels) mini-batches.
trainloader = DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0)

testset = MnistDataset(transform=transform_pipeline, data=test_data, label=test_label)
testloader = DataLoader(testset, batch_size=4, shuffle=False, num_workers=0)
class Net(nn.Module):
    """Small LeNet-style CNN producing 10 class scores (logits).

    Spatial sizes for a 28x28 input: 28 -> conv 24 -> pool 12 -> conv 8 -> pool 4.
    """

    def __init__(self):
        super().__init__()
        # Two conv stages: 1 -> 4 -> 10 channels, 5x5 kernels, shared 2x2 max pool.
        self.conv1 = nn.Conv2d(1, 4, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(4, 10, 5)
        # Fully-connected head: 10 feature maps of 4x4 down to 10 logits.
        self.fc1 = nn.Linear(10 * 4 * 4, 100)
        self.fc2 = nn.Linear(100, 70)
        self.fc3 = nn.Linear(70, 10)

    def forward(self, x):
        # Feature extraction: conv -> relu -> pool, twice.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        # Flatten feature maps, then classify.
        flat = x.view(-1, 10 * 4 * 4)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
net = Net()

import torch.optim as optim

# Cross-entropy over raw logits; SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=1e-4, momentum=0.9)

running_loss = 0.0
for epoch in range(10):
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        # DataLoader already yields tensors; `.to(...)` converts dtype in place
        # of the original `torch.tensor(existing_tensor, ...)`, which emits a
        # copy-construct warning and makes a needless extra copy.
        inputs = inputs.to(dtype)
        labels = labels.to(torch.long)

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report the average loss every 2000 mini-batches.
        if i % 2000 == 1999:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

# Persist trained weights (filename kept for compatibility with the eval code).
torch.save(net.state_dict(), "./cifar_net.pth")
# Reload the trained weights into a fresh model for evaluation.
net = Net()
net.load_state_dict(torch.load("./cifar_net.pth"))

classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
class_correct = [0.0] * 10
class_total = [0.0] * 10

with torch.no_grad():
    for data in testloader:
        images, labels = data
        # `.to(...)` instead of torch.tensor(existing_tensor): no warning, no copy.
        images = images.to(dtype)
        labels = labels.to(torch.long)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        # No .squeeze(): a batch of size 1 would squeeze to 0-dim and break c[i].
        correct = (predicted == labels)
        # Iterate the ACTUAL batch size — the original `range(4)` raised
        # IndexError whenever the final batch held fewer than 4 samples.
        for i in range(labels.size(0)):
            label = labels[i].item()
            class_correct[label] += correct[i].item()
            class_total[label] += 1

for i in range(10):
    # Guard against ZeroDivisionError when a class is absent from the test set.
    if class_total[i] > 0:
        print("Accuracy of %5s : %f %%" % (
            classes[i], 100 * class_correct[i] / class_total[i]
        ))
    else:
        print("Accuracy of %5s : no samples" % classes[i])