# -*- coding: utf-8 -*-
"""FeedForward.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10P4oWoynyE5XxnBNy2kdM4rdO-m-PP5F
"""
# MNIST
# DataLoader, Transformation
# Multilayer Neural Net, activation function
# Loss and Optimizer
# Training Loop (batch training)
# Model evaluation
# GPU support
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# device config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# hyperparameters
input_size = 784 # 28x28
hidden_size = 100
num_classes = 10
num_epochs = 2
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_datasets = torchvision.datasets.MNIST(root='./data', train=True,
                                             transform=transforms.ToTensor(), download=True)
test_datasets = torchvision.datasets.MNIST(root='./data', train=False,
                                            transform=transforms.ToTensor())

train_loader = torch.utils.data.DataLoader(dataset=train_datasets,
                                           batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_datasets,
                                          batch_size=batch_size, shuffle=False)
# look at one batch of training samples
examples = iter(train_loader)
samples, labels = next(examples)
print(samples.shape, labels.shape)  # torch.Size([100, 1, 28, 28]) torch.Size([100])

for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.imshow(samples[i][0], cmap='gray')
plt.show()

# fully connected network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.l1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.l1(x)
        out = self.relu(out)
        out = self.l2(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes).to(device)

# loss and optimizer
criterion = nn.CrossEntropyLoss()
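# note: nn.CrossEntropyLoss applies log-softmax internally, which is why
# forward() returns raw logits and the model has no final softmax layer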
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# training loop
n_total_steps = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # reshape images from [100, 1, 28, 28] to [100, 784] and move to device
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)

        # forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print(f'epoch {epoch+1}/{num_epochs}, step = {i+1}/{n_total_steps}, loss = {loss.item():.4f}')

# test: no gradients needed during evaluation
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)

        # torch.max returns (values, indices); the index is the predicted class
        _, predictions = torch.max(outputs, 1)
        n_samples += labels.shape[0]
        n_correct += (predictions == labels).sum().item()

    acc = 100.0 * n_correct / n_samples
    print(f'accuracy = {acc:.2f} %')
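
# A minimal sketch (not in the original notebook) of classifying a single test
# image with the trained model; the variable names below are illustrative only.
sample_image, sample_label = test_datasets[0]
with torch.no_grad():
    logits = model(sample_image.reshape(-1, 28*28).to(device))
    predicted_class = torch.argmax(logits, dim=1).item()
print(f'predicted = {predicted_class}, actual = {sample_label}')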