Classification Neural Network Model #191
Hello,

```python
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_circles
import torch
from torch import nn

# Number of samples
N = 1000

# Toy binary classification data: two concentric circles
X, y = make_circles(N, noise=0.03, random_state=42)
X = torch.from_numpy(X).type(torch.float)
y = torch.from_numpy(y).type(torch.float)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Device-agnostic code
device = "cuda" if torch.cuda.is_available() else "cpu"

class CircleModelV2(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(in_features=2, out_features=10)
        self.layer_2 = nn.Linear(in_features=10, out_features=10)
        self.layer_3 = nn.Linear(in_features=10, out_features=1)
        self.relu = nn.ReLU()  # ReLU is a non-linear activation function

    def forward(self, x):
        # Non-linear activations go between the linear layers
        return self.layer_3(self.relu(self.layer_2(self.relu(self.layer_1(x)))))

model_3 = CircleModelV2().to(device)

# Setup loss and optimizer
loss_fn = nn.BCEWithLogitsLoss()  # expects raw logits (applies sigmoid internally)
optimizer = torch.optim.SGD(model_3.parameters(), lr=0.1)
# Random seeds
torch.manual_seed(42)
torch.cuda.manual_seed(42)
# Put all data on target device
X_train, y_train = X_train.to(device), y_train.to(device)
X_test, y_test = X_test.to(device), y_test.to(device)
# Loop through data
epochs = 1000
for epoch in range(epochs):
    ### Training
    model_3.train()

    # 1. Forward pass
    y_logits = model_3(X_train).squeeze()
    y_pred = torch.round(torch.sigmoid(y_logits))  # logits -> prediction probabilities -> prediction labels

    # 2. Calculate the loss and accuracy
    loss = loss_fn(y_logits, y_train)  # BCEWithLogitsLoss takes logits as the first input
    acc = accuracy_fn(y_true=y_train, y_pred=y_pred)  # accuracy_fn is defined below

    # 3. Optimizer zero grad
    optimizer.zero_grad()

    # 4. Loss backward
    loss.backward()

    # 5. Step the optimizer
    optimizer.step()

    ### Testing
    model_3.eval()
    with torch.inference_mode():
        test_logits = model_3(X_test).squeeze()
        test_pred = torch.round(torch.sigmoid(test_logits))
        test_loss = loss_fn(test_logits, y_test)
        test_acc = accuracy_fn(y_true=y_test, y_pred=test_pred)

    # Print out what's happening
    if epoch % 100 == 0:
        print(f"Epoch: {epoch} | Loss: {loss:.4f}, Acc: {acc:.2f}% | Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.2f}%")
```

Here's my accuracy_fn function:

```python
def accuracy_fn(y_true, y_pred):
    # Percentage of predictions that exactly match the labels
    correct = torch.eq(y_true, y_pred).sum().item()
    acc = (correct / len(y_pred)) * 100
    return acc
```
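To show what it does, here's a quick toy check (made-up numbers, not from the notebook), where 2 of 3 predictions match the labels:

```python
print(accuracy_fn(y_true=torch.tensor([1., 0., 1.]),
                  y_pred=torch.tensor([1., 0., 0.])))  # -> 66.66666666666666
```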
When I run this code, I get the following results:

My Test Plan: I run the code above and get the results listed above. Next, I copied the same CircleModelV2 code from 02_pytorch_classification_video.ipynb into my notebook, and I got the same results as shown in the classification_video notebook. As a further test, I disconnect the runtime, then reconnect it (simulating closing the notebook and then reopening it). Once I do this and re-run the same code, I get the results I listed above. My question is: why are the results changing that drastically when it's the same code, unless I have something incorrect?
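For reference, here's roughly what I compare across sessions (a sketch of my own check, not from the course):

```python
# Printed at the end of each session; these should match if runs are reproducible
print(f"Final loss: {loss:.4f} | Final test acc: {test_acc:.2f}%")
print(model_3.state_dict()["layer_1.weight"][0])  # a few weights to eyeball
```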
Replies: 1 comment
Hi @musicfanEE,
I just copied your code into a sample notebook and got the same results as the notebook here: https://github.com/mrdbourke/pytorch-deep-learning/blob/main/02_pytorch_classification.ipynb
Have you tried making sure the random seeds are all the same across runs?
See my demo notebook here: https://colab.research.google.com/drive/1nwk9Bnpk2u3p2lI2fArAR2bkPPC_Llt9?usp=sharing
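In particular, note that in your snippet the seeds are set *after* `model_3 = CircleModelV2().to(device)`, so the model's initial weights come from an unseeded RNG and will differ every time the runtime restarts. A minimal sketch of the idea (not the exact notebook code):

```python
import torch

# Set the seeds BEFORE creating the model so weight initialization is reproducible
torch.manual_seed(42)
torch.cuda.manual_seed(42)

model_3 = CircleModelV2().to(device)

# Sanity check: these initial weights should be identical across runtime restarts
print(model_3.state_dict()["layer_1.weight"][0])
```

With the seeds set first, training should produce the same numbers each session (up to small hardware-level non-determinism on the GPU).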