# MNIST_model.py
from che3le.nn.module import Module
from che3le.nn.linear import Linear
from che3le.nn.optimizer import SGD
from che3le.nn.loss import CrossEntropyLoss
from che3le.nn.activation import ReLU, Softmax
from che3le.extensions.dataset import MNIST
from che3le.extensions.dataloader import DataLoader
from che3le.extensions.transforms import Compose, ToTensor, Standardize
import numpy as np
import matplotlib.pyplot as plt
# -- using our implemented transforms and dataset modules
transformation = Compose([ToTensor(), Standardize()])
train_data = MNIST(root='data/', train=True, download=True, transform=transformation)
test_data = MNIST(root='data/', train=False, download=True, transform=transformation)
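# The pipeline above roughly does the following per image: ToTensor converts
# the raw image into a tensor, and Standardize presumably rescales it to zero
# mean and unit variance, i.e.
#   img = (img - mean) / std
# (a sketch of the usual standardization formula, not necessarily che3le's exact code).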
# -- using our implemented dataloader module
train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=32, shuffle=True)
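# Each iteration over a loader yields an (x, y) pair: x is a batch of 32
# images with shape (32, 1, 28, 28) and y holds the 32 matching labels.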
# -- model definition
class Model(Module):
    def __init__(self):
        super().__init__()
        self.linear1 = Linear(28 * 28, 20)
        self.relu = ReLU()
        self.linear2 = Linear(20, 10)
        self.softmax = Softmax()

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        return self.softmax(x)
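# Shape flow through the model (a sketch, assuming column-major batches as
# produced by flatten_batch below): (784, 32) -> linear1 -> (20, 32) -> relu
# -> linear2 -> (10, 32) -> softmax, giving one probability column per image.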
model = Model()
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
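# SGD with momentum classically keeps a velocity v per parameter p; a sketch
# of the update rule (not necessarily che3le's exact implementation):
#   v = momentum * v - lr * p.grad
#   p.data += v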
loss_fn = CrossEntropyLoss()
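# Note: the model already ends in Softmax, so this CrossEntropyLoss presumably
# consumes probabilities directly (unlike, e.g., PyTorch's, which expects raw
# logits and applies log-softmax internally).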
# -- training
# List to store the test accuracy from each run
accuracies = []
# Run the experiment 10 times; the model and optimizer persist across runs,
# so each run continues training where the previous one stopped
for run in range(10):
    print(f"Run {run + 1} / 10:")
    # -- training loop
    for epoch in range(1):  # adjust the number of epochs as needed
        for batch_no, (x, y) in enumerate(train_loader):
            # Flatten the batch from (32, 1, 28, 28) to (784, 32)
            x = x.flatten_batch()  # (784, 32)
            optimizer.zero_grad()
            y_hat = model(x)
            loss = loss_fn(y, y_hat)
            loss.backward()
            optimizer.step()
    # -- testing
    correct = 0
    total = 0
    for batch_no, (x, y) in enumerate(test_loader):
        x = x.flatten_batch()
        y_hat = model(x)
        # y_hat is (10, 32), so take the argmax over the class axis
        predictions = np.argmax(y_hat, axis=0)
        correct += np.sum(predictions == y)
        total += y.data.size
    accuracy = correct / total * 100
    accuracies.append(accuracy)
    print(f'Accuracy for run {run + 1}: {accuracy:.2f}%')
    print('------------------')
average_accuracy = sum(accuracies) / len(accuracies)
print(f'Average Accuracy over 10 runs: {average_accuracy:.2f}%')
# -- plotting (x-axis numbered 1..10 to match the run numbers printed above)
plt.plot(range(1, len(accuracies) + 1), accuracies, marker='o', linestyle='-', color='b', label='Accuracy')
plt.title("Accuracy over 10 Runs")
plt.xlabel("Run")
plt.ylabel("Accuracy (%)")
plt.grid(True)
plt.legend()
plt.show()
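# Optional: to keep the figure as a file, save it before plt.show(), e.g.
#   plt.savefig('mnist_accuracy_runs.png', dpi=150)
# (filename here is just an example).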