diff --git a/src/cnn.py b/src/cnn.py new file mode 100644 index 0000000..b359c5f --- /dev/null +++ b/src/cnn.py @@ -0,0 +1,57 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim + + +class CNN(nn.Module): + """ + Convolutional Neural Network (CNN) class. + """ + + def __init__(self): + """ + Initialize the CNN model. + """ + super(CNN, self).__init__() + self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1) + self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1) + self.fc1 = nn.Linear(7 * 7 * 64, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + """ + Forward pass of the CNN. + """ + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(x.size(0), -1) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return x + + def train(self, trainloader): + """ + Train the CNN model. + """ + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(self.parameters(), lr=0.01) + + for _epoch in range(10): # loop over the dataset multiple times + for _i, data in enumerate(trainloader, 0): + inputs, labels = data + optimizer.zero_grad() + outputs = self(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + return self + + def save_model(self, path): + """ + Save the trained model. + """ + torch.save(self.state_dict(), path) diff --git a/src/main.py b/src/main.py index 243a31e..501fbd9 100644 --- a/src/main.py +++ b/src/main.py @@ -1,48 +1,22 @@ from PIL import Image -import torch -import torch.nn as nn -import torch.optim as optim -from torchvision import datasets, transforms from torch.utils.data import DataLoader -import numpy as np +from torchvision import datasets, transforms + +from cnn import CNN # Step 1: Load MNIST Data and Preprocess -transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.5,), (0.5,)) -]) +transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))] +) -trainset = datasets.MNIST('.', download=True, train=True, transform=transform) +trainset = datasets.MNIST(".", download=True, train=True, transform=transform) trainloader = DataLoader(trainset, batch_size=64, shuffle=True) # Step 2: Define the PyTorch Model -class Net(nn.Module): - def __init__(self): - super().__init__() - self.fc1 = nn.Linear(28 * 28, 128) - self.fc2 = nn.Linear(128, 64) - self.fc3 = nn.Linear(64, 10) - - def forward(self, x): - x = x.view(-1, 28 * 28) - x = nn.functional.relu(self.fc1(x)) - x = nn.functional.relu(self.fc2(x)) - x = self.fc3(x) - return nn.functional.log_softmax(x, dim=1) +model = CNN() # Step 3: Train the Model -model = Net() -optimizer = optim.SGD(model.parameters(), lr=0.01) -criterion = nn.NLLLoss() - -# Training loop -epochs = 3 -for epoch in range(epochs): - for images, labels in trainloader: - optimizer.zero_grad() - output = model(images) - loss = criterion(output, labels) - loss.backward() - optimizer.step() +model.train(trainloader) -torch.save(model.state_dict(), "mnist_model.pth") \ No newline at end of file +# Save the trained model +model.save_model("mnist_model.pth")