From 0fd9c95906daf90fe8209ec665a540d5a6a3730c Mon Sep 17 00:00:00 2001
From: Mina Moshfegh <mina.moshfegh@fau.de>
Date: Wed, 19 Feb 2025 15:56:41 +0000
Subject: [PATCH] Upload New File

---
 src/models/small_cnn.py | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 src/models/small_cnn.py

diff --git a/src/models/small_cnn.py b/src/models/small_cnn.py
new file mode 100644
index 0000000..19f35e9
--- /dev/null
+++ b/src/models/small_cnn.py
@@ -0,0 +1,54 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+# A compact CNN for small images; a common choice for MNIST in
+# adversarial training setups.
+class SmallCNN(nn.Module):
+    def __init__(self, num_channels=1, num_classes=10):
+        super().__init__()
+        # Four convolutional layers for feature extraction.
+        # Typically used for grayscale MNIST, so default num_channels=1.
+        self.conv1 = nn.Conv2d(num_channels, 32, kernel_size=3, stride=1, padding=1)
+        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
+        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
+        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
+
+        # Three fully-connected layers; 64 * 7 * 7 assumes 28x28 inputs (two 2x2 pools: 28 -> 14 -> 7).
+        self.fc1 = nn.Linear(64 * 7 * 7, 200)
+        self.fc2 = nn.Linear(200, 200)
+        self.fc3 = nn.Linear(200, num_classes)
+
+    def forward(self, x):
+        # Pass through two conv layers, each with relu,
+        # then do a 2x2 max pool.
+        x = F.relu(self.conv1(x))
+        x = F.relu(self.conv2(x))
+        x = F.max_pool2d(x, 2)
+
+        # Another pair of conv+relu, then max pool.
+        x = F.relu(self.conv3(x))
+        x = F.relu(self.conv4(x))
+        x = F.max_pool2d(x, 2)
+
+        # Flatten the feature maps into a 1D vector.
+        x = x.view(x.size(0), -1)
+
+        # Fully-connected layers with ReLU; no activation on the final layer.
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = self.fc3(x)
+
+        # Softmax over classes (note: nn.CrossEntropyLoss expects raw logits, not probabilities).
+        x = F.softmax(x, dim=1)
+
+        return x
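+
+
+# Minimal usage sketch (smoke test). The 28x28 single-channel input shape is an
+# assumption matching MNIST-style data; batch size and tensor names are illustrative.
+if __name__ == "__main__":
+    model = SmallCNN(num_channels=1, num_classes=10)
+    dummy = torch.randn(4, 1, 28, 28)  # four fake 28x28 grayscale images
+    out = model(dummy)
+    print(out.shape)  # expected: torch.Size([4, 10])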
-- 
GitLab