diff --git a/2_CNN/Optimizers.py b/2_CNN/Optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..60e3cf0343bfcad58d5e42d7cce67511f037960d
--- /dev/null
+++ b/2_CNN/Optimizers.py
@@ -0,0 +1,67 @@
+import numpy as np
+
+
class Sgd:
    """Plain stochastic gradient descent optimizer.

    Update rule: w <- w - learning_rate * gradient.
    """

    def __init__(self, learning_rate):
        # Step size applied to every gradient update.
        self.learning_rate = learning_rate

    def calculate_update(self, weight_tensor, gradient_tensor):
        """Return the updated weight tensor.

        Args:
            weight_tensor: current weights (ndarray or scalar); not modified.
            gradient_tensor: gradient of the loss w.r.t. the weights,
                same shape as ``weight_tensor``.

        Returns:
            A new tensor ``weight_tensor - learning_rate * gradient_tensor``.
        """
        # The subtraction already allocates a fresh array, so no extra
        # np.copy is needed, and no intermediate state has to be stored
        # (consistent with SgdWithMomentum / Adam, which keep only the
        # state they actually reuse).
        return weight_tensor - self.learning_rate * gradient_tensor
+
+
class SgdWithMomentum:
    """SGD with classical (heavy-ball) momentum.

    Update rule:
        v <- momentum_rate * v - learning_rate * gradient
        w <- w + v
    """

    def __init__(self, learning_rate, momentum_rate):
        # Step size applied to the raw gradient.
        self.learning_rate = learning_rate
        # Decay factor for the accumulated velocity (typically ~0.9).
        self.momentum_rate = momentum_rate
        # Velocity from the previous step; the scalar 0 broadcasts
        # correctly against any tensor shape on the first call.
        self.prev_momentum = 0

    def calculate_update(self, weight_tensor, gradient_tensor):
        """Return the updated weight tensor and advance the velocity state.

        Args:
            weight_tensor: current weights; not modified.
            gradient_tensor: gradient of the loss w.r.t. the weights.

        Returns:
            A new tensor ``weight_tensor + velocity``.
        """
        velocity = (self.momentum_rate * self.prev_momentum
                    - self.learning_rate * gradient_tensor)
        # velocity is freshly allocated each call, so it can be stored
        # directly — no defensive np.copy needed.
        self.prev_momentum = velocity
        return weight_tensor + velocity
+
+
class Adam:
    """Adam optimizer (Kingma & Ba, 2015).

    Maintains exponentially decaying averages of the gradient (first
    moment) and of its element-wise square (second moment), corrects
    both for their zero initialization, and scales the step by the
    learning rate exactly once.
    """

    def __init__(self, learning_rate, mu, rho):
        # Global step size (alpha in the paper).
        self.learning_rate = learning_rate
        # First-moment decay (beta1 in the paper, typically ~0.9).
        self.momentum_rate = mu
        # Second-moment decay (beta2 in the paper, typically ~0.999).
        self.rho = rho
        # Biased moment estimates; scalar 0 broadcasts on the first call.
        self.prev_momentum = 0
        self.prev_rho = 0
        # Time step for bias correction; an instance attribute (not a
        # shared class attribute) so each optimizer counts independently.
        self.k = 1

    def calculate_update(self, weight_tensor, gradient_tensor):
        """Return the updated weight tensor and advance the moment state.

        Args:
            weight_tensor: current weights; not modified.
            gradient_tensor: gradient of the loss w.r.t. the weights.

        Returns:
            A new tensor ``w - lr * m_hat / (sqrt(r_hat) + eps)``.
        """
        # BUG FIX: the raw gradient feeds both moments. The original code
        # pre-scaled the gradient by the learning rate (and multiplied by
        # it again in the final step), which effectively rescales epsilon
        # by 1/learning_rate and matches standard Adam only when lr == 1.
        g = gradient_tensor

        momentum = (self.momentum_rate * self.prev_momentum
                    + (1 - self.momentum_rate) * g)
        r = self.rho * self.prev_rho + (1 - self.rho) * (g ** 2)

        # Bias correction compensates for the moments starting at zero.
        bias_corrected_momentum = momentum / (1 - self.momentum_rate ** self.k)
        bias_corrected_r = r / (1 - self.rho ** self.k)

        # Both moment tensors are freshly allocated above, so they can be
        # stored without np.copy.
        self.prev_momentum = momentum
        self.prev_rho = r
        self.k += 1

        # Machine epsilon keeps the division safe when r_hat is ~0.
        return weight_tensor - self.learning_rate * (
            bias_corrected_momentum / (np.sqrt(bias_corrected_r) + np.finfo(float).eps))