diff --git a/2_CNN/FullyConnected.py b/2_CNN/FullyConnected.py
new file mode 100644
index 0000000000000000000000000000000000000000..10ffd8a2257bf26916c0557f9a5f5d35864d7d17
--- /dev/null
+++ b/2_CNN/FullyConnected.py
@@ -0,0 +1,65 @@
+from .Base import BaseLayer
+import numpy as np
+
+
+class FullyConnected(BaseLayer):
+
+    def __init__(self, input_size, output_size):
+        super().__init__()
+        self.trainable = True
+        # Weights and bias are stored together: the last row of self.weights is the bias.
+        self.weights = np.random.uniform(0.0, 1.0, (input_size + 1, output_size))
+        self.input_size = input_size
+        self.output_size = output_size
+        self.input_tensor = None
+        self.error_tensor = None
+        self._optimizer = None
+        self._gradient_weights = None
+
+    def initialize(self, weights_initializer, bias_initializer):
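+        # Fill the weight rows and the bias row of the combined weights matrix separately.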
+        weights_shape = (self.input_size, self.output_size)
+        self.weights[0:self.input_size, :] = weights_initializer.initialize(weights_shape, self.input_size,
+                                                                            self.output_size)
+        bias_shape = (1, self.output_size)
+        self.weights[self.input_size, :] = bias_initializer.initialize(bias_shape, self.input_size, self.output_size)
+
+    def forward(self, input_tensor):
+        batch_size = input_tensor.shape[0]
+        # Append a column of ones so the bias row of self.weights is applied by the same matmul.
+        input_plus_bias = np.ones((batch_size, input_tensor.shape[1] + 1))
+        input_plus_bias[:, 0:input_tensor.shape[1]] = input_tensor
+        self.input_tensor = input_plus_bias  # cached for the backward pass
+        output = np.matmul(input_plus_bias, self.weights)
+        return output
+
+    @property
+    def gradient_weights(self):
+        return self._gradient_weights
+
+    @gradient_weights.setter
+    def gradient_weights(self, w):
+        self._gradient_weights = w
+
+    @property
+    def optimizer(self):
+        return self._optimizer
+
+    @optimizer.setter
+    def optimizer(self, ow):
+        self._optimizer = ow
+
+    def backward(self, error_tensor):
+        self.error_tensor = np.copy(error_tensor)
+        # Error w.r.t. the layer input; the result still carries the bias column.
+        error_tensor = np.matmul(error_tensor, self.weights.T)
+
+        # Gradient w.r.t. the weights; the bias gradient comes from the appended ones column.
+        grad = np.matmul(self.input_tensor.T, self.error_tensor)
+        self._gradient_weights = grad
+
+        if self._optimizer is not None:
+            self.weights = self._optimizer.calculate_update(self.weights, grad)
+
+        # Drop the bias column before passing the error to the previous layer.
+        return error_tensor[:, 0:error_tensor.shape[1] - 1]
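
A minimal usage sketch of the layer above. The Sgd class is a hypothetical stand-in: backward() only assumes an optimizer object exposing calculate_update(weights, gradient), and the import path depends on how the package is laid out.

import numpy as np
from FullyConnected import FullyConnected  # import path is an assumption


class Sgd:
    """Hypothetical optimizer; only calculate_update() is assumed by backward()."""

    def __init__(self, learning_rate):
        self.learning_rate = learning_rate

    def calculate_update(self, weights, gradient):
        # Plain gradient-descent step.
        return weights - self.learning_rate * gradient


layer = FullyConnected(input_size=4, output_size=3)
layer.optimizer = Sgd(learning_rate=0.01)

x = np.random.rand(8, 4)                          # batch of 8 samples
y = layer.forward(x)                              # shape (8, 3)
prev_err = layer.backward(np.random.rand(8, 3))   # shape (8, 4); bias column stripped
print(y.shape, prev_err.shape, layer.gradient_weights.shape)  # (8, 3) (8, 4) (5, 3)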
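
initialize() expects initializer objects with an initialize(shape, fan_in, fan_out) method. The UniformRandom and Constant classes below are hypothetical stand-ins sketching that interface:

import numpy as np
from FullyConnected import FullyConnected  # import path is an assumption


class UniformRandom:
    """Hypothetical weight initializer for the initialize(shape, fan_in, fan_out) interface."""

    def initialize(self, weights_shape, fan_in, fan_out):
        return np.random.uniform(0.0, 1.0, weights_shape)


class Constant:
    """Hypothetical bias initializer; fills the given shape with a fixed value."""

    def __init__(self, value=0.1):
        self.value = value

    def initialize(self, weights_shape, fan_in, fan_out):
        return np.full(weights_shape, self.value)


layer = FullyConnected(input_size=4, output_size=3)
layer.initialize(UniformRandom(), Constant(0.1))
print(layer.weights[-1])  # the bias row: [0.1 0.1 0.1]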
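
The analytic gradient from backward() can be sanity-checked with central finite differences. A sketch, using an arbitrary quadratic loss L = 0.5 * sum(forward(x)**2), whose error tensor is forward(x) itself:

import numpy as np
from FullyConnected import FullyConnected  # import path is an assumption

layer = FullyConnected(input_size=3, output_size=2)  # no optimizer set, so backward() leaves the weights untouched
x = np.random.rand(5, 3)

out = layer.forward(x)
layer.backward(out)                  # error tensor dL/dout = out for this loss
analytic = layer.gradient_weights

eps = 1e-6
numeric = np.zeros_like(layer.weights)
for i in range(layer.weights.shape[0]):
    for j in range(layer.weights.shape[1]):
        layer.weights[i, j] += eps
        loss_plus = 0.5 * np.sum(layer.forward(x) ** 2)
        layer.weights[i, j] -= 2 * eps
        loss_minus = 0.5 * np.sum(layer.forward(x) ** 2)
        layer.weights[i, j] += eps
        numeric[i, j] = (loss_plus - loss_minus) / (2 * eps)

print(np.max(np.abs(analytic - numeric)))  # should be near zero (~1e-9)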