Commit 82e00c7f authored by Falguni Ghosh

Upload New File

parent 3a9e349f
from exercise1_material.src_to_implement.Layers.Base import BaseLayer
import numpy as np


class FullyConnected(BaseLayer):
    def __init__(self, input_size, output_size):
        super().__init__()
        self.trainable = True
        # Weights of shape (input_size + 1, output_size); the extra row holds the bias.
        self.weights = np.random.uniform(0.0, 1.0, (input_size + 1, output_size))
        self.input_size = input_size
        self.output_size = output_size
        self.input_tensor = None
        self.error_tensor = None
        self._optimizer = None
        self._gradient_weights = None

    def forward(self, input_tensor):
        # Append a column of ones so the last weight row acts as the bias.
        batch_size = input_tensor.shape[0]
        input_plus_bias = np.ones((batch_size, input_tensor.shape[1] + 1))
        input_plus_bias[:, 0:input_tensor.shape[1]] = input_tensor
        self.input_tensor = input_plus_bias  # cached for the backward pass
        return np.matmul(input_plus_bias, self.weights)

    @property
    def optimizer(self):
        return self._optimizer

    @optimizer.setter
    def optimizer(self, value):
        self._optimizer = value

    @property
    def gradient_weights(self):
        return self._gradient_weights

    @gradient_weights.setter
    def gradient_weights(self, value):
        # Assign to the backing field; writing to self.gradient_weights here
        # would recurse through this setter.
        self._gradient_weights = value

    def backward(self, error_tensor):
        self.error_tensor = np.copy(error_tensor)
        # Error for the previous layer: propagate through the transposed weights.
        error_tensor = np.matmul(error_tensor, self.weights.T)
        # Gradient w.r.t. the weights (bias row included).
        self._gradient_weights = np.matmul(self.input_tensor.T, self.error_tensor)
        if self._optimizer is not None:
            self.weights = self._optimizer.calculate_update(self.weights, self._gradient_weights)
        # Drop the bias column before returning the error tensor.
        return error_tensor[:, 0:error_tensor.shape[1] - 1]
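
A minimal usage sketch of the layer above. The inline SgdStub is a hypothetical stand-in for the exercise's optimizer class; the only thing assumed is the calculate_update(weights, gradient) interface that backward calls.

class SgdStub:
    # Hypothetical optimizer stub: plain gradient descent with a fixed step.
    def __init__(self, learning_rate):
        self.learning_rate = learning_rate

    def calculate_update(self, weight_tensor, gradient_tensor):
        return weight_tensor - self.learning_rate * gradient_tensor

layer = FullyConnected(input_size=4, output_size=3)
layer.optimizer = SgdStub(learning_rate=0.1)

x = np.random.rand(2, 4)                 # batch of 2 samples, 4 features
y = layer.forward(x)                     # -> shape (2, 3)
back = layer.backward(np.ones_like(y))   # -> shape (2, 4), bias column stripped

assert y.shape == (2, 3)
assert back.shape == (2, 4)
assert layer.gradient_weights.shape == (4 + 1, 3)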
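
As a sanity check on the backward pass, the sketch below compares the analytic weight gradient against a central finite difference; the quadratic loss L = 0.5 * sum(y**2) and the epsilon are arbitrary choices made for this check, not part of the exercise code.

def numerical_weight_grad(layer, x, eps=1e-6):
    # Central-difference gradient of L = 0.5 * sum(forward(x)**2)
    # with respect to each weight entry.
    grad = np.zeros_like(layer.weights)
    for idx in np.ndindex(*layer.weights.shape):
        original = layer.weights[idx]
        layer.weights[idx] = original + eps
        loss_plus = 0.5 * np.sum(layer.forward(x) ** 2)
        layer.weights[idx] = original - eps
        loss_minus = 0.5 * np.sum(layer.forward(x) ** 2)
        layer.weights[idx] = original
        grad[idx] = (loss_plus - loss_minus) / (2 * eps)
    return grad

check = FullyConnected(input_size=4, output_size=3)  # no optimizer, so weights stay fixed
x = np.random.rand(2, 4)
y = check.forward(x)
check.backward(y)  # dL/dy = y for L = 0.5 * sum(y**2)
assert np.allclose(check.gradient_weights, numerical_weight_grad(check, x), atol=1e-5)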