From 9d615f81a5aaca5eea5a744fbf744a47b4a726a5 Mon Sep 17 00:00:00 2001
From: Falguni Ghosh <falguni.ghosh@fau.de>
Date: Sun, 15 Oct 2023 21:14:48 +0000
Subject: [PATCH] Upload New File

---
 3_RNN/RNN.py | 200 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 200 insertions(+)
 create mode 100644 3_RNN/RNN.py

diff --git a/3_RNN/RNN.py b/3_RNN/RNN.py
new file mode 100644
index 0000000..ff189ff
--- /dev/null
+++ b/3_RNN/RNN.py
@@ -0,0 +1,200 @@
+import copy
+
+import numpy as np
+
+from .Base import BaseLayer
+from .FullyConnected import FullyConnected
+from .Sigmoid import Sigmoid
+from .TanH import TanH
+
+class RNN(BaseLayer):
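+    """Elman recurrent layer.
+
+    For each time step t (the batch dimension of the input tensor):
+        h_t = tanh(W_h . [x_t, h_{t-1}] + b_h)   (fc1 followed by TanH)
+        y_t = sigmoid(W_y . h_t + b_y)           (fc2 followed by Sigmoid)
+    """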
+
+    def __init__(self, input_size, hidden_size, output_size):
+        super().__init__()
+        self.trainable = True
+        self.input_size = input_size    # number of input features per time step
+        self.hidden_size = hidden_size  # number of hidden units
+        self.output_size = output_size  # number of output features per time step
+        self.batch_size = None
+
+        self.hidden_state = np.zeros((1, self.hidden_size))
+        self._memorize = False
+
+        # Elman unit
+        self.tan = TanH()
+        self.sig = Sigmoid()
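+        # fc1 maps the concatenated [x_t, h_{t-1}] to the new hidden state and fc2 maps
+        # h_t to the output; both layers (and hence their weights) are shared across time steps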
+        self.fc1 = FullyConnected(self.input_size + self.hidden_size, self.hidden_size)
+        self.fc2 = FullyConnected(self.hidden_size, self.output_size)
+
+        # per-time-step caches filled during forward and consumed by backward
+        self.activ_tan = None
+        self.activ_sig = None
+        self.fc1_list = None
+        self.fc2_list = None
+
+        self._optimizer = None
+        self.optimizer_fc1 = None
+        self.optimizer_fc2 = None
+
+        self.grad_w_fc1 = None
+        self.grad_w_fc2 = None
+        self._gradient_weights = None
+
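+    # when memorize is True, the hidden state is carried over between successive forward
+    # calls, i.e. consecutive batches are treated as one long (truncated-BPTT) sequence;
+    # otherwise every forward call starts from a zero hidden state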
+    @property
+    def memorize(self):
+        return self._memorize
+
+    @memorize.setter
+    def memorize(self, m):
+        self._memorize = m
+
+    @property
+    def optimizer(self):
+        return self._optimizer
+
+    @optimizer.setter
+    def optimizer(self, ow):
+        self._optimizer = ow
+        self.optimizer_fc1 = copy.deepcopy(self._optimizer)
+        self.optimizer_fc2 = copy.deepcopy(self._optimizer)
+
+    @property
+    def gradient_weights(self):
+        return self._gradient_weights
+
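+    # the layer's trainable weights are, by convention, those of the hidden (fc1) sub-layer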
+    @property
+    def weights(self):
+        return self.fc1.weights
+
+    @weights.setter
+    def weights(self, w):
+        self.fc1.weights = w
+
+    def forward(self, input_tensor):  # input_tensor: (time steps as batch dimension, input_size)
+
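+        # caches of the per-time-step activations and layer inputs, needed to restore the
+        # shared sub-layers' state when unrolling the backward pass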
+        self.activ_tan = []
+        self.activ_sig = []
+        self.fc1_list = []
+        self.fc2_list = []
+
+        self.batch_size = input_tensor.shape[0]
+
+        output = np.empty((self.batch_size, self.output_size))
+
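+        # without memorization, every sequence starts from a fresh zero hidden state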
+        if not self._memorize:
+            self.hidden_state = np.zeros((1, self.hidden_size))
+
+        for i in range(self.batch_size):
+
+            # concatenate the current input x_t with the previous hidden state h_{t-1}
+            # into a single row vector of shape (1, input_size + hidden_size)
+            fc1_inp = np.concatenate((input_tensor[i, :].reshape(-1, 1), self.hidden_state.reshape(-1, 1))).T
+
+            fc1_op = self.fc1.forward(fc1_inp)
+
+            self.fc1_list.append(self.fc1.input_tensor)
+
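+            # the tanh activation is the new hidden state h_t and also the input to the output layer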
+            fc2_inp = self.tan.forward(fc1_op)
+
+            self.activ_tan.append(self.tan.activation)
+
+            self.hidden_state = fc2_inp
+
+            fc2_op = self.fc2.forward(fc2_inp)
+
+            self.fc2_list.append(self.fc2.input_tensor)
+
+            output[i] = self.sig.forward(fc2_op)
+
+            self.activ_sig.append(self.sig.activation)
+
+        return output
+
+    def backward(self, error_tensor):  # error_tensor: (time steps as batch dimension, output_size)
+
+        output = np.zeros((self.batch_size, self.input_size))
+
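+        # the FC weights are shared across time steps, so their gradients are accumulated
+        # over the whole unrolled sequence (backpropagation through time)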
+        self.grad_w_fc1 = np.zeros((self.input_size + self.hidden_size + 1, self.hidden_size))
+        self.grad_w_fc2 = np.zeros((self.hidden_size + 1, self.output_size))
+
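+        # gradient flowing into h_t from the following (later) time step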
+        next_hidden_grad = None
+
+        for i in reversed(range(self.batch_size)):
+
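+            # restore the cached state of the shared sub-layers for time step i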
+            self.sig.activation = self.activ_sig[i]
+
+            self.fc2.input_tensor = self.fc2_list[i]
+
+            self.tan.activation = self.activ_tan[i]
+
+            self.fc1.input_tensor = self.fc1_list[i]
+
+            b1 = self.sig.backward(error_tensor[i])
+
+            # the hidden state receives gradient both from the output path and from the
+            # next time step's hidden state
+            b2 = self.fc2.backward(b1)
+            if next_hidden_grad is not None:
+                b2 += next_hidden_grad
+
+            self.grad_w_fc2 += self.fc2.gradient_weights
+
+            b3 = self.tan.backward(b2)
+
+            b4 = self.fc1.backward(b3)
+
+            self.grad_w_fc1 += self.fc1.gradient_weights
+
+            next_hidden_grad = b4[0, self.input_size:self.input_size + self.hidden_size]  # gradient w.r.t. h_{t-1}
+
+            output[i] = b4[0, 0:self.input_size]  # gradient w.r.t. x_t
+
+        self._gradient_weights = self.grad_w_fc1
+
+        # update the shared weights once per sequence with the accumulated gradients,
+        # using an independent (deep-copied) optimizer per sub-layer
+        if self._optimizer is not None:
+            self.fc1.weights = self.optimizer_fc1.calculate_update(self.fc1.weights, self.grad_w_fc1)
+            self.fc2.weights = self.optimizer_fc2.calculate_update(self.fc2.weights, self.grad_w_fc2)
+
+        return output
+
+    def calculate_regularization_loss(self):
+        # the regularization penalty is computed on the hidden (fc1) weights, matching the weights property
+        if self._optimizer is not None and self._optimizer.regularizer is not None:
+            return self._optimizer.regularizer.norm(self.weights)
+        return 0
+
+    def initialize(self, weights_initializer, bias_initializer):
+        self.fc1.initialize(weights_initializer, bias_initializer)
+        self.fc2.initialize(weights_initializer, bias_initializer)
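+
+# A minimal usage sketch (assuming the surrounding layer package and an optimizer object
+# exposing calculate_update(weights, gradient), as used above; the class name Sgd below is
+# a placeholder, not necessarily part of this repository):
+#
+#   rnn = RNN(input_size=13, hidden_size=7, output_size=5)
+#   rnn.optimizer = Sgd(learning_rate=1e-3)
+#   rnn.memorize = True                           # carry the hidden state across calls
+#   out = rnn.forward(np.random.rand(9, 13))      # 9 time steps, 13 features each -> (9, 5)
+#   grad_in = rnn.backward(np.random.rand(9, 5))  # gradient w.r.t. the input, shape (9, 13)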
-- 
GitLab