Commit 8068810f authored by Falguni Ghosh
Upload New File

parent 4ff7f3c2
import numpy as np


class Sgd:
    """Plain stochastic gradient descent: w <- w - lr * grad."""

    def __init__(self, learning_rate):
        self.learning_rate = learning_rate

    def calculate_update(self, weight_tensor, gradient_tensor):
        # Step against the gradient, scaled by the learning rate.
        return weight_tensor - self.learning_rate * gradient_tensor

class SgdWithMomentum:
    """SGD with momentum: v <- m * v - lr * grad, then w <- w + v."""

    def __init__(self, learning_rate, momentum_rate):
        self.learning_rate = learning_rate
        self.momentum_rate = momentum_rate
        self.prev_momentum = 0

    def calculate_update(self, weight_tensor, gradient_tensor):
        # Velocity: exponentially decaying sum of past gradient steps.
        momentum = self.momentum_rate * self.prev_momentum - self.learning_rate * gradient_tensor
        self.prev_momentum = momentum
        return weight_tensor + momentum

class Adam:
    """Adam: bias-corrected first and second moment estimates of the gradient."""

    def __init__(self, learning_rate, mu, rho):
        self.learning_rate = learning_rate
        self.mu = mu            # decay rate of the first moment (momentum)
        self.rho = rho          # decay rate of the second moment
        self.prev_momentum = 0
        self.prev_r = 0
        self.k = 1              # time step, used for bias correction

    def calculate_update(self, weight_tensor, gradient_tensor):
        g = gradient_tensor
        # Exponentially decaying averages of the gradient and its square.
        momentum = self.mu * self.prev_momentum + (1 - self.mu) * g
        r = self.rho * self.prev_r + (1 - self.rho) * (g ** 2)
        # Bias correction compensates for the zero initialisation of both moments.
        momentum_hat = momentum / (1 - self.mu ** self.k)
        r_hat = r / (1 - self.rho ** self.k)
        self.prev_momentum = momentum
        self.prev_r = r
        self.k += 1
        return weight_tensor - self.learning_rate * momentum_hat / (np.sqrt(r_hat) + np.finfo(float).eps)
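
A minimal usage sketch of the three optimizers above, assuming the interface where a training loop calls calculate_update(weights, gradient) once per step. The tensor shapes, hyperparameter values, and the print at the end are illustrative assumptions and are not part of the committed file.

# Hypothetical usage sketch; shapes and hyperparameters are assumptions.
import numpy as np

weights = np.ones((3, 3))
gradient = 0.1 * np.ones((3, 3))

for optimizer in (Sgd(learning_rate=0.01),
                  SgdWithMomentum(learning_rate=0.01, momentum_rate=0.9),
                  Adam(learning_rate=0.01, mu=0.9, rho=0.999)):
    w = weights.copy()
    for _ in range(5):
        # Each call returns the updated weight tensor for one training step.
        w = optimizer.calculate_update(w, gradient)
    print(type(optimizer).__name__, w[0, 0])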