From 6dbb64455550dd2436e79bf80eda06c292e5516a Mon Sep 17 00:00:00 2001
From: Chinmay Datar <chinmay.datar@fau.de>
Date: Fri, 3 Sep 2021 12:56:18 +0200
Subject: [PATCH] implemented data generator for non-linear regression function
 example

---
 .gitignore                                    |   1 -
 .../Data_Generator/function_generator.py      | 462 ++++++++++++++++++
 LayerParallelLearning/Data_Generator/main.py  |  68 ++-
 .../Data_Generator/nonlin_DG.py               |   0
 .../Data_Generator/nonlin_reg.py              | 139 ++++++
 LayerParallelLearning/LReLU.sh                |  13 +
 .../single_solution/single_solution.cfg       | 130 +++++
 .../Poisson/8_trig/SSHE_num.cfg               |   6 +-
 .../8_Poly_1/poisson.cfg                      |   2 +-
 .../8_Poly_2/poisson.cfg                      |   2 +-
 .../8_Poly_3/poisson.cfg                      |   2 +-
 .../exp5_LReLU/8_Poly_1/poisson.cfg           | 130 +++++
 .../exp5_LReLU/8_Poly_2/poisson.cfg           | 130 +++++
 .../exp5_LReLU/8_Poly_3/poisson.cfg           | 130 +++++
 .../examples/quadratic/quadratic.cfg          | 130 +++++
 .../plotting_routines/main.py                 |  16 +-
 LayerParallelLearning/src/main.cpp            |  12 +
 LayerParallelLearning/src/network.cpp         |  19 +
 LayerParallelLearning/tanh.sh                 |  13 -
 19 files changed, 1346 insertions(+), 59 deletions(-)
 create mode 100644 LayerParallelLearning/Data_Generator/function_generator.py
 create mode 100644 LayerParallelLearning/Data_Generator/nonlin_DG.py
 create mode 100644 LayerParallelLearning/Data_Generator/nonlin_reg.py
 create mode 100755 LayerParallelLearning/LReLU.sh
 create mode 100644 LayerParallelLearning/examples/SSHE_Analytical/single_solution/single_solution.cfg
 rename LayerParallelLearning/examples/poisson_experiments/{exp4_reg_datanorm _tanh => exp4_tanh}/8_Poly_1/poisson.cfg (98%)
 rename LayerParallelLearning/examples/poisson_experiments/{exp4_reg_datanorm _tanh => exp4_tanh}/8_Poly_2/poisson.cfg (98%)
 rename LayerParallelLearning/examples/poisson_experiments/{exp4_reg_datanorm _tanh => exp4_tanh}/8_Poly_3/poisson.cfg (98%)
 create mode 100644 LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_1/poisson.cfg
 create mode 100644 LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_2/poisson.cfg
 create mode 100644 LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_3/poisson.cfg
 create mode 100644 LayerParallelLearning/examples/quadratic/quadratic.cfg
 delete mode 100755 LayerParallelLearning/tanh.sh

diff --git a/.gitignore b/.gitignore
index 92067fd..f211a40 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
-examples/
 *.o
 *.dat
 LayerParallelLearning/main
diff --git a/LayerParallelLearning/Data_Generator/function_generator.py b/LayerParallelLearning/Data_Generator/function_generator.py
new file mode 100644
index 0000000..3bf9a9a
--- /dev/null
+++ b/LayerParallelLearning/Data_Generator/function_generator.py
@@ -0,0 +1,462 @@
+import random
+
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
+'''
+import os.path
+save_path = '/home/multigrid/Master Thesis/LayerParallelLearning/examples/function'
+completeName = os.path.join(save_path, name_of_file+".dat")
+'''
+
+''' For Function Learning: Features- (x,y), to learn- f(x,y) '''
+
+# Generate training and testing data features(x,y)
+training_size = 6000
+testing_size = 1500
+
+# Create and store the grid: Specify domain E.g here[-1,1] and grid size(including boundary points)
+nx, ny = (8, 8)
+x = np.linspace(-1., 1., nx)
+y = np.linspace(-1., 1., ny)
+# xx, yy = np.meshgrid(x, y, sparse=True)
+xx, yy = np.meshgrid(x, y)
+
+# Step size in x and y directions
+h = 2.0 / (nx - 1)
+
+
+def ax2plusby2(x, y):
+    a = random.uniform(-10.0, 10.0)
+    b = random.uniform(-10.0, 10.0)
+    xx_sq = (x ** 2)
+    yy_sq = (y ** 2)
+    return a * xx_sq + b * yy_sq, a, b
+
+def poisson(x, y):
+    k = random.uniform(-5.0, 5.0)
+    l = random.uniform(-5.0, 5.0)
+    return np.sin(k * x) * np.cos(l * y)
+
+# Numerical solution of the Laplace equation with uniform discretization in x and y
+def Laplace_disc_const_bdry(T):
+    # Choose BC: Constant BC on each side chosen as follows
+    T_top = random.uniform(0.0, 100.0)
+    T_bottom = 0.0 # random.uniform(-100., 0.)
+    T_left = 0.0 # random.uniform(-100., 0.)
+    T_right = random.uniform(50.0, 100.0) #random.uniform(0, 100.0)
+
+    # Initial guess of interior grid
+    T_guess = (T_top + T_bottom + T_left + T_right) / 4.
+    maxIter = 1000
+
+    # Set the interior value with T_guess
+    T.fill(T_guess)
+
+    # Set Boundary condition
+    T[:, (nx - 1):] = T_right
+    T[:, :1] = T_left
+    T[(ny - 1):, :] = T_top
+    T[:1, :] = T_bottom
+
+
+    # Iterate (assume the solution converges within maxIter iterations)
+    for iteration in range(0, maxIter):
+        for i in range(1, nx - 1):
+            for j in range(1, ny - 1):
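+                # Gauss-Seidel sweep: each interior point becomes the average of its four neighbors (5-point stencil)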
+                T[i, j] = 0.25 * (T[i + 1, j] + T[i - 1, j] + T[i, j + 1] + T[i, j - 1])
+    #print("Iterations finished")
+    return T
+
+def Laplace_cont_bdry(u_true, T):
+    # Initial guess of interior grid
+    T_guess = 0.
+    maxIter = 1000
+
+    # Set the interior value with T_guess
+    T.fill(T_guess)
+
+    # Set Boundary condition
+    T[:, (nx - 1):] = u_true[:, (nx - 1):]
+    T[:, :1] = u_true[:, :1]
+    T[(ny - 1):, :] = u_true[(ny - 1):, :]
+    T[:1, :] = u_true[:1, :]
+
+    # Iterate (assume the solution converges within maxIter iterations)
+    for iteration in range(0, maxIter):
+        for i in range(1, nx - 1):
+            for j in range(1, ny - 1):
+                T[i, j] = 0.25 * (T[i + 1, j] + T[i - 1, j] + T[i, j + 1] + T[i, j - 1])
+    #print("Iterations finished")
+    return T
+
+def Poisson_solver(u_true, f, u_num):
+    # Initial guess of interior grid
+    T_guess = 0.
+    maxIter = 1000
+
+    # Set the interior value with T_guess
+    u_num.fill(T_guess)
+
+    # Set Boundary condition
+    u_num[:, (nx - 1):] = u_true[:, (nx - 1):]
+    u_num[:, :1] = u_true[:, :1]
+    u_num[(ny - 1):, :] = u_true[(ny - 1):, :]
+    u_num[:1, :] = u_true[:1, :]
+
+    # Iteration (Assume that the solution converges in maxIter iterations)
+    for iteration in range(0, maxIter):
+        for i in range(1, nx - 1):
+            for j in range(1, ny - 1):
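+                # Discrete Poisson update: u_ij = (sum of the four neighbors - h^2 * f_ij) / 4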
+                nbr_sum = u_num[i + 1, j] + u_num[i - 1, j] + u_num[i, j + 1] + u_num[i, j - 1]
+                u_num[i, j] = 0.25 * (nbr_sum - h * h * f[i, j])
+
+
+def set_features(f):
+    Left, Right = f[1:-1:1, 0], f[1:-1:1, -1]
+    Bottom, Top = f[0, :], f[-1, :]
+    boundary = np.hstack((Bottom, Left, Right, Top))
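+    # For nx = ny = 8 this stacks 2*nx + 2*(ny - 2) = 28 boundary values into one row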
+    return np.reshape(boundary, (1, len(boundary)))
+
+
+def set_interior(f):
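+    # Flatten the interior to one row: (nx - 2) * (ny - 2) = 36 values for the 8x8 grid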
+    return np.reshape(f[1:-1:1, 1:-1:1], (1, (nx - 2) * (ny - 2)))
+
+
+def Poisson_Set_TrueSol_And_rhs(xx, yy):
+    r = np.random.randint(low=1, high=11)  # low included, high excluded
+    # Caution: comment out the next line to sample from the full (generalized) dataset
+    r = 4  # fix the solution type for this experiment
+    # Initialize the true solution and rhs grids
+    u_true = np.empty((nx, ny))
+    f = np.empty((nx, ny))
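+    # In each branch below, f is the analytic Laplacian of u_true (f = u_xx + u_yy),
+    # so (u_true, f) is an exact solution/right-hand-side pair for the Poisson problem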
+
+    if r == 1:
+        # Type 1: np.sin(k * xx) * np.sinh(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        u_true = np.sin(k * xx) * np.sinh(l * yy)
+        f = (-1.0 * k * k + l * l) * u_true
+        # scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
+        # u_true = u_true/scaling_factor
+        # f = f/scaling_factor
+
+    elif r == 2:
+        # Type 2: np.cos(k * xx) * np.sinh(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        u_true = np.cos(k * xx) * np.sinh(l * yy)
+        f = (-1.0 * k * k + l * l) * u_true
+        # scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
+        # u_true = u_true/scaling_factor
+        # f = f/scaling_factor
+
+    elif r == 3:
+        # Type 3: c * np.sin(k * xx) * np.cos(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        c = random.uniform(-10, 10)
+        u_true = c * np.sin(k * xx) * np.cos(l * yy)
+        f = (-1.0 * k * k - 1.0 * l * l) * u_true
+
+
+    elif r == 4:
+        # Type 4: c * np.cos(k * xx) * np.sin(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        c = random.uniform(-10, 10)
+        u_true = c * np.cos(k * xx) * np.sin(l * yy)
+        f = (-1.0 * k * k - 1.0 * l * l) * u_true
+
+
+    elif r == 5:
+        # Type 5: c * np.cos(k * xx) * np.cosh(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        c = random.uniform(-5,5)
+        u_true = c * np.cos(k * xx) * np.cosh(l * yy)
+        f = (-1.0 * k * k + l * l) * u_true
+
+
+    elif r == 6:
+        # Type 6: c * np.sin(k * xx) * np.cosh(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        c = random.uniform(-5,5)
+        u_true = c * np.sin(k * xx) * np.cosh(l * yy)
+        f = (-1.0 * k * k + l * l) * u_true
+
+
+    elif r == 7:
+        # Type 7: np.sinh(k * xx) * np.cos(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        u_true = np.sinh(k * xx) * np.cos(l * yy)
+        f = (k * k - 1.0 * l * l) * u_true
+
+
+    elif r == 8:
+        # Type 8: np.sinh(k * xx) * np.sin(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        u_true = np.sinh(k * xx) * np.sin(l * yy)
+        f = (k * k - 1.0 * l * l) * u_true
+
+
+    elif r == 9:
+        # Type 9: c * np.cosh(k * xx) * np.sin(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        c = random.uniform(-10, 10)
+        u_true = c * np.cosh(k * xx) * np.sin(l * yy)
+        f = (k * k - 1.0 * l * l) * u_true
+
+
+    elif r == 10:
+        # Type 10: c * np.cosh(k * xx) * np.cos(l * yy)
+        k = random.uniform(-5, 5)
+        l = random.uniform(-5, 5)
+        c = random.uniform(-10, 10)
+        u_true = c * np.cosh(k * xx) * np.cos(l * yy)
+        f = (k * k - 1.0 * l * l) * u_true
+
+
+    # Polynomial Data set
+    elif r == 11 or r == 15:
+        # Type 11: Constant functions
+        c = random.uniform(-1, 1)
+        # print(c)
+        u_true.fill(c)
+        f.fill(0)
+
+    elif r == 12 or r ==16:
+        # Type 12: linear functions a*x + b*y + c
+        a = random.uniform(-5, 5)
+        b = random.uniform(-5, 5)
+        c = random.uniform(-10, 10)
+        u_true = a * xx + b * yy + c
+        f.fill(0)
+        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()))
+        u_true = u_true/scaling_factor
+
+    elif r == 13 or r == 17:
+        # Type 13: 2nd degree polynomial
+        a = random.uniform(-10, 10)
+        b = random.uniform(-10, 10)
+        c = random.uniform(-10, 10)
+        d = random.uniform(-5, 5)
+        e = random.uniform(-5, 5)
+        g = random.uniform(-10, 10)
+        u_true = a * xx * xx + b * yy * yy + c * xx * yy + d * xx + e * yy + g
+        f.fill(2.0 * a + 2.0 * b)
+        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
+        u_true = u_true/scaling_factor
+        f = f/scaling_factor
+
+    elif r == 14 or r == 18:
+        # Type 14: 3rd degree polynomial
+        a = random.uniform(-80, 80)
+        b = random.uniform(-80, 80)
+        c = random.uniform(-80, 80)
+        d = random.uniform(-80, 80)
+        e = random.uniform(-20, 20)
+        f1 = random.uniform(-20, 20)
+        g = random.uniform(-10, 10)
+        h = random.uniform(-10, 10)
+        i = random.uniform(-10, 10)
+        j = random.uniform(-10, 10)
+
+        u_true = (a * xx * xx * xx + b * yy * yy * yy
+                  + c * xx * xx * yy + d * xx * yy * yy
+                  + e * xx * xx + f1 * yy * yy + g * xx * yy
+                  + h * xx + i * yy + j)
+
+        f = (6.0 * a * xx + 2.0 * c * yy + 2.0 * e
+             + 6.0 * b * yy + 2.0 * d * xx + 2.0 * f1)
+        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
+        u_true = u_true/scaling_factor
+        f = f/scaling_factor
+
+    return u_true, f
+
+
+def Laplace_true_sol(xx,yy):
+    k = random.uniform(-5, 5)
+    return np.sin(k * xx) * np.sinh(k * yy)
+
+
+def sampled_plot(u, u_true):
+    # using subplot function and creating plot one
+    fig1 = plt.figure()
+    fig1.suptitle('Temperature Contour', fontsize=14)
+
+    plt.subplot(1, 2, 1, aspect=1.0)  # row 1, column 2, count 1
+    plt.title("Numerical Solution ")
+    colorinterpolation = 100
+    colourMap = plt.cm.jet
+    plt.contourf(xx, yy, u, colorinterpolation, cmap=colourMap)
+    cb1 = plt.colorbar(orientation='horizontal')
+
+    # row 1, column 2, count 2
+    plt.subplot(1, 2, 2, aspect=1.0)
+    plt.title("True Solution- ")
+    colorinterpolation = 100
+    colourMap = plt.cm.jet
+    plt.contourf(xx, yy, u_true, colorinterpolation, cmap=colourMap)
+    cb2 = plt.colorbar(orientation='horizontal')  # cb1 thevla tar it shows the same
+    plt.show()
+    # Second figure
+    fig2 = plt.figure()
+    fig2.suptitle('Temperature Distribution', fontsize=14)
+
+    plt.subplot(1, 2, 1, aspect=1.0)  # row 1, column 2, count 1
+    ax1 = plt.axes(projection='3d')
+    ax1.plot_surface(xx, yy, u)
+    plt.title("Numerical Solution")
+    ax1.set_xlabel('x')
+    ax1.set_ylabel('y')
+    ax1.set_zlabel('temp')
+    plt.show()
+
+    plt.subplot(1, 2, 2, aspect=1.0)  # row 1, column 2, count 1
+    ax2 = plt.axes(projection='3d')
+    ax2.plot_surface(xx, yy, u_true)
+    plt.title("True Solution")
+    ax2.set_xlabel('x')
+    ax2.set_ylabel('y')
+    ax2.set_zlabel('temp')
+    plt.show()
+
+def sampled_plot_new(u):
+    fig = plt.figure(figsize=(12, 6))
+    ax = fig.add_subplot(111, projection='3d')
+    colorinterpolation = 50
+
+    surf = ax.plot_surface(xx, yy, u, rstride=1, cstride=1)
+    cset = ax.contourf(xx, yy, u, colorinterpolation, zdir='z',  offset=np.min(u))
+    # cset = ax.contourf(xx, yy, u, colorinterpolation, zdir='x', offset=-1)
+    # cset = ax.contourf(xx, yy, u,colorinterpolation, zdir='y', offset=1)
+    # cb2 = plt.colorbar(orientation='vertical')
+    fig.colorbar(surf, ax=ax, shrink=0.5)
+    ax.set_xlabel('X')
+    ax.set_xlim(-1, 1)
+    ax.set_ylabel('Y')
+    ax.set_ylim(-1, 1)
+    ax.set_zlabel('temp')
+    ax.set_zlim(np.min(u), np.max(u))
+    ax.set_title('3D surface with 2D contour plot projections')
+    plt.show()
+
+with open('features_training.dat', 'a') as f1:
+    with open('output_training.dat', 'a') as f2:
+        with open('features_validation.dat', 'a') as f3:
+            with open('output_validation.dat', 'a') as f4:
+                for i in range(training_size + testing_size):
+                    '''
+                    # Calc the analytical solution on the grid (u = u_analytic)
+                    u = poisson(xx, yy)
+                    
+                    # Compute the numerical solution(u=u_num) of laplace equation
+                    # Initializing the numerical solution grid    
+                    u_num = np.empty((nx, ny))
+                    u_true = np.empty((nx, ny))
+                    f = np.empty((nx, ny))
+                    '''
+                    u_num = np.empty((nx, ny))
+                    u_true = np.empty((nx, ny))
+                    f = np.empty((nx, ny))
+                    u_true, a, b = ax2plusby2(xx,yy)                
+                    # Poisson Problem -> RHS: f not necessarily = 0
+                    # u_true, f= Poisson_Set_TrueSol_And_rhs(xx, yy)
+
+                    # Poisson numerical solver: soln -> u_num
+                    # Poisson_solver(u_true, f, u_num)
+
+                    '''
+                    # Laplace Equation -> RHS: f = 0
+                    u_true = Laplace_true_sol(xx, yy)
+                    u_num = Laplace_cont_bdry(u_true, grid1)
+                    '''
+
+                    # Set the features as BCs (Input to the network)
+                    # features_bc = set_features(u_num)
+                    features = set_features(u_true)
+                    ground_truth = set_interior(u_true)
+                    # features = np.hstack((features_bc, features_rhs))
+
+                    # Set the true solution for training and validation
+                    # ground_truth = set_interior(u_num)
+
+
+                    # Write the grid coordinates, training and validation data
+                    if i < training_size:
+                        np.savetxt(f1, features)
+                        np.savetxt(f2, ground_truth)
+                    else:
+                        np.savetxt(f3, features)
+                        np.savetxt(f4, ground_truth)
+                    if i % 500 == 0:
+                        fig1 = plt.figure()
+                        fig1.suptitle('Function Data', fontsize=14)
+
+                        # plt.subplot(1, 2, 1, aspect=1.0)  # row 1, column 2, count 1
+                        plt.title("True solution:f(x,y)=" + str(a) + "*x*x" + " + " + str(b) + "*y*y")
+                        colorinterpolation = 100
+                        colourMap = plt.cm.jet
+                        plt.contourf(xx, yy, u_true, colorinterpolation, cmap=colourMap)
+                        cb1 = plt.colorbar(orientation='horizontal')
+                        plt.show()
+
+                    '''
+                    if i % 1 == 0:
+                        sampled_plot_new(u_true)
+                    
+                    if i % 500 == 0:
+                        fig1 = plt.figure()
+                        fig1.suptitle('Temperature Contour', fontsize=14)
+
+                        # plt.subplot(1, 2, 1, aspect=1.0)  # row 1, column 2, count 1
+                        plt.title("True Solution ")
+                        colorinterpolation = 100
+                        colourMap = plt.cm.jet
+                        plt.contourf(xx, yy, u_true, colorinterpolation, cmap=colourMap)
+                        cb1 = plt.colorbar(orientation='horizontal')
+                        plt.show()
+
+                    
+                    plt.title("Temperature contour: True Solution- ")
+                    colorinterpolation = 50
+                    colourMap = plt.cm.jet
+                    plt.contourf(xx, yy, u_true, colorinterpolation, cmap=colourMap)
+                    cb1 = plt.colorbar()
+                    plt.show()
+                    '''
+
+# Sanity Checks: All look good!
+'''
+    print(z)
+    print(Left)
+    print(Right)
+    print(Top)
+    print(Bottom)
+    print(boundary)
+    print(interior)
+'''
+
+# Plotting routines
+'''
+ax = plt.axes(projection='3d')
+ax.plot_surface(xx, yy, f_nodes)
+ax.set_xlabel('x')
+ax.set_ylabel('y')
+ax.set_zlabel('f(x,y)')
+plt.show()
+'''
diff --git a/LayerParallelLearning/Data_Generator/main.py b/LayerParallelLearning/Data_Generator/main.py
index b42f0b2..384dde0 100644
--- a/LayerParallelLearning/Data_Generator/main.py
+++ b/LayerParallelLearning/Data_Generator/main.py
@@ -30,8 +30,8 @@ h = 2.0 / (nx - 1)
 
 
 def ax2plusby2(x, y):
-    a = random.uniform(-100.0, 100.0)
-    b = random.uniform(-100.0, 100.0)
+    a = random.uniform(-10.0, 10.0)
+    b = random.uniform(-10.0, 10.0)
     xx_sq = (x ** 2)
     yy_sq = (y ** 2)
     return a * xx_sq + b * yy_sq
@@ -127,8 +127,9 @@ def set_interior(f):
 
 
 def Poisson_Set_TrueSol_And_rhs(xx, yy):
-    r = np.random.randint(low=1, high=19) # low included, high excluded
-
+    r = np.random.randint(low=1, high=11)  # low included, high excluded
+    # Caution: comment out the next line to sample from the full (generalized) dataset
+    r = 4  # fix the solution type for this experiment
     # Initialize the true solution and rhs grids
     u_true = np.empty((nx, ny))
     f = np.empty((nx, ny))
@@ -139,9 +140,9 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         l = random.uniform(-5, 5)
         u_true = np.sin(k * xx) * np.sinh(l * yy)
         f = (-1.0 * k * k + l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+        # scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
+        # u_true = u_true/scaling_factor
+        # f = f/scaling_factor
 
     elif r == 2:
         # Type 2: np.cos(k * xx) * np.sinh(l * yy)
@@ -149,9 +150,9 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         l = random.uniform(-5, 5)
         u_true = np.cos(k * xx) * np.sinh(l * yy)
         f = (-1.0 * k * k + l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+        # scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
+        # u_true = u_true/scaling_factor
+        # f = f/scaling_factor
 
     elif r == 3:
         # Type 3: c * np.sin(k * xx) * np.cos(l * yy)
@@ -160,9 +161,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         c = random.uniform(-10, 10)
         u_true = c * np.sin(k * xx) * np.cos(l * yy)
         f = (-1.0 * k * k - 1.0 * l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 4:
         # Type 4: np.cos(k * xx) * np.sin(l * yy)
@@ -171,9 +170,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         c = random.uniform(-10, 10)
         u_true = c * np.cos(k * xx) * np.sin(l * yy)
         f = (-1.0 * k * k - 1.0 * l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 5:
         # Type 1: np.cos(k * xx) * np.cosh(l * yy)
@@ -182,9 +179,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         c = random.uniform(-5,5)
         u_true = c * np.cos(k * xx) * np.cosh(l * yy)
         f = (-1.0 * k * k + l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 6:
         # Type 1: np.sin(k * xx) * np.cosh(l * yy)
@@ -193,9 +188,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         c = random.uniform(-5,5)
         u_true = c * np.sin(k * xx) * np.cosh(l * yy)
         f = (-1.0 * k * k + l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 7:
         # Type 1: np.sinh(k * xx) * np.cos(l * yy)
@@ -203,9 +196,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         l = random.uniform(-5, 5)
         u_true = np.sinh(k * xx) * np.cos(l * yy)
         f = (k * k - 1.0 * l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 8:
         # Type 2: np.sinh(k * xx) * np.sin(l * yy)
@@ -213,9 +204,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         l = random.uniform(-5, 5)
         u_true = np.sinh(k * xx) * np.sin(l * yy)
         f = (k * k - 1.0 * l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 9:
         # Type 3: c * np.cosh(k * xx) * np.sin(l * yy)
@@ -224,9 +213,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         c = random.uniform(-10, 10)
         u_true = c * np.cosh(k * xx) * np.sin(l * yy)
         f = (k * k - 1.0 * l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     elif r == 10:
         # Type 10: np.cosh(k * xx) * np.cos(l * yy)
@@ -235,9 +222,7 @@ def Poisson_Set_TrueSol_And_rhs(xx, yy):
         c = random.uniform(-10, 10)
         u_true = c * np.cosh(k * xx) * np.cos(l * yy)
         f = (k * k - 1.0 * l * l) * u_true
-        scaling_factor = max(np.abs(u_true.max()), np.abs(u_true.min()), np.abs(f.max()), np.abs(f.min()))
-        u_true = u_true/scaling_factor
-        f = f/scaling_factor
+
 
     # Polynomial Data set
     elif r == 11 or r == 15:
@@ -410,11 +395,22 @@ with open('features_training.dat', 'a') as f1:
                     else:
                         np.savetxt(f3, features)
                         np.savetxt(f4, ground_truth)
+                    if i % 500 == 0:
+                        fig1 = plt.figure()
+                        fig1.suptitle('Temperature Contour', fontsize=14)
+
+                        # plt.subplot(1, 2, 1, aspect=1.0)  # row 1, column 2, count 1
+                        plt.title("True Solution ")
+                        colorinterpolation = 100
+                        colourMap = plt.cm.jet
+                        plt.contourf(xx, yy, u_true, colorinterpolation, cmap=colourMap)
+                        cb1 = plt.colorbar(orientation='horizontal')
+                        plt.show()
 
                     '''
                     if i % 1 == 0:
                         sampled_plot_new(u_true)
-                    '''
+                    
                     if i % 500 == 0:
                         fig1 = plt.figure()
                         fig1.suptitle('Temperature Contour', fontsize=14)
@@ -427,7 +423,7 @@ with open('features_training.dat', 'a') as f1:
                         cb1 = plt.colorbar(orientation='horizontal')
                         plt.show()
 
-                    '''
+                    
                     plt.title("Temperature contour: True Solution- ")
                     colorinterpolation = 50
                     colourMap = plt.cm.jet
diff --git a/LayerParallelLearning/Data_Generator/nonlin_DG.py b/LayerParallelLearning/Data_Generator/nonlin_DG.py
new file mode 100644
index 0000000..e69de29
diff --git a/LayerParallelLearning/Data_Generator/nonlin_reg.py b/LayerParallelLearning/Data_Generator/nonlin_reg.py
new file mode 100644
index 0000000..ff7642c
--- /dev/null
+++ b/LayerParallelLearning/Data_Generator/nonlin_reg.py
@@ -0,0 +1,139 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from sklearn.metrics import mean_squared_error
+from sklearn.preprocessing import MinMaxScaler
+
+
+def f(x, order):
+    """
+
+    :param x: input array (6 * 1)
+    :param order: order of non-linearity
+    :return: y = f(x)
+    """
+    col_powers = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
+    y = 1.0 + np.sum(np.power(x, col_powers), axis=1)
+
+    if order == 1:
+        # p(x) = 1 + x_1¹ + x_2² + x_3³ + x_4⁴ + x_5⁵ + x_6⁶ for x_i in [0,4]
+        # f(x) = min(p(x), 400)
+        y[y > 400] = 400
+
+    elif order == 2:
+        y[(400 < y) & (y < 800)] = 400
+        y[y >= 800] = 800
+
+    elif order == 3:
+        y[(400 <= y) & (y < 800)] = 400
+        y[(800 <= y) & (y < 1200)] = 800
+        y[y >= 1200] = 1200
+
+    elif order == 4:
+        y[(400 <= y) & (y < 800)] = 400
+        y[(800 <= y) & (y < 1200)] = 800
+        y[(1200 <= y) & (y < 1600)] = 1200
+        y[y >= 1600] = 1600
+
+    return y
+
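+# Quick check (a sketch, not used by the generator): evaluate f on one random sample
+# x_demo = np.random.uniform(0.0, 4.0, size=(1, 6))
+# print(f(x_demo, order=1))  # one value, clipped to at most 400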
+
+# Number of training, validation, and testing samples to be generated
+#n_train = 6.75 * pow(10, 6)
+#n_val   = 0.75 * pow(10, 6)
+#n_test  = 2.5  * pow(10, 6)
+n_train = 5
+n_val   = 2
+n_test  = 10
+
+# Training data generation
+'''
+Information:
+  Set A <--> training + validation (known data)
+  Set B <--> testing (unknown data)
+
+Params:
+  x_A : (n_train + n_val) x 6 input array
+  y_A : one scalar output of the non-linear function per sample
+  f : x_A -> y_A
+'''
+# Generate the inputs: each row is a 6-vector drawn uniformly from [0, 4]
+x_A = np.random.uniform(low=0.0, high=4.0, size=(int(n_train + n_val), 6))
+x_B = np.random.uniform(low=0.0, high=4.0, size=(int(n_test), 6))
+nonlin_order = 4 # Different non-linear datasets <-> order
+
+# Evaluate the non-linear function
+y_A = f(x_A, nonlin_order)
+y_B = f(x_B, nonlin_order)
+
+#### Normalize the data ####
+
+
+# Compute min-max scaling parameters for the features (per-feature min and max)
+x_A_min = np.min(x_A, axis=0)
+x_A_max = np.max(x_A, axis=0)
+x_A_scaled = (x_A - x_A_min)/(x_A_max - x_A_min)
+
+# Compute min-max scaling parameters for the function output
+y_A_min = np.min(y_A, axis=0)
+y_A_max = np.max(y_A, axis=0)
+output_scalars = [y_A_min, y_A_max]
+y_A_scaled = (y_A - y_A_min)/(y_A_max - y_A_min)
+
+# Normalize the data with parameters from training dataset
+x_B_scaled = (x_B - x_A_min)/(x_A_max - x_A_min)
+y_B_scaled = (y_B - y_A_min)/(y_A_max - y_A_min)
+
+# Store the output scaling parameters: Required for inverse transform
+with open('output_scaling_params.dat', 'w') as f_scale:
+    np.savetxt(f_scale, output_scalars)
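+# The inverse transform for predictions is then: y = y_scaled * (y_A_max - y_A_min) + y_A_min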
+
+# Store all 3 datasets: training, validation, and testing
+with open('features_training.dat', 'w') as f1:
+    np.savetxt(f1, x_A_scaled[0:n_train, :])
+
+with open('output_training.dat', 'w') as f2:
+    np.savetxt(f2, y_A_scaled[0:n_train])
+
+with open('features_validation.dat', 'w') as f3:
+    np.savetxt(f3, x_A_scaled[n_train:, :])
+
+with open('output_validation.dat', 'w') as f4:
+    np.savetxt(f4, y_A_scaled[n_train:])
+
+with open('features_testing.dat', 'w') as f5:
+    np.savetxt(f5, x_B_scaled)
+
+with open('output_testing.dat', 'w') as f6:
+    np.savetxt(f6, y_B_scaled)
+
+'''  
+# Visualize the generated data
+plt.figure()
+plt.plot(y_A[0:400], 'rx')
+plt.title('Result for ResNet Regression')
+plt.ylabel('Y value')
+plt.xlabel('Instance')
+plt.legend(['Real value'], loc='upper right')
+plt.show()
+'''
+
+# Sanity check
+# print(np.shape(y_A))
+
+'''
+Sanity checks
+x_a = a = np.arange(9).reshape(3,3)
+print(x_a)
+col_p = [1.0,2.0,3.0]
+y_a = np.power(x_a, col_p)
+print(y_a)
+y_b = np.sum(np.power(x_a, col_p), axis=1)
+print(y_b)
+y_c = 1.0 + np.sum(np.power(x_a, col_p), axis=1)
+print(y_c)
+print(np.shape(y_c))
+'''
diff --git a/LayerParallelLearning/LReLU.sh b/LayerParallelLearning/LReLU.sh
new file mode 100755
index 0000000..8b7a2d2
--- /dev/null
+++ b/LayerParallelLearning/LReLU.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Shell script to run the experiments one configuration at a time
+# Hidden-layer counts chosen to suit the 8x8 case
+# With more layers, regularization should help quite a bit
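+# Note: mpirun -np <P> launches P MPI ranks, i.e. the processors in time (P_t) from the cfg comments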
+
+mpirun -np 1 ./main examples/poisson_experiments/exp5_LReLU/8_Poly_1/poisson.cfg
+python plotting_routines/main.py ../examples/poisson_experiments/exp5_LReLU/8_Poly_1/
+
+mpirun -np 2 ./main examples/poisson_experiments/exp5_LReLU/8_Poly_2/poisson.cfg
+python plotting_routines/main.py ../examples/poisson_experiments/exp5_LReLU/8_Poly_2/
+
+mpirun -np 4 ./main examples/poisson_experiments/exp5_LReLU/8_Poly_3/poisson.cfg
+python plotting_routines/main.py ../examples/poisson_experiments/exp5_LReLU/8_Poly_3/
diff --git a/LayerParallelLearning/examples/SSHE_Analytical/single_solution/single_solution.cfg b/LayerParallelLearning/examples/SSHE_Analytical/single_solution/single_solution.cfg
new file mode 100644
index 0000000..9e28c9c
--- /dev/null
+++ b/LayerParallelLearning/examples/SSHE_Analytical/single_solution/single_solution.cfg
@@ -0,0 +1,130 @@
+################################
+# Data set 
+################################
+
+# relative data folder location 
+datafolder = examples/SSHE_Analytical/single_solution/
+# filename of training data feature vectors
+ftrain_ex = features_training.dat
+# filename of training data labels/classes
+ftrain_labels = output_training.dat
+# filename of validation data feature 
+fval_ex = features_validation.dat
+# filename of validation data labels/classes 
+fval_labels = output_validation.dat
+# number of training data elements (that many lines will be read!) 
+ntraining = 6000
+# number of validation data elements (that many lines will be read!)
+nvalidation = 1500
+# number of features within the training and validation data set
+nfeatures = 64
+# number of labels/classes within the training and validation data set
+nclasses = 36
+
+# filename for opening weights and bias (set to NONE if not given)
+weightsopenfile = NONE
+# filename for classification weights and bias (set to NONE if not given)
+weightsclassificationfile = NONE
+
+################################
+# Neural Network  
+################################
+
+# number of channels
+nchannels = 40
+# number of layers (including opening layer and classification layer) (nlayer >= 3 !)
+nlayers = 3
+# final time
+T = 10.0
+# Activation function ("tanh" or "ReLu" or "SmoothReLu" or "LRELU")
+activation = Linear
+# Type of network ("dense" the default, or "convolutional")
+network_type = dense 
+# Opening layer type.  
+#  "replicate": replicate image for each convolution.  
+#  "activate": same as replicate, only apply tuned, shifted tanh activation function for MNIST. 
+type_openlayer = activate
+# factor for scaling initial opening layer weights and bias
+weights_open_init = 1e-3
+# factor for scaling initial weights and bias of intermediate layers
+weights_init = 1e-3
+# factor for scaling initial classification weights and bias 
+weights_class_init = 1e-3
+
+################################
+# XBraid 
+################################
+
+# coarsening factor on level 0
+#   generally, cfactor0 = nlayers / P_t
+#   where P_t is the processors in time, and nlayers is the number of time-steps
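+#   e.g. nlayers = 32 with P_t = 4 time-processors gives cfactor0 = 8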
+braid_cfactor0 = 2 
+# coarsening factor on all other levels
+braid_cfactor = 2 
+# maximum number of levels 
+braid_maxlevels = 10
+# minimum allowed coarse time grid size (values in 10-30 are usually best)
+braid_mincoarse = 10
+# maximum number of iterations
+braid_maxiter = 2
+# absolute tolerance
+braid_abstol = 1e-15
+# absolute adjoint tolerance
+braid_adjtol = 1e-15
+# printlevel
+braid_printlevel = 1
+# access level
+braid_accesslevel = 0 
+# skip work on downcycle?
+braid_setskip = 0 
+# V-cycle (0) or full multigrid  (1)
+braid_fmg = 0
+# Number of CF relaxations
+braid_nrelax = 1
+# Number of CF relaxations on level 0  (1 or 0 are usually the best values)
+braid_nrelax0 = 0
+
+####################################
+# Optimization
+####################################
+# Type of batch selection ("deterministic" or "stochastic")
+# deterministic:
+# fixes batch size => trains on this one
+#
+# stochastic: uses some (dead) pool
+# batch elements are randomly chosen in each iteration during training
+# smaller batch size makes sense
+batch_type = stochastic
+# Batch size
+nbatch = 750
+# relaxation param for tikhonov term
+gamma_tik = 1e-5
+# relaxation param for time-derivative term
+gamma_ddt = 1e-5
+# relaxation param for tikhonov term of classification weights 
+gamma_class = 1e-5
+# stepsize selection type ("fixed" or "backtrackingLS" or "oneoverk")
+# determines how to choose alpha in design update x_new = x_old - alpha * direction
+# fixed          : constant alpha being the initial stepsize
+# backtrackingLS : find alpha from backtracking linesearch, starting at initial stepsize
+# oneoverk       : alpha = 1/k  where k is the current optimization iteration index
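+# e.g. stepsize = 1.0 with ls_factor = 0.5 tries 1.0, 0.5, 0.25, ... for at most ls_maxiter steps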
+stepsize_type = backtrackingLS
+# initial stepsize
+stepsize = 1.0 #1.0
+# maximum number of optimization iterations
+optim_maxiter = 1000
+# absolute stopping criterion for the gradient norm
+gtol = 1e-4
+# maximum number of linesearch iterations
+ls_maxiter = 15
+# factor for modifying the stepsize within a linesearch iteration
+ls_factor = 0.5
+# Hessian Approximation ("BFGS", "L-BFGS" or "Identity")
+hessian_approx = L-BFGS
+# number of stages for l-bfgs method 
+lbfgs_stages = 10
+# level for validation computation: 
+#  -1 = never validate
+#   0 = validate only after optimization finishes. 
+#   1 = validate in each optimization iteration
+validationlevel = 1
diff --git a/LayerParallelLearning/examples/SSHE_numerical/Poisson/8_trig/SSHE_num.cfg b/LayerParallelLearning/examples/SSHE_numerical/Poisson/8_trig/SSHE_num.cfg
index 62d9235..7a1899a 100644
--- a/LayerParallelLearning/examples/SSHE_numerical/Poisson/8_trig/SSHE_num.cfg
+++ b/LayerParallelLearning/examples/SSHE_numerical/Poisson/8_trig/SSHE_num.cfg
@@ -31,15 +31,15 @@ weightsclassificationfile = NONE
 ################################
 
 # number of channels
-nchannels = 36
+nchannels = 256
 # number of layers (including opening layer and classification layer) (nlayer >= 3 !)
-nlayers = 4
+nlayers = 10
 # final time
 T = 10.0
 # Activation function ("tanh" or "ReLu" or "SmoothReLu" or "LRELU")
 activation = Linear
 # Type of network ("dense" the default, or "convolutional")
-network_type = dense 
+network_type = convolutional 
 # Opening layer type.  
 #  "replicate": replicate image for each convolution.  
 #  "activate": same as replicate, only apply tuned, shifted tanh activation function for MNIST. 
diff --git a/LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_1/poisson.cfg b/LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_1/poisson.cfg
similarity index 98%
rename from LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_1/poisson.cfg
rename to LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_1/poisson.cfg
index a17581f..2cafff9 100644
--- a/LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_1/poisson.cfg	
+++ b/LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_1/poisson.cfg
@@ -3,7 +3,7 @@
 ################################
 
 # relative data folder location 
-datafolder = examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_1/
+datafolder = examples/poisson_experiments/exp4_tanh/8_Poly_1/
 # filename of training data feature vectors
 ftrain_ex = features_training.dat
 # filename of training data labels/classes
diff --git a/LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_2/poisson.cfg b/LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_2/poisson.cfg
similarity index 98%
rename from LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_2/poisson.cfg
rename to LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_2/poisson.cfg
index 5fe0c9f..0367eed 100644
--- a/LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_2/poisson.cfg	
+++ b/LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_2/poisson.cfg
@@ -3,7 +3,7 @@
 ################################
 
 # relative data folder location 
-datafolder = examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_2/
+datafolder = examples/poisson_experiments/exp4_tanh/8_Poly_2/
 # filename of training data feature vectors
 ftrain_ex = features_training.dat
 # filename of training data labels/classes
diff --git a/LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_3/poisson.cfg b/LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_3/poisson.cfg
similarity index 98%
rename from LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_3/poisson.cfg
rename to LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_3/poisson.cfg
index ec3cb49..dd5488c 100644
--- a/LayerParallelLearning/examples/poisson_experiments/exp4_reg_datanorm _tanh/8_Poly_3/poisson.cfg	
+++ b/LayerParallelLearning/examples/poisson_experiments/exp4_tanh/8_Poly_3/poisson.cfg
@@ -3,7 +3,7 @@
 ################################
 
 # relative data folder location 
-datafolder = examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_3/
+datafolder = examples/poisson_experiments/exp4_tanh/8_Poly_3/
 # filename of training data feature vectors
 ftrain_ex = features_training.dat
 # filename of training data labels/classes
diff --git a/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_1/poisson.cfg b/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_1/poisson.cfg
new file mode 100644
index 0000000..fb1b604
--- /dev/null
+++ b/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_1/poisson.cfg
@@ -0,0 +1,130 @@
+################################
+# Data set 
+################################
+
+# relative data folder location 
+datafolder = examples/poisson_experiments/exp5_LReLU/8_Poly_1/
+# filename of training data feature vectors
+ftrain_ex = features_training.dat
+# filename of training data labels/classes
+ftrain_labels = output_training.dat
+# filename of validation data feature 
+fval_ex = features_validation.dat
+# filename of validation data labels/classes 
+fval_labels = output_validation.dat
+# number of training data elements (that many lines will be read!) 
+ntraining = 6000
+# number of validation data elements (that many lines will be read!)
+nvalidation = 1500
+# number of features within the training and validation data set
+nfeatures = 64
+# number of labels/classes within the training and validation data set
+nclasses = 36
+
+# filename for opening weights and bias (set to NONE if not given)
+weightsopenfile = NONE
+# filename for classification weights and bias (set to NONE if not given)
+weightsclassificationfile = NONE
+
+################################
+# Neural Network  
+################################
+
+# number of channels
+nchannels = 64
+# number of layers (including opening layer and classification layer) (nlayer >= 3 !)
+nlayers = 3
+# final time
+T = 10.0
+# Activation function ("tanh" or "ReLu" or "SmoothReLu" or "LRELU")
+activation = LRELU
+# Type of network ("dense" the default, or "convolutional")
+network_type = dense 
+# Opening layer type.  
+#  "replicate": replicate image for each convolution.  
+#  "activate": same as replicate, only apply tuned, shifted tanh activation function for MNIST. 
+type_openlayer = activate
+# factor for scaling initial opening layer weights and bias
+weights_open_init = 1e-3
+# factor for scaling initial weights and bias of intermediate layers
+weights_init = 1e-3
+# factor for scaling initial classification weights and bias 
+weights_class_init = 1e-3
+
+################################
+# XBraid 
+################################
+
+# coarsening factor on level 0
+#   generally, cfactor0 = nlayers / P_t
+#   where P_t is the processors in time, and nlayers is the number of time-steps
+braid_cfactor0 = 2 
+# coarsening factor on all other levels
+braid_cfactor = 2 
+# maximum number of levels 
+braid_maxlevels = 10
+# minimum allowed coarse time grid size (values in 10-30 are usually best)
+braid_mincoarse = 10
+# maximum number of iterations
+braid_maxiter = 2
+# absolute tolerance
+braid_abstol = 1e-15
+# absolute adjoint tolerance
+braid_adjtol = 1e-15
+# printlevel
+braid_printlevel = 1
+# access level
+braid_accesslevel = 0 
+# skip work on downcycle?
+braid_setskip = 0 
+# V-cycle (0) or full multigrid  (1)
+braid_fmg = 0
+# Number of CF relaxations
+braid_nrelax = 1
+# Number of CF relaxations on level 0  (1 or 0 are usually the best values)
+braid_nrelax0 = 0
+
+####################################
+# Optimization
+####################################
+# Type of batch selection ("deterministic" or "stochastic")
+# deterministic:
+# fixes batch size => trains on this one
+#
+# stochastic: uses some (dead) pool
+# batch elements are randomly chosen in each iteration during training
+# smaller batch size makes sense
+batch_type = stochastic
+# Batch size
+nbatch = 750
+# relaxation param for tikhonov term
+gamma_tik = 1e-6
+# relaxation param for time-derivative term
+gamma_ddt = 1e-6
+# relaxation param for tikhonov term of classification weights 
+gamma_class = 1e-6
+# stepsize selection type ("fixed" or "backtrackingLS" or "oneoverk")
+# determines how to choose alpha in design update x_new = x_old - alpha * direction
+# fixed          : constant alpha being the initial stepsize
+# backtrackingLS : find alpha from backtracking linesearch, starting at initial stepsize
+# oneoverk       : alpha = 1/k  where k is the current optimization iteration index
+stepsize_type = backtrackingLS
+# initial stepsize
+stepsize = 1.0 #1.0
+# maximum number of optimization iterations
+optim_maxiter = 5600
+# absolute stopping criterion for the gradient norm
+gtol = 1e-4
+# maximum number of linesearch iterations
+ls_maxiter = 15
+# factor for modifying the stepsize within a linesearch iteration
+ls_factor = 0.5
+# Hessian Approximation ("BFGS", "L-BFGS" or "Identity")
+hessian_approx = L-BFGS
+# number of stages for l-bfgs method 
+lbfgs_stages = 10
+# level for validation computation: 
+#  -1 = never validate
+#   0 = validate only after optimization finishes. 
+#   1 = validate in each optimization iteration
+validationlevel = 1
diff --git a/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_2/poisson.cfg b/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_2/poisson.cfg
new file mode 100644
index 0000000..351d722
--- /dev/null
+++ b/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_2/poisson.cfg
@@ -0,0 +1,130 @@
+################################
+# Data set 
+################################
+
+# relative data folder location 
+datafolder = examples/poisson_experiments/exp5_LReLU/8_Poly_2/
+# filename of training data feature vectors
+ftrain_ex = features_training.dat
+# filename of training data labels/classes
+ftrain_labels = output_training.dat
+# filename of validation data feature 
+fval_ex = features_validation.dat
+# filename of validation data labels/classes 
+fval_labels = output_validation.dat
+# number of training data elements (that many lines will be read!) 
+ntraining = 6000
+# number of validation data elements (that many lines will be read!)
+nvalidation = 1500
+# number of features within the training and validation data set
+nfeatures = 64
+# number of labels/classes within the training and validation data set
+nclasses = 36
+
+# filename for opening weights and bias (set to NONE if not given)
+weightsopenfile = NONE
+# filename for classification weights and bias (set to NONE if not given)
+weightsclassificationfile = NONE
+
+################################
+# Neural Network  
+################################
+
+# number of channels
+nchannels = 50
+# number of layers (including opening layer and classification layer) (nlayer >= 3 !)
+nlayers = 4
+# final time
+T = 10.0
+# Activation function ("tanh" or "ReLu" or "SmoothReLu" or "LRELU")
+activation = tanh
+# Type of network ("dense" the default, or "convolutional")
+network_type = dense 
+# Opening layer type.  
+#  "replicate": replicate image for each convolution.  
+#  "activate": same as replicate, only apply tuned, shifted tanh activation function for MNIST. 
+type_openlayer = activate
+# factor for scaling initial opening layer weights and bias
+weights_open_init = 1e-3
+# factor for scaling initial weights and bias of intermediate layers
+weights_init = 1e-3
+# factor for scaling initial classification weights and bias 
+weights_class_init = 1e-3
+
+################################
+# XBraid 
+################################
+
+# coarsening factor on level 0
+#   generally, cfactor0 = nlayers / P_t
+#   where P_t is the processors in time, and nlayers is the number of time-steps
+braid_cfactor0 = 2 
+# coarsening factor on all other levels
+braid_cfactor = 2 
+# maximum number of levels 
+braid_maxlevels = 10
+# minimum allowed coarse time grid size (values in 10-30 are usually best)
+braid_mincoarse = 10
+# maximum number of iterations
+braid_maxiter = 2
+# absolute tolerance
+braid_abstol = 1e-15
+# absolute adjoint tolerance
+braid_adjtol = 1e-15
+# printlevel
+braid_printlevel = 1
+# access level
+braid_accesslevel = 0 
+# skip work on downcycle?
+braid_setskip = 0 
+# V-cycle (0) or full multigrid  (1)
+braid_fmg = 0
+# Number of CF relaxations
+braid_nrelax = 1
+# Number of CF relaxations on level 0  (1 or 0 are usually the best values)
+braid_nrelax0 = 0
+
+####################################
+# Optimization
+####################################
+# Type of batch selection ("deterministic" or "stochastic")
+# deterministic:
+# fixes batch size => trains on this one
+#
+# stochastic: uses some (dead) pool
+# batch elements are randomly chosen in each iteration during training
+# smaller batch size makes sense
+batch_type = stochastic
+# Batch size
+nbatch = 750
+# relaxation param for tikhonov term
+gamma_tik = 1e-5
+# relaxation param for time-derivative term
+gamma_ddt = 1e-5
+# relaxation param for tikhonov term of classification weights 
+gamma_class = 1e-5
+# stepsize selection type ("fixed" or "backtrackingLS" or "oneoverk")
+# determines how to choose alpha in design update x_new = x_old - alpha * direction
+# fixed          : constant alpha being the initial stepsize
+# backtrackingLS : find alpha from backtracking linesearch, starting at initial stepsize
+# oneoverk       : alpha = 1/k  where k is the current optimization iteration index
+stepsize_type = backtrackingLS
+# initial stepsize
+stepsize = 1.0 #1.0
+# maximum number of optimization iterations
+optim_maxiter = 5600
+# absolute stopping criterion for the gradient norm
+gtol = 1e-4
+# maximum number of linesearch iterations
+ls_maxiter = 15
+# factor for modifying the stepsize within a linesearch iteration
+ls_factor = 0.5
+# Hessian Approximation ("BFGS", "L-BFGS" or "Identity")
+hessian_approx = L-BFGS
+# number of stages for l-bfgs method 
+lbfgs_stages = 10
+# level for validation computation: 
+#  -1 = never validate
+#   0 = validate only after optimization finishes. 
+#   1 = validate in each optimization iteration
+validationlevel = 1
diff --git a/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_3/poisson.cfg b/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_3/poisson.cfg
new file mode 100644
index 0000000..62f7cf1
--- /dev/null
+++ b/LayerParallelLearning/examples/poisson_experiments/exp5_LReLU/8_Poly_3/poisson.cfg
@@ -0,0 +1,130 @@
+################################
+# Data set 
+################################
+
+# relative data folder location 
+datafolder = examples/poisson_experiments/exp5_LReLU/8_Poly_3/
+# filename of training data feature vectors
+ftrain_ex = features_training.dat
+# filename of training data labels/classes
+ftrain_labels = output_training.dat
+# filename of validation data feature 
+fval_ex = features_validation.dat
+# filename of validation data labels/classes 
+fval_labels = output_validation.dat
+# number of training data elements (that many lines will be read!) 
+ntraining = 6000
+# number of validation data elements (that many lines will be read!)
+nvalidation = 1500
+# number of features within the training and validation data set
+nfeatures = 64
+# number of labels/classes within the training and validation data set
+nclasses = 36
+
+# filename for opening weights and bias (set to NONE if not given)
+weightsopenfile = NONE
+# filename for classification weights and bias (set to NONE if not given)
+weightsclassificationfile = NONE
+
+################################
+# Neural Network  
+################################
+
+# number of channels
+nchannels = 50
+# number of layers (including opening layer and classification layer) (nlayer >= 3 !)
+nlayers = 10
+# final time
+T = 10.0
+# Activation function ("tanh" or "ReLu" or "SmoothReLu" or "LRELU")
+activation = LRELU
+# Type of network ("dense" the default, or "convolutional")
+network_type = dense 
+# Opening layer type.  
+#  "replicate": replicate image for each convolution.  
+#  "activate": same as replicate, only apply tuned, shifted tanh activation function for MNIST. 
+type_openlayer = activate
+# factor for scaling initial opening layer weights and bias
+weights_open_init = 1e-3
+# factor for scaling initial weights and bias of intermediate layers
+weights_init = 1e-3
+# factor for scaling initial classification weights and bias 
+weights_class_init = 1e-3
+
+################################
+# XBraid 
+################################
+
+# coarsening factor on level 0
+#   generally, cfactor0 = nlayers / P_t
+#   where P_t is the processors in time, and nlayers is the number of time-steps
+braid_cfactor0 = 2 
+# coarsening factor on all other levels
+braid_cfactor = 2 
+# maximum number of levels 
+braid_maxlevels = 10
+# minimum allowed coarse time grid size (values in 10-30 are usually best)
+braid_mincoarse = 10
+# maximum number of iterations
+braid_maxiter = 2
+# absolute tolerance
+braid_abstol = 1e-15
+# absolute adjoint tolerance
+braid_adjtol = 1e-15
+# printlevel
+braid_printlevel = 1
+# access level
+braid_accesslevel = 0 
+# skip work on downcycle?
+braid_setskip = 0 
+# V-cycle (0) or full multigrid  (1)
+braid_fmg = 0
+# Number of CF relaxations
+braid_nrelax = 1
+# Number of CF relaxations on level 0  (1 or 0 are usually the best values)
+braid_nrelax0 = 0
+
+####################################
+# Optimization
+####################################
+# Type of batch selection ("deterministic" or "stochastic")
+# deterministic:
+# fixes batch size => trains on this one
+#
+# stochastic: uses some (dead) pool
+# batch elements are randomly chosen in each iteration during training
+# smaller batch size makes sense
+batch_type = stochastic
+# Batch size
+nbatch = 750
+# relaxation param for tikhonov term
+gamma_tik = 1e-7
+# relaxation param for time-derivative term
+gamma_ddt = 1e-7
+# relaxation param for tikhonov term of classification weights 
+gamma_class = 1e-7
+# stepsize selection type ("fixed" or "backtrackingLS" or "oneoverk")
+# determines how to choose alpha in design update x_new = x_old - alpha * direction
+# fixed          : constant alpha being the initial stepsize
+# backtrackingLS : find alpha from backtracking linesearch, starting at initial stepsize
+# oneoverk       : alpha = 1/k  where k is the current optimization iteration index
+stepsize_type = backtrackingLS
+# initial stepsize
+stepsize = 1.0 #1.0
+# maximum number of optimization iterations
+optim_maxiter = 5600
+# absolute stopping criterion for the gradient norm
+gtol = 1e-4
+# maximum number of linesearch iterations
+ls_maxiter = 15
+# factor for modifying the stepsize within a linesearch iteration
+ls_factor = 0.5
+# Hessian Approximation ("BFGS", "L-BFGS" or "Identity")
+hessian_approx = L-BFGS
+# number of stages for l-bfgs method 
+lbfgs_stages = 10
+# level for validation computation: 
+#  -1 = never validate
+#   0 = validate only after optimization finishes. 
+#   1 = validate in each optimization iteration
+validationlevel = 1
diff --git a/LayerParallelLearning/examples/quadratic/quadratic.cfg b/LayerParallelLearning/examples/quadratic/quadratic.cfg
new file mode 100644
index 0000000..25c1b16
--- /dev/null
+++ b/LayerParallelLearning/examples/quadratic/quadratic.cfg
@@ -0,0 +1,130 @@
+################################
+# Data set 
+################################
+
+# relative data folder location 
+datafolder = examples/quadratic/
+# filename of training data feature vectors
+ftrain_ex = features_training.dat
+# filename of training data labels/classes
+ftrain_labels = output_training.dat
+# filename of validation data feature 
+fval_ex = features_validation.dat
+# filename of validation data labels/classes 
+fval_labels = output_validation.dat
+# number of training data elements (that many lines will be read!) 
+ntraining = 6000
+# number of validation data elements (that many lines will be read!)
+nvalidation = 1500
+# number of features within the training and validation data set
+nfeatures = 28
+# number of labels/classes within the training and validation data set
+nclasses = 36
+
+# filename for opening weights and bias (set to NONE if not given)
+weightsopenfile = NONE
+# filename for classification weights and bias (set to NONE if not given)
+weightsclassificationfile = NONE
+
+################################
+# Neural Network  
+################################
+
+# number of channels
+nchannels = 10
+# number of layers (including opening layer and classification layer) (nlayer >= 3 !)
+nlayers = 10
+# final time
+T = 10.0
+# Activation function ("tanh" or "ReLu" or "SmoothReLu" or "LRELU")
+activation = Linear
+# Type of network ("dense" the default, or "convolutional")
+network_type = dense 
+# Opening layer type.  
+#  "replicate": replicate image for each convolution.  
+#  "activate": same as replicate, only apply tuned, shifted tanh activation function for MNIST. 
+type_openlayer = activate
+# factor for scaling initial opening layer weights and bias
+weights_open_init = 1e-3
+# factor for scaling initial weights and bias of intermediate layers
+weights_init = 1e-3
+# factor for scaling initial classification weights and bias 
+weights_class_init = 1e-3
+
+################################
+# XBraid 
+################################
+
+# coarsening factor on level 0
+#   generally, cfactor0 = nlayers / P_t
+#   where P_t is the number of processors in time and nlayers is the number of time-steps
+#   (e.g. nlayers = 10 on P_t = 5 time-processors gives cfactor0 = 2)
+# coarsening factor on all other levels
+braid_cfactor = 2 
+# maximum number of levels 
+braid_maxlevels = 10
+# minimum allowed coarse time grid size (values in 10-30 are usually best)
+braid_mincoarse = 10
+# maximum number of iterations
+braid_maxiter = 2
+# absolute tolerance
+braid_abstol = 1e-15
+# absolute adjoint tolerance
+braid_adjtol = 1e-15
+# printlevel
+braid_printlevel = 1
+# access level
+braid_accesslevel = 0 
+# skip work on downcycle?
+braid_setskip = 0 
+# V-cycle (0) or full multigrid  (1)
+braid_fmg = 0
+# Number of CF relaxations
+braid_nrelax = 1
+# Number of CF relaxations on level 0  (1 or 0 are usually the best values)
+braid_nrelax0 = 0
+
+####################################
+# Optimization
+####################################
+# Type of batch selection ("deterministic" or "stochastic")
+# deterministic:
+# a fixed batch is chosen once and reused for every training iteration
+#
+# stochastic: batch elements are drawn at random from the training
+# pool in each iteration; a smaller batch size then makes sense
+# (see the sketch after this file's diff)
+batch_type = stochastic
+# Batch size
+nbatch = 750
+# relaxation param for the Tikhonov regularization term
+gamma_tik = 1e-5
+# relaxation param for the time-derivative regularization term
+gamma_ddt = 1e-5
+# relaxation param for the Tikhonov term on the classification weights
+gamma_class = 1e-5
+# stepsize selection type ("fixed" or "backtrackingLS" or "oneoverk")
+# determines how to choose alpha in the design update x_new = x_old - alpha * direction
+# fixed          : constant alpha equal to the initial stepsize
+# backtrackingLS : backtracking linesearch starting at the initial stepsize (see the sketch above)
+# oneoverk       : alpha = 1/k  where k is the current optimization iteration index
+stepsize_type = backtrackingLS
+# initial stepsize
+stepsize = 1.0
+# maximum number of optimization iterations
+optim_maxiter = 5600
+# absolute stopping criterion for the gradient norm
+gtol = 1e-4
+# maximum number of linesearch iterations
+ls_maxiter = 15
+# factor for modifying the stepsize within a linesearch iteration
+ls_factor = 0.5
+# Hessian Approximation ("BFGS", "L-BFGS" or "Identity")
+hessian_approx = L-BFGS
+# number of stages for l-bfgs method 
+lbfgs_stages = 10
+# level for validation computation: 
+#  -1 = never validate
+#   0 = validate only after optimization finishes. 
+#   1 = validate in each optimization iteration
+validationlevel = 1
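A small Python sketch of the two batch_type modes described in the comments above
(illustrative only; the C++ code may sample differently). The values of ntraining
and nbatch match this config file.

    import numpy as np

    rng = np.random.default_rng(0)
    ntraining, nbatch = 6000, 750

    # deterministic: one fixed batch, reused in every training iteration
    fixed_batch = np.arange(nbatch)

    # stochastic: redraw nbatch elements from the full training pool each iteration
    for it in range(3):
        batch = rng.choice(ntraining, size=nbatch, replace=False)
        # ... evaluate the batch loss/gradient on 'batch' and update the design ...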
diff --git a/LayerParallelLearning/plotting_routines/main.py b/LayerParallelLearning/plotting_routines/main.py
index 853bf8b..07a8ac3 100644
--- a/LayerParallelLearning/plotting_routines/main.py
+++ b/LayerParallelLearning/plotting_routines/main.py
@@ -63,9 +63,15 @@ class Plotting:
             self.ResNet_Prediction[::, 1:self.lenY - 1:1, 1:self.lenX - 1:1] = np.reshape([x.strip().split(' ') for x in f2],
                                                                                (self.v_size, self.rows_in, self.cols_in))
         with open(path3, 'r') as f3:
+
+            # This branch assumes the rhs 'f' is supplied together with the boundary data:
             bdry_plus_f = np.reshape([x.strip().split(' ') for x in f3], (self.v_size, (self.NGridPts_Bdry + self.NGridPts_int)))
             self.Boundary = bdry_plus_f[:, 0:self.NGridPts_Bdry:1]
 
+            # Use the following instead (and comment out the above) if only boundary data, without 'f', is supplied:
+            # self.Boundary = np.reshape([x.strip().split(' ') for x in f3], (self.v_size, self.NGridPts_Bdry))
+
+
         with open(path4, 'r') as f5:
             self.loss = np.loadtxt(path.join(in_path, 'loss_history_pe.dat'))
             self.iter = self.loss[:, 0]
@@ -84,6 +90,7 @@ class Plotting:
         for v in range(Nplots):
             fig = plt.figure()
             fig.suptitle('Comparison of temperature contours', fontsize= 14)
+            #fig.suptitle('Comparison of functions', fontsize=14)
             plt.subplot(1, 2, 1)
             plt.title("Ground truth solution- " + str(v))
             plt.contourf(self.X, self.Y, self.Numerical_Sol[v], colorinterpolation, cmap=colourMap)
@@ -157,9 +164,12 @@ Nplots = 20
 data_path = path.abspath(path.dirname(__file__))
 
 # If you want to supply a command-line argument- use this!
-path_CL = sys.argv[1]
-# data_path = path.join(data_path, "../examples/SSHE_Analytical/Poisson/8_Poly/")
-in_path = path.join(data_path, path_CL)
+# path_CL = sys.argv[1]
+path_CL = "../examples/SSHE_Analytical/single_solution/"
+# path_CL = "../examples/quadratic/"
+data_path = path.join(data_path, "../examples/SSHE_Analytical/single_solution/")
+# in_path = path.join(data_path, path_CL)
+in_path = data_path
 out_path = path.join(in_path, "figures/")
 
 # To show the images or not     
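The hunk above swaps the command-line path for a hardcoded one and leaves path_CL
assigned but unused. A small sketch (using the script's existing names, untested
against the rest of main.py) that keeps both behaviours, falling back to the
hardcoded default only when no argument is given:

    import sys
    from os import path

    data_path = path.abspath(path.dirname(__file__))
    # use the command-line argument when present, otherwise the hardcoded default
    path_CL = sys.argv[1] if len(sys.argv) > 1 else "../examples/SSHE_Analytical/single_solution/"
    in_path = path.join(data_path, path_CL)
    out_path = path.join(in_path, "figures/")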
diff --git a/LayerParallelLearning/src/main.cpp b/LayerParallelLearning/src/main.cpp
index b67d5bf..5df64a9 100644
--- a/LayerParallelLearning/src/main.cpp
+++ b/LayerParallelLearning/src/main.cpp
@@ -57,8 +57,14 @@ int main(int argc, char *argv[]) {
 
   /* --- XBraid --- */
   myBraidApp *primaltrainapp;         /**< Braid App for training data */
+  /* C: I think this drives the forward layer-parallel pass for the training data */
+  
   myAdjointBraidApp *adjointtrainapp; /**< Adjoint Braid for training data */
+  /* C: I think this drives the backward (adjoint) layer-parallel pass for the training data */
+
   myBraidApp *primalvalapp;           /**< Braid App for validation data */
+  /* C: I think this drives the forward layer-parallel pass for the validation data */
+
 
   /* --- Optimization --- */
   int ndesign_local;  /**< Number of local design variables on this processor */
@@ -86,6 +92,10 @@ int main(int argc, char *argv[]) {
   MyReal StartTime, StopTime, myMB, globalMB;
   MyReal UsedTime = 0.0;
 
+  /* Initialize the losses for the loss functions newly incorporated for regression
+     (usual definitions are sketched after this diff):
+     MAPE:  Mean Absolute Percentage Error
+     WMAPE: Weighted Mean Absolute Percentage Error */
   MyReal loss_MAPE_train = 0.0;
   MyReal loss_MAPE_val = 0.0; 
   MyReal loss_WMAPE_train = 0.0; 
@@ -162,6 +172,8 @@ int main(int argc, char *argv[]) {
   validationdata->readData(config->datafolder, config->fval_ex,
                            config->fval_labels);
 
+  // C: TODO: initialize and read the testing data here
+
   /* Initialize XBraid */
   primaltrainapp =
       new myBraidApp(trainingdata, network, config, MPI_COMM_WORLD);
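For reference, the two regression losses named in the comment above are
conventionally defined as below (a NumPy sketch of the textbook definitions;
the exact normalisation in the C++ code may differ).

    import numpy as np

    def mape(y_true, y_pred):
        # Mean Absolute Percentage Error: mean of |error| / |truth|, in percent
        y_true, y_pred = np.asarray(y_true, float), np.asarray(y_pred, float)
        return 100.0 * np.mean(np.abs(y_true - y_pred) / np.abs(y_true))

    def wmape(y_true, y_pred):
        # Weighted MAPE: total |error| over total |truth|; more robust than MAPE
        # when individual labels are close to zero
        y_true, y_pred = np.asarray(y_true, float), np.asarray(y_pred, float)
        return 100.0 * np.sum(np.abs(y_true - y_pred)) / np.sum(np.abs(y_true))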
diff --git a/LayerParallelLearning/src/network.cpp b/LayerParallelLearning/src/network.cpp
index 1c1a161..e452385 100644
--- a/LayerParallelLearning/src/network.cpp
+++ b/LayerParallelLearning/src/network.cpp
@@ -64,7 +64,26 @@ void Network::createLayerBlock(int StartLayerID, int EndLayerID, Config *config)
   nchannels = config->nchannels;
   nclasses = config->nclasses;
   dt = (config->T) / (MyReal)(config->nlayers - 2);  // nlayers-2 = nhiddenlayers
+  /*
+    C:
+    Significance of 'dt': it is effectively another tuning knob.
 
+    nlayers - 2 <--> number of layers involved in the ResNet propagation, i.e.
+    U_{n+1} = U_n + dt * F(U_n * W_n + b_n)
+
+    Q. What happens if dt != 1?
+    --> 'dt' acts as a weighting factor on the contribution of the transformed
+    input (weights, bias, and non-linearity) relative to the residual connection.
+
+    --> The update is the standard ResNet step only if dt = 1; otherwise it is a
+    modified form of ResNet. So to mimic a ResNet, ensure nlayers - 2 = T (important!):
+    dt = 1 : nlayers - 2 = T --> nlayers = T + 2 --> mimics ResNet
+    dt < 1 : nlayers - 2 > T --> more weight on the unmodified input (residual connection)
+    dt > 1 : nlayers - 2 < T --> more weight on the transformed input (weights, bias, non-linearity)
+
+    --> The layer index 'n' is always an integer, but there is no need for 'dt'
+    to be an integer. (A small sketch of this update follows this diff.)
+  */
   ndesign_local = 0;
   int mylayermax = 0;
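To make the dt comment above concrete, a minimal NumPy sketch of the
forward-Euler/ResNet propagation U_{n+1} = U_n + dt * F(U_n * W_n + b_n) with
dt = T / (nlayers - 2). It only illustrates the role of dt; it is not the
library's propagation code, and the shapes and names are assumptions.

    import numpy as np

    def forward_propagate(U0, weights, biases, T=10.0, activation=np.tanh):
        # dt = T / nhiddenlayers, matching dt = T / (nlayers - 2) in createLayerBlock
        dt = T / len(weights)
        U = U0
        for W, b in zip(weights, biases):
            U = U + dt * activation(U @ W + b)  # residual (forward-Euler) step
        return U

    # with T equal to the number of hidden layers, dt = 1 and each step is a
    # plain ResNet update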
 
diff --git a/LayerParallelLearning/tanh.sh b/LayerParallelLearning/tanh.sh
deleted file mode 100755
index d0a9730..0000000
--- a/LayerParallelLearning/tanh.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-# Bash script to run experiments on one parameter at a time
-# Suitable hidden layers for 8*8 case
-# For increasing layers, regularization should help quite a bit
-
-mpirun -np 1 main examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_1/poisson.cfg
-python plotting_routines/main.py ../examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_1/
-
-mpirun -np 2 main examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_2/poisson.cfg
-python plotting_routines/main.py ../examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_2/
-
-mpirun -np 4 main examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_3/poisson.cfg
-python plotting_routines/main.py ../examples/poisson_experiments/exp4_reg_datanorm_tanh/8_Poly_3/
-- 
GitLab