diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..18bf4120ff7b773dd3e76af0d5e526e2051ee2bc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+# created by virtualenv automatically
+*.cfg
+/.idea
+*.pdf
+*/__pycache__/*
+# Byte-compiled / optimized / DLL files
+__pycache__/
+
+venv/
diff --git a/ex0/code/Blue.png b/ex0/code/Blue.png
new file mode 100644
index 0000000000000000000000000000000000000000..0f6556f222d661f9e26a3e0ec9cef94e859ba5b0
Binary files /dev/null and b/ex0/code/Blue.png differ
diff --git a/ex0/code/Green.png b/ex0/code/Green.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac2cedcfb691f435361f01c5655e9765f4b26e00
Binary files /dev/null and b/ex0/code/Green.png differ
diff --git a/ex0/code/Red.png b/ex0/code/Red.png
new file mode 100644
index 0000000000000000000000000000000000000000..34ae7a1583cfef3225ff2ff06888df436ad8fe17
Binary files /dev/null and b/ex0/code/Red.png differ
diff --git a/ex0/code/img.jpg b/ex0/code/img.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a8d88fb8b799e61b4273680ed5a876d45d889b16
Binary files /dev/null and b/ex0/code/img.jpg differ
diff --git a/ex0/code/img.png b/ex0/code/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..1fcd992f8a533628e9d213a8cd0ec285facf0312
Binary files /dev/null and b/ex0/code/img.png differ
diff --git a/ex0/code/main.py b/ex0/code/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fc695317a788fe19b8aa8a7bc6039dd1dca4eb0
--- /dev/null
+++ b/ex0/code/main.py
@@ -0,0 +1,61 @@
+import numpy
+import cv2
+
+## Done
+## Load Image
+## Show it on screen
+file = cv2.imread('img.png', flags=cv2.IMREAD_COLOR) ## path to the image
+cv2.imshow('Image', file)
+cv2.waitKey()
+cv2.imwrite('img.jpg', file)
+
+###############################################
+## Done
+## Resize Image by a factor of 0.5
+## Show it on screen
+## Save as small.png
+resized_factor = 0.5
+resizedImg = cv2.resize(file, dsize=(int(file.shape[1] * resized_factor), int(file.shape[0] * resized_factor)))
+
+cv2.imshow('resizedImg', resizedImg)
+cv2.waitKey()
+cv2.imwrite('small.png', resizedImg)
+
+###############################################
+## Done
+## Create and save 3 single-channel images from small image
+## one image each channel (r, g, b)
+## Display the channel-images on screen
+#Blue
+Blue = resizedImg.copy()
+Blue[:, :, 1] = 0
+Blue[:, :, 2] = 0
+cv2.imshow('BlueChannel', Blue)
+cv2.waitKey()
+cv2.imwrite('Blue.png', Blue)
+
+#Red
+Red = resizedImg.copy()
+Red[:, :, 0] = 0
+Red[:, :, 1] = 0
+cv2.imshow('RedChannel', Red)
+cv2.waitKey()
+cv2.imwrite('Red.png', Red)
+
+#Green
+Green = resizedImg.copy()
+Green[:, :, 0] = 0
+Green[:, :, 2] = 0
+cv2.imshow('GreenChannel', Green)
+cv2.waitKey()
+cv2.imwrite('Green.png', Green)
+###############################################
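+
+## For reference (a hedged aside, not part of the exercise output above): the
+## more idiomatic way to isolate channels is cv2.split / cv2.merge. The
+## *_split.png filenames below are only illustrative.
+b, g, r = cv2.split(resizedImg)
+zeros = numpy.zeros_like(b)
+cv2.imwrite('Blue_split.png', cv2.merge([b, zeros, zeros]))
+cv2.imwrite('Green_split.png', cv2.merge([zeros, g, zeros]))
+cv2.imwrite('Red_split.png', cv2.merge([zeros, zeros, r]))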
diff --git a/ex0/code/small.png b/ex0/code/small.png
new file mode 100644
index 0000000000000000000000000000000000000000..e65a975342e6cd6132c6237e9033c6931462ce5e
Binary files /dev/null and b/ex0/code/small.png differ
diff --git a/ex1/code/Skeleton/A.png b/ex1/code/Skeleton/A.png
new file mode 100644
index 0000000000000000000000000000000000000000..71d747961144e1e7eb852145b0f34ca189674479
Binary files /dev/null and b/ex1/code/Skeleton/A.png differ
diff --git a/ex1/code/Skeleton/B.png b/ex1/code/Skeleton/B.png
new file mode 100644
index 0000000000000000000000000000000000000000..b8b9da1cc0f4da6f2275eb0a55c8b7eb701c4e31
Binary files /dev/null and b/ex1/code/Skeleton/B.png differ
diff --git a/ex1/code/Skeleton/C.png b/ex1/code/Skeleton/C.png
new file mode 100644
index 0000000000000000000000000000000000000000..9208269e1decec8e5a809095a0fa244a28a742e7
Binary files /dev/null and b/ex1/code/Skeleton/C.png differ
diff --git a/ex1/code/Skeleton/blox.jpg b/ex1/code/Skeleton/blox.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b682b2c0aa3a25890569ca3e63b681c4a7a38604
Binary files /dev/null and b/ex1/code/Skeleton/blox.jpg differ
diff --git a/ex1/code/Skeleton/corners.png b/ex1/code/Skeleton/corners.png
new file mode 100644
index 0000000000000000000000000000000000000000..1569792af2f76861f663143368c7b4b1049ef23f
Binary files /dev/null and b/ex1/code/Skeleton/corners.png differ
diff --git a/ex1/code/Skeleton/dIdx.png b/ex1/code/Skeleton/dIdx.png
new file mode 100644
index 0000000000000000000000000000000000000000..21f4539808fa48c8166f70e9c3d8253d65b6ba8a
Binary files /dev/null and b/ex1/code/Skeleton/dIdx.png differ
diff --git a/ex1/code/Skeleton/dIdy.png b/ex1/code/Skeleton/dIdy.png
new file mode 100644
index 0000000000000000000000000000000000000000..02f6c8a1cec4c33ec7efc4ef590e79b247d3168f
Binary files /dev/null and b/ex1/code/Skeleton/dIdy.png differ
diff --git a/ex1/code/Skeleton/edges.png b/ex1/code/Skeleton/edges.png
new file mode 100644
index 0000000000000000000000000000000000000000..0d1457e99505a6ece3d5b56cf91a1d29370595b2
Binary files /dev/null and b/ex1/code/Skeleton/edges.png differ
diff --git a/ex1/code/Skeleton/main.py b/ex1/code/Skeleton/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..db9e66511420809312a0033d1f595bad74c23e35
--- /dev/null
+++ b/ex1/code/Skeleton/main.py
@@ -0,0 +1,140 @@
+import cv2
+import numpy as np
+
+def show(name, img, x, y):
+    windowStartX  = 10
+    windowStartY  = 50
+    windowXoffset = 5
+    windowYoffset = 40
+
+    w = img.shape[1] + windowXoffset
+    h = img.shape[0] + windowYoffset
+
+    cv2.namedWindow(name)
+    cv2.moveWindow(name, windowStartX + w * x, windowStartY + h * y)
+    cv2.imshow(name, img)
+
+
+def harrisResponseImage(img):
+
+    ## Compute the spatial derivatives in x and y direction.
+
+    dIdx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
+    dIdy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
+    # show("dI/dx", abs(dIdx), 1, 0)
+    # show("dI/dy", abs(dIdy), 2, 0)
+
+    ##########################################################
+    ## Compute Ixx, Iyy, and Ixy with
+    ## Ixx = (dI/dx) * (dI/dx),
+    ## Iyy = (dI/dy) * (dI/dy),
+    ## Ixy = (dI/dx) * (dI/dy).
+    ## Note: The multiplication between the images is element-wise (not a matrix
+    ## multiplication)!!
+
+    Ixx = dIdx * dIdx
+    Iyy = dIdy * dIdy
+    Ixy = dIdx * dIdy
+
+    # show("Ixx", abs(Ixx), 0, 1)
+    # show("Iyy", abs(Iyy), 1, 1)
+    # show("Ixy", abs(Ixy), 2, 1)
+
+    ##########################################################
+    ## Compute the images A,B, and C by blurring the
+    ## images Ixx, Iyy, and Ixy with a
+    ## Gaussian filter of size 3x3 and standard deviation of 1.
+
+    kernelSize = (3, 3)
+    sdev = 1
+
+    A = cv2.GaussianBlur(Ixx, kernelSize, sdev)
+    B = cv2.GaussianBlur(Iyy, kernelSize, sdev)
+    C = cv2.GaussianBlur(Ixy, kernelSize, sdev)
+
+    # show("A", abs(A) * 5, 0, 1);
+    # show("B", abs(B) * 5, 1, 1);
+    # show("C", abs(C) * 5, 2, 1);
+
+    ##########################################################
+    ## TODO 1.4
+    ## Compute the harris response with the following formula:
+    ## R = Det - k * Trace*Trace
+    ## Det = A * B - C * C
+    ## Trace = A + B
+    k = 0.06
+    Det = A * B - C * C
+    Trace = A + B
+    response = Det - k*(Trace**2)
+    ## Normalize the response image
+    dbg = (response - np.min(response)) / (np.max(response) - np.min(response))
+    dbg = dbg.astype(np.float32)
+    # show("Harris Response", dbg, 0, 2)
+
+    ##########################################################
+    cv2.imwrite("dIdx.png", (abs(dIdx) * 255.0))
+    cv2.imwrite("dIdy.png", (abs(dIdy) * 255.0))
+
+    cv2.imwrite("A.png", (abs(A) * 5 * 255.0))
+    cv2.imwrite("B.png", (abs(B) * 5 * 255.0))
+    cv2.imwrite("C.png", (abs(C) * 5 * 255.0))
+
+    cv2.imwrite("response.png", np.uint8(dbg * 255.0))
+    cv2.waitKey(0)
+    return response
+
+def harrisKeypoints(response, threshold = 0.1):
+
+    ## TODO 2.1
+    ## Generate a keypoint for a pixel,
+    ## if the response is larger than the threshold
+    ## and it is a local maximum.
+    ##
+    ## Don't generate keypoints at the image border.
+    ## Note: Keypoints are stored with (x,y) and images are accessed with (y,x)!!
+    points = []
+    h, w = response.shape
+    for y in range(1, h - 1):
+        for x in range(1, w - 1):
+            r = response[y, x]
+            ## Keep only local maxima of the response above the threshold (skip the border)
+            if r > threshold and r == np.max(response[y - 1:y + 2, x - 1:x + 2]):
+                points.append(cv2.KeyPoint(float(x), float(y), 1))
+    return points
+
+def harrisEdges(input, response, edge_threshold=-0.01):
+
+    ## TODO 3.1
+    ## Set edge pixels to red.
+    ##
+    ## A pixel belongs to an edge, if the response is smaller than a threshold
+    ## and it is a minimum in x or y direction.
+    ##
+    ## Don't generate edges at the image border.
+    result = input.copy()
+    h, w = response.shape
+    for y in range(1, h - 1):
+        for x in range(1, w - 1):
+            r = response[y, x]
+            minimum_x = r < response[y, x - 1] and r < response[y, x + 1]
+            minimum_y = r < response[y - 1, x] and r < response[y + 1, x]
+            ## Edge pixels: response below the threshold and a local minimum in x or y (skip the border)
+            if r < edge_threshold and (minimum_x or minimum_y):
+                result[y, x] = (0, 0, 255)
+
+    return result
+
+
+def main():
+
+    input_img = cv2.imread('blox.jpg')  ## read the image
+    input_gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY) ## convert to grayscale
+    input_gray = (input_gray - np.min(input_gray)) / (np.max(input_gray) - np.min(input_gray))  ## normalize
+    input_gray = input_gray.astype(np.float32)  ## convert to float32 for filtering
+
+    ## Obtain Harris Response, corners and edges
+    response = harrisResponseImage(input_gray)
+    points = harrisKeypoints(response)
+    edges = harrisEdges(input_img, response)
+
+    imgKeypoints1 = cv2.drawKeypoints(input_img, points,  outImage=None, color=(0, 255, 0))
+    show("Harris Keypoints", imgKeypoints1, 1, 2)
+    show("Harris Edges", edges, 2, 2)
+    cv2.waitKey(0)
+
+    cv2.imwrite("edges.png", edges)
+    cv2.imwrite("corners.png", imgKeypoints1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ex1/code/Skeleton/reference/A.png b/ex1/code/Skeleton/reference/A.png
new file mode 100644
index 0000000000000000000000000000000000000000..71a980807e8261ac34d216a1d1f4bdafc4912304
Binary files /dev/null and b/ex1/code/Skeleton/reference/A.png differ
diff --git a/ex1/code/Skeleton/reference/B.png b/ex1/code/Skeleton/reference/B.png
new file mode 100644
index 0000000000000000000000000000000000000000..61b987e66b7aed28e2dca463936609884cc47aae
Binary files /dev/null and b/ex1/code/Skeleton/reference/B.png differ
diff --git a/ex1/code/Skeleton/reference/C.png b/ex1/code/Skeleton/reference/C.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea52f8c78e0659c8f2ab2a76bebb00fa403064be
Binary files /dev/null and b/ex1/code/Skeleton/reference/C.png differ
diff --git a/ex1/code/Skeleton/reference/corners.png b/ex1/code/Skeleton/reference/corners.png
new file mode 100644
index 0000000000000000000000000000000000000000..70d33188e539f5683366f72c3c70068e3e6c4641
Binary files /dev/null and b/ex1/code/Skeleton/reference/corners.png differ
diff --git a/ex1/code/Skeleton/reference/dIdx.png b/ex1/code/Skeleton/reference/dIdx.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba2852f2f27f2d73bc6cf07111b279edafa21726
Binary files /dev/null and b/ex1/code/Skeleton/reference/dIdx.png differ
diff --git a/ex1/code/Skeleton/reference/dIdy.png b/ex1/code/Skeleton/reference/dIdy.png
new file mode 100644
index 0000000000000000000000000000000000000000..cdfe00d21df6ee1001d421bb14fe64b2aa648b3a
Binary files /dev/null and b/ex1/code/Skeleton/reference/dIdy.png differ
diff --git a/ex1/code/Skeleton/reference/edges.png b/ex1/code/Skeleton/reference/edges.png
new file mode 100644
index 0000000000000000000000000000000000000000..19793211a220bb433172d426694b2684d342ebe6
Binary files /dev/null and b/ex1/code/Skeleton/reference/edges.png differ
diff --git a/ex1/code/Skeleton/reference/response.png b/ex1/code/Skeleton/reference/response.png
new file mode 100644
index 0000000000000000000000000000000000000000..c3e7e6660f14687790d4e707d5bde46505ff597d
Binary files /dev/null and b/ex1/code/Skeleton/reference/response.png differ
diff --git a/ex1/code/Skeleton/response.png b/ex1/code/Skeleton/response.png
new file mode 100644
index 0000000000000000000000000000000000000000..3ddd8d146315600e41a2e03d48fa410fbfb601c2
Binary files /dev/null and b/ex1/code/Skeleton/response.png differ
diff --git a/ex2/skeleton/data/10.jpg b/ex2/skeleton/data/10.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5100d36c8418ea72594f26958922f5e139e46127
Binary files /dev/null and b/ex2/skeleton/data/10.jpg differ
diff --git a/ex2/skeleton/data/11.jpg b/ex2/skeleton/data/11.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..561889704e2783e805ee29ec5eb6ab120b513804
Binary files /dev/null and b/ex2/skeleton/data/11.jpg differ
diff --git a/ex2/skeleton/data/12.jpg b/ex2/skeleton/data/12.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..35bc3f732d704075ba90e478af2042c859f58bde
Binary files /dev/null and b/ex2/skeleton/data/12.jpg differ
diff --git a/ex2/skeleton/data/13.jpg b/ex2/skeleton/data/13.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8b88c803d935025030dd4edf0221b11f56487ec0
Binary files /dev/null and b/ex2/skeleton/data/13.jpg differ
diff --git a/ex2/skeleton/data/14.jpg b/ex2/skeleton/data/14.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b856be4c1e11ceee97e32be7f21898de806054e7
Binary files /dev/null and b/ex2/skeleton/data/14.jpg differ
diff --git a/ex2/skeleton/data/15.jpg b/ex2/skeleton/data/15.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5476374a2da4bcac40f5c6c95c54982f745fed5f
Binary files /dev/null and b/ex2/skeleton/data/15.jpg differ
diff --git a/ex2/skeleton/data/7.jpg b/ex2/skeleton/data/7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..086673854d72c77c2ff0841cada236fd775296df
Binary files /dev/null and b/ex2/skeleton/data/7.jpg differ
diff --git a/ex2/skeleton/data/8.jpg b/ex2/skeleton/data/8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a26a324695521253132dcee5a341b70ddbffdc28
Binary files /dev/null and b/ex2/skeleton/data/8.jpg differ
diff --git a/ex2/skeleton/data/9.jpg b/ex2/skeleton/data/9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..908a3ba3b5cfae4395891cfaee26a32df560b17b
Binary files /dev/null and b/ex2/skeleton/data/9.jpg differ
diff --git a/ex2/skeleton/src/__pycache__/homography.cpython-310.pyc b/ex2/skeleton/src/__pycache__/homography.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ebe8e150e9b2db541e014ad6e608d177c7c423bb
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/homography.cpython-310.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/homography.cpython-39.pyc b/ex2/skeleton/src/__pycache__/homography.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6015aee696fcca8762ec198aa3910dadabd9c66
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/homography.cpython-39.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/matching.cpython-310.pyc b/ex2/skeleton/src/__pycache__/matching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6976e8998f4656ddf04835f966bd2102daf53673
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/matching.cpython-310.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/matching.cpython-39.pyc b/ex2/skeleton/src/__pycache__/matching.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7598acf5e247577b5f70ab9d0cb58afd17b5b6ff
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/matching.cpython-39.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/ransac.cpython-310.pyc b/ex2/skeleton/src/__pycache__/ransac.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3a74f26592399a214feab1f4c7fb9159a751eef
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/ransac.cpython-310.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/ransac.cpython-39.pyc b/ex2/skeleton/src/__pycache__/ransac.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea646c8398f4128ff79f52e8aeb33fb45f582d5a
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/ransac.cpython-39.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/stitcher.cpython-310.pyc b/ex2/skeleton/src/__pycache__/stitcher.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba662f7d8c2b158dc664ace2036eef024fefa698
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/stitcher.cpython-310.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/stitcher.cpython-39.pyc b/ex2/skeleton/src/__pycache__/stitcher.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12a8cc05ffa8609d5b69e04637ef146e9c7cbbb3
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/stitcher.cpython-39.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/utils.cpython-310.pyc b/ex2/skeleton/src/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5124aa425368fd4b7e09fc347ebfeb021097b26a
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/utils.cpython-310.pyc differ
diff --git a/ex2/skeleton/src/__pycache__/utils.cpython-39.pyc b/ex2/skeleton/src/__pycache__/utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e99d97740ed323b144d79f42b569f114f310b3a
Binary files /dev/null and b/ex2/skeleton/src/__pycache__/utils.cpython-39.pyc differ
diff --git a/ex2/skeleton/src/homography.py b/ex2/skeleton/src/homography.py
new file mode 100644
index 0000000000000000000000000000000000000000..36d7252d54c134dd38fbc2831fa537045d22949f
--- /dev/null
+++ b/ex2/skeleton/src/homography.py
@@ -0,0 +1,68 @@
+import numpy as np
+import cv2
+
+## Compute a homography matrix from 4 point matches
+def computeHomography(points1, points2):
+    '''
+    Solution with OpenCV calls not allowed
+    '''
+    assert(len(points1) == 4)
+    assert(len(points2) == 4)
+
+    ## TODO 3
+    ## Construct the 8x9 matrix A.
+    ## Use the formula from the exercise sheet.
+    ## Note that every match contributes to exactly two rows of the matrix.
+    A = np.zeros((8, 9))
+    for i, (p, q) in enumerate(zip(points1, points2)):
+        px, py = p[0], p[1]
+        qx, qy = q[0], q[1]
+        A[2 * i, :] = [-px, -py, -1, 0, 0, 0, px * qx, py * qx, qx]
+        A[2 * i + 1, :] = [0, 0, 0, -px, -py, -1, px * qy, py * qy, qy]
+
+    U, s, V = np.linalg.svd(A, full_matrices=True)
+    V = np.transpose(V)
+
+    H = np.zeros((3, 3))
+    ## TODO 3
+    ## - Extract the homogeneous solution of Ah=0 as the rightmost column vector of V.
+    ## - Store the result in H.
+    ## - Normalize H
+    h = V[:,-1]
+    H = h.reshape(3,3)
+    H = H*(1/h[-1])
+
+    return H
+
+def testHomography():
+    points1 = [(1, 1), (3, 7), (2, -5), (10, 11)]
+    points2 = [(25, 156), (51, -83), (-144, 5), (345, 15)]
+
+    H = computeHomography(points1, points2)
+
+    print ("Testing Homography...")
+    print ("Your result:" + str(H))
+
+    Href = np.array([[-151.2372466105457,   36.67990057507507,   130.7447340624461],
+                 [-27.31264543681857,   10.22762978292494,   118.0943169422209],
+                 [-0.04233528054472634, -0.3101691983762523, 1]])
+
+    print ("Reference: " + str(Href))
+
+    error = Href - H
+    e   = np.linalg.norm(error)
+    print ("Error: " + str(e))
+
+    if (e < 1e-10):
+        print ("Test: SUCCESS!")
+    else:
+        print ("Test: FAIL!")
+    print ("============================")
diff --git a/ex2/skeleton/src/main.py b/ex2/skeleton/src/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..2900eaddf753918ffaa9ea3ded85b811d168b23d
--- /dev/null
+++ b/ex2/skeleton/src/main.py
@@ -0,0 +1,76 @@
+import numpy as np
+import cv2
+import os
+
+from matching import computeMatches, createMatchImage
+from homography import testHomography
+from ransac import computeHomographyRansac
+from stitcher import createStichedImage
+from utils import computeFeatures
+
+IMAGE_DIR = '../data/'
+
+def main():
+
+    testHomography()
+
+    ## =========== Loading ===========
+
+    images = [
+        "7.jpg",  "8.jpg",  "9.jpg",  "10.jpg", "11.jpg",
+        "12.jpg", "13.jpg", "14.jpg", "15.jpg",
+    ]
+
+    image_data_dicts = []
+    for i, image_name in enumerate(images):
+        image_data = {}
+        image_data['file'] = image_name
+
+        image_path = os.path.join(IMAGE_DIR, image_name)
+        image_data['img'] = cv2.imread(image_path)
+        assert image_data['img'] is not None, "Image not read properly: " + image_path
+        image_data['img'] = cv2.resize(image_data['img'], None, fx=0.5, fy=0.5)
+
+        image_data['id'] = i
+
+        image_data['HtoReference'] = np.eye(3)
+        image_data['HtoPrev'] = np.eye(3)
+        image_data['HtoNext'] = np.eye(3)
+
+        image_data_dicts.append(image_data)
+        print ("Loaded image " + str(image_data['id']) + " " +
+                str(image_data['file']) + " " + str(image_data['img'].shape[0])
+                + "x" + str(image_data['img'].shape[1]))
+
+    ## =========== Feature Detection ===========
+    image_data_dicts = [computeFeatures(image_data) for image_data in image_data_dicts]
+
+    ## =========== Pairwise Feature Matching ===========
+    for i in range(1, len(image_data_dicts)):
+        matches = computeMatches(image_data_dicts[i-1], image_data_dicts[i])
+
+        ## Debug output
+        matchImg = createMatchImage(image_data_dicts[i-1],  image_data_dicts[i], matches)
+        h             = 200
+        w             = int((float(matchImg.shape[1]) / matchImg.shape[0]) * h)
+        matchImg = cv2.resize(matchImg, (w, h))
+        name = "Matches (" + str(i - 1) + "," + str(i) + ") " + image_data_dicts[i - 1]['file'] + " - " + image_data_dicts[i]['file']
+        cv2.namedWindow(name)
+        cv2.moveWindow(name, int(10 + w * ((i - 1) % 2)), int(10 + (h + 30) * ((i - 1) / 2)))
+        cv2.imshow(name, matchImg)
+
+        H = computeHomographyRansac(image_data_dicts[i-1], image_data_dicts[i], matches, 1000, 2.0)
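+        ## H maps image i-1 into image i; its inverse therefore maps image i
+        ## back to image i-1. Both directions are stored so the stitcher can
+        ## chain homographies towards the reference image.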
+        image_data_dicts[i]['HtoPrev'] = np.linalg.inv(H)
+        image_data_dicts[i-1]['HtoNext'] = H
+
+    ## =============== Stitching ==================
+    simg = createStichedImage(image_data_dicts)
+    cv2.imwrite("output.png", simg)
+
+if __name__ == "__main__":
+    main()
diff --git a/ex2/skeleton/src/matching.py b/ex2/skeleton/src/matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..727182eddafcbe2baeb59bde97503fc3763d73a1
--- /dev/null
+++ b/ex2/skeleton/src/matching.py
@@ -0,0 +1,42 @@
+import cv2
+from utils import createMatchImage
+
+def matchknn2(descriptors1, descriptors2):
+
+    ## Initialize an empty list of matches. (HINT: N x 2)
+    knnmatches = []
+    ## Find the two nearest neighbors for every descriptor in image 1.
+    for point_idx, each_point in enumerate(descriptors1):
+        dists = [cv2.norm(each_point, j, cv2.NORM_HAMMING) for j in descriptors2]
+        idx_dist = sorted(range(len(dists)), key=lambda k: dists[k])
+        knnmatches.append([cv2.DMatch(point_idx, idx_dist[0], dists[idx_dist[0]]),
+                          cv2.DMatch(point_idx, idx_dist[1], dists[idx_dist[1]])])
+
+    ## For a given descriptor i in image 1:
+    ## Store the best match (smallest distance) in knnmatches[i][0]
+    ## Store the second best match in knnmatches[i][1]
+
+    ## Hint: The hamming distance between two descriptors can be computed with
+    ## double distance = norm(descriptors1.row(i), descriptors2.row(j),NORM_HAMMING);
+
+
+    return knnmatches
+
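+## For reference only (not used above): OpenCV's brute-force matcher produces
+## the same 2-nearest-neighbour Hamming matches as the manual loop in matchknn2,
+## assuming binary (ORB) descriptors as produced by utils.computeFeatures.
+def matchknn2_bf(descriptors1, descriptors2):
+    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
+    return bf.knnMatch(descriptors1, descriptors2, k=2)
+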
+def ratioTest(knnmatches, ratio_threshold):
+    matches = []
+
+    ## DONE 2.2
+    ## Compute the ratio between the nearest and second nearest neighbor.
+    ## Add the nearest neighbor to the output matches if the ratio is smaller than ratio_threshold.
+    for x in knnmatches:
+        if x[0].distance/x[1].distance < ratio_threshold:
+            matches.append(x[0])
+    return matches
+
+def computeMatches(img1, img2):
+    knnmatches = matchknn2(img1['descriptors'], img2['descriptors'])
+    matches = ratioTest(knnmatches, 0.7)
+    print ("(" + str(img1['id']) + "," + str(img2['id']) + ") found " + str(len(matches)) + " matches.")
+    return matches
diff --git a/ex2/skeleton/src/output.png b/ex2/skeleton/src/output.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c538b5f673b29f2dc012d90c8a9732c8506ca6e
Binary files /dev/null and b/ex2/skeleton/src/output.png differ
diff --git a/ex2/skeleton/src/ransac.py b/ex2/skeleton/src/ransac.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fe94195fc23d36aa224b1cbeb4d62deb91eeda9
--- /dev/null
+++ b/ex2/skeleton/src/ransac.py
@@ -0,0 +1,55 @@
+import cv2
+import numpy as np
+from homography import computeHomography
+
+def numInliers(points1, points2, H, threshold):
+
+    inlierCount = 0
+
+    ## TODO 4.1
+    ## Compute the number of inliers for the given homography
+    ## - Project the image points from image 1 to image 2
+    ## - A point is an inlier if the distance between the projected point and
+    ##      the point in image 2 is smaller than threshold.
+    ##
+    ## Hint: Construct a Homogeneous point of type 'Vec3' before applying H.
+    for p1, p2 in zip(points1, points2):
+        ## Homogeneous point in image 1, projected into image 2
+        ph = np.array([p1[0], p1[1], 1.0])
+        projected = np.dot(H, ph)
+        projected = projected[:2] / projected[2]  ## perspective divide
+        ## Euclidean distance to the corresponding point in image 2
+        if np.linalg.norm(np.array(p2) - projected) < threshold:
+            inlierCount += 1
+    return inlierCount
+
+def computeHomographyRansac(img1, img2, matches, iterations, threshold):
+
+    points1 = []
+    points2 = []
+    for i in range(len(matches)):
+        points1.append(img1['keypoints'][matches[i].queryIdx].pt)
+        points2.append(img2['keypoints'][matches[i].trainIdx].pt)
+
+    ## The best homography and the number of inliers for this H
+    bestH = np.eye(3)
+    bestInlierCount = 0
+    for i in range(iterations):
+        ## TODO 4.2
+        ## - Construct the subsets by randomly choosing 4 matches.
+        idx = np.random.choice(len(points1), 4, replace=False)
+        subset1 = [points1[j] for j in idx]
+        subset2 = [points2[j] for j in idx]
+
+        ## - Compute the homography for this subset
+        H = computeHomography(subset1, subset2)
+        ## - Compute the number of inliers
+        inlier_count = numInliers(points1, points2, H, threshold)
+        if inlier_count > bestInlierCount:
+            bestInlierCount = inlier_count
+            bestH = H
+        ## - Keep track of the best homography (use the variables bestH and bestInlierCount)
+
+    print ("(" + str(img1['id']) + "," + str(img2['id']) + ") found " + str(bestInlierCount) + " RANSAC inliers.")
+    return bestH
diff --git a/ex2/skeleton/src/stitcher.py b/ex2/skeleton/src/stitcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fb09e19b9356762a11324167cfdbed4bc9a6d21
--- /dev/null
+++ b/ex2/skeleton/src/stitcher.py
@@ -0,0 +1,94 @@
+import numpy as np
+import cv2, pdb
+from math import floor, ceil
+
+def computeHtoref(image_data_dir, center):
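+    ## Chain the pairwise homographies outwards from the reference image:
+    ## images left of the center accumulate their HtoNext through the neighbour
+    ## on their right, images right of it accumulate their HtoPrev through the
+    ## neighbour on their left.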
+    for i in range(center-1, -1, -1):
+        c = image_data_dir[i]
+        next_ = image_data_dir[i+1]
+        c['HtoReference'] = np.matmul(next_['HtoReference'], c['HtoNext'])
+
+    for i in range(center+1, len(image_data_dir), 1):
+        c = image_data_dir[i]
+        prev = image_data_dir[i-1]
+        c['HtoReference'] = np.matmul(prev['HtoReference'], c['HtoPrev'])
+
+    return image_data_dir
+
+def createStichedImage(image_data_dir):
+
+    print ("Stitching with "
+        + str(len(image_data_dir)) +
+        " images..")
+
+    center = len(image_data_dir) // 2
+    ref = image_data_dir[center]
+    image_data_dir = computeHtoref(image_data_dir, center)
+
+    print ("Reference Image : " + str(center) + " - " + ref['file'])
+
+    minx = float('inf')
+    maxx = float('-inf')
+    miny = float('inf')
+    maxy = float('-inf')
+
+    for i in range(len(image_data_dir)):
+        img2 = image_data_dir[i]
+        corners2 = [0, 0, 0, 0]
+        corners2[0] = (0, 0)
+        corners2[1] = (img2['img'].shape[1], 0)
+        corners2[2] = (img2['img'].shape[1], img2['img'].shape[0])
+        corners2[3] = (0, img2['img'].shape[0])
+        corners2 = np.array(corners2, dtype='float32')
+        corners2_in_1 = cv2.perspectiveTransform(corners2[None, :, :], img2['HtoReference'])
+
+        for p in corners2_in_1[0]:
+            minx = min(minx, p[0])
+            maxx = max(maxx, p[0])
+            miny = min(miny, p[1])
+            maxy = max(maxy, p[1])
+
+    roi = np.array([floor(minx), floor(miny), ceil(maxx)-floor(minx), ceil(maxy)-floor(miny)])
+    print ("ROI " + str(roi))
+
+    ## Translate everything so the top left corner is at (0,0)
+    ## Note: This can be simply done by adding the negavite offset to the
+    ## homopgrahy
+    offsetX            = floor(minx);
+    offsetY            = floor(miny);
+    ref['HtoReference'][0, 2]= -offsetX;
+    ref['HtoReference'][1, 2] = -offsetY;
+    computeHtoref(image_data_dir, center)
+
+    cv2.namedWindow('Panorama')
+    cv2.moveWindow('Panorama', 0, 500)
+
+    stitchedImage = np.zeros([roi[3], roi[2], 3], dtype='uint8')
+    for k in range(len(image_data_dir) + 1):
+        ## Alternate outwards from the center image: center, center-1, center+1, ...
+        sign = 1 if k % 2 == 0 else -1
+        i = center + sign * ((k + 1) // 2)
+
+        ## Out of index bounds check
+        if (i < 0 or i >= len(image_data_dir)):
+            continue
+
+        ## Project the image onto the reference image plane
+        img2 = image_data_dir[i]
+        warped = cv2.warpPerspective(img2['img'], img2['HtoReference'],
+                                     (stitchedImage.shape[1], stitchedImage.shape[0]),
+                                     flags=cv2.INTER_LINEAR)
+
+        ## Add it to the output image: only fill pixels that are still black
+        mask = np.all(stitchedImage == 0, axis=2)
+        stitchedImage[mask] = warped[mask]
+
+        print ("Added image " + str(i) + " - " + str(img2['file']) + ".")
+        print ("Press any key to continue...")
+        cv2.imshow("Panorama", stitchedImage)
+        cv2.waitKey(0)
+
+    return stitchedImage
diff --git a/ex2/skeleton/src/utils.py b/ex2/skeleton/src/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a0fa428193fc6eec76f56b8f289bf56cedd0966
--- /dev/null
+++ b/ex2/skeleton/src/utils.py
@@ -0,0 +1,19 @@
+import cv2
+
+def computeFeatures(image_data):
+    orb = cv2.ORB_create(nfeatures=500, scoreType=cv2.ORB_FAST_SCORE)
+    keypoints, descriptors = orb.detectAndCompute(image_data['img'], None)
+    print ("Found " + str(len(keypoints)) + " ORB features on image " + str(image_data['id']))
+
+    image_data['keypoints'] = keypoints
+    image_data['descriptors'] = descriptors
+
+    return image_data
+
+def createMatchImage(img1, img2, matches):
+    img_matches = cv2.drawMatches(img1['img'], img1['keypoints'], img2['img'], img2['keypoints'], matches,
+               outImg=None, matchColor=(0, 255, 0), singlePointColor=(0, 255, 0), flags=2)
+    return img_matches
diff --git a/ex3/skeleton.zip b/ex3/skeleton.zip
new file mode 100644
index 0000000000000000000000000000000000000000..6d5d00f2ddcd6acc592309ee11f03742044d16fb
Binary files /dev/null and b/ex3/skeleton.zip differ
diff --git a/ex3/skeleton/data/img1.jpg b/ex3/skeleton/data/img1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..364a63284951b3521a6e77b54349716a329bfa97
Binary files /dev/null and b/ex3/skeleton/data/img1.jpg differ
diff --git a/ex3/skeleton/data/img2.jpg b/ex3/skeleton/data/img2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d3278a47055270c089bb981a59dc2ee3dcdbf0fe
Binary files /dev/null and b/ex3/skeleton/data/img2.jpg differ
diff --git a/ex3/skeleton/src/__init__.py b/ex3/skeleton/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/ex3/skeleton/src/__pycache__/decompose.cpython-39.pyc b/ex3/skeleton/src/__pycache__/decompose.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55202a4e7fbab95c5da94a82ac29813134d12e89
Binary files /dev/null and b/ex3/skeleton/src/__pycache__/decompose.cpython-39.pyc differ
diff --git a/ex3/skeleton/src/__pycache__/fundamentalMatrix.cpython-39.pyc b/ex3/skeleton/src/__pycache__/fundamentalMatrix.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98baef5cdaf532cd44806f1903d412d52142269d
Binary files /dev/null and b/ex3/skeleton/src/__pycache__/fundamentalMatrix.cpython-39.pyc differ
diff --git a/ex3/skeleton/src/__pycache__/triangulate.cpython-39.pyc b/ex3/skeleton/src/__pycache__/triangulate.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fbd9b04d02988361e021a3d45a5e7f6a137e8d2
Binary files /dev/null and b/ex3/skeleton/src/__pycache__/triangulate.cpython-39.pyc differ
diff --git a/ex3/skeleton/src/decompose.py b/ex3/skeleton/src/decompose.py
new file mode 100644
index 0000000000000000000000000000000000000000..590ee16cc9f205666317d88c2fdf4f852f21ecf1
--- /dev/null
+++ b/ex3/skeleton/src/decompose.py
@@ -0,0 +1,96 @@
+import numpy as np
+from triangulate import triangulate_all_points
+'''
+## estimate the rotation and translation of the camera given the essential matrix E
+## see:
+## Richard Hartley and Andrew Zisserman (2003). Multiple View Geometry in computer vision
+## http://isit.u-clermont1.fr/~ab/Classes/DIKU-3DCV2/Handouts/Lecture16.pdf
+'''
+
+def decompose(E):
+
+    ## TODO 4.1
+    ## Decompose the essential matrix E into R1, R2, t1, t2.
+
+
+
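+    ## A minimal sketch of the standard SVD-based decomposition (Hartley &
+    ## Zisserman, ch. 9): E = U diag(1,1,0) V^T, the two candidate rotations are
+    ## U W V^T and U W^T V^T, and the translation is the last column of U
+    ## (up to sign). Possible reflections (det < 0) are handled by the caller.
+    U, _, Vt = np.linalg.svd(E)
+    W = np.array([[0, -1, 0],
+                  [1,  0, 0],
+                  [0,  0, 1]])
+    R1 = np.dot(U, np.dot(W, Vt))
+    R2 = np.dot(U, np.dot(W.T, Vt))
+    t1 = U[:, 2]
+    t2 = -U[:, 2]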
+    return R1, R2, t1, t2
+
+def relativeTransformation(E, points1, points2, K):
+
+    R1, R2, t1, t2 = decompose(E)
+    ## A negative determinant means that R contains a reflection. This is not a rigid transformation!
+    if np.linalg.det(R1) < 0:
+        E = -E
+        R1, R2, t1, t2 = decompose(E)
+
+    bestCount = 0
+
+    for dR in range(2):
+        if dR == 0:
+            cR = R1
+        else:
+            cR = R2
+        for dt in range(2):
+            if dt == 0:
+                ct = t1
+            else:
+                ct = t2
+
+            View1 = np.eye(3, 4)
+            View2 = np.zeros((3, 4))
+            View2[:, :3] = cR
+            View2[:, 3] = ct
+
+            count = len(triangulate_all_points(View1, View2, K, points1, points2))
+            if count > bestCount:
+                bestCount = count
+                V = View2
+
+    print ("V " + str(V))
+    return V
+
+
+def testDecompose():
+    E = np.array([[3.193843825323984,  1.701122615578195,  -6.27143074201245],
+                  [-41.95294882825382, 127.76771801644763, 141.5226870527082],
+                  [-8.074440557013252, -134.9928434422784, 1]])
+    R1, R2, t1, t2 = decompose(E)
+
+    print("Testing decompose...")
+
+    t_ref = np.array([-0.9973654079783344, -0.04579551552933408, -0.05625845470338976])
+
+    if np.linalg.norm(t1 - t_ref) < 1e-5:
+        if np.linalg.norm(t2 + t_ref) < 1e-5:
+            print("Test (Translation): SUCCESS")
+        else:
+            print("Test (Translation): FAIL!")
+    elif np.linalg.norm(t1 + t_ref) < 1e-5:
+        if np.linalg.norm(t2 - t_ref) < 1e-5:
+            print("Test (Translation): SUCCESS")
+        else:
+            print("Test (Translation): FAIL!")
+
+    R1_ref = np.array([[0.9474295893819155,  -0.1193419720472381, 0.2968748336782551],
+                       [0.2288481582901039, 0.9012012887804273, -0.3680553729369057],
+                       [-0.2236195286884464, 0.4166458097813701, 0.8811359574894123]])
+    R2_ref = np.array([[0.9332689072231527,  0.01099472166665292, 0.3590101153254257],
+                       [-0.1424930907107563, -0.9061762138657301, 0.3981713055001164],
+                       [0.329704209724715,   -0.4227573601008549, -0.8441394129942975]])
+
+    if np.linalg.norm(R1-R1_ref) < 1e-5:
+        if np.linalg.norm(R2-R2_ref) < 1e-5:
+            print("Test (Rotation): SUCCESS!")
+        else:
+            print("Test (Rotation): FAIL!")
+    elif np.linalg.norm(R1 - R2_ref) < 1e-5:
+        if np.linalg.norm(R2 - R1_ref) < 1e-5:
+            print("Test (Rotation): SUCCESS!")
+        else:
+            print("Test (Rotation): FAIL!")
+    print ('='*25)
\ No newline at end of file
diff --git a/ex3/skeleton/src/fundamentalMatrix.py b/ex3/skeleton/src/fundamentalMatrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..588cf09bbaaf2a14696ba513425e15cb2c75e296
--- /dev/null
+++ b/ex3/skeleton/src/fundamentalMatrix.py
@@ -0,0 +1,118 @@
+import numpy as np
+
+
+def computeF(points1, points2):
+
+    assert (len(points1) == 8), "Length of points1 should be 8!"
+    assert (len(points2) == 8), "Length of points2 should be 8!"
+
+    A = np.zeros((8, 9))
+    peer = list(zip(points1, points2))
+    for i in range(len(points1)):
+        ## TODO 2.2
+        ## Step 1
+        ## Fill the Matrix A
+        px = peer[i][0][0]
+        py = peer[i][0][1]
+        qx = peer[i][1][0]
+        qy = peer[i][1][1]
+        A[i, :] = [px*qx, px*qy, px, py*qx, py*qy, py, qx, qy, 1]
+
+    ## TODO 2.2
+    ## Step 2
+    ## Solve Af = 0 with SVD
+    U, _, V = np.linalg.svd(A, full_matrices=True)
+    V = np.transpose(V)
+    f = V[:, -1]
+    F = f.reshape(3, 3)
+    F = F * (1.0 / f[-1])
+
+    ## TODO 2.2
+    ## Step 3
+    ## - Enforce rank(F) = 2 by zeroing the smallest singular value
+    uf, sf, vf = np.linalg.svd(F, full_matrices=True)
+    sf[2] = 0
+    F = np.matmul(np.matmul(uf, np.diag(sf)), vf)
+
+    ## Step 4 - Normalize F
+    F = F * (1.0 / F[2, 2])
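+    ## A was built so that the solved F satisfies p1^T F p2 = 0; the transpose
+    ## returns the convention p2^T F p1 = 0 expected by epipolarConstraint
+    ## (F * p1 is the epipolar line in image 2).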
+    return np.transpose(F)
+
+def epipolarConstraint(p1, p2, F, t):
+
+    p1h = np.array([p1[0], p1[1], 1])
+    p2h = np.array([p2[0], p2[1], 1])
+
+    ## TODO 2.1
+    ## - Compute the normalized epipolar line in image 2
+    l2 = np.dot(F, p1h)
+    l2 = l2 / np.sqrt(l2[0] ** 2 + l2[1] ** 2)
+    ## - Compute the distance of p2 to the epipolar line
+    distance = abs(np.dot(p2h, l2))
+    ## - Check if the distance is smaller than t
+    return distance < t
+
+def numInliers(points1, points2, F, threshold):
+    inliers = []
+    for i in range(len(points1)):
+        if (epipolarConstraint(points1[i], points2[i], F, threshold)):
+            inliers.append(i)
+
+    return inliers
+
+def computeFRANSAC(points1, points2):
+
+    ## The best fundamental matrix and the number of inlier for this F.
+    bestInlierCount = 0
+    threshold = 4
+    iterations = 1000
+
+    for k in range(iterations):
+        ## Randomly choose 8 distinct matches for the minimal sample
+        idx = np.random.choice(len(points1), 8, replace=False)
+        subset1 = [points1[x] for x in idx]
+        subset2 = [points2[x] for x in idx]
+        F = computeF(subset1, subset2)
+        num = numInliers(points1, points2, F, threshold)
+        if (len(num) > bestInlierCount):
+            bestF = F
+            bestInlierCount = len(num)
+            bestInliers = num
+
+    return (bestF, bestInliers)
+
+def testFundamentalMat():
+    points1 = [(1, 1), (3, 7), (2, -5), (10, 11), (11, 2), (-3, 14), (236, -514), (-5, 1)]
+    points2 = [(25, 156), (51, -83), (-144, 5), (345, 15),
+                                    (215, -156), (151, 83), (1544, 15), (451, -55)]
+
+    F = computeF(points1, points2)
+
+    print ("Testing Fundamental Matrix...")
+    print ("Your result:" + str(F))
+
+    Fref = np.array([[0.001260822171230067,  0.0001614643951166923, -0.001447955678643285],
+                 [-0.002080014358205309, -0.002981504896782918, 0.004626528742122177],
+                 [-0.8665185546662642,   -0.1168790312603214,   1]])
+
+    print ("Reference: " + str(Fref))
+
+    error = Fref - F
+    e = np.linalg.norm(error)
+    print ("Error: " + str(e))
+
+    if (e < 1e-10):
+        print ("Test: SUCCESS!")
+    else:
+        print ("Test: FAIL!")
+    print ("============================")
diff --git a/ex3/skeleton/src/main.py b/ex3/skeleton/src/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..f005fc558011224efafd7f22abc1122310bd0bbb
--- /dev/null
+++ b/ex3/skeleton/src/main.py
@@ -0,0 +1,111 @@
+import cv2
+import numpy as np
+import open3d as o3d
+
+from fundamentalMatrix import testFundamentalMat, computeFRANSAC
+from triangulate import testTriangulate, triangulate_all_points
+from decompose import testDecompose, relativeTransformation
+
+def ratioTest(knnmatches, ratio_threshold):
+    matches = []
+    for inner_list in knnmatches:
+        first  = inner_list[0]
+        second = inner_list[1]
+
+        if (first.distance < ratio_threshold * second.distance):
+            matches.append(first)
+    return matches
+
+def main():
+    testFundamentalMat()
+    testTriangulate()
+    testDecompose()
+
+    img1 = cv2.imread("../data/img1.jpg")
+    img2 = cv2.imread("../data/img2.jpg")
+
+    assert img1 is not None, "Image 1 is not properly read"
+    assert img2 is not None, "Image 2 is not properly read"
+
+    K = np.array([[2890, 0, 1440], [0, 2890, 960], [0, 0, 1]])
+
+    #******************Feature Matching*******************#
+    detector = cv2.ORB_create(20000)
+    keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
+    keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
+    print("Num Features: " + str(len(keypoints1)) + " " + str(len(keypoints2)))
+
+    FLANN_INDEX_LSH = 6
+    flann_params= dict(algorithm = FLANN_INDEX_LSH,
+                               table_number = 12,
+                               key_size = 20,
+                               multi_probe_level = 2)
+    matcher = cv2.FlannBasedMatcher(flann_params, {})
+    knnmatches = matcher.knnMatch(descriptors1, descriptors2, 2)
+
+    matches = ratioTest(knnmatches, 0.8)
+    img_matches = cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches,
+                                  outImg=None, matchColor=(0, 255, 0),
+                                  singlePointColor=(0, 255, 0), flags=2)
+    img_matches = cv2.resize(img_matches, None, fx=0.2, fy=0.2)
+    cv2.imshow("All Matches", img_matches)
+    cv2.waitKey(0)
+
+    print ("Matches after ratio test: " + str(len(matches)))
+
+    #**************************Compute F and E**************************#
+    points1 = []
+    points2 = []
+    for m in matches:
+        points1.append(keypoints1[m.queryIdx].pt)
+        points2.append(keypoints2[m.trainIdx].pt)
+
+    F, inliers = computeFRANSAC(points1, points2)
+    print ("RANSAC inliers " + str(len(inliers)))
+
+    ## Compute E and normalize
+    E = np.dot(np.dot(np.transpose(K), F), K)
+    E *= 1.0 / E[2, 2]
+
+    inlier_matches = []
+    inlier_points1 = []
+    inlier_points2 = []
+    for i in inliers:
+        inlier_matches.append(matches[i])
+        inlier_points1.append(points1[i])
+        inlier_points2.append(points2[i])
+
+    ## Draw filtered matches (ransac inliers)
+    img_matches = cv2.drawMatches(img1, keypoints1, img2, keypoints2, inlier_matches,
+                                  outImg=None, matchColor=(0, 255, 0),
+                                  singlePointColor=(0, 255, 0), flags=2)
+    img_matches = cv2.resize(img_matches, None, fx=0.2, fy=0.2)
+    cv2.imshow("RANSAC Inlier Matches", img_matches)
+    cv2.waitKey(0)
+
+    ## Compute relative transformation
+    View1 = np.eye(3, 4)
+    View2 = relativeTransformation(E, inlier_points1, inlier_points2, K)
+
+    ## Triangulate inlier matches
+    wps = triangulate_all_points(View1, View2, K, inlier_points1, inlier_points2)
+    ## Small sanity check to remove low-angle (far away) triangulated points and some more outliers
+    wps_updated = []
+    for p in wps:
+        if p[2] < 10:
+            wps_updated.append(p)
+
+    ## Rendering in Open3D
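+    ## Note: triangulate_all_points and the depth check above both drop points,
+    ## so the indices of wps_updated no longer line up exactly with
+    ## inlier_points1; the colour lookup below is therefore only approximate.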
+    colors = []
+    for i in range(len(wps_updated)):
+        colors.append(img1[int(inlier_points1[i][1]), int(inlier_points1[i][0])])
+    ## Open3D expects RGB colours in [0, 1]; OpenCV stores BGR uint8
+    colors = np.array(colors, dtype=np.float64)[:, ::-1] / 255.0
+
+    point_cloud = o3d.geometry.PointCloud()
+    point_cloud.points = o3d.utility.Vector3dVector(wps_updated)
+    point_cloud.colors = o3d.utility.Vector3dVector(colors)
+    o3d.visualization.draw_geometries([point_cloud])
+
+if __name__ == '__main__':
+    main()
diff --git a/ex3/skeleton/src/triangulate.py b/ex3/skeleton/src/triangulate.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b9c22960629b763d77f7e05f1ef278dc1371a52
--- /dev/null
+++ b/ex3/skeleton/src/triangulate.py
@@ -0,0 +1,60 @@
+import cv2
+import numpy as np
+
+def triangulate(P1, P2, p1, p2):
+
+    A = np.eye(4)
+
+    ## TODO 3
+    ## - Construct the matrix A
+
+
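+    ## A minimal sketch of the standard linear (DLT) triangulation: every image
+    ## point contributes two rows of the form x * P[2] - P[0] and y * P[2] - P[1].
+    ## This overwrites the placeholder A above.
+    A = np.array([p1[0] * P1[2] - P1[0],
+                  p1[1] * P1[2] - P1[1],
+                  p2[0] * P2[2] - P2[0],
+                  p2[1] * P2[2] - P2[1]])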
+    svd_mats = np.linalg.svd(A, full_matrices=True)
+    V = np.transpose(svd_mats[2])
+
+    ## TODO 3
+    ## Extract the solution and project it back to 3D (from homogeneous space)
+    xh = V[:, -1]
+    x = xh[:3] / xh[3]
+
+    return x
+
+def triangulate_all_points(View1, View2, K, points1, points2):
+
+    wps = []
+    P1 = np.dot(K, View1)
+    P2 = np.dot(K, View2)
+
+    for i in range(len(points1)):
+        wp = triangulate(P1, P2, points1[i], points2[i])
+
+        ## Check if this point is in front of both cameras
+        ptest = [wp[0], wp[1], wp[2], 1]
+        p1 = np.matmul(P1, ptest)
+        p2 = np.matmul(P2, ptest)
+
+        if (p1[2] > 0 and p2[2] > 0):
+            wps.append(wp)
+
+    return wps
+
+def testTriangulate():
+
+    P1 = np.array([[1, 2, 3, 6], [4, 5, 6, 37], [7, 8, 9, 15]]).astype('float')
+    P2 = np.array([[51, 12, 53, 73], [74, 15, -6, -166], [714, -8, 95, 16]]).astype('float')
+
+    F = triangulate(P1, P2, (14.0, 267.0), (626.0, 67.0))
+    print ("Testing Triangulation...")
+    print ("Your result: " + str(F))
+
+    wpref = [0.782409, 3.89115, -5.72358]
+    print ("Reference: " + str(wpref))
+
+    error = wpref - F
+    e = cv2.norm(error)
+    print ("Error: " + str(e))
+
+    if (e < 1e-5):
+        print ("Test: SUCCESS!")
+    else:
+        print ("Test: FAIL")
+    print ("================================")
diff --git a/ex4/Skeleton.zip b/ex4/Skeleton.zip
new file mode 100644
index 0000000000000000000000000000000000000000..7f72f81bf11307e47c7d2c18ac777ba7b92c2cda
Binary files /dev/null and b/ex4/Skeleton.zip differ