diff --git a/Practical_sessions/Session_1/linear_regression-completed.py b/Practical_sessions/Session_1/linear_regression-completed.py
new file mode 100644
index 0000000000000000000000000000000000000000..67d29999dd90286bd13106cb3aab643da5009b11
--- /dev/null
+++ b/Practical_sessions/Session_1/linear_regression-completed.py
@@ -0,0 +1,185 @@
+import matplotlib.pyplot as plt
+import numpy as np
+
+def read_data(file_name, delimiter=','):
+    """ Read the data file and returns the corresponding matrices
+
+    Parameters
+    ----------
+    file_name : file name containg data
+    delimiter : character separating columns in the file ("," by default)
+
+    Returns
+    -------
+    X : data matrix of size [N, nb_var]
+    Y : matrix containg values of the target variable of size [N, 1]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    """
+    
+    data = np.loadtxt(file_name, delimiter=delimiter)
+    nb_var = data.shape[1] - 1
+    N = data.shape[0]
+
+    X = data[:, :-1]
+    Y = data[:, -1].reshape(N,1)
+    
+    return X, Y, N, nb_var
+
+def normalization(X):
+    """ Normalize the provided matrix (substracts mean and divides by standard deviation)
+    
+
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    X_norm : normalized data matrix of size [N, nb_var]
+    mu : means of the variables of size [1,nb_var]
+    sigma : standard deviations of the variables of size [1,nb_var]
+
+    """
+    
+    mu = np.mean(X, 0)
+    sigma = np.std(X, 0)
+    X_norm = (X - mu) / sigma
+
+    return X_norm, mu, sigma
+
+def compute_loss(X, Y, theta):
+    """ Compute the loss function value (mean square error)
+    
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    Y : matrix containg values of the target variable of size [N, 1]
+    theta : matrix containing the theta parameters of the linear model of size [1, nb_var+1]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    loss : loss function value (mean square error)
+
+    """
+
+    N = X.shape[0]
+
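+    # Half mean squared error: J(theta) = (1 / (2N)) * sum_i (theta . x_i - y_i)^2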
+    loss = np.sum((X.dot(theta.T) - Y) ** 2) / (2 * N)
+
+    return loss
+
+def gradient_descent(X, Y, theta, alpha, nb_iters):
+    """ Training to compute the linear regression parameters by gradient descent
+    
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    Y : matrix containg values of the target variable of size [N, 1]
+    theta : matrix containing the theta parameters of the linear model of size [1, nb_var+1]
+    alpha : learning rate
+    nb_iters : number of iterations
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+
+    Returns
+    -------
+    theta : matrix containing the theta parameters learnt by gradient descent of size [1, nb_var+1]
+    J_history : list containg the loss function values for each iteration of length nb_iters
+
+
+    """
+    
+    # Initialization of useful variables
+    N = X.shape[0]
+    J_history = np.zeros(nb_iters)
+
+    for i in range(nb_iters):
+        # Prediction error with the current parameters, of size [N, 1]
+        error = X.dot(theta.T) - Y
+        # Vectorized gradient step: the gradient of the loss w.r.t. theta is (1/N) * sum_i x_i * error_i
+        theta -= (alpha / N) * np.sum(X * error, 0)
+
+        # Keep track of the loss value at each iteration
+        J_history[i] = compute_loss(X, Y, theta)
+
+    return theta, J_history
+
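+# Optional cross-check (editor's sketch, not required by the original script): on these
+# small datasets the least-squares parameters can also be obtained in closed form with
+# the normal equation; once gradient descent has converged, both results should be close.
+def normal_equation(X, Y):
+    """ Compute theta directly as pinv(X^T X) X^T Y, returned with size [1, nb_var+1] """
+    return np.linalg.pinv(X.T.dot(X)).dot(X.T).dot(Y).T
+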
+def display(X, Y, theta):
+    """ Display in 2 dimensions of data points and of the linear regression curve defined by theta parameters
+    
+
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    Y : matrix containg values of the target variable of size [N, 1]
+    theta : matrix containing the theta parameters of the linear model of size [1, nb_var+1]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    None
+
+    """
+    plt.figure(0)
+    plt.scatter(X[:, 1], Y, c='r', marker="x")
+    plt.plot(X[:, 1], X.dot(theta.T))
+    plt.title("Linear Regression")
+    
+    plt.show()
+
+
+if __name__ == "__main__":
+
+    # ===================== Part 1: Data loading and normalization =====================
+    print("Data loading ...")
+
+    X, Y, N, nb_var = read_data("food_truck.txt")
+    # X, Y, N, nb_var = read_data("houses.txt")
+
+    # Print the first ten examples of the dataset
+    print("First ten examples of the dataset:")
+    for i in range(0, 10):
+        print(f"x = {X[i,:]}, y = {Y[i]}")
+
+    # Normalization of the variables
+    print("Normalization of the variables ...")
+
+    X, mu, sigma = normalization(X)
+
+    # Add one column of 1 values to X (for theta 0)
+    X = np.hstack((np.ones((N,1)), X)) 
+
+    # ===================== Part 2: Gradient descent =====================
+    print("Training by gradient descent ...")
+
+    # Choice of the learning rate and number of iterations
+    alpha = 0.01
+    nb_iters = 1500
+
+    # Initialization of theta and call to the gradient descent function
+    theta = np.zeros((1,nb_var+1))
+    theta, J_history = gradient_descent(X, Y, theta, alpha, nb_iters)
+
+    # Display of the loss function values obtained during gradient descent training
+    plt.figure()
+    plt.title("Loss function values obtained during gradient descent training")
+    plt.plot(np.arange(J_history.size), J_history)
+    plt.xlabel("Nomber of iterations")
+    plt.ylabel("Loss function J")
+
+    # Print the theta values
+    print(f"Theta computed by gradient descent : {theta}")
+
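+    # Example of use (editor's sketch, assuming the single-variable food_truck data):
+    # a new input must be normalized with the same mu and sigma before applying theta.
+    if nb_var == 1:
+        x_new = np.array([[7.0]])
+        y_new = np.hstack((np.ones((1, 1)), (x_new - mu) / sigma)).dot(theta.T)
+        print(f"Prediction for x = {x_new.ravel()} : {y_new.ravel()}")
+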
+    # In case of only one predictor variable, display the linear regression curve
+    if nb_var == 1:
+        display(X, Y, theta)
+    plt.show()
+
+    print("Linear Regression completed.")
diff --git a/Practical_sessions/Session_1/logistic_regression-completed.py b/Practical_sessions/Session_1/logistic_regression-completed.py
new file mode 100644
index 0000000000000000000000000000000000000000..826b54bea8a7c93fc1ab9a7fd917252b4581b994
--- /dev/null
+++ b/Practical_sessions/Session_1/logistic_regression-completed.py
@@ -0,0 +1,261 @@
+import matplotlib.pyplot as plt
+import numpy as np
+
+def read_data(file_name, delimiter=','):
+    """ Read the data file and returns the corresponding matrices
+
+    Parameters
+    ----------
+    file_name : file name containg data
+    delimiter : character separating columns in the file ("," by default)
+
+    Returns
+    -------
+    X : data matrix of size [N, nb_var]
+    Y : matrix containg values of the target variable of size [N, 1]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    """
+    
+    data = np.loadtxt(file_name, delimiter=delimiter)
+    nb_var = data.shape[1] - 1
+    N = data.shape[0]
+
+    X = data[:, :-1]
+    Y = data[:, -1].reshape(N,1)
+    
+    return X, Y, N, nb_var
+
+def normalization(X):
+    """ Normalize the provided matrix (substracts mean and divides by standard deviation)
+    
+
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    X_norm : normalized data matrix of size [N, nb_var]
+    mu : means of the variables of sizede dimension [1,nb_var]
+    sigma : standar deviations of the variables of size [1,nb_var]
+
+    """
+    
+    mu = np.mean(X, 0)
+    sigma = np.std(X, 0)
+    X_norm = (X - mu) / sigma
+
+    return X_norm, mu, sigma
+
+def sigmoid(z):
+    """ Compute the value of the sigmoid function applied to z
+    
+    Parameters
+    ----------
+    z : can be a scalar value or a matrix
+
+    Returns
+    -------
+    s : sigmoid value of z. Same size as z
+
+    """
+
+    s = 1 / (1 + np.exp(-z))
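+    # Note (editor's addition): for large negative z, np.exp(-z) can overflow in float64
+    # and trigger a RuntimeWarning; a common workaround is to clip the argument first,
+    # e.g. 1 / (1 + np.exp(-np.clip(z, -30, 30))).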
+
+    return s
+
+def compute_loss(X, Y, theta):
+    """ Compute the loss function value (log likelihood)
+    
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    Y : matrix containg values of the target variable of size [N, 1]
+    theta : matrix containing the theta parameters of the linear model of size [1, nb_var+1]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    loss : loss function value (log likelihood)
+    """
+
+    N = X.shape[0]
+    
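+    # Cross-entropy: J(theta) = -(1/N) * sum_i [ y_i*log(h_i) + (1 - y_i)*log(1 - h_i) ],
+    # where h_i = sigmoid(theta . x_i) is the predicted probability for example i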
+    loss = - (Y*np.log(sigmoid(X.dot(theta.T))) + (1-Y)*np.log(1-sigmoid(X.dot(theta.T)))).sum() / N
+
+    return loss
+
+def gradient_descent(X, Y, theta, alpha, nb_iters):
+    """ Training to compute the logistic regression parameters by gradient descent
+    
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    Y : matrix containg values of the target variable of size [N, 1]
+    theta : matrix containing the theta parameters of the logistic model of size [1, nb_var+1]
+    alpha : learning rate
+    nb_iters : number of iterations
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+
+    Returns
+    -------
+    theta : matrix containing the theta parameters learnt by gradient descent of size [1, nb_var+1]
+    J_history : list containg the loss function values for each iteration of length nb_iters
+
+
+    """
+    
+    # Init of useful variables
+    N = X.shape[0]
+    J_history = np.zeros(nb_iters)
+
+    for i in range(nb_iters):
+        # Prediction error with the current parameters, of size [N, 1]
+        error = sigmoid(X.dot(theta.T)) - Y
+        # Vectorized gradient step, of the same form as in linear regression
+        theta -= (alpha / N) * np.sum(X * error, 0)
+
+        # Keep track of the loss value at each iteration
+        J_history[i] = compute_loss(X, Y, theta)
+
+    return theta, J_history
+
+def prediction(X, theta):
+    """ Predict the class of each element in X
+
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    theta : matrix containing the theta parameters of the logistic model, of size [1, nb_var+1]
+
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    p : matrix of size [N, 1] giving the predicted class of each element in X (either 0 or 1)
+
+    """
+
+    p = sigmoid(X.dot(theta.T))
+    pos = np.where(p >= 0.5)
+    neg = np.where(p < 0.5)
+
+    p[pos] = 1
+    p[neg] = 0
+
+    return p
+
+def classification_rate(Ypred, Y):
+    """ Compute the classification rate (proportion of correctly classified elements)
+    
+    Parameters
+    ----------
+    Ypred : matrix containing the predicted values of the class of size [N, 1]
+    Y : matrix containing the values of the target variable of size [N, 1]
+    
+    with N : number of elements 
+
+
+    Returns
+    -------
+    r : classification rate
+
+    """
+
+    N = Ypred.size
+    nb_errors = np.sum(np.abs(Ypred-Y))
+    
+    r = (N - nb_errors) / N
+
+    return r
+
+def display(X, Y):
+    """ Display of data in 2 dimensions (2 dimensions of X) and class representation (provided by Y) by a color
+    
+    Parameters
+    ----------
+    X : data matrix of size [N, nb_var+1]
+    Y : matrix containg values of the target variable of size [N, 1]
+    
+    with N : number of elements and nb_var : number of predictor variables
+
+    Returns
+    -------
+    None
+
+    """
+
+    pos = np.where(Y == 1)[0]
+    neg = np.where(Y == 0)[0]
+    plt.scatter(X[pos, 1], X[pos, 2], marker="+", c='b')
+    plt.scatter(X[neg, 1], X[neg, 2], marker="o", c='r')
+
+
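+# Editor's sketch (optional addition, not part of the original script): with the learnt
+# parameters, the decision boundary theta0 + theta1*x1 + theta2*x2 = 0 can be drawn on
+# top of the scatter plot produced by display(), for the case nb_var == 2.
+def display_boundary(X, theta):
+    x1 = np.array([X[:, 1].min(), X[:, 1].max()])
+    x2 = -(theta[0, 0] + theta[0, 1] * x1) / theta[0, 2]
+    plt.plot(x1, x2)
+
+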
+if __name__ == "__main__":
+    # ===================== Part 1: Data loading and normalization =====================
+    print("Data loading ...")
+
+    X, Y, N, nb_var = read_data("scores.txt")
+
+    # Print the first ten examples of the dataset
+    print("First ten examples of the dataset:")
+    for i in range(0, 10):
+        print(f"x = {X[i,:]}, y = {Y[i]}")
+        
+    # Normalization of the variables
+    print("Normalization of the variables ...")
+
+    X, mu, sigma = normalization(X)
+
+    # Add one column of 1 values to X (for theta 0)
+    X = np.hstack((np.ones((N,1)), X)) 
+
+    # Display in 2D of data points and actual class representation by a color
+    if nb_var == 2:
+        plt.figure(0)
+        plt.title("Coordinates of data points in 2D - Ground truth")
+        display(X, Y)
+
+    # ===================== Part 2: Gradient descent =====================
+    print("Training by gradient descent ...")
+
+    # Choice of the learning rate and number of iterations
+    alpha = 0.01
+    nb_iters = 10000
+
+    # Initialization of theta and call to the gradient descent function
+    theta = np.zeros((1,nb_var+1))
+    theta, J_history = gradient_descent(X, Y, theta, alpha, nb_iters)
+
+    # Display of the loss function values obtained during gradient descent training
+    plt.figure(1)
+    plt.title("Loss function values obtained during gradient descent training")
+    plt.plot(np.arange(J_history.size), J_history)
+    plt.xlabel("Nomber of iterations")
+    plt.ylabel("Loss function J")
+
+    # Print the theta values
+    print(f"Theta computed by gradient descent : {theta}")
+
+    # Evaluation of the model
+    Ypred = prediction(X, theta)
+
+    print("Classification rate : ", classification_rate(Ypred, Y))
+
+    # Display in 2D of data points and predicted class representation by a color
+    if nb_var == 2:
+        plt.figure(2)
+        plt.title("Coordinates of data points in 2D - Prediction")
+        display(X, Ypred)
+        
+    plt.show()
+
+    print("Logistic Regression completed.")
diff --git a/Practical_sessions/Session_2/Subject_2_Neural_Networks.pdf b/Practical_sessions/Session_2/Subject_2_Neural_Networks.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..8de18ffc4611c0be1bf3c2ee322848e99ae20f4f
Binary files /dev/null and b/Practical_sessions/Session_2/Subject_2_Neural_Networks.pdf differ
diff --git a/Practical_sessions/Session_2/food_truck.txt b/Practical_sessions/Session_2/food_truck.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0f88ccb611f840ba9283e0de2a26b6cb9b8fde02
--- /dev/null
+++ b/Practical_sessions/Session_2/food_truck.txt
@@ -0,0 +1,97 @@
+6.1101,17.592
+5.5277,9.1302
+8.5186,13.662
+7.0032,11.854
+5.8598,6.8233
+8.3829,11.886
+7.4764,4.3483
+8.5781,12
+6.4862,6.5987
+5.0546,3.8166
+5.7107,3.2522
+14.164,15.505
+5.734,3.1551
+8.4084,7.2258
+5.6407,0.71618
+5.3794,3.5129
+6.3654,5.3048
+5.1301,0.56077
+6.4296,3.6518
+7.0708,5.3893
+6.1891,3.1386
+20.27,21.767
+5.4901,4.263
+6.3261,5.1875
+5.5649,3.0825
+18.945,22.638
+12.828,13.501
+10.957,7.0467
+13.176,14.692
+22.203,24.147
+5.2524,-1.22
+6.5894,5.9966
+9.2482,12.134
+5.8918,1.8495
+8.2111,6.5426
+7.9334,4.5623
+8.0959,4.1164
+5.6063,3.3928
+12.836,10.117
+6.3534,5.4974
+5.4069,0.55657
+6.8825,3.9115
+11.708,5.3854
+5.7737,2.4406
+7.8247,6.7318
+7.0931,1.0463
+5.0702,5.1337
+5.8014,1.844
+11.7,8.0043
+5.5416,1.0179
+7.5402,6.7504
+5.3077,1.8396
+7.4239,4.2885
+7.6031,4.9981
+6.3328,1.4233
+6.3589,-1.4211
+6.2742,2.4756
+5.6397,4.6042
+9.3102,3.9624
+9.4536,5.4141
+8.8254,5.1694
+5.1793,-0.74279
+21.279,17.929
+14.908,12.054
+18.959,17.054
+7.2182,4.8852
+8.2951,5.7442
+10.236,7.7754
+5.4994,1.0173
+20.341,20.992
+10.136,6.6799
+7.3345,4.0259
+6.0062,1.2784
+7.2259,3.3411
+5.0269,-2.6807
+6.5479,0.29678
+7.5386,3.8845
+5.0365,5.7014
+10.274,6.7526
+5.1077,2.0576
+5.7292,0.47953
+5.1884,0.20421
+6.3557,0.67861
+9.7687,7.5435
+6.5159,5.3436
+8.5172,4.2415
+9.1802,6.7981
+6.002,0.92695
+5.5204,0.152
+5.0594,2.8214
+5.7077,1.8451
+7.6366,4.2959
+5.8707,7.2029
+5.3054,1.9869
+8.2934,0.14454
+13.394,9.0551
+5.4369,0.61705
diff --git a/Practical_sessions/Session_2/houses.txt b/Practical_sessions/Session_2/houses.txt
new file mode 100644
index 0000000000000000000000000000000000000000..79e9a807edd86632d58aa2ec832e190d997f43e7
--- /dev/null
+++ b/Practical_sessions/Session_2/houses.txt
@@ -0,0 +1,47 @@
+2104,3,399900
+1600,3,329900
+2400,3,369000
+1416,2,232000
+3000,4,539900
+1985,4,299900
+1534,3,314900
+1427,3,198999
+1380,3,212000
+1494,3,242500
+1940,4,239999
+2000,3,347000
+1890,3,329999
+4478,5,699900
+1268,3,259900
+2300,4,449900
+1320,2,299900
+1236,3,199900
+2609,4,499998
+3031,4,599000
+1767,3,252900
+1888,2,255000
+1604,3,242900
+1962,4,259900
+3890,3,573900
+1100,3,249900
+1458,3,464500
+2526,3,469000
+2200,3,475000
+2637,3,299900
+1839,2,349900
+1000,1,169900
+2040,4,314900
+3137,3,579900
+1811,4,285900
+1437,3,249900
+1239,3,229900
+2132,4,345000
+4215,4,549000
+2162,4,287000
+1664,2,368500
+2238,3,329900
+2567,4,314000
+1200,3,299000
+852,2,179900
+1852,4,299900
+1203,3,239500
diff --git a/Practical_sessions/Session_2/iris.txt b/Practical_sessions/Session_2/iris.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1de4bbac2d427f72603868801f7647f04cb281a1
--- /dev/null
+++ b/Practical_sessions/Session_2/iris.txt
@@ -0,0 +1,150 @@
+5.1,3.5,1.4,0.2,0
+4.9,3.0,1.4,0.2,0
+4.7,3.2,1.3,0.2,0
+4.6,3.1,1.5,0.2,0
+5.0,3.6,1.4,0.2,0
+5.4,3.9,1.7,0.4,0
+4.6,3.4,1.4,0.3,0
+5.0,3.4,1.5,0.2,0
+4.4,2.9,1.4,0.2,0
+4.9,3.1,1.5,0.1,0
+5.4,3.7,1.5,0.2,0
+4.8,3.4,1.6,0.2,0
+4.8,3.0,1.4,0.1,0
+4.3,3.0,1.1,0.1,0
+5.8,4.0,1.2,0.2,0
+5.7,4.4,1.5,0.4,0
+5.4,3.9,1.3,0.4,0
+5.1,3.5,1.4,0.3,0
+5.7,3.8,1.7,0.3,0
+5.1,3.8,1.5,0.3,0
+5.4,3.4,1.7,0.2,0
+5.1,3.7,1.5,0.4,0
+4.6,3.6,1.0,0.2,0
+5.1,3.3,1.7,0.5,0
+4.8,3.4,1.9,0.2,0
+5.0,3.0,1.6,0.2,0
+5.0,3.4,1.6,0.4,0
+5.2,3.5,1.5,0.2,0
+5.2,3.4,1.4,0.2,0
+4.7,3.2,1.6,0.2,0
+4.8,3.1,1.6,0.2,0
+5.4,3.4,1.5,0.4,0
+5.2,4.1,1.5,0.1,0
+5.5,4.2,1.4,0.2,0
+4.9,3.1,1.5,0.1,0
+5.0,3.2,1.2,0.2,0
+5.5,3.5,1.3,0.2,0
+4.9,3.1,1.5,0.1,0
+4.4,3.0,1.3,0.2,0
+5.1,3.4,1.5,0.2,0
+5.0,3.5,1.3,0.3,0
+4.5,2.3,1.3,0.3,0
+4.4,3.2,1.3,0.2,0
+5.0,3.5,1.6,0.6,0
+5.1,3.8,1.9,0.4,0
+4.8,3.0,1.4,0.3,0
+5.1,3.8,1.6,0.2,0
+4.6,3.2,1.4,0.2,0
+5.3,3.7,1.5,0.2,0
+5.0,3.3,1.4,0.2,0
+7.0,3.2,4.7,1.4,1
+6.4,3.2,4.5,1.5,1
+6.9,3.1,4.9,1.5,1
+5.5,2.3,4.0,1.3,1
+6.5,2.8,4.6,1.5,1
+5.7,2.8,4.5,1.3,1
+6.3,3.3,4.7,1.6,1
+4.9,2.4,3.3,1.0,1
+6.6,2.9,4.6,1.3,1
+5.2,2.7,3.9,1.4,1
+5.0,2.0,3.5,1.0,1
+5.9,3.0,4.2,1.5,1
+6.0,2.2,4.0,1.0,1
+6.1,2.9,4.7,1.4,1
+5.6,2.9,3.6,1.3,1
+6.7,3.1,4.4,1.4,1
+5.6,3.0,4.5,1.5,1
+5.8,2.7,4.1,1.0,1
+6.2,2.2,4.5,1.5,1
+5.6,2.5,3.9,1.1,1
+5.9,3.2,4.8,1.8,1
+6.1,2.8,4.0,1.3,1
+6.3,2.5,4.9,1.5,1
+6.1,2.8,4.7,1.2,1
+6.4,2.9,4.3,1.3,1
+6.6,3.0,4.4,1.4,1
+6.8,2.8,4.8,1.4,1
+6.7,3.0,5.0,1.7,1
+6.0,2.9,4.5,1.5,1
+5.7,2.6,3.5,1.0,1
+5.5,2.4,3.8,1.1,1
+5.5,2.4,3.7,1.0,1
+5.8,2.7,3.9,1.2,1
+6.0,2.7,5.1,1.6,1
+5.4,3.0,4.5,1.5,1
+6.0,3.4,4.5,1.6,1
+6.7,3.1,4.7,1.5,1
+6.3,2.3,4.4,1.3,1
+5.6,3.0,4.1,1.3,1
+5.5,2.5,4.0,1.3,1
+5.5,2.6,4.4,1.2,1
+6.1,3.0,4.6,1.4,1
+5.8,2.6,4.0,1.2,1
+5.0,2.3,3.3,1.0,1
+5.6,2.7,4.2,1.3,1
+5.7,3.0,4.2,1.2,1
+5.7,2.9,4.2,1.3,1
+6.2,2.9,4.3,1.3,1
+5.1,2.5,3.0,1.1,1
+5.7,2.8,4.1,1.3,1
+6.3,3.3,6.0,2.5,2
+5.8,2.7,5.1,1.9,2
+7.1,3.0,5.9,2.1,2
+6.3,2.9,5.6,1.8,2
+6.5,3.0,5.8,2.2,2
+7.6,3.0,6.6,2.1,2
+4.9,2.5,4.5,1.7,2
+7.3,2.9,6.3,1.8,2
+6.7,2.5,5.8,1.8,2
+7.2,3.6,6.1,2.5,2
+6.5,3.2,5.1,2.0,2
+6.4,2.7,5.3,1.9,2
+6.8,3.0,5.5,2.1,2
+5.7,2.5,5.0,2.0,2
+5.8,2.8,5.1,2.4,2
+6.4,3.2,5.3,2.3,2
+6.5,3.0,5.5,1.8,2
+7.7,3.8,6.7,2.2,2
+7.7,2.6,6.9,2.3,2
+6.0,2.2,5.0,1.5,2
+6.9,3.2,5.7,2.3,2
+5.6,2.8,4.9,2.0,2
+7.7,2.8,6.7,2.0,2
+6.3,2.7,4.9,1.8,2
+6.7,3.3,5.7,2.1,2
+7.2,3.2,6.0,1.8,2
+6.2,2.8,4.8,1.8,2
+6.1,3.0,4.9,1.8,2
+6.4,2.8,5.6,2.1,2
+7.2,3.0,5.8,1.6,2
+7.4,2.8,6.1,1.9,2
+7.9,3.8,6.4,2.0,2
+6.4,2.8,5.6,2.2,2
+6.3,2.8,5.1,1.5,2
+6.1,2.6,5.6,1.4,2
+7.7,3.0,6.1,2.3,2
+6.3,3.4,5.6,2.4,2
+6.4,3.1,5.5,1.8,2
+6.0,3.0,4.8,1.8,2
+6.9,3.1,5.4,2.1,2
+6.7,3.1,5.6,2.4,2
+6.9,3.1,5.1,2.3,2
+5.8,2.7,5.1,1.9,2
+6.8,3.2,5.9,2.3,2
+6.7,3.3,5.7,2.5,2
+6.7,3.0,5.2,2.3,2
+6.3,2.5,5.0,1.9,2
+6.5,3.0,5.2,2.0,2
+6.2,3.4,5.4,2.3,2
+5.9,3.0,5.1,1.8,2
\ No newline at end of file
diff --git a/Practical_sessions/Session_2/nn_regression.py b/Practical_sessions/Session_2/nn_regression.py
new file mode 100644
index 0000000000000000000000000000000000000000..e80e6c64451abfa4a8b2f559bde32918a18d77be
--- /dev/null
+++ b/Practical_sessions/Session_2/nn_regression.py
@@ -0,0 +1,323 @@
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+def read_data(file_name, delimiter=','):
+    """ Reads the file containing the data and returns the corresponding matrices
+
+    Parameters
+    ----------
+    file_name : name of the file containing the data
+    delimiter : character separating columns in the file ("," by default)
+
+    Returns
+    -------
+    x : data matrix of size [N, num_vars]
+    d : matrix containing the target variable values of size [N, num_targets]
+    N : number of elements
+    num_vars : number of predictor variables
+    num_targets : number of target variables
+
+    """
+    
+    data = np.loadtxt(file_name, delimiter=delimiter)
+        
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return x, d, N, num_vars, num_targets
+
+def normalization(x):
+    """ Normalizes the data by centering and scaling the predictor variables
+    
+    Parameters
+    ----------
+    X : data matrix of size [N, num_vars]
+    
+    with N : number of elements and num_vars : number of predictor variables
+
+    Returns
+    -------
+    X_norm : centered-scaled data matrix of size [N, num_vars]
+    mu : mean of the variables of size [1, num_vars]
+    sigma : standard deviation of the variables of size [1, num_vars]
+    
+    """
+    
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return x_norm, mu, sigma
+
+def split_data(x, d, val_prop=0.2, test_prop=0.2):
+    """ Splits the initial data into three distinct subsets for training, validation, and testing
+    
+    Parameters
+    ----------
+    x : data matrix of size [N, num_vars]
+    d : matrix of target values [N, num_targets]
+    val_prop : proportion of validation data over the entire dataset (between 0 and 1)
+    test_prop : proportion of test data over the entire dataset (between 0 and 1)
+    
+    with N : number of elements, num_vars : number of predictor variables, num_targets : number of target variables
+
+    Returns
+    -------
+    x_train : training data matrix
+    d_train : training target values matrix
+    x_val : validation data matrix
+    d_val : validation target values matrix
+    x_test : test data matrix
+    d_test : test target values matrix
+
+    """
+    
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return x_train, d_train, x_val, d_val, x_test, d_test
+
+def calculate_mse_cost(y, d):
+    """ Calculates the value of the MSE (mean squared error) cost function
+    
+    Parameters
+    ----------
+    y : matrix of predicted data 
+    d : matrix of actual data 
+    
+    Returns
+    -------
+    cost : value corresponding to the MSE cost function (mean squared error)
+
+    """
+
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return cost
+
+def forward_pass(x, W, b, activation):
+    """ Performs a forward pass in the neural network
+    
+    Parameters
+    ----------
+    x : input matrix, of size num_vars x N
+    W : list containing the weight matrices of the network
+    b : list containing the bias matrices of the network
+    activation : list containing the activation functions of the network layers
+
+    with N : number of elements, num_vars : number of predictor variables 
+
+    Returns
+    -------
+    a : list containing the input potentials of the network layers
+    h : list containing the outputs of the network layers
+
+    """
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return a, h
+
+def backward_pass(delta_h, a, h, W, activation):
+    """ Performs a backward pass in the neural network (backpropagation)
+    
+    Parameters
+    ----------
+    delta_h : matrix containing the gradient of the cost with respect to the output of the network
+    a : list containing the input potentials of the network layers
+    h : list containing the outputs of the network layers
+    W : list containing the weight matrices of the network
+    activation : list containing the activation functions of the network layers
+
+    Returns
+    -------
+    delta_W : list containing the gradient matrices of the network layer weights
+    delta_b : list containing the gradient matrices of the network layer biases
+
+    """
+
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return delta_W, delta_b
+
+def sigmoid(z, deriv=False):
+    """ Calculates the value of the sigmoid function or its derivative applied to z
+    
+    Parameters
+    ----------
+    z : can be a scalar or a matrix
+    deriv : boolean. If False returns the value of the sigmoid function, if True returns its derivative
+
+    Returns
+    -------
+    s : value of the sigmoid function applied to z or its derivative. Same dimension as z
+
+    """
+
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return s
+
+def linear(z, deriv=False):
+    """ Calculates the value of the linear function or its derivative applied to z
+    
+    Parameters
+    ----------
+    z : can be a scalar or a matrix
+    deriv : boolean. If False returns the value of the linear function, if True returns its derivative
+
+    Returns
+    -------
+    s : value of the linear function applied to z or its derivative. Same dimension as z
+
+    """
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return s
+
+def relu(z, deriv=False):
+    """ Calculates the value of the relu function or its derivative applied to z
+    
+    Parameters
+    ----------
+    z : can be a scalar or a matrix
+    deriv : boolean. If False returns the value of the relu function, if True returns its derivative
+
+    Returns
+    -------
+    s : value of the relu function applied to z or its derivative. Same dimension as z
+
+    """
+
+    #######################################################
+    ##### To complete (and remove the pass statement) #####
+    ####################################################### 
+    pass
+
+    # return s
+
+
+# ===================== Part 1: Data Reading and Normalization =====================
+print("Reading data ...")
+
+x, d, N, num_vars, num_targets = read_data("food_truck.txt")
+# x, d, N, num_vars, num_targets = read_data("houses.txt")
+
+# Displaying the first 10 examples from the dataset
+print("Displaying the first 10 examples from the dataset: ")
+for i in range(0, 10):
+    print(f"x = {x[i,:]}, d = {d[i]}")
+    
+# Normalizing the variables (centering and scaling)
+print("Normalizing the variables ...")
+x, mu, sigma = normalization(x)
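+# Rescale the targets so that their maximum is 1, keeping them on a scale comparable
+# to the normalized inputs; predictions can be brought back to the original scale by
+# multiplying by dmax.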
+dmax = d.max()
+d = d / dmax
+
+# Splitting the data into training, validation, and test subsets
+x_train, d_train, x_val, d_val, x_test, d_test = split_data(x, d)
+
+# ===================== Part 2: Training =====================
+
+# Choosing the learning rate and number of iterations
+alpha = 0.001
+num_iters = 500
+train_costs = np.zeros(num_iters)
+val_costs = np.zeros(num_iters)
+
+# Network dimensions
+D_c = [num_vars, 5, 10, num_targets] # list containing the number of neurons for each layer 
+activation = [relu, sigmoid, linear] # list containing the activation functions for the hidden layers and the output layer 
+
+# Random initialization of the network weights
+W = []
+b = []
+for i in range(len(D_c)-1):    
+    W.append(2 * np.random.random((D_c[i+1], D_c[i])) - 1)
+    b.append(np.zeros((D_c[i+1],1)))
+
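+# At this point W[i] has size [D_c[i+1], D_c[i]] and b[i] has size [D_c[i+1], 1]:
+# with the data presented as column vectors below, layer i is expected to compute
+# its potential as W[i].dot(h_previous) + b[i].
+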
+x_train = x_train.T # Data is presented as column vectors at the input of the network
+d_train = d_train.T 
+
+x_val = x_val.T # Data is presented as column vectors at the input of the network
+d_val = d_val.T 
+
+x_test = x_test.T # Data is presented as column vectors at the input of the network
+d_test = d_test.T 
+
+for t in range(num_iters):
+
+    #############################################################################
+    # Forward pass: calculating predicted output y on validation data #
+    #############################################################################
+    a, h = forward_pass(x_val, W, b, activation)
+    y_val = h[-1] # Predicted output
+
+    ###############################################################################
+    # Forward pass: calculating predicted output y on training data #
+    ###############################################################################
+    a, h = forward_pass(x_train, W, b, activation)
+    y_train = h[-1] # Predicted output
+
+    ###########################################
+    # Calculating the MSE loss function #
+    ###########################################
+    train_costs[t] = calculate_mse_cost(y_train, d_train)
+    val_costs[t] = calculate_mse_cost(y_val, d_val)
+
+    ####################################
+    # Backward pass: backpropagation #
+    ####################################
+    delta_h = (y_train-d_train) # For the last layer 
+    delta_W, delta_b = backward_pass(delta_h, a, h, W, activation)
+  
+    #############################################
+    # Updating weights and biases #
+    ############################################# 
+    for i in range(len(b)-1,-1,-1):
+        b[i] -= alpha * delta_b[i]
+        W[i] -= alpha * delta_W[i]
+
+print("Final cost on the training set: ", train_costs[-1])
+print("Final cost on the validation set: ", val_costs[-1])
+
+# Plotting the evolution of the cost function during backpropagation
+plt.figure(0)
+plt.title("Evolution of the cost function during backpropagation")
+plt.plot(np.arange(train_costs.size), train_costs, label="Training")
+plt.plot(np.arange(val_costs.size), val_costs, label="Validation")
+plt.legend(loc="upper left")
+plt.xlabel("Number of iterations")
+plt.ylabel("Cost")
+plt.show()
+
+# ===================== Part 3: Evaluation on the test set =====================
+
+#######################################################################
+# Forward pass: calculating predicted output y on test data #
+#######################################################################
+a, h = forward_pass(x_test, W, b, activation)
+y_test = h[-1] # Predicted output
+
+cost = calculate_mse_cost(y_test, d_test)
+print("Test set cost: ", cost)
diff --git a/Practical_sessions/Session_2/scores.txt b/Practical_sessions/Session_2/scores.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3a5f95245719c6f7f08ece4a7785c0f0467c610e
--- /dev/null
+++ b/Practical_sessions/Session_2/scores.txt
@@ -0,0 +1,100 @@
+34.62365962451697,78.0246928153624,0
+30.28671076822607,43.89499752400101,0
+35.84740876993872,72.90219802708364,0
+60.18259938620976,86.30855209546826,1
+79.0327360507101,75.3443764369103,1
+45.08327747668339,56.3163717815305,0
+61.10666453684766,96.51142588489624,1
+75.02474556738889,46.55401354116538,1
+76.09878670226257,87.42056971926803,1
+84.43281996120035,43.53339331072109,1
+95.86155507093572,38.22527805795094,0
+75.01365838958247,30.60326323428011,0
+82.30705337399482,76.48196330235604,1
+69.36458875970939,97.71869196188608,1
+39.53833914367223,76.03681085115882,0
+53.9710521485623,89.20735013750205,1
+69.07014406283025,52.74046973016765,1
+67.94685547711617,46.67857410673128,0
+70.66150955499435,92.92713789364831,1
+76.97878372747498,47.57596364975532,1
+67.37202754570876,42.83843832029179,0
+89.67677575072079,65.79936592745237,1
+50.534788289883,48.85581152764205,0
+34.21206097786789,44.20952859866288,0
+77.9240914545704,68.9723599933059,1
+62.27101367004632,69.95445795447587,1
+80.1901807509566,44.82162893218353,1
+93.114388797442,38.80067033713209,0
+61.83020602312595,50.25610789244621,0
+38.78580379679423,64.99568095539578,0
+61.379289447425,72.80788731317097,1
+85.40451939411645,57.05198397627122,1
+52.10797973193984,63.12762376881715,0
+52.04540476831827,69.43286012045222,1
+40.23689373545111,71.16774802184875,0
+54.63510555424817,52.21388588061123,0
+33.91550010906887,98.86943574220611,0
+64.17698887494485,80.90806058670817,1
+74.78925295941542,41.57341522824434,0
+34.1836400264419,75.2377203360134,0
+83.90239366249155,56.30804621605327,1
+51.54772026906181,46.85629026349976,0
+94.44336776917852,65.56892160559052,1
+82.36875375713919,40.61825515970618,0
+51.04775177128865,45.82270145776001,0
+62.22267576120188,52.06099194836679,0
+77.19303492601364,70.45820000180959,1
+97.77159928000232,86.7278223300282,1
+62.07306379667647,96.76882412413983,1
+91.56497449807442,88.69629254546599,1
+79.94481794066932,74.16311935043758,1
+99.2725269292572,60.99903099844988,1
+90.54671411399852,43.39060180650027,1
+34.52451385320009,60.39634245837173,0
+50.2864961189907,49.80453881323059,0
+49.58667721632031,59.80895099453265,0
+97.64563396007767,68.86157272420604,1
+32.57720016809309,95.59854761387875,0
+74.24869136721598,69.82457122657193,1
+71.79646205863379,78.45356224515052,1
+75.3956114656803,85.75993667331619,1
+35.28611281526193,47.02051394723416,0
+56.25381749711624,39.26147251058019,0
+30.05882244669796,49.59297386723685,0
+44.66826172480893,66.45008614558913,0
+66.56089447242954,41.09209807936973,0
+40.45755098375164,97.53518548909936,1
+49.07256321908844,51.88321182073966,0
+80.27957401466998,92.11606081344084,1
+66.74671856944039,60.99139402740988,1
+32.72283304060323,43.30717306430063,0
+64.0393204150601,78.03168802018232,1
+72.34649422579923,96.22759296761404,1
+60.45788573918959,73.09499809758037,1
+58.84095621726802,75.85844831279042,1
+99.82785779692128,72.36925193383885,1
+47.26426910848174,88.47586499559782,1
+50.45815980285988,75.80985952982456,1
+60.45555629271532,42.50840943572217,0
+82.22666157785568,42.71987853716458,0
+88.9138964166533,69.80378889835472,1
+94.83450672430196,45.69430680250754,1
+67.31925746917527,66.58935317747915,1
+57.23870631569862,59.51428198012956,1
+80.36675600171273,90.96014789746954,1
+68.46852178591112,85.59430710452014,1
+42.0754545384731,78.84478600148043,0
+75.47770200533905,90.42453899753964,1
+78.63542434898018,96.64742716885644,1
+52.34800398794107,60.76950525602592,0
+94.09433112516793,77.15910509073893,1
+90.44855097096364,87.50879176484702,1
+55.48216114069585,35.57070347228866,0
+74.49269241843041,84.84513684930135,1
+89.84580670720979,45.35828361091658,1
+83.48916274498238,48.38028579728175,1
+42.2617008099817,87.10385094025457,1
+99.31500880510394,68.77540947206617,1
+55.34001756003703,64.9319380069486,1
+74.77589300092767,89.52981289513276,1