diff --git a/mlp.py b/mlp.py
index f98157d465249915f819035eab6adce7661bd50c..b8e5eb0fddf9ddecec9eb5860b15f38a4fd5aff8 100644
--- a/mlp.py
+++ b/mlp.py
@@ -1,5 +1,38 @@
+import numpy as np
 
+def sigmoid(x):
+    return 1 / (1 + np.exp(-x))
 
 def learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate):
+    N = len(targets)
+    # Forward pass
+    a0 = data  # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = sigmoid(z1)  # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    a2 = sigmoid(z2)  # output of the output layer (sigmoid activation function)
+    predictions = a2  # the predicted values are the outputs of the output layer
+
+    # Compute loss (MSE)
+    loss = np.mean(np.square(predictions - targets))
+
+    # Backward pass, following the standard backpropagation formulas:
+    d_a2 = 2 / N * (a2 - targets)  # derivative of the MSE loss w.r.t. the predictions
+    d_z2 = d_a2 * a2 * (1 - a2)  # chain rule through the sigmoid (sigmoid' = a2 * (1 - a2))
+    d_w2 = np.matmul(a1.T, d_z2)
+    d_b2 = np.sum(d_z2, axis=0, keepdims=True)  # sum over the batch to match b2's shape
+    d_a1 = np.matmul(d_z2, w2.T)
+    d_z1 = d_a1 * a1 * (1 - a1)  # chain rule through the hidden sigmoid
+    d_w1 = np.matmul(a0.T, d_z1)
+    d_b1 = np.sum(d_z1, axis=0, keepdims=True)  # sum over the batch to match b1's shape
+
+    # Gradient descent step: update the weights and biases of the network
+    w1 = w1 - learning_rate * d_w1
+    b1 = b1 - learning_rate * d_b1
+    w2 = w2 - learning_rate * d_w2
+    b2 = b2 - learning_rate * d_b2
+
     return w1, b1, w2, b2, loss
 
+
+
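As a quick sanity check of the patched learn_once_mse, the loss it returns should decrease over repeated calls on the same batch. The sketch below assumes mlp.py is importable as mlp; the dimensions (16 samples, 10 input features, 8 hidden units, 3 outputs), the uniform weight initialization, and the learning rate of 0.1 are illustrative assumptions, not part of the patch.

import numpy as np
from mlp import learn_once_mse

rng = np.random.default_rng(0)

# Illustrative shapes (assumptions): batch of 16, 10 inputs, 8 hidden units, 3 outputs
data = rng.random((16, 10))
targets = rng.random((16, 3))  # in [0, 1), consistent with the sigmoid output layer

w1 = 2 * rng.random((10, 8)) - 1  # small random weights in [-1, 1)
b1 = np.zeros((1, 8))
w2 = 2 * rng.random((8, 3)) - 1
b2 = np.zeros((1, 3))

for step in range(100):
    w1, b1, w2, b2, loss = learn_once_mse(w1, b1, w2, b2, data, targets, 0.1)
    if step % 20 == 0:
        print(f"step {step:3d}: loss = {loss:.4f}")  # should trend downward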