diff --git a/main.py b/main.py
index bcda22e66e4f61bbe2bc6aef7c9100203c8c7bd4..324157472c24e419a3d19f7e0f711a4a40fa1c2d 100644
--- a/main.py
+++ b/main.py
@@ -1,8 +1,10 @@
 from read_cifar import *
 from knn import *
+from mlp import *
 
 path = r'C:\Users\hp\Desktop\BE\image-classification\data'
 
+
 if __name__ == "__main__":
     split_factor = 0.9
     X, y = read_cifar(path)
@@ -10,4 +12,18 @@ if __name__ == "__main__":
     
-    K_max=20
-    accuries=evaluate_knn_for_k(X_train, y_train, X_test, y_test, K_max)
-    plot_accuracy_versus_k(accuries)
\ No newline at end of file
+    K_max = 20
+    accuracies = evaluate_knn_for_k(X_train, y_train, X_test, y_test, K_max)
+    plot_accuracy_versus_k(accuracies)
+
+    # Train and evaluate the MLP on the same data, normalized to [0, 1]
+    data, labels = read_cifar(path)
+    data_train, labels_train, data_test, labels_test = split_dataset(data, labels, split=split_factor)
+    data_train, data_test = data_train / 255.0, data_test / 255.0
+
+    # Hyperparameters of the MLP
+    d_h = 64
+    learning_rate = 0.9
+    num_epoch = 100
+
+    train_accuracies, test_accuracy = run_mlp_training(data_train, labels_train, data_test,
+                                                       labels_test, d_h, learning_rate, num_epoch)
+    plot_accuracy_versus_epoch(train_accuracies)
diff --git a/mlp.py b/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..183beef2c9e6e727f814c260ac047357ed0837b3
--- /dev/null
+++ b/mlp.py
@@ -0,0 +1,194 @@
+import numpy as np 
+import matplotlib.pyplot as plt
+
+def learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate):
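+    """Perform one forward and one backward pass of the MLP on (data, targets)
+    using the MSE loss, then apply one gradient-descent step.
+    Returns:
+        w1, b1, w2, b2 : the updated weights & biases of the MLP.
+        loss : the MSE loss computed on this batch.
+    """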
+
+    # Forward pass
+    a0 = data                    # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))   # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    a2 = 1 / (1 + np.exp(-z2))   # output of the output layer (sigmoid activation function)
+    predictions = a2             # the predicted values are the outputs of the output layer
+
+    # Compute loss (MSE)
+    loss = np.mean(np.square(predictions - targets))
+
+    N = data.shape[0]
+    # Backward pass
+    # Note: the 1/N batch average is applied once, when the weight and bias
+    # gradients are accumulated below (matching learn_once_cross_entropy).
+    da2 = 2 * (predictions - targets)
+    dz2 = da2 * a2 * (1 - a2)
+
+    dw2 = np.dot(a1.T, dz2) / N
+    db2 = np.sum(dz2, axis=0, keepdims=True) / N
+
+    da1 = np.dot(dz2, w2.T)
+    dz1 = da1 * a1 * (1 - a1)
+
+    dw1 = np.dot(a0.T, dz1) / N
+    db1 = np.sum(dz1, axis=0, keepdims=True) / N
+    
+    # One step of gradient descent
+    w1 -= learning_rate * dw1
+    w2 -= learning_rate * dw2
+    b1 -= learning_rate * db1
+    b2 -= learning_rate * db2
+
+    return w1, b1, w2, b2, loss
+
+def one_hot(x):
+    """One-hot encode an array of sample labels.
+    Return a one-hot encoded row vector for each label."""
+    n_classes = 10
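+    # e.g. one_hot(np.array([0, 3])) gives a (2, 10) array with ones at columns
+    # 0 and 3; n_classes is fixed to 10 because CIFAR-10 has 10 classes.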
+    return np.eye(n_classes)[x]
+
+def softmax(x):
+    """Compute softmax values, row-wise, for each set of scores in x."""
+    # Subtracting the row-wise max avoids overflow without changing the result.
+    exp = np.exp(x - np.max(x, axis=1, keepdims=True))
+    return exp / np.sum(exp, axis=1, keepdims=True)
+
+def learn_once_cross_entropy(w1, b1, w2, b2, data, targets, learning_rate):
+    """
+    Perform one forward and backward pass of an MLP using the cross-entropy loss.
+    Returns:
+        w1, b1, w2, b2 : the updated weights & biases of the MLP.
+        loss : the loss
+    """
+    N = data.shape[0]
+
+    # Forward pass
+    a0 = data                       # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1     # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))      # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2     # input of the output layer
+    a2 = softmax(z2)                # output of the output layer (softmax activation function)
+    predictions = a2                # the predicted values are the outputs of the output layer
+
+    # One-hot encode the targets
+    oh_targets = one_hot(targets)
+
+    # Compute the cross-entropy loss (negative log-likelihood); 1e-9 guards against log(0)
+    loss = - np.sum(
+        oh_targets * np.log(predictions + 1e-9)
+        ) / N
+
+    # Backward pass
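+    # With a softmax output and cross-entropy loss, dL/dz2 simplifies to
+    # (predictions - one-hot targets); the 1/N batch average is applied
+    # below when the weight and bias gradients are accumulated.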
+    dz2 = predictions - oh_targets
+
+    dw2 = np.dot(a1.T, dz2) / N
+    db2 = np.sum(dz2, axis=0, keepdims=True) / N
+    
+    da1 = np.dot(dz2, w2.T)
+    dz1 = da1 * a1 * (1 - a1)
+
+    dw1 = np.dot(a0.T, dz1) / N
+    db1 = np.sum(dz1, axis=0, keepdims=True) / N
+    
+    # One step of gradient descent
+    w1 -= learning_rate * dw1
+    w2 -= learning_rate * dw2
+    b1 -= learning_rate * db1
+    b2 -= learning_rate * db2
+
+    return w1, b1, w2, b2, loss
+
+def predict_mlp(w1, b1, w2, b2, data):
+    """Do the forward pass of the MLP on data.
+    Returns:
+        numpy array: the predictions for images in data
+    """
+    # Forward pass
+    a0 = data                    # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))   # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    a2 = softmax(z2)             # output of the output layer (softmax activation function)
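+    # The predicted class is the index of the largest softmax probability.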
+    predictions = np.argmax(a2, axis=1)
+    
+    return predictions
+
+def train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch):
+    """
+    Perform num_epoch training steps of the MLP using the cross-entropy loss.
+    Returns:
+        w1, b1, w2, b2 : the updated weights & biases of the MLP after num_epoch training steps.
+        train_accuracies : list of train accuracies across epochs.
+    """
+
+    train_accuracies = [0] * num_epoch
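+    # Each epoch performs one gradient step on the full training batch.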
+    for epoch in range(num_epoch):
+        w1, b1, w2, b2, loss = learn_once_cross_entropy(w1, b1, w2, b2, data_train, labels_train, learning_rate)
+        labels_pred = predict_mlp(w1, b1, w2, b2, data_train)
+        accuracy = np.mean(labels_pred == labels_train)
+        train_accuracies[epoch] = accuracy
+
+        print(f"Epoch loss [{epoch+1}/{num_epoch}] : {loss} --- accuracy : {accuracy}")
+
+    return w1, b1, w2, b2, train_accuracies
+
+# This function cannot be named 'test_mlp': pytest would collect it as a test
+# and error on its required arguments, hence the name 'Test_mlp'.
+def Test_mlp(w1, b1, w2, b2, data_test, labels_test):
+    """Test the MLP on test data and compute the accuracy.
+    Returns:
+        float: test accuracy on data_test
+    """
+    labels_pred = predict_mlp(w1, b1, w2, b2, data_test)
+    test_accuracy = np.mean(labels_pred == labels_test)
+    
+    return test_accuracy
+
+def run_mlp_training(data_train, labels_train, data_test, labels_test, d_h, learning_rate, num_epoch):
+    """
+    Train a simple Neural Net with d_h hidden neurons and return the performance of the obtained model.
+    Returns:
+        train_accuracies (list): list of training accuracies over num_epoch steps.
+        test_accuracy (float): the accuracy of the predictions of the trained model.
+    """
+    d_in = data_train.shape[1]
+    d_out = len(set(labels_train))
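+    # For CIFAR-10: d_in = 3072 (32x32x3 pixel values) and d_out = 10 classes.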
+
+    # Random initialization of the network weights and biases
+    w1 = 2 * np.random.rand(d_in, d_h) - 1   # first layer weights
+    b1 = np.zeros((1, d_h))                  # first layer biases
+    w2 = 2 * np.random.rand(d_h, d_out) - 1  # second layer weights
+    b2 = np.zeros((1, d_out))                # second layer biases
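+    # Weights are drawn uniformly in [-1, 1); a scaled initialization such as
+    # uniform in [-1/sqrt(d_in), 1/sqrt(d_in)] is a common alternative.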
+
+    w1, b1, w2, b2, train_accuracies = train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch)
+    test_accuracy = Test_mlp(w1, b1, w2, b2, data_test, labels_test)
+
+    return train_accuracies, test_accuracy
+
+def plot_accuracy_versus_epoch(accuracies):
+    """This function plots the variation of the accuracy as a function of the
+    epoch and saves the plot into /resultats.
+    Args:
+        accuracies (List): the list of training accuracies, one per epoch.
+    """
+
+    plt.figure(figsize=(18, 10))
+    plt.plot(accuracies, 'o-b')
+    plt.title("Variation of the accuracy over the epochs")
+    plt.xlabel("Epochs")
+    plt.ylabel("Accuracy")
+    plt.grid(axis='both', which='both')
+    plt.savefig(r'C:\Users\hp\Desktop\BE\image-classification\resultats\mlp.png')
\ No newline at end of file
diff --git a/read_cifar.py b/read_cifar.py
index bc11bac06a81539364b298640d2d88efe0425425..55ed7ebc66d3f833a52564949f7ddc19077dd4d7 100644
--- a/read_cifar.py
+++ b/read_cifar.py
@@ -8,37 +8,27 @@ def unpickle(file):
         dict = pickle.load(fo, encoding='bytes')
     return dict
 
-#La fonction lecture_cifar : prenne en argument le chemin du répertoire contenant les données, et renvoyant une matrice X de taille NxD où N correspond au nombre de données disponibles, et D à la dimension de ces données (nombre de valeurs numériques décrivant les données), ainsi qu'un vecteur Y de taille N dont les valeurs correspondent au code de la classe de la donnée de même indice dans X. 
 
-#X et Y sont objets numpy
 def read_cifar_batch(file):
     """
-    read_cifaar_batch function: read the path of a single batch.
-
-    Arguments:
-    - The path of a single batch as a string, 
-
-    Returns: 
-    - Matrix data of size (batch_size x data_size)
-    - Vector labels of size batch_size
+    The read_cifar_batch function takes as argument the path of a single batch file and returns:
+    - a matrix data of size N x D, where N is the number of available samples and D the dimension
+      of those samples (the number of numerical values describing each sample),
+    - a vector labels of size N whose values are the class code of the sample at the same index in data.
+    data and labels are numpy arrays.
     """
+
     dict = unpickle(file)
     data = dict[b'data'].astype(np.float32)
     labels = np.array(dict[b'labels'], dtype=np.int64)
     labels = labels.reshape(labels.shape[0])
-
     return data, labels
 
 def read_cifar(path):
     """
-    read_cifaar function: read the path of the directory containing all batches (including test_batch).
-
-    Arguments:
-    - the path of the directory containing the six batches (five data_batch and one test_batch) as a string
-
-    Returns:
-    - Matrix data of size (batch_size x data_size)
-    - Vector labels of size batch_size
+    This function takes as argument the path of the directory containing all batches (including test_batch) and returns:
+    - a matrix data of size (batch_size x data_size)
+    - a vector labels of size batch_size
     """
     data_batches = ["data_batch_" + str(i) for i in range(1, 6)] + ['test_batch']
 
@@ -57,17 +47,8 @@ def read_cifar(path):
 
 def split_dataset(data, labels, split=0.6):
     """
-    split_dataset function: splits the dataset into a training set and a test set.
-
-    Arguments:
-    - data and labels, two arrays that have the same size in the first dimension.
-    - split, a float between 0 and 1 which determines the split factor of the training set with respect to the test set.
+    This function splits the dataset into a training set and a test set.
 
-    Returns:
-    - data_train: the training data,
-    - labels_train: the corresponding labels,
-    - data_test: the testing data, and
-    - labels_test: the corresponding labels.
     """
     n = data.shape[0]
     indices = np.random.permutation(n)
diff --git a/resultats/Knn.png b/resultats/Knn.png
index 926556a3309ceb450d489a8e4e5d4d85b3763aee..d0ec86484604e1735247aa9c4de02ee0a31e9f40 100644
Binary files a/resultats/Knn.png and b/resultats/Knn.png differ
diff --git a/resultats/mlp.png b/resultats/mlp.png
new file mode 100644
index 0000000000000000000000000000000000000000..9243fc8ec1de2f9d6babe5f203e7ae9f38e7a554
Binary files /dev/null and b/resultats/mlp.png differ