diff --git a/knn.py b/knn.py
index 6332eddc448d7c139f676123fdab10b02b355f2c..fece0caf4ff327e411841430f7df1bd0a3cb6b6b 100644
--- a/knn.py
+++ b/knn.py
@@ -1,24 +1,30 @@
 import read_cifar
 import numpy as np
+import matplotlib.pyplot as plt
 
 def distance_matrix(matrix1, matrix2):
     #X_test then X_train in this order
-    sum_of_squares_matrix1 = np.sum(np.square(matrix1), axis=1, keepdims=True)
-    sum_of_squares_matrix2 = np.sum(np.square(matrix2), axis=1, keepdims=True)
+    sum_of_squares_matrix1 = np.sum(np.square(matrix1), axis=1, keepdims=True) # A^2: squared norm of each test row
+    sum_of_squares_matrix2 = np.sum(np.square(matrix2), axis=1, keepdims=True) # B^2: squared norm of each train row
 
-    dot_product = np.dot(matrix1, matrix2.T)
+    dot_product = np.dot(matrix1, matr2.T if False else matrix2.T) # A.B^T (matrix multiplication)
     
-    dists = np.sqrt(sum_of_squares_matrix1 + sum_of_squares_matrix2.T - 2 * dot_product)
+    dists = np.sqrt(sum_of_squares_matrix1 + sum_of_squares_matrix2.T - 2 * dot_product) # pairwise Euclidean distances: sqrt(A^2 + B^2 - 2 A.B^T)
     return dists
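+
+# A minimal sanity-check sketch (the helper name is hypothetical): the vectorized
+# distances above should match a naive per-pair computation, via the identity
+# ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
+def _check_distance_matrix(matrix1, matrix2):
+    naive = np.array([[np.linalg.norm(a - b) for b in matrix2] for a in matrix1])
+    return np.allclose(distance_matrix(matrix1, matrix2), naive)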
 
 def knn_predict(dists, labels_train, k):
     output = []
+    # Loop over all test images
     for i in range(len(dists)):
+        # Initialize the vote count for each of the 10 classes
         res = [0] * 10
-        b = np.argsort(dists[i])[:k]
-        for lab in b:
-            res[labels_train[lab]] += 1
-        label_temp = np.argmax(res) #Attention à la logique ici
+        # Get the indices of the k closest training samples
+        nearest_indices = np.argsort(dists[i])[:k]
+        for idx in nearest_indices:
+            # Add a vote for this neighbor's class
+            res[labels_train[idx]] += 1
+        # Get the class with the most votes
+        label_temp = np.argmax(res) # Careful: if two or more classes tie, np.argmax returns the first maximum encountered, e.g. np.argmax([3, 3, 1]) -> 0
         output.append(label_temp)
     return(np.array(output))
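+
+# A tiny self-contained demo (synthetic data, illustrative only): two
+# well-separated clusters should be recovered with k=3.
+def _demo_knn_predict():
+    X_train = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
+    y_train = np.array([0, 0, 1, 1])
+    X_test = np.array([[0.05, 0.0], [5.05, 5.0]])
+    dists = distance_matrix(X_test, X_train)
+    return knn_predict(dists, y_train, 3)  # expected: array([0, 1])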
 
@@ -30,20 +36,39 @@ def evaluate_knn(data_train, labels_train, data_test, labels_tests, k):
     N = labels_tests.shape[0]
     accuracy = (labels_tests == result_test).sum() / N
     return(accuracy)
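+
+# Hypothetical quick check on a subset (sizes are illustrative):
+# data, labels = read_cifar.read_cifar('image-classification/data/cifar-10-batches-py')
+# X_train, X_test, y_train, y_test = read_cifar.split_dataset(data, labels, 0.9)
+# print(evaluate_knn(X_train[:1000], y_train[:1000], X_test[:200], y_test[:200], 5))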
-    
-
 
+def bench_knn():
 
+    k_values = list(range(1, 20, 2)) # odd values of k from 1 to 19
+    accuracies = []
 
-
-if __name__ == "__main__":
-
+    # Load data
     data, labels = read_cifar.read_cifar('image-classification/data/cifar-10-batches-py')
-    X_train, X_test, y_train, y_test = read_cifar.split_dataset(data, labels, 0.8)
-    print(evaluate_knn(X_train[:1000], y_train[:1000], X_test, y_test, 5))
+    X_train, X_test, y_train, y_test = read_cifar.split_dataset(data, labels, 0.9)
+    # Alternative: load a single batch only
+    # data, labels = read_cifar.read_cifar_batch('image-classification/data/cifar-10-batches-py/data_batch_1')
+    # X_train, X_test, y_train, y_test = read_cifar.split_dataset(data, labels, 0.9)
+
+    # Evaluate the accuracy for each value of k
+    for k in k_values:
+        accuracy = evaluate_knn(X_train, y_train, X_test, y_test, k)
+        accuracies.append(accuracy)
+    
+    # Plot accuracy vs. k; save before plt.show(), which may clear the figure
+    fig = plt.figure()
+    plt.plot(k_values, accuracies)
+    plt.title("Accuracy as a function of k")
+    plt.xlabel("k")
+    plt.ylabel("Accuracy")
+    plt.savefig('image-classification/results/knn_batch_1.png')
+    plt.show()
+    plt.close(fig)
 
 
+if __name__ == "__main__":
 
+    bench_knn()
+    # data, labels = read_cifar.read_cifar('image-classification/data/cifar-10-batches-py')
+    # X_train, X_test, y_train, y_test = read_cifar.split_dataset(data, labels, 0.9)
+    # print(evaluate_knn(X_train, y_train, X_test, y_test, 5))
     # print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
 
     # y_test = []
diff --git a/mlp.py b/mlp.py
index 36635fb25f0d27ad67a7f01951b80867322ffce5..5f449bff4fe38e44be026dd32b8ddc5586c3c24e 100644
--- a/mlp.py
+++ b/mlp.py
@@ -110,45 +110,6 @@ def learn_once_cross_entropy(w1, b1, w2, b2, data, labels_train, learning_rate):
 
     return w1, b1, w2, b2, loss
 
-def learn_once_cross_entropy_2(w1, w2, data, labels_train, learning_rate):
-
-    N_out = len(labels_train) #number of training examples
-
-    # Forward pass
-    # Feedforward propagation
-    z1 = np.dot(data, w1)
-    a1 = sigmoid(z1)
-    z2 = np.dot(a1, w2)
-    a2 = sigmoid(z2)
-
-
-    # Compute loss (cross-entropy loss)
-    y_true_one_hot = one_hot(labels_train)
-    loss = cross_entropy_loss(a2, y_true_one_hot)
-    
-    # Backpropagation
-    E1 = a2 - np.eye(10)[labels_train]
-    dw1 = E1 * a2 * (1 - a2)
-    E2 = np.dot(dw1, w2.T)
-    dw2 = E2 * a1 * (1 - a1)
-
-    # Update weights
-    W2_update = np.dot(a1.T, dw1) / N_out
-    W1_update = np.dot(data.T, dw2) / N_out
-    w2 = w2 - learning_rate * W2_update
-    w1 = w1 - learning_rate * W1_update
-
-    return w1, w2, loss
-
-def forward_2(w1, w2, data):
-    # Forward pass
-    a0 = data # the data are the input of the first layer
-    z1 = np.matmul(a0, w1)  # input of the hidden layer
-    a1 = sigmoid(z1)  # output of the hidden layer (sigmoid activation function)
-    z2 = np.matmul(a1, w2)  # input of the output layer
-    a2 = softmax_stable(z2)  # output of the output layer (sigmoid activation function)
-    predictions = a2  # the predicted values are the outputs of the output layer
-    return(predictions)
 
 def forward(w1, b1, w2, b2, data):
     # Forward pass
@@ -177,22 +138,6 @@ def train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch
         print(f'Epoch {epoch + 1}/{num_epoch}, Loss: {loss:.3f}, Train Accuracy: {accuracy:.2f}')
 
     return w1, b1, w2, b2, train_accuracies
-def train_mlp_2(w1, w2, data_train, labels_train, learning_rate, num_epoch):
-    train_accuracies = []
-    for epoch in range(num_epoch):
-        w1, w2, loss = learn_once_cross_entropy_2(w1, w2, data_train, labels_train, learning_rate)
-        # Compute accuracy
-        predictions = forward_2(w1, w2, data_train)
-        predicted_labels = np.argmax(predictions, axis=1)
-        # print(predictions.shape)
-        # print(predicted_labels.shape)
-        # print(labels_train.shape)
-        accuracy = np.mean(predicted_labels == labels_train)
-        train_accuracies.append(accuracy)
-
-        print(f'Epoch {epoch + 1}/{num_epoch}, Loss: {loss:.3f}, Train Accuracy: {accuracy:.2f}')
-
-    return w1, w2, train_accuracies
 
 def test_mlp(w1, b1, w2, b2, data_test, labels_test):