diff --git a/knn.py b/knn.py
index 0be4178eaf8d12e447b0e66e298ccff85b04f52b..1fd7fa48ac3bf08a5583b63d19aa2920f107c265 100644
--- a/knn.py
+++ b/knn.py
@@ -8,49 +8,65 @@ import read_cifar
 import numpy as np
 import statistics
 from statistics import mode
+import time
+import matplotlib.pyplot as plt
 
 def distance_matrix(A,B) : 
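+    """Return the matrix of Euclidean distances between each row of A and each row of B,
+    with shape (A.shape[0], B.shape[0]), using ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b."""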
-    # sum_of_squaresA = np.sum(A ** 2, axis=1)
-    # sum_of_squaresB = np.sum(B ** 2, axis=1)
-    sum_of_squaresA = np.sum(np.square(A), axis=1)
-    sum_of_squaresB = np.sum(np.square(B) ** 2, axis=1)
-    
+    print("test0")
+    sum_of_squaresA= np.sum(A**2, axis = 1, keepdims = True)
+    sum_of_squaresB = np.sum(B**2, axis = 1)
+    print("test1")
+    # sum_of_squaresA = np.tile(sum_of_squaresAVect, (np.shape(B)[0], 1))
+    # sum_of_squaresB = np.tile(sum_of_squaresBVect, (np.shape(A)[0], 1))
 
     # Calculate the dot product between the two matrices
-    dot_product = np.dot(A, B.T)
-
+    dot_product = np.matmul(A, B.T)
     # Calculate the Euclidean distance matrix using the hint provided
     dists = np.sqrt(sum_of_squaresA + sum_of_squaresB - 2 * dot_product)
-
+    print("test3")
     return dists
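+
+# A minimal sanity check (illustrative only, not part of the assignment): the
+# vectorized distances should match a brute-force double loop on small random
+# inputs. The helper name _check_distance_matrix is hypothetical.
+def _check_distance_matrix():
+    rng = np.random.default_rng(0)
+    A = rng.random((4, 3))
+    B = rng.random((5, 3))
+    expected = np.array([[np.linalg.norm(a - b) for b in B] for a in A])
+    assert np.allclose(distance_matrix(A, B), expected)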
 
 def knn_predict(dists, labels_train, k) : 
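+    """Predict one label per test sample (one column of dists) by majority vote
+    among its k nearest training samples."""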
-    number_test, number_train = dists.shape
+    number_train, number_test = dists.shape
     
     # initialize the predicted labels to zeros
     labels_predicted = np.zeros(number_test)
     
-    for i in range(number_test) : 
-        sorted_indices = np.argsort(dists[i])
+    for j in range(number_test) : 
+        sorted_indices = np.argsort(dists[:, j])
         knn_indices = sorted_indices[ : k]
         knn_labels = labels_train[knn_indices]
         label_predicted = mode(knn_labels)
-        labels_predicted[i] = label_predicted
+        labels_predicted[j] = label_predicted
         
     return labels_predicted
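+
+# Illustrative alternative (a sketch, not used above): a NumPy-only majority vote
+# for one test sample, assuming integer class labels. np.bincount avoids calling
+# statistics.mode once per column of dists.
+def majority_vote(knn_labels):
+    counts = np.bincount(np.asarray(knn_labels, dtype=int))
+    return int(np.argmax(counts))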
 
 def evaluate_knn(data_train, labels_train, data_test, labels_test, k) :
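+    """Return the classification accuracy of the k-NN classifier on the test set."""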
-    dists = distance_matrix(data_test, data_train)
+    dists = distance_matrix(data_train, data_test)
     labels_predicted = knn_predict(dists, labels_train, k)
     number_true_prediction = np.sum(labels_test == labels_predicted)
     number_total_prediction = labels_test.shape[0]
     classification_rate = number_true_prediction/number_total_prediction
     
     return classification_rate
+
+def plot_accuracy(data_train, labels_train, data_test, labels_test, k_max) : 
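+    """Plot the k-NN accuracy for k = 1..k_max and save the figure to results/knn.png."""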
+    Y = []
+    for k in range(1, k_max+1) : 
+        Y += [evaluate_knn(data_train, labels_train, data_test, labels_test, k)]
+    plt.plot(list(range(1, k_max+1)), Y)
+    plt.xlabel('k (Number of Neighbors)')
+    plt.ylabel('Accuracy')
+    plt.savefig('results/knn.png')
+    plt.close()
     
     
 if __name__ == "__main__" :
-    
+    t1 = time.time()
     # # Example distance matrix, training labels, and k value
     # dists = np.array([[1000, 2, 3],
     #                  [4, 0.1, 6],
@@ -62,14 +78,23 @@ if __name__ == "__main__" :
     # predicted_labels = knn_predict(dists, labels_train, k)
 
     
-    classification_rate = evaluate_knn(np.array([[1, 27], [100, 300]]), np.array([0.002, 9000]), np.array([[25, 350]]), np.array([9000]), 1)
-    print("Classification rate:")
-    print(classification_rate)    
-
-    # file = "./data/cifar-10-python/"
-    # data, labels = read_cifar.read_cifar(file)
-    # data_train, labels_train, data_test, labels_test = read_cifar.split_dataset(data, labels, 0.8)
+    # classification_rate = evaluate_knn(np.array([[1, 27], [100, 300]]), np.array([0.002, 9000]), np.array([[25, 350]]), np.array([9000]), 1)
+    # print("Classification rate:")
+    # print(classification_rate)    
 
+    file = "./data/cifar-10-python/"
+    data, labels = read_cifar.read_cifar(file)
+    data_train, labels_train, data_test, labels_test = read_cifar.split_dataset(data, labels, 0.9)
+    k = 10
+    print("train set:", data_train.shape, "test set:", data_test.shape)
     # dists = distance_matrix(data_train, data_test)
-    # k = 2
-    # knn_predict(dists, labels_train, k)
\ No newline at end of file
+    # knn_predict(dists, labels_train, k)
+    classification_rate = evaluate_knn(data_train, labels_train, data_test, labels_test, k)
+    print("classification rate :", classification_rate)
+    # plot_accuracy(data_train, labels_train, data_test, labels_test, 4)
+    t2 = time.time()
+    print('run time (second): ')
+    print(t2-t1)
\ No newline at end of file
diff --git a/mlp.py b/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..825ce394b75b7307eeeace901b920186255b6d9c
--- /dev/null
+++ b/mlp.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Oct 27 16:48:16 2023
+
+@author: oscar
+"""
+
+import numpy as np
+
+
+def learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate) : 
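+    """Perform one gradient descent step on the MSE loss for a one-hidden-layer MLP
+    with sigmoid activations; return the updated w1, b1, w2, b2 and the loss."""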
+    a0 = data # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    a2 = 1 / (1 + np.exp(-z2))  # output of the output layer (sigmoid activation function)
+    predictions = a2  # the predicted values are the outputs of the output layer
+    N = targets.shape[0]
+    
+    # calculation of the partial derivatives of the cost C
+    dCdA2 = 2/N * (a2 - targets)
+    dCdZ2 = dCdA2 * (a2 - a2**2)
+    dCdW2 = np.matmul(a1.T, dCdZ2)
+    dCdB2 = np.sum(dCdZ2, axis=0, keepdims=True)  # dCdZ2 already carries the 1/N normalization from dCdA2
+    dCdA1 = np.matmul(dCdZ2, w2.T)
+    dCdZ1 = dCdA1 * (a1 - a1**2)
+    dCdW1 = np.matmul(a0.T, dCdZ1)
+    dCdB1 = np.sum(dCdZ1, axis=0, keepdims=True)  # dCdZ1 already carries the 1/N normalization from dCdA2
+    
+    # one gradient descent step
+    w1 -= dCdW1 * learning_rate
+    b1 -= dCdB1 * learning_rate
+    w2 -= dCdW2 * learning_rate
+    b2 -= dCdB2 * learning_rate
+    
+    loss = np.mean(np.square(predictions - targets))
+    
+    return w1, b1, w2, b2, loss
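+
+
+# A minimal usage sketch (illustrative sizes and random data, not from the assignment):
+# initialize a d_in -> d_h -> d_out network and run one training step.
+if __name__ == "__main__":
+    d_in, d_h, d_out, N = 3, 4, 2, 10                 # hypothetical dimensions
+    w1 = 2 * np.random.rand(d_in, d_h) - 1            # first layer weights
+    b1 = np.zeros((1, d_h))                           # first layer biases
+    w2 = 2 * np.random.rand(d_h, d_out) - 1           # second layer weights
+    b2 = np.zeros((1, d_out))                         # second layer biases
+    data = np.random.rand(N, d_in)                    # random input batch
+    targets = np.random.rand(N, d_out)                # random targets
+    w1, b1, w2, b2, loss = learn_once_mse(w1, b1, w2, b2, data, targets, 0.1)
+    print("loss after one step:", loss)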
diff --git a/read_cifar.py b/read_cifar.py
index 6d5369d0cb4811936d948940ff6bd82e1f671615..f25324d500f4cdcb148bd6dcb38f9b482cbe749e 100644
--- a/read_cifar.py
+++ b/read_cifar.py
@@ -24,24 +24,41 @@ def read_cifar (batch_dir) :
     data_batches = []
     label_batches = []
     
-    for i in range(1,6) :
+    for i in range(1,4) :
         batch_filename = f'data_batch_{i}'
         batch_path = os.path.join(batch_dir, batch_filename)
         data, labels = read_cifar_batch(batch_path)
         data_batches.append(data)
         label_batches.append(labels)
         
-        test_batch_filename = 'test_batch'
-        test_batch_path = os.path.join(batch_dir, test_batch_filename)
-        data_test, labels_test = read_cifar_batch(test_batch_path)
-        data_batches.append(data_test)
-        label_batches.append(labels_test)
+        # test_batch_filename = 'test_batch'
+        # test_batch_path = os.path.join(batch_dir, test_batch_filename)
+        # data_test, labels_test = read_cifar_batch(test_batch_path)
+        # data_batches.append(data_test)
+        # label_batches.append(labels_test)
         
         data = np.concatenate(data_batches, axis=0)
         labels = np.concatenate(label_batches, axis=0)
 
     return data, labels
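+
+# Note: with range(1, 4) only data_batch_1..3 are loaded; each CIFAR-10 batch holds
+# 10000 images of 32*32*3 = 3072 values, so data has shape (30000, 3072) here.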
 
+# def read_cifar(directory_path):
+#     batches = os.listdir(directory_path)
+#     data=None
+#     labels=None
+
+#     for batch in batches:
+#         batch_path = os.path.join(directory_path, batch)
+#         if not batch_path.endswith(".meta"):
+#             data_batch,labels_batch=read_cifar_batch(batch_path)
+#             if data is None:
+#                 data=data_batch
+#                 labels=labels_batch
+#             else:
+#                 data=np.concatenate((data,data_batch))
+#                 labels=np.concatenate((labels,labels_batch))
+#     return(data, labels)
+
 def split_dataset(data, labels, split) : 
     
     number_total = data.shape[0]
diff --git a/results/knn.png b/results/knn.png
new file mode 100644
index 0000000000000000000000000000000000000000..393d194fbe612e40cf83ef7dd8f14d34f3fc698b
Binary files /dev/null and b/results/knn.png differ
diff --git a/test1.py b/test1.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bc93fb8ee1dd43c732eb6da06ce8dc62c77586b
--- /dev/null
+++ b/test1.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Oct 23 19:43:47 2023
+
+@author: oscar
+"""
+
+import numpy as np
+from collections import Counter
+import read_cifar
+
+def distance_matrix(M1,M2):
+    # dists(i, j) = distance between the i-th row of M1 and the j-th row of M2,
+    # i.e. sqrt(sum_p (M1[i,p] - M2[j,p])^2), which expands to
+    # sum_p M1[i,p]^2 + sum_p M2[j,p]^2 - 2 * sum_p M1[i,p] * M2[j,p]
+
+    l1=np.shape(M1)[0]
+    l2=np.shape(M2)[0]
+    Vect1=np.sum(M1**2,1)
+    Vect2=np.sum(M2**2,1)
+
+    Mat1=np.tile(Vect1, (l2,1))
+    Mat2=np.tile(Vect2, (l1,1))
+    Mat3=2*np.dot(M1,M2.T)
+    
+    dists=np.sqrt(Mat1.T+Mat2-Mat3)
+
+    return dists
+
+def knn_predict(dists,labels_train,k):
+    labels_predict=np.array([])
+    size_test=np.shape(dists)[1]
+    for j in range(size_test):
+        list_arg_min=np.argsort(dists[:,j])
+        labels_sorted=[labels_train[i] for i in list_arg_min]
+        k_labels=labels_sorted[:k]
+        count = Counter(k_labels)
+
+        labels_predict=np.append(labels_predict,count.most_common(1)[0][0])
+    
+    return labels_predict
+
+def evaluate_knn(data_train,data_test,labels_train,labels_test,k):
+    dists=distance_matrix(data_train,data_test)
+    labels_predict=knn_predict(dists,labels_train,k)
+    count=np.sum(labels_predict==labels_test)
+    return count / labels_predict.shape[0]
+
+if __name__ == "__main__":
+    file = "./data/cifar-10-python/"
+    data, labels = read_cifar.read_cifar(file)
+    data_train,labels_train,data_test,labels_test=read_cifar.split_dataset(data,labels,0.9)
+    print(evaluate_knn(data_train,data_test,labels_train,labels_test,20))
\ No newline at end of file