diff --git a/data/cifar-10-batches-py/batches.meta b/data/cifar-10-batches-py/batches.meta
new file mode 100644
index 0000000000000000000000000000000000000000..4467a6ec2e886a9f14f25e31776fb0152d8ac64a
Binary files /dev/null and b/data/cifar-10-batches-py/batches.meta differ
diff --git a/data/cifar-10-batches-py/data_batch_1 b/data/cifar-10-batches-py/data_batch_1
new file mode 100644
index 0000000000000000000000000000000000000000..ab404a5ac32492b807a5c6cd02b83dc4dd5ff980
Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_1 differ
diff --git a/data/cifar-10-batches-py/data_batch_2 b/data/cifar-10-batches-py/data_batch_2
new file mode 100644
index 0000000000000000000000000000000000000000..6bf1369a6cacadfdbd2f8c61e354cc7d0c17bbae
Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_2 differ
diff --git a/data/cifar-10-batches-py/data_batch_3 b/data/cifar-10-batches-py/data_batch_3
new file mode 100644
index 0000000000000000000000000000000000000000..66a0d630a7eb736563b1861ce716bdc489f2113b
Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_3 differ
diff --git a/data/cifar-10-batches-py/data_batch_4 b/data/cifar-10-batches-py/data_batch_4
new file mode 100644
index 0000000000000000000000000000000000000000..cf8d03d1e80e6d9e440d1764faa85aedd1d6b960
Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_4 differ
diff --git a/data/cifar-10-batches-py/data_batch_5 b/data/cifar-10-batches-py/data_batch_5
new file mode 100644
index 0000000000000000000000000000000000000000..468b2aa538c551bc9f590f213b19d96915b85062
Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_5 differ
diff --git a/data/cifar-10-batches-py/readme.html b/data/cifar-10-batches-py/readme.html
new file mode 100644
index 0000000000000000000000000000000000000000..e377adef45c85dc91051edf2dee72c1d4d57732c
--- /dev/null
+++ b/data/cifar-10-batches-py/readme.html
@@ -0,0 +1 @@
+<meta HTTP-EQUIV="REFRESH" content="0; url=http://www.cs.toronto.edu/~kriz/cifar.html">
diff --git a/data/cifar-10-batches-py/test_batch b/data/cifar-10-batches-py/test_batch
new file mode 100644
index 0000000000000000000000000000000000000000..3e03f1fc5261d102600fc1c130454f1f5cda567b
Binary files /dev/null and b/data/cifar-10-batches-py/test_batch differ
diff --git a/knn.py b/knn.py
new file mode 100644
index 0000000000000000000000000000000000000000..584ff72b1ea1f2b604f4842204e6789d5c79af74
--- /dev/null
+++ b/knn.py
@@ -0,0 +1,74 @@
+import numpy as np
+
+# Question 1
+# X1: train set, X2: test set
+def distance_matrix(X1, X2):
+    # vectorized L2 distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
+    x1 = np.sum(X1 * X1, axis=1)[:, None]      # squared norms as a column vector
+    x2 = np.sum(X2 * X2, axis=1)[np.newaxis]   # squared norms as a row vector
+    # clip at 0 to absorb tiny negative values from floating-point round-off
+    dists = np.sqrt(np.maximum(x1 + x2 - 2 * (X1 @ X2.T), 0))
+    return dists
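+
+# Minimal sanity check (ours, not part of the assignment): the vectorized
+# matrix should match a brute-force double loop on small random inputs.
+def _check_distance_matrix():
+    rng = np.random.default_rng(0)
+    A = rng.random((4, 3))
+    B = rng.random((5, 3))
+    brute = np.array([[np.linalg.norm(a - b) for b in B] for a in A])
+    assert np.allclose(distance_matrix(A, B), brute)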
+
+# Question 2
+# dists is the distance matrix, of shape [n_train, n_test]
+def knn_predict(dists, labels_train, k):
+    n_test = dists.shape[1]
+    labels_test = np.zeros(n_test, dtype=labels_train.dtype)
+    for i in range(n_test):
+        nearest_indices = np.argsort(dists[:, i])[:k]
+        nearest_labels = labels_train[nearest_indices]
+        uni_labels, counts = np.unique(nearest_labels, return_counts=True)
+        labels_test[i] = uni_labels[np.argmax(counts)]
+    return labels_test
+
+
+# Question 3
+def evaluate_knn(data_train, labels_train, data_test, labels_test, k):
+    dists = distance_matrix(data_train, data_test)
+    predicted = knn_predict(dists, labels_train, k)
+    # accuracy = fraction of test predictions that match the true test labels
+    accuracy = np.mean(predicted == labels_test)
+    return accuracy
+
+
+if __name__ == '__main__':
+
+    # Question 4
+    from read_cifar import read_cifar, split_dataset
+    import matplotlib.pyplot as plt
+
+    split = 0.9
+    path = 'data/cifar-10-batches-py'  # relative to the repository root
+    data, labels = read_cifar(path)
+    data_train, labels_train, data_test, labels_test = split_dataset(data, labels, split)
+    K = list(range(1, 21))
+    A = []
+    for k in K:
+        accuracy = evaluate_knn(data_train, labels_train, data_test, labels_test, k)
+        A.append(accuracy)
+        print(accuracy)
+
+    plt.plot(K, A)
+    plt.title("Accuracy = f(k)")
+    plt.xlabel("k")
+    plt.ylabel("Accuracy")
+    # save before show: plt.show() clears the figure, so saving afterwards
+    # would write an empty image
+    plt.savefig('results/knn.png')
+    plt.show()
+
+
+# Conclusion:
+# The accuracy stays around 10% for the values of k explored, which is the
+# chance level for 10 classes and confirms that k-NN on raw pixels is poorly
+# suited to image classification.
diff --git a/mlp.py b/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f55733fec42a20173dda442580c3b9893e6ad65
--- /dev/null
+++ b/mlp.py
@@ -0,0 +1,176 @@
+import numpy as np
+
+
+# Q10
+def learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate):
+
+    # Forward pass
+    a0 = data  # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    # sigmoid output, consistent with the a2 * (1 - a2) factor in the backward pass
+    a2 = 1 / (1 + np.exp(-z2))  # output of the output layer
+    predictions = a2  # the predicted values are the outputs of the output layer
+
+    # Compute loss (MSE)
+    loss = np.mean(np.square(predictions - targets))
+
+    # Backward pass
+    N, _ = np.shape(data)
+    da2 = 2 / N * (a2 - targets)
+    dz2 = da2 * a2 * (1 - a2)
+    dw2 = np.dot(np.transpose(a1), dz2)
+    db2 = np.sum(dz2, axis=0, keepdims=True)  # sum over the batch to match b2's shape
+
+    da1 = np.dot(dz2, np.transpose(w2))
+    dz1 = da1 * a1 * (1 - a1)
+    dw1 = np.dot(np.transpose(a0), dz1)
+    db1 = np.sum(dz1, axis=0, keepdims=True)
+
+    w1 -= learning_rate * dw1
+    b1 -= learning_rate * db1
+    w2 -= learning_rate * dw2
+    b2 -= learning_rate * db2
+
+    return w1, b1, w2, b2, loss
+
+# Q11
+def one_hot(A):
+    # build a (n_samples, n_classes) matrix with a single 1 per row
+    n = np.size(A)
+    n_classes = np.max(A) + 1
+    R = np.zeros((n, n_classes))
+    R[np.arange(n), A] = 1
+    return R
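+
+# A quick worked example of the encoding (ours, for illustration):
+# one_hot(np.array([0, 2, 1])) returns
+# [[1., 0., 0.],
+#  [0., 0., 1.],
+#  [0., 1., 0.]]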
+
+
+# Q12
+def learn_once_cross_entropy(w1, b1, w2, b2, data, labels_train, learning_rate):
+    N, _ = np.shape(data)
+    # encode the labels as one-hot vectors
+    one_hot_labels = one_hot(labels_train)
+    # Forward pass
+    a0 = data  # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    # softmax activation: normalize row by row, not over the whole batch
+    e = np.exp(z2 - np.max(z2, axis=1, keepdims=True))  # shift for numerical stability
+    a2 = e / np.sum(e, axis=1, keepdims=True)  # output of the output layer
+    predictions = a2  # the predicted values are the outputs of the output layer
+
+    # Compute loss (categorical cross-entropy, averaged over the batch)
+    loss = -np.sum(one_hot_labels * np.log(predictions + 1e-12)) / N
+
+    # Backward pass
+    # with softmax + cross-entropy, the output-layer gradient simplifies to a2 - y
+    dz2 = (a2 - one_hot_labels) / N
+    dw2 = np.dot(np.transpose(a1), dz2)
+    db2 = np.sum(dz2, axis=0, keepdims=True)
+
+    da1 = np.dot(dz2, np.transpose(w2))
+    dz1 = da1 * a1 * (1 - a1)
+    dw1 = np.dot(np.transpose(a0), dz1)
+    db1 = np.sum(dz1, axis=0, keepdims=True)
+
+    w1 -= learning_rate * dw1
+    b1 -= learning_rate * db1
+    w2 -= learning_rate * dw2
+    b2 -= learning_rate * db2
+
+    # predictions (a2) are returned for the accuracy computation in Question 13
+    return w1, b1, w2, b2, loss, a2
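+
+# Quick smoke test (ours, not part of the assignment): on a tiny fixed
+# problem, successive gradient steps should drive the loss down.
+def _check_learn_once_cross_entropy():
+    rng = np.random.default_rng(0)
+    X = rng.random((8, 5))
+    y = np.array([0, 1, 2, 0, 1, 2, 0, 1])
+    w1, b1 = rng.random((5, 4)), np.zeros((1, 4))
+    w2, b2 = rng.random((4, 3)), np.zeros((1, 3))
+    losses = []
+    for _ in range(50):
+        w1, b1, w2, b2, loss, _ = learn_once_cross_entropy(w1, b1, w2, b2, X, y, 0.1)
+        losses.append(loss)
+    assert losses[-1] < losses[0]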
+
+# Q13
+def train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch):
+    N, _ = np.shape(data_train)
+    train_accuracies = []
+    for i in range(num_epoch):
+        w1, b1, w2, b2, loss, predictions = learn_once_cross_entropy(w1, b1, w2, b2, data_train, labels_train, learning_rate)
+        # predictions is a matrix of class probabilities; the predicted class of
+        # each sample is the index of its largest probability
+        predicted_classes = np.argmax(predictions, axis=1)
+        train_accuracies.append(np.mean(predicted_classes == labels_train))
+
+    return w1, b1, w2, b2, train_accuracies
+
+# Q14
+def test_mlp(w1, b1, w2, b2, data_test, labels_test):
+
+    # Forward pass only: no weight update at test time
+    a0 = data_test  # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    e = np.exp(z2 - np.max(z2, axis=1, keepdims=True))
+    a2 = e / np.sum(e, axis=1, keepdims=True)  # softmax output, row by row
+
+    predicted_classes = np.argmax(a2, axis=1)
+    test_accuracy = np.mean(predicted_classes == labels_test)
+
+    return test_accuracy
+
+# Q15
+def run_mlp_training(data_train, labels_train, data_test, labels_test, d_h, learning_rate, num_epoch):
+    _, d_in = np.shape(data_train)
+    d_out = 1 + np.max(labels_train)
+
+    w1 = 2 * np.random.rand(d_in, d_h) - 1  # first layer weights
+    b1 = np.zeros((1, d_h))  # first layer biases
+    w2 = 2 * np.random.rand(d_h, d_out) - 1  # second layer weights
+    b2 = np.zeros((1, d_out))  # second layer biases
+
+    # keep the trained weights: testing with the initial random weights would
+    # measure nothing but chance
+    w1, b1, w2, b2, train_accuracies = train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch)
+    test_accuracy = test_mlp(w1, b1, w2, b2, data_test, labels_test)
+
+    return train_accuracies, test_accuracy
+
+# Q16
+if __name__ == '__main__':
+
+    from read_cifar import read_cifar, split_dataset
+    import matplotlib.pyplot as plt
+
+    split = 0.9
+    d_h = 64
+    learning_rate = 0.1
+    num_epoch = 100
+    path = 'data/cifar-10-batches-py'  # relative to the repository root
+    data, labels = read_cifar(path)
+    data_train, labels_train, data_test, labels_test = split_dataset(data, labels, split)
+
+    train_accuracies, test_accuracy = run_mlp_training(data_train, labels_train, data_test, labels_test, d_h, learning_rate, num_epoch)
+    print(train_accuracies)
+    print("test accuracy:", test_accuracy)
+    # plot one accuracy value per training epoch (appending the test accuracy
+    # here would make the list one item longer than the x axis)
+    epochs = list(range(1, num_epoch + 1))
+    plt.plot(epochs, train_accuracies)
+    plt.title("Accuracy = f(epoch)")
+    plt.xlabel("epoch")
+    plt.ylabel("Accuracy")
+    plt.savefig('results/mlp.png')
+    plt.show()
diff --git a/read_cifar.py b/read_cifar.py
new file mode 100644
index 0000000000000000000000000000000000000000..e824371b501a85eae939da4cbf2e95b8eae13d4b
--- /dev/null
+++ b/read_cifar.py
@@ -0,0 +1,62 @@
+import numpy as np
+import pickle
+
+# Question 2
+def read_cifar_batch(path):
+    # CIFAR-10 batches are pickled with bytes keys, hence encoding='bytes'
+    with open(path, 'rb') as fo:
+        dic = pickle.load(fo, encoding='bytes')
+
+    data = np.array(dic[b'data'], np.float32)
+    labels = np.array(dic[b'labels'], np.int64)
+
+    return data, labels
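+
+# Each row of data holds one 32x32 RGB image flattened to 3072 values (the
+# 1024 red values first, then green, then blue). To view an image, e.g.:
+# img = data[0].reshape(3, 32, 32).transpose(1, 2, 0).astype(np.uint8)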
+
+# Question 3
+def read_cifar(path):
+    batch_names = ['data_batch_' + str(i) for i in range(1, 6)] + ['test_batch']
+    data, labels = read_cifar_batch(path + '/' + batch_names[0])
+    for name in batch_names[1:]:
+        data_i, labels_i = read_cifar_batch(path + '/' + name)
+        data = np.concatenate((data, data_i), axis=0)
+        labels = np.concatenate((labels, labels_i), axis=0)
+
+    return data, labels
+
+
+
+# Question 4
+def split_dataset(data, labels, split):
+    n = np.size(labels)
+    # shuffle data and labels with the same random permutation
+    perm = np.arange(n)
+    np.random.shuffle(perm)
+    data = data[perm]
+    labels = labels[perm]
+    n_train = int(split * n)
+    return data[:n_train], labels[:n_train], data[n_train:], labels[n_train:]
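+
+# Quick shape check (ours, not part of the assignment): with 10 samples and
+# split=0.9 the train arrays keep 9 samples and the test arrays keep 1.
+def _check_split_dataset():
+    X = np.zeros((10, 4))
+    y = np.arange(10)
+    X_tr, y_tr, X_te, y_te = split_dataset(X, y, 0.9)
+    assert X_tr.shape == (9, 4) and y_te.shape == (1,)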
+
+# tests: run only when this file is executed directly, so that importing the
+# module does not re-read the whole dataset as a side effect
+if __name__ == '__main__':
+    path = 'data/cifar-10-batches-py'  # relative to the repository root
+    # test 1
+    print(read_cifar_batch(path + '/data_batch_1'))
+    # test 2
+    data, labels = read_cifar(path)
+    print(data, labels)
+    # test 3
+    print(split_dataset(data, labels, 0.5))