diff --git a/knn.py b/knn.py
index 584ff72b1ea1f2b604f4842204e6789d5c79af74..23f64f3ee1d67c334d5300de7a604cd8bd1c33c0 100644
--- a/knn.py
+++ b/knn.py
@@ -57,9 +57,10 @@ if __name__== '__main__':
     plt.plot(K,A)
     plt.title("Accuracy=f(k)")
     plt.xlabel("k")
-    plt.ylabel("Accuracy")   
+    plt.ylabel("Accuracy") 
+    plt.savefig('results/knn.png')
     plt.show() 
-    plt.savefig('results/knn.png')
+
 
 
 #conclusion:
diff --git a/mlp.py b/mlp.py
index 4f55733fec42a20173dda442580c3b9893e6ad65..77dc772a4c987640b6001611dcbb692a78404fcf 100644
--- a/mlp.py
+++ b/mlp.py
@@ -45,6 +45,12 @@ def one_hot(A):
 
 
 #Q12
+# Numerically stable softmax: shift each row by its max before exponentiating
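+# e.g. softmax(np.array([[1000., 0.]])) returns [[1., 0.]], where a naive
+# np.exp(z) / np.sum(np.exp(z)) would overflow to inf/inf = nan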
+def softmax(z):
+    exp_z = np.exp(z - np.max(z, axis=1, keepdims=True))
+    return exp_z / np.sum(exp_z, axis=1, keepdims=True)
+
+
 def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
     N,_=np.shape(data)
     #encoding one hot labels
@@ -54,7 +60,7 @@ def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
     z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
     a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
     z2 = np.matmul(a1, w2) + b2  # input of the output layer
-    a2 = np.exp(z2)/np.sum(z2)  # output of the output layer (softmax activation function)
+    a2 = softmax(z2)  # output of the output layer (softmax activation function)
     predictions = a2  # the predicted values are the outputs of the output layer
 
     # Compute loss (Binary X-entropy)
@@ -84,7 +90,7 @@ def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
 def train_mlp(w1,b1,w2,b2,data_train,labels_train,learning_rate,num_epoch):
     #encoding one hot labels
     one_hot_labels = one_hot(labels_train)
-    N,_=np.shape(data)
+    N,_=np.shape(data_train)
     train_accuracies=[]
     for i in range(num_epoch):
         w1,b1,w2,b2, loss, predictions= learn_once_cross_entropy(w1,b1,w2,b2,data_train,labels_train,learning_rate)
@@ -106,11 +112,11 @@ def test_mlp(w1,b1,w2,b2,data_test,labels_test):
     z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
     a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
     z2 = np.matmul(a1, w2) + b2  # input of the output layer
-    a2 = np.exp(z2)/np.sum(z2)  # output of the output layer (softmax activation function)
+    a2 = softmax(z2)  # output of the output layer (softmax activation function)
     predictions = a2
 
 
-    N,_=np.shape(data)
+    N,_=np.shape(data_test)
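+    # flooring predictions/row-max turns each row into a one-hot argmax vector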
     maxi=np.max(predictions,1)
     predictions_zeros_ones=np.floor(predictions/maxi[:, np.newaxis]).astype(int)
     A=np.sum(one_hot_labels==predictions_zeros_ones)
@@ -151,14 +157,13 @@ if __name__== '__main__':
     train_accuracies, test_accuracy=run_mlp_training(data_train, labels_train, data_test, labels_test,d_h,learning_rate,num_epoch)
     train_accuracies.append(test_accuracy)
     print(train_accuracies)
-    K=list(range(1,num_epoch+1))
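+    # num_epoch training accuracies plus the appended test accuracy: num_epoch+1 points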
+    K=list(range(num_epoch+1))
     plt.plot(K,train_accuracies)
-    plt.title("Accuracy=f(k)")
-    plt.xlabel("k")
-    plt.ylabel("Accuracy")   
-    plt.show() 
-    plt.savefig('results/knn.png')
-
+    plt.title("Accuracy=f(epoch)")
+    plt.xlabel("epoch")
+    plt.ylabel("Accuracy") 
+    plt.savefig('results/mlp.png')
+    plt.show()
 
 
 
diff --git a/results/knn.png b/results/knn.png
new file mode 100644
index 0000000000000000000000000000000000000000..8f10e2be9388a90b5dcb2719f3035192db69527e
Binary files /dev/null and b/results/knn.png differ
diff --git a/results/mlp.png b/results/mlp.png
new file mode 100644
index 0000000000000000000000000000000000000000..643b74238983a35fd944f52f4e934a3f879781eb
Binary files /dev/null and b/results/mlp.png differ