Commit 3644e96e authored by Khalil

Code modifications

Added the accuracy graphs.
The MLP graph is incorrect, with values > 1 :(((
Still to be reviewed.
parent 68e10453
@@ -58,8 +58,9 @@ if __name__== '__main__':
     plt.title("Accuracy=f(k)")
     plt.xlabel("k")
     plt.ylabel("Accuracy")
-    plt.savefig('C:\\Users\\LENOVO\\Desktop\\deeplearning\\BE1 - Image Classification\\image-classification\\results')
     plt.show()
+    plt.savefig('results/knn.png')
     #conclusion:
...
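A note on the reordering in this hunk: with non-interactive Matplotlib backends, plt.show() blocks and the figure is torn down once the window is closed, so a plt.savefig() placed after plt.show() often writes out an empty image. A minimal sketch of the safer order, saving before showing (the k range and accuracy values are placeholders, not results from this repository):

import os
import matplotlib.pyplot as plt

os.makedirs('results', exist_ok=True)  # savefig fails if the folder is missing

k_values = list(range(1, 21))                     # illustrative k range
accuracies = [0.30 + 0.01 * k for k in k_values]  # placeholder data

plt.plot(k_values, accuracies)
plt.title("Accuracy=f(k)")
plt.xlabel("k")
plt.ylabel("Accuracy")
plt.savefig('results/knn.png')  # save while the figure still exists
plt.show()                      # show last; the figure may be closed afterwards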
@@ -45,6 +45,12 @@ def one_hot(A):
 #Q12
+# Definition of softmax in a numerically stable way
+def softmax(z):
+    exp_z = np.exp(z - np.max(z, axis=1, keepdims=True))
+    return exp_z / np.sum(exp_z, axis=1, keepdims=True)
+
 def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
     N,_=np.shape(data)
     #encoding one hot labels
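The max-subtraction in the added softmax is what makes it numerically stable: softmax is invariant to subtracting a constant from each row, and shifting by the row maximum keeps every exponent at or below exp(0) = 1, so nothing overflows. A quick self-contained check with illustrative logits:

import numpy as np

def softmax(z):
    # shift each row by its max so the largest exponent is exp(0) = 1
    exp_z = np.exp(z - np.max(z, axis=1, keepdims=True))
    return exp_z / np.sum(exp_z, axis=1, keepdims=True)

z = np.array([[1000.0, 1001.0, 1002.0]])  # logits large enough to overflow exp()
naive = np.exp(z) / np.sum(np.exp(z), axis=1, keepdims=True)
print(naive)       # [[nan nan nan]] because exp(1000) overflows to inf
print(softmax(z))  # [[0.09003057 0.24472847 0.66524096]] finite and correct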
@@ -54,7 +60,7 @@ def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
     z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
     a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
     z2 = np.matmul(a1, w2) + b2  # input of the output layer
-    a2 = np.exp(z2)/np.sum(z2)  # output of the output layer (softmax activation function)
+    a2 = softmax(z2)  # output of the output layer (softmax activation function)
     predictions = a2  # the predicted values are the outputs of the output layer
     # Compute loss (Binary X-entropy)
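The loss computation itself is truncated just past this hunk, but with one-hot targets and a softmax output the quantity labelled "Binary X-entropy" in the comment is the categorical cross-entropy. A hedged sketch of that loss under those assumptions (one_hot_labels and a2 as in the diff; the eps guard and the function name are illustrative additions, not the repository's code):

import numpy as np

def cross_entropy_loss(one_hot_labels, a2, eps=1e-12):
    # mean over the batch of -sum_k y_k * log(p_k); eps guards against log(0)
    N = one_hot_labels.shape[0]
    return -np.sum(one_hot_labels * np.log(a2 + eps)) / N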
@@ -84,7 +90,7 @@ def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
 def train_mlp(w1,b1,w2,b2,data_train,labels_train,learning_rate,num_epoch):
     #encoding one hot labels
     one_hot_labels = one_hot(labels_train)
-    N,_=np.shape(data)
+    N,_=np.shape(data_train)
     train_accuracies=[]
     for i in range(num_epoch):
         w1,b1,w2,b2, loss, predictions= learn_once_cross_entropy(w1,b1,w2,b2,data_train,labels_train,learning_rate)
@@ -106,11 +112,11 @@ def test_mlp(w1,b1,w2,b2,data_test,labels_test):
     z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
     a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
     z2 = np.matmul(a1, w2) + b2  # input of the output layer
-    a2 = np.exp(z2)/np.sum(z2)  # output of the output layer (softmax activation function)
+    a2 = softmax(z2)  # output of the output layer (softmax activation function)
     predictions = a2
-    N,_=np.shape(data)
+    N,_=np.shape(data_test)
     maxi=np.max(predictions,1)
     predictions_zeros_ones=np.floor(predictions/maxi[:, np.newaxis]).astype(int)
     A=np.sum(one_hot_labels==predictions_zeros_ones)
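The accuracy lines left unchanged at the end of this hunk are a plausible source of the "values > 1" reported in the commit message: np.sum(one_hot_labels==predictions_zeros_ones) counts every agreeing entry, zeros included, so with 10 classes a perfectly classified batch of N samples scores about 10*N matches instead of N (and ties in np.max can add more). A sketch of an argmax-based accuracy that stays in [0, 1], assuming labels_test holds integer class indices as the one_hot encoding suggests (the function name is illustrative):

import numpy as np

def accuracy_score(predictions, labels_test):
    # one predicted class per row, compared against the integer label
    predicted_classes = np.argmax(predictions, axis=1)
    return np.mean(predicted_classes == labels_test)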
@@ -151,14 +157,13 @@ if __name__== '__main__':
     train_accuracies, test_accuracy=run_mlp_training(data_train, labels_train, data_test, labels_test,d_h,learning_rate,num_epoch)
     train_accuracies.append(test_accuracy)
     print(train_accuracies)
-    K=list(range(1,num_epoch+1))
+    K=list(range(num_epoch+1))
     plt.plot(K,train_accuracies)
-    plt.title("Accuracy=f(k)")
-    plt.xlabel("k")
+    plt.title("Accuracy=f(epoch)")
+    plt.xlabel("epoch")
     plt.ylabel("Accuracy")
-    plt.savefig('C:\\Users\\LENOVO\\Desktop\\deeplearning\\BE1 - Image Classification\\image-classification\\results\\mpl.png')
     plt.show()
+    plt.savefig('results/knn.png')
...
results/knn.png (30.2 KiB)
results/mpl.png (25.9 KiB)