Commit ef4c73ac authored by Khalil

commit

parent 3644e96e
@@ -58,7 +58,7 @@ if __name__== '__main__':
     plt.title("Accuracy=f(k)")
     plt.xlabel("k")
     plt.ylabel("Accuracy")
-    plt.savefig('C:\\Users\\LENOVO\\Desktop\\deeplearning\\BE1 - Image Classification\\image-classification\\results')
+    plt.savefig('C:\\Users\\LENOVO\\Desktop\\deeplearning\\BE1 - Image Classification\\image-classification\\results\\knn.png')
     plt.show()
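Note on this hunk: the fix adds the missing knn.png filename, but the absolute Windows path will still break on any other machine. A more portable sketch, assuming the results/ folder sits next to the script (the RESULTS_DIR name and the pathlib approach are illustrative, not part of the commit; plt is the script's existing matplotlib import):

    from pathlib import Path

    # Hypothetical: resolve results/ relative to this script instead of
    # an absolute C:\Users\... path, creating it if needed.
    RESULTS_DIR = Path(__file__).resolve().parent / "results"
    RESULTS_DIR.mkdir(exist_ok=True)
    plt.savefig(RESULTS_DIR / "knn.png")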
@@ -9,7 +9,7 @@ def learn_once_mse(w1,b1,w2,b2,data,targets,learning_rate):
     z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
     a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
     z2 = np.matmul(a1, w2) + b2  # input of the output layer
-    a2 = np.exp(z2)/np.sum(z2)  # output of the output layer (softmax activation function)
+    a2 = 1 / (1 + np.exp(-z2))  # output of the output layer (sigmoid activation function)
     predictions = a2  # the predicted values are the outputs of the output layer
     # Compute loss (MSE)
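Note on the removed line: np.exp(z2)/np.sum(z2) was not a valid softmax, since the denominator sums the raw logits rather than the exponentials, and it does not normalize row by row. For comparison, a numerically stable row-wise softmax would look like this sketch (not part of the commit):

    import numpy as np

    def softmax(z):
        # Shift each row by its max for numerical stability, then
        # normalize the exponentials so every row sums to 1.
        e = np.exp(z - np.max(z, axis=1, keepdims=True))
        return e / np.sum(e, axis=1, keepdims=True)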
@@ -64,17 +64,17 @@ def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
     predictions = a2  # the predicted values are the outputs of the output layer
     # Compute loss (binary cross-entropy)
-    loss = -np.sum(one_hot_labels*np.log(predictions) + (1-one_hot_labels)*np.log(1-predictions))/N
+    loss = -np.sum(one_hot_labels*np.log(predictions) + (1-one_hot_labels)*np.log(1-predictions))
     # Backward pass
     dz2 = a2 - one_hot_labels
-    dw2 = np.dot(np.transpose(a1), dz2)
+    dw2 = np.dot(np.transpose(a1), dz2)/N
     db2 = dz2
     da1 = np.dot(dz2, np.transpose(w2))
     dz1 = da1*a1*(1-a1)
-    dw1 = np.dot(np.transpose(a0), dz1)
+    dw1 = np.dot(np.transpose(a0), dz1)/N
     db1 = dz1
     w1 -= learning_rate*dw1
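This hunk moves the 1/N batch average from the loss to the weight gradients, so the reported loss is now a sum over the batch while dw1 and dw2 correspond to the mean. Note that db1 and db2 remain full per-sample matrices rather than batch averages. A sketch of output-layer gradients consistent with the new dw2 (illustrative, assuming b2 has shape (1, n_out)):

    # Batch-averaged gradients for the output layer (sketch, not the commit):
    dz2 = a2 - one_hot_labels                     # (N, n_out)
    dw2 = np.dot(np.transpose(a1), dz2) / N       # (n_hidden, n_out)
    db2 = np.sum(dz2, axis=0, keepdims=True) / N  # (1, n_out), matches b2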
@@ -88,17 +88,19 @@ def learn_once_cross_entropy(w1,b1,w2,b2,data,labels_train,learning_rate):
 #Q13
 def train_mlp(w1,b1,w2,b2,data_train,labels_train,learning_rate,num_epoch):
-    # encoding one-hot labels
-    one_hot_labels = one_hot(labels_train)
+    one_hot_labels = one_hot(labels_train)  # encoding one-hot labels
     N, _ = np.shape(data_train)
     train_accuracies = []
     for i in range(num_epoch):
         w1, b1, w2, b2, loss, predictions = learn_once_cross_entropy(w1, b1, w2, b2, data_train, labels_train, learning_rate)
         # predictions is a matrix of probabilities; put a one at the biggest probability of each individual row
         maxi = np.max(predictions, 1)
         predictions_zeros_ones = np.floor(predictions/maxi[:, np.newaxis]).astype(int)
-        A = np.sum(one_hot_labels == predictions_zeros_ones)
+        A = np.sum(np.all(one_hot_labels == predictions_zeros_ones, axis=1))
         train_accuracies.append(A/N)
+        print(i, A/N)
     return w1, b1, w2, b2, train_accuracies
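The np.all fix is the important change here: summing the raw element-wise comparison counts every matching 0 as well as the matching 1s, which inflates A far beyond N; with np.all(..., axis=1) a row only counts when the whole one-hot prediction matches. An equivalent and simpler accuracy based on np.argmax (a sketch, not what the commit uses) also avoids the floor/max trick's tie edge case, where several columns of a row can be set to 1 at once:

    # Sketch: accuracy via argmax instead of the floor/max trick.
    predicted_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(one_hot_labels, axis=1)
    accuracy = np.mean(predicted_classes == true_classes)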
@@ -119,9 +121,12 @@ def test_mlp(w1,b1,w2,b2,data_test,labels_test):
     N, _ = np.shape(data_test)
     maxi = np.max(predictions, 1)
     predictions_zeros_ones = np.floor(predictions/maxi[:, np.newaxis]).astype(int)
-    A = np.sum(one_hot_labels == predictions_zeros_ones)
+    V = np.all(one_hot_labels == predictions_zeros_ones, axis=1)
+    A = np.sum(V)
     test_accuracy = A/N
+    print('test', A/N)
     return test_accuracy
 #Q15
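For context, a minimal usage sketch of train_mlp and test_mlp together; the layer sizes, the initialization, and the data_train/data_test variables are illustrative assumptions, not from this commit:

    import numpy as np

    # Hypothetical sizes: 3072 flattened input pixels, 64 hidden units, 10 classes.
    d_in, d_h, d_out = 3072, 64, 10
    w1 = 2 * np.random.rand(d_in, d_h) - 1   # weights drawn in [-1, 1)
    b1 = np.zeros((1, d_h))
    w2 = 2 * np.random.rand(d_h, d_out) - 1
    b2 = np.zeros((1, d_out))

    w1, b1, w2, b2, train_accuracies = train_mlp(
        w1, b1, w2, b2, data_train, labels_train, learning_rate=0.1, num_epoch=100)
    test_accuracy = test_mlp(w1, b1, w2, b2, data_test, labels_test)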
results/mpl.png (binary image replaced: 25.9 KiB → 20.5 KiB)