Skip to content
Snippets Groups Projects
Commit 98dc5f38 authored by Audard Lucile's avatar Audard Lucile
Browse files

Update mlp.py

parent 3b9f1865
No related branches found
No related tags found
No related merge requests found
......@@ -58,7 +58,22 @@ def learn_once_cross_entropy(w1, b1, w2, b2, data, labels_train, learning_rate):
targets_one_hot = one_hot(labels_train) # target as a one-hot encoding for the desired labels
# cross-entropy loss
loss =
loss = -np.sum(targets_one_hot * np.log(predictions)) / N
# Backpropagation
d_z2 = a2 - targets_one_hot
d_w2 = np.dot(a1.T, d_z2) / N
d_b2 = d_z2 / N
d_a1 = np.dot(d_z2, w2.T)
d_z1 = d_a1 * z1 * (1 - a1)
d_w1 = np.dot(a0.T, d_z1) / N
d_b1 = d_z1 / N
# Calculation of the updated weights and biases of the network with gradient descent method
w1 -= learning_rate * d_w1
w2 -= learning_rate * d_w2
b2 -= learning_rate * d_b2
b1 -= learning_rate * d_b1
return w1, b1, w2, b2, loss
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment