Skip to content
Snippets Groups Projects
Commit aa8bc07b authored by Dubray Chloe's avatar Dubray Chloe
Browse files

Update mlp.py

parent a4b8f722
Branches
No related tags found
No related merge requests found
......@@ -77,7 +77,7 @@ def learn_one_cross_entropy (w1, b1, w2, b2, data, labels_train, learning_rate)
#Ajout d'un coefficient epsilon très faible dans la fonction de coût pour éviter les problèmes de division par zéro
epsilon = 0.00001
loss = -np.sum(y * np.log2(predictions + epsilon) + (1 - y) * np.log2(1 - predictions + epsilon)) / N
loss = -np.sum(y * np.log(predictions + epsilon) + (1 - y) * np.log(1 - predictions + epsilon)) / N
return (w1, b1, w2, b2, loss)
......
0% loaded. Loading failed or was interrupted.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment