From a60e1f05fed421cd5e0cef6386303d96c741368b Mon Sep 17 00:00:00 2001
From: lucile <lucile.audard@ecl20.ec-lyon.fr>
Date: Thu, 9 Nov 2023 12:41:38 +0100
Subject: [PATCH] Update mlp.py

---
 mlp.py | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/mlp.py b/mlp.py
index 8518643..f0d84e0 100644
--- a/mlp.py
+++ b/mlp.py
@@ -78,5 +78,44 @@ def learn_once_cross_entropy(w1, b1, w2, b2, data, labels_train, learning_rate):
     return w1, b1, w2, b2, loss
 
 
+def train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch):
+    """Train the network for num_epoch epochs and return the updated weights,
+    biases and the list of training accuracies measured after each epoch."""
+    train_accuracies = [0] * num_epoch
+    for i in range(num_epoch):
+        # One gradient descent step on the cross-entropy loss
+        w1, b1, w2, b2, loss = learn_once_cross_entropy(w1, b1, w2, b2, data_train, labels_train, learning_rate)
+
+        # Forward pass with the updated parameters
+        a0 = data_train  # the data are the input of the first layer
+        z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+        a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
+        z2 = np.matmul(a1, w2) + b2  # input of the output layer
+        a2 = 1 / (1 + np.exp(-z2))  # output of the output layer (sigmoid activation function)
+
+        # Predicted class = index of the largest output unit
+        predictions = np.argmax(a2, axis=1)
+
+        # Accuracy over the training set at this epoch
+        train_accuracies[i] = np.mean(labels_train == predictions)
+
+    return w1, b1, w2, b2, train_accuracies
+
+
+def test_mlp(w1, b1, w2, b2, data_test, labels_test):
+    """Evaluate the trained network on the test set and return the test accuracy."""
+    # Forward pass
+    a0 = data_test  # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    a2 = 1 / (1 + np.exp(-z2))  # output of the output layer (sigmoid activation function)
+
+    # Predicted class = index of the largest output unit
+    predictions = np.argmax(a2, axis=1)
+
+    # Accuracy over the test set
+    test_accuracy = np.mean(labels_test == predictions)
+    return test_accuracy
 
 
-- 
GitLab
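
A minimal usage sketch of the two new functions. The layer sizes, the random initialisation, the learning rate, the epoch count and the placeholder data with integer class labels are assumptions for illustration only, not part of the patch:

    import numpy as np

    from mlp import train_mlp, test_mlp

    d_in, d_h, d_out = 3072, 64, 10          # assumed input, hidden and output sizes
    w1 = 2 * np.random.rand(d_in, d_h) - 1   # first-layer weights in [-1, 1]
    b1 = np.zeros((1, d_h))                  # first-layer biases
    w2 = 2 * np.random.rand(d_h, d_out) - 1  # second-layer weights in [-1, 1]
    b2 = np.zeros((1, d_out))                # second-layer biases

    # Placeholder data; integer class labels, as assumed by the accuracy computation
    data_train = np.random.rand(100, d_in)
    labels_train = np.random.randint(d_out, size=100)
    data_test = np.random.rand(20, d_in)
    labels_test = np.random.randint(d_out, size=20)

    w1, b1, w2, b2, train_accuracies = train_mlp(
        w1, b1, w2, b2, data_train, labels_train, learning_rate=0.1, num_epoch=100
    )
    print(test_mlp(w1, b1, w2, b2, data_test, labels_test))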