diff --git a/mlp.py b/mlp.py
index 8518643b265faafce530a11b54ac70fe10dda235..f0d84e019172cf6ec8d0fb16b58d3808ad805aa0 100644
--- a/mlp.py
+++ b/mlp.py
@@ -78,5 +78,48 @@ def learn_once_cross_entropy(w1, b1, w2, b2, data, labels_train, learning_rate):
     return w1, b1, w2, b2, loss
 
 
+def train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epoch):
+    train_accuracies = [0] * num_epoch
+    for i in range(num_epoch):
+        # One gradient step on the full training set
+        w1, b1, w2, b2, loss = learn_once_cross_entropy(w1, b1, w2, b2, data_train, labels_train, learning_rate)
+
+        # Forward pass with the updated weights
+        a0 = data_train  # the data are the input of the first layer
+        z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+        a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation)
+        z2 = np.matmul(a1, w2) + b2  # input of the output layer
+        a2 = 1 / (1 + np.exp(-z2))  # output of the output layer (sigmoid activation)
+
+        # Predicted class: index of the largest output unit
+        predicted_classes = np.argmax(a2, axis=1)
+
+        # Training accuracy at this epoch
+        train_accuracies[i] = np.mean(labels_train == predicted_classes)
+
+    return w1, b1, w2, b2, train_accuracies
+
+
+def test_mlp(w1, b1, w2, b2, data_test, labels_test):
+    # Forward pass on the test set (same architecture as train_mlp)
+    a1 = 1 / (1 + np.exp(-(np.matmul(data_test, w1) + b1)))  # hidden layer
+    a2 = 1 / (1 + np.exp(-(np.matmul(a1, w2) + b2)))  # output layer
+
+    # Accuracy: share of test samples whose predicted class matches the label
+    test_accuracy = np.mean(np.argmax(a2, axis=1) == labels_test)
+    return test_accuracy
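+
+
+# A minimal usage sketch, kept as a comment; the layer sizes, initialisation,
+# and hyperparameters below are illustrative assumptions, not values fixed by
+# this repo:
+#
+#     d_in, d_h, d_out = data_train.shape[1], 64, 10  # hypothetical dimensions
+#     w1 = 2 * np.random.rand(d_in, d_h) - 1          # hypothetical init
+#     b1 = np.zeros((1, d_h))
+#     w2 = 2 * np.random.rand(d_h, d_out) - 1
+#     b2 = np.zeros((1, d_out))
+#     w1, b1, w2, b2, accs = train_mlp(w1, b1, w2, b2, data_train, labels_train, 0.1, 100)
+#     print(test_mlp(w1, b1, w2, b2, data_test, labels_test))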