diff --git a/mlp.py b/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..1505d0891bcd976023dec9ebefc33936311780bd
--- /dev/null
+++ b/mlp.py
@@ -0,0 +1,172 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+def sigmoid(x):
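+    """Logistic sigmoid activation, applied element-wise."""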
+    return 1 / (1 + np.exp(-x))
+
+def learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate):
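+    """Perform one gradient descent step of the two-layer network on the MSE loss.
+
+    Returns the updated weights and biases together with the loss value.
+    """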
+
+    # Forward pass
+    a0 = data  # the data are the input of the first layer
+    z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+    a1 = sigmoid(z1)  # output of the hidden layer (sigmoid activation function)
+    z2 = np.matmul(a1, w2) + b2  # input of the output layer
+    a2 = sigmoid(z2)  # output of the output layer (sigmoid activation function)
+    predictions = a2  # the predicted values are the outputs of the output layer
+
+    # Compute loss (MSE)
+    loss = np.mean(np.square(predictions - targets))
+
+    # Backward pass: gradient of the MSE loss through the sigmoid activations
+    dz2 = (2 / targets.size) * (a2 - targets) * a2 * (1 - a2)
+    dw2 = np.matmul(a1.T, dz2)
+    db2 = np.sum(dz2, axis=0, keepdims=True)
+    dz1 = np.matmul(dz2, w2.T) * a1 * (1 - a1)
+    dw1 = np.matmul(a0.T, dz1)
+    db1 = np.sum(dz1, axis=0, keepdims=True)
+
+    # Gradient descent update of the weights and biases
+    w1 -= learning_rate * dw1
+    b1 -= learning_rate * db1
+    w2 -= learning_rate * dw2
+    b2 -= learning_rate * db2
+
+    return w1, b1, w2, b2, loss
+
+
+def one_hot(vet):
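+    """One-hot encode a 1-D array of integer class labels.
+
+    Example: one_hot(np.array([1, 0, 2])) -> [[0, 1, 0], [1, 0, 0], [0, 0, 1]].
+    """
+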
+    encoded = np.zeros((len(vet), max(vet) + 1), dtype=int)
+    encoded[np.arange(len(vet)), vet] = 1
+    return encoded
+
+
+def learn_once_cross_entropy(w1, b1, w2, b2, data, labels_train, learning_rate):
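+    """Perform one gradient descent step on the binary cross-entropy loss.
+
+    Returns the updated weights and biases together with the loss value.
+    """
+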
+    # Forward Pass
+    Z1 = np.dot(data, w1) + b1
+    A1 = sigmoid(Z1)
+    Z2 = np.dot(A1, w2) + b2
+    A2 = sigmoid(Z2)
+
+    # Calculate loss (Binary Cross Entropy)
+    m = labels_train.shape[0]
+    epsilon = 1e-15  # small constant to avoid log(0)
+    loss = (-1.0 / m) * np.sum(labels_train * np.log(A2 + epsilon) + (1 - labels_train) * np.log(1 - A2 + epsilon))
+
+    # Backward Pass
+    dZ2 = A2 - labels_train
+    dW2 = (1 / m) * np.dot(A1.T, dZ2)
+    db2 = (1 / m) * np.sum(dZ2, axis=0)
+    dZ1 = np.dot(dZ2, w2.T) * A1 * (1 - A1)
+    dW1 = (1 / m) * np.dot(data.T, dZ1)
+    db1 = (1 / m) * np.sum(dZ1, axis=0)
+
+    # Update weights and biases
+    w1 -= learning_rate * dW1
+    b1 -= learning_rate * db1
+    w2 -= learning_rate * dW2
+    b2 -= learning_rate * db2
+
+    return w1, b1, w2, b2, loss
+
+
+def accuracy(Y, Y_pred):
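+    """Return the fraction of predictions in Y_pred that match the targets Y."""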
+    m = Y.shape[0]
+    correct_predictions = np.sum(Y == Y_pred)
+    return correct_predictions / m
+
+
+def train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epochs):
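+    """Train the network for num_epochs epochs, with one gradient descent update per training sample.
+
+    Returns the trained parameters and the list of training accuracies, one per epoch.
+    """
+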
+    train_accuracies = []
+
+    for epoch in range(num_epochs):
+        for i in range(data_train.shape[0]):
+            x = data_train[i:i+1]
+            y = labels_train[i:i+1]
+            w1, b1, w2, b2, loss = learn_once_cross_entropy(w1, b1, w2, b2, x, y, learning_rate)
+
+        # Calculate accuracy for the epoch
+        Z1 = np.dot(data_train, w1) + b1
+        A1 = sigmoid(Z1)
+        Z2 = np.dot(A1, w2) + b2
+        A2 = sigmoid(Z2)
+        train_pred = (A2 > 0.5).astype(int)
+        acc = accuracy(labels_train, train_pred)
+        train_accuracies.append(acc)
+
+    return w1, b1, w2, b2, train_accuracies
+
+def test_mlp(w1, b1, w2, b2, data_test, labels_test):
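+    """Run a forward pass on the test set and return the resulting accuracy."""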
+    Z1 = np.dot(data_test, w1) + b1
+    A1 = sigmoid(Z1)
+    Z2 = np.dot(A1, w2) + b2
+    A2 = sigmoid(Z2)
+    test_pred = (A2 > 0.5).astype(int)
+    test_acc = accuracy(labels_test, test_pred)
+    return test_acc
+
+def run_mlp_training(data_train, labels_train, data_test, labels_test, d_h, learning_rate, num_epochs):
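+    """Initialize a one-hidden-layer MLP, train it, and evaluate it on the test set.
+
+    Returns the per-epoch training accuracies and the final test accuracy.
+    """
+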
+    # Reshape the label vectors into column vectors so that they broadcast
+    # correctly against the (N, 1) network outputs
+    labels_train = labels_train.reshape(-1, 1)
+    labels_test = labels_test.reshape(-1, 1)
+
+    d_in = data_train.shape[1]
+    w1 = np.random.randn(d_in, d_h)
+    b1 = np.zeros((1, d_h))
+    w2 = np.random.randn(d_h, 1)
+    b2 = np.zeros((1, 1))
+
+    w1, b1, w2, b2, train_accuracies = train_mlp(w1, b1, w2, b2, data_train, labels_train, learning_rate, num_epochs)
+    test_accuracy = test_mlp(w1, b1, w2, b2, data_test, labels_test)
+
+    return train_accuracies, test_accuracy
+
+
+def main_MSE():
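+    """Demo: run a single MSE learning step on random data and print the loss."""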
+    N = 30  # number of input data
+    d_in = 3  # input dimension
+    d_h = 3  # number of neurons in the hidden layer
+    d_out = 2  # output dimension (number of neurons of the output layer)
+    learning_rate = 0.1
+
+    # Random initialization of the network weights and biases
+    w1 = 2 * np.random.rand(d_in, d_h) - 1  # first layer weights
+    b1 = np.zeros((1, d_h))  # first layer biases
+    w2 = 2 * np.random.rand(d_h, d_out) - 1  # second layer weights
+    b2 = np.zeros((1, d_out))  # second layer biases
+
+    data = np.random.rand(N, d_in)  # create random input data
+    targets = np.random.rand(N, d_out)  # create random targets
+
+    w1, b1, w2, b2, loss = learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate)
+
+    print('Loss (MSE): ' + str(loss))
+
+
+def main_CrossEntropy():
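+    """Demo: train the MLP with cross-entropy on random binary data and plot the training accuracy."""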
+    d_h = 64
+    learning_rate = 0.1
+    num_epochs = 100
+
+    # Generate some random sample data for demonstration;
+    # replace this with your actual dataset
+    data_train = np.random.rand(100, 10)
+    labels_train = np.random.randint(2, size=100)
+    data_test = np.random.rand(20, 10)
+    labels_test = np.random.randint(2, size=20)
+
+    # Call run_mlp_training with your data and parameters
+    train_accuracies, test_accuracy = run_mlp_training(data_train, labels_train, data_test, labels_test, d_h,
+                                                       learning_rate, num_epochs)
+
+    # Create a plot of training accuracies across epochs
+    plt.figure(figsize=(10, 6))
+    x = range(1, num_epochs + 1)
+    plt.plot(x, train_accuracies)
+    plt.xlabel('Epochs')
+    plt.ylabel('Accuracy')
+    plt.title('Training Accuracy Evolution')
+    plt.grid(True)
+
+    plt.savefig('mlp.png')
+    plt.show()
+
+
+if __name__ == "__main__":
+    main_MSE()
+    main_CrossEntropy()