diff --git a/mlp.py b/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc28bc2a6d51eb90d0e28dfab1a094cbbcaf4fc0
--- /dev/null
+++ b/mlp.py
@@ -0,0 +1,50 @@
+import numpy as np
+
+
+N = 30  # number of input samples
+d_in = 3  # input dimension
+d_h = 3  # number of neurons in the hidden layer
+d_out = 2  # output dimension (number of neurons in the output layer)
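+# Together these constants define a fully connected 3-3-2 network: 3 inputs, one hidden layer of 3 units, 2 outputs.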
+
+# Random initialization of the network weights and biases
+w1 = 2 * np.random.rand(d_in, d_h) - 1  # first layer weights, uniform in [-1, 1)
+b1 = np.zeros((1, d_h))  # first layer biases
+w2 = 2 * np.random.rand(d_h, d_out) - 1  # second layer weights, uniform in [-1, 1)
+b2 = np.zeros((1, d_out))  # second layer biases
+
+data = np.random.rand(N, d_in)  # random input data
+targets = np.random.rand(N, d_out)  # random targets
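+
+# Sanity check (an illustrative addition, not part of the original exercise):
+# confirm that the array shapes line up before running the forward pass.
+assert data.shape == (N, d_in) and targets.shape == (N, d_out)
+assert w1.shape == (d_in, d_h) and w2.shape == (d_h, d_out)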
+
+# Forward pass
+a0 = data  # the data are the input of the first layer
+z1 = np.matmul(a0, w1) + b1  # input of the hidden layer
+a1 = 1 / (1 + np.exp(-z1))  # output of the hidden layer (sigmoid activation function)
+z2 = np.matmul(a1, w2) + b2  # input of the output layer
+a2 = 1 / (1 + np.exp(-z2))  # output of the output layer (sigmoid activation function)
+predictions = a2  # the predicted values are the outputs of the output layer
+
+# Compute the loss (MSE), averaged over all N * d_out output values
+loss = np.mean(np.square(predictions - targets))
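+
+# Illustrative sketch, not part of the original forward-pass code: one batch
+# gradient-descent step on the MSE above, assuming the usual backpropagation
+# rules with sigmoid'(z) = a * (1 - a); learning_rate is a hypothetical value.
+learning_rate = 0.1  # hypothetical value, for illustration only
+dz2 = 2 / (N * d_out) * (a2 - targets) * a2 * (1 - a2)  # dL/dz2 through the sigmoid
+dw2 = np.matmul(a1.T, dz2)  # dL/dw2
+db2 = np.sum(dz2, axis=0, keepdims=True)  # dL/db2
+dz1 = np.matmul(dz2, w2.T) * a1 * (1 - a1)  # backpropagate to the hidden layer
+dw1 = np.matmul(a0.T, dz1)  # dL/dw1
+db1 = np.sum(dz1, axis=0, keepdims=True)  # dL/db1
+w1, b1 = w1 - learning_rate * dw1, b1 - learning_rate * db1  # gradient step
+w2, b2 = w2 - learning_rate * dw2, b2 - learning_rate * db2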
+
+
+if __name__ == "__main__":
+    print("MSE loss:", loss)