diff --git a/mlp.py b/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..f669758cc32274ac93c982f4c7196a8cde4b1523
--- /dev/null
+++ b/mlp.py
@@ -0,0 +1,21 @@
+import numpy as np
# We use the logistic sigmoid activation function (spelled "segmoid" throughout this file).
def segmoid(x):
    """Element-wise logistic sigmoid, 1 / (1 + e^(-x)).

    Accepts a scalar or a NumPy array and returns the same shape.
    NOTE(review): the name is a misspelling of "sigmoid"; it is kept
    unchanged because other code in this file calls it by this name.
    """
    return np.reciprocal(1.0 + np.exp(-x))
+
# We also need the sigmoid's derivative to compute the gradients during backpropagation.
def derivation(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x)).

    Accepts a scalar or a NumPy array and returns the same shape.
    Fix: the original evaluated segmoid(x) twice per call; here the
    sigmoid is computed once and reused (also inlined so the function
    has no dependency on the sibling helper).
    """
    s = 1.0 / (1.0 + np.exp(-x))  # same formula as segmoid() above
    return s * (1.0 - s)
+
def learn_once_mse(w1, b1, w2, b2, data, targets, learning_rate):
    """Perform one gradient-descent step on a 1-hidden-layer sigmoid MLP under MSE loss.

    Fix: the original body was truncated — it computed only the forward
    pass (A0, A1, A2) and then stopped, with no loss, no gradients, no
    parameter update and no return value. This completes the step.

    Parameters
    ----------
    w1, b1 : hidden-layer weights (d_in x d_h) and biases — biases are
        assumed row-shaped (1 x d_h) or flat (d_h,); TODO confirm with callers.
    w2, b2 : output-layer weights (d_h x d_out) and biases (1 x d_out) or (d_out,).
    data : input batch, shape (batch_size x d_in).
    targets : target batch, shape (batch_size x d_out).
    learning_rate : gradient-descent step size.

    Returns
    -------
    (w1, b1, w2, b2, loss) : updated parameters (new arrays — the caller's
    inputs are not mutated) and the MSE loss measured BEFORE the update.
    """
    def _sigmoid(z):
        # Local copy of the module-level segmoid() so this function is self-contained.
        return 1.0 / (1.0 + np.exp(-z))

    # Forward pass.
    a0 = data
    a1 = _sigmoid(np.matmul(a0, w1) + b1)   # hidden activations
    a2 = _sigmoid(np.matmul(a1, w2) + b2)   # network predictions

    # MSE loss: mean of squared errors over every element of the batch.
    loss = np.mean((a2 - targets) ** 2)

    # Backward pass. d(mean((a2-t)^2))/d(a2) = 2*(a2-t)/targets.size, and the
    # sigmoid's derivative is s*(1-s), applied at each layer via the chain rule.
    dz2 = (2.0 * (a2 - targets) / targets.size) * a2 * (1.0 - a2)
    dw2 = np.matmul(a1.T, dz2)
    db2 = np.sum(dz2, axis=0)
    dz1 = np.matmul(dz2, w2.T) * a1 * (1.0 - a1)
    dw1 = np.matmul(a0.T, dz1)
    db1 = np.sum(dz1, axis=0)

    # Gradient-descent update (broadcasting keeps the callers' bias shapes).
    w1 = w1 - learning_rate * dw1
    b1 = b1 - learning_rate * db1
    w2 = w2 - learning_rate * dw2
    b2 = b2 - learning_rate * db2
    return w1, b1, w2, b2, loss