Skip to content
Snippets Groups Projects
Commit b1739e66 authored by Saidi Aya's avatar Saidi Aya
Browse files

Create mlp.py

parent b4689a40
Branches
No related tags found
No related merge requests found
mlp.py 0 → 100644
import numpy as np
# We are using the sigmoid activation function
def segmoid(x):
    """Sigmoid activation function, 1 / (1 + e^-x).

    NOTE: "segmoid" is a misspelling of "sigmoid", kept because callers
    in this file use this name.

    Parameters
    ----------
    x : float or np.ndarray
        Pre-activation value(s); applied element-wise for arrays.

    Returns
    -------
    float or np.ndarray
        Value(s) in the open interval (0, 1).
    """
    # May overflow in np.exp for large negative x (harmless warning,
    # result underflows toward 0).
    return 1 / (1 + np.exp(-x))
# We also need the derivative of the sigmoid for the gradient computation
def derivation(x):
    """Derivative of the sigmoid, s(x) * (1 - s(x)).

    Parameters
    ----------
    x : float or np.ndarray
        Pre-activation value(s); applied element-wise for arrays.

    Returns
    -------
    float or np.ndarray
        Derivative value(s) in (0, 0.25].
    """
    # Compute the sigmoid once instead of twice (the original called
    # segmoid(x) two times).
    s = segmoid(x)
    return s * (1 - s)
def learn_once_mse(w1,b1,w2,b2,data,targets,learning_rate):
# This function performs one gradient descent step
# w1, b1, w2 and b2 -- the weights and biases of the network,
# data -- a matrix of shape (batch_size x d_in)
# targets -- a matrix of shape (batch_size x d_out)
# learning_rate -- the learning rate
A0=data
A1=segmoid(np.matmul(A0, w1) + b1)
A2=segmoid(np.matmul(A1,w2) + b2)
    # Let's calculate the partial derivatives
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment