diff --git a/.gitignore b/.gitignore
index 45755846413dd3cfa8c5f8237cc10e65c801b9ab..eb921714a55dfd01a060adc58aeda16ee22f9283 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,5 +3,5 @@ image/*
 _pycache_/*
 plotting.py
 image-classification/
-cifar.PNG
+
 
diff --git a/README.md b/README.md
index 77c08a88a1e81fab607f233d3710d3b7037883cd..0df47aac02c5acb42f1516fcee4202807c5408c9 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,17 @@ X, y = rc.read_cifar('data')
 # Split the Dataset
 X_train, y_train, X_test, y_test = rc.split_dataset(X, y, split=0.9) 
 ```
+2. Running the KNN Code
+```python
+import knn
+knn.plot_KNN(X_train, y_train, X_test, y_test) 
+```
+3. Running the ANN Code
 
+```python
+import mlp
+mlp.plot_ANN(X_train,y_train,X_test,y_test)
+```
 ## Results :
 ### Generating the Graph
 1. Results using KNN:
diff --git a/cifar.PNG b/cifar.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..048f6dada1bcbc0a4f089a3657686108305ec32e
Binary files /dev/null and b/cifar.PNG differ
diff --git a/mlp.py b/mlp.py
index 7fa9b88cedc561c0a344be830268fc7b3d0e97a8..5839834691166e3d08e5ba56657bf21014d4e9ae 100644
--- a/mlp.py
+++ b/mlp.py
@@ -94,7 +94,7 @@ def learn_once_mse(W1, b1, W2, b2, data, targets, learning_rate):
 
     # Update weights and biases of the output layer
     W2 = W2 - learning_rate * np.dot(hidden_layer_output.T, output_layer_gradients) / data.shape[0]
-    b2 = b2 - learning_rate * (1 / hidden_layer_output.shape[1]) * output_layer_gradients.sum(axis=0)
+    b2 = b2 - learning_rate * (1 / hidden_layer_output.shape[1]) * output_layer_gradients.sum(axis=0, keepdims=True)
 
     # Calculate the error at the hidden layer
     hidden_layer_error = np.dot(output_layer_gradients, W2.T)
@@ -291,11 +291,11 @@ def run_mlp_training(X_train, labels_train, data_test, labels_test, num_hidden_u
     - train_accuracies: List of training accuracies across epochs.
     - test_accuracy: The final testing accuracy.
     """
-    input_dimension = X_train.shape[1]
-    output_dimension = np.unique(labels_train).shape[0]  # Number of classes
+    #input_dimension = X_train.shape[1]
+    #output_dimension = np.unique(labels_train).shape[0]  # Number of classes
 
     # Initialize weights and biases
-    W1, b1, W2, b2 = initialization(input_dimension, num_hidden_units, output_dimension)
+    W1, b1, W2, b2 = initialization(d_in, d_h, d_out)
     
     train_accuracies = []  # List to store training accuracies