diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
index c86f3a01b79fbd92c55dd7ea035376f86cc959e3..c3503a68a0b31dceeaca891ea29a1980aa23f892 100644
--- a/TD2 Deep Learning.ipynb	
+++ b/TD2 Deep Learning.ipynb	
@@ -542,10 +542,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "On peut détecter un overfitting en surveillant les performances du modèle sur les données\n",
-    "d'entraînement et de test au fil du temps. Si les performances du modèle sur les données \n",
-    "d'entraînement continuent de s'améliorer tandis que celles sur les données de test diminuent, \n",
-    "cela indique un surapprentissage. Ici dans notre cas à partir de l'epoch 15, on remarque que la valeur de valid_loss commence à augmenter alors que train_loss diminue toujours.\n"
+    "    -One can detect overfitting by monitoring the model's performance on both training and testing data . If the  model's performance on the training data continues to improve while its performance on the testing data decreases, it indicates overfitting. In our case, starting from epoch 15, we observe that the validation loss value begins to increase while the training loss continues to decrease. So we have to do an early stopping around epoch 15. Implementing early stopping at this point can help prevent the model from further overfitting and potentially improve its generalization to new, unseen data\n"
    ]
   },
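+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a minimal sketch (not the notebook's actual training loop), early stopping around epoch 15 could look like the following. The `train_one_epoch` and `compute_valid_loss` helpers are hypothetical stand-ins for the training and validation loops above.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal early-stopping sketch. train_one_epoch and compute_valid_loss\n",
+    "# are hypothetical helpers wrapping the loops defined in this notebook.\n",
+    "best_valid_loss = float(\"inf\")\n",
+    "patience = 3  # epochs to wait for an improvement before stopping\n",
+    "epochs_without_improvement = 0\n",
+    "\n",
+    "for epoch in range(1, 31):\n",
+    "    train_loss = train_one_epoch(model, train_loader, optimizer, criterion)\n",
+    "    valid_loss = compute_valid_loss(model, valid_loader, criterion)\n",
+    "    if valid_loss < best_valid_loss:\n",
+    "        best_valid_loss = valid_loss\n",
+    "        epochs_without_improvement = 0\n",
+    "        # keep the weights of the best epoch seen so far\n",
+    "        torch.save(model.state_dict(), \"model_cifar.pt\")\n",
+    "    else:\n",
+    "        epochs_without_improvement += 1\n",
+    "        if epochs_without_improvement >= patience:\n",
+    "            print(f\"Early stopping at epoch {epoch}\")\n",
+    "            break\n"
+   ]
+  },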
   {
@@ -663,6 +660,13 @@
     "Compare the results obtained with this new network to those obtained previously."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "    -New network model"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 37,
@@ -999,7 +1003,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Le nouveau modèle est beaucoup plus performant, on a pas de problème d'overfitting contrairement à l'encien modèle."
+    "    -The new model is significantly more effective, and we do not have any overfitting issues, unlike the previous model."
    ]
   },
   {
@@ -1017,11 +1021,12 @@
    ],
    "source": [
     "# Classification model2 Test\n",
+    "\n",
     "import torch\n",
     "from torchvision import transforms\n",
     "from PIL import Image\n",
     "\n",
-    "# Define the transformation for the input image\n",
+    "# We Define the transformation for the input image\n",
     "prediction_transform = transforms.Compose([\n",
     "    transforms.ToTensor(),\n",
     "    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n",
@@ -1062,82 +1067,110 @@
     "print(\"Predicted Class:\", classes[predicted_class])\n"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "    -In this case, the test prediction is incorrect. The image given is a dog image while the prediction is a cat."
+   ]
+  },
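+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To see how confident the model was in this misclassification, we can inspect the softmax probabilities. A minimal sketch, reusing `model2`, `prediction_transform`, and `classes` from the cell above; the image path is hypothetical.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "import torch.nn.functional as F\n",
+    "from PIL import Image\n",
+    "\n",
+    "# hypothetical path to the misclassified dog image\n",
+    "image = Image.open(\"dog.png\").convert(\"RGB\")\n",
+    "x = prediction_transform(image).unsqueeze(0)  # add a batch dimension\n",
+    "\n",
+    "model2.eval()\n",
+    "with torch.no_grad():\n",
+    "    probs = F.softmax(model2(x), dim=1).squeeze()\n",
+    "\n",
+    "# print the three most probable classes\n",
+    "top_probs, top_idx = probs.topk(3)\n",
+    "for p, idx in zip(top_probs, top_idx):\n",
+    "    print(f\"{classes[idx.item()]}: {p.item():.3f}\")\n"
+   ]
+  },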
   {
    "cell_type": "code",
-   "execution_count": 119,
+   "execution_count": 135,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Test Loss: 1.983950\n",
+      "Test Loss: 126.366212\n",
       "\n",
-      "Test Accuracy of     0: 58% (589/1000)\n",
-      "Test Accuracy of     1: 14% (145/1000)\n",
-      "Test Accuracy of     2: 15% (157/1000)\n",
-      "Test Accuracy of     3: 71% (713/1000)\n",
+      "Test Accuracy of airplane: 58% (589/1000)\n",
+      "Test Accuracy of automobile: 14% (145/1000)\n",
+      "Test Accuracy of  bird: 15% (157/1000)\n",
+      "Test Accuracy of   cat: 71% (713/1000)\n",
+      "Test Accuracy of  deer: 10% (102/1000)\n",
+      "Test Accuracy of   dog: 34% (347/1000)\n",
+      "Test Accuracy of  frog:  2% (24/1000)\n",
+      "Test Accuracy of horse: 16% (163/1000)\n",
+      "Test Accuracy of  ship: 75% (756/1000)\n",
+      "Test Accuracy of truck: 21% (217/1000)\n",
       "\n",
-      "Test Accuracy (Overall): 40% (1604/4000)\n"
+      "Test Accuracy (Overall): 32% (3213/10000)\n"
      ]
     }
    ],
    "source": [
+    "# Model2 test \n",
+    "\n",
     "import torch\n",
     "import numpy as np\n",
-    "from torchvision import datasets, transforms\n",
-    "from torch.utils.data import DataLoader\n",
     "\n",
-    "# Assuming you have the trained model saved as \"model_cifar.pt\"\n",
-    "# Assuming you have the test data loader defined as 'test_loader'\n",
+    "classes = [\n",
+    "    \"airplane\",\n",
+    "    \"automobile\",\n",
+    "    \"bird\",\n",
+    "    \"cat\",\n",
+    "    \"deer\",\n",
+    "    \"dog\",\n",
+    "    \"frog\",\n",
+    "    \"horse\",\n",
+    "    \"ship\",\n",
+    "    \"truck\",\n",
+    "]\n",
     "\n",
-    "# Load the saved model state\n",
-    "model2.load_state_dict(torch.load(\"model_cifar.pt\", map_location=torch.device('cpu')))\n",
     "\n",
-    "# Evaluate the model on test data\n",
-    "model2.eval()\n",
+    "model2.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
+    "\n",
+    "# track test loss\n",
     "test_loss = 0.0\n",
-    "num_classes = 4  # Replace with the actual number of classes in your problem\n",
-    "class_correct = [0.0] * num_classes\n",
-    "class_total = [0.0] * num_classes\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
     "\n",
-    "with torch.no_grad():\n",
-    "    for data, target in test_loader:\n",
-    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
-    "        output = model2(data)\n",
-    "        # Calculate the batch loss\n",
-    "        loss = criterion(output, target)\n",
-    "        # Update test loss\n",
-    "        test_loss += loss.item() * data.size(0)\n",
-    "        # Convert output probabilities to predicted class\n",
-    "        _, pred = torch.max(output, 1)\n",
-    "        # Compare predictions to true label\n",
-    "        correct_tensor = pred.eq(target.data.view_as(pred))\n",
-    "        correct = np.squeeze(correct_tensor.numpy())\n",
-    "        # Calculate test accuracy for each object class\n",
-    "        for i in range(len(target)):\n",
-    "            label = target.data[i].item()\n",
-    "            if label < num_classes:  # Check if label is within the expected range\n",
-    "                class_correct[label] += correct[i].item()\n",
-    "                class_total[label] += 1\n",
-    "\n",
-    "# Average test loss\n",
-    "test_loss = test_loss / len(test_loader.dataset)\n",
+    "model2.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = model2(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(len(target)):  # Iterate over the actual batch size\n",
+    "        label = target.data[i].item()  # Extract the item from the tensor\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
     "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
     "\n",
-    "for i in range(num_classes):\n",
+    "for i in range(10):\n",
     "    if class_total[i] > 0:\n",
     "        print(\n",
     "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
     "            % (\n",
-    "                str(i),\n",
+    "                classes[i],\n",
     "                100 * class_correct[i] / class_total[i],\n",
     "                np.sum(class_correct[i]),\n",
     "                np.sum(class_total[i]),\n",
     "            )\n",
     "        )\n",
     "    else:\n",
-    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (str(i)))\n",
+    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
     "\n",
     "print(\n",
     "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
@@ -1153,7 +1186,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "    -Our new model has achieved a test accuracy of 0.4, which is significantly better than the previous neural network implemented during TD1 that had a test accuracy of 0.15"
+    "    -The accuracy of model2 is (0.32), which is lower than the accuracy of the previous CNN model (0.63). However, it surpasses the performance of the MLP model from TP1, which had an accuracy of (0.15)."
    ]
   },
   {
@@ -1286,6 +1319,7 @@
    "outputs": [],
    "source": [
     "# function to evaluate the accuracy of the model\n",
+    "\n",
     "def evaluate_model(model, dataloader):\n",
     "    model.eval()\n",
     "    correct = {cls: 0 for cls in classes}\n",
@@ -1307,7 +1341,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 55,
+   "execution_count": 137,
    "metadata": {},
    "outputs": [
     {
@@ -1315,36 +1349,47 @@
      "output_type": "stream",
      "text": [
       "Accuracy before quantization:\n",
-      "airplane: 0.6300\n",
-      "automobile: 0.8050\n",
-      "bird: 0.3740\n",
-      "cat: 0.3750\n",
-      "deer: 0.7130\n",
-      "dog: 0.5410\n",
-      "frog: 0.7490\n",
-      "horse: 0.7570\n",
-      "ship: 0.8580\n",
-      "truck: 0.8130\n",
-      "Overall Accuracy: 0.6615\n",
-      "model:  int8  \t Size (KB): 168.478\n",
+      "airplane: 0.5890\n",
+      "automobile: 0.1450\n",
+      "bird: 0.1570\n",
+      "cat: 0.7130\n",
+      "deer: 0.1020\n",
+      "dog: 0.3470\n",
+      "frog: 0.0240\n",
+      "horse: 0.1630\n",
+      "ship: 0.7560\n",
+      "truck: 0.2170\n",
+      "Overall Accuracy: 0.3213\n",
       "Accuracy after quantization:\n",
-      "airplane: 0.6270\n",
-      "automobile: 0.8040\n",
-      "bird: 0.3770\n",
-      "cat: 0.3790\n",
-      "deer: 0.7100\n",
-      "dog: 0.5370\n",
-      "frog: 0.7490\n",
-      "horse: 0.7560\n",
-      "ship: 0.8580\n",
-      "truck: 0.8140\n",
-      "Overall Accuracy: 0.6611\n"
+      "airplane: 0.5860\n",
+      "automobile: 0.1440\n",
+      "bird: 0.1590\n",
+      "cat: 0.7100\n",
+      "deer: 0.1030\n",
+      "dog: 0.3520\n",
+      "frog: 0.0250\n",
+      "horse: 0.1630\n",
+      "ship: 0.7550\n",
+      "truck: 0.2160\n",
+      "Overall Accuracy: 0.3213\n",
+      "model:  int8  \t Size (KB): 168.478\n",
+      "model:  int8  \t Size (KB): 365.058\n"
      ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "365058"
+      ]
+     },
+     "execution_count": 137,
+     "metadata": {},
+     "output_type": "execute_result"
     }
    ],
    "source": [
     "\n",
-    "# Evaluate the accuracy of the original model on the test set\n",
+    "# Evaluate the accuracy of  the initial model and the quantized model\n",
     "\n",
     "accuracy_before_quantization = evaluate_model(model2, test_loader)\n",
     "overall_accuracy_before_quantization = sum(accuracy_before_quantization.values()) / len(classes)\n",
@@ -1357,9 +1402,6 @@
     "# Post-training quantization\n",
     "quantized_model = torch.quantization.quantize_dynamic(model2, dtype=torch.qint8)\n",
     "\n",
-    "# Print the size of the quantized model\n",
-    "print_size_of_model(quantized_model, \"int8\")\n",
-    "\n",
     "# Evaluate the accuracy of the quantized model on the test set\n",
     "accuracy_after_quantization = evaluate_model(quantized_model, test_loader)\n",
     "overall_accuracy_after_quantization = sum(accuracy_after_quantization.values()) / len(classes)\n",
@@ -1367,7 +1409,12 @@
     "print(\"Accuracy after quantization:\")\n",
     "for cls, acc in accuracy_after_quantization.items():\n",
     "    print(f\"{cls}: {acc:.4f}\")\n",
-    "print(f\"Overall Accuracy: {overall_accuracy_after_quantization:.4f}\")\n"
+    "print(f\"Overall Accuracy: {overall_accuracy_after_quantization:.4f}\")\n",
+    "\n",
+    "# Print the size of the quantized model\n",
+    "print_size_of_model(quantized_model, \"int8\")\n",
+    "#Print the size of the inial model\n",
+    "print_size_of_model(model2, \"int8\")\n"
    ]
   },
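+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Dynamic quantization shrinks the model from about 365 KB to about 168 KB (roughly a 2.2x reduction) while the overall accuracy remains essentially unchanged (0.3213 before and after quantization).\n"
+   ]
+  },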
   {