diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
index 45b5f52117dde269fe97dad627800b63c934595a..ce4cc57ae4b828c9201e12f1ecd04f1675ae6241 100644
--- a/TD2 Deep Learning.ipynb	
+++ b/TD2 Deep Learning.ipynb	
@@ -1856,73 +1856,90 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 55,
+   "execution_count": 56,
    "id": "572d824c",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Downloading: \"https://download.pytorch.org/models/resnet18-f37072fd.pth\" to C:\\Users\\ACHRAF FAYTOUT/.cache\\torch\\hub\\checkpoints\\resnet18-f37072fd.pth\n",
-      "100.0%\n"
-     ]
-    },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Epoch 1/10\n",
+      "Epoch 1/15\n",
+      "----------\n",
+      "train Loss: 0.6171 Acc: 0.6352\n",
+      "val Loss: 0.2310 Acc: 0.9085\n",
+      "\n",
+      "Epoch 2/15\n",
+      "----------\n",
+      "train Loss: 0.7051 Acc: 0.6926\n",
+      "val Loss: 0.1882 Acc: 0.9281\n",
+      "\n",
+      "Epoch 3/15\n",
+      "----------\n",
+      "train Loss: 0.3120 Acc: 0.8730\n",
+      "val Loss: 0.1851 Acc: 0.9412\n",
+      "\n",
+      "Epoch 4/15\n",
+      "----------\n",
+      "train Loss: 0.6827 Acc: 0.7418\n",
+      "val Loss: 0.3261 Acc: 0.8627\n",
+      "\n",
+      "Epoch 5/15\n",
+      "----------\n",
+      "train Loss: 0.7511 Acc: 0.7008\n",
+      "val Loss: 0.1926 Acc: 0.9477\n",
+      "\n",
+      "Epoch 6/15\n",
       "----------\n",
-      "train Loss: 0.5744 Acc: 0.7131\n",
-      "val Loss: 0.2297 Acc: 0.9477\n",
+      "train Loss: 0.4810 Acc: 0.7705\n",
+      "val Loss: 0.2801 Acc: 0.9216\n",
       "\n",
-      "Epoch 2/10\n",
+      "Epoch 7/15\n",
       "----------\n",
-      "train Loss: 0.4688 Acc: 0.7746\n",
-      "val Loss: 0.2737 Acc: 0.8758\n",
+      "train Loss: 0.4013 Acc: 0.8279\n",
+      "val Loss: 0.1993 Acc: 0.9477\n",
       "\n",
-      "Epoch 3/10\n",
+      "Epoch 8/15\n",
       "----------\n",
-      "train Loss: 0.4663 Acc: 0.8033\n",
-      "val Loss: 0.1806 Acc: 0.9542\n",
+      "train Loss: 0.3120 Acc: 0.8689\n",
+      "val Loss: 0.2077 Acc: 0.9412\n",
       "\n",
-      "Epoch 4/10\n",
+      "Epoch 9/15\n",
       "----------\n",
-      "train Loss: 0.7846 Acc: 0.6885\n",
-      "val Loss: 0.1757 Acc: 0.9477\n",
+      "train Loss: 0.3344 Acc: 0.8484\n",
+      "val Loss: 0.2006 Acc: 0.9477\n",
       "\n",
-      "Epoch 5/10\n",
+      "Epoch 10/15\n",
       "----------\n",
-      "train Loss: 0.3483 Acc: 0.8361\n",
-      "val Loss: 0.1934 Acc: 0.9477\n",
+      "train Loss: 0.4115 Acc: 0.8361\n",
+      "val Loss: 0.1926 Acc: 0.9477\n",
       "\n",
-      "Epoch 6/10\n",
+      "Epoch 11/15\n",
       "----------\n",
-      "train Loss: 0.4295 Acc: 0.8238\n",
-      "val Loss: 0.1808 Acc: 0.9542\n",
+      "train Loss: 0.3024 Acc: 0.8689\n",
+      "val Loss: 0.2158 Acc: 0.9477\n",
       "\n",
-      "Epoch 7/10\n",
+      "Epoch 12/15\n",
       "----------\n",
-      "train Loss: 0.3530 Acc: 0.8607\n",
-      "val Loss: 0.2110 Acc: 0.9346\n",
+      "train Loss: 0.2788 Acc: 0.8811\n",
+      "val Loss: 0.1920 Acc: 0.9412\n",
       "\n",
-      "Epoch 8/10\n",
+      "Epoch 13/15\n",
       "----------\n",
-      "train Loss: 0.3518 Acc: 0.8648\n",
-      "val Loss: 0.1919 Acc: 0.9542\n",
+      "train Loss: 0.3594 Acc: 0.8648\n",
+      "val Loss: 0.2051 Acc: 0.9412\n",
       "\n",
-      "Epoch 9/10\n",
+      "Epoch 14/15\n",
       "----------\n",
-      "train Loss: 0.3662 Acc: 0.8484\n",
-      "val Loss: 0.2032 Acc: 0.9542\n",
+      "train Loss: 0.3710 Acc: 0.8402\n",
+      "val Loss: 0.1804 Acc: 0.9477\n",
       "\n",
-      "Epoch 10/10\n",
+      "Epoch 15/15\n",
       "----------\n",
-      "train Loss: 0.3640 Acc: 0.8115\n",
-      "val Loss: 0.1849 Acc: 0.9542\n",
+      "train Loss: 0.4038 Acc: 0.8074\n",
+      "val Loss: 0.1927 Acc: 0.9542\n",
       "\n",
-      "Training complete in 8m 23s\n",
+      "Training complete in 13m 38s\n",
       "Best val Acc: 0.954248\n"
      ]
     }
@@ -2087,25 +2104,25 @@
     "\n",
     "\n",
     "# Download a pre-trained ResNet18 model and freeze its weights\n",
-    "model = torchvision.models.resnet18(pretrained=True)\n",
-    "for param in model.parameters():\n",
+    "model3 = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model3.parameters():\n",
     "    param.requires_grad = False\n",
     "\n",
     "# Replace the final fully connected layer\n",
     "# Parameters of newly constructed modules have requires_grad=True by default\n",
-    "num_ftrs = model.fc.in_features\n",
-    "model.fc = nn.Linear(num_ftrs, 2)\n",
+    "num_ftrs = model3.fc.in_features\n",
+    "model3.fc = nn.Linear(num_ftrs, 2)\n",
     "# Send the model to the GPU\n",
-    "model = model.to(device)\n",
+    "model3 = model3.to(device)\n",
     "# Set the loss function\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "\n",
     "# Observe that only the parameters of the final layer are being optimized\n",
-    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "optimizer_conv = optim.SGD(model3.fc.parameters(), lr=0.001, momentum=0.9)\n",
     "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
-    "model, epoch_time = train_model(\n",
-    "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
-    ")\n"
+    "model3, epoch_time = train_model(\n",
+    "    model3, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=15\n",
+    ")"
    ]
   },
   {
@@ -2126,18 +2143,151 @@
   },
   {
    "cell_type": "markdown",
-   "id": "04a263f0",
    "metadata": {},
    "source": [
-    "## Optional\n",
-    "    \n",
-    "Try this at home!! \n",
+    "<span style=\"color:orange;\">Results obtained : </span><br>\n",
+    "After running 10 epochs: \n",
+    "Training concluded within 8 minutes and 46 seconds, yielding the best validation accuracy of 0.954248. <br>\n",
+    "After 15 epochs, the training finished in 13 minutes and 38 seconds, maintaining the same best validation accuracy of 0.954248. Despite the high accuracy of 95%, it appears unnecessary to extend the epochs beyond 10 as there's no improvement beyond this point."
+   ]
+  },
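+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of the early-stopping idea behind this observation (illustration only, not the `train_model` helper used above; `train_one_epoch` and `evaluate` are hypothetical placeholders): stop once the validation accuracy has not improved for a few epochs.\n",
+    "```python\n",
+    "best_acc, bad_epochs, patience = 0.0, 0, 3  # assumed patience of 3 epochs\n",
+    "for epoch in range(15):\n",
+    "    train_one_epoch()     # hypothetical: one pass over the training set\n",
+    "    val_acc = evaluate()  # hypothetical: returns validation accuracy\n",
+    "    if val_acc > best_acc:\n",
+    "        best_acc, bad_epochs = val_acc, 0\n",
+    "    else:\n",
+    "        bad_epochs += 1\n",
+    "        if bad_epochs >= patience:\n",
+    "            break  # no improvement for `patience` consecutive epochs\n",
+    "```"
+   ]
+  },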
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A crucial aspect of training this model involves augmenting the training set. Rather than utilizing the original 245 images directly, modified versions of these images are employed. In this case, the modification involves a horizontal flip occurring randomly with a 50% probability."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability`\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This represents just one among various forms of modifications viable for image data augmentation. For instance, this blog post provides insights into Random Rotation (https://blog.roboflow.com/why-and-how-to-implement-random-rotate-data-augmentation/). \n",
+    "Opting for a horizontal flip for photos of ants and bees is rational: it maintains the orientation of up and down, producing a mirrored image that retains the same meaningful context as the original. Conversely, a vertical flip might not be as practical, as viewing images upside down is infrequent. Horizontal flipping remains suitable in this context and many similar scenarios, although it may not be suitable for images predominantly focused on textual content, for example."
+   ]
+  },
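+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of such an augmented training pipeline, assuming the usual ImageNet normalization statistics and a 224x224 input size; the exact values should match the `data_transforms` dictionary used earlier in the notebook:\n",
+    "```python\n",
+    "from torchvision import transforms\n",
+    "\n",
+    "train_transform = transforms.Compose([\n",
+    "    transforms.RandomResizedCrop(224),       # random crop + resize for scale variability\n",
+    "    transforms.RandomHorizontalFlip(p=0.5),  # mirror the image 50% of the time\n",
+    "    transforms.RandomRotation(15),           # small random rotation (see the blog post above)\n",
+    "    transforms.ToTensor(),\n",
+    "    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "])\n",
+    "```"
+   ]
+  },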
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Evaluation of the model on a test set"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Earlier, we established two datasets: one containing images sourced from the 'train' directory and the other comprising images from the 'val' directory. Subsequently, we generated a fresh dataset that retrieves images from both directories, accompanied by the corresponding dataloader. To craft our test dataset, we apply the 'val' transformation."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 57,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "397\n"
+     ]
+    }
+   ],
+   "source": [
+    "#test_dataset = datasets.ImageFolder(data_dir, data_transforms['val'])\n",
+    "# datasets.ImageFolder doesn't work this way\n",
+    "\n",
+    "test_dataset_train = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms['val'])\n",
+    "test_dataset = torch.utils.data.ConcatDataset([test_dataset_train, image_datasets['val']])\n",
+    "\n",
+    "test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=True, num_workers=4)\n",
+    "print(len(test_dataset))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 58,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Accuracy: 0.9673 (384/397)\n"
+     ]
+    }
+   ],
+   "source": [
+    "def eval_model(model):\n",
+    "    model.eval()  # Set model to evaluate mode\n",
+    "    running_corrects = 0\n",
     "\n",
+    "    # Iterate over data.\n",
+    "    for inputs, labels in test_dataloader:\n",
+    "        inputs = inputs.to(device)\n",
+    "        labels = labels.to(device)\n",
+    "                \n",
+    "        # Forward\n",
+    "        outputs = model(inputs)\n",
+    "        _, preds = torch.max(outputs, 1)\n",
     "\n",
-    "Pytorch offers a framework to export a given CNN to your selfphone (either android or iOS). Have a look at the tutorial https://pytorch.org/mobile/home/\n",
+    "        # Statistics\n",
+    "        running_corrects += torch.sum(preds == labels.data)\n",
     "\n",
-    "The Exercise consists in deploying the CNN of Exercise 4 in your phone and then test it on live.\n",
-    "\n"
+    "    epoch_acc = running_corrects.double() / len(test_dataset)\n",
+    "\n",
+    "    print(\"Test Accuracy: {:.4f} ({}/{})\".format(epoch_acc, running_corrects, len(test_dataset)))\n",
+    "\n",
+    "eval_model(model3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Quatization"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 59,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Model 3\n",
+      "model:  int8  \t Size (KB): 44780.42\n",
+      "Quantized Model 3\n",
+      "model:  int8  \t Size (KB): 44778.17\n",
+      "Test Accuracy: 0.9673 (384/397)\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Model 3\")\n",
+    "print_size_of_model(model3, \"int8\")\n",
+    "\n",
+    "quantized_model3 = torch.quantization.quantize_dynamic(model3, dtype=torch.qint8)\n",
+    "\n",
+    "print(\"Quantized Model 3\")\n",
+    "print_size_of_model(quantized_model3, \"int8\")\n",
+    "eval_model(quantized_model3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<span style = \"color:orange;\">Answer : </span>The reduction in size of the quantized model is minimal, and as anticipated, the accuracy remains unchanged."
    ]
   },
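+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick way to check this, as a sketch assuming `quantized_model3` from the cell above, is to list the submodules that dynamic quantization actually replaced:\n",
+    "```python\n",
+    "# Only modules whose class lives in a torch *quantized* namespace were swapped;\n",
+    "# for ResNet18 this should print just the final fully connected layer.\n",
+    "for name, module in quantized_model3.named_modules():\n",
+    "    if \"quantized\" in type(module).__module__:\n",
+    "        print(name, \"->\", type(module).__name__)\n",
+    "```"
+   ]
+  },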
   {
@@ -2147,7 +2297,10 @@
    "source": [
     "## Author\n",
     "\n",
-    "Alberto BOSIO - Ph. D."
+    "Alberto BOSIO - Ph. D.\n",
+    "## Student\n",
+    "\n",
+    "Achraf FAYTOUT - Eng. Student"
    ]
   }
  ],