diff --git a/.gitignore b/.gitignore
index 01f3c46bd609817bdd93d1ba966e1fc44e22b035..89e6584916ea84383ad740c2a182062d7363197d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,8 @@
 # Data
 data/*
 transfer_learning/hymenoptera_data/*
-hymenoptera_data/*
+hymenoptera_data/train
+hymenoptera_data/val
 
 # Torch model
 *.pt
diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
index af5fc4dd3fd26f3f9b8fad9211c155c7592ad919..6d753c04d9d47a5d770c1f538470f306b08ff7cc 100644
--- a/TD2 Deep Learning.ipynb	
+++ b/TD2 Deep Learning.ipynb	
@@ -157,7 +157,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "id": "6e18f2fd",
    "metadata": {},
    "outputs": [
@@ -479,7 +479,9 @@
    "id": "13e1df74",
    "metadata": {},
    "source": [
-    "Does overfit occur? If so, do an early stopping."
+    "Does overfit occur? If so, do an early stopping.\n",
+    "\n",
+    "Overfitting occurs, as the minimum validation loss is reached around epoch 15."
    ]
   },
   {
@@ -819,7 +821,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": 5,
    "id": "ef623c26",
    "metadata": {},
    "outputs": [
@@ -827,7 +829,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "model:  fp32  \t Size (KB): 2330.946\n"
+      "model:  fp32  \t Size (KB): 251.278\n"
      ]
     }
    ],
@@ -1743,7 +1745,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "id": "572d824c",
    "metadata": {},
    "outputs": [
@@ -1777,16 +1779,56 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "train Loss: 0.5870 Acc: 0.7049\n",
-      "val Loss: 0.2112 Acc: 0.9608\n",
+      "train Loss: 0.8695 Acc: 0.5902\n",
+      "val Loss: 0.8018 Acc: 0.6209\n",
       "\n",
       "Epoch 2/10\n",
       "----------\n",
-      "train Loss: 0.4342 Acc: 0.7787\n",
-      "val Loss: 0.1977 Acc: 0.9542\n",
+      "train Loss: 0.5815 Acc: 0.7664\n",
+      "val Loss: 0.2042 Acc: 0.9281\n",
       "\n",
       "Epoch 3/10\n",
-      "----------\n"
+      "----------\n",
+      "train Loss: 0.4379 Acc: 0.7992\n",
+      "val Loss: 0.2194 Acc: 0.9412\n",
+      "\n",
+      "Epoch 4/10\n",
+      "----------\n",
+      "train Loss: 0.5824 Acc: 0.7951\n",
+      "val Loss: 0.2210 Acc: 0.9150\n",
+      "\n",
+      "Epoch 5/10\n",
+      "----------\n",
+      "train Loss: 0.5536 Acc: 0.7869\n",
+      "val Loss: 0.2275 Acc: 0.9216\n",
+      "\n",
+      "Epoch 6/10\n",
+      "----------\n",
+      "train Loss: 0.4033 Acc: 0.8484\n",
+      "val Loss: 0.2132 Acc: 0.9477\n",
+      "\n",
+      "Epoch 7/10\n",
+      "----------\n",
+      "train Loss: 0.3938 Acc: 0.8115\n",
+      "val Loss: 0.1939 Acc: 0.9477\n",
+      "\n",
+      "Epoch 8/10\n",
+      "----------\n",
+      "train Loss: 0.3606 Acc: 0.8484\n",
+      "val Loss: 0.1879 Acc: 0.9542\n",
+      "\n",
+      "Epoch 9/10\n",
+      "----------\n",
+      "train Loss: 0.3211 Acc: 0.8525\n",
+      "val Loss: 0.1788 Acc: 0.9542\n",
+      "\n",
+      "Epoch 10/10\n",
+      "----------\n",
+      "train Loss: 0.3518 Acc: 0.8525\n",
+      "val Loss: 0.1932 Acc: 0.9477\n",
+      "\n",
+      "Training complete in 12m 41s\n",
+      "Best val Acc: 0.954248\n"
      ]
     }
    ],
@@ -1970,7 +2012,7 @@
     "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
     ")\n",
     "\n",
-    "torch.save(model.state_dict(), \"trained_model.pt\")\n"
+    "torch.save(model.state_dict(), \"trained_resnet.pt\")\n"
    ]
   },
   {
@@ -1996,14 +2038,25 @@
    "metadata": {},
    "outputs": [
     {
-     "ename": "NameError",
-     "evalue": "name 'model' is not defined",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
-      "Cell \u001b[0;32mIn[1], line 83\u001b[0m\n\u001b[1;32m     72\u001b[0m             \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTest Accuracy of \u001b[39m\u001b[38;5;132;01m%5s\u001b[39;00m\u001b[38;5;124m: N/A (no training examples)\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m%\u001b[39m (class_names[i]))\n\u001b[1;32m     74\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\n\u001b[1;32m     75\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mTest Accuracy (Overall): \u001b[39m\u001b[38;5;132;01m%2d\u001b[39;00m\u001b[38;5;132;01m%%\u001b[39;00m\u001b[38;5;124m (\u001b[39m\u001b[38;5;132;01m%2d\u001b[39;00m\u001b[38;5;124m/\u001b[39m\u001b[38;5;132;01m%2d\u001b[39;00m\u001b[38;5;124m)\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m     76\u001b[0m         \u001b[38;5;241m%\u001b[39m (\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     80\u001b[0m         )\n\u001b[1;32m     81\u001b[0m     )\n\u001b[0;32m---> 83\u001b[0m eval_model(\u001b[43mmodel\u001b[49m)\n",
-      "\u001b[0;31mNameError\u001b[0m: name 'model' is not defined"
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/MBNath/.pyenv/versions/3.10.3/envs/IA/lib/python3.10/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
+      "  warnings.warn(\n",
+      "/Users/MBNath/.pyenv/versions/3.10.3/envs/IA/lib/python3.10/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.\n",
+      "  warnings.warn(msg)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['ants', 'bees']\n",
+      "Value of the loss : 0.46008248378833133\n",
+      "Test Accuracy of  ants: 93% (14/15)\n",
+      "Test Accuracy of  bees: 86% (13/15)\n",
+      "\n",
+      "Test Accuracy (Overall): 90% (27/30)\n"
      ]
     }
    ],
@@ -2011,6 +2064,7 @@
     "def eval_model(model):\n",
     "    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
     "    \n",
+    "    batch_size = 2\n",
     "    test_loss = 0.0\n",
     "    class_correct = [0.0, 0.0]  # there are only 2 classes of labels\n",
     "    class_total = [0.0, 0.0]\n",
@@ -2027,10 +2081,8 @@
     "    data_dir = \"hymenoptera_data\"\n",
     "    # Create test dataset and loader\n",
     "    image_dataset = datasets.ImageFolder(os.path.join(data_dir, \"test\"), data_transform)\n",
-    "    batch_size = 2\n",
     "    dataloaders = torch.utils.data.DataLoader(image_dataset, batch_size, shuffle=True, num_workers=4)\n",
     "    class_names = image_dataset.classes\n",
-    "    print(class_names)\n",
     "\n",
     "    model.eval()  # Set model to evaluate mode\n",
     "\n",
@@ -2068,18 +2120,15 @@
     "    print(f\"Value of the loss : {test_loss}\")\n",
     "\n",
     "    for i in range(2):\n",
-    "        if class_total[i] > 0:\n",
-    "            print(\n",
-    "                \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
-    "                % (\n",
-    "                    class_names[i],\n",
-    "                    100 * class_correct[i] / class_total[i],\n",
-    "                    np.sum(class_correct[i]),\n",
-    "                    np.sum(class_total[i]),\n",
-    "                )\n",
+    "        print(\n",
+    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                class_names[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
     "            )\n",
-    "        else:\n",
-    "            print(\"Test Accuracy of %5s: N/A (no training examples)\" % (class_names[i]))\n",
+    "        )\n",
     "\n",
     "    print(\n",
     "        \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
@@ -2090,11 +2139,211 @@
     "        )\n",
     "    )\n",
     "\n",
-    "model = Net()\n",
-    "model.load_state_dict(torch.load(\"./trained_model.pt\", map_location=torch.device('cpu')))\n",
+    "model = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "# Replace the final fully connected layer\n",
+    "# Parameters of newly constructed modules have requires_grad=True by default\n",
+    "num_ftrs = model.fc.in_features\n",
+    "model.fc = nn.Linear(num_ftrs, 2)\n",
+    "# Send the model to the GPU\n",
+    "model = model.to(device)\n",
+    "model.load_state_dict(torch.load(\"./trained_resnet.pt\", map_location=torch.device('cpu')))\n",
+    "# print(model)\n",
+    "# model = torchvision.models.resnet18(pretrained=True)\n",
     "eval_model(model)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "13c54aa2",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/MBNath/.pyenv/versions/3.10.3/envs/IA/lib/python3.10/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
+      "  warnings.warn(\n",
+      "/Users/MBNath/.pyenv/versions/3.10.3/envs/IA/lib/python3.10/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.\n",
+      "  warnings.warn(msg)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1/10\n",
+      "----------\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/MBNath/.pyenv/versions/3.10.3/envs/IA/lib/python3.10/site-packages/torch/optim/lr_scheduler.py:143: UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`.  Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n",
+      "  warnings.warn(\"Detected call of `lr_scheduler.step()` before `optimizer.step()`. \"\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "train Loss: 0.6985 Acc: 0.5287\n",
+      "val Loss: 0.6568 Acc: 0.5948\n",
+      "\n",
+      "Epoch 2/10\n",
+      "----------\n",
+      "train Loss: 0.6437 Acc: 0.6107\n",
+      "val Loss: 0.5098 Acc: 0.8954\n",
+      "\n",
+      "Epoch 3/10\n",
+      "----------\n",
+      "train Loss: 0.5965 Acc: 0.6639\n",
+      "val Loss: 0.5052 Acc: 0.8366\n",
+      "\n",
+      "Epoch 4/10\n",
+      "----------\n",
+      "train Loss: 0.5486 Acc: 0.7377\n",
+      "val Loss: 0.3847 Acc: 0.9216\n",
+      "\n",
+      "Epoch 5/10\n",
+      "----------\n",
+      "train Loss: 0.5297 Acc: 0.7459\n",
+      "val Loss: 0.3610 Acc: 0.9020\n",
+      "\n",
+      "Epoch 6/10\n",
+      "----------\n",
+      "train Loss: 0.5105 Acc: 0.7213\n",
+      "val Loss: 0.3419 Acc: 0.9346\n",
+      "\n",
+      "Epoch 7/10\n",
+      "----------\n",
+      "train Loss: 0.4094 Acc: 0.8607\n",
+      "val Loss: 0.3405 Acc: 0.9085\n",
+      "\n",
+      "Epoch 8/10\n",
+      "----------\n",
+      "train Loss: 0.4349 Acc: 0.8443\n",
+      "val Loss: 0.3349 Acc: 0.9216\n",
+      "\n",
+      "Epoch 9/10\n",
+      "----------\n",
+      "train Loss: 0.4094 Acc: 0.8934\n",
+      "val Loss: 0.3287 Acc: 0.9346\n",
+      "\n",
+      "Epoch 10/10\n",
+      "----------\n",
+      "train Loss: 0.4406 Acc: 0.8156\n",
+      "val Loss: 0.3332 Acc: 0.9216\n",
+      "\n",
+      "Training complete in 12m 40s\n",
+      "Best val Acc: 0.934641\n"
+     ]
+    }
+   ],
+   "source": [
+    "new_resnet = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in new_resnet.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "# Replace the final fully connected layer\n",
+    "# Parameters of newly constructed modules have requires_grad=True by default\n",
+    "num_ftrs = new_resnet.fc.in_features\n",
+    "\n",
+    "new_resnet.fc = nn.Linear(num_ftrs, 2)\n",
+    "\n",
+    "# Change the last layers by using Sequential\n",
+    "new_resnet.fc = nn.Sequential(\n",
+    "    nn.Linear(num_ftrs, 3),\n",
+    "    nn.ReLU(),\n",
+    "    nn.Linear(3, 2),\n",
+    "    nn.Dropout(p=0.1))\n",
+    "\n",
+    "# Send the model to the device : here CPU\n",
+    "new_resnet = new_resnet.to(device)\n",
+    "# Set the loss function\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Observe that only the parameters of the final layer are being optimized\n",
+    "optimizer_conv = optim.SGD(new_resnet.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
+    "\n",
+    "new_resnet, epoch_time = train_model(\n",
+    "    new_resnet, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
+    ")\n",
+    "torch.save(new_resnet.state_dict(), \"trained_new_resnet.pt\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bf27e5d1",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "model:  fp32  \t Size (KB): 44783.162\n",
+      "The size of the new Resnet model is 44.78MB\n",
+      "model:  int8  \t Size (KB): 44780.006\n",
+      "Quantized modified Resnet model is 1.00007 times smaller than the original one\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Quantize the new Resnet model and test it on the same images\n",
+    "size_new_resnet = print_size_of_model(new_resnet, \"fp32\")\n",
+    "\n",
+    "print(f\"The size of the new Resnet model is {size_new_resnet / 1000000}MB\")\n",
+    "\n",
+    "quantized_new_resnet = torch.quantization.quantize_dynamic(\n",
+    "    new_resnet, dtype=torch.qint8)\n",
+    "torch.save(quantized_new_resnet.state_dict(), \"trained_new_qresnet.pt\")\n",
+    "\n",
+    "size_new_resnet_quantized = print_size_of_model(quantized_new_resnet, \"int8\")\n",
+    "\n",
+    "print(f\"Quantized modified Resnet model is {size_new_resnet / size_new_resnet_quantized}f times smaller than the original one\" %)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "id": "64a85d2d",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Evaluation of the new resnet model\n",
+      "['ants', 'bees']\n",
+      "Value of the loss : 0.7961034814516703\n",
+      "Test Accuracy of  ants: 93% (14/15)\n",
+      "Test Accuracy of  bees: 73% (11/15)\n",
+      "\n",
+      "Test Accuracy (Overall): 83% (25/30)\n",
+      "\n",
+      "Evaluation of the quantized new resnet model\n",
+      "['ants', 'bees']\n",
+      "Value of the loss : 0.796722040573756\n",
+      "Test Accuracy of  ants: 93% (14/15)\n",
+      "Test Accuracy of  bees: 73% (11/15)\n",
+      "\n",
+      "Test Accuracy (Overall): 83% (25/30)\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Evaluation of the new resnet model\")\n",
+    "acc_new_resnet = eval_model(new_resnet)\n",
+    "print(\"\\nEvaluation of the quantized new resnet model\")\n",
+    "acc_quant_new_resnet = eval_model(quantized_new_resnet)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "04a263f0",