From 778f3ecd0b34da1b64d5d22e5371e423b9f1a7ff Mon Sep 17 00:00:00 2001
From: zineb15022001 <zineb.kabbaj@etu.ec-lyon.fr>
Date: Fri, 1 Dec 2023 17:15:11 +0100
Subject: [PATCH] TD2 Deep Learning.ipynb: add post-training dynamic quantization evaluation

---
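Note: the new cell applies post-training dynamic quantization
(torch.quantization.quantize_dynamic) to the trained model and compares
accuracy and on-disk size against the float32 original. A minimal
standalone sketch of the same technique, assuming a small CPU-only toy
model (not part of the patch):

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    qmodel = torch.quantization.quantize_dynamic(
        model, {nn.Linear}, dtype=torch.qint8
    )
    print(qmodel)  # nn.Linear layers now appear as DynamicQuantizedLinear
    print(qmodel(torch.randn(1, 16)).shape)  # torch.Size([1, 4])
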
 TD2 Deep Learning.ipynb | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 88 insertions(+)

diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
index bb92c82..baa86cc 100644
--- a/TD2 Deep Learning.ipynb	
+++ b/TD2 Deep Learning.ipynb	
@@ -2910,6 +2910,94 @@
     "eval_model(model, test_dataloader, criterion)\n"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 147,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Original Model:\n",
+      "Test Loss: 0.1839 Test Acc: 0.9477\n",
+      "\n",
+      "Quantized Model:\n",
+      "Test Loss: 0.1839 Test Acc: 0.9477\n",
+      "\n",
+      "Original model size: 43.21 MB\n",
+      "Quantized model size: 42.83 MB\n"
+     ]
+    }
+   ],
+   "source": [
+    "import torch.quantization\n",
+    "\n",
+    "# Function to evaluate the quantized model on a test set\n",
+    "def eval_model_quantized(model, dataloader, criterion, device):\n",
+    "    model.eval()\n",
+    "    running_loss = 0.0\n",
+    "    running_corrects = 0\n",
+    "\n",
+    "    for inputs, labels in dataloader:\n",
+    "        inputs = inputs.to(device)\n",
+    "        labels = labels.to(device)\n",
+    "\n",
+    "        with torch.no_grad():\n",
+    "            outputs = model(inputs)\n",
+    "            _, preds = torch.max(outputs, 1)\n",
+    "            loss = criterion(outputs, labels)\n",
+    "\n",
+    "        running_loss += loss.item() * inputs.size(0)\n",
+    "        running_corrects += torch.sum(preds == labels.data)\n",
+    "\n",
+    "    loss = running_loss / len(dataloader.dataset)\n",
+    "    acc = running_corrects.double() / len(dataloader.dataset)\n",
+    "\n",
+    "    print(\"Test Loss: {:.4f} Test Acc: {:.4f}\".format(loss, acc))\n",
+    "    return acc\n",
+    "\n",
+    "# Post-training quantization\n",
+    "quantized_model = torch.quantization.quantize_dynamic(\n",
+    "    model,  # Original model\n",
+    "    {torch.nn.Linear},  # Specify the type of layers to be quantized\n",
+    "    dtype=torch.qint8  # Specify the quantization data type\n",
+    ")\n",
+    "\n",
+    "# Evaluate the original and quantized models on the test set\n",
+    "print(\"Original Model:\")\n",
+    "original_model_acc = eval_model(model, test_dataloader, criterion)\n",
+    "\n",
+    "print(\"\\nQuantized Model:\")\n",
+    "quantized_model_acc = eval_model_quantized(quantized_model, test_dataloader, criterion, device)\n",
+    "\n",
+    "# Compare the sizes of the original and quantized models\n",
+    "def get_size(model):\n",
+    "    torch.save(model.state_dict(), \"temp.pth\")\n",
+    "    size = os.path.getsize(\"temp.pth\") / (1024.0 ** 2)  # Size in megabytes\n",
+    "    os.remove(\"temp.pth\")\n",
+    "    return size\n",
+    "\n",
+    "original_model_size = get_size(model)\n",
+    "quantized_model_size = get_size(quantized_model)\n",
+    "original_model_size = get_size(model)\n",
+    "quantized_model_size = get_size(quantized_model)\n",
+    "\n",
+    "print(f\"\\nOriginal model size: {original_model_size:.2f} MB\")\n",
+    "print(f\"Quantized model size: {quantized_model_size:.2f} MB\")\n",
+    "\n",
+    "\n"
+   ]
+  },
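+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note: `quantize_dynamic` only converts the weights of the selected layer types (here `torch.nn.Linear`) to int8, which explains the modest size reduction (43.21 MB to 42.83 MB): most of the model's parameters presumably sit in layers that stay in float32."
+   ]
+  },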
   {
    "cell_type": "markdown",
    "id": "04a263f0",
-- 
GitLab