diff --git a/TD2_Deep_Learning.ipynb b/TD2_Deep_Learning.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..68f9b7fa2bfd20a991648205a9c95bae0b55595c
--- /dev/null
+++ b/TD2_Deep_Learning.ipynb
@@ -0,0 +1,1795 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "7edf7168",
+   "metadata": {},
+   "source": [
+    "# TD2: Deep learning"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fbb8c8df",
+   "metadata": {},
+   "source": [
+    "In this TD, you must modify this notebook to answer the questions. To do this,\n",
+    "\n",
+    "1. Fork this repository\n",
+    "2. Clone your forked repository on your local computer\n",
+    "3. Answer the questions\n",
+    "4. Commit and push regularly\n",
+    "\n",
+    "The last commit is due on Wednesday, December 4, 11:59 PM. Later commits will not be taken into account."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3d167a29",
+   "metadata": {},
+   "source": [
+    "Install and test PyTorch from  https://pytorch.org/get-started/locally."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "330a42f5",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Requirement already satisfied: torch in c:\\users\\thoma\\anaconda3\\lib\\site-packages (2.5.1)\n",
+      "Requirement already satisfied: torchvision in c:\\users\\thoma\\anaconda3\\lib\\site-packages (0.20.1)\n",
+      "Requirement already satisfied: filelock in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torch) (3.3.1)\n",
+      "Requirement already satisfied: typing-extensions>=4.8.0 in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torch) (4.12.2)\n",
+      "Requirement already satisfied: networkx in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torch) (2.6.3)\n",
+      "Requirement already satisfied: jinja2 in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torch) (2.11.3)\n",
+      "Requirement already satisfied: fsspec in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torch) (2021.10.1)\n",
+      "Requirement already satisfied: sympy==1.13.1 in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torch) (1.13.1)\n",
+      "Requirement already satisfied: mpmath<1.4,>=1.1.0 in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from sympy==1.13.1->torch) (1.2.1)\n",
+      "Requirement already satisfied: numpy in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torchvision) (1.20.3)\n",
+      "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from torchvision) (9.3.0)\n",
+      "Requirement already satisfied: MarkupSafe>=0.23 in c:\\users\\thoma\\anaconda3\\lib\\site-packages (from jinja2->torch) (1.1.1)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "WARNING: Ignoring invalid distribution -illow (c:\\users\\thoma\\anaconda3\\lib\\site-packages)\n",
+      "WARNING: Error parsing dependencies of pyodbc: Invalid version: '4.0.0-unsupported'\n",
+      "WARNING: Ignoring invalid distribution -illow (c:\\users\\thoma\\anaconda3\\lib\\site-packages)\n",
+      "ERROR: Exception:\n",
+      "Traceback (most recent call last):\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_internal\\cli\\base_command.py\", line 105, in _run_wrapper\n",
+      "    status = _inner_run()\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_internal\\cli\\base_command.py\", line 96, in _inner_run\n",
+      "    return self.run(options, args)\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_internal\\cli\\req_command.py\", line 67, in wrapper\n",
+      "    return func(self, options, args)\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_internal\\commands\\install.py\", line 483, in run\n",
+      "    installed_versions[distribution.canonical_name] = distribution.version\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_internal\\metadata\\pkg_resources.py\", line 192, in version\n",
+      "    return parse_version(self._dist.version)\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_vendor\\packaging\\version.py\", line 56, in parse\n",
+      "    return Version(version)\n",
+      "  File \"C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\pip\\_vendor\\packaging\\version.py\", line 202, in __init__\n",
+      "    raise InvalidVersion(f\"Invalid version: '{version}'\")\n",
+      "pip._vendor.packaging.version.InvalidVersion: Invalid version: '4.0.0-unsupported'\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install torch torchvision"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0882a636",
+   "metadata": {},
+   "source": [
+    "\n",
+    "To test run the following code"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "b1950f0a",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "tensor([[-0.0911,  0.0937, -0.3551, -1.0340, -0.0470, -0.8980,  1.0151, -0.2386,\n",
+      "          0.9468, -0.6654],\n",
+      "        [ 1.2260, -2.4299,  0.3165, -0.0942, -0.7884,  0.1000, -0.1902,  1.4085,\n",
+      "         -0.0049, -1.9006],\n",
+      "        [-0.3996,  0.4213,  0.1147, -0.2291, -0.5700, -1.6733, -1.0677, -1.4452,\n",
+      "         -0.5478, -0.3316],\n",
+      "        [ 0.7371, -0.2672, -0.6266,  1.2011, -0.1029,  1.0186, -0.9307, -0.5767,\n",
+      "         -1.3065,  0.6337],\n",
+      "        [ 1.4523, -2.0288, -0.1501,  1.2346, -0.6855,  1.2375, -1.0683,  0.7816,\n",
+      "          1.0790,  0.9691],\n",
+      "        [-0.2542, -0.7905, -0.7583,  0.2133,  0.3426, -0.9073,  0.9450, -0.3895,\n",
+      "         -1.1175, -0.9227],\n",
+      "        [ 2.7889,  1.0267, -0.8037,  2.2269, -2.6086,  0.5387, -0.3729,  2.2338,\n",
+      "         -1.1905,  0.6453],\n",
+      "        [-0.6251,  1.7669,  0.3064, -0.2883,  0.7485,  0.7840,  0.5777, -0.0385,\n",
+      "         -1.9255, -0.4606],\n",
+      "        [-0.2813, -1.1661, -1.4528, -1.6918,  1.5964, -0.7515, -0.5145, -1.6772,\n",
+      "         -0.8552,  0.0992],\n",
+      "        [ 0.3848, -0.3482, -0.9222,  1.9756,  0.8679, -1.9951, -0.4393, -1.7853,\n",
+      "         -0.0113,  0.4706],\n",
+      "        [-0.2662, -1.1537,  0.1385, -0.7331,  0.4919,  0.1670, -1.6089, -0.1584,\n",
+      "          0.6205, -0.5546],\n",
+      "        [ 0.1197,  0.8053, -1.4554,  0.0194,  1.3408, -0.5291,  0.5926, -0.0122,\n",
+      "         -0.3422,  1.1973],\n",
+      "        [ 1.8626, -1.2796,  0.2934, -0.4424,  0.3709, -0.7601,  1.7269,  0.4201,\n",
+      "          2.2315,  0.7984],\n",
+      "        [ 1.6506,  1.0549,  0.8871, -1.5745,  2.4543,  0.9559, -0.2421, -0.0486,\n",
+      "         -0.3529,  1.6273]])\n",
+      "AlexNet(\n",
+      "  (features): Sequential(\n",
+      "    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n",
+      "    (1): ReLU(inplace=True)\n",
+      "    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n",
+      "    (4): ReLU(inplace=True)\n",
+      "    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "    (7): ReLU(inplace=True)\n",
+      "    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "    (9): ReLU(inplace=True)\n",
+      "    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "    (11): ReLU(inplace=True)\n",
+      "    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "  )\n",
+      "  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))\n",
+      "  (classifier): Sequential(\n",
+      "    (0): Dropout(p=0.5, inplace=False)\n",
+      "    (1): Linear(in_features=9216, out_features=4096, bias=True)\n",
+      "    (2): ReLU(inplace=True)\n",
+      "    (3): Dropout(p=0.5, inplace=False)\n",
+      "    (4): Linear(in_features=4096, out_features=4096, bias=True)\n",
+      "    (5): ReLU(inplace=True)\n",
+      "    (6): Linear(in_features=4096, out_features=1000, bias=True)\n",
+      "  )\n",
+      ")\n"
+     ]
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "\n",
+    "N, D = 14, 10\n",
+    "x = torch.randn(N, D).type(torch.FloatTensor)\n",
+    "print(x)\n",
+    "\n",
+    "from torchvision import models\n",
+    "\n",
+    "alexnet = models.alexnet()\n",
+    "print(alexnet)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "23f266da",
+   "metadata": {},
+   "source": [
+    "## Exercise 1: CNN on CIFAR10\n",
+    "\n",
+    "The goal is to apply a Convolutional Neural Net (CNN) model on the CIFAR10 image dataset and test the accuracy of the model on the basis of image classification. Compare the Accuracy VS the neural network implemented during TD1.\n",
+    "\n",
+    "Have a look at the following documentation to be familiar with PyTorch.\n",
+    "\n",
+    "https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\n",
+    "\n",
+    "https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4ba1c82d",
+   "metadata": {},
+   "source": [
+    "You can test if GPU is available on your machine and thus train on it to speed up the process"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "6e18f2fd",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CUDA is not available.  Training on CPU ...\n"
+     ]
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "\n",
+    "# check if CUDA is available\n",
+    "train_on_gpu = torch.cuda.is_available()\n",
+    "\n",
+    "if not train_on_gpu:\n",
+    "    print(\"CUDA is not available.  Training on CPU ...\")\n",
+    "else:\n",
+    "    print(\"CUDA is available!  Training on GPU ...\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5cf214eb",
+   "metadata": {},
+   "source": [
+    "Next we load the CIFAR10 dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "462666a2",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Files already downloaded and verified\n",
+      "Files already downloaded and verified\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "from torchvision import datasets, transforms\n",
+    "from torch.utils.data.sampler import SubsetRandomSampler\n",
+    "\n",
+    "# number of subprocesses to use for data loading\n",
+    "num_workers = 0\n",
+    "# how many samples per batch to load\n",
+    "batch_size = 20\n",
+    "# percentage of training set to use as validation\n",
+    "valid_size = 0.2\n",
+    "\n",
+    "# convert data to a normalized torch.FloatTensor\n",
+    "transform = transforms.Compose(\n",
+    "    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n",
+    ")\n",
+    "\n",
+    "# choose the training and test datasets\n",
+    "train_data = datasets.CIFAR10(\"data\", train=True, download=True, transform=transform)\n",
+    "test_data = datasets.CIFAR10(\"data\", train=False, download=True, transform=transform)\n",
+    "\n",
+    "# obtain training indices that will be used for validation\n",
+    "num_train = len(train_data)\n",
+    "indices = list(range(num_train))\n",
+    "np.random.shuffle(indices)\n",
+    "split = int(np.floor(valid_size * num_train))\n",
+    "train_idx, valid_idx = indices[split:], indices[:split]\n",
+    "\n",
+    "# define samplers for obtaining training and validation batches\n",
+    "train_sampler = SubsetRandomSampler(train_idx)\n",
+    "valid_sampler = SubsetRandomSampler(valid_idx)\n",
+    "\n",
+    "# prepare data loaders (combine dataset and sampler)\n",
+    "train_loader = torch.utils.data.DataLoader(\n",
+    "    train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers\n",
+    ")\n",
+    "valid_loader = torch.utils.data.DataLoader(\n",
+    "    train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers\n",
+    ")\n",
+    "test_loader = torch.utils.data.DataLoader(\n",
+    "    test_data, batch_size=batch_size, num_workers=num_workers\n",
+    ")\n",
+    "\n",
+    "# specify the image classes\n",
+    "classes = [\n",
+    "    \"airplane\",\n",
+    "    \"automobile\",\n",
+    "    \"bird\",\n",
+    "    \"cat\",\n",
+    "    \"deer\",\n",
+    "    \"dog\",\n",
+    "    \"frog\",\n",
+    "    \"horse\",\n",
+    "    \"ship\",\n",
+    "    \"truck\",\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "58ec3903",
+   "metadata": {},
+   "source": [
+    "CNN definition (this one is an example)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "317bf070",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Net(\n",
+      "  (dropout): Dropout2d(p=0.1, inplace=False)\n",
+      "  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "  (fc1): Linear(in_features=1024, out_features=512, bias=True)\n",
+      "  (fc2): Linear(in_features=512, out_features=64, bias=True)\n",
+      "  (fc3): Linear(in_features=64, out_features=10, bias=True)\n",
+      ")\n"
+     ]
+    }
+   ],
+   "source": [
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "# define the CNN architecture\n",
+    "\n",
+    "\n",
+    "class Net(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Net, self).__init__()\n",
+    "        \n",
+    "        self.dropout = nn.Dropout2d(p=0.1)\n",
+    "        \n",
+    "        self.pool = nn.MaxPool2d(2)\n",
+    "        \n",
+    "        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n",
+    "        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n",
+    "        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n",
+    "        \n",
+    "        self.fc1 = nn.Linear(64 * 4 * 4, 512)\n",
+    "        self.fc2 = nn.Linear(512, 64)\n",
+    "        self.fc3 = nn.Linear(64, 10)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        \n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = self.pool(F.relu(self.conv3(x)))\n",
+    "        \n",
+    "        x = x.view(-1, 64 * 4 * 4)\n",
+    "        x = self.dropout(F.relu(self.fc1(x)))\n",
+    "        x = self.dropout(F.relu(self.fc2(x)))\n",
+    "        x = self.fc3(x)\n",
+    "        \n",
+    "        return x\n",
+    "\n",
+    "\n",
+    "# create a complete CNN\n",
+    "model = Net()\n",
+    "print(model)\n",
+    "# move tensors to GPU if CUDA is available\n",
+    "if train_on_gpu:\n",
+    "    model.cuda()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a2dc4974",
+   "metadata": {},
+   "source": [
+    "Loss function and training using SGD (Stochastic Gradient Descent) optimizer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "4b53f229",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "ename": "KeyboardInterrupt",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
+      "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_39460/1321297987.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     16\u001b[0m     \u001b[1;31m# Train the model\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     17\u001b[0m     \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 18\u001b[1;33m     \u001b[1;32mfor\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mtrain_loader\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     19\u001b[0m         \u001b[1;31m# Move tensors to GPU if CUDA is available\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     20\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mtrain_on_gpu\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\u001b[0m in \u001b[0;36m__next__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    699\u001b[0m                 \u001b[1;31m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    700\u001b[0m                 \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_reset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m  \u001b[1;31m# type: ignore[call-arg]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 701\u001b[1;33m             \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_next_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    702\u001b[0m             \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_num_yielded\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    703\u001b[0m             if (\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\u001b[0m in \u001b[0;36m_next_data\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    755\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_next_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    756\u001b[0m         \u001b[0mindex\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_next_index\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m  \u001b[1;31m# may raise StopIteration\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 757\u001b[1;33m         \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_dataset_fetcher\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfetch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mindex\u001b[0m\u001b[1;33m)\u001b[0m  \u001b[1;31m# may raise StopIteration\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    758\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_pin_memory\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    759\u001b[0m             \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_utils\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpin_memory\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpin_memory\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_pin_memory_device\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py\u001b[0m in \u001b[0;36mfetch\u001b[1;34m(self, possibly_batched_index)\u001b[0m\n\u001b[0;32m     50\u001b[0m                 \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__getitems__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpossibly_batched_index\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     51\u001b[0m             \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 52\u001b[1;33m                 \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0midx\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0midx\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mpossibly_batched_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     53\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     54\u001b[0m             \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mpossibly_batched_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m     50\u001b[0m                 \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__getitems__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpossibly_batched_index\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     51\u001b[0m             \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 52\u001b[1;33m                 \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0midx\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0midx\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mpossibly_batched_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     53\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     54\u001b[0m             \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mpossibly_batched_index\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torchvision\\datasets\\cifar.py\u001b[0m in \u001b[0;36m__getitem__\u001b[1;34m(self, index)\u001b[0m\n\u001b[0;32m    117\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    118\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtransform\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 119\u001b[1;33m             \u001b[0mimg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtransform\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    120\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    121\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtarget_transform\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\transforms.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, img)\u001b[0m\n\u001b[0;32m     93\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     94\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mt\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtransforms\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 95\u001b[1;33m             \u001b[0mimg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mt\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     96\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     97\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\transforms.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, pic)\u001b[0m\n\u001b[0;32m    135\u001b[0m             \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mConverted\u001b[0m \u001b[0mimage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    136\u001b[0m         \"\"\"\n\u001b[1;32m--> 137\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto_tensor\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpic\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    138\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    139\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m__repr__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\functional.py\u001b[0m in \u001b[0;36mto_tensor\u001b[1;34m(pic)\u001b[0m\n\u001b[0;32m    172\u001b[0m     \u001b[0mimg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mview\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpic\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpic\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mF_pil\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_image_num_channels\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpic\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    173\u001b[0m     \u001b[1;31m# put it from HWC to CHW format\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 174\u001b[1;33m     \u001b[0mimg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpermute\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcontiguous\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    175\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mByteTensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    176\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdefault_float_dtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdiv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m255\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
+     ]
+    }
+   ],
+   "source": [
+    "import torch.optim as optim\n",
+    "\n",
+    "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
+    "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
+    "\n",
+    "n_epochs = 30  # number of epochs to train the model\n",
+    "train_loss_list = []  # list to store loss to visualize\n",
+    "valid_loss_min = np.Inf  # track change in validation loss\n",
+    "\n",
+    "i = 0\n",
+    "for epoch in range(n_epochs):\n",
+    "    # Keep track of training and validation loss\n",
+    "    train_loss = 0.0\n",
+    "    valid_loss = 0.0\n",
+    "\n",
+    "    # Train the model\n",
+    "    model.train()\n",
+    "    for data, target in train_loader:\n",
+    "        # Move tensors to GPU if CUDA is available\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        # Clear the gradients of all optimized variables\n",
+    "        optimizer.zero_grad()\n",
+    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "        output = model(data)\n",
+    "        # Calculate the batch loss\n",
+    "        loss = criterion(output, target)\n",
+    "        # Backward pass: compute gradient of the loss with respect to model parameters\n",
+    "        loss.backward()\n",
+    "        # Perform a single optimization step (parameter update)\n",
+    "        optimizer.step()\n",
+    "        # Update training loss\n",
+    "        train_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "    # Validate the model\n",
+    "    model.eval()\n",
+    "    for data, target in valid_loader:\n",
+    "        # Move tensors to GPU if CUDA is available\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "        output = model(data)\n",
+    "        # Calculate the batch loss\n",
+    "        loss = criterion(output, target)\n",
+    "        # Update average validation loss\n",
+    "        valid_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "    # Calculate average losses\n",
+    "    train_loss = train_loss / len(train_loader)\n",
+    "    valid_loss = valid_loss / len(valid_loader)\n",
+    "    train_loss_list.append(train_loss)\n",
+    "\n",
+    "    # Print training/validation statistics\n",
+    "    print(\n",
+    "        \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n",
+    "            epoch, train_loss, valid_loss\n",
+    "        )\n",
+    "    )\n",
+    "\n",
+    "    # Save model if validation loss has decreased\n",
+    "    if valid_loss <= valid_loss_min:\n",
+    "        print(\n",
+    "            \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n",
+    "                valid_loss_min, valid_loss\n",
+    "            )\n",
+    "        )\n",
+    "        torch.save(model.state_dict(), \"model_cifar.pt\")\n",
+    "        valid_loss_min = valid_loss\n",
+    "    else:\n",
+    "        i += 1\n",
+    "    \n",
+    "    if i == 5:\n",
+    "        break"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "13e1df74",
+   "metadata": {},
+   "source": [
+    "Does overfit occur? If so, do an early stopping."
+   ]
+  },
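+  {
+   "cell_type": "markdown",
+   "id": "9f4e2a10",
+   "metadata": {},
+   "source": [
+    "The training loop above already implements a simple form of early stopping: it saves the best weights and breaks out after 5 non-improving epochs. The cell below is a minimal, standalone sketch of the common patience variant, which resets the counter whenever the validation loss improves; `train_one_epoch` and `validate` are hypothetical stand-ins for the training and validation passes of the loop above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b6d81c3e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal patience-based early-stopping sketch (illustration only).\n",
+    "# `train_one_epoch` and `validate` are hypothetical helpers standing in\n",
+    "# for the training and validation passes of the loop above.\n",
+    "def fit_with_early_stopping(model, train_one_epoch, validate, max_epochs=30, patience=5):\n",
+    "    best_loss = float(\"inf\")\n",
+    "    epochs_without_improvement = 0\n",
+    "    for epoch in range(max_epochs):\n",
+    "        train_one_epoch(model)\n",
+    "        valid_loss = validate(model)\n",
+    "        if valid_loss < best_loss:\n",
+    "            best_loss = valid_loss\n",
+    "            epochs_without_improvement = 0  # reset the counter on improvement\n",
+    "            torch.save(model.state_dict(), \"model_cifar.pt\")  # keep the best weights\n",
+    "        else:\n",
+    "            epochs_without_improvement += 1\n",
+    "        if epochs_without_improvement >= patience:\n",
+    "            print(\"Early stopping at epoch\", epoch)\n",
+    "            break\n",
+    "    return best_loss"
+   ]
+  },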
+  {
+   "cell_type": "markdown",
+   "id": "11df8fd4",
+   "metadata": {},
+   "source": [
+    "Now loading the model with the lowest validation loss value\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "e93efdfc",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\thoma\\AppData\\Local\\Temp/ipykernel_39460/3291884398.py:1: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
+      "  model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Loss: 17.244733\n",
+      "\n",
+      "Test Accuracy of airplane: 78% (780/1000)\n",
+      "Test Accuracy of automobile: 87% (879/1000)\n",
+      "Test Accuracy of  bird: 57% (576/1000)\n",
+      "Test Accuracy of   cat: 48% (482/1000)\n",
+      "Test Accuracy of  deer: 74% (742/1000)\n",
+      "Test Accuracy of   dog: 60% (602/1000)\n",
+      "Test Accuracy of  frog: 74% (740/1000)\n",
+      "Test Accuracy of horse: 79% (794/1000)\n",
+      "Test Accuracy of  ship: 80% (809/1000)\n",
+      "Test Accuracy of truck: 75% (752/1000)\n",
+      "\n",
+      "Test Accuracy (Overall): 71% (7156/10000)\n"
+     ]
+    }
+   ],
+   "source": [
+    "model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
+    "\n",
+    "# track test loss\n",
+    "test_loss = 0.0\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
+    "\n",
+    "model.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = model(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(batch_size):\n",
+    "        label = target.data[i]\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
+    "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
+    "\n",
+    "for i in range(10):\n",
+    "    if class_total[i] > 0:\n",
+    "        print(\n",
+    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                classes[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
+    "            )\n",
+    "        )\n",
+    "    else:\n",
+    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
+    "\n",
+    "print(\n",
+    "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
+    "    % (\n",
+    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
+    "        np.sum(class_correct),\n",
+    "        np.sum(class_total),\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "944991a2",
+   "metadata": {},
+   "source": [
+    "Build a new network with the following structure.\n",
+    "\n",
+    "- It has 3 convolutional layers of kernel size 3 and padding of 1.\n",
+    "- The first convolutional layer must output 16 channels, the second 32 and the third 64.\n",
+    "- At each convolutional layer output, we apply a ReLU activation then a MaxPool with kernel size of 2.\n",
+    "- Then, three fully connected layers, the first two being followed by a ReLU activation and a dropout whose value you will suggest.\n",
+    "- The first fully connected layer will have an output size of 512.\n",
+    "- The second fully connected layer will have an output size of 64.\n",
+    "\n",
+    "Compare the results obtained with this new network to those obtained previously : \n",
+    "The first model has a test accuracy of 63%. The new one has a test accuracy of 71%."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bc381cf4",
+   "metadata": {},
+   "source": [
+    "## Exercise 2: Quantization: try to compress the CNN to save space\n",
+    "\n",
+    "Quantization doc is available from https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic\n",
+    "        \n",
+    "The Exercise is to quantize post training the above CNN model. Compare the size reduction and the impact on the classification accuracy \n",
+    "\n",
+    "\n",
+    "The size of the model is simply the size of the file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "ef623c26",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "model:  fp32  \t Size (KB): 2330.946\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "2330946"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import os\n",
+    "\n",
+    "\n",
+    "def print_size_of_model(model, label=\"\"):\n",
+    "    torch.save(model.state_dict(), \"temp.p\")\n",
+    "    size = os.path.getsize(\"temp.p\")\n",
+    "    print(\"model: \", label, \" \\t\", \"Size (KB):\", size / 1e3)\n",
+    "    os.remove(\"temp.p\")\n",
+    "    return size\n",
+    "\n",
+    "\n",
+    "print_size_of_model(model, \"fp32\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "05c4e9ad",
+   "metadata": {},
+   "source": [
+    "Post training quantization example"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "c4c65d4b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "model:  int8  \t Size (KB): 659.806\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "659806"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import torch.quantization\n",
+    "\n",
+    "\n",
+    "quantized_model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)\n",
+    "print_size_of_model(quantized_model, \"int8\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7b108e17",
+   "metadata": {},
+   "source": [
+    "For each class, compare the classification test accuracy of the initial model and the quantized model. Also give the overall test accuracy for both models."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a0a34b90",
+   "metadata": {},
+   "source": [
+    "Try training aware quantization to mitigate the impact on the accuracy (doc available here https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic)"
+   ]
+  },
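+  {
+   "cell_type": "markdown",
+   "id": "2ac7f5d9",
+   "metadata": {},
+   "source": [
+    "Quantization-aware training (QAT) inserts fake-quantization observers during fine-tuning, so the weights adapt to the reduced precision before conversion. The cell below is a rough eager-mode sketch, assuming the \"fbgemm\" CPU backend; the fine-tuning loop itself is elided and would mirror the training loop above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7e91b0c4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Rough quantization-aware training (QAT) sketch, eager mode.\n",
+    "# Assumption: CPU \"fbgemm\" backend; the fine-tuning loop is elided.\n",
+    "import torch.quantization as tq\n",
+    "\n",
+    "\n",
+    "class QATNet(nn.Module):\n",
+    "    def __init__(self, base):\n",
+    "        super().__init__()\n",
+    "        self.quant = tq.QuantStub()      # marks where tensors get quantized\n",
+    "        self.base = base                 # the float model to fine-tune\n",
+    "        self.dequant = tq.DeQuantStub()  # marks where tensors return to float\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        return self.dequant(self.base(self.quant(x)))\n",
+    "\n",
+    "\n",
+    "qat_model = QATNet(Net())\n",
+    "qat_model.qconfig = tq.get_default_qat_qconfig(\"fbgemm\")\n",
+    "qat_model.train()\n",
+    "tq.prepare_qat(qat_model, inplace=True)  # insert fake-quant observers\n",
+    "# ... fine-tune qat_model for a few epochs as in the training loop above ...\n",
+    "qat_model.eval()\n",
+    "int8_model = tq.convert(qat_model)  # materialize the int8 model"
+   ]
+  },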
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "6467a286",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\thoma\\AppData\\Local\\Temp/ipykernel_39460/681464573.py:1: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
+      "  model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Loss: 17.244733\n",
+      "\n",
+      "Test Accuracy of airplane: 78% (780/1000)\n",
+      "Test Accuracy of automobile: 87% (879/1000)\n",
+      "Test Accuracy of  bird: 57% (576/1000)\n",
+      "Test Accuracy of   cat: 48% (482/1000)\n",
+      "Test Accuracy of  deer: 74% (742/1000)\n",
+      "Test Accuracy of   dog: 60% (602/1000)\n",
+      "Test Accuracy of  frog: 74% (740/1000)\n",
+      "Test Accuracy of horse: 79% (794/1000)\n",
+      "Test Accuracy of  ship: 80% (809/1000)\n",
+      "Test Accuracy of truck: 75% (752/1000)\n",
+      "\n",
+      "Test Accuracy (Overall): 71% (7156/10000)\n",
+      "\n",
+      "Quantized test Loss: 17.257180\n",
+      "\n",
+      "Quantized test Accuracy of airplane: 77% (779/1000)\n",
+      "Quantized test Accuracy of automobile: 88% (881/1000)\n",
+      "Quantized test Accuracy of  bird: 58% (582/1000)\n",
+      "Quantized test Accuracy of   cat: 47% (479/1000)\n",
+      "Quantized test Accuracy of  deer: 74% (743/1000)\n",
+      "Quantized test Accuracy of   dog: 59% (599/1000)\n",
+      "Quantized test Accuracy of  frog: 73% (739/1000)\n",
+      "Quantized test Accuracy of horse: 79% (790/1000)\n",
+      "Quantized test Accuracy of  ship: 81% (811/1000)\n",
+      "Quantized test Accuracy of truck: 74% (749/1000)\n",
+      "\n",
+      "Quantized test Accuracy (Overall): 71% (7152/10000)\n"
+     ]
+    }
+   ],
+   "source": [
+    "model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
+    "\n",
+    "# track test loss\n",
+    "test_loss = 0.0\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
+    "\n",
+    "model.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = model(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(batch_size):\n",
+    "        label = target.data[i]\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
+    "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
+    "\n",
+    "for i in range(10):\n",
+    "    if class_total[i] > 0:\n",
+    "        print(\n",
+    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                classes[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
+    "            )\n",
+    "        )\n",
+    "    else:\n",
+    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
+    "\n",
+    "print(\n",
+    "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\\n\"\n",
+    "    % (\n",
+    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
+    "        np.sum(class_correct),\n",
+    "        np.sum(class_total),\n",
+    "    )\n",
+    ")\n",
+    "\n",
+    "test_loss = 0.0\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
+    "quantized_model.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = quantized_model(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(batch_size):\n",
+    "        label = target.data[i]\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
+    "print(\"Quantized test Loss: {:.6f}\\n\".format(test_loss))\n",
+    "\n",
+    "for i in range(10):\n",
+    "    if class_total[i] > 0:\n",
+    "        print(\n",
+    "            \"Quantized test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                classes[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
+    "            )\n",
+    "        )\n",
+    "    else:\n",
+    "        print(\"Quantized test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
+    "\n",
+    "print(\n",
+    "    \"\\nQuantized test Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
+    "    % (\n",
+    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
+    "        np.sum(class_correct),\n",
+    "        np.sum(class_total),\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "84fe7b31",
+   "metadata": {},
+   "source": [
+    "The two tests are almost equally performant, so the quantization doesn't have any impact on the porformance although it weights way less."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "201470f9",
+   "metadata": {},
+   "source": [
+    "## Exercise 3: working with pre-trained models.\n",
+    "\n",
+    "PyTorch offers several pre-trained models https://pytorch.org/vision/0.8/models.html        \n",
+    "We will use ResNet50 trained on ImageNet dataset (https://www.image-net.org/index.php). Use the following code with the files `imagenet-simple-labels.json` that contains the imagenet labels and the image dog.png that we will use as test.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "id": "b4d13080",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Resnet\n",
+      "model:  fp32  \t Size (KB): 102523.238\n",
+      "model:  int8  \t Size (KB): 96379.996\n",
+      "dog.png\n",
+      "For the test, predicted class is: Golden Retriever\n",
+      "For the quantized test, predicted class is: Golden Retriever\n",
+      "airplane.jpg\n",
+      "For the test, predicted class is: airliner\n",
+      "For the quantized test, predicted class is: airliner\n",
+      "automobile.jpeg\n",
+      "For the test, predicted class is: sports car\n",
+      "For the quantized test, predicted class is: sports car\n",
+      "ship.jpg\n",
+      "For the test, predicted class is: motorboat\n",
+      "For the quantized test, predicted class is: motorboat\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\torchvision\\models\\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=AlexNet_Weights.IMAGENET1K_V1`. You can also use `weights=AlexNet_Weights.DEFAULT` to get the most up-to-date weights.\n",
+      "  warnings.warn(msg)\n",
+      "Downloading: \"https://download.pytorch.org/models/alexnet-owt-7be5be79.pth\" to C:\\Users\\thoma/.cache\\torch\\hub\\checkpoints\\alexnet-owt-7be5be79.pth\n",
+      "100%|██████████| 233M/233M [00:21<00:00, 11.4MB/s] \n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Alexnet\n",
+      "model:  fp32  \t Size (KB): 244408.234\n",
+      "model:  int8  \t Size (KB): 68544.39\n",
+      "dog.png\n",
+      "For the test, predicted class is: Golden Retriever\n",
+      "For the quantized test, predicted class is: Golden Retriever\n",
+      "airplane.jpg\n",
+      "For the test, predicted class is: airliner\n",
+      "For the quantized test, predicted class is: airliner\n",
+      "automobile.jpeg\n",
+      "For the test, predicted class is: station wagon\n",
+      "For the quantized test, predicted class is: sports car\n",
+      "ship.jpg\n",
+      "For the test, predicted class is: motorboat\n",
+      "For the quantized test, predicted class is: motorboat\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\thoma\\anaconda3\\lib\\site-packages\\torchvision\\models\\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=VGG16_Weights.IMAGENET1K_V1`. You can also use `weights=VGG16_Weights.DEFAULT` to get the most up-to-date weights.\n",
+      "  warnings.warn(msg)\n",
+      "Downloading: \"https://download.pytorch.org/models/vgg16-397923af.pth\" to C:\\Users\\thoma/.cache\\torch\\hub\\checkpoints\\vgg16-397923af.pth\n",
+      "100%|██████████| 528M/528M [00:49<00:00, 11.2MB/s] \n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Vgg16\n",
+      "model:  fp32  \t Size (KB): 553439.178\n",
+      "model:  int8  \t Size (KB): 182540.454\n",
+      "dog.png\n",
+      "For the test, predicted class is: Golden Retriever\n",
+      "For the quantized test, predicted class is: Golden Retriever\n",
+      "airplane.jpg\n",
+      "For the test, predicted class is: airliner\n",
+      "For the quantized test, predicted class is: airliner\n",
+      "automobile.jpeg\n",
+      "For the test, predicted class is: sports car\n",
+      "For the quantized test, predicted class is: sports car\n",
+      "ship.jpg\n",
+      "For the test, predicted class is: motorboat\n",
+      "For the quantized test, predicted class is: motorboat\n"
+     ]
+    }
+   ],
+   "source": [
+    "import json\n",
+    "from PIL import Image\n",
+    "\n",
+    "def initialize_model():\n",
+    "    print_size_of_model(model, \"fp32\")\n",
+    "    # Send the model to the GPU\n",
+    "    # model.cuda()\n",
+    "    # Set layers such as dropout and batchnorm in evaluation mode\n",
+    "    quantized_model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)\n",
+    "    print_size_of_model(quantized_model, \"int8\")\n",
+    "    model.eval()\n",
+    "    quantized_model.eval()\n",
+    "    # Configure matplotlib for pretty inline plots\n",
+    "    #%matplotlib inline\n",
+    "    #%config InlineBackend.figure_format = 'retina'\n",
+    "\n",
+    "    # Prepare the labels\n",
+    "    with open(\"imagenet-simple-labels.json\") as f:\n",
+    "        labels = json.load(f)\n",
+    "\n",
+    "    # First prepare the transformations: resize the image to what the model was trained on and convert it to a tensor\n",
+    "    data_transform = transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize((224, 224)),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "        ]\n",
+    "    )\n",
+    "    \n",
+    "def classify(test_image):\n",
+    "    # Load the image\n",
+    "\n",
+    "    image = Image.open(test_image)\n",
+    "\n",
+    "    # Now apply the transformation, expand the batch dimension, and send the image to the GPU\n",
+    "    # image = data_transform(image).unsqueeze(0).cuda()\n",
+    "    image = data_transform(image).unsqueeze(0)\n",
+    "\n",
+    "    # Get the 1000-dimensional model output\n",
+    "    out = model(image)\n",
+    "    quantized_out = quantized_model(image)\n",
+    "    # Find the predicted class\n",
+    "    print(test_image)\n",
+    "    print(\"For the test, predicted class is: {}\".format(labels[out.argmax()]))\n",
+    "    print(\"For the quantized test, predicted class is: {}\".format(labels[quantized_out.argmax()]))\n",
+    "    \n",
+    "    \n",
+    "model = models.resnet50(pretrained=True)\n",
+    "print('Resnet')\n",
+    "initialize_model()\n",
+    "classify(\"dog.png\")\n",
+    "classify(\"airplane.jpg\")\n",
+    "classify(\"automobile.jpeg\")\n",
+    "classify(\"ship.jpg\")\n",
+    "\n",
+    "model = models.alexnet(pretrained=True)\n",
+    "print('Alexnet')\n",
+    "initialize_model()\n",
+    "classify(\"dog.png\")\n",
+    "classify(\"airplane.jpg\")\n",
+    "classify(\"automobile.jpeg\")\n",
+    "classify(\"ship.jpg\")\n",
+    "\n",
+    "model = models.vgg16(pretrained=True)\n",
+    "print('Vgg16')\n",
+    "initialize_model()\n",
+    "classify(\"dog.png\")\n",
+    "classify(\"airplane.jpg\")\n",
+    "classify(\"automobile.jpeg\")\n",
+    "classify(\"ship.jpg\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "184cfceb",
+   "metadata": {},
+   "source": [
+    "Experiments:\n",
+    "\n",
+    "Study the code and the results obtained. Possibly add other images downloaded from the internet.\n",
+    "\n",
+    "What is the size of the model? Quantize it and then check if the model is still able to correctly classify the other images.\n",
+    "\n",
+    "Experiment with other pre-trained CNN models.\n",
+    "\n",
+    "We can see similar performance with all models, wheither it's quantized or not, except for Alexnet which predict wrong of automobile.jpeg, but rigth with its quantized model.\n",
+    "\n",
+    "    \n"
+   ]
+  },
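+  {
+   "cell_type": "markdown",
+   "id": "timing-note-md",
+   "metadata": {},
+   "source": [
+    "Beyond accuracy, another way to study the impact of quantization is to compare inference latency. Below is a minimal sketch (an illustration, not part of the exercise; it assumes `model`, `quantized_model` and `data_transform` from the cell above are still in scope, and `time_inference` is a hypothetical helper name):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "timing-note-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "\n",
+    "def time_inference(m, image, n_runs=20):\n",
+    "    # One warm-up pass, then average wall-clock time over n_runs forward passes\n",
+    "    with torch.no_grad():\n",
+    "        m(image)\n",
+    "        start = time.perf_counter()\n",
+    "        for _ in range(n_runs):\n",
+    "            m(image)\n",
+    "    return (time.perf_counter() - start) / n_runs\n",
+    "\n",
+    "image = data_transform(Image.open(\"dog.png\")).unsqueeze(0)\n",
+    "print(\"fp32: {:.4f}s  int8: {:.4f}s\".format(\n",
+    "    time_inference(model, image), time_inference(quantized_model, image)))"
+   ]
+  },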
+  {
+   "cell_type": "markdown",
+   "id": "5d57da4b",
+   "metadata": {},
+   "source": [
+    "## Exercise 4: Transfer Learning\n",
+    "    \n",
+    "    \n",
+    "For this work, we will use a pre-trained model (ResNet18) as a descriptor extractor and will refine the classification by training only the last fully connected layer of the network. Thus, the output layer of the pre-trained network will be replaced by a layer adapted to the new classes to be recognized which will be in our case ants and bees.\n",
+    "Download and unzip in your working directory the dataset available at the address :\n",
+    "    \n",
+    "https://download.pytorch.org/tutorial/hymenoptera_data.zip\n",
+    "    \n",
+    "Execute the following code in order to display some images of the dataset."
+   ]
+  },
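+  {
+   "cell_type": "markdown",
+   "id": "dl-helper-md",
+   "metadata": {},
+   "source": [
+    "As a convenience, here is a minimal sketch for fetching and extracting the dataset directly from Python (an optional helper, assuming network access; it extracts into the working directory so the `data_dir` used below resolves):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "dl-helper-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import urllib.request\n",
+    "import zipfile\n",
+    "\n",
+    "url = \"https://download.pytorch.org/tutorial/hymenoptera_data.zip\"\n",
+    "archive = \"hymenoptera_data.zip\"\n",
+    "\n",
+    "# Download and extract only if the dataset is not already present\n",
+    "if not os.path.isdir(\"hymenoptera_data\"):\n",
+    "    urllib.request.urlretrieve(url, archive)\n",
+    "    with zipfile.ZipFile(archive) as zf:\n",
+    "        zf.extractall(\".\")\n",
+    "    os.remove(archive)"
+   ]
+  },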
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "be2d31f5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import torch\n",
+    "import torchvision\n",
+    "from torchvision import datasets, transforms\n",
+    "\n",
+    "# Data augmentation and normalization for training\n",
+    "# Just normalization for validation\n",
+    "data_transforms = {\n",
+    "    \"train\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.RandomResizedCrop(\n",
+    "                224\n",
+    "            ),  # ImageNet models were trained on 224x224 images\n",
+    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
+    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
+    "            transforms.Normalize(\n",
+    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
+    "            ),  # ImageNet models expect this norm\n",
+    "        ]\n",
+    "    ),\n",
+    "    \"val\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize(256),\n",
+    "            transforms.CenterCrop(224),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "        ]\n",
+    "    ),\n",
+    "}\n",
+    "\n",
+    "data_dir = \"hymenoptera_data\"\n",
+    "# Create train and validation datasets and loaders\n",
+    "image_datasets = {\n",
+    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataloaders = {\n",
+    "    x: torch.utils.data.DataLoader(\n",
+    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=0\n",
+    "    )\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
+    "class_names = image_datasets[\"train\"].classes\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "# Helper function for displaying images\n",
+    "def imshow(inp, title=None):\n",
+    "    \"\"\"Imshow for Tensor.\"\"\"\n",
+    "    inp = inp.numpy().transpose((1, 2, 0))\n",
+    "    mean = np.array([0.485, 0.456, 0.406])\n",
+    "    std = np.array([0.229, 0.224, 0.225])\n",
+    "\n",
+    "    # Un-normalize the images\n",
+    "    inp = std * inp + mean\n",
+    "    # Clip just in case\n",
+    "    inp = np.clip(inp, 0, 1)\n",
+    "    plt.imshow(inp)\n",
+    "    if title is not None:\n",
+    "        plt.title(title)\n",
+    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
+    "    plt.show()\n",
+    "\n",
+    "\n",
+    "# Get a batch of training data\n",
+    "inputs, classes = next(iter(dataloaders[\"train\"]))\n",
+    "\n",
+    "# Make a grid from batch\n",
+    "out = torchvision.utils.make_grid(inputs)\n",
+    "\n",
+    "# imshow(out, title=[class_names[x] for x in classes])\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bbd48800",
+   "metadata": {},
+   "source": [
+    "Now, execute the following code which uses a pre-trained model ResNet18 having replaced the output layer for the ants/bees classification and performs the model training by only changing the weights of this output layer."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "572d824c",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\thoma\\anaconda3\\Lib\\site-packages\\torchvision\\models\\_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
+      "  warnings.warn(\n",
+      "C:\\Users\\thoma\\anaconda3\\Lib\\site-packages\\torchvision\\models\\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.\n",
+      "  warnings.warn(msg)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1/10\n",
+      "----------\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\thoma\\anaconda3\\Lib\\site-packages\\torch\\optim\\lr_scheduler.py:224: UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`.  Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n",
+      "  warnings.warn(\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "train Loss: 0.6799 Acc: 0.5779\n",
+      "val Loss: 0.3839 Acc: 0.8039\n",
+      "\n",
+      "Epoch 2/10\n",
+      "----------\n",
+      "train Loss: 0.5218 Acc: 0.7500\n",
+      "val Loss: 0.1294 Acc: 0.9542\n",
+      "\n",
+      "Epoch 3/10\n",
+      "----------\n",
+      "train Loss: 0.4172 Acc: 0.7828\n",
+      "val Loss: 0.0696 Acc: 0.9869\n",
+      "\n",
+      "Epoch 4/10\n",
+      "----------\n",
+      "train Loss: 0.3890 Acc: 0.8197\n",
+      "val Loss: 0.0614 Acc: 1.0000\n",
+      "\n",
+      "Epoch 5/10\n",
+      "----------\n",
+      "train Loss: 0.4475 Acc: 0.7910\n",
+      "val Loss: 0.0508 Acc: 1.0000\n",
+      "\n",
+      "Epoch 6/10\n",
+      "----------\n",
+      "train Loss: 0.5432 Acc: 0.7418\n",
+      "val Loss: 0.0341 Acc: 1.0000\n",
+      "\n",
+      "Epoch 7/10\n",
+      "----------\n",
+      "train Loss: 0.4899 Acc: 0.7541\n",
+      "val Loss: 0.0289 Acc: 1.0000\n",
+      "\n",
+      "Epoch 8/10\n",
+      "----------\n",
+      "train Loss: 0.3774 Acc: 0.8115\n",
+      "val Loss: 0.0292 Acc: 1.0000\n",
+      "\n",
+      "Epoch 9/10\n",
+      "----------\n",
+      "train Loss: 0.4988 Acc: 0.7787\n",
+      "val Loss: 0.0289 Acc: 1.0000\n",
+      "\n",
+      "Epoch 10/10\n",
+      "----------\n",
+      "train Loss: 0.4675 Acc: 0.7869\n",
+      "val Loss: 0.0291 Acc: 1.0000\n",
+      "\n",
+      "Training complete in 5m 8s\n",
+      "Best val Acc: 1.000000\n"
+     ]
+    }
+   ],
+   "source": [
+    "import copy\n",
+    "import os\n",
+    "import time\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.optim as optim\n",
+    "import torchvision\n",
+    "from torch.optim import lr_scheduler\n",
+    "from torchvision import datasets, transforms\n",
+    "\n",
+    "# Data augmentation and normalization for training\n",
+    "# Just normalization for validation\n",
+    "data_transforms = {\n",
+    "    \"train\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.RandomResizedCrop(\n",
+    "                224\n",
+    "            ),  # ImageNet models were trained on 224x224 images\n",
+    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
+    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
+    "            transforms.Normalize(\n",
+    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
+    "            ),  # ImageNet models expect this norm\n",
+    "        ]\n",
+    "    ),\n",
+    "    \"val\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize(256),\n",
+    "            transforms.CenterCrop(224),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "        ]\n",
+    "    ),\n",
+    "}\n",
+    "\n",
+    "# Helper function for displaying images\n",
+    "def imshow(inp, title=None):\n",
+    "    \"\"\"Imshow for Tensor.\"\"\"\n",
+    "    inp = inp.numpy().transpose((1, 2, 0))\n",
+    "    mean = np.array([0.485, 0.456, 0.406])\n",
+    "    std = np.array([0.229, 0.224, 0.225])\n",
+    "\n",
+    "    # Un-normalize the images\n",
+    "    inp = std * inp + mean\n",
+    "    # Clip just in case\n",
+    "    inp = np.clip(inp, 0, 1)\n",
+    "    plt.imshow(inp)\n",
+    "    if title is not None:\n",
+    "        plt.title(title)\n",
+    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
+    "    plt.show()\n",
+    "\n",
+    "\n",
+    "# Get a batch of training data\n",
+    "# inputs, classes = next(iter(dataloaders['train']))\n",
+    "\n",
+    "# Make a grid from batch\n",
+    "# out = torchvision.utils.make_grid(inputs)\n",
+    "\n",
+    "# imshow(out, title=[class_names[x] for x in classes])\n",
+    "# training\n",
+    "\n",
+    "\n",
+    "data_dir = \"hymenoptera_data\"\n",
+    "# Create train and validation datasets and loaders\n",
+    "image_datasets = {\n",
+    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataloaders = {\n",
+    "    x: torch.utils.data.DataLoader(\n",
+    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=4\n",
+    "    )\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
+    "class_names = image_datasets[\"train\"].classes\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "    \n",
+    "\n",
+    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
+    "    since = time.time()\n",
+    "\n",
+    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "    best_acc = 0.0\n",
+    "\n",
+    "    epoch_time = []  # we'll keep track of the time needed for each epoch\n",
+    "\n",
+    "    for epoch in range(num_epochs):\n",
+    "        epoch_start = time.time()\n",
+    "        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n",
+    "        print(\"-\" * 10)\n",
+    "\n",
+    "        # Each epoch has a training and validation phase\n",
+    "        for phase in [\"train\", \"val\"]:\n",
+    "            if phase == \"train\":\n",
+    "                scheduler.step()\n",
+    "                model.train()  # Set model to training mode\n",
+    "            else:\n",
+    "                model.eval() # Set model to evaluate mode\n",
+    "\n",
+    "            running_loss = 0.0\n",
+    "            running_corrects = 0\n",
+    "\n",
+    "            # Iterate over data.\n",
+    "            for inputs, labels in dataloaders[phase]:\n",
+    "                inputs = inputs.to(device)\n",
+    "                labels = labels.to(device)\n",
+    "\n",
+    "                # zero the parameter gradients\n",
+    "                optimizer.zero_grad()\n",
+    "\n",
+    "                # Forward\n",
+    "                # Track history if only in training phase\n",
+    "                with torch.set_grad_enabled(phase == \"val\"):\n",
+    "                    outputs = model(inputs)\n",
+    "                    _, preds = torch.max(outputs, 1)\n",
+    "                    loss = criterion(outputs, labels)\n",
+    "\n",
+    "                    # backward + optimize only if in training phase\n",
+    "                    if phase == \"val\":\n",
+    "                        loss.backward()\n",
+    "                        optimizer.step()\n",
+    "\n",
+    "                # Statistics\n",
+    "                running_loss += loss.item() * inputs.size(0)\n",
+    "                running_corrects += torch.sum(preds == labels.data)\n",
+    "\n",
+    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
+    "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
+    "\n",
+    "            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n",
+    "\n",
+    "            # Deep copy the model\n",
+    "            if phase == \"val\" and epoch_acc > best_acc:\n",
+    "                best_acc = epoch_acc\n",
+    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "\n",
+    "        # Add the epoch time\n",
+    "        t_epoch = time.time() - epoch_start\n",
+    "        epoch_time.append(t_epoch)\n",
+    "        print()\n",
+    "\n",
+    "    time_elapsed = time.time() - since\n",
+    "    print(\n",
+    "        \"Training complete in {:.0f}m {:.0f}s\".format(\n",
+    "            time_elapsed // 60, time_elapsed % 60\n",
+    "        )\n",
+    "    )\n",
+    "    print(\"Best val Acc: {:4f}\".format(best_acc))\n",
+    "\n",
+    "    # Load best model weights\n",
+    "    model.load_state_dict(best_model_wts)\n",
+    "    return model, epoch_time\n",
+    "\n",
+    "\n",
+    "# Download a pre-trained ResNet18 model and freeze its weights\n",
+    "model = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "# Replace the final fully connected layer\n",
+    "# Parameters of newly constructed modules have requires_grad=True by default\n",
+    "num_ftrs = model.fc.in_features\n",
+    "model.fc = nn.Linear(num_ftrs, 2)\n",
+    "# Send the model to the GPU\n",
+    "model = model.to(device)\n",
+    "# Set the loss function\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Observe that only the parameters of the final layer are being optimized\n",
+    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
+    "model, epoch_time = train_model(\n",
+    "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "aa560a1b-ea90-4927-bf1d-c7a84f39ddd1",
+   "metadata": {},
+   "source": [
+    "Experiments:\n",
+    "Study the code and the results obtained.\n",
+    "\n",
+    "We can see that the results have an accuracy of 1 at the epoch 4, so it tends to be very performant quite fastly."
+   ]
+  },
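+  {
+   "cell_type": "markdown",
+   "id": "epoch-time-md",
+   "metadata": {},
+   "source": [
+    "One further way to study the results: `train_model` also returns the wall-clock time of each epoch, which can be plotted (a small illustrative sketch; it assumes the `epoch_time` list returned by the training cell above):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "epoch-time-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# Plot the wall-clock duration of each training epoch\n",
+    "plt.plot(range(1, len(epoch_time) + 1), epoch_time, marker=\"o\")\n",
+    "plt.xlabel(\"Epoch\")\n",
+    "plt.ylabel(\"Time (s)\")\n",
+    "plt.title(\"Per-epoch training time\")\n",
+    "plt.show()"
+   ]
+  },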
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "4bd4216d-f3dc-4dd9-b0b4-e80207390fa9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Testing loss: 0.1952 Acc: 0.9231\n"
+     ]
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "from torchvision import transforms, datasets\n",
+    "import os\n",
+    "\n",
+    "def eval_model(model):\n",
+    "    # Define data transformations for evaluation\n",
+    "    data_transforms = transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize(256),          # Resize the shorter side to 256\n",
+    "            transforms.CenterCrop(224),      # Crop the center to 224x224\n",
+    "            transforms.ToTensor(),           # Convert to PyTorch tensor\n",
+    "            transforms.Normalize(\n",
+    "                [0.485, 0.456, 0.406],       # Mean normalization\n",
+    "                [0.229, 0.224, 0.225]        # Standard deviation normalization\n",
+    "            ),\n",
+    "        ]\n",
+    "    )\n",
+    "    \n",
+    "    # Specify test dataset directory\n",
+    "    data_dir = \"hymenoptera_data\"\n",
+    "    image_datasets = datasets.ImageFolder(\n",
+    "        os.path.join(data_dir, \"test\"), transform=data_transforms\n",
+    "    )\n",
+    "    \n",
+    "    # Create dataloader for the test set\n",
+    "    dataloaders = torch.utils.data.DataLoader(\n",
+    "        image_datasets, batch_size=4, shuffle=False, num_workers=4\n",
+    "    )\n",
+    "    dataset_size = len(image_datasets)\n",
+    "    class_names = image_datasets.classes\n",
+    "\n",
+    "    # Put the model in evaluation mode\n",
+    "    model.eval()\n",
+    "    \n",
+    "    running_loss = 0.0\n",
+    "    running_corrects = 0\n",
+    "\n",
+    "    # Disable gradient computation for evaluation\n",
+    "    with torch.no_grad():\n",
+    "        for inputs, labels in dataloaders:\n",
+    "            inputs = inputs.to(device)\n",
+    "            labels = labels.to(device)\n",
+    "\n",
+    "            # Forward pass\n",
+    "            outputs = model(inputs)\n",
+    "            _, preds = torch.max(outputs, 1)\n",
+    "            loss = criterion(outputs, labels)\n",
+    "\n",
+    "            # Accumulate loss and correct predictions\n",
+    "            running_loss += loss.item() * inputs.size(0)\n",
+    "            running_corrects += torch.sum(preds == labels.data)\n",
+    "\n",
+    "    # Calculate average loss and accuracy\n",
+    "    loss = running_loss / dataset_size\n",
+    "    acc = running_corrects.double() / dataset_size\n",
+    "\n",
+    "    print(\"Testing loss: {:.4f} Acc: {:.4f}\".format(loss, acc))\n",
+    "\n",
+    "eval_model(model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "44b8aeb2",
+   "metadata": {},
+   "source": [
+    "Modify the code and add an \"eval_model\" function to allow\n",
+    "the evaluation of the model on a test set (different from the learning and validation sets used during the learning phase). Study the results obtained.\n",
+    "\n",
+    "The accuracy is 0.9231 so the model is still performant. The test set is made by pictures downloaded from google."
+   ]
+  },
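+  {
+   "cell_type": "markdown",
+   "id": "test-layout-md",
+   "metadata": {},
+   "source": [
+    "For reference, `datasets.ImageFolder` infers labels from folder names, so the test split must be organized by class (hymenoptera_data/test/ants and hymenoptera_data/test/bees). A quick way to check the layout (an illustrative sketch):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "test-layout-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "# List each folder of the test split and how many images it contains\n",
+    "for root, dirs, files in os.walk(os.path.join(\"hymenoptera_data\", \"test\")):\n",
+    "    print(root, \"->\", len(files), \"images\")"
+   ]
+  },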
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "1d38b7ae-601f-402f-a3d5-eb7e51140fc9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1/10\n",
+      "----------\n",
+      "train Loss: 0.7085 Acc: 0.5000\n",
+      "val Loss: 0.5717 Acc: 0.6928\n",
+      "\n",
+      "Epoch 2/10\n",
+      "----------\n",
+      "train Loss: 0.5101 Acc: 0.7869\n",
+      "val Loss: 0.2690 Acc: 0.9281\n",
+      "\n",
+      "Epoch 3/10\n",
+      "----------\n",
+      "train Loss: 0.4458 Acc: 0.7910\n",
+      "val Loss: 0.1533 Acc: 0.9608\n",
+      "\n",
+      "Epoch 4/10\n",
+      "----------\n",
+      "train Loss: 0.4387 Acc: 0.7746\n",
+      "val Loss: 0.1142 Acc: 0.9739\n",
+      "\n",
+      "Epoch 5/10\n",
+      "----------\n",
+      "train Loss: 0.4396 Acc: 0.7787\n",
+      "val Loss: 0.0691 Acc: 0.9935\n",
+      "\n",
+      "Epoch 6/10\n",
+      "----------\n",
+      "train Loss: 0.4906 Acc: 0.7582\n",
+      "val Loss: 0.0451 Acc: 1.0000\n",
+      "\n",
+      "Epoch 7/10\n",
+      "----------\n",
+      "train Loss: 0.4779 Acc: 0.7828\n",
+      "val Loss: 0.0443 Acc: 1.0000\n",
+      "\n",
+      "Epoch 8/10\n",
+      "----------\n",
+      "train Loss: 0.4591 Acc: 0.7828\n",
+      "val Loss: 0.0413 Acc: 1.0000\n",
+      "\n",
+      "Epoch 9/10\n",
+      "----------\n",
+      "train Loss: 0.4367 Acc: 0.8279\n",
+      "val Loss: 0.0361 Acc: 1.0000\n",
+      "\n",
+      "Epoch 10/10\n",
+      "----------\n",
+      "train Loss: 0.4916 Acc: 0.7992\n",
+      "val Loss: 0.0415 Acc: 1.0000\n",
+      "\n",
+      "Training complete in 4m 37s\n",
+      "Best val Acc: 1.000000\n",
+      "Testing loss: 0.2004 Acc: 0.9231\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Download a pre-trained ResNet18 model and freeze its weights\n",
+    "model = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "# Replace the final fully connected layer\n",
+    "# Parameters of newly constructed modules have requires_grad=True by default\n",
+    "num_ftrs = model.fc.in_features\n",
+    "model.fc = nn.Sequential(\n",
+    "    nn.Linear(num_ftrs, 256),\n",
+    "    nn.ReLU(),\n",
+    "    nn.Dropout(0.1),\n",
+    "    nn.Linear(256, 2)\n",
+    ")\n",
+    "# Send the model to the GPU\n",
+    "model = model.to(device)\n",
+    "# Set the loss function\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Observe that only the parameters of the final layer are being optimized\n",
+    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
+    "model, epoch_time = train_model(\n",
+    "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
+    ")\n",
+    "eval_model(model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "dd097239-180f-460d-b0ff-3b12fd899bc0",
+   "metadata": {},
+   "source": [
+    "Now modify the code to replace the current classification layer with a set of two layers using a \"relu\" activation function for the middle layer, and the \"dropout\" mechanism for both layers. Renew the experiments and study the results obtained.\n",
+    "\n",
+    "The validation is equivalent, but the accuraccy on the test data set is not 1."
+   ]
+  },
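+  {
+   "cell_type": "markdown",
+   "id": "two-dropout-md",
+   "metadata": {},
+   "source": [
+    "Note that the training cell above applies dropout only after the middle layer. A variant that follows the statement literally, with dropout on both layers, could look like this (an illustrative sketch; the 0.1 rates are arbitrary):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "two-dropout-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.fc = nn.Sequential(\n",
+    "    nn.Dropout(0.1),           # dropout before the first new layer\n",
+    "    nn.Linear(num_ftrs, 256),\n",
+    "    nn.ReLU(),\n",
+    "    nn.Dropout(0.1),           # dropout before the second new layer\n",
+    "    nn.Linear(256, 2),\n",
+    ")"
+   ]
+  },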
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "4f8db07b-e708-473f-8988-f8bfec74c36b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "model:  fp32  \t Size (KB): 45304.25\n",
+      "model:  int8  \t Size (KB): 44911.014\n",
+      "Testing loss: 0.2012 Acc: 0.9231\n"
+     ]
+    }
+   ],
+   "source": [
+    "def print_size_of_model(model, label=\"\"):\n",
+    "    torch.save(model.state_dict(), \"temp.p\")\n",
+    "    size = os.path.getsize(\"temp.p\")\n",
+    "    print(\"model: \", label, \" \\t\", \"Size (KB):\", size / 1e3)\n",
+    "    os.remove(\"temp.p\")\n",
+    "    return size\n",
+    "\n",
+    "print_size_of_model(model, \"fp32\")\n",
+    "quantized_model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)\n",
+    "print_size_of_model(quantized_model, \"int8\")\n",
+    "eval_model(quantized_model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5fe1bfad-17d2-4ed2-b3fc-12c095d29753",
+   "metadata": {},
+   "source": [
+    "Apply ther quantization (post and quantization aware) and evaluate impact on model size and accuracy.\n",
+    "\n",
+    "The model is a bit less heavy, but not significaly. The accuracy on the testing set is the same."
+   ]
+  },
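+  {
+   "cell_type": "markdown",
+   "id": "qat-sketch-md",
+   "metadata": {},
+   "source": [
+    "The cell above covers post-training dynamic quantization only. For the quantization-aware part, torchvision ships quantizable model variants; the following eager-mode sketch outlines the workflow (not run here; qat_model and int8_model are illustrative names, and a short fine-tuning loop such as train_model would go where indicated):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "qat-sketch-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.ao.quantization as tq\n",
+    "\n",
+    "# Quantizable ResNet18 with QuantStub/DeQuantStub already inserted\n",
+    "qat_model = torchvision.models.quantization.resnet18(pretrained=True, quantize=False)\n",
+    "qat_model.fuse_model(is_qat=True)  # fuse conv+bn(+relu) blocks\n",
+    "qat_model.fc = nn.Linear(qat_model.fc.in_features, 2)  # same 2-class head as before\n",
+    "qat_model.qconfig = tq.get_default_qat_qconfig(\"fbgemm\")  # x86 backend\n",
+    "tq.prepare_qat(qat_model, inplace=True)\n",
+    "\n",
+    "# ... fine-tune for a few epochs with fake quantization enabled (e.g. via train_model) ...\n",
+    "\n",
+    "qat_model.eval()\n",
+    "int8_model = tq.convert(qat_model)  # real int8 model, CPU inference only"
+   ]
+  },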
+  {
+   "cell_type": "markdown",
+   "id": "04a263f0",
+   "metadata": {},
+   "source": [
+    "## Optional\n",
+    "    \n",
+    "Try this at home!! \n",
+    "\n",
+    "\n",
+    "Pytorch offers a framework to export a given CNN to your selfphone (either android or iOS). Have a look at the tutorial https://pytorch.org/mobile/home/\n",
+    "\n",
+    "The Exercise consists in deploying the CNN of Exercise 4 in your phone and then test it on live.\n",
+    "\n"
+   ]
+  },
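+  {
+   "cell_type": "markdown",
+   "id": "mobile-export-md",
+   "metadata": {},
+   "source": [
+    "As a starting point, the mobile runtime consumes a TorchScript module. A minimal export sketch (illustrative only; the file name ants_bees.ptl is arbitrary, and the saved model would then be bundled into the Android/iOS demo app from the tutorial):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "mobile-export-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "from torch.utils.mobile_optimizer import optimize_for_mobile\n",
+    "\n",
+    "# Trace the trained ants/bees classifier and save it for the lite interpreter\n",
+    "model.eval()\n",
+    "example = torch.rand(1, 3, 224, 224)\n",
+    "traced = torch.jit.trace(model.cpu(), example)\n",
+    "optimized = optimize_for_mobile(traced)\n",
+    "optimized._save_for_lite_interpreter(\"ants_bees.ptl\")"
+   ]
+  },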
+  {
+   "cell_type": "markdown",
+   "id": "fe954ce4",
+   "metadata": {},
+   "source": [
+    "## Author\n",
+    "\n",
+    "Alberto BOSIO - Ph. D."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.7"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "9e3efbebb05da2d4a1968abe9a0645745f54b63feb7a85a514e4da0495be97eb"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}