diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
index 2ecfce959ae6b947b633a758433f9bea0bf6992e..beeedead715e6462decd75a7c9889807ae984cc7 100644
--- a/TD2 Deep Learning.ipynb	
+++ b/TD2 Deep Learning.ipynb	
@@ -33,14 +33,34 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 70,
    "id": "330a42f5",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Couldn't find program: 'false'\n"
+     ]
+    }
+   ],
    "source": [
+    "%%script false\n",
+    "\n",
     "%pip install torch torchvision"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 71,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "0882a636",
@@ -52,10 +72,78 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 72,
    "id": "b1950f0a",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "tensor([[ 1.1058e+00,  1.7336e+00,  1.6643e+00,  4.8814e-01,  1.0503e+00,\n",
+      "          3.3081e-01,  4.2909e-01,  2.5513e-01,  4.4685e-01,  1.2261e+00],\n",
+      "        [ 1.6754e+00,  2.5322e-01, -3.2846e-01,  8.9585e-01, -1.3316e+00,\n",
+      "          2.3624e-01,  5.5837e-01,  9.2219e-01,  5.8599e-01,  8.2756e-01],\n",
+      "        [-5.0919e-04,  5.2701e-01,  1.6103e+00, -1.1456e+00, -3.3717e-01,\n",
+      "         -1.9402e-01,  8.6480e-01, -1.5003e+00,  8.3813e-01, -1.2842e-01],\n",
+      "        [ 2.1325e+00,  4.6389e-02,  3.8270e-01, -4.7796e-01,  6.9516e-01,\n",
+      "          4.3799e-01,  1.4166e+00, -9.7244e-01, -4.0094e-02, -2.5280e+00],\n",
+      "        [-1.2872e+00, -3.9930e-01, -8.1700e-01, -1.0437e+00, -1.0481e+00,\n",
+      "          1.7232e+00, -1.6563e+00, -4.3769e-01,  1.3422e+00, -7.3623e-01],\n",
+      "        [-7.6558e-01, -8.4420e-01,  1.0399e-01, -2.7014e-01, -2.8180e-02,\n",
+      "         -9.6759e-01, -1.1035e-01,  6.1477e-01, -9.9411e-02, -1.2770e+00],\n",
+      "        [ 1.7107e-01, -1.4533e+00, -1.0981e-01,  9.6294e-01,  1.8770e-01,\n",
+      "          2.1585e-01,  8.4826e-01,  8.2598e-01,  5.2848e-01, -6.2572e-01],\n",
+      "        [-6.8073e-01,  1.5341e+00,  5.4558e-01,  1.1158e+00,  9.1971e-01,\n",
+      "         -1.0714e+00,  1.1650e-01,  5.2230e-01, -9.3863e-01,  5.0782e-01],\n",
+      "        [-8.9026e-02,  1.3079e-01, -1.3377e+00,  7.9199e-01,  1.4043e+00,\n",
+      "         -7.5685e-01,  8.6716e-01,  6.6349e-01, -4.2035e-01, -8.4952e-01],\n",
+      "        [ 3.0876e-01,  1.6299e+00,  1.0647e+00, -6.7523e-01, -2.7187e-01,\n",
+      "          6.6396e-01,  3.1289e-01, -1.8232e-01, -6.1341e-01, -3.0799e-01],\n",
+      "        [ 1.5986e-01,  4.0908e-01, -1.9692e-02,  1.2336e+00,  1.0539e-01,\n",
+      "          1.4811e+00, -1.7229e+00,  5.4524e-01,  3.1768e-01, -6.7840e-01],\n",
+      "        [-1.5905e-01, -1.8926e-01,  3.9945e-01,  2.6893e-01,  2.3556e-01,\n",
+      "          1.1111e+00,  4.4930e-01, -2.0415e-02,  9.0348e-01,  6.3381e-02],\n",
+      "        [-1.3243e+00,  6.9054e-01,  2.4535e-01,  1.2984e+00, -6.0229e-01,\n",
+      "         -8.0071e-01,  4.5005e-02,  1.3536e+00,  5.6984e-01, -1.2901e+00],\n",
+      "        [ 5.6000e-01,  1.6408e-01,  1.2537e+00, -1.4064e+00,  1.0504e+00,\n",
+      "          5.6275e-01, -1.2924e-01, -4.2453e-01,  1.2955e+00, -1.0917e-01]])\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "AlexNet(\n",
+      "  (features): Sequential(\n",
+      "    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n",
+      "    (1): ReLU(inplace=True)\n",
+      "    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n",
+      "    (4): ReLU(inplace=True)\n",
+      "    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "    (7): ReLU(inplace=True)\n",
+      "    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "    (9): ReLU(inplace=True)\n",
+      "    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
+      "    (11): ReLU(inplace=True)\n",
+      "    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "  )\n",
+      "  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))\n",
+      "  (classifier): Sequential(\n",
+      "    (0): Dropout(p=0.5, inplace=False)\n",
+      "    (1): Linear(in_features=9216, out_features=4096, bias=True)\n",
+      "    (2): ReLU(inplace=True)\n",
+      "    (3): Dropout(p=0.5, inplace=False)\n",
+      "    (4): Linear(in_features=4096, out_features=4096, bias=True)\n",
+      "    (5): ReLU(inplace=True)\n",
+      "    (6): Linear(in_features=4096, out_features=1000, bias=True)\n",
+      "  )\n",
+      ")\n"
+     ]
+    }
+   ],
    "source": [
     "import torch\n",
     "\n",
@@ -95,10 +183,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 73,
    "id": "6e18f2fd",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CUDA is not available.  Training on CPU ...\n"
+     ]
+    }
+   ],
    "source": [
     "import torch\n",
     "\n",
@@ -121,10 +217,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 74,
    "id": "462666a2",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Files already downloaded and verified\n",
+      "Files already downloaded and verified\n"
+     ]
+    }
+   ],
    "source": [
     "import numpy as np\n",
     "from torchvision import datasets, transforms\n",
@@ -193,10 +298,25 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 75,
    "id": "317bf070",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Net(\n",
+      "  (conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))\n",
+      "  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n",
+      "  (fc1): Linear(in_features=400, out_features=120, bias=True)\n",
+      "  (fc2): Linear(in_features=120, out_features=84, bias=True)\n",
+      "  (fc3): Linear(in_features=84, out_features=10, bias=True)\n",
+      ")\n"
+     ]
+    }
+   ],
    "source": [
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
@@ -242,17 +362,43 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 76,
    "id": "4b53f229",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0 \tTraining Loss: 45.068168 \tValidation Loss: 41.658336\n",
+      "Validation loss decreased (inf --> 41.658336).  Saving model ...\n",
+      "Epoch: 1 \tTraining Loss: 36.543189 \tValidation Loss: 33.181536\n",
+      "Validation loss decreased (41.658336 --> 33.181536).  Saving model ...\n",
+      "Epoch: 2 \tTraining Loss: 31.468431 \tValidation Loss: 29.895601\n",
+      "Validation loss decreased (33.181536 --> 29.895601).  Saving model ...\n",
+      "Epoch: 3 \tTraining Loss: 28.879008 \tValidation Loss: 27.669525\n",
+      "Validation loss decreased (29.895601 --> 27.669525).  Saving model ...\n",
+      "Epoch: 4 \tTraining Loss: 26.925435 \tValidation Loss: 26.301845\n",
+      "Validation loss decreased (27.669525 --> 26.301845).  Saving model ...\n",
+      "Epoch: 5 \tTraining Loss: 25.447193 \tValidation Loss: 25.830709\n",
+      "Validation loss decreased (26.301845 --> 25.830709).  Saving model ...\n",
+      "Epoch: 6 \tTraining Loss: 24.231406 \tValidation Loss: 24.478850\n",
+      "Validation loss decreased (25.830709 --> 24.478850).  Saving model ...\n",
+      "Epoch: 7 \tTraining Loss: 23.158099 \tValidation Loss: 25.051581\n",
+      "Epoch: 8 \tTraining Loss: 22.216344 \tValidation Loss: 23.490295\n",
+      "Validation loss decreased (24.478850 --> 23.490295).  Saving model ...\n",
+      "Epoch: 9 \tTraining Loss: 21.386869 \tValidation Loss: 22.800535\n",
+      "Validation loss decreased (23.490295 --> 22.800535).  Saving model ...\n"
+     ]
+    }
+   ],
    "source": [
     "import torch.optim as optim\n",
     "\n",
     "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
     "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
     "\n",
-    "n_epochs = 30  # number of epochs to train the model\n",
+    "n_epochs = 10  # number of epochs to train the model\n",
     "train_loss_list = []  # list to store loss to visualize\n",
     "valid_loss_min = np.Inf  # track change in validation loss\n",
     "\n",
@@ -326,10 +472,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 77,
    "id": "d39df818",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjMAAAHFCAYAAAAHcXhbAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABK3klEQVR4nO3dd3hU1aIF8HVmJpn0kEJ6pQQSIKGEQCjSkSqIFQFB37WighWV6xX1CoqKDcWLBaUJKoqAgPQeIJRAgEBASAjpvdeZ/f4IGWdMaCHJmbJ+3zffM+ecmVmTuc8sz9n7bEkIIUBERERkohRyByAiIiK6HSwzREREZNJYZoiIiMikscwQERGRSWOZISIiIpPGMkNEREQmjWWGiIiITBrLDBEREZk0lhkiIiIyaSwzRI30/fffQ5Ik3UOlUsHPzw+PPPIIUlNTm/S9qqqq8OSTT8Lb2xtKpRJdu3Zt0tena1u9ejU6deoEW1tbSJKEuLi4Bo/btWuX7n8L33//fYPHDB48GJIkISgoqEkzBgUFYdq0aY16riRJmDNnzg2PW7p0KR588EF06NABCoWiyT8D0e1QyR2AyNQtWbIEHTt2RHl5Ofbs2YN58+Zh9+7diI+Ph729fZO8x6JFi/C///0Pn3/+OXr06AEHB4cmeV26vuzsbEyZMgUjRozAl19+CbVajZCQkOs+x9HREd9++229cnHp0iXs2rULTk5OzZi4+SxbtgwZGRmIioqCVqtFdXW13JGIdFhmiG5T586dERkZCQAYNGgQNBoN3nnnHaxduxaTJk26rdcuKyuDnZ0dTp06BVtbWzzzzDNNERkAUF5eDltb2yZ7PXOUmJiI6upqTJ48GQMGDLip5zzwwAP45ptvcP78ebRv3163/bvvvoOvry+6dOmCM2fONFfkZvPnn39Coag9mT9mzBicOnVK5kREf+NlJqIm1rt3bwBAcnIyAEAIgS+//BJdu3aFra0tXFxccO+99+LixYsGzxs4cCA6d+6MPXv2oE+fPrCzs8Ojjz4KSZLwzTffoLy8vN5ljIqKCrz22msIDg6GtbU1fH19MX36dBQUFBi8dlBQEMaMGYNff/0V3bp1g42NDd566y3dpZGVK1di1qxZ8Pb2hoODA8aOHYvMzEwUFxfj8ccfh7u7O9zd3fHII4+gpKTE4LW/+OIL3HHHHfDw8IC9vT26dOmC+fPn1/sv97rPFxsbi/79+8POzg5t2rTBe++9B61Wa3BsQUEBXnzxRbRp0wZqtRoeHh4YNWoUzp49qzumqqoK//3vf9GxY0eo1Wq0bt0ajzzyCLKzs2/qe1q3bh2io6NhZ2cHR0dHDBs2DDExMbr906ZNQ79+/QDUFhRJkjBw4MAbvu6wYcPg7++P7777TrdNq9Xihx9+wNSpU3WFQN/Nfo/V1dV45ZVX4OXlBTs7O/Tr1w+HDx9uMEdGRgaeeOIJ+Pn5wdraGsHBwXjrrbdQU1NzE7+d+hrKTWQ0BBE1ypIlSwQAERsba7D9008/FQDE4sWLhRBCPPbYY8LKykq8+OKLYvPmzWLlypWiY8eOwtPTU2RkZOieN2DAAOHq6ir8/f3F559/Lnbu3Cl2794tYmJixKhRo4Stra2IiYkRMTExIisrS2i1WnHnnXcKlUol3njjDbFlyxbx4YcfCnt7e9GtWzdRUVGhe+3AwEDh7e0t2rRpI7777juxc+dOcfjwYbFz504BQAQGBopp06aJzZs3i6+++ko4ODiIQYMGiWHDhomXXnpJbNmyRbz//vtCqVSKZ5991uDzPv/882LRokVi8+bNYseOHeLjjz8W7u7u4pFHHjE4bsCAAcLNzU20b99efPXVV2Lr1q3i6aefFgDEDz/8oDuuqKhIdOrUSdjb24u3335b/Pnnn2LNmjVixowZYseOHUIIITQajRgxYoSwt7cXb731lti6dav45ptvhK+vrwgLCxNlZWXX/e5WrFghAIjhw4eLtWvXitWrV4sePXoIa2trsXfvXiGEEBcuXBBffPGFACDmzp0rYmJixOnTp6/5mnW/y59//lm88cYbwsfHR9TU1AghhNi0aZOQJElcuHBBjB49WgQGBuqedyvf49SpU4UkSeLll18WW7ZsEQsWLBC+vr7CyclJTJ06VXdcenq68Pf3F4GBgeJ///uf2LZtm3jnnXeEWq0W06ZNM8gNQLz55pvX/X390z8/A5HcWGaIGqmuzBw8eFBUV1eL4uJisWHDBtG6dWvh6OgoMjIyRExMjAAgPvroI4PnpqSkCFtbW/HKK6/otg0YMEAAENu3b6/3XlOnThX29vYG2zZv3iwAiPnz5xtsX716tUGZEqK2zCiVSnHu3DmDY+v+AI8dO9Zg+8yZMwUA8dxzzxlsHz9+vHB1db3m70Sj0Yjq6mqxdOlSoVQqRV5eXr3Pd+jQIYPnhIWFiTvvvFP389tvvy0AiK1bt17zfX788UcBQKxZs8Zge2xsrAAgvvzyy+tm9PHxEV26dBEajUa3vbi4WHh4eIg+ffrotukXlBvRP/bixYtCkiSxYcMGIYQQ9913nxg4cKAQon4RuNnvMSEhQQAQzz//vMFxdcVMv8w88cQTwsHBQSQnJxsc++GHHwoABqWMZYbMAc8bEt2m3r17w8rKCo6OjhgzZgy8vLywadMmeHp6YsOGDZAkCZMnT0ZNTY3u4eXlhYiICOzatcvgtVxcXDB48OCbet8dO3YAQL2Bpvfddx/s7e2xfft2g+3h4eHXHLw6ZswYg59DQ0MBAKNHj663PS8vz+BS0/Hjx3HXXXfBzc0NSqUSVlZWePjhh6HRaJCYmGjwfC8vL0RFRdXLVXdJDgA2bdqEkJAQDB069FofHRs2bECrVq0wduxYg99r165d4eXlVe/3qu/cuXNIS0vDlClTDC6dODg44J577sHBgwdRVlZ2zeffjODgYAwcOBDfffcdcnNz8fvvv+PRRx9t8Nib/R537twJAPXGYd1///1QqQyHP27YsAGDBg2Cj4+Pwe9n5MiRAIDdu3ff1ucjMjYcAEx0m5YuXYrQ0FCoVCp4enrC29tbty8zMxNCCHh6ejb43DZt2hj8rP/cG8nNzYVKpULr1q0NtkuSBC8vL+Tm5t70a7u6uhr8bG1tfd3tFRUVcHBwwOXLl9G/f3906NABn376KYKCgmBjY4PDhw9j+vTpKC8vN3i+m5tbvfdWq9UGx2VnZyMgIOCaWYHa32tBQYEuzz/l5ORc87l1v5eGfh8+Pj7QarXIz8+HnZ3ddTPcyP/93//hkUcewYIFC2Bra4t77733mnlu5nus+79eXl4Gx6lUqnq/18zMTKxfvx5WVlYNvuf1fj9Epohlhug2hYaG6mYz/ZO7uzskScLevXuhVqvr7f/nNkmSbvp93dzcUFNTg+zsbIM/hEIIZGRkoGfPno1+7Zu1du1alJaW4tdff0VgYKBu+7XuxXIzWrdujStXrlz3GHd3d7i5uWHz5s0N7nd0dLzmc+v+8Kenp9fbl5aWBoVCARcXl1tI3LAJEyZg+v
TpeO+99/DYY49dc+bYzX6PdbkzMjLg6+urO66mpqZecXV3d0d4eDjefffdBt/Tx8fntj4bkbHhZSaiZjRmzBgIIZCamorIyMh6jy5dujT6tYcMGQIAWL58ucH2NWvWoLS0VLe/OdUVJP1SJoTA119/3ejXHDlyJBITE3WXXxoyZswY5ObmQqPRNPh77dChwzWf26FDB/j6+mLlypUQQui2l5aWYs2aNboZTrfL1tYW//nPfzB27Fg89dRT1zzuZr/HuplUK1asMDjup59+qjdDqW7qdNu2bRv8/bDMkLnhmRmiZtS3b188/vjjeOSRR3DkyBHccccdsLe3R3p6Ovbt24cuXbpc9w/d9QwbNgx33nknZs2ahaKiIvTt2xcnT57Em2++iW7dumHKlClN/GkazmBtbY2JEyfilVdeQUVFBRYtWoT8/PxGv+bMmTOxevVqjBs3Dq+++iqioqJQXl6O3bt3Y8yYMRg0aBAefPBBrFixAqNGjcKMGTMQFRUFKysrXLlyBTt37sS4ceNw9913N/j6CoUC8+fPx6RJkzBmzBg88cQTqKysxAcffICCggK89957jc7+Ty+88AJeeOGF6x5zs99jaGgoJk+ejE8++QRWVlYYOnQoTp06hQ8//LDejfjefvttbN26FX369MFzzz2HDh06oKKiAklJSdi4cSO++uor+Pn53dJnOXPmjO7+OBkZGSgrK8Mvv/wCAAgLC0NYWNgtvR5Rk5Jz9DGRKbvW1OyGfPfdd6JXr17C3t5e2NrairZt24qHH35YHDlyRHfMgAEDRKdOnRp8fkOzmYQQory8XMyaNUsEBgYKKysr4e3tLZ566imRn59vcFxgYKAYPXp0vedfa7bOtT7bm2++KQCI7Oxs3bb169eLiIgIYWNjI3x9fcXLL78sNm3aJACInTt33vDzTZ06td7MmPz8fDFjxgwREBAgrKyshIeHhxg9erQ4e/as7pjq6mrx4Ycf6t7bwcFBdOzYUTzxxBPi/Pnz9d7nn9auXSt69eolbGxshL29vRgyZIjYv3//Tf1+GnKzxzY0E+hmv8fKykrx4osvCg8PD2FjYyN69+4tYmJiRGBgoMFsJiGEyM7OFs8995wIDg4WVlZWwtXVVfTo0UPMnj1blJSU6I7DTc5mqvvuG3rc6mwooqYmCaF3npWIiIjIxHDMDBEREZk0lhkiIiIyaSwzREREZNJYZoiIiMikscwQERGRSWOZISIiIpNm9jfN02q1SEtLg6OjY7Pczp2IiIianhACxcXF8PHxMVgUtiFmX2bS0tLg7+8vdwwiIiJqhJSUlBvesdrsy0zdgnMpKSn1bvlNRERExqmoqAj+/v7XXTi2jtmXmbpLS05OTiwzREREJuZmhohwADARERGZNJYZIiIiMmksM0RERGTSWGaIiIjIpLHMEBERkUljmSEiIiKTxjJDREREJo1lhoiIiEwaywwRERGZNJYZIiIiMmksM0RERGTSjKbMzJs3D5IkYebMmbpt06ZNgyRJBo/evXvLF5KIiIiMjlEsNBkbG4vFixcjPDy83r4RI0ZgyZIlup+tra1bMtp1JaQXwc3eGh5ONnJHISIisliyn5kpKSnBpEmT8PXXX8PFxaXefrVaDS8vL93D1dVVhpT1vbPhDEZ+uhffH0iSOwoREZFFk73MTJ8+HaNHj8bQoUMb3L9r1y54eHggJCQEjz32GLKysq77epWVlSgqKjJ4NIeeQbXFa3VsCiprNM3yHkRERHRjspaZVatW4dixY5g3b16D+0eOHIkVK1Zgx44d+OijjxAbG4vBgwejsrLymq85b948ODs76x7+/v7Nkn1oqCe8nGyQW1qFzacymuU9iIiI6MZkKzMpKSmYMWMGli9fDhubhsecPPDAAxg9ejQ6d+6MsWPHYtOmTUhMTMQff/xxzdd97bXXUFhYqHukpKQ0S36VUoGHegUAAJbFJDfLexAREdGNyVZmjh49iqysLPTo0QMqlQoqlQq7d+/GZ599BpVKBY2m/qUbb29vBAYG4vz589d8XbVaDScnJ4NHc3mwpz9UCglHkvNxJq15LmcRERHR9clWZoYMGYL4+HjExcXpHpGRkZg0aRLi4uKgVCrrPSc3NxcpKSnw9vaWIXF9Hk42uLOzFwBg+SGenSEiIpKDbGXG0dERnTt3NnjY29vDzc0NnTt3RklJCV566SXExMQgKSkJu3btwtixY+Hu7o67775brtj1TOkdCABYezwVRRXVMqchIiKyPLLPZroWpVKJ+Ph4jBs3DiEhIZg6dSpCQkIQExMDR0dHuePp9Ap2RXsPB5RVafDbsVS54xAREVkcSQgh5A7RnIqKiuDs7IzCwsJmGz+zNCYJ//n9NNp5OGDr83dAkqRmeR8iIiJLcSt/v432zIwpububL+yslbiQVYKDF/PkjkNERGRRWGaagKONFe7u5gsAWH6QA4GJiIhaEstME5l8dSDwn6czkFlUIXMaIiIiy8Ey00RCvZ3QM8gFNVqBVYeb50Z9REREVB/LTBOqOzuz8nAyqjVamdMQERFZBpaZJjSisxfcHayRWVSJ7QmZcschIiKyCCwzTUitUuKBnrULWy7lek1EREQtgmWmiU2MCoBCAg78lYsLWcVyxyEiIjJ7LDNNzM/FDoM7egIAlh+8LHMaIiIi88cy0wymRNcOBF5z9ArKqmpkTkNERGTeWGaaQf927gh0s0NxZQ1+j0uTOw4REZFZY5lpBgqFhMm9as/OLItJhpkvf0VERCQrlplmcm8PP6hVCpxJL8KxywVyxyEiIjJbLDPNxMXeGmMjfABwvSYiIqLmxDLTjKZcvSPwHyfTkVtSKXMaIiIi88Qy04wi/Fsh3M8ZVRotfjpyRe44REREZollppnVrde04lAyNFoOBCYiImpqLDPNbGy4D5xtrXAlvxy7E7PkjkNERGR2WGaama21Evf18ANQO02biIiImhbLTAuYdPVS067EbFzOLZM5DRERkXlhmWkBwe72uCOkNYQAVhzm2RkiIqKmxDLTQuqmaf8Um4KKao3MaYiIiMwHy0wLGdzRA76tbJFfVo2N8elyxyEiIjIbLDMtRKmQ8FCvAADAMt4RmIiIqMmwzLSg+yP9YaWUcPxyAU6lFsodh4iIyCywzLSg1o5qjOzsDYDrNRERETUVlpkWNiW6diDw2rhUFJZXy5yGiIjI9LHMtLDIQBd09HJERbUWa45yvSYiIqLbxTLTwiRJ0q3XtPxgMoTgek1ERES3g2VGBuO7+cJBrcLFnFIc+CtX7jhEREQmjWVGBg5qFSZ09wXA9ZqIiIhuF8uMTOouNW1NyER6YbnMaYiIiEwXy4xMQjwd0SvYFRqtwI+HLssdh4iIyGSxzMiobpr2j7EpqKrRypyGiIjINLHMyGh4mBdaO6qRXVyJLWcy5I5DRERkklhmZGStUmBiT38AHAhMRETUWCwzMpvYKwBKhYRDl/KQmFksdxwiIiKTwzIjM29nWwwN9QDA9ZqIiIgag2XGCEzpHQQA+PVYKkoqa+QNQ0REZGJYZoxAn7ZuaONuj
5LKGqw9nip3HCIiIpPCMmMEFAoJk7heExERUaOwzBiJe7v7wcZKgbMZxTiSnC93HCIiIpPBMmMknO2sMC6C6zURERHdKpYZI1J3R+BNp9KRXVwpcxoiIiLTwDJjRDr7OqOrfytUawR+OpIidxwiIiKTwDJjZKZcHQi84mAyNFoOBCYiIroRlhkjMzrcGy52VkgrrMCOs1lyxyEiIjJ6LDNGxsZKifvr1mviHYGJiIhuiGXGCE2KCoQkAXsSs5GUUyp3HCIiIqPGMmOEAtzsMDCkNQBgxSGenSEiIroelhkjVTdN+6cjV1BRrZE5DRERkfFimTFSA0I84Odii8Lyaqw/kSZ3HCIiIqPFMmOklAoJk3r9vV4TERERNYxlxojdH+kHa6UCJ64U4kRKgdxxiIiIjBLLjBFzc1BjdLg3AJ6dISIiuhaWGSM3+eodgdedSENBWZXMaYiIiIwPy4yR6x7QCmHeTqis0eLnI1fkjkNERGR0WGaMnCRJumnayw8lQ8v1moiIiAywzJiAcV194KhWITm3DHsv5Mgdh4iIyKiwzJgAO2sV7unhBwBYFsOBwERERPqMpszMmzcPkiRh5syZum1CCMyZMwc+Pj6wtbXFwIEDcfr0aflCyqhuIPCOs5m4kl8mcxoiIiLjYRRlJjY2FosXL0Z4eLjB9vnz52PBggVYuHAhYmNj4eXlhWHDhqG4uFimpPJp5+GAPm3doBXAj4cvyx2HiIjIaMheZkpKSjBp0iR8/fXXcHFx0W0XQuCTTz7B7NmzMWHCBHTu3Bk//PADysrKsHLlShkTy2fK1bMzq2NTUFnD9ZqIiIgAIygz06dPx+jRozF06FCD7ZcuXUJGRgaGDx+u26ZWqzFgwAAcOHDgmq9XWVmJoqIig4e5GBrmCU8nNXJKqrD5VIbccYiIiIyCrGVm1apVOHbsGObNm1dvX0ZG7R9rT09Pg+2enp66fQ2ZN28enJ2ddQ9/f/+mDS0jK6UCE6MCAPCOwERERHVkKzMpKSmYMWMGli9fDhsbm2seJ0mSwc9CiHrb9L322msoLCzUPVJSUposszGYGBUApUJCbFI+EtLN56wTERFRY8lWZo4ePYqsrCz06NEDKpUKKpUKu3fvxmeffQaVSqU7I/PPszBZWVn1ztboU6vVcHJyMniYE08nG9zZqfbz8+wMERGRjGVmyJAhiI+PR1xcnO4RGRmJSZMmIS4uDm3atIGXlxe2bt2qe05VVRV2796NPn36yBXbKNRN0/7teCqKK6plTkNERCQvlVxv7OjoiM6dOxtss7e3h5ubm277zJkzMXfuXLRv3x7t27fH3LlzYWdnh4ceekiOyEYjuo0b2ra2x1/ZpfjteCoejg6SOxIREZFsZJ/NdD2vvPIKZs6ciaeffhqRkZFITU3Fli1b4OjoKHc0WUmSpJumvSwmGUJwvSYiIrJckjDzv4RFRUVwdnZGYWGhWY2fKaqoRq93t6O8WoNVj/dG7zZuckciIiJqMrfy99uoz8zQtTnZWGF8N18AwDIOBCYiIgvGMmPC6i41/XkqA1lFFTKnISIikgfLjAkL83FCZKALarQCq2LN6346REREN4tlxsRNia49O7Py0GXUaLQypyEiImp5LDMmbkRnL7jZWyOjqALbErLkjkNERNTiWGZMnFqlxAM9a9ef4h2BiYjIErHMmIGHegVAkoB9F3LwV3aJ3HGIiIhaFMuMGfBzscOQjh4AgBUHL8uchoiIqGWxzJiJuvWafj6agrKqGpnTEBERtRyWGTNxR/vWCHC1Q3FFDdafSJM7DhERUYthmTETCoWEyb0DAABLuV4TERFZEJYZM3JfD39YqxQ4nVaE4ykFcschIiJqESwzZsTF3hpjw30AAMtjOE2biIgsA8uMmam7I/CGk+nIK62SOQ0REVHzY5kxMxF+zuji64wqjRY/HeF6TUREZP5YZsyMJEm61bRXHEqGRsuBwEREZN5YZszQ2AgfONmokJJXjj2J2XLHISIialYsM2bI1lqJ+yJr12taxvWaiIjIzLHMmKlJvWrvObPzXBZS8spkTkNERNR8WGbMVJvWDujf3h1CACsOcb0mIiIyXywzZqxuvaafjqSgolojcxoiIqLmwTJjxoZ09IC3sw3ySquw6VS63HGIiIiaBcuMGVMpFXgoqnbszDLeEZiIiMwUy4yZeyDKHyqFhGOXC3AqtVDuOERERE2OZcbMeTjaYERnLwC1N9EjIiIyNywzFqDujsBrj6ehsLxa5jRERERNi2XGAkQFuyLE0wHl1Rr8euyK3HGIiIiaFMuMBZAkCVOigwDU3hFYCK7XRERE5oNlxkLc3c0X9tZKXMwuRcxfuXLHISIiajIsMxbCQa3ChO5+ALheExERmReWGQtSd0fgLWcykVFYIXMaIiKipsEyY0E6eDkiKtgVGq3Aj4e5XhMREZkHlhkLUzdN+8fDl1Gt0cqchoiI6PaxzFiYOzt5wd1BjaziSmw9kyl3HCIiotvGMmNhrFUKTIzyB8D1moiIyDywzFigiVEBUEhAzMVcnM8sljsOERHRbWGZsUA+rWwxNNQTALCc07SJiMjEscxYqCnRtQOB1xxLRWlljcxpiIiIGo9lxkL1beuOYHd7lFTWYG1cqtxxiIiIGo1lxkIpFBIm9QoAUDsQmOs1ERGRqWKZsWD39fCHjZUCZzOKcTQ5X+44REREjcIyY8Gc7axwV4QPAODLXX/x7AwREZkklhkL96/+bWCllLDjbBZ+OXpF7jhERES3jGXGwoV4OuL5YSEAgLfWn0FKXpnMiYiIiG4NywzhiTvaomeQC0oqa/DiTyeg0fJyExERmQ6WGYJSIWHB/V1hb63E4aQ8fL33otyRiIiIbhrLDAEA/F3t8OZdnQAAH205hzNpRTInIiIiujksM6RzXw8/DA/zRLVGYObq46io1sgdiYiI6IZYZkhHkiTMm9AF7g7WSMwswYd/npM7EhER0Q2xzJABNwc13r8nHADwzb5LOHAhR+ZERERE18cyQ/UMCfXExKjapQ5e+vkECsurZU5ERER0bSwz1KB/jw5FkJsd0gorMGfdabnjEBERXRPLDDXIXq3Cgge6QiEBvx1PxYaTaXJHIiIiahDLDF1T9wAXPDOoHQBg9m+nkFFYIXMiIiKi+lhm6LqeHdIeXXydUVhejZd/OcHFKImIyOiwzNB1WSkV+PiBrlCrFNh7PgdLY5LljkRERGSAZYZuqJ2HA14fFQoAmLsxAReySmRORERE9DeWGbopU3oHon97d1TWaPH86jhUa7RyRyIiIgLAMkM3SaGQ8MG9EXC2tUJ8aiE+335e7khEREQAZC4zixYtQnh4OJycnODk5ITo6Ghs2rRJt3/atGmQJMng0bt3bxkTWzYvZxvMvbsLAGDhzgs4djlf5kREREQylxk/Pz+89957OHLkCI4cOYLBgwdj3LhxOH3675u0jRgxAunp6brHxo0bZUxMo8O9cXc3X2gF8MLqOJRW1sgdiYiILJysZWbs2LEYNWoUQkJCEBISgnfffRcODg44ePCg7hi1Wg0v
Ly/dw9XVVcbEBABz7uoEH2cbJOWW4d2NCXLHISIiC2c0Y2Y0Gg1WrVqF0tJSREdH67bv2rULHh4eCAkJwWOPPYasrCwZUxIAONta4cP7IwAAKw9dxo6zmTInIiIiSyZ7mYmPj4eDgwPUajWefPJJ/PbbbwgLCwMAjBw5EitWrMCOHTvw0UcfITY2FoMHD0ZlZeU1X6+yshJFRUUGD2p6fdq641/9ggEAr/wSj9ySa38nREREzUkSMt/StaqqCpcvX0ZBQQHWrFmDb775Brt379YVGn3p6ekIDAzEqlWrMGHChAZfb86cOXjrrbfqbS8sLISTk1OT57dkFdUa3LVwHxIzSzA8zBP/m9IDkiTJHYuIiMxAUVERnJ2db+rvt+xnZqytrdGuXTtERkZi3rx5iIiIwKefftrgsd7e3ggMDMT589eeFvzaa6+hsLBQ90hJSWmu6BbPxkqJjx/oCiulhC1nMvHz0StyRyIiIgske5n5JyHENS8j5ebmIiUlBd7e3td8vlqt1k31rntQ8+nk44wXhnUAALy17jRS8spkTkRERJZG1jLz+uuvY+/evUhKSkJ8fDxmz56NXbt2YdKkSSgpKcFLL72EmJgYJCUlYdeuXRg7dizc3d1x9913yxmb/uHxO9ogKsgVpVUavPBTHDRaLkZJREQtR9Yyk5mZiSlTpqBDhw4YMmQIDh06hM2bN2PYsGFQKpWIj4/HuHHjEBISgqlTpyIkJAQxMTFwdHSUMzb9g1Ih4aP7I+CgViE2KR+L91yUOxIREVkQ2QcAN7dbGUBEt+fnIyl4+ZeTsFJKWDu9Lzr5OMsdiYiITJRJDQAm83FvDz/c2ckT1RqB51fHoaJaI3ckIiKyACwz1GQkScLcu7vA3UGNxMwSfPDnObkjERGRBWCZoSbl5qDG/HtrF6P8dt8l7L+QI3MiIiIydywz1OQGd/TEQ70CAAAv/XwCheXVMiciIiJzxjJDzWL2qFAEudkhvbACb/5+Su44RERkxlhmqFnYq1X4+IGuUCokrI1Lw/oTaXJHIiIiM8UyQ82mW4ALpg9qBwD499pTyCiskDkRERGZo0aVmZSUFFy58vc6PIcPH8bMmTOxePHiJgtG5uHZwe0Q7ueMwvJqvPzLCWh5d2AiImpijSozDz30EHbu3AkAyMjIwLBhw3D48GG8/vrrePvtt5s0IJk2K6UCHz/QFTZWCuw9n4NlB5PljkRERGamUWXm1KlTiIqKAgD89NNP6Ny5Mw4cOICVK1fi+++/b8p8ZAbatnbA66NCAQBzNybgQlaxzImIiMicNKrMVFdXQ61WAwC2bduGu+66CwDQsWNHpKenN106MhtTegfijpDWqKzRYubqOFTVaOWOREREZqJRZaZTp0746quvsHfvXmzduhUjRowAAKSlpcHNza1JA5J5kCQJH9wbDmdbK5xKLcLnO87LHYmIiMxEo8rM+++/j//9738YOHAgJk6ciIiICADAunXrdJefiP7J08kGc++uvTvwFzsv4GhyvsyJiIjIHDR61WyNRoOioiK4uLjotiUlJcHOzg4eHh5NFvB2cdVs4/PC6jj8ejwVgW522Phcf9irVXJHIiIiI9Psq2aXl5ejsrJSV2SSk5PxySef4Ny5c0ZVZMg4zRnXCb6tbJGcW4b//pEgdxwiIjJxjSoz48aNw9KlSwEABQUF6NWrFz766COMHz8eixYtatKAZH6cbKzw4X0RkCTgx8OXsT0hU+5IRERkwhpVZo4dO4b+/fsDAH755Rd4enoiOTkZS5cuxWeffdakAck8Rbd1w7/6BQMAZq05idySSpkTERGRqWpUmSkrK4OjoyMAYMuWLZgwYQIUCgV69+6N5GTeFI1uzovDO6CDpyNySqrw6q/xaOTwLSIisnCNKjPt2rXD2rVrkZKSgj///BPDhw8HAGRlZXGQLd00GyslPn6gK6yUEraeycTPR67c+ElERET/0Kgy85///AcvvfQSgoKCEBUVhejoaAC1Z2m6devWpAHJvIX5OOHF4R0AAG+tP43LuWUyJyIiIlPT6KnZGRkZSE9PR0REBBSK2k50+PBhODk5oWPHjk0a8nZwarbx02gFJn59EIcv5SEy0AWrn4iGUiHJHYuIiGTU7FOzAcDLywvdunVDWloaUlNTAQBRUVFGVWTINCgVEj66LwIOahWOJOfjf3v+kjsSERGZkEaVGa1Wi7fffhvOzs4IDAxEQEAAWrVqhXfeeQdaLdfcoVvn72qHOXd1AgB8vDURp1ILZU5ERESmolFlZvbs2Vi4cCHee+89HD9+HMeOHcPcuXPx+eef44033mjqjGQh7unuixGdvFCtEXh+dRwqqjVyRyIiIhPQqDEzPj4++Oqrr3SrZdf5/fff8fTTT+suOxkDjpkxLXmlVRj+8R7klFTi//oF440xYXJHIiIiGTT7mJm8vLwGx8Z07NgReXl5jXlJIgCAq701Prg3HADw7b5L2H8hR+ZERERk7BpVZiIiIrBw4cJ62xcuXIjw8PDbDkWWbVBHD0zqFQAAeOnnEygsq5Y5ERERGbNGLVc8f/58jB49Gtu2bUN0dDQkScKBAweQkpKCjRs3NnVGskCzR4fiwF+5uJRTiv+sO4VPH+T9i4iIqGGNOjMzYMAAJCYm4u6770ZBQQHy8vIwYcIEnD59GkuWLGnqjGSB7KxVWHB/BJQKCb/HpWHdiTS5IxERkZFq9E3zGnLixAl0794dGo3xzELhAGDT9vHWRHy6/TycbFTY8vwAeDnbyB2JiIhaQIvcNI+oJTwzuB0i/JxRVFGDl385Aa2Wi1ESEZEhlhkyalZKBRY80BU2VgrsPZ+DpTFJckciIiIjwzJDRq9tawfMHhUKAJi36SwuZBXLnIiIiIzJLc1mmjBhwnX3FxQU3E4Womua3DsQWxOysCcxGzNXx+HXp/rCWsUuTkREt3hmxtnZ+bqPwMBAPPzww82VlSyYJEn44N5wtLKzwqnUIny2/bzckYiIyEg06WwmY8TZTOZlY3w6nl5xDAoJ+PnJaPQIdJU7EhERNQPOZiKzNaqLNyZ094VWAM+vPoHSyhq5IxERkcxYZsjkzLmrE3xb2eJyXhn++8cZueMQEZHMWGbI5DjZWOGj+yMgScCPh1Ow7Uym3JGIiEhGLDNkknq3ccNj/dsAAF799SRySiplTkRERHJhmSGT9eLwEHT0ckROSRVeXRMPMx/LTkRE18AyQyZLrVLi4we6wlqpwLaETPx0JEXuSEREJAOWGTJpod5OeHF4CADgrfVnkJxbKnMiIiJqaSwzZPL+1b8NooJdUValwSNLYpGQXiR3JCIiakEsM2TylAoJC+6PgJeTDS7mlGLcF/uxLCaJY2iIiCwEywyZBT8XO/zxXD8M6tAaVTVavPH7aTy5/CgKy6rljkZERM2MZYbMhpuDGt9N64l/jw6FlVLCn6czMeqzvTiSlCd3NCIiakYsM2RWJEnCv/q3wa9P9UWQmx1SC8rxwOKDWLjjPDRaXnYiIjJHLDNklrr4OWPDc/0xvqsPNFqBD7ckYsq3h5BZVCF
3NCIiamIsM2S2HNQqfPxAV3x4XwRsrZQ48FcuRn66FzvPZckdjYiImhDLDJk1SZJwbw8/bHiuH0K9nZBXWoVHlsTi3T/OoKpGK3c8IiJqAiwzZBHatnbAb0/3wbQ+QQCAr/dewr1fHeBN9oiIzADLDFkMGysl5tzVCYun9ICzrRVOXinE6M/24fe4VLmjERHRbWCZIYszvJMXNs3oj55BLiiprMGMVXF45ZcTKKuqkTsaERE1AssMWSSfVrb48bHeeG5Ie0gS8NORKxj7+T4uhUBEZIJYZshiqZQKvDAsBCv/1RueTmr8lc2lEIiITBHLDFm86LZu2DTjDgzu6MGlEIiITBDLDBEAV3trfDs1Em+MCeNSCEREJoZlhugqSZLwf/2CuRQCEZGJYZkh+gcuhUBEZFpkLTOLFi1CeHg4nJyc4OTkhOjoaGzatEm3XwiBOXPmwMfHB7a2thg4cCBOnz4tY2KyFFwKgYjIdMhaZvz8/PDee+/hyJEjOHLkCAYPHoxx48bpCsv8+fOxYMECLFy4ELGxsfDy8sKwYcNQXFwsZ2yyEFwKgYjINEjCyOagurq64oMPPsCjjz4KHx8fzJw5E7NmzQIAVFZWwtPTE++//z6eeOKJm3q9oqIiODs7o7CwEE5OTs0ZncxYRbUG7206i+8PJAEAwv2c8fnEbgh0s5c3GBGRmbqVv99GM2ZGo9Fg1apVKC0tRXR0NC5duoSMjAwMHz5cd4xarcaAAQNw4MABGZOSJdJfCqGVHZdCICIyJrKXmfj4eDg4OECtVuPJJ5/Eb7/9hrCwMGRkZAAAPD09DY739PTU7WtIZWUlioqKDB5ETWV4Jy9sfK4/ooJcuRQCEZGRkL3MdOjQAXFxcTh48CCeeuopTJ06FWfOnNHtlyTJ4HghRL1t+ubNmwdnZ2fdw9/fv9myk2XyaWWLlY/14lIIRERGwujGzAwdOhRt27bFrFmz0LZtWxw7dgzdunXT7R83bhxatWqFH374ocHnV1ZWorKyUvdzUVER/P39OWaGmkXMX7mYufo4MosqYa1S4I3RoZjcO/C6hZuIiG7MJMfM1BFCoLKyEsHBwfDy8sLWrVt1+6qqqrB792706dPnms9Xq9W6qd51D6LmwqUQiIjkp5LzzV9//XWMHDkS/v7+KC4uxqpVq7Br1y5s3rwZkiRh5syZmDt3Ltq3b4/27dtj7ty5sLOzw0MPPSRnbCIDdUshfLc/Ce9tSsCfpzNxKnUvPn2wKyKDXOWOR0Rk9mQtM5mZmZgyZQrS09Ph7OyM8PBwbN68GcOGDQMAvPLKKygvL8fTTz+N/Px89OrVC1u2bIGjo6OcsYnqqVsKISrIFc/+eAxJuWV4YPFBPD+0PZ4a2A5KBS87ERE1F6MbM9PUeJ8ZamkllTX492/xWBuXBgDo09YNHz/QFZ5ONjInIyIyHSY9ZobI1OkvhWBnzaUQiIiaG8sMUTOoWwph/bP9EMalEIiImhXLDFEzatvaAb8+3QfT+gQBAL7eewn3fnUAybml8gYjIjIjLDNEzYxLIRARNS+WGaIWwqUQiIiaB8sMUQviUghERE2PZYaohamUCrwwLAQr/9Ubnk5q/JVdinFf7MeymCSY+Z0SiIiaBcsMkUy4FAIRUdNgmSGSUd1SCG+MCYOVUsKfpzMx6rO9OHgxV+5oREQmg2WGSGZ1SyH8+lRfBLnZIbWgHA8uPoinlh9FUg6ncBMR3QjLDJGR6OLnjA3P9cdDvQKgkIBNpzIwdMFuzFl3GnmlVXLHIyIyWlybicgIncsoxrxNCdh1LhsA4GijwjOD2mFqnyDYWCllTkdE1Pxu5e83ywyREdt3PgdzNybgzNWp276tbPHKiA4YG+4DBVfiJiIzxjKjh2WGTJ1WK/Db8VR8uOUc0gsrAABdfJ3x+qhQRLd1kzkdEVHzYJnRwzJD5qKiWoNv913Col1/oaSy9q7BQ0M98OrIjmjn4ShzOiKipsUyo4dlhsxNTkklPt12HisPX4ZGK6BUSJgY5Y+ZQ0Pg7qCWOx4RUZNgmdHDMkPm6kJWCd7ffBZbz2QCABzUKjw5oA3+r18b2FpzkDARmTaWGT0sM2TuDl3MxbsbE3DySiEAwMvJBi8OD8GE7n5QcpAwEZkolhk9LDNkCbRagfUn0zB/8zmkFpQDAEK9nfD6qI7o3761zOmIiG4dy4welhmyJBXVGiyNScLnOy6guKJ2kPCAkNZ4fVQoOnhxkDARmQ6WGT0sM2SJ8kur8PmOC1h2MAnVGgGFBNzXwx8vDA+Bp5ON3PGIiG6IZUYPywxZsqScUsz/8yw2xmcAAGytlHj8jjZ4/I42sFerZE5HRHRtLDN6WGaIgKPJeXj3jwQcu1wAAGjtqMYLw0JwXw8/qJRcoo2IjA/LjB6WGaJaQghsOpWB9zefRXJuGQCgvYcDXh8VioEdWkOSOPOJiIwHy4welhkiQ1U1Wiw/mIzPdpxHQVk1AKBvOze8NjIUnX2dZU5HRFSLZUYPywxRwwrLqvHFrgv4fn8SqjRaSBJwdzdfvDS8A3xa2codj4gsHMuMHpYZoutLySvDB3+ew7oTaQAAtUqB/+sXjKcGtoWjjZXM6YjIUrHM6GGZIbo5J1IK8O7GBBy+lAcAcLO3xoyh7TExKgBWHCRMRC2MZUYPywzRzRNCYFtCFuZtSsDF7FIAQBt3e8wa2RHDwzw5SJiIWgzLjB6WGaJbV63RYtXhy/hk23nkllYBAKKCXPH66FB09W8lbzgisggsM3pYZogar7iiGl/t/gvf7L2EyhotAGBshA9eubMD/F3tZE5HROaMZUYPywzR7UsrKMdHWxLx6/ErEAKwViowtU8gnhnUHs52HCRMRE2PZUYPywxR0zmdVoi5GxOw/0IuAMDZ1grPDWmPKb0DYa3iIGEiajosM3pYZoialhACuxKzMW9jAhIzSwAAAa52mDWiI0Z18eIgYSJqEiwzelhmiJpHjUaLX45ewUdbE5FdXAkA6BbQCv8eHYoega4ypyMiU8cyo4dlhqh5lVbWYPGei1i85yLKqzUAgJGdvTBrREcEudvLnI6ITBXLjB6WGaKWkVlUgY+3JuKnIynQCkClkPBAT3880jcI7Twc5Y5HRCaGZUYPywxRyzqXUYx5mxKw61y2bluftm54ODoQQ0M9oeLdhInoJrDM6GGZIZLHgb9ysGR/ErYnZEJ79d8y3s42eCgqAA9GBaC1o1regERk1Fhm9LDMEMnrSn4ZVhy6jNWxKci7ejdhK6WEUV288XB0ILoHuHAGFBHVwzKjh2WGyDhUVGuwMT4dS2OSEZdSoNse5u2Eh6MDMa6rL2ytlfIFJCKjwjKjh2WGyPjEXynE0pgkrDuRplsmwclGhfsj/TG5dyBnQRERy4w+lhki45VfWoWfjqRg+aFkpOSV67YPCGmNh6MDMbCDB5QKXoIiskQsM3pYZoiMn0YrsDsxC0tjkg1mQfm72mJyr0DcH+kPF3trGRMSUUtjmdHDMkNkWpJySrH8YDJ+OpKCoooaAIBapcDYCB88HB2IcL9W8gYkoh
bBMqOHZYbINJVXabDuRCqWxiTjdFqRbnuEfytMjQ7EqC7esLHigGEic8Uyo4dlhsi0CSFw7HIBlsUk4Y/4dFRrav+V5WpvjQd6+mNSrwD4udjJnJKImhrLjB6WGSLzkV1cidWxl7Hi0GWkF1YAABQSMLijJx6ODkS/du5QcMAwkVlgmdHDMkNkfmo0WmxLyMKyg0nYfyFXt72Nuz0m9w7EPT384GxrJWNCIrpdLDN6WGaIzNuFrBIsP5iMX45eQUll7YBhWyslxnfzxcPRgQj15v/fE5kilhk9LDNElqGksga/HU/FspgkJGaW6LZHBbliSnQg7uzkBWsVF7kkMhUsM3pYZogsixAChy7lYVlMMjafzoDm6iqXrR3VmBgVgIeiAuDlbCNzSiK6EZYZPSwzRJYrs6gCKw9dxsrDl5FdXAkAUCok3NnJEw9HB6FXsCsXuSQyUiwzelhmiKiqRos/T2dgWUwyDifl6baHeDpgSnQQJnTzhb1aJWNCIvonlhk9LDNEpC8hvQjLDibjt2OpKK/WAAAc1Crc090XU6KD0M7DQeaERASwzBhgmSGihhRVVGPN0StYFpOMizmluu1927lhSu8gDA31gErJAcNEcmGZ0cMyQ0TXo9UK7P8rB0tjkrE9IRNXxwvDx9kGD/UKwINRAXB3UMsbksgCsczoYZkhopt1Jb8MKw9dxqrYFOSVVgEArJQSRnT2xrgIH9wR0prTu4laCMuMHpYZIrpVlTUabIxPxw8HkhGXUqDb7mSjwsjO3hgb4YPebVx5GYqoGbHM6GGZIaLbcSq1EL8eS8WGk2nIujq9GwDcHawxqkttsekR4MI1oYia2K38/Zb1PyvmzZuHnj17wtHRER4eHhg/fjzOnTtncMy0adMgSZLBo3fv3jIlJiJL09nXGf8ZG4aY14Zg1eO9MalXAFzsrJBTUoWlMcm476sY9H1/B9794wxOXimAmf/3IZFRkvXMzIgRI/Dggw+iZ8+eqKmpwezZsxEfH48zZ87A3t4eQG2ZyczMxJIlS3TPs7a2hqur6029B8/MEFFTq9ZoceCvXKyLS8OW0xkovromFAAEudlhbIQPxkb4IMTTUcaURKbNZC8zZWdnw8PDA7t378Ydd9wBoLbMFBQUYO3atY16TZYZImpOFdUa7E7MxvoTadiWkImKaq1uXwdPR4yN8MaYcB8EudvLmJLI9NzK32+juuVlYWEhANQ767Jr1y54eHigVatWGDBgAN599114eHjIEZGIyICNlRJ3dvLCnZ28UFpZg+1ns7D+RBp2n8vGucxinNtSjA+3JCLczxljw30wJsIb3s62cscmMitGc2ZGCIFx48YhPz8fe/fu1W1fvXo1HBwcEBgYiEuXLuGNN95ATU0Njh49CrW6/r0fKisrUVn59yC9oqIi+Pv788wMEbWowvJq/Hk6A+tPpOHAX7m6BS+B2pW8x0Z4Y2QXb97DhugaTPIy0/Tp0/HHH39g37598PPzu+Zx6enpCAwMxKpVqzBhwoR6++fMmYO33nqr3naWGSKSS05JJTadqi02hy/9vTaUQgL6tnPH2HAf3NnJC852VjKmJDIuJldmnn32WaxduxZ79uxBcHDwDY9v3749/vWvf2HWrFn19vHMDBEZs/TCcvxxMh3rT6ThxJVC3XYrpYQBIR4YG+GNoaGeXPiSLJ7JjJkRQuDZZ5/Fb7/9hl27dt1UkcnNzUVKSgq8vb0b3K9Wqxu8/EREZAy8nW3xr/5t8K/+bZCcW4oNV4vN2YxibEvIxLaETNhYKTAk1BNjw30wsENr2Fgp5Y5NZNRkPTPz9NNPY+XKlfj999/RoUMH3XZnZ2fY2tqipKQEc+bMwT333ANvb28kJSXh9ddfx+XLl5GQkABHxxtPe+RsJiIyBYmZxVh/Ig3rT6QhKbdMt91BrcLwTp4YG+GDfu3cYcW7DpOFMJnLTJLU8B0zlyxZgmnTpqG8vBzjx4/H8ePHUVBQAG9vbwwaNAjvvPMO/P39b+o9WGaIyJQIIXAqtQjrT9YWm/TCCt0+FzsrjOzijbHhPogKdoWSdx0mM2YyZaYlsMwQkanSagWOXc7HuhNp2BifjpySKt0+D0c1RofXLqfQzb/VNf/jkMhUsczoYZkhInNQo9Hi4MU8rD+Rhk2n0lFU8fddh/1cbDEm3Ad3Rfgg1NuRxYbMAsuMHpYZIjI3VTVa7D1fe9fhLWcyUVal0e1r29pet5xC29YOMqYkuj0sM3pYZojInJVXabDj6l2Hd5zLQlXN38sphHk7YWyED8aEe8Pf1U7GlES3jmVGD8sMEVmK4opqbD2TifUn0rD3fA5q9O463D2gFUaH+2B4mCeLDZkElhk9LDNEZInyS6t0dx0+eCkX+v+m7+DpiKFhHhga6okIv1ZQcFYUGSGWGT0sM0Rk6bKKKrDhZDq2nMlAbFK+wTpR7g5qDOnogaFhnujXzh221rxBHxkHlhk9LDNERH8rKKvCrnPZ2JqQid3nslFS+fesKLVKgf7t3TE01BODQz3g4WgjY1KydCwzelhmiIgaVlWjxaFLudiekIWtZzKRWlBusD/CvxWGhdaetengySnf1LJYZvSwzBAR3ZgQonZ9qDO160PpL4IJ1N7LZmioJ4aFeaJnkCusVVxWgZoXy4welhkioluXWVSBHWezsO1MJvZdyEGl3pRvR7UKAzq0xrAwTwwM8YCznZWMSclcsczoYZkhIro9ZVU12Hc+B9sTsrD9bKbBsgpKhYSoIFcMCfXAsDBPBLrZy5iUzAnLjB6WGSKipqPVCsRdKdBdjkrMLDHY397DAUPDPDE01BNd/VtxMUxqNJYZPSwzRETN53JuGbYl1BabQ5fyDKZ9u9lbY/DVad/927vDzlolY1IyNSwzelhmiIhaRmFZNXYlZmFbQhZ2nctCsd5imNYqBfq1q532PSTUA55OnPZN18cyo4dlhoio5VVrtIi9lIetV8/apOQZTvsO93PG0NDay1Fc6ZsawjKjh2WGiEheQggkZpboLkfFpRQYLK/g28oWQ0Jrl1fo1cYVahXvQkwsMwZYZoiIjEtWcQV2ns3C1jNZ2HchGxXVf0/7dlCrMCCkNYaGeWBgiAdc7K1lTEpyYpnRwzJDRGS8Kqo12H8h5+pZmyxkF1fq9ikkIDLIFcNCPTE0zBPB7pz2bUlYZvSwzBARmQatVuBkaqFu2vfZjGKD/W1b218dQOyJbgGtYKXkXYjNGcuMHpYZIiLTlJJXhu1Xz9gcvJiLGr1p3w5qFXq3cUXfdu7o184d7TwcOIjYzLDM6GGZISIyfUUV1diTmI2tZzKxJzEb+WXVBvs9ndS6YtO3nTunfpsBlhk9LDNEROZFqxU4k16EfRdysP9CDg5fyjNYOwoAQjwddOWmVxs3OKh5wz5TwzKjh2WGiMi8VVRrcDQ5X1du4lMLDaZ+qxQSugW00pWbCH+OtzEFLDN6WGaIiCxLQVkVDvyVqys3ybllBvs53sY0sMzoYZkhIrJsKXll2HchB/su5ODAhZzrjrfp184dHhxvYxRYZvSwzBARU
R2OtzEdLDN6WGaIiOhabmW8Tf/27gj343iblsIyo4dlhoiIblZ+aRViLt54vE2/du7o194dbVtzvE1zYZnRwzJDRESNdbPjbfq3d0ffthxv05RYZvSwzBARUVO42fE2/dq1Rr/2bogK5nib28Eyo4dlhoiImsPNjLfpHuBSO5i4vRvH29wilhk9LDNERNQS9Mfb7Dufg8t5DY23cUO/dm4cb3MTWGb0sMwQEZEcLueWYf9ftcVm/185KGhgvE1UsBt6BrkgMtAVHbwcoVSw3NRhmdHDMkNERHLTH2+z73wODifloeof420c1Sp0D3SpLTdBrujq3wo2VkqZEsuPZUYPywwRERmbimoNjl3Ox9GkfMQm5+NYcj5KKmsMjrFSSuji64yeQa6IDHJFZKALXOytZUrc8lhm9LDMEBGRsavRaHE2oxhHkvIQm5yP2Et5yCqurHdcOw8H3WWpnkGu8He1NdtxNywzelhmiIjI1AghcCW/HLFJeYhNykdsUh4uZJXUO87TSY3IIFf0DKy9NBXq7WQ2425YZvSwzBARkTnIK63C0eT82rM3SXmITy1EtcbwT7iDWoVuAa2uXppyQTd/F9ham+a4G5YZPSwzRERkjiqqNYhLKbhabmrH3RT/Y9yNSiGhs6+zblBxZKAL3BzUMiW+NSwzelhmiIjIEmi0AucyinEk+eqlqUt5yCiqqHdcm9b26BlYe+amZ5ArAt3sjHLcDcuMHpYZIiKyRHXjburKzZGkPCRm1h9309pRbTCoONTbESojuFMxy4welhkiIqJaBWW1427qBhWfvFJQb9yNnbUS3QNcdGduuvq3gr0Ma0yxzOhhmSEiImpYRbUGJ68UIjYpD0eS8nAkOR/FFYbjbpQKCZ18nNAzyBU9g1zQI9AVrR2bf9wNy4welhkiIqKbo9UKJGYV68bcxCblIb2w/ribYHd7RAa66GZNBbvbN/m4G5YZPSwzREREjZdaUK6bDn4kKR/nMovxz+bwYE9/vHdPeJO+7638/W75i2BERERkMnxb2cK3qy/GdfUFABSWVePo5b8HFZ9IKUQnH3lPFrDMEBER0U1ztrPC4I6eGNzRE0DtuButzBd5WGaIiIio0YxhZW/5J5ITERER3QaWGSIiIjJpLDNERERk0lhmiIiIyKSxzBAREZFJY5khIiIik8YyQ0RERCaNZYaIiIhMGssMERERmTSWGSIiIjJpLDNERERk0lhmiIiIyKSxzBAREZFJM/tVs8XVZcmLiopkTkJEREQ3q+7vdt3f8esx+zJTXFwMAPD395c5CREREd2q4uJiODs7X/cYSdxM5TFhWq0WaWlpcHR0hCRJTfraRUVF8Pf3R0pKCpycnJr0tenW8fswLvw+jAu/D+PC7+PGhBAoLi6Gj48PFIrrj4ox+zMzCoUCfn5+zfoeTk5O/B+jEeH3YVz4fRgXfh/Ghd/H9d3ojEwdDgAmIiIik8YyQ0RERCaNZeY2qNVqvPnmm1Cr1XJHIfD7MDb8PowLvw/jwu+jaZn9AGAiIiIybzwzQ0RERCaNZYaIiIhMGssMERERmTSWGSIiIjJpLDON9OWXXyI4OBg2Njbo0aMH9u7dK3ckizRv3jz07NkTjo6O8PDwwPjx43Hu3Dm5Y9FV8+bNgyRJmDlzptxRLFpqaiomT54MNzc32NnZoWvXrjh69KjcsSxSTU0N/v3vfyM4OBi2trZo06YN3n77bWi1WrmjmTSWmUZYvXo1Zs6cidmzZ+P48ePo378/Ro4cicuXL8sdzeLs3r0b06dPx8GDB7F161bU1NRg+PDhKC0tlTuaxYuNjcXixYsRHh4udxSLlp+fj759+8LKygqbNm3CmTNn8NFHH6FVq1ZyR7NI77//Pr766issXLgQCQkJmD9/Pj744AN8/vnnckczaZya3Qi9evVC9+7dsWjRIt220NBQjB8/HvPmzZMxGWVnZ8PDwwO7d+/GHXfcIXcci1VSUoLu3bvjyy+/xH//+1907doVn3zyidyxLNKrr76K/fv38+yxkRgzZgw8PT3x7bff6rbdc889sLOzw7Jly2RMZtp4ZuYWVVVV4ejRoxg+fLjB9uHDh+PAgQMypaI6hYWFAABXV1eZk1i26dOnY/To0Rg6dKjcUSzeunXrEBkZifvuuw8eHh7o1q0bvv76a7ljWax+/fph+/btSExMBACcOHEC+/btw6hRo2ROZtrMfqHJppaTkwONRgNPT0+D7Z6ensjIyJApFQG1K6y+8MIL6NevHzp37ix3HIu1atUqHDt2DLGxsXJHIQAXL17EokWL8MILL+D111/H4cOH8dxzz0GtVuPhhx+WO57FmTVrFgoLC9GxY0colUpoNBq8++67mDhxotzRTBrLTCNJkmTwsxCi3jZqWc888wxOnjyJffv2yR3FYqWkpGDGjBnYsmULbGxs5I5DALRaLSIjIzF37lwAQLdu3XD69GksWrSIZUYGq1evxvLly7Fy5Up06tQJcXFxmDlzJnx8fDB16lS545kslplb5O7uDqVSWe8sTFZWVr2zNdRynn32Waxbtw579uyBn5+f3HEs1tGjR5GVlYUePXrotmk0GuzZswcLFy5EZWUllEqljAktj7e3N8LCwgy2hYaGYs2aNTIlsmwvv/wyXn31VTz44IMAgC5duiA5ORnz5s1jmbkNHDNzi6ytrdGjRw9s3brVYPvWrVvRp08fmVJZLiEEnnnmGfz666/YsWMHgoOD5Y5k0YYMGYL4+HjExcXpHpGRkZg0aRLi4uJYZGTQt2/fercrSExMRGBgoEyJLFtZWRkUCsM/vUqlklOzbxPPzDTCCy+8gClTpiAyMhLR0dFYvHgxLl++jCeffFLuaBZn+vTpWLlyJX7//Xc4Ojrqzpg5OzvD1tZW5nSWx9HRsd54JXt7e7i5uXEck0yef/559OnTB3PnzsX999+Pw4cPY/HixVi8eLHc0SzS2LFj8e677yIgIACdOnXC8ePHsWDBAjz66KNyRzNtghrliy++EIGBgcLa2lp0795d7N69W+5IFglAg48lS5bIHY2uGjBggJgxY4bcMSza+vXrRefOnYVarRYdO3YUixcvljuSxSoqKhIzZswQAQEBwsbGRrRp00bMnj1bVFZWyh3NpPE+M0RERGTSOGaGiIiITBrLDBEREZk0lhkiIiIyaSwzREREZNJYZoiIiMikscwQERGRSWOZISIiIpPGMkNEFkeSJKxdu1buGETURFhmiKhFTZs2DZIk1XuMGDFC7mhEZKK4NhMRtbgRI0ZgyZIlBtvUarVMaYjI1PHMDBG1OLVaDS8vL4OHi4sLgNpLQIsWLcLIkSNha2uL4OBg/PzzzwbPj4+Px+DBg2Fraws3Nzc8/vjjKCkpMTjmu+++Q6dOnaBWq+Ht7Y1nnnnGYH9OTg7uvvtu2NnZoX379li3bl3zfmgiajYsM0RkdN544w3cc889OHHiBCZPnoyJEyciISEBAFBWVoYRI0bAxcUFsbGx+Pnnn7Ft2zaDsrJo0SJMnz4djz/+OOLj47Fu3Tq0a9fO4D3eeust
3H///Th58iRGjRqFSZMmIS8vr0U/JxE1EblXuiQiyzJ16lShVCqFvb29wePtt98WQtSuhP7kk08aPKdXr17iqaeeEkIIsXjxYuHi4iJKSkp0+//44w+hUChERkaGEEIIHx8fMXv27GtmACD+/e9/634uKSkRkiSJTZs2NdnnJKKWwzEzRNTiBg0ahEWLFhlsc3V11f1zdHS0wb7o6GjExcUBABISEhAREQF7e3vd/r59+0Kr1eLcuXOQJAlpaWkYMmTIdTOEh4fr/tne3h6Ojo7Iyspq7EciIhmxzBBRi7O3t6932edGJEkCAAghdP/c0DG2trY39XpWVlb1nqvVam8pExEZB46ZISKjc/DgwXo/d+zYEQAQFhaGuLg4lJaW6vbv378fCoUCISEhcHR0RFBQELZv396imYlIPjwzQ0QtrrKyEhkZGQbbVCoV3N3dAQA///wzIiMj0a9fP6xYsQKHDx/Gt99+CwCYNGkS3nzzTUydOhVz5sxBdnY2nn32WUyZMgWenp4AgDlz5uDJJ5+Eh4cHRo4cieLiYuzfvx/PPvtsy35QImoRLDNE1OI2b94Mb29vg20dOnTA2bNnAdTONFq1ahWefvppeHl5YcWKFQgLCwMA2NnZ4c8//8SMGTPQs2dP2NnZ4Z577sGCBQt0rzV16lRUVFTg448/xksvvQR3d3fce++9LfcBiahFSUIIIXcIIqI6kiTht99+w/jx4+WOQkQmgmNmiIiIyKSxzBAREZFJ45gZIjIqvPJNRLeKZ2aIiIjIpLHMEBERkUljmSEiIiKTxjJDREREJo1lhoiIiEwaywwRERGZNJYZIiIiMmksM0RERGTSWGaIiIjIpP0/zfyUVT1Il8QAAAAASUVORK5CYII=",
+      "text/plain": [
+       "<Figure size 640x480 with 1 Axes>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
     "import matplotlib.pyplot as plt\n",
     "\n",
@@ -350,10 +507,31 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 78,
    "id": "e93efdfc",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Loss: 22.462551\n",
+      "\n",
+      "Test Accuracy of airplane: 67% (676/1000)\n",
+      "Test Accuracy of automobile: 66% (666/1000)\n",
+      "Test Accuracy of  bird: 48% (480/1000)\n",
+      "Test Accuracy of   cat: 38% (380/1000)\n",
+      "Test Accuracy of  deer: 43% (438/1000)\n",
+      "Test Accuracy of   dog: 46% (464/1000)\n",
+      "Test Accuracy of  frog: 72% (728/1000)\n",
+      "Test Accuracy of horse: 69% (697/1000)\n",
+      "Test Accuracy of  ship: 68% (681/1000)\n",
+      "Test Accuracy of truck: 76% (769/1000)\n",
+      "\n",
+      "Test Accuracy (Overall): 59% (5979/10000)\n"
+     ]
+    }
+   ],
    "source": [
     "model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
     "\n",
@@ -362,6 +540,12 @@
     "class_correct = list(0.0 for i in range(10))\n",
     "class_total = list(0.0 for i in range(10))\n",
     "\n",
+    "import torch.optim as optim\n",
+    "\n",
+    "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
+    "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
+    "\n",
+    "\n",
     "model.eval()\n",
     "# iterate over test data\n",
     "for data, target in test_loader:\n",
@@ -434,6 +618,383 @@
     "Compare the results obtained with this new network to those obtained previously."
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 79,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class newNet(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(newNet, self).__init__()\n",
+    "        self.conv1 = nn.Conv2d(3, 16, 3)\n",
+    "        self.pool = nn.MaxPool2d(2, 2)\n",
+    "        self.conv2 = nn.Conv2d(16, 32, 3)\n",
+    "        self.conv3 = nn.Conv2d(32, 64, 3)\n",
+    "        self.fc1 = nn.Linear(64 * 2 * 2, 512)  # Ajuster la taille ici\n",
+    "        self.fc2 = nn.Linear(512, 64)\n",
+    "        self.fc3 = nn.Linear(64, 10)\n",
+    "        self.dropout = nn.Dropout(p=0.5)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = self.pool(F.relu(self.conv3(x)))\n",
+    "        # Utilisation de la méthode .view() de manière dynamique\n",
+    "        x = x.view(x.size(0), -1)\n",
+    "        x = F.relu(self.fc1(x))\n",
+    "        x = self.dropout(x)\n",
+    "        x = F.relu(self.fc2(x))\n",
+    "        x = self.dropout(x)\n",
+    "        x = self.fc3(x)\n",
+    "        return x"
+   ]
+  },
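+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sanity-check sketch (an addition, not part of the original TD): verify that\n",
+    "# the flattened feature map really is 64 * 2 * 2 = 256 for 32x32 CIFAR-10\n",
+    "# inputs, so fc1's in_features in newNet above needs no hand adjustment.\n",
+    "with torch.no_grad():\n",
+    "    probe = newNet()\n",
+    "    x = torch.zeros(1, 3, 32, 32)  # one dummy CIFAR-10 image\n",
+    "    x = probe.pool(F.relu(probe.conv1(x)))  # -> (1, 16, 15, 15)\n",
+    "    x = probe.pool(F.relu(probe.conv2(x)))  # -> (1, 32, 6, 6)\n",
+    "    x = probe.pool(F.relu(probe.conv3(x)))  # -> (1, 64, 2, 2)\n",
+    "    print(x.shape)\n",
+    "    assert x.flatten(1).size(1) == 64 * 2 * 2\n"
+   ]
+  },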
+  {
+   "cell_type": "code",
+   "execution_count": 80,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "newNet(\n",
+      "  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1))\n",
+      "  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
+      "  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))\n",
+      "  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))\n",
+      "  (fc1): Linear(in_features=256, out_features=512, bias=True)\n",
+      "  (fc2): Linear(in_features=512, out_features=64, bias=True)\n",
+      "  (fc3): Linear(in_features=64, out_features=10, bias=True)\n",
+      "  (dropout): Dropout(p=0.5, inplace=False)\n",
+      ")\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0 \tTraining Loss: 45.997170 \tValidation Loss: 45.813581\n",
+      "Validation loss decreased (inf --> 45.813581).  Saving model ...\n",
+      "Epoch: 1 \tTraining Loss: 43.447996 \tValidation Loss: 39.434123\n",
+      "Validation loss decreased (45.813581 --> 39.434123).  Saving model ...\n",
+      "Epoch: 2 \tTraining Loss: 37.817678 \tValidation Loss: 33.835315\n",
+      "Validation loss decreased (39.434123 --> 33.835315).  Saving model ...\n",
+      "Epoch: 3 \tTraining Loss: 34.411652 \tValidation Loss: 32.083492\n",
+      "Validation loss decreased (33.835315 --> 32.083492).  Saving model ...\n",
+      "Epoch: 4 \tTraining Loss: 32.522201 \tValidation Loss: 29.788260\n",
+      "Validation loss decreased (32.083492 --> 29.788260).  Saving model ...\n",
+      "Epoch: 5 \tTraining Loss: 30.807354 \tValidation Loss: 28.055189\n",
+      "Validation loss decreased (29.788260 --> 28.055189).  Saving model ...\n",
+      "Epoch: 6 \tTraining Loss: 29.412794 \tValidation Loss: 27.665930\n",
+      "Validation loss decreased (28.055189 --> 27.665930).  Saving model ...\n",
+      "Epoch: 7 \tTraining Loss: 28.139727 \tValidation Loss: 25.605682\n",
+      "Validation loss decreased (27.665930 --> 25.605682).  Saving model ...\n",
+      "Epoch: 8 \tTraining Loss: 27.044169 \tValidation Loss: 25.173172\n",
+      "Validation loss decreased (25.605682 --> 25.173172).  Saving model ...\n",
+      "Epoch: 9 \tTraining Loss: 25.927621 \tValidation Loss: 23.948350\n",
+      "Validation loss decreased (25.173172 --> 23.948350).  Saving model ...\n"
+     ]
+    }
+   ],
+   "source": [
+    "# define the CNN architecture\n",
+    "\n",
+    "if __name__ == \"__main__\":\n",
+    "    # create a complete CNN\n",
+    "    model = newNet()\n",
+    "    print(model)\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        model.cuda()\n",
+    "\n",
+    "    criterion = nn.CrossEntropyLoss()  # specify loss function\n",
+    "    optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
+    "\n",
+    "    n_epochs = 10  # number of epochs to train the model\n",
+    "    train_loss_list = []  # list to store loss to visualize\n",
+    "    valid_loss_min = np.Inf  # track change in validation loss\n",
+    "\n",
+    "    for epoch in range(n_epochs):\n",
+    "        # Keep track of training and validation loss\n",
+    "        train_loss = 0.0\n",
+    "        valid_loss = 0.0\n",
+    "\n",
+    "        # Train the model\n",
+    "        model.train()\n",
+    "        for data, target in train_loader:\n",
+    "            # Move tensors to GPU if CUDA is available\n",
+    "            if train_on_gpu:\n",
+    "                data, target = data.cuda(), target.cuda()\n",
+    "            # Clear the gradients of all optimized variables\n",
+    "            optimizer.zero_grad()\n",
+    "            # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "            output = model(data)\n",
+    "            # Calculate the batch loss\n",
+    "            loss = criterion(output, target)\n",
+    "            # Backward pass: compute gradient of the loss with respect to model parameters\n",
+    "            loss.backward()\n",
+    "            # Perform a single optimization step (parameter update)\n",
+    "            optimizer.step()\n",
+    "            # Update training loss\n",
+    "            train_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "        # Validate the model\n",
+    "        model.eval()\n",
+    "        for data, target in valid_loader:\n",
+    "            # Move tensors to GPU if CUDA is available\n",
+    "            if train_on_gpu:\n",
+    "                data, target = data.cuda(), target.cuda()\n",
+    "            # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "            output = model(data)\n",
+    "            # Calculate the batch loss\n",
+    "            loss = criterion(output, target)\n",
+    "            # Update average validation loss\n",
+    "            valid_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "        # Calculate average losses\n",
+    "        train_loss = train_loss / len(train_loader)\n",
+    "        valid_loss = valid_loss / len(valid_loader)\n",
+    "        train_loss_list.append(train_loss)\n",
+    "\n",
+    "        # Print training/validation statistics\n",
+    "        print(\n",
+    "            \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n",
+    "                epoch, train_loss, valid_loss\n",
+    "            )\n",
+    "        )\n",
+    "\n",
+    "        # Save model if validation loss has decreased\n",
+    "        if valid_loss <= valid_loss_min:\n",
+    "            print(\n",
+    "                \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n",
+    "                    valid_loss_min, valid_loss\n",
+    "                )\n",
+    "            )\n",
+    "            torch.save(model.state_dict(), \"my_model_cifar.pt\")\n",
+    "            valid_loss_min = valid_loss"
+   ]
+  },
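+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Plotting sketch (an addition, mirroring the earlier matplotlib cell): show\n",
+    "# the new network's training loss per epoch so its convergence can be\n",
+    "# compared with the first model's curve above.\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "plt.plot(range(n_epochs), train_loss_list)\n",
+    "plt.xlabel(\"Epoch\")\n",
+    "plt.ylabel(\"Loss\")\n",
+    "plt.title(\"Performance of newNet\")\n",
+    "plt.show()\n"
+   ]
+  },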
+  {
+   "cell_type": "code",
+   "execution_count": 81,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Loss: 23.851512\n",
+      "\n",
+      "Test Accuracy of airplane: 53% (537/1000)\n",
+      "Test Accuracy of automobile: 65% (658/1000)\n",
+      "Test Accuracy of  bird: 41% (413/1000)\n",
+      "Test Accuracy of   cat: 43% (439/1000)\n",
+      "Test Accuracy of  deer: 33% (338/1000)\n",
+      "Test Accuracy of   dog: 39% (395/1000)\n",
+      "Test Accuracy of  frog: 82% (826/1000)\n",
+      "Test Accuracy of horse: 57% (575/1000)\n",
+      "Test Accuracy of  ship: 76% (769/1000)\n",
+      "Test Accuracy of truck: 76% (762/1000)\n",
+      "\n",
+      "Test Accuracy (Overall): 57% (5712/10000)\n"
+     ]
+    }
+   ],
+   "source": [
+    "model.load_state_dict(torch.load(\"./my_model_cifar.pt\"))\n",
+    "\n",
+    "# track test loss\n",
+    "test_loss = 0.0\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
+    "\n",
+    "import torch.optim as optim\n",
+    "\n",
+    "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
+    "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
+    "\n",
+    "\n",
+    "model.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = model(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(batch_size):\n",
+    "        label = target.data[i]\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
+    "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
+    "\n",
+    "for i in range(10):\n",
+    "    if class_total[i] > 0:\n",
+    "        print(\n",
+    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                classes[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
+    "            )\n",
+    "        )\n",
+    "    else:\n",
+    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
+    "\n",
+    "print(\n",
+    "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
+    "    % (\n",
+    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
+    "        np.sum(class_correct),\n",
+    "        np.sum(class_total),\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 82,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "RuntimeError",
+     "evalue": "Error(s) in loading state_dict for newNet:\n\tMissing key(s) in state_dict: \"conv3.weight\", \"conv3.bias\". \n\tsize mismatch for conv1.weight: copying a param with shape torch.Size([6, 3, 5, 5]) from checkpoint, the shape in current model is torch.Size([16, 3, 3, 3]).\n\tsize mismatch for conv1.bias: copying a param with shape torch.Size([6]) from checkpoint, the shape in current model is torch.Size([16]).\n\tsize mismatch for conv2.weight: copying a param with shape torch.Size([16, 6, 5, 5]) from checkpoint, the shape in current model is torch.Size([32, 16, 3, 3]).\n\tsize mismatch for conv2.bias: copying a param with shape torch.Size([16]) from checkpoint, the shape in current model is torch.Size([32]).\n\tsize mismatch for fc1.weight: copying a param with shape torch.Size([120, 400]) from checkpoint, the shape in current model is torch.Size([512, 256]).\n\tsize mismatch for fc1.bias: copying a param with shape torch.Size([120]) from checkpoint, the shape in current model is torch.Size([512]).\n\tsize mismatch for fc2.weight: copying a param with shape torch.Size([84, 120]) from checkpoint, the shape in current model is torch.Size([64, 512]).\n\tsize mismatch for fc2.bias: copying a param with shape torch.Size([84]) from checkpoint, the shape in current model is torch.Size([64]).\n\tsize mismatch for fc3.weight: copying a param with shape torch.Size([10, 84]) from checkpoint, the shape in current model is torch.Size([10, 64]).",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
+      "\u001b[1;32mc:\\Users\\Utilisateur\\Documents\\GitHub\\image-classification\\TD2 Deep Learning.ipynb Cell 25\u001b[0m line \u001b[0;36m1\n\u001b[0;32m     <a href='vscode-notebook-cell:/c%3A/Users/Utilisateur/Documents/GitHub/image-classification/TD2%20Deep%20Learning.ipynb#X56sZmlsZQ%3D%3D?line=10'>11</a>\u001b[0m model1 \u001b[39m=\u001b[39m newNet()  \u001b[39m# Remplacez Net par le type de modèle que vous utilisez\u001b[39;00m\n\u001b[0;32m     <a href='vscode-notebook-cell:/c%3A/Users/Utilisateur/Documents/GitHub/image-classification/TD2%20Deep%20Learning.ipynb#X56sZmlsZQ%3D%3D?line=11'>12</a>\u001b[0m model2 \u001b[39m=\u001b[39m newNet()  \u001b[39m# Assurez-vous que les deux modèles ont la même architecture\u001b[39;00m\n\u001b[1;32m---> <a href='vscode-notebook-cell:/c%3A/Users/Utilisateur/Documents/GitHub/image-classification/TD2%20Deep%20Learning.ipynb#X56sZmlsZQ%3D%3D?line=13'>14</a>\u001b[0m model1\u001b[39m.\u001b[39mload_state_dict(torch\u001b[39m.\u001b[39mload(model_path1))\n\u001b[0;32m     <a href='vscode-notebook-cell:/c%3A/Users/Utilisateur/Documents/GitHub/image-classification/TD2%20Deep%20Learning.ipynb#X56sZmlsZQ%3D%3D?line=14'>15</a>\u001b[0m model2\u001b[39m.\u001b[39mload_state_dict(torch\u001b[39m.\u001b[39mload(model_path2))\n\u001b[0;32m     <a href='vscode-notebook-cell:/c%3A/Users/Utilisateur/Documents/GitHub/image-classification/TD2%20Deep%20Learning.ipynb#X56sZmlsZQ%3D%3D?line=16'>17</a>\u001b[0m \u001b[39m# Mettez les modèles en mode évaluation\u001b[39;00m\n",
+      "File \u001b[1;32mc:\\Users\\Utilisateur\\anaconda3\\Lib\\site-packages\\torch\\nn\\modules\\module.py:2152\u001b[0m, in \u001b[0;36mModule.load_state_dict\u001b[1;34m(self, state_dict, strict, assign)\u001b[0m\n\u001b[0;32m   2147\u001b[0m         error_msgs\u001b[39m.\u001b[39minsert(\n\u001b[0;32m   2148\u001b[0m             \u001b[39m0\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mMissing key(s) in state_dict: \u001b[39m\u001b[39m{}\u001b[39;00m\u001b[39m. \u001b[39m\u001b[39m'\u001b[39m\u001b[39m.\u001b[39mformat(\n\u001b[0;32m   2149\u001b[0m                 \u001b[39m'\u001b[39m\u001b[39m, \u001b[39m\u001b[39m'\u001b[39m\u001b[39m.\u001b[39mjoin(\u001b[39mf\u001b[39m\u001b[39m'\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m{\u001b[39;00mk\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m\u001b[39m'\u001b[39m \u001b[39mfor\u001b[39;00m k \u001b[39min\u001b[39;00m missing_keys)))\n\u001b[0;32m   2151\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(error_msgs) \u001b[39m>\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[1;32m-> 2152\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39m'\u001b[39m\u001b[39mError(s) in loading state_dict for \u001b[39m\u001b[39m{}\u001b[39;00m\u001b[39m:\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39m\\t\u001b[39;00m\u001b[39m{}\u001b[39;00m\u001b[39m'\u001b[39m\u001b[39m.\u001b[39mformat(\n\u001b[0;32m   2153\u001b[0m                        \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39m\\t\u001b[39;00m\u001b[39m\"\u001b[39m\u001b[39m.\u001b[39mjoin(error_msgs)))\n\u001b[0;32m   2154\u001b[0m \u001b[39mreturn\u001b[39;00m _IncompatibleKeys(missing_keys, unexpected_keys)\n",
+      "\u001b[1;31mRuntimeError\u001b[0m: Error(s) in loading state_dict for newNet:\n\tMissing key(s) in state_dict: \"conv3.weight\", \"conv3.bias\". \n\tsize mismatch for conv1.weight: copying a param with shape torch.Size([6, 3, 5, 5]) from checkpoint, the shape in current model is torch.Size([16, 3, 3, 3]).\n\tsize mismatch for conv1.bias: copying a param with shape torch.Size([6]) from checkpoint, the shape in current model is torch.Size([16]).\n\tsize mismatch for conv2.weight: copying a param with shape torch.Size([16, 6, 5, 5]) from checkpoint, the shape in current model is torch.Size([32, 16, 3, 3]).\n\tsize mismatch for conv2.bias: copying a param with shape torch.Size([16]) from checkpoint, the shape in current model is torch.Size([32]).\n\tsize mismatch for fc1.weight: copying a param with shape torch.Size([120, 400]) from checkpoint, the shape in current model is torch.Size([512, 256]).\n\tsize mismatch for fc1.bias: copying a param with shape torch.Size([120]) from checkpoint, the shape in current model is torch.Size([512]).\n\tsize mismatch for fc2.weight: copying a param with shape torch.Size([84, 120]) from checkpoint, the shape in current model is torch.Size([64, 512]).\n\tsize mismatch for fc2.bias: copying a param with shape torch.Size([84]) from checkpoint, the shape in current model is torch.Size([64]).\n\tsize mismatch for fc3.weight: copying a param with shape torch.Size([10, 84]) from checkpoint, the shape in current model is torch.Size([10, 64])."
+     ]
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.optim as optim\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# Charger les modèles\n",
+    "model_path1 = \"./model_cifar.pt\"\n",
+    "model_path2 = \"./my_model_cifar.pt\"\n",
+    "\n",
+    "model1 = Net()  # Remplacez Net par le type de modèle que vous utilisez\n",
+    "model2 = newNet()  # Assurez-vous que les deux modèles ont la même architecture\n",
+    "\n",
+    "model1.load_state_dict(torch.load(model_path1))\n",
+    "model2.load_state_dict(torch.load(model_path2))\n",
+    "\n",
+    "# Mettez les modèles en mode évaluation\n",
+    "model1.eval()\n",
+    "model2.eval()\n",
+    "\n",
+    "# Initialiser les variables pour le suivi des performances\n",
+    "test_loss = [0.0, 0.0]  # Liste pour stocker les pertes de test pour chaque modèle\n",
+    "class_correct = [list(0.0 for i in range(10)), list(0.0 for i in range(10))]\n",
+    "class_total = [list(0.0 for i in range(10)), list(0.0 for i in range(10))]\n",
+    "\n",
+    "# Définir le critère et l'optimiseur\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Boucle sur le jeu de données de test\n",
+    "for model_num, model in enumerate([model1, model2]):\n",
+    "    for data, target in test_loader:\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        output = model(data)\n",
+    "        loss = criterion(output, target)\n",
+    "        test_loss[model_num] += loss.item() * data.size(0)\n",
+    "\n",
+    "        _, pred = torch.max(output, 1)\n",
+    "        correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "        correct = (\n",
+    "            np.squeeze(correct_tensor.numpy())\n",
+    "            if not train_on_gpu\n",
+    "            else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "        )\n",
+    "\n",
+    "        for i in range(batch_size):\n",
+    "            label = target.data[i]\n",
+    "            class_correct[model_num][label] += correct[i].item()\n",
+    "            class_total[model_num][label] += 1\n",
+    "\n",
+    "    test_loss[model_num] = test_loss[model_num] / len(test_loader)\n",
+    "\n",
+    "# Afficher les performances de chaque modèle\n",
+    "for model_num, model in enumerate([\"Model 1\", \"Model 2\"]):\n",
+    "    print(f\"\\n{model} Test Loss: {test_loss[model_num]:.6f}\\n\")\n",
+    "    for i in range(10):\n",
+    "        if class_total[model_num][i] > 0:\n",
+    "            print(\n",
+    "                f\"Test Accuracy of {classes[i]}: {100 * class_correct[model_num][i] / class_total[model_num][i]:.2f}%\"\n",
+    "            )\n",
+    "        else:\n",
+    "            print(f\"Test Accuracy of {classes[i]}: N/A (no training examples)\")\n",
+    "\n",
+    "# Plotting\n",
+    "labels = [\"Model 1\", \"Model 2\"]\n",
+    "accuracy_overall = [\n",
+    "    100.0 * np.sum(class_correct[0]) / np.sum(class_total[0]),\n",
+    "    100.0 * np.sum(class_correct[1]) / np.sum(class_total[1]),\n",
+    "]\n",
+    "\n",
+    "plt.bar(labels, accuracy_overall)\n",
+    "plt.xlabel(\"Models\")\n",
+    "plt.ylabel(\"Overall Test Accuracy (%)\")\n",
+    "plt.title(\"Comparison of Model Performance\")\n",
+    "plt.show()\n"
+   ]
+  },
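+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Follow-up sketch (an addition): plot the per-class accuracies side by side,\n",
+    "# which is more informative than the single overall bar above. Assumes\n",
+    "# class_correct, class_total and classes from the previous cell are in scope.\n",
+    "x = np.arange(10)  # one group per CIFAR-10 class\n",
+    "width = 0.35\n",
+    "acc1 = [100 * class_correct[0][i] / class_total[0][i] for i in range(10)]\n",
+    "acc2 = [100 * class_correct[1][i] / class_total[1][i] for i in range(10)]\n",
+    "plt.bar(x - width / 2, acc1, width, label=\"Model 1 (Net)\")\n",
+    "plt.bar(x + width / 2, acc2, width, label=\"Model 2 (newNet)\")\n",
+    "plt.xticks(x, classes, rotation=45)\n",
+    "plt.ylabel(\"Test Accuracy (%)\")\n",
+    "plt.legend()\n",
+    "plt.tight_layout()\n",
+    "plt.show()\n"
+   ]
+  },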
+  {
+   "cell_type": "code",
+   "execution_count": 68,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<All keys matched successfully>"
+      ]
+     },
+     "execution_count": 68,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model_path1 = \"./model_cifar.pt\"\n",
+    "model_path2 = \"./my_model_cifar.pt\"\n",
+    "\n",
+    "model1 = newNet()  # Remplacez Net par le type de modèle que vous utilisez\n",
+    "model1.load_state_dict(torch.load(model_path1))\n",
+    "\n",
+    "\n",
+    "model2 = newNet()  # Assurez-vous que les deux modèles ont la même architecture\n",
+    "model2.load_state_dict(torch.load(model_path2))"
+   ]
+  },
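+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Defensive-loading sketch (an addition, not part of the original TD): loading\n",
+    "# a checkpoint into the wrong architecture raises the RuntimeError shown\n",
+    "# earlier, so wrap load_state_dict to fail with an explicit message instead.\n",
+    "def load_checkpoint(model_cls, path):\n",
+    "    model = model_cls()\n",
+    "    try:\n",
+    "        model.load_state_dict(torch.load(path))\n",
+    "    except RuntimeError as e:\n",
+    "        raise RuntimeError(f\"{path} does not match {model_cls.__name__}\") from e\n",
+    "    model.eval()\n",
+    "    return model\n",
+    "\n",
+    "model1 = load_checkpoint(Net, \"./model_cifar.pt\")\n",
+    "model2 = load_checkpoint(newNet, \"./my_model_cifar.pt\")\n"
+   ]
+  },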
   {
    "cell_type": "markdown",
    "id": "bc381cf4",
@@ -940,7 +1501,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.5"
+   "version": "3.11.5"
   },
   "vscode": {
    "interpreter": {
diff --git a/model_cifar.pt b/model_cifar.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d1b01a8d2cbf2acabc1fef6a881ffe781267057c
Binary files /dev/null and b/model_cifar.pt differ
diff --git a/my_model_cifar.pt b/my_model_cifar.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b5f2675edc8aecf7fb70e03f9105cc0bdf9e26d9
Binary files /dev/null and b/my_model_cifar.pt differ