diff --git a/BE2_GAN_and_cGAN.ipynb b/BE2_GAN_and_cGAN.ipynb
index 01bf87603bd2aeee135f8ff4708167186d77ac36..09a9f92bde736a82c0561bc7b266a5a50eb6ded6 100644
--- a/BE2_GAN_and_cGAN.ipynb
+++ b/BE2_GAN_and_cGAN.ipynb
@@ -69,15 +69,172 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {
     "colab": {},
     "colab_type": "code",
     "id": "sIL7UvYAZx6L"
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "cpu\n"
+     ]
+    },
+    {
+     "ename": "KeyboardInterrupt",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
+      "Cell \u001b[1;32mIn[4], line 85\u001b[0m\n\u001b[0;32m     83\u001b[0m \u001b[38;5;66;03m# Train the DCGAN\u001b[39;00m\n\u001b[0;32m     84\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(epochs):\n\u001b[1;32m---> 85\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m i, (images, _) \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(dataloader):\n\u001b[0;32m     86\u001b[0m         \u001b[38;5;66;03m# Train Discriminator\u001b[39;00m\n\u001b[0;32m     87\u001b[0m         real_images \u001b[38;5;241m=\u001b[39m images\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m     88\u001b[0m         real_labels \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mones(batch_size, \u001b[38;5;241m1\u001b[39m)\u001b[38;5;241m.\u001b[39mto(device)\u001b[38;5;241m.\u001b[39msqueeze()\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:631\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    628\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sampler_iter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    629\u001b[0m     \u001b[38;5;66;03m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[39;00m\n\u001b[0;32m    630\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reset()  \u001b[38;5;66;03m# type: ignore[call-arg]\u001b[39;00m\n\u001b[1;32m--> 631\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_next_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    632\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m    633\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_kind \u001b[38;5;241m==\u001b[39m _DatasetKind\u001b[38;5;241m.\u001b[39mIterable \u001b[38;5;129;01mand\u001b[39;00m \\\n\u001b[0;32m    634\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \\\n\u001b[0;32m    635\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called:\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:675\u001b[0m, in \u001b[0;36m_SingleProcessDataLoaderIter._next_data\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    673\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_next_data\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    674\u001b[0m     index \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_next_index()  \u001b[38;5;66;03m# may raise StopIteration\u001b[39;00m\n\u001b[1;32m--> 675\u001b[0m     data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_dataset_fetcher\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfetch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mindex\u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# may raise StopIteration\u001b[39;00m\n\u001b[0;32m    676\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pin_memory:\n\u001b[0;32m    677\u001b[0m         data \u001b[38;5;241m=\u001b[39m _utils\u001b[38;5;241m.\u001b[39mpin_memory\u001b[38;5;241m.\u001b[39mpin_memory(data, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pin_memory_device)\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:51\u001b[0m, in \u001b[0;36m_MapDatasetFetcher.fetch\u001b[1;34m(self, possibly_batched_index)\u001b[0m\n\u001b[0;32m     49\u001b[0m         data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset\u001b[38;5;241m.\u001b[39m__getitems__(possibly_batched_index)\n\u001b[0;32m     50\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m---> 51\u001b[0m         data \u001b[38;5;241m=\u001b[39m [\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset[idx] \u001b[38;5;28;01mfor\u001b[39;00m idx \u001b[38;5;129;01min\u001b[39;00m possibly_batched_index]\n\u001b[0;32m     52\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m     53\u001b[0m     data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset[possibly_batched_index]\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:51\u001b[0m, in \u001b[0;36m<listcomp>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m     49\u001b[0m         data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset\u001b[38;5;241m.\u001b[39m__getitems__(possibly_batched_index)\n\u001b[0;32m     50\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m---> 51\u001b[0m         data \u001b[38;5;241m=\u001b[39m [\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdataset\u001b[49m\u001b[43m[\u001b[49m\u001b[43midx\u001b[49m\u001b[43m]\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m idx \u001b[38;5;129;01min\u001b[39;00m possibly_batched_index]\n\u001b[0;32m     52\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m     53\u001b[0m     data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdataset[possibly_batched_index]\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torchvision\\datasets\\mnist.py:145\u001b[0m, in \u001b[0;36mMNIST.__getitem__\u001b[1;34m(self, index)\u001b[0m\n\u001b[0;32m    142\u001b[0m img \u001b[38;5;241m=\u001b[39m Image\u001b[38;5;241m.\u001b[39mfromarray(img\u001b[38;5;241m.\u001b[39mnumpy(), mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mL\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    144\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtransform \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m--> 145\u001b[0m     img \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtransform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    147\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtarget_transform \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    148\u001b[0m     target \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtarget_transform(target)\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\transforms.py:95\u001b[0m, in \u001b[0;36mCompose.__call__\u001b[1;34m(self, img)\u001b[0m\n\u001b[0;32m     93\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, img):\n\u001b[0;32m     94\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m t \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtransforms:\n\u001b[1;32m---> 95\u001b[0m         img \u001b[38;5;241m=\u001b[39m \u001b[43mt\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     96\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m img\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torch\\nn\\modules\\module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1509\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m   1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1511\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torch\\nn\\modules\\module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m   1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m   1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m   1518\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m   1519\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1520\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m forward_call(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m   1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m   1523\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\transforms.py:277\u001b[0m, in \u001b[0;36mNormalize.forward\u001b[1;34m(self, tensor)\u001b[0m\n\u001b[0;32m    269\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, tensor: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m    270\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m    271\u001b[0m \u001b[38;5;124;03m    Args:\u001b[39;00m\n\u001b[0;32m    272\u001b[0m \u001b[38;5;124;03m        tensor (Tensor): Tensor image to be normalized.\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    275\u001b[0m \u001b[38;5;124;03m        Tensor: Normalized Tensor image.\u001b[39;00m\n\u001b[0;32m    276\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 277\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnormalize\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmean\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstd\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minplace\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\functional.py:349\u001b[0m, in \u001b[0;36mnormalize\u001b[1;34m(tensor, mean, std, inplace)\u001b[0m\n\u001b[0;32m    346\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tensor, torch\u001b[38;5;241m.\u001b[39mTensor):\n\u001b[0;32m    347\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimg should be Tensor Image. Got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(tensor)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m--> 349\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF_t\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnormalize\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtensor\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmean\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmean\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstd\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minplace\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minplace\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[1;32mc:\\Users\\polux\\anaconda3\\lib\\site-packages\\torchvision\\transforms\\_functional_tensor.py:915\u001b[0m, in \u001b[0;36mnormalize\u001b[1;34m(tensor, mean, std, inplace)\u001b[0m\n\u001b[0;32m    910\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    911\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExpected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtensor\u001b[38;5;241m.\u001b[39msize()\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    912\u001b[0m     )\n\u001b[0;32m    914\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m inplace:\n\u001b[1;32m--> 915\u001b[0m     tensor \u001b[38;5;241m=\u001b[39m \u001b[43mtensor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mclone\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    917\u001b[0m dtype \u001b[38;5;241m=\u001b[39m tensor\u001b[38;5;241m.\u001b[39mdtype\n\u001b[0;32m    918\u001b[0m mean \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mas_tensor(mean, dtype\u001b[38;5;241m=\u001b[39mdtype, device\u001b[38;5;241m=\u001b[39mtensor\u001b[38;5;241m.\u001b[39mdevice)\n",
+      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
+     ]
+    }
+   ],
    "source": [
-    "#TO DO: your code here to adapt the code from the tutorial to experiment on MNIST dataset"
+    "#TO DO: your code here to adapt the code from the tutorial to experiment on MNIST dataset\n",
+    "\n",
+    "import torch\n",
+    "from torch import nn\n",
+    "from torchvision import datasets, transforms\n",
+    "from torchvision.utils import save_image\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "if torch.cuda.is_available():\n",
+    "    print('cuda')\n",
+    "    device = torch.device('cuda')\n",
+    "else:\n",
+    "    device = torch.device('cpu')\n",
+    "    print('cpu')\n",
+    "\n",
+    "# Load MNIST dataset\n",
+    "transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n",
+    "dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n",
+    "dataloader = torch.utils.data.DataLoader(dataset, batch_size=128, shuffle=True)\n",
+    "\n",
+    "# Define Generator and Discriminator\n",
+    "class Generator(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Generator, self).__init__()\n",
+    "        self.main = nn.Sequential(\n",
+    "            # input is Z, going into a convolution\n",
+    "            nn.ConvTranspose2d(100, 256, 4, 1, 0, bias=False),\n",
+    "            nn.BatchNorm2d(256),\n",
+    "            nn.ReLU(True),\n",
+    "            # state size. 256 x 4 x 4\n",
+    "            nn.ConvTranspose2d(256, 128, 3, 2, 1, bias=False),\n",
+    "            nn.BatchNorm2d(128),\n",
+    "            nn.ReLU(True),\n",
+    "            # state size. 128 x 7 x 7\n",
+    "            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),\n",
+    "            nn.BatchNorm2d(64),\n",
+    "            nn.ReLU(True),\n",
+    "            # state size. 64 x 14 x 14\n",
+    "            nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False),\n",
+    "            nn.Tanh()\n",
+    "            # state size. 1 x 28 x 28\n",
+    "        )\n",
+    "\n",
+    "    def forward(self, input):\n",
+    "        return self.main(input)\n",
+    "\n",
+    "class Discriminator(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Discriminator, self).__init__()\n",
+    "        self.main = nn.Sequential(\n",
+    "            # input is 1 x 28 x 28\n",
+    "            nn.Conv2d(1, 64, 4, 2, 1, bias=False),\n",
+    "            nn.LeakyReLU(0.2, inplace=True),\n",
+    "            # state size. 64 x 14 x 14\n",
+    "            nn.Conv2d(64, 128, 4, 2, 1, bias=False),\n",
+    "            nn.BatchNorm2d(128),\n",
+    "            nn.LeakyReLU(0.2, inplace=True),\n",
+    "            # state size. 128 x 7 x 7\n",
+    "            nn.Conv2d(128, 256, 3, 2, 1, bias=False),\n",
+    "            nn.BatchNorm2d(256),\n",
+    "            nn.LeakyReLU(0.2, inplace=True),\n",
+    "            # state size. 256 x 4 x 4\n",
+    "            nn.Conv2d(256, 1, 4, 1, 0, bias=False),\n",
+    "            nn.Sigmoid()\n",
+    "        )\n",
+    "\n",
+    "    def forward(self, input):\n",
+    "        return self.main(input).view(-1, 1).squeeze(1)\n",
+    "\n",
+    "# Create the models\n",
+    "generator = Generator()\n",
+    "discriminator = Discriminator()\n",
+    "\n",
+    "# Define loss function and optimizers\n",
+    "criterion = nn.BCELoss()\n",
+    "optimizer_g = torch.optim.Adam(generator.parameters(), lr=0.0002)\n",
+    "optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=0.0002)\n",
+    "\n",
+    "# hyperparameters\n",
+    "batch_size = 128\n",
+    "epochs = 100\n",
+    "\n",
+    "# Train the DCGAN\n",
+    "for epoch in range(epochs):\n",
+    "    for i, (images, _) in enumerate(dataloader):\n",
+    "        # Train Discriminator\n",
+    "        real_images = images.to(device)\n",
+    "        real_labels = torch.ones(batch_size, 1).to(device).squeeze()\n",
+    "        fake_labels = torch.zeros(batch_size, 1).to(device).squeeze()\n",
+    "\n",
+    "        # Real images\n",
+    "        real_outputs = discriminator(real_images)\n",
+    "        d_loss_real = criterion(real_outputs, real_labels)\n",
+    "\n",
+    "        # Fake images\n",
+    "        z = torch.randn(batch_size, 100, 1, 1).to(device)\n",
+    "        fake_images = generator(z)\n",
+    "        fake_outputs = discriminator(fake_images)\n",
+    "        d_loss_fake = criterion(fake_outputs, fake_labels)\n",
+    "\n",
+    "        # Backprop and optimize\n",
+    "        d_loss = d_loss_real + d_loss_fake\n",
+    "        discriminator.zero_grad()\n",
+    "        d_loss.backward()\n",
+    "        optimizer_d.step()\n",
+    "\n",
+    "        # Train Generator\n",
+    "        z = torch.randn(batch_size, 100, 1, 1).to(device)\n",
+    "        fake_images = generator(z)\n",
+    "        outputs = discriminator(fake_images)\n",
+    "\n",
+    "        # Backprop and optimize\n",
+    "        g_loss = criterion(outputs, real_labels)\n",
+    "        generator.zero_grad()\n",
+    "        g_loss.backward()\n",
+    "        optimizer_g.step()\n",
+    "\n",
+    "    print(f'Epoch [{epoch}/{epochs}], d_loss: {d_loss.item()}, g_loss: {g_loss.item()}')\n",
+    "\n",
+    "# Generate new handwritten digits\n",
+    "noise = torch.randn(64, 100, 1, 1)\n",
+    "generated_images = generator(noise)\n",
+    "save_image(generated_images.view(64, 1, 28, 28), 'generated_images.png')\n",
+    "\n",
+    "# Display the generated images\n",
+    "img = plt.imread('generated_images.png')\n",
+    "plt.imshow(img)\n",
+    "plt.show()"
    ]
   },
   {
@@ -296,7 +453,7 @@
    },
    "outputs": [],
    "source": [
-    " class U_Net(nn.Module):\n",
+    "class U_Net(nn.Module):\n",
     "    ''' \n",
     "    Ck denotes a Convolution-BatchNorm-ReLU layer with k filters.\n",
     "    CDk denotes a Convolution-BatchNorm-Dropout-ReLU layer with a dropout rate of 50%\n",
@@ -306,43 +463,55 @@
     "      CD512 - CD1024 - CD1024 - C1024 - C1024 - C512 - C256 - C128\n",
     "    '''\n",
     "    def __init__(self, n_channels, n_classes):\n",
-    "        super(U_Net, self).__init__()\n",
-    "        # Encoder\n",
-    "        self.inc = inconv(n_channels, 64) # 64 filters\n",
-    "        # TO DO :\n",
-    "        # Create the 7 encoder layers called \"down1\" to \"down7\" following this sequence\n",
-    "        # C64   - C128   - C256   - C512  - C512  - C512 - C512 - C512\n",
-    "        # The first one has already been implemented\n",
-    "        \n",
-    "        \n",
-    "        # Decoder\n",
-    "        # TO DO :\n",
-    "        # Create the 7 decoder layers called up1 to up7 following this sequence :\n",
-    "        # CD512 - CD1024 - CD1024 - C1024 - C1024 - C512 - C256 - C128\n",
-    "        # The last layer has already been defined\n",
-    "        \n",
-    "        \n",
-    "        self.outc = outconv(128, n_classes) # 128 filters\n",
+    "      super(U_Net, self).__init__()\n",
+    "      # Encoder\n",
+    "      self.inc = inconv(n_channels, 64) # 64 filters\n",
+    "      # TO DO :\n",
+    "      # Create the 7 encoder layers called \"down1\" to \"down7\" following this sequence\n",
+    "      # C64   - C128   - C256   - C512  - C512  - C512 - C512 - C512\n",
+    "      # The first one has already been implemented\n",
+    "      self.down1 = down(64, 128)\n",
+    "      self.down2 = down(128, 256)\n",
+    "      self.down3 = down(256, 512)\n",
+    "      self.down4 = down(512, 512)\n",
+    "      self.down5 = down(512, 512)\n",
+    "      self.down6 = down(512, 512)\n",
+    "      self.down7 = down(512, 512)\n",
+    "\n",
+    "      # Decoder\n",
+    "      # TO DO :\n",
+    "      # Create the 7 decoder layers called up1 to up7 following this sequence :\n",
+    "      # CD512 - CD1024 - CD1024 - C1024 - C1024 - C512 - C256 - C128\n",
+    "      # The last layer has already been defined\n",
+    "      self.up1 = up(1024, 512, dropout=True)\n",
+    "      self.up2 = up(1024, 512, dropout=True)\n",
+    "      self.up3 = up(1024, 512)\n",
+    "      self.up4 = up(1024, 512)\n",
+    "      self.up5 = up(1024, 256)\n",
+    "      self.up6 = up(512, 128)\n",
+    "      self.up7 = up(256, 64)\n",
+    "      \n",
+    "      self.outc = outconv(128, n_classes) # 128 filters\n",
     "\n",
     "    def forward(self, x):\n",
-    "        x1 = self.inc(x)\n",
-    "        x2 = self.down1(x1)\n",
-    "        x3 = self.down2(x2)\n",
-    "        x4 = self.down3(x3)\n",
-    "        x5 = self.down4(x4)\n",
-    "        x6 = self.down5(x5)\n",
-    "        x7 = self.down6(x6)\n",
-    "        x8 = self.down7(x7)\n",
-    "        # At this stage x8 is our encoded vector, we will now decode it\n",
-    "        x = self.up7(x8, x7)\n",
-    "        x = self.up6(x, x6)\n",
-    "        x = self.up5(x, x5)\n",
-    "        x = self.up4(x, x4)\n",
-    "        x = self.up3(x, x3)\n",
-    "        x = self.up2(x, x2)\n",
-    "        x = self.up1(x, x1)\n",
-    "        x = self.outc(x)\n",
-    "        return x"
+    "      x1 = self.inc(x)\n",
+    "      x2 = self.down1(x1)\n",
+    "      x3 = self.down2(x2)\n",
+    "      x4 = self.down3(x3)\n",
+    "      x5 = self.down4(x4)\n",
+    "      x6 = self.down5(x5)\n",
+    "      x7 = self.down6(x6)\n",
+    "      x8 = self.down7(x7)\n",
+    "      # At this stage x8 is our encoded vector, we will now decode it\n",
+    "      x = self.up7(x8, x7)\n",
+    "      x = self.up6(x, x6)\n",
+    "      x = self.up5(x, x5)\n",
+    "      x = self.up4(x, x4)\n",
+    "      x = self.up3(x, x3)\n",
+    "      x = self.up2(x, x2)\n",
+    "      x = self.up1(x, x1)\n",
+    "      x = self.outc(x)\n",
+    "      return x"
    ]
   },
   {
@@ -381,8 +550,13 @@
     "<font color='red'>**Question 1**</font>                                                                  \n",
     "Knowing the input and output images will be 256x256, what will be the dimension of the encoded vector x8  ?\n",
     "\n",
+    "After 8 layers of downsampling, the dimension of the encoded vector would be: 256 / 2^8 = 256 / 256 = 1\n",
+    "\n",
     "<font color='red'>**Question 2**</font>                                                                  \n",
-    "As you can see, U-net has an encoder-decoder architecture with skip connections. Explain why it works better than a traditional encoder-decoder."
+    "As you can see, U-net has an encoder-decoder architecture with skip connections. Explain why it works better than a traditional encoder-decoder.\n",
+    "\n",
+    "The U-Net's inclusion of skip connections improves performance over traditional encoder-decoder architectures mainly by preserving spatial information, mitigating information loss, addressing the vanishing gradient problem, and enabling enhanced feature reuse.\n",
+    "\n"
    ]
   },
   {
@@ -513,10 +687,10 @@
     "        super(PatchGAN, self).__init__()\n",
     "        # TODO :\n",
     "        # create the 4 first layers named conv1 to conv4\n",
-    "        self.conv1 =\n",
-    "        self.conv2 =\n",
-    "        self.conv3 =\n",
-    "        self.conv4 =\n",
+    "        self.conv1 = conv_block(n_channels * 2, 64)  # Assuming the input is a concatenation of two images\n",
+    "        self.conv2 = conv_block(64, 128)\n",
+    "        self.conv3 = conv_block(128, 256)\n",
+    "        self.conv4 = conv_block(256, 512)\n",
     "        # output layer\n",
     "        self.out = out_block(512, n_classes)\n",
     "        \n",
@@ -961,14 +1135,18 @@
     "\n",
     "        optimizer_G.zero_grad()\n",
     "\n",
+    "        # Generate a batch of images\n",
+    "        fake_A = generator(real_B)\n",
+    "\n",
     "        # GAN loss\n",
-    "        # TO DO: Put here your GAN loss\n",
+    "        pred_fake = discriminator(fake_A, real_B)\n",
+    "        loss_GAN = criterion_GAN(pred_fake, valid)\n",
     "\n",
     "        # Pixel-wise loss\n",
-    "        # TO DO: Put here your pixel loss\n",
+    "        loss_pixel = torch.nn.functional.l1_loss(fake_A, real_A)\n",
     "\n",
     "        # Total loss\n",
-    "        # TO DO: Put here your total loss\n",
+    "        loss_G = loss_GAN + lambda_pixel * loss_pixel\n",
     "\n",
     "        loss_G.backward()\n",
     "\n",
@@ -1212,7 +1390,7 @@
    "provenance": []
   },
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": "base",
    "language": "python",
    "name": "python3"
   },
@@ -1226,7 +1404,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.8"
+   "version": "3.9.7"
   }
  },
  "nbformat": 4,
diff --git a/data/MNIST/raw/t10k-images-idx3-ubyte b/data/MNIST/raw/t10k-images-idx3-ubyte
new file mode 100644
index 0000000000000000000000000000000000000000..1170b2cae98de7a524b163fcc379ac8f00925b12
Binary files /dev/null and b/data/MNIST/raw/t10k-images-idx3-ubyte differ
diff --git a/data/MNIST/raw/t10k-images-idx3-ubyte.gz b/data/MNIST/raw/t10k-images-idx3-ubyte.gz
new file mode 100644
index 0000000000000000000000000000000000000000..5ace8ea93f8d2a3741f4d267954e2ad37e1b3a39
Binary files /dev/null and b/data/MNIST/raw/t10k-images-idx3-ubyte.gz differ
diff --git a/data/MNIST/raw/t10k-labels-idx1-ubyte b/data/MNIST/raw/t10k-labels-idx1-ubyte
new file mode 100644
index 0000000000000000000000000000000000000000..d1c3a970612bbd2df47a3c0697f82bd394abc450
Binary files /dev/null and b/data/MNIST/raw/t10k-labels-idx1-ubyte differ
diff --git a/data/MNIST/raw/t10k-labels-idx1-ubyte.gz b/data/MNIST/raw/t10k-labels-idx1-ubyte.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a7e141541c1d08d3f2ed01eae03e644f9e2fd0c5
Binary files /dev/null and b/data/MNIST/raw/t10k-labels-idx1-ubyte.gz differ
diff --git a/data/MNIST/raw/train-images-idx3-ubyte b/data/MNIST/raw/train-images-idx3-ubyte
new file mode 100644
index 0000000000000000000000000000000000000000..bbce27659e0fc2b7ed2a64c127849380a477099b
Binary files /dev/null and b/data/MNIST/raw/train-images-idx3-ubyte differ
diff --git a/data/MNIST/raw/train-images-idx3-ubyte.gz b/data/MNIST/raw/train-images-idx3-ubyte.gz
new file mode 100644
index 0000000000000000000000000000000000000000..b50e4b6bccdebde3d57f575c7fbeb24bec277f10
Binary files /dev/null and b/data/MNIST/raw/train-images-idx3-ubyte.gz differ
diff --git a/data/MNIST/raw/train-labels-idx1-ubyte b/data/MNIST/raw/train-labels-idx1-ubyte
new file mode 100644
index 0000000000000000000000000000000000000000..d6b4c5db3b52063d543fb397aede09aba0dc5234
Binary files /dev/null and b/data/MNIST/raw/train-labels-idx1-ubyte differ
diff --git a/data/MNIST/raw/train-labels-idx1-ubyte.gz b/data/MNIST/raw/train-labels-idx1-ubyte.gz
new file mode 100644
index 0000000000000000000000000000000000000000..707a576bb523304d5b674de436c0779d77b7d480
Binary files /dev/null and b/data/MNIST/raw/train-labels-idx1-ubyte.gz differ