diff --git a/BE2_GAN_and_cGAN.ipynb b/BE2_GAN_and_cGAN.ipynb
index a25bf21bd03c181be8dfb2f66ace9071c6c6c7c5..b2223ba9c67088db5f7a419d7f023a11c1eebcc1 100644
--- a/BE2_GAN_and_cGAN.ipynb
+++ b/BE2_GAN_and_cGAN.ipynb
@@ -72870,131 +72870,6 @@
     "Therefore, the total number of parameters in the network is:3,136 + 131,200 + 524,544 + 2,097,664 + 8,193 = 2,764,737."
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 38,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Number of parameters in the generator: 54420483\n",
-      "Number of parameters in the discriminator: 2769601\n",
-      "Total number of parameters in the GAN architecture: 57190084\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Calculate the number of parameters for the generator\n",
-    "generator_params = sum(p.numel() for p in generator.parameters())\n",
-    "print(\"Number of parameters in the generator:\", generator_params)\n",
-    "\n",
-    "# Calculate the number of parameters for the discriminator\n",
-    "discriminator_params = sum(p.numel() for p in discriminator.parameters())\n",
-    "print(\"Number of parameters in the discriminator:\", discriminator_params)\n",
-    "\n",
-    "# Total number of parameters in the GAN architecture\n",
-    "total_params = generator_params + discriminator_params\n",
-    "print(\"Total number of parameters in the GAN architecture:\", total_params)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 43,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "----------------------------------------------------------------\n",
-      "        Layer (type)               Output Shape         Param #\n",
-      "================================================================\n",
-      "            Conv2d-1         [-1, 64, 128, 128]           3,136\n",
-      "         LeakyReLU-2         [-1, 64, 128, 128]               0\n",
-      "            inconv-3         [-1, 64, 128, 128]               0\n",
-      "            Conv2d-4          [-1, 128, 64, 64]         131,200\n",
-      "       BatchNorm2d-5          [-1, 128, 64, 64]             256\n",
-      "         LeakyReLU-6          [-1, 128, 64, 64]               0\n",
-      "              down-7          [-1, 128, 64, 64]               0\n",
-      "            Conv2d-8          [-1, 256, 32, 32]         524,544\n",
-      "       BatchNorm2d-9          [-1, 256, 32, 32]             512\n",
-      "        LeakyReLU-10          [-1, 256, 32, 32]               0\n",
-      "             down-11          [-1, 256, 32, 32]               0\n",
-      "           Conv2d-12          [-1, 512, 16, 16]       2,097,664\n",
-      "      BatchNorm2d-13          [-1, 512, 16, 16]           1,024\n",
-      "        LeakyReLU-14          [-1, 512, 16, 16]               0\n",
-      "             down-15          [-1, 512, 16, 16]               0\n",
-      "           Conv2d-16            [-1, 512, 8, 8]       4,194,816\n",
-      "      BatchNorm2d-17            [-1, 512, 8, 8]           1,024\n",
-      "        LeakyReLU-18            [-1, 512, 8, 8]               0\n",
-      "             down-19            [-1, 512, 8, 8]               0\n",
-      "           Conv2d-20            [-1, 512, 4, 4]       4,194,816\n",
-      "      BatchNorm2d-21            [-1, 512, 4, 4]           1,024\n",
-      "        LeakyReLU-22            [-1, 512, 4, 4]               0\n",
-      "             down-23            [-1, 512, 4, 4]               0\n",
-      "           Conv2d-24            [-1, 512, 2, 2]       4,194,816\n",
-      "      BatchNorm2d-25            [-1, 512, 2, 2]           1,024\n",
-      "        LeakyReLU-26            [-1, 512, 2, 2]               0\n",
-      "             down-27            [-1, 512, 2, 2]               0\n",
-      "           Conv2d-28            [-1, 512, 1, 1]       4,194,816\n",
-      "      BatchNorm2d-29            [-1, 512, 1, 1]           1,024\n",
-      "        LeakyReLU-30            [-1, 512, 1, 1]               0\n",
-      "             down-31            [-1, 512, 1, 1]               0\n",
-      "  ConvTranspose2d-32            [-1, 512, 2, 2]       4,194,816\n",
-      "      BatchNorm2d-33            [-1, 512, 2, 2]           1,024\n",
-      "          Dropout-34            [-1, 512, 2, 2]               0\n",
-      "             ReLU-35            [-1, 512, 2, 2]               0\n",
-      "               up-36           [-1, 1024, 2, 2]               0\n",
-      "  ConvTranspose2d-37            [-1, 512, 4, 4]       8,389,120\n",
-      "      BatchNorm2d-38            [-1, 512, 4, 4]           1,024\n",
-      "          Dropout-39            [-1, 512, 4, 4]               0\n",
-      "             ReLU-40            [-1, 512, 4, 4]               0\n",
-      "               up-41           [-1, 1024, 4, 4]               0\n",
-      "  ConvTranspose2d-42            [-1, 512, 8, 8]       8,389,120\n",
-      "      BatchNorm2d-43            [-1, 512, 8, 8]           1,024\n",
-      "          Dropout-44            [-1, 512, 8, 8]               0\n",
-      "             ReLU-45            [-1, 512, 8, 8]               0\n",
-      "               up-46           [-1, 1024, 8, 8]               0\n",
-      "  ConvTranspose2d-47          [-1, 512, 16, 16]       8,389,120\n",
-      "      BatchNorm2d-48          [-1, 512, 16, 16]           1,024\n",
-      "             ReLU-49          [-1, 512, 16, 16]               0\n",
-      "               up-50         [-1, 1024, 16, 16]               0\n",
-      "  ConvTranspose2d-51          [-1, 256, 32, 32]       4,194,560\n",
-      "      BatchNorm2d-52          [-1, 256, 32, 32]             512\n",
-      "             ReLU-53          [-1, 256, 32, 32]               0\n",
-      "               up-54          [-1, 512, 32, 32]               0\n",
-      "  ConvTranspose2d-55          [-1, 128, 64, 64]       1,048,704\n",
-      "      BatchNorm2d-56          [-1, 128, 64, 64]             256\n",
-      "             ReLU-57          [-1, 128, 64, 64]               0\n",
-      "               up-58          [-1, 256, 64, 64]               0\n",
-      "  ConvTranspose2d-59         [-1, 64, 128, 128]         262,208\n",
-      "      BatchNorm2d-60         [-1, 64, 128, 128]             128\n",
-      "             ReLU-61         [-1, 64, 128, 128]               0\n",
-      "               up-62        [-1, 128, 128, 128]               0\n",
-      "  ConvTranspose2d-63          [-1, 3, 256, 256]           6,147\n",
-      "             Tanh-64          [-1, 3, 256, 256]               0\n",
-      "          outconv-65          [-1, 3, 256, 256]               0\n",
-      "================================================================\n",
-      "Total params: 54,420,483\n",
-      "Trainable params: 54,420,483\n",
-      "Non-trainable params: 0\n",
-      "----------------------------------------------------------------\n",
-      "Input size (MB): 0.75\n",
-      "Forward/backward pass size (MB): 134.80\n",
-      "Params size (MB): 207.60\n",
-      "Estimated Total Size (MB): 343.14\n",
-      "----------------------------------------------------------------\n"
-     ]
-    }
-   ],
-   "source": [
-    "from torchsummary import summary\n",
-    "\n",
-    "summary(generator, (3, 256, 256))"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 9,
diff --git a/README.md b/README.md
index 1ec8131d5e4c568ed03bae5deb08e7d63851f102..0af9f88fd0f6dfedf7146bd178aa5c26301db7d6 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,43 @@
 # TD 2 : GAN & cGAN
 
-MSO 3.4 Apprentissage Automatique
+MSO 3.4 Apprentissage Automatique  
 
----
+This project focuses on understanding and implementing Generative Adversarial Networks (GANs) and Conditional GANs (cGANs) for image generation and image-to-image translation tasks. Below is an overview of the project structure, including the tasks performed and the provided solutions.
 
-We recommand to use the notebook (.ipynb) but the Python script (.py) is also provided if more convenient for you.
+# Deep Convolutional GAN (DCGAN)
+In this section, we explore the basics of GANs by implementing a DCGAN architecture to generate handwritten digits.  
+In this part, a tutorial for celebrity face generation is applied to the handwritten digits. It gives a first introduction to GANs for a simple case.
 
-# How to submit your Work ?
+# Conditional GAN (cGAN)
 
-This work must be done individually. The expected output is a private repository named gan-cgan on https://gitlab.ec-lyon.fr. It must contain your notebook (or python files) and a README.md file that explains briefly the successive steps of the project. Don't forget to add your teacher as developer member of the project. The last commit is due before 11:59 pm on Monday, April 1st, 2024. Subsequent commits will not be considered.
+In this section, we delve into cGANs, particularly focusing on image-to-image translation using a U-Net generator and a PatchGAN discriminator. The goal is to create building facade images based on a mask of the facade.
+
+## U-Net architecture
+
+The U-Net is a model based on several convolutional layers that creates an output image from an input one. It represents the generator. The two main steps are: 
+* Define classes for U-Net architecture components.
+* Create the U-Net model for image-to-image translation.
+
+## PatchGAN
+
+The PatchGAN is the discriminator that will classify the images. 
+
+## Loss functions
+
+The loss function used in this project combines GAN loss and L1 norm loss to map both high and low-frequency features in the generated images.
+
+# Results
+Comparative analysis of generated images between 1 epoch and 50 epochs shows significant improvement in image quality and resemblance to real images.
+
+# Conclusion
+
+This project provides hands-on experience with GANs and cGANs, demonstrating their effectiveness in generating realistic images and performing image-to-image translation tasks. The pain point of the work carried out was mainly the computing time to train the model, which led to reducing the number of epochs and thus the performances of the model. To conclude, through practical implementation and analysis, learners gain insights into the complexities and nuances of deep structured learning.
+
+# Acknowledgments
+
+The code and tutorials used in this project are primarily based on PyTorch documentation and research papers cited within the assignment.
+
+# Author:
+Oscar Chaufour
+
+Feel free to expand on this readme with additional details or instructions as needed for your project documentation.