From 37fce13b191bdc7a8aa8346a279dcfa292ffcb34 Mon Sep 17 00:00:00 2001
From: epaganel <emilien.paganelli@etu.ecl.fr>
Date: Fri, 1 Dec 2023 11:27:52 +0100
Subject: [PATCH] final commit

---
 .gitignore                  |   1 -
 TD2 Deep Learning (1).ipynb |   1 +
 TD2 Deep Learning.ipynb     | 953 ------------------------------------
 koala.jpg                   | Bin 0 -> 9981 bytes
 raton.jpg                   | Bin 0 -> 8330 bytes
 5 files changed, 1 insertion(+), 954 deletions(-)
 create mode 100644 TD2 Deep Learning (1).ipynb
 delete mode 100644 TD2 Deep Learning.ipynb
 create mode 100644 koala.jpg
 create mode 100644 raton.jpg

diff --git a/.gitignore b/.gitignore
index f3436fe..9d12ab9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
-*.jpg
 .DS_Store
 
 # Data
diff --git a/TD2 Deep Learning (1).ipynb b/TD2 Deep Learning (1).ipynb
new file mode 100644
index 0000000..62382f1
--- /dev/null
+++ b/TD2 Deep Learning (1).ipynb	
@@ -0,0 +1 @@
+{"cells":[{"cell_type":"markdown","id":"7edf7168","metadata":{"id":"7edf7168"},"source":["# TD2: Deep learning"]},{"cell_type":"markdown","id":"fbb8c8df","metadata":{"id":"fbb8c8df"},"source":["In this TD, you must modify this notebook to answer the questions. To do this,\n","\n","1. Fork this repository\n","2. Clone your forked repository on your local computer\n","3. Answer the questions\n","4. Commit and push regularly\n","\n","The last commit is due on Sunday, December 1, 11:59 PM. Later commits will not be taken into account."]},{"cell_type":"markdown","id":"3d167a29","metadata":{"id":"3d167a29"},"source":["Install and test PyTorch from  https://pytorch.org/get-started/locally."]},{"cell_type":"markdown","source":[],"metadata":{"id":"QCeGtWXR9J0y"},"id":"QCeGtWXR9J0y"},{"cell_type":"code","execution_count":27,"id":"330a42f5","metadata":{"colab":{"base_uri":"https://localhost:8080/","height":348},"id":"330a42f5","executionInfo":{"status":"ok","timestamp":1701426305624,"user_tz":-60,"elapsed":7349,"user":{"displayName":"Emilien Paganelli","userId":"14204960141695635735"}},"outputId":"2919d0d6-6505-4f44-c820-d714835dbf32"},"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (2.1.0+cu118)\n","Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (0.16.0+cu118)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch) (3.13.1)\n","Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch) (4.5.0)\n","Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch) (1.12)\n","Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch) (3.2.1)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch) (3.1.2)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch) (2023.6.0)\n","Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch) (2.1.0)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchvision) (1.23.5)\n","Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision) (2.31.0)\n","Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.10/dist-packages (from torchvision) (9.4.0)\n","Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch) (2.1.3)\n","Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (3.3.2)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (3.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2.0.7)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2023.7.22)\n","Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch) (1.3.0)\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":27}],"source":["%pip install torch 
torchvision\n","%pwd"]},{"cell_type":"markdown","id":"0882a636","metadata":{"id":"0882a636"},"source":["\n","To test run the following code"]},{"cell_type":"code","execution_count":28,"id":"b1950f0a","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"b1950f0a","executionInfo":{"status":"ok","timestamp":1701426306511,"user_tz":-60,"elapsed":894,"user":{"displayName":"Emilien Paganelli","userId":"14204960141695635735"}},"outputId":"ad7c03d1-2f59-4fc4-fc8a-95cd79d1dbc0"},"outputs":[{"output_type":"stream","name":"stdout","text":["tensor([[ 3.5464e-01, -2.5162e-01, -3.4084e-01, -4.0203e-01,  3.5056e-01,\n","         -6.9368e-01,  3.9031e-01,  3.7319e-01, -1.4964e+00, -4.2178e-01],\n","        [-4.0251e-01,  1.5534e+00, -1.0358e+00, -5.9098e-01, -6.0426e-02,\n","         -9.6500e-01, -7.1777e-01, -3.0196e-01, -2.0247e-01,  2.8037e-01],\n","        [-4.0223e-01, -7.3649e-01, -8.6344e-01, -7.9131e-01, -6.1953e-02,\n","         -1.8424e+00,  9.4855e-01, -1.0007e-01, -1.4111e+00, -6.5363e-01],\n","        [ 1.3629e-01, -3.6519e-01,  1.7978e-01, -4.4087e-01, -4.5886e-01,\n","          4.6493e-01, -1.0318e+00,  1.2653e-01, -3.5993e-01,  7.4553e-01],\n","        [-2.5067e-01,  9.4126e-01, -6.1452e-01,  7.3656e-01, -2.8452e-01,\n","          9.6051e-01,  1.0045e+00,  4.4673e-04,  1.2508e+00, -9.4441e-01],\n","        [-1.3628e+00, -8.0748e-01,  8.7495e-01, -6.0305e-01, -3.0637e+00,\n","         -4.7690e-01,  7.7525e-01, -8.4844e-02, -8.0040e-01, -1.6611e+00],\n","        [-6.5572e-01,  2.8375e-01, -8.9761e-01,  6.8970e-01, -2.1472e+00,\n","         -5.3845e-01, -1.3347e-01, -4.6737e-01,  5.3647e-01, -9.9889e-02],\n","        [-6.5048e-01,  1.7173e-01, -2.7836e-01,  6.2470e-02,  2.4870e+00,\n","          1.0898e+00,  7.3483e-01, -9.4346e-01,  1.0283e+00,  1.3574e-01],\n","        [ 5.7310e-01, -1.9131e-01,  6.2758e-01,  1.4844e-01, -7.4922e-01,\n","         -8.9184e-01,  8.0467e-01, -7.2978e-01, -7.0883e-01, -1.4534e-01],\n","        [ 1.1513e+00, -8.5424e-01,  1.3269e+00, -4.4614e-01, -4.5424e-01,\n","         -1.0125e+00, -7.7922e-01, -9.7093e-01,  2.2431e+00, -1.8060e+00],\n","        [ 1.0384e+00, -1.8293e+00, -4.7758e-01, -1.7817e+00, -3.3394e-01,\n","          1.7667e+00,  1.1108e+00, -1.9820e-01, -5.5519e-01, -5.8780e-01],\n","        [-1.3990e+00, -1.7740e+00, -1.2106e+00,  3.2282e-03, -1.7323e+00,\n","         -2.3775e-01,  5.3892e-01,  9.7521e-01, -3.2019e-01, -1.3805e+00],\n","        [-5.7714e-02,  1.2586e-01, -1.7178e+00,  2.8869e+00, -8.5589e-01,\n","         -4.0656e-01,  1.0384e+00,  8.4800e-01,  4.2282e-01,  7.2093e-01],\n","        [-1.9805e-02, -2.7115e-01,  3.3575e-01, -1.1353e-01, -4.9451e-01,\n","          2.2995e-01,  1.0682e+00,  1.1319e+00, -4.9516e-01, -1.2863e+00]])\n","AlexNet(\n","  (features): Sequential(\n","    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n","    (1): ReLU(inplace=True)\n","    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n","    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n","    (4): ReLU(inplace=True)\n","    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n","    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n","    (7): ReLU(inplace=True)\n","    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n","    (9): ReLU(inplace=True)\n","    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n","    (11): ReLU(inplace=True)\n","    (12): 
MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n","  )\n","  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))\n","  (classifier): Sequential(\n","    (0): Dropout(p=0.5, inplace=False)\n","    (1): Linear(in_features=9216, out_features=4096, bias=True)\n","    (2): ReLU(inplace=True)\n","    (3): Dropout(p=0.5, inplace=False)\n","    (4): Linear(in_features=4096, out_features=4096, bias=True)\n","    (5): ReLU(inplace=True)\n","    (6): Linear(in_features=4096, out_features=1000, bias=True)\n","  )\n",")\n"]}],"source":["import torch\n","\n","N, D = 14, 10\n","x = torch.randn(N, D).type(torch.FloatTensor)\n","print(x)\n","\n","from torchvision import models\n","\n","alexnet = models.alexnet()\n","print(alexnet)"]},{"cell_type":"markdown","id":"23f266da","metadata":{"id":"23f266da"},"source":["## Exercise 1: CNN on CIFAR10\n","\n","The goal is to apply a Convolutional Neural Net (CNN) model on the CIFAR10 image dataset and test the accuracy of the model on the basis of image classification. Compare the Accuracy VS the neural network implemented during TD1.\n","\n","Have a look at the following documentation to be familiar with PyTorch.\n","\n","https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\n","\n","https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html"]},{"cell_type":"markdown","id":"4ba1c82d","metadata":{"id":"4ba1c82d"},"source":["You can test if GPU is available on your machine and thus train on it to speed up the process"]},{"cell_type":"code","execution_count":29,"id":"6e18f2fd","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"6e18f2fd","executionInfo":{"status":"ok","timestamp":1701426306511,"user_tz":-60,"elapsed":3,"user":{"displayName":"Emilien Paganelli","userId":"14204960141695635735"}},"outputId":"02f0f21b-1da2-46f2-d67e-a27d41d7ff93"},"outputs":[{"output_type":"stream","name":"stdout","text":["CUDA is available!  Training on GPU ...\n"]}],"source":["import torch\n","\n","# check if CUDA is available\n","train_on_gpu = torch.cuda.is_available()\n","\n","if not train_on_gpu:\n","    print(\"CUDA is not available.  Training on CPU ...\")\n","else:\n","    print(\"CUDA is available!  
Training on GPU ...\")"]},{"cell_type":"markdown","id":"5cf214eb","metadata":{"id":"5cf214eb"},"source":["Next we load the CIFAR10 dataset"]},{"cell_type":"code","execution_count":30,"id":"462666a2","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"462666a2","executionInfo":{"status":"ok","timestamp":1701426308369,"user_tz":-60,"elapsed":1860,"user":{"displayName":"Emilien Paganelli","userId":"14204960141695635735"}},"outputId":"a0158530-9554-4462-d7de-7b69df58e40b"},"outputs":[{"output_type":"stream","name":"stdout","text":["Files already downloaded and verified\n","Files already downloaded and verified\n"]}],"source":["import numpy as np\n","from torchvision import datasets, transforms\n","from torch.utils.data.sampler import SubsetRandomSampler\n","\n","# number of subprocesses to use for data loading\n","num_workers = 0\n","# how many samples per batch to load\n","batch_size = 20\n","# percentage of training set to use as validation\n","valid_size = 0.2\n","\n","# convert data to a normalized torch.FloatTensor\n","transform = transforms.Compose(\n","    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n",")\n","\n","# choose the training and test datasets\n","train_data = datasets.CIFAR10(\"data\", train=True, download=True, transform=transform)\n","test_data = datasets.CIFAR10(\"data\", train=False, download=True, transform=transform)\n","\n","# obtain training indices that will be used for validation\n","num_train = len(train_data)\n","indices = list(range(num_train))\n","np.random.shuffle(indices)\n","split = int(np.floor(valid_size * num_train))\n","train_idx, valid_idx = indices[split:], indices[:split]\n","\n","# define samplers for obtaining training and validation batches\n","train_sampler = SubsetRandomSampler(train_idx)\n","valid_sampler = SubsetRandomSampler(valid_idx)\n","\n","# prepare data loaders (combine dataset and sampler)\n","train_loader = torch.utils.data.DataLoader(\n","    train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers\n",")\n","valid_loader = torch.utils.data.DataLoader(\n","    train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers\n",")\n","test_loader = torch.utils.data.DataLoader(\n","    test_data, batch_size=batch_size, num_workers=num_workers\n",")\n","\n","# specify the image classes\n","classes = [\n","    \"airplane\",\n","    \"automobile\",\n","    \"bird\",\n","    \"cat\",\n","    \"deer\",\n","    \"dog\",\n","    \"frog\",\n","    \"horse\",\n","    \"ship\",\n","    \"truck\",\n","]"]},{"cell_type":"markdown","id":"58ec3903","metadata":{"id":"58ec3903"},"source":["CNN definition (this one is an example)"]},{"cell_type":"code","execution_count":31,"id":"317bf070","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"317bf070","executionInfo":{"status":"ok","timestamp":1701426308369,"user_tz":-60,"elapsed":4,"user":{"displayName":"Emilien Paganelli","userId":"14204960141695635735"}},"outputId":"39844deb-a37a-4d6b-dc9d-a5824620d7ad"},"outputs":[{"output_type":"stream","name":"stdout","text":["Net(\n","  (conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))\n","  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n","  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n","  (fc1): Linear(in_features=400, out_features=120, bias=True)\n","  (fc2): Linear(in_features=120, out_features=84, bias=True)\n","  (fc3): Linear(in_features=84, out_features=10, bias=True)\n",")\n"]}],"source":["import 
torch.nn as nn\n","import torch.nn.functional as F\n","\n","# define the CNN architecture\n","\n","\n","class Net(nn.Module):\n","    def __init__(self):\n","        super(Net, self).__init__()\n","        self.conv1 = nn.Conv2d(3, 6, 5)\n","        self.pool = nn.MaxPool2d(2, 2)\n","        self.conv2 = nn.Conv2d(6, 16, 5)\n","        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n","        self.fc2 = nn.Linear(120, 84)\n","        self.fc3 = nn.Linear(84, 10)\n","\n","    def forward(self, x):\n","        x = self.pool(F.relu(self.conv1(x)))\n","        x = self.pool(F.relu(self.conv2(x)))\n","        x = x.view(-1, 16 * 5 * 5)\n","        x = F.relu(self.fc1(x))\n","        x = F.relu(self.fc2(x))\n","        x = self.fc3(x)\n","        return x\n","\n","\n","# create a complete CNN\n","model = Net()\n","print(model)\n","# move tensors to GPU if CUDA is available\n","if train_on_gpu:\n","    model.cuda()"]},{"cell_type":"markdown","id":"a2dc4974","metadata":{"id":"a2dc4974"},"source":["Loss function and training using SGD (Stochastic Gradient Descent) optimizer"]},{"cell_type":"code","execution_count":null,"id":"4b53f229","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"4b53f229","outputId":"ffb784e2-4c61-46f8-dee6-8b81f12e9662"},"outputs":[{"output_type":"stream","name":"stdout","text":["Epoch: 0 \tTraining Loss: 43.538748 \tValidation Loss: 39.015653\n","Validation loss decreased (inf --> 39.015653).  Saving model ...\n"]}],"source":["import torch.optim as optim\n","\n","criterion = nn.CrossEntropyLoss()  # specify loss function\n","optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n","\n","n_epochs = 30  # number of epochs to train the model\n","train_loss_list = []  # list to store loss to visualize\n","valid_loss_min = np.Inf  # track change in validation loss\n","\n","for epoch in range(n_epochs):\n","    # Keep track of training and validation loss\n","    train_loss = 0.0\n","    valid_loss = 0.0\n","\n","    # Train the model\n","    model.train()\n","    for data, target in train_loader:\n","        # Move tensors to GPU if CUDA is available\n","        if train_on_gpu:\n","            data, target = data.cuda(), target.cuda()\n","        # Clear the gradients of all optimized variables\n","        optimizer.zero_grad()\n","        # Forward pass: compute predicted outputs by passing inputs to the model\n","        output = model(data)\n","        # Calculate the batch loss\n","        loss = criterion(output, target)\n","        # Backward pass: compute gradient of the loss with respect to model parameters\n","        loss.backward()\n","        # Perform a single optimization step (parameter update)\n","        optimizer.step()\n","        # Update training loss\n","        train_loss += loss.item() * data.size(0)\n","\n","    # Validate the model\n","    model.eval()\n","    for data, target in valid_loader:\n","        # Move tensors to GPU if CUDA is available\n","        if train_on_gpu:\n","            data, target = data.cuda(), target.cuda()\n","        # Forward pass: compute predicted outputs by passing inputs to the model\n","        output = model(data)\n","        # Calculate the batch loss\n","        loss = criterion(output, target)\n","        # Update average validation loss\n","        valid_loss += loss.item() * data.size(0)\n","\n","    # Calculate average losses\n","    train_loss = train_loss / len(train_loader)\n","    valid_loss = valid_loss / len(valid_loader)\n","    train_loss_list.append(train_loss)\n","\n","    # 
Print training/validation statistics\n","    print(\n","        \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n","            epoch, train_loss, valid_loss\n","        )\n","    )\n","\n","    # Save model if validation loss has decreased\n","    if valid_loss <= valid_loss_min:\n","        print(\n","            \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n","                valid_loss_min, valid_loss\n","            )\n","        )\n","        torch.save(model.state_dict(), \"model_cifar.pt\")\n","        valid_loss_min = valid_loss"]},{"cell_type":"markdown","id":"13e1df74","metadata":{"id":"13e1df74"},"source":["Does overfit occur? If so, do an early stopping."]},{"cell_type":"code","execution_count":null,"id":"d39df818","metadata":{"id":"d39df818"},"outputs":[],"source":["import matplotlib.pyplot as plt\n","\n","plt.plot(range(n_epochs), train_loss_list)\n","plt.xlabel(\"Epoch\")\n","plt.ylabel(\"Loss\")\n","plt.title(\"Performance of Model 1\")\n","plt.show()"]},{"cell_type":"markdown","id":"11df8fd4","metadata":{"id":"11df8fd4"},"source":["Now loading the model with the lowest validation loss value\n"]},{"cell_type":"code","execution_count":null,"id":"e93efdfc","metadata":{"id":"e93efdfc"},"outputs":[],"source":["model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n","\n","# track test loss\n","test_loss = 0.0\n","class_correct = list(0.0 for i in range(10))\n","class_total = list(0.0 for i in range(10))\n","\n","model.eval()\n","# iterate over test data\n","for data, target in test_loader:\n","    # move tensors to GPU if CUDA is available\n","    if train_on_gpu:\n","        data, target = data.cuda(), target.cuda()\n","    # forward pass: compute predicted outputs by passing inputs to the model\n","    output = model(data)\n","    # calculate the batch loss\n","    loss = criterion(output, target)\n","    # update test loss\n","    test_loss += loss.item() * data.size(0)\n","    # convert output probabilities to predicted class\n","    _, pred = torch.max(output, 1)\n","    # compare predictions to true label\n","    correct_tensor = pred.eq(target.data.view_as(pred))\n","    correct = (\n","        np.squeeze(correct_tensor.numpy())\n","        if not train_on_gpu\n","        else np.squeeze(correct_tensor.cpu().numpy())\n","    )\n","    # calculate test accuracy for each object class\n","    for i in range(batch_size):\n","        label = target.data[i]\n","        class_correct[label] += correct[i].item()\n","        class_total[label] += 1\n","\n","# average test loss\n","test_loss = test_loss / len(test_loader)\n","print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n","\n","for i in range(10):\n","    if class_total[i] > 0:\n","        print(\n","            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n","            % (\n","                classes[i],\n","                100 * class_correct[i] / class_total[i],\n","                np.sum(class_correct[i]),\n","                np.sum(class_total[i]),\n","            )\n","        )\n","    else:\n","        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n","\n","print(\n","    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n","    % (\n","        100.0 * np.sum(class_correct) / np.sum(class_total),\n","        np.sum(class_correct),\n","        np.sum(class_total),\n","    )\n",")"]},{"cell_type":"markdown","id":"944991a2","metadata":{"id":"944991a2"},"source":["Build a new network with the following structure.\n","\n","- It has 3 
convolutional layers of kernel size 3 and padding of 1.\n","- The first convolutional layer must output 16 channels, the second 32 and the third 64.\n","- At each convolutional layer output, we apply a ReLU activation then a MaxPool with kernel size of 2.\n","- Then, three fully connected layers, the first two being followed by a ReLU activation and a dropout whose value you will suggest.\n","- The first fully connected layer will have an output size of 512.\n","- The second fully connected layer will have an output size of 64.\n","\n","Compare the results obtained with this new network to those obtained previously."]},{"cell_type":"markdown","source":["Net(\n","  (conv1): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))\n","  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n","  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n","  (fc1): Linear(in_features=400, out_features=120, bias=True)\n","  (fc2): Linear(in_features=120, out_features=84, bias=True)\n","  (fc3): Linear(in_features=84, out_features=10, bias=True)\n",")"],"metadata":{"id":"fgfXjKBy3mwR"},"id":"fgfXjKBy3mwR"},{"cell_type":"code","source":["import torch.nn as nn\n","import torch.nn.functional as F\n","\n","# define the CNN architecture\n","\n","\n","class Net(nn.Module):\n","    def __init__(self):\n","        super(Net, self).__init__()\n","        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n","        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n","        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n","        self.pool = nn.MaxPool2d(2, 2)\n","        self.fc1 = nn.Linear(64 * 4 * 4, 512)\n","        self.fc2 = nn.Linear(512, 64)\n","        self.fc3 = nn.Linear(64, 10)\n","        self.dropout = nn.Dropout()\n","\n","    def forward(self, x):\n","        x = self.pool(F.relu(self.conv1(x)))\n","        x = self.pool(F.relu(self.conv2(x)))\n","        x = self.pool(F.relu(self.conv3(x)))\n","        x = x.view(-1, 64 * 4 * 4)\n","        # each of the first two fully connected layers is followed by ReLU then dropout\n","        x = self.dropout(F.relu(self.fc1(x)))\n","        x = self.dropout(F.relu(self.fc2(x)))\n","        x = self.fc3(x)\n","        return x\n","\n","\n","# create a complete CNN\n","model = Net()\n","print(model)\n","# move tensors to GPU if CUDA is available\n","if train_on_gpu:\n","    model.cuda()"],"metadata":{"id":"BNqCMzKT3SfJ"},"id":"BNqCMzKT3SfJ","execution_count":null,"outputs":[]},{"cell_type":"code","source":["import torch.optim as optim\n","\n","criterion = nn.CrossEntropyLoss()  # specify loss function\n","optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n","\n","n_epochs = 30  # number of epochs to train the model\n","train_loss_list = []  # list to store loss to visualize\n","valid_loss_min = np.Inf  # track change in validation loss\n","\n","for epoch in range(n_epochs):\n","    # Keep track of training and validation loss\n","    train_loss = 0.0\n","    valid_loss = 0.0\n","\n","    # Train the model\n","    model.train()\n","    for data, target in train_loader:\n","        # Move tensors to GPU if CUDA is available\n","        if train_on_gpu:\n","            data, target = data.cuda(), target.cuda()\n","        # Clear the gradients of all optimized variables\n","        optimizer.zero_grad()\n","        # Forward pass: compute predicted outputs by passing inputs to the model\n","        output = model(data)\n","        # Calculate the batch loss\n","        loss = criterion(output, target)\n","        # Backward pass: compute gradient of the loss with respect 
to model parameters\n","        loss.backward()\n","        # Perform a single optimization step (parameter update)\n","        optimizer.step()\n","        # Update training loss\n","        train_loss += loss.item() * data.size(0)\n","\n","    # Validate the model\n","    model.eval()\n","    for data, target in valid_loader:\n","        # Move tensors to GPU if CUDA is available\n","        if train_on_gpu:\n","            data, target = data.cuda(), target.cuda()\n","        # Forward pass: compute predicted outputs by passing inputs to the model\n","        output = model(data)\n","        # Calculate the batch loss\n","        loss = criterion(output, target)\n","        # Update average validation loss\n","        valid_loss += loss.item() * data.size(0)\n","\n","    # Calculate average losses\n","    train_loss = train_loss / len(train_loader)\n","    valid_loss = valid_loss / len(valid_loader)\n","    train_loss_list.append(train_loss)\n","\n","    # Print training/validation statistics\n","    print(\n","        \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n","            epoch, train_loss, valid_loss\n","        )\n","    )\n","\n","    # Save model if validation loss has decreased\n","    if valid_loss <= valid_loss_min:\n","        print(\n","            \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n","                valid_loss_min, valid_loss\n","            )\n","        )\n","        torch.save(model.state_dict(), \"model_cifar_3D.pt\")\n","        valid_loss_min = valid_loss"],"metadata":{"id":"heaTYMsTZIEu"},"id":"heaTYMsTZIEu","execution_count":null,"outputs":[]},{"cell_type":"code","source":["import matplotlib.pyplot as plt\n","\n","plt.plot(range(n_epochs), train_loss_list)\n","plt.xlabel(\"Epoch\")\n","plt.ylabel(\"Loss\")\n","plt.title(\"Performance of Model 2\")\n","plt.show()"],"metadata":{"id":"K5lIwyTZZNTt"},"id":"K5lIwyTZZNTt","execution_count":null,"outputs":[]},{"cell_type":"code","source":["model.load_state_dict(torch.load(\"./model_cifar_3D.pt\"))\n","\n","# track test loss\n","test_loss = 0.0\n","class_correct = list(0.0 for i in range(10))\n","class_total = list(0.0 for i in range(10))\n","\n","model.eval()\n","# iterate over test data\n","for data, target in test_loader:\n","    # move tensors to GPU if CUDA is available\n","    if train_on_gpu:\n","        data, target = data.cuda(), target.cuda()\n","    # forward pass: compute predicted outputs by passing inputs to the model\n","    output = model(data)\n","    # calculate the batch loss\n","    loss = criterion(output, target)\n","    # update test loss\n","    test_loss += loss.item() * data.size(0)\n","    # convert output probabilities to predicted class\n","    _, pred = torch.max(output, 1)\n","    # compare predictions to true label\n","    correct_tensor = pred.eq(target.data.view_as(pred))\n","    correct = (\n","        np.squeeze(correct_tensor.numpy())\n","        if not train_on_gpu\n","        else np.squeeze(correct_tensor.cpu().numpy())\n","    )\n","    # calculate test accuracy for each object class\n","    for i in range(batch_size):\n","        label = target.data[i]\n","        class_correct[label] += correct[i].item()\n","        class_total[label] += 1\n","\n","# average test loss\n","test_loss = test_loss / len(test_loader)\n","print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n","\n","for i in range(10):\n","    if class_total[i] > 0:\n","        print(\n","            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n","   
         % (\n","                classes[i],\n","                100 * class_correct[i] / class_total[i],\n","                np.sum(class_correct[i]),\n","                np.sum(class_total[i]),\n","            )\n","        )\n","    else:\n","        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n","\n","print(\n","    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n","    % (\n","        100.0 * np.sum(class_correct) / np.sum(class_total),\n","        np.sum(class_correct),\n","        np.sum(class_total),\n","    )\n",")"],"metadata":{"id":"oAeC05EmZN8P"},"id":"oAeC05EmZN8P","execution_count":null,"outputs":[]},{"cell_type":"markdown","id":"bc381cf4","metadata":{"id":"bc381cf4"},"source":["## Exercise 2: Quantization: try to compress the CNN to save space\n","\n","Quantization doc is available from https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic\n","        \n","The Exercise is to quantize post training the above CNN model. Compare the size reduction and the impact on the classification accuracy\n","\n","\n","The size of the model is simply the size of the file."]},{"cell_type":"code","execution_count":null,"id":"ef623c26","metadata":{"id":"ef623c26"},"outputs":[],"source":["import os\n","\n","def print_size_of_model(model, label=\"\"):\n","    torch.save(model.state_dict(), \"temp.p\")\n","    size = os.path.getsize(\"temp.p\")\n","    print(\"model: \", label, \" \\t\", \"Size (KB):\", size / 1e3)\n","    os.remove(\"temp.p\")\n","    return size\n","\n","\n","size_model=print_size_of_model(model, \"fp32\")"]},{"cell_type":"markdown","id":"05c4e9ad","metadata":{"id":"05c4e9ad"},"source":["Post training quantization example"]},{"cell_type":"code","execution_count":null,"id":"c4c65d4b","metadata":{"id":"c4c65d4b"},"outputs":[],"source":["import torch.quantization\n","\n","\n","quantized_model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)\n","torch.save(quantized_model.state_dict(), \"quantized_model_cifar.pt\")\n","\n","size_quantized = print_size_of_model(quantized_model, \"int8\")\n","\n","print(\"The size of the original model has been divided by %.2f compared to the Quantized model\" % (size_model / size_quantized))"]},{"cell_type":"markdown","id":"7b108e17","metadata":{"id":"7b108e17"},"source":["For each class, compare the classification test accuracy of the initial model and the quantized model. 
Also give the overall test accuracy for both models."]},{"cell_type":"markdown","id":"a0a34b90","metadata":{"id":"a0a34b90"},"source":["Try training aware quantization to mitigate the impact on the accuracy (doc available here https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic)"]},{"cell_type":"code","source":["quantized_model.load_state_dict(torch.load(\"./quantized_model_cifar.pt\", map_location=torch.device('cpu')))\n","\n","# track test loss\n","test_loss_quantized = 0.0\n","class_correct_quantized = list(0.0 for i in range(10))\n","class_total_quantized = list(0.0 for i in range(10))\n","\n","quantized_model.eval()\n","quantized_model.cpu()\n","# iterate over test data\n","for data, target in test_loader:\n","    # forward pass: compute predicted outputs by passing inputs to the model\n","    output = quantized_model(data)\n","    # calculate the batch loss\n","    loss = criterion(output, target)\n","    # update test loss\n","    test_loss_quantized += loss.item() * data.size(0)\n","    # convert output probabilities to predicted class\n","    _, pred = torch.max(output, 1)\n","    # compare predictions to true label\n","    correct_tensor = pred.eq(target.data.view_as(pred))\n","    correct = np.squeeze(correct_tensor.numpy()) #np.squeeze(correct_tensor.cpu().numpy()\n","    # calculate test accuracy for each object class\n","    for i in range(batch_size):\n","        label = target.data[i]\n","        class_correct_quantized[label] += correct[i].item()\n","        class_total_quantized[label] += 1\n","\n","# average test loss\n","test_loss_quantized = test_loss_quantized / len(test_loader)\n","loss_delta = test_loss_quantized - test_loss\n","print(\"Original Test Loss: {:.6f}\\n\".format(test_loss))\n","print(\"Quantized Test Loss: {:.6f}\\n\".format(test_loss_quantized))\n","print(\"Loss Delta: {:.6f}\\n\".format(loss_delta))\n","\n","for i in range(10):\n","    if class_total[i] > 0:\n","        print(\n","            \"Quantized Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n","            % (\n","                classes[i],\n","                100 * class_correct_quantized[i] / class_total_quantized[i],\n","                np.sum(class_correct_quantized[i]),\n","                np.sum(class_total_quantized[i]),\n","            ))\n","        print(\n","            \"Original Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n","            % (\n","                classes[i],\n","                100 * class_correct[i] / class_total[i],\n","                np.sum(class_correct[i]),\n","                np.sum(class_total[i]),\n","            ))\n","        print(\n","            \"Difference in Instances Correctly classified of %5s: %2d \\n\"\n","            % (\n","                classes[i],\n","                class_correct[i]-class_correct_quantized[i],\n","\n","            )\n","        )\n","    else:\n","        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n","\n","print(\n","    \"\\nQuantized Test Accuracy (Overall): %2d%% (%2d/%2d)\"\n","    % (\n","        100.0 * np.sum(class_correct_quantized) / np.sum(class_total_quantized),\n","        np.sum(class_correct_quantized),\n","        np.sum(class_total_quantized),\n","    )\n",")\n","print(\n","    \"\\nOriginal Test Accuracy (Overall): %2d%% (%2d/%2d)\"\n","    % (\n","        100.0 * np.sum(class_correct) / np.sum(class_total),\n","        np.sum(class_correct),\n","        np.sum(class_total),\n","    )\n",")\n","print(\n","         \"\\nDifference in Instances 
Correctly classified overall: %2d \\n\"\n","        % (\n","            np.sum(class_correct)-np.sum(class_correct_quantized),\n","            )\n","        )"],"metadata":{"id":"-1DG65AMAHFd"},"id":"-1DG65AMAHFd","execution_count":null,"outputs":[]},{"cell_type":"markdown","id":"201470f9","metadata":{"id":"201470f9"},"source":["## Exercise 3: working with pre-trained models.\n","\n","PyTorch offers several pre-trained models https://pytorch.org/vision/0.8/models.html        \n","We will use ResNet50 trained on ImageNet dataset (https://www.image-net.org/index.php). Use the following code with the files `imagenet-simple-labels.json` that contains the imagenet labels and the image dog.png that we will use as test.\n"]},{"cell_type":"code","execution_count":null,"id":"b4d13080","metadata":{"id":"b4d13080"},"outputs":[],"source":["import json\n","from PIL import Image\n","\n","# Choose an image to pass through the model\n","test_image = \"dog.png\"\n","\n","# Configure matplotlib for pretty inline plots\n","#%matplotlib inline\n","#%config InlineBackend.figure_format = 'retina'\n","\n","# Prepare the labels\n","with open(\"imagenet-simple-labels.json\") as f:\n","    labels = json.load(f)\n","\n","# First prepare the transformations: resize the image to what the model was trained on and convert it to a tensor\n","data_transform = transforms.Compose(\n","    [\n","        transforms.Resize((224, 224)),\n","        transforms.ToTensor(),\n","        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n","    ]\n",")\n","# Load the image\n","\n","image = Image.open(test_image)\n","plt.imshow(image), plt.xticks([]), plt.yticks([])\n","\n","# Now apply the transformation, expand the batch dimension, and send the image to the GPU\n","# image = data_transform(image).unsqueeze(0).cuda()\n","image = data_transform(image).unsqueeze(0)\n","\n","# Download the model if it's not there already. It will take a bit on the first run, after that it's fast\n","model = models.resnet50(pretrained=True)\n","# Send the model to the GPU\n","# model.cuda()\n","# Set layers such as dropout and batchnorm in evaluation mode\n","model.eval()\n","\n","# Get the 1000-dimensional model output\n","out = model(image)\n","# Find the predicted class\n","print(\"Predicted class is: {}\".format(labels[out.argmax()]))"]},{"cell_type":"markdown","id":"184cfceb","metadata":{"id":"184cfceb"},"source":["Experiments:\n","\n","Study the code and the results obtained. Possibly add other images downloaded from the internet.\n","\n","What is the size of the model? 
Quantize it and then check if the model is still able to correctly classify the other images.\n","\n","Experiment with other pre-trained CNN models.\n","\n","    \n"]},{"cell_type":"code","source":["import json\n","from PIL import Image\n","\n","# Choose an image to pass through the model\n","test_image1 = \"koala.jpg\"\n","test_image2 = \"raton.jpg\"\n","\n","# Configure matplotlib for pretty inline plots\n","#%matplotlib inline\n","#%config InlineBackend.figure_format = 'retina'\n","\n","# Prepare the labels\n","with open(\"imagenet-simple-labels.json\") as f:\n","    labels = json.load(f)\n","\n","# First prepare the transformations: resize the image to what the model was trained on and convert it to a tensor\n","data_transform = transforms.Compose(\n","    [\n","        transforms.Resize((224, 224)),\n","        transforms.ToTensor(),\n","        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n","    ]\n",")\n","# Load the image\n","image1 = Image.open(test_image1)\n","image2 = Image.open(test_image2)\n","\n","plt.figure()\n","\n","\n","f, axarr = plt.subplots(2,1)\n","\n","axarr[0].imshow(image1)\n","axarr[1].imshow(image2)\n","\n","#plt.imshow(image), plt.xticks([]), plt.yticks([])\n","\n","# Now apply the transformation, expand the batch dimension, and send the image to the GPU\n","# image = data_transform(image).unsqueeze(0).cuda()\n","image1 = data_transform(image1).unsqueeze(0)\n","image2 = data_transform(image2).unsqueeze(0)\n","\n","# Download the model if it's not there already. It will take a bit on the first run, after that it's fast\n","\n","resnet50_model=models.resnet50(pretrained=True)\n","resnet50_model_quantized=torch.quantization.quantize_dynamic(resnet50_model, dtype=torch.qint8)\n","\n","googlenet_model = models.googlenet(pretrained=True)\n","googlenet_model_quantized = torch.quantization.quantize_dynamic(googlenet_model, dtype=torch.qint8)\n","\n","\n","# Send the model to the GPU\n","# model.cuda()\n","# Set layers such as dropout and batchnorm in evaluation mode\n","resnet50_model.eval()\n","resnet50_model_quantized.eval()\n","\n","googlenet_model.eval()\n","googlenet_model_quantized.eval()\n","\n","out1=resnet50_model(image1)\n","out1q=resnet50_model_quantized(image1)\n","out2=googlenet_model(image1)\n","out2q=googlenet_model_quantized(image1)\n","\n","out3=resnet50_model(image2)\n","out3q=resnet50_model_quantized(image2)\n","out4=googlenet_model(image2)\n","out4q=googlenet_model_quantized(image2)\n","\n","\n","# Get the 1000-dimensional model output\n","\n","# Find the predicted class\n","print(\"Predicted class for Resnet50 is Koala: {}\".format(labels[out1.argmax()]))\n","print(\"Predicted class for GoogleNet is Koala: {}\".format(labels[out2.argmax()]))\n","print(\"Predicted class for Quantized_Resnet is Koala: {}\".format(labels[out1q.argmax()]))\n","print(\"Predicted class for Quantized_GoogleNet is Koala: {}\".format(labels[out2q.argmax()]))\n","print(\"Predicted class for Resnet50 is Racoon: {}\".format(labels[out3.argmax()]))\n","print(\"Predicted class for GoogleNet is Racoon: {}\".format(labels[out4.argmax()]))\n","print(\"Predicted class for Quantized_Resnet is Racoon: {}\".format(labels[out3q.argmax()]))\n","print(\"Predicted class for Quantized_GoogleNet is Racoon: {}\".format(labels[out4q.argmax()]))"],"metadata":{"id":"wfgNGDlsnYBg"},"id":"wfgNGDlsnYBg","execution_count":null,"outputs":[]},{"cell_type":"code","source":["size_resnet50=print_size_of_model(resnet50_model, \"int8\")\n","size_resnet50_quantized = 
print_size_of_model(resnet50_model_quantized, \"int8\")\n","\n","print(\"The size of the original ResNet50 model has been divided by %.2f compared to the Quantized ResNet50 model\" % (size_resnet50 / size_resnet50_quantized))\n","\n","size_googlenet = print_size_of_model(googlenet_model, \"fp32\")\n","size_googlenet_quantized = print_size_of_model(googlenet_model_quantized, \"int8\")\n","\n","print(\"The size of the original GoogleNet model has been divided by %.2f compared to the Quantized GoogleNet model\" % (size_googlenet / size_googlenet_quantized))"],"metadata":{"id":"ehW_YwAp5CiO"},"id":"ehW_YwAp5CiO","execution_count":null,"outputs":[]},{"cell_type":"markdown","id":"5d57da4b","metadata":{"id":"5d57da4b"},"source":["## Exercise 4: Transfer Learning\n","    \n","    \n","For this work, we will use a pre-trained model (ResNet18) as a descriptor extractor and will refine the classification by training only the last fully connected layer of the network. Thus, the output layer of the pre-trained network will be replaced by a layer adapted to the new classes to be recognized, which in our case will be ants and bees.\n","Download and unzip into your working directory the dataset available at the following address:\n","    \n","https://download.pytorch.org/tutorial/hymenoptera_data.zip\n","    \n","Execute the following code in order to display some images of the dataset."]},{"cell_type":"code","source":["!wget https://download.pytorch.org/tutorial/hymenoptera_data.zip\n","!unzip hymenoptera_data.zip"],"metadata":{"id":"-GhBj4ORpEr3"},"id":"-GhBj4ORpEr3","execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"id":"be2d31f5","metadata":{"id":"be2d31f5"},"outputs":[],"source":["import os\n","\n","import matplotlib.pyplot as plt\n","import numpy as np\n","import torch\n","import torchvision\n","from torchvision import datasets, transforms\n","\n","# Data augmentation and normalization for training\n","# Just normalization for validation\n","data_transforms = {\n","    \"train\": transforms.Compose(\n","        [\n","            transforms.RandomResizedCrop(\n","                224\n","            ),  # ImageNet models were trained on 224x224 images\n","            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n","            transforms.ToTensor(),  # convert it to a PyTorch tensor\n","            transforms.Normalize(\n","                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n","            ),  # ImageNet models expect this norm\n","        ]\n","    ),\n","    \"val\": transforms.Compose(\n","        [\n","            transforms.Resize(256),\n","            transforms.CenterCrop(224),\n","            transforms.ToTensor(),\n","            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n","        ]\n","    ),\n","}\n","\n","data_dir = \"hymenoptera_data\"\n","# Create train and validation datasets and loaders\n","image_datasets = {\n","    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n","    for x in [\"train\", \"val\"]\n","}\n","dataloaders = {\n","    x: torch.utils.data.DataLoader(\n","        image_datasets[x], batch_size=4, shuffle=True, num_workers=0\n","    )\n","    for x in [\"train\", \"val\"]\n","}\n","dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n","class_names = image_datasets[\"train\"].classes\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","\n","# Helper function for displaying images\n","def 
imshow(inp, title=None):\n","    \"\"\"Imshow for Tensor.\"\"\"\n","    inp = inp.numpy().transpose((1, 2, 0))\n","    mean = np.array([0.485, 0.456, 0.406])\n","    std = np.array([0.229, 0.224, 0.225])\n","\n","    # Un-normalize the images\n","    inp = std * inp + mean\n","    # Clip just in case\n","    inp = np.clip(inp, 0, 1)\n","    plt.imshow(inp)\n","    if title is not None:\n","        plt.title(title)\n","    plt.pause(0.001)  # pause a bit so that plots are updated\n","    plt.show()\n","\n","\n","# Get a batch of training data\n","inputs, classes = next(iter(dataloaders[\"train\"]))\n","\n","# Make a grid from batch\n","out = torchvision.utils.make_grid(inputs)\n","\n","imshow(out, title=[class_names[x] for x in classes])\n","\n"]},{"cell_type":"markdown","id":"bbd48800","metadata":{"id":"bbd48800"},"source":["Now, execute the following code which uses a pre-trained model ResNet18 having replaced the output layer for the ants/bees classification and performs the model training by only changing the weights of this output layer."]},{"cell_type":"code","execution_count":null,"id":"572d824c","metadata":{"id":"572d824c"},"outputs":[],"source":["import copy\n","import os\n","import time\n","\n","import matplotlib.pyplot as plt\n","import numpy as np\n","import torch\n","import torch.nn as nn\n","import torch.optim as optim\n","import torchvision\n","from torch.optim import lr_scheduler\n","from torchvision import datasets, transforms\n","\n","# Data augmentation and normalization for training\n","# Just normalization for validation\n","data_transforms = {\n","    \"train\": transforms.Compose(\n","        [\n","            transforms.RandomResizedCrop(\n","                224\n","            ),  # ImageNet models were trained on 224x224 images\n","            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n","            transforms.ToTensor(),  # convert it to a PyTorch tensor\n","            transforms.Normalize(\n","                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n","            ),  # ImageNet models expect this norm\n","        ]\n","    ),\n","    \"val\": transforms.Compose(\n","        [\n","            transforms.Resize(256),\n","            transforms.CenterCrop(224),\n","            transforms.ToTensor(),\n","            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n","        ]\n","    ),\n","}\n","\n","data_dir = \"hymenoptera_data\"\n","# Create train and validation datasets and loaders\n","image_datasets = {\n","    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n","    for x in [\"train\", \"val\"]\n","}\n","dataloaders = {\n","    x: torch.utils.data.DataLoader(\n","        image_datasets[x], batch_size=4, shuffle=True, num_workers=4\n","    )\n","    for x in [\"train\", \"val\"]\n","}\n","dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n","class_names = image_datasets[\"train\"].classes\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","\n","# Helper function for displaying images\n","def imshow(inp, title=None):\n","    \"\"\"Imshow for Tensor.\"\"\"\n","    inp = inp.numpy().transpose((1, 2, 0))\n","    mean = np.array([0.485, 0.456, 0.406])\n","    std = np.array([0.229, 0.224, 0.225])\n","\n","    # Un-normalize the images\n","    inp = std * inp + mean\n","    # Clip just in case\n","    inp = np.clip(inp, 0, 1)\n","    plt.imshow(inp)\n","    if title is not None:\n","        
plt.title(title)\n","    plt.pause(0.001)  # pause a bit so that plots are updated\n","    plt.show()\n","\n","\n","# Get a batch of training data\n","# inputs, classes = next(iter(dataloaders['train']))\n","\n","# Make a grid from batch\n","# out = torchvision.utils.make_grid(inputs)\n","\n","# imshow(out, title=[class_names[x] for x in classes])\n","# training\n","\n","\n","def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n","    since = time.time()\n","\n","    best_model_wts = copy.deepcopy(model.state_dict())\n","    best_acc = 0.0\n","\n","    epoch_time = []  # we'll keep track of the time needed for each epoch\n","\n","    for epoch in range(num_epochs):\n","        epoch_start = time.time()\n","        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n","        print(\"-\" * 10)\n","\n","        # Each epoch has a training and validation phase\n","        for phase in [\"train\", \"val\"]:\n","            if phase == \"train\":\n","                scheduler.step()\n","                model.train()  # Set model to training mode\n","            else:\n","                model.eval()  # Set model to evaluate mode\n","\n","            running_loss = 0.0\n","            running_corrects = 0\n","\n","            # Iterate over data.\n","            for inputs, labels in dataloaders[phase]:\n","                inputs = inputs.to(device)\n","                labels = labels.to(device)\n","\n","                # zero the parameter gradients\n","                optimizer.zero_grad()\n","\n","                # Forward\n","                # Track history if only in training phase\n","                with torch.set_grad_enabled(phase == \"train\"):\n","                    outputs = model(inputs)\n","                    _, preds = torch.max(outputs, 1)\n","                    loss = criterion(outputs, labels)\n","\n","                    # backward + optimize only if in training phase\n","                    if phase == \"train\":\n","                        loss.backward()\n","                        optimizer.step()\n","\n","                # Statistics\n","                running_loss += loss.item() * inputs.size(0)\n","                running_corrects += torch.sum(preds == labels.data)\n","\n","            epoch_loss = running_loss / dataset_sizes[phase]\n","            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n","\n","            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n","\n","            # Deep copy the model\n","            if phase == \"val\" and epoch_acc > best_acc:\n","                best_acc = epoch_acc\n","                best_model_wts = copy.deepcopy(model.state_dict())\n","\n","        # Add the epoch time\n","        t_epoch = time.time() - epoch_start\n","        epoch_time.append(t_epoch)\n","        print()\n","\n","    time_elapsed = time.time() - since\n","    print(\n","        \"Training complete in {:.0f}m {:.0f}s\".format(\n","            time_elapsed // 60, time_elapsed % 60\n","        )\n","    )\n","    print(\"Best val Acc: {:4f}\".format(best_acc))\n","\n","    # Load best model weights\n","    model.load_state_dict(best_model_wts)\n","    return model, epoch_time\n","\n","\n"]},{"cell_type":"code","source":["# Download a pre-trained ResNet18 model and freeze its weights\n","resNet18 = torchvision.models.resnet18(pretrained=True)\n","for param in resNet18.parameters():\n","    param.requires_grad = False\n","\n","# Replace the final fully connected layer\n","# Parameters of newly 
constructed modules have requires_grad=True by default\n","num_ftrs = resNet18.fc.in_features\n","resNet18.fc = nn.Linear(num_ftrs, 2)\n","# Send the model to the GPU\n","resNet18 = resNet18.to(device)\n","# Set the loss function\n","criterion = nn.CrossEntropyLoss()\n","\n","# Observe that only the parameters of the final layer are being optimized\n","optimizer_conv = optim.SGD(resNet18.fc.parameters(), lr=0.001, momentum=0.9)\n","exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n","resNet18, epoch_time = train_model(\n","    resNet18, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",")\n"],"metadata":{"id":"aRrvPNdgSPJd"},"id":"aRrvPNdgSPJd","execution_count":null,"outputs":[]},{"cell_type":"code","source":["def eval_model(model, test_loader, criterion):\n","    model.eval()\n","\n","    test_loss = 0.0\n","    class_correct = list(0.0 for i in range(2))\n","    class_total = list(0.0 for i in range(2))\n","\n","    with torch.no_grad():\n","        for data, target in test_loader:\n","            data, target = data.to(device), target.to(device)\n","\n","            # Forward pass\n","            output = model(data)\n","            loss = criterion(output, target)\n","\n","            # Update statistics\n","            test_loss += loss.item() * data.size(0)\n","            _, pred = torch.max(output, 1)\n","            correct = pred.eq(target.data.view_as(pred))\n","\n","            for i in range(len(target)):\n","                label = target.data[i]\n","                class_correct[label] += correct[i].item()\n","                class_total[label] += 1\n","\n","    # Calculate average test loss\n","    test_loss = test_loss / len(test_loader.dataset)\n","    print(f\"Test Loss: {test_loss:.6f}\\n\")\n","\n","    # Print accuracy for each class\n","    for i in range(2):\n","        if class_total[i] > 0:\n","            print(\n","                f\"Test Accuracy of {class_names[i]}: {100 * class_correct[i] / class_total[i]:.2f}% \"\n","                f\"({int(np.sum(class_correct[i]))}/{int(np.sum(class_total[i]))})\"\n","            )\n","        else:\n","            print(f\"Test Accuracy of {class_names[i]}: N/A (no training examples)\")\n","\n","    # Print overall accuracy\n","    overall_accuracy = 100.0 * np.sum(class_correct) / np.sum(class_total)\n","    print(f\"\\nTest Accuracy (Overall): {overall_accuracy:.2f}% \"\n","          f\"({int(np.sum(class_correct))}/{int(np.sum(class_total))})\")\n","\n","\n","\n","\n"],"metadata":{"id":"06D2WVv_Wwu4"},"id":"06D2WVv_Wwu4","execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"CiWFsGydy1TB"},"source":["Experiments:\n","Study the code and the results obtained.\n","\n","Modify the code and add an \"eval_model\" function to allow\n","the evaluation of the model on a test set (different from the learning and validation sets used during the learning phase). Study the results obtained.\n","\n","Now modify the code to replace the current classification layer with a set of two layers using a \"relu\" activation function for the middle layer, and the \"dropout\" mechanism for both layers. 
Renew the experiments and study the results obtained.\n","\n","Apply ther quantization (post and quantization aware) and evaluate impact on model size and accuracy."],"id":"CiWFsGydy1TB"},{"cell_type":"code","source":["import copy\n","import os\n","import time\n","import matplotlib.pyplot as plt\n","import numpy as np\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","from torch.optim import lr_scheduler\n","from torchvision import datasets, transforms\n","\n","# Function to train the model\n","def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n","    since = time.time()\n","\n","    best_model_wts = copy.deepcopy(model.state_dict())\n","    best_acc = 0.0\n","\n","    epoch_time = []  # we'll keep track of the time needed for each epoch\n","\n","    for epoch in range(num_epochs):\n","        epoch_start = time.time()\n","        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n","        print(\"-\" * 10)\n","\n","        # Each epoch has a training and validation phase\n","        for phase in [\"train\", \"val\"]:\n","            if phase == \"train\":\n","                scheduler.step()\n","                model.train()  # Set model to training mode\n","            else:\n","                model.eval()  # Set model to evaluate mode\n","\n","            running_loss = 0.0\n","            running_corrects = 0\n","\n","            # Iterate over data.\n","            for inputs, labels in dataloaders[phase]:\n","                inputs = inputs.to(device)\n","                labels = labels.to(device)\n","\n","                optimizer.zero_grad()\n","\n","                with torch.set_grad_enabled(phase == \"train\"):\n","                    outputs = model(inputs)\n","                    _, preds = torch.max(outputs, 1)\n","                    loss = criterion(outputs, labels)\n","\n","                    if phase == \"train\":\n","                        loss.backward()\n","                        optimizer.step()\n","\n","                running_loss += loss.item() * inputs.size(0)\n","                running_corrects += torch.sum(preds == labels.data)\n","\n","            epoch_loss = running_loss / dataset_sizes[phase]\n","            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n","\n","            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n","\n","            if phase == \"val\" and epoch_acc > best_acc:\n","                best_acc = epoch_acc\n","                best_model_wts = copy.deepcopy(model.state_dict())\n","\n","        t_epoch = time.time() - epoch_start\n","        epoch_time.append(t_epoch)\n","        print()\n","\n","    time_elapsed = time.time() - since\n","    print(\n","        \"Training complete in {:.0f}m {:.0f}s\".format(\n","            time_elapsed // 60, time_elapsed % 60\n","        )\n","    )\n","    print(\"Best val Acc: {:4f}\".format(best_acc))\n","\n","    model.load_state_dict(best_model_wts)\n","    return model, epoch_time\n","\n","# Define the modified ResNet18 model\n","class ModifiedResNet18(nn.Module):\n","    def __init__(self, num_classes=2):\n","        super(ModifiedResNet18, self).__init__()\n","        resnet18 = torchvision.models.resnet18(pretrained=True)\n","        for param in resnet18.parameters():\n","            param.requires_grad = False\n","\n","        # Remove the original fully connected layer\n","        self.features = nn.Sequential(*list(resnet18.children())[:-1])\n","\n","    
    # Add two new fully connected layers\n","        self.fc = nn.Linear(resnet18.fc.in_features, 16)\n","        self.fc2 = nn.Linear(16, num_classes)\n","\n","    def forward(self, x):\n","        x = self.features(x)\n","        x = x.view(x.size(0), -1)\n","        x = F.relu(self.fc(x))\n","        x = self.fc2(x)\n","        return x\n","\n","# Data transformations\n","data_transforms = {\n","    \"train\": transforms.Compose(\n","        [\n","            transforms.RandomResizedCrop(224),\n","            transforms.RandomHorizontalFlip(),\n","            transforms.ToTensor(),\n","            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n","        ]\n","    ),\n","    \"val\": transforms.Compose(\n","        [\n","            transforms.Resize(256),\n","            transforms.CenterCrop(224),\n","            transforms.ToTensor(),\n","            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n","        ]\n","    ),\n","}\n","\n","# Load the data\n","data_dir = \"hymenoptera_data\"\n","image_datasets = {\n","    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n","    for x in [\"train\", \"val\"]\n","}\n","dataloaders = {\n","    x: torch.utils.data.DataLoader(\n","        image_datasets[x], batch_size=4, shuffle=True, num_workers=0\n","    )\n","    for x in [\"train\", \"val\"]\n","}\n","dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n","class_names = image_datasets[\"train\"].classes\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","\n","# Initialize the modified model\n","new_resNet18 = ModifiedResNet18(num_classes=2)\n","new_resNet18 = new_resNet18.to(device)\n","\n","# Set the loss function\n","criterion = nn.CrossEntropyLoss()\n","\n","# Observe that only the parameters of the final layer are being optimized\n","optimizer_conv = optim.SGD(new_resNet18.parameters(), lr=0.001, momentum=0.9)\n","exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n","\n","# Train the modified model\n","new_resNet18, epoch_time = train_model(new_resNet18, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10)\n"],"metadata":{"id":"XSy45U7fM72l"},"id":"XSy45U7fM72l","execution_count":null,"outputs":[]},{"cell_type":"code","source":["\n","print(\"Original Model: \\n\")\n","eval_model(resNet18.to(device), dataloaders[\"val\"], criterion)\n","print(\"\\n\"+\"New Model: \\n\")\n","eval_model(new_resNet18.to(device), dataloaders[\"val\"], criterion)\n"],"metadata":{"id":"mNfk6kNI3AsQ"},"id":"mNfk6kNI3AsQ","execution_count":null,"outputs":[]},{"cell_type":"code","source":["import torchvision.models as models\n","new_resNet18_quantized = torch.quantization.quantize_dynamic(new_resNet18, dtype=torch.qint8)\n","\n","size_resNet18 = print_size_of_model(new_resNet18, \"fp32\")\n","size_resNet18_quantized = print_size_of_model(new_resNet18_quantized, \"fp32_quant\")\n","\n","print(\n","    \"\\nThe size of the resNet18 model is %.2fMB, which is %.0f times bigger than the resNet18_quantized model\"\n","    % (\n","        size_resNet18 / 1000000,\n","        size_resNet18 / size_resNet18_quantized\n","    )\n",")"],"metadata":{"id":"AGeIeacy2Xgd"},"id":"AGeIeacy2Xgd","execution_count":null,"outputs":[]},{"cell_type":"markdown","id":"04a263f0","metadata":{"id":"04a263f0"},"source":["## Optional\n","    \n","Try this at home!!\n","\n","\n","Pytorch offers a framework to export a given CNN to your selfphone (either android or iOS). 
Have a look at the tutorial https://pytorch.org/mobile/home/\n","\n","The exercise consists in deploying the CNN of Exercise 4 on your phone and then testing it live.\n","\n"]},{"cell_type":"markdown","id":"fe954ce4","metadata":{"id":"fe954ce4"},"source":["## Author\n","\n","Alberto BOSIO - Ph. D."]}],"metadata":{"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.8.5"},"vscode":{"interpreter":{"hash":"9e3efbebb05da2d4a1968abe9a0645745f54b63feb7a85a514e4da0495be97eb"}},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":5}
\ No newline at end of file
diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
deleted file mode 100644
index 2ecfce9..0000000
--- a/TD2 Deep Learning.ipynb	
+++ /dev/null
@@ -1,953 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "id": "7edf7168",
-   "metadata": {},
-   "source": [
-    "# TD2: Deep learning"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "fbb8c8df",
-   "metadata": {},
-   "source": [
-    "In this TD, you must modify this notebook to answer the questions. To do this,\n",
-    "\n",
-    "1. Fork this repository\n",
-    "2. Clone your forked repository on your local computer\n",
-    "3. Answer the questions\n",
-    "4. Commit and push regularly\n",
-    "\n",
-    "The last commit is due on Sunday, December 1, 11:59 PM. Later commits will not be taken into account."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "3d167a29",
-   "metadata": {},
-   "source": [
-    "Install and test PyTorch from  https://pytorch.org/get-started/locally."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "330a42f5",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%pip install torch torchvision"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "0882a636",
-   "metadata": {},
-   "source": [
-    "\n",
-    "To test run the following code"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b1950f0a",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch\n",
-    "\n",
-    "N, D = 14, 10\n",
-    "x = torch.randn(N, D).type(torch.FloatTensor)\n",
-    "print(x)\n",
-    "\n",
-    "from torchvision import models\n",
-    "\n",
-    "alexnet = models.alexnet()\n",
-    "print(alexnet)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "23f266da",
-   "metadata": {},
-   "source": [
-    "## Exercise 1: CNN on CIFAR10\n",
-    "\n",
-    "The goal is to apply a Convolutional Neural Net (CNN) model on the CIFAR10 image dataset and test the accuracy of the model on the basis of image classification. Compare the Accuracy VS the neural network implemented during TD1.\n",
-    "\n",
-    "Have a look at the following documentation to be familiar with PyTorch.\n",
-    "\n",
-    "https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\n",
-    "\n",
-    "https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "4ba1c82d",
-   "metadata": {},
-   "source": [
-    "You can test if GPU is available on your machine and thus train on it to speed up the process"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6e18f2fd",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch\n",
-    "\n",
-    "# check if CUDA is available\n",
-    "train_on_gpu = torch.cuda.is_available()\n",
-    "\n",
-    "if not train_on_gpu:\n",
-    "    print(\"CUDA is not available.  Training on CPU ...\")\n",
-    "else:\n",
-    "    print(\"CUDA is available!  Training on GPU ...\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "5cf214eb",
-   "metadata": {},
-   "source": [
-    "Next we load the CIFAR10 dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "462666a2",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from torchvision import datasets, transforms\n",
-    "from torch.utils.data.sampler import SubsetRandomSampler\n",
-    "\n",
-    "# number of subprocesses to use for data loading\n",
-    "num_workers = 0\n",
-    "# how many samples per batch to load\n",
-    "batch_size = 20\n",
-    "# percentage of training set to use as validation\n",
-    "valid_size = 0.2\n",
-    "\n",
-    "# convert data to a normalized torch.FloatTensor\n",
-    "transform = transforms.Compose(\n",
-    "    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n",
-    ")\n",
-    "\n",
-    "# choose the training and test datasets\n",
-    "train_data = datasets.CIFAR10(\"data\", train=True, download=True, transform=transform)\n",
-    "test_data = datasets.CIFAR10(\"data\", train=False, download=True, transform=transform)\n",
-    "\n",
-    "# obtain training indices that will be used for validation\n",
-    "num_train = len(train_data)\n",
-    "indices = list(range(num_train))\n",
-    "np.random.shuffle(indices)\n",
-    "split = int(np.floor(valid_size * num_train))\n",
-    "train_idx, valid_idx = indices[split:], indices[:split]\n",
-    "\n",
-    "# define samplers for obtaining training and validation batches\n",
-    "train_sampler = SubsetRandomSampler(train_idx)\n",
-    "valid_sampler = SubsetRandomSampler(valid_idx)\n",
-    "\n",
-    "# prepare data loaders (combine dataset and sampler)\n",
-    "train_loader = torch.utils.data.DataLoader(\n",
-    "    train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers\n",
-    ")\n",
-    "valid_loader = torch.utils.data.DataLoader(\n",
-    "    train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers\n",
-    ")\n",
-    "test_loader = torch.utils.data.DataLoader(\n",
-    "    test_data, batch_size=batch_size, num_workers=num_workers\n",
-    ")\n",
-    "\n",
-    "# specify the image classes\n",
-    "classes = [\n",
-    "    \"airplane\",\n",
-    "    \"automobile\",\n",
-    "    \"bird\",\n",
-    "    \"cat\",\n",
-    "    \"deer\",\n",
-    "    \"dog\",\n",
-    "    \"frog\",\n",
-    "    \"horse\",\n",
-    "    \"ship\",\n",
-    "    \"truck\",\n",
-    "]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "58ec3903",
-   "metadata": {},
-   "source": [
-    "CNN definition (this one is an example)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "317bf070",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch.nn as nn\n",
-    "import torch.nn.functional as F\n",
-    "\n",
-    "# define the CNN architecture\n",
-    "\n",
-    "\n",
-    "class Net(nn.Module):\n",
-    "    def __init__(self):\n",
-    "        super(Net, self).__init__()\n",
-    "        self.conv1 = nn.Conv2d(3, 6, 5)\n",
-    "        self.pool = nn.MaxPool2d(2, 2)\n",
-    "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
-    "        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n",
-    "        self.fc2 = nn.Linear(120, 84)\n",
-    "        self.fc3 = nn.Linear(84, 10)\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "        x = self.pool(F.relu(self.conv1(x)))\n",
-    "        x = self.pool(F.relu(self.conv2(x)))\n",
-    "        x = x.view(-1, 16 * 5 * 5)\n",
-    "        x = F.relu(self.fc1(x))\n",
-    "        x = F.relu(self.fc2(x))\n",
-    "        x = self.fc3(x)\n",
-    "        return x\n",
-    "\n",
-    "\n",
-    "# create a complete CNN\n",
-    "model = Net()\n",
-    "print(model)\n",
-    "# move tensors to GPU if CUDA is available\n",
-    "if train_on_gpu:\n",
-    "    model.cuda()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "a2dc4974",
-   "metadata": {},
-   "source": [
-    "Loss function and training using SGD (Stochastic Gradient Descent) optimizer"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4b53f229",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch.optim as optim\n",
-    "\n",
-    "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
-    "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
-    "\n",
-    "n_epochs = 30  # number of epochs to train the model\n",
-    "train_loss_list = []  # list to store loss to visualize\n",
-    "valid_loss_min = np.Inf  # track change in validation loss\n",
-    "\n",
-    "for epoch in range(n_epochs):\n",
-    "    # Keep track of training and validation loss\n",
-    "    train_loss = 0.0\n",
-    "    valid_loss = 0.0\n",
-    "\n",
-    "    # Train the model\n",
-    "    model.train()\n",
-    "    for data, target in train_loader:\n",
-    "        # Move tensors to GPU if CUDA is available\n",
-    "        if train_on_gpu:\n",
-    "            data, target = data.cuda(), target.cuda()\n",
-    "        # Clear the gradients of all optimized variables\n",
-    "        optimizer.zero_grad()\n",
-    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
-    "        output = model(data)\n",
-    "        # Calculate the batch loss\n",
-    "        loss = criterion(output, target)\n",
-    "        # Backward pass: compute gradient of the loss with respect to model parameters\n",
-    "        loss.backward()\n",
-    "        # Perform a single optimization step (parameter update)\n",
-    "        optimizer.step()\n",
-    "        # Update training loss\n",
-    "        train_loss += loss.item() * data.size(0)\n",
-    "\n",
-    "    # Validate the model\n",
-    "    model.eval()\n",
-    "    for data, target in valid_loader:\n",
-    "        # Move tensors to GPU if CUDA is available\n",
-    "        if train_on_gpu:\n",
-    "            data, target = data.cuda(), target.cuda()\n",
-    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
-    "        output = model(data)\n",
-    "        # Calculate the batch loss\n",
-    "        loss = criterion(output, target)\n",
-    "        # Update average validation loss\n",
-    "        valid_loss += loss.item() * data.size(0)\n",
-    "\n",
-    "    # Calculate average losses\n",
-    "    train_loss = train_loss / len(train_loader)\n",
-    "    valid_loss = valid_loss / len(valid_loader)\n",
-    "    train_loss_list.append(train_loss)\n",
-    "\n",
-    "    # Print training/validation statistics\n",
-    "    print(\n",
-    "        \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n",
-    "            epoch, train_loss, valid_loss\n",
-    "        )\n",
-    "    )\n",
-    "\n",
-    "    # Save model if validation loss has decreased\n",
-    "    if valid_loss <= valid_loss_min:\n",
-    "        print(\n",
-    "            \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n",
-    "                valid_loss_min, valid_loss\n",
-    "            )\n",
-    "        )\n",
-    "        torch.save(model.state_dict(), \"model_cifar.pt\")\n",
-    "        valid_loss_min = valid_loss"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "13e1df74",
-   "metadata": {},
-   "source": [
-    "Does overfit occur? If so, do an early stopping."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d39df818",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "\n",
-    "plt.plot(range(n_epochs), train_loss_list)\n",
-    "plt.xlabel(\"Epoch\")\n",
-    "plt.ylabel(\"Loss\")\n",
-    "plt.title(\"Performance of Model 1\")\n",
-    "plt.show()"
-   ]
-  },
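-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a01",
-   "metadata": {},
-   "source": [
-    "Below is a minimal patience-based early-stopping sketch (not part of the original code). It reuses the `model`, `criterion`, `optimizer`, `train_loader`, `valid_loader` and `train_on_gpu` variables defined above; the `patience` value is only illustrative."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a02",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Sketch: stop training once the validation loss has not improved for `patience` epochs\n",
-    "patience = 5  # illustrative value\n",
-    "epochs_no_improve = 0\n",
-    "valid_loss_min = np.Inf\n",
-    "\n",
-    "for epoch in range(n_epochs):\n",
-    "    # Training phase\n",
-    "    model.train()\n",
-    "    for data, target in train_loader:\n",
-    "        if train_on_gpu:\n",
-    "            data, target = data.cuda(), target.cuda()\n",
-    "        optimizer.zero_grad()\n",
-    "        loss = criterion(model(data), target)\n",
-    "        loss.backward()\n",
-    "        optimizer.step()\n",
-    "\n",
-    "    # Validation phase\n",
-    "    model.eval()\n",
-    "    valid_loss = 0.0\n",
-    "    with torch.no_grad():\n",
-    "        for data, target in valid_loader:\n",
-    "            if train_on_gpu:\n",
-    "                data, target = data.cuda(), target.cuda()\n",
-    "            valid_loss += criterion(model(data), target).item() * data.size(0)\n",
-    "    valid_loss = valid_loss / len(valid_loader.sampler)\n",
-    "    print(\"Epoch: {}  Validation Loss: {:.6f}\".format(epoch, valid_loss))\n",
-    "\n",
-    "    # Early-stopping bookkeeping\n",
-    "    if valid_loss < valid_loss_min:\n",
-    "        valid_loss_min = valid_loss\n",
-    "        epochs_no_improve = 0\n",
-    "        torch.save(model.state_dict(), \"model_cifar.pt\")\n",
-    "    else:\n",
-    "        epochs_no_improve += 1\n",
-    "        if epochs_no_improve >= patience:\n",
-    "            print(\"Early stopping at epoch {}\".format(epoch))\n",
-    "            break"
-   ]
-  },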
-  {
-   "cell_type": "markdown",
-   "id": "11df8fd4",
-   "metadata": {},
-   "source": [
-    "Now loading the model with the lowest validation loss value\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "e93efdfc",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
-    "\n",
-    "# track test loss\n",
-    "test_loss = 0.0\n",
-    "class_correct = list(0.0 for i in range(10))\n",
-    "class_total = list(0.0 for i in range(10))\n",
-    "\n",
-    "model.eval()\n",
-    "# iterate over test data\n",
-    "for data, target in test_loader:\n",
-    "    # move tensors to GPU if CUDA is available\n",
-    "    if train_on_gpu:\n",
-    "        data, target = data.cuda(), target.cuda()\n",
-    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
-    "    output = model(data)\n",
-    "    # calculate the batch loss\n",
-    "    loss = criterion(output, target)\n",
-    "    # update test loss\n",
-    "    test_loss += loss.item() * data.size(0)\n",
-    "    # convert output probabilities to predicted class\n",
-    "    _, pred = torch.max(output, 1)\n",
-    "    # compare predictions to true label\n",
-    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
-    "    correct = (\n",
-    "        np.squeeze(correct_tensor.numpy())\n",
-    "        if not train_on_gpu\n",
-    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
-    "    )\n",
-    "    # calculate test accuracy for each object class\n",
-    "    for i in range(batch_size):\n",
-    "        label = target.data[i]\n",
-    "        class_correct[label] += correct[i].item()\n",
-    "        class_total[label] += 1\n",
-    "\n",
-    "# average test loss\n",
-    "test_loss = test_loss / len(test_loader)\n",
-    "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
-    "\n",
-    "for i in range(10):\n",
-    "    if class_total[i] > 0:\n",
-    "        print(\n",
-    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
-    "            % (\n",
-    "                classes[i],\n",
-    "                100 * class_correct[i] / class_total[i],\n",
-    "                np.sum(class_correct[i]),\n",
-    "                np.sum(class_total[i]),\n",
-    "            )\n",
-    "        )\n",
-    "    else:\n",
-    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
-    "\n",
-    "print(\n",
-    "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
-    "    % (\n",
-    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
-    "        np.sum(class_correct),\n",
-    "        np.sum(class_total),\n",
-    "    )\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "944991a2",
-   "metadata": {},
-   "source": [
-    "Build a new network with the following structure.\n",
-    "\n",
-    "- It has 3 convolutional layers of kernel size 3 and padding of 1.\n",
-    "- The first convolutional layer must output 16 channels, the second 32 and the third 64.\n",
-    "- At each convolutional layer output, we apply a ReLU activation then a MaxPool with kernel size of 2.\n",
-    "- Then, three fully connected layers, the first two being followed by a ReLU activation and a dropout whose value you will suggest.\n",
-    "- The first fully connected layer will have an output size of 512.\n",
-    "- The second fully connected layer will have an output size of 64.\n",
-    "\n",
-    "Compare the results obtained with this new network to those obtained previously."
-   ]
-  },
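-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a03",
-   "metadata": {},
-   "source": [
-    "One possible implementation of the network described above (a sketch; the dropout probability of 0.3 is only a suggested starting value)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a04",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch.nn as nn\n",
-    "import torch.nn.functional as F\n",
-    "\n",
-    "\n",
-    "class Net2(nn.Module):\n",
-    "    def __init__(self):\n",
-    "        super(Net2, self).__init__()\n",
-    "        # 3 convolutional layers, kernel size 3, padding 1\n",
-    "        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n",
-    "        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n",
-    "        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n",
-    "        self.pool = nn.MaxPool2d(2, 2)\n",
-    "        # after three 2x2 poolings a 32x32 CIFAR10 image becomes 4x4x64\n",
-    "        self.fc1 = nn.Linear(64 * 4 * 4, 512)\n",
-    "        self.fc2 = nn.Linear(512, 64)\n",
-    "        self.fc3 = nn.Linear(64, 10)\n",
-    "        self.dropout = nn.Dropout(0.3)  # suggested value, to be tuned\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "        x = self.pool(F.relu(self.conv1(x)))\n",
-    "        x = self.pool(F.relu(self.conv2(x)))\n",
-    "        x = self.pool(F.relu(self.conv3(x)))\n",
-    "        x = x.view(-1, 64 * 4 * 4)\n",
-    "        x = self.dropout(F.relu(self.fc1(x)))\n",
-    "        x = self.dropout(F.relu(self.fc2(x)))\n",
-    "        x = self.fc3(x)\n",
-    "        return x\n",
-    "\n",
-    "\n",
-    "model2 = Net2()\n",
-    "print(model2)"
-   ]
-  },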
-  {
-   "cell_type": "markdown",
-   "id": "bc381cf4",
-   "metadata": {},
-   "source": [
-    "## Exercise 2: Quantization: try to compress the CNN to save space\n",
-    "\n",
-    "Quantization doc is available from https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic\n",
-    "        \n",
-    "The Exercise is to quantize post training the above CNN model. Compare the size reduction and the impact on the classification accuracy \n",
-    "\n",
-    "\n",
-    "The size of the model is simply the size of the file."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ef623c26",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "\n",
-    "\n",
-    "def print_size_of_model(model, label=\"\"):\n",
-    "    torch.save(model.state_dict(), \"temp.p\")\n",
-    "    size = os.path.getsize(\"temp.p\")\n",
-    "    print(\"model: \", label, \" \\t\", \"Size (KB):\", size / 1e3)\n",
-    "    os.remove(\"temp.p\")\n",
-    "    return size\n",
-    "\n",
-    "\n",
-    "print_size_of_model(model, \"fp32\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "05c4e9ad",
-   "metadata": {},
-   "source": [
-    "Post training quantization example"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "c4c65d4b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch.quantization\n",
-    "\n",
-    "\n",
-    "quantized_model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)\n",
-    "print_size_of_model(quantized_model, \"int8\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "7b108e17",
-   "metadata": {},
-   "source": [
-    "For each class, compare the classification test accuracy of the initial model and the quantized model. Also give the overall test accuracy for both models."
-   ]
-  },
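-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a05",
-   "metadata": {},
-   "source": [
-    "A possible helper for this comparison (a sketch; both models are evaluated on the CPU, since dynamically quantized models target CPU inference)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a06",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def class_accuracy(net, loader):\n",
-    "    # Per-class and overall accuracy of `net` on `loader` (CPU evaluation)\n",
-    "    correct = list(0.0 for i in range(10))\n",
-    "    total = list(0.0 for i in range(10))\n",
-    "    net.eval()\n",
-    "    with torch.no_grad():\n",
-    "        for data, target in loader:\n",
-    "            _, pred = torch.max(net(data), 1)\n",
-    "            for i in range(len(target)):\n",
-    "                label = target.data[i]\n",
-    "                correct[label] += (pred[i] == target[i]).item()\n",
-    "                total[label] += 1\n",
-    "    for i in range(10):\n",
-    "        print(\"%10s: %2d%%\" % (classes[i], 100 * correct[i] / total[i]))\n",
-    "    print(\"Overall: %2d%%\" % (100 * sum(correct) / sum(total)))\n",
-    "\n",
-    "\n",
-    "class_accuracy(model.cpu(), test_loader)\n",
-    "class_accuracy(quantized_model, test_loader)"
-   ]
-  },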
-  {
-   "cell_type": "markdown",
-   "id": "a0a34b90",
-   "metadata": {},
-   "source": [
-    "Try training aware quantization to mitigate the impact on the accuracy (doc available here https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic)"
-   ]
-  },
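-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a07",
-   "metadata": {},
-   "source": [
-    "A minimal quantization-aware training sketch for the CIFAR10 CNN (eager-mode API; the QuantStub/DeQuantStub wrapper, the \"fbgemm\" backend and the number of fine-tuning epochs are illustrative choices)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a08",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch.quantization\n",
-    "from torch.quantization import QuantStub, DeQuantStub\n",
-    "\n",
-    "\n",
-    "class QATNet(nn.Module):\n",
-    "    # Wrap the CNN with quant/dequant stubs so that it can be statically quantized\n",
-    "    def __init__(self):\n",
-    "        super(QATNet, self).__init__()\n",
-    "        self.quant = QuantStub()\n",
-    "        self.net = Net()\n",
-    "        self.dequant = DeQuantStub()\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "        return self.dequant(self.net(self.quant(x)))\n",
-    "\n",
-    "\n",
-    "qat_model = QATNet()\n",
-    "qat_model.qconfig = torch.quantization.get_default_qat_qconfig(\"fbgemm\")\n",
-    "torch.quantization.prepare_qat(qat_model, inplace=True)\n",
-    "\n",
-    "optimizer_qat = optim.SGD(qat_model.parameters(), lr=0.01)\n",
-    "qat_model.train()\n",
-    "for epoch in range(2):  # short fine-tuning, purely illustrative\n",
-    "    for data, target in train_loader:\n",
-    "        optimizer_qat.zero_grad()\n",
-    "        loss = criterion(qat_model(data), target)\n",
-    "        loss.backward()\n",
-    "        optimizer_qat.step()\n",
-    "\n",
-    "# Convert to a real int8 model and compare its size with the fp32 one\n",
-    "qat_model.eval()\n",
-    "quantized_qat_model = torch.quantization.convert(qat_model)\n",
-    "print_size_of_model(quantized_qat_model, \"int8 (QAT)\")"
-   ]
-  },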
-  {
-   "cell_type": "markdown",
-   "id": "201470f9",
-   "metadata": {},
-   "source": [
-    "## Exercise 3: working with pre-trained models.\n",
-    "\n",
-    "PyTorch offers several pre-trained models https://pytorch.org/vision/0.8/models.html        \n",
-    "We will use ResNet50 trained on ImageNet dataset (https://www.image-net.org/index.php). Use the following code with the files `imagenet-simple-labels.json` that contains the imagenet labels and the image dog.png that we will use as test.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b4d13080",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import json\n",
-    "from PIL import Image\n",
-    "\n",
-    "# Choose an image to pass through the model\n",
-    "test_image = \"dog.png\"\n",
-    "\n",
-    "# Configure matplotlib for pretty inline plots\n",
-    "#%matplotlib inline\n",
-    "#%config InlineBackend.figure_format = 'retina'\n",
-    "\n",
-    "# Prepare the labels\n",
-    "with open(\"imagenet-simple-labels.json\") as f:\n",
-    "    labels = json.load(f)\n",
-    "\n",
-    "# First prepare the transformations: resize the image to what the model was trained on and convert it to a tensor\n",
-    "data_transform = transforms.Compose(\n",
-    "    [\n",
-    "        transforms.Resize((224, 224)),\n",
-    "        transforms.ToTensor(),\n",
-    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "    ]\n",
-    ")\n",
-    "# Load the image\n",
-    "\n",
-    "image = Image.open(test_image)\n",
-    "plt.imshow(image), plt.xticks([]), plt.yticks([])\n",
-    "\n",
-    "# Now apply the transformation, expand the batch dimension, and send the image to the GPU\n",
-    "# image = data_transform(image).unsqueeze(0).cuda()\n",
-    "image = data_transform(image).unsqueeze(0)\n",
-    "\n",
-    "# Download the model if it's not there already. It will take a bit on the first run, after that it's fast\n",
-    "model = models.resnet50(pretrained=True)\n",
-    "# Send the model to the GPU\n",
-    "# model.cuda()\n",
-    "# Set layers such as dropout and batchnorm in evaluation mode\n",
-    "model.eval()\n",
-    "\n",
-    "# Get the 1000-dimensional model output\n",
-    "out = model(image)\n",
-    "# Find the predicted class\n",
-    "print(\"Predicted class is: {}\".format(labels[out.argmax()]))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "184cfceb",
-   "metadata": {},
-   "source": [
-    "Experiments:\n",
-    "\n",
-    "Study the code and the results obtained. Possibly add other images downloaded from the internet.\n",
-    "\n",
-    "What is the size of the model? Quantize it and then check if the model is still able to correctly classify the other images.\n",
-    "\n",
-    "Experiment with other pre-trained CNN models.\n",
-    "\n",
-    "    \n"
-   ]
-  },
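-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a09",
-   "metadata": {},
-   "source": [
-    "A possible sketch for these experiments: dynamic quantization of the Linear layers of ResNet50, then MobileNetV2 as an example of another pre-trained model. It reuses `print_size_of_model` from Exercise 2 and the `image` and `labels` variables defined above."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a10",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch.quantization\n",
-    "\n",
-    "# Size of the fp32 ResNet50, then of a dynamically quantized copy\n",
-    "print_size_of_model(model, \"resnet50 fp32\")\n",
-    "quantized_resnet50 = torch.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)\n",
-    "print_size_of_model(quantized_resnet50, \"resnet50 int8 (dynamic)\")\n",
-    "\n",
-    "# Check that the quantized model still classifies the test image correctly\n",
-    "out = quantized_resnet50(image)\n",
-    "print(\"Quantized ResNet50 predicts: {}\".format(labels[out.argmax()]))\n",
-    "\n",
-    "# Another pre-trained model, tested in the same way\n",
-    "other_model = models.mobilenet_v2(pretrained=True)\n",
-    "other_model.eval()\n",
-    "print(\"MobileNetV2 predicts: {}\".format(labels[other_model(image).argmax()]))"
-   ]
-  },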
-  {
-   "cell_type": "markdown",
-   "id": "5d57da4b",
-   "metadata": {},
-   "source": [
-    "## Exercise 4: Transfer Learning\n",
-    "    \n",
-    "    \n",
-    "For this work, we will use a pre-trained model (ResNet18) as a descriptor extractor and will refine the classification by training only the last fully connected layer of the network. Thus, the output layer of the pre-trained network will be replaced by a layer adapted to the new classes to be recognized which will be in our case ants and bees.\n",
-    "Download and unzip in your working directory the dataset available at the address :\n",
-    "    \n",
-    "https://download.pytorch.org/tutorial/hymenoptera_data.zip\n",
-    "    \n",
-    "Execute the following code in order to display some images of the dataset."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "be2d31f5",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torchvision\n",
-    "from torchvision import datasets, transforms\n",
-    "\n",
-    "# Data augmentation and normalization for training\n",
-    "# Just normalization for validation\n",
-    "data_transforms = {\n",
-    "    \"train\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.RandomResizedCrop(\n",
-    "                224\n",
-    "            ),  # ImageNet models were trained on 224x224 images\n",
-    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
-    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
-    "            transforms.Normalize(\n",
-    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
-    "            ),  # ImageNet models expect this norm\n",
-    "        ]\n",
-    "    ),\n",
-    "    \"val\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.Resize(256),\n",
-    "            transforms.CenterCrop(224),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "}\n",
-    "\n",
-    "data_dir = \"hymenoptera_data\"\n",
-    "# Create train and validation datasets and loaders\n",
-    "image_datasets = {\n",
-    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
-    "    for x in [\"train\", \"val\"]\n",
-    "}\n",
-    "dataloaders = {\n",
-    "    x: torch.utils.data.DataLoader(\n",
-    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=0\n",
-    "    )\n",
-    "    for x in [\"train\", \"val\"]\n",
-    "}\n",
-    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
-    "class_names = image_datasets[\"train\"].classes\n",
-    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
-    "\n",
-    "# Helper function for displaying images\n",
-    "def imshow(inp, title=None):\n",
-    "    \"\"\"Imshow for Tensor.\"\"\"\n",
-    "    inp = inp.numpy().transpose((1, 2, 0))\n",
-    "    mean = np.array([0.485, 0.456, 0.406])\n",
-    "    std = np.array([0.229, 0.224, 0.225])\n",
-    "\n",
-    "    # Un-normalize the images\n",
-    "    inp = std * inp + mean\n",
-    "    # Clip just in case\n",
-    "    inp = np.clip(inp, 0, 1)\n",
-    "    plt.imshow(inp)\n",
-    "    if title is not None:\n",
-    "        plt.title(title)\n",
-    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
-    "    plt.show()\n",
-    "\n",
-    "\n",
-    "# Get a batch of training data\n",
-    "inputs, classes = next(iter(dataloaders[\"train\"]))\n",
-    "\n",
-    "# Make a grid from batch\n",
-    "out = torchvision.utils.make_grid(inputs)\n",
-    "\n",
-    "imshow(out, title=[class_names[x] for x in classes])\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "bbd48800",
-   "metadata": {},
-   "source": [
-    "Now, execute the following code which uses a pre-trained model ResNet18 having replaced the output layer for the ants/bees classification and performs the model training by only changing the weights of this output layer."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "572d824c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import os\n",
-    "import time\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torch.nn as nn\n",
-    "import torch.optim as optim\n",
-    "import torchvision\n",
-    "from torch.optim import lr_scheduler\n",
-    "from torchvision import datasets, transforms\n",
-    "\n",
-    "# Data augmentation and normalization for training\n",
-    "# Just normalization for validation\n",
-    "data_transforms = {\n",
-    "    \"train\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.RandomResizedCrop(\n",
-    "                224\n",
-    "            ),  # ImageNet models were trained on 224x224 images\n",
-    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
-    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
-    "            transforms.Normalize(\n",
-    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
-    "            ),  # ImageNet models expect this norm\n",
-    "        ]\n",
-    "    ),\n",
-    "    \"val\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.Resize(256),\n",
-    "            transforms.CenterCrop(224),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "}\n",
-    "\n",
-    "data_dir = \"hymenoptera_data\"\n",
-    "# Create train and validation datasets and loaders\n",
-    "image_datasets = {\n",
-    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
-    "    for x in [\"train\", \"val\"]\n",
-    "}\n",
-    "dataloaders = {\n",
-    "    x: torch.utils.data.DataLoader(\n",
-    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=4\n",
-    "    )\n",
-    "    for x in [\"train\", \"val\"]\n",
-    "}\n",
-    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
-    "class_names = image_datasets[\"train\"].classes\n",
-    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
-    "\n",
-    "# Helper function for displaying images\n",
-    "def imshow(inp, title=None):\n",
-    "    \"\"\"Imshow for Tensor.\"\"\"\n",
-    "    inp = inp.numpy().transpose((1, 2, 0))\n",
-    "    mean = np.array([0.485, 0.456, 0.406])\n",
-    "    std = np.array([0.229, 0.224, 0.225])\n",
-    "\n",
-    "    # Un-normalize the images\n",
-    "    inp = std * inp + mean\n",
-    "    # Clip just in case\n",
-    "    inp = np.clip(inp, 0, 1)\n",
-    "    plt.imshow(inp)\n",
-    "    if title is not None:\n",
-    "        plt.title(title)\n",
-    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
-    "    plt.show()\n",
-    "\n",
-    "\n",
-    "# Get a batch of training data\n",
-    "# inputs, classes = next(iter(dataloaders['train']))\n",
-    "\n",
-    "# Make a grid from batch\n",
-    "# out = torchvision.utils.make_grid(inputs)\n",
-    "\n",
-    "# imshow(out, title=[class_names[x] for x in classes])\n",
-    "# training\n",
-    "\n",
-    "\n",
-    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
-    "    since = time.time()\n",
-    "\n",
-    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
-    "    best_acc = 0.0\n",
-    "\n",
-    "    epoch_time = []  # we'll keep track of the time needed for each epoch\n",
-    "\n",
-    "    for epoch in range(num_epochs):\n",
-    "        epoch_start = time.time()\n",
-    "        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n",
-    "        print(\"-\" * 10)\n",
-    "\n",
-    "        # Each epoch has a training and validation phase\n",
-    "        for phase in [\"train\", \"val\"]:\n",
-    "            if phase == \"train\":\n",
-    "                scheduler.step()\n",
-    "                model.train()  # Set model to training mode\n",
-    "            else:\n",
-    "                model.eval()  # Set model to evaluate mode\n",
-    "\n",
-    "            running_loss = 0.0\n",
-    "            running_corrects = 0\n",
-    "\n",
-    "            # Iterate over data.\n",
-    "            for inputs, labels in dataloaders[phase]:\n",
-    "                inputs = inputs.to(device)\n",
-    "                labels = labels.to(device)\n",
-    "\n",
-    "                # zero the parameter gradients\n",
-    "                optimizer.zero_grad()\n",
-    "\n",
-    "                # Forward\n",
-    "                # Track history if only in training phase\n",
-    "                with torch.set_grad_enabled(phase == \"train\"):\n",
-    "                    outputs = model(inputs)\n",
-    "                    _, preds = torch.max(outputs, 1)\n",
-    "                    loss = criterion(outputs, labels)\n",
-    "\n",
-    "                    # backward + optimize only if in training phase\n",
-    "                    if phase == \"train\":\n",
-    "                        loss.backward()\n",
-    "                        optimizer.step()\n",
-    "\n",
-    "                # Statistics\n",
-    "                running_loss += loss.item() * inputs.size(0)\n",
-    "                running_corrects += torch.sum(preds == labels.data)\n",
-    "\n",
-    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
-    "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
-    "\n",
-    "            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n",
-    "\n",
-    "            # Deep copy the model\n",
-    "            if phase == \"val\" and epoch_acc > best_acc:\n",
-    "                best_acc = epoch_acc\n",
-    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
-    "\n",
-    "        # Add the epoch time\n",
-    "        t_epoch = time.time() - epoch_start\n",
-    "        epoch_time.append(t_epoch)\n",
-    "        print()\n",
-    "\n",
-    "    time_elapsed = time.time() - since\n",
-    "    print(\n",
-    "        \"Training complete in {:.0f}m {:.0f}s\".format(\n",
-    "            time_elapsed // 60, time_elapsed % 60\n",
-    "        )\n",
-    "    )\n",
-    "    print(\"Best val Acc: {:4f}\".format(best_acc))\n",
-    "\n",
-    "    # Load best model weights\n",
-    "    model.load_state_dict(best_model_wts)\n",
-    "    return model, epoch_time\n",
-    "\n",
-    "\n",
-    "# Download a pre-trained ResNet18 model and freeze its weights\n",
-    "model = torchvision.models.resnet18(pretrained=True)\n",
-    "for param in model.parameters():\n",
-    "    param.requires_grad = False\n",
-    "\n",
-    "# Replace the final fully connected layer\n",
-    "# Parameters of newly constructed modules have requires_grad=True by default\n",
-    "num_ftrs = model.fc.in_features\n",
-    "model.fc = nn.Linear(num_ftrs, 2)\n",
-    "# Send the model to the GPU\n",
-    "model = model.to(device)\n",
-    "# Set the loss function\n",
-    "criterion = nn.CrossEntropyLoss()\n",
-    "\n",
-    "# Observe that only the parameters of the final layer are being optimized\n",
-    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
-    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
-    "model, epoch_time = train_model(\n",
-    "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
-    ")\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "bbd48800",
-   "metadata": {},
-   "source": [
-    "Experiments:\n",
-    "Study the code and the results obtained.\n",
-    "\n",
-    "Modify the code and add an \"eval_model\" function to allow\n",
-    "the evaluation of the model on a test set (different from the learning and validation sets used during the learning phase). Study the results obtained.\n",
-    "\n",
-    "Now modify the code to replace the current classification layer with a set of two layers using a \"relu\" activation function for the middle layer, and the \"dropout\" mechanism for both layers. Renew the experiments and study the results obtained.\n",
-    "\n",
-    "Apply ther quantization (post and quantization aware) and evaluate impact on model size and accuracy."
-   ]
-  },
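-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a11",
-   "metadata": {},
-   "source": [
-    "A minimal sketch of such an \"eval_model\" function (not the original solution). It assumes a dataloader built like the train/val ones above; here it is simply called on the validation loader for illustration."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a12",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def eval_model(model, dataloader, criterion):\n",
-    "    # Evaluate `model` on `dataloader` and print the average loss and accuracy\n",
-    "    model.eval()\n",
-    "    running_loss = 0.0\n",
-    "    running_corrects = 0\n",
-    "    total = 0\n",
-    "    with torch.no_grad():\n",
-    "        for inputs, labels in dataloader:\n",
-    "            inputs = inputs.to(device)\n",
-    "            labels = labels.to(device)\n",
-    "            outputs = model(inputs)\n",
-    "            loss = criterion(outputs, labels)\n",
-    "            _, preds = torch.max(outputs, 1)\n",
-    "            running_loss += loss.item() * inputs.size(0)\n",
-    "            running_corrects += torch.sum(preds == labels.data)\n",
-    "            total += inputs.size(0)\n",
-    "    print(\"Loss: {:.4f} Acc: {:.4f}\".format(running_loss / total, running_corrects.double() / total))\n",
-    "\n",
-    "\n",
-    "# Illustrative call on the validation loader; replace it with a dedicated test loader\n",
-    "eval_model(model, dataloaders[\"val\"], criterion)"
-   ]
-  },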
-  {
-   "cell_type": "markdown",
-   "id": "04a263f0",
-   "metadata": {},
-   "source": [
-    "## Optional\n",
-    "    \n",
-    "Try this at home!! \n",
-    "\n",
-    "\n",
-    "Pytorch offers a framework to export a given CNN to your selfphone (either android or iOS). Have a look at the tutorial https://pytorch.org/mobile/home/\n",
-    "\n",
-    "The Exercise consists in deploying the CNN of Exercise 4 in your phone and then test it on live.\n",
-    "\n"
-   ]
-  },
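-  {
-   "cell_type": "markdown",
-   "id": "ad0e5a13",
-   "metadata": {},
-   "source": [
-    "A minimal export sketch for PyTorch Mobile (the file name is illustrative; the saved model must then be bundled in an Android or iOS demo app as shown in the tutorial)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ad0e5a14",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import torch\n",
-    "from torch.utils.mobile_optimizer import optimize_for_mobile\n",
-    "\n",
-    "# Trace the trained Exercise 4 model on the CPU and save it for the mobile lite interpreter\n",
-    "model.eval()\n",
-    "example = torch.rand(1, 3, 224, 224)\n",
-    "scripted = torch.jit.trace(model.cpu(), example)\n",
-    "optimized = optimize_for_mobile(scripted)\n",
-    "optimized._save_for_lite_interpreter(\"resnet18_ants_bees.ptl\")"
-   ]
-  },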
-  {
-   "cell_type": "markdown",
-   "id": "fe954ce4",
-   "metadata": {},
-   "source": [
-    "## Author\n",
-    "\n",
-    "Alberto BOSIO - Ph. D."
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.8.5 ('base')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.5"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "9e3efbebb05da2d4a1968abe9a0645745f54b63feb7a85a514e4da0495be97eb"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/koala.jpg b/koala.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6106b5347bc979d2bc2b47d1d8021998f623ff1c
GIT binary patch
literal 9981
zcmex=<NpH&0WUXCHwH#VMg|WC4+e(+4;lJgD>Bm<7<_#hv=|r|I2f21g&3F_7#J8C
z7#SGaGZ|RGYz7831`uGGz|6qJz`!8Fz`zhQfd$Skm^2a0W;QTjV3;%s!3JqEGB7Z;
zGBmO>FjX)#wK6idGBjbpCI)g3<D86=k^(Dz{k&qm#FYG`RK4W<T>Xl~0)0b01APV?
z`-+0Z<m}WEg{0K<%sgB5_q+G2D`ckFs)w8U8u%7Cr)Fe&R28KLSNVk`S7j$#rl{N7
z@p9QzSXJZ}<ffJ+Dpcg=<P}?0*eZcdx5_KF0txFY+1V7Oq*(>IxItAErKH&^LETbb
zUanVete0Puu5V~*X{m2uq;F)TTa=QfTU?n}l31aeSF8**z$LXfxhS)sBr`ux0c2oe
zQhsTPtx{=eW{Oo>Vw!1EN{Xp&QlfE+uAyN{qHbb}v7v5?v2lvIsiB2YnqiU>)cT6t
z0=V^9gF?y9rXsh%%DE^tu_V<cu_V<F6r{R_M!E*3A+TV!(MMBbqYrWvk|XUPB0(;0
zb~gI(Pyz)RJfu_eQf$@B)$Q&6-)3-TU}IrrV`X7uV`XD!XXD@!;Ns%s<PzcI=MfMW
zk&qA<5fhV=Rh5^LR+bSHQ_xXRR#Vs1(v+0fHPF>CP}R`X02#u_&d$!o!6nSaC9EMO
zCZ$0#_<w*wkb~hU!wF_aB?cxzMrJ|A|3?_)85kH@8NmRQ>_C{2iJ66!jh%y&i~Ii(
zhOGh&OpMITOf1Z-tSl@H42-pmOw0@{f~-P{hK_8)fr;!&g(60c6BlwQJ8e8D8g%i4
zig8j=6DOCLxP+vXs+zinrk07RnYo3fm9vYho4bdnS8zyZSa?KaRB}pcT6#uiR&hyb
zS$RceRdY*gTYE=m*QCi)rcRqaW9F<yi<c~2wtU6PRhu?%*}84}j-9&>9X@jO*zpr5
zPhGlv<?6NTH*Vf~_~`MIr_Y|hc=_t%r_W!$e*6C8=P!`I7#Wzsz5)>-UokMC`Ad+2
ziIItgg_(sN<S$01a*zT+7FI<=HX+AA_QXPAC8I_T5vPd@Hy-3vHV*nAnpAX=OH9S&
zq3TDF*T6m_&SOnv`3&wcguiYv@GvtnFbOgXGT1Zx;y+j@x>a@cvNiF%hb|m)E}WO0
zHJf4CiEbs)#EE{3Cdiog=t|u)PvAQyBk%10Vd{q8M#871UYBx5yn7p`8Dcc?-iAH4
zIk|dITK~PU*S|D>M*PBy$0b}vJ{h};G#tLRIOz1Q4exX>C*HDLn_E?o#<hQLy8BEH
z=867=Uxb}^n-x|1e)l}F?wI;#WghG6hH^9HLW_J$w!KnnVX9d6O?{=VsK|AJLp!Ir
z2ENOl_mRKn%UU6gTb0d{2IA}=S8A$#v&z!$-SP41oUn^$6ek|;>0P|wtNK>|1wWqq
zT+2JVb+fS2-***q`yV>|jC&p)bG-kKTDzsKnbgZ)*9|{1F&8fMS$*<W*29mIr<0%E
zK6xPR`Cixj63LCTPG3#SSeN4Vij!^i%cA$Y0^Woge>O92ymL+H&P#u1_u~O)D^r(b
zTr1e}J^XX$l3qXil07`XnEZ<0#{S(mTfS|N$+p!S?(AdfSv_axfdkANWcRW^j+x&2
zUioTG-gUE;JC(U44?PSO$#psTWX_y<bJ<s}dmj;8Y7!_@vtZ-*mp%WcEcvoAFzU|v
z$p*Jg=G0ydoM5kirvKEAwWasnKAgB!e>qQ_$3HC1@_gQM_1j9jC7-3wh+fpY^VV)R
z59Xf9U+>1nK6?G~{T@%<m6jVe%P=0C_~VK2yW*eM3S-V(ebJK}rE@$tTV6mTQue?(
zft2U>RnjjnU$JO@rr;H^H#Y4xyLEf+PHm8G_{CH5@8zQH&%B+@xnI>@W)##op0z_I
z$avOsov+oEi&(st_eU*VmHpvBYv8Bjm)1PVmF+0C<yd;Q@=Tp_#zP(XiD&a)R_%^C
zw8rsgs~GoOnLegjhcgfC*Rkf>@+>mEk8urep8p1;%^L&iyCs)r^U8*luL##EjGVP?
ze{Nb|>P%%W<4M^{(w9x7oq0ArjrDW<lYRQ1!a>gR?tm+cBO+>#o_qLL+j{%PRbBVo
z!z&-Z)h_AX;UV&1zSo)lM|E?R=BsF==I(Z{`qbGrclpb6lFy1lTGJWKFW=kq&3dQQ
z>yz7cLiRm8pS9xHU)Q(d$uD2jX{_quaFjT)N^1I%+p9bJj(JR8&$cJ((zl4U2cB%J
z>icSwa^m^<Zw$=0HrL!e`}pMEzF+?tj&2ur-nnkRO_Sv6^M70)dEeAynVkAKxb?gO
z*H+bKUs<#_YWMEl{dLV*w~KduiWEM5{4_D#_vR~C->|l~`@8c@xj7bgg~T>Llis*%
z*MEk#Ut0pQz4CM)HE2Ga&S3uZ`p5Nr5odcpJ>0HW<oIYW`{Am!y<0BqQt8zzI>P@d
z+fzHI_~>`L0~>cn?@pa&P$qb+`f6+;%d|CXFLWLLkhZq4cG*#7m3?sr`>nP=VfeRo
z?@DIX-O0ArJpogfXy(*Rf9&lj-=QDqE|+*T=yIaK?R!4udEVtW#8)Pn^*@g_@6(mj
zt$Mm7__LvM*WL$nE%R*7eE2Qp^Q&-ceNesDu`1tLm#%57{_x&Ss;+DE`DvLRlXZ4{
zv7hm{KQPw!V8+Ec`(oYd{5=0Ns9ak=UBvlhi|ob)p)ucNwWofp{dF_?(5f|;&-@F~
zo@C@7cC5aI=Wk);iF<m_OpksD)IFnjd)u3ibb0-|fBmYi&(1Pmq`o_;aqcrCNv;nf
z=?pxj7ANhl-hJ&W!>Qdnd%ISO3|j*Ki{<5yv`>Fc+H@!S_RMqY);HcAn(zPotX9=-
zqi1i{MRW&D_WJO&C4tf8<`je2BYbX^?~3ZJw5ImFEv?(mD`{}+Xh)K|7SqRH8|`~V
zS8qzodY}A4Tl<)E!`q)%zwXq#vpwqB;-bRl=BR0To|0=6?j2`6n{Q##EhN9kE&Gy<
zKHCj(o4%{h=BG?9m|Ai0Y?Xa${^Ln^m>d!hir)3L&Qkemb%aZNX;GaC<C^<rY#hhJ
zO)u^}wd%>N&+nyP7w)|kwWw3vvd-h5+w6;G*|JfxSsS|@LOjo_pH^Z1@pZk$$7{cD
zn%!>Nq+PSj#kO`v{|nzLma6H$jC3^E+?Xc!pV~2B+_^IEtnx9&2@f5PMc=lWb;>Z~
zTHdSU6Mrm{o)Mn2$@Q6FkK6KRzb0SXkdu2)bNh*#ImO3K@4Wt1*k&@XGGKRUf1cBS
zhOe)u?(EHd#%^(^?rprc#L~5AYfVEM&Yym4ZDbX^UcECV?e5+wLWLKuvfP#X=4lyF
zWH^UEK5KJkrnJg*b0OA72Av~?jn{vL<*B_&-CgUu_?)M88{?N1dG8KR3=t~lYxrie
zD7&omeyZ&r_jjiiPW)$RWIXZfYNhXys40QR`QOF#Y*6NYJ@MYG?&x-Vv!mWGT94m7
zbZT+i+J25ZC-b##+hp9dP;Dy?xtj8s@4_0x`+eEh{;~h?E%kNsjXc1e=Th^a?Zgwo
zu=#Fj*MqPA;muX4%2D%RQEB|{dGHUv@>jX5H>TW|<bCPQ6L?s{V`3n?YhZ&tSFO7C
zrkslFN8bttsq748=55O6fA~kqRxbL`Zr+P}YuxNh_UM1L|6ui1zvJ4*J1^y8X8e83
z68Oru@Pfm^B$wVNU3VEjUGFz|%#gm}-;`IsuO%;DdQs#?n^8ob`lP-6@&6e@3nxX~
z^{-XEw*2YgRYv=7=4{;gs3ZA(S<KhxyBUwMRkLomdM$hR?$@tVXF0j9;g;YQv+tOA
zTJQ#c)~9^g?6nJSZ{6AwUUXVf$-Aj#%i4KM`**&)P(3eO)z|iHY+P)Qzi8~aUfnGR
z)*ZU-X0}mT_Vcl_zFLm+Sq~MP(q={eD8F?`;cTk|BfGr+%fPtoSxf&M@=VRw`IMY|
zYpY3nU$(@g5Qz?tGmgJ++S<(tc>Ly@uK%rft_!0!-MX{ob^+JX1_Oqei~bjv^xW8D
zbW$Se%u#>!iLTyNuQIk>_@-j9HrKFug7vdi9oC_5&F<%PRTT$|rd3|*b}wHRS-VJ7
zVzs!G$+uPMTZ{VG3aq3qez?WI*?rdioeseg>m4)Fre1uytH0wbzs8ZeusF_Dzr^P2
zem%Hj$7$h{YFoFywT%vt4*#3@Y?rLoS>3k>n74W_yR+__S<hQ(-81_)XnC|>UcS}4
zlqK9?!eI-EJMN}SO%x_a*s1M{H99&cxk85Lu}R35q$kJyK26uMxg7W5&aBq2M<#mt
z81zpwwLf#6chybxsrz{(ix&D;r8Pd{c&|R~-V?bc#&QgHQ%_V}yK}=|*^PhFlZkyl
z*LlB~xn=5-i^pQ39Sf7CP92T5-WT_LA=|b$N;j7;S;IeBT3hgSlm8F-LmL%SQ?=eV
zot<EH?ai%4db4ij2*?|GeBiLW-S_;~`kCr0XWuwD`^W@O`!hSktP7)yB4TDs=q22F
z-T2+NaQ}_`Z(j<O{X^^x&cBSEVm>*u@G$F{6W@N9eK0ui_4(~tMJtwWSTLttf%oaL
z$To@6=gXJYs(xU*bNr!X+KInkb~3-vep>Nta^RH~vB2r+wOs<~rM2-7=O%l8aBs=U
zHI<$*>2)5DM$zq!laF$mMOVr72cEYH`)D%x#KuKE(~=vsPkxQ>30uw6bm+cOS-B14
z>-TY)kN9O?MQwVPy)!i0Uc-VV`S!2N8?ENee7s&N^KGr9yU`M5&h77-@9v-e)AYD~
z*SmMOi##?&ulzl+$@s_je_M}4-MSm^|9ytb>i!@%ZMRpiY^{#YDpIZ3aOZ>ZuksaB
zCTV7DF)Lj!aGq!K-+4u5M>kjIg<ITV{IhTBqj@?XdCceK?((=aae}n`(y))>UF*ME
z&%7kONk-tf#Mwm0v_mdBY*mx|^P`q7TQl2|Tj*zeMV|7`wX=O(mm3}PyeswOvBdL<
zKmIdBEf<MP*7~$6g{yk<KZmul?m5XEm3d5GUReK2+B|t*dx&ysvHY3aC2}&H!VQLV
zp06+oP7IV=a^PX-9d-*@tBX;Qg3_B`=B$>}4m&1rK<e4FAP&>LOXGSM7(Uy)QCF?X
zy)DKyPssVp+L`YJG*7P6Ivg0Ulekkf%lo5Ja%riX*KzB%CDZ#R#l4e19R6Ug8{@i)
zlbz;i>)b7J5<UmM?f&LA_sps;|83bP?UY`#Ww$kH8Z5rFn_nbb>C(lid@0KPRUgl0
zJzQ6;)4Mmy`Q(Gb^Pf%6hppf8J?yxzl-I0h)pPFrTC&ZYDLaUnp-fEv=z+7og^!l*
z-TK@1N7B(9X>SZeOSvT_Lsp*1nY`t}<Vzd$*YHn2d3w$VKF^~TTeF2Nws{uRtn}G-
zS-WpB`&PBNtM2i-a;An9iJr7wE%oSD#4-6NEQRMyCb+EO5<e}sWZu-WFv~l!Z_2}G
z9i8HLTg81!GNWs1eB9C>Y>(I~EpA^C{JO@vx}-IH{)}yw&rO%}I23Rku&}VrQf3uj
z7*^}~W%lZB>n)RxMe5#l{JGdd`9*fswaDk~XC|E1S>vfP=b!!guWMFUB=>e3nMpj6
z_y4$-_qN}ki4Dfh4OU+sZQ-6C6|_pZU(tv^pz=@V#%VLZw+H!unAqT9YgPQ8;rgnB
zT`it5>*e(<&Urpxn)~wFBF{F5iRCGO7JoYb^QCi{;DwBi&Sl07ArlyQuh!PjEwVG-
zF;!4EI8oT_+R3M`rMG&IZr!>v_cl{s@x48-UOj(X)v$B+4}a&&^8Z#n&ySwvWNv94
zVRWWzf8~x}rT-bE_HA4K=JVYr91Em9-c`Q+ae2ARHtnYyGNqOKAAVi=^u&kLi#~n!
z{PF3B{Mz8@!6|Fpv!$(lHQ2w`tb9Dp{z<6hI^}6U8T0S1w61u?`1;4yyI!eHl41PQ
zvwkMaJoxgTLF#PR?VPT6UYm|Zr&YdUwYXhSep6t^l=~i^cyyABMc?^*X+2%Vx&8=4
zp(e|^_kQ+kMe{QAROUP1z87G9QTG0{3e*0=0>{^Z?v}~d7SA|5<JF?8De=3Gt-t;1
zUE%a~o-vP>^@YWK`MHaGrtI-=GcpdUtvy+%v*p{aiT^&$ZMIOkF1c*?ox4Xl&Pi^l
zoBQg1r(QUtgU*CyN8dLFe7z_gbUj!)<cjLv`nfklp2(DcliS)`kYx67$w}+Bdlrvv
zS^YNNz4QFk9f^<gjI19mZFZa%>b~=|sO5PFKmMP8GM<<g8Ej<_I4yBVuATkk^Xq$y
zKQEfb%p+2JO<DA=-At~Tq2Kbj&pn<af1GKP+U3K>Dv_Gko3G!Rx<-3W>m82s-x?Ti
zaRl&NzW@3%a<bdTeQu188*lBpxg}*v@48oqdnV4Sea-gSXJ_Y)Ejc$NPWAlNTJ(Fe
z##dL37=tIp*Vp@Lhuw7Mk(+Vw<bzqKL#(Gu@Xnhzb!$3bxWx12eqYmcHnv#rOKzxQ
zyf3R2GDT%~<n;xTiQoGdo^`k=dL#TPOG4o}!>Czlvo4?NRNy+?;BhYV-Vw2q-=1m{
z<y=pk;PRI^p7YQ2>iX#{?wJRfb#5H|;~L&#x`s{7a96V8#;!+O|1${gy>dTw3v0qj
zzEhR?F(KwflXlz}ex3HtUP(<_DuD4tH2bWja(weu*42G}6Z+*{wq|+kea*nWvSV}Y
z_CNol?H{(gv14;Tw~!uxp7rFu=fAFamRKpQ>4{W3o$uha>Cf(6bH4wn4r`yTe3(aY
zM_NaxOydNOC#8>9JX+!Y`}eZ4hv5zE7RMdK^9s{tGA{LOxOcnlf&YJos5!e7`irbR
zrgon<Hc&j^GmmrCBim1R@8;c|re!vNZ}a=$tnVATwKLWVoOoxn=5A=8ef;OQp#p+W
z5Ad1&*;~ZEHYE5?x!d`6-SFajGj3&a-sobhIBwtYs!}#%^|kA~Y`vm;S9uC3FWSc3
zGtc8(rRAcHQ*WDiyfw8>mbp{1s$d~+tmz*nhazn**H23nE(KXEl?#h(E!XwiD0W~~
zrrE_PsYrA4z$0^}<!~)s*?jNBi!HMDZ?>(qPCh$9cIV@auPY||Sharq%q<b#mQ&zY
zs~xfLrDV90#xcf&$C=mFbM0kzWt~1NeEJi8pQ7vtcjwIy!`w=b7~EWUBQaFIlu`1z
zY3mB^*KKMh2kbR|ZZLe^|6%RVCp~AMc^H5CV5R@?{n^ymd&UMDZ-saYDlPsqd<|>4
z^Yop`%d)o<52$>3GWmS`^N8(*5+^h6$e26q-WaR5u%KrDo`_9?Cw{hnUBf+rr%BCm
zhwP1G1;*#<y_|Q>Rhi9plhJO8#&P@BupLu!>^#bjt3N$fH|4Km_tV3<!6#yGs+9a$
z)pBWu|F5G8{WA@>be;2hswt{3;3@Y+;e=J+eXp_^zum5@R6k#;b6I82!h_G3vwvC@
zY^{HG;>16ZE4#O~dTDt*ciWb)_eZ8tp8G~q{ol}QFT;dvD&`9+@oZF|6Bd8?_KEk3
zJGwFsmO8h|@_WrcKI_HhFYh<I)Ylg<t?DzdO8@xMdEPe5Cu`mm+v#6F?5n$Uaif^Q
z8OuL*dsol(tm)Q0!!XJDfczngb9QsJ=I-4c6Te<@s)tGw|CRp?{^yqLo8zm@drGwZ
zlyQ&7%O6*7y*t+JT9`Ed<OG}6=ADPs-UwgsnK!r6lx6)^PwP0hr~OICmp}X`8u!e#
zbI!J?r2TsDnEJE&jwvgp`5s|f^0~>cVs)T*P|WNy^DQqPc147KxM6R2{C-aQr?Y9V
z)?B}S$;VsZOsIoxdvM^{irdd8GO|kt%RJuxW7cfX9_yN$0SvM`<Qitp3!87aXwvju
zx_xsuJuq6w)DSWG^5rl8N;hUR-0Rn3SYcaPe`rPI`F)l#6Kt+ohZHtgh<uwmO}6`F
zQJ{x?&96)48`tEtzuvWcrc9?m=mTwIv+Al}fgZDJln#~eve&7%HnW;%VEb2NskAQV
z<Z|1S4W(=523;<C7kD;e({8>6!e^tVvUJB>SIVB7QDkIxruo{oJ&M!!m7f<>%&Gn&
ze{N;gwp626+N(a~wY^;IU2-tuOo8FwTbI;LEq?!+e)&wsq5hfkxvW1PkNaBpwro+G
za=}lpcgLzq{(SVz%$EsLv}7%CU-_TGVtM?Gt2=()e=mC8xqr&P{pWTiZ``t7D_)Q%
zQ>FdPjGCjxpL!-)ik^=<EjjUO=Lt(SpVH^@!NFPEjZbQ^aICw;!}PH2;^|uc&ugkX
z|LmN3N7Lf5yx;tJS)DuGgJx%&th(LCW@KdjMQ*OPPHJMMx<#ePgLG;3ytcUo2ZPV=
zP@g<u$@2_}<3E2@h2<?RNa=8zxN67C`}r$Pb@bA+J}t@(VamLb%&h)F<hDO|>{<EQ
z)dhhO1>()Wu6r-ZoDj`#+L6h5Ct}WX`R7aHr>ri!bK``?F%5^WmeW4;2(Dy&$6S_h
zr0e<DXzpI!LuZ_iRC(}6eQ9s_J7Zmh*b|xO@((X<-ddelovgy!@TBngUHj)hFJ-LQ
zos;s%?#hF$y9)~f0~C{vJYekeExM<CCFN@A%zT+YJ8uh>9#8zqwdT{-Ddt>9@_+B;
z_@i+-;nqsy&0>Lb<{kO__~*y3D+9MJE}N_?aqZlRV^3zP{(e&6@cw6gZ@S`{^+p9Z
zkJN7K+HAe`$eX*$$1a}FdHyRjdzxI)VL1l*ioUD|ySs8)h0pH^O^`A?E%t&z{@R*Y
zp8_${V~#P&?5}-#JVMWI3oYI;!E&X(NVL1Og|0IHblbffL*5pJotbq_LU+yiTc6HU
zzU~Q5x^Z!Lj^Ua<rl#Un*?PyHSF=Mi8H9Eue(%h`{$crPg+06aL~2(Y(6BhhUca1a
z>ek)5)+$k%ViErt3cQXu9AdsvV6pm_?cUud&O}QntIUzl;jjPrMyqGC+oYdi8V&0$
z^A^i~eED@n+Md3%%1O@q9vqqf==fHbbyp`Yy>xZj9?z;x@$Tu&Elmd8pBwz_f8Adz
zIx{;@UoN`SFZ9Ot<&ujgZ3%5%edU`(YDz(N-MJ?o4e7I<&#gGIJo?TS{p($ycg{Vt
z((9J-^6b(bsZPc_9_`iCS-j=)TZK-pV-uVE+V-4xpK&k7RmNxGX$EdN)~d6gtPZR=
zyK^E#5}Q%o)SLdnkt%a9o{2vv=kjUsv3o^+e6GQJs<nH~0#^B~xS4YI#J?S@&8`)n
zHk{-nTl{JLU8Oa#5l6X9CLG#*UY%KT?V~S-)1J?K6)cfAl~W_uG1PF5knzuX^ZtZY
zyQsyka@1}wyfELt>Wyhi{>IamjUjgBJf93-#>7nYHFs6upCPw@#quH#{k3}hGo_h@
z@&aZ^@>KD!kI(AU=2v>Ldc(x+Jl6Gs{~0Wo=kJ*IR4H9XYlq6agHOVC9|^kuSC#Kg
z?{bOTzXDh-Z=Vk3?%N~4D7WYGo5lB4CJS&JKV@5$zW?hIv-P@%jz67t_p{7nh3ac_
zCLZf-jS*#Fyxb#Ke}1V6tE5Wc<O_Voa|$E3U+Up~E;GsEY~~#oj#QtCllUF)ZFu*D
zTW0C)r|gzrmK@BeytiE1ceSTb!qb~OWJ=doTN|q69Z5TP)%?tp#};qrpZ~mOw@T)^
z<)4gFrk*$-!}D!zzCi5KdGp!2)0<Kfbx-Su*-roSjcJ*Gw6e#Gb!p1edW;q=oG^`{
zuX(GZ41*AxS?!~@$NYbUH6GTuXDP#H&DC%$J?zsbg>&2!ul(Z5>|OXl?d#p9iR;f8
zAD3IU`V`O29g}5J1FQSj|7VC=o5h;2mE$Ly<0QjAR-=2Sy+L7bPpy3WE@OXy_d>sq
zQQB-aoO4<gK7Wz;&tRLib??eX$>mA02VS1d(8({i%3*M8XP9{9yUOJ!tu8rl$^{QK
z`kr3sHTUkR56*JW1QuIQoOFDJ>`UG`<yLbt9&0s-v0t`-_*X05Hu1LLEjj0%Cv4g4
zv!;7XFP&C?BB<PW!#}zC4+Cy{?AE?HO)tq|X~>i|y9$|&i&n*Yf7^M>C%2O^B<A1#
zU{|j&=e##xZ6Awr_gv9>A`{x+$h0$Q>N$;RCDMy?SKStR)1@MrYN0&qyKYb@gVDy6
zsiKW{=N^_nw@1n1!@&cR?53|b#?)t46#MQz#uE|7Vc1je^`dj{`<?xzYfZPlpTOW<
z`rJa@G)*^8dZw7gO#iw)Qb#^=?Y}+s!439MYj5Ge%#W+iZ0DBR^jLSZ<g)E*?%tI$
zMJwE&$gPgpZdJPGvh2o&im+P+4oh}jGq#`1<|(r|CHZCO(tWcew`;N&J#DY~bv0(<
z`G-^c9$9ykTd%ad<HoUiqwem_+71g=o(uUH;JVsd>!H`&yL`r;dnXx7_I|T-`zm`r
z$vU`*+3Kd4^~%%;=E^e#hJ9?-{~6|GP5Eeh_ROUDQ~g5B99ZP8Ubu5ue5%wWmO^!{
zY_3<&i_<hdv^RLZe(ya;C-914uGFe|hSzr5dPRt;{0?M{w%0417%l#tx3FXb^ZB5E
zSC9PI?_(~s=Tgkc2BQx4Q}$dtUa6%WEzHa|J-9XOtDm!efYm>{sZpocGo>nL?3pAp
zO?~owuhq+rK0CqGdq5_vN2v0#;h%*K4D&z#xLjo%v3hCYren85JuQ-(+zV`ZBU=8w
zVA#=VdBQ?u)6uq!#kzYE%O@Uqm*sKjcI40H&n>mOD(~Fgv2tH)bkOeTt!Gc4dc5IA
z`{j-H=N8Sq^{K4Ol7FJa&-?NfSG!kEyL5DM{;X#Y1m`<c|J9Oq;ml$xu$rd6a@n-$
z;?YWJI!vw%A<FA(7cIT{bcM2X__FFp^I1}pR1d`7`6sv4@6$`qpEp<nzt``58<@RY
zIeW$GO>>s$2>E?n_x;^nSM8YEbrpx6Iz13_W4m}GBwSoR!=+3*)UAM>Y3(hy4F_Z%
zOqV>z@SnkM{=Z$@@6L3OZBak9J%rU$_Vbm$8cQ5^_OwfT9{9d!dD3=UgX5EDZRBzP
zuq1#xY2Bg466;ENeS}on<fqSi{ZIbxyB)KArnXNv659E7T_OYbS(o%l*8)xVte5F3
zlH}u!y4bsU#om;ynQQf?r%7~tzVX*L@hIoE9l8C=Y!5ykf8Bp++p@h|Kbm&>yt#7V
zh5N5Fd^^9ckyZPCVwc&jhJ<jfHUIWKn=PE|%n`AsdhV464Fy>RJnd&JoxeV~YU{A4
zoOMA!a`HJg#_ub2)5Gtm*gWCk>MPLK-crMveDg*!OZBtmZ7+`Qey-jlSMlqr*r)O&
zvsaZTS4|i9mfCoS>C1!CB~MJU{A(7mSu)J|cD7jjR?nKAi3`)upK?6@I&#*u9kXiI
z>O7h8@$$}f#&I8OzWXF^?1^^2vd_TRDk~`AmR0uLBwe{%Y5OKf?_@vKbu;<xtjEi$
zSD*R2D3f`=s&Mb>myu;3P8$EbuKIMwHqYeAUw&OXdnY?Pg=InIrDDz3+wBk8{!0I}
zZmyE5*mo^$k)v~0{Y-M4DbZK<@vrgEtDP#*m7xq;B8d)fk4LTA*=qBhC#9&U__9ph
zT+P)6v%)7{m|ziN+gALKsXOL!E^l<iiPoogm<`VHTiCV=8R#uMr`%E?{rm1K^_?Nx
z0w?ZQws=xquhr({)5yogJc-@%(Xw+-UnX%MH@>%~aKi5Xd&&}`X=<M9%nt5|b}CrD
zd;3}=XBn~c%2SQbnelv+{MW4hY4t7HJw?+B3WfTo)LRvmbsF|Q(S4XK$-yiddwiZz
zRsH;>i7|dLGeR9gE!6ERo^4sbuBTt2)1zlQ`;D-mB}@+t3YE9{U9~*;FaF#rtrNjD
z=Z^gGESQ-4_m<#p^_9>6{1eUkcx1j#%H#%)V{ElrOH$lUZhtnj?{S}l-Mp`r%05CV
zS{~;jN=@~Pu3b||3YS#=@Y2=eQ$kM3y`2xQ?n`v_J(^aPB3qR8q=@ls2(zfnq_-^T
zvzldlg^M_kWm%@Pge9GP?>i~Jis|Wzck?)}dCXY7@`TdvTD|Oi!6=S*X7yPb4^JHX
z_^vkl?3p<qqr!xy9t?7>mP%DE_Kw`LXXC`HeJ4&n<#_yIP0N)~<&+tfdYlJe@s?c5
zcB>BLTPU!@vB%tv`NlQArJ1Q!3mgkXZ#+<GW~nosvf$d0t+{8KSDuuT6rDL`?qb=!
zS(>G$+|yLI7die4RO_tF)-dQd72Pf%Ay9u#TR*z#k?3dDo+C>hq?Ddtnlfd5f}7j!
z+Had~Gcx)WzdV~T#c5vhpCd)xZ(n73pX9Dze5C04v~SCz=36M75O6t=XCrPmf1dV*
zmdb$4;;4fxE|&FkYx#rk{MFhr+4RbRNoJb2<#)cjs(RlnOSB_T+~#3l<-g@0R_!b7
z=y;@cFx=kpzWSX<-C|pWK7L<+g6|NM`xn_4eP$>6*Q`0LBFDbq7hgut)-(T-|1+#^
zN#HxW|JPNm8DWv>li%sRNqDSNe<1i1cYNUY=9-H~S{@c1>AU~@SIDs|-QkrgH<pwp
z_gO#wuwu)V?D?iwo*5J!zq;3+Yud~1+nY~$9av+q<Mn@r*QUI;on6+gOFy($_t-Vw
ztvjFY;!k`Z>%y@ys-+_*(`d2m^qs%1$!^fz=4sBX{kieL7tzJGH$E+$Bhcj7ay~Th
zKf|??e=;WRYvgtn$x{^md1~!8o}#;4UCRzx2<RAW5@Y7Nq5dlC?<$oQHWwmA-raK7
zsjd_G-4W;eiB&FnJ%>?5f1<B-hPvERYmR>x4)NZZv!%Z$H}GZnx|xYS6V~iKAZf9B
z7vG$Vx6XW8H8X`}YUz{YZCU#k^{v}iBq056;%DY2+gH+8GBeU%iJNV3iM2V{XC~M@
zOIto>ig(TGX@_{`^b}QHjB?YuHPb8am)u3oV}&70EN#9n>N=Q{=J3vOnYdb)hQP!*
zv#wn}R_H2nHgozDdChh8fvVBU*SfSKUWwFP|MyY8w^-`gV(XYwXKsEvKUG3qv4}7F
zwX0~!cEji?Ik{pU*Q<pt8S>QMl-=)tZPGcBc@Ls_PfBeTY<^MkuOu+K+%!8WH|fFA
z9t}l_2UQQsO{Z_1RnlS^@L1>Y%Eu;wzBxiBiyz%mkx{m0V9zQ1akVzE!o?{x^1y+A
zC4s@RXH~YCDjx0K(Pew2z<A!<IrA^A&eEy*vGDw<j+9>$EEaXO?mb-{QPscoXf~&s
z>2GI++qan}2i4!xdi*4C!oG$Qk-%`-6Vnd8tep4iuBY<Znp*o~+x682eRlMEJ(;_>
z=dr{e%SBy5YQOlV-pejv&Pj3>TD+~Q-s|v{t(^8-&E~ub{O+7=5*T?j_EmJ5WpwlG
S&WNwNXGH>IIi98ezX<^L_o>$a

literal 0
HcmV?d00001

diff --git a/raton.jpg b/raton.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..82c70150a60411460bedfd1661db4cd1044633ae
GIT binary patch
literal 8330
zcmex=<Ns|2X9hMFRyI}^Ha1o^c6K%nE&(nsPEIZnK7JknaS;g#aS<^wDOpu{DQRUH
zF);-l1!XmLO)X7Hd0hit4FgpTO%0GCjO^^}TpV1&TwKB$QesjXB!mA47z8;O+8MSm
zGb%AK2{JMZGX6ipAj81G#LURZ$jAT#tZWR-EbL4iAX#ex1|~)(77$=#WM*YxU|?io
zW)Wmy6;fmqHgr@9Oe_>>oVbu(+329CQ_#kXA3mxWKU6hIYFhOF2!j+O1FFd&%?vE8
zZ2yliC<-z#F)@K{hS|%=B*?5NWGJlU7?{ZN{}uxeGb00&AhRHYJ;PN;ef|$u^;Wc`
zZu!q}XX016?eZ_SiEX}Tw(@LR#LD!E3#&CQ9I4-Huzk*lYsoW06|&t0cW|YASRs41
zeH#DMNad736Tt)hb3GQg^15VQ)QK%++h6;u=Hq#tdHY&Y&91NX4o$CY)>t-gV&+4$
zv{e(9t**_lecNktGhE4P*R9Qdv(_q1GRoC_6H*nw|9Vd5;)N^bS<P6rX=~RMgM`zT
zPa77mTYSHI;tihsHR)?lYR=<)aot03XIGFT_qGMLuG`e^>J*-ztXwtykn!Z$8CsPB
z9%>Uf)sjj&{wSU=PRh-xIIY&g?IdE>w1%r;-QC>WqrV~^`1)(AH7%YQ7_cy;gU63+
z_RBZAJC&=y9rP-iS(<V+X2(~bBORw^ul)A@d0+JMP3Dfbrktu1nqYL2r_gQtis@hX
zOqY&c+V*|l@8pt`Q+MwXHGDmVIdhVeqx!G*mm5D>KV-K!r=4Bwv4tT_<DK#a9p3Lr
zwjbh_ty*O2D{`X4;ri*mgSXG*<-Xm2pX<?+FGqdvwrDL}A#lZ=XTeeBhJ_`o=3O)2
zepzzmqqWN`zU)2CEo(h7cX#=hqhGEDDjhhqFio*?vO;iy*E{{Z8#lc!Y&m#n=E9Ak
z3^%9mRJ%R<(J#Ml$JMSTBEe^6&K#`_s<=73Y`=58?e=YK=|R4-JQISHrX?<3_<`U0
ze*MSZnQa+I)tXG7O7~iEHC4&}wXS@S8uGQ+>Altt1>PveYrGp>_f3rRe5qJ9U13>7
zOxUfwtXE-Mf95}_-+t+a>9k8h{;C#}=7m=D^%fa&Z@ay|Zn2nJ>`ZSi$56jjIVw{W
zxIAq2_t)!wOpBgcEV#69<)Y=*akVq*z1H8aZQVDczO2<Nf5VwcF~>87j1I)PvUcw<
zV?LpA`(^K@fXl&8Kg>G0aNa`8S?lE+OQQ=|_w4#|<59C`xWCIK$>e!zxr$+~8tck7
zZuk4UF)l2n*Vo%RR$!(0u8I2&^r+0W(ztcWZB?qt7PnQQi{}^#S#G(eXLfbjceW#M
zFX!9vt~#^nhnlwD47Ir?FRa$MzTaWX$K{Z;av67s=^?#RHnk`Gyx(v9PAIL5l?gs*
zqQtrG;1!SCwI26xi+Ej)$hw~T>cZ7Sz1%I~c1o|))Hm8z*WEZ7I`#0G!cgWtd5uxG
zS8Oy>m_2XXjgn&(D^?3G>1$QJBI&*BWNPezON?8OJ!X5@RIxZxD>bE*p*3%n@3)u9
z`<Q&oQub|NstGBU3p#Eqvnw>2`<KW4!0o@c@jeys`*>^VL590ib#kl)^IB?;89q%~
zb2d<>(3nxt$YRqEw{P2P*M2$pYt=7ZDcLBC+|^MGYYXp8eBCRud*Wh=Z)euB@Q59&
zEa|nHo>2aF<B<bAmonnkY-#Y`u-Q^M?=HKKPri!#CzVjoh^(xCvP(S|+;jNVaqaHf
zFWaw67i+!hOzGPcxyb7-lSH7@bBB9(t={J-Urdv1nk+p@Y44d?C0loYdD~Pvv&&22
zN6n#gDIZqzw%yIYGI_&}ZNk!}E(%*N1RJhD|Da-F=#NuI_C}IrNrwv#$o5B9y|1a9
zsl^j0<*_*Z{tVGP#@pE?TTdRVYFoQ}>GGM9l_yJrJ_*F{yz{2I{B_UQsh^&zhUI02
zNPOM$-HOAuVek591(Uvobez5KI3sG)RN0l+EVN4RPE~wny3XqU{_E=`CO3VKxiyRH
zR92Uj`Gm<!XRcdZ9h0f_eVfpabc;nx*IkWN>5odjD6F6u_PYA!+9J+v{6WtxTP&9t
za@}h4lo05-#^v^%q5SRV<9_^~-3p&h?3IYxDQ$HAu*{iIf0L~{YtEabnN2g#7N`$z
z_qa1VzxXoqgOl1tk-VB~4bE(uz+{kc`U!jV-0z3)iVKGxHS81cj(f)Gu<^_ObpjRt
z8Eocm4xLvq$w-Aww3qjnJm=4q_fJdzl_<2>FMV?<YurMfRZ}ygHeR0=7x6SSY=Y9}
z2@L8RWXq3=SmvyB{Liq$oh$TJDw7$D>9-xJDzfXU6VoPJH)Sqax{ht>bLN<aJA8)^
zZ~U_3b@R{M&{(m!mpmn<QYkl=1wBr@@SnkB>)*noeSEhjZ0O$GvTD<;jKyA+EqVt!
znG|)4xBdDpm{6B2Rdg$>E2~pIbB__*lr<eDd(S)BIWM1Pb2NM!S6V}y)Uu#At?vC>
zr%tbqK7S%1*VI>Uv-A!Z<!NDN=HB1!mAhYNytrhx*KJmlSCLD@DzToIQtl45^)-2`
z)}Pi=NUu12UO9q$OKVM6li9W(hZW=7L;E6AU2aCYUY&iTY{S1Fe<$@h266{q5fxH@
zc3g`g;Fo-i$n0OKnNg~BQLEH?rM7COPvtr3v-*+Q4i&j+*7FT(m}a#W{R$H}@n^@K
z8JDz$mdtx&e!q5YVvU7e(t62hOH*b^B!y0L)LPxD96#U5#`=8@Z`R9ev&{OY2Fiyt
z1i$kyvOV_q@3w7R@okrX@Sl3tr*vL%VtC<_`GPOZ|1(J3dC`^DH+Rv*)WlO^OfNj{
z=Pj>)+c2{?q_Svb#x09^pV*!JCUAV@w_6Y|_u@#=WeZQOnsQ$^?e5$8KV`oi=$`a*
z!SP8KS1o?DQembnqsNW59?gA+ckVkf$@hKF&bBJ$Ns|<rHM<N~J1BjZt37c0wv_gd
zs!iRheT#o=wM{wZx~JrU*d_lk`}>kwFHS9+%M^L?8Mkau^t!i~i+qn)OV6=cthP;D
zl0{QXIYab7n%e|U6{YJp-ncK2h&=OJaniDixCL$(S3I4>%`WzW^_#8D?gzVrB<83r
z^4KaM=*bYj_Dk>ni(Q|s>SCW)JbQM;#^4S6{_ERV>Lu5$HSm=Q_V=0OZf#(`_t<Rd
zFE0uWgsQrpYIT1!oFWxHr7XE9|BFJ+MuUGMM$HS`mQ9=Ec&2NbyT+2u87z9YC$SyA
zUn6;X)33@+Usg=gc%Tv)xmdAt+pT`bn()xW?|r6xHNBc1v!r7IyZ6HPW$*acaVa#d
zIPPv^d{)BO$n0Q%^o=`T^Zn;p-ubah`E<+5GQCTwpO^ZCP6*oSq&zRlUvHVX_(oZW
zm)ZF_)hkauT{|&lMW4o=FjmgXN%@JZm#%G&OjYPxWjVwCuO-K>+nHs`N38FZyeM3y
zvuxVJr?VFNIH_ma2nnv>YxpI*&US-T?r%kzm2YL%o1D5Qb55;fmPPZuxsivhXaD4X
z`gh~SA9BmJ&TYzEl0L)R<=V=EU58F^ZtITOV1Jvn=%3ih&A&9YloomQoHNShxop&Q
zdt#EkhswN$2jzlK1p<N&eBE+YQs<JvRvV)>Plq=gm$q*&-XZa_=A&+jwV!sz*T7)^
z^fRjS;=DF5DOfahTKb#izZ^EI{7R4)x#%6~=PJD0F);J$m*$G^d(wAk-CF;jVNT|n
zRXQ7=PBF6O6=3pe=q%K*o^Zb4Nc4WcYX|FOt29@7?Tuyad&qKXufx9M%N;up*$3QM
zFk|K6$`VnV8!m>YmDb)0V~B`f_dNHT^j8a&o7GP%JD+v_%sO^Dh{N1AB07J4>;~`7
zO~z*KvHs2?T7A3DZ1j*ivt;7s=M8t{XQY){vXtEM^}BT0y|gcEzv1Lf)z$xgy!g0m
zOYHFnj@e;ee$lQxL30b&+}UvKj86IfY1)=H#+Piml0C2V@~GTxuwU|@A@tkt$F({)
zZHi{h*PobkpegEUUe(-f7GGx<&htNHa@pi()Rl8@ECV-H@V?2f{ryeuM`)AxHvePQ
zKKr&!cDnfDMvj=n)7pdYyPh>0&sz8-GiBm019i8F&uZDEcJOyBev~KkPkDA~NXfUE
zO{~$eWp97~e(<y`a(bwvU29fkNVkZcyy3>%&)X{&?b29O^q=A3rHk{TKWB-~$$BR1
z(zkedeof{ozVNB%GSYVJytrV6{3YGQ{Iwfy&wg_8BumI5hOI0US|b|u|1%`-uzh=f
z8*^BS=*_&HS}DDHF4NQQ@^3GG+2O~h<C%PSfvktOtIa1DuVqc2n9Eq+@aMj5t}!dR
zYSJH|)IaZ-?Ua*=`%k+6IFwgBBdfQ?Hh;0xqyrOltLOdQaQ)EDR9T}Mw}i9jdQCdc
z9@0-!yZf&GIXg?tQ~xEY+ml^wY9ytP^@rO`5>uZwu{F5vx^==*z0gDJmTEp}U3+<E
z05{{)mn}8N4Hu@k_qA|_2ylB`61dCSEu25$z{jJN(_&@vbpo#(;!u=zo$&2#bx+xz
zK<|uFZMouKc6_a0pA}`zQeYM-pA=vnbLZ~|w)M6Tc15;a`I&PyKRbJx6WhP+>csHQ
z9fy`%bSllVN)-IKGITR@Yo^Mkjdz;&9j-At%=cpU(ho0me@u1cHgY|@Girf#o$veC
z)k(=SIyIkuG}NeySY3BxMpnn2{focMao4wfd}e9y;n{*BsnLl$H}1Q8;vl>G%9WW%
zx@8KFi8OCi&#ukQQRlfdx9hZ$zOAFjEvLG@U!^M_yUGg`%~M}h=JCeK^R(V}+db?5
z6@KPcS^DPMd8;Q+lkZGCE^2b0XI2bfV)_QIy?^!h`&2A$($om6YBYAcxKT@o_m_O}
z4hxUfXBAf}bS|Hu+~S&;a7p&)0rT6}a}1u|Jnif%QkrJcHpl4PL+{KhyGvf??>ruK
zQq$ARKV#9hx8LL+x;+V%NSr!pT0khXqC5YK>bf7P+u|d=RZUB+LN1+H>UG9O$b6yw
z8C%11aXz0s9<F@2t7X>22<u3{4OTZBZym|ME-N+5GhvZSMChEgKg&N%d%dEnf5-9l
z-74vF=T@Aw<zD@x+CAib=(N2ylU$ZK%O-smW2%~JSGMEcx4j=scZjXmYA}{=xYc!*
zrP%#_y^fiG=uCI+lhe*KB;=l&A|&xMcbk~;Ia?)V*N7;sYwWRFo<DP@F$X5-FW<Xs
za*dvclh*OB#~0LFE^9}w&dSQTc40$#LGHtssd3Xb9Q1koF}YExs>+II*RE`zm1_e-
zbqhE*-kCo=C-l%vwM45l(`ECgPnOV?H<J~ze4qU_UWe&KMSk<A>1Qn3?K9PWi0F%Q
z$zQ7$`@Bt#*K~>9zu7O0-|;UzYuvecrh2D#v02mnX-mHxT47W6Y3Jc(l?P=skIvKz
zNj2SY?ZB+a${^<CXXQJ-9h#~Zcu2nEj&iiAp=EBZaE#%GF!m#iJN`429ct%&5|}#k
z->j^e$_@?%mju_J`juz*BUg2^{llw5twpyaHKv9fmTKkVcfN6((XT#K&`?QeMZUDv
zUB7z~=a0uYX!c}_ES+$S>7{j!^+PG2k13q14_)Ic6;4iW@j7(#a^~g}YMqHIrf8kG
zKcnQ!{>hd7J{il_&o}lup}Oa@!!J{>Z_o5UZD0EPck+~@Gj8k*3NB3IJ$Rr0KSS%+
zEQ^x~6Q?XqRaV*@ad*P~-wCIrjHVr&<2~7v<-^GcWsV+suYQh=Z)$cWO<PyKq|0KK
zr4qw5SL=I!dAIL4o^x)K>sh0dFIBXh7Y5$;v)Jp;_3^H)%ZE!Bo<3uH@g_fCiT9gz
zqM<6YpM8VW%JWA&mj`rju+4wnGgVl}VMSq){>!H156}D47*(@4g2NV^R+_x;+x`2@
zuj_d%uD)ViIq~S(mg%nX+kHxGI#;P5Y|2teyOeO<>dU5;yYBz}e7s}HiXg8=(;3$1
zZ=WZrVb5!POY`6v+4<qEQI@6|a%Jkp_wTa@x2#(3)^J$mSAOP~sp4jL>wYH*ow)As
zP=C1vfA)U{-Lqd>lk*oGIqp$^c+uJNl}mW<M_R3aUE{;Aa`fYgXOVOC-YIK*Sfgc`
zt9tG6-jhF*O9Xf8n%dfCg+6^Hmnmi&l~lCL(OkOZm48jbx;1CdT$tIdmn!kpr|oFU
zc2AD2o!jo~h)CLHN6MP44;B$A=T&gMyzcue9rizukJnAvQe@>dab6SWq)oHtl?pEI
ztrsxo&6~D8xNh+_siHlnjTf%2&`!*FG>xq_&7E<p(2|PX-QC^2@7!yOHwS5M7U^o9
z@gZCLOoC$?`w@r3dC>vSXP+od(^;!BIab*_cSY`n`1AWr7Cm}fmRk6dS9EHp4L8rT
z%0nAw6iuBt>2Sg63+t!<Gq|lTta+;FYtyO;-jbmcJWOA0*xI@`r(m`}t6QeRvhuDK
zN)2au4tvJ!j(BH&C+@j)(VSTeH&5%Ha^dM~%@?612HRu5M(?|KW1Gz6!UCUVYWG(>
zWcfOC%WAP`Z;w^?G_|rM8meMAu4KJ$o+-~BkXrgFP1N~EU$&z7>Xi&rm=`U0v7oka
zQ`;;LlUa3s&DW0H`8`LlEB3VK<`SkKUzAqL^&~8ckExt;$-S`1s?6f&>fTR>-RFcJ
zQ7LNecv;<iTi)gDnrWp8YcKq~EtB2#NU~pEW*zs5Pj}BvT<yWdX<f;aX|Sc}ux?(w
zyNaRpQRX+N+HURr^P#vcW#%IHt4nVC-YB{Kwl~Imid6c_Wzo#iz2f)ficZ<LaEko8
zJcDcX%D*aK_AfoUuc9XF<jJOrb*m~}*otNyyig)s{Q7O<#%uiTEL*!OJn~jQO}<k<
z$4^I}x5j|i%c-LK{Z8Au8~hd(;bGU!c33yr8l3hC*cKgfDMar;!`<HPho^bZ)(bsq
z$s3p?X1VVBzezdn7alUJO$_sOnYh31$NkXYb&JGQX3On8cVpEJE4xciRSzHW3=?NC
zyY_aQ#wx}MtJhwcEh@|L^}s&v^;PSHPsfGnD=rUs=b`fU-rbLjjk9K~RdJ2c*ss%A
z{EGMAjmDHvlU9a5XANC)O4jbX@zr;xw%dm-_pEORZnRe@>|M^+xa#6T>z5ac^8*aH
z7HP~|cu#}J=-5#&tK20e3Vp}7F}C@ff4KbVk%(7mj4@FU`sUrg&G2OIf|VjdS8e`$
zIA-7w#!_8(?PZVQHMg~of|VE8Nm_B&=jZKMw`}jLm9k5hb1LmfIN^1N_uKv&Db^OX
z)vZfZAEoELagg7qCjMaUYR;c-8CPb%yu`aNU08yhasApepRJ^y3m+HDx}<ebN+VLD
zBscf<HuI-nD(9N$Sr+*UFv?Fk@M_}=hp%tDHov(L`n+l1tp0!Vx=e&lIOK~SjQ%w{
z=J(@6{;npWQ$ud9a*kDA-Bs4xP*mMy(;of!+@!CU7Iy_cF*qKnu(-#aZ{>xFrwvxE
ziEU!8?zfNoyCHW$*Q&nA%@WB=7k7r7S7~~(GGR@_y=ULw7MNQ+pBj1UNYK0hl`D64
z%3ag4x>@)8!8iZ8=ea|JgD!<Wn;Gj7amZ`QA^`@5_`*w1?h8gd`R=5#@51-FZkvvJ
zJ5M^b<>KYb?k3vItJ$;jdaSm+(mA}opuqXVZPR~>GmM@InuwY!9M}4qcxUq7zpNY2
z7JF)GR&Fu&6qqESvb;AXveL`zZnVGOUc2h#bsywnLlwMsKHj(Dfag}GYd3Dp+aD7z
zCp9VcV{%^Dxt7aDZgZBte40~tSlxF2TfzSfr*6wsoJzVVH>>TL#~KF}#>CpT+sP>{
zY#YS({pOqT*7}mA`=fNpXE)Vqrigf+UYZho_NjP!;ZE))_pZG%nWI`KnQru~rgGME
z$z5XetjYwvguPy=9skd8V*8oL8w>N2CIs*3VYYX0PbfU`*)1i#;)I)R$s2={MInCv
z-J9#`Rz`>Gh7@J(+H5}4?6yR9cU@|OvR|v5dy?IQ{^W*`scS_(bKWT`iI`M3W9lDA
z5lxkZPT4C{KZzf9zwUDHz@6}b^|O9Ne+pCF9<=1f#0F8LV)F^B<Bx26V_<JGYgc9W
zl;kz<+%(#B4lTR5#Ls=E7(>FFgPvI~DWVE@^7})COhan~Hf}6qsNzZSnQauk+2NY_
z*8aD>J5_9UUFk|*<mvvZFlk@%efG%?m+cxK=p0%#%cZMJw(ywA6??N&3(CZAY_rNS
z4!(A1PSd0VPkN(T+vnNUKdkJ_4cKB}*sj%Z_k{JS+FHX)+CSE-pOBl>t+zyN=c!j7
z9gOWWW%WB&=FP0S*7o9+iYu$b6^0GjmA9@R6`ds|_K{<rle1NW@2}eWV>&gLv}TFU
zcTBi;V|_<;-whe2pqW}M%R(n)Y1Mn~FR<OeO(tljVykFspt6tkZ0(qOv)hKptTv=2
zoq765yVUH~w@<GOe+NfCyfACqUv<Aj|Mq^Ir+Gu8YmwXBbBm;|tML2hB#YQ2S+mVt
zeCuJ3@4g>1{>)pkrnRIocg>Ud-;a_R9$D(@e4Qh$D;LgvJ}@{UWtyv;K*ww`+jSlX
zX5Er{Gi_bS@_Fp}PkI+eGR;@s9>cljk<Hx~iEUOVla@+NOzQr0q2EC_NAzq;uTjp<
z1U0LfBHnr50u$=*>lE4)`PUk^9`bCK@|v|WY+Z$N<J74s5!1cye6gyQ&tE2?p*1UP
z)3cy0tu}_It_8oauAdXileTKgre32fPCT8t(?XN-Y&q^rf3lNayJ}^ZAw$MDhN$D~
zeWH)uKh&kWVVVD-OCRO3&o;`LYOQ+a%B5VmaogOreP0jm3{HtGy5JEqHP*=`c8Toy
zeFh)qIr>kycvPtES!t4geU1^=7glABh`yG-e?RJ`%1-E23Ki`#OFFZPBXPlmvM<tq
zHmJosTDiC^RWSa`<jc?a%&%TG*O}iwb?wG`2kO2Z(0R6OmA&{ub{@U6(!JBq^eund
z)RxzpbE$Ia%Cu=75nE4$#M<xt^-#U5n03JtBTq@y-YJ=8``1sCSC@K{p0Mjig@1M3
z4emKlO>VX@Y0Yrib|-%0(M27ebLxL=zNR;gOTG3$bc6lHHZ7i|OM0bTc5b-SW8!|;
zWJOX(@QW^Sz814LxqptYJQ$HNb+&KQ3I*v>^EYgoP7!nY`Ddm@cTcF<_T}fpY3=P{
zDmP7qpBX%#p5iODFePGf8W*Rnt>@vu6PXDv*CzSAa+LCrExWz_KG#zbqwaK@1rM*z
z-Qq5p$=mKDeRc=C?sVPkrOz3f4&9z(n|d+WB{uV5xSVj}{xjdce?7>uL{+*V>erUZ
zS1J=_ZoepH*=-lK>c=|)Qzp@3{}ofR-*LX;Una3btWU0{a$3?GZ&{b->e|9fJ8riu
zJiBP>%OKSq@2viAv-tVm+H#$9P|?r3m*#zYUn;x4wQ%o(<x%Mw(|cbQ+o^U<nl?>s
zdGmowjJ56;5@uiFb=k?#xZJ6mf5F?=E%Jv}tazC<ZPjw;^4)TO`(&1#%iJ7uYSXf5
z0#{x=6fE1kzd-kOal`xr{~5feEm<s@ahh#|{;H?-CyrIlVo?+h-u+Hi$3yv7o2pB8
zz|%N|O`=*{vIp!UR{v+n^fX!WVumK;Ob;_z&V!6A0`FOT-R>&3_1L{_ziKubZ08sE
zv7EK)<s+-}ZK<mJwjVjTEZbe>?5B+%yOOw1g(&c{FxcmapEN38y=1Ctpz?$+&fo{k
z1tLEq{(ii)vYv0%YR$KyI$liuXA|-~KC_g_{m`BMq1G+#<BZi?G!kS~5>LI|5xnj0
zcXpv(R~28^uMTqZ4=$N_ya;q++G~E?;iS=z%#|X?r6ffr#OSWGH8+#E&VT6Q)Go`J
zTkE#|&9AqcE^b>}SF!V+Np9+q!k=PH{~5I7UpC!Nof4!y)lS)Y#a&OWZwmd<<;Rz*
z@Wsr!@y2xS_Rrsscg^ZrbRu(6<Am9<Hygja(9cPkCpB}%-dyjO(nX8>B7WVO{PmcG
z`3bk4nT)B{4QJ|p#GiYmx$;4Iu++AD3WvVQ?u$@%vHY+_*duRs!fhe<wZ9&Q8=k(T
z@Zuw<3Tva@wk!O%Up9+w@O*Jf>+uTRg^ibYzO4yXiJBo6Enp?P{$Jq(MlK_dk6Oyi
z)zSHh-JRioy>j;InIFB#mU>m%P`^kdr#kECPvKS97F`wj5WBuw$adFb-kkO+FD6V2
z+;Pe6-|VD2t7dA2X?@*Os*oXWmBXH7x8qWkQMAmXb4n{XKDo-c2uTO`Gv49;RX-<I
zt|C%J@YtkPKRAV=9+We0lnB+%sNq<*UA0mA{@>R<B{$c)Y<ce&=Bc%4!Uhhem-n-y
zFAFW&RuYl*VC8b5#Y#r+uGh{eQ@gvdMDnf4Dn-$aDr@$MhP|)-{%QX<;T5WDQjDGn
zIV<hB{Ncr}()Z5_*Uej&W$CMy>CWO7ZEMrj&``zw&934~wshvCa>LMLr|+iR`?a@y
z?%REW{}}|06z8Uvn)*65MT9WCEUFFNTXyh-vBk=D&c?^imIwaXudn@JtImY8GV{a*
z1@zkM=W#KI)m-K0zrHsl<U{C5UTs+&_E6dH$Ky^{l-W$zTC%kLNJptjOWv`&bw57N
zdGgXDWxnTA?k2apzY}{-Og=7?*palW?9<lmpO2M(5Zu3DRcoO4%Ki?%y*loNPkSz3
z%=Va8)F-{cI{*1I!)<PcC)U<x=;W}zsVX_@WEIt3EpkndB|$Lx-(LeZvCTU3=LW6`
zcKa?H(%9MbYxVRm!48ghUQZ0Ue_L2Wq`g8d>66ORu5}OYulUbkvEb^#kSGJ0m(qO=
z-M4Fee0&ve&Z|gWqU@}ylDn@%AS?UV&&T&>vaDLqv}TF4*@7eeMumn34q40ebkA=6
zwo&X()uTCwCPmGVefjhWD@!)h??*|=tc*stzNIsE{y4E_Ql_2w+rsx7kL4U&a_vO2
zudnI)?AzJ5z8^Vq`1nSZFM3z!x0f+6YqA$Vy)WNy*!g&~*`tHi%$=QGlkL52otMu4
zv_J09?W8LcANsR61Y2G%%x;s5oBb#$!M0WE(h7lzCp<UZ?UdT~pTSMH^Cin^p1`c>
zp)+%r<X^7w@llr8ufKkAN9*eD(1RNlHd^24&zoBiXENPjS;b@bIc}zkWz5MhKOH%8
zjPqT?3{AB?j(6Yf?QLC~`#bq6&l?4nt+V)<Gj_Gzx@%Ya(a^9V)JJU9JdY<<4~;6O
zet&)acRbT)Nv)`rJj`h-)>(I|1TQq19XWEqF|oXrSJ$S`q_Heu_uSsyzaFzC$I9w-
zt@e{zcsU{|$K?J>xr!Mx!j3I8%eiNBE`4QN<>kQ5I>}ybpX76Ve0-$DXWRe32>{VW
Bic0_h

literal 0
HcmV?d00001

-- 
GitLab