diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f3436fe1fd3e8a7064887098b38e50dfda48b27d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,170 @@
+*.jpg
+.DS_Store
+
+# Data
+data/*
+transfer_learning/hymenoptera_data/*
+
+# Torch model
+*.pt
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+#   This is especially recommended for binary packages to ensure reproducibility, and is more
+#   commonly ignored for libraries.
+#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+#   in version control.
+#   https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+#  and can be added to the global gitignore or merged into this file.  For a more nuclear
+#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
diff --git a/README.md b/README.md
deleted file mode 100644
index 02a9b9d1c74268b91e14b1f4dc037df48e785999..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# MOD_4_6-TD2
-
-
-
-## Getting started
-
-To make it easy for you to get started with GitLab, here's a list of recommended next steps.
-
-Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
-
-## Add your files
-
-- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
-- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
-
-```
-cd existing_repo
-git remote add origin https://gitlab.ec-lyon.fr/edelland/mod_4_6-td2.git
-git branch -M main
-git push -uf origin main
-```
-
-## Integrate with your tools
-
-- [ ] [Set up project integrations](https://gitlab.ec-lyon.fr/edelland/mod_4_6-td2/-/settings/integrations)
-
-## Collaborate with your team
-
-- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
-- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
-- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
-- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
-- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
-
-## Test and Deploy
-
-Use the built-in continuous integration in GitLab.
-
-- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
-- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
-- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
-- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
-- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
-
-***
-
-# Editing this README
-
-When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
-
-## Suggestions for a good README
-Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
-
-## Name
-Choose a self-explaining name for your project.
-
-## Description
-Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
-
-## Badges
-On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
-
-## Visuals
-Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
-
-## Installation
-Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
-
-## Usage
-Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
-
-## Support
-Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
-
-## Roadmap
-If you have ideas for releases in the future, it is a good idea to list them in the README.
-
-## Contributing
-State if you are open to contributions and what your requirements are for accepting them.
-
-For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
-
-You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
-
-## Authors and acknowledgment
-Show your appreciation to those who have contributed to the project.
-
-## License
-For open source projects, say how it is licensed.
-
-## Project status
-If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..b604c399c0390a8dc10e84e48ebd430c696ac8d5
--- /dev/null
+++ b/TD2 Deep Learning.ipynb	
@@ -0,0 +1,953 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "7edf7168",
+   "metadata": {},
+   "source": [
+    "# TD2: Deep learning"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fbb8c8df",
+   "metadata": {},
+   "source": [
+    "In this TD, you must modify this notebook to answer the questions. To do this,\n",
+    "\n",
+    "1. Fork this repository\n",
+    "2. Clone your forked repository on your local computer\n",
+    "3. Answer the questions\n",
+    "4. Commit and push regularly\n",
+    "\n",
+    "The last commit is due on Sunday, November 27, 11:59 PM. Later commits will not be taken into account."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3d167a29",
+   "metadata": {},
+   "source": [
+    "Install and test PyTorch from  https://pytorch.org/get-started/locally."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "330a42f5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install torch torchvision"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0882a636",
+   "metadata": {},
+   "source": [
+    "\n",
+    "To test run the following code"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b1950f0a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "\n",
+    "N, D = 14, 10\n",
+    "x = torch.randn(N, D).type(torch.FloatTensor)\n",
+    "print(x)\n",
+    "\n",
+    "from torchvision import models\n",
+    "\n",
+    "alexnet = models.alexnet()\n",
+    "print(alexnet)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "23f266da",
+   "metadata": {},
+   "source": [
+    "## Exercise 1: CNN on CIFAR10\n",
+    "\n",
+    "The goal is to apply a Convolutional Neural Net (CNN) model on the CIFAR10 image dataset and test the accuracy of the model on the basis of image classification. Compare the Accuracy VS the neural network implemented during TD1.\n",
+    "\n",
+    "Have a look at the following documentation to be familiar with PyTorch.\n",
+    "\n",
+    "https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\n",
+    "\n",
+    "https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4ba1c82d",
+   "metadata": {},
+   "source": [
+    "You can test if GPU is available on your machine and thus train on it to speed up the process"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6e18f2fd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "\n",
+    "# check if CUDA is available\n",
+    "train_on_gpu = torch.cuda.is_available()\n",
+    "\n",
+    "if not train_on_gpu:\n",
+    "    print(\"CUDA is not available.  Training on CPU ...\")\n",
+    "else:\n",
+    "    print(\"CUDA is available!  Training on GPU ...\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5cf214eb",
+   "metadata": {},
+   "source": [
+    "Next we load the CIFAR10 dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "462666a2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from torchvision import datasets, transforms\n",
+    "from torch.utils.data.sampler import SubsetRandomSampler\n",
+    "\n",
+    "# number of subprocesses to use for data loading\n",
+    "num_workers = 0\n",
+    "# how many samples per batch to load\n",
+    "batch_size = 20\n",
+    "# percentage of training set to use as validation\n",
+    "valid_size = 0.2\n",
+    "\n",
+    "# convert data to a normalized torch.FloatTensor\n",
+    "transform = transforms.Compose(\n",
+    "    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n",
+    ")\n",
+    "\n",
+    "# choose the training and test datasets\n",
+    "train_data = datasets.CIFAR10(\"data\", train=True, download=True, transform=transform)\n",
+    "test_data = datasets.CIFAR10(\"data\", train=False, download=True, transform=transform)\n",
+    "\n",
+    "# obtain training indices that will be used for validation\n",
+    "num_train = len(train_data)\n",
+    "indices = list(range(num_train))\n",
+    "np.random.shuffle(indices)\n",
+    "split = int(np.floor(valid_size * num_train))\n",
+    "train_idx, valid_idx = indices[split:], indices[:split]\n",
+    "\n",
+    "# define samplers for obtaining training and validation batches\n",
+    "train_sampler = SubsetRandomSampler(train_idx)\n",
+    "valid_sampler = SubsetRandomSampler(valid_idx)\n",
+    "\n",
+    "# prepare data loaders (combine dataset and sampler)\n",
+    "train_loader = torch.utils.data.DataLoader(\n",
+    "    train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers\n",
+    ")\n",
+    "valid_loader = torch.utils.data.DataLoader(\n",
+    "    train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers\n",
+    ")\n",
+    "test_loader = torch.utils.data.DataLoader(\n",
+    "    test_data, batch_size=batch_size, num_workers=num_workers\n",
+    ")\n",
+    "\n",
+    "# specify the image classes\n",
+    "classes = [\n",
+    "    \"airplane\",\n",
+    "    \"automobile\",\n",
+    "    \"bird\",\n",
+    "    \"cat\",\n",
+    "    \"deer\",\n",
+    "    \"dog\",\n",
+    "    \"frog\",\n",
+    "    \"horse\",\n",
+    "    \"ship\",\n",
+    "    \"truck\",\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "58ec3903",
+   "metadata": {},
+   "source": [
+    "CNN definition (this one is an example)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "317bf070",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "# define the CNN architecture\n",
+    "\n",
+    "\n",
+    "class Net(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Net, self).__init__()\n",
+    "        self.conv1 = nn.Conv2d(3, 6, 5)\n",
+    "        self.pool = nn.MaxPool2d(2, 2)\n",
+    "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
+    "        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n",
+    "        self.fc2 = nn.Linear(120, 84)\n",
+    "        self.fc3 = nn.Linear(84, 10)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = x.view(-1, 16 * 5 * 5)\n",
+    "        x = F.relu(self.fc1(x))\n",
+    "        x = F.relu(self.fc2(x))\n",
+    "        x = self.fc3(x)\n",
+    "        return x\n",
+    "\n",
+    "\n",
+    "# create a complete CNN\n",
+    "model = Net()\n",
+    "print(model)\n",
+    "# move tensors to GPU if CUDA is available\n",
+    "if train_on_gpu:\n",
+    "    model.cuda()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a2dc4974",
+   "metadata": {},
+   "source": [
+    "Loss function and training using SGD (Stochastic Gradient Descent) optimizer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4b53f229",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.optim as optim\n",
+    "\n",
+    "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
+    "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
+    "\n",
+    "n_epochs = 30  # number of epochs to train the model\n",
+    "train_loss_list = []  # list to store loss to visualize\n",
+    "valid_loss_min = np.Inf  # track change in validation loss\n",
+    "\n",
+    "for epoch in range(n_epochs):\n",
+    "    # Keep track of training and validation loss\n",
+    "    train_loss = 0.0\n",
+    "    valid_loss = 0.0\n",
+    "\n",
+    "    # Train the model\n",
+    "    model.train()\n",
+    "    for data, target in train_loader:\n",
+    "        # Move tensors to GPU if CUDA is available\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        # Clear the gradients of all optimized variables\n",
+    "        optimizer.zero_grad()\n",
+    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "        output = model(data)\n",
+    "        # Calculate the batch loss\n",
+    "        loss = criterion(output, target)\n",
+    "        # Backward pass: compute gradient of the loss with respect to model parameters\n",
+    "        loss.backward()\n",
+    "        # Perform a single optimization step (parameter update)\n",
+    "        optimizer.step()\n",
+    "        # Update training loss\n",
+    "        train_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "    # Validate the model\n",
+    "    model.eval()\n",
+    "    for data, target in valid_loader:\n",
+    "        # Move tensors to GPU if CUDA is available\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "        output = model(data)\n",
+    "        # Calculate the batch loss\n",
+    "        loss = criterion(output, target)\n",
+    "        # Update average validation loss\n",
+    "        valid_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "    # Calculate average losses\n",
+    "    train_loss = train_loss / len(train_loader)\n",
+    "    valid_loss = valid_loss / len(valid_loader)\n",
+    "    train_loss_list.append(train_loss)\n",
+    "\n",
+    "    # Print training/validation statistics\n",
+    "    print(\n",
+    "        \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n",
+    "            epoch, train_loss, valid_loss\n",
+    "        )\n",
+    "    )\n",
+    "\n",
+    "    # Save model if validation loss has decreased\n",
+    "    if valid_loss <= valid_loss_min:\n",
+    "        print(\n",
+    "            \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n",
+    "                valid_loss_min, valid_loss\n",
+    "            )\n",
+    "        )\n",
+    "        torch.save(model.state_dict(), \"model_cifar.pt\")\n",
+    "        valid_loss_min = valid_loss"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "13e1df74",
+   "metadata": {},
+   "source": [
+    "Does overfit occur? If so, do an early stopping."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d39df818",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "plt.plot(range(n_epochs), train_loss_list)\n",
+    "plt.xlabel(\"Epoch\")\n",
+    "plt.ylabel(\"Loss\")\n",
+    "plt.title(\"Performance of Model 1\")\n",
+    "plt.show()"
+   ]
+  },
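+  {
+   "cell_type": "markdown",
+   "id": "e5f60718",
+   "metadata": {},
+   "source": [
+    "One possible way to implement early stopping is a small patience-based helper (the patience of 5 epochs is an arbitrary choice): training stops once the validation loss has not improved for `patience` consecutive epochs. In the training loop above, you would call `stopper.step(valid_loss)` at the end of each epoch and `break` when it returns `True`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e5f60719",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class EarlyStopping:\n",
+    "    \"\"\"Signal a stop when the monitored loss stops improving.\"\"\"\n",
+    "\n",
+    "    def __init__(self, patience=5):  # patience value is an arbitrary choice\n",
+    "        self.patience = patience\n",
+    "        self.best_loss = float(\"inf\")\n",
+    "        self.counter = 0\n",
+    "\n",
+    "    def step(self, loss):\n",
+    "        # Return True when training should stop\n",
+    "        if loss < self.best_loss:\n",
+    "            self.best_loss = loss\n",
+    "            self.counter = 0\n",
+    "        else:\n",
+    "            self.counter += 1\n",
+    "        return self.counter >= self.patience\n",
+    "\n",
+    "\n",
+    "stopper = EarlyStopping(patience=5)"
+   ]
+  },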
+  {
+   "cell_type": "markdown",
+   "id": "11df8fd4",
+   "metadata": {},
+   "source": [
+    "Now loading the model with the lowest validation loss value\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e93efdfc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
+    "\n",
+    "# track test loss\n",
+    "test_loss = 0.0\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
+    "\n",
+    "model.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = model(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(batch_size):\n",
+    "        label = target.data[i]\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
+    "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
+    "\n",
+    "for i in range(10):\n",
+    "    if class_total[i] > 0:\n",
+    "        print(\n",
+    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                classes[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
+    "            )\n",
+    "        )\n",
+    "    else:\n",
+    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
+    "\n",
+    "print(\n",
+    "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
+    "    % (\n",
+    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
+    "        np.sum(class_correct),\n",
+    "        np.sum(class_total),\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "944991a2",
+   "metadata": {},
+   "source": [
+    "Build a new network with the following structure.\n",
+    "\n",
+    "- It has 3 convolutional layers of kernel size 3 and padding of 1.\n",
+    "- The first convolutional layer must output 16 channels, the second 32 and the third 64.\n",
+    "- At each convolutional layer output, we apply a ReLU activation then a MaxPool with kernel size of 2.\n",
+    "- Then, three fully connected layers, the first two being followed by a ReLU activation and a dropout whose value you will suggest.\n",
+    "- The first fully connected layer will have an output size of 512.\n",
+    "- The second fully connected layer will have an output size of 64.\n",
+    "\n",
+    "Compare the results obtained with this new network to those obtained previously."
+   ]
+  },
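+  {
+   "cell_type": "markdown",
+   "id": "a1b2c3d4",
+   "metadata": {},
+   "source": [
+    "A possible sketch of this architecture is given below (the dropout probability of 0.3 is a suggestion, not fixed by the exercise). With 32x32 CIFAR10 inputs, the three MaxPool stages reduce the spatial size to 4x4, so the flattened feature size is 64 * 4 * 4 = 1024."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a1b2c3d5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "\n",
+    "class Net2(nn.Module):\n",
+    "    def __init__(self, dropout=0.3):  # dropout value is a suggestion\n",
+    "        super(Net2, self).__init__()\n",
+    "        # Three convolutional layers, kernel size 3, padding 1\n",
+    "        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n",
+    "        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n",
+    "        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n",
+    "        self.pool = nn.MaxPool2d(2, 2)\n",
+    "        # 32x32 -> 16x16 -> 8x8 -> 4x4 after the three pooling stages\n",
+    "        self.fc1 = nn.Linear(64 * 4 * 4, 512)\n",
+    "        self.fc2 = nn.Linear(512, 64)\n",
+    "        self.fc3 = nn.Linear(64, 10)\n",
+    "        self.dropout = nn.Dropout(dropout)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = self.pool(F.relu(self.conv3(x)))\n",
+    "        x = x.view(-1, 64 * 4 * 4)\n",
+    "        x = self.dropout(F.relu(self.fc1(x)))\n",
+    "        x = self.dropout(F.relu(self.fc2(x)))\n",
+    "        x = self.fc3(x)\n",
+    "        return x\n",
+    "\n",
+    "\n",
+    "model2 = Net2()\n",
+    "print(model2)\n",
+    "if train_on_gpu:\n",
+    "    model2.cuda()"
+   ]
+  },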
+  {
+   "cell_type": "markdown",
+   "id": "bc381cf4",
+   "metadata": {},
+   "source": [
+    "## Exercise 2: Quantization: try to compress the CNN to save space\n",
+    "\n",
+    "Quantization doc is available from https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic\n",
+    "        \n",
+    "The Exercise is to quantize post training the above CNN model. Compare the size reduction and the impact on the classification accuracy \n",
+    "\n",
+    "\n",
+    "The size of the model is simply the size of the file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ef623c26",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "\n",
+    "def print_size_of_model(model, label=\"\"):\n",
+    "    torch.save(model.state_dict(), \"temp.p\")\n",
+    "    size = os.path.getsize(\"temp.p\")\n",
+    "    print(\"model: \", label, \" \\t\", \"Size (KB):\", size / 1e3)\n",
+    "    os.remove(\"temp.p\")\n",
+    "    return size\n",
+    "\n",
+    "\n",
+    "print_size_of_model(model, \"fp32\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "05c4e9ad",
+   "metadata": {},
+   "source": [
+    "Post training quantization example"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c4c65d4b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.quantization\n",
+    "\n",
+    "\n",
+    "quantized_model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)\n",
+    "print_size_of_model(quantized_model, \"int8\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7b108e17",
+   "metadata": {},
+   "source": [
+    "For each class, compare the classification test accuracy of the initial model and the quantized model. Also give the overall test accuracy for both models."
+   ]
+  },
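+  {
+   "cell_type": "markdown",
+   "id": "c3a4b5d6",
+   "metadata": {},
+   "source": [
+    "A sketch of one way to run this comparison: the helper below reuses `test_loader` and `classes` from Exercise 1 and computes per-class and overall accuracy on the CPU (the dynamically quantized model only runs on CPU)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c3a4b5d7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def class_accuracy(model, loader):\n",
+    "    \"\"\"Per-class and overall test accuracy, computed on the CPU.\"\"\"\n",
+    "    model = model.cpu()\n",
+    "    model.eval()\n",
+    "    correct = [0] * 10\n",
+    "    total = [0] * 10\n",
+    "    with torch.no_grad():\n",
+    "        for data, target in loader:\n",
+    "            _, pred = torch.max(model(data), 1)\n",
+    "            for t, p in zip(target, pred):\n",
+    "                total[t.item()] += 1\n",
+    "                correct[t.item()] += int(t.item() == p.item())\n",
+    "    for i in range(10):\n",
+    "        print(\"%10s: %.1f%%\" % (classes[i], 100.0 * correct[i] / total[i]))\n",
+    "    print(\"Overall: %.1f%%\" % (100.0 * sum(correct) / sum(total)))\n",
+    "\n",
+    "\n",
+    "class_accuracy(model, test_loader)\n",
+    "class_accuracy(quantized_model, test_loader)"
+   ]
+  },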
+  {
+   "cell_type": "markdown",
+   "id": "a0a34b90",
+   "metadata": {},
+   "source": [
+    "Try training aware quantization to mitigate the impact on the accuracy (doc available here https://pytorch.org/docs/stable/quantization.html#torch.quantization.quantize_dynamic)"
+   ]
+  },
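+  {
+   "cell_type": "markdown",
+   "id": "d7e8f9a0",
+   "metadata": {},
+   "source": [
+    "Quantization-aware training inserts fake quantization during fine-tuning so that the weights adapt to the reduced precision. Below is a minimal eager-mode sketch, assuming the `Net` model from Exercise 1: the `QuantStub`/`DeQuantStub` wrapper marks where activations enter and leave the quantized region, and a few epochs of fine-tuning would go where indicated."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d7e8f9a1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.nn as nn\n",
+    "import torch.quantization\n",
+    "\n",
+    "\n",
+    "class QATWrapper(nn.Module):\n",
+    "    \"\"\"Wrap a float model with quantize/dequantize stubs for eager-mode QAT.\"\"\"\n",
+    "\n",
+    "    def __init__(self, net):\n",
+    "        super().__init__()\n",
+    "        self.quant = torch.quantization.QuantStub()\n",
+    "        self.net = net\n",
+    "        self.dequant = torch.quantization.DeQuantStub()\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        return self.dequant(self.net(self.quant(x)))\n",
+    "\n",
+    "\n",
+    "qat_model = QATWrapper(Net())\n",
+    "qat_model.train()\n",
+    "qat_model.qconfig = torch.quantization.get_default_qat_qconfig(\"fbgemm\")\n",
+    "torch.quantization.prepare_qat(qat_model, inplace=True)\n",
+    "\n",
+    "# ... fine-tune qat_model on train_loader for a few epochs here ...\n",
+    "\n",
+    "qat_model.eval()\n",
+    "int8_model = torch.quantization.convert(qat_model)\n",
+    "print_size_of_model(int8_model, \"int8 (QAT)\")"
+   ]
+  },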
+  {
+   "cell_type": "markdown",
+   "id": "201470f9",
+   "metadata": {},
+   "source": [
+    "## Exercise 3: working with pre-trained models.\n",
+    "\n",
+    "PyTorch offers several pre-trained models https://pytorch.org/vision/0.8/models.html        \n",
+    "We will use ResNet50 trained on ImageNet dataset (https://www.image-net.org/index.php). Use the following code with the files `imagenet-simple-labels.json` that contains the imagenet labels and the image dog.png that we will use as test.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b4d13080",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "from PIL import Image\n",
+    "\n",
+    "# Choose an image to pass through the model\n",
+    "test_image = \"dog.png\"\n",
+    "\n",
+    "# Configure matplotlib for pretty inline plots\n",
+    "#%matplotlib inline\n",
+    "#%config InlineBackend.figure_format = 'retina'\n",
+    "\n",
+    "# Prepare the labels\n",
+    "with open(\"imagenet-simple-labels.json\") as f:\n",
+    "    labels = json.load(f)\n",
+    "\n",
+    "# First prepare the transformations: resize the image to what the model was trained on and convert it to a tensor\n",
+    "data_transform = transforms.Compose(\n",
+    "    [\n",
+    "        transforms.Resize((224, 224)),\n",
+    "        transforms.ToTensor(),\n",
+    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "    ]\n",
+    ")\n",
+    "# Load the image\n",
+    "\n",
+    "image = Image.open(test_image)\n",
+    "plt.imshow(image), plt.xticks([]), plt.yticks([])\n",
+    "\n",
+    "# Now apply the transformation, expand the batch dimension, and send the image to the GPU\n",
+    "# image = data_transform(image).unsqueeze(0).cuda()\n",
+    "image = data_transform(image).unsqueeze(0)\n",
+    "\n",
+    "# Download the model if it's not there already. It will take a bit on the first run, after that it's fast\n",
+    "model = models.resnet50(pretrained=True)\n",
+    "# Send the model to the GPU\n",
+    "# model.cuda()\n",
+    "# Set layers such as dropout and batchnorm in evaluation mode\n",
+    "model.eval()\n",
+    "\n",
+    "# Get the 1000-dimensional model output\n",
+    "out = model(image)\n",
+    "# Find the predicted class\n",
+    "print(\"Predicted class is: {}\".format(labels[out.argmax()]))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "184cfceb",
+   "metadata": {},
+   "source": [
+    "Experiments:\n",
+    "\n",
+    "Study the code and the results obtained. Possibly add other images downloaded from the internet.\n",
+    "\n",
+    "What is the size of the model? Quantize it and then check if the model is still able to correctly classify the other images.\n",
+    "\n",
+    "Experiment with other pre-trained CNN models.\n",
+    "\n",
+    "    \n"
+   ]
+  },
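+  {
+   "cell_type": "markdown",
+   "id": "b9c0d1e2",
+   "metadata": {},
+   "source": [
+    "A sketch of the quantization check, reusing `model`, `image` and `labels` from the cell above. Dynamic quantization here only affects the final `Linear` layer of ResNet50, so the size gain is modest; the point is to verify that the prediction is unchanged."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b9c0d1e3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.nn as nn\n",
+    "import torch.quantization\n",
+    "\n",
+    "print_size_of_model(model, \"fp32\")  # helper defined in Exercise 2\n",
+    "quantized_resnet = torch.quantization.quantize_dynamic(\n",
+    "    model, {nn.Linear}, dtype=torch.qint8\n",
+    ")\n",
+    "print_size_of_model(quantized_resnet, \"int8\")\n",
+    "\n",
+    "out = quantized_resnet(image)\n",
+    "print(\"Predicted class is: {}\".format(labels[out.argmax()]))"
+   ]
+  },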
+  {
+   "cell_type": "markdown",
+   "id": "5d57da4b",
+   "metadata": {},
+   "source": [
+    "## Exercise 4: Transfer Learning\n",
+    "    \n",
+    "    \n",
+    "For this work, we will use a pre-trained model (ResNet18) as a descriptor extractor and will refine the classification by training only the last fully connected layer of the network. Thus, the output layer of the pre-trained network will be replaced by a layer adapted to the new classes to be recognized which will be in our case ants and bees.\n",
+    "Download and unzip in your working directory the dataset available at the address :\n",
+    "    \n",
+    "https://download.pytorch.org/tutorial/hymenoptera_data.zip\n",
+    "    \n",
+    "Execute the following code in order to display some images of the dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "be2d31f5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import torch\n",
+    "import torchvision\n",
+    "from torchvision import datasets, transforms\n",
+    "\n",
+    "# Data augmentation and normalization for training\n",
+    "# Just normalization for validation\n",
+    "data_transforms = {\n",
+    "    \"train\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.RandomResizedCrop(\n",
+    "                224\n",
+    "            ),  # ImageNet models were trained on 224x224 images\n",
+    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
+    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
+    "            transforms.Normalize(\n",
+    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
+    "            ),  # ImageNet models expect this norm\n",
+    "        ]\n",
+    "    ),\n",
+    "    \"val\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize(256),\n",
+    "            transforms.CenterCrop(224),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "        ]\n",
+    "    ),\n",
+    "}\n",
+    "\n",
+    "data_dir = \"hymenoptera_data\"\n",
+    "# Create train and validation datasets and loaders\n",
+    "image_datasets = {\n",
+    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataloaders = {\n",
+    "    x: torch.utils.data.DataLoader(\n",
+    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=0\n",
+    "    )\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
+    "class_names = image_datasets[\"train\"].classes\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "# Helper function for displaying images\n",
+    "def imshow(inp, title=None):\n",
+    "    \"\"\"Imshow for Tensor.\"\"\"\n",
+    "    inp = inp.numpy().transpose((1, 2, 0))\n",
+    "    mean = np.array([0.485, 0.456, 0.406])\n",
+    "    std = np.array([0.229, 0.224, 0.225])\n",
+    "\n",
+    "    # Un-normalize the images\n",
+    "    inp = std * inp + mean\n",
+    "    # Clip just in case\n",
+    "    inp = np.clip(inp, 0, 1)\n",
+    "    plt.imshow(inp)\n",
+    "    if title is not None:\n",
+    "        plt.title(title)\n",
+    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
+    "    plt.show()\n",
+    "\n",
+    "\n",
+    "# Get a batch of training data\n",
+    "inputs, classes = next(iter(dataloaders[\"train\"]))\n",
+    "\n",
+    "# Make a grid from batch\n",
+    "out = torchvision.utils.make_grid(inputs)\n",
+    "\n",
+    "imshow(out, title=[class_names[x] for x in classes])\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bbd48800",
+   "metadata": {},
+   "source": [
+    "Now, execute the following code which uses a pre-trained model ResNet18 having replaced the output layer for the ants/bees classification and performs the model training by only changing the weights of this output layer."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "572d824c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import copy\n",
+    "import os\n",
+    "import time\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.optim as optim\n",
+    "import torchvision\n",
+    "from torch.optim import lr_scheduler\n",
+    "from torchvision import datasets, transforms\n",
+    "\n",
+    "# Data augmentation and normalization for training\n",
+    "# Just normalization for validation\n",
+    "data_transforms = {\n",
+    "    \"train\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.RandomResizedCrop(\n",
+    "                224\n",
+    "            ),  # ImageNet models were trained on 224x224 images\n",
+    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
+    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
+    "            transforms.Normalize(\n",
+    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
+    "            ),  # ImageNet models expect this norm\n",
+    "        ]\n",
+    "    ),\n",
+    "    \"val\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize(256),\n",
+    "            transforms.CenterCrop(224),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "        ]\n",
+    "    ),\n",
+    "}\n",
+    "\n",
+    "data_dir = \"hymenoptera_data\"\n",
+    "# Create train and validation datasets and loaders\n",
+    "image_datasets = {\n",
+    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataloaders = {\n",
+    "    x: torch.utils.data.DataLoader(\n",
+    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=4\n",
+    "    )\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
+    "class_names = image_datasets[\"train\"].classes\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "# Helper function for displaying images\n",
+    "def imshow(inp, title=None):\n",
+    "    \"\"\"Imshow for Tensor.\"\"\"\n",
+    "    inp = inp.numpy().transpose((1, 2, 0))\n",
+    "    mean = np.array([0.485, 0.456, 0.406])\n",
+    "    std = np.array([0.229, 0.224, 0.225])\n",
+    "\n",
+    "    # Un-normalize the images\n",
+    "    inp = std * inp + mean\n",
+    "    # Clip just in case\n",
+    "    inp = np.clip(inp, 0, 1)\n",
+    "    plt.imshow(inp)\n",
+    "    if title is not None:\n",
+    "        plt.title(title)\n",
+    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
+    "    plt.show()\n",
+    "\n",
+    "\n",
+    "# Get a batch of training data\n",
+    "# inputs, classes = next(iter(dataloaders['train']))\n",
+    "\n",
+    "# Make a grid from batch\n",
+    "# out = torchvision.utils.make_grid(inputs)\n",
+    "\n",
+    "# imshow(out, title=[class_names[x] for x in classes])\n",
+    "# training\n",
+    "\n",
+    "\n",
+    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
+    "    since = time.time()\n",
+    "\n",
+    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "    best_acc = 0.0\n",
+    "\n",
+    "    epoch_time = []  # we'll keep track of the time needed for each epoch\n",
+    "\n",
+    "    for epoch in range(num_epochs):\n",
+    "        epoch_start = time.time()\n",
+    "        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n",
+    "        print(\"-\" * 10)\n",
+    "\n",
+    "        # Each epoch has a training and validation phase\n",
+    "        for phase in [\"train\", \"val\"]:\n",
+    "            if phase == \"train\":\n",
+    "                scheduler.step()\n",
+    "                model.train()  # Set model to training mode\n",
+    "            else:\n",
+    "                model.eval()  # Set model to evaluate mode\n",
+    "\n",
+    "            running_loss = 0.0\n",
+    "            running_corrects = 0\n",
+    "\n",
+    "            # Iterate over data.\n",
+    "            for inputs, labels in dataloaders[phase]:\n",
+    "                inputs = inputs.to(device)\n",
+    "                labels = labels.to(device)\n",
+    "\n",
+    "                # zero the parameter gradients\n",
+    "                optimizer.zero_grad()\n",
+    "\n",
+    "                # Forward\n",
+    "                # Track history if only in training phase\n",
+    "                with torch.set_grad_enabled(phase == \"train\"):\n",
+    "                    outputs = model(inputs)\n",
+    "                    _, preds = torch.max(outputs, 1)\n",
+    "                    loss = criterion(outputs, labels)\n",
+    "\n",
+    "                    # backward + optimize only if in training phase\n",
+    "                    if phase == \"train\":\n",
+    "                        loss.backward()\n",
+    "                        optimizer.step()\n",
+    "\n",
+    "                # Statistics\n",
+    "                running_loss += loss.item() * inputs.size(0)\n",
+    "                running_corrects += torch.sum(preds == labels.data)\n",
+    "\n",
+    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
+    "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
+    "\n",
+    "            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n",
+    "\n",
+    "            # Deep copy the model\n",
+    "            if phase == \"val\" and epoch_acc > best_acc:\n",
+    "                best_acc = epoch_acc\n",
+    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "\n",
+    "        # Add the epoch time\n",
+    "        t_epoch = time.time() - epoch_start\n",
+    "        epoch_time.append(t_epoch)\n",
+    "        print()\n",
+    "\n",
+    "    time_elapsed = time.time() - since\n",
+    "    print(\n",
+    "        \"Training complete in {:.0f}m {:.0f}s\".format(\n",
+    "            time_elapsed // 60, time_elapsed % 60\n",
+    "        )\n",
+    "    )\n",
+    "    print(\"Best val Acc: {:4f}\".format(best_acc))\n",
+    "\n",
+    "    # Load best model weights\n",
+    "    model.load_state_dict(best_model_wts)\n",
+    "    return model, epoch_time\n",
+    "\n",
+    "\n",
+    "# Download a pre-trained ResNet18 model and freeze its weights\n",
+    "model = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "# Replace the final fully connected layer\n",
+    "# Parameters of newly constructed modules have requires_grad=True by default\n",
+    "num_ftrs = model.fc.in_features\n",
+    "model.fc = nn.Linear(num_ftrs, 2)\n",
+    "# Send the model to the GPU\n",
+    "model = model.to(device)\n",
+    "# Set the loss function\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Observe that only the parameters of the final layer are being optimized\n",
+    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
+    "model, epoch_time = train_model(\n",
+    "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bbd48800",
+   "metadata": {},
+   "source": [
+    "Experiments:\n",
+    "Study the code and the results obtained.\n",
+    "\n",
+    "Modify the code and add an \"eval_model\" function to allow\n",
+    "the evaluation of the model on a test set (different from the learning and validation sets used during the learning phase). Study the results obtained.\n",
+    "\n",
+    "Now modify the code to replace the current classification layer with a set of two layers using a \"relu\" activation function for the middle layer, and the \"dropout\" mechanism for both layers. Renew the experiments and study the results obtained.\n",
+    "\n",
+    "Apply ther quantization (post and quantization aware) and evaluate impact on model size and accuracy."
+   ]
+  },
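+  {
+   "cell_type": "markdown",
+   "id": "f3a4b5c6",
+   "metadata": {},
+   "source": [
+    "Possible sketches of these two modifications are given below. The `eval_model` function assumes a test split prepared like `train`/`val` above (the `dataloaders[\"test\"]` loader is hypothetical); the hidden layer size (256) and the dropout probability (0.5) are suggestions."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f3a4b5c7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def eval_model(model, criterion, dataloader):\n",
+    "    \"\"\"Evaluate the model on a held-out test set.\"\"\"\n",
+    "    model.eval()\n",
+    "    running_loss, running_corrects, n = 0.0, 0, 0\n",
+    "    with torch.no_grad():\n",
+    "        for inputs, labels in dataloader:\n",
+    "            inputs, labels = inputs.to(device), labels.to(device)\n",
+    "            outputs = model(inputs)\n",
+    "            _, preds = torch.max(outputs, 1)\n",
+    "            running_loss += criterion(outputs, labels).item() * inputs.size(0)\n",
+    "            running_corrects += torch.sum(preds == labels.data).item()\n",
+    "            n += inputs.size(0)\n",
+    "    print(\"Test Loss: {:.4f} Acc: {:.4f}\".format(running_loss / n, running_corrects / n))\n",
+    "\n",
+    "\n",
+    "# Two-layer classification head: ReLU in between, dropout before each layer\n",
+    "# (rebuild the optimizer on model.fc.parameters() before re-training)\n",
+    "model.fc = nn.Sequential(\n",
+    "    nn.Dropout(0.5),\n",
+    "    nn.Linear(num_ftrs, 256),\n",
+    "    nn.ReLU(),\n",
+    "    nn.Dropout(0.5),\n",
+    "    nn.Linear(256, 2),\n",
+    ").to(device)\n",
+    "\n",
+    "# Example usage, once a \"test\" split exists:\n",
+    "# eval_model(model, criterion, dataloaders[\"test\"])"
+   ]
+  },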
+  {
+   "cell_type": "markdown",
+   "id": "04a263f0",
+   "metadata": {},
+   "source": [
+    "## Optional\n",
+    "    \n",
+    "Try this at home!! \n",
+    "\n",
+    "\n",
+    "Pytorch offers a framework to export a given CNN to your selfphone (either android or iOS). Have a look at the tutorial https://pytorch.org/mobile/home/\n",
+    "\n",
+    "The Exercise consists in deploying the CNN of Exercise 4 in your phone and then test it on live.\n",
+    "\n"
+   ]
+  },
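+  {
+   "cell_type": "markdown",
+   "id": "a7b8c9d0",
+   "metadata": {},
+   "source": [
+    "A sketch of the export step, following the PyTorch Mobile tutorial linked above: trace the trained model from Exercise 4 to TorchScript, optimize it for mobile, and save the file that the Android/iOS demo apps load."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a7b8c9d1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from torch.utils.mobile_optimizer import optimize_for_mobile\n",
+    "\n",
+    "model.cpu().eval()\n",
+    "example = torch.rand(1, 3, 224, 224)  # ResNet18 expects 224x224 RGB input\n",
+    "traced = torch.jit.trace(model, example)\n",
+    "optimized = optimize_for_mobile(traced)\n",
+    "optimized._save_for_lite_interpreter(\"model_mobile.ptl\")"
+   ]
+  },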
+  {
+   "cell_type": "markdown",
+   "id": "fe954ce4",
+   "metadata": {},
+   "source": [
+    "## Author\n",
+    "\n",
+    "Alberto BOSIO - Ph. D."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.8.5 ('base')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.5"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "9e3efbebb05da2d4a1968abe9a0645745f54b63feb7a85a514e4da0495be97eb"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/dog.png b/dog.png
new file mode 100644
index 0000000000000000000000000000000000000000..786dc161c6d8981020d04d7a7082ab1d4fcb1bef
Binary files /dev/null and b/dog.png differ
diff --git a/imagenet-simple-labels.json b/imagenet-simple-labels.json
new file mode 100644
index 0000000000000000000000000000000000000000..4528298045cba5f1f6cded2f2f6b2da4b4775d4b
--- /dev/null
+++ b/imagenet-simple-labels.json
@@ -0,0 +1,1000 @@
+["tench",
+"goldfish",
+"great white shark",
+"tiger shark",
+"hammerhead shark",
+"electric ray",
+"stingray",
+"cock",
+"hen",
+"ostrich",
+"brambling",
+"goldfinch",
+"house finch",
+"junco",
+"indigo bunting",
+"American robin",
+"bulbul",
+"jay",
+"magpie",
+"chickadee",
+"American dipper",
+"kite",
+"bald eagle",
+"vulture",
+"great grey owl",
+"fire salamander",
+"smooth newt",
+"newt",
+"spotted salamander",
+"axolotl",
+"American bullfrog",
+"tree frog",
+"tailed frog",
+"loggerhead sea turtle",
+"leatherback sea turtle",
+"mud turtle",
+"terrapin",
+"box turtle",
+"banded gecko",
+"green iguana",
+"Carolina anole",
+"desert grassland whiptail lizard",
+"agama",
+"frilled-necked lizard",
+"alligator lizard",
+"Gila monster",
+"European green lizard",
+"chameleon",
+"Komodo dragon",
+"Nile crocodile",
+"American alligator",
+"triceratops",
+"worm snake",
+"ring-necked snake",
+"eastern hog-nosed snake",
+"smooth green snake",
+"kingsnake",
+"garter snake",
+"water snake",
+"vine snake",
+"night snake",
+"boa constrictor",
+"African rock python",
+"Indian cobra",
+"green mamba",
+"sea snake",
+"Saharan horned viper",
+"eastern diamondback rattlesnake",
+"sidewinder",
+"trilobite",
+"harvestman",
+"scorpion",
+"yellow garden spider",
+"barn spider",
+"European garden spider",
+"southern black widow",
+"tarantula",
+"wolf spider",
+"tick",
+"centipede",
+"black grouse",
+"ptarmigan",
+"ruffed grouse",
+"prairie grouse",
+"peacock",
+"quail",
+"partridge",
+"grey parrot",
+"macaw",
+"sulphur-crested cockatoo",
+"lorikeet",
+"coucal",
+"bee eater",
+"hornbill",
+"hummingbird",
+"jacamar",
+"toucan",
+"duck",
+"red-breasted merganser",
+"goose",
+"black swan",
+"tusker",
+"echidna",
+"platypus",
+"wallaby",
+"koala",
+"wombat",
+"jellyfish",
+"sea anemone",
+"brain coral",
+"flatworm",
+"nematode",
+"conch",
+"snail",
+"slug",
+"sea slug",
+"chiton",
+"chambered nautilus",
+"Dungeness crab",
+"rock crab",
+"fiddler crab",
+"red king crab",
+"American lobster",
+"spiny lobster",
+"crayfish",
+"hermit crab",
+"isopod",
+"white stork",
+"black stork",
+"spoonbill",
+"flamingo",
+"little blue heron",
+"great egret",
+"bittern",
+"crane",
+"limpkin",
+"common gallinule",
+"American coot",
+"bustard",
+"ruddy turnstone",
+"dunlin",
+"common redshank",
+"dowitcher",
+"oystercatcher",
+"pelican",
+"king penguin",
+"albatross",
+"grey whale",
+"killer whale",
+"dugong",
+"sea lion",
+"Chihuahua",
+"Japanese Chin",
+"Maltese",
+"Pekingese",
+"Shih Tzu",
+"King Charles Spaniel",
+"Papillon",
+"toy terrier",
+"Rhodesian Ridgeback",
+"Afghan Hound",
+"Basset Hound",
+"Beagle",
+"Bloodhound",
+"Bluetick Coonhound",
+"Black and Tan Coonhound",
+"Treeing Walker Coonhound",
+"English foxhound",
+"Redbone Coonhound",
+"borzoi",
+"Irish Wolfhound",
+"Italian Greyhound",
+"Whippet",
+"Ibizan Hound",
+"Norwegian Elkhound",
+"Otterhound",
+"Saluki",
+"Scottish Deerhound",
+"Weimaraner",
+"Staffordshire Bull Terrier",
+"American Staffordshire Terrier",
+"Bedlington Terrier",
+"Border Terrier",
+"Kerry Blue Terrier",
+"Irish Terrier",
+"Norfolk Terrier",
+"Norwich Terrier",
+"Yorkshire Terrier",
+"Wire Fox Terrier",
+"Lakeland Terrier",
+"Sealyham Terrier",
+"Airedale Terrier",
+"Cairn Terrier",
+"Australian Terrier",
+"Dandie Dinmont Terrier",
+"Boston Terrier",
+"Miniature Schnauzer",
+"Giant Schnauzer",
+"Standard Schnauzer",
+"Scottish Terrier",
+"Tibetan Terrier",
+"Australian Silky Terrier",
+"Soft-coated Wheaten Terrier",
+"West Highland White Terrier",
+"Lhasa Apso",
+"Flat-Coated Retriever",
+"Curly-coated Retriever",
+"Golden Retriever",
+"Labrador Retriever",
+"Chesapeake Bay Retriever",
+"German Shorthaired Pointer",
+"Vizsla",
+"English Setter",
+"Irish Setter",
+"Gordon Setter",
+"Brittany",
+"Clumber Spaniel",
+"English Springer Spaniel",
+"Welsh Springer Spaniel",
+"Cocker Spaniels",
+"Sussex Spaniel",
+"Irish Water Spaniel",
+"Kuvasz",
+"Schipperke",
+"Groenendael",
+"Malinois",
+"Briard",
+"Australian Kelpie",
+"Komondor",
+"Old English Sheepdog",
+"Shetland Sheepdog",
+"collie",
+"Border Collie",
+"Bouvier des Flandres",
+"Rottweiler",
+"German Shepherd Dog",
+"Dobermann",
+"Miniature Pinscher",
+"Greater Swiss Mountain Dog",
+"Bernese Mountain Dog",
+"Appenzeller Sennenhund",
+"Entlebucher Sennenhund",
+"Boxer",
+"Bullmastiff",
+"Tibetan Mastiff",
+"French Bulldog",
+"Great Dane",
+"St. Bernard",
+"husky",
+"Alaskan Malamute",
+"Siberian Husky",
+"Dalmatian",
+"Affenpinscher",
+"Basenji",
+"pug",
+"Leonberger",
+"Newfoundland",
+"Pyrenean Mountain Dog",
+"Samoyed",
+"Pomeranian",
+"Chow Chow",
+"Keeshond",
+"Griffon Bruxellois",
+"Pembroke Welsh Corgi",
+"Cardigan Welsh Corgi",
+"Toy Poodle",
+"Miniature Poodle",
+"Standard Poodle",
+"Mexican hairless dog",
+"grey wolf",
+"Alaskan tundra wolf",
+"red wolf",
+"coyote",
+"dingo",
+"dhole",
+"African wild dog",
+"hyena",
+"red fox",
+"kit fox",
+"Arctic fox",
+"grey fox",
+"tabby cat",
+"tiger cat",
+"Persian cat",
+"Siamese cat",
+"Egyptian Mau",
+"cougar",
+"lynx",
+"leopard",
+"snow leopard",
+"jaguar",
+"lion",
+"tiger",
+"cheetah",
+"brown bear",
+"American black bear",
+"polar bear",
+"sloth bear",
+"mongoose",
+"meerkat",
+"tiger beetle",
+"ladybug",
+"ground beetle",
+"longhorn beetle",
+"leaf beetle",
+"dung beetle",
+"rhinoceros beetle",
+"weevil",
+"fly",
+"bee",
+"ant",
+"grasshopper",
+"cricket",
+"stick insect",
+"cockroach",
+"mantis",
+"cicada",
+"leafhopper",
+"lacewing",
+"dragonfly",
+"damselfly",
+"red admiral",
+"ringlet",
+"monarch butterfly",
+"small white",
+"sulphur butterfly",
+"gossamer-winged butterfly",
+"starfish",
+"sea urchin",
+"sea cucumber",
+"cottontail rabbit",
+"hare",
+"Angora rabbit",
+"hamster",
+"porcupine",
+"fox squirrel",
+"marmot",
+"beaver",
+"guinea pig",
+"common sorrel",
+"zebra",
+"pig",
+"wild boar",
+"warthog",
+"hippopotamus",
+"ox",
+"water buffalo",
+"bison",
+"ram",
+"bighorn sheep",
+"Alpine ibex",
+"hartebeest",
+"impala",
+"gazelle",
+"dromedary",
+"llama",
+"weasel",
+"mink",
+"European polecat",
+"black-footed ferret",
+"otter",
+"skunk",
+"badger",
+"armadillo",
+"three-toed sloth",
+"orangutan",
+"gorilla",
+"chimpanzee",
+"gibbon",
+"siamang",
+"guenon",
+"patas monkey",
+"baboon",
+"macaque",
+"langur",
+"black-and-white colobus",
+"proboscis monkey",
+"marmoset",
+"white-headed capuchin",
+"howler monkey",
+"titi",
+"Geoffroy's spider monkey",
+"common squirrel monkey",
+"ring-tailed lemur",
+"indri",
+"Asian elephant",
+"African bush elephant",
+"red panda",
+"giant panda",
+"snoek",
+"eel",
+"coho salmon",
+"rock beauty",
+"clownfish",
+"sturgeon",
+"garfish",
+"lionfish",
+"pufferfish",
+"abacus",
+"abaya",
+"academic gown",
+"accordion",
+"acoustic guitar",
+"aircraft carrier",
+"airliner",
+"airship",
+"altar",
+"ambulance",
+"amphibious vehicle",
+"analog clock",
+"apiary",
+"apron",
+"waste container",
+"assault rifle",
+"backpack",
+"bakery",
+"balance beam",
+"balloon",
+"ballpoint pen",
+"Band-Aid",
+"banjo",
+"baluster",
+"barbell",
+"barber chair",
+"barbershop",
+"barn",
+"barometer",
+"barrel",
+"wheelbarrow",
+"baseball",
+"basketball",
+"bassinet",
+"bassoon",
+"swimming cap",
+"bath towel",
+"bathtub",
+"station wagon",
+"lighthouse",
+"beaker",
+"military cap",
+"beer bottle",
+"beer glass",
+"bell-cot",
+"bib",
+"tandem bicycle",
+"bikini",
+"ring binder",
+"binoculars",
+"birdhouse",
+"boathouse",
+"bobsleigh",
+"bolo tie",
+"poke bonnet",
+"bookcase",
+"bookstore",
+"bottle cap",
+"bow",
+"bow tie",
+"brass",
+"bra",
+"breakwater",
+"breastplate",
+"broom",
+"bucket",
+"buckle",
+"bulletproof vest",
+"high-speed train",
+"butcher shop",
+"taxicab",
+"cauldron",
+"candle",
+"cannon",
+"canoe",
+"can opener",
+"cardigan",
+"car mirror",
+"carousel",
+"tool kit",
+"carton",
+"car wheel",
+"automated teller machine",
+"cassette",
+"cassette player",
+"castle",
+"catamaran",
+"CD player",
+"cello",
+"mobile phone",
+"chain",
+"chain-link fence",
+"chain mail",
+"chainsaw",
+"chest",
+"chiffonier",
+"chime",
+"china cabinet",
+"Christmas stocking",
+"church",
+"movie theater",
+"cleaver",
+"cliff dwelling",
+"cloak",
+"clogs",
+"cocktail shaker",
+"coffee mug",
+"coffeemaker",
+"coil",
+"combination lock",
+"computer keyboard",
+"confectionery store",
+"container ship",
+"convertible",
+"corkscrew",
+"cornet",
+"cowboy boot",
+"cowboy hat",
+"cradle",
+"crane",
+"crash helmet",
+"crate",
+"infant bed",
+"Crock Pot",
+"croquet ball",
+"crutch",
+"cuirass",
+"dam",
+"desk",
+"desktop computer",
+"rotary dial telephone",
+"diaper",
+"digital clock",
+"digital watch",
+"dining table",
+"dishcloth",
+"dishwasher",
+"disc brake",
+"dock",
+"dog sled",
+"dome",
+"doormat",
+"drilling rig",
+"drum",
+"drumstick",
+"dumbbell",
+"Dutch oven",
+"electric fan",
+"electric guitar",
+"electric locomotive",
+"entertainment center",
+"envelope",
+"espresso machine",
+"face powder",
+"feather boa",
+"filing cabinet",
+"fireboat",
+"fire engine",
+"fire screen sheet",
+"flagpole",
+"flute",
+"folding chair",
+"football helmet",
+"forklift",
+"fountain",
+"fountain pen",
+"four-poster bed",
+"freight car",
+"French horn",
+"frying pan",
+"fur coat",
+"garbage truck",
+"gas mask",
+"gas pump",
+"goblet",
+"go-kart",
+"golf ball",
+"golf cart",
+"gondola",
+"gong",
+"gown",
+"grand piano",
+"greenhouse",
+"grille",
+"grocery store",
+"guillotine",
+"barrette",
+"hair spray",
+"half-track",
+"hammer",
+"hamper",
+"hair dryer",
+"hand-held computer",
+"handkerchief",
+"hard disk drive",
+"harmonica",
+"harp",
+"harvester",
+"hatchet",
+"holster",
+"home theater",
+"honeycomb",
+"hook",
+"hoop skirt",
+"horizontal bar",
+"horse-drawn vehicle",
+"hourglass",
+"iPod",
+"clothes iron",
+"jack-o'-lantern",
+"jeans",
+"jeep",
+"T-shirt",
+"jigsaw puzzle",
+"pulled rickshaw",
+"joystick",
+"kimono",
+"knee pad",
+"knot",
+"lab coat",
+"ladle",
+"lampshade",
+"laptop computer",
+"lawn mower",
+"lens cap",
+"paper knife",
+"library",
+"lifeboat",
+"lighter",
+"limousine",
+"ocean liner",
+"lipstick",
+"slip-on shoe",
+"lotion",
+"speaker",
+"loupe",
+"sawmill",
+"magnetic compass",
+"mail bag",
+"mailbox",
+"tights",
+"tank suit",
+"manhole cover",
+"maraca",
+"marimba",
+"mask",
+"match",
+"maypole",
+"maze",
+"measuring cup",
+"medicine chest",
+"megalith",
+"microphone",
+"microwave oven",
+"military uniform",
+"milk can",
+"minibus",
+"miniskirt",
+"minivan",
+"missile",
+"mitten",
+"mixing bowl",
+"mobile home",
+"Model T",
+"modem",
+"monastery",
+"monitor",
+"moped",
+"mortar",
+"square academic cap",
+"mosque",
+"mosquito net",
+"scooter",
+"mountain bike",
+"tent",
+"computer mouse",
+"mousetrap",
+"moving van",
+"muzzle",
+"nail",
+"neck brace",
+"necklace",
+"nipple",
+"notebook computer",
+"obelisk",
+"oboe",
+"ocarina",
+"odometer",
+"oil filter",
+"organ",
+"oscilloscope",
+"overskirt",
+"bullock cart",
+"oxygen mask",
+"packet",
+"paddle",
+"paddle wheel",
+"padlock",
+"paintbrush",
+"pajamas",
+"palace",
+"pan flute",
+"paper towel",
+"parachute",
+"parallel bars",
+"park bench",
+"parking meter",
+"passenger car",
+"patio",
+"payphone",
+"pedestal",
+"pencil case",
+"pencil sharpener",
+"perfume",
+"Petri dish",
+"photocopier",
+"plectrum",
+"Pickelhaube",
+"picket fence",
+"pickup truck",
+"pier",
+"piggy bank",
+"pill bottle",
+"pillow",
+"ping-pong ball",
+"pinwheel",
+"pirate ship",
+"pitcher",
+"hand plane",
+"planetarium",
+"plastic bag",
+"plate rack",
+"plow",
+"plunger",
+"Polaroid camera",
+"pole",
+"police van",
+"poncho",
+"billiard table",
+"soda bottle",
+"pot",
+"potter's wheel",
+"power drill",
+"prayer rug",
+"printer",
+"prison",
+"projectile",
+"projector",
+"hockey puck",
+"punching bag",
+"purse",
+"quill",
+"quilt",
+"race car",
+"racket",
+"radiator",
+"radio",
+"radio telescope",
+"rain barrel",
+"recreational vehicle",
+"reel",
+"reflex camera",
+"refrigerator",
+"remote control",
+"restaurant",
+"revolver",
+"rifle",
+"rocking chair",
+"rotisserie",
+"eraser",
+"rugby ball",
+"ruler",
+"running shoe",
+"safe",
+"safety pin",
+"salt shaker",
+"sandal",
+"sarong",
+"saxophone",
+"scabbard",
+"weighing scale",
+"school bus",
+"schooner",
+"scoreboard",
+"CRT screen",
+"screw",
+"screwdriver",
+"seat belt",
+"sewing machine",
+"shield",
+"shoe store",
+"shoji",
+"shopping basket",
+"shopping cart",
+"shovel",
+"shower cap",
+"shower curtain",
+"ski",
+"ski mask",
+"sleeping bag",
+"slide rule",
+"sliding door",
+"slot machine",
+"snorkel",
+"snowmobile",
+"snowplow",
+"soap dispenser",
+"soccer ball",
+"sock",
+"solar thermal collector",
+"sombrero",
+"soup bowl",
+"space bar",
+"space heater",
+"space shuttle",
+"spatula",
+"motorboat",
+"spider web",
+"spindle",
+"sports car",
+"spotlight",
+"stage",
+"steam locomotive",
+"through arch bridge",
+"steel drum",
+"stethoscope",
+"scarf",
+"stone wall",
+"stopwatch",
+"stove",
+"strainer",
+"tram",
+"stretcher",
+"couch",
+"stupa",
+"submarine",
+"suit",
+"sundial",
+"sunglass",
+"sunglasses",
+"sunscreen",
+"suspension bridge",
+"mop",
+"sweatshirt",
+"swimsuit",
+"swing",
+"switch",
+"syringe",
+"table lamp",
+"tank",
+"tape player",
+"teapot",
+"teddy bear",
+"television",
+"tennis ball",
+"thatched roof",
+"front curtain",
+"thimble",
+"threshing machine",
+"throne",
+"tile roof",
+"toaster",
+"tobacco shop",
+"toilet seat",
+"torch",
+"totem pole",
+"tow truck",
+"toy store",
+"tractor",
+"semi-trailer truck",
+"tray",
+"trench coat",
+"tricycle",
+"trimaran",
+"tripod",
+"triumphal arch",
+"trolleybus",
+"trombone",
+"tub",
+"turnstile",
+"typewriter keyboard",
+"umbrella",
+"unicycle",
+"upright piano",
+"vacuum cleaner",
+"vase",
+"vault",
+"velvet",
+"vending machine",
+"vestment",
+"viaduct",
+"violin",
+"volleyball",
+"waffle iron",
+"wall clock",
+"wallet",
+"wardrobe",
+"military aircraft",
+"sink",
+"washing machine",
+"water bottle",
+"water jug",
+"water tower",
+"whiskey jug",
+"whistle",
+"wig",
+"window screen",
+"window shade",
+"Windsor tie",
+"wine bottle",
+"wing",
+"wok",
+"wooden spoon",
+"wool",
+"split-rail fence",
+"shipwreck",
+"yawl",
+"yurt",
+"website",
+"comic book",
+"crossword",
+"traffic sign",
+"traffic light",
+"dust jacket",
+"menu",
+"plate",
+"guacamole",
+"consomme",
+"hot pot",
+"trifle",
+"ice cream",
+"ice pop",
+"baguette",
+"bagel",
+"pretzel",
+"cheeseburger",
+"hot dog",
+"mashed potato",
+"cabbage",
+"broccoli",
+"cauliflower",
+"zucchini",
+"spaghetti squash",
+"acorn squash",
+"butternut squash",
+"cucumber",
+"artichoke",
+"bell pepper",
+"cardoon",
+"mushroom",
+"Granny Smith",
+"strawberry",
+"orange",
+"lemon",
+"fig",
+"pineapple",
+"banana",
+"jackfruit",
+"custard apple",
+"pomegranate",
+"hay",
+"carbonara",
+"chocolate syrup",
+"dough",
+"meatloaf",
+"pizza",
+"pot pie",
+"burrito",
+"red wine",
+"espresso",
+"cup",
+"eggnog",
+"alp",
+"bubble",
+"cliff",
+"coral reef",
+"geyser",
+"lakeshore",
+"promontory",
+"shoal",
+"seashore",
+"valley",
+"volcano",
+"baseball player",
+"bridegroom",
+"scuba diver",
+"rapeseed",
+"daisy",
+"yellow lady's slipper",
+"corn",
+"acorn",
+"rose hip",
+"horse chestnut seed",
+"coral fungus",
+"agaric",
+"gyromitra",
+"stinkhorn mushroom",
+"earth star",
+"hen-of-the-woods",
+"bolete",
+"ear",
+"toilet paper"]