diff --git a/Lectures/04_Deep_Learning-Convolutional_Neural_Networks.pdf b/Lectures/04_Deep_Learning-Convolutional_Neural_Networks.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..f4e47e06cdd22ca35a4c3234adbc21e66d97b1b9
Binary files /dev/null and b/Lectures/04_Deep_Learning-Convolutional_Neural_Networks.pdf differ
diff --git a/Practical_sessions/Session_3/food_truck.txt b/Practical_sessions/Session_3/food_truck.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0f88ccb611f840ba9283e0de2a26b6cb9b8fde02
--- /dev/null
+++ b/Practical_sessions/Session_3/food_truck.txt
@@ -0,0 +1,97 @@
+6.1101,17.592
+5.5277,9.1302
+8.5186,13.662
+7.0032,11.854
+5.8598,6.8233
+8.3829,11.886
+7.4764,4.3483
+8.5781,12
+6.4862,6.5987
+5.0546,3.8166
+5.7107,3.2522
+14.164,15.505
+5.734,3.1551
+8.4084,7.2258
+5.6407,0.71618
+5.3794,3.5129
+6.3654,5.3048
+5.1301,0.56077
+6.4296,3.6518
+7.0708,5.3893
+6.1891,3.1386
+20.27,21.767
+5.4901,4.263
+6.3261,5.1875
+5.5649,3.0825
+18.945,22.638
+12.828,13.501
+10.957,7.0467
+13.176,14.692
+22.203,24.147
+5.2524,-1.22
+6.5894,5.9966
+9.2482,12.134
+5.8918,1.8495
+8.2111,6.5426
+7.9334,4.5623
+8.0959,4.1164
+5.6063,3.3928
+12.836,10.117
+6.3534,5.4974
+5.4069,0.55657
+6.8825,3.9115
+11.708,5.3854
+5.7737,2.4406
+7.8247,6.7318
+7.0931,1.0463
+5.0702,5.1337
+5.8014,1.844
+11.7,8.0043
+5.5416,1.0179
+7.5402,6.7504
+5.3077,1.8396
+7.4239,4.2885
+7.6031,4.9981
+6.3328,1.4233
+6.3589,-1.4211
+6.2742,2.4756
+5.6397,4.6042
+9.3102,3.9624
+9.4536,5.4141
+8.8254,5.1694
+5.1793,-0.74279
+21.279,17.929
+14.908,12.054
+18.959,17.054
+7.2182,4.8852
+8.2951,5.7442
+10.236,7.7754
+5.4994,1.0173
+20.341,20.992
+10.136,6.6799
+7.3345,4.0259
+6.0062,1.2784
+7.2259,3.3411
+5.0269,-2.6807
+6.5479,0.29678
+7.5386,3.8845
+5.0365,5.7014
+10.274,6.7526
+5.1077,2.0576
+5.7292,0.47953
+5.1884,0.20421
+6.3557,0.67861
+9.7687,7.5435
+6.5159,5.3436
+8.5172,4.2415
+9.1802,6.7981
+6.002,0.92695
+5.5204,0.152
+5.0594,2.8214
+5.7077,1.8451
+7.6366,4.2959
+5.8707,7.2029
+5.3054,1.9869
+8.2934,0.14454
+13.394,9.0551
+5.4369,0.61705
diff --git a/Practical_sessions/Session_3/nn_regression-pytorch-completed.py b/Practical_sessions/Session_3/nn_regression-pytorch-completed.py
new file mode 100644
index 0000000000000000000000000000000000000000..2732c93dec8b18a863b118f5e9ebe63de410de60
--- /dev/null
+++ b/Practical_sessions/Session_3/nn_regression-pytorch-completed.py
@@ -0,0 +1,148 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import numpy as np
+import matplotlib.pyplot as plt
+
+def read_data(file_name, delimiter=','):
+    """ Reads the file containing the data and returns the corresponding tensors """
+    data = np.loadtxt(file_name, delimiter=delimiter)
+    num_targets = 1
+    num_vars = data.shape[1] - num_targets
+    N = data.shape[0]
+
+    x = torch.tensor(data[:, :num_vars], dtype=torch.float32)
+    d = torch.tensor(data[:, num_vars:], dtype=torch.float32).view(N, 1)
+    
+    return x, d, N, num_vars, num_targets
+
+def normalization(x):
+    """ Normalizes the data by centering and scaling the predictor variables """
+    mu = x.mean(0)
+    sigma = x.std(0)
+    x_norm = (x - mu) / sigma
+
+    return x_norm, mu, sigma
+
+def split_data(x, d, val_prop=0.2, test_prop=0.2):
+    """ Splits the initial data into training, validation, and testing subsets """
+    assert val_prop + test_prop < 1.0
+
+    N = x.size(0)
+    indices = torch.randperm(N)
+    num_val = int(N * val_prop)
+    num_test = int(N * test_prop)
+    num_train = N - num_val - num_test
+
+    x = x[indices]
+    d = d[indices]
+
+    x_train = x[:num_train]
+    d_train = d[:num_train]
+
+    x_val = x[num_train:num_train + num_val]
+    d_val = d[num_train:num_train + num_val]
+
+    x_test = x[num_train + num_val:]
+    d_test = d[num_train + num_val:]
+
+    return x_train, d_train, x_val, d_val, x_test, d_test
+
+# Define the neural network class
+class NeuralNetwork(nn.Module):
+    def __init__(self, layer_dims, activations):
+        super(NeuralNetwork, self).__init__()
+        layers = []
+        for i in range(len(layer_dims) - 1):
+            layers.append(nn.Linear(layer_dims[i], layer_dims[i + 1]))
+            if activations[i] == 'relu':
+                layers.append(nn.ReLU())
+            elif activations[i] == 'sigmoid':
+                layers.append(nn.Sigmoid())
+            elif activations[i] == 'linear':
+                pass  # Linear activation is implicit
+        self.model = nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.model(x)
+
+def calculate_mse_cost(y, d):
+    """ Calculates the MSE loss function """
+    return ((y - d) ** 2).mean() / 2
+
+# ===================== Part 1: Data Reading and Normalization =====================
+print("Reading data ...")
+
+x, d, N, num_vars, num_targets = read_data("food_truck.txt")
+# x, d, N, num_vars, num_targets = read_data("houses.txt")
+
+# Displaying the first 10 examples from the dataset
+print("Displaying the first 10 examples from the dataset: ")
+for i in range(10):
+    print(f"x = {x[i]}, d = {d[i]}")
+
+# Normalizing the variables
+print("Normalizing the variables ...")
+x, mu, sigma = normalization(x)
+dmax = d.max()
+d = d / dmax
+
+# Splitting the data
+x_train, d_train, x_val, d_val, x_test, d_test = split_data(x, d)
+
+# ===================== Part 2: Training =====================
+
+# Hyperparameters
+alpha = 0.001
+num_iters = 500
+layer_dims = [num_vars, 5, 10, num_targets]
+activations = ['relu', 'sigmoid', 'linear']
+
+# Model, loss, and optimizer
+model = NeuralNetwork(layer_dims, activations)
+criterion = nn.MSELoss()
+optimizer = optim.SGD(model.parameters(), lr=alpha)
+
+train_costs = []
+val_costs = []
+
+for t in range(num_iters):
+    # Training forward pass
+    model.train()
+    y_train = model(x_train)
+    train_loss = criterion(y_train, d_train)
+
+    # Backpropagation
+    optimizer.zero_grad()
+    train_loss.backward()
+    optimizer.step()
+
+    # Validation forward pass
+    model.eval()
+    with torch.no_grad():
+        y_val = model(x_val)
+        val_loss = criterion(y_val, d_val)
+
+    train_costs.append(train_loss.item())
+    val_costs.append(val_loss.item())
+
+print("Final cost on the training set: ", train_costs[-1])
+print("Final cost on the validation set: ", val_costs[-1])
+
+# Plotting the evolution of the cost function
+plt.figure()
+plt.title("Evolution of the cost function during training")
+plt.plot(range(num_iters), train_costs, label="Training")
+plt.plot(range(num_iters), val_costs, label="Validation")
+plt.legend(loc="upper left")
+plt.xlabel("Number of iterations")
+plt.ylabel("Cost")
+plt.show()
+
+# ===================== Part 3: Evaluation on the Test Set =====================
+model.eval()
+with torch.no_grad():
+    y_test = model(x_test)
+    test_loss = criterion(y_test, d_test)
+
+print("Test set cost: ", test_loss.item())
diff --git a/Practical_sessions/Session_4/Subject_4_CNN.ipynb b/Practical_sessions/Session_4/Subject_4_CNN.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..7123cea269bfa09813ac5cccced7f4b9f2843115
--- /dev/null
+++ b/Practical_sessions/Session_4/Subject_4_CNN.ipynb
@@ -0,0 +1,735 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "7edf7168",
+   "metadata": {},
+   "source": [
+    "### **_Deep Learning  - Bsc Data Science for Responsible Business - Centrale Lyon_**\n",
+    "\n",
+    "2024-2025\n",
+    "\n",
+    "Emmanuel Dellandréa\t  \n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4c69d182",
+   "metadata": {},
+   "source": [
+    "# Practical Session 4 – Convolutional Neural Networks"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fa71eda4",
+   "metadata": {},
+   "source": [
+    "The objective of this tutorial is to use the PyTorch library for building, training, and evaluating CNN models."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "23f266da",
+   "metadata": {},
+   "source": [
+    "## Sequence 1: Training a CNN to classify CIFAR10 images\n",
+    "\n",
+    "The goal is to apply a Convolutional Neural Net (CNN) model on the CIFAR10 image dataset and test the accuracy of the model on the basis of image classification. \n",
+    "\n",
+    "Be sure to check the PyTorch tutorials and documentation when needed:\n",
+    "\n",
+    "https://pytorch.org/tutorials/\n",
+    "\n",
+    "https://pytorch.org/docs/stable/index.html\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4ba1c82d",
+   "metadata": {},
+   "source": [
+    "You can test if GPU is available on your machine and thus train on it to speed up the process"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6e18f2fd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "\n",
+    "# check if CUDA is available\n",
+    "train_on_gpu = torch.cuda.is_available()\n",
+    "\n",
+    "if not train_on_gpu:\n",
+    "    print(\"CUDA is not available.  Training on CPU ...\")\n",
+    "else:\n",
+    "    print(\"CUDA is available!  Training on GPU ...\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5cf214eb",
+   "metadata": {},
+   "source": [
+    "Next we load the CIFAR10 dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "462666a2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from torchvision import datasets, transforms\n",
+    "from torch.utils.data.sampler import SubsetRandomSampler\n",
+    "\n",
+    "# number of subprocesses to use for data loading\n",
+    "num_workers = 0\n",
+    "# how many samples per batch to load\n",
+    "batch_size = 20\n",
+    "# percentage of training set to use as validation\n",
+    "valid_size = 0.2\n",
+    "\n",
+    "# convert data to a normalized torch.FloatTensor\n",
+    "transform = transforms.Compose(\n",
+    "    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n",
+    ")\n",
+    "\n",
+    "# choose the training and test datasets\n",
+    "train_data = datasets.CIFAR10(\"data\", train=True, download=True, transform=transform)\n",
+    "test_data = datasets.CIFAR10(\"data\", train=False, download=True, transform=transform)\n",
+    "\n",
+    "# obtain training indices that will be used for validation\n",
+    "num_train = len(train_data)\n",
+    "indices = list(range(num_train))\n",
+    "np.random.shuffle(indices)\n",
+    "split = int(np.floor(valid_size * num_train))\n",
+    "train_idx, valid_idx = indices[split:], indices[:split]\n",
+    "\n",
+    "# define samplers for obtaining training and validation batches\n",
+    "train_sampler = SubsetRandomSampler(train_idx)\n",
+    "valid_sampler = SubsetRandomSampler(valid_idx)\n",
+    "\n",
+    "# prepare data loaders (combine dataset and sampler)\n",
+    "train_loader = torch.utils.data.DataLoader(\n",
+    "    train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers\n",
+    ")\n",
+    "valid_loader = torch.utils.data.DataLoader(\n",
+    "    train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers\n",
+    ")\n",
+    "test_loader = torch.utils.data.DataLoader(\n",
+    "    test_data, batch_size=batch_size, num_workers=num_workers\n",
+    ")\n",
+    "\n",
+    "# specify the image classes\n",
+    "classes = [\n",
+    "    \"airplane\",\n",
+    "    \"automobile\",\n",
+    "    \"bird\",\n",
+    "    \"cat\",\n",
+    "    \"deer\",\n",
+    "    \"dog\",\n",
+    "    \"frog\",\n",
+    "    \"horse\",\n",
+    "    \"ship\",\n",
+    "    \"truck\",\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "58ec3903",
+   "metadata": {},
+   "source": [
+    "CNN definition (this one is an example)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "317bf070",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "# define the CNN architecture\n",
+    "\n",
+    "\n",
+    "class Net(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Net, self).__init__()\n",
+    "        self.conv1 = nn.Conv2d(3, 6, 5)\n",
+    "        self.pool = nn.MaxPool2d(2, 2)\n",
+    "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
+    "        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n",
+    "        self.fc2 = nn.Linear(120, 84)\n",
+    "        self.fc3 = nn.Linear(84, 10)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = x.view(-1, 16 * 5 * 5)\n",
+    "        x = F.relu(self.fc1(x))\n",
+    "        x = F.relu(self.fc2(x))\n",
+    "        x = self.fc3(x)\n",
+    "        return x\n",
+    "\n",
+    "\n",
+    "# create a complete CNN\n",
+    "model = Net()\n",
+    "print(model)\n",
+    "# move tensors to GPU if CUDA is available\n",
+    "if train_on_gpu:\n",
+    "    model.cuda()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a2dc4974",
+   "metadata": {},
+   "source": [
+    "Loss function and training using SGD (Stochastic Gradient Descent) optimizer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4b53f229",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch.optim as optim\n",
+    "\n",
+    "criterion = nn.CrossEntropyLoss()  # specify loss function\n",
+    "optimizer = optim.SGD(model.parameters(), lr=0.01)  # specify optimizer\n",
+    "\n",
+    "n_epochs = 30  # number of epochs to train the model\n",
+    "train_loss_list = []  # list to store loss to visualize\n",
+    "valid_loss_min = np.Inf  # track change in validation loss\n",
+    "\n",
+    "for epoch in range(n_epochs):\n",
+    "    # Keep track of training and validation loss\n",
+    "    train_loss = 0.0\n",
+    "    valid_loss = 0.0\n",
+    "\n",
+    "    # Train the model\n",
+    "    model.train()\n",
+    "    for data, target in train_loader:\n",
+    "        # Move tensors to GPU if CUDA is available\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        # Clear the gradients of all optimized variables\n",
+    "        optimizer.zero_grad()\n",
+    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "        output = model(data)\n",
+    "        # Calculate the batch loss\n",
+    "        loss = criterion(output, target)\n",
+    "        # Backward pass: compute gradient of the loss with respect to model parameters\n",
+    "        loss.backward()\n",
+    "        # Perform a single optimization step (parameter update)\n",
+    "        optimizer.step()\n",
+    "        # Update training loss\n",
+    "        train_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "    # Validate the model\n",
+    "    model.eval()\n",
+    "    for data, target in valid_loader:\n",
+    "        # Move tensors to GPU if CUDA is available\n",
+    "        if train_on_gpu:\n",
+    "            data, target = data.cuda(), target.cuda()\n",
+    "        # Forward pass: compute predicted outputs by passing inputs to the model\n",
+    "        output = model(data)\n",
+    "        # Calculate the batch loss\n",
+    "        loss = criterion(output, target)\n",
+    "        # Update average validation loss\n",
+    "        valid_loss += loss.item() * data.size(0)\n",
+    "\n",
+    "    # Calculate average losses\n",
+    "    train_loss = train_loss / len(train_loader)\n",
+    "    valid_loss = valid_loss / len(valid_loader)\n",
+    "    train_loss_list.append(train_loss)\n",
+    "\n",
+    "    # Print training/validation statistics\n",
+    "    print(\n",
+    "        \"Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}\".format(\n",
+    "            epoch, train_loss, valid_loss\n",
+    "        )\n",
+    "    )\n",
+    "\n",
+    "    # Save model if validation loss has decreased\n",
+    "    if valid_loss <= valid_loss_min:\n",
+    "        print(\n",
+    "            \"Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...\".format(\n",
+    "                valid_loss_min, valid_loss\n",
+    "            )\n",
+    "        )\n",
+    "        torch.save(model.state_dict(), \"model_cifar.pt\")\n",
+    "        valid_loss_min = valid_loss"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "13e1df74",
+   "metadata": {},
+   "source": [
+    "Does overfit occur? If so, do an early stopping."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d39df818",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "plt.plot(range((len(train_loss_list))), train_loss_list)\n",
+    "plt.xlabel(\"Epoch\")\n",
+    "plt.ylabel(\"Loss\")\n",
+    "plt.title(\"Performance of Model 1\")\n",
+    "plt.show()"
+   ]
+  },
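+  {
+   "cell_type": "markdown",
+   "id": "7c2e90ab",
+   "metadata": {},
+   "source": [
+    "One possible early-stopping criterion: stop when the validation loss has not improved for a given number of epochs. A minimal sketch follows (the helper name `should_stop` and the `patience` value are our assumptions, not part of the subject): store the validation losses in a list, call the helper at the end of each epoch, and `break` when it returns `True`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2f8a31cd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def should_stop(val_losses, patience=5):\n",
+    "    \"\"\"Sketch of patience-based early stopping: True when the last\n",
+    "    `patience` epochs brought no improvement over the best earlier loss\"\"\"\n",
+    "    if len(val_losses) <= patience:\n",
+    "        return False\n",
+    "    best_so_far = min(val_losses[:-patience])\n",
+    "    return min(val_losses[-patience:]) >= best_so_far"
+   ]
+  },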
+  {
+   "cell_type": "markdown",
+   "id": "11df8fd4",
+   "metadata": {},
+   "source": [
+    "Now loading the model with the lowest validation loss value\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e93efdfc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.load_state_dict(torch.load(\"./model_cifar.pt\"))\n",
+    "\n",
+    "# track test loss\n",
+    "test_loss = 0.0\n",
+    "class_correct = list(0.0 for i in range(10))\n",
+    "class_total = list(0.0 for i in range(10))\n",
+    "\n",
+    "model.eval()\n",
+    "# iterate over test data\n",
+    "for data, target in test_loader:\n",
+    "    # move tensors to GPU if CUDA is available\n",
+    "    if train_on_gpu:\n",
+    "        data, target = data.cuda(), target.cuda()\n",
+    "    # forward pass: compute predicted outputs by passing inputs to the model\n",
+    "    output = model(data)\n",
+    "    # calculate the batch loss\n",
+    "    loss = criterion(output, target)\n",
+    "    # update test loss\n",
+    "    test_loss += loss.item() * data.size(0)\n",
+    "    # convert output probabilities to predicted class\n",
+    "    _, pred = torch.max(output, 1)\n",
+    "    # compare predictions to true label\n",
+    "    correct_tensor = pred.eq(target.data.view_as(pred))\n",
+    "    correct = (\n",
+    "        np.squeeze(correct_tensor.numpy())\n",
+    "        if not train_on_gpu\n",
+    "        else np.squeeze(correct_tensor.cpu().numpy())\n",
+    "    )\n",
+    "    # calculate test accuracy for each object class\n",
+    "    for i in range(batch_size):\n",
+    "        label = target.data[i]\n",
+    "        class_correct[label] += correct[i].item()\n",
+    "        class_total[label] += 1\n",
+    "\n",
+    "# average test loss\n",
+    "test_loss = test_loss / len(test_loader)\n",
+    "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n",
+    "\n",
+    "for i in range(10):\n",
+    "    if class_total[i] > 0:\n",
+    "        print(\n",
+    "            \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n",
+    "            % (\n",
+    "                classes[i],\n",
+    "                100 * class_correct[i] / class_total[i],\n",
+    "                np.sum(class_correct[i]),\n",
+    "                np.sum(class_total[i]),\n",
+    "            )\n",
+    "        )\n",
+    "    else:\n",
+    "        print(\"Test Accuracy of %5s: N/A (no training examples)\" % (classes[i]))\n",
+    "\n",
+    "print(\n",
+    "    \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n",
+    "    % (\n",
+    "        100.0 * np.sum(class_correct) / np.sum(class_total),\n",
+    "        np.sum(class_correct),\n",
+    "        np.sum(class_total),\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "944991a2",
+   "metadata": {},
+   "source": [
+    "### Experiments:\n",
+    "\n",
+    "Build a new network with the following structure.\n",
+    "\n",
+    "- It has 3 convolutional layers of kernel size 3 and padding of 1.\n",
+    "- The first convolutional layer must output 16 channels, the second 32 and the third 64.\n",
+    "- At each convolutional layer output, we apply a ReLU activation then a MaxPool with kernel size of 2.\n",
+    "- Then, three fully connected layers, the first two being followed by a ReLU activation.\n",
+    "- The first fully connected layer will have an output size of 512.\n",
+    "- The second fully connected layer will have an output size of 64.\n",
+    "\n",
+    "Compare the results obtained with this new network to those obtained previously."
+   ]
+  },
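+  {
+   "cell_type": "markdown",
+   "id": "3a9c51e0",
+   "metadata": {},
+   "source": [
+    "Below is a minimal sketch of one possible implementation, assuming 32x32 CIFAR10 inputs (three 2x2 poolings reduce them to 4x4 feature maps); the class name `Net2` is our choice."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5b0d7f41",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal sketch of the architecture described above (assumes 32x32 inputs)\n",
+    "class Net2(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Net2, self).__init__()\n",
+    "        # three convolutional layers with kernel size 3 and padding 1\n",
+    "        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n",
+    "        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n",
+    "        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n",
+    "        self.pool = nn.MaxPool2d(2, 2)\n",
+    "        # after three 2x2 poolings, a 32x32 image is reduced to 4x4\n",
+    "        self.fc1 = nn.Linear(64 * 4 * 4, 512)\n",
+    "        self.fc2 = nn.Linear(512, 64)\n",
+    "        self.fc3 = nn.Linear(64, 10)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = self.pool(F.relu(self.conv3(x)))\n",
+    "        x = x.view(-1, 64 * 4 * 4)\n",
+    "        x = F.relu(self.fc1(x))\n",
+    "        x = F.relu(self.fc2(x))\n",
+    "        return self.fc3(x)\n",
+    "\n",
+    "\n",
+    "model = Net2()\n",
+    "print(model)\n",
+    "if train_on_gpu:\n",
+    "    model.cuda()"
+   ]
+  },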
+  {
+   "cell_type": "markdown",
+   "id": "201470f9",
+   "metadata": {},
+   "source": [
+    "## Sequence 2: Working with pre-trained models.\n",
+    "\n",
+    "PyTorch offers several pre-trained models https://pytorch.org/vision/0.8/models.html        \n",
+    "We will use ResNet50 trained on ImageNet dataset (https://www.image-net.org/index.php). Use the following code with the files `imagenet-simple-labels.json` that contains the imagenet labels and the image dog.png that we will use as test.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b4d13080",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "from PIL import Image\n",
+    "from torchvision import models\n",
+    "\n",
+    "# Choose an image to pass through the model\n",
+    "test_image = \"dog.png\"\n",
+    "\n",
+    "# Configure matplotlib for pretty inline plots\n",
+    "#%matplotlib inline\n",
+    "#%config InlineBackend.figure_format = 'retina'\n",
+    "\n",
+    "# Prepare the labels\n",
+    "with open(\"imagenet-simple-labels.json\") as f:\n",
+    "    labels = json.load(f)\n",
+    "\n",
+    "# First prepare the transformations: resize the image to what the model was trained on and convert it to a tensor\n",
+    "data_transform = transforms.Compose(\n",
+    "    [\n",
+    "        transforms.Resize((224, 224)),\n",
+    "        transforms.ToTensor(),\n",
+    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "    ]\n",
+    ")\n",
+    "# Load the image\n",
+    "\n",
+    "image = Image.open(test_image)\n",
+    "plt.imshow(image), plt.xticks([]), plt.yticks([])\n",
+    "\n",
+    "# Now apply the transformation, expand the batch dimension, and send the image to the GPU\n",
+    "# image = data_transform(image).unsqueeze(0).cuda()\n",
+    "image = data_transform(image).unsqueeze(0)\n",
+    "\n",
+    "# Download the model if it's not there already. It will take a bit on the first run, after that it's fast\n",
+    "model = models.resnet50(pretrained=True)\n",
+    "# Send the model to the GPU\n",
+    "# model.cuda()\n",
+    "# Set layers such as dropout and batchnorm in evaluation mode\n",
+    "model.eval()\n",
+    "\n",
+    "# Get the 1000-dimensional model output\n",
+    "out = model(image)\n",
+    "# Find the predicted class\n",
+    "print(\"Predicted class is: {}\".format(labels[out.argmax()]))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "184cfceb",
+   "metadata": {},
+   "source": [
+    "### Experiments:\n",
+    "\n",
+    "Study the code and the results obtained. Possibly add other images downloaded from the internet.\n",
+    "\n",
+    "Experiment with other pre-trained CNN models.\n",
+    "\n",
+    "    \n"
+   ]
+  },
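+  {
+   "cell_type": "markdown",
+   "id": "4d6f22b8",
+   "metadata": {},
+   "source": [
+    "For instance, any classifier from `torchvision.models` can be swapped in. A sketch with DenseNet-121 (the choice of model is ours), reusing the `image` and `labels` prepared above:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6e9b84fa",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: swap in another ImageNet-pretrained classifier (DenseNet-121 here)\n",
+    "model = models.densenet121(pretrained=True)\n",
+    "model.eval()\n",
+    "\n",
+    "out = model(image)\n",
+    "print(\"Predicted class is: {}\".format(labels[out.argmax()]))"
+   ]
+  },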
+  {
+   "cell_type": "markdown",
+   "id": "5d57da4b",
+   "metadata": {},
+   "source": [
+    "## Sequence 3: Transfer Learning\n",
+    "    \n",
+    "    \n",
+    "For this work, we will use a pre-trained model (ResNet18) as a descriptor extractor and will refine the classification by training only the last fully connected layer of the network. Thus, the output layer of the pre-trained network will be replaced by a layer adapted to the new classes to be recognized which will be in our case ants and bees.\n",
+    "Download and unzip in your working directory the dataset available at the address :\n",
+    "    \n",
+    "https://download.pytorch.org/tutorial/hymenoptera_data.zip\n",
+    "    \n",
+    "Execute the following code in order to display some images of the dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "be2d31f5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import torch\n",
+    "import torchvision\n",
+    "from torchvision import datasets, transforms\n",
+    "import torch.nn as nn\n",
+    "import torch.optim as optim\n",
+    "from torch.optim import lr_scheduler\n",
+    "\n",
+    "# Data augmentation and normalization for training\n",
+    "# Just normalization for validation\n",
+    "data_transforms = {\n",
+    "    \"train\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.RandomResizedCrop(\n",
+    "                224\n",
+    "            ),  # ImageNet models were trained on 224x224 images\n",
+    "            transforms.RandomHorizontalFlip(),  # flip horizontally 50% of the time - increases train set variability\n",
+    "            transforms.ToTensor(),  # convert it to a PyTorch tensor\n",
+    "            transforms.Normalize(\n",
+    "                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n",
+    "            ),  # ImageNet models expect this norm\n",
+    "        ]\n",
+    "    ),\n",
+    "    \"val\": transforms.Compose(\n",
+    "        [\n",
+    "            transforms.Resize(256),\n",
+    "            transforms.CenterCrop(224),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
+    "        ]\n",
+    "    ),\n",
+    "}\n",
+    "\n",
+    "data_dir = \"hymenoptera_data\"\n",
+    "# Create train and validation datasets and loaders\n",
+    "image_datasets = {\n",
+    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataloaders = {\n",
+    "    x: torch.utils.data.DataLoader(\n",
+    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=0\n",
+    "    )\n",
+    "    for x in [\"train\", \"val\"]\n",
+    "}\n",
+    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
+    "class_names = image_datasets[\"train\"].classes\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "# Helper function for displaying images\n",
+    "def imshow(inp, title=None):\n",
+    "    \"\"\"Imshow for Tensor.\"\"\"\n",
+    "    inp = inp.numpy().transpose((1, 2, 0))\n",
+    "    mean = np.array([0.485, 0.456, 0.406])\n",
+    "    std = np.array([0.229, 0.224, 0.225])\n",
+    "\n",
+    "    # Un-normalize the images\n",
+    "    inp = std * inp + mean\n",
+    "    # Clip just in case\n",
+    "    inp = np.clip(inp, 0, 1)\n",
+    "    plt.imshow(inp)\n",
+    "    if title is not None:\n",
+    "        plt.title(title)\n",
+    "    plt.pause(0.001)  # pause a bit so that plots are updated\n",
+    "    plt.show()\n",
+    "\n",
+    "\n",
+    "# Get a batch of training data\n",
+    "inputs, classes = next(iter(dataloaders[\"train\"]))\n",
+    "\n",
+    "# Make a grid from batch\n",
+    "out = torchvision.utils.make_grid(inputs)\n",
+    "\n",
+    "imshow(out, title=[class_names[x] for x in classes])\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bbd48800",
+   "metadata": {},
+   "source": [
+    "Now, execute the following code which uses a pre-trained model ResNet18 having replaced the output layer for the ants/bees classification and performs the model training by only changing the weights of this output layer."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "572d824c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import copy\n",
+    "import time\n",
+    "\n",
+    "\n",
+    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
+    "    since = time.time()\n",
+    "\n",
+    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "    best_acc = 0.0\n",
+    "\n",
+    "    epoch_time = []  # we'll keep track of the time needed for each epoch\n",
+    "\n",
+    "    for epoch in range(num_epochs):\n",
+    "        epoch_start = time.time()\n",
+    "        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n",
+    "        print(\"-\" * 10)\n",
+    "\n",
+    "        # Each epoch has a training and validation phase\n",
+    "        for phase in [\"train\", \"val\"]:\n",
+    "            if phase == \"train\":\n",
+    "                scheduler.step()\n",
+    "                model.train()  # Set model to training mode\n",
+    "            else:\n",
+    "                model.eval()  # Set model to evaluate mode\n",
+    "\n",
+    "            running_loss = 0.0\n",
+    "            running_corrects = 0\n",
+    "\n",
+    "            # Iterate over data.\n",
+    "            for inputs, labels in dataloaders[phase]:\n",
+    "                inputs = inputs.to(device)\n",
+    "                labels = labels.to(device)\n",
+    "\n",
+    "                # zero the parameter gradients\n",
+    "                optimizer.zero_grad()\n",
+    "\n",
+    "                # Forward\n",
+    "                # Track history if only in training phase\n",
+    "                with torch.set_grad_enabled(phase == \"train\"):\n",
+    "                    outputs = model(inputs)\n",
+    "                    _, preds = torch.max(outputs, 1)\n",
+    "                    loss = criterion(outputs, labels)\n",
+    "\n",
+    "                    # backward + optimize only if in training phase\n",
+    "                    if phase == \"train\":\n",
+    "                        loss.backward()\n",
+    "                        optimizer.step()\n",
+    "\n",
+    "                # Statistics\n",
+    "                running_loss += loss.item() * inputs.size(0)\n",
+    "                running_corrects += torch.sum(preds == labels.data)\n",
+    "\n",
+    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
+    "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
+    "\n",
+    "            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n",
+    "\n",
+    "            # Deep copy the model\n",
+    "            if phase == \"val\" and epoch_acc > best_acc:\n",
+    "                best_acc = epoch_acc\n",
+    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "\n",
+    "        # Add the epoch time\n",
+    "        t_epoch = time.time() - epoch_start\n",
+    "        epoch_time.append(t_epoch)\n",
+    "        print()\n",
+    "\n",
+    "    time_elapsed = time.time() - since\n",
+    "    print(\n",
+    "        \"Training complete in {:.0f}m {:.0f}s\".format(\n",
+    "            time_elapsed // 60, time_elapsed % 60\n",
+    "        )\n",
+    "    )\n",
+    "    print(\"Best val Acc: {:4f}\".format(best_acc))\n",
+    "\n",
+    "    # Load best model weights\n",
+    "    model.load_state_dict(best_model_wts)\n",
+    "    return model, epoch_time\n",
+    "\n",
+    "\n",
+    "# Download a pre-trained ResNet18 model and freeze its weights\n",
+    "model = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "# Replace the final fully connected layer\n",
+    "# Parameters of newly constructed modules have requires_grad=True by default\n",
+    "num_ftrs = model.fc.in_features\n",
+    "model.fc = nn.Linear(num_ftrs, 2)\n",
+    "# Send the model to the GPU\n",
+    "model = model.to(device)\n",
+    "# Set the loss function\n",
+    "criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "# Observe that only the parameters of the final layer are being optimized\n",
+    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
+    "model, epoch_time = train_model(\n",
+    "    model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bbd48800",
+   "metadata": {},
+   "source": [
+    "### Experiments:\n",
+    "\n",
+    "Study the code and the results obtained.\n",
+    "\n",
+    "Modify the code and add an \"eval_model\" function to allow\n",
+    "the evaluation of the model on a test set (different from the learning and validation sets used during the learning phase). Study the results obtained.\n",
+    "\n",
+    "Now modify the code to replace the current classification layer with a set of two layers using a \"relu\" activation function for the middle layer. Renew the experiments and study the results obtained.\n",
+    "\n",
+    "Experiment with other models and datasets."
+   ]
+  },
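+  {
+   "cell_type": "markdown",
+   "id": "3fd02a17",
+   "metadata": {},
+   "source": [
+    "Minimal sketches for the two modifications above (the helper name `eval_model` and the hidden size of 256 are our choices, not imposed by the subject; a genuine test loader should be built and passed to `eval_model` for the actual experiments):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8c4b6e92",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def eval_model(model, criterion, dataloader):\n",
+    "    \"\"\"Sketch: evaluate a trained model on a held-out dataloader\"\"\"\n",
+    "    model.eval()\n",
+    "    running_loss, running_corrects, num_samples = 0.0, 0, 0\n",
+    "    with torch.no_grad():\n",
+    "        for inputs, labels in dataloader:\n",
+    "            inputs, labels = inputs.to(device), labels.to(device)\n",
+    "            outputs = model(inputs)\n",
+    "            _, preds = torch.max(outputs, 1)\n",
+    "            running_loss += criterion(outputs, labels).item() * inputs.size(0)\n",
+    "            running_corrects += torch.sum(preds == labels.data).item()\n",
+    "            num_samples += inputs.size(0)\n",
+    "    print(\n",
+    "        \"Test Loss: {:.4f} Acc: {:.4f}\".format(\n",
+    "            running_loss / num_samples, running_corrects / num_samples\n",
+    "        )\n",
+    "    )\n",
+    "\n",
+    "\n",
+    "# Sketch: two-layer classification head (hidden size 256 is an arbitrary choice)\n",
+    "model2 = torchvision.models.resnet18(pretrained=True)\n",
+    "for param in model2.parameters():\n",
+    "    param.requires_grad = False\n",
+    "\n",
+    "num_ftrs = model2.fc.in_features\n",
+    "model2.fc = nn.Sequential(\n",
+    "    nn.Linear(num_ftrs, 256), nn.ReLU(), nn.Linear(256, 2)\n",
+    ")\n",
+    "model2 = model2.to(device)\n",
+    "\n",
+    "optimizer2 = optim.SGD(model2.fc.parameters(), lr=0.001, momentum=0.9)\n",
+    "scheduler2 = lr_scheduler.StepLR(optimizer2, step_size=7, gamma=0.1)\n",
+    "model2, _ = train_model(model2, criterion, optimizer2, scheduler2, num_epochs=10)"
+   ]
+  }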
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.20"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/Practical_sessions/Session_4/dog.png b/Practical_sessions/Session_4/dog.png
new file mode 100644
index 0000000000000000000000000000000000000000..786dc161c6d8981020d04d7a7082ab1d4fcb1bef
Binary files /dev/null and b/Practical_sessions/Session_4/dog.png differ
diff --git a/Practical_sessions/Session_4/imagenet-simple-labels.json b/Practical_sessions/Session_4/imagenet-simple-labels.json
new file mode 100644
index 0000000000000000000000000000000000000000..4528298045cba5f1f6cded2f2f6b2da4b4775d4b
--- /dev/null
+++ b/Practical_sessions/Session_4/imagenet-simple-labels.json
@@ -0,0 +1,1000 @@
+["tench",
+"goldfish",
+"great white shark",
+"tiger shark",
+"hammerhead shark",
+"electric ray",
+"stingray",
+"cock",
+"hen",
+"ostrich",
+"brambling",
+"goldfinch",
+"house finch",
+"junco",
+"indigo bunting",
+"American robin",
+"bulbul",
+"jay",
+"magpie",
+"chickadee",
+"American dipper",
+"kite",
+"bald eagle",
+"vulture",
+"great grey owl",
+"fire salamander",
+"smooth newt",
+"newt",
+"spotted salamander",
+"axolotl",
+"American bullfrog",
+"tree frog",
+"tailed frog",
+"loggerhead sea turtle",
+"leatherback sea turtle",
+"mud turtle",
+"terrapin",
+"box turtle",
+"banded gecko",
+"green iguana",
+"Carolina anole",
+"desert grassland whiptail lizard",
+"agama",
+"frilled-necked lizard",
+"alligator lizard",
+"Gila monster",
+"European green lizard",
+"chameleon",
+"Komodo dragon",
+"Nile crocodile",
+"American alligator",
+"triceratops",
+"worm snake",
+"ring-necked snake",
+"eastern hog-nosed snake",
+"smooth green snake",
+"kingsnake",
+"garter snake",
+"water snake",
+"vine snake",
+"night snake",
+"boa constrictor",
+"African rock python",
+"Indian cobra",
+"green mamba",
+"sea snake",
+"Saharan horned viper",
+"eastern diamondback rattlesnake",
+"sidewinder",
+"trilobite",
+"harvestman",
+"scorpion",
+"yellow garden spider",
+"barn spider",
+"European garden spider",
+"southern black widow",
+"tarantula",
+"wolf spider",
+"tick",
+"centipede",
+"black grouse",
+"ptarmigan",
+"ruffed grouse",
+"prairie grouse",
+"peacock",
+"quail",
+"partridge",
+"grey parrot",
+"macaw",
+"sulphur-crested cockatoo",
+"lorikeet",
+"coucal",
+"bee eater",
+"hornbill",
+"hummingbird",
+"jacamar",
+"toucan",
+"duck",
+"red-breasted merganser",
+"goose",
+"black swan",
+"tusker",
+"echidna",
+"platypus",
+"wallaby",
+"koala",
+"wombat",
+"jellyfish",
+"sea anemone",
+"brain coral",
+"flatworm",
+"nematode",
+"conch",
+"snail",
+"slug",
+"sea slug",
+"chiton",
+"chambered nautilus",
+"Dungeness crab",
+"rock crab",
+"fiddler crab",
+"red king crab",
+"American lobster",
+"spiny lobster",
+"crayfish",
+"hermit crab",
+"isopod",
+"white stork",
+"black stork",
+"spoonbill",
+"flamingo",
+"little blue heron",
+"great egret",
+"bittern",
+"crane",
+"limpkin",
+"common gallinule",
+"American coot",
+"bustard",
+"ruddy turnstone",
+"dunlin",
+"common redshank",
+"dowitcher",
+"oystercatcher",
+"pelican",
+"king penguin",
+"albatross",
+"grey whale",
+"killer whale",
+"dugong",
+"sea lion",
+"Chihuahua",
+"Japanese Chin",
+"Maltese",
+"Pekingese",
+"Shih Tzu",
+"King Charles Spaniel",
+"Papillon",
+"toy terrier",
+"Rhodesian Ridgeback",
+"Afghan Hound",
+"Basset Hound",
+"Beagle",
+"Bloodhound",
+"Bluetick Coonhound",
+"Black and Tan Coonhound",
+"Treeing Walker Coonhound",
+"English foxhound",
+"Redbone Coonhound",
+"borzoi",
+"Irish Wolfhound",
+"Italian Greyhound",
+"Whippet",
+"Ibizan Hound",
+"Norwegian Elkhound",
+"Otterhound",
+"Saluki",
+"Scottish Deerhound",
+"Weimaraner",
+"Staffordshire Bull Terrier",
+"American Staffordshire Terrier",
+"Bedlington Terrier",
+"Border Terrier",
+"Kerry Blue Terrier",
+"Irish Terrier",
+"Norfolk Terrier",
+"Norwich Terrier",
+"Yorkshire Terrier",
+"Wire Fox Terrier",
+"Lakeland Terrier",
+"Sealyham Terrier",
+"Airedale Terrier",
+"Cairn Terrier",
+"Australian Terrier",
+"Dandie Dinmont Terrier",
+"Boston Terrier",
+"Miniature Schnauzer",
+"Giant Schnauzer",
+"Standard Schnauzer",
+"Scottish Terrier",
+"Tibetan Terrier",
+"Australian Silky Terrier",
+"Soft-coated Wheaten Terrier",
+"West Highland White Terrier",
+"Lhasa Apso",
+"Flat-Coated Retriever",
+"Curly-coated Retriever",
+"Golden Retriever",
+"Labrador Retriever",
+"Chesapeake Bay Retriever",
+"German Shorthaired Pointer",
+"Vizsla",
+"English Setter",
+"Irish Setter",
+"Gordon Setter",
+"Brittany",
+"Clumber Spaniel",
+"English Springer Spaniel",
+"Welsh Springer Spaniel",
+"Cocker Spaniels",
+"Sussex Spaniel",
+"Irish Water Spaniel",
+"Kuvasz",
+"Schipperke",
+"Groenendael",
+"Malinois",
+"Briard",
+"Australian Kelpie",
+"Komondor",
+"Old English Sheepdog",
+"Shetland Sheepdog",
+"collie",
+"Border Collie",
+"Bouvier des Flandres",
+"Rottweiler",
+"German Shepherd Dog",
+"Dobermann",
+"Miniature Pinscher",
+"Greater Swiss Mountain Dog",
+"Bernese Mountain Dog",
+"Appenzeller Sennenhund",
+"Entlebucher Sennenhund",
+"Boxer",
+"Bullmastiff",
+"Tibetan Mastiff",
+"French Bulldog",
+"Great Dane",
+"St. Bernard",
+"husky",
+"Alaskan Malamute",
+"Siberian Husky",
+"Dalmatian",
+"Affenpinscher",
+"Basenji",
+"pug",
+"Leonberger",
+"Newfoundland",
+"Pyrenean Mountain Dog",
+"Samoyed",
+"Pomeranian",
+"Chow Chow",
+"Keeshond",
+"Griffon Bruxellois",
+"Pembroke Welsh Corgi",
+"Cardigan Welsh Corgi",
+"Toy Poodle",
+"Miniature Poodle",
+"Standard Poodle",
+"Mexican hairless dog",
+"grey wolf",
+"Alaskan tundra wolf",
+"red wolf",
+"coyote",
+"dingo",
+"dhole",
+"African wild dog",
+"hyena",
+"red fox",
+"kit fox",
+"Arctic fox",
+"grey fox",
+"tabby cat",
+"tiger cat",
+"Persian cat",
+"Siamese cat",
+"Egyptian Mau",
+"cougar",
+"lynx",
+"leopard",
+"snow leopard",
+"jaguar",
+"lion",
+"tiger",
+"cheetah",
+"brown bear",
+"American black bear",
+"polar bear",
+"sloth bear",
+"mongoose",
+"meerkat",
+"tiger beetle",
+"ladybug",
+"ground beetle",
+"longhorn beetle",
+"leaf beetle",
+"dung beetle",
+"rhinoceros beetle",
+"weevil",
+"fly",
+"bee",
+"ant",
+"grasshopper",
+"cricket",
+"stick insect",
+"cockroach",
+"mantis",
+"cicada",
+"leafhopper",
+"lacewing",
+"dragonfly",
+"damselfly",
+"red admiral",
+"ringlet",
+"monarch butterfly",
+"small white",
+"sulphur butterfly",
+"gossamer-winged butterfly",
+"starfish",
+"sea urchin",
+"sea cucumber",
+"cottontail rabbit",
+"hare",
+"Angora rabbit",
+"hamster",
+"porcupine",
+"fox squirrel",
+"marmot",
+"beaver",
+"guinea pig",
+"common sorrel",
+"zebra",
+"pig",
+"wild boar",
+"warthog",
+"hippopotamus",
+"ox",
+"water buffalo",
+"bison",
+"ram",
+"bighorn sheep",
+"Alpine ibex",
+"hartebeest",
+"impala",
+"gazelle",
+"dromedary",
+"llama",
+"weasel",
+"mink",
+"European polecat",
+"black-footed ferret",
+"otter",
+"skunk",
+"badger",
+"armadillo",
+"three-toed sloth",
+"orangutan",
+"gorilla",
+"chimpanzee",
+"gibbon",
+"siamang",
+"guenon",
+"patas monkey",
+"baboon",
+"macaque",
+"langur",
+"black-and-white colobus",
+"proboscis monkey",
+"marmoset",
+"white-headed capuchin",
+"howler monkey",
+"titi",
+"Geoffroy's spider monkey",
+"common squirrel monkey",
+"ring-tailed lemur",
+"indri",
+"Asian elephant",
+"African bush elephant",
+"red panda",
+"giant panda",
+"snoek",
+"eel",
+"coho salmon",
+"rock beauty",
+"clownfish",
+"sturgeon",
+"garfish",
+"lionfish",
+"pufferfish",
+"abacus",
+"abaya",
+"academic gown",
+"accordion",
+"acoustic guitar",
+"aircraft carrier",
+"airliner",
+"airship",
+"altar",
+"ambulance",
+"amphibious vehicle",
+"analog clock",
+"apiary",
+"apron",
+"waste container",
+"assault rifle",
+"backpack",
+"bakery",
+"balance beam",
+"balloon",
+"ballpoint pen",
+"Band-Aid",
+"banjo",
+"baluster",
+"barbell",
+"barber chair",
+"barbershop",
+"barn",
+"barometer",
+"barrel",
+"wheelbarrow",
+"baseball",
+"basketball",
+"bassinet",
+"bassoon",
+"swimming cap",
+"bath towel",
+"bathtub",
+"station wagon",
+"lighthouse",
+"beaker",
+"military cap",
+"beer bottle",
+"beer glass",
+"bell-cot",
+"bib",
+"tandem bicycle",
+"bikini",
+"ring binder",
+"binoculars",
+"birdhouse",
+"boathouse",
+"bobsleigh",
+"bolo tie",
+"poke bonnet",
+"bookcase",
+"bookstore",
+"bottle cap",
+"bow",
+"bow tie",
+"brass",
+"bra",
+"breakwater",
+"breastplate",
+"broom",
+"bucket",
+"buckle",
+"bulletproof vest",
+"high-speed train",
+"butcher shop",
+"taxicab",
+"cauldron",
+"candle",
+"cannon",
+"canoe",
+"can opener",
+"cardigan",
+"car mirror",
+"carousel",
+"tool kit",
+"carton",
+"car wheel",
+"automated teller machine",
+"cassette",
+"cassette player",
+"castle",
+"catamaran",
+"CD player",
+"cello",
+"mobile phone",
+"chain",
+"chain-link fence",
+"chain mail",
+"chainsaw",
+"chest",
+"chiffonier",
+"chime",
+"china cabinet",
+"Christmas stocking",
+"church",
+"movie theater",
+"cleaver",
+"cliff dwelling",
+"cloak",
+"clogs",
+"cocktail shaker",
+"coffee mug",
+"coffeemaker",
+"coil",
+"combination lock",
+"computer keyboard",
+"confectionery store",
+"container ship",
+"convertible",
+"corkscrew",
+"cornet",
+"cowboy boot",
+"cowboy hat",
+"cradle",
+"crane",
+"crash helmet",
+"crate",
+"infant bed",
+"Crock Pot",
+"croquet ball",
+"crutch",
+"cuirass",
+"dam",
+"desk",
+"desktop computer",
+"rotary dial telephone",
+"diaper",
+"digital clock",
+"digital watch",
+"dining table",
+"dishcloth",
+"dishwasher",
+"disc brake",
+"dock",
+"dog sled",
+"dome",
+"doormat",
+"drilling rig",
+"drum",
+"drumstick",
+"dumbbell",
+"Dutch oven",
+"electric fan",
+"electric guitar",
+"electric locomotive",
+"entertainment center",
+"envelope",
+"espresso machine",
+"face powder",
+"feather boa",
+"filing cabinet",
+"fireboat",
+"fire engine",
+"fire screen sheet",
+"flagpole",
+"flute",
+"folding chair",
+"football helmet",
+"forklift",
+"fountain",
+"fountain pen",
+"four-poster bed",
+"freight car",
+"French horn",
+"frying pan",
+"fur coat",
+"garbage truck",
+"gas mask",
+"gas pump",
+"goblet",
+"go-kart",
+"golf ball",
+"golf cart",
+"gondola",
+"gong",
+"gown",
+"grand piano",
+"greenhouse",
+"grille",
+"grocery store",
+"guillotine",
+"barrette",
+"hair spray",
+"half-track",
+"hammer",
+"hamper",
+"hair dryer",
+"hand-held computer",
+"handkerchief",
+"hard disk drive",
+"harmonica",
+"harp",
+"harvester",
+"hatchet",
+"holster",
+"home theater",
+"honeycomb",
+"hook",
+"hoop skirt",
+"horizontal bar",
+"horse-drawn vehicle",
+"hourglass",
+"iPod",
+"clothes iron",
+"jack-o'-lantern",
+"jeans",
+"jeep",
+"T-shirt",
+"jigsaw puzzle",
+"pulled rickshaw",
+"joystick",
+"kimono",
+"knee pad",
+"knot",
+"lab coat",
+"ladle",
+"lampshade",
+"laptop computer",
+"lawn mower",
+"lens cap",
+"paper knife",
+"library",
+"lifeboat",
+"lighter",
+"limousine",
+"ocean liner",
+"lipstick",
+"slip-on shoe",
+"lotion",
+"speaker",
+"loupe",
+"sawmill",
+"magnetic compass",
+"mail bag",
+"mailbox",
+"tights",
+"tank suit",
+"manhole cover",
+"maraca",
+"marimba",
+"mask",
+"match",
+"maypole",
+"maze",
+"measuring cup",
+"medicine chest",
+"megalith",
+"microphone",
+"microwave oven",
+"military uniform",
+"milk can",
+"minibus",
+"miniskirt",
+"minivan",
+"missile",
+"mitten",
+"mixing bowl",
+"mobile home",
+"Model T",
+"modem",
+"monastery",
+"monitor",
+"moped",
+"mortar",
+"square academic cap",
+"mosque",
+"mosquito net",
+"scooter",
+"mountain bike",
+"tent",
+"computer mouse",
+"mousetrap",
+"moving van",
+"muzzle",
+"nail",
+"neck brace",
+"necklace",
+"nipple",
+"notebook computer",
+"obelisk",
+"oboe",
+"ocarina",
+"odometer",
+"oil filter",
+"organ",
+"oscilloscope",
+"overskirt",
+"bullock cart",
+"oxygen mask",
+"packet",
+"paddle",
+"paddle wheel",
+"padlock",
+"paintbrush",
+"pajamas",
+"palace",
+"pan flute",
+"paper towel",
+"parachute",
+"parallel bars",
+"park bench",
+"parking meter",
+"passenger car",
+"patio",
+"payphone",
+"pedestal",
+"pencil case",
+"pencil sharpener",
+"perfume",
+"Petri dish",
+"photocopier",
+"plectrum",
+"Pickelhaube",
+"picket fence",
+"pickup truck",
+"pier",
+"piggy bank",
+"pill bottle",
+"pillow",
+"ping-pong ball",
+"pinwheel",
+"pirate ship",
+"pitcher",
+"hand plane",
+"planetarium",
+"plastic bag",
+"plate rack",
+"plow",
+"plunger",
+"Polaroid camera",
+"pole",
+"police van",
+"poncho",
+"billiard table",
+"soda bottle",
+"pot",
+"potter's wheel",
+"power drill",
+"prayer rug",
+"printer",
+"prison",
+"projectile",
+"projector",
+"hockey puck",
+"punching bag",
+"purse",
+"quill",
+"quilt",
+"race car",
+"racket",
+"radiator",
+"radio",
+"radio telescope",
+"rain barrel",
+"recreational vehicle",
+"reel",
+"reflex camera",
+"refrigerator",
+"remote control",
+"restaurant",
+"revolver",
+"rifle",
+"rocking chair",
+"rotisserie",
+"eraser",
+"rugby ball",
+"ruler",
+"running shoe",
+"safe",
+"safety pin",
+"salt shaker",
+"sandal",
+"sarong",
+"saxophone",
+"scabbard",
+"weighing scale",
+"school bus",
+"schooner",
+"scoreboard",
+"CRT screen",
+"screw",
+"screwdriver",
+"seat belt",
+"sewing machine",
+"shield",
+"shoe store",
+"shoji",
+"shopping basket",
+"shopping cart",
+"shovel",
+"shower cap",
+"shower curtain",
+"ski",
+"ski mask",
+"sleeping bag",
+"slide rule",
+"sliding door",
+"slot machine",
+"snorkel",
+"snowmobile",
+"snowplow",
+"soap dispenser",
+"soccer ball",
+"sock",
+"solar thermal collector",
+"sombrero",
+"soup bowl",
+"space bar",
+"space heater",
+"space shuttle",
+"spatula",
+"motorboat",
+"spider web",
+"spindle",
+"sports car",
+"spotlight",
+"stage",
+"steam locomotive",
+"through arch bridge",
+"steel drum",
+"stethoscope",
+"scarf",
+"stone wall",
+"stopwatch",
+"stove",
+"strainer",
+"tram",
+"stretcher",
+"couch",
+"stupa",
+"submarine",
+"suit",
+"sundial",
+"sunglass",
+"sunglasses",
+"sunscreen",
+"suspension bridge",
+"mop",
+"sweatshirt",
+"swimsuit",
+"swing",
+"switch",
+"syringe",
+"table lamp",
+"tank",
+"tape player",
+"teapot",
+"teddy bear",
+"television",
+"tennis ball",
+"thatched roof",
+"front curtain",
+"thimble",
+"threshing machine",
+"throne",
+"tile roof",
+"toaster",
+"tobacco shop",
+"toilet seat",
+"torch",
+"totem pole",
+"tow truck",
+"toy store",
+"tractor",
+"semi-trailer truck",
+"tray",
+"trench coat",
+"tricycle",
+"trimaran",
+"tripod",
+"triumphal arch",
+"trolleybus",
+"trombone",
+"tub",
+"turnstile",
+"typewriter keyboard",
+"umbrella",
+"unicycle",
+"upright piano",
+"vacuum cleaner",
+"vase",
+"vault",
+"velvet",
+"vending machine",
+"vestment",
+"viaduct",
+"violin",
+"volleyball",
+"waffle iron",
+"wall clock",
+"wallet",
+"wardrobe",
+"military aircraft",
+"sink",
+"washing machine",
+"water bottle",
+"water jug",
+"water tower",
+"whiskey jug",
+"whistle",
+"wig",
+"window screen",
+"window shade",
+"Windsor tie",
+"wine bottle",
+"wing",
+"wok",
+"wooden spoon",
+"wool",
+"split-rail fence",
+"shipwreck",
+"yawl",
+"yurt",
+"website",
+"comic book",
+"crossword",
+"traffic sign",
+"traffic light",
+"dust jacket",
+"menu",
+"plate",
+"guacamole",
+"consomme",
+"hot pot",
+"trifle",
+"ice cream",
+"ice pop",
+"baguette",
+"bagel",
+"pretzel",
+"cheeseburger",
+"hot dog",
+"mashed potato",
+"cabbage",
+"broccoli",
+"cauliflower",
+"zucchini",
+"spaghetti squash",
+"acorn squash",
+"butternut squash",
+"cucumber",
+"artichoke",
+"bell pepper",
+"cardoon",
+"mushroom",
+"Granny Smith",
+"strawberry",
+"orange",
+"lemon",
+"fig",
+"pineapple",
+"banana",
+"jackfruit",
+"custard apple",
+"pomegranate",
+"hay",
+"carbonara",
+"chocolate syrup",
+"dough",
+"meatloaf",
+"pizza",
+"pot pie",
+"burrito",
+"red wine",
+"espresso",
+"cup",
+"eggnog",
+"alp",
+"bubble",
+"cliff",
+"coral reef",
+"geyser",
+"lakeshore",
+"promontory",
+"shoal",
+"seashore",
+"valley",
+"volcano",
+"baseball player",
+"bridegroom",
+"scuba diver",
+"rapeseed",
+"daisy",
+"yellow lady's slipper",
+"corn",
+"acorn",
+"rose hip",
+"horse chestnut seed",
+"coral fungus",
+"agaric",
+"gyromitra",
+"stinkhorn mushroom",
+"earth star",
+"hen-of-the-woods",
+"bolete",
+"ear",
+"toilet paper"]