From c8d94f5e6915c540499cc2ea94331352b6a65307 Mon Sep 17 00:00:00 2001
From: HeberArteagaJ <heberarteagajimenez@gmail.com>
Date: Tue, 3 Dec 2024 01:01:49 +0100
Subject: [PATCH] Exercise 4: remove duplicated transfer-learning code cells from TD2 notebook

---
 TD2 Deep Learning.ipynb | 324 ----------------------------------------
 1 file changed, 324 deletions(-)

diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb
index 6cd69cc..bc173a2 100644
--- a/TD2 Deep Learning.ipynb	
+++ b/TD2 Deep Learning.ipynb	
@@ -2595,175 +2595,6 @@
     "This function evaluates the trained model on a separate test dataset."
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "bf24a914",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import os\n",
-    "import time\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torch.nn as nn\n",
-    "import torch.optim as optim\n",
-    "import torchvision\n",
-    "from torch.optim import lr_scheduler\n",
-    "from torchvision import datasets, transforms\n",
-    "\n",
-    "# Data augmentation and normalization for training and validation\n",
-    "data_transforms = {\n",
-    "    \"train\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.RandomResizedCrop(224),\n",
-    "            transforms.RandomHorizontalFlip(),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "    \"val\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.Resize(256),\n",
-    "            transforms.CenterCrop(224),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "    \"test\": transforms.Compose(  # Test set transforms\n",
-    "        [\n",
-    "            transforms.Resize(256),\n",
-    "            transforms.CenterCrop(224),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "}\n",
-    "\n",
-    "data_dir = \"hymenoptera_data\"\n",
-    "# Create datasets and loaders\n",
-    "image_datasets = {\n",
-    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
-    "    for x in [\"train\", \"val\", \"test\"]  # Assuming \"test\" folder exists\n",
-    "}\n",
-    "dataloaders = {\n",
-    "    x: torch.utils.data.DataLoader(\n",
-    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=4\n",
-    "    )\n",
-    "    for x in [\"train\", \"val\", \"test\"]\n",
-    "}\n",
-    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\", \"test\"]}\n",
-    "class_names = image_datasets[\"train\"].classes\n",
-    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
-    "\n",
-    "\n",
-    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
-    "    since = time.time()\n",
-    "\n",
-    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
-    "    best_acc = 0.0\n",
-    "\n",
-    "    for epoch in range(num_epochs):\n",
-    "        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n",
-    "        print(\"-\" * 10)\n",
-    "\n",
-    "        for phase in [\"train\", \"val\"]:\n",
-    "            if phase == \"train\":\n",
-    "                scheduler.step()\n",
-    "                model.train()\n",
-    "            else:\n",
-    "                model.eval()\n",
-    "\n",
-    "            running_loss = 0.0\n",
-    "            running_corrects = 0\n",
-    "\n",
-    "            for inputs, labels in dataloaders[phase]:\n",
-    "                inputs = inputs.to(device)\n",
-    "                labels = labels.to(device)\n",
-    "\n",
-    "                optimizer.zero_grad()\n",
-    "\n",
-    "                with torch.set_grad_enabled(phase == \"train\"):\n",
-    "                    outputs = model(inputs)\n",
-    "                    _, preds = torch.max(outputs, 1)\n",
-    "                    loss = criterion(outputs, labels)\n",
-    "\n",
-    "                    if phase == \"train\":\n",
-    "                        loss.backward()\n",
-    "                        optimizer.step()\n",
-    "\n",
-    "                running_loss += loss.item() * inputs.size(0)\n",
-    "                running_corrects += torch.sum(preds == labels.data)\n",
-    "\n",
-    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
-    "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
-    "\n",
-    "            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n",
-    "\n",
-    "            if phase == \"val\" and epoch_acc > best_acc:\n",
-    "                best_acc = epoch_acc\n",
-    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
-    "\n",
-    "        print()\n",
-    "\n",
-    "    time_elapsed = time.time() - since\n",
-    "    print(\n",
-    "        \"Training complete in {:.0f}m {:.0f}s\".format(\n",
-    "            time_elapsed // 60, time_elapsed % 60\n",
-    "        )\n",
-    "    )\n",
-    "    print(\"Best val Acc: {:4f}\".format(best_acc))\n",
-    "\n",
-    "    model.load_state_dict(best_model_wts)\n",
-    "    return model\n",
-    "\n",
-    "\n",
-    "def eval_model(model, criterion, dataloader, dataset_size):\n",
-    "    model.eval()\n",
-    "    running_loss = 0.0\n",
-    "    running_corrects = 0\n",
-    "\n",
-    "    with torch.no_grad():\n",
-    "        for inputs, labels in dataloader:\n",
-    "            inputs = inputs.to(device)\n",
-    "            labels = labels.to(device)\n",
-    "\n",
-    "            outputs = model(inputs)\n",
-    "            _, preds = torch.max(outputs, 1)\n",
-    "            loss = criterion(outputs, labels)\n",
-    "\n",
-    "            running_loss += loss.item() * inputs.size(0)\n",
-    "            running_corrects += torch.sum(preds == labels.data)\n",
-    "\n",
-    "    loss = running_loss / dataset_size\n",
-    "    acc = running_corrects.double() / dataset_size\n",
-    "    print(f\"Test Loss: {loss:.4f} Acc: {acc:.4f}\")\n",
-    "    return loss, acc\n",
-    "\n",
-    "\n",
-    "# Load pre-trained model\n",
-    "model = torchvision.models.resnet18(pretrained=True)\n",
-    "for param in model.parameters():\n",
-    "    param.requires_grad = False\n",
-    "\n",
-    "num_ftrs = model.fc.in_features\n",
-    "model.fc = nn.Linear(num_ftrs, 2)\n",
-    "model = model.to(device)\n",
-    "\n",
-    "criterion = nn.CrossEntropyLoss()\n",
-    "optimizer_conv = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)\n",
-    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
-    "\n",
-    "# Train the model\n",
-    "model = train_model(model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10)\n",
-    "\n",
-    "# Evaluate the model on the test set\n",
-    "test_loss, test_acc = eval_model(model, criterion, dataloaders[\"test\"], dataset_sizes[\"test\"])"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 54,
@@ -3187,161 +3018,6 @@
     "Replacement of the current classification layer with a two-layer architecture using ReLU and Dropout."
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "e394104d",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import os\n",
-    "import time\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torch.nn as nn\n",
-    "import torch.optim as optim\n",
-    "import torchvision\n",
-    "from torch.optim import lr_scheduler\n",
-    "from torchvision import datasets, transforms\n",
-    "\n",
-    "# Data augmentation and normalization for training and validation\n",
-    "data_transforms = {\n",
-    "    \"train\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.RandomResizedCrop(224),\n",
-    "            transforms.RandomHorizontalFlip(),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "    \"val\": transforms.Compose(\n",
-    "        [\n",
-    "            transforms.Resize(256),\n",
-    "            transforms.CenterCrop(224),\n",
-    "            transforms.ToTensor(),\n",
-    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
-    "        ]\n",
-    "    ),\n",
-    "}\n",
-    "\n",
-    "data_dir = \"hymenoptera_data\"\n",
-    "image_datasets = {\n",
-    "    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n",
-    "    for x in [\"train\", \"val\"]\n",
-    "}\n",
-    "dataloaders = {\n",
-    "    x: torch.utils.data.DataLoader(\n",
-    "        image_datasets[x], batch_size=4, shuffle=True, num_workers=4\n",
-    "    )\n",
-    "    for x in [\"train\", \"val\"]\n",
-    "}\n",
-    "dataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\n",
-    "class_names = image_datasets[\"train\"].classes\n",
-    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
-    "\n",
-    "\n",
-    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
-    "    since = time.time()\n",
-    "\n",
-    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
-    "    best_acc = 0.0\n",
-    "\n",
-    "    for epoch in range(num_epochs):\n",
-    "        print(\"Epoch {}/{}\".format(epoch + 1, num_epochs))\n",
-    "        print(\"-\" * 10)\n",
-    "\n",
-    "        for phase in [\"train\", \"val\"]:\n",
-    "            if phase == \"train\":\n",
-    "                scheduler.step()\n",
-    "                model.train()\n",
-    "            else:\n",
-    "                model.eval()\n",
-    "\n",
-    "            running_loss = 0.0\n",
-    "            running_corrects = 0\n",
-    "\n",
-    "            for inputs, labels in dataloaders[phase]:\n",
-    "                inputs = inputs.to(device)\n",
-    "                labels = labels.to(device)\n",
-    "\n",
-    "                optimizer.zero_grad()\n",
-    "\n",
-    "                with torch.set_grad_enabled(phase == \"train\"):\n",
-    "                    outputs = model(inputs)\n",
-    "                    _, preds = torch.max(outputs, 1)\n",
-    "                    loss = criterion(outputs, labels)\n",
-    "\n",
-    "                    if phase == \"train\":\n",
-    "                        loss.backward()\n",
-    "                        optimizer.step()\n",
-    "\n",
-    "                running_loss += loss.item() * inputs.size(0)\n",
-    "                running_corrects += torch.sum(preds == labels.data)\n",
-    "\n",
-    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
-    "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
-    "\n",
-    "            print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n",
-    "\n",
-    "            if phase == \"val\" and epoch_acc > best_acc:\n",
-    "                best_acc = epoch_acc\n",
-    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
-    "\n",
-    "        print()\n",
-    "\n",
-    "    time_elapsed = time.time() - since\n",
-    "    print(\n",
-    "        \"Training complete in {:.0f}m {:.0f}s\".format(\n",
-    "            time_elapsed // 60, time_elapsed % 60\n",
-    "        )\n",
-    "    )\n",
-    "    print(\"Best val Acc: {:4f}\".format(best_acc))\n",
-    "\n",
-    "    model.load_state_dict(best_model_wts)\n",
-    "    return model\n",
-    "\n",
-    "\n",
-    "# Modify the classification layer\n",
-    "class ModifiedResNet18(nn.Module):\n",
-    "    def __init__(self, pretrained_model, num_classes):\n",
-    "        super(ModifiedResNet18, self).__init__()\n",
-    "        self.features = nn.Sequential(*list(pretrained_model.children())[:-1])\n",
-    "        num_ftrs = pretrained_model.fc.in_features\n",
-    "        self.classifier = nn.Sequential(\n",
-    "            nn.Dropout(0.5),  # Dropout before the first layer\n",
-    "            nn.Linear(num_ftrs, 256),  # Fully connected layer 1\n",
-    "            nn.ReLU(),  # Activation\n",
-    "            nn.Dropout(0.5),  # Dropout after activation\n",
-    "            nn.Linear(256, num_classes),  # Fully connected layer 2\n",
-    "        )\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "        x = self.features(x)\n",
-    "        x = torch.flatten(x, 1)\n",
-    "        x = self.classifier(x)\n",
-    "        return x\n",
-    "\n",
-    "\n",
-    "# Load pre-trained ResNet18 model\n",
-    "pretrained_model = torchvision.models.resnet18(pretrained=True)\n",
-    "for param in pretrained_model.parameters():\n",
-    "    param.requires_grad = False\n",
-    "\n",
-    "# Replace the classifier with the modified version\n",
-    "model = ModifiedResNet18(pretrained_model, num_classes=2)\n",
-    "model = model.to(device)\n",
-    "\n",
-    "criterion = nn.CrossEntropyLoss()\n",
-    "optimizer_conv = optim.SGD(model.classifier.parameters(), lr=0.001, momentum=0.9)\n",
-    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n",
-    "\n",
-    "# Train the model with the modified classification layer\n",
-    "model = train_model(model, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=10)"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 74,
-- 
GitLab