From 57a2287f1266a09271eb17952e8463979f32cf5e Mon Sep 17 00:00:00 2001
From: Aya SAIDI <aya.saidi@auditeur.ec-lyon.fr>
Date: Tue, 8 Nov 2022 21:25:40 +0100
Subject: [PATCH] Create py_test.ipynb

---
 py_test.ipynb | 912 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 912 insertions(+)
 create mode 100644 py_test.ipynb

diff --git a/py_test.ipynb b/py_test.ipynb
new file mode 100644
index 0000000..41b3e38
--- /dev/null
+++ b/py_test.ipynb
@@ -0,0 +1,912 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "EibeqmPGInSe"
+      },
+      "source": [
+        "# **Read_cifar**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 16,
+      "metadata": {
+        "id": "vgwvAYHuGRzI"
+      },
+      "outputs": [],
+      "source": [
+        "import numpy as np\n",
+        "from six.moves import cPickle as pickle\n",
+        "import platform\n",
+        "import os\n",
+        "import random\n",
+        "import math"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 2,
+      "metadata": {
+        "id": "rHc9pFYYN4GV"
+      },
+      "outputs": [],
+      "source": [
+        "def unpickle(file):\n",
+        "  '''loads the data dictionnary.'''\n",
+        "  with open(file, 'rb') as fo:\n",
+        "    dict = pickle.load(fo, encoding='bytes')\n",
+        "  return dict"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 3,
+      "metadata": {
+        "id": "NKQPmczBGiTd"
+      },
+      "outputs": [],
+      "source": [
+        "def read_cifar_batch (batch_path):\n",
+        "    #This function takes as parameter the path of a single batch as a string, and returns a matrix data of size (batch_size x data_size) and a a vector labels of size batch_size.\n",
+        "    \n",
+        "    data_dict = unpickle(batch_path)\n",
+        "    data = data_dict[b'data']\n",
+        "    labels = data_dict[b'labels']\n",
+        "    data = data.reshape(len(data),len(data[0]))\n",
+        "    data = data.astype('f')  #data must be np.float32 array.\n",
+        "    labels = np.array(labels, dtype='int64')  #labels must be np.int64 array.\n",
+        "    return data, labels"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "8V1BrTxMICkl",
+        "outputId": "9392be7b-7137-4c38-891c-d993b1d27cf4"
+      },
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "(array([[ 59.,  43.,  50., ..., 140.,  84.,  72.],\n",
+              "        [154., 126., 105., ..., 139., 142., 144.],\n",
+              "        [255., 253., 253., ...,  83.,  83.,  84.],\n",
+              "        ...,\n",
+              "        [ 71.,  60.,  74., ...,  68.,  69.,  68.],\n",
+              "        [250., 254., 211., ..., 215., 255., 254.],\n",
+              "        [ 62.,  61.,  60., ..., 130., 130., 131.]], dtype=float32),\n",
+              " array([6, 9, 9, ..., 1, 1, 5]))"
+            ]
+          },
+          "execution_count": 4,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "#test read_cifar_batch\n",
+        "read_cifar_batch ('/content/drive/MyDrive/cifar10/data_batch_1')"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 4,
+      "metadata": {
+        "id": "u1XzFhiZLitc"
+      },
+      "outputs": [],
+      "source": [
+        "def read_cifar(fo):\n",
+        "  #This function takes as parameter the path of the directory containing the six batches and returns a matrix data a vector lables of size batch_size\n",
+        "  files=['/data_batch_1','/data_batch_2','/data_batch_3','/data_batch_4','/data_batch_5','/test_batch']\n",
+        "  A=10000\n",
+        "  N=60000\n",
+        "  P=3072\n",
+        "  X=np.empty((N,P),dtype=np.float)\n",
+        "  Y=np.empty(A,dtype=np.int64)\n",
+        "  for i in range(len(files)):\n",
+        "    fichier=fo+files[i]   \n",
+        "    data_dict=unpickle(fichier)\n",
+        "    M=data_dict[b'data']\n",
+        "    L=data_dict[b'labels']\n",
+        "    L=np.array(L)\n",
+        "    X=np.vstack((X,M))\n",
+        "    Y=np.hstack((Y,L))\n",
+        "  X=X[N:2*N,]\n",
+        "  Y=Y[A:,]\n",
+        "  return X,Y"
+      ]
+    },
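+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Sanity check for read_cifar (a minimal sketch, assuming the same Google Drive\n",
+        "# path used above and that all six CIFAR-10 batch files are present).\n",
+        "# It only verifies shapes, dtypes and the label range, without printing the arrays.\n",
+        "X, Y = read_cifar('/content/drive/MyDrive/cifar10')\n",
+        "assert X.shape == (60000, 3072) and Y.shape == (60000,)\n",
+        "assert X.dtype == np.float32 and Y.dtype == np.int64\n",
+        "assert Y.min() == 0 and Y.max() == 9   # CIFAR-10 has classes 0..9"
+      ]
+    },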
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "S7O9HQMeIZ_j",
+        "outputId": "bfe54a1c-3736-4f8a-bc08-0f46eb4eeab3"
+      },
+      "outputs": [
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:7: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n",
+            "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
+            "  import sys\n"
+          ]
+        },
+        {
+          "data": {
+            "text/plain": [
+              "(array([[ 59.,  43.,  50., ..., 140.,  84.,  72.],\n",
+              "        [154., 126., 105., ..., 139., 142., 144.],\n",
+              "        [255., 253., 253., ...,  83.,  83.,  84.],\n",
+              "        ...,\n",
+              "        [ 20.,  19.,  15., ...,  50.,  53.,  47.],\n",
+              "        [ 25.,  15.,  23., ...,  80.,  81.,  80.],\n",
+              "        [ 73.,  98.,  99., ...,  94.,  58.,  26.]]),\n",
+              " array([6, 9, 9, ..., 5, 1, 7]))"
+            ]
+          },
+          "execution_count": 6,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "#test read_cifar\n",
+        "read_cifar(\"/content/drive/MyDrive/cifar10\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 5,
+      "metadata": {
+        "id": "lDoVHRAuJN0H"
+      },
+      "outputs": [],
+      "source": [
+        "def split_dataset(data,labels,split):\n",
+        "    #This function splits the dataset into a training set and a test set\n",
+        "    #It takes as parameter data and labels, two arrays that have the same size in the first dimension. And a split, a float between 0 and 1 which determines the split factor of the training set with respect to the test set.\n",
+        "    #split -- the split factor\n",
+        "    #data -- the whole data (all the batches including the test batch)\n",
+        "    #labels -- the labels associated to the data\n",
+        "    data_train=[]\n",
+        "    labels=labels.reshape(data.shape[0],1)\n",
+        "    # Stack our Data and labels\n",
+        "    con = np.hstack((data, labels))\n",
+        "    k=int(split*con.shape[0])\n",
+        "    # Shuffle all our Data\n",
+        "    np.random.shuffle(con)\n",
+        "    # Train\n",
+        "    data_train=con[:k,:-1]\n",
+        "    labels_train=con[:k,-1]\n",
+        "    # Test\n",
+        "    data_test=con[k:,:-1]\n",
+        "    labels_test=con[k:,-1]\n",
+        "    return data_train,labels_train,data_test,labels_test\n"
+      ]
+    },
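+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Alternative split (a sketch, not used below): shuffling an index permutation\n",
+        "# instead of hstack-ing the labels into the float data matrix keeps the label\n",
+        "# dtype intact and avoids copying the whole dataset. Same signature as split_dataset.\n",
+        "def split_dataset_by_index(data, labels, split):\n",
+        "    n = data.shape[0]\n",
+        "    k = int(split * n)\n",
+        "    perm = np.random.permutation(n)          # random ordering of the samples\n",
+        "    train_idx, test_idx = perm[:k], perm[k:]\n",
+        "    return data[train_idx], labels[train_idx], data[test_idx], labels[test_idx]"
+      ]
+    },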
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "qurY45xiIs5R",
+        "outputId": "7cec7d66-1f1e-4747-85c8-dd6bc7bf8fbd"
+      },
+      "outputs": [
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:7: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n",
+            "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
+            "  import sys\n"
+          ]
+        },
+        {
+          "data": {
+            "text/plain": [
+              "(array([[ 39.,  28.,  55., ..., 136., 108., 101.],\n",
+              "        [122., 126., 130., ...,  88.,  85.,  82.],\n",
+              "        [ 55.,  53.,  53., ..., 171., 170., 173.],\n",
+              "        ...,\n",
+              "        [159., 158., 159., ..., 192., 193., 205.],\n",
+              "        [198., 195., 185., ...,  75.,  74.,  77.],\n",
+              "        [182., 195., 194., ...,  79.,  80.,  77.]]),\n",
+              " array([1., 7., 3., ..., 8., 9., 3.]),\n",
+              " array([[166., 165., 162., ..., 140., 144., 149.],\n",
+              "        [155., 157., 159., ..., 125., 127., 128.],\n",
+              "        [144., 139., 139., ..., 119., 123., 122.],\n",
+              "        ...,\n",
+              "        [138., 240., 241., ..., 182., 191., 131.],\n",
+              "        [245., 241., 240., ..., 115., 127., 129.],\n",
+              "        [224., 222., 222., ..., 111., 110., 112.]]),\n",
+              " array([0., 5., 1., ..., 3., 3., 8.]))"
+            ]
+          },
+          "execution_count": 8,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "#test split_dataset\n",
+        "data,labels=read_cifar(\"/content/drive/MyDrive/cifar10/\")\n",
+        "split_dataset(data,labels,0.8)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "dIpakhQvRiA4",
+        "outputId": "e4b3e7f2-22f6-4c08-b82d-8ebe94063484"
+      },
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "True"
+            ]
+          },
+          "execution_count": 12,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "data_train.shape == (54000, 3072)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "P4A4BEH5SKga"
+      },
+      "source": [
+        "# **KNN**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 6,
+      "metadata": {
+        "id": "PEgnqGZkRvfv"
+      },
+      "outputs": [],
+      "source": [
+        "import numpy as np\n",
+        "import matplotlib.pyplot as plt\n",
+        "import math \n",
+        "import random"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 7,
+      "metadata": {
+        "id": "CTksclO-SRin"
+      },
+      "outputs": [],
+      "source": [
+        "def distance_matrix(Y , X):\n",
+        "    #This function takes as parameters two matrices X and Y\n",
+        "    a_2=(Y**2).sum(axis=1)\n",
+        "    a_2=a_2.reshape(-1,1)\n",
+        "    b_2=(X**2).sum(axis=1)\n",
+        "    b_2=b_2.reshape(1,-1)\n",
+        "    dist = np.sqrt(a_2 + b_2 -2*Y.dot(X.T))\n",
+        "    #dist is the euclidian distance between two matrices\n",
+        "    return dist"
+      ]
+    },
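+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Quick check of distance_matrix (a sketch): on a tiny random example, the\n",
+        "# vectorised expansion should match the distances computed directly with\n",
+        "# np.linalg.norm, up to floating point error.\n",
+        "Y_small = np.random.rand(4, 6)\n",
+        "X_small = np.random.rand(3, 6)\n",
+        "naive = np.array([[np.linalg.norm(y - x) for x in X_small] for y in Y_small])\n",
+        "assert np.allclose(distance_matrix(Y_small, X_small), naive, atol=1e-6)"
+      ]
+    },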
+    {
+      "cell_type": "code",
+      "execution_count": 8,
+      "metadata": {
+        "id": "M79g1H3KUpMz"
+      },
+      "outputs": [],
+      "source": [
+        "def knn_predict(dists, labels_train, k):\n",
+        "    #This function takes as parameters: dists (from above), labels_train, and k the number of neighbors\n",
+        "    labels_pred=np.zeros(labels_train.shape[0])\n",
+        "    for i in range(0,dists.shape[0]):\n",
+        "            # Find index of k smallest distances\n",
+        "            index_smallest_distance = np.argsort(dists[i,:])[0:k+1]\n",
+        "            #   Index the labels according to these distances\n",
+        "            labels_distances = [labels_train[i] for i in index_smallest_distance]\n",
+        "            #Predict the class / label\n",
+        "            labels_pred[i]=max(labels_distances,key=labels_distances.count)\n",
+        "    return labels_pred"
+      ]
+    },
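+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Tiny worked example for knn_predict (a sketch): two test rows against four\n",
+        "# training points. With k=3, the nearest labels of the first row are [0, 0, 1],\n",
+        "# so the prediction should be 0, and symmetrically 1 for the second row.\n",
+        "toy_dists = np.array([[0.1, 0.2, 0.3, 0.9],\n",
+        "                      [0.9, 0.8, 0.2, 0.1]])\n",
+        "toy_labels = np.array([0, 0, 1, 1])\n",
+        "print(knn_predict(toy_dists, toy_labels, 3))   # expected: [0. 1.]"
+      ]
+    },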
+    {
+      "cell_type": "code",
+      "execution_count": 9,
+      "metadata": {
+        "id": "aX8Sug3mU9ak"
+      },
+      "outputs": [],
+      "source": [
+        "(data,labels)=read_cifar_batch('/content/drive/MyDrive/cifar10/data_batch_2')                                # training with the second batch only for memory purposes\n",
+        "(data_test,labels_test)=read_cifar_batch('/content/drive/MyDrive/cifar10/test_batch')"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 10,
+      "metadata": {
+        "id": "iZ9etwnSUuOe"
+      },
+      "outputs": [],
+      "source": [
+        "assert distance_matrix(data,data_test).shape == (data.shape[0],data_test.shape[0])"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 11,
+      "metadata": {
+        "id": "ZKElHxMzVch0"
+      },
+      "outputs": [],
+      "source": [
+        "assert knn_predict(data,labels,2).shape == labels.shape"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 12,
+      "metadata": {
+        "id": "FWneIBSkVeju"
+      },
+      "outputs": [],
+      "source": [
+        "def evaluate_knn(data_train, labels_train, data_test, labels_test, k):\n",
+        "    #This function evaluates the knn classifier rate\n",
+        "    labels_test_pred=knn_predict(distance_matrix(data_train, data_test), labels_train, k)\n",
+        "    num_samples= data_test.shape[0]\n",
+        "    num_correct= (labels_test == labels_test_pred).sum().item()\n",
+        "    accuracy= 100 * (num_correct / num_samples)  #The accuracy is the percentage of the correctly predicted classes\n",
+        "    return accuracy"
+      ]
+    },
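+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Sanity check for evaluate_knn (a sketch with synthetic data): two well\n",
+        "# separated Gaussian blobs, one per class, so a 1-NN classifier is expected\n",
+        "# to score close to 100% on held-out points drawn from the same blobs.\n",
+        "blob0 = np.random.randn(50, 50)\n",
+        "blob1 = np.random.randn(50, 50) + 10.0\n",
+        "toy_train = np.vstack((blob0[:40], blob1[:40]))\n",
+        "toy_train_labels = np.array([0]*40 + [1]*40)\n",
+        "toy_test = np.vstack((blob0[40:], blob1[40:]))\n",
+        "toy_test_labels = np.array([0]*10 + [1]*10)\n",
+        "print(evaluate_knn(toy_train, toy_train_labels, toy_test, toy_test_labels, 1))"
+      ]
+    },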
+    {
+      "cell_type": "code",
+      "execution_count": 13,
+      "metadata": {
+        "id": "bDQJ2HCDVvlw"
+      },
+      "outputs": [],
+      "source": [
+        "assert 0 < evaluate_knn(data,labels,data_test,labels_test,5) < 100"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 14,
+      "metadata": {
+        "id": "YPyZ7ES4V0cV"
+      },
+      "outputs": [],
+      "source": [
+        "def accuracy_graph(k,dirname,num_batch):\n",
+        "    #This function is used to plot the variation of the accuracy as a function of k\n",
+        "    # k -- the max number of neighbors\n",
+        "    x=[] #axis x : k\n",
+        "    y=[] #axis y : accuracy\n",
+        "    dir_batch=str(dirname)+\"/data_batch_\"+str(num_batch)\n",
+        "    dir_test = str(dirname)+\"/test_batch\"\n",
+        "    (data_test, labels_test)=read_cifar_batch(dir_test)\n",
+        "    (data_train, labels_train)=read_cifar_batch(dir_batch)\n",
+        "    for i in range (1,k+1):\n",
+        "        x.append(i)     #axis (k from 1 to 20)\n",
+        "        accuracy=evaluate_knn(data_train , labels_train , data_test , labels_test , i)\n",
+        "        y.append(accuracy)\n",
+        "    plt.plot(x,y)\n",
+        "    plt.show()\n",
+        "    plt.savefig(str(dirname)+\"/results/accuracy_knn\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 34,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/",
+          "height": 282
+        },
+        "id": "lKASCFvdWOw_",
+        "outputId": "de8eeec2-8dd8-4227-c911-9ab6badee9e2"
+      },
+      "outputs": [
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "<Figure size 432x288 with 1 Axes>"
+            ],
+            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD4CAYAAADiry33AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3deXyU5bn/8c89kz1kTwghZAUCCcgaIMiqEKRo1dqjtm6IC8dTPdW2drPnWM/pqb/2WHpqq61SZdG6tFqttOKSoAFkDzskhJAQQgIkIYQkhOxz//6YCcWQkGX2mev9evHKZOae57kYhm+eXHM/96O01gghhPBcBmcXIIQQwr4k6IUQwsNJ0AshhIeToBdCCA8nQS+EEB7Ox9kFdBcdHa2Tk5OdXYYQQriV3bt3n9Vax/T0mMsFfXJyMvn5+c4uQwgh3IpS6kRvj0nrRgghPJwEvRBCeDgJeiGE8HAS9EII4eEk6IUQwsNJ0AshhIeToBdCCA8nQe9C/ra3ktP1zc4uQwjhYSToXcT+k+d54s/7+PmHhc4uRQjhYSToXcTqLccB+OjQGTmqF0LYlAS9C6hqaOHDg6e5YVwsWmte39brmcxCCDFgEvQu4E/bT9Bh0jy1JJ3sjFje3FlOc1uns8sSQngICXona2nv5M0d5SwYG0tSVDAPzErh/MV2/rav0tmlCSE8hAS9k63bf4rapjYemJUMwPSUSDLiQlm95Thy4XYhhC1I0DuR1ppVXxxn7LAQZo6MAkApxbJZyRytusCWY7VOrlAI4Qkk6J1oe+k5jpxpZNmsZJRSl+7/6sThRA/xuzQTRwghrCFB70SrthwnIsiXWybFf+n+AF8jd81I4rOiao6fbXJSdUIITyFB7yTltRfJLazi7hlJBPgar3j8nqxEfAyKtVvLHF+cEMKjSNA7ydptZRiV4t6ZST0+PjQkgK9OGM47+SdpaGl3bHFCCI8iQe8EF1o7+Muuk9w4IY7Y0IBexy2blUJTWyd/2XXSgdUJITyNBL0TvJt/ksbWDpbNSrnquGtGhDEtOYK128roNMlUSyHE4PQZ9EqpVUqpaqXUocvui1RK5Silii1fI67y/FClVIVS6gVbFe3OTCbNmq1lTE4MZ1JCeJ/jl81K4eS5ZjYUVjmgOiGEJ+rPEf0aYHG3+34EbNBajwY2WL7vzc+ATYOqzgN9XlRNWe1FHujjaL7LooxY4sMDWSVTLYUQg9Rn0GutNwHnut19C7DWcnstcGtPz1VKTQVigU+tqNGjrN5SxrDQABaPH9av8T5GA/fNTGJ76TkKTzfYuTohhCcabI8+Vmt92nL7DOYw/xKllAFYATzZ18aUUsuVUvlKqfyamppBluT6is408sWxs9w7MwlfY/9f+m9MSyTQ1ygnUAkhBsXqD2O1eUGWnj4p/BawXmtd0Y9trNRaZ2qtM2NiYqwtyWWt2Xocfx8Dd01PHNDzwoJ8+frUeP627xS1F1rtVJ0QwlMNNuirlFJxAJav1T2MmQk8ppQqA34F3KeU+sUg9+f26praeG9PJbdNiSci2G/Az7//2hTaOky8uaPcDtUJITzZYIN+HbDUcnsp8EH3AVrru7XWiVrrZMztm9e01lf70NajvbmznNYOU59TKnszaugQ5qbF8Pr2E7R1mGxcnRDCk/VneuVbwDZgjGWa5IPAL4BspVQxsNDyPUqpTKXUK/Ys2B21d5p4fdsJZo+KJi02ZNDbeWBWMtWNraw/eLrvwUIIYeHT1wCt9Td7eWhBD2PzgYd6uH8N5mmaXumjQ2c409DCz7823qrtzB0dQ2pMMKu2HOeWScO/tOKlEEL0Rs6MdYDVW46THBXEdWOGWrUdg0GxbFYKByrq2VNeZ6PqhBCeToLezvaW17G3/Dz3X5uMwWD9EfjXp8QTGuDDqi1l1hcnhPAKEvR2tnpLGSH+PvxLZoJNthfk58M3pify8aEznDrfbJNtCiE8mwS9HZ2pb2H9wdPcMS2BIf59fhzSb/fNTEJrzWvbTthsm0IIzyVBb0d/2n6CTq1ZOjPZptsdERHEDeOG8dbOcprbOm26bSGE55Ggt5OW9k7e2HGC7PRYEqOCbL79B2anUN/czvt7K22+bSGEZ5Ggt5MP9lVSd7F90CdI9SUzKYLx8aGs3nIc8yoUQgjRMwl6O9Bas+qLMtLjQslKjbTLPpRSLLs2heLqC3xx7Kxd9iGE8AwS9HawraSWoqpGls1KtutJTTdNjCN6iD+rZaqlEOIqJOjtYNWWMqKC/bh54nC77sffx8g9WYl8dqSa0poLdt2XEMJ9SdDb2InaJjYcqeLuGYkE+Brtvr+7ZyThZzSwdmuZ3fclhHBPEvQ2tmZrGT4GxT1ZSQ7ZX0yIP1+dOJx3dldQ39zukH0KIdyLBL0NNba0805+BTdNGM7Q0ACH7XfZrGQutnXyTv5Jh+1TCOE+JOht6J38Ci60drBsVrJD9zs+PozpKZGs2VpGp0mmWgohvkyC3kZMJs3abWVMTYpgwohwh+//gVnJVNQ1y7x6IcQVJOhtZH/FeU7UXuTuGQO7HqytZGcMY3pKJP/zYSHfWLmdo1WNTqlDCOF6JOhtJLewCqNBcf1Y69acHyyjQfHWw1k8+7VrOHKmkSXPb+bZ9YU0tXY4pR4hhOuQoLeRnIIqpidHEh408At/24rRoLhrRiKfPzmfr08ZwcpNpSxYsZEPD5yWdo4QXkyC3gZO1DZxtOoCCzNinV0KAJHBfvzyXybw13+7lshgPx59cw/3rdopJ1UJ4aUk6G0gp6AKgOx01wj6LlOTIlj32Cye+WoG+8rPs/g3m/nVJ0WytLEQXkaC3gZyC6sYExtil+WIreVjNHD/rBQ2PDmPmybE8cLnx1j46418eviMtHOE8BIS9FY6f7GNXWV1LMxwzoew/TU0JIBf3zmJPy/PItjfyPLXd/Pg2nzKay86uzQhhJ1J0Fvp86JqOk2a7Ixhzi6lX2akRvHht+fwkyXp7CitJfv/NvJ8bjEt7dLOEcJTSdBbKbegmpgQfybEhzm7lH7zNRp4eG4qG743n4UZsfxf7lFu+M0mPi+qdnZpQgg7kKC3QmtHJxuP1rAwfSgGg/3WnbeXYWEBvHjXFP704AyMBsWy1bt4e2e5s8sSQtiYBL0Vtpee40JrB9kuMq1ysGaPjuajx+cwMiaYDw+ednY5Qggbk6C3Qm5BFYG+Rq4dGe3sUqzm72Nk9qho8svqaO80ObscIYQNSdAPktaa3MIq5oyOdsgFRhwhKzWK5vZODlTUO7sUIYQNSdAP0uFTDZyub3H7ts3lZqRGAbC9tNbJlQghbKnPoFdKrVJKVSulDl12X6RSKkcpVWz5GtHD8yYppbYppQ4rpQ4ope60dfHOlFNQhVI4bREze4gM9mPssBAJeiE8TH+O6NcAi7vd9yNgg9Z6NLDB8n13F4H7tNbjLM//jVLK8Qu120luYRVTEyOIGuLv7FJsKis1ivyyOto6pE8vhKfoM+i11puA
c93uvgVYa7m9Fri1h+cd1VoXW26fAqqBGKuqdRGV55s5fKrBo9o2XbJSI2lu7+Rg5XlnlyKEsJHB9uhjtdZd8/DOAFdNPKXUdMAPKBnk/lzKhkLzImauslqlLU1P6erTd//ZLoRwV1Z/GKvNK2P1ujqWUioOeB1YprXusR+glFqulMpXSuXX1NRYW5Ld5RRUkRoTzMiYIc4uxeakTy+E5xls0FdZArwryHs8d14pFQp8CPxEa729t41prVdqrTO11pkxMa7d3WloaWd7aa3LLUlsS9KnF8KzDDbo1wFLLbeXAh90H6CU8gPeB17TWr87yP24nE1Ha2jv1B7ZtukifXohPEt/ple+BWwDxiilKpRSDwK/ALKVUsXAQsv3KKUylVKvWJ56BzAXuF8ptc/yZ5Jd/hYOlFNQRWSwH1MSr5hR6jGkTy+EZ/Hpa4DW+pu9PLSgh7H5wEOW238C/mRVdS6mvdPE50eqWTRuGEY3XMSsv7r69NtKann0ulHOLkcIYSU5M3YAdpWdo6Glg4Ue3J/vkpUaRf6Jc9KnF8IDSNAPQE5BFX4+Buamuf8iZn3JSo2ipd3EgQrp0wvh7iTo+6lrEbPZo6IJ8uuz4+X2ZqREArLujRCeQIK+n45WXeDkuWavaNsARFyaTy8fyArh7iTo+ymn4AwAC9M9ZxGzvkifXgjPIEHfTzmF1UxMCGdoaICzS3EY6dML4Rkk6PuhqqGF/SfPk+1FR/Ng7tMrJX16IdydBH0/bCg0r/CQnTHMyZU4lrlPH8o2CXoh3JpHBb15fTXbyy2sIiEykLRYz1vErC9ZqZHsPlFHa0ens0sRQgySxwR9dUML//LSNpu3GZpaO/ji2Fmy04ehlOeeDdubf/bp5TqyQrgrjwl6f18jdRfb+Lc/7ebkuYs22+7m4rO0dZhYmOFd/fkul/r0JdK+EcJdecyZP2GBvry6dBq3vriFh9bm89dvXcsQf+v/ermFVYQG+DAtOdIGVbqf8CBzn3778Vr+ndHOLseu/vNvhzh+ton48EDiIwK/9HVYWAC+Ro85LhJexmOCHiAlOpgX75rC0tU7eeLtvbx8b6ZVi491mjSfHanm+rFDvfo/eVZqJG/tLKe1oxN/H6Ozy7GLqoYWXt9+gvjwQI6caeTshdYvPW5QEBsa0OMPgRERgcSHBxHo55mvjXB/HhX0ALNHR/P0TRn8dN1hfvVpET9cPHbQ29pTXse5pjaPXnu+P7JSo1i9pYwDFfUe+5vNxiLzlc1eWZpJelwoLe2dnDrfTOX5Zirr/vm14nwz+WV1/KPhNJ2mL3/4Hxnsx5OLxnDXjERn/BWE6JXHBT3AfTOTKKpq5A95JYyJDeHWyfGD2k5uQRW+RsW8NNe+6pW9Xd6n99SgzztazbDQAMYOCwEgwNdIaswQUnu5XGRHp4mqxlbLD4GLVNY1896eSlZvOS5BL1yORwa9Uor/unkcJdUX+MFfD5AUFcTkQVwoJKegiqzUKEICfO1Qpfvo6tNvK63l3xd4Xp++o9PE5uKzLBkf1++ZVT5Gg7l9Ex4ImH/4Bfn58N//KKDsbBPJ0cF2rFiIgfHYxrOv0cAf7plKbKg/y1/fzen65gE9v6TmAqVnm8j28rZNl5mpUR47n35P+XkaWzqYP8a639y63iu5hVW2KEsIm/HYoAdzz/TVpdO42NrB8td209zW/5DKLTD/Z13gJatV9iUrNZLWDhP7T3refPq8omp8DIpZo627zkBCZBBjh4WQUyBBL1yLRwc9QFpsCM9/YzKHTtXz/Xf39/vs2ZyCKsYND7X8ai6me/C6N3lFNUxJiiDUBi26hemx5J+oo66pzQaVCWEbHh/0AAszYvnh4rH848BpXvjsWJ/jay+0sru8zmvWnu+P8CA/0oeFelzQVze0UHC6weq2TZeFGbF0mjSfF1XbZHtC2IJXBD3Av85N5bbJ8azIOcrHh05fdeyGI9VojfTnu8nywD593lHztMr5abY583lCfBhDQ/ylTy9citcEvVKKZ2+7hkkJ4Xznz/s5fKr3XnNuQRVxYQGMGx7qwApdnyf26TcW1RAb6k96XIhNtmcwKBakx7KxqMajfiAK9+Y1QQ/mudEr751KWKAvy1/bfcXZjwAt7Z1sLj7LwvRYr1zE7Gq6+vTbPGTdG/O0yhrmpcXY9N86O2MoTW2dHvM6CffnVUEPMDQ0gD/el0ltUyuPvL77iqOuLcfO0tzeKW2bHnhan37vyfM0tHQwf4xtF6y7dmQ0gb5Gad8Il+F1QQ9wzYgwVtw+ifwTdfzH+4e+NBMnt7CKIf4+zEj1zDNArZWVGsWe8jpa2t2/LZFXVI3RoJg1yrppld0F+BqZmxZNbkG13a6RIMRAeGXQA9w4IY5vLxjNO7srePWL4wCYTJrcwmrmpcV47OJd1po5MsrSp3f/68jmFdUwNTGCsEDbn/mcnTGMMw0tHKpssPm2hRgorw16gCcWjOYr44fx7PpCPi+qZn/FeWoaW6VtcxXTk7vm059zdilWqW5s4fCpBubZaFpld9eNicGgIEfaN8IFeHXQGwyKFXdMZOywUL795l5e2Xwco0HZbE61JwoL8iUjzv379F2rVdrr3zpqiD9TkyLkLFnhErw66MG8ENUfl2bi72vgw4OnmZYcQXiQn7PLcmme0KfPO1rD0BB/MuLsN4U2OyOWwtMNVNTZ7opnQgyG1wc9QHx4IC/fO5VAXyO3TRnh7HJcXlaqe/fpOzpNbD5q+2mV3XWdWb2hUM6SFc7VZ9ArpVYppaqVUocuuy9SKZWjlCq2fO1xDWCl1FLLmGKl1FJbFm5rU5Mi2ft0NndkJji7FJfX1aff5qbtm312mlbZnXk9+2Bp3win688R/Rpgcbf7fgRs0FqPBjZYvv8SpVQk8FNgBjAd+GlvPxBcRYCvzLTpD3fv0+cV1WA0KGZbuVplf2RnxLK9tJaGlna770uI3vQZ9FrrTUD3KRa3AGstt9cCt/bw1BuAHK31Oa11HZDDlT8whJsy9+nPu2WfPu9oNVMSw+0yrbK77PRYOkz60oe/QjjDYHv0sVrrrpXBzgA9zUeMB05e9n2F5b4rKKWWK6XylVL5NTXyH8IdZKVG0dZhYp+b9emrG81z2+3dtukyOTGCqGA/ad8Ip7L6w1htPvXPqtP/tNYrtdaZWuvMmBiZ2ugO3HV9+k1HzwI47DrARoPi+rFD+byomvZOk0P2KUR3gw36KqVUHIDla0/TCiqByz/ZHGG5T3iAsEBfxg13vz59XlE1MSH+Dl2ZdGFGLI0tHew87t4nmQn3NdigXwd0zaJZCnzQw5hPgEVKqQjLh7CLLPcJD5GV4l59+q6LgNt7WmV3c0ZH4+9jkPaNcJr+TK98C9gGjFFKVSilHgR+AWQrpYqBhZbvUUplKqVeAdBanwN+Buyy/Plvy33CQ7hbn35/xXnqm9sdfuZzkJ8Ps0dFk1tYJYucCafw6WuA1vqbvTy0oIex+cBDl32/Clg16OqES5t22fr0WalRzi6nT3lFNRgUzBnl+M+BFmbEsuFINUfONJJup7N
x12w5TnJ0sMM+aBbuQ86MFYPmbn36vKIaJidGEBZk/2mV3S1IN4dvrp3aN7tPnOOZvxfw2w3Fdtm+cG8S9MIqWSlR7D1pfZ++8nwzP37vAHl2uqh2TWMrByvrme+g2TbdDQ0JYFJCuF0uRtJp0jz9wWEADlTU09TaYfN9CPcmQS+s0tWn31s+uD59W4eJFz8/xoIVeby18yRPvnOARjucRbqp6yLgTmxrZGfEsr+inqqGFptu9+1d5Rw+1cAdmSPoMGl2n6iz6faF+5OgF1aZlhKJYZDz6bccO8vi5zfx3CdFzEuL4aV7plDb1MrzubZvP+QdrSF6iJ9TL/jedZ0DWx7V1zW18dwnRWSlRvLTr47Dx6Dcdg0iYT8S9MIq5j592ICC/kx9C4+9uYe7X9lBp0mzetk0Xr43k8Xj47gzM4E1W8s4Vt1osxo7TZrNxTXMTYvBYHDeBd9HDx1CYmSQTfv0K3KKaGzp4JmbxxHs78PEhHC3+cxEOI4EvbBaVmpkv/r07Z0m/riplAUr8vi0oIrvLEzjkyfmct1l7ZTv3zCGID8jz6wrsNlUxH0nz3P+YrvTZ6MopcjOiGVLSa1N+uiHT9Xz5o5y7s1KYuww828qWamR0qcXV5CgF1br6tPvKe+9N7yjtJabfvsFP19fyIzUKHK/M4/HF46+YsXQqCH+fDc7jS+OneWTw2dsUt/GomoMCuY6YLXKvixMj6Wtw8TmYuvWdNJa88y6w0QE+fGd7LRL92elRtFp0uRLn15cRoJeWC0zuatPf+X5cDWNrXz3z/u4c+V2LrR2sPLeqby6NJPEqKBet3dPVhJjh4Xws38U0txm/Vm3eUdrmJQQ7hJXDpuWbL4Y+adWtm8+2HeKXWV1/GDxmC+twjk1KQIfg5L2jfgSCXphtZ769J0mzdqtZVy/Io+/HzjFo9eNJPe781g0blifyw/4GA08c/M4Ks8389LGEqtqO3uhlQMV9U5v23TxMRrMi5wdqaZjkIucXWjt4Nn1hUwcEcbtU798oZwgP+nTiytJ0AubyEqNZJ9l3Zs95XXc/MIX/HTdYSYlhPPJE3P5/g1jCfTr/4VdslKj+OrE4by0sYST5wZ/zdWuFokrXfB9YXosdRfb2TPIKam/+6yY6sZWnrl5XI8fLkufXnQnQS9sIis1irZOEw+u3cVtv99K7YU2XrxrCq89MJ3UmCGD2uZTS8ZiUIr/+bBg0HXlFZmnVY4fHjbobdjavDEx+BkN5BQM/DOIkpoLrPriOLdPHcHkxJ4v2CZ9etGdBL2wiWkpkRgNih2l51g+N5Xc783jxglxVq0SGRcWyGPXj+KTw1WXTngaiE6TZtPRGuaOdu60yu6G+PuQNTKKnIKBLXKmtea//l5AgK+RHywe2+u4qUkR+BoV20qkfSPMJOiFTYQG+LJ22XQ+enwOTy1JZ4h/n+vl9ctDc1JIjgrimb8fpq1jYD3tAxXnqbvYzjwXatt0yU4fSlntRUpqmvr9nJwC8w+87yxMIybEv9dxQX4+TBwhfXrxTxL0wmZmj45mdGyITbfp72Pk6a9mUFrTxJqtxwf03K7VKueOdr2gX2g5S7a/a9S3tHfysw8LSIsdwr0zk/ocn5UaxcHKei5In14gQS/cwPVjY7l+7FCezy2megDrxOQdrWFiQjgRwc6fVtldXFgg4+ND+70cwspNpZw818wzN4/D19j3f9tLffoyuQSEkKAXbuLpmzJo79T84qMj/Rpfe6GVAxXnmZ/mGtMqe7IwPZY95XWcvdB61XEVdRf5fd4xbrwmjmtH9u+krylJ4fgaVY/nNgjvI0Ev3EJydDAPz03hvb2V7D7Rd3htLj6L1q41rbK77IxYtIbPCq++NPPPPywE4Kkb0/u9benTi8tJ0Au38eh1o4gLC+DpDw7Tabr6bJW8omqigv24Jt51plV2lxEXyvCwAHKu0r7ZcuwsHx06w6PzRxEfHjig7UufXnSRoBduI8jPh6eWpHP4VANv7yrvdZzJpNlUfNbpq1X2RSnFwoxYNhfX9LjUQ3uniZ+uO0xiZBAPz00d8Pa7+vS7pE/v9STohVu5aUIcWamRPPdJEXVNbT2OOVBZz7mmNpdu23TJzoilpd3ElmNnr3hs7dYyjlVf4OmbMq5Y/K0/uubTS/tGSNALt6KU4pmbx9HY0sGKnKIex+QVVaMUzHHBaZXdzUiJIsTf54rZNzWN5guwzB8Tc+l6swMV6GdkUkK4fCArJOiF+xk7LJR7s5J4c0c5h0/VX/F4XlENE0eEE+mC0yq78/MxMG9MDLmF1Zgu+9zhlx8foaWjk6dvyrDq7OKs1CgOVdbb5fKMwn1I0Au39J2FaYQH+fHMusNfWkbgXFMb+yvOu0Xbpkt2RixnL7Syr8K8yNnuE3W8u7uCB2enDnqdoC6y7o0ACXrhpsKCfPnh4jHsKqvjg32nLt2/ubjGMq3SdefPdzc/bShGgyK3oIpOk/mCIrGh/vz79aOs3vaUROnTCwl64cZun5rAxBFhPLu+8NIUwryiGiKD/ZjgwtMquwsL8mVGSiQ5BVX8Jf8kByvreWpJOsE2WC9I+vQCJOiFGzMYzB/MVje28rvPis3TKo/WMHd0tEtPq+zJwvRYiqsv8OyHhUxPjuTmicNttm3p0wsJeuHWJidGcPvUEaz64jjr9p+itqnNrdo2XbIti5w1tXXwzM3jrPoAtruZl9a9kT69t5KgF27vB4vHEuBj5AfvHkApmJvmPh/EdkmIDGJh+lD+bf5IMoaH2nTbkxMj8DMapE/vxSTohduLCfHniew02jpNTHCTaZU9eWXpNL5/Q+8XFBmsf/bpJei9lQS98Aj3zUzi+rFDuXtGorNLcUlZqZEclD6917Iq6JVSjyulDimlDiulnujh8TCl1N+VUvstY5ZZsz8heuNrNLDq/mnckZng7FJcUlZqFCaN9Om91KCDXik1HngYmA5MBG5SSnWf+PsoUKC1ngjMB1Yopdzz92oh3Jj06b2bNUf06cAOrfVFrXUHsBG4rdsYDYQo8xSCIcA5QNZMFcLBpE/v3awJ+kPAHKVUlFIqCFgCdP+9+QXMPxBOAQeBx7XWV1zhWSm1XCmVr5TKr6mpsaIkIURvuvr0DdKn9zqDDnqtdSHwS+BT4GNgH9B9Ue0bLPcPByYBLyilrpg7prVeqbXO1FpnxsS439Q4IdxB1siuPr2cJettrPowVmv9qtZ6qtZ6LlAHHO02ZBnwnjY7BhwHbD9/TAjRpymX+vQS9N7G2lk3Qy1fEzH359/sNqQcWGAZEwuMAUqt2acQYnACfI1MSpQ+vTeydh79X5VSBcDfgUe11ueVUo8opR6xPP4z4Fql1EFgA/BDrfWVl9IRQjhE17o30qf3LlYtj6e1ntPDfS9ddvsUsMiafQghbCcrNZLfbjD36a8fG+vscoSDyJmxQngR6dN7Jwl6IbyI9Om9kwS9EF5mpvTpvY4EvRBepmvdm13HpX3jLSTohfAykxPD8fORdW+8iQS9EF4mwNfIZLmOrFeRoBfCC2WlRnH4VD31zdKn9wYS9E
J4oX+uTy9H9d5Agl4ILyR9eu8iQS+EFwrwNTIlMZxtEvReQYJeCC9l7tM3SJ/eRRw/28Tecvtc6lGCXggvlZUahZb59C5he2ktX/v9Fr7/7gE6Tdrm25egF8JLTUqQPr0reCf/JPe+uoOoYD9eXZqJ0aBsvg+rVq8UQrivrj799uMS9M5gMmme+7SIP+SVMHtUNC/ePYWwQF+77EuO6IXwYtKnd47mtk6+9cYe/pBXwl0zElm9bJrdQh4k6IXwatKnd7yqhhbueHkbnxSc4T9vyuDnt47H12jfKJagF8KLSZ/esQ6fqufWF7dQUnOBV+7L5MHZKShl+558d9KjF8KLBfgamZoYIfPpHSCnoIrH395LWKAv7z5yLRnDQx22bzmiF8LLZaVGUXC6gfqL0qe3B601f9xUyvLX8xk9dAgfPDrLoSEPEvRCeL2s1Ei0hp2y7o3NtXeaeOr9Q/x8fSFfGT+Mt5fPZL8DFJgAAA/JSURBVGhogMPrkKAXwstNTAjHX/r0Nld/sZ37V+/krZ3lPHrdSF745hQC/YxOqUV69EJ4OfN8+ggJehs6UdvEsjW7OHnuIitun8jXp45waj1yRC+EkD69De0oreXWF7dQ19TGGw9lOT3kQYJeCIH06W3lr7sruOfVHUQE+/H+t2YxPSXS2SUBEvRCCGBSorlPv7XkrLNLcVsrN5XwvXf2Mz0lkvf/bRbJ0cHOLukSCXohBP4+RuamxfC3vZU0t3U6uxy3c7q+mV99epQbxsWyZtl0woLst5zBYEjQCyEAeHhOKnUX23ln90lnlzJgWmt+nXOUtVvLnLL/Fz47htaa/7wpw+7LGQyG61UkhHCKackRTEoI55XNx+noNDm7nAH55cdF/HZDMT/7RwGlNRccuu/y2ov8eddJvjk9kRERQQ7dd39J0AshAFBK8ci8VMrPXeTjw2ecXU6/rdxUwksbS7htcjx+Pgb+9+Mih+7/+Q3FGA2KR68b5dD9DoQEvRDikuyMYSRHBfHyxlK0tv2VjmztnfyTPLv+CDdNiOO52yfyr3NH8vHhM+Q7aPbQseoLvL+3gvtmJhHrhDNe+8uqoFdKPa6UOqSUOqyUeqKXMfOVUvssYzZasz8hhH0ZDYqH56ZysLLe5Rc6yymo4kfvHWTO6Gh+fcckS+0pDA3x59n1hQ75QfWb3KME+hp5ZN5Iu+/LGoMOeqXUeOBhYDowEbhJKTWq25hw4PfAzVrrccDtVtQqhHCAr08ZQfQQP1ZuKnV2Kb3aUVrLo2/uYXx8GC/dMxU/H3OUBfn58N3sNPaUn+ejQ/ZtPxWcauAfB07zwOwUoob423Vf1rLmiD4d2KG1vqi17gA2Ard1G3MX8J7WuhxAa11txf6EEA4Q4Gtk6cxk8opqKDzd4OxyrnD4VD0Prc0nISKQ1fdPI9j/yyu53J6ZQFrsEH758RHaOuz3ofKvc44SGuDDQ3NS7bYPW7Em6A8Bc5RSUUqpIGAJkNBtTBoQoZTKU0rtVkrd19OGlFLLlVL5Sqn8mpoaK0oSQtjCvTOTCPQ18kcXO6o/UdvE0lW7CAnw4fUHZxAZ7HfFGKNB8eOvpHOi9iJv7Dhhlzr2nTxPbmEVy+em2vUSgLYy6KDXWhcCvwQ+BT4G9gHdz7TwAaYCNwI3AP+plErrYVsrtdaZWuvMmJiYwZYkhLCR8CA/7pyWwLr9pzh1vtnZ5QBQ3dDCPa/uoNNk4rUHZzA8PLDXsfPHxHDtyCh+u6HYLtfDXfFpEZHBfiyblWLzbduDVR/Gaq1f1VpP1VrPBeqAo92GVACfaK2btNZngU2Y+/lCCBf34OwUNLDqi+POLoX65nbuW7WT2gttrF42nVFDh1x1vFKKp5akU3exnT/kldi0lh2ltWwuPsu35o+8om3kqqyddTPU8jURc3/+zW5DPgBmK6V8LO2dGUChNfsUQjhGQmQQN14Tx1s7y+1yVNxfzW2dPLR2FyU1F1h5byaTEsL79bzx8WF8bXI8q7Ycp9JGv5VorVnx6VGGhvhzT1aSTbbpCNbOo/+rUqoA+DvwqNb6vFLqEaXUI3CpvfMxcADYCbyitT5k5T6FEA6yfG4qTW2ddut196W908Rjb+4h/0Qdv7lzMrNHRw/o+d9bZO4Ur/jENidRbS4+y86yc/z79aMI8HXORUQGw9rWzRytdYbWeqLWeoPlvpe01i9dNuY5y5jxWuvfWFuwEMJxxseHMWd0NKu3lNHa4djFzkwmzQ/fPcCGI9X8z63juXFC3IC3MSIiiAdmpfD+vkoOVdZbVY/5aL6I+PBA7pyWaNW2HE3OjBVCXNXyuanUNLbyt72VDtun1pqfry/kvb2VfC87jbtnDL5N8q3rRhIe6Mv/+8i6k6hyCqrYX1HP4wtHX5q37y7cq1ohhMPNHhVNRlwoKzeVYjI5ZlmE3+eV8OoXx7n/2mQeu966NWRCA3z59oLRbDlWS97RwU3fNpnMq2OmRAdz2+R4q+pxBgl6IcRVKaX413mplNQ0seGI/c95fGtnOc99UsStk4bz9E0ZKKWs3ubdM5JIjgriF+uP0DmIH1YfHjzNkTONPLFwND4uuAxxX9yvYiGEwy25Jo748EBWbrLtVMXuPj50mp+8f5D5Y2J47vaJGAzWhzyAn4+BHyweS1FVI+8OcL39jk4T/5d7lDGxIXx1wnCb1ONoEvRCiD75Gg08ODuFXWV17D5hn5Uhtx47y7ff2sekhHB+f/cUm1/A4yvjhzElMZwVnx7lYltHv5/3/t5KSmua+O6iNJv94HE0CXohRL/cOS2BsEBfXt5o+2URthw7y4Nr80mODmLV/dMI8rP9iUhKKX5yYzrVja38cVP/TgJr6zDx/IZirokPY1FGrM1rchQJeiFEvwT7+3BvVhI5hVWU2PAqTp8XVbNszS6SooJ446EswoOuXL/GVqYmRbJ43DBe3lRCdWNLn+P/kn+SirpmvrcozSafFTiLBL0Qot+WXpuMr9HAK5ttc1T/6eEzLH8tn7TYIbz1cBYxIfZf7veHXxlLW4eJ3+QWX3VcS3snv/usmMykCOalufcaXBL0Qoh+iwnx51+mjuCveyr7dUR8Nf84cIpvvbGHccPDeOOhLCJ6WInSHlKig7l7RiJ/3nWSY9WNvY770/YTVDW08uQNY9z6aB4k6IUQA/TwnFTaO02s3Vo26G28t6eCb7+1lymJEfzpoRkOX+r32wtGE+Rr5BcfHenx8abWDv6QV8LsUdFkpUY5tDZ7kKAXQgxISnQwN2QM4/VtJ2hq7f/slS5v7Szne+/sZ+bIKNY8MI0hTlgBMmqIP4/MH0luYTXbe7hk4pqtZdQ2tfHdRVesqu6WJOiFEAO2fF4qDS0dvL1rYHPS124t48fvHWReWgyvLrXP7Jr+enB2CnFhATy7vvBLZ/zWN7fz8sYSFowdypTECKfVZ0sS9EKIAZuSGMH05EhWfXGc9s7+Xa5v5aYSfrruMNkZsbx871Snr/4Y4Gvke4vGcKCinr8fOHXp/lc3l9LQ0uExR/MgQS+EGKTlc1OpPN/MhwdO9zn2dxuKe
Xb9EW6cEMfv756Cv49rLPH7tcnxZMSF8twnRbR2dHKuqY1XvzjOjdfEMW54mLPLsxkJeiHEoFw/diijhg7h5U2lva4KqbXmV58UsSLnKLdNjuf5OyfZ/IxXaxgN5itRVdQ189rWE7y8sYTm9k6+kz3a2aXZlOu84kIIt2IwKJbPTaXwdAObi89e8bjWmmfXF/LC58f4xrQEfnX7RJdcEGz26GjmpcXwu8+KWbutjFsnxTNqaIizy7Ip13vVhRBu45ZJwxka4s/L3RY7M5k0P113mD9uPs7SmUk8+7VrXHqdmB8vGcuF1g46OjWPL/Sso3kA97iyrRDCJfn7GHlgdgq/+OgIhyrrGR8fRqdJ85P3D/L2rpMsn5vKj78y1uVPOBo7LJSnlqTjY1AkRQU7uxybkyN6IYRV7pqRyBB/H17eVEpHp4kn39nP27tO8u3rR7lFyHd5aE4q989KcXYZdiFH9EIIq4QG+HLXjERe2VxKY0s7eUU1PLkojceu97wWiLuSI3ohhNWWzUrGoBR5RTX8x43pEvIuRo7ohRBWiwsL5NnbrsHfx8Atk9zvmqqeToJeCGETd2QmOLsE0Qtp3QghhIeToBdCCA8nQS+EEB5Ogl4IITycBL0QQng4CXohhPBwEvRCCOHhJOiFEMLDqd4uGOAsSqka4ISz67iKaODKxbddh9RnHanPOlKfdaypL0lrHdPTAy4X9K5OKZWvtc50dh29kfqsI/VZR+qzjr3qk9aNEEJ4OAl6IYTwcBL0A7fS2QX0QeqzjtRnHanPOnapT3r0Qgjh4eSIXgghPJwEvRBCeDgJ+m6UUglKqc+VUgVKqcNKqcd7GDNfKVWvlNpn+fO0E+osU0odtOw/v4fHlVLqt0qpY0qpA0qpKQ6sbcxlr80+pVSDUuqJbmMc+hoqpVYppaqVUocuuy9SKZWjlCq2fI3o5blLLWOKlVJLHVjfc0qpI5Z/v/eVUuG9PPeq7wU71veMUqrysn/DJb08d7FSqsjyXvyRA+v782W1lSml9vXyXEe8fj3misPeg1pr+XPZHyAOmGK5HQIcBTK6jZkP/MPJdZYB0Vd5fAnwEaCALGCHk+o0Amcwn8zhtNcQmAtMAQ5ddt//Aj+y3P4R8MsenhcJlFq+RlhuRziovkWAj+X2L3uqrz/vBTvW9wzwZD/+/UuAVMAP2N/9/5O96uv2+ArgaSe+fj3miqPeg3JE343W+rTWeo/ldiNQCLjjRTBvAV7TZtuBcKVUnBPqWACUaK2deraz1noTcK7b3bcAay231wK39vDUG4AcrfU5rXUdkAMsdkR9WutPtdYdlm+3AyNsvd/+6uX164/pwDGtdanWug14G/PrblNXq08ppYA7gLdsvd/+ukquOOQ9KEF/FUqpZGAysKOHh2cqpfYrpT5SSo1zaGFmGvhUKbVbKbW8h8fjgZOXfV+Bc35gfYPe/4M5+zWM1Vqfttw+A8T2MMZVXscHMP+G1pO+3gv29JiltbSql7aDK7x+c4AqrXVxL4879PXrlisOeQ9K0PdCKTUE+CvwhNa6odvDezC3IiYCvwP+5uj6gNla6ynAV4BHlVJznVDDVSml/ICbgXd6eNgVXsNLtPl3ZJeca6yU+gnQAbzRyxBnvRf+AIwEJgGnMbdHXNE3ufrRvMNev6vlij3fgxL0PVBK+WL+x3hDa/1e98e11g1a6wuW2+sBX6VUtCNr1FpXWr5WA+9j/hX5cpVAwmXfj7Dc50hfAfZorau6P+AKryFQ1dXOsnyt7mGMU19HpdT9wE3A3ZYguEI/3gt2obWu0lp3aq1NwB972a+zXz8f4Dbgz72NcdTr10uuOOQ9KEHfjaWf9ypQqLX+dS9jhlnGoZSajvl1rHVgjcFKqZCu25g/tDvUbdg64D7L7JssoP6yXxEdpdcjKWe/hhbrgK4ZDEuBD3oY8wmwSCkVYWlNLLLcZ3dKqcXAD4CbtdYXexnTn/eCveq7/DOfr/Wy313AaKVUiuU3vG9gft0dZSFwRGtd0dODjnr9rpIrjnkP2vOTZnf8A8zG/OvTAWCf5c8S4BHgEcuYx4DDmGcQbAeudXCNqZZ977fU8RPL/ZfXqIAXMc94OAhkOrjGYMzBHXbZfU57DTH/wDkNtGPucT4IRAEbgGIgF4i0jM0EXrnsuQ8Axyx/ljmwvmOYe7Nd78OXLGOHA+uv9l5wUH2vW95bBzAHVlz3+izfL8E8y6TEkfVZ7l/T9Z67bKwzXr/ecsUh70FZAkEIITyctG6EEMLDSdALIYSHk6AXQggPJ0EvhBAeToJeCCE8nAS9EEJ4OAl6IYTwcP8fFg2mbFsV+vMAAAAASUVORK5CYII=\n"
+          },
+          "metadata": {
+            "needs_background": "light"
+          }
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "<Figure size 432x288 with 0 Axes>"
+            ]
+          },
+          "metadata": {}
+        }
+      ],
+      "source": [
+        "accuracy_graph(20,'/content/drive/MyDrive/cifar10',4)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "wXtjvPMieRJz"
+      },
+      "source": [
+        "# **mlp**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 17,
+      "metadata": {
+        "id": "59O-O-EMeWYf"
+      },
+      "outputs": [],
+      "source": [
+        "def segmoid(x):\n",
+        "    return 1/(1+np.exp(-x))"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 18,
+      "metadata": {
+        "id": "kgsyRpJ8epc4"
+      },
+      "outputs": [],
+      "source": [
+        "def derivation(x):\n",
+        "    deriv_segmoid = segmoid(x)*(1-segmoid(x))\n",
+        "    return deriv_segmoid"
+      ]
+    },
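+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Numerical check of derivation (a sketch): compare the analytic sigmoid\n",
+        "# derivative with a central finite difference at a few points.\n",
+        "pts = np.array([-2.0, -0.5, 0.0, 1.0, 3.0])\n",
+        "eps = 1e-5\n",
+        "numeric = (sigmoid(pts + eps) - sigmoid(pts - eps)) / (2 * eps)\n",
+        "assert np.allclose(derivation(pts), numeric, atol=1e-6)"
+      ]
+    },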
+    {
+      "cell_type": "code",
+      "execution_count": 19,
+      "metadata": {
+        "id": "SeUL8_MFeriO"
+      },
+      "outputs": [],
+      "source": [
+        "def learn_once_mse(w1,b1,w2,b2,data,targets,learning_rate):\n",
+        "    # This function performs one gradient descent step\n",
+        "    # w1, b1, w2 and b2 -- the weights and biases of the network,\n",
+        "    # data -- a matrix of shape (batch_size x d_in)\n",
+        "    # targets -- a matrix of shape (batch_size x d_out)\n",
+        "    # learning_rate -- the learning rate\n",
+        "    A0=data\n",
+        "    A1=segmoid(np.matmul(A0, w1) + b1)\n",
+        "    A2=segmoid(np.matmul(A1,w2) + b2)\n",
+        "    #Let calculate the partial derivates\n",
+        "    #2\n",
+        "    D_A2=2*(A2-targets)\n",
+        "    D_A2_T=np.matmul(A2,(1-A2).T)\n",
+        "    D_Z2=np.matmul(D_A2_T,D_A2)\n",
+        "    D_W2=np.matmul(A1.T,D_Z2)\n",
+        "    D_B2=D_Z2\n",
+        "    #1\n",
+        "    D_A1=np.matmul(D_Z2,w2.T)\n",
+        "    D_Z1=np.matmul(np.matmul(A1,(1-A1).T),D_A1)\n",
+        "    D_B1=D_Z1\n",
+        "    D_W1=np.matmul(A0.T,D_Z1)\n",
+        "    #The backpropagation of the gradient\n",
+        "    w1=w1-learning_rate*D_W1\n",
+        "    w2=w2-learning_rate*D_W2\n",
+        "    b1=b1-learning_rate*D_B1\n",
+        "    b2=b2-learning_rate*D_B2\n",
+        "    # Forward pass\n",
+        "    G1 = np.matmul(A0, w1) + b1\n",
+        "    C1 = segmoid(G1)\n",
+        "    G2 = np.matmul(C1, w2) + b2\n",
+        "    C2 = segmoid(G2)\n",
+        "    predictions = C2\n",
+        "\n",
+        "    # Compute loss (MSE)\n",
+        "    loss = np.mean(np.square(predictions - targets))\n",
+        "\n",
+        "    return(w1,b1,w2,b2,loss)\n"
+      ]
+    },
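+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Small check for learn_once_mse (a sketch with made-up sizes): since the loss\n",
+        "# is computed after the update, two successive calls on the same fixed batch are\n",
+        "# expected to report a (slightly) decreasing loss, though this is not guaranteed\n",
+        "# for every random initialisation or learning rate.\n",
+        "toy_data = np.random.rand(20, 5)\n",
+        "toy_targets = np.random.rand(20, 2)\n",
+        "tw1 = 2 * np.random.rand(5, 3) - 1\n",
+        "tb1 = np.zeros((1, 3))\n",
+        "tw2 = 2 * np.random.rand(3, 2) - 1\n",
+        "tb2 = np.zeros((1, 2))\n",
+        "tw1, tb1, tw2, tb2, loss_1 = learn_once_mse(tw1, tb1, tw2, tb2, toy_data, toy_targets, 0.1)\n",
+        "tw1, tb1, tw2, tb2, loss_2 = learn_once_mse(tw1, tb1, tw2, tb2, toy_data, toy_targets, 0.1)\n",
+        "print(loss_1, loss_2)   # loss_2 is usually at or below loss_1"
+      ]
+    },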
+    {
+      "cell_type": "code",
+      "execution_count": 20,
+      "metadata": {
+        "id": "KjUbOvRxe0E8"
+      },
+      "outputs": [],
+      "source": [
+        "def one_hot(D_array):\n",
+        "    #This function transforms an array to the one-hot encoding\n",
+        "    n=D_array.shape[0]\n",
+        "    o_h_matrix = np.zeros((D_array.shape[0],int(np.max(D_array)+1)))\n",
+        "    for i in range(0,n):\n",
+        "        o_h_matrix[i,int(D_array[i])]=1\n",
+        "    return o_h_matrix"
+      ]
+    },
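+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Equivalent vectorised one-hot (a sketch): indexing an identity matrix with the\n",
+        "# label array gives the same matrix as the loop-based one_hot above.\n",
+        "toy = np.array([1, 2, 0, 2])\n",
+        "assert (one_hot(toy) == np.eye(int(toy.max()) + 1)[toy]).all()"
+      ]
+    },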
+    {
+      "cell_type": "code",
+      "execution_count": 21,
+      "metadata": {
+        "id": "0BFdXb8yfCUM"
+      },
+      "outputs": [],
+      "source": [
+        "def softmax(x):\n",
+        "    #the softmax activation function\n",
+        "    exp_x=np.exp(x)\n",
+        "    func=exp_x/exp_x.sum(axis=1, keepdims=True)\n",
+        "    return func"
+      ]
+    },
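+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Two quick properties of softmax (a sketch): each row sums to 1, and adding a\n",
+        "# constant to every logit of a row does not change the output, which is why\n",
+        "# subtracting the row maximum above is safe.\n",
+        "logits = np.random.randn(4, 10)\n",
+        "assert np.allclose(softmax(logits).sum(axis=1), 1.0)\n",
+        "assert np.allclose(softmax(logits), softmax(logits + 100.0))"
+      ]
+    },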
+    {
+      "cell_type": "code",
+      "execution_count": 22,
+      "metadata": {
+        "id": "1jHMG2AhfEBA"
+      },
+      "outputs": [],
+      "source": [
+        "def learn_once_cross_entropy(w1,b1,w2,b2,data,targets,learning_rate):\n",
+        "    # This function performs one gradient descent step using a binary cross-entropy loss\n",
+        "    A0=data\n",
+        "    Targets=one_hot(targets)\n",
+        "    A1=segmoid(np.matmul(A0, w1) + b1)\n",
+        "    A2=softmax(np.matmul(A1,w2) + b2)\n",
+        "    #Let calculate the partial derivates\n",
+        "    #2\n",
+        "    D_Z2=(A2-Targets)\n",
+        "    D_W2=np.matmul(A1.T,D_Z2)\n",
+        "    D_B2=D_Z2\n",
+        "    #1\n",
+        "    D_A1=np.matmul(D_Z2,w2.T)\n",
+        "    D_Z1=np.matmul(np.matmul(A1,(1-A1).T),D_A1)\n",
+        "    D_B1=D_Z1\n",
+        "    D_W1=np.matmul(A0.T,D_Z1)\n",
+        "    #The backpropagation of the gradient\n",
+        "    w1=w1-learning_rate*D_W1\n",
+        "    w2=w2-learning_rate*D_W2\n",
+        "    b1=b1-learning_rate*D_B1\n",
+        "    b2=b2-learning_rate*D_B2\n",
+        "    # Forward pass\n",
+        "    G1 = np.matmul(A0, w1) + b1\n",
+        "    C1 = segmoid(G1)\n",
+        "    G2 = np.matmul(C1, w2) + b2\n",
+        "    C2 = softmax(G2)\n",
+        "    #Cross entropy loss\n",
+        "    loss = -np.sum(np.multiply(Targets,np.log(C2)))/float(C2.shape[0])\n",
+        "    return (w1,b1,w2,b2,loss)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 23,
+      "metadata": {
+        "id": "QKm6UIr8fJkO"
+      },
+      "outputs": [],
+      "source": [
+        "def train_mlp(w1,b1,w2,b2,data_train,labels_train,learning_rate,num_epoch):\n",
+        "    #This function returns the different accuracies of the program depending on the number of epoches chosen\n",
+        "    train_accuracies=[]\n",
+        "    for i in range(0,num_epoch):\n",
+        "        (w1,b1,w2,b2,loss)=learn_once_cross_entropy(w1,b1,w2,b2,data_train,labels_train,learning_rate)\n",
+        "        # forward pass in order to determine the accuracy\n",
+        "        A0=data_train\n",
+        "        G1 = np.matmul(A0, w1) + b1\n",
+        "        C1 = segmoid(G1)\n",
+        "        G2 = np.matmul(C1, w2) + b2\n",
+        "        C2 = softmax(G2)\n",
+        "        predictions = np.argmax(C2,axis=1)\n",
+        "        acc=(np.sum(predictions == labels_train)/predictions.shape[0])*100\n",
+        "        train_accuracies.append(acc)\n",
+        "    return (w1,w2,b1,b2,train_accuracies)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "def test_mlp(w1,b1,w2,b2,data_test,labels_test):\n",
+        "    # This function tests the previous function on the data_test.\n",
+        "    # First: predict the classes\n",
+        "    A0=data_test\n",
+        "    G1 = np.matmul(A0, w1) + b1\n",
+        "    C1 = segmoid(G1)\n",
+        "    G2 = np.matmul(C1, w2) + b2\n",
+        "    C2 = softmax(G2)\n",
+        "    # the predicted classes\n",
+        "    predictions = np.argmax(C2,axis=1)\n",
+        "    # The accuracy of the predictions\n",
+        "    test_accuracy = (np.sum(predictions == labels_test)/predictions.shape[0])*100\n",
+        "    return test_accuracy"
+      ],
+      "metadata": {
+        "id": "iSieEBYBgg81"
+      },
+      "execution_count": 24,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "def run_mlp_training(data_train, labels_train, data_test, labels_test,d_h,learning_rate ,num_epoch ):\n",
+        "    # This function trains an MLP classifier and return the training accuracies across epochs as a list of floats and the final testing accuracy as a float.\n",
+        "    d_in = data_train.shape[1]\n",
+        "    d_out = 10\n",
+        "    w1 = 2 * np.random.rand(d_in, d_h) - 1\n",
+        "    b1 = np.zeros((1, d_h))\n",
+        "    w2 = 2 * np.random.rand(d_h, d_out) - 1\n",
+        "    b2 = np.zeros((1, d_out))\n",
+        "    # training\n",
+        "    (w1,w2,b1,b2,train_accuracies)=train_mlp(w1,b1,w2,b2,data_train,labels_train,learning_rate,num_epoch)\n",
+        "    # Testing\n",
+        "    final_accuracy=test_mlp(w1,b1,w2,b2,data_test,labels_test)\n",
+        "    return train_accuracies, final_accuracy"
+      ],
+      "metadata": {
+        "id": "1SSJD3megVde"
+      },
+      "execution_count": 25,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "N = 30                                             \n",
+        "d_in = 5                                           \n",
+        "d_h = 3                                             \n",
+        "d_out = 2                                          \n",
+        "w1 = 2 * np.random.rand(d_in, d_h) - 1              \n",
+        "b1 = np.zeros((1, d_h))                             \n",
+        "w2 = 2 * np.random.rand(d_h, d_out) - 1            \n",
+        "b2 = np.zeros((1, d_out))                           \n",
+        "\n",
+        "data = np.random.rand(N, d_in)                      \n",
+        "targets = np.random.rand(N, d_out)                  \n",
+        "learning_rate=0.5\n",
+        "\n",
+        "(w1n,b1n,w2n,b2n,loss)=learn_once_mse(w1,b1,w2,b2,data,targets,learning_rate)"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "d-VmkeiKhTmq",
+        "outputId": "217ede54-c377-439b-9b32-0b1f73d945a6"
+      },
+      "execution_count": 35,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stderr",
+          "text": [
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: RuntimeWarning: overflow encountered in exp\n",
+            "  \n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "0 < loss < 1"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "D0hOQsp0hZon",
+        "outputId": "084b78ba-58f8-4e35-a4e8-85261c8238f0"
+      },
+      "execution_count": 27,
+      "outputs": [
+        {
+          "output_type": "execute_result",
+          "data": {
+            "text/plain": [
+              "True"
+            ]
+          },
+          "metadata": {},
+          "execution_count": 27
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "w1n.shape==w1.shape"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "WYvzC-u8heLi",
+        "outputId": "49c343f7-43b5-489f-bf06-74d20679c179"
+      },
+      "execution_count": 28,
+      "outputs": [
+        {
+          "output_type": "execute_result",
+          "data": {
+            "text/plain": [
+              "True"
+            ]
+          },
+          "metadata": {},
+          "execution_count": 28
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        " assert ((one_hot(np.array([1,2,0])) == [[0, 1, 0],[0, 0, 1],[1, 0, 0]]).all())==True"
+      ],
+      "metadata": {
+        "id": "B9VCT11QhwN5"
+      },
+      "execution_count": 29,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "(data,labels)=read_cifar_batch('/content/drive/MyDrive/cifar10/data_batch_1')"
+      ],
+      "metadata": {
+        "id": "bHIBfG7Ahz8r"
+      },
+      "execution_count": 30,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "N = data.shape[0]                              \n",
+        "d_in = data.shape[1]                           \n",
+        "d_h = 64                                       \n",
+        "d_out = 10                                     \n",
+        "w1 = 2 * np.random.rand(d_in, d_h) - 1         \n",
+        "b1 = np.zeros((1, d_h))                        \n",
+        "w2 = 2 * np.random.rand(d_h, d_out) - 1        \n",
+        "b2 = np.zeros((1, d_out))                      \n",
+        "learning_rate=0.1                              \n",
+        "num_epoch=100"
+      ],
+      "metadata": {
+        "id": "HBnSDU6piFIG"
+      },
+      "execution_count": 31,
+      "outputs": []
+    },
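+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Optional preprocessing (a hedged suggestion, not part of the original run):\n",
+        "# the raw CIFAR pixels lie in [0, 255], so np.matmul(A0, w1) produces very large\n",
+        "# pre-activations, the sigmoid saturates and its gradient vanishes, which is\n",
+        "# consistent with the overflow warnings and the flat ~10% training accuracy\n",
+        "# printed below. Scaling the inputs to [0, 1] before training is a common remedy.\n",
+        "data_scaled = data / 255.0\n",
+        "# e.g. train_mlp(w1, b1, w2, b2, data_scaled, labels, learning_rate, num_epoch)"
+      ]
+    },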
+    {
+      "cell_type": "code",
+      "source": [
+        "0< learn_once_cross_entropy(w1,b1,w2,b2,data,labels,learning_rate)[4] <1"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "XgNaZ6APiXOH",
+        "outputId": "bfbb7888-2a42-46a2-f0c2-1d5b4fa75dde"
+      },
+      "execution_count": 32,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stderr",
+          "text": [
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: RuntimeWarning: overflow encountered in exp\n",
+            "  \n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: RuntimeWarning: overflow encountered in exp\n",
+            "  This is separate from the ipykernel package so we can avoid doing imports until\n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in true_divide\n",
+            "  after removing the cwd from sys.path.\n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log\n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:28: RuntimeWarning: invalid value encountered in multiply\n"
+          ]
+        },
+        {
+          "output_type": "execute_result",
+          "data": {
+            "text/plain": [
+              "False"
+            ]
+          },
+          "metadata": {},
+          "execution_count": 32
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "print(train_mlp(w1,b1,w2,b2,data,labels,learning_rate,num_epoch)[4])"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "-i79V01ljBi9",
+        "outputId": "b59af252-6ec6-4888-b0db-17b82da6e0ce"
+      },
+      "execution_count": 33,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stderr",
+          "text": [
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: RuntimeWarning: overflow encountered in exp\n",
+            "  \n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: RuntimeWarning: overflow encountered in exp\n",
+            "  This is separate from the ipykernel package so we can avoid doing imports until\n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in true_divide\n",
+            "  after removing the cwd from sys.path.\n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:28: RuntimeWarning: divide by zero encountered in log\n",
+            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:28: RuntimeWarning: invalid value encountered in multiply\n"
+          ]
+        },
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "[10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05, 10.05]\n"
+          ]
+        }
+      ]
+    }
+  ],
+  "metadata": {
+    "colab": {
+      "provenance": []
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "name": "python3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file
-- 
GitLab