diff --git a/TD2 Deep Learning.ipynb b/TD2 Deep Learning.ipynb index 1cdde7c12ce25e35731c3bef44edbf6f7419278a..975918d059923d79b3df00ac9ea9f4d947640ea2 100644 --- a/TD2 Deep Learning.ipynb +++ b/TD2 Deep Learning.ipynb @@ -1122,9 +1122,30 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Test Loss: 22.242847\n", + "\n", + "Test Accuracy of airplane: 52% (523/1000)\n", + "Test Accuracy of automobile: 85% (853/1000)\n", + "Test Accuracy of bird: 34% (342/1000)\n", + "Test Accuracy of cat: 43% (430/1000)\n", + "Test Accuracy of deer: 66% (660/1000)\n", + "Test Accuracy of dog: 45% (452/1000)\n", + "Test Accuracy of frog: 74% (749/1000)\n", + "Test Accuracy of horse: 64% (649/1000)\n", + "Test Accuracy of ship: 83% (835/1000)\n", + "Test Accuracy of truck: 64% (645/1000)\n", + "\n", + "Test Accuracy (Overall): 61% (6138/10000)\n" + ] + } + ], "source": [ "# quantize model\n", "quantized_model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)\n", @@ -1134,18 +1155,18 @@ "quantized_class_correct = list(0.0 for i in range(10))\n", "quantized_class_total = list(0.0 for i in range(10))\n", "\n", - "model.eval()\n", + "quantized_model.eval()\n", "# iterate over test data\n", "for data, target in test_loader:\n", " # move tensors to GPU if CUDA is available\n", " if train_on_gpu:\n", " data, target = data.cuda(), target.cuda()\n", " # forward pass: compute predicted outputs by passing inputs to the model\n", - " output = model(data)\n", + " output = quantized_model(data)\n", " # calculate the batch loss\n", " loss = criterion(output, target)\n", " # update test loss\n", - " test_loss += loss.item() * data.size(0)\n", + " quantized_test_loss += loss.item() * data.size(0)\n", " # convert output probabilities to predicted class\n", " _, pred = torch.max(output, 1)\n", " # compare predictions to true label\n", @@ -1158,22 +1179,22 @@ " # calculate test accuracy for each object class\n", " for i in range(batch_size):\n", " label = target.data[i]\n", - " class_correct[label] += correct[i].item()\n", - " class_total[label] += 1\n", + " quantized_class_correct[label] += correct[i].item()\n", + " quantized_class_total[label] += 1\n", "\n", "# average test loss\n", - "test_loss = test_loss / len(test_loader)\n", - "print(\"Test Loss: {:.6f}\\n\".format(test_loss))\n", + "quantized_test_loss = quantized_test_loss / len(test_loader)\n", + "print(\"Test Loss: {:.6f}\\n\".format(quantized_test_loss))\n", "\n", "for i in range(10):\n", - " if class_total[i] > 0:\n", + " if quantized_class_total[i] > 0:\n", " print(\n", " \"Test Accuracy of %5s: %2d%% (%2d/%2d)\"\n", " % (\n", " classes[i],\n", - " 100 * class_correct[i] / class_total[i],\n", - " np.sum(class_correct[i]),\n", - " np.sum(class_total[i]),\n", + " 100 * quantized_class_correct[i] / quantized_class_total[i],\n", + " np.sum(quantized_class_correct[i]),\n", + " np.sum(quantized_class_total[i]),\n", " )\n", " )\n", " else:\n", @@ -1182,13 +1203,20 @@ "print(\n", " \"\\nTest Accuracy (Overall): %2d%% (%2d/%2d)\"\n", " % (\n", - " 100.0 * np.sum(class_correct) / np.sum(class_total),\n", - " np.sum(class_correct),\n", - " np.sum(class_total),\n", + " 100.0 * np.sum(quantized_class_correct) / np.sum(quantized_class_total),\n", + " np.sum(quantized_class_correct),\n", + " np.sum(quantized_class_total),\n", " )\n", ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The result is 
that the test accuracy of the quantized model (61% overall) is very close to that of the initial, non-quantized model." ] }, { "cell_type": "markdown", "id": "201470f9",
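A side note on the change above: `torch.quantization.quantize_dynamic` only replaces `nn.Linear` (and recurrent) layers with int8 versions by default, so a conv-heavy CIFAR model keeps most of its weights in fp32, which is consistent with the accuracy staying essentially unchanged. A natural companion check is the size reduction on disk. The following is a minimal sketch, not part of the notebook; it assumes `model` and `quantized_model` are defined as in the cell above, and the helper `print_model_size` is hypothetical:

```python
import os

import torch


def print_model_size(m, label):
    # Hypothetical helper, not from the notebook: serialize the state dict
    # to a temporary file and report its size on disk.
    torch.save(m.state_dict(), "tmp_size_check.pt")
    print(f"{label}: {os.path.getsize('tmp_size_check.pt') / 1e6:.2f} MB")
    os.remove("tmp_size_check.pt")


print_model_size(model, "fp32 model")
print_model_size(quantized_model, "dynamically quantized model")
```

Since only the fully connected layers are quantized here, the size drop is modest for this architecture; the payoff is larger for models dominated by `nn.Linear` or LSTM weights.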