diff --git a/VGG19.ipynb b/VGG19.ipynb
new file mode 100644
index 000000000..611fbf51f
--- /dev/null
+++ b/VGG19.ipynb
@@ -0,0 +1,415 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# VGG19"
+ ],
+ "metadata": {
+ "id": "IVs7HLFkrKkM"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Loading the required libraries"
+ ],
+ "metadata": {
+ "id": "HRdvHkJUrEn0"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "oWgRRd5xXA6_"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.models import Sequential, Model\n",
+ "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n",
+ "from tensorflow.keras.applications import VGG19\n",
+ "from tensorflow.keras.datasets import cifar10\n",
+ "from tensorflow.keras.utils import to_categorical"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Loading the CIFAR-10 dataset"
+ ],
+ "metadata": {
+ "id": "npvMLpc7rA1k"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\n",
+ "train_images, test_images = train_images / 255.0, test_images / 255.0\n",
+ "train_labels = to_categorical(train_labels, num_classes=10)\n",
+ "test_labels = to_categorical(test_labels, num_classes=10)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "riMP32--XOaZ",
+ "outputId": "9526925d-260d-40f8-cda3-36b1376a0e88"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n",
+ "170498071/170498071 [==============================] - 6s 0us/step\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Creating the VGG19 model from scratch"
+ ],
+ "metadata": {
+ "id": "IqHcT-08q56w"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def create_vgg19_scratch(input_shape=(32, 32, 3), num_classes=10):\n",
+ "    \"\"\"Build a VGG19-style CNN from scratch.\n",
+ "\n",
+ "    Follows the VGG19 convolutional layout (blocks of 2, 2, 4, 4, 4 conv\n",
+ "    layers with 64/128/256/512/512 filters, each block ending in 2x2 max\n",
+ "    pooling), followed by two 4096-unit dense layers and a softmax head.\n",
+ "\n",
+ "    Args:\n",
+ "        input_shape: Shape of the input images. Defaults to CIFAR-10's\n",
+ "            32x32 RGB images.\n",
+ "        num_classes: Number of output classes. Defaults to 10 (CIFAR-10).\n",
+ "\n",
+ "    Returns:\n",
+ "        An uncompiled tf.keras Sequential model.\n",
+ "    \"\"\"\n",
+ "    model = Sequential([\n",
+ "        # Block 1: 2 x Conv(64)\n",
+ "        Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=input_shape),\n",
+ "        Conv2D(64, (3, 3), activation='relu', padding='same'),\n",
+ "        MaxPooling2D((2, 2), strides=(2, 2)),\n",
+ "\n",
+ "        # Block 2: 2 x Conv(128)\n",
+ "        Conv2D(128, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(128, (3, 3), activation='relu', padding='same'),\n",
+ "        MaxPooling2D((2, 2), strides=(2, 2)),\n",
+ "\n",
+ "        # Block 3: 4 x Conv(256)\n",
+ "        Conv2D(256, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(256, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(256, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(256, (3, 3), activation='relu', padding='same'),\n",
+ "        MaxPooling2D((2, 2), strides=(2, 2)),\n",
+ "\n",
+ "        # Block 4: 4 x Conv(512)\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        MaxPooling2D((2, 2), strides=(2, 2)),\n",
+ "\n",
+ "        # Block 5: 4 x Conv(512)\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        Conv2D(512, (3, 3), activation='relu', padding='same'),\n",
+ "        MaxPooling2D((2, 2), strides=(2, 2)),\n",
+ "\n",
+ "        # Classifier head\n",
+ "        Flatten(),\n",
+ "        Dense(4096, activation='relu'),\n",
+ "        Dense(4096, activation='relu'),\n",
+ "        Dense(num_classes, activation='softmax')\n",
+ "    ])\n",
+ "    return model"
+ ],
+ "metadata": {
+ "id": "8J9-x0sjXRFV"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Training the model"
+ ],
+ "metadata": {
+ "id": "W7EP2SP4qv39"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Build and compile the from-scratch VGG19, then train for 70 epochs,\n",
+ "# validating on the held-out test split after every epoch.\n",
+ "vgg19_scratch = create_vgg19_scratch()\n",
+ "# NOTE: a small learning rate (1e-5) is used; larger rates can destabilize\n",
+ "# training of this deep, unnormalized stack from random initialization.\n",
+ "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5)\n",
+ "vgg19_scratch.compile(\n",
+ "    optimizer=optimizer,\n",
+ "    loss='categorical_crossentropy',\n",
+ "    metrics=['accuracy'],\n",
+ ")\n",
+ "scratch_history = vgg19_scratch.fit(\n",
+ "    train_images,\n",
+ "    train_labels,\n",
+ "    epochs=70,\n",
+ "    validation_data=(test_images, test_labels),\n",
+ ")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "DHMF1fMTXTQi",
+ "outputId": "89b176ad-1406-4e07-89a8-5ee6890b0c2b"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Epoch 1/70\n",
+ "1563/1563 [==============================] - 81s 45ms/step - loss: 1.9639 - accuracy: 0.2097 - val_loss: 1.7949 - val_accuracy: 0.2813\n",
+ "Epoch 2/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 1.6663 - accuracy: 0.3465 - val_loss: 1.5634 - val_accuracy: 0.3866\n",
+ "Epoch 3/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 1.5258 - accuracy: 0.4094 - val_loss: 1.4458 - val_accuracy: 0.4520\n",
+ "Epoch 4/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 1.4298 - accuracy: 0.4576 - val_loss: 1.3861 - val_accuracy: 0.4803\n",
+ "Epoch 5/70\n",
+ "1563/1563 [==============================] - 71s 46ms/step - loss: 1.3467 - accuracy: 0.4931 - val_loss: 1.3204 - val_accuracy: 0.5016\n",
+ "Epoch 6/70\n",
+ "1563/1563 [==============================] - 71s 46ms/step - loss: 1.2736 - accuracy: 0.5266 - val_loss: 1.2891 - val_accuracy: 0.5179\n",
+ "Epoch 7/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 1.2024 - accuracy: 0.5552 - val_loss: 1.2442 - val_accuracy: 0.5431\n",
+ "Epoch 8/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 1.1329 - accuracy: 0.5864 - val_loss: 1.1586 - val_accuracy: 0.5744\n",
+ "Epoch 9/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 1.0770 - accuracy: 0.6056 - val_loss: 1.1315 - val_accuracy: 0.5835\n",
+ "Epoch 10/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 1.0084 - accuracy: 0.6335 - val_loss: 1.1725 - val_accuracy: 0.5853\n",
+ "Epoch 11/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.9437 - accuracy: 0.6586 - val_loss: 1.1610 - val_accuracy: 0.5986\n",
+ "Epoch 12/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.8826 - accuracy: 0.6840 - val_loss: 1.0822 - val_accuracy: 0.6276\n",
+ "Epoch 13/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.8199 - accuracy: 0.7046 - val_loss: 1.1908 - val_accuracy: 0.5996\n",
+ "Epoch 14/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.7528 - accuracy: 0.7333 - val_loss: 1.0764 - val_accuracy: 0.6382\n",
+ "Epoch 15/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.6955 - accuracy: 0.7498 - val_loss: 1.1096 - val_accuracy: 0.6473\n",
+ "Epoch 16/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.6335 - accuracy: 0.7745 - val_loss: 1.2055 - val_accuracy: 0.6227\n",
+ "Epoch 17/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.5781 - accuracy: 0.7949 - val_loss: 1.2069 - val_accuracy: 0.6390\n",
+ "Epoch 18/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.5224 - accuracy: 0.8143 - val_loss: 1.2445 - val_accuracy: 0.6444\n",
+ "Epoch 19/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.4715 - accuracy: 0.8321 - val_loss: 1.2478 - val_accuracy: 0.6387\n",
+ "Epoch 20/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.4298 - accuracy: 0.8490 - val_loss: 1.3028 - val_accuracy: 0.6462\n",
+ "Epoch 21/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.3844 - accuracy: 0.8649 - val_loss: 1.4008 - val_accuracy: 0.6401\n",
+ "Epoch 22/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.3492 - accuracy: 0.8770 - val_loss: 1.3167 - val_accuracy: 0.6428\n",
+ "Epoch 23/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.3204 - accuracy: 0.8884 - val_loss: 1.4801 - val_accuracy: 0.6511\n",
+ "Epoch 24/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.2864 - accuracy: 0.9012 - val_loss: 1.4595 - val_accuracy: 0.6487\n",
+ "Epoch 25/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.2539 - accuracy: 0.9127 - val_loss: 1.7098 - val_accuracy: 0.6355\n",
+ "Epoch 26/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.2380 - accuracy: 0.9183 - val_loss: 1.4833 - val_accuracy: 0.6458\n",
+ "Epoch 27/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.2111 - accuracy: 0.9282 - val_loss: 1.7273 - val_accuracy: 0.6432\n",
+ "Epoch 28/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.1888 - accuracy: 0.9360 - val_loss: 1.6997 - val_accuracy: 0.6493\n",
+ "Epoch 29/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.1830 - accuracy: 0.9384 - val_loss: 1.6955 - val_accuracy: 0.6536\n",
+ "Epoch 30/70\n",
+ "1563/1563 [==============================] - 71s 46ms/step - loss: 0.1584 - accuracy: 0.9473 - val_loss: 1.7181 - val_accuracy: 0.6554\n",
+ "Epoch 31/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.1554 - accuracy: 0.9479 - val_loss: 1.8023 - val_accuracy: 0.6508\n",
+ "Epoch 32/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.1372 - accuracy: 0.9545 - val_loss: 1.8693 - val_accuracy: 0.6439\n",
+ "Epoch 33/70\n",
+ "1563/1563 [==============================] - 71s 46ms/step - loss: 0.1328 - accuracy: 0.9569 - val_loss: 1.8244 - val_accuracy: 0.6584\n",
+ "Epoch 34/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.1263 - accuracy: 0.9583 - val_loss: 1.7487 - val_accuracy: 0.6574\n",
+ "Epoch 35/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.1146 - accuracy: 0.9620 - val_loss: 1.8116 - val_accuracy: 0.6535\n",
+ "Epoch 36/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.1064 - accuracy: 0.9655 - val_loss: 2.0310 - val_accuracy: 0.6449\n",
+ "Epoch 37/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.1053 - accuracy: 0.9654 - val_loss: 1.9424 - val_accuracy: 0.6543\n",
+ "Epoch 38/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0983 - accuracy: 0.9682 - val_loss: 1.8256 - val_accuracy: 0.6615\n",
+ "Epoch 39/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0940 - accuracy: 0.9685 - val_loss: 1.9305 - val_accuracy: 0.6640\n",
+ "Epoch 40/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0903 - accuracy: 0.9707 - val_loss: 1.9351 - val_accuracy: 0.6324\n",
+ "Epoch 41/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0897 - accuracy: 0.9707 - val_loss: 2.1238 - val_accuracy: 0.6465\n",
+ "Epoch 42/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0867 - accuracy: 0.9719 - val_loss: 1.9466 - val_accuracy: 0.6526\n",
+ "Epoch 43/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0797 - accuracy: 0.9735 - val_loss: 1.9463 - val_accuracy: 0.6611\n",
+ "Epoch 44/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0824 - accuracy: 0.9734 - val_loss: 1.8838 - val_accuracy: 0.6601\n",
+ "Epoch 45/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0720 - accuracy: 0.9768 - val_loss: 2.0007 - val_accuracy: 0.6595\n",
+ "Epoch 46/70\n",
+ "1563/1563 [==============================] - 70s 45ms/step - loss: 0.0725 - accuracy: 0.9749 - val_loss: 2.0515 - val_accuracy: 0.6632\n",
+ "Epoch 47/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0672 - accuracy: 0.9782 - val_loss: 2.1421 - val_accuracy: 0.6633\n",
+ "Epoch 48/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0705 - accuracy: 0.9768 - val_loss: 2.0357 - val_accuracy: 0.6592\n",
+ "Epoch 49/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0720 - accuracy: 0.9761 - val_loss: 2.0108 - val_accuracy: 0.6585\n",
+ "Epoch 50/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0606 - accuracy: 0.9811 - val_loss: 2.0068 - val_accuracy: 0.6638\n",
+ "Epoch 51/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0691 - accuracy: 0.9778 - val_loss: 1.9444 - val_accuracy: 0.6603\n",
+ "Epoch 52/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0620 - accuracy: 0.9797 - val_loss: 1.9663 - val_accuracy: 0.6658\n",
+ "Epoch 53/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0562 - accuracy: 0.9816 - val_loss: 2.1736 - val_accuracy: 0.6540\n",
+ "Epoch 54/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0581 - accuracy: 0.9805 - val_loss: 2.0302 - val_accuracy: 0.6696\n",
+ "Epoch 55/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0545 - accuracy: 0.9824 - val_loss: 2.3498 - val_accuracy: 0.6616\n",
+ "Epoch 56/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0583 - accuracy: 0.9806 - val_loss: 2.0120 - val_accuracy: 0.6667\n",
+ "Epoch 57/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0526 - accuracy: 0.9827 - val_loss: 2.0945 - val_accuracy: 0.6697\n",
+ "Epoch 58/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0534 - accuracy: 0.9826 - val_loss: 2.2315 - val_accuracy: 0.6702\n",
+ "Epoch 59/70\n",
+ "1563/1563 [==============================] - 70s 45ms/step - loss: 0.0494 - accuracy: 0.9835 - val_loss: 2.4048 - val_accuracy: 0.6433\n",
+ "Epoch 60/70\n",
+ "1563/1563 [==============================] - 69s 44ms/step - loss: 0.0481 - accuracy: 0.9848 - val_loss: 2.1883 - val_accuracy: 0.6684\n",
+ "Epoch 61/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0506 - accuracy: 0.9835 - val_loss: 2.1281 - val_accuracy: 0.6671\n",
+ "Epoch 62/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0482 - accuracy: 0.9842 - val_loss: 2.1164 - val_accuracy: 0.6606\n",
+ "Epoch 63/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0470 - accuracy: 0.9840 - val_loss: 2.1156 - val_accuracy: 0.6744\n",
+ "Epoch 64/70\n",
+ "1563/1563 [==============================] - 70s 45ms/step - loss: 0.0500 - accuracy: 0.9833 - val_loss: 2.0449 - val_accuracy: 0.6776\n",
+ "Epoch 65/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0451 - accuracy: 0.9855 - val_loss: 2.0069 - val_accuracy: 0.6700\n",
+ "Epoch 66/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0462 - accuracy: 0.9848 - val_loss: 2.0895 - val_accuracy: 0.6831\n",
+ "Epoch 67/70\n",
+ "1563/1563 [==============================] - 73s 46ms/step - loss: 0.0431 - accuracy: 0.9853 - val_loss: 2.1603 - val_accuracy: 0.6723\n",
+ "Epoch 68/70\n",
+ "1563/1563 [==============================] - 71s 45ms/step - loss: 0.0448 - accuracy: 0.9854 - val_loss: 1.9228 - val_accuracy: 0.6750\n",
+ "Epoch 69/70\n",
+ "1563/1563 [==============================] - 68s 44ms/step - loss: 0.0411 - accuracy: 0.9869 - val_loss: 2.2212 - val_accuracy: 0.6759\n",
+ "Epoch 70/70\n",
+ "1563/1563 [==============================] - 68s 43ms/step - loss: 0.0422 - accuracy: 0.9862 - val_loss: 2.2184 - val_accuracy: 0.6535\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Printing the training and testing accuracy"
+ ],
+ "metadata": {
+ "id": "iZU4To80qs0E"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Pull the per-epoch metric curves out of the Keras History object.\n",
+ "history_dict = scratch_history.history\n",
+ "train_loss = history_dict['loss']\n",
+ "train_acc = history_dict['accuracy']\n",
+ "val_loss = history_dict['val_loss']\n",
+ "val_acc = history_dict['val_accuracy']\n",
+ "\n",
+ "# Report the last-epoch values (the validation split doubles as the test set).\n",
+ "print(f\"Final Training Accuracy: {train_acc[-1]*100:.2f}%\")\n",
+ "print(f\"Final Testing Accuracy: {val_acc[-1]*100:.2f}%\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "vAh60GtGgJLD",
+ "outputId": "f9256a30-4d8c-4644-a4df-d0ab955642f8"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Final Training Accuracy: 98.62%\n",
+ "Final Testing Accuracy: 65.35%\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Plotting the accuracy and loss"
+ ],
+ "metadata": {
+ "id": "N9x5QQZPqlKR"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# X axis: epoch numbers 1..N.\n",
+ "epochs = range(1, len(train_loss) + 1)\n",
+ "\n",
+ "# Side-by-side panels: accuracy on the left, loss on the right.\n",
+ "fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(12, 5))\n",
+ "\n",
+ "ax_acc.plot(epochs, train_acc, 'b-', label='Training accuracy')\n",
+ "ax_acc.plot(epochs, val_acc, 'r-', label='Testing accuracy')\n",
+ "ax_acc.set_title('Training and Testing Accuracy')\n",
+ "ax_acc.set_xlabel('Epochs')\n",
+ "ax_acc.set_ylabel('Accuracy')\n",
+ "ax_acc.legend()\n",
+ "\n",
+ "ax_loss.plot(epochs, train_loss, 'b-', label='Training loss')\n",
+ "ax_loss.plot(epochs, val_loss, 'r-', label='Testing loss')\n",
+ "ax_loss.set_title('Training and Testing Loss')\n",
+ "ax_loss.set_xlabel('Epochs')\n",
+ "ax_loss.set_ylabel('Loss')\n",
+ "ax_loss.legend()\n",
+ "\n",
+ "fig.tight_layout()\n",
+ "plt.show()"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 494
+ },
+ "id": "hjLK5nyGgK6K",
+ "outputId": "557b38af-2e4c-4f5f-8013-b54273cb814f"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "