diff --git a/docker/Dockerfile.finn_dev b/docker/Dockerfile.finn_dev index 35bc316b6951de5b69bfe611427b9b9321f59347..4c8557b53b05bea67c96ad9edf523d532640ebfe 100644 --- a/docker/Dockerfile.finn_dev +++ b/docker/Dockerfile.finn_dev @@ -55,6 +55,9 @@ RUN pip install sphinx_rtd_theme==0.5.0 RUN pip install pytest-xdist==2.0.0 RUN pip install pytest-parallel==0.1.0 RUN pip install netron>=4.7.9 +RUN pip install pandas==1.1.5 +RUN pip install scikit-learn==0.24.1 +RUN pip install tqdm==4.31.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading # switch user diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index a8e05114c312028d18a006d10d5b210b44afb9d3..bd2338305ef24d98f582d09a140175a243c62c7e 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -12,7 +12,7 @@ gecho () { # checkout the correct dependency repo commits # the repos themselves are cloned in the Dockerfile -FINN_BASE_COMMIT=91fb6066927d965471e66e103fd5201ac217c755 +FINN_BASE_COMMIT=8908c6a3f6674c4fa790954bd41c23ee5bf053df BREVITAS_COMMIT=aff49758ec445d77c75721c7de3091a2a1797ca8 CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4 HLSLIB_COMMIT=2e49322d1bbc4969ca293843bda1f3f9c05456fc diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst index 3b475303d7dde91e8b6a21856eb4d66417f164d7..bff31cde45122ebc25f515422ffc523f4f78e3be 100644 --- a/docs/finn/getting_started.rst +++ b/docs/finn/getting_started.rst @@ -120,13 +120,15 @@ These are summarized below: * ``VIVADO_PATH`` points to your Vivado installation on the host * (optional, for Vitis & Alveo only) ``VITIS_PATH``, ``PLATFORM_REPO_PATHS`` and ``XILINX_XRT`` respectively point to your Vitis installation, the Vitis platform files, and Xilinx XRT -* ``JUPYTER_PORT`` (default 8888) changes the port for Jupyter inside Docker -* ``NETRON_PORT`` (default 8081) changes the port for Netron inside Docker -* ``NUM_DEFAULT_WORKERS`` (default 1) specifies the degree of parallelization for the transformations that can be run in parallel -* ``PYNQ_BOARD`` or ``ALVEO_BOARD`` specifies the type of PYNQ/Alveo board used (see "supported hardware" below) for the test suite -* ``PYNQ_IP`` and ``PYNQ_PORT`` (or ``ALVEO_IP`` and ``ALVEO_PORT``) specify ip address and port number to access the PYNQ board / Alveo target -* ``PYNQ_USERNAME`` and ``PYNQ_PASSWORD`` (or ``ALVEO_USERNAME`` and ``ALVEO_PASSWORD``) specify the PYNQ board / Alveo host access credentials for the test suite. For PYNQ, password is always needed to run as sudo. For Alveo, you can leave the password empty and place your ssh private key in the ``finn/ssh_keys`` folder to use keypair authentication. -* ``PYNQ_TARGET_DIR`` (or ``ALVEO_TARGET_DIR``) specifies the target dir on the PYNQ board / Alveo host for the test suite +* (optional) ``JUPYTER_PORT`` (default 8888) changes the port for Jupyter inside Docker +* (optional) ``JUPYTER_PASSWD_HASH`` (default "") sets the Jupyter notebook password hash. If set to an empty string, token authentication will be used (the token is printed in the terminal on launch). +* (optional) ``LOCALHOST_URL`` (default localhost) sets the base URL for accessing e.g. Netron from inside the container. Useful when running FINN remotely.
+* (optional) ``NETRON_PORT`` (default 8081) changes the port for Netron inside Docker +* (optional) ``NUM_DEFAULT_WORKERS`` (default 1) specifies the degree of parallelization for the transformations that can be run in parallel +* (optional) ``PYNQ_BOARD`` or ``ALVEO_BOARD`` specifies the type of PYNQ/Alveo board used (see "supported hardware" below) for the test suite +* (optional) ``PYNQ_IP`` and ``PYNQ_PORT`` (or ``ALVEO_IP`` and ``ALVEO_PORT``) specify ip address and port number to access the PYNQ board / Alveo target +* (optional) ``PYNQ_USERNAME`` and ``PYNQ_PASSWORD`` (or ``ALVEO_USERNAME`` and ``ALVEO_PASSWORD``) specify the PYNQ board / Alveo host access credentials for the test suite. For PYNQ, password is always needed to run as sudo. For Alveo, you can leave the password empty and place your ssh private key in the ``finn/ssh_keys`` folder to use keypair authentication. +* (optional) ``PYNQ_TARGET_DIR`` (or ``ALVEO_TARGET_DIR``) specifies the target dir on the PYNQ board / Alveo host for the test suite * (optional) ``FINN_HOST_BUILD_DIR`` specifies which directory on the host will be used as the build directory. Defaults to ``/tmp/finn_dev_<username>`` * (optional) ``IMAGENET_VAL_PATH`` specifies the path to the ImageNet validation directory for tests. diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index 91a776f84e9554579d97447c9ca0889da5c29e48..ff4c5704002219ca18bb07eeb8c768f860f3ffbf 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -7,6 +7,13 @@ "# Train a Quantized MLP on UNSW-NB15 with Brevitas" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<font color=\"red\">**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".</font>" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -26,7 +33,7 @@ "*The task:* The goal of [*network intrusion detection*](https://ieeexplore.ieee.org/abstract/document/283931) is to identify, preferably in real time, unauthorized use, misuse, and abuse of computer systems by both system insiders and external penetrators. This may be achieved by a mix of techniques, and machine-learning (ML) based techniques are increasing in popularity. \n", "\n", "*The dataset:* Several datasets are available for use in ML-based methods for intrusion detection.\n", - "The [UNSW-NB15](https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/) is one such dataset created by the Australian Centre for Cyber Security (ACCS) to provide a comprehensive network based data set which can reflect modern network traffic scenarios. You can find more details about the dataset on [its homepage](https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/).\n", + "The **UNSW-NB15** is one such dataset created by the Australian Centre for Cyber Security (ACCS) to provide a comprehensive network based data set which can reflect modern network traffic scenarios. You can find more details about the dataset on [its homepage](https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/).\n", "\n", "*Performance considerations:* FPGAs are commonly used for implementing high-performance packet processing systems that still provide a degree of programmability. 
To avoid introducing bottlenecks on the network, the DNN implementation must be capable of detecting malicious ones at line rate, which can be millions of packets per second, and is expected to increase further as next-generation networking solutions provide increased\n", "throughput. This is a good reason to consider FPGA acceleration for this particular use-case." @@ -39,25 +46,59 @@ "## Outline\n", "-------------\n", "\n", - "* [Initial setup](#initial_setup)\n", - "* [Define the Quantized MLP model](#define_quantized_mlp)\n", - "* [Load the UNSW_NB15 dataset](#load_dataset) \n", + "* [Load the UNSW_NB15 Dataset](#load_dataset) \n", + "* [Define the Quantized MLP Model](#define_quantized_mlp)\n", "* [Define Train and Test Methods](#train_test)\n", - "* [(Option 1) Train the Model from Scratch](#train_scratch)\n", - "* [(Option 2) Load Pre-Trained Parameters](#load_pretrained)\n", + " * [(Option 1) Train the Model from Scratch](#train_scratch)\n", + " * [(Option 2) Load Pre-Trained Parameters](#load_pretrained)\n", "* [Network Surgery Before Export](#network_surgery)\n", - "* [Export to FINN-ONNX](#export_finn_onnx)\n", - "* [View the Exported ONNX in Netron](#view_in_netron)\n", - "* [That's it!](#thats_it)" + "* [Export to FINN-ONNX](#export_finn_onnx)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import onnx\n", + "import torch" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Initial Setup <a id='initial_setup'></a>\n", + "**This is important -- always import onnx before torch**. This is a workaround for a [known bug](https://github.com/onnx/onnx/issues/2394)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load the UNSW_NB15 Dataset <a id='load_dataset'></a>\n", + "\n", + "### Dataset Quantization <a id='dataset_qnt'></a>\n", "\n", - "Let's start by making sure we have all the Python packages we'll need for this notebook." + "The goal of this notebook is to train a Quantized Neural Network (QNN) to be later deployed as an FPGA accelerator generated by the FINN compiler. Although we can choose a variety of different precisions for the input, [Murovic and Trost](https://ev.fe.uni-lj.si/1-2-2019/Murovic.pdf) have previously shown we can actually binarize the inputs and still get good (90%+) accuracy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will create a binarized representation for the dataset by following the procedure defined by Murovic and Trost, which we repeat briefly here:\n", + "\n", + "* Original features have different formats ranging from integers, floating numbers to strings.\n", + "* Integers, which for example represent a packet lifetime, are binarized with as many bits as to include the maximum value. \n", + "* Another case is with features formatted as strings (protocols), which are binarized by simply counting the number of all different strings for each feature and coding them in the appropriate number of bits.\n", + "* Floating-point numbers are reformatted into fixed-point representation.\n", + "* In the end, each sample is transformed into a 593-bit wide binary vector. 
\n", + "* All vectors are labeled as bad (0) or normal (1)\n", + "\n", + "Following Murovic and Trost's open-source implementation provided as a Matlab script [here](https://github.com/TadejMurovic/BNN_Deployment/blob/master/cybersecurity_dataset_unswb15.m), we've created a [Python version](dataloader_quantized.py).\n", + "\n", + "<font color=\"red\">**FPGA'21 tutorial:** Downloading the original dataset and quantizing it can take some time, so we provide a download link to the pre-quantized version for your convenience. </font>" ] }, { @@ -69,51 +110,124 @@ "name": "stdout", "output_type": "stream", "text": [ - "Requirement already satisfied: pandas in /workspace/.local/lib/python3.6/site-packages (1.1.5)\n", - "Requirement already satisfied: pytz>=2017.2 in /opt/conda/lib/python3.6/site-packages (from pandas) (2019.1)\n", - "Requirement already satisfied: numpy>=1.15.4 in /opt/conda/lib/python3.6/site-packages (from pandas) (1.19.4)\n", - "Requirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/lib/python3.6/site-packages (from pandas) (2.8.1)\n", - "Requirement already satisfied: six>=1.5 in /opt/conda/lib/python3.6/site-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)\n", - "Requirement already satisfied: scikit-learn in /workspace/.local/lib/python3.6/site-packages (0.23.2)\n", - "Requirement already satisfied: scipy>=0.19.1 in /opt/conda/lib/python3.6/site-packages (from scikit-learn) (1.5.2)\n", - "Requirement already satisfied: joblib>=0.11 in /workspace/.local/lib/python3.6/site-packages (from scikit-learn) (1.0.0)\n", - "Requirement already satisfied: numpy>=1.13.3 in /opt/conda/lib/python3.6/site-packages (from scikit-learn) (1.19.4)\n", - "Requirement already satisfied: threadpoolctl>=2.0.0 in /workspace/.local/lib/python3.6/site-packages (from scikit-learn) (2.1.0)\n", - "Requirement already satisfied: tqdm in /opt/conda/lib/python3.6/site-packages (4.31.1)\n" + "--2021-02-24 16:57:33-- https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1\n", + "Resolving zenodo.org (zenodo.org)... 137.138.76.77\n", + "Connecting to zenodo.org (zenodo.org)|137.138.76.77|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 13391907 (13M) [application/octet-stream]\n", + "Saving to: 'unsw_nb15_binarized.npz'\n", + "\n", + "unsw_nb15_binarized 100%[===================>] 12.77M 2.17MB/s in 8.9s \n", + "\n", + "2021-02-24 16:57:44 (1.44 MB/s) - 'unsw_nb15_binarized.npz' saved [13391907/13391907]\n", + "\n" ] } ], "source": [ - "!pip install --user pandas\n", - "!pip install --user scikit-learn\n", - "!pip install --user tqdm" + "! 
wget -O unsw_nb15_binarized.npz https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can extract the binarized numpy arrays from the .npz archive and wrap them as a PyTorch `TensorDataset` as follows:" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Samples in each set: train = 175341, test = 82332\n", + "Shape of one input sample: torch.Size([593])\n" + ] + } + ], "source": [ - "import onnx\n", - "import torch" + "import numpy as np\n", + "from torch.utils.data import TensorDataset\n", + "\n", + "def get_preqnt_dataset(data_dir: str, train: bool):\n", + " unsw_nb15_data = np.load(data_dir + \"/unsw_nb15_binarized.npz\")\n", + " if train:\n", + " partition = \"train\"\n", + " else:\n", + " partition = \"test\"\n", + " part_data = unsw_nb15_data[partition].astype(np.float32)\n", + " part_data = torch.from_numpy(part_data)\n", + " part_data_in = part_data[:, :-1]\n", + " part_data_out = part_data[:, -1]\n", + " return TensorDataset(part_data_in, part_data_out)\n", + "\n", + "train_quantized_dataset = get_preqnt_dataset(\".\", True)\n", + "test_quantized_dataset = get_preqnt_dataset(\".\", False)\n", + "\n", + "print(\"Samples in each set: train = %d, test = %s\" % (len(train_quantized_dataset), len(test_quantized_dataset))) \n", + "print(\"Shape of one input sample: \" + str(train_quantized_dataset[0][0].shape))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**This is important -- always import onnx before torch**. This is a workaround for a [known bug](https://github.com/onnx/onnx/issues/2394)." + "## Set up DataLoader\n", + "\n", + "Following either option, we now have access to the quantized dataset. We will wrap the dataset in a PyTorch `DataLoader` for easier access in batches." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "batch_size = 1000\n", + "\n", + "# dataset loaders\n", + "train_quantized_loader = DataLoader(train_quantized_dataset, batch_size=batch_size, shuffle=True)\n", + "test_quantized_loader = DataLoader(test_quantized_dataset, batch_size=batch_size, shuffle=False) " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Input shape for 1 batch: torch.Size([1000, 593])\n", + "Label shape for 1 batch: torch.Size([1000])\n" + ] + } + ], + "source": [ + "count = 0\n", + "for x,y in train_quantized_loader:\n", + " print(\"Input shape for 1 batch: \" + str(x.shape))\n", + " print(\"Label shape for 1 batch: \" + str(y.shape))\n", + " count += 1\n", + " if count == 1:\n", + " break" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Define the Quantized MLP Model <a id='define_quantized_mlp'></a>\n", + "# Define the Quantized MLP Model <a id='define_quantized_mlp'></a>\n", "\n", "We'll now define an MLP model that will be trained to perform inference with quantized weights and activations.\n", - "For this, we'll use the quantization-aware training (QAT) capabilities offered by[Brevitas](https://github.com/Xilinx/brevitas).\n", + "For this, we'll use the quantization-aware training (QAT) capabilities offered by [Brevitas](https://github.com/Xilinx/brevitas).\n", "\n", "Our MLP will have four fully-connected (FC) layers in total: three hidden layers with 64 neurons, and a final output layer with a single output, all using 2-bit weights. We'll use 2-bit quantized ReLU activation functions, and apply batch normalization between each FC layer and its activation.\n", "\n", @@ -122,7 +236,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -144,13 +258,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "from brevitas.nn import QuantLinear, QuantReLU\n", "import torch.nn as nn\n", "\n", + "# Setting seeds for reproducibility\n", + "torch.manual_seed(0)\n", + "\n", "model = nn.Sequential(\n", " QuantLinear(input_size, hidden1, bias=True, weight_bit_width=weight_bit_width),\n", " nn.BatchNorm1d(hidden1),\n", @@ -179,80 +296,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Load the UNSW_NB15 Dataset <a id='load_dataset'></a>\n", - "\n", - "### Dataset Quantization <a id='dataset_qnt'></a>\n", - "\n", - "The goal of this notebook is to train a Quantized Neural Network (QNN) to be later deployed as an FPGA accelerator generated by the FINN compiler. Although we can choose a variety of different precisions for the input, [Murovic and Trost](https://ev.fe.uni-lj.si/1-2-2019/Murovic.pdf) have previously shown we can actually binarize the inputs and still get good (90%+) accuracy.\n", - "Thus, we will create a binarized representation for the dataset by following the procedure defined by [Murovic and Trost](https://ev.fe.uni-lj.si/1-2-2019/Murovic.pdf), which we repeat briefly here:\n", - "\n", - "* Original features have different formats ranging from integers, floating numbers to strings.\n", - "* Integers, which for example represent a packet lifetime, are binarized with as many bits as to include the maximum value. 
\n", - "* Another case is with features formatted as strings (protocols), which are binarized by simply counting the number of all different strings for each feature and coding them in the appropriate number of bits.\n", - "* Floating-point numbers are reformatted into fixed-point representation.\n", - "* In the end, each sample is transformed into a 593-bit wide binary vector. \n", - "* All vectors are labeled as bad (0) or normal (1)\n", - "\n", - "Following their open-source implementation provided as a Matlab script [here](https://github.com/TadejMurovic/BNN_Deployment/blob/master/cybersecurity_dataset_unswb15.m), we've created a [Python version](dataloader_quantized.py).\n", - "This `UNSW_NB15_quantized` class implements a PyTorch `DataLoader`, which represents a Python iterable over a dataset. This is useful because enables access to data in batches." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Download the training and test set from the [official website](https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/) - uncomment the following lines to download:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "#! wget https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/a%20part%20of%20training%20and%20testing%20set/UNSW_NB15_training-set.csv\n", - "#! wget https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/a%20part%20of%20training%20and%20testing%20set/UNSW_NB15_testing-set.csv" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.utils.data import DataLoader, Dataset\n", - "from dataloader_quantized import UNSW_NB15_quantized\n", - "\n", - "file_path_train = \"UNSW_NB15_training-set.csv\"\n", - "file_path_test = \"UNSW_NB15_testing-set.csv\"\n", - "\n", - "train_quantized_dataset = UNSW_NB15_quantized(file_path_train = file_path_train, \\\n", - " file_path_test = file_path_test, \\\n", - " train=True)\n", - "\n", - "test_quantized_dataset = UNSW_NB15_quantized(file_path_train = file_path_train, \\\n", - " file_path_test = file_path_test, \\\n", - " train=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "batch_size = 1000\n", - "\n", - "# dataset loaders\n", - "train_quantized_loader = DataLoader(train_quantized_dataset, batch_size=batch_size, shuffle=True)\n", - "test_quantized_loader = DataLoader(test_quantized_dataset, batch_size=batch_size, shuffle=True) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define Train and Test Methods <a id='train_test'></a>\n", + "# Define Train and Test Methods <a id='train_test'></a>\n", "The train and test methods will use a `DataLoader`, which feeds the model with a new predefined batch of training data in each iteration, until the entire training data is fed to the model. Each repetition of this process is called an `epoch`." ] }, @@ -319,7 +363,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## (Option 1) Train the Model from Scratch <a id=\"train_scratch\"></a>\n" + "# Train the QNN <a id=\"train_qnn\"></a>\n", + "\n", + "We provide two options for training below: you can opt for training the model from scratch (slower) or use a pre-trained model (faster). 
The first option will give more insight into how the training process works, while the second option will likely give better accuracy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## (Option 1, slower) Train the Model from Scratch <a id=\"train_scratch\"></a>\n" ] }, { @@ -335,7 +388,7 @@ "metadata": {}, "outputs": [], "source": [ - "num_epochs = 5\n", + "num_epochs = 10\n", "lr = 0.001 \n", "\n", "def display_loss_plot(losses, title=\"Training loss\", xlabel=\"Iterations\", ylabel=\"Loss\"):\n", @@ -360,16 +413,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Training loss = 0.132480 test accuracy = 0.797989: 100%|██████████| 10/10 [00:58<00:00, 5.70s/it]\n" + ] + } + ], "source": [ "import numpy as np\n", "from sklearn.metrics import accuracy_score\n", "from tqdm import tqdm, trange\n", "\n", + "# Setting seeds for reproducibility\n", + "torch.manual_seed(0)\n", + "np.random.seed(0)\n", + "\n", "running_loss = []\n", "running_test_acc = []\n", "t = trange(num_epochs, desc=\"Training loss\", leave=True)\n", @@ -385,12 +450,26 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAjcUlEQVR4nO3de5QcZ33m8e/TPTfNrceyRrdu2ZJBMsiewQZhwhIICRBsQmwSjokJIWQ3OQ5ZHEggm5gkBxJnOccLWTbZXbOLN5jsbiCObQirJAYTwj0JoDG+yJKxLcsXjWRdLWlGl7n/9o+uGfeMeqSRND3VPf18zukzVW9Vdf+mj6RHVW+9bykiMDMzmymTdgFmZladHBBmZlaWA8LMzMpyQJiZWVkOCDMzK8sBYWZmZTkgzGYh6cuS3jPf+55lDa+X1D/f72s2Fw1pF2A2nyQdK1ltBYaB8WT91yPic3N9r4i4phL7mtUKB4QtKhHRPrks6Wng1yLiazP3k9QQEWMLWZtZrfElJqsLk5dqJP2epL3AZyVdIOnvJR2QdDhZLpQc801Jv5Ys/4qk70r602TfpyRdc477rpP0bUmDkr4m6TZJfzXH3+OlyWcdkbRN0rUl294iaXvyvrsl/U7Sviz53Y5Iel7SdyT5776dkf+QWD1ZCSwFLgZupPjn/7PJ+kXASeC/n+b4VwGPAcuAjwOfkaRz2PfzwA+AC4E/At49l+IlNQJ/B3wVWA78JvA5SZcmu3yG4mW0DuBy4OtJ+4eAfqAbWAH8PuA5duyMHBBWTyaAj0bEcEScjIhDEfGFiDgREYPAx4CfOM3xz0TE/4qIceB/A6so/oM7530lXQS8EvhIRIxExHeBzXOs/8eAduDW5NivA38PvDPZPgpslNQZEYcj4ocl7auAiyNiNCK+E56EzebAAWH15EBEDE2uSGqV9GlJz0gaAL4NdEnKznL83smFiDiRLLaf5b6rgedL2gB2zbH+1cCuiJgoaXsGyCfLbwfeAjwj6VuSXp20fwLYAXxV0k5JN8/x86zOOSCsnsz8X/OHgEuBV0VEJ/C6pH22y0bz4TlgqaTWkrY1czx2D7BmRv/BRcBugIjYEhHXUbz89CXgrqR9MCI+FBGXANcCH5T0hvP7NaweOCCsnnVQ7Hc4Imkp8NFKf2BEPAP0AX8kqSn5X/7PzvHw7wMngN+V1Cjp9cmxdybv9S5JuYgYBQYoXlJD0lslvTjpAzlK8bbfibKfYFbCAWH17M+AJcBB4HvAVxboc98FvBo4BPxH4G8ojtc4rYgYoRgI11Cs+VPAL0fEj5Jd3g08nVwue2/yOQDrga8Bx4B/BT4VEd+Yt9/GFi25r8osXZL+BvhRRFT8DMbsbPgMwmyBSXqlpBdJyki6GriOYp+BWVXxSGqzhbcS+CLFcRD9wG9ExAPplmR2Kl9iMjOzsnyJyczMylo0l5iWLVsWa9euTbsMM7Oacv/99x+MiO5y2xZNQKxdu5a+vr60yzAzqymSnpltmy8xmZlZWQ4IMzMrywFhZmZlVTQgJF0t6TFJO043g6Skt0sKSZtK2j6cHPeYpDdXsk4zMztVxTqpkymTbwPeRHEw0BZJmyNi+4z9OoAPUJyIbLJtI3ADcBnFKY6/JmlDMre+mZktgEqeQVwF7IiInckkY3dSnFJgpj8B/hMwVNJ2HXBn8mCXpyjOZX9VBWs1M7MZKhkQeaY/CKWfFx5sAoCklwNrIuIfzvbY5PgbJfVJ6jtw4MD8VG1mZkCKndTJQ08+SfGhLeckIm6PiE0Rsam7u+w4jzM6cmKEP//aE2ztP3quZZiZLUqVHCi3m+lPyiokbZMmH6z+zeRZ7iuBzZKuncOx8yabEf/la4+TEfQUcpX4CDOzmlTJM4gtwHpJ6yQ1Uex0nno4e0QcjYhlEbE2ItZSfGDLtRHRl+x3g6RmSesoPvDkB5UosqOlkUu629i622cQZmalKnYGERFjkm4C7gOywB0RsU3SLUBfRGw+zbHbJN0FbAfGgPdV8g6m3nyO7z/1fKXe3sysJlV0LqaIuBe4d0bbR2bZ9/Uz1j8GfKxixZW4PJ/jSw/uYf/gEMs7WhbiI83Mqp5HUgO9hS4AHvFlJjOzKQ4I4LLVnUjwsO9
kMjOb4oAA2pobeHF3u88gzMxKOCASPYWczyDMzEo4IBI9+Rz7B4fZNzB05p3NzOqAAyLRmwyS81mEmVmRAyKxcVWOjGBr/5G0SzEzqwoOiMSSpiwbVnR4RLWZWcIBUaInn2Pr7qNERNqlmJmlzgFRoqeQ4+CxEZ476o5qMzMHRImevDuqzcwmOSBKvHRVJw0ZsXX3kbRLMTNLnQOiREvjZEf1QNqlmJmlzgExQ08+x9b+I+6oNrO654CYoaeQ4/CJUfoPn0y7FDOzVDkgZpgcUe3xEGZW7xwQM1y6soPGrHwnk5nVPQfEDM0NWV6ystNTf5tZ3XNAlHF5PsfD7qg2szrngCijt5BjYGiMZ58/kXYpZmapcUCU4RHVZmYOiLI2rOigqSHjfggzq2sOiDKaGjK8dFWnzyDMrK45IGbRky/eyTQx4Y5qM6tPDohZ9Oa7GBwe4+lDx9MuxcwsFQ6IWfR4RLWZ1TkHxCzWL2+nuSHDVvdDmFmdckDMoiGbYePqTh72GYSZ1amKBoSkqyU9JmmHpJvLbH+vpK2SHpT0XUkbk/a1kk4m7Q9K+p+VrHM2vfkc23YfZdwd1WZWhyoWEJKywG3ANcBG4J2TAVDi8xHRExFXAB8HPlmy7cmIuCJ5vbdSdZ5OT6GL4yPjPHXwWBofb2aWqkqeQVwF7IiInRExAtwJXFe6Q0SUPrqtDaiq/6pPTv3t8RBmVo8qGRB5YFfJen/SNo2k90l6kuIZxPtLNq2T9ICkb0l6bbkPkHSjpD5JfQcOHJjP2gF4UXc7SxqzvpPJzOpS6p3UEXFbRLwI+D3gD5Pm54CLIuJK4IPA5yV1ljn29ojYFBGburu75722bEZctrrTdzKZWV2qZEDsBtaUrBeSttncCbwNICKGI+JQsnw/8CSwoTJlnl5PIce2PQOMjU+k8fFmZqmpZEBsAdZLWiepCbgB2Fy6g6T1Jas/AzyRtHcnndxIugRYD+ysYK2z6i3kODk6zpMHPKLazOpLQ6XeOCLGJN0E3AdkgTsiYpukW4C+iNgM3CTpjcAocBh4T3L464BbJI0CE8B7I+L5StV6Oj35LgAe7j/CpSs70ijBzCwVFQsIgIi4F7h3RttHSpY/MMtxXwC+UMna5uqSZW20NWV5ZPdRrt+05swHmJktEql3Ule7TEZcls95RLWZ1R0HxBz05nNs3zPAqDuqzayOOCDmoKeQY3hsgif2eUS1mdUPB8Qc9Ba6ANi6+0iqdZiZLSQHxBxcvLSVjpYGj6g2s7rigJiDTEZcvjrnEdVmVlccEHPUW8jx6HODjIy5o9rM6oMDYo56CjlGxid4fN9g2qWYmS0IB8Qc9SYjqt0PYWb1wgExR2uWLiG3pNHPhjCzuuGAmCNJ9ORzvtXVzOqGA+Is9BRyPLZ3kKHR8bRLMTOrOAfEWejN5xgdDx7b645qM1v8HBBnoSd5RrU7qs2sHjggzkK+awkXtDZ6wJyZ1QUHxFmQRE+hy1N/m1ldcECcpd58jsf3uaPazBY/B8RZ6inkGJ8Itj83kHYpZmYV5YA4S71JR/UjvsxkZoucA+IsrexsYVl7k0dUm9mi54A4S1Mjqh0QZrbIOSDOQU+hiyf2D3JiZCztUszMKsYBcQ568zkmArbvcUe1mS1eDohz4BHVZlYPHBDnYEVnC8s7mt0PYWaLmgPiHPUWch5RbWaLmgPiHPXku3jywDGODbuj2swWJwfEOeot5IiAbT6LMLNFqqIBIelqSY9J2iHp5jLb3ytpq6QHJX1X0saSbR9OjntM0psrWee5uDzvjmozW9wqFhCSssBtwDXARuCdpQGQ+HxE9ETEFcDHgU8mx24EbgAuA64GPpW8X9Xo7mhmVa7FAWFmi1YlzyCuAnZExM6IGAHuBK4r3SEiSgcStAGRLF8H3BkRwxHxFLAjeb+q4hHVZraYVTIg8sCukvX+pG0aSe+T9CTFM4j3n82xaest5Nh58DgDQ6Npl2JmNu9S76SOiNsi4kXA7wF/eDbHSrpRUp+kvgMHDlSmwNPoKXQBntnVzBanSgbEbmBNyXohaZvNncDbzubYiLg9IjZFxKbu7u7zq/Yc9OQ99beZLV6VDIgtwHpJ6yQ1Uex03ly6g6T1Jas/AzyRLG8GbpDULGkdsB74QQVrPSdL25rIdy3x1N9mtig1VOqNI2JM0k3AfUAWuCMitkm6BeiLiM3ATZLeCIwCh4H3JMduk3QXsB0YA94XEVX5jM/eQs53MpnZolSxgACIiHuBe2e0faRk+QOnOfZjwMcqV9386Cnk+PIjezl6YpRca2Pa5ZiZzZvUO6lr3VQ/xB6fRZjZ4uKAOE+TAeF+CDNbbBwQ56mrtYmLlraydfeRtEsxM5tXDoh50FPI+QzCzBYdB8Q86M3n6D98ksPHR9Iuxcxs3jgg5kGPZ3Y1s0XIATEPLnNAmNki5ICYB7kljaxb1sbD/UfSLsXMbN44IOaJp/42s8XGATFPegs59hwd4uCx4bRLMTObFw6IeeJHkJrZYuOAmCeXre5EwpeZzGzRcEDMk46WRi5Z1uYBc2a2aMwpICS1ScokyxskXSvJU5fO0Fvo8pQbZrZozPUM4ttAi6Q88FXg3cBfVqqoWtWTz7FvYJj9A0Npl2Jmdt7mGhCKiBPAzwOfiojrgcsqV1Zt6im4o9rMFo85B4SkVwPvAv4hactWpqTatXFVJxl56m8zWxzmGhC/BXwY+NvkcaCXAN+oWFU1qq25gRcvb/cZhJktCnN65GhEfAv4FkDSWX0wIt5fycJqVU++i289foCIQFLa5ZiZnbO53sX0eUmdktqAR4Dtkv5DZUurTT35Tg4eG2avO6rNrMbN9RLTxogYAN4GfBlYR/FOJpuhp9AFeMCcmdW+uQZEYzLu4W3A5ogYBaJiVdWwjas6yWbkfggzq3lzDYhPA08DbcC3JV0MDFSqqFq2pCnL+uXtvpPJzGrenAIiIv5rROQj4i1R9AzwkxWurWb1FnJs3X2UCJ9kmVntmmsndU7SJyX1Ja//TPFswsroyed4/vgIe466o9rMatdcLzHdAQwC70heA8BnK1VUrXuho/pIqnWYmZ2PuQbEiyLioxGxM3n9MXBJJQurZS9Z2UFDRu6HMLOaNteAOCnpxydXJL0GOFmZkmpfS2OWS1d2+E4mM6tpcxpJDbwX+D+Scsn6YeA9lSlpcegt5Lh3616PqDazmjXXu5geioiXAb1Ab0RcCfzUmY6TdLWkxyTtkHRzme0flLRd0sOS/im5fXZy27ikB5PX5rP4narC5fkcR0+O0n/YJ1pmVpvO6olyETGQjKgG+ODp9pWUBW4DrgE2Au+UtHHGbg8AmyKiF7gH+HjJtpMRcUXyuvZs6qwGvfkuwDO7mlntOp9Hjp7puslVwI6kU3sEuBO4rnSHiPhG8pwJgO8BhfOop6psWNlOUzbDw37CnJnVqPMJiDONAssDu0rW+5O22fwqxXmeJrUkYy6+J+lt5Q6QdOPk2IwDBw7MpeYF09yQ5SWrOjwnk5nVrNN2UksapHwQCFgyX0VI+iVgE/ATJc
0XR8Tu5NkTX5e0NSKeLD0uIm4HbgfYtGlT1Q1bvjyf4+8e2uOOajOrSac9g4iIjojoLPPqiIgz3QG1G1hTsl5I2qaR9EbgD4BrI2K45LN3Jz93At8ErpzTb1RFevM5BofGeObQiTPvbGZWZc7nEtOZbAHWS1onqQm4AZh2N5KkKylOBHhtROwvab9AUnOyvAx4DbC9grVWxOQzqh/2eAgzq0EVC4iIGANuAu4DHgXuSh5XeoukybuSPgG0A3fPuJ31pUCfpIcoPtr01oiouYDYsKKDpoaMp9wws5o014Fy5yQi7gXundH2kZLlN85y3L8APZWsbSE0ZjNsXNXpW13NrCZV8hKTUZzZddueASYmqq4P3czstBwQFdZTyHFseIynDh1PuxQzs7PigKiw3qSj2uMhzKzWOCAq7MXd7bQ0ZtwPYWY1xwFRYQ3ZDJetzrHVU26YWY1xQCyAnnyOR3YPMO6OajOrIQ6IBdCTz3FydJydB46lXYqZ2Zw5IBbAZEe1+yHMrJY4IBbAJd3ttDZl/QhSM6spDogFkM2Iy1Z38rCn3DCzGuKAWCA9+S62PzfA2PhE2qWYmc2JA2KB9BZyDI1OsMMd1WZWIxwQC6THHdVmVmMcEAtk3YVttDc3eMoNM6sZDogFkpnsqPadTGZWIxwQC6i3kOPR5wYYdUe1mdUAB8QC6il0MTI2weP7BtMuxczsjBwQC6g376m/zax2OCAW0MUXttLR0uB+CDOrCQ6IBSSJnnzOZxBmVhMcEAusp5DjR3sHGB4bT7sUM7PTckAssN58F6PjweN7PaLazKqbA2KBTU397SfMmVmVc0AssMIFS+hqbXQ/hJlVPQfEApvsqPacTGZW7RwQKejJ53h83yBDo+6oNrPq5YBIQW8hx9hE8KO9HlFtZtXLAZGCnkIXAFv9hDkzq2IVDQhJV0t6TNIOSTeX2f5BSdslPSzpnyRdXLLtPZKeSF7vqWSdC211roWlbU3uhzCzqlaxgJCUBW4DrgE2Au+UtHHGbg8AmyKiF7gH+Hhy7FLgo8CrgKuAj0q6oFK1LrSpEdWecsPMqlglzyCuAnZExM6IGAHuBK4r3SEivhERJ5LV7wGFZPnNwD9GxPMRcRj4R+DqCta64HoLOZ7Yf4yTI+6oNrPqVMmAyAO7Stb7k7bZ/Crw5bM5VtKNkvok9R04cOA8y11YPfkc4xPB9ucG0i7FzKysquiklvRLwCbgE2dzXETcHhGbImJTd3d3ZYqrkF53VJtZlatkQOwG1pSsF5K2aSS9EfgD4NqIGD6bY2vZis5mlrU3e+pvM6talQyILcB6SeskNQE3AJtLd5B0JfBpiuGwv2TTfcBPS7og6Zz+6aRt0ZBEb8FTf5tZ9apYQETEGHATxX/YHwXuiohtkm6RdG2y2yeAduBuSQ9K2pwc+zzwJxRDZgtwS9K2qPTkczx54BjHh8fSLsXM7BQNlXzziLgXuHdG20dKlt94mmPvAO6oXHXp6y3kmAjY/twAr1y7NO1yzMymqYpO6nrVkzyj2gPmzKwaOSBStLyzhRWdzb6TycyqkgMiZT35Lo+oNrOq5IBIWW8hx86DxxkcGk27FDOzaRwQKesp5IiAbXs8otrMqosDImWTHdUeD2Fm1cYBkbJl7c2szrV4RLWZVR0HRBXoKeR4xAFhZlXGAVEFegtdPHXwOEdPuqPazKqHA6IKTPZDbPNZhJlVEQdEFZgaUe2AMLMq4oCoAhe0NVG4YInvZDKzquKAqBK9BT+j2syqiwOiSvTku3j2+RPsHxhKuxQzM8ABUTVe/aILkeAnPvFNPnjXg3xv5yEiIu2yzKyOVfR5EDZ3V6zp4kv//jXcuWUXf/fQHr74w91cfGEr17+iwNtfUWBVbknaJZpZndFi+V/qpk2boq+vL+0y5sXJkXG+su057trSz7/uPIQEr13fzTs2FXjTxhU0N2TTLtHMFglJ90fEprLbHBDV7dlDJ7jn/l3cc38/e44O0dXayHUvW831m9ZweXJ7rJnZuXJALALjE8G/PHmQu/r6uW/bXkbGJti4qpN3bCpw3RV5LmhrSrtEM6tBDohF5siJETY/tIe7+/rZuvsoTdkMb9q4gus3FXjt+m6yGaVdopnVCAfEIrZ9zwB337+LLz2wm8MnRlnZ2cLbX5Hn+lesYe2ytrTLM7Mq54CoA8Nj4/zTo/u5u28X33r8ABMBV61byjs2reEtPStpbfINa2Z2KgdEndl7dIgv/LCfu/t28fShE7Q1ZXlr72re8coCL7/oAiRfgjKzIgdEnYoI+p45zF1bdvEPW5/jxMg4l3S3cf0r1vD2l+dZ3tmSdolmljIHhHFseIx7H36Ou+/fxZanD5PNiNdv6Ob6TQV+6iUraGrwoHqzeuSAsGl2HjjG3ff384X7+9k/OMzStiZ+7so879i0hktXdqRdnpktIAeElTU2PsF3njjIXX27+Nqj+xgdD3oLOa67Is/lqzvZsKLD4yvMFjkHhJ3RoWPDfOnBPdzdt4sf7R2cau/uaGbDinbWL+/g0pUdxeUVHXS2NKZYrZnNFweEzVlEsOfoEI/vG+SJfYM8vu9YsnyMk6PjU/utyrWwfkUHG5a3s2FFBxtWdrB+eTttzb6d1qyWnC4gKvq3WdLVwJ8DWeAvIuLWGdtfB/wZ0AvcEBH3lGwbB7Ymq89GxLWVrNWKJJHvWkK+awk/eenyqfaJiWD3kZM8vm+Qx5LAeHzfIP935yGGxyam9st3LWHDinY2rOxgw/IONqzo4MXL21nS5AkGzWpNxQJCUha4DXgT0A9skbQ5IraX7PYs8CvA75R5i5MRcUWl6rOzk8mINUtbWbO0lTe8dMVU+/hE8OzzJ0454/jnHYcYGS8GhwQXLW1l/fLiJapLV3awfnkHl3S30dLo4DCrVpU8g7gK2BEROwEk3QlcB0wFREQ8nWybKPcGVv2yGbFuWRvrlrXx5stWTrWPjU/w9KET00Lj8X2DfPOx/YxNFC9rZgRrL2wrXqJK+jY2rOhg3bI233ZrVgUqGRB5YFfJej/wqrM4vkVSHzAG3BoRX5q5g6QbgRsBLrroonOv1OZdQzbDi5e38+Ll7VzT80L7yNgETx86zmN7S8449g/y1e17SXKDhpLQWZlrYUXn5Kt5armzpcEjws0qrJp7FC+OiN2SLgG+LmlrRDxZukNE3A7cDsVO6jSKtLPT1JBJzhimj7cYGh1n54HjPLF/MDnbOMbTh47z/aee5+jJ0VPep6UxUxIcLazoaGZlroXlJcsrOlt8CcvsPFQyIHYDa0rWC0nbnETE7uTnTknfBK4EnjztQVazWhqzbFzdycbVnadsGxodZ//AMHsHhtg37VVs29p/hH8cGGJo9NQrlZ0tDVNhsbyjhZW55pLl4lnJsvZmGrO+pGU2UyUDYguwXtI6isFwA/CLczlQ0gXAiYgYlrQMeA3w8YpValWtpTHLRRe2ctGFrbPuExEMDI2xf2AoCZLhaWGyd2CYHfsPsn9wmPGJ6SebEixrby5ewupoYUWupfizs5kVuRaWdxRDZWlrE
xk/a8PqSMUCIiLGJN0E3EfxNtc7ImKbpFuAvojYLOmVwN8CFwA/K+mPI+Iy4KXAp5PO6wzFPojts3yUGZLILWkkt6SR9Stmny5kfCI4dHy4eEZydIh9g0mYJMt7jg7xwK4jPH985JRjGzKiu6OZ5R3NdCcBsryjheWdzS8sdzRzYXuzH9pki4IHypmVMTxWvKy1b2CI/YPD7B8YYt9gMVj2Dw5N/Tx84tT+kUxyRrI8OSNZXhIky0uCZVl7Ew2+tGUpS22gnFmtam7ITo37OJ3hsXEODA4nITI9PPYNDLPn6BAP9R/h4LFTz0gkuLCteEYyPUyaWd7ZMvWzu73Zt/1aKhwQZuehuSFL4YJWChecPkhGxyc4eGx4+llJcmZSXB5i254BDh0bZqLMSf3Stia6WouX0DpbGulc0khnS0Nxfaqt4ZTtnUsa3QFv58wBYbYAGrMZVuWWsCq35LT7jU8Eh44Vw+OFy1vD7Bsc4uiJUQaGRjlyYoRnnz/B0ZOjDJwcnRp4OJvWpuysATIzYCaXJ/fraGlwx3wdc0CYVZFsRsXLS50tXJ7PnXH/iODk6DgDJ8cYGBqdCo2BodFi28mkbXJ9aJS9A0M8vn9wav103ZAStDc3TAuVxmyGTEY0ZERGIpuBhkyxLSvIZjJkM8XfJZsRWSnZJrLZ5OeMbQ3JekaiITv5vi/sM7Wc7NOYFUsas7Q0ZWltytLa2MCSpmzx1Zj1TQLzxAFhVsMk0drUQGtTcbzH2ZqYCI6PjCXBMpYEyWSojE0Lm6MnRxkcGuXk6DjjEzH1mohgbCKYmAjGIxgbL7ZN7VO6XLJ/Je+PaW7IsKQpS2tjdio4JkOkNQmR6csNxeVkffpycVtL4wv718tZlQPCrI5lMqKjpZGOlsbizeYLKJLgGJsZMmcIlvGJYgidHB3n5Mg4J0fHOTEyzsmRsZLl5Ofo5HJx25ETI+w5Utw2NPrCPmerpTFTDJbGYnA0NWRobszSnM3Q3JihuSFDc0O2+LOxZHlyv4aSfU67//TtTQ2ZBT07ckCYWSqUXE5qSHk2lImJYHhsghMjY9OCoxgeY5wcmZgKmFODZ5yR8QmGR8cZHptgeGycY8NjHDpWXC62lW4//3lJG7M6JXAuz+f4b++8ch6+jekcEGZW1zIZTV2GurDCnxURjI7HrOExbXlsnOHRkuWxiWR9/JTta5ae/uaHc+WAMDNbIJJoahBNDRlmH+9fPXyDtJmZleWAMDOzshwQZmZWlgPCzMzKckCYmVlZDggzMyvLAWFmZmU5IMzMrKxF80Q5SQeAZ87jLZYBB+epnFrn72I6fx/T+ft4wWL4Li6OiO5yGxZNQJwvSX2zPXav3vi7mM7fx3T+Pl6w2L8LX2IyM7OyHBBmZlaWA+IFt6ddQBXxdzGdv4/p/H28YFF/F+6DMDOzsnwGYWZmZTkgzMysrLoPCElXS3pM0g5JN6ddT5okrZH0DUnbJW2T9IG0a0qbpKykByT9fdq1pE1Sl6R7JP1I0qOSXp12TWmS9NvJ35NHJP21pJa0a5pvdR0QkrLAbcA1wEbgnZI2pltVqsaAD0XERuDHgPfV+fcB8AHg0bSLqBJ/DnwlIl4CvIw6/l4k5YH3A5si4nIgC9yQblXzr64DArgK2BEROyNiBLgTuC7lmlITEc9FxA+T5UGK/wDk060qPZIKwM8Af5F2LWmTlANeB3wGICJGIuJIqkWlrwFYIqkBaAX2pFzPvKv3gMgDu0rW+6njfxBLSVoLXAl8P+VS0vRnwO8CEynXUQ3WAQeAzyaX3P5CUlvaRaUlInYDfwo8CzwHHI2Ir6Zb1fyr94CwMiS1A18AfisiBtKuJw2S3grsj4j7066lSjQALwf+R0RcCRwH6rbPTtIFFK82rANWA22SfindquZfvQfEbmBNyXohaatbkhophsPnIuKLadeTotcA10p6muKlx5+S9FfplpSqfqA/IibPKO+hGBj16o3AUxFxICJGgS8C/yblmuZdvQfEFmC9pHWSmih2Mm1OuabUSBLFa8yPRsQn064nTRHx4YgoRMRain8uvh4Ri+5/iHMVEXuBXZIuTZreAGxPsaS0PQv8mKTW5O/NG1iEnfYNaReQpogYk3QTcB/FuxDuiIhtKZeVptcA7wa2Snowafv9iLg3vZKsivwm8LnkP1M7gX+bcj2piYjvS7oH+CHFu/8eYBFOu+GpNszMrKx6v8RkZmazcECYmVlZDggzMyvLAWFmZmU5IMzMrCwHhFlC0rHk51pJvzjP7/37M9b/ZT7f36wSHBBmp1oLnFVAJBO2nc60gIiIRTfq1hYfB4TZqW4FXivpwWTO/6ykT0jaIulhSb8OIOn1kr4jaTPJqGJJX5J0f/KcgBuTtlspzvr5oKTPJW2TZytK3vsRSVsl/ULJe3+z5PkLn0tG7CLp1uSZHQ9L+tMF/3asbtT1SGqzWdwM/E5EvBUg+Yf+aES8UlIz8M+SJmfufDlweUQ8laz/u4h4XtISYIukL0TEzZJuiogrynzWzwNXUHy+wrLkmG8n264ELqM4jfQ/A6+R9Cjwc8BLIiIkdc3vr272Ap9BmJ3ZTwO/nEw/8n3gQmB9su0HJeEA8H5JDwHfozgR5HpO78eBv46I8YjYB3wLeGXJe/dHxATwIMVLX0eBIeAzkn4eOHGev5vZrBwQZmcm4Dcj4orkta5k7v/jUztJr6c4y+erI+JlFOfnOZ/HUA6XLI8DDRExRvFBV/cAbwW+ch7vb3ZaDgizUw0CHSXr9wG/kUyFjqQNszwsJwccjogTkl5C8bGtk0Ynj5/hO8AvJP0c3RSf2vaD2QpLntWRSyZQ/G2Kl6bMKsJ9EGanehgYTy4V/SXFZzGvBX6YdBQfAN5W5rivAO9N+gkeo3iZadLtwMOSfhgR7ypp/1vg1cBDQAC/GxF7k4AppwP4f5JaKJ7ZfPCcfkOzOfBsrmZmVpYvMZmZWVkOCDMzK8sBYWZmZTkgzMysLAeEmZmV5YAwM7OyHBBmZlbW/wc3oVuWUfor/QAAAABJRU5ErkJggg==\n", + "text/plain": [ + "<Figure size 432x288 with 1 Axes>" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], "source": [ + "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", "\n", "loss_per_epoch = [np.mean(loss_per_epoch) for loss_per_epoch in running_loss]\n", @@ -399,25 +478,69 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + 
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAo3UlEQVR4nO3deXxddZ3/8dcnW5OmbbokXUiXtHRnLYRSBCq0LBWQVaEgiz8XRkeY0XEZdNRxYFTGUdFRRgcRF0CQRZjKIFvCJhZoS8vSJt2XJKVJuiZps+fz++Oe0tv2Jr1tc3PuTd7Px+M+mrPdfO5Fzzvn+z3n+zV3R0RE5EBpYRcgIiLJSQEhIiIxKSBERCQmBYSIiMSkgBARkZgUECIiEpMCQkREYlJASMozs4aoV4eZNUYtf+II3u8lM/tMImoVSSUZYRcgcrTcfcDen81sA/AZd38hvIoSy8wy3L0t7Dqk99MVhPRaZpZmZreZ2Voz22Zmj5jZ0GBbtpk9EKzfaWaLzGyEmX0XOBv4eXAF8vNO3vtRM9tiZrvM7BUzOy5qW46Z/cjMNgbb/2pmOcG2s8zsb8HvrDCzTwbr97tqMbNPmtlfo5bdzL5gZquB1cG6nwbvUWdmS8zs7Kj9083sG8Fnrw+2jzGzu83sRwd8lgVm9qWj/8alt1FASG92K3A58GHgGGAHcHew7SYgDxgDDAM+BzS6+78ArwK3uPsAd7+lk/f+CzAJGA68BTwYte2HwKnAh4ChwNeADjMbFxz3M6AAOBlYdhif53LgdGB6sLwoeI+hwB+AR80sO9j2T8C1wEXAIOBTwB7gd8C1ZpYGYGb5wHnB8SL7UROT9GafI3KirwQws+8Am8zsBqCVSDBMdPd3gCWH88buft/en4P33WFmeUA9kZPxLHevCnb5W7DfdcAL7v5QsH5b8IrX9919e1QND0Rt+5GZfROYArwNfAb4mruvDLa/vfd3mtkuYC7wPDAfeMndqw+jDukjdAUhvdk44ImgOWcnUAa0AyOA+4FngYfNbLOZ/cDMMuN506D55s6g+aYO2BBsyg9e2cDaGIeO6WR9vCoOqOMrZlYWNGPtJHJFlB/H7/odcH3w8/VEvguRgyggpDerAD7i7oOjXtnuXuXure7+b+4+nUhT0CXAjcFxhxri+DrgMiJNM3lAUbDegK1AE3BsJ/XEWg+wG+gftTwyxj4f1BX0N3wNuBoY4u6DgV1BDYf6XQ8Al5nZScA04MlO9pM+TgEhvdkvge8Gbf+YWYGZXRb8fK6ZnWBm6UAdkSanjuC4amBCF+87EGgm0jzUH/je3g3u3gHcB/zYzI4JrjbOMLN+RPopzjOzq80sw8yGmdnJwaHLgCvNrL+ZTQQ+fYjPNhBoA2qBDDP7NpG+hr3uBe4ws0kWcaKZDQtqrCTSf3E/8Li7Nx7id0kfpYCQ3uynwALgOTOrB14n0skLkb/QHyMSDmXAy+xravkp8DEz22Fm/xXjfX8PbASqgBXB+0b7CvAukZPwduA/gDR330Sk0/jLwfplwEnBMXcBLUTC6Xfs3+kdy7PAM8CqoJYm9m+C+jHwCPBc8Bl/DeREbf8dcAJqXpIumCYMEul7zGw2kaamca6TgHRCVxAifUzQGf+PwL0KB+mKAkKkDzGzacBOYBTwk1CLkaSnJiYREYlJVxAiIhJTr3mSOj8/34uKisIuQ0QkpSxZsmSruxfE2tZrAqKoqIjFixeHXYaISEoxs42dbVMTk4iIxKSAEBGRmBQQIiISU0IDwszmmdlKM1tjZrfF2D7WzF40s6Vm9o6ZXRS17evBcSvN7MJE1ikiIgdLWCd1MAja3cD5QCWwyMwWuPuKqN2+CTzi7r8ws+nA00BR8PN84DgiE728YGaT3b09UfWKiMj+EnkFMRNY4+7r3L0FeJjIEMnRnH0jUOYBm4OfLwMedvdmd18PrAneT0REekgiA6KQ/UeXrAzWRfsOcL2ZVRK5erj1MI4VEZEECvs5iGuB37r7j8zsDOB+Mzs+3oPN7GbgZoCxY8cmqEQRkeTS1t5B5Y5G1m/dzbqtu8nJTOe607v/HJjIgKgiMu3hXqODddE+DcwDcPeFwYTr+XEei7vfA9wDUFxcrEGlRKTXcHdqG5pZX7v7gyBYV7ub9Vsb2LR9D63t+055p4wdnHIBsQiYZGbjiZzc5xOZqjHaJiKTp/82GGUym8gMWQuAP5jZj4l0Uk8C3kxgrSIioWhobmPDBwHQwPqtkUBYX7ub+ua2D/bLykijaFh/Jg4fwAXHjWR8fi4T8nOZUDCAIf3jmk79sCUsINy9zcxuITLzVTpwn7svN7PbgcXuvoDIzFq/MrMvEemw/mQwPv1yM3uEyGxdbcAXdAeTiKSq1vYOKrbviVwJ1EbCYP3WSBhU1zV/sJ8ZHJOXw4SCXK48pZDx+bmMLxjAhPxcjhmcQ3qadfFbul+vGe67uLjYNRaTiITF3ampbw4CoGG/pqFN2/fQ3rHvXDs0Nyty8g9exxbkMj5/AOOG9Sc7M71H6zazJe5eHGtb2J3UIiIpoaWtg227m6mt3/favKspuCqIXA3sadnX0JGdmUbRsFymjRrIxSeMCq4GIs1Cg/tnhfhJ4qeAEJE+q73D2bGnZb+Tfm1DM1uDf6PX7dzTetDxaQajh/RnfH4uM8cPZUJ+5EpgfEEuowZlk9bDTULdTQEhIr2Ku1PX1Lbfyb22vpmtDc0HBcG2hmY6YrSy52SmM3xQP/IH9OPYggHMmjCMgoH9Iq8B/ciP+jkro/cOaaeAEJGU0tDcxmtrtlJT10RtQ0vMv/xb2joOOi4z3cgfEDmxj8rL5sTReR+c9PeuLwj+ze2nUyMoIEQkRayqruf+hRv501uV7A7a+s1gWG7WByf4Ywty9zvRf/DvwH7k5WRiltpNPj1NASEiSau1vYPnlldz/+sbeH3ddrLS07jkxFFcfdoYJuTnMjQ3i4z03tvEEzYFhIgknZq6Jv7w5iYeenMT1XXNFA7O4Z/nTeXq4tEMG9Av7PL6DAWEiCQFd+eN9du5f+FGnl2+hbYO58OTC/ju5eM4d+rwHn9ITBQQIhKyhuY2nnirkvtf38iq6gYGZWfwyQ8Vcf2scRTl54ZdXp+mgBCRUKyuruf+1zfyp7eqaGhu4/jCQfzgqhP56EnHkJPVs08TS2wKCBHpMa3tHTy/oprfL9y/0/n6M8YxY8xg3WWUZBQQIpJwNXVNPPRmBX94c+MHnc5fmzeFa4rHqNM5iSkgRCQh3J0312/n969v5Nn3Ip3Os9XpnFIUECLSrRqa23hiaRUPLNzIyup6BmVncFPQ6Txenc4pRQEhIt1iTU3kSefHg07n444ZxH9cdQKXnlSoTucUpYAQSXItbR0sXLeN5tZ2BuVkMig7k0E5GeTlZJKblRHqiKFtH3Q6b2Thum1kpadx8YmjuH7WOE4Zq07nVKeAEElCHR3Okk07eGJpFU+/+37MoaYhMtz0wKjAGJQdeeXlRNZFwqST5exMsjPTjugkvrfT+aE3N7GlronCwTl89
cIpXHPaGPLV6dxrKCBEksiamnqeXLqZJ5dVUbmjkZzMdC48bgSXnVxIwcB+1DW1UtfYSl1jG3VNrexqDJab2qhrjCyv29pAXWMbuxpbaWzteqbezHT7IFAG5mQyKDtjvwDZGyqRgMmko8N5/K1Kngk6nc+elM8dlx/PHHU690oKCJGQ1dQ1seDtSCi8V1VHmsHZkwr4ygVTOH/6iKMaerqlrYP6vUESFSKRoDk4ZHY1tlK1o/GD9a3tB0+WsLfT+ROnj2VCwYCj+eiS5BQQIiFoaG7jueVbeGJpFa+t2UqHw4mj8/j2JdO55KRRDB+Y3S2/JysjjWED+h3RswbuTlNrx34h0tjazqnjhtA/S6eOvkD/lUV6SGt7B39dvZUnllbx3IotNLV2MGZoDrecO5HLZhRybJL9NW5m5GSlk5OVzohB3RNYkloUECIJ5O4sq9jJk0ureOqd99m2u4XB/TP52KmjuWJGIaeMHaI7fSRpKSBEEmDD1t08uayKJ5dWsWHbHvplpHHe9BFccXIhsycX9Op5jKX3UECIdJNtDc089c77PLG0imUVOzGDMyYM4+/Pnci840cyKDsz7BJFDosCQuQoNLa083xZNU8ureLlVbW0dzjTRg3iGxdN5dKTChmZp7Z7SV0KCJHD1N7h/G1tpLP52fe2sLulnVF52Xz27AlcPuMYpo4cFHaJIt1CASESB3dn+eY6nlhaxZ/f3kxNfTMDszP46EnHcPmMQmYWDQ11yAuRRFBAiHTh/V2N/OmtKp5YWsWamgYy041zpwznihmFnDt1ONmZGoROei8FhEgMDc1t/PeLa7j3r+tpaetgZtFQvnfFCVx0wkgG988KuzyRHqGAEInS3uE8tqSC/3x2FVsbmrlyRiFfPG8yY4f1D7s0kR6ngBAJLFy7jTueWsGK9+s4ddwQ7r2pmJPHDA67LJHQKCCkz9uwdTffe7qM51ZUUzg4h59dO4NLThylJ5ylz1NASJ9V19TKz0vX8JvX1pOZnsZXL5zCp88ar45nkYACQvqctvYOHl5UwY+fX8WOPS18/NTRfOWCKQzXgHQi+1FASJ/y6upa7nhqBauqGzh9/FC+dcl0ji/MC7sskaSU0IAws3nAT4F04F53v/OA7XcB5waL/YHh7j442NYOvBts2+TulyayVund1tQ08L2nyygtr2Hs0P788vpTuPC4kepnEOlCwgLCzNKBu4HzgUpgkZktcPcVe/dx9y9F7X8rMCPqLRrd/eRE1Sd9w849LfzkhdU88PpGsjPT+fpHpvLJM4vol6F+BpFDSeQVxExgjbuvAzCzh4HLgBWd7H8t8K8JrEf6kNb2Dh58fSN3vbCa+qZW5s8cyz+dP5n8I5hZTaSvSmRAFAIVUcuVwOmxdjSzccB4oDRqdbaZLQbagDvd/ckYx90M3AwwduzY7qlaUpq78+LKGr77f2Wsrd3NWRPz+eYl0zSAnsgRSJZO6vnAY+7eHrVunLtXmdkEoNTM3nX3tdEHufs9wD0AxcXFB8+uLn3Kqup67nhqBa+u3sqE/Fx+fVMxc6YOVz+DyBFKZEBUAWOilkcH62KZD3wheoW7VwX/rjOzl4j0T6w9+FDp67Y1NHPXC6v4wxubGNAvg29dMp0bZo3TrG0iRymRAbEImGRm44kEw3zgugN3MrOpwBBgYdS6IcAed282s3zgTOAHCaxVUlBLWwe/X7iBn5asZk9LOzfMGscXz5vMkFwNpifSHRIWEO7eZma3AM8Suc31Pndfbma3A4vdfUGw63zgYXePbiKaBvyPmXUAaUT6IDrr3JY+xt15bkU133+6jA3b9nDOlAK+efE0Jg4fGHZpIr2K7X9eTl3FxcW+ePHisMuQBFuxuY47nlrBwnXbmDh8AN+8eBrnTBkedlkiKcvMlrh7caxtydJJLdKl2vpmfvTcSv64uILBOZncftlxXDdzLBnp6mcQSRQFhCS1ptZ27nttPf/94lqaWtv59JnjuXXOJPL6Z4Zdmkivp4CQpOTuPP3uFr7/lzIqdzRy3rQRfOOiqUwoGBB2aSJ9hgJCksr23S08ubSKRxZXUL6lnqkjB/LgZ07nzIn5YZcm0ucoICR07R3Oq6treXRxJc+vqKalvYMTR+fxg6tO5KpTR5OepgfdRMKggJDQbNq2h0eXVPDYkkre39XEkP6ZfGLWWK4uHsO0URoaQyRsCgjpUU2t7Tzz3hb+uKiCheu2YQazJxXwzYunc9704RplVSSJKCAk4dydd6t28cdFFSx4ezP1TW2MGZrDl8+fzFWnjuaYwTlhlygiMSggJGEO7HDul5HGRSeM4uPFo5k1fhhp6lsQSWoKCOlWezucH1lcwfMrqmltd04ance/X348Hz3pGPJy9PyCSKpQQEi3iNXhfMOsIq4+bbTmYhBJUQoIOWKNLe08s/x9/riogtfXbSfNYPbkAr59yXTmThuh4bZFUpwCQg6Lu/NO5S4eWVzBgmWbqW9uY+zQ/nzlgkiH86g8dTiL9BYKCInL9t0tPLG0ikeDDufszDQuOn4UHy8ew+njh6rDWaQXUkBIp9o7nFdW1/JodIfzmMF894pIh/OgbHU4i/RmCgg5yMZtu3l0cSWPLalkS10TQ3OzuPGMIq4uHsOUkZqUR6SvUEDIfp55730+98BbpBl8eHIB37l0OnOmqsNZpC9SQMh+HllcSeHgHB7//IcYmZcddjkiEiL9WSgfaGxp57U1W7nguBEKBxFRQMg+f1u7lea2DuZOHRF2KSKSBDptYjKzK+M4vsndn+7GeiREJeU15GalM3P80LBLEZEk0FUfxK+A/wW6usF9NqCA6AXcndKyGmZPLlCHtIgAXQfEX9z9U10dbGYPdHM9EpLlm+vYUtfEnKnDwy5FRJJEp38quvv1hzo4nn0kNZSW12AG50xRQIhIRNxtCWY20cweMLPHzeyMRBYlPa+kvIaTRg+mYGC/sEsRkSTRaUCY2YH3Od4BfB34IvCLBNYkPay2vpm3K3Zy3jRdPYjIPl1dQfzZzG6MWm4FioBxQHsii5Ke9eLKGgDm6PZWEYnSVUDMAwaZ2TNmNhv4CnAhcAXwiZ4oTnpGSVk1o/KymTZK4yyJyD6d3sXk7u3Az83sfuBbwOeBb7r72p4qThKvua2dV1dv5YoZhZhpyG4R2aerB+VOB74KtADfAxqB75pZFXCHu+/skQolod5Yt509Le3MVf+DiBygq+cg/ge4CBgA/MbdzwTmm9mHgT8SaW6SFFdaXkN2ZhofOjY/7FJEJMl0FRBtRDqlc4lcRQDg7i8DLye2LOkJ7k5JeTVnHptPdmZ62OWISJLpqpP6OuAqYA5wYxf7SYpaU9NAxfZG5k7T3UsicrCuOqlXAV/uwVqkh5WU7729Vf0PInKwrh6Ue+pQBx9qHzObZ2YrzWyNmd0WY/tdZrYseK0ys51R224ys9XB66ZD1SKHr6SsmuOOGaS5H0Qkpq76IM4yswVdbDdgeqcbzdKBu4HzgUpgkZktcPcVe/dx9y9F7X8rMCP4eSjwr0Ax4MCS4Ngdh/5IEo8du1tY
snEHt5w7MexSRCRJdRUQl8VxfEsX22YCa9x9HYCZPRy854pO9r+WSChA5A6p5919e3Ds80Qe3HsojpokDi+vqqXDYY76H0SkE131QRztnUqFQEXUciVweqwdzWwcMB4o7eLYwhjH3QzcDDB27NijLLdvKSmvIX9AFicW5oVdiogkqWSZGWY+8Fjw9Hbc3P0edy929+KCgoIEldb7tLZ38PLKGs6dMpy0ND09LSKxJTIgqoAxUcujg3WxzGf/5qPDOVYO05KNO6hratPT0yLSpUMGhJl91MyOJEgWAZPMbLyZZREJgYM6vc1sKjAEWBi1+lngAjMbYmZDgAuCddINSstryEpP46xJuuoSkc7Fc+K/BlhtZj8ITuZxcfc24BYiJ/Yy4BF3X25mt5vZpVG7zgcednePOnY7kfknFgWv2/d2WMvRe6GsmtMnDGVAv67uURCRvu6QZwh3v97MBhG5y+i3ZubAb4CH3L3+EMc+DTx9wLpvH7D8nU6OvQ+471D1yeFZv3U362p3c+OscWGXIiJJLq6mI3evAx4DHgZGEZkT4q3g2QVJIaXlmhxIROITTx/EpWb2BPASkAnMdPePACehoThSTml5NZOGD2DssP5hlyIiSS6eRuirgLvc/ZXole6+x8w+nZiyJBHqm1p5Y912Pn32+LBLEZEUEE9AfAd4f++CmeUAI9x9g7uXJKow6X6vrt5KW4czV81LIhKHePogHgU6opbbg3WSYkrKahjcP5NTxg4OuxQRSQHxBESGu0dPGNQCZCWuJEmE9g7nxZU1nDO5gIz0ZHmAXkSSWTxnitro5xbM7DJga+JKkkRYVrGT7btbNDifiMQtnj6IzwEPmtnPiQzxXYFmmEs5peXVpKcZH9bT0yISp3gelFsLzDKzAcFyQ8Krkm5XUlZD8bgh5PXPDLsUEUkRcY21YGYXA8cB2WaR0T/d/fYE1iXdqGpnI+Vb6vnGRXGPlCIiEteDcr8kMh7TrUSamD4OaJyGFKKnp0XkSMTTSf0hd78R2OHu/wacAUxObFnSnUrKqika1p9jC3LDLkVEUkg8AdEU/LvHzI4BWomMxyQpYE9LG39bu405U0ewt3lQRCQe8fRB/NnMBgP/CbwFOPCrRBYl3ee1NdtoaevQ5EAicti6DIhgoqASd98JPG5mTwHZ7r6rJ4qTo1daXs2AfhmcVjQ07FJEJMV02cTk7h3A3VHLzQqH1OHulJTVMHtyPlkZenpaRA5PPGeNEjO7ytSAnXKWb66jpr5Zdy+JyBGJJyD+jsjgfM1mVmdm9WZWl+C6pBuUlNVgBudM0dPTInL44nmSemBPFCLdr6S8mhljBpM/oF/YpYhICjpkQJjZ7FjrD5xASJJLTV0T71Tu4qsXTgm7FBFJUfHc5vrVqJ+zgZnAEmBOQiqSbvHiyr1PT+v2VhE5MvE0MX00etnMxgA/SVRB0j1Kymo4Ji+bqSPVQigiR+ZI7n2sBKZ1dyHSfZpa2/nrmq3MmTZcT0+LyBGLpw/iZ0SenoZIoJxM5IlqSVJvrN/OnpZ2zT0tIkclnj6IxVE/twEPuftrCapHukFpWTXZmWmcceywsEsRkRQWT0A8BjS5ezuAmaWbWX9335PY0uRIuDsvlNVw1sQCsjPTwy5HRFJYXE9SAzlRyznAC4kpR47WquoGqnY2anA+ETlq8QREdvQ0o8HP/RNXkhyNkvJqAM6dooAQkaMTT0DsNrNT9i6Y2alAY+JKkqNRWlbD8YWDGJmXHXYpIpLi4umD+CLwqJltJjLl6EgiU5BKktm+u4W3Nu3gljmTwi5FRHqBeB6UW2RmU4G9YzasdPfWxJYlR+LlVTV0OMzV09Mi0g0O2cRkZl8Act39PXd/DxhgZn+f+NLkcL1QVkPBwH6cUJgXdiki0gvE0wfx2WBGOQDcfQfw2YRVJEektb2DV1bWMmfKcNLS9PS0iBy9eAIiPXqyIDNLB7ISV5IciUUbtlPf3MYc3d4qIt0knoB4Bvijmc01s7nAQ8G6QzKzeWa20szWmNltnexztZmtMLPlZvaHqPXtZrYseC2I5/f1ZaVlNWSlp3HWxPywSxGRXiKeu5j+GbgZ+Hyw/Dzwq0MdFFxp3A2cT2SAv0VmtsDdV0TtMwn4OnCmu+8ws+g/fxvd/eS4PoVQWl7DrGOHkdsvnv+kIiKHdsgrCHfvcPdfuvvH3P1jwArgZ3G890xgjbuvc/cW4GHgsgP2+Sxwd9CvgbvXHF75ArCutoF1W3fr7iUR6VZxDfdtZjPM7AdmtgG4HSiP47BCoCJquTJYF20yMNnMXjOz181sXtS2bDNbHKy/vJO6bg72WVxbWxvPR+mVSss1OZCIdL9O2yPMbDJwbfDaCvwRMHc/t5t//yTgHGA08IqZnRDcNTXO3avMbAJQambvuvva6IPd/R7gHoDi4mKnjyopq2HyiAGMGaoRUESk+3R1BVFOZFrRS9z9LHf/GdB+GO9dBYyJWh4drItWCSxw91Z3Xw+sIhIYuHtV8O864CVgxmH87j5jV2MrizZsZ+40zf0gIt2rq4C4EngfeNHMfhXcwXQ4N9gvAiaZ2XgzywLmAwfejfQkkasHzCyfSJPTOjMbYmb9otafSaTvQw7w6upa2jpc/Q8i0u06DQh3f9Ld5wNTgReJjMk03Mx+YWYXHOqN3b0NuAV4FigDHnH35WZ2u5ldGuz2LLDNzFYEv+Or7r6NyJSmi83s7WD9ndF3P8k+pWU1DO6fyYyxQ8IuRUR6GXOPv+nezIYAHweucfe5CavqCBQXF/vixYsPvWMv0t7hFP/785wzZTh3XXNy2OWISAoysyXuXhxrW1x3Me3l7jvc/Z5kC4e+alnFDnbsadXdSyKSEIcVEJJcSspqSE8zZk8uCLsUEemFFBAprKSshtOKhpCXkxl2KSLSCykgUlTF9j2srK5n7lTd3ioiiaGASFEvrow8PT1Xo7eKSIIoIFJUSVkN4/NzmVAwIOxSRKSXUkCkoN3NbSxcu013L4lIQikgUtBra7bS0t6hp6dFJKEUECmotLyGgf0yKC4aGnYpItKLKSBSTEeHU1Jew+zJBWRl6D+fiCSOzjAp5r3Nu6itb1b/g4gknAIixZSU1WAG5yogRCTBFBApprS8hlPGDmFoblbYpYhIL6eASCHVdU28W7VLzUsi0iMUECnkxXI9PS0iPUcBkUJeKKuhcHAOU0YMDLsUEekDFBApoqm1ndfWbGXO1OGYHc7MryIiR0YBkSIWrttGY2s7c9S8JCI9RAGRIkrLasjJTOeMCcPCLkVE+ggFRApwd0rLazhrUj7ZmelhlyMifYQCIgWsrK6namejBucTkR6lgEgBJWWR21v19LSI9CQFRAooKavmhMI8RgzKDrsUEelDFBBJbltDM0srdurpaRHpcQqIJPfSylrc9fS0iPQ8BUSSKy2vYfjAfhx/TF7YpYhIH6OASGItbR28sqqWOVOHk5amp6dFpGcpIJLY4g3bqW9uU/+DiIRCAZHESspryMpI48yJ+WGXIiJ9kAIiSbk7JWXVnDFhGLn9MsIuR0T
6IAVEklq3dTcbtu3R3UsiEhoFRJIqDZ6eVv+DiIRFAZGkSsqrmTpyIKOH9A+7FBHpoxQQSWhXYyuLNuzQ1YOIhCqhAWFm88xspZmtMbPbOtnnajNbYWbLzewPUetvMrPVweumRNaZbF5ZVUt7h6v/QURClbDbY8wsHbgbOB+oBBaZ2QJ3XxG1zyTg68CZ7r7DzIYH64cC/woUAw4sCY7dkah6k0lpeQ1D+mdy8pghYZciIn1YIq8gZgJr3H2du7cADwOXHbDPZ4G795743b0mWH8h8Ly7bw+2PQ/MS2CtSaOtvYMXV9Zw7pThpOvpaREJUSIDohCoiFquDNZFmwxMNrPXzOx1M5t3GMdiZjeb2WIzW1xbW9uNpYdnacVOdu5p1dzTIhK6sDupM4BJwDnAtcCvzGxwvAe7+z3uXuzuxQUFBYmpsIeVlNWQkWbMntw7Po+IpK5EBkQVMCZqeXSwLlolsMDdW919PbCKSGDEc2yvVFpezczxQxmUnRl2KSLSxyUyIBYBk8xsvJllAfOBBQfs8ySRqwfMLJ9Ik9M64FngAjMbYmZDgAuCdb1axfY9rKpu0O2tIpIUEnYXk7u3mdktRE7s6cB97r7czG4HFrv7AvYFwQqgHfiqu28DMLM7iIQMwO3uvj1RtSaL0vJIH/3caSNCrkREBMzdw66hWxQXF/vixYvDLuOo3PDrN6ja0UjpV84JuxQR6SPMbIm7F8faFnYntQQamtt4Y912NS+JSNLQONJJ4KWVNdzx1Apa2juYd/zIsMsREQEUEKFaW9vAd/+vjNLyGoqG9efeG4spLhoadlkiIoACIhS7Glv5r5LV/O5vG8jOTOcbF03lpg8V0S8jPezSREQ+oIDoQe0dzsOLNvGj51axY08L1xSP4csXTKFgYL+wSxMROYgCoocsXLuN259aQdn7dcwsGsq3Pzqd4wvzwi5LRKRTCogE27RtD997uoxnlm+hcHAOd193ChedMBIzDcQnIslNAZEgDc1t/PeLa7j3r+tJN+PL50/ms7MnkJ2pfgYRSQ0KiG7W0eH8aWkVP3imnJr6Zq6YUcg/z5vKyLzssEsTETksCohutGTjDm7/83LertzFSWMG88sbTuWUsZr0R0RSkwKiG7y/q5E7/1LO/y7bzIhB/fjx1Sdx+cmFpGnCHxFJYQqIo9DY0s49r6zjFy+vocPhlnMn8vlzjiW3n75WEUl9OpMdAXfnqXfe5/tPl7F5VxMXnzCK2z4ylTFD+4ddmohIt1FAHKZ3K3dx+1PLWbRhB9NHDeLH15zMrAnDwi5LRKTbKSDiVFPfxA+fXcmjSyoZ2j+L7195AlcXjyFd/Qwi0kspIA6hua2d37y2gZ+XrqG5rZ3Pnj2BW+ZM1JSgItLrKSA64e48t6Ka7z1dxsZtezhv2nD+5eLpjM/PDbs0EZEeoYCIYeWWem5/ajmvrdnGpOED+P2nZjJ7ckHYZYmI9CgFRJTtu1u46/lVPPjGRgZmZ/Jvlx7HJ04fS0a6Jt4Tkb5HAQG0tndw/8KN/OSFVexuaeeGWeP44nmTGZKbFXZpIiKh6fMBUbF9D5/8zZusrd3N2ZPy+dYl05k8YmDYZYmIhK7PB8SIQdmMG5bL1z8yjbnThmsYbhGRQJ8PiKyMNO775GlhlyEiknTU+yoiIjEpIEREJCYFhIiIxKSAEBGRmBQQIiISkwJCRERiUkCIiEhMCggREYnJ3D3sGrqFmdUCG4/iLfKBrd1UTqrTd7E/fR/70/exT2/4Lsa5e8zhqntNQBwtM1vs7sVh15EM9F3sT9/H/vR97NPbvws1MYmISEwKCBERiUkBsc89YReQRPRd7E/fx/70fezTq78L9UGIiEhMuoIQEZGYFBAiIhJTnw8IM5tnZivNbI2Z3RZ2PWEyszFm9qKZrTCz5Wb2j2HXFDYzSzezpWb2VNi1hM3MBpvZY2ZWbmZlZnZG2DWFycy+FPz/5D0ze8jMssOuqbv16YAws3TgbuAjwHTgWjObHm5VoWoDvuzu04FZwBf6+PcB8I9AWdhFJImfAs+4+1TgJPrw92JmhcA/AMXufjyQDswPt6ru16cDApgJrHH3de7eAjwMXBZyTaFx9/fd/a3g53oiJ4DCcKsKj5mNBi4G7g27lrCZWR4wG/g1gLu3uPvOUIsKXwaQY2YZQH9gc8j1dLu+HhCFQEXUciV9+IQYzcyKgBnAGyGXEqafAF8DOkKuIxmMB2qB3wRNbveaWW7YRYXF3auAHwKbgPeBXe7+XLhVdb++HhASg5kNAB4HvujudWHXEwYzuwSocfclYdeSJDKAU4BfuPsMYDfQZ/vszGwIkdaG8cAxQK6ZXR9uVd2vrwdEFTAmanl0sK7PMrNMIuHwoLv/Kex6QnQmcKmZbSDS9DjHzB4It6RQVQKV7r73ivIxIoHRV50HrHf3WndvBf4EfCjkmrpdXw+IRcAkMxtvZllEOpkWhFxTaMzMiLQxl7n7j8OuJ0zu/nV3H+3uRUT+d1Hq7r3uL8R4ufsWoMLMpgSr5gIrQiwpbJuAWWbWP/j/zVx6Yad9RtgFhMnd28zsFuBZInch3Ofuy0MuK0xnAjcA75rZsmDdN9z96fBKkiRyK/Bg8MfUOuD/hVxPaNz9DTN7DHiLyN1/S+mFw25oqA0REYmprzcxiYhIJxQQIiISkwJCRERiUkCIiEhMCggREYlJASESMLOG4N8iM7uum9/7Gwcs/607318kERQQIgcrAg4rIIIB27qyX0C4e6976lZ6HwWEyMHuBM42s2XBmP/pZvafZrbIzN4xs78DMLNzzOxVM1tA8FSxmT1pZkuCeQJuDtbdSWTUz2Vm9mCwbu/VigXv/Z6ZvWtm10S990tR8y88GDyxi5ndGczZ8Y6Z/bDHvx3pM/r0k9QinbgN+Iq7XwIQnOh3uftpZtYPeM3M9o7ceQpwvLuvD5Y/5e7bzSwHWGRmj7v7bWZ2i7ufHON3XQmcTGR+hfzgmFeCbTOA44gMI/0acKaZlQFXAFPd3c1scPd+dJF9dAUhcmgXADcGw4+8AQwDJgXb3owKB4B/MLO3gdeJDAQ5ia6dBTzk7u3uXg28DJwW9d6V7t4BLCPS9LULaAJ+bWZXAnuO8rOJdEoBIXJoBtzq7icHr/FRY//v/mAns3OIjPJ5hrufRGR8nqOZhrI56ud2IMPd24hMdPUYcAnwzFG8v0iXFBAiB6sHBkYtPwt8PhgKHTOb3MlkOXnADnffY2ZTiUzbulfr3uMP8CpwTdDPUUBk1rY3OyssmKsjLxhA8UtEmqZEEkJ9ECIHewdoD5qKfktkLuYi4K2go7gWuDzGcc8Anwv6CVYSaWba6x7gHTN7y90/EbX+CeAM4G3Aga+5+5YgYGIZCPyvmWUTubL5pyP6hCJx0GiuIiISk5qYREQkJgWEiIjEpIAQEZGYFBAiIhKTAkJERGJSQIiISEwKCBERien/A0GEoRH/AAAdAAAAAElFTkSuQmCC\n", + "text/plain": [ + "<Figure size 432x288 with 1 Axes>" 
+ ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "acc_per_epoch = [np.mean(acc_per_epoch) for acc_per_epoch in running_test_acc]\n", + "display_loss_plot(acc_per_epoch, title=\"Test accuracy\", ylabel=\"Accuracy [%]\")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.7979886313948404" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "test(model, test_quantized_loader)" ] }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# Save the Brevitas model to disk\n", + "torch.save(model.state_dict(), \"state_dict_self-trained.pth\")" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## (Option 2) Load Pre-Trained Parameters <a id=\"load_pretrained\"></a>\n", + "## (Option 2, faster) Load Pre-Trained Parameters <a id=\"load_pretrained\"></a>\n", "\n", "Instead of training from scratch, you can also use pre-trained parameters we provide here. These parameters should achieve ~91.9% test accuracy." ] }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -426,7 +549,7 @@ "IncompatibleKeys(missing_keys=[], unexpected_keys=[])" ] }, - "execution_count": 36, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -441,7 +564,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 18, "metadata": { "scrolled": true }, @@ -452,7 +575,7 @@ "0.9188772287810328" ] }, - "execution_count": 37, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -465,7 +588,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Network Surgery Before Export <a id=\"network_surgery\"></a>\n", + "**Why do these parameters give better accuracy vs training from scratch?** Even with the topology and quantization fixed, achieving good accuracy on a given dataset requires [*hyperparameter tuning*](https://towardsdatascience.com/hyperparameters-optimization-526348bb8e2d) and potentially running training for a long time. The \"training from scratch\" example above is only intended as a quick example, whereas the pretrained parameters are obtained from a longer training run using the [determined.ai](https://determined.ai/) platform for hyperparameter tuning." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Network Surgery Before Export <a id=\"network_surgery\"></a>\n", "\n", "Sometimes, it's desirable to make some changes to our trained network prior to export (this is known in general as \"network surgery\"). This depends on the model and is not generally necessary, but in this case we want to make a couple of changes to get better results with FINN." 
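The renumbered cells just below only show their outputs, where the first layer's weight shape goes from (64, 593) to (64, 600): the 593 input features get zero-padded up to 600. The padding cell's source is not part of this diff, so as a rough guide, here is a minimal sketch of such a step, assuming `model` is the Brevitas `nn.Sequential` defined earlier (names and details are illustrative, not the notebook's exact cell):

```python
import numpy as np
import torch

# Pad the first QuantLinear layer from 593 to 600 input features.
# The 7 extra weight columns are all zero, so zero-padded inputs
# leave the layer's outputs unchanged.
W_orig = model[0].weight.data.detach().numpy()   # shape (64, 593)
W_new = np.pad(W_orig, [(0, 0), (0, 7)])         # shape (64, 600)
model[0].weight.data = torch.from_numpy(W_new)
model[0].in_features = 600
```

A width like 600 divides evenly in many ways, whereas 593 is prime, so the padded version gives the FINN compiler more folding options later on.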
] @@ -479,7 +609,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -488,7 +618,7 @@ "(64, 593)" ] }, - "execution_count": 24, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -504,7 +634,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -513,7 +643,7 @@ "(64, 600)" ] }, - "execution_count": 25, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -528,7 +658,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -537,7 +667,7 @@ "torch.Size([64, 600])" ] }, - "execution_count": 26, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -553,14 +683,14 @@ "source": [ "Next, we'll modify the expected input/output ranges. In FINN, we prefer to work with bipolar {-1, +1} instead of binary {0, 1} values. To achieve this, we'll create a \"wrapper\" model that handles the pre/postprocessing as follows:\n", "\n", - "* on the input side, we'll pre-process by (x + 1) / 2 in order to map incoming {-1, +1} inputs to {0, 1} ones which the trained network is used to. Since we're just multiplying/adding a scalar, these operations can be *streamlined* in FINN and implemented with no extra cost.\n", + "* on the input side, we'll pre-process by (x + 1) / 2 in order to map incoming {-1, +1} inputs to {0, 1} ones which the trained network is used to. Since we're just multiplying/adding a scalar, these operations can be [*streamlined*](https://finn.readthedocs.io/en/latest/nw_prep.html#streamlining-transformations) by FINN and implemented with no extra cost.\n", "\n", "* on the output side, we'll add a binary quantizer which maps everthing below 0 to -1 and everything above 0 to +1. This is essentially the same behavior as the sigmoid we used earlier, except the outputs are bipolar instead of binary." ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -588,7 +718,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -618,7 +748,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -627,7 +757,7 @@ "0.9188772287810328" ] }, - "execution_count": 29, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -640,14 +770,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Export to FINN-ONNX <a id=\"export_finn_onnx\" ></a>\n", + "# Export to FINN-ONNX <a id=\"export_finn_onnx\" ></a>\n", + "\n", "\n", - "FINN expects an ONNX model as input. We'll now export our network into ONNX to be imported and used in FINN for the next notebooks. Note that the particular ONNX representation used for FINN differs from standard ONNX, you can read more about this [here](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-finn-onnx)." + "[ONNX](https://onnx.ai/) is an open format built to represent machine learning models, and the FINN compiler expects an ONNX model as input. We'll now export our network into ONNX to be imported and used in FINN for the next notebooks. Note that the particular ONNX representation used for FINN differs from standard ONNX, you can read more about this [here](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-finn-onnx)." 
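The export cell itself is untouched here apart from its execution count, so its source does not appear in this hunk. For reference, a FINN-ONNX export from Brevitas looks roughly like the sketch below; `model_for_export` (the `CybSecMLPForExport` wrapper instance) and the exact `export_finn_onnx` call are assumptions based on the Brevitas commit pinned earlier in this PR, not lines taken from this diff:

```python
import brevitas.onnx as bo

# assumed names: the wrapped model and the output path may differ in the notebook
export_onnx_path = "cybsec-mlp.onnx"
input_shape = (1, 600)  # batch of 1, with the padded 600-element input

# export the wrapped Brevitas model to the FINN-ONNX dialect
bo.export_finn_onnx(model_for_export, input_shape, export_onnx_path)
```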
] }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 25, "metadata": { "scrolled": true }, @@ -682,27 +813,71 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## View the Exported ONNX in Netron <a id=\"view_in_netron\" ></a>\n", + "## One final fix: input datatype\n", + "\n", + "There's one more thing we'll do: we will mark the input tensor datatype as `DataType.BIPOLAR`, which will be used by the compiler later on. To do this, we'll utilize the `ModelWrapper` component from FINN, which lets us examine and manipulate the ONNX graph in an easier way.\n", + "\n", + "*In the near future it will be possible to add this information to the model [while exporting](https://github.com/Xilinx/brevitas/issues/232), instead of having to add it manually.*" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Input tensor name: 0\n", + "Input tensor shape: [1, 600]\n", + "Input tensor datatype: DataType.BIPOLAR\n" + ] + } + ], + "source": [ + "from finn.core.modelwrapper import ModelWrapper\n", + "from finn.core.datatype import DataType\n", + "\n", + "finn_model = ModelWrapper(export_onnx_path)\n", + "\n", + "finnonnx_in_tensor_name = finn_model.graph.input[0].name\n", + "finnonnx_model_in_shape = finn_model.get_tensor_shape(finnonnx_in_tensor_name)\n", + "finn_model.set_tensor_datatype(finnonnx_in_tensor_name, DataType.BIPOLAR)\n", + "print(\"Input tensor name: %s\" % finnonnx_in_tensor_name)\n", + "print(\"Input tensor shape: %s\" % str(finnonnx_model_in_shape))\n", + "print(\"Input tensor datatype: %s\" % str(finn_model.get_tensor_datatype(finnonnx_in_tensor_name)))\n", + "\n", + "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n", + "finn_model.save(ready_model_filename)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## View the Exported ONNX in Netron\n", "\n", - "Let's examine the exported ONNX model with Netron. Particular things of note:\n", + "Let's examine the exported ONNX model with [Netron](https://github.com/lutzroeder/netron), which is a visualizer for neural networks and allows interactive investigation of network properties. For example, you can click on the individual nodes and view the properties. Particular things of note:\n", "\n", - "* The input preprocessing (x + 1) / 2 is exported as part of the network (initial Add and Div layers)\n", - "* We've exported the padded version; shape of the first MatMul node's weight parameter is 600x64\n", - "* The weight parameters (second inputs) for MatMul nodes are annotated with `quantization: finn_datatype:INT2`\n", + "* The input tensor \"0\" is annotated with `quantization: finn_datatype: BIPOLAR`\n", + "* The input preprocessing (x + 1) / 2 is exported as part of the network (initial `Add` and `Div` layers)\n", + "* Brevitas `QuantLinear` layers are exported to ONNX as `MatMul`. 
We've exported the padded version; the shape of the first MatMul node's weight parameter is 600x64\n", + "* The weight parameters (second inputs) for MatMul nodes are annotated with `quantization: finn_datatype: INT2`\n", "* The quantized activations are exported as `MultiThreshold` nodes with `domain=finn.custom_op.general`\n", "* There's a final `MultiThreshold` node with threshold=0 to produce the final bipolar output (this is the `qnt_output` from `CybSecMLPForExport`)" ] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 27, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Serving 'cybsec-mlp.onnx' at http://0.0.0.0:8081\n" + "Serving 'cybsec-mlp-ready.onnx' at http://0.0.0.0:8081\n" ] }, { @@ -712,17 +887,17 @@ " <iframe\n", " width=\"100%\"\n", " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", + " src=\"http://localhost:8081/\"\n", " frameborder=\"0\"\n", " allowfullscreen\n", " ></iframe>\n", " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f4045ac19e8>" + "<IPython.lib.display.IFrame at 0x7f77214fa630>" ] }, - "execution_count": 32, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } @@ -730,7 +905,7 @@ "source": [ "from finn.util.visualization import showInNetron\n", "\n", - "showInNetron(export_onnx_path)" + "showInNetron(ready_model_filename)" ] }, { diff --git a/notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb similarity index 59% rename from notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb rename to notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb index f48cada0dd25f08f1659a778d04785bda27f443e..6ac4e52072d71f527e4ec5d923a76851b77dc247 100644 --- a/notebooks/end2end_example/cybersecurity/2-export-to-finn-and-verify.ipynb +++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb @@ -6,10 +6,15 @@ "source": [ "# Verify Exported ONNX Model in FINN\n", "\n", - "**Important: This notebook depends on the 1-train-mlp-with-brevitas notebook, because we are using the ONNX model that was exported there. So please make sure the needed .onnx file is generated before you run this notebook. Also remember to 'close and halt' any other FINN notebooks, since Netron visualizations use the same port.**\n", + "<font color=\"red\">**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".</font>\n", + "\n", + "**Important: This notebook depends on the 1-train-mlp-with-brevitas notebook, because we are using the ONNX model that was exported there. So please make sure the needed .onnx file is generated before you run this notebook.**\n", + "\n", + "**Also remember to 'close and halt' any other FINN notebooks, since Netron visualizations use the same port.**\n", "\n", "In this notebook we will show how to import the network we trained in Brevitas and verify it in the FINN compiler. \n", "This verification process can actually be done at various stages in the compiler [as explained in this notebook](../bnn-pynq/tfc_end2end_verification.ipynb) but for this example we'll only consider the first step: verifying the exported high-level FINN-ONNX model.\n", "Another goal of this notebook is to introduce you to the concept of *graph transformations* -- we'll be applying some transformations to the graph to make it executable for verification. 
\n", "Once this model is sucessfully verified, we'll generate an FPGA accelerator from it in the next notebook." ] }, @@ -36,8 +41,8 @@ "source": [ "## Outline\n", "-------------\n", - "1. [Import model and visualize in Netron](#brevitas_import_visualization)\n", - "2. [Network preperations: Tidy up transformations](#network_preparations)\n", + "1. [Import model into FINN with ModelWrapper](#brevitas_import_visualization)\n", + "2. [Network preparations: Tidy-up transformations](#network_preparations)\n", "3. [Load the dataset and Brevitas model](#load_dataset) \n", "4. [Compare FINN and Brevitas execution](#compare_brevitas)" ] @@ -46,7 +51,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 1. Import model and visualize in Netron <a id=\"brevitas_import_visualization\"></a>\n", + "# 1. Import model into FINN with ModelWrapper <a id=\"brevitas_import_visualization\"></a>\n", "\n", "Now that we have the model in .onnx format, we can work with it using FINN. To import it into FINN, we'll use the [`ModelWrapper`](https://finn.readthedocs.io/en/latest/source_code/finn.core.html#finn.core.modelwrapper.ModelWrapper). It is a wrapper around the ONNX model which provides several helper functions to make it easier to work with the model." ] @@ -59,46 +64,91 @@ "source": [ "from finn.core.modelwrapper import ModelWrapper\n", "\n", - "model_file_path = \"cybsec-mlp.onnx\"\n", - "model_for_sim = ModelWrapper(model_file_path)" + "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n", + "model_for_sim = ModelWrapper(ready_model_filename)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To visualize the exported model, Netron can be used. Netron is a visualizer for neural networks and allows interactive investigation of network properties. For example, you can click on the individual nodes and view the properties." + "Let's have a look at some of the member functions exposed by `ModelWrapper` to see what kind of information we can extract from it." 
] }, { "cell_type": "code", "execution_count": 3, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving 'cybsec-mlp.onnx' at http://0.0.0.0:8081\n" - ] - }, { "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc1fc950748>" + "['__class__',\n", + " '__delattr__',\n", + " '__dict__',\n", + " '__dir__',\n", + " '__doc__',\n", + " '__eq__',\n", + " '__format__',\n", + " '__ge__',\n", + " '__getattribute__',\n", + " '__gt__',\n", + " '__hash__',\n", + " '__init__',\n", + " '__init_subclass__',\n", + " '__le__',\n", + " '__lt__',\n", + " '__module__',\n", + " '__ne__',\n", + " '__new__',\n", + " '__reduce__',\n", + " '__reduce_ex__',\n", + " '__repr__',\n", + " '__setattr__',\n", + " '__sizeof__',\n", + " '__str__',\n", + " '__subclasshook__',\n", + " '__weakref__',\n", + " '_model_proto',\n", + " 'analysis',\n", + " 'check_all_tensor_shapes_specified',\n", + " 'check_compatibility',\n", + " 'cleanup',\n", + " 'find_consumer',\n", + " 'find_consumers',\n", + " 'find_direct_predecessors',\n", + " 'find_direct_successors',\n", + " 'find_producer',\n", + " 'find_upstream',\n", + " 'get_all_tensor_names',\n", + " 'get_finn_nodes',\n", + " 'get_initializer',\n", + " 'get_metadata_prop',\n", + " 'get_node_index',\n", + " 'get_nodes_by_op_type',\n", + " 'get_non_finn_nodes',\n", + " 'get_tensor_datatype',\n", + " 'get_tensor_fanout',\n", + " 'get_tensor_layout',\n", + " 'get_tensor_shape',\n", + " 'get_tensor_sparsity',\n", + " 'get_tensor_valueinfo',\n", + " 'graph',\n", + " 'is_fork_node',\n", + " 'is_join_node',\n", + " 'make_empty_exec_context',\n", + " 'make_new_valueinfo_name',\n", + " 'model',\n", + " 'rename_tensor',\n", + " 'save',\n", + " 'set_initializer',\n", + " 'set_metadata_prop',\n", + " 'set_tensor_datatype',\n", + " 'set_tensor_layout',\n", + " 'set_tensor_shape',\n", + " 'set_tensor_sparsity',\n", + " 'temporary_fix_oldstyle_domain',\n", + " 'transform']" ] }, "execution_count": 3, @@ -107,60 +157,33 @@ } ], "source": [ - "from finn.util.visualization import showInNetron\n", - "showInNetron(model_file_path)" + "dir(model_for_sim)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# 2. Network preperation: Tidy up transformations <a id=\"network_preparations\"></a>\n", - "\n", - "Before running the verification, we need to prepare our FINN-ONNX model. In particular, all the intermediate tensors need to have statically defined shapes. To do this, we apply some transformations to the model like a kind of \"tidy-up\" to make it easier to process. You can read more about these transformations in [this notebook](../bnn-pynq/tfc_end2end_example.ipynb).\n" + "Many of these helper functions relate to extracting information about the structure and properties of the ONNX model. You can find out more about examining and manipulating ONNX models programmatically in [this tutorial](../../basics/0_how_to_work_with_onnx.ipynb), but we'll show a few basic functions here. For instance, we can extract the shape and datatype annotation for various tensors in the graph, as well as information related to the operation types associated with each node." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [], - "source": [ - "from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, RemoveStaticGraphInputs\n", - "from finn.transformation.infer_shapes import InferShapes\n", - "from finn.transformation.infer_datatypes import InferDataTypes\n", - "from finn.transformation.fold_constants import FoldConstants\n", - "\n", - "model_for_sim = model_for_sim.transform(InferShapes())\n", - "model_for_sim = model_for_sim.transform(FoldConstants())\n", - "model_for_sim = model_for_sim.transform(GiveUniqueNodeNames())\n", - "model_for_sim = model_for_sim.transform(GiveReadableTensorNames())\n", - "model_for_sim = model_for_sim.transform(InferDataTypes())\n", - "model_for_sim = model_for_sim.transform(RemoveStaticGraphInputs())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There's one more thing we'll do: we will mark the input tensor datatype as bipolar, which will be used by the compiler later on. \n", - "\n", - "*In the near future it will be possible to add this information to the model while exporting, instead of having to add it manually.*" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Input tensor name: global_in\n", - "Output tensor name: global_out\n", + "Input tensor name: 0\n", + "Output tensor name: 78\n", "Input tensor shape: [1, 600]\n", - "Input tensor datatype: DataType.BIPOLAR\n" + "Output tensor shape: [1, 1]\n", + "Input tensor datatype: DataType.BIPOLAR\n", + "Output tensor datatype: DataType.FLOAT32\n", + "List of node operator types in the graph: \n", + "['Add', 'Div', 'MatMul', 'Add', 'Mul', 'Unsqueeze', 'BatchNormalization', 'Squeeze', 'MultiThreshold', 'Mul', 'MatMul', 'Add', 'Mul', 'Unsqueeze', 'BatchNormalization', 'Squeeze', 'MultiThreshold', 'Mul', 'MatMul', 'Add', 'Mul', 'Unsqueeze', 'BatchNormalization', 'Squeeze', 'MultiThreshold', 'Mul', 'MatMul', 'Add', 'Mul', 'MultiThreshold']\n" ] } ], @@ -172,22 +195,69 @@ "print(\"Input tensor name: %s\" % finnonnx_in_tensor_name)\n", "print(\"Output tensor name: %s\" % finnonnx_out_tensor_name)\n", "finnonnx_model_in_shape = model_for_sim.get_tensor_shape(finnonnx_in_tensor_name)\n", + "finnonnx_model_out_shape = model_for_sim.get_tensor_shape(finnonnx_out_tensor_name)\n", "print(\"Input tensor shape: %s\" % str(finnonnx_model_in_shape))\n", - "model_for_sim.set_tensor_datatype(finnonnx_in_tensor_name, DataType.BIPOLAR)\n", + "print(\"Output tensor shape: %s\" % str(finnonnx_model_out_shape))\n", + "finnonnx_model_in_dt = model_for_sim.get_tensor_datatype(finnonnx_in_tensor_name)\n", + "finnonnx_model_out_dt = model_for_sim.get_tensor_datatype(finnonnx_out_tensor_name)\n", "print(\"Input tensor datatype: %s\" % str(model_for_sim.get_tensor_datatype(finnonnx_in_tensor_name)))\n", - "\n", - "verified_model_filename = \"cybsec-mlp-verified.onnx\"\n", - "model_for_sim.save(verified_model_filename)" + "print(\"Output tensor datatype: %s\" % str(model_for_sim.get_tensor_datatype(finnonnx_out_tensor_name)))\n", + "print(\"List of node operator types in the graph: \")\n", + "print([x.op_type for x in model_for_sim.graph.node])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let's view our ready-to-go model. Some changes to note:\n", + "Note that the output tensor is (as of yet) marked as a float32 value, even though we know the output is binary. 
This will be automatically inferred by the compiler in the next step when we run the `InferDataTypes` transformation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. Network preparation: Tidy-up transformations <a id=\"network_preparations\"></a>\n", + "\n", + "Before running the verification, we need to prepare our FINN-ONNX model. In particular, all the intermediate tensors need to have statically defined shapes. To do this, we apply some graph transformations to the model like a kind of \"tidy-up\" to make it easier to process. \n", + "\n", + "**Graph transformations in FINN.** The whole FINN compiler is built around the idea of transformations, which gradually transform the model into a synthesizable hardware description. Although FINN offers functionality that automatically calls a standard sequence of transformations (covered in the next notebook), you can also manually call individual transformations (like we do here), as well as add your own transformations, to create custom flows; a minimal skeleton of a custom transformation is sketched further below. You can read more about these transformations in [this notebook](../bnn-pynq/tfc_end2end_example.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, RemoveStaticGraphInputs\n", + "from finn.transformation.infer_shapes import InferShapes\n", + "from finn.transformation.infer_datatypes import InferDataTypes\n", + "from finn.transformation.fold_constants import FoldConstants\n", + "\n", + "model_for_sim = model_for_sim.transform(InferShapes())\n", + "model_for_sim = model_for_sim.transform(FoldConstants())\n", + "model_for_sim = model_for_sim.transform(GiveUniqueNodeNames())\n", + "model_for_sim = model_for_sim.transform(GiveReadableTensorNames())\n", + "model_for_sim = model_for_sim.transform(InferDataTypes())\n", + "model_for_sim = model_for_sim.transform(RemoveStaticGraphInputs())\n", "\n", - "* all intermediate tensors now have their shapes specified (indicated by numbers next to the arrows going between layers)\n", - "* the datatype on the input tensor is set to DataType.BIPOLAR (click on the `global_in` node to view properties)" + "verif_model_filename = \"cybsec-mlp-verification.onnx\"\n", + "model_for_sim.save(verif_model_filename)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Would the FINN compiler still work if we didn't do this?** The compilation step in the next notebook applies these transformations internally and would work fine, but we're going to use FINN's verification capabilities below, and these require the tidy-up transformations." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's view our ready-to-go model after the transformations. Note that all intermediate tensors now have their shapes specified (indicated by numbers next to the arrows going between layers). Additionally, the datatype inference step has propagated quantization annotations to the outputs of `MultiThreshold` layers (expand by clicking the + next to the name of the tensor to see the quantization annotation) and the final output tensor." 
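+ ,"\n", + "\n", + "As promised above, before we look at the model, here is a minimal skeleton of what a custom transformation could look like -- a sketch only, and note that the `Transformation` base class import path may differ between FINN versions:\n", + "\n", + "```python\n", + "from finn.transformation.base import Transformation  # import path assumed\n", + "\n", + "class MyCustomTransformation(Transformation):\n", + "    # apply() returns the (possibly modified) model and a bool that tells\n", + "    # the framework whether the transformation needs to be applied again\n", + "    def apply(self, model):\n", + "        graph_modified = False\n", + "        # inspect or rewrite model.graph.node here; set graph_modified = True on changes\n", + "        return (model, graph_modified)\n", + "\n", + "# usage, like any built-in transformation:\n", + "# model_for_sim = model_for_sim.transform(MyCustomTransformation())\n", + "```"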
] }, { @@ -199,9 +269,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving 'cybsec-mlp-verified.onnx' at http://0.0.0.0:8081\n" + "Serving 'cybsec-mlp-verification.onnx' at http://0.0.0.0:8081\n" ] }, { @@ -211,14 +279,14 @@ " <iframe\n", " width=\"100%\"\n", " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", + " src=\"http://localhost:8081/\"\n", " frameborder=\"0\"\n", " allowfullscreen\n", " ></iframe>\n", " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc280154278>" + "<IPython.lib.display.IFrame at 0x7f388298b470>" ] }, "execution_count": 6, @@ -227,7 +295,9 @@ } ], "source": [ - "showInNetron(verified_model_filename)" + "from finn.util.visualization import showInNetron\n", + "\n", + "showInNetron(verif_model_filename)" ] }, { @@ -236,9 +306,7 @@ "source": [ "# 3. Load the Dataset and the Brevitas Model <a id=\"load_dataset\"></a>\n", "\n", - "We'll use some example data from the quantized UNSW-NB15 dataset (from the previous notebook) to use as inputs for the verification. \n", - "\n", - "Recall that the quantized values from the dataset are 593-bit binary {0, 1} vectors whereas our exported model takes 600-bit bipolar {-1, +1} vectors, so we'll have to preprocess it a bit before we can use it for verifying the ONNX model." + "We'll use some example data from the quantized UNSW-NB15 dataset (from the previous notebook) to use as inputs for the verification. " ] }, { @@ -258,16 +326,24 @@ } ], "source": [ - "from torch.utils.data import DataLoader, Dataset\n", - "from dataloader_quantized import UNSW_NB15_quantized\n", + "import numpy as np\n", + "from torch.utils.data import TensorDataset\n", "\n", - "test_quantized_dataset = UNSW_NB15_quantized(file_path_train='UNSW_NB15_training-set.csv', \\\n", - " file_path_test = \"UNSW_NB15_testing-set.csv\", \\\n", - " train=False)\n", + "def get_preqnt_dataset(data_dir: str, train: bool):\n", + " unsw_nb15_data = np.load(data_dir + \"/unsw_nb15_binarized.npz\")\n", + " if train:\n", + " partition = \"train\"\n", + " else:\n", + " partition = \"test\"\n", + " part_data = unsw_nb15_data[partition].astype(np.float32)\n", + " part_data = torch.from_numpy(part_data)\n", + " part_data_in = part_data[:, :-1]\n", + " part_data_out = part_data[:, -1]\n", + " return TensorDataset(part_data_in, part_data_out)\n", "\n", "n_verification_inputs = 100\n", - "# last column is the label, exclude it\n", - "input_tensor = test_quantized_dataset.data[:n_verification_inputs,:-1]\n", + "test_quantized_dataset = get_preqnt_dataset(\".\", False)\n", + "input_tensor = test_quantized_dataset.tensors[0][:n_verification_inputs]\n", "input_tensor.shape" ] }, @@ -325,6 +401,9 @@ "# replace this with your trained network checkpoint if you're not\n", "# using the pretrained weights\n", "trained_state_dict = torch.load(\"state_dict.pth\")[\"models_state_dict\"][0]\n", + "# Uncomment the following line if you previously chose to train the network yourself\n", + "#trained_state_dict = torch.load(\"state_dict_self-trained.pth\")\n", + "\n", "brevitas_model.load_state_dict(trained_state_dict, strict=False)" ] }, @@ -355,7 +434,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Let's make helper functions to execute the same input with Brevitas and FINN. For FINN, we'll use the [`finn.core.onnx_exec`](https://finn.readthedocs.io/en/latest/source_code/finn.core.html#finn.core.onnx_exec.execute_onnx) function to execute the exported FINN-ONNX on the inputs." 
+ "Let's make helper functions to execute the same input with Brevitas and FINN. For FINN, we'll use the [`finn.core.onnx_exec`](https://finn.readthedocs.io/en/latest/source_code/finn.core.html#finn.core.onnx_exec.execute_onnx) function to execute the exported FINN-ONNX on the inputs. Note that this ONNX execution is for verification only; not for accelerated execution.\n", + "\n", + "Recall that the quantized values from the dataset are 593-bit binary {0, 1} vectors whereas our exported model takes 600-bit bipolar {-1, +1} vectors, so we'll have to preprocess it a bit before we can use it for verifying the ONNX model." ] }, { @@ -364,7 +445,12 @@ "metadata": {}, "outputs": [], "source": [ + "import finn.core.onnx_exec as oxe\n", + "\n", "def inference_with_finn_onnx(current_inp):\n", + " finnonnx_in_tensor_name = model_for_sim.graph.input[0].name\n", + " finnonnx_model_in_shape = model_for_sim.get_tensor_shape(finnonnx_in_tensor_name)\n", + " finnonnx_out_tensor_name = model_for_sim.graph.output[0].name\n", " # convert input to numpy for FINN\n", " current_inp = current_inp.detach().numpy()\n", " # add padding and re-scale to bipolar\n", @@ -397,12 +483,11 @@ "name": "stderr", "output_type": "stream", "text": [ - "ok 100 nok 0: 100%|██████████| 100/100 [00:48<00:00, 2.09it/s]\n" + "ok 100 nok 0: 100%|██████████| 100/100 [00:47<00:00, 2.09it/s]\n" ] } ], "source": [ - "import finn.core.onnx_exec as oxe\n", "import numpy as np\n", "from tqdm import trange\n", "\n", @@ -421,12 +506,12 @@ " ok += 1 if finn_output == brevitas_output else 0\n", " nok += 1 if finn_output != brevitas_output else 0\n", " verify_range.set_description(\"ok %d nok %d\" % (ok, nok))\n", - " verify_range.refresh() # to show immediately the update" + " verify_range.refresh()" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 12, "metadata": {}, "outputs": [ { diff --git a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb index 1ee1cefbe17d96ffd7a2e6384e037e1d9fbdd989..551c321534cfefa13b8d34b7f1e7685000702ec0 100644 --- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb +++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb @@ -6,7 +6,9 @@ "source": [ "# Building the Streaming Dataflow Accelerator\n", "\n", - "**Important: This notebook depends on the 2-cybersecurity-finn-verification notebook because we are using models that were created by these notebooks. So please make sure the needed .onnx files are generated prior to running this notebook.**\n", + "<font color=\"red\">**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".</font>\n", + "\n", + "**Important: This notebook depends on the 1-train-mlp-with-brevitas notebook because we are using models that were created by that notebook. So please make sure the needed .onnx files are generated prior to running this notebook.**\n", "\n", "<img align=\"left\" src=\"finn-example.png\" alt=\"drawing\" style=\"margin-right: 20px\" width=\"250\"/>\n", "\n", @@ -29,7 +31,8 @@ " 2.3 [Configuring the Performance](#config_perf) \n", "4. [Launch a Build: Only Estimate Reports](#build_estimate_report)\n", "5. [Launch a Build: Stitched IP, out-of-context synth and rtlsim Performance](#build_ip_synth_rtlsim)\n", - "6. [Launch a Build: PYNQ Bitfile and Driver](#build_bitfile_driver)" + "6. 
[(Optional) Launch a Build: PYNQ Bitfile and Driver](#build_bitfile_driver)\n", + "7. [(Optional) Run on PYNQ board](#run_on_pynq)" ] }, { @@ -38,10 +41,10 @@ "source": [ "## Introduction to `build_dataflow` Tool <a id=\"intro_build_dataflow\"></a>\n", "\n", - "Since version 0.5b, the FINN compiler has a `build_dataflow` tool. Compared to previous versions which required setting up all the needed transformations in a Python script, it makes experimenting with dataflow architecture generation easier. The core idea is to specify the relevant build info as a configuration `dict`, which invokes all the necessary steps to make the dataflow build happen. It can be invoked either from the [command line](https://finn-dev.readthedocs.io/en/latest/command_line.html) or with a single Python function call\n", + "Since version 0.5b, the FINN compiler has a `build_dataflow` tool. Compared to previous versions, which required setting up all the needed transformations in a Python script, it makes experimenting with dataflow architecture generation easier. The core idea is to specify the relevant build info as a configuration `dict`, which invokes all the necessary steps to make the dataflow build happen. It can be invoked either from the [command line](https://finn-dev.readthedocs.io/en/latest/command_line.html) or with a single Python function call.\n", "\n", "\n", - "In this notebook, we'll use the Python function call to invoke the builds to stay inside the Jupyter notebook, but feel free to experiment with reproducing what we do here with the `./run-docker.sh build_dataflow` and `./run-docker.sh build_custom` command-line entry points too, as documented [here]((https://finn-dev.readthedocs.io/en/latest/command_line.html))." + "In this notebook, we'll use the Python function call to invoke the builds to stay inside the Jupyter notebook, but feel free to experiment with reproducing what we do here with the `./run-docker.sh build_dataflow` and `./run-docker.sh build_custom` command-line entry points too. " ] }, { @@ -69,8 +72,8 @@ " - `BITFILE` : integrate the accelerator into a shell to produce a standalone bitfile\n", " - `PYNQ_DRIVER` : generate a PYNQ Python driver that can be used to launch the accelerator\n", " - `DEPLOYMENT_PACKAGE` : create a folder with the `BITFILE` and `PYNQ_DRIVER` outputs, ready to be copied to the target FPGA platform.\n", - "* `output_dir`: the directory where the all the generated build outputs above will be written into.\n", - "* `steps`: list of predefined (or custom) build steps FINN will go through. Use `build_dataflow_config.estimate_only_dataflow_steps` to execute only the steps needed for estimation (without any synthesis), and the `build_dataflow_config.default_build_dataflow_steps` otherwise (which is the default value).\n", + "* `output_dir`: the directory where all the generated build outputs above will be written.\n", + "* `steps`: list of predefined (or custom) build steps FINN will go through. Use `build_dataflow_config.estimate_only_dataflow_steps` to execute only the steps needed for estimation (without any synthesis), and the `build_dataflow_config.default_build_dataflow_steps` otherwise (which is the default value). 
You can find the list of default steps [here](https://finn.readthedocs.io/en/latest/source_code/finn.builder.html#finn.builder.build_dataflow_config.default_build_dataflow_steps) in the documentation.\n", "\n", "### Configuring the Board and FPGA Part <a id=\"config_fpga\"></a>\n", "\n", @@ -80,7 +83,7 @@ "\n", "### Configuring the Performance <a id=\"config_perf\"></a>\n", "\n", - "You can configure the performance (and correspondingly, the FPGA resource footprint) of the generated in two ways:\n", + "You can configure the performance (and correspondingly, the FPGA resource footprint) of the generated dataflow accelerator in two ways:\n", "\n", "1) (basic) Set a target performance and let the compiler figure out the per-node parallelization settings.\n", "\n", @@ -88,7 +91,7 @@ "\n", "This notebook only deals with the basic approach, for which you need to set up:\n", "\n", - "* `target_fps`: target inference performance in frames per second. Note that target may not be achievable due to specific layer constraints, or due to resource limitations of the FPGA.\n", + "* `target_fps`: target inference performance in frames per second. Note that target may not be achievable due to specific layer constraints, or due to resource limitations of the FPGA. \n", "* `synth_clk_period_ns`: target clock period (in nanoseconds) for Vivado synthesis. e.g. `synth_clk_period_ns=5.0` will target a 200 MHz clock. Note that the target clock period may not be achievable depending on the FPGA part and design complexity." ] }, @@ -103,15 +106,57 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Previous run results deleted!\n" + ] + } + ], + "source": [ + "import finn.builder.build_dataflow as build\n", + "import finn.builder.build_dataflow_config as build_cfg\n", + "import os\n", + "import shutil\n", + "\n", + "model_file = \"cybsec-mlp-ready.onnx\"\n", + "\n", + "estimates_output_dir = \"output_estimates_only\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_cfg.estimate_only_dataflow_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Building dataflow accelerator from cybsec-mlp-verified.onnx\n", - "Intermediate outputs will be generated in /tmp/finn_dev_osboxes\n", + "Building dataflow accelerator from cybsec-mlp-ready.onnx\n", + "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n", "Final outputs will be generated in output_estimates_only\n", "Build log is at output_estimates_only/build_dataflow.log\n", "Running step: step_tidy_up [1/7]\n", @@ -121,7 +166,9 @@ "Running step: step_target_fps_parallelization [5/7]\n", "Running step: step_apply_folding_config [6/7]\n", "Running step: step_generate_estimate_reports [7/7]\n", - "Completed successfully\n" + "Completed successfully\n", + "CPU times: user 1.84 s, sys: 599 ms, total: 2.44 s\n", + "Wall time: 1.77 s\n" ] 
}, { @@ -130,31 +177,14 @@ "0" ] }, - "execution_count": 1, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "import finn.builder.build_dataflow as build\n", - "import finn.builder.build_dataflow_config as build_cfg\n", - "\n", - "model_file = \"cybsec-mlp-verified.onnx\"\n", - "\n", - "estimates_output_dir = \"output_estimates_only\"\n", - "\n", - "cfg = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", - " target_fps = 1000000,\n", - " synth_clk_period_ns = 10.0,\n", - " fpga_part = \"xc7z020clg400-1\",\n", - " steps = build_cfg.estimate_only_dataflow_steps,\n", - " generate_outputs=[\n", - " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", - " ]\n", - ")\n", - "\n", - "build.build_dataflow_cfg(model_file, cfg)" + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" ] }, { @@ -166,7 +196,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -183,7 +213,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -209,7 +239,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -217,11 +247,11 @@ "output_type": "stream", "text": [ "{\r\n", - " \"critical_path_cycles\": 272,\r\n", - " \"max_cycles\": 80,\r\n", - " \"max_cycles_node_name\": \"StreamingFCLayer_Batch_0\",\r\n", - " \"estimated_throughput_fps\": 1250000.0,\r\n", - " \"estimated_latency_ns\": 2720.0\r\n", + " \"critical_path_cycles\": 252,\r\n", + " \"max_cycles\": 64,\r\n", + " \"max_cycles_node_name\": \"StreamingFCLayer_Batch_1\",\r\n", + " \"estimated_throughput_fps\": 1562500.0,\r\n", + " \"estimated_latency_ns\": 2520.0\r\n", "}" ] } @@ -234,12 +264,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since all of these reports are .json files, we can easily load them into Python for further processing. Let's define a helper function and look at the `estimate_layer_cycles.json` report." + "Since all of these reports are .json files, we can easily load them into Python for further processing. This can be useful if you are building your own design automation tools on top of FINN. Let's define a helper function and look at the `estimate_layer_cycles.json` report." ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -252,19 +282,19 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'StreamingFCLayer_Batch_0': 80,\n", + "{'StreamingFCLayer_Batch_0': 60,\n", " 'StreamingFCLayer_Batch_1': 64,\n", " 'StreamingFCLayer_Batch_2': 64,\n", " 'StreamingFCLayer_Batch_3': 64}" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -277,34 +307,34 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here, we can see the estimated number of clock cycles each layer will take. Recall that all of these layers will be running in parallel, and the slowest layer will determine the overall throughput of the entire neural network. FINN attempts to parallelize each layer such that they all take a similar number of cycles, and less than the corresponding number of cycles that would be required to meet `target_fps`.\n", + "Here, we can see the estimated number of clock cycles each layer will take. 
Recall that all of these layers will be running in parallel, and the slowest layer will determine the overall throughput of the entire neural network. FINN attempts to parallelize each layer such that they all take a similar number of cycles, and less than the corresponding number of cycles that would be required to meet `target_fps`. Additionally, by summing up all layer cycle estimates, one can obtain an estimate for the overall latency of the whole network. \n", "\n", "Finally, we can see the layer-by-layer resource estimates in the `estimate_layer_resources.json` report:" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'StreamingFCLayer_Batch_0': {'BRAM_18K': 27,\n", - " 'BRAM_efficiency': 0.15432098765432098,\n", - " 'LUT': 8149,\n", + "{'StreamingFCLayer_Batch_0': {'BRAM_18K': 36,\n", + " 'BRAM_efficiency': 0.11574074074074074,\n", + " 'LUT': 8184,\n", " 'URAM': 0,\n", " 'URAM_efficiency': 1,\n", " 'DSP': 0},\n", " 'StreamingFCLayer_Batch_1': {'BRAM_18K': 4,\n", " 'BRAM_efficiency': 0.1111111111111111,\n", - " 'LUT': 1435,\n", + " 'LUT': 1217,\n", " 'URAM': 0,\n", " 'URAM_efficiency': 1,\n", " 'DSP': 0},\n", " 'StreamingFCLayer_Batch_2': {'BRAM_18K': 4,\n", " 'BRAM_efficiency': 0.1111111111111111,\n", - " 'LUT': 1435,\n", + " 'LUT': 1217,\n", " 'URAM': 0,\n", " 'URAM_efficiency': 1,\n", " 'DSP': 0},\n", @@ -314,10 +344,10 @@ " 'URAM': 0,\n", " 'URAM_efficiency': 1,\n", " 'DSP': 0},\n", - " 'total': {'BRAM_18K': 36.0, 'LUT': 11360.0, 'URAM': 0.0, 'DSP': 0.0}}" + " 'total': {'BRAM_18K': 45.0, 'LUT': 10959.0, 'URAM': 0.0, 'DSP': 0.0}}" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -332,7 +362,7 @@ "source": [ "This particular report is useful to determine whether the current configuration will fit into a particular FPGA. If you see that the resource requirements are too high for the FPGA you had in mind, you should consider lowering the `target_fps`.\n", "\n", - "*Note that the analytical models tend to over-estimate how much resources are needed, since they can't capture the effects of various synthesis optimizations.*" + "**Note that the analytical models tend to over-estimate how many resources are needed, since they can't capture the effects of various synthesis optimizations.**" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Launch a Build: Stitched IP, out-of-context synth and rtlsim Performance <a id=\"build_ip_synth_rtlsim\"></a>\n", "\n", - "Once we have a configuration that gives satisfactory estimates, we can move on to generating the accelerator. We can do this in different ways depending on how we want to integrate the accelerator into a larger system. For instance, if we have a larger streaming system built in Vivado or if we'd like to re-use this generated accelerator as an IP component in other projects, the `STITCHED_IP` output product is a good choice. We can also use the `OOC_SYNTH` output product to get post-synthesis resource and clock frequency numbers for our accelerator." + "Once we have a configuration that gives satisfactory estimates, we can move on to generating the accelerator. We can do this in different ways depending on how we want to integrate the accelerator into a larger system. For instance, if we have a larger streaming system built in Vivado or if we'd like to re-use this generated accelerator as an IP component in other projects, the `STITCHED_IP` output product is a good choice. 
We can also use the `OOC_SYNTH` output product to get post-synthesis resource and clock frequency numbers for our accelerator.\n", + "\n", + "<font color=\"red\">**Live FINN tutorial:** These next builds will take about 10 minutes to complete since multiple calls to Vivado and a call to RTL simulation are involved. While this is running, you can examine the generated files with noVNC -- it is running on **(your AWS URL):6080/vnc.html**\n", + "\n", + "* Once the `step_hls_codegen [8/16]` below is completed, you can view the generated HLS code under its own folder for each layer: `/tmp/finn_dev_ubuntu/code_gen_ipgen_StreamingFCLayer_Batch_XXXXXX`\n", + " \n", + "* Once the `step_create_stitched_ip [11/16]` below is completed, you can view the generated stitched IP in Vivado under `/home/ubuntu/finn/notebooks/end2end_example/cybersecurity/output_ipstitch_ooc_rtlsim/stitched_ip`\n", + "</font> " ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Building dataflow accelerator from cybsec-mlp-verified.onnx\n", - "Intermediate outputs will be generated in /tmp/finn_dev_osboxes\n", - "Final outputs will be generated in output_ipstitch_ooc_rtlsim\n", - "Build log is at output_ipstitch_ooc_rtlsim/build_dataflow.log\n", - "Running step: step_tidy_up [1/15]\n", - "Running step: step_streamline [2/15]\n", - "Running step: step_convert_to_hls [3/15]\n", - "Running step: step_create_dataflow_partition [4/15]\n", - "Running step: step_target_fps_parallelization [5/15]\n", - "Running step: step_apply_folding_config [6/15]\n", - "Running step: step_generate_estimate_reports [7/15]\n", - "Running step: step_hls_ipgen [8/15]\n", - "Running step: step_set_fifo_depths [9/15]\n", - "Running step: step_create_stitched_ip [10/15]\n", - "Running step: step_measure_rtlsim_performance [11/15]\n", - "Running step: step_make_pynq_driver [12/15]\n", - "Running step: step_out_of_context_synthesis [13/15]\n", - "Running step: step_synthesize_bitfile [14/15]\n", - "Running step: step_deployment_package [15/15]\n", - "Completed successfully\n" + "Previous run results deleted!\n" ] - }, - { - "data": { - "text/plain": [ - "0" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ "import finn.builder.build_dataflow as build\n", "import finn.builder.build_dataflow_config as build_cfg\n", + "import os\n", + "import shutil\n", "\n", - "model_file = \"cybsec-mlp-verified.onnx\"\n", + "model_file = \"cybsec-mlp-ready.onnx\"\n", "\n", "rtlsim_output_dir = \"output_ipstitch_ooc_rtlsim\"\n", "\n", - "cfg = build.DataflowBuildConfig(\n", + "#Delete previous run results if exist\n", + "if os.path.exists(rtlsim_output_dir):\n", + " shutil.rmtree(rtlsim_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "cfg_stitched_ip = build.DataflowBuildConfig(\n", " output_dir = rtlsim_output_dir,\n", + " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", @@ -404,21 +420,76 @@ " build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE,\n", " build_cfg.DataflowOutputType.OOC_SYNTH,\n", " ]\n", - ")\n", - "\n", - "build.build_dataflow_cfg(model_file, cfg)" + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Building dataflow accelerator from cybsec-mlp-ready.onnx\n", + "Intermediate outputs 
will be generated in /tmp/finn_dev_ubuntu\n", + "Final outputs will be generated in output_ipstitch_ooc_rtlsim\n", + "Build log is at output_ipstitch_ooc_rtlsim/build_dataflow.log\n", + "Running step: step_tidy_up [1/16]\n", + "Running step: step_streamline [2/16]\n", + "Running step: step_convert_to_hls [3/16]\n", + "Running step: step_create_dataflow_partition [4/16]\n", + "Running step: step_target_fps_parallelization [5/16]\n", + "Running step: step_apply_folding_config [6/16]\n", + "Running step: step_generate_estimate_reports [7/16]\n", + "Running step: step_hls_codegen [8/16]\n", + "Running step: step_hls_ipgen [9/16]\n", + "Running step: step_set_fifo_depths [10/16]\n", + "Running step: step_create_stitched_ip [11/16]\n", + "Running step: step_measure_rtlsim_performance [12/16]\n", + "Running step: step_make_pynq_driver [13/16]\n", + "Running step: step_out_of_context_synthesis [14/16]\n", + "Running step: step_synthesize_bitfile [15/16]\n", + "Running step: step_deployment_package [16/16]\n", + "Completed successfully\n", + "CPU times: user 4.76 s, sys: 710 ms, total: 5.47 s\n", + "Wall time: 8min 5s\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_stitched_ip)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Why is e.g. `step_synthesize_bitfile` listed above even though we didn't ask for a bitfile in the output products? This is because we're using the default set of build steps, which includes `step_synthesize_bitfile`. Since its output product is not selected, this step will do nothing." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Among the output products, we will find the accelerator exported as IP:" + "Among the output products, we will find the accelerator exported as a stitched IP block design:" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -427,9 +498,9 @@ "text": [ "all_verilog_srcs.txt\t\t finn_vivado_stitch_proj.xpr\r\n", "finn_vivado_stitch_proj.cache\t ip\r\n", - "finn_vivado_stitch_proj.hbs\t make_project.sh\r\n", - "finn_vivado_stitch_proj.hw\t make_project.tcl\r\n", - "finn_vivado_stitch_proj.ip_user_files vivado.jou\r\n", + "finn_vivado_stitch_proj.hw\t make_project.sh\r\n", + "finn_vivado_stitch_proj.ip_user_files make_project.tcl\r\n", + "finn_vivado_stitch_proj.sim\t vivado.jou\r\n", "finn_vivado_stitch_proj.srcs\t vivado.log\r\n" ] } @@ -447,7 +518,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -472,7 +543,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -480,15 +551,15 @@ "output_type": "stream", "text": [ "{\r\n", - " \"vivado_proj_folder\": \"/tmp/finn_dev_osboxes/synth_out_of_context_wy3b6qf4/results_finn_design_wrapper\",\r\n", - " \"LUT\": 7073.0,\r\n", - " \"FF\": 7534.0,\r\n", + " \"vivado_proj_folder\": \"/tmp/finn_dev_ubuntu/synth_out_of_context_iut077er/results_finn_design_wrapper\",\r\n", + " \"LUT\": 8667.0,\r\n", + " \"FF\": 9063.0,\r\n", " \"DSP\": 0.0,\r\n", - " \"BRAM\": 18.0,\r\n", - " \"WNS\": 0.632,\r\n", + " \"BRAM\": 22.0,\r\n", + " \"WNS\": 0.946,\r\n", " \"\": 0,\r\n", - " \"fmax_mhz\": 106.7463706233988,\r\n", - " \"estimated_throughput_fps\": 1334329.6327924852\r\n", + " \"fmax_mhz\": 110.44842058758559,\r\n", + " 
\"estimated_throughput_fps\": 1725756.5716810247\r\n", "}" ] } @@ -501,12 +572,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In `rtlsim_performance.json` we can find the steady-state throughput and latency for the accelerator, as obtained by rtlsim. If the DRAM bandwidth numbers reported here are below what the hardware platform is capable of (i.e. the accelerator is not memory-bound), you can expect the same steady-state throughput in real hardware." + "In `rtlsim_performance.json` we can find the steady-state throughput and latency for the accelerator, as obtained by rtlsim. If the DRAM bandwidth numbers reported here are below what the hardware platform is capable of (i.e. the accelerator is not memory-bound), you can expect the same steady-state throughput (excluding any software/driver overheads) in real hardware." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -514,14 +585,14 @@ "output_type": "stream", "text": [ "{\r\n", - " \"cycles\": 838,\r\n", - " \"runtime[ms]\": 0.00838,\r\n", - " \"throughput[images/s]\": 954653.9379474939,\r\n", - " \"DRAM_in_bandwidth[Mb/s]\": 71.59904534606204,\r\n", - " \"DRAM_out_bandwidth[Mb/s]\": 0.11933174224343673,\r\n", + " \"cycles\": 643,\r\n", + " \"runtime[ms]\": 0.00643,\r\n", + " \"throughput[images/s]\": 1088646.967340591,\r\n", + " \"DRAM_in_bandwidth[Mb/s]\": 81.64852255054431,\r\n", + " \"DRAM_out_bandwidth[Mb/s]\": 0.13608087091757387,\r\n", " \"fclk[mhz]\": 100.0,\r\n", - " \"N\": 8,\r\n", - " \"latency_cycles\": 229\r\n", + " \"N\": 7,\r\n", + " \"latency_cycles\": 211\r\n", "}" ] } @@ -539,7 +610,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -554,8 +625,8 @@ " \"impl_style\": \"rtl\"\r\n", " },\r\n", " \"StreamingFCLayer_Batch_0\": {\r\n", - " \"PE\": 32,\r\n", - " \"SIMD\": 15,\r\n", + " \"PE\": 16,\r\n", + " \"SIMD\": 40,\r\n", " \"ram_style\": \"auto\",\r\n", " \"resType\": \"lut\",\r\n", " \"mem_mode\": \"decoupled\",\r\n", @@ -565,8 +636,8 @@ " \"impl_style\": \"hls\"\r\n", " },\r\n", " \"StreamingFCLayer_Batch_1\": {\r\n", - " \"PE\": 4,\r\n", - " \"SIMD\": 16,\r\n", + " \"PE\": 1,\r\n", + " \"SIMD\": 64,\r\n", " \"ram_style\": \"auto\",\r\n", " \"resType\": \"lut\",\r\n", " \"mem_mode\": \"decoupled\",\r\n", @@ -576,16 +647,13 @@ " \"impl_style\": \"hls\"\r\n", " },\r\n", " \"StreamingFCLayer_Batch_2\": {\r\n", - " \"PE\": 4,\r\n", - " \"SIMD\": 16,\r\n", + " \"PE\": 1,\r\n", + " \"SIMD\": 64,\r\n", " \"ram_style\": \"auto\",\r\n", " \"resType\": \"lut\",\r\n", " \"mem_mode\": \"decoupled\",\r\n", " \"runtime_writeable_weights\": 0\r\n", " },\r\n", - " \"StreamingDataWidthConverter_Batch_2\": {\r\n", - " \"impl_style\": \"hls\"\r\n", - " },\r\n", " \"StreamingFCLayer_Batch_3\": {\r\n", " \"PE\": 1,\r\n", " \"SIMD\": 1,\r\n", @@ -606,38 +674,78 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Launch a Build: PYNQ Bitfile and Driver <a id=\"build_bitfile_driver\"></a>" + "## (Optional) Launch a Build: PYNQ Bitfile and Driver <a id=\"build_bitfile_driver\"></a>\n", + "\n", + "<font color=\"red\">**Live FINN tutorial:** This section is not included in the hands-on tutorial due to the bitfile synthesis time (15-20 min). 
If you own a PYNQ board, we encourage you to uncomment the cells below to try it out on your own after the tutorial.</font>" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow as build\n", + "import finn.builder.build_dataflow_config as build_cfg\n", + "import os\n", + "import shutil\n", + "\n", + "model_file = \"cybsec-mlp-ready.onnx\"\n", + "\n", + "final_output_dir = \"output_final\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(final_output_dir):\n", + " shutil.rmtree(final_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "cfg = build.DataflowBuildConfig(\n", + " output_dir = final_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " board = \"Pynq-Z1\",\n", + " shell_flow_type = build_cfg.ShellFlowType.VIVADO_ZYNQ,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.BITFILE,\n", + " build_cfg.DataflowOutputType.PYNQ_DRIVER,\n", + " build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Building dataflow accelerator from cybsec-mlp-verified.onnx\n", - "Intermediate outputs will be generated in /tmp/finn_dev_osboxes\n", + "Building dataflow accelerator from cybsec-mlp-ready.onnx\n", + "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n", "Final outputs will be generated in output_final\n", "Build log is at output_final/build_dataflow.log\n", - "Running step: step_tidy_up [1/15]\n", - "Running step: step_streamline [2/15]\n", - "Running step: step_convert_to_hls [3/15]\n", - "Running step: step_create_dataflow_partition [4/15]\n", - "Running step: step_target_fps_parallelization [5/15]\n", - "Running step: step_apply_folding_config [6/15]\n", - "Running step: step_generate_estimate_reports [7/15]\n", - "Running step: step_hls_ipgen [8/15]\n", - "Running step: step_set_fifo_depths [9/15]\n", - "Running step: step_create_stitched_ip [10/15]\n", - "Running step: step_measure_rtlsim_performance [11/15]\n", - "Running step: step_make_pynq_driver [12/15]\n", - "Running step: step_out_of_context_synthesis [13/15]\n", - "Running step: step_synthesize_bitfile [14/15]\n", - "Running step: step_deployment_package [15/15]\n", - "Completed successfully\n" + "Running step: step_tidy_up [1/16]\n", + "Running step: step_streamline [2/16]\n", + "Running step: step_convert_to_hls [3/16]\n", + "Running step: step_create_dataflow_partition [4/16]\n", + "Running step: step_target_fps_parallelization [5/16]\n", + "Running step: step_apply_folding_config [6/16]\n", + "Running step: step_generate_estimate_reports [7/16]\n", + "Running step: step_hls_codegen [8/16]\n", + "Running step: step_hls_ipgen [9/16]\n", + "Running step: step_set_fifo_depths [10/16]\n", + "Running step: step_create_stitched_ip [11/16]\n", + "Running step: step_measure_rtlsim_performance [12/16]\n", + "Running step: step_make_pynq_driver [13/16]\n", + "Running step: step_out_of_context_synthesis [14/16]\n", + "Running step: step_synthesize_bitfile [15/16]\n", + "Running step: step_deployment_package [16/16]\n", + "Completed successfully\n", + "CPU times: user 4.47 s, sys: 766 ms, total: 5.24 s\n", + "Wall time: 22min 13s\n" ] }, { @@ -646,33 +754,14 @@ "0" ] }, - "execution_count": 8, + "execution_count": 18, "metadata": {}, 
"output_type": "execute_result" } ], "source": [ - "import finn.builder.build_dataflow as build\n", - "import finn.builder.build_dataflow_config as build_cfg\n", - "\n", - "model_file = \"cybsec-mlp-verified.onnx\"\n", - "\n", - "final_output_dir = \"output_final\"\n", - "\n", - "cfg = build.DataflowBuildConfig(\n", - " output_dir = final_output_dir,\n", - " target_fps = 1000000,\n", - " synth_clk_period_ns = 10.0,\n", - " board = \"Pynq-Z1\",\n", - " shell_flow_type = build_cfg.ShellFlowType.VIVADO_ZYNQ,\n", - " generate_outputs=[\n", - " build_cfg.DataflowOutputType.BITFILE,\n", - " build_cfg.DataflowOutputType.PYNQ_DRIVER,\n", - " build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,\n", - " ]\n", - ")\n", - "\n", - "build.build_dataflow_cfg(model_file, cfg)" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg)" ] }, { @@ -684,7 +773,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -696,7 +785,7 @@ } ], "source": [ - "! ls {final_output_dir}/bitfile" + "#! ls {final_output_dir}/bitfile" ] }, { @@ -708,7 +797,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -720,7 +809,7 @@ } ], "source": [ - "! ls {final_output_dir}/driver" + "#! ls {final_output_dir}/driver" ] }, { @@ -732,7 +821,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -745,7 +834,7 @@ } ], "source": [ - "! ls {final_output_dir}/report" + "#! ls {final_output_dir}/report" ] }, { @@ -757,7 +846,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -769,7 +858,138 @@ } ], "source": [ - "! ls {final_output_dir}/deploy" + "#! ls {final_output_dir}/deploy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## (Optional) Run on PYNQ board <a id=\"run_on_pynq\"></a>\n", + "\n", + "<font color=\"red\">**Live FINN tutorial:** This section is not included in the hands-on tutorial due to the bitfile synthesis time (15-20 min) of the previous section. If you own a PYNQ board, we encourage you to uncomment the cells below to try it out on your own after the tutorial.</font>\n", + "\n", + "To test the accelerator on the board, we'll put a copy of the dataset and a premade Python script that validates the accuracy into the `driver` folder, then make a zip archive of the whole deployment folder." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "#! cp unsw_nb15_binarized.npz {final_output_dir}/deploy/driver" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "#! cp validate-unsw-nb15.py {final_output_dir}/deploy/driver" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "driver.py\tfinn\t\t unsw_nb15_binarized.npz validate.py\r\n", + "driver_base.py\truntime_weights validate-unsw-nb15.py\r\n" + ] + } + ], + "source": [ + "#! 
ls {final_output_dir}/deploy/driver" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'/workspace/finn/notebooks/end2end_example/cybersecurity/deploy-on-pynq.zip'" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#from shutil import make_archive\n", + "#make_archive('deploy-on-pynq', 'zip', final_output_dir+\"/deploy\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can now download the created zipfile (**File -> Open**, mark the checkbox next to the `deploy-on-pynq.zip` and select Download from the toolbar), then copy it to your PYNQ board (for instance via `scp` or `rsync`). Then, run the following commands **on the PYNQ board** to extract the archive and run the validation:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```shell\n", + "unzip deploy-on-pynq.zip -d finn-cybsec-mlp-demo\n", + "cd finn-cybsec-mlp-demo/driver\n", + "sudo python3.6 -m pip install bitstring\n", + "sudo python3.6 validate-unsw-nb15.py --batchsize 1000\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should see `Final accuracy: 91.868293` at the end. You may have noticed that the validation doesn't *quite* run at 1M inferences per second. This is because of the Python packing/unpacking and data movement overheads. To see this in more detail, the generated driver includes a benchmarking mode that shows the runtime breakdown:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```shell\n", + "sudo python3.6 driver.py --exec_mode throughput_test --bitfile ../bitfile/finn-accel.bit --batchsize 1000\n", + "cat nw_metrics.txt\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{'runtime[ms]': 1.0602474212646484,\n", + " 'throughput[images/s]': 943176.0737575893,\n", + " 'DRAM_in_bandwidth[Mb/s]': 70.7382055318192,\n", + " 'DRAM_out_bandwidth[Mb/s]': 0.9431760737575894,\n", + " 'fclk[mhz]': 100.0,\n", + " 'batch_size': 1000,\n", + " 'fold_input[ms]': 9.679794311523438e-05,\n", + " 'pack_input[ms]': 0.060115814208984375,\n", + " 'copy_input_data_to_device[ms]': 0.002428770065307617,\n", + " 'copy_output_data_from_device[ms]': 0.0005249977111816406,\n", + " 'unpack_output[ms]': 0.3773000240325928,\n", + " 'unfold_output[ms]': 6.818771362304688e-05}```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, the various `pack_input/unpack_output` calls show the overhead of packing/unpacking the inputs/outputs to convert from numpy arrays to the bit-contiguous data representation our accelerator expects. The `copy_input_data_to_device` and `copy_output_data_from_device` indicate the cost of moving the data between the CPU and accelerator memories. These overheads can dominate the execution time when running with small batch sizes.\n", + "\n", + "Finally, we can see that `throughput[images/s]`, which is the pure hardware throughput without any software and data movement overheads, is close to 1M inferences per second." 
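+ ,"\n", + "\n", + "To put that breakdown into perspective, here is a quick back-of-the-envelope check using the example numbers printed above:\n", + "\n", + "```python\n", + "# fraction of total runtime spent in software packing/unpacking (values from the run above)\n", + "runtime_ms = 1.0602474212646484\n", + "pack_ms = 0.060115814208984375\n", + "unpack_ms = 0.3773000240325928\n", + "print(\"pack+unpack: %.1f%% of runtime\" % (100 * (pack_ms + unpack_ms) / runtime_ms))\n", + "```\n", + "\n", + "which comes out to roughly 40% of the batch runtime going to numpy packing/unpacking at this batch size."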
] }, { diff --git a/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py b/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py new file mode 100644 index 0000000000000000000000000000000000000000..622c69c8d0abdf8025b0486c63bf336e4f8675f5 --- /dev/null +++ b/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py @@ -0,0 +1,103 @@ +# Copyright (c) 2020 Xilinx, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of Xilinx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import argparse +from driver import io_shape_dict +from driver_base import FINNExampleOverlay +import numpy as np + + +def make_unsw_nb15_test_batches(bsize, dataset_root): + unsw_nb15_data = np.load(dataset_root + "/unsw_nb15_binarized.npz")["test"][:82000] + test_imgs = unsw_nb15_data[:, :-1] + test_labels = unsw_nb15_data[:, -1] + n_batches = int(test_imgs.shape[0] / bsize) + test_imgs = test_imgs.reshape(n_batches, bsize, -1) + test_labels = test_labels.reshape(n_batches, bsize) + return (test_imgs, test_labels) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Validate top-1 accuracy for FINN-generated accelerator" + ) + parser.add_argument( + "--batchsize", help="number of samples for inference", type=int, default=1000 + ) + parser.add_argument( + "--platform", help="Target platform: zynq-iodma alveo", default="zynq-iodma" + ) + parser.add_argument( + "--bitfile", + help='name of bitfile (i.e. "resizer.bit")', + default="../bitfile/finn-accel.bit", + ) + parser.add_argument( + "--dataset_root", help="dataset root dir for download/reuse", default="." 
+ ) + # parse arguments + args = parser.parse_args() + bsize = args.batchsize + bitfile = args.bitfile + platform = args.platform + dataset_root = args.dataset_root + + print("Loading dataset...") + (test_imgs, test_labels) = make_unsw_nb15_test_batches(bsize, dataset_root) + + ok = 0 + nok = 0 + n_batches = test_imgs.shape[0] + total = n_batches * bsize + + print("Initializing driver, flashing bitfile...") + + driver = FINNExampleOverlay( + bitfile_name=bitfile, + platform=platform, + io_shape_dict=io_shape_dict, + batch_size=bsize, + ) + + n_batches = int(total / bsize) + + print("Starting...") + + for i in range(n_batches): + inp = np.pad(test_imgs[i].astype(np.float32), [(0, 0), (0, 7)], mode="constant") + exp = test_labels[i].astype(np.float32) + inp = 2 * inp - 1 + exp = 2 * exp - 1 + out = driver.execute(inp) + matches = np.count_nonzero(out.flatten() == exp.flatten()) + nok += bsize - matches + ok += matches + print("batch %d / %d : total OK %d NOK %d" % (i + 1, n_batches, ok, nok)) + + acc = 100.0 * ok / (total) + print("Final accuracy: %f" % acc) diff --git a/run-docker.sh b/run-docker.sh index 135f51d4d613c8454862385cc8cad656e620cdbd..99f337c24116d56c2015ae0c21b044699fcb8f7a 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -89,7 +89,9 @@ SCRIPTPATH=$(dirname "$SCRIPT") # the settings below will be taken from environment variables if available, # otherwise the defaults below will be used : ${JUPYTER_PORT=8888} +: ${JUPYTER_PASSWD_HASH=""} : ${NETRON_PORT=8081} +: ${LOCALHOST_URL="localhost"} : ${PYNQ_USERNAME="xilinx"} : ${PYNQ_PASSWORD="xilinx"} : ${PYNQ_BOARD="Pynq-Z1"} @@ -115,7 +117,12 @@ elif [ "$1" = "quicktest" ]; then DOCKER_CMD="quicktest.sh" elif [ "$1" = "notebook" ]; then gecho "Running Jupyter notebook server" - DOCKER_CMD="jupyter notebook --ip=0.0.0.0 --port $JUPYTER_PORT notebooks" + if [ -z "$JUPYTER_PASSWD_HASH" ]; then + JUPYTER_PASSWD_ARG="" + else + JUPYTER_PASSWD_ARG="--NotebookApp.password='$JUPYTER_PASSWD_HASH'" + fi + DOCKER_CMD="jupyter notebook --no-browser --ip=0.0.0.0 --port $JUPYTER_PORT $JUPYTER_PASSWD_ARG notebooks" DOCKER_EXTRA+="-e JUPYTER_PORT=$JUPYTER_PORT " DOCKER_EXTRA+="-e NETRON_PORT=$NETRON_PORT " DOCKER_EXTRA+="-p $JUPYTER_PORT:$JUPYTER_PORT " @@ -184,6 +191,7 @@ DOCKER_EXEC+="-v $FINN_HOST_BUILD_DIR:$FINN_HOST_BUILD_DIR " DOCKER_EXEC+="-v $FINN_SSH_KEY_DIR:/home/$DOCKER_UNAME/.ssh " DOCKER_EXEC+="-e FINN_BUILD_DIR=$FINN_HOST_BUILD_DIR " DOCKER_EXEC+="-e FINN_ROOT="/workspace/finn" " +DOCKER_EXEC+="-e LOCALHOST_URL=$LOCALHOST_URL " DOCKER_EXEC+="-e VIVADO_IP_CACHE=$VIVADO_IP_CACHE " DOCKER_EXEC+="-e PYNQ_BOARD=$PYNQ_BOARD " DOCKER_EXEC+="-e PYNQ_IP=$PYNQ_IP " diff --git a/src/finn/util/visualization.py b/src/finn/util/visualization.py index 3eb7e55e307c380ecc6712ff4d0c74577a9e7a43..d8547a32e06aa3b688601aa550abb2c50bcf77d6 100644 --- a/src/finn/util/visualization.py +++ b/src/finn/util/visualization.py @@ -27,6 +27,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import inspect +import os import netron from IPython.display import IFrame @@ -34,6 +35,8 @@ from IPython.display import IFrame def showSrc(what): print("".join(inspect.getsourcelines(what)[0])) + def showInNetron(model_filename): netron.start(model_filename, address=("0.0.0.0", 8081)) - return IFrame(src="http://0.0.0.0:8081/", width="100%", height=400) \ No newline at end of file + localhost_url = os.getenv("LOCALHOST_URL", default="localhost") + return IFrame(src="http://%s:8081/" % localhost_url, width="100%", height=400)