diff --git a/.gitignore b/.gitignore
index 225fb5cfa3df45124797da425df14974308b90c2..126321cf4deccaa01ab0f2025460e53519d4c06f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,3 +96,6 @@ MANIFEST
 
 # generated files as part of end2end notebooks
 /notebooks/end2end_example/**/*.onnx
+
+# downloaded dep repos
+/deps/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 143514b36ba31cb2b292f3a1961187709798efbf..f8f12a0269fb124bed7efa93b1826a66cfca5982 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -32,7 +32,7 @@ default_language_version:
     python: python3
 
 repos:
-- repo: git://github.com/pre-commit/pre-commit-hooks
+- repo: https://github.com/pre-commit/pre-commit-hooks
   rev: v3.2.0
   hooks:
   - id: trailing-whitespace
@@ -50,12 +50,12 @@ repos:
   - id: mixed-line-ending
     args: ['--fix=no']
 
-- repo: git://github.com/PyCQA/isort
+- repo: https://github.com/PyCQA/isort
   rev: 5.5.3
   hooks:
   - id: isort
 
-- repo: git://github.com/psf/black
+- repo: https://github.com/psf/black
   rev: stable
   hooks:
   - id: black
diff --git a/README.md b/README.md
index f36eac3a911315c260f1849a0406a9a467f0d53f..37c98dcb34ce07c0948a4ff1096e422dd6536245 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
 <img align="left" src="https://raw.githubusercontent.com/Xilinx/finn/github-pages/docs/img/finn-stack.png" alt="drawing" style="margin-right: 20px" width="250"/>
 
-[![Gitter](https://badges.gitter.im/xilinx-finn/community.svg)](https://gitter.im/xilinx-finn/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+[![GitHub Discussions](https://img.shields.io/badge/discussions-join-green)](https://github.com/Xilinx/finn/discussions)
 [![ReadTheDocs](https://readthedocs.org/projects/finn/badge/?version=latest&style=plastic)](http://finn.readthedocs.io/)
 
 FINN is an experimental framework from Xilinx Research Labs to explore deep neural network
diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn
index 84078aec59856305062b9c0f50d17a2450fe3bbf..71f41acbb618a8cde9bb8ab07cb2cf5a3be90544 100644
--- a/docker/Dockerfile.finn
+++ b/docker/Dockerfile.finn
@@ -91,14 +91,14 @@ RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg
 
 # git-based Python repo dependencies
 # these are installed in editable mode for easier co-development
-ARG FINN_BASE_COMMIT="7cd7e00ba6709a85073ba22beeb5827e684fe085"
+ARG FINN_BASE_COMMIT="585bccad29ba6416511256c732a2c1da21d00bdf"
 ARG QONNX_COMMIT="9f9eff95227cc57aadc6eafcbd44b7acda89f067"
 ARG FINN_EXP_COMMIT="af6102769226b82b639f243dc36f065340991513"
 ARG BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03"
 ARG PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e"
 ARG CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4"
-ARG HLSLIB_COMMIT="da7b47cd65a967b76554a0dda74c097803c5e550"
-ARG OMX_COMMIT="1dfc4aa2f2895632742cd5751520c6b472feb74e"
+ARG HLSLIB_COMMIT="269410aa217389fc02e69bd7de210cd026f10971"
+ARG OMX_COMMIT="a97f0bf145a2f7e57ca416ea76c9e45df4e9aa37"
 ARG AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b"
 
 # finn-base
@@ -129,7 +129,7 @@ RUN git -C /workspace/cnpy checkout $CNPY_COMMIT
 RUN git clone https://github.com/Xilinx/finn-hlslib.git /workspace/finn-hlslib
 RUN git -C /workspace/finn-hlslib checkout $HLSLIB_COMMIT
 # oh-my-xilinx
-RUN git clone https://bitbucket.org/maltanar/oh-my-xilinx.git /workspace/oh-my-xilinx
+RUN git clone https://github.com/maltanar/oh-my-xilinx.git /workspace/oh-my-xilinx
 RUN git -C /workspace/oh-my-xilinx checkout $OMX_COMMIT
 # board files
 RUN cd /tmp; \
diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh
index 699fef53cef59278881fb1205797080915c3c969..788e6bf51b4c0748883be371f4dd77941ef2c99d 100644
--- a/docker/finn_entrypoint.sh
+++ b/docker/finn_entrypoint.sh
@@ -28,7 +28,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-export FINN_ROOT=/workspace/finn
+export FINN_ROOT=/workspace
 export HOME=/tmp/home_dir
 export SHELL=/bin/bash
 export LANG="en_US.UTF-8"
@@ -54,11 +54,11 @@ recho () {
   echo -e "${RED}ERROR: $1${NC}"
 }
 
-if [ -f "$FINN_ROOT/setup.py" ];then
+if [ -f "$FINN_ROOT/finn/setup.py" ];then
   # run pip install for finn
-  pip install --user -e $FINN_ROOT
+  pip install --user -e $FINN_ROOT/finn
 else
-  recho "Unable to find FINN source code in /workspace/finn"
+  recho "Unable to find FINN source code in $FINN_ROOT/finn"
   recho "Ensure you have passed -v <path-to-finn-repo>:/workspace/finn to the docker run command"
   exit -1
 fi
diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index f3211941890d634b12142ed13c0f0cf49a9003d8..dab0833166234fc8ec9f123adf8c6157acdf5d5d 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -1,108 +1,46 @@
-pipeline {
-    agent any
-    parameters {
-        string(name: 'FINN_CI_BRANCH', defaultValue: '', description: 'FINN branch to build')
-        string(name: 'FINN_XILINX_PATH', defaultValue: '', description: 'Path to Xilinx tool installation')
-        string(name: 'FINN_XILINX_VERSION', defaultValue: '2020.1', description: 'Xilinx tool version')
-        string(name: 'PYNQ_BOARD', defaultValue: 'Pynq-Z1', description: 'PYNQ board type')
-        string(name: 'PYNQ_IP', defaultValue: '', description: 'PYNQ board IP address')
-        string(name: 'PYNQ_USERNAME', defaultValue: 'xilinx', description: 'PYNQ board username')
-        string(name: 'PYNQ_PASSWORD', defaultValue: 'xilinx', description: 'PYNQ board password')
-        string(name: 'PYNQ_TARGET_DIR', defaultValue: '/home/xilinx/finn', description: 'PYNQ board target deployment directory')
-        string(name: 'NUM_DEFAULT_WORKERS', defaultValue: '1', description: 'Number of cores for parallel transformations')
-        // main test: everything except rtlsim and end2end tests, parallel run with xdist, no parallel transformations to save on memory
-        string(name: 'DOCKER_CMD_MAIN', defaultValue: """python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n auto" """, description: 'Main test command')
-        // rtlsim tests: parallel run with pytest-parallel, no parallel transformations to save on memory
-        string(name: 'DOCKER_CMD_RTLSIM', defaultValue: """python setup.py test --addopts "-k rtlsim --workers auto" """, description: 'rtlsim test command')
-        // end2end tests: no parallel testing, use NUM_DEFAULT_WORKERS for parallel transformations
-        string(name: 'DOCKER_CMD_END2END', defaultValue: """python setup.py test --addopts "-k end2end" """, description: 'end2end test command')
-        // allow specifying where to mount the cloned folder from, since Jenkins and FINN may be running in separate containers
-        string(name: 'WORKSPACE_MOUNT', defaultValue: '/var/jenkins_home/workspace/finn', description: 'Path to Jenkins workspace mount')
+node {
+    def app
+    stage('Clone repository') {
+        /* Let's make sure we have the repository cloned to our workspace */
+        checkout scm
     }
-    environment {
-        DOCKER_TAG='finn_ci:$BUILD_ID'
-        DOCKER_INST_NAME='finn_ci'
-        BUILD_PATH='/tmp/finn_ci'
-        VIVADO_PATH=${params.FINN_XILINX_PATH}/Vivado/${params.FINN_XILINX_VERSION}
-        VITIS_PATH=${params.FINN_XILINX_PATH}/Vitis/${params.FINN_XILINX_VERSION}
-    }
-    stages {
-        stage("Clone") {
-            steps {
-                git branch: "${params.FINN_CI_BRANCH}", url: 'https://github.com/Xilinx/finn.git'
-            }
-        }
-      stage('Build') {
-            steps {
-                sh """
-                docker build -t $DOCKER_TAG -f docker/Dockerfile.finn_ci \
-                --build-arg BUILD_PATH=$BUILD_PATH \
-                .
-                """
+    withEnv([
+        "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64",
+        "FINN_XILINX_VERSION=2022.1",
+        "FINN_DOCKER_TAG=xilinx/finn:jenkins",
+        "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci",
+        "PLATFORM_REPO_PATHS=/opt/xilinx/dsa"
+    ]){
+        parallel firstBranch: {
+            stage('Brevitas export') {
+                dir("${env.WORKSPACE}") {
+                sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export")
+                }
             }
-        }
-        stage('test-main') {
-            steps {
-                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-                sh """
-                docker run --init \
-                --hostname $DOCKER_INST_NAME \
-                -v ${params.WORKSPACE_MOUNT}:/workspace/finn \
-                -v ${params.FINN_XILINX_PATH}:${params.FINN_XILINX_PATH}:ro \
-                -e NUM_DEFAULT_WORKERS=1 \
-                -e FINN_INST_NAME=$DOCKER_INST_NAME \
-                -e VIVADO_PATH=$VIVADO_PATH \
-                -e VITIS_PATH=$VITIS_PATH \
-                -e PYNQ_BOARD=${params.PYNQ_BOARD} \
-                -e PYNQ_IP=${params.PYNQ_IP} \
-                -e PYNQ_USERNAME=${params.PYNQ_USERNAME} \
-                -e PYNQ_PASSWORD=${params.PYNQ_PASSWORD} \
-                -e PYNQ_TARGET_DIR=${params.PYNQ_TARGET_DIR} \
-                $DOCKER_TAG ${params.DOCKER_CMD_MAIN}
-                """}
+        }, secondBranch: {
+            stage('Streamlining transformations') {
+                dir("${env.WORKSPACE}") {
+                sh("bash run-docker.sh python setup.py test --addopts -mstreamline")
+                }
+            }
+        }, thirdBranch: {
+            stage('Util functions') {
+                dir("${env.WORKSPACE}") {
+                sh("bash run-docker.sh python setup.py test --addopts -mutil")
+                }
             }
-        }
-        stage('test-rtlsim') {
-            steps {
-                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-                sh """
-                docker run --init \
-                --hostname $DOCKER_INST_NAME \
-                -v ${params.WORKSPACE_MOUNT}:/workspace/finn \
-                -v $VIVADO_PATH:$VIVADO_PATH:ro \
-                -e NUM_DEFAULT_WORKERS=1 \
-                -e FINN_INST_NAME=$DOCKER_INST_NAME \
-                -e VIVADO_PATH=$VIVADO_PATH \
-                -e VITIS_PATH=$VITIS_PATH \
-                -e PYNQ_BOARD=${params.PYNQ_BOARD} \
-                -e PYNQ_IP=${params.PYNQ_IP} \
-                -e PYNQ_USERNAME=${params.PYNQ_USERNAME} \
-                -e PYNQ_PASSWORD=${params.PYNQ_PASSWORD} \
-                -e PYNQ_TARGET_DIR=${params.PYNQ_TARGET_DIR} \
-                $DOCKER_TAG ${params.DOCKER_CMD_RTLSIM}
-                """}
+        }, fourthBranch: {
+            stage('General transformations') {
+                dir("${env.WORKSPACE}") {
+                sh("bash run-docker.sh python setup.py test --addopts -mtransform")
+                }
             }
-        }
-        stage('test-end2end') {
-            steps {
-                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-                sh """
-                docker run --init \
-                --hostname $DOCKER_INST_NAME \
-                -v ${params.WORKSPACE_MOUNT}:/workspace/finn \
-                -v $VIVADO_PATH:$VIVADO_PATH:ro \
-                -e NUM_DEFAULT_WORKERS=${params.NUM_DEFAULT_WORKERS} \
-                -e FINN_INST_NAME=$DOCKER_INST_NAME \
-                -e VIVADO_PATH=$VIVADO_PATH \
-                -e VITIS_PATH=$VITIS_PATH \
-                -e PYNQ_BOARD=${params.PYNQ_BOARD} \
-                -e PYNQ_IP=${params.PYNQ_IP} \
-                -e PYNQ_USERNAME=${params.PYNQ_USERNAME} \
-                -e PYNQ_PASSWORD=${params.PYNQ_PASSWORD} \
-                -e PYNQ_TARGET_DIR=${params.PYNQ_TARGET_DIR} \
-                $DOCKER_TAG ${params.DOCKER_CMD_END2END}
-                """ }
+        }, fifthBranch: {
+            stage('Fpgadataflow transformations and simulations') {
+                dir("${env.WORKSPACE}") {
+                sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow")
+                }
             }
-        }
+        }
     }
 }
diff --git a/docker/quicktest.sh b/docker/quicktest.sh
index b4ad37232fa69754a86e9064d7592d7474e8617e..f625f2b1ef722f386180a8409a9eb9e759a2f3b6 100755
--- a/docker/quicktest.sh
+++ b/docker/quicktest.sh
@@ -2,7 +2,7 @@
 
 : ${PYTEST_PARALLEL=auto}
 
-cd $FINN_ROOT
+cd $FINN_ROOT/finn
 # check if command line argument is empty or not present
 if [ -z $1 ]; then
   echo "Running quicktest: not (vivado or slow or board) with pytest-xdist"
diff --git a/docs/finn/command_line.rst b/docs/finn/command_line.rst
index ccb891a0ab42eebdd85f10c14384aaa217e8ed8b..54ffca9430a57ed4513ce822afbe0f1642b77404 100644
--- a/docs/finn/command_line.rst
+++ b/docs/finn/command_line.rst
@@ -186,20 +186,23 @@ This is possible by using the `build_custom` entry as follows:
 outside the FINN repo folder for cleaner separation. Let's call this folder
 ``custom_build_dir``.
 
-2. Create a ``custom_build_dir/build.py`` file that will perform the build when
-executed. You should also put any ONNX model(s) or other Python modules you
-may want to include in your build flow in this folder (so that they get mounted
-into the Docker container while building). Besides the filename and data placement,
+2. Create one or more Python files under this directory that perform the build(s)
+you would like when executed, for instance ``custom_build_dir/build.py`` and
+``custom_build_dir/build_quick.py``.
+You should also put any ONNX model(s) or other
+Python modules you may want to include in your build flow in this folder (so that they get
+mounted into the Docker container while building). Besides the data placement,
 you have complete freedom on how to implement the build flow here, including
 calling the steps from the simple dataflow build mode above,
 making calls to FINN library functions, preprocessing and altering models, building several variants etc.
-You can find a basic example of build.py under ``src/finn/qnn-data/build_dataflow/build.py``.
+You can find a basic example of a build flow under ``src/finn/qnn-data/build_dataflow/build.py``.
 
-You can launch the custom build flow using:
+You can launch the desired custom build flow using:
 
 ::
 
- ./run-docker.sh build_custom <path/to/custom_build_dir/>
+ ./run-docker.sh build_custom <path/to/custom_build_dir> <name-of-build-flow>
 
 This will mount the specified folder into the FINN Docker container and launch
-your ``build.py``.
+the build flow. If ``<name-of-build-flow>`` is not specified it will default to ``build``
+and thus execute ``build.py``. If it is specified, it will be ``<name-of-build-flow>.py``.
diff --git a/fetch-repos.sh b/fetch-repos.sh
new file mode 100755
index 0000000000000000000000000000000000000000..50ca89e459b3d93c835049fe9e9c1b45571b5a52
--- /dev/null
+++ b/fetch-repos.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# Copyright (c) 2020-2022, Advanced Micro Devices
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of FINN nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FINN_BASE_COMMIT="585bccad29ba6416511256c732a2c1da21d00bdf"
+QONNX_COMMIT="9f9eff95227cc57aadc6eafcbd44b7acda89f067"
+FINN_EXP_COMMIT="af6102769226b82b639f243dc36f065340991513"
+BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03"
+PYVERILATOR_COMMIT="0c3eb9343500fc1352a02c020a736c8c2db47e8e"
+CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4"
+HLSLIB_COMMIT="c6cd928bc6f7e2e41c4d6a0376ad5c3ebe9d2d82"
+OMX_COMMIT="a97f0bf145a2f7e57ca416ea76c9e45df4e9aa37"
+AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b"
+
+FINN_BASE_URL="https://github.com/Xilinx/finn-base.git"
+QONNX_URL="https://github.com/fastmachinelearning/qonnx.git"
+FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git"
+BREVITAS_URL="https://github.com/Xilinx/brevitas.git"
+PYVERILATOR_URL="https://github.com/maltanar/pyverilator.git"
+CNPY_URL="https://github.com/rogersce/cnpy.git"
+HLSLIB_URL="https://github.com/Xilinx/finn-hlslib.git"
+OMX_URL="https://github.com/maltanar/oh-my-xilinx.git"
+AVNET_BDF_URL="https://github.com/Avnet/bdf.git"
+
+FINN_BASE_DIR="finn-base"
+QONNX_DIR="qonnx"
+FINN_EXP_DIR="finn-experimental"
+BREVITAS_DIR="brevitas"
+PYVERILATOR_DIR="pyverilator"
+CNPY_DIR="cnpy"
+HLSLIB_DIR="finn-hlslib"
+OMX_DIR="oh-my-xilinx"
+AVNET_BDF_DIR="avnet-bdf"
+
+# absolute path to this script, e.g. /home/user/bin/foo.sh
+SCRIPT=$(readlink -f "$0")
+# absolute path this script is in, thus /home/user/bin
+SCRIPTPATH=$(dirname "$SCRIPT")
+
+fetch_repo() {
+    # URL for git repo to be cloned
+    REPO_URL=$1
+    # commit hash for repo
+    REPO_COMMIT=$2
+    # directory to clone to under deps/
+    REPO_DIR=$3
+    # absolute path for the repo local copy
+    CLONE_TO=$SCRIPTPATH/deps/$REPO_DIR
+
+    # clone repo if dir not found
+    if [ ! -d "$CLONE_TO" ]; then
+        git clone $REPO_URL $CLONE_TO
+    fi
+    # verify and try to pull repo if not at correct commit
+    CURRENT_COMMIT=$(git -C $CLONE_TO rev-parse HEAD)
+    if [ $CURRENT_COMMIT != $REPO_COMMIT ]; then
+        git -C $CLONE_TO pull
+        # checkout the expected commit
+        git -C $CLONE_TO checkout $REPO_COMMIT
+    fi
+    # verify one last time
+    CURRENT_COMMIT=$(git -C $CLONE_TO rev-parse HEAD)
+    if [ $CURRENT_COMMIT == $REPO_COMMIT ]; then
+        echo "Successfully checked out $REPO_DIR at commit $CURRENT_COMMIT"
+    else
+        echo "Could not check out $REPO_DIR. Check your internet connection and try again."
+    fi
+}
+
+fetch_repo $FINN_BASE_URL $FINN_BASE_COMMIT $FINN_BASE_DIR
+fetch_repo $QONNX_URL $QONNX_COMMIT $QONNX_DIR
+fetch_repo $FINN_EXP_URL $FINN_EXP_COMMIT $FINN_EXP_DIR
+fetch_repo $BREVITAS_URL $BREVITAS_COMMIT $BREVITAS_DIR
+fetch_repo $PYVERILATOR_URL $PYVERILATOR_COMMIT $PYVERILATOR_DIR
+fetch_repo $CNPY_URL $CNPY_COMMIT $CNPY_DIR
+fetch_repo $HLSLIB_URL $HLSLIB_COMMIT $HLSLIB_DIR
+fetch_repo $OMX_URL $OMX_COMMIT $OMX_DIR
+fetch_repo $AVNET_BDF_URL $AVNET_BDF_COMMIT $AVNET_BDF_DIR
+
+# TODO download extra Pynq board files and extract if needed
diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
index 2c9f4a99ed3edd05a8e8d32db2fe6bcdad204716..69ac1f7717f281a43b7d6215eee91e8d3e1d9478 100644
--- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
+++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
@@ -103,27 +103,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "--2021-10-12 15:49:17--  https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1\n",
-      "Resolving zenodo.org (zenodo.org)... 137.138.76.77\n",
-      "Connecting to zenodo.org (zenodo.org)|137.138.76.77|:443... connected.\n",
-      "HTTP request sent, awaiting response... 200 OK\n",
-      "Length: 13391907 (13M) [application/octet-stream]\n",
-      "Saving to: ‘unsw_nb15_binarized.npz’\n",
-      "\n",
-      "unsw_nb15_binarized 100%[===================>]  12.77M  3.56MB/s    in 3.7s    \n",
-      "\n",
-      "2021-10-12 15:49:22 (3.44 MB/s) - ‘unsw_nb15_binarized.npz’ saved [13391907/13391907]\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "! wget -O unsw_nb15_binarized.npz https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1"
    ]
@@ -137,18 +119,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Samples in each set: train = 175341, test = 82332\n",
-      "Shape of one input sample: torch.Size([593])\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import numpy as np\n",
     "from torch.utils.data import TensorDataset\n",
@@ -220,6 +193,33 @@
     "        break"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Define a PyTorch Device <a id='define_pytorch_device'></a> \n",
+    "\n",
+    "GPUs can significantly speed-up training of deep neural networks. We check for availability of a GPU and if so define it as target device."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Target device: cuda\n"
+     ]
+    }
+   ],
+   "source": [
+    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+    "print(\"Target device: \" + str(device))"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -236,7 +236,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -258,7 +258,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -282,7 +282,9 @@
     "      nn.Dropout(0.5),\n",
     "      QuantReLU(bit_width=act_bit_width),\n",
     "      QuantLinear(hidden3, num_classes, bias=True, weight_bit_width=weight_bit_width)\n",
-    ")\n"
+    ")\n",
+    "\n",
+    "model.to(device)"
    ]
   },
   {
@@ -302,7 +304,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -313,6 +315,7 @@
     "    \n",
     "    for i, data in enumerate(train_loader, 0):        \n",
     "        inputs, target = data\n",
+    "        inputs, target = inputs.to(device), target.to(device)\n",
     "        optimizer.zero_grad()   \n",
     "                \n",
     "        # forward pass\n",
@@ -324,14 +327,14 @@
     "        optimizer.step()\n",
     "        \n",
     "        # keep track of loss value\n",
-    "        losses.append(loss.data.numpy()) \n",
+    "        losses.append(loss.data.cpu().numpy()) \n",
     "           \n",
     "    return losses"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -347,12 +350,13 @@
     "    with torch.no_grad():\n",
     "        for data in test_loader:\n",
     "            inputs, target = data\n",
+    "            inputs, target = inputs.to(device), target.to(device)\n",
     "            output_orig = model(inputs.float())\n",
     "            # run the output through sigmoid\n",
     "            output = torch.sigmoid(output_orig)  \n",
     "            # compare against a threshold of 0.5 to generate 0/1\n",
-    "            pred = (output.detach().numpy() > 0.5) * 1\n",
-    "            target = target.float()\n",
+    "            pred = (output.detach().cpu().numpy() > 0.5) * 1\n",
+    "            target = target.cpu().float()\n",
     "            y_true.extend(target.tolist()) \n",
     "            y_pred.extend(pred.reshape(-1).tolist())\n",
     "        \n",
@@ -384,7 +388,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -402,18 +406,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
     "# loss criterion and optimizer\n",
-    "criterion = nn.BCEWithLogitsLoss()\n",
+    "criterion = nn.BCEWithLogitsLoss().to(device)\n",
     "optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 13,
    "metadata": {
     "scrolled": true
    },
@@ -422,7 +426,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Training loss = 0.132918 test accuracy = 0.798341: 100%|██████████| 10/10 [00:44<00:00,  4.45s/it]\n"
+      "Training loss = 0.131165 test accuracy = 0.809102: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10/10 [02:24<00:00, 14.43s/it]\n"
      ]
     }
    ],
@@ -450,14 +454,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 14,
    "metadata": {
     "scrolled": true
    },
    "outputs": [
     {
      "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEWCAYAAABxMXBSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAofElEQVR4nO3de3Rd5X3m8e+jo5slSzq2JRMsHWMbTIi5SLSGXEsTSlpIO8C0uUCbhLRpmXRKmpY2DWlmpR2mWSuFTpJ2SjowJSSZkFJCksaTQsiNQNKEBBOMb9yM8N1g+Spbsu6/+eNsiSMhyTq2js+R9HzW0tLe776c3z4herz3u/e7FRGYmZlNVVmxCzAzs5nFwWFmZnlxcJiZWV4cHGZmlhcHh5mZ5cXBYWZmeXFwmJ0ASQ9Ium66182zhjdL2jnd+zU7nvJiF2B2qkg6mjNbA/QCg8n8f4mIu6e6r4i4ohDrms0EDg6bMyJi/vC0pK3A70fEd8euJ6k8IgZOZW1mM4kvVdmcN3zJR9JHJL0I3CVpgaRvSuqQdDCZbsnZ5geSfj+Zfp+kH0n6u2TdFyRdcYLrLpf0iKQjkr4r6TZJX5ricbwm+axDkjZJujJn2dskbU72u0vSnyftjcmxHZJ0QNIPJfnvgk3K/4GYZb0KWAicAVxP9v8bdyXzS4FjwD9Osv1rgWeARuAW4E5JOoF1vwz8DFgE/DXwnqkUL6kC+H/At4HFwAeBuyW9OlnlTrKX4+qA84DvJ+1/BuwEmoDTgL8EPA6RTcrBYZY1BPxVRPRGxLGI2B8RX42I7og4AnwC+OVJtt8WEf8nIgaBLwCnk/1DPOV1JS0FLgI+HhF9EfEjYM0U638dMB/4ZLLt94FvAtcmy/uBVZLqI+JgRPw8p/104IyI6I+IH4YHsLPjcHCYZXVERM/wjKQaSbdL2iapE3gESEtKTbD9i8MTEdGdTM7Pc90lwIGcNoAdU6x/CbAjIoZy2rYBzcn0bwFvA7ZJeljS65P2W4EtwLcltUu6aYqfZ3OYg8Msa+y/sv8MeDXw2oioBy5J2ie6/DQd9gALJdXktGWmuO1uIDOmf2IpsAsgIh6LiKvIXsb6N+DepP1IRPxZRKwArgRulPQrJ3cYNts5OMzGV0e2X+OQpIXAXxX6AyNiG7AW+GtJlclZwX+a4uY/BbqBv5BUIenNybb3JPv6HUkNEdEPdJK9NIek35B0VtLHcpjs7clD436CWcLBYTa+zwDzgH3Ao8C3TtHn/g7wemA/8DfAv5J93mRSEdFHNiiuIFvzZ4H3RsTTySrvAbYml90+kHwOwErgu8BR4CfAZyPioWk7GpuV5H4ws9Il6V+BpyOi4Gc8ZlPlMw6zEiLpIklnSiqTdDlwFdk+CbOS4SfHzUrLq4CvkX2OYyfwhxHxRHFLMhvNl6rMzCwvvlRlZmZ5mROXqhobG2PZsmXFLsPMbEZ5/PHH90VE09j2OREcy5YtY+3atcUuw8xsRpG0bbx2X6oyM7O8ODjMzCwvDg4zM8uLg8PMzPLi4DAzs7w4OMzMLC8ODjMzy4uDYxLfWLeLLz067m3MZmZzloNjEt/a+CJ3PNJe7DLMzEqKg2MSrZk02w90c6Crr9ilmJmVDAfHJFpb0gA8ueNQUeswMyslDo5JXNDSQJlgnYPDzGyEg2MStVXlrFxcx5M7DxW7FDOzkuHgOI62TJondxzCL7wyM8tycBxHaybNwe5+th/oLnYpZmYloaDBIelySc9I2iLppnGWf0DSBknrJP1I0qqcZR9NtntG0q9NdZ/TrTXTALifw8xsWMGCQ1IKuA24AlgFXJsbDIkvR8T5EdEG3AJ8Ktl2FXANcC5wOfBZSakp7nNavfq0OqoryhwcZmaJQp5xXAxsiYj2iOgD7gGuyl0hIjpzZmuB4Y6Eq4B7IqI3Il4AtiT7O+4+p1t5qozzmxt8S66ZWaKQwdEM7MiZ35m0jSLpjyQ
9T/aM44+Ps+2U9pns93pJayWt7ejoOOGDgOzzHBt3d9I/OHRS+zEzmw2K3jkeEbdFxJnAR4D/No37vSMiVkfE6qamV7xrPS9tS9P0DQzx9J4j01SdmdnMVcjg2AVkcuZbkraJ3ANcfZxt893ntBh+gnydn+cwMytocDwGrJS0XFIl2c7uNbkrSFqZM/vrwHPJ9BrgGklVkpYDK4GfTWWfhdCyYB6LaitZt/1QoT/KzKzklRdqxxExIOkG4EEgBXwuIjZJuhlYGxFrgBskXQb0AweB65JtN0m6F9gMDAB/FBGDAOPts1DHMExS9kFAn3GYmRUuOAAi4n7g/jFtH8+Z/tAk234C+MRU9nkqtGbSfP+ZvXT29FNfXXGqP97MrGQUvXN8pmjLpImADTsPF7sUM7OicnBM0QUtfoLczAwcHFOWrqlkeWOtHwQ0sznPwZGHtkyadR4p18zmOAdHHlpbGth7pJcXO3uKXYqZWdE4OPLQmkkDfpWsmc1tDo48rFpST0VKPOHgMLM5zMGRh6ryFKtOr/cZh5nNaQ6OPLVm0mzYeZjBIXeQm9nc5ODIU1smTVffIFv2Hi12KWZmReHgyJM7yM1srnNw5Gn5olrqq8s9xLqZzVkOjjyVlYnWTNpDrJvZnOXgOAGtLWmeeekIx/oGi12Kmdkp5+A4AW2ZNINDwcbdHinXzOYeB8cJuCCTHSnXHeRmNhc5OE7A4rpqmtPzPMS6mc1JDo4TNDxSrpnZXOPgOEGtmQZ2HjzGvqO9xS7FzOyUKmhwSLpc0jOStki6aZzlN0raLGm9pO9JOiNpf4ukdTk/PZKuTpZ9XtILOcvaCnkME2ltSQOw3s9zmNkcU7DgkJQCbgOuAFYB10paNWa1J4DVEXEBcB9wC0BEPBQRbRHRBlwKdAPfztnuw8PLI2JdoY5hMue3NFAm/DyHmc05hTzjuBjYEhHtEdEH3ANclbtCEhDdyeyjQMs4+3k78EDOeiWhprKcs0+rY91O35JrZnNLIYOjGdiRM78zaZvI+4EHxmm/BviXMW2fSC5vfVpS1Xg7k3S9pLWS1nZ0dORT95S1ZdI86VfJmtkcUxKd45LeDawGbh3TfjpwPvBgTvNHgXOAi4CFwEfG22dE3BERqyNidVNTU0HqbsukOXysn637S+pkyMysoAoZHLuATM58S9I2iqTLgI8BV0bE2FuU3gl8PSL6hxsiYk9k9QJ3kb0kVhQeKdfM5qJCBsdjwEpJyyVVkr3ktCZ3BUkXAreTDY294+zjWsZcpkrOQpAk4Gpg4/SXPjVnn1ZHTWXKz3OY2ZxSXqgdR8SApBvIXmZKAZ+LiE2SbgbWRsQaspem5gNfyeYA2yPiSgBJy8iesTw8Ztd3S2oCBKwDPlCoYzieVJk4r7nBwWFmc0rBggMgIu4H7h/T9vGc6csm2XYr43SmR8Sl01jiSWvLpPn8f2ylb2CIyvKS6DIyMyso/6U7SW2ZNH2DQzy1p7PYpZiZnRIOjpM00kHuJ8jNbI5wcJykJQ3VNM6vcj+Hmc0ZDo6TJMkj5ZrZnOLgmAZtmQbaO7o4fKz/+Cubmc1wDo5pMNzPscHjVpnZHODgmAYXJEOsr9txsLiFmJmdAg6OadAwr4IVTbWs2+EzDjOb/Rwc02S4g9wj5ZrZbOfgmCZtmTT7jvay+3BPsUsxMysoB8c0GX6VrEfKNbPZzsExTV5zej2VqTIHh5nNeg6OaVJZXsaqJfU84eAws1nOwTGN2jJpNuw8zMDgULFLMTMrGAfHNGrLpDnWP8hze48WuxQzs4JxcEwjv0rWzOYCB8c0WraohoZ5FR5i3cxmNQfHNJJEaybNE9sPFbsUM7OCcXBMs7aWBp596QjdfQPFLsXMrCAKGhySLpf0jKQtkm4aZ/mNkjZLWi/pe5LOyFk2KGld8rMmp325pJ8m+/xXSZWFPIZ8tS1NMxSwcZdfJWtms1PBgkNSCrgNuAJYBVwradWY1Z4AVkfEBcB9wC0
5y45FRFvyc2VO+98Cn46Is4CDwPsLdQwnwiPlmtlsV8gzjouBLRHRHhF9wD3AVbkrRMRDEdGdzD4KtEy2Q0kCLiUbMgBfAK6ezqJPVuP8KloWzONJj5RrZrNUIYOjGdiRM78zaZvI+4EHcuarJa2V9Kikq5O2RcChiBjuQJhwn5KuT7Zf29HRcUIHcKL8Klkzm81KonNc0ruB1cCtOc1nRMRq4LeBz0g6M599RsQdEbE6IlY3NTVNY7XH15ZJs+vQMTqO9J7SzzUzOxUKGRy7gEzOfEvSNoqky4CPAVdGxMhf2ojYlfxuB34AXAjsB9KSyifbZ7H5QUAzm80KGRyPASuTu6AqgWuANbkrSLoQuJ1saOzNaV8gqSqZbgTeCGyO7FuSHgLenqx6HfCNAh7DCTlvSQOpMvlBQDOblQoWHEk/xA3Ag8BTwL0RsUnSzZKG75K6FZgPfGXMbbevAdZKepJsUHwyIjYnyz4C3ChpC9k+jzsLdQwnal5lilefVud+DjOblcqPv8qJi4j7gfvHtH08Z/qyCbb7MXD+BMvayd6xVdJaM2n+ff1uhoaCsjIVuxwzs2lTEp3js9GFmTSdPQNs3d9V7FLMzKaVg6NAhjvIfbnKzGYbB0eBnLV4PrWVKd9ZZWazjoOjQFJl4vyWBp9xmNms4+AooNZMms17OukdGCx2KWZm08bBUUAXZtL0DwZP7TlS7FLMzKaNg6OARjrIt3ukXDObPRwcBfSq+moW11Xx5E6PlGtms4eDo4Ak0ZZJ+84qM5tVHBwF1ppJ076vi8Pd/cUuxcxsWjg4CqxteKRcD3hoZrOEg6PAzm9pQPIQ62Y2ezg4Cqy+uoIzm+b7QUAzmzUcHKdAa0uaJ3ceIvs6ETOzmc3BcQq0LU2z72gfuw4dK3YpZmYnzcFxCrS1pAGPlGtms4OD4xQ45/Q6KsvL3EFuZrOCg+MUqEiVcd6Sep9xmNms4OA4RVozaTbsOszA4FCxSzEzOylTCg5JtZLKkumzJV0pqWIK210u6RlJWyTdNM7yGyVtlrRe0vcknZG0t0n6iaRNybJ35WzzeUkvSFqX/LRN+WiLqC2Tpqd/iGdfOlrsUszMTspUzzgeAaolNQPfBt4DfH6yDSSlgNuAK4BVwLWSVo1Z7QlgdURcANwH3JK0dwPvjYhzgcuBz0hK52z34YhoS37WTfEYiqrNr5I1s1liqsGhiOgGfhP4bES8Azj3ONtcDGyJiPaI6APuAa7KXSEiHkr2C/Ao0JK0PxsRzyXTu4G9QNMUay1JSxfWsKCmwh3kZjbjTTk4JL0e+B3g35O21HG2aQZ25MzvTNom8n7ggXE++GKgEng+p/kTySWsT0uqmqDg6yWtlbS2o6PjOKUWniRaM2mPWWVmM95Ug+NPgI8CX4+ITZJWAA9NVxGS3g2sBm4d03468H+B342I4V7ljwLnABcBC4GPjLfPiLgjIlZHxOqmptI4WWltSfPsS0fo6h0odilmZidsSsEREQ9HxJUR8bdJJ/m+iPjj42y2C8jkzLckbaNIugz4GHBlRPTmtNeTPbv5WEQ8mlPLnsjqBe4ie0lsRmjLpBkK2LDLL3Yys5lrqndVfVlSvaRaYCOwWdKHj7PZY8BKScslVQLXAGvG7PdC4HayobE3p70S+DrwxYi4b8w2pye/BVyd1DMjDL9K1v0cZjaTTfVS1aqI6CT7h/oBYDnZO6smFBEDwA3Ag8BTwL3JZa6bJV2ZrHYrMB/4SnJr7XCwvBO4BHjfOLfd3i1pA7ABaAT+ZorHUHQLaytZurDGd1aZ2YxWPsX1KpLnNq4G/jEi+iUdd6jXiLgfuH9M28dzpi+bYLsvAV+aYNmlU6y5JLVl0qzdeqDYZZiZnbCpnnHcDmwFaoFHkgf1OgtV1GzWmkmz+3APezt7il2KmdkJmWrn+D9ERHNEvC3pmN4GvKXAtc1KbZkGwA8CmtnMNdXO8QZJnxp+LkLS/yR79mF5OndJA+Vl8vM
cZjZjTfVS1eeAI2Q7rd9J9jLVXYUqajarrkhxzul1PuMwsxlrqp3jZ0bEb+XM/3dJ6wpQz5zQ2pJmzbrdDA0FZWUqdjlmZnmZ6hnHMUlvGp6R9EbA70E9QW2ZNEd6B2jf11XsUszM8jbVM44PAF+U1JDMHwSuK0xJs1/uSLlnLZ5f3GLMzPI01buqnoyIVuAC4IKIuBCY0c9TFNOKpvnMryr3E+RmNiPl9QbAiOhMniAHuLEA9cwJqTJxQUuD76wysxnpZF4d617dk9CaSfPUnk56+geLXYqZWV5OJjiOO+SITay1JU3/YLB5jx/AN7OZZdLOcUlHGD8gBMwrSEVzxIVL00B2pNxfWLqguMWYmeVh0uCIiLpTVchcc1p9Na+qr/aDgGY245zMpSo7SW2ZtO+sMrMZx8FRRK2ZNFv3d3Oou6/YpZiZTZmDo4haPVKumc1ADo4iuqAljQRP7vA7yM1s5nBwFNH8qnJWLp7vBwHNbEZxcBRZa0uadTsOEeHHYsxsZihocEi6XNIzkrZIummc5TdK2ixpvaTvJa+kHV52naTnkp/rctp/UdKGZJ//IGlGP8HetjTNga4+dh70YMNmNjMULDgkpYDbgCuAVcC1klaNWe0JYHVEXADcB9ySbLsQ+CvgtcDFwF9JGn5K7p+APwBWJj+XF+oYToXWljQAT7iD3MxmiEKecVwMbImI9ojoA+4BrspdISIeiojuZPZRoCWZ/jXgOxFxICIOAt8BLpd0OlAfEY9G9trOF4GrC3gMBffqV9VRVV7m5znMbMYoZHA0Azty5ncmbRN5P/DAcbZtTqaPu09J1w+/I72joyPP0k+dilQZ5zc3ODjMbMYoic5xSe8GVgO3Ttc+I+KOiFgdEaubmpqma7cF0ZpJs2HXYfoHh4pdipnZcRUyOHYBmZz5lqRtFEmXAR8DroyI3uNsu4uXL2dNuM+Zpi2TpndgiGdePFLsUszMjquQwfEYsFLSckmVwDXAmtwVJF0I3E42NPbmLHoQ+FVJC5JO8V8FHoyIPUCnpNcld1O9F/hGAY/hlBh+layf5zCzmaBgwRERA8ANZEPgKeDeiNgk6WZJVyar3QrMB74iaZ2kNcm2B4D/QTZ8HgNuTtoA/ivwz8AW4Hle7heZsVoWzGNhbSXrth8qdilmZsc16bDqJysi7gfuH9P28ZzpyybZ9nPA58ZpXwucN41lFp2k7Ei5PuMwsxmgJDrHLfs8x3N7j3K0d6DYpZiZTcrBUSJaMw1EwHqfdZhZiXNwlIiRDnKPlGtmJc7BUSLSNZUsW1TjBwHNrOQ5OEpIaybtlzqZWclzcJSQtkyaFzt7ePFwT7FLMTObkIOjhLQm/Rw+6zCzUubgKCGrTq+nIiU/z2FmJc3BUUKqK1K85vR6d5CbWUlzcJSY1pY063ceZnDIr5I1s9Lk4CgxbZk0R3sHaO84WuxSzMzG5eAoMe4gN7NS5+AoMSsaa6mrLndwmFnJcnCUmLIy0drikXLNrHQ5OEpQa6aBp/ccoad/sNilmJm9goOjBLW2pBkYCjbt9oCHZlZ6HBwlqG2kg9zBYWalx8FRghbXV7OkodoPAppZSXJwlCiPlGtmpaqgwSHpcknPSNoi6aZxll8i6eeSBiS9Paf9LZLW5fz0SLo6WfZ5SS/kLGsr5DEUS1smzfYD3Rzo6it2KWZmoxQsOCSlgNuAK4BVwLWSVo1ZbTvwPuDLuY0R8VBEtEVEG3Ap0A18O2eVDw8vj4h1hTmC4modeSPgoaLWYWY2ViHPOC4GtkREe0T0AfcAV+WuEBFbI2I9MDTJft4OPBAR3YUrtfSc39xAmfwEuZmVnkIGRzOwI2d+Z9KWr2uAfxnT9glJ6yV9WlLViRZYymqryjn7tDo/CGhmJaekO8clnQ6cDzyY0/xR4BzgImAh8JEJtr1e0lpJazs6OgpeayG0tqR5cschIjxSrpmVjkIGxy4gkzPfkrTl453A1yOif7ghIvZEVi9wF9l
LYq8QEXdExOqIWN3U1JTnx5aGtqVpDnb3s/3AnLpKZ2YlrpDB8RiwUtJySZVkLzmtyXMf1zLmMlVyFoIkAVcDG0++1NLU2pIG3M9hZqWlYMEREQPADWQvMz0F3BsRmyTdLOlKAEkXSdoJvAO4XdKm4e0lLSN7xvLwmF3fLWkDsAFoBP6mUMdQbGefNp95FSkHh5mVlPJC7jwi7gfuH9P28Zzpx8hewhpv262M05keEZdOb5WlqzxVxvnNDb4l18xKSkl3jlt2pNyNuzvpH5zsjmUzs1PHwVHiWjNp+gaGeHrPkWKXYmYGODhK3shIuX6ew8xKhIOjxDWn59Gcnsct33qaf/jecxzp6T/+RmZmBeTgKHGS+MLvXcwbzlzEp77zLJfc8hC3P/w8x/r8dkAzKw7NhaeSV69eHWvXri12GSdt/c5DfOo7z/KDZzponF/FDW85k2tfu5Sq8lSxSzOzWUjS4xGx+hXtDo6ZZ+3WA/zdt5/h0fYDLGmo5oZLV/KO1S1UpHwCaWbTx8Exi4Jj2I+37OPWbz/DE9sPsXRhDX9y2UquamsmVaZil2Zms8BEweF/os5gbzirka/94Ru4630XUVddzo33Psmvfvphvrl+N0NDs/8fBGZWHA6OGU4SbzlnMd/84Jv43+/+BVJl4oYvP8Hb/uGHfGfzSx5Z18ymnYNjlpDE5eedzgMfuoS/v6aNnv5B/uCLa7n6tv/gkWc7HCBmNm0cHLNMqkxc1dbMd2/8ZW75rQvYd7SP937uZ7zr9kf5afv+YpdnZrOAO8dnud6BQe59bAf/6/tb2Hukl19a2ciNbz2bC5cuKHZpZlbifFfVHA2OYT39g3zp0W189gfPc6Crj8tes5g/fevZnLukodilmVmJcnDM8eAY1tU7wOd/vJXbH36ezp4Bfv380/nTt67krMV1xS7NzEqMg8PBMcrhY/3c+cN27vzRCxzrH+TqtmY+dNlKzlhUW+zSzKxEODgcHOM60NXH7Q8/zxd+spX+weAdv9jCB39lJc3pecUuzcyKzMHh4JjU3s4ePvuD5/nyT7cDcO3FGf7oLWexuL66yJWZWbE4OBwcU7Lr0DH+8fvP8ZW1O0mVievesIwP/PKZLKytLHZpZnaKOTgcHHnZtr+Lv//uc3x93S5qKlL83puW8/u/tIKGeRXFLs3MTpGiBIeky4G/B1LAP0fEJ8csvwT4DHABcE1E3JezbBDYkMxuj4grk/blwD3AIuBx4D0R0TdZHQ6OE/fcS0f4zHef49837KGuupxLVjZxbnM95zc3cN6SBhb4TMRs1jrlwSEpBTwLvBXYCTwGXBsRm3PWWQbUA38OrBkTHEcjYv44+70X+FpE3CPpfwNPRsQ/TVaLg+Pkbdp9mP/zSDuPbz/IjgPHRtqb0/M4LwmSc5MwaaqrKmKlZjZdJgqO8gJ+5sXAlohoTwq4B7gKGAmOiNiaLBuayg4lCbgU+O2k6QvAXwOTBoedvHOXNPCZay4E4FB3H5t2d7Jh12E27jrMpt2dPLjppZF1X1VfzXnN9Zy7pCF7ZtLcwGn1VWT/5zOzma6QwdEM7MiZ3wm8No/tqyWtBQaAT0bEv5G9PHUoIgZy9tk83saSrgeuB1i6dGl+lduk0jWVvPGsRt54VuNIW2dPP5t3d7IxCZONuzv53tN7GT6hbZxfxXnN9Zy3JBsk5zXX05ye5zAxm4EKGRwn64yI2CVpBfB9SRuAw1PdOCLuAO6A7KWqAtVoifrqCl63YhGvW7FopK2rd4Cn9mTDZMOuTjbtPswPn9vHYPKukAU1FZzX3JBzZlLP0oU1DhOzElfI4NgFZHLmW5K2KYmIXcnvdkk/AC4EvgqkJZUnZx157dNOrdqqclYvW8jqZQtH2nr6B7NhsruTjTsPs3H3Ye78UTv9g9kwqasuT85K6pMzkwaWL6qlzG81NCsZhQyOx4CVyV1Qu4BreLlvYlKSFgDdEdErqRF4I3BLRISkh4C3k72z6jr
gGwWp3gqiuiLFhUsXjBqdt3dgkGdfPMrG3YfZsOswm3Yd5gs/2UbfQLbrq7YyxblLGkbu5jp3SQPLGmuoKk8V6zDM5rRC3477NrK326aAz0XEJyTdDKyNiDWSLgK+DiwAeoAXI+JcSW8AbgeGyL4z5DMRcWeyzxVkQ2Mh8ATw7ojonawO31U18/QPDvHcS9kwGe432bynk57+bJiUCTILa1jeWMuKxvmsaKplRWMtK5rmuyPebJr4AUAHx4w3MDhE+74uNu0+zAsdXTy/r4v2ji5e2Hd0JFAAaipT2UBpms/yxlrObMqGy/KmWuZXlXK3nllpKcbtuGbTqjxVxtmn1XH2aaOHgB8aCl7s7BkJkec7umjf18W6HQf55vrd5P7baHFdFSuaalneOD8bKMl0ZsE8ylN+IabZVDg4bMYrKxNL0vNYkp7Hm1Y2jlrW0z/I9gPdtHdkA+WFfV20dxzlgY17ONTdP7JeRUosXVgzEijDZywrmmpZVFvpS19mORwcNqtVV6TGPUsBONjVR/u+o7QnZyjtHUd5YV8XjzzbQd/gy5e+6qrLWdE0nzMba0ddAlveWMu8SnfQ29zj4LA5a0FtJb9Yu5BfPGPhqPbBoWDXwWMjofLCvi7a9x3lJ+37+doTo+/+flV9NWcsqmHZolrOaEx+L6rhjEXuT7HZy/9lm42RKhNLF9WwdFENb3716GXdfQPJ5a5soGzb3822/V187+m97Ds6+ua+xvlVLEtCZFmyv2WLalm2qJaGGo8ybDOXg8MsDzWV5dlnSpY0vGLZ0d4Btu3PhsnW/V1s29fNtgNd/Pj5fXz15z2j1k3XVIwEytjfC92nYiXOwWE2TeZXTRwqw530W/flBMv+bh7fdpD/9+RuhnLu/KqrKueMxrGBkr0EtrjOz6hY8Tk4zE6ByTrpewcG2XnwGNv2d7F1X3c2YPZ3sXl3Jw9ufJGBnFSZV5FK+lCG+1NqWTS/kqGhYGAoGBz5PUT/4Oj5gaFgcPDl9fqHhkbNj1ovmR8YHMpZFgyM7DPb3p8zX1NZzuK6KhbXV3NafRWnJb8X11WzuL6KRbVVpDx0zKzg4DArsqryFGc2zefMple8foaBwSF2H+pJzlC62Jr0qTzf0cVDT4+++ysfqTJRnvykykR5qmykLfd3xSvas/NVFeU5+8i2He0dYPfhHtbtOMT+rle+Wy1VJprmV2XDpL6axXU54VJfzWlJwCysqfTYZCXOwWFWwspTZSMd9dA0atlg8uDjwa4+ylPDf9zLcsLg5T/qYwOh0Je7+gaG6Djay0udPezt7GXvkR5e6uzhpc5s244D3azdeoCDOc/SjBxzmUaduSyuywmXnLYFNRW+bFckDg6zGSpVJprT82hOzyt2Ka9QWV42pdp6+gfpOJINlr1JqLx05OXAeWFfF4+2H+DwsVcGTGWqjKa6qpHLYsNhU1ddTlV5GVXlqezvipenqytSLy+rKBuZriwv82W0PDg4zKxoqitSZBbWkFlYM+l6Pf2D2WAZFTAvTz+39yg/2rKPIz0Dk+5nMhUpvRw25WVUDYdMRU7bmMAZG0zD4VRZXkZFaswZYHJJcOzZ38hZYWr89orUmPXKVPRLeQ4OMyt51RWpnEt2EzvWN0hX3wC9A0P09g9mf4+dHhiktz873TPSnvzuz5ketd0gR3sH2H+0b2R5T866w68AOFUkXhk8EwTUnddddNzvLV8ODjObNeZVpooyDMzQUNA3ODqYcu9G6x97d9pQzvzgOO2D49/l9vKdba9sf+VnZNerqpj+wTsdHGZmJ6msTFSXpaiuSAGzf1QAjyNtZmZ5cXCYmVleHBxmZpYXB4eZmeWloMEh6XJJz0jaIummcZZfIunnkgYkvT2nvU3STyRtkrRe0rtyln1e0guS1iU/bYU8BjMzG61gd1VJSgG3AW8FdgKPSVoTEZtzVtsOvA/48zGbdwPvjYjnJC0BHpf0YEQcSpZ/OCLuK1TtZmY2sULejns
xsCUi2gEk3QNcBYwER0RsTZaNenomIp7Nmd4taS/ZgXoOFbBeMzObgkJeqmoGduTM70za8iLpYqASeD6n+RPJJaxPS6qaYLvrJa2VtLajoyPfjzUzswmU9AOAkk4H/i9wXUQMn5V8FHiRbJjcAXwEuHnsthFxR7IcSR2Stp1gGY3AvhPcdjby9/Eyfxej+fsYbTZ8H2eM11jI4NgFZHLmW5K2KZFUD/w78LGIeHS4PSL2JJO9ku7ilf0jrxARTcdbZ5I61kbE6hPdfrbx9/Eyfxej+fsYbTZ/H4W8VPUYsFLSckmVwDXAmqlsmKz/deCLYzvBk7MQlB2I/2pg43QWbWZmkytYcETEAHAD8CDwFHBvRGySdLOkKwEkXSRpJ/AO4HZJm5LN3wlcArxvnNtu75a0AdhA9lTwbwp1DGZm9kqKiOOvNYdJuj7pLzH8feTydzGav4/RZvP34eAwM7O8eMgRMzPLi4PDzMzy4uCYxPHG2porJGUkPSRpczJ+2IeKXVMpkJSS9ISkbxa7lmKTlJZ0n6SnJT0l6fXFrqlYJP1p8v+TjZL+RVJ1sWuabg6OCeSMtXUFsAq4VtKq4lZVNAPAn0XEKuB1wB/N4e8i14fI3jFo8PfAtyLiHKCVOfq9SGoG/hhYHRHnASmyjyLMKg6OiY2MtRURfcDwWFtzTkTsiYifJ9NHyP5RyHv4mNlEUgvw68A/F7uWYpPUQPb2+TsBIqIvZ0DSuagcmCepHKgBdhe5nmnn4JjYtIy1NdtIWgZcCPy0yKUU22eAvwCGjrPeXLAc6ADuSi7d/bOk2mIXVQwRsQv4O7Ijf+8BDkfEt4tb1fRzcNiUSZoPfBX4k4joLHY9xSLpN4C9EfF4sWspEeXALwD/FBEXAl3AnOwTlLSA7JWJ5cASoFbSu4tb1fRzcEzspMbamm0kVZANjbsj4mvFrqfI3ghcKWkr2UuYl0r6UnFLKqqdwM6IGD4LvY9skMxFlwEvRERHRPQDXwPeUOSapp2DY2InPNbWbJOMC3Yn8FREfKrY9RRbRHw0IloiYhnZ/y6+HxGz7l+VUxURLwI7JL06afoVct67M8dsB14nqSb5/82vMAtvFCjpYdWLKSIGJA2PtZUCPhcRm46z2Wz1RuA9wAZJ65K2v4yI+4tXkpWYD5IdR64SaAd+t8j1FEVE/FTSfcDPyd6N+ATJ6x1mEw85YmZmefGlKjMzy4uDw8zM8uLgMDOzvDg4zMwsLw4OMzPLi4PD7DgkHU1+L5P029O8778cM//j6dy/WSE4OMymbhmQV3AkA91NZlRwRMSse8rYZh8Hh9nUfRL4JUnrkncupCTdKukxSesl/RcASW+W9ENJa0ieoJb0b5IeT97TcH3S9kmyo6iuk3R30jZ8dqNk3xslbZD0rpx9/yDn3Rd3J08oI+mTyTtT1kv6u1P+7dic4SfHzabuJuDPI+I3AJIAOBwRF0mqAv5D0vBIqL8AnBcRLyTzvxcRByTNAx6T9NWIuEnSDRHRNs5n/SbQRvbdFo3JNo8kyy4EziU7XPd/AG+U9BTwn4FzIiIkpaf30M1e5jMOsxP3q8B7k2FYfgosAlYmy36WExoAfyzpSeBRsoNnrmRybwL+JSIGI+Il4GHgopx974yIIWAd2Utoh4Ee4E5Jvwl0n+SxmU3IwWF24gR8MCLakp/lOe9e6BpZSXoz2VFTXx8RrWTHLzqZ14n25kwPAuURMUD25WP3Ab8BfOsk9m82KQeH2dQdAepy5h8E/jAZch5JZ0/wAqMG4GBEdEs6h+zrd4f1D28/xg+BdyX9KE1k37D3s4kKS96V0pAMPPmnZC9xmRWE+zjMpm49MJhccvo82fdsLwN+nnRQdwBXj7Pdt4APJP0Qz5C9XDXsDmC9pJ9HxO/ktH8deD3wJBDAX0TEi0nwjKcO+IakarJnQjee0BGaTYFHxzUzs7z4UpWZmeXFwWFmZnlxcJiZWV4cHGZmlhcHh5mZ5cXBYWZmeXF
wmJlZXv4/QAgzW/yBXxUAAAAASUVORK5CYII=\n",
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEWCAYAAABxMXBSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAoTklEQVR4nO3de5hdd13v8fdnrslkLjuXSZPOnjRpCW0jnQkYCrUcLlY8bVUC+qitWKtSS32sioJS+AM5h8M5FQHxHAsl1GJVsKK0Nmqh3IQCvZi0JGl6SUnTtJkkTSbXmSSTzO17/thrkp3JTrJ3Mit7z57P63m2e63fuuzv3pb5ZK3fWr+liMDMzKxYNeUuwMzMJhcHh5mZlcTBYWZmJXFwmJlZSRwcZmZWEgeHmZmVxMFhdgYkfU3SjRO9bok1vFVSz0Tv1+x06spdgNm5IulA3mwTcAQYSebfGxFfKnZfEXFNGuuaTQYODpsyIqJ5bFrSZuCmiPjW+PUk1UXE8LmszWwy8akqm/LGTvlI+qCkV4AvSpop6d8l9Uram0xn87b5rqSbkunflPQDSZ9M1n1R0jVnuO4iSQ9L6pf0LUl3SPqHIr/Hpcln7ZP0tKR35C27VtIzyX63SvpA0j4n+W77JO2R9H1J/rtgp+T/QMxy5gGzgAuAm8n9b+OLyfwCYAD461Ns/wZgAzAH+ATwN5J0But+GfgvYDbwUeCGYoqXVA/8G/ANYC7w+8CXJF2crPI35E7HtQCvAb6TtL8f6AHagfOADwMeh8hOycFhljMK/FlEHImIgYjYHRFfjYhDEdEPfBx4yym2fykivhARI8A9wHxyf4iLXlfSAuD1wEciYjAifgCsLLL+NwLNwO3Jtt8B/h24Plk+BCyR1BoReyPiybz2+cAFETEUEd8PD2Bnp+HgMMvpjYjDYzOSmiR9XtJLkvqAh4GMpNqTbP/K2EREHEomm0tc93xgT14bwJYi6z8f2BIRo3ltLwEdyfQvAdcCL0n6nqQrkva/ADYC35C0SdJtRX6eTWEODrOc8f/Kfj9wMfCGiGgF3py0n+z000TYDsyS1JTX1lnkttuAznH9EwuArQARsSoilpM7jfWvwFeS9v6IeH9EXAj8AvDHkq46u69h1c7BYVZYC7l+jX2SZgF/lvYHRsRLwGrgo5IakqOCXyhy88eBg8CfSqqX9NZk23uTfb1bUltEDAF9JJchS/p5Sa9K+ljG2kcKfoJZwsFhVthngOnALuAx4Ovn6HPfDVwB7Ab+F/BP5O43OaWIGATeAVxDrubPAr8REc8lq9wAbE5Ou90C/HrSvhj4FnAAeBT4bER8d6K+jFUnuR/MrHJJ+ifguYhI/YjHrFg+4jCrIJJeL+kiSTWSrgaWk+uTMKsYvnPcrLLMA+4jdx9HD/C7EfGj8pZkdjyfqjIzs5L4VJWZmZVkSpyqmjNnTixcuLDcZZiZTSpPPPHErohoH98+JYJj4cKFrF69utxlmJlNKpJeKtTuU1VmZlYSB4eZmZXEwWFmZiVxcJiZWUkcHGZmVhIHh5mZlcTBYWZmJXFwnMJ/btjJZ7+7sdxlmJlVFAfHKTyycRef+daPGRwePf3KZmZTRKrBIelqSRskbSz0LGNJyyWtk7RG0mpJbzrdtpJmSfqmpB8n7zPTqr+7M8Pg8CgbXulP6yPMzCad1IJDUi1wB7knki0Brpe0ZNxq3wa6I2Ip8NvAXUVsexvw7YhYnGx/QiBNlO5sBoC1PfvS+ggzs0knzSOOy4GNEbEpeazlveQeSnNURByIY+O6zwCiiG2XA/ck0/cA70zrC2RnTmfWjAbWbtmX1keYmU06aQZHB7Alb74naTuOpHdJeg74D3JHHafb9ryI2A6QvM+d4Lrza6Mr28a6nv1pfYSZ2aSTZnCoQNsJT42KiPsj4hJyRw4fK2XbU364dHPSb7K6t7e3lE2P053N8OOd/Rw8MnzG+zAzqyZpBkc
P0Jk3nwW2nWzliHgYuEjSnNNsu0PSfIDkfedJ9rciIpZFxLL29hOGky9ad2cbowHrt/qow8wM0g2OVcBiSYskNQDXASvzV5D0KklKpl8HNAC7T7PtSuDGZPpG4IEUvwNd7iA3MztOag9yiohhSbcCDwG1wN0R8bSkW5LldwK/BPyGpCFgAPjVpLO84LbJrm8HviLpPcDLwC+n9R0A5jQ3kp05nbVbfMRhZgYpPwEwIh4EHhzXdmfe9J8Df17stkn7buCqia301LqzGR9xmJklfOd4Ebo72+jZO8DuA0fKXYqZWdk5OIow1s/hy3LNzBwcRbmso40awRrfCGhm5uAoxozGOl41t5l17ucwM3NwFCvXQb6fYyOkmJlNTQ6OInV3ZthzcJCevQPlLsXMrKwcHEXySLlmZjkOjiJdPK+Fhroaj5RrZlOeg6NIDXU1LJnfylpfkmtmU5yDowRLOzOs37qfkVF3kJvZ1OXgKEFXto1DgyNs3Hmg3KWYmZWNg6ME3Z0ZAPdzmNmU5uAowaLZM2iZVscaX1llZlOYg6MENTVjj5LdV+5SzMzKxsFRou5shue293N4aKTcpZiZlYWDo0Rd2QzDo8Ez2/vKXYqZWVk4OEq01B3kZjbFOThKNK9tGnNbGv1sDjObshwcZ6C7M+MjDjObshwcZ2BpZ4ZNuw6yf2Co3KWYmZ1zqQaHpKslbZC0UdJtBZa/W9K65PWIpO6k/WJJa/JefZLelyz7qKStecuuTfM7FNKVbQPgKZ+uMrMpqC6tHUuqBe4A3g70AKskrYyIZ/JWexF4S0TslXQNsAJ4Q0RsAJbm7WcrcH/edn8ZEZ9Mq/bT6erIALkh1t+0eE65yjAzK4s0jzguBzZGxKaIGATuBZbnrxARj0TE3mT2MSBbYD9XAS9ExEsp1lqStqZ6Fs2Z4X4OM5uS0gyODmBL3nxP0nYy7wG+VqD9OuAfx7XdmpzeulvSzEI7k3SzpNWSVvf29pZSd1G6s21+qJOZTUlpBocKtBUcj1zS28gFxwfHtTcA7wD+Oa/5c8BF5E5lbQc+VWifEbEiIpZFxLL29vaSiz+drmyGHX1HeGX/4Qnft5lZJUszOHqAzrz5LLBt/EqSuoC7gOURsXvc4muAJyNix1hDROyIiJGIGAW+QO6U2Dl3dKRcH3WY2RSTZnCsAhZLWpQcOVwHrMxfQdIC4D7ghoh4vsA+rmfcaSpJ8/Nm3wWsn9Cqi/QT57dSVyP3c5jZlJPaVVURMSzpVuAhoBa4OyKelnRLsvxO4CPAbOCzkgCGI2IZgKQmcldkvXfcrj8haSm5016bCyw/J6bV13LxvBbfQW5mU05qwQEQEQ8CD45ruzNv+ibgppNse4hcqIxvv2GCyzxj3Z0Z/m3tNkZHg5qaQl06ZmbVx3eOn4XubBv9h4fZvPtguUsxMztnHBxnwR3kZjYVOTjOwuK5LTQ11LJ2i/s5zGzqcHCchdoa8ZrzfSOgmU0tDo6z1N3ZxtPb+hgcHi13KWZm54SD4yx1ZTMMDo/y/I7+cpdiZnZOODjO0tijZNf4RkAzmyIcHGcpO3M6M5vqWed+DjObIhwcZ0lS8ihZX1llZlODg2MCdGcz/HhnPwePDJe7FDOz1Dk4JkB3ZxujAeu3+qjDzKqfg2MCdGUzgO8gN7OpwcExAeY0N9KRmc5aj5RrZlOAg2OCLO3M+NkcZjYlODgmSFe2jZ69A+w+cKTcpZiZpcrBMUHGRsr1g53MrNo5OCbIZR1t1Mgd5GZW/RwcE2RGYx2vmtvsfg4zq3oOjgnUnc2wrmc/EVHuUszMUuPgmEBdnRl2HxykZ+9AuUsxM0tNqsEh6WpJGyRtlHRbgeXvlrQueT0iqTtv2WZJT0laI2l1XvssSd+U9OPkfWaa36EUS30joJlNAakFh6Ra4A7gGmAJcL2kJeNWexF4S0R0AR8DVoxb/raIWBoRy/LabgO+HRGLgW8n8xXh4nktNNTW+MoqM6tqaR5
xXA5sjIhNETEI3Assz18hIh6JiL3J7GNAtoj9LgfuSabvAd45MeWevYa6Gpac3+pnc5hZVUszODqALXnzPUnbybwH+FrefADfkPSEpJvz2s+LiO0AyfvcQjuTdLOk1ZJW9/b2ntEXOBNLOzOs37qfkVF3kJtZdUozOFSgreBfU0lvIxccH8xrvjIiXkfuVNfvSXpzKR8eESsiYllELGtvby9l07PSlW3j0OAIG3ceOGefaWZ2LqUZHD1AZ958Ftg2fiVJXcBdwPKI2D3WHhHbkvedwP3kTn0B7JA0P9l2PrAzlerP0Ngd5O4gN7NqlWZwrAIWS1okqQG4DliZv4KkBcB9wA0R8Xxe+wxJLWPTwM8C65PFK4Ebk+kbgQdS/A4lWzR7Bi2Ndb4R0MyqVl1aO46IYUm3Ag8BtcDdEfG0pFuS5XcCHwFmA5+VBDCcXEF1HnB/0lYHfDkivp7s+nbgK5LeA7wM/HJa3+FM1NSIrs42H3GYWdVKLTgAIuJB4MFxbXfmTd8E3FRgu01A9/j2ZNlu4KqJrXRidWUzfOHhTRweGmFafW25yzEzm1C+czwF3dkMw6PBM9v7yl2KmdmEc3CkYOnYEOvu5zCzKuTgSMG8tmnMbWn0o2TNrCo5OFLS3ZlxB7mZVSUHR0q6s21s6j3I/oGhcpdiZjahHBwpGbsR8CmfrjKzKuPgSElXRwbwHeRmVn0cHClpa6pn0ZwZvoPczKqOgyNF3dk2P5vDzKqOgyNFXdkMr/QdZkff4XKXYmY2YRwcKTo6Uq5PV5lZFXFwpOgnzm+lrkbuIDezquLgSNG0+lountfC2i3u5zCz6uHgSFlXNsO6nn2M+lGyZlYlHBwpW9rZRt/hYTbvPljuUszMJoSDI2VjHeS+LNfMqoWDI2Wvam9men0ta3xllZlVCQdHyupqa7iso411vrLKzKqEg+Mc6Mq2sX5bH0Mjo+UuxczsrKUaHJKulrRB0kZJtxVY/m5J65LXI5K6k/ZOSf8p6VlJT0v6w7xtPippq6Q1yevaNL/DROjuzDA4PMqGV/rLXYqZ2VlLLTgk1QJ3ANcAS4DrJS0Zt9qLwFsiogv4GLAiaR8G3h8RlwJvBH5v3LZ/GRFLk9eDaX2HidKdzQAeKdfMqkOaRxyXAxsjYlNEDAL3AsvzV4iIRyJibzL7GJBN2rdHxJPJdD/wLNCRYq2p6pw1nZlN9R56xMyqQprB0QFsyZvv4dR//N8DfG18o6SFwGuBx/Oab01Ob90taWahnUm6WdJqSat7e3tLLn4iSaK7M+NLcs2sKhQVHJJmSKpJpl8t6R2S6k+3WYG2grdPS3obueD44Lj2ZuCrwPsioi9p/hxwEbAU2A58qtA+I2JFRCyLiGXt7e2nKTV9XdkMz+/o59DgcLlLMTM7K8UecTwMTJPUAXwb+C3gb0+zTQ/QmTefBbaNX0lSF3AXsDwidue115MLjS9FxH1j7RGxIyJGImIU+AK5U2IVb2lnG6MB67f2nX5lM7MKVmxwKCIOAb8I/L+IeBe5Du9TWQUslrRIUgNwHbDyuJ1KC4D7gBsi4vm8dgF/AzwbEZ8et838vNl3AeuL/A5l1TXWQe5+DjOb5OqKXE+SrgDeTe6U0mm3jYhhSbcCDwG1wN0R8bSkW5LldwIfAWYDn81lBcMRsQy4ErgBeErSmmSXH06uoPqEpKXkTnttBt5b5HcoqznNjXRkprPGV1aZ2SRXbHC8D/gQcH/yx/9C4D9Pt1Hyh/7BcW135k3fBNxUYLsfULiPhIi4ociaK053p+8gN7PJr6jgiIjvAd8DSDrJd0XEH6RZWDXqzmZ48KlX2H3gCLObG8tdjpnZGSn2qqovS2qVNAN4Btgg6U/SLa36HB0pd6svyzWzyavYzvElyeWw7yR36mkBuT4IK8FrOtqQ3EFuZpNbscFRn1we+07ggYgY4iT3ZNjJNTfWsXhus28ENLNJrdjg+Dy5K5hmAA9LugDwDQlnoCubYe2
WfUQ4d81scioqOCLi/0ZER0RcGzkvAW9Lubaq1N2ZYffBQXr2DpS7FDOzM1Js53ibpE+Pjf0k6VPkjj6sRN3ZNsCPkjWzyavYU1V3A/3ArySvPuCLaRVVzS6Z10pDbY2HWDezSavYGwAviohfypv/H3l3dFsJGupqWHJ+q6+sMrNJq9gjjgFJbxqbkXQl4JP0Z6g728ZTW/czMuoOcjObfIoNjluAOyRtlrQZ+GsmyRhRlai7M8OhwRFe6D1Q7lLMzEpW7FVVayOiG+gCuiLitcBPp1pZFRsbKXeNT1eZ2SRU0hMAI6Iv74FKf5xCPVPChXNm0NJY534OM5uUzubRsQVHr7XTq6kRl2XbfEmumU1KZxMc7tk9C92dGZ7d3sfhoZFyl2JmVpJTXo4rqZ/CASFgeioVTRHd2QzDo8Gz2/t47YKZ5S7HzKxop3uKX8u5KmSq6e7M3UG+dss+B4eZTSpnc6rKzsK81mnMbWl0P4eZTToOjjKRRFc242eQm9mk4+Aoo6WdbWzqPcj+gaFyl2JmVrRUg0PS1ZI2SNoo6bYCy98taV3yekRS9+m2lTRL0jcl/Th5n7QdBGM3Aq73o2TNbBJJLTgk1QJ3ANcAS4DrJS0Zt9qLwFsiogv4GLCiiG1vA74dEYuBbyfzk1JXMsS67yA3s8kkzSOOy4GNEbEpIgaBe4Hl+StExCMRsTeZfQzIFrHtcuCeZPoeco+znZQyTQ0smjODde7nMLNJJM3g6AC25M33JG0n8x7ga0Vse15EbAdI3ucW2pmkm8cePNXb23sG5Z8bXdk21m7xqSozmzzSDI5CQ5IUvNtc0tvIBccHS932ZCJiRUQsi4hl7e3tpWx6TnVnM7zSd5gdfYfLXYqZWVHSDI4eoDNvPgtsG7+SpC7gLmB5ROwuYtsdkuYn284Hdk5w3edU/o2AZmaTQZrBsQpYLGmRpAbgOmBl/gqSFgD3ATdExPNFbrsSuDGZvhF4IMXvkLqfOL+N2hr5UbJmNmkU++jYkkXEsKRbgYeAWuDuiHha0i3J8juBjwCzgc9KAhhOTi8V3DbZ9e3AVyS9B3gZ+OW0vsO5MK2+lovPa/Ed5GY2aaQWHAAR8SDw4Li2O/OmbwJuKnbbpH03cNXEVlpe3Z0Z/mPdNiKCJEDNzCqW7xyvAEs72+g7PMzm3YfKXYqZ2Wk5OCrA2B3k7iA3s8nAwVEBFs9tZnp9rTvIzWxScHBUgLraGl7T0eojDjObFBwcFaI7m+HpbX0MjYyWuxQzs1NycFSIrs4MR4ZH2fBKf7lLMTM7JQdHhVg61kHufg4zq3AOjgrROWs6M5vqWecBD82swjk4KsTYo2R9xGFmlc7BUUG6OzM8v6OfQ4PD5S7FzOykHBwVpDvbxmjA+q195S7FzOykHBwVZOwOcj8R0MwqmYOjgrS3NNKRme5nkJtZRXNwVJjuzjZ3kJtZRXNwVJjubIYtewbYc3Cw3KWYmRXk4KgwXb4R0MwqnIOjwlyWbUPCNwKaWcVycFSY5sY6XtXe7CMOM6tYDo4K1N2ZYV3PPiKi3KWYmZ3AwVGBurNt7DowyNZ9A+UuxczsBKkGh6SrJW2QtFHSbQWWXyLpUUlHJH0gr/1iSWvyXn2S3pcs+6ikrXnLrk3zO5RDd2cGgLXu5zCzClSX1o4l1QJ3AG8HeoBVklZGxDN5q+0B/gB4Z/62EbEBWJq3n63A/Xmr/GVEfDKt2svtknmtNNTWsK5nHz/XNb/c5ZiZHSfNI47LgY0RsSkiBoF7geX5K0TEzohYBQydYj9XAS9ExEvplVpZGupquPT8Vt9BbmYVKc3g6AC25M33JG2lug74x3Ftt0paJ+luSTMLbSTpZkmrJa3u7e09g48tr6XZNtZv3c/IqDvIzayypBkcKtBW0l9BSQ3AO4B/zmv+HHARuVNZ24FPFdo2IlZExLKIWNbe3l7Kx1aErmyGg4MjvNB7oNylmJkdJ83g6AE68+azwLYS93EN8GR
E7BhriIgdETESEaPAF8idEqs6xzrI95W1DjOz8dIMjlXAYkmLkiOH64CVJe7jesadppKU31v8LmD9WVVZoS6cM4OWxjrfCGhmFSe1q6oiYljSrcBDQC1wd0Q8LemWZPmdkuYBq4FWYDS55HZJRPRJaiJ3RdZ7x+36E5KWkjvttbnA8qpQUyMuy7b5klwzqzipBQdARDwIPDiu7c686VfIncIqtO0hYHaB9hsmuMyK1d2Z4a7vb+Lw0AjT6mvLXY6ZGeA7xytad7aNoZHg2e1+lKyZVQ4HRwUb6yBf1+PTVWZWORwcFWxe6zTaWxp9ZZWZVRQHRwWTRHc24yurzKyiODgqXHe2jRd6D9J3+FSjspiZnTsOjgo31s/xlPs5zKxCODgqXFe2DfAzyM2scjg4KlymqYGFs5v4lyd6+O6GnX4qoJmVnYNjErjtmks5eGSY3/ziKq75q+/z1Sd6GBweLXdZZjZFaSr8C3bZsmWxevXqcpdxVgaHR3lgzVa+8P1NPL/jAPNap/Hbb1rI9ZcvoGVafbnLM7MqJOmJiFh2QruDY3KJCL67oZfPP/wCj23aQ0tjHb/2hgX81pWLmNc2rdzlmVkVcXBUSXDkW9ezj88/vImvPbWd2hrxju4Obn7zhVw8r6XcpZlZFXBwVGFwjHl59yHu/uGL/NOqLQwMjfDWi9u5+c0XcsWFs5EKPU/LzOz0HBxVHBxj9h4c5B8ee4l7Ht3MrgODXNbRxs1vvpBrXjOPulpfB2FmpXFwTIHgGHN4aIT7ntzKXd/fxKZdB+mcNZ33XLmIX3l9J00NqY6kb2ZVxMExhYJjzOho8M1nd7Di4U088dJeMk313PDGC7jxpxYyp7mx3OWZWYVzcEzB4Mi3evMePv/wJr717A7qa2v4pddl+Z3/togL25vLXZqZVaiTBYfPW0wRyxbOYtnCWbzQe4C7vv8iX32yh3tXvczbLz2P977lQn7yglnlLtHMJgkfcUxRvf1H+LtHN/N3j77E/oEhfvKCmdz85gt5+6XnUVPjK7HM7ORHHKleaiPpakkbJG2UdFuB5ZdIelTSEUkfGLdss6SnJK2RtDqvfZakb0r6cfI+M83vUK3aWxp5/89ezKMf+mk++gtL2NF3mPf+/RP8zKe/x5cff5nDQyPlLtHMKlRqRxySaoHngbcDPcAq4PqIeCZvnbnABcA7gb0R8cm8ZZuBZRGxa9x+PwHsiYjbkzCaGREfPFUtPuI4veGRUb62/hVWPLyJp7buZ05zAzdesZAbrriATFNDucszszIoxxHH5cDGiNgUEYPAvcDy/BUiYmdErAJKeUrRcuCeZPoecqFjZ6mutoZf6D6flbdeyZd/5w28pqONT33zea74P9/hoyufZsueQ+Uu0cwqRJqd4x3Alrz5HuANJWwfwDckBfD5iFiRtJ8XEdsBImJ7ctRyAkk3AzcDLFiwoNTapyxJ/NRFc/ipi+aw4ZV+Vjy8iS89/hJ/9+hmrr1sPj932Xwund/KgllN7gsxm6LSDI5Cf1VKOS92ZURsS4Lhm5Kei4iHi904CZoVkDtVVcLnWuLieS186le6+ZP/fjFf/OGLfPnxl/n3ddsBmNFQyyXzW7l0fgtL5rdx6fwWLpnXyvSG2jJXbWZpSzM4eoDOvPkssK3YjSNiW/K+U9L95E59PQzskDQ/OdqYD+ycwJqtgHlt0/jQtZfyR29/Nc/v6OeZbX08u72PZ7f388CPtvEPj70MQI1g4ZwZXDq/lSVjr/NbmdvS6DGzzKpImsGxClgsaRGwFbgO+LViNpQ0A6iJiP5k+meB/5ksXgncCNyevD8w0YVbYdPqa+nKZujKZo62RQQ9ewd4+miY9LF2yz7+IzkyAZg1o4ElY0cn57dy6fxWLmpvpt7jZ5lNSqnexyHpWuAzQC1wd0R8XNItABFxp6R5wGqgFRgFDgBLgDnA/clu6oAvR8THk33OBr4CLABeBn45Ivacqg5fVXXu7R8Y4rkkSJ5
Jjk427Og/+uTChtoaFp/XfPToZOy9rckPpTKrFB5yxMFRdsMjo2zadTAXJtvGAqWPXQcGj67TkZme9JskYXJ+K50z3RFvVg4ecsTKrq62hlef18Krz2th+dKOo+07+w/z7Pb8vpM+vvPcTkaTf9PMaKjl0iRILk1OeS2aM4O26fXuOzErAx9xWEU6PDRyQkf8s9v76D8yfHSdlsY6OmZOp3NWE9mZ0+mcmbwn834Wu9nZ8RGHTSqn6oh/dnsfL+85RM/eAXr2HuLl3Yf44cZdHBo8fpiUTFP90UAZHy7ZmU2+dNjsDDk4bNKQROesXAiMFxHsPTREz95DbNmTC5QtyfTzO/r5znM7OZJ0zI+Z09xAdtxRyliwdMycTmOdg8WsEAeHVQVJzJrRwKwZDccdpYyJCHoPHDkaKmNHK1v2DPDU1v089PQrDI1E3v7gvJZpBUOlc1YT89qm+XJim7IcHDYlSGJuyzTmtkzjJy84cUDlkdFgR99hevYOsCU5DbZl7yF69h7iv17cwwNrBo521gPU1ojzWhrJNDXQOr2Otun1x71ax79PO7asoc6BY5Obg8OMXBCcn5nO+ZnpXL7oxIdaDY2M8sr+w7kw2ZMLlW37DrN/YJD9A0Ns3nWI/QND7B8YYuA0Q9JPr69NQqXuuKDJD5f80Ml/Tauv8ZVkVnYODrMi1NfWHOtfuejU6w4Oj7J/YIi+w0NHw6Rv3Hv+a9u+3OXIfQNDx101VrgOnXAkM7OpnvaWxtwRVWvjcdMtjXUOGptwDg6zCdZQV0N7S+4PeKmGR0bpPzx8XOjkAmd43HwumPYeGuSF3gP09h85ofMfYFp9zbEgaWnMvVqnHa1vbrJs9owG32RpRXNwmFWQutoaZs5oYOaM0h6eFRH0HR6mt/8wO/uOsLP/CDuT6d4DR9jZd4Tnd/Tzg4276D984lFNbY2Y09xwNGDGQqW99fjAmdPc4KvNzMFhVg0kHe0HedXcllOuOzA4Qm8SLLn3YyGzs/8I2/YfZm3PPnYfHKTQ/cGZpvqjRyq5cGmkvbmRWTMaaGqoZXpDXe69vpYZjcl0Qy1N9bXU+Uq0quDgMJtipjfUsmB2Ewtmn3g/TL7hkVF2HxxMAuVwLmDypnv7j/DiroPs7D983KXMp9JQW5MLkbEwaailqb7uhLYZDfltdTTV5y+vK7j9ub5aLSKIOPaQoYgg4GjYVvPVcw4OMyuorraG81qncV7rNKDtpOtFBPsODbFvYIhDg8MMDI5wKHkNDA1z8MjIsbahY8tz78McGhxh76FBtu47vq1Qn80p660R0xtqqa+tOe6P+Nh0rliS9uP/yAd5IVCgLX8fxY7S1NRQm+tLam482qfU3tx49AKG9uZcX9Ps5oZJd0+Qg8PMzoqkM+qXOZ2R0WBgaOTEMBoc4eBxbcn0UG7ZSHLDjZR7DOnYVWVjF5cJ5S3LW578n/zlx/ZzfBvSse2TZWP7kHLhsn9g6OiR2Y93HuCRF3azf2Co4HedNaPhaMCM9THlh83YdKUM7OngMLOKVFsjmhvraG6snj9TR4ZH2HVgMNe31HeY3gO5YDn6OnCEx188SO+BI0efXZOvoTZ3ldyc8Ucy+UczSfu0+vQuYqie/4+YmVW4xrpaOjLT6chMP+V6x66SOxYo44OmZ+8h1mzZe9KLGFqm1dHe0sj/ftdlvPHC2RP6PRwcZmYV5vir5JpPue7YRQzjj1zGrpzLpPBUTQeHmdkkdvxFDOfG5OrKNzOzsnNwmJlZSVINDklXS9ogaaOk2wosv0TSo5KOSPpAXnunpP+U9KykpyX9Yd6yj0raKmlN8ro2ze9gZmbHS62PQ1ItcAfwdqAHWCVpZUQ8k7faHuAPgHeO23wYeH9EPCmpBXhC0jfztv3LiPhkWrWbmdnJpXnEcTmwMSI2RcQgcC+wPH+FiNgZEauAoXHt2yPiyWS6H3gW6EixVjMzK1KawdEBbMmb7+EM/vhLWgi8Fng8r/lWSes
k3S3pxMe55ba7WdJqSat7e3tL/VgzMzuJNIOj0H3xRY7ykuxAaga+CrwvIvqS5s+Re5TOUmA78KlC20bEiohYFhHL2tvbS/lYMzM7hTSDowfozJvPAtuK3VhSPbnQ+FJE3DfWHhE7ImIkIkaBL5A7JWZmZudImjcArgIWS1oEbAWuA36tmA2VG8Xrb4BnI+LT45bNj4jtyey7gPWn298TTzyxS9JLpRSfZw6w6wy3rUb+PY7xb3E8/x7Hq4bf44JCjYpixwg+A8mlsp8BaoG7I+Ljkm4BiIg7Jc0DVgOtwChwAFgCdAHfB55K2gE+HBEPSvp7cqepAtgMvDcvSNL4DqsjYlla+59s/Hsc49/ieP49jlfNv0eqQ45ExIPAg+Pa7sybfoXcKazxfkDhPhIi4oaJrNHMzErjO8fNzKwkDo7TW1HuAiqMf49j/Fscz7/H8ar290i1j8PMzKqPjzjMzKwkDg4zMyuJg+MUTje671RxqtGKpzJJtZJ+JOnfy11LuUnKSPoXSc8l/51cUe6aykXSHyX/O1kv6R8lnbsnLJ0jDo6TyBvd9xpy95ZcL2lJeasqm7HRii8F3gj83hT+LfL9IbkBOA3+Cvh6RFwCdDNFfxdJHeRG/F4WEa8hdw/bdeWtauI5OE7utKP7ThUerfhEkrLAzwF3lbuWcpPUCryZ3GgPRMRgROwra1HlVQdMl1QHNFHCUEuThYPj5CZkdN9qc5LRiqeizwB/yrGRDaayC4Fe4IvJqbu7JM0od1HlEBFbgU8CL5MbhHV/RHyjvFVNPAfHyZ316L7V5iSjFU85kn4e2BkRT5S7lgpRB7wO+FxEvBY4CEzJPsHkMQ/LgUXA+cAMSb9e3qomnoPj5M5qdN9qc7LRiqeoK4F3SNpM7hTmT0v6h/KWVFY9QE9EjB2F/gu5IJmKfgZ4MSJ6I2IIuA/4qTLXNOEcHCd3dHRfSQ3kOrhWlrmmsjjVaMVTUUR8KCKyEbGQ3H8X34mIqvtXZbGSMee2SLo4aboKeOYUm1Szl4E3SmpK/ndzFVV4oUCqgxxOZhExLOlW4CGOje77dJnLKpcrgRuApyStSdo+nAxiaQbw+8CXkn9kbQJ+q8z1lEVEPC7pX4AnyV2N+COqcOgRDzliZmYl8akqMzMriYPDzMxK4uAwM7OSODjMzKwkDg4zMyuJg8OsCJIOJO8LJf3aBO/7w+PmH5nI/ZtNNAeHWWkWAiUFRzLS8qkcFxwRUXV3Glt1cXCYleZ24L9JWpM8d6FW0l9IWiVpnaT3Akh6a/IMky8DTyVt/yrpieRZDTcnbbeTG0l1jaQvJW1jRzdK9r1e0lOSfjVv39/Ne/7Fl5K7lJF0u6Rnklo+ec5/HZsSfOe4WWluAz4QET8PkATA/oh4vaRG4IeSxkZDvRx4TUS8mMz/dkTskTQdWCXpqxFxm6RbI2Jpgc/6RWApuedbzEm2eThZ9lrgJ8iNn/ZD4EpJzwDvAi6JiJCUmdivbpbjIw6zs/OzwG8kQ7E8DswGFifL/isvNAD+QNJa4DFyA2gu5tTeBPxjRIxExA7ge8Dr8/bdExGjwBpyp9D6gMPAXZJ+ETh0lt/NrCAHh9nZEfD7EbE0eS3Ke/7CwaMrSW8lN3LqFRHRTW4Mo9M9UrTQ0P5jjuRNjwB1ETFM7ijnq8A7ga+X8D3MiubgMCtNP9CSN/8Q8LvJsPNIevVJHmLUBuyNiEOSLiH3CN4xQ2Pbj/Mw8KtJP0o7uafs/dfJCkuel9KWDD75PnKnucwmnPs4zEqzDhhOTjn9LblnbS8Enkw6qHvJ/Wt/vK8Dt0haB2wgd7pqzApgnaQnI+Ldee33A1cAa8k9ROxPI+KVJHgKaQEekDSN3NHKH53RNzQ7DY+Oa2ZmJfGpKjMzK4mDw8zMSuLgMDOzkjg4zMysJA4OMzMriYPDzMxK4uAwM7OS/H98iA8C+mcx5AAAAABJRU5ErkJggg==\n",
       "text/plain": [
        "<Figure size 432x288 with 1 Axes>"
       ]
@@ -478,12 +482,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 15,
    "metadata": {},
    "outputs": [
     {
      "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAot0lEQVR4nO3deXxcZ33v8c9Xuyx5X+LEexI7djYSUMOSsrQ0kFJIKFDqpBRyWxrgEkqhhZv09lIaXuXSjcItebUNNMAtS6CBUtObktKyFgq1Q0xCrHHiOIttNIls2dZYsvbf/eMcyWN5JI1tjWak+b5fr3nNOc9Z5qeJc37znOc5z6OIwMzMbLyacgdgZmaVyQnCzMwKcoIwM7OCnCDMzKwgJwgzMyvICcLMzApygjAzs4KcIGzWk3Qs7zUi6Xje+q+dwfm+JenNpYjVbDapK3cAZmcrIlpHlyU9Abw5Iv6tfBGVlqS6iBgqdxw297kGYXOWpBpJt0p6TNIhSV+UtCTd1iTpM2n5EUnbJZ0j6Y+BFwIfS2sgH5vg3P8gKSvpqKTvSLokb1uzpL+Q9GS6/T8kNafbflbS99PP3CfpprT8pFqLpJsk/Ufeekh6u6RHgUfTso+m5+iWdL+kF+btXyvp99O/PZduXyPpDkl/Me5v2SbpXWf/jdtc4wRhc9k7gFcDLwbOAw4Dd6Tb3gQsBNYAS4G3Ascj4n8C3wVuiYjWiLhlgnP/C7ARWAH8CPhs3rY/B54DvABYArwXGJG0Lj3ur4DlwBXAztP4e14NPBe4OF3fnp5jCfA54B8kNaXb3g3cALwCWAD8BtALfBq4QVINgKRlwC+kx5udxLeYbC57K8mFfj+ApPcDT0n6dWCQJDFcGBEPAvefzokj4q7R5fS8hyUtBHIkF+PnRcSBdJfvp/vdCPxbRHw+LT+Uvor1vyOiKy+Gz+Rt+wtJfwBcBPwYeDPw3ojYnW7/8ehnSjoKvBT4OrAV+FZEPH0acViVcA3C5rJ1wD+mt3OOAO3AMHAO8PfAfcDdkn4q6U8l1Rdz0vT2zYfS2zfdwBPppmXpqwl4rMChayYoL9a+cXH8nqT29DbWEZIa0bIiPuvTwBvS5TeQfBdmp3CCsLlsH/CLEbEo79UUEQciYjAi/igiLia5FfRK4I3pcVMNcXwjcD3JrZmFwPq0XMBBoA+4YIJ4CpUD9ADz8tZXFthnLK60veG9wOuBxRGxCDiaxjDVZ30GuF7Ss4AtwFcm2M+qnBOEzWV/A/xxeu8fScslXZ8u/5ykyyTVAt0kt5xG0uOeBs6f5LzzgX6S20PzgA+OboiIEeAu4MOSzktrG8+X1EjSTvELkl4vqU7SUklXpIfuBF4jaZ6kC4HfnOJvmw8MAZ1AnaT3kbQ1jPoE8AFJG5W4XNLSNMb9JO0Xfw98KSKOT/FZVqWcIGwu+yiwDfhXSTngBySNvJD8Qr+HJDm0A9/mxK2WjwKvk3RY0v8pcN7/CzwJHAB2pefN93vAQyQX4S7gT4CaiHiKpNH4d9PyncCz0mP+EhggSU6f5uRG70LuA74GPJLG0sfJt6A+DHwR+Nf0b/w7oDlv+6eBy/DtJZuEPGGQWfWR9CKSW03rwhcBm4BrEGZVJm2MfyfwCScHm4wThFkVkbQFOAKcC3ykrMFYxfMtJjMzK8g1CDMzK2jOPEm9bNmyWL9+fbnDMDObVe6///6DEbG80LY5kyDWr1/Pjh07yh2GmdmsIunJibb5FpOZmRXkBGFmZgU5QZiZWUFOEGZmVpAThJmZFeQEYWZmBZU0QUi6VtJuSXsk3Vpg+1pJ35T0gKQHJb0ib9tt6XG7Jb28lHGamdmpSvYcRDrO/h3ANcB+YLukbRGxK2+3PwC+GBF/Leli4F5gfbq8FbiEZC7hf5O0KSKGSxWvmRUWETzd3c+D+4/w6DPHaKyrYdG8BhbPqx97XzyvgYXN9dTUaOoT2qxRygflrgL2RMR
eAEl3k8zClZ8gghOTnCwEfpouXw/cHRH9wOOS9qTn+88SxmtmwDPdfTy4/ygPHTjx6sz1T3mcBAubk2SxaN7J7yeSSd5yS7Ktqb52Bv4qOxOlTBCrOHkCk/2cmKxl1PtJJnN5B9BCMoXj6LH5k7DsT8tOIulm4GaAtWvXTkvQZtWkM9fPTw4cTRPCER46cJSnu5NkUCO4YHkrL9y4jMtXLeSy1QvZvHIBQ8PB4d4BjhwfTN57BzjcM5i8946WDfJ0dx+7szkO9w7QOzBx5b+pviZNJg0saq5nccvJNZNCtZUFzfXUVkFtpW9wmEM9A3QdG+BQTz+Hjg3Q1TPAwZ5+usaWB1i/dB4f3XrltH9+uYfauAH4VET8haTnA38v6dJiD46IO4E7Adra2jwsrdkkDh3rT2oEebWDjqN9QPLr//xlLbzggmVcliaDi89dQEtj4UvEwnn1p/XZ/UPDHEmTx6nJJFk+kiaW3dkcR3oHOXJ8kOGRwv9bS9DaUMf8pjrmN9WzoDl5T9ZPLC8Y9z5W3lxPS0Mt0swmmb7BYbp6BjiUXvBPLA/QlSaAQz3ptmMD9EyQWOtrxdKWRpa0NLC0tYFzFzYX3O9slTJBHADW5K2vTsvy/SZwLUBE/KekJmBZkcea2QQO9wycuEWUJoQDR05MPX3+shau2rAkSQarFnLJqoW0TpAMpkNjXS3nLKjlnAVNRR8zMhLk+odOTSY9SfLI9Q3SfXyIXN8gub4hnsn18VjnELm+IbqPDzI0QXIZVSNobTw5mUyVaOY31bMgr7y2RnT1JL/kD/UMcOhY/ynLB9Nf+l09AxzrHyoYS32tkot9SyNLWxtYt3Te2PLSloaxRLC0pZElrQ3Mb6ybkeRWygSxHdgoaQPJxX0rcOO4fZ4CXgp8Kp3IpIlkEvZtwOckfZikkXoj8F8ljNVszOGeAXbuP8IDTx1h574j9PYPsaA5uTAk78mFJHk/dX1+Ux31tTPXg/xo7yA/+enJt4n2dZ1IBuuXzuPZ6xbzphes47JVi7hk1QIWNJ1eDaAcamrEwuZ6FjbXs27p6R0bEfQNjiRJpO9EEulO33Nj70nZaKL56ZE+cv25sW0T1WCmUlcjlrY2sKSlkaUtyQV/SUsDy1qTX/3JcrJ9SUsDC5pm5oJ/ukqWICJiSNItJJOr1wJ3RcTDkm4HdkTENpLJ2z8u6V0kDdY3pVMgPizpiyQN2kPA292DyUphcHiE3dkcDzx1mAeeOsID+47w+MEeIPmFuemc+SxpaeCZXB97nhm9mAwy1XVjXkNtgURy9gmmu2+Qn4y7TfTkod6x7WuXzOPyVYv4teeu4/K0ZrCwufKTwXSTRHNDLc0NtaxYMPX+hUQEvQPDYwmlUKIZHgkWz2sY+6W/tLWyL/ina87MKNfW1hYe7tumkj3alySDfUd44KnDPHTgKH2DIwAsa23kyrWLkteaxVy+emHBe/CjF47RX56jSWNsPX+5b7DAflP/Mh2fYFoa63iqq3cseQGsXtw81l5w+apFXLpqAYvmNUzvF2ZznqT7I6Kt0LZyN1KblUzf4DAPHTjKA08dZue+5JbRaKNsQ20Nl6xawI1XrRtLCqsWNRf1q08SLY11tDTWce7C04/rTBLM4d4BNp3Tyuues5pL03aDJS1OBlZaThB2kogg291Ha2MdrTPUEDYdIoInDvWyc196q+ipI7R3dI81VK5Z0szPrF+SJoPFbDl3Po115el/f7YJxmymOEHYSe7evo/bvvwQkPzKXtxSnzakpe/z0vfWBpbMaxhrcFvSkvRRr5uhxtnuvkF+nNYKRmsIh3sHAWhpqOVZaxbxlhefz5VrFnPF2kUsa22ckbjM5hInCDvJ/U8eZvG8et72kgvo6hmkK+2r3dUzwEOHj9DVM0B3X+GuepA8STvaLW9xS9JwN/q+ZNzykpYG5jVM/U9weCR45OncWDJ4YN8RHus8RkTSH37jilZedvFKrkhvFW1cMb8qHqIyKzU
nCDtJJtvNpasWcvOLLphwn8HhEQ73DNDVmzzh2dU7MPbAz+HepA/44Z4B9nX1Jr/sewYm7JPeVF+T1ERGu/zNO1Fj6RkYZudTR/jx/iNjT+IuaWngyjWLuP5Z53Hl2sVcvmbhrOiyaTYbOUHYmKHhER55+hg3vWD9pPvV19awYkETK4p86Cki6O4bGquJdKUJZPTp0bGaSu8gjx88NvYEaV2NuOS8BfzKc1Zz5drFXLl2EWuXzJs17SJms50ThI154lAPA0MjbF45f1rPK5144GnDspaijukbHEaibA3JZuYEYXnaO3IAbF55hk8WTSOP8GlWfp5RzsZkst3U1YgLVhT3K9/M5jYnCBuT6chxwfJW39YxM8AJwvJksjk2nzu97Q9mNns5QRgAR48PcuDI8YpofzCzyuAEYQDszqYN1K5BmFnKCcIA2J3tBmCLaxBmlnKCMADaszkWzavnnAUes8jMEk4QBkCmo5vNK+f7KWUzG+MEYYyMBLuzOTdQm9lJnCCM/YeP0zMwPO1DbJjZ7OYEYbSnDdSbz3UNwsxOKGmCkHStpN2S9ki6tcD2v5S0M309IulI3rbhvG3bShlntct05JBg0zmt5Q7FzCpIyQbrk1QL3AFcA+wHtkvaFhG7RveJiHfl7f8O4Mq8UxyPiCtKFZ+dkMl2s35pS1GT95hZ9ShlDeIqYE9E7I2IAeBu4PpJ9r8B+HwJ47EJZLI5tz+Y2SlKmSBWAfvy1venZaeQtA7YAHwjr7hJ0g5JP5D06gmOuzndZ0dnZ+c0hV1degeGeOJQj3swmdkpKqWReitwT0QM55Wti4g24EbgI5JOmQMzIu6MiLaIaFu+fPlMxTqnPPJ0Mrezh9gws/FKmSAOAGvy1lenZYVsZdztpYg4kL7vBb7Fye0TNk0yHR5iw8wKK2WC2A5slLRBUgNJEjilN5KkzcBi4D/zyhZLakyXlwFXA7vGH2tnL5PN0dJQy+rFzeUOxcwqTMm6rUTEkKRbgPuAWuCuiHhY0u3AjogYTRZbgbsjIvIO3wL8raQRkiT2ofzeTzZ92ju6uWjlfGpqPMSGmZ2spP0aI+Je4N5xZe8bt/7+Asd9H7islLEZRASZbI5fuvzccodiZhWoUhqprQyy3X0cPT7IFndxNbMCnCCqWKZjdJIgN1Cb2amcIKrY6BhMF7kGYWYFOEFUsUxHjlWLmlnQVF/uUMysAjlBVLFMtpstfkDOzCbgBFGl+oeGeazTQ2yY2cScIKrUnmeOMTwSHmLDzCbkBFGlxnowuQZhZhNwgqhSmWw3jXU1rF86r9yhmFmFcoKoUplsjk3nzKeu1v8EzKwwXx2qVHuHJwkys8k5QVShzlw/B4/1+wlqM5uUE0QV2p1NGqg9BpOZTcYJogplPMSGmRXBCaIKtXfkWDG/kaWtjeUOxcwqmBNEFcpku93+YGZTcoKoMkPDIzz69DG3P5jZlJwgqszjB3sYGB7xEBtmNiUniCrTnvUQG2ZWnJImCEnXStotaY+kWwts/0tJO9PXI5KO5G17k6RH09ebShlnNcl0dFNXIy5Y3lruUMyswtWV6sSSaoE7gGuA/cB2SdsiYtfoPhHxrrz93wFcmS4vAf4QaAMCuD899nCp4q0WmWyOC1e00lDnyqOZTa6UV4mrgD0RsTciBoC7gesn2f8G4PPp8suBr0dEV5oUvg5cW8JYq0amo9tDbJhZUUqZIFYB+/LW96dlp5C0DtgAfON0jpV0s6QdknZ0dnZOS9Bz2dHeQX56tM9dXM2sKJVyn2ErcE9EDJ/OQRFxZ0S0RUTb8uXLSxTa3DH6BLVrEGZWjFImiAPAmrz11WlZIVs5cXvpdI+1ImVGx2ByDcLMilDKBLEd2Chpg6QGkiSwbfxOkjYDi4H/zCu+D3iZpMWSFgMvS8vsLGSy3SyeV8+K+R5iw8ymVrJeTBExJOkWkgt7LXBXRDws6XZgR0SMJoutwN0REXnHdkn6AEmSAbg9IrpKFWu
1yGRzbF65AEnlDsXMZoGSJQiAiLgXuHdc2fvGrb9/gmPvAu4qWXBVZmQk2J3N8as/s2bqnc3MqJxGaiuxfYd76R0YdgO1mRXNCaJKtHd4iA0zOz1OEFUik+1Ggk3nuAZhZsVxgqgSmY4cG5a20NxQW+5QzGyWcIKoEskkQa49mFnxJuzFJOk1RRzfl/ZUsgrW0z/Ek129vObZq8sdipnNIpN1c/048E/AZJ3mX8S4bqxWeR55OkeEh9gws9MzWYL4l4j4jckOlvSZaY7HSsBDbJjZmZiwDSIi3jDVwcXsY+WX6eimtbGOVYuayx2Kmc0iRTdSS7pQ0mckfUnS80sZlE2v9myOi1bOp6bGQ2yYWfEma6Ruioi+vKIPAO9Nl78KXFHCuGyaRASZjm5e9azzyh2Kmc0yk9UgvirpjXnrg8B6YB1wWvM2WPl0HO2ju2/IkwSZ2WmbLEFcCyyQ9DVJLwJ+j2Qq0F8Gfm0mgrOzNzpJ0Bb3YDKz0zThLaZ0drePSfp74H8BbwP+ICIem6ng7OyNjsG0yQnCzE7TZG0QzwXeAwwAHwSOA38s6QDwgYg4MiMR2lnJZHOsXtzMgqb6codiZrPMZM9B/C3wCqAV+GREXA1slfRi4Askt5uswmU6uj2Cq5mdkcnaIIY40Sg9MFoYEd+OCCeHWaBvcJi9B3vY4jGYzOwMTFaDuBF4C0lyeOMk+1mF2vPMMYZHwjUIMzsjkzVSPwL87gzGYtNsdIgNj+JqZmdiwltMkv55qoOn2kfStZJ2S9oj6dYJ9nm9pF2SHpb0ubzyYUk709e2qWKxU2U6ummsq2H90pZyh2Jms9Bkt5h+dooLs4CLJ9wo1QJ3ANcA+4HtkrZFxK68fTYCtwFXR8RhSSvyTnE8Iq4o4m+wCWTSITZqPcSGmZ2ByRLE9UUcPzDJtquAPRGxF0DS3ek5d+Xt81vAHRFxGCAininiM61ImWw3P795xdQ7mpkVMFkbxLfP8tyrgH156/uB547bZxOApO8BtcD7I+Jr6bYmSTtIelN9KCK+Mv4DJN0M3Aywdu3aswx3bunM9XPw2IAbqM3sjE1Wg5ipz98IvARYDXxH0mXpQ3jrIuKApPOBb0h6aPxT3BFxJ3AnQFtbW8xo5BVudIgNN1Cb2Zkq5ZzUB4A1eeur07J8+4FtETEYEY8Dj5AkDCLiQPq+F/gWcGUJY51zMukQG65BmNmZmjJBSHqVpDNJJNuBjZI2SGoAtgLjG72/QlJ7QNIykltOeyUtltSYV341J7dd2BTas92cs6CRJS0N5Q7FzGapYi78vwo8KulPJW0u9sQRMQTcAtwHtANfjIiHJd0u6bp0t/uAQ5J2Ad8E3hMRh4AtwA5JP07LP5Tf+8mmlunIufZgZmdlyjaIiHiDpAXADcCnJAXwSeDzEZGb4th7gXvHlb0vbzmAd6ev/H2+D1xW7B9hJxscHmHPM8d44aZl5Q7FzGaxom4dRUQ3cA9wN3AuyZwQP5L0jhLGZmfo8YM9DAyPsMU1CDM7C8W0QVwn6R9JGorrgasi4heBZ+GhOCpSe4d7MJnZ2Summ+trgb+MiO/kF0ZEr6TfLE1YdjYy2Rz1teL8Za3lDsXMZrFiEsT7gY7RFUnNwDkR8URE/HupArMzl+no5oLlrTTUlbIXs5nNdcVcQf4BGMlbH07LrEJlsjm2nOv2BzM7O8UkiLqIyJ8waABw5/oKdaR3gI6jfWz2HNRmdpaKSRCdec8tIOl64GDpQrKzcWIOCNcgzOzsFNMG8Vbgs5I+RjLE9z48w1zFyqQ9mLa4BmFmZ6mYB+UeA54nqTVdP1byqOyMZbI5lrQ0sHx+Y7lDMbNZrqjRXCX9EnAJyRDcAETE7SWMy85QezbH5pXzGf3vZGZ2pop5UO5vSMZjegfJLaZfAdaVOC47AyMjwSNZj8FkZtOjmEbqF0TEG4HDEfFHwPNJJ/qxyvJUVy/HB4f9BLWZTYtiEkRf+t4r6TxgkGQ8Jqs
wY5MEuYHazKZBMW0QX5W0CPgz4EdAAB8vZVB2Zto7ctQINq5wgjCzszdpgkgnCvr3dArQL0n6Z6ApIo7ORHB2ejLZbtYva6G5obbcoZjZHDDpLaaIGAHuyFvvd3KoXJlszkN8m9m0KaYN4t8lvVbuN1nRevqHePJQr9sfzGzaFJMg3kIyOF+/pG5JOUndJY7LTtPupz3EhplNr2KepPZP0lkg05EmCNcgzGyaFPOg3IsKvYo5uaRrJe2WtEfSrRPs83pJuyQ9LOlzeeVvkvRo+npT8X9Sdcpku2ltrGP14uZyh2Jmc0Qx3Vzfk7fcBFwF3A/8/GQHSaolaeC+BtgPbJe0LSJ25e2zEbgNuDoiDktakZYvAf4QaCPpVnt/euzhov+yKpPp8BAbZja9pqxBRMSr8l7XAJcCxVyorwL2RMTedA6Ju4Hrx+3zW8Adoxf+iHgmLX858PWI6Eq3fR24trg/qfpEBO3Zbj9BbWbT6kzmpNwPbCliv1UkQ4PnH7dq3D6bgE2SvifpB5KuPY1jkXSzpB2SdnR2dhb9B8w1Pz3aR65vyGMwmdm0mvIWk6S/IrnNA0lCuYLkierp+vyNwEuA1cB3JF1W7MERcSdwJ0BbW1tMsfucNTYHhGsQZjaNimmD2JG3PAR8PiK+V8RxB4A1eeur07J8+4EfRsQg8LikR0gSxgGSpJF/7LeK+MyqNDqL3KZznCDMbPoUkyDuAfoiYhiSxmdJ8yKid4rjtgMbJW0gueBvBW4ct89XgBuAT0paRnLLaS/wGPBBSYvT/V5G0phtBbR3dLNmSTPzm+rLHYqZzSFFPUkN5PedbAb+baqDImIIuAW4D2gHvhgRD0u6PW+O6/uAQ5J2Ad8E3hMRhyKiC/gASZLZDtyellkBGc8BYWYlUEwNoil/mtGIOCZpXjEnj4h7gXvHlb0vbzmAd6ev8cfeBdxVzOdUs77BYfZ2HuMVl64sdyhmNscUU4PokfTs0RVJzwGOly4kOx17njnGSHiIDTObfsXUIH4H+AdJPyWZcnQlyRSkVgHaOzxJkJmVRjFjMW2XtBm4KC3anfY6sgqQyeZoqq9h3dKWcodiZnNMMWMxvR1oiYifRMRPgFZJ/730oVkxMtluLjpnPrU1HmLDzKZXMW0Qv5XOKAdAOvTFb5UsIitaRNDe4R5MZlYaxSSI2vzJgtJB+BpKF5IVq/NYP109Ax6DycxKophG6q8BX5D0t+n6W9IyK7MTc0C4BmFm06+YBPE/gJuBt6XrXwc+XrKIrGiZrHswmVnpFDPc90hE/E1EvC4iXgfsAv6q9KHZVDIdOVYuaGJxi+/4mdn0K6YGgaQrScZMej3wOPDlUgZlxWnP5tz+YGYlM2GCkLSJJCncABwEvgAoIn5uhmKzSQwOj7DnmRwv3rS83KGY2Rw1WQ0iA3wXeGVE7AGQ9K4ZicqmtLezh8Hh8BwQZlYyk7VBvAboAL4p6eOSXkoy1IZVgBMN1O7BZGalMWGCiIivRMRWYDPJUNy/A6yQ9NeSXjZD8dkE2jty1NeK85d7iA0zK41iejH1RMTnIuJVJDO7PUDS9dXKKJPt5sIV86mvPZNpxc3MpnZaV5eIOBwRd0bES0sVkBUn05Fji59/MLMS8s/PWehwzwDZ7j53cTWzknKCmIUyWQ+xYWal5wQxC431YHINwsxKqKQJQtK1knZL2iPp1gLbb5LUKWln+npz3rbhvPJtpYxztsl05Fja0sDy1sZyh2Jmc1hRQ22ciXRY8DuAa4D9wHZJ2yJi17hdvxARtxQ4xfGIuKJU8c1mmWw3m8+dT94o7GZm066UNYirgD0RsTciBoC7getL+HlVYXgk2P20Jwkys9IrZYJYBezLW9+flo33WkkPSrpH0pq88iZJOyT9QNKrC32ApJvTfXZ0dnZOX+QV7MlDPfQNjniIbzMruXI3Un8VWB8Rl5PMM/HpvG3rIqINuBH4iKQLxh+cPpPRFhFty5dXx6B1u9MeTFv
OdQ3CzEqrlAniAJBfI1idlo2JiEMR0Z+ufgJ4Tt62A+n7XuBbwJUljHXWaM/mqBFcuKK13KGY2RxXygSxHdgoaYOkBmArcFJvJEnn5q1eB7Sn5YslNabLy4CrSSYqqnqZjm42LGuhqb623KGY2RxXsl5METEk6RbgPqAWuCsiHpZ0O7AjIrYBvy3pOmAI6AJuSg/fAvytpBGSJPahAr2fqlImm+Oy1QvLHYaZVYGSJQiAiLgXuHdc2fvylm8Dbitw3PeBy0oZ22x0rH+Ip7p6eX3b6nKHYmZVoNyN1HYadnuIDTObQU4Qs4iH2DCzmeQEMYtkOnLMb6xj1aLmcodiZlXACWIW8RAbZjaTnCBmiYgg0+EhNsxs5jhBzBIHjhwn1z/k9gczmzFOELNEpsM9mMxsZjlBzBKjPZgu8iB9ZjZDnCBmifZsjrVL5tHaWNJnG83MxjhBzBKZjm4P8W1mM8oJYhboGxzm8YM9bPYQ32Y2g5wgZoFHnz7GSMAW1yDMbAY5QcwC7WNDbLgGYWYzxwliFsh05Giur2XtknnlDsXMqogTxCyQyXazaeV8ams8xIaZzRwniAoXEbR3dLv9wcxmnBNEhevM9XO4d9BdXM1sxjlBVLj20UmC3EBtZjPMCaLCZTrSHkyuQZjZDCtpgpB0raTdkvZIurXA9pskdUramb7enLftTZIeTV9vKmWclSyTzXHuwiYWzWsodyhmVmVKNrCPpFrgDuAaYD+wXdK2iNg1btcvRMQt445dAvwh0AYEcH967OFSxVup2j3EhpmVSSlrEFcBeyJib0QMAHcD1xd57MuBr0dEV5oUvg5cW6I4K9bA0AiPdR5z+4OZlUUpE8QqYF/e+v60bLzXSnpQ0j2S1pzOsZJulrRD0o7Ozs7pirti7D14jMHhcA3CzMqi3I3UXwXWR8TlJLWET5/OwRFxZ0S0RUTb8uXLSxJgOY1OErTFNQgzK4NSJogDwJq89dVp2ZiIOBQR/enqJ4DnFHtsNWjPdtNQW8OGZS3lDsXMqlApE8R2YKOkDZIagK3AtvwdJJ2bt3od0J4u3we8TNJiSYuBl6VlVSXTkePCFa3U15a7omdm1ahkvZgiYkjSLSQX9lrgroh4WNLtwI6I2Ab8tqTrgCGgC7gpPbZL0gdIkgzA7RHRVapYK1Um283VFy4rdxhmVqVKOn9lRNwL3Duu7H15y7cBt01w7F3AXaWMr5J19QzwdHc/W1a6/cHMysP3LipUZmwOCPdgMrPycIKoUKM9mDa7BmFmZeIEUaEy2W6WtTawfH5juUMxsyrlBFGhMtmcaw9mVlZOEBVoeCTYnc35CWozKysniAr0xKEe+odGPAaTmZWVE0QFOtFA7RqEmZWPE0QFymS7qa0RF65oLXcoZlbFnCAqUCab4/xlLTTV15Y7FDOrYk4QFSiT7eYi314yszJzgqgwub5B9nUd9xDfZlZ2ThAV5pGn3UBtZpXBCaLCtI/2YHINwszKzAmiwmSy3cxvquO8hU3lDsXMqpwTRIXJdOTYsnIBksodiplVOSeIChIRyRhMHuLbzCqAE0QF2X/4OMf6hzxIn5lVBCeICpLJjjZQuwZhZuXnBFFBMh3JLHIXneMEYWblV9IEIelaSbsl7ZF06yT7vVZSSGpL19dLOi5pZ/r6m1LGWSky2Rzrls6jpbGkU4WbmRWlZFciSbXAHcA1wH5gu6RtEbFr3H7zgXcCPxx3isci4opSxVeJ2rPdfkDOzCpGKX+qXgXsiYi9AJLuBq4Hdo3b7wPAnwDvKWEsExoeCQ4e60eCGil9gdL30bIT2xlbn86uqMcHhnniYA+vuvy8aTunmdnZKGWCWAXsy1vfDzw3fwdJzwbWRMT/kzQ+QWyQ9ADQDfxBRHy3FEEe6R3guR/89zM6Nj9pTJ1Q8ren+9ec2H9oZISRgC1uoDazClG2m92SaoAPAzcV2NwBrI2IQ5KeA3xF0iUR0T3uHDcDNwOsXbv2jOJoaaz
jg798GSMRRAQjASPpe7J+oiwCRkby14vYP68sIhgZOXX/4XS/521YytUXLjujv8PMbLqVMkEcANbkra9Oy0bNBy4FvpXeqlkJbJN0XUTsAPoBIuJ+SY8Bm4Ad+R8QEXcCdwK0tbXFmQTZVF/Ljc89s+RiZjaXlbIX03Zgo6QNkhqArcC20Y0RcTQilkXE+ohYD/wAuC4idkhanjZyI+l8YCOwt4SxmpnZOCWrQUTEkKRbgPuAWuCuiHhY0u3AjojYNsnhLwJulzQIjABvjYiuUsVqZmanUsQZ3ZmpOG1tbbFjx46pdzQzszGS7o+ItkLb/CS1mZkV5ARhZmYFOUGYmVlBThBmZlaQE4SZmRU0Z3oxSeoEnjyLUywDDk5TOLOdv4uT+fs4mb+PE+bCd7EuIpYX2jBnEsTZkrRjoq5e1cbfxcn8fZzM38cJc/278C0mMzMryAnCzMwKcoI44c5yB1BB/F2czN/Hyfx9nDCnvwu3QZiZWUGuQZiZWUFOEGZmVlDVJwhJ10raLWmPpFvLHU85SVoj6ZuSdkl6WNI7yx1TuUmqlfSApH8udyzlJmmRpHskZSS1S3p+uWMqJ0nvSv8/+Ymkz0tqKndM062qE0Q6KdEdwC8CFwM3SLq4vFGV1RDwuxFxMfA84O1V/n0AvBNoL3cQFeKjwNciYjPwLKr4e5G0CvhtoC0iLiWZ82ZreaOaflWdIICrgD0RsTciBoC7gevLHFPZRERHRPwoXc6RXABWlTeq8pG0Gvgl4BPljqXcJC0kmcjr7wAiYiAijpQ1qPKrA5ol1QHzgJ+WOZ5pV+0JYhWwL299P1V8QcwnaT1wJfDDModSTh8B3ksyq2G12wB0Ap9Mb7l9QlJLuYMql4g4APw58BTQARyNiH8tb1TTr9oThBUgqRX4EvA7EdFd7njKQdIrgWci4v5yx1Ih6oBnA38dEVcCPUDVttlJWkxyt2EDcB7QIukN5Y1q+lV7gjgArMlbX52WVS1J9STJ4bMR8eVyx1NGVwPXSXqC5Nbjz0v6THlDKqv9wP6IGK1R3kOSMKrVLwCPR0RnRAwCXwZeUOaYpl21J4jtwEZJGyQ1kDQybStzTGUjSST3mNsj4sPljqecIuK2iFgdEetJ/l18IyLm3C/EYkVEFtgn6aK06KXArjKGVG5PAc+TNC/9/+alzMFG+7pyB1BOETEk6RbgPpJeCHdFxMNlDqucrgZ+HXhI0s607Pcj4t7yhWQV5B3AZ9MfU3uB/1bmeMomIn4o6R7gRyS9/x5gDg674aE2zMysoGq/xWRmZhNwgjAzs4KcIMzMrCAnCDMzK8gJwszMCnKCMEtJOpa+r5d04zSf+/fHrX9/Os9vVgpOEGanWg+cVoJIB2ybzEkJIiLm3FO3Nvc4QZid6kPACyXtTMf8r5X0Z5K2S3pQ0lsAJL1E0nclbSN9qljSVyTdn84TcHNa9iGSUT93SvpsWjZaW1F67p9IekjSr+ad+1t58y98Nn1iF0kfSufseFDSn8/4t2NVo6qfpDabwK3A70XEKwHSC/3RiPgZSY3A9ySNjtz5bODSiHg8Xf+NiOiS1Axsl/SliLhV0i0RcUWBz3oNcAXJ/ArL0mO+k267EriEZBjp7wFXS2oHfhnYHBEhadH0/ulmJ7gGYTa1lwFvTIcf+SGwFNiYbvuvvOQA8NuSfgz8gGQgyI1M7meBz0fEcEQ8DXwb+Jm8c++PiBFgJ8mtr6NAH/B3kl4D9J7l32Y2IScIs6kJeEdEXJG+NuSN/d8ztpP0EpJRPp8fEc8iGZ/nbKah7M9bHgbqImKIZKKre4BXAl87i/ObTcoJwuxUOWB+3vp9wNvSodCRtGmCyXIWAocjolfSZpJpW0cNjh4/zneBX03bOZaTzNr2XxMFls7VsTAdQPFdJLemzErCbRBmp3oQGE5vFX2KZC7m9cCP0obiTuDVBY77GvDWtJ1gN8ltplF3Ag9K+lFE/Fpe+T8Czwd+DATw3ojIpgm
mkPnAP0lqIqnZvPuM/kKzIng0VzMzK8i3mMzMrCAnCDMzK8gJwszMCnKCMDOzgpwgzMysICcIMzMryAnCzMwK+v9dS7Ovcb84WwAAAABJRU5ErkJggg==\n",
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAvfUlEQVR4nO3dd3yV5f3/8dcnCRDC3nsPQURWBBFRcFRcRa1WcFWr4gBX+21rd/tr+63ftlZxFdG6B04UWwcuwIFKgLAFwkxAIKwAgZD1+f1xDnqMh3CAnNwnyfv5eORh7nHd53NO6Xnnuu5xmbsjIiJSVlLQBYiISGJSQIiISFQKCBERiUoBISIiUSkgREQkKgWEiIhEpYAQEZGoFBBS5ZnZnoifUjPbF7F8+REcb4aZXRePWkWqkpSgCxA5Wu5e/8DvZrYWuM7d3wuuovgysxR3Lw66Dqn+1IOQasvMkszsTjNbZWbbzOxFM2sa3pZqZs+E1+80szlm1srM/gIMBx4I90AeOMixXzKzTWaWZ2azzKxPxLa6Zna3ma0Lb//YzOqGt51sZp+GXzPbzK4Or/9Wr8XMrjazjyOW3czGm9lKYGV43cTwMXaZ2VwzGx6xf7KZ/Sr83neHt3cwswfN7O4y7+UNM7v9qD9wqXYUEFKd3QpcAJwKtAV2AA+Gt/0IaAR0AJoBNwL73P3XwEfABHev7+4TDnLst4AeQEtgHvBsxLZ/AIOAk4CmwM+BUjPrGG53P9AC6A9kHsb7uQAYAhwbXp4TPkZT4DngJTNLDW/7CTAWOAdoCPwY2As8CYw1syQAM2sOnA48fxh1SA2hISapzm4g9EWfA2BmfwDWm9mVQBGhYOju7guBuYdzYHd/7MDv4ePuMLNGwG5CX8YnuvuG8C6fhve7HHjP3Q98GW8L/8Tqr+6+PaKGZyK23W1mvwGOARYA1wE/d/fl4e0LDrymmeURCoV3gTHADHfffBh1SA2hHoRUZ52AqeHhnJ3AMqAEaAU8DbwDTDGzjWb2NzOrFctBw8M3d4WHb3YBa8Obmod/UoFVUZp2OMj6WGWXqeOnZrYsPIy1k1CPqHkMr/UkcEX49ysIfRYi36GAkOosGzjb3RtH/KS6+wZ3L3L3P7r7sYSGgs4Drgq3O9Qjji8DRgNnEPpS7hxeb8BWoADodpB6oq0HyAfSIpZbR9nn67rC5xt+AfwQaOLujYG8cA2Heq1ngNFm1g/oDbx2kP2khlNASHU2CfiLmXUCMLMWZjY6/PtIM+trZsnALkJDTiXhdpuBruUctwGwn9DwUBrwvwc2uHsp8BjwTzNrG+5tDDWzOoTOU5xhZj80sxQza2Zm/cNNM4GLzCzNzLoD1x7ivTUAioFcIMXMfkfoXMMBjwJ/MrMeFnK8mTUL15hD6PzF08Ar7r7vEK8lNZQCQqqzicA0YLqZ7QY+I3SSF0J/ob9MKByWATMJ/WV9oN3FZrbDzO6LctyngHXABmBp+LiR/gdYROhLeDvwf0CSu68ndNL4p+H1mUC/cJt7gEJC4fQk3z7pHc07hE54rwjXUsC3h6D+CbwITA+/x38DdSO2Pwn0RcNLUg7ThEEiNY+ZnUIoEDuHez0i36EehEgNEz4ZfxvwqMJByqOAEKlBzKw3sBNoA9wbaDGS8DTEJCIiUakHISIiUVWrO6mbN2/unTt3DroMEZEqY+7cuVvdvUW0bdUqIDp37kxGRkbQZYiIVBlmtu5g2zTEJCIiUSkgREQkKgWEiIhEpYAQEZGoFBAiIhKVAkJERKJSQIiISFQKCBGRKmzmilye+GQNRSUV/9xFBYSISBVVWurc9daXPDl73ddTCVYkBYSISBX1zpJNLPtqF7ee3p2U5Ir/Oo9rQJjZKDNbbmZZZnZnlO2NzOwNM1tgZkvM7JpY24qI1GSlpc69762ka4t6fL9fu7i8RtwCIjzX74PA2cCxwFgzO7bMbuOBpe7eDxgB3G1mtWN
sKyJSY725+CuWb97N7Wf0JDkpHgNM8e1BDAay3H21uxcCU4DRZfZxoIGZGVCf0Dy9xTG2FRGpkUrCvYeerepzbt82cXudeAZEO749iXpOeF2kB4DewEZCk7zfFp4CMZa2IiI10n8WbiRryx5uOz1+vQeIb0BEq7rs9HVnAZlAW6A/8ICZNYyxbehFzMaZWYaZZeTm5h55tSIiVUBxSSkT31tJr9YNOPu41nF9rXgGRA7QIWK5PaGeQqRrgFc9JAtYA/SKsS0A7j7Z3dPdPb1Fi6hzXoiIVBvTFmxk9dZ8bj+jJ0lx7D1AfANiDtDDzLqYWW1gDDCtzD7rgdMBzKwVcAywOsa2IiI1SnFJKRPfX0mftg05q0+ruL9e3GaUc/diM5sAvAMkA4+5+xIzuzG8fRLwJ+AJM1tEaFjpF+6+FSBa23jVKiJSFbw6fwPrtu3l0avSCV3bE19xnXLU3d8E3iyzblLE7xuB78XaVkSkpioqKeW+91dyfPtGnN67ZaW8pu6kFhGpAl6em0POjn3ccUbPSuk9gAJCRCThFRaX8sAHWfTv0JgRx1TexTgKCBGRBPdiRjYbdu7jJ2dWXu8BFBAiIgmtoKiEBz/MYlCnJgzv0bxSX1sBISKSwF6Yk81XeQWV3nsABYSISMI60HsY3KUpJ3VrVumvr4AQEUlQz32+ni279wfSewAFhIhIQtpXWMJDM1ZxUrdmnNi18nsPoIAQEUlIz3y2jq179nPHmT0Dq0EBISKSYPL3FzNp5iqG92jOCZ2bBlaHAkJEJME8NXsd2/ILuf2M4HoPoIAQEUkoe/YXM3nWKkYc04JBnZoEWosCQkQkgTz56Vp27C3ijoB7D6CAEBFJGLsKipg8azWn92pJvw6Ngy5HASEikige/3gtefuKAr1yKZICQkQkAeTtK+LRj1fzvWNbcVy7RkGXAyggREQSwr8/XsPuguLAr1yKpIAQEQnYzr2FPPbxGs7p25pj2zYMupyvKSBERAL2yEeryS8s5rbTE6f3AAoIEZFAbc8v5IlP1nJu3zYc07pB0OV8iwJCRCRAk2etZm9RCbef0SPoUr4jrgFhZqPMbLmZZZnZnVG2/8zMMsM/i82sxMyahretNbNF4W0Z8axTRCQIW/fs58lP1zK6X1u6t0ys3gNASrwObGbJwIPAmUAOMMfMprn70gP7uPvfgb+H9z8fuMPdt0ccZqS7b41XjSIiQXp45ir2F5dw6+mJ13uA+PYgBgNZ7r7a3QuBKcDocvYfCzwfx3pERBLGll0FPDV7HRcMaEfXFvWDLieqeAZEOyA7YjknvO47zCwNGAW8ErHagelmNtfMxh3sRcxsnJllmFlGbm5uBZQtIhJ//5q5iuJS59bTErP3APENiGjz4/lB9j0f+KTM8NIwdx8InA2MN7NTojV098nunu7u6S1atDi6ikVEKsHmXQU8+/l6fjCwHZ2b1wu6nIOKZ0DkAB0iltsDGw+y7xjKDC+5+8bwf7cAUwkNWYmIVHkPfZhFaalzSwL3HiC+ATEH6GFmXcysNqEQmFZ2JzNrBJwKvB6xrp6ZNTjwO/A9YHEcaxURqRQbd+7j+S+yuSS9PR2apgVdTrnidhWTuxeb2QTgHSAZeMzdl5jZjeHtk8K7XghMd/f8iOatgKlmdqDG59z97XjVKiJSWR78MAvHGT+ye9ClHFLcAgLA3d8E3iyzblKZ5SeAJ8qsWw30i2dtIiKVLWfHXl7MyObSEzrQvkli9x5Ad1KLiFSaBz/MwrAq0XsABYSISKVYv20vL2XkcNmQjrRpVDfocmKigBARqQT3f7CS5CTjphHdgi4lZgoIEZE4W7M1n1fnb+DyIZ1o1TA16HJipoAQEYmz+99fSa1k48YRXYMu5bAoIERE4mhV7h5ey9zAVUM707JB1ek9gAJCRCSu7nt/Jam1krnhlKrVewAFhIhI3KzcvJtpCzZy1dDONKtfJ+hyDpsCQkQkTu59fyVptZIZVwV7D6C
AEBGJiy837eK/C7/immFdaFqvdtDlHBEFhIhIHEx8byUN6qRw3fAuQZdyxBQQIiIVbMnGPN5avIlrTu5C47Sq2XsABYSISIW7972VNEhN4dqTq27vARQQIiIValFOHu8u3cz1w7vSqG6toMs5KgoIEZEKdO97K2hUtxbXDOscdClHTQEhIlJBMrN38v6XWxh3SlcapFbt3gMoIEREKsw9766gSVotfnRS56BLqRAKCBGRCjB33XZmrsjlhlO7Ub9OXCfrrDQKCBGRCnDPuytpVq82Vw3tFHQpFUYBISJylL5Ys52Ps7Zy04hupNWuHr0HiHNAmNkoM1tuZllmdmeU7T8zs8zwz2IzKzGzprG0FRFJFPe8u4IWDepw+ZDq03uAOAaEmSUDDwJnA8cCY83s2Mh93P3v7t7f3fsDvwRmuvv2WNqKiCSCT1dtZfbqbdx0ajfq1k4OupwKFc8exGAgy91Xu3shMAUYXc7+Y4Hnj7CtiEilc3fufXclrRrW4bIhHYMup8LFMyDaAdkRyznhdd9hZmnAKOCVI2g7zswyzCwjNzf3qIsWEYnVJ1nb+GLtdsaP7E5qrerVe4D4BoRFWecH2fd84BN33364bd19srunu3t6ixYtjqBMEZHD5+7c894K2jRK5dITOgRdTlzEMyBygMhPrT2w8SD7juGb4aXDbSsiUqncnac/W8fcdTsYP7I7dVKqX+8BIJ7XY80BephZF2ADoRC4rOxOZtYIOBW44nDbiohUtry9Rfxq6iL+u+grhnVvxg/Tq2fvAcoJCDO7KIb2Be7+ZrQN7l5sZhOAd4Bk4DF3X2JmN4a3TwrveiEw3d3zD9U2pnckIhInn6/exh0vZLJl935+MaoX407pSnJStBHx6sHco58WMLNtwOtEPx9wwCnu3i0ehR2J9PR0z8jICLoMEalmikpKmfjeSh6ckUWnpmlMHDOAfh0aB11WhTCzue6eHm1beUNMb7n7jw9x4GeOqjIRkQS3bls+t03JJDN7J5cMas8fvt+HetXkWUuHctB36e5XHGzb4ewjIlIVuTtT52/gt68tJinJeOCyAZx3fNugy6pUMcegmXUH/gDUBf7h7rPjVZSISJB2FRTxm6mLmbZgI4M7N+WeMf1p17hu0GVVuvJOUqe6e0HEqj8Bvyd0P8JLQP/4liYiUvky1m7ntimZbNpVwE/P7MnNI7tX6xPR5SmvB/GGmT3l7k+Hl4uAzoQCoiTehYmIVKbiklLu/yCL+z9YSfsmabx041AGdmwSdFmBKi8gRgE3mdnbwF+A/wFuBdKAyyuhNhGRSpG9fS+3v5DJ3HU7uGhAO/44uk+1mDL0aJV3kroEeMDMngZ+B7QBfuvuqyqrOBGReHs9cwO/mboYgIlj+jO6f9THvtVI5Z2DGAL8DCgE/hfYB/zFzHKAP7l7XuWUKCJS8XYXFPH715fw6vwNDOrUhHsv7U+HpmlBl5VQyhtimgRcDNQHHnb3YcAYMzsVeBE4qxLqExGpcPPX7+C2KZnk7NjL7Wf0YMLI7qQka4LNssoLiBJCJ6XTCPUiAHD3mcDM+JYlIlLxSkqdhz7M4t73V9K6YSov3jCU9M5Ngy4rYZUXEJcBNxAKh6sqpxwRkfjYsHMfd0zJ5Iu12/l+v7b86YLjaFRXJ6LLU95J6hXATyuxFhGRuPjPwo386tVFlJQ6//xhPy4c0A6zmnlvw+E46KCbmf3nUI1j2UdEJCj5+4v52UsLmPDcfLq2qM+btw3nooHtFQ4xKm+I6WQzm1bOdgOOreB6REQqxILsndw2ZT7rtu9lwsju3HZGD2rpRPRhKS8gRsfQvvDQu4iIVJ6SUufhWav45/QVtGxQhynXn8iQrs2CLqtKKu8chK5UEpEq5au8ffzkhQXMXr2Nc/u24X8v7EujNJ2IPlI146HmUuWUlDrb9uynZcPUoEuRKuLtxV/xi1cWUVRSyt8uPp5LBulcw9HSgJwkpD/9Zykn3fUB/134VdC
lSILbW1jML19dyI3PzKNTszT+e+twfpjeQeFQAQ7ZgzCz84A33b20EuoRYVNeAc99vp6UZOOW5+ext/B4LqnGE8PLkVu8IY9bp8xnzdZ8bhrRjTvO6EntFP3dW1Fi+STHACvN7G9m1jveBYk8PGsVJe68Pv5kTurWnJ+9vJCnZ68NuixJIKWlziOzVnPhQ5+wd38Jz143hF+M6qVwqGCH7EG4+xVm1hAYCzxuZg48Djzv7rvLa2tmo4CJQDLwqLvfFWWfEcC9QC1gq7ufGl6/FthN6JEfxQebVFuqly27Q72Hiwa045jWDXj0R+lMeG4ev319CfmFJdx4aregS6yxNuUV8NCMLLblhy5eNPh6GCf0O1HX8/V6+9Y+hNtYeMnsm2NQZl8rc4wvv9rNF2u3M6pPa/56UV+a1Ktd4e9XYjxJ7e67zOwVQtON3g5cCPzMzO5z9/ujtTGzZOBB4EwgB5hjZtPcfWnEPo2Bh4BR7r7ezFqWOcxId996mO9JqrBHZq2mqKSU8SO7A5BaK5l/XTGIO17I5K63vmTv/mLuOLOnxpcrUUmp89Tstdw9fQWFJaV0aFIXh9DUYd/8B3eP+B0OLLmHfojY70A7//oYHvF75P5l14cW6qQk89eL+jLmBJ1riKdYzkGcD/wY6AY8DQx29y1mlgYsA6IGBDAYyHL31eHjTCF0b8XSiH0uA1519/UA7r7lSN+IVH1b9+znmc/Wc0H/dnRuXu/r9bWSk5g4ZgB1ayVz3wdZ5BeW8Jtze+uLoRIsysnjV1MXsWhDHqf0bMGfRvehU7N6h24o1UIsPYhLgHvcfVbkSnffa2Y/LqddOyA7YjkHGFJmn55ALTObATQAJrr7UwdeApgeHtJ62N0nR3sRMxsHjAPo2LFjDG9HEtWjH62hoLiE8ad1/8625CTj/35wPPXqpPDvj9ewt7CEv1xwHEk1dK7geNtdUMTd01fw1Oy1NKtfh/vHDuC849solGuYWALi98DX1xqaWV2glbuvdff3y2kX7V+Sl1lOAQYBpxMavpptZp+FHxQ4zN03hoed3jWzL8uGFEA4OCYDpKenlz2+VBHb8wt5avZazj++Ld1a1I+6T1KS8fvzj6Vu7WT+NWMVBUUl/P3i4/Uc/wrk7ry9eBN/eGMJW3bv54ohnfifs47RU09rqFgC4iXgpIjlkvC6Ew7RLgeIvDaxPbAxyj5b3T0fyDezWUA/YIW7b4TQsJOZTSU0ZPWdgJDq4bGP17CvqIQJUXoPkcyMX4zqRf06Kfz9neXsLSzmvrEDqJOSXEmVVl/Z2/fy+2lL+ODLLfRu05BJVwxiQMcmQZclAYrlT68Ud4+cMKgQiOWSgTlADzPrYma1CV0uW/bhf68Dw80sJXxOYwiwzMzqmVkDADOrB3wPWBzDa0oVlLe3iCc+Xcs5x7WhZ6sGMbUZP7I7vz3vWN5ZsplxT81lX2FJnKusvopKSpk0cxXfu2cWn63exm/O7c0bE4YpHCSmHkSumX3f3acBmNlo4JBXFrl7sZlNAN4hdJnrY+6+xMxuDG+f5O7LzOxtYCFQSuhS2MVm1hWYGh7vTAGec/e3j+QNSuJ77JM17NlffMjeQ1nXntyFerWT+eXURVz9+Bf8++oTqF9HT485HHPXbefXUxfz5abdnHlsK/7w/T60a1w36LIkQZh7+cP2ZtYNeBZoS+i8QjZwlbtnxb+8w5Oenu4ZGRlBlyGHYVdBEcPu+oCTujXj4SuP7FaX1zM38JMXF9C3XSOevGawHs4Wg7y9Rdz19pc8/8V62jRK5Q/f78NZfVoHXZYEwMzmHuw+s1hulFsFnGhm9QkFSrk3x4kcjic/WcvugmJuOa3HER9jdP92pNZK5pbn5jPmkc94+trBNK9fpwKrrD7cndczN/Ln/y5le34h157chTvO7Kmel0QV078KMzsX6AOkHrjMzd3/Xxzrkhpgz/5iHv14DWf0bslx7Rod1bHO6tOaR36Uzg1PZ3Dpw7N
59roTad1IT4KNtGZrPr99bTEfZ22lX/tGPHHN4KP+3KV6O+RJajObBFwK3EJoiOkSoFOc65Ia4KnZa8nbV3RUvYdIp/ZswZPXDGbzrv1c8vCnZG/fWyHHrer2F5cw8b2VnHXvLBZk7+T/je7DqzcPUzjIIcVyFdNJ7n4VsMPd/wgM5duXr4octvz9xTz60RpGHNOCfh0aV9hxh3RtxjPXDWHXvmIumTSbVbl7KuzYVdHsVds4e+JH3PPeCs48thXv/fRUrhramWTdYCgxiCUgCsL/3WtmbYEioEv8SpKa4NnP17E9v7DCeg+R+ndozJRxJ1JcWsqlD89m2Ve7Kvw1Et22Pfv5yYuZjH3kM4pKSnnimhN48LKBtNIETHIYYgmIN8IP1fs7MA9YCzwfx5qkmttXWMLkWasZ3qM5gzrF51r73m0a8sINQ0lJSmLM5M/IzN4Zl9dJNKWlzgtz1nP6P2cyLXMjN4/oxvTbT2XEMWWfgylyaOUGhJklAe+7+053f4XQuYde7v67SqlOqqXnvljP1j2F3Hp6xfceInVrUZ+XbhxKo7q1uPyRz/h89ba4vl7QVm7ezZjJn/GLVxbRo2V93rxtOD8f1Yu6tXWXuRyZcgMiPIvc3RHL+909L+5VSbVVUFTCpJmrGNq1GSd0bhr31+vQNI0XbxhK60ap/OjxL5i5Ijfur1nZ9hWW8Pd3vuSc+z5i+ebd/N8P+vLCuKEx35UucjCxDDFNN7MfmB7jKBXghTnZ5O7eH/feQ6TWjVJ54YahdG1en+ufzOCdJZsq7bXjbcbyLXzv3pk8+OEqzj++Le//9FQuPaGjnnIrFSKWgPgJoYfz7TezXWa228xq3lk/OWr7i0v414xVDO7clBO7xr/3EKl5/To8f/2JHNu2ITc/O4/XMzdU6utXtC27Chj/3DyufnwOtZKSeO66Ifzz0v66QVAqVCx3UqufKhXipYwcNu0q4B+X9AtkXoFGabV45rohXPfkHG5/IZO9hSWMHVy15hApKXWe/Xwdf397OfuLS7n9jB7cNKKbnmYrcRHLjHKnRFsfbW4GkYMpLC7lXzNWMbBjY4Z1bxZYHfXrpPDENYO58Zm5/PLVRewtLOHak6vGVdtLNubxq6mLWZC9k5O6NePPFxxH14PMnSFSEWJ51MbPIn5PJTQvw1zgtLhUJNXSq/Ny2LBzH3+58LjAZyVLrZXMw1cO4rbnM/nTf5ayr7CY8SO7B15XWe5O9vZ9zF2/nU+ytjF1/gYa163FPZf244L+7RKuXql+YhliOj9y2cw6AH+LW0VS7RSVlPLgjCz6tW/EqT1bBF0OEJr0/oHLBvCzlxfyj+kryC8s4ednHRPol25BUQlLNuYxd92O8M9Otu7ZD4R6Ppee0IGfn3UMjdNimY5F5OgdySMcc4DjKroQqb5em7+B7O37+MP5fRLqr96U5CTuvqTf11OY7t1fzO/P71NpVwBt2V3AvHU7mbtuO3PX7WDxhl0UlpQC0LFpGsN7NGdgpyakd2pCz1YN9HgMqXSxnIO4n2/mkk4C+gML4liTVCPFJaU8+GEWfdo25LReiXc3b1KS8ZcLjiOtVjKPfryGvYUl3PWD4yv8y7ik1Fm+aTdz1+9gXriHsD78MMHayUn0bd+Iq4d1ZmDHJgzs1JiWDfRIDAleLD2IyBl4ioHn3f2TONUj1cwbCzeydtteHr5yUEL1HiKZGb8+tzf16qQw8f2V7Csq4Z5L+1MrOZarwKPbVVDE/PU7mbsuFAjz1+8gPzwtavP6dUjv1IQrT+zEwE5NOK5dQ12FJAkploB4GShw9xIAM0s2szR317OUpVwlpc79H2TRq3UDzuzdKuhyymVm3HFmT9JqJ/PXt76koKiEBy4bSGqtQ39xuzvrtu0NnTcI9xCWb96NOyQZ9GrdkIsGtmdQpyYM6tSE9k3qJmxYikSKJSDeB84ADjw3uS4wHTgpXkVJ9fDfRV+xOjefhy4fWGXu7L3h1G6k1U7
mt68v4don5/DIVemk1f72/00KikpYtOGbk8nz1u1gW34hAA1SUxjYsQnn9G3DoE5N6NehsWZrkyorln+5qe7+9UP13X2PmaXFsSapBkpLnfvfX0mPlvUZVcXmOr5yaGfq1k7h5y8v4Kp/f8H/XXx86PxBOBCWbMyjqCR0Wq5r83qM7NXy695B9xb1q0wYihxKLAGRb2YD3X0egJkNAvbFcnAzGwVMBJKBR939rij7jADuBWoBW9391FjbSuJ6e8kmVm7Zw31jB1TJL8yLB7Wnbq1kbpsyn9PvnglAnZQk+rVvzLUnd2VQpyYM7NiYZnq0hVRjsQTE7cBLZrYxvNyG0BSk5TKzZOBB4ExCl8bOMbNp7r40Yp/GwEPAKHdfb2YtY20riau01Lnv/ZV0bVGPc/u2CbqcI3bu8W1o3SiVRTk76d+xCce2aUjtlCM/cS1S1cRyo9wcM+sFHENoTuov3b0ohmMPBrLcfTWAmU0BRgORX/KXAa+6+/rwa205jLaSoN5dtpkvN+3mnkv7Vflr9w8MHYnURIf8c8jMxgP13H2xuy8C6pvZzTEcux2QHbGcE14XqSfQxMxmmNlcM7vqMNoeqG+cmWWYWUZubvV71n9V4x7qPXRulsb5x7cNuhwROQqx9Jevd/edBxbcfQdwfQztov3p6GWWU4BBwLnAWcBvzaxnjG0P1DPZ3dPdPb1Fi8R4jENN9sGXW1iycRfjR3Yn5SjuIxCR4MVyDiLJzMzdHb4+PxDLw2BygA4Ry+2BjVH22eru+YROhs8C+sXYVhLMgd5Dh6Z1uWBA1A6fiFQhsfyJ9w7wopmdbmanAc8Db8fQbg7Qw8y6mFltYAwwrcw+rwPDzSwlfOnsEGBZjG0lwcxckcuCnDzGj+h+VHchi0hiiKUH8QtgHHAToaGf6cAjh2rk7sVmNoFQwCQDj7n7EjO7Mbx9krsvM7O3gYVAKaHLWRcDRGt72O9OKo27M/H9lbRrXJeLBrYPuhwRqQAWHjmKvYHZycBYdx8fn5KOXHp6umdkZBx6R6lwH6/cyhX//pw/X3AcV5zYKehyRCRGZjbX3dOjbYvpGQBm1h8YS+j+hzXAqxVWnVR5od7DClo3TOWSdPUeRKqLgwZE+GqiMYSCYRvwAqEex8hKqk2qiM9Wb2fO2h388ft99FRSkWqkvB7El8BHwPnungVgZndUSlVSpdz3/kpaNKjDpSd0OPTOIlJllHepyQ+ATcCHZvaImZ1O9PsTpAb7Ys12Zq/exg2ndI3p0dgiUnUcNCDcfaq7Xwr0AmYAdwCtzOxfZva9SqpPEtz9H6ykef3aXD5EJ6ZFqptDXqzu7vnu/qy7n0fohrVM4M54FyaJb+66HXy0civXD+9K3drqPYhUN4d1N5O7b3f3h939tHgVJFXH/R+spElaLV3WKlJN6XZXOSILsncyY3ku1w3vSj3NmCZSLSkg5Ijc/8FKGtWtxVVD1XsQqa4UEHLYFm/I471lW7j25C40SK0VdDkiEicKCDls93+wkgapKfzopM5BlyIicaSAkMOy7KtdvLNkM9cM60Kjuuo9iFRnCgg5LA98kEX9Oin8eFjnoEsRkThTQEjMVmzezZuLv+JHJ3WicVosc0aJSFWmgJCYPfBBFnVrJXPtyV2DLkVEKoECQmKStWUPbyzcyJVDO9G0nnoPIjWBAkJi8tCHWdRJSeL64eo9iNQUCgg5pLVb83ktcwNXDOlE8/p1gi5HRCqJAkIO6aEZWdRKTmLcKeo9iNQkCggpV/b2vbw6bwNjB3ekZcPUoMsRkUoU14Aws1FmttzMsszsO48IN7MRZpZnZpnhn99FbFtrZovC6zPiWacc3EMzVpFkxo2ndgu6FBGpZHF7DKeZJQMPAmcCOcAcM5vm7kvL7PpReK6JaEa6+9Z41Sjl27BzHy/PzWbMCR1p3Ui9B5GaJp49iMFAlruvdvdCYAowOo6vJxVs0oxVANw4Qr0HkZoongHRDsiOWM4Jryt
rqJktMLO3zKxPxHoHppvZXDMbd7AXMbNxZpZhZhm5ubkVU7mwKa+AF+Zkc/GgDrRrXDfockQkAPGc6cWirPMyy/OATu6+x8zOAV4DeoS3DXP3jWbWEnjXzL5091nfOaD7ZGAyQHp6etnjyxGaNHMVpe7crN6DSI0Vzx5EDtAhYrk9sDFyB3ff5e57wr+/CdQys+bh5Y3h/24BphIaspJKsGVXAc9/sZ6LBrajQ9O0oMsRkYDEMyDmAD3MrIuZ1QbGANMidzCz1mZm4d8Hh+vZZmb1zKxBeH094HvA4jjWKhEmz1pNcakzfmT3oEsRkQDFbYjJ3YvNbALwDpAMPObuS8zsxvD2ScDFwE1mVgzsA8a4u5tZK2BqODtSgOfc/e141SrfWL5pN898vo7R/dvSqVm9oMsRkQCZe/UZtk9PT/eMDN0ycSQWZO/koRlZTF+6mXq1U3jjlpPp0lwBIVLdmdlcd0+Pti2eJ6klwbk7n67axkMzsvgkaxsNU1OYMLI7V5/UmWZ65pJIjaeAqIFKS513l23moRmrWJC9kxYN6vDLs3tx2ZCONEjVNKIiEqKAqEGKSkqZlrmRSTNXsXLLHjo0rcufLziOiwe1J7VWctDliUiCUUDUAAVFJbwwJ5vJs1azYec+erVuwMQx/Tm3bxtSkvW8RhGJTgFRjeXtK+KZz9bx2Mdr2JZfyKBOTfh/o/twWq+WhK8QExE5KAVENZS7ez+PfbKGZ2avY/f+Yk7p2YLxI7oxuEtTBYOIxEwBUY1kb9/L5FmreTEjm8KSUs45rg03jejGce0aBV2aiFRBCohqYMXm3fxrxiqmLdhIksFFA9pzw6ld6dqiftCliUgVpoCowuav38FDM1bx7tLN1K2VzNUndea64V1o00hPXxWRo6eAqGLcnY+ztvLQh6uYvXobjerW4tbTe3D1SZ1pWq920OWJSDWigKgiSkudd5Zs4qEZq1i0IY+WDerw63N6M3ZIR+rX0f+MIlLx9M2S4AqLS3ktcwOTZq5idW4+nZql8deL+nLRwHbUSdHNbSISPwqIBLWvsIQpc9bzyKzVbMwroHebhtw/dgDn9G1DcpIuVRWR+FNAJJi8vUU8NXstj3+6lu35hZzQuQl/ubAvI45poXsYRKRSKSASxL7CEu77YCVPz17Hnv3FjDymBTeP7M4JnZsGXZqI1FAKiASwKncP45+dx5ebdnPe8aGb2/q01c1tIhIsBUTA3liwkTtfWUjtlCSeuOYERhzTMuiSREQABURg9heX8Of/LOPpz9YxqFMT7h87gLaNdYObiCQOBUQAsrfv5eZn57FoQx7XD+/Cz0f1opYeuy0iCUYBUcneXbqZn76YiQMPXzmIs/q0DrokEZGo4vpnq5mNMrPlZpZlZndG2T7CzPLMLDP887tY21Y1RSWl/O+by7j+qQw6Nkvjv7cMVziISEKLWw/CzJKBB4EzgRxgjplNc/elZXb9yN3PO8K2VcJXefuY8Nx85q7bwZUnduLX5/bWFJ8ikvDiOcQ0GMhy99UAZjYFGA3E8iV/NG0TyqwVudz+QiYFRSVMHNOf0f3bBV2SiEhM4jnE1A7IjljOCa8ra6iZLTCzt8ysz2G2xczGmVmGmWXk5uZWRN0VoqTU+ef05fzo8S9oUb8O0yacrHAQkSolnj2IaM+F8DLL84BO7r7HzM4BXgN6xNg2tNJ9MjAZID09Peo+lS13935umzKfT1dt4+JB7fnT6OOoW1tDSiJStcQzIHKADhHL7YGNkTu4+66I3980s4fMrHksbRPVZ6u3ccvz89m1r4i/XXw8P0zvcOhGIiIJKJ4BMQfoYWZdgA3AGOCyyB3MrDWw2d3dzAYTGvLaBuw8VNtEU1rq/GvmKu6evpzOzerx1I8H07tNw6DLEhE5YnELCHcvNrMJwDtAMvCYuy8xsxvD2ycBFwM3mVkxsA8Y4+4ORG0br1qP1o78Qn7yYiYfLs/l3OPbcNdFfWmQWivoskREjoq
Fvo+rh/T0dM/IyKjU15y/fgcTnptP7u79/Pa83lxxYic9lltEqgwzm+vu6dG26U7qI+TuPP7JWv761jJaNUzl5ZuGcnz7xkGXJSJSYRQQR2BXQRG/eHkhby3exBm9W3H3Jf1olKYhJRGpXhQQh2nxhjzGPzePnB37+NU5vbh+eFcNKYlItaSAiJG78/wX2fzhjSU0TavNC+NOJF2zvYlINaaAiEH+/mJ+89pips7fwPAezbn30v40q18n6LJEROJKAXEIKzfv5qZn57Eqdw8/ObMn40d2JzlJQ0oiUv0pIMrx6rwcfj11MfXqJPPMtUMY1r150CWJiFQaBUQUBUUl/PGNJTz/RTaDuzTlgbEDaNkwNeiyREQqlQKijLVb87n52Xks/WoXN4/oxk/O7EmKpgMVkRpIARHhrUVf8bOXF5KcZDx2dTqn9WoVdEkiIoFRQACFxaX89a1lPP7JWvp3aMwDlw2gfZO0oMsSEQlUjQ+IvL1FXPX4FyzI3smPh3XhzrN7UTtFQ0oiIjU+IBqkptCpaRo3ntKVs/u2CbocEZGEUeMDIinJuG/sgKDLEBFJOBpLERGRqBQQIiISlQJCRESiUkCIiEhUCggREYlKASEiIlEpIEREJCoFhIiIRGXuHnQNFcbMcoF1R9i8ObC1AsupyvRZfJs+j2/T5/GN6vBZdHL3FtE2VKuAOBpmluHu6UHXkQj0WXybPo9v0+fxjer+WWiISUREolJAiIhIVAqIb0wOuoAEos/i2/R5fJs+j29U689C5yBERCQq9SBERCQqBYSIiERV4wPCzEaZ2XIzyzKzO4OuJ0hm1sHMPjSzZWa2xMxuC7qmoJlZspnNN7P/BF1L0MyssZm9bGZfhv+NDA26piCZ2R3h/58sNrPnzSw16JoqWo0OCDNLBh4EzgaOBcaa2bHBVhWoYuCn7t4bOBEYX8M/D4DbgGVBF5EgJgJvu3svoB81+HMxs3bArUC6ux8HJANjgq2q4tXogAAGA1nuvtrdC4EpwOiAawqMu3/l7vPCv+8m9AXQLtiqgmNm7YFzgUeDriVoZtYQOAX4N4C7F7r7zkCLCl4KUNfMUoA0YGPA9VS4mh4Q7YDsiOUcavAXYiQz6wwMAD4PuJQg3Qv8HCgNuI5E0BXIBR4PD7k9amb1gi4qKO6+AfgHsB74Cshz9+nBVlXxanpAWJR1Nf66XzOrD7wC3O7uu4KuJwhmdh6wxd3nBl1LgkgBBgL/cvcBQD5QY8/ZmVkTQqMNXYC2QD0zuyLYqipeTQ+IHKBDxHJ7qmE38XCYWS1C4fCsu78adD0BGgZ838zWEhp6PM3Mngm2pEDlADnufqBH+TKhwKipzgDWuHuuuxcBrwInBVxThavpATEH6GFmXcysNqGTTNMCrikwZmaExpiXufs/g64nSO7+S3dv7+6dCf27+MDdq91fiLFy901AtpkdE151OrA0wJKCth440czSwv+/OZ1qeNI+JegCguTuxWY2AXiH0FUIj7n7koDLCtIw4EpgkZllhtf9yt3fDK4kSSC3AM+G/5haDVwTcD2BcffPzexlYB6hq//mUw0fu6FHbYiISFQ1fYhJREQOQgEhIiJRKSBERCQqBYSIiESlgBARkagUECJhZrYn/N/OZnZZBR/7V2WWP63I44vEgwJC5Ls6A4cVEOEnA5fnWwHh7tXurlupfhQQIt91FzDczDLDz/xPNrO/m9kcM1toZjcAmNmI8PwZzwGLwuteM7O54XkCxoXX3UXoqZ+ZZvZseN2B3oqFj73YzBaZ2aURx54RMf/Cs+E7djGzu8xsabiWf1T6pyM1Ro2+k1rkIO4E/sfdzwMIf9HnufsJZlYH+MTMDjy5czBwnLuvCS//2N23m1ldYI6ZveLud5rZBHfvH+W1LgL6E5pfoXm4zazwtgFAH0LPB/sEGGZmS4ELgV7u7mbWuGLfusg31IMQObTvAVeFHz/yOdAM6BHe9kVEOADcamYLgM8IPQiyB+U
7GXje3UvcfTMwEzgh4tg57l4KZBIa+toFFACPmtlFwN6jfG8iB6WAEDk0A25x9/7hny4Rz/7P/3onsxGEnvI51N37EXo+z6GmoYz2yPkD9kf8XgKkuHsxoV7LK8AFwNuH8T5EDosCQuS7dgMNIpbfAW4KPwodM+t5kMlyGgE73H2vmfUiNG3rAUUH2pcxC7g0fJ6jBaFZ2744WGHhuToahR+geDuh4SmRuNA5CJHvWggUh4eKniA0F3NnYF74RHEuob/ey3obuNHMFgLLCQ0zHTAZWGhm89z98oj1U4GhwAJCk1X93N03hQMmmgbA62aWSqj3cccRvUORGOhpriIiEpWGmEREJCoFhIiIRKWAEBGRqBQQIiISlQJCRESiUkCIiEhUCggREYnq/wPNsrzQPvNmRgAAAABJRU5ErkJggg==\n",
       "text/plain": [
        "<Figure size 432x288 with 1 Axes>"
       ]
@@ -501,16 +505,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 16,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "0.798340863819657"
+       "0.8091021716950881"
       ]
      },
-     "execution_count": 15,
+     "execution_count": 16,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -521,7 +525,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": 17,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -540,7 +544,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 18,
    "metadata": {},
    "outputs": [
     {
@@ -549,7 +553,7 @@
        "<All keys matched successfully>"
       ]
      },
-     "execution_count": 17,
+     "execution_count": 18,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -557,6 +561,10 @@
    "source": [
     "import torch\n",
     "\n",
+    "# Make sure the model is on CPU before loading a pretrained state_dict\n",
+    "model = model.cpu()\n",
+    "\n",
+    "# Load pretrained weights\n",
     "trained_state_dict = torch.load(\"state_dict.pth\")[\"models_state_dict\"][0]\n",
     "\n",
     "model.load_state_dict(trained_state_dict, strict=False)"
@@ -564,7 +572,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 19,
    "metadata": {
     "scrolled": true
    },
@@ -575,12 +583,16 @@
        "0.9188772287810328"
       ]
      },
-     "execution_count": 18,
+     "execution_count": 19,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
+    "# Move the model back to its target device\n",
+    "model.to(device)\n",
+    "\n",
+    "# Test for accuracy\n",
     "test(model, test_quantized_loader)"
    ]
   },
@@ -600,6 +612,16 @@
     "Sometimes, it's desirable to make some changes to our trained network prior to export (this is known in general as \"network surgery\"). This depends on the model and is not generally necessary, but in this case we want to make a couple of changes to get better results with FINN."
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Move the model to CPU before surgery\n",
+    "model = model.cpu()"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -609,7 +631,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": 21,
    "metadata": {},
    "outputs": [
     {
@@ -618,7 +640,7 @@
        "(64, 593)"
       ]
      },
-     "execution_count": 19,
+     "execution_count": 21,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -634,7 +656,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": 22,
    "metadata": {},
    "outputs": [
     {
@@ -643,7 +665,7 @@
        "(64, 600)"
       ]
      },
-     "execution_count": 20,
+     "execution_count": 22,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -658,7 +680,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": 23,
    "metadata": {},
    "outputs": [
     {
@@ -667,7 +689,7 @@
        "torch.Size([64, 600])"
       ]
      },
-     "execution_count": 21,
+     "execution_count": 23,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -690,11 +712,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "from brevitas.core.quant import QuantType\n",
     "from brevitas.nn import QuantIdentity\n",
     "\n",
     "\n",
@@ -702,23 +723,27 @@
     "    def __init__(self, my_pretrained_model):\n",
     "        super(CybSecMLPForExport, self).__init__()\n",
     "        self.pretrained = my_pretrained_model\n",
-    "        self.qnt_output = QuantIdentity(quant_type=QuantType.BINARY, bit_width=1, min_val=-1.0, max_val=1.0)\n",
+    "        self.qnt_output = QuantIdentity(\n",
+    "            quant_type='binary', \n",
+    "            scaling_impl_type='const',\n",
+    "            bit_width=1, min_val=-1.0, max_val=1.0)\n",
     "    \n",
     "    def forward(self, x):\n",
     "        # assume x contains bipolar {-1,1} elems\n",
     "        # shift from {-1,1} -> {0,1} since that is the\n",
     "        # input range for the trained network\n",
-    "        x = (x + torch.tensor([1.0])) / 2.0  \n",
+    "        x = (x + torch.tensor([1.0]).to(x.device)) / 2.0  \n",
     "        out_original = self.pretrained(x)\n",
     "        out_final = self.qnt_output(out_original)   # output as {-1,1}     \n",
     "        return out_final\n",
     "\n",
-    "model_for_export = CybSecMLPForExport(modified_model)"
+    "model_for_export = CybSecMLPForExport(modified_model)\n",
+    "model_for_export.to(device)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 23,
+   "execution_count": 25,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -731,16 +756,17 @@
     "    with torch.no_grad():\n",
     "        for data in test_loader:\n",
     "            inputs, target = data\n",
+    "            inputs, target = inputs.to(device), target.to(device)\n",
     "            # pad inputs to 600 elements\n",
-    "            input_padded = np.pad(inputs, [(0,0), (0,7)])\n",
+    "            input_padded = torch.nn.functional.pad(inputs, (0,7,0,0))\n",
     "            # convert inputs to {-1,+1}\n",
-    "            input_scaled = 2*input_padded - 1\n",
+    "            input_scaled = 2 * input_padded - 1\n",
     "            # run the model\n",
-    "            output = model(torch.from_numpy(input_scaled).float())\n",
-    "            y_pred.extend(list(output.flatten()))\n",
+    "            output = model(input_scaled.float())\n",
+    "            y_pred.extend(list(output.flatten().cpu().numpy()))\n",
     "            # make targets bipolar {-1,+1}\n",
-    "            expected = 2*target.float() - 1\n",
-    "            expected = expected.detach().numpy()\n",
+    "            expected = 2 * target.float() - 1\n",
+    "            expected = expected.cpu().numpy()\n",
     "            y_true.extend(list(expected.flatten()))\n",
     "        \n",
     "    return accuracy_score(y_true, y_pred)"
@@ -748,7 +774,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": 26,
    "metadata": {},
    "outputs": [
     {
@@ -757,7 +783,7 @@
        "0.9188772287810328"
       ]
      },
-     "execution_count": 24,
+     "execution_count": 26,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -780,7 +806,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 25,
+   "execution_count": 27,
    "metadata": {
     "scrolled": true
    },
@@ -791,16 +817,6 @@
      "text": [
       "Model saved to cybsec-mlp-ready.onnx\n"
      ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "<ipython-input-22-78c27bb59095>:15: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
-      "  x = (x + torch.tensor([1.0])) / 2.0\n",
-      "/workspace/brevitas/src/brevitas/quant_tensor/__init__.py:74: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
-      "  training = torch.tensor(training, dtype=torch.bool)\n"
-     ]
     }
    ],
    "source": [
@@ -809,6 +825,7 @@
     "\n",
     "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n",
     "input_shape = (1, 600)\n",
+    "\n",
     "# create a QuantTensor instance to mark input as bipolar during export\n",
     "input_a = np.random.randint(0, 1, size=input_shape).astype(np.float32)\n",
     "input_a = 2 * input_a - 1\n",
@@ -818,6 +835,10 @@
     "    input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True\n",
     ")\n",
     "\n",
+    "# Move to CPU before export\n",
+    "model_for_export.cpu()\n",
+    "\n",
+    "# Export to ONNX\n",
     "bo.export_finn_onnx(\n",
     "    model_for_export, export_path=ready_model_filename, input_t=input_qt\n",
     ")\n",
@@ -843,38 +864,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 26,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Serving 'cybsec-mlp-ready.onnx' at http://0.0.0.0:8081\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "\n",
-       "        <iframe\n",
-       "            width=\"100%\"\n",
-       "            height=\"400\"\n",
-       "            src=\"http://localhost:8081/\"\n",
-       "            frameborder=\"0\"\n",
-       "            allowfullscreen\n",
-       "        ></iframe>\n",
-       "        "
-      ],
-      "text/plain": [
-       "<IPython.lib.display.IFrame at 0x7fb36398c3a0>"
-      ]
-     },
-     "execution_count": 26,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "from finn.util.visualization import showInNetron\n",
     "\n",
@@ -888,18 +880,11 @@
     "## That's it! <a id=\"thats_it\" ></a>\n",
     "You created, trained and tested a quantized MLP that is ready to be loaded into FINN, congratulations! You can now proceed to the next notebook."
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -913,7 +898,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.5"
+   "version": "3.7.0"
   }
  },
  "nbformat": 4,
diff --git a/requirements.txt b/requirements.txt
index da0ec0b63092f0618bb7c9982b95fa90e8f91118..87386cfbbd03393c8b5936f5510b86c8cf25557f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@ numpy==1.18.0
 onnx==1.7.0
 onnxoptimizer
 onnxruntime==1.4.0
-pre-commit==2.6.0
+pre-commit==2.9.2
 pyscaffold==3.2.1
 scipy==1.5.2
 setupext-janitor>=1.1.2
diff --git a/run-docker.sh b/run-docker.sh
index 2ac0e108ae034a6a23091d5fecf34c2cc0d8a1d8..c804b8aa7b03d87309ba71443610ec4844fb123e 100755
--- a/run-docker.sh
+++ b/run-docker.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2020, Xilinx
+# Copyright (c) 2020-2022, Advanced Micro Devices
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -125,17 +125,22 @@ elif [ "$1" = "build_dataflow" ]; then
   DOCKER_CMD="build_dataflow $BUILD_DATAFLOW_DIR"
 elif [ "$1" = "build_custom" ]; then
   BUILD_CUSTOM_DIR=$(readlink -f "$2")
+  FLOW_NAME=${3:-build}
   FINN_DOCKER_EXTRA+="-v $BUILD_CUSTOM_DIR:$BUILD_CUSTOM_DIR -w $BUILD_CUSTOM_DIR "
   DOCKER_INTERACTIVE="-it"
   #FINN_HOST_BUILD_DIR=$BUILD_DATAFLOW_DIR/build
-  gecho "Running build_custom: $BUILD_CUSTOM_DIR/build.py"
-  DOCKER_CMD="python -mpdb -cc -cq build.py"
+  gecho "Running build_custom: $BUILD_CUSTOM_DIR/$FLOW_NAME.py"
+  DOCKER_CMD="python -mpdb -cc -cq $FLOW_NAME.py"
+elif [ -z "$1" ]; then
+   gecho "Running container only"
+   DOCKER_CMD="bash"
+   DOCKER_INTERACTIVE="-it"
 else
-  gecho "Running container only"
-  DOCKER_CMD="bash"
-  DOCKER_INTERACTIVE="-it"
+  gecho "Running container with passed arguments"
+  DOCKER_CMD="$@"
 fi
 
+
 if [ "$FINN_DOCKER_GPU" != 0 ];then
   gecho "nvidia-docker detected, enabling GPUs"
   if [ ! -z "$NVIDIA_VISIBLE_DEVICES" ];then
@@ -178,7 +183,7 @@ DOCKER_EXEC+="-e SHELL=/bin/bash "
 DOCKER_EXEC+="-v $SCRIPTPATH:/workspace/finn "
 DOCKER_EXEC+="-v $FINN_HOST_BUILD_DIR:$FINN_HOST_BUILD_DIR "
 DOCKER_EXEC+="-e FINN_BUILD_DIR=$FINN_HOST_BUILD_DIR "
-DOCKER_EXEC+="-e FINN_ROOT="/workspace/finn" "
+DOCKER_EXEC+="-e FINN_ROOT="/workspace" "
 DOCKER_EXEC+="-e LOCALHOST_URL=$LOCALHOST_URL "
 DOCKER_EXEC+="-e VIVADO_IP_CACHE=$VIVADO_IP_CACHE "
 DOCKER_EXEC+="-e PYNQ_BOARD=$PYNQ_BOARD "
diff --git a/setup.cfg b/setup.cfg
index c1dff9bd9b44fc7ca7a02ad0891fd75f10009530..bcf5364b782447d21eea553ddcc2a6fc9b2636c0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -119,6 +119,11 @@ markers =
     vivado: mark tests that require Vivado or Vivado HLS
     vitis: mark tests that require Vitis
     board: mark tests that require a PYNQ board
+    brevitas_export : mark tests that test brevitas export functionality
+    streamline: mark tests that test streamlining functionality
+    util: mark tests that test util functions
+    transform: mark tests that test transformations (before hls layers)
+    fpgadataflow: mark tests related to hls layers
 norecursedirs =
     dist
     build
diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py
index 698f050cc25937684f935d78fb8eb05da4662049..6abac895a9ba647d3fd3733fda4b337f3b05dca6 100644
--- a/src/finn/builder/build_dataflow_config.py
+++ b/src/finn/builder/build_dataflow_config.py
@@ -342,7 +342,7 @@ class DataflowBuildConfig:
         if self.target_fps is None:
             return None
         else:
-            n_clock_cycles_per_sec = 10 ** 9 / self.synth_clk_period_ns
+            n_clock_cycles_per_sec = 10**9 / self.synth_clk_period_ns
             n_cycles_per_frame = n_clock_cycles_per_sec / self.target_fps
             return int(n_cycles_per_frame)
 
diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py
index 771424b5d42626b1edba58d5102c44e0dedbe158..85f150dcf83972a0d14cef959235965b5cf30fe6 100644
--- a/src/finn/builder/build_dataflow_steps.py
+++ b/src/finn/builder/build_dataflow_steps.py
@@ -55,6 +55,7 @@ from finn.builder.build_dataflow_config import (
 )
 from finn.core.modelwrapper import ModelWrapper
 from finn.core.onnx_exec import execute_onnx
+from finn.core.rtlsim_exec import rtlsim_exec
 from finn.core.throughput_test import throughput_test_rtlsim
 from finn.custom_op.registry import getCustomOp
 from finn.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount
@@ -108,7 +109,11 @@ from finn.util.test import execute_parent
 
 
 def verify_step(
-    model: ModelWrapper, cfg: DataflowBuildConfig, step_name: str, need_parent: bool
+    model: ModelWrapper,
+    cfg: DataflowBuildConfig,
+    step_name: str,
+    need_parent: bool,
+    rtlsim_pre_hook=None,
 ):
     print("Running verification for " + step_name)
     verify_out_dir = cfg.output_dir + "/verification_output"
@@ -131,7 +136,10 @@ def verify_step(
         inp_tensor_name = model.graph.input[0].name
         out_tensor_name = model.graph.output[0].name
         inp_dict = {inp_tensor_name: in_npy}
-        out_dict = execute_onnx(model, inp_dict, True)
+        if rtlsim_pre_hook is not None:
+            out_dict = rtlsim_exec(model, inp_dict, pre_hook=rtlsim_pre_hook)
+        else:
+            out_dict = execute_onnx(model, inp_dict, True)
         out_npy = out_dict[out_tensor_name]
     res = np.isclose(exp_out_npy, out_npy, atol=1e-3).all()
     res_to_str = {True: "SUCCESS", False: "FAIL"}
@@ -397,7 +405,7 @@ def step_generate_estimate_reports(model: ModelWrapper, cfg: DataflowBuildConfig
         model = model.transform(AnnotateCycles())
         estimate_network_performance = model.analysis(dataflow_performance)
         # add some more metrics to estimated performance
-        n_clock_cycles_per_sec = (10 ** 9) / cfg.synth_clk_period_ns
+        n_clock_cycles_per_sec = (10**9) / cfg.synth_clk_period_ns
         est_fps = n_clock_cycles_per_sec / estimate_network_performance["max_cycles"]
         estimate_network_performance["estimated_throughput_fps"] = est_fps
         est_latency_ns = (
@@ -599,7 +607,7 @@ def step_out_of_context_synthesis(model: ModelWrapper, cfg: DataflowBuildConfig)
 
         estimate_network_performance = model.analysis(dataflow_performance)
         # add some more metrics to estimated performance
-        n_clock_cycles_per_sec = float(ooc_res_dict["fmax_mhz"]) * (10 ** 6)
+        n_clock_cycles_per_sec = float(ooc_res_dict["fmax_mhz"]) * (10**6)
         est_fps = n_clock_cycles_per_sec / estimate_network_performance["max_cycles"]
         ooc_res_dict["estimated_throughput_fps"] = est_fps
         with open(report_dir + "/ooc_synth_and_timing.json", "w") as f:
diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py
index 58187bfe4cfb4d90984e478c09e6bdc6c7484a9b..b4da42a5e715c835f9c289c76179cb0efe7edf9d 100644
--- a/src/finn/custom_op/fpgadataflow/hlscustomop.py
+++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py
@@ -300,8 +300,6 @@ class HLSCustomOp(CustomOp):
         self.code_gen_dict["$PROJECTNAME$"] = ["project_{}".format(node.name)]
         self.code_gen_dict["$HWSRCDIR$"] = [code_gen_dir]
         self.code_gen_dict["$FPGAPART$"] = [fpgapart]
-        self.code_gen_dict["$FINNHLSLIBDIR$"] = ["/workspace/finn-hlslib"]
-        self.code_gen_dict["$FINNHLSCUSTOMDIR$"] = ["/workspace/finn/custom_hls"]
         self.code_gen_dict["$TOPFXN$"] = [node.name]
         self.code_gen_dict["$CLKPERIOD$"] = [str(clk)]
         self.code_gen_dict["$DEFAULT_DIRECTIVES$"] = self.ipgen_default_directives()
@@ -404,15 +402,15 @@ class HLSCustomOp(CustomOp):
         builder = CppBuilder()
         # to enable additional debug features please uncommand the next line
         # builder.append_includes("-DDEBUG")
-        builder.append_includes("-I/workspace/finn/src/finn/qnn-data/cpp")
-        builder.append_includes("-I/workspace/cnpy/")
-        builder.append_includes("-I/workspace/finn-hlslib")
-        builder.append_includes("-I/workspace/finn/custom_hls")
+        builder.append_includes("-I$FINN_ROOT/finn/src/finn/qnn-data/cpp")
+        builder.append_includes("-I$FINN_ROOT/cnpy/")
+        builder.append_includes("-I$FINN_ROOT/finn-hlslib")
+        builder.append_includes("-I$FINN_ROOT/finn/custom_hls")
         builder.append_includes("-I{}/include".format(os.environ["HLS_PATH"]))
         builder.append_includes("--std=c++14")
         builder.append_includes("-O3")
         builder.append_sources(code_gen_dir + "/*.cpp")
-        builder.append_sources("/workspace/cnpy/cnpy.cpp")
+        builder.append_sources("$FINN_ROOT/cnpy/cnpy.cpp")
         builder.append_includes("-lz")
         builder.set_executable_path(code_gen_dir + "/node_model")
         builder.build(code_gen_dir)
diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py
index 2c78685a4a5ac942e5320e75862deee7b40f8ba2..dcf67e4c4338b8a903fefd7a83a96331d0a5c8e9 100644
--- a/src/finn/custom_op/fpgadataflow/lookup.py
+++ b/src/finn/custom_op/fpgadataflow/lookup.py
@@ -29,13 +29,14 @@
 import numpy as np
 import os
 import warnings
-from math import ceil
+from math import ceil, log2
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
 from finn.util.data_packing import (
     npy_to_rtlsim_input,
     numpy_to_hls_code,
+    pack_innermost_dim_as_hex_string,
     rtlsim_output_to_npy,
 )
 
@@ -58,6 +59,13 @@ class Lookup(HLSCustomOp):
             "InputType": ("s", True, ""),
             # Input shape
             "InputShape": ("ints", False, [1]),
+            # Memory mode
+            # const : parameters baked into bitfile (BRAM)
+            # external : lookup performed in external memory over AXI MM
+            "mem_mode": ("s", False, "const", ["const", "external"]),
+            # Width for AXI-MM interface
+            # only relevant when mem_mode="external"
+            "ext_mem_width": ("i", False, 32),
         }
         my_attrs.update(super().get_nodeattr_types())
         return my_attrs
@@ -72,7 +80,8 @@ class Lookup(HLSCustomOp):
 
     def get_normal_output_shape(self):
         ishape = self.get_normal_input_shape()
-        oshape = list(ishape) + [self.get_nodeattr("EmbeddingDim")]
+        emb_dim = self.get_nodeattr("EmbeddingDim")
+        oshape = list(ishape) + [emb_dim]
         return tuple(oshape)
 
     def get_folded_input_shape(self):
@@ -81,7 +90,23 @@ class Lookup(HLSCustomOp):
         return tuple(folded_ishape)
 
     def get_folded_output_shape(self):
-        return self.get_normal_output_shape()
+        ishape = self.get_normal_input_shape()
+        mem_mode = self.get_nodeattr("mem_mode")
+        emb_dim = self.get_nodeattr("EmbeddingDim")
+        if mem_mode == "const":
+            oshape = list(ishape) + [emb_dim]
+        elif mem_mode == "external":
+            ext_mem_width = self.get_nodeattr("ext_mem_width")
+            bits_per_emb_elem = self.get_output_datatype().bitwidth()
+            assert ext_mem_width % bits_per_emb_elem == 0
+            emb_elems_per_ext_mem_width = ext_mem_width // bits_per_emb_elem
+            oshape = list(ishape) + [
+                emb_dim // emb_elems_per_ext_mem_width,
+                emb_elems_per_ext_mem_width,
+            ]
+        else:
+            raise Exception("Unrecognized mem_mode: " + mem_mode)
+        return tuple(oshape)
 
     def make_shape_compatible_op(self, model):
         exp_ishape = tuple(self.get_normal_input_shape())
@@ -123,17 +148,20 @@ class Lookup(HLSCustomOp):
         return ibits
 
     def get_outstream_width(self):
+        folded_oshape = self.get_folded_output_shape()
         obits = self.get_output_datatype().bitwidth()
-        ofm_ch = self.get_nodeattr("EmbeddingDim")
-        return obits * ofm_ch
+        return obits * folded_oshape[-1]
 
     def get_number_output_values(self):
         folded_oshape = self.get_folded_output_shape()
         return np.prod(folded_oshape[:-1])
 
     def global_includes(self):
-        global_incls = ['#include "lookup.hpp"']
-        global_incls.append('#include "embeddings.hpp"')
+        mem_mode = self.get_nodeattr("mem_mode")
+        global_incls = []
+        if mem_mode == "const":
+            global_incls.append('#include "lookup.hpp"')
+            global_incls.append('#include "embeddings.hpp"')
         self.code_gen_dict["$GLOBALS$"] = global_incls
 
     def defines(self, var):
@@ -142,14 +170,26 @@ class Lookup(HLSCustomOp):
         elem_hls_type = dtype.get_hls_datatype_str()
         emb_type = DataType[self.get_nodeattr("EmbeddingType")]
         emb_hls_type = emb_type.get_hls_datatype_str()
+        emb_dim = self.get_nodeattr("EmbeddingDim")
+        mem_mode = self.get_nodeattr("mem_mode")
         my_defines = []
-        my_defines.append(
-            "#define NumEmbeddings %d" % self.get_nodeattr("NumEmbeddings")
-        )
-        my_defines.append("#define EmbeddingDim %d" % self.get_nodeattr("EmbeddingDim"))
         my_defines.append("#define NumInputs %d" % n_inputs)
-        my_defines.append("#define InputType %s" % elem_hls_type)
-        my_defines.append("#define EmbeddingType %s" % emb_hls_type)
+        if mem_mode == "external":
+            ext_mem_width = self.get_nodeattr("ext_mem_width")
+            ext_mem_emb_size = self.get_folded_output_shape()[-2]
+            ext_mem_emb_align = ceil(log2(ext_mem_emb_size))
+            my_defines.append("#define MemBits %d" % ext_mem_width)
+            my_defines.append("#define EmbeddingSize %d" % ext_mem_emb_size)
+            my_defines.append("#define EmbeddingAlign %d" % ext_mem_emb_align)
+            my_defines.append("#define T_SRC %s" % elem_hls_type)
+            my_defines.append("#define T_DST ap_uint<MemBits>")
+        elif mem_mode == "const":
+            my_defines.append(
+                "#define NumEmbeddings %d" % self.get_nodeattr("NumEmbeddings")
+            )
+            my_defines.append("#define EmbeddingDim %d" % emb_dim)
+            my_defines.append("#define InputType %s" % elem_hls_type)
+            my_defines.append("#define EmbeddingType %s" % emb_hls_type)
         self.code_gen_dict["$DEFINES$"] = my_defines
 
     def read_npy_data(self):
@@ -186,7 +226,7 @@ class Lookup(HLSCustomOp):
         oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}")
 
         self.code_gen_dict["$DATAOUTSTREAM$"] = [
-            'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");'
+            'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", %s);'
             % (
                 packed_hls_type,
                 elem_hls_type,
@@ -194,6 +234,7 @@ class Lookup(HLSCustomOp):
                 npy_type,
                 oshape_cpp_str,
                 npy_out,
+                "false",
             )
         ]
 
@@ -210,22 +251,46 @@ class Lookup(HLSCustomOp):
         )
 
     def docompute(self):
-        self.code_gen_dict["$DOCOMPUTE$"] = [
-            """StreamingLookup<NumEmbeddings,  EmbeddingDim, NumInputs,
-            InputType, EmbeddingType >(in0, out, embeddings);"""
-        ]
+        mem_mode = self.get_nodeattr("mem_mode")
+        if mem_mode == "const":
+            self.code_gen_dict["$DOCOMPUTE$"] = [
+                """StreamingLookup<NumEmbeddings,  EmbeddingDim, NumInputs,
+                InputType, EmbeddingType >(in0, out, embeddings);"""
+            ]
+        elif mem_mode == "external":
+            hls_impl = """
+    if(!in0.empty()) {
+        ap_uint<T_SRC::width+EmbeddingAlign> const  base =
+            (in0.read(), ap_uint<EmbeddingAlign>(0));
+        for(unsigned  j = 0; j < EmbeddingSize; j++) {
+#pragma HLS PIPELINE II=1
+            out.write(mem[base+j]);
+        }
+    }
+            """
+            self.code_gen_dict["$DOCOMPUTE$"] = [hls_impl]
 
     def blackboxfunction(self):
+        mem_mode = self.get_nodeattr("mem_mode")
         ibits = self.get_instream_width()
         packed_input_hls_type = "ap_uint<%d>" % ibits
         obits = self.get_outstream_width()
         packed_output_hls_type = "ap_uint<%d>" % obits
-        self.code_gen_dict["$BLACKBOXFUNCTION$"] = [
-            "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)"
-            % (self.onnx_node.name, packed_input_hls_type, packed_output_hls_type)
-        ]
+        if mem_mode == "const":
+            self.code_gen_dict["$BLACKBOXFUNCTION$"] = [
+                "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)"
+                % (self.onnx_node.name, packed_input_hls_type, packed_output_hls_type)
+            ]
+        elif mem_mode == "external":
+            self.code_gen_dict["$BLACKBOXFUNCTION$"] = [
+                "void "
+                + self.onnx_node.name
+                + "(hls::stream<T_SRC> &in0, hls::stream<T_DST> &out, "
+                + "T_DST const *const  mem)"
+            ]
 
     def pragmas(self):
+        mem_mode = self.get_nodeattr("mem_mode")
         my_pragmas = [
             "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname()
         ]
@@ -233,24 +298,68 @@ class Lookup(HLSCustomOp):
             "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname()
         )
         my_pragmas.append("#pragma HLS INTERFACE ap_ctrl_none port=return")
+        if mem_mode == "const":
+            my_pragmas.append(
+                "#pragma HLS BIND_STORAGE variable=embeddings type=ROM_2P impl=BRAM"
+            )
+        elif mem_mode == "external":
+            my_pragmas.append("#pragma HLS INTERFACE m_axi offset=slave port=mem")
+            my_pragmas.append("#pragma HLS INTERFACE s_axilite port=mem bundle=control")
+        else:
+            raise Exception("Unrecognized mem_mode: " + mem_mode)
         self.code_gen_dict["$PRAGMAS$"] = my_pragmas
 
     def generate_params(self, model, path):
-        code_gen_dir = path
+        mem_mode = self.get_nodeattr("mem_mode")
         embeddings = model.get_initializer(self.onnx_node.input[1])
-        weight_filename = "{}/embeddings.hpp".format(code_gen_dir)
-        edt = DataType[self.get_nodeattr("EmbeddingType")]
-        # obits = self.get_outstream_width()
-        # packed_output_hls_type = "ap_uint<%d>" % obits
-        assert np.vectorize(edt.allowed)(
-            embeddings
-        ).all(), "Embeddings can't be expressed with type %s" % str(edt)
-        embeddings_hls_code = numpy_to_hls_code(
-            embeddings, edt, "embeddings", True, False
-        )
-        f_thresh = open(weight_filename, "w")
-        f_thresh.write(embeddings_hls_code)
-        f_thresh.close()
+        if mem_mode == "const":
+            code_gen_dir = path
+            weight_filename = "{}/embeddings.hpp".format(code_gen_dir)
+            edt = DataType[self.get_nodeattr("EmbeddingType")]
+            # obits = self.get_outstream_width()
+            # packed_output_hls_type = "ap_uint<%d>" % obits
+            assert np.vectorize(edt.allowed)(
+                embeddings
+            ).all(), "Embeddings can't be expressed with type %s" % str(edt)
+            # reverse innermost dim in embeddings to remain compatible with
+            # how we normally encode the data in FINN
+            embeddings_rev = np.flip(embeddings, -1)
+            embeddings_hls_code = numpy_to_hls_code(
+                embeddings_rev, edt, "embeddings", True, False
+            )
+            f_thresh = open(weight_filename, "w")
+            f_thresh.write(embeddings_hls_code)
+            f_thresh.close()
+        elif mem_mode == "external":
+            edt = DataType[self.get_nodeattr("EmbeddingType")]
+            ext_mem_width = self.get_nodeattr("ext_mem_width")
+            assert edt.bitwidth() == 8, (
+                "Lookup with mem_mode=external "
+                + "only works with 8-bit embeddings but found "
+                + str(edt)
+            )
+            emb_dim = self.get_nodeattr("EmbeddingDim")
+            # need to zero-pad embeddings in external mode for burst alignment
+            # compute how much padding we need
+            emb_elems_per_ext_mem_width = self.get_folded_output_shape()[-1]
+            ext_mem_emb_size = self.get_folded_output_shape()[-2]
+            ext_mem_emb_align = ceil(log2(ext_mem_emb_size))
+            align_factor = int((ext_mem_width / 8) * 2**ext_mem_emb_align)
+            pad_amount = align_factor - emb_dim
+            embeddings_padded = np.pad(embeddings, [(0, 0), (0, pad_amount)])
+            # reshape for packing the innermost dim
+            embeddings_padded = embeddings_padded.reshape(
+                -1, emb_elems_per_ext_mem_width
+            )
+            weight_filename = "%s/%s.dat" % (path, self.onnx_node.name)
+            ret = pack_innermost_dim_as_hex_string(
+                embeddings_padded, edt, ext_mem_width, True, prefix=""
+            )
+            with open(weight_filename, "w") as f:
+                for current_line in ret:
+                    f.write(current_line + "\n")
+        else:
+            raise Exception("Unrecognized mem_mode: " + mem_mode)
 
     def execute_node(self, context, graph):
         mode = self.get_nodeattr("exec_mode")
@@ -259,6 +368,10 @@ class Lookup(HLSCustomOp):
         exp_oshape = tuple(self.get_normal_output_shape())
         folded_ishape = tuple(self.get_folded_input_shape())
         folded_oshape = tuple(self.get_folded_output_shape())
+        mem_mode = self.get_nodeattr("mem_mode")
+        assert (
+            mem_mode == "const"
+        ), "Only mem_mode=const is supported for simulation of Lookup layer"
 
         if mode == "cppsim":
             code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
@@ -310,7 +423,7 @@ class Lookup(HLSCustomOp):
                 out_shape,
                 packed_bits,
                 target_bits,
-                reverse_inner=False,
+                reverse_inner=True,
             )
             # load and reshape output
             output = np.load(out_npy_path)
@@ -328,10 +441,16 @@ class Lookup(HLSCustomOp):
         ), """Output shape doesn't match expected shape."""
 
     def bram_estimation(self):
-        # current calculation assumes embeddings always stored in BRAM_18Ks
-        width_factor = ceil(self.get_outstream_width() / 16)
-        depth_factor = ceil(self.get_nodeattr("NumEmbeddings") / 1024)
-        return width_factor * depth_factor
+        mem_mode = self.get_nodeattr("mem_mode")
+        if mem_mode == "const":
+            # current calculation assumes embeddings always stored in BRAM_18Ks
+            # when mem_mode is const
+            width_factor = ceil(self.get_outstream_width() / 16)
+            depth_factor = ceil(self.get_nodeattr("NumEmbeddings") / 1024)
+            return width_factor * depth_factor
+        else:
+            # TODO can we estimate BRAMs for the DMA engine?
+            return 0
 
     def bram_efficiency_estimation(self):
         bram16_est = self.bram_estimation()
@@ -340,3 +459,18 @@ class Lookup(HLSCustomOp):
         ebits = self.get_outstream_width() * self.get_nodeattr("NumEmbeddings")
         bram16_est_capacity = bram16_est * 18 * 1024
         return ebits / bram16_est_capacity
+
+    def get_ap_int_max_w(self):
+        parent_max = super().get_ap_int_max_w()
+        mem_mode = self.get_nodeattr("mem_mode")
+        ext_mem_width = self.get_nodeattr("ext_mem_width")
+        if mem_mode == "external":
+            return max(ext_mem_width, parent_max)
+        else:
+            return parent_max
+
+    def get_verilog_top_module_intf_names(self):
+        intf_names = super().get_verilog_top_module_intf_names()
+        intf_names["axilite"] = ["s_axi_control"]
+        intf_names["aximm"] = [("m_axi_gmem", self.get_nodeattr("ext_mem_width"))]
+        return intf_names
diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
index 8ca3787232fc22ae6cf798edc8552fd257fdcc2f..3f4103b4380f8d1838910b37e966e8363891d39f 100644
--- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py
@@ -358,8 +358,8 @@ class StreamingFCLayer_Batch(HLSCustomOp):
         if noact == 0:
             odt = self.get_output_datatype()
             B = odt.bitwidth()
-            thr_luts = (2 ** B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64)
-            comp_luts = (2 ** B - 1) * acc_bits
+            thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64)
+            comp_luts = (2**B - 1) * acc_bits
 
         return int(
             c0
diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
old mode 100644
new mode 100755
index bc771cc796796a9e38c7c266c81a4e65431e6524..6fbf176d4c80d5b5cd6caac294e131ec1a515438
--- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
@@ -32,9 +32,12 @@ import warnings
 
 from finn.core.datatype import DataType
 from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
-from finn.custom_op.general.im2col import compute_conv_output_dim
+from finn.custom_op.general.maxpoolnhwc import compute_pool_output_dim
 from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
 
+# TODO: consider splitting this into separate implementations for 1D and 2D
+# similar to what we do for ConvolutionInputGenerator
+
 
 class StreamingMaxPool_Batch(HLSCustomOp):
     """Class that corresponds to finn-hlslib StreamingMaxPool_batch function."""
@@ -44,6 +47,10 @@ class StreamingMaxPool_Batch(HLSCustomOp):
             "ImgDim": ("ints", True, []),  # [H, W] = [Y, X]
             "PoolDim": ("ints", True, []),  # [H, W] = [Y, X]
             "NumChannels": ("i", True, 0),
+            # parallelism control - only supported for 1D maxpool
+            "PE": ("i", False, 0),
+            # round up (instead of down) output size - only supported for 1D maxpool
+            "CeilMode": ("i", False, 0),
             # FINN DataTypes for inputs/outputs
             "dataType": ("s", True, ""),
         }
@@ -82,24 +89,30 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         return ishape
 
     def get_folded_input_shape(self):
-        # even though there is no folding in the current hlslib op,
-        # insert a time multiplexing axis to remain compatible with the
-        # shapes produced by the rest of the dataflow pipeline
-        ret = list(self.get_normal_input_shape())
-        ret.insert(-1, 1)
-        return tuple(ret)
+        ifm_dim_h, ifm_dim_w = self.get_nodeattr("ImgDim")
+        ifm_ch = self.get_nodeattr("NumChannels")
+        pe = self.get_nodeattr("PE")
+        nf = int(ifm_ch / pe)
+        if self.is_1d():
+            folded_ishape = (1, ifm_dim_h, ifm_dim_w, nf, pe)
+        else:
+            folded_ishape = (1, ifm_dim_h, ifm_dim_w, 1, ifm_ch)
+        return folded_ishape
 
     def get_normal_output_shape(self):
         ifm_dim_h, ifm_dim_w = self.get_nodeattr("ImgDim")
         k_h, k_w = tuple(self.get_nodeattr("PoolDim"))
         ifm_ch = self.get_nodeattr("NumChannels")
-        stride_h = k_h
-        stride_w = k_w
-        pad = 0
-        assert ifm_dim_h % k_h == 0, "StreamingMaxPool needs ImgDim_h % PoolDim_h == 0"
-        assert ifm_dim_w % k_w == 0, "StreamingMaxPool needs ImgDim_w % PoolDim_w == 0"
-        ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad)
-        ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad)
+        ceil_mode = self.get_nodeattr("CeilMode")
+        if not self.is_1d():
+            assert (
+                ifm_dim_h % k_h == 0
+            ), "StreamingMaxPool needs ImgDim_h % PoolDim_h == 0"
+            assert (
+                ifm_dim_w % k_w == 0
+            ), "StreamingMaxPool needs ImgDim_w % PoolDim_w == 0"
+        ofm_dim_h = compute_pool_output_dim(ifm_dim_h, k_h, k_h, 0, ceil_mode)
+        ofm_dim_w = compute_pool_output_dim(ifm_dim_w, k_w, k_w, 0, ceil_mode)
         oshape = (1, ofm_dim_h, ofm_dim_w, ifm_ch)
         return oshape
 
@@ -107,8 +120,15 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         # even though there is no folding in the current hlslib op,
         # insert a time multiplexing axis to remain compatible with the
         # shapes produced by the rest of the dataflow pipeline
+        ifm_ch = self.get_nodeattr("NumChannels")
+        pe = self.get_nodeattr("PE")
+        nf = int(ifm_ch / pe)
         ret = list(self.get_normal_output_shape())
-        ret.insert(-1, 1)
+        if self.is_1d():
+            ret[-1] = nf
+            ret.append(pe)
+        else:
+            ret.insert(-1, 1)
         return tuple(ret)
 
     def get_number_output_values(self):
@@ -118,20 +138,27 @@ class StreamingMaxPool_Batch(HLSCustomOp):
     def get_exp_cycles(self):
         # derived from StreamingMaxPool_Batch loop nest
         ifm_dim, k, ifm_ch = self.get_1d_attrs_normalized()
+        _, _, ofm_dim_w, nf, _ = self.get_folded_output_shape()
+
         if self.is_1d():
-            return int(ifm_dim[1] + k[1])
+            exp_cycles = ofm_dim_w * nf * (k[1] + 1)
+            return int(exp_cycles)
         else:
             # TODO: adjust inaccurate formula
             return int(ifm_dim[1] * (ifm_dim[1] + (ifm_dim[1] / k[1])))
 
     def get_instream_width(self):
         dt_bits = self.get_input_datatype().bitwidth()
+        pe = self.get_nodeattr("PE")
         ifm_ch = self.get_nodeattr("NumChannels")
-        in_width = int(dt_bits * ifm_ch)
+        if self.is_1d():
+            in_width = int(dt_bits * pe)
+        else:
+            in_width = int(dt_bits * ifm_ch)
         return in_width
 
     def get_outstream_width(self):
-        """For streaming maxpool out stream with is the same as in stream width"""
+        """For streaming maxpool out stream width is the same as in stream width"""
         return self.get_instream_width()
 
     def make_shape_compatible_op(self, model):
@@ -178,16 +205,32 @@ class StreamingMaxPool_Batch(HLSCustomOp):
     def defines(self, var):
         numReps = 1
         ifm_dim, k, ifm_ch = self.get_1d_attrs_normalized()
+        ceil_mode = self.get_nodeattr("CeilMode")
+        output_size = compute_pool_output_dim(ifm_dim[1], k[1], k[1], 0, ceil_mode)
 
-        self.code_gen_dict["$DEFINES$"] = [
-            """#define ImgDim {}\n #define PoolDim {}\n
-            #define NumChannels {}\n #define numReps {}""".format(
-                ifm_dim[1],
-                k[1],
-                self.get_nodeattr("NumChannels"),
-                numReps,
-            )
-        ]
+        if self.is_1d():
+            self.code_gen_dict["$DEFINES$"] = [
+                """#define ImgDim {}\n #define PoolDim {}\n
+                #define NumChannels {}\n #define PE {}\n #define OutputSize {}
+                \n #define numReps {}""".format(
+                    ifm_dim[1],
+                    k[1],
+                    self.get_nodeattr("NumChannels"),
+                    self.get_nodeattr("PE"),
+                    output_size,
+                    numReps,
+                )
+            ]
+        else:
+            self.code_gen_dict["$DEFINES$"] = [
+                """#define ImgDim {}\n #define PoolDim {}\n
+                #define NumChannels {}\n #define numReps {}""".format(
+                    ifm_dim[1],
+                    k[1],
+                    self.get_nodeattr("NumChannels"),
+                    numReps,
+                )
+            ]
 
     def read_npy_data(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
@@ -227,17 +270,22 @@ class StreamingMaxPool_Batch(HLSCustomOp):
                 "%s<ImgDim, PoolDim, NumChannels>(in0, out);" % (op)
             ]
         else:
+            dtype = self.get_input_datatype()
+            dtype_hls = dtype.get_hls_datatype_str()
+            minval_str = str(int(dtype.min()))
             if self.is_1d():
                 op = "StreamingMaxPool_Precision_1d"
+                self.code_gen_dict["$DOCOMPUTE$"] = [
+                    """%s<ImgDim, PoolDim, NumChannels, PE,
+                     OutputSize, %s, %s>(in0, out);"""
+                    % (op, dtype_hls, minval_str)
+                ]
             else:
                 op = "StreamingMaxPool_Precision"
-            dtype = self.get_input_datatype()
-            dtype_hls = dtype.get_hls_datatype_str()
-            minval_str = str(int(dtype.min()))
-            self.code_gen_dict["$DOCOMPUTE$"] = [
-                "%s<ImgDim, PoolDim, NumChannels, %s, %s>(in0, out);"
-                % (op, dtype_hls, minval_str)
-            ]
+                self.code_gen_dict["$DOCOMPUTE$"] = [
+                    "%s<ImgDim, PoolDim, NumChannels, %s, %s>(in0, out);"
+                    % (op, dtype_hls, minval_str)
+                ]
 
     def dataoutstrm(self):
         code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
@@ -293,6 +341,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         node = self.onnx_node
         exp_ishape = self.get_normal_input_shape()
         exp_oshape = self.get_normal_output_shape()
+        folded_ishape = self.get_folded_input_shape()
         folded_oshape = self.get_folded_output_shape()
 
         # TODO ensure codegen dir exists
@@ -320,9 +369,8 @@ class StreamingMaxPool_Batch(HLSCustomOp):
             export_idt = DataType["BINARY"]
         else:
             export_idt = self.get_input_datatype()
-        # no reshaping for input since assuming no folding on input
-        # make copy before saving array
-        reshaped_input = inp.copy()
+
+        reshaped_input = inp.reshape(folded_ishape)
         np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
 
         if mode == "cppsim":
@@ -333,7 +381,7 @@ class StreamingMaxPool_Batch(HLSCustomOp):
             assert (
                 context[node.output[0]].shape == folded_oshape
             ), "cppsim \
-            did not produce expected ofolded utput shape"
+            did not produce expected folded output shape"
             context[node.output[0]] = context[node.output[0]].reshape(*exp_oshape)
         elif mode == "rtlsim":
             sim = self.get_rtlsim()
@@ -371,4 +419,4 @@ class StreamingMaxPool_Batch(HLSCustomOp):
         assert (
             context[node.output[0]].shape == exp_oshape
         ), """Output
-        shape doesn't match expected shape (1, ofm_dim, ofm_dim, k*k*ifm_ch)."""
+        shape doesn't match expected shape (1, ofm_dim, ofm_dim, ifm_ch)."""
diff --git a/src/finn/custom_op/fpgadataflow/templates.py b/src/finn/custom_op/fpgadataflow/templates.py
index 0e1a2933008648047ad005c259386a3e82d60e2d..d33a7b54b8e96c5e63aa8b77743e83e7460715a6 100644
--- a/src/finn/custom_op/fpgadataflow/templates.py
+++ b/src/finn/custom_op/fpgadataflow/templates.py
@@ -86,10 +86,10 @@ puts "HLS project: $config_proj_name"
 set config_hwsrcdir "$HWSRCDIR$"
 puts "HW source dir: $config_hwsrcdir"
 set config_proj_part "$FPGAPART$"
-
-set config_bnnlibdir "$FINNHLSLIBDIR$"
-set config_customhlsdir "$FINNHLSCUSTOMDIR$"
-
+set config_bnnlibdir "$::env(FINN_ROOT)/finn-hlslib"
+puts "finn-hlslib dir: $config_bnnlibdir"
+set config_customhlsdir "$::env(FINN_ROOT)/finn/custom_hls"
+puts "custom HLS dir: $config_customhlsdir"
 set config_toplevelfxn "$TOPFXN$"
 set config_clkperiod $CLKPERIOD$
 
diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py
index 0e8d77c3a9c81d9664ddf9aa1ee8953662ce46b2..173882bf929611f6cd9f560f48a46dfe09430622 100644
--- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py
+++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py
@@ -589,7 +589,7 @@ class Thresholding_Batch(HLSCustomOp):
     # TODO check and add whatever missing
     def defines(self, var):
         numInputVectors = list(self.get_nodeattr("numInputVectors"))
-        numReps = numInputVectors[0]
+        numReps = int(np.prod(numInputVectors))
         self.code_gen_dict["$DEFINES$"] = [
             """#define NumChannels1 {}\n #define PE1 {}\n #define numReps {}""".format(
                 self.get_nodeattr("NumChannels"),
@@ -937,3 +937,8 @@ class Thresholding_Batch(HLSCustomOp):
         thres_count = out_features * num_steps
         ret_dict[thres_param_type] = thres_count
         return ret_dict
+
+    def ipgen_extra_directives(self):
+        "Return a list of extra tcl directives for HLS synthesis."
+
+        return ["config_compile -pipeline_style frp"]
diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py
index e9a212a588d90bfd72ef0487c4137eda912ca701..3d8dcaf2fca52b6c23b10322e0061b580807e0bc 100644
--- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py
+++ b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py
@@ -698,8 +698,8 @@ class Vector_Vector_Activate_Batch(HLSCustomOp):
         if noact == 0:
             odt = self.get_output_datatype()
             B = odt.bitwidth()
-            thr_luts = (2 ** B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64)
-            comp_luts = (2 ** B - 1) * acc_bits
+            thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64)
+            comp_luts = (2**B - 1) * acc_bits
 
         return int(c0 + c1 * (P * (mult_luts + acc_luts + thr_luts + comp_luts)) + c2)
 
diff --git a/src/finn/qnn-data/mdd-data/finn_design.mdd b/src/finn/qnn-data/mdd-data/finn_design.mdd
new file mode 100644
index 0000000000000000000000000000000000000000..517180fa94079ad3e04d3a45776f165fd82cc483
--- /dev/null
+++ b/src/finn/qnn-data/mdd-data/finn_design.mdd
@@ -0,0 +1,36 @@
+# Copyright (c) 2022  Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of  Advanced Micro Devices nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+OPTION psf_version = 2.1;
+
+BEGIN driver finn_design
+        OPTION supported_peripherals = (finn_design);
+        OPTION driver_state = ACTIVE;
+        OPTION VERSION = 1.0;
+        OPTION NAME = finn_design;
+END driver
diff --git a/src/finn/qnn-data/mdd-data/finn_design.tcl b/src/finn/qnn-data/mdd-data/finn_design.tcl
new file mode 100644
index 0000000000000000000000000000000000000000..b8c55e12b22a2152157cbecd2b0b4bf061e9918a
--- /dev/null
+++ b/src/finn/qnn-data/mdd-data/finn_design.tcl
@@ -0,0 +1,58 @@
+# Copyright (c) 2022  Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of  Advanced Micro Devices nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# @brief        Address range defines for FINN IP.
+# @author       Thomas B. Preußer <thomas.preusser@amd.com>
+##
+
+proc generate {drv_handle} {
+        # Bounds of all exposed slave address ranges to xparameters.h
+        set file_handle [hsi::utils::open_include_file "xparameters.h"]
+        generate_memrange_parameters $drv_handle $file_handle
+        close $file_handle
+}
+
+proc generate_memrange_parameters {drv_handle file_handle} {
+        # Collect unique slave interfaces to custom module
+        array unset ranges
+        foreach mem_range [hsi::get_mem_ranges -of_object [hsi::get_cells -hier [hsi::get_sw_processor]] $drv_handle] {
+                set ranges([common::get_property SLAVE_INTERFACE $mem_range]) [list \
+                        [common::get_property BASE_NAME  $mem_range] \
+                        [common::get_property BASE_VALUE $mem_range] \
+                        [common::get_property HIGH_NAME  $mem_range] \
+                        [common::get_property HIGH_VALUE $mem_range] \
+                ]
+        }
+
+        # Produce defines for the address range bounds
+        set prefix "XPAR_[string toupper $drv_handle]"
+        foreach {key val} [array get ranges] {
+                puts $file_handle "#define [format "%s_%s_%s" $prefix $key [lindex $val 0]] [lindex $val 1]"
+                puts $file_handle "#define [format "%s_%s_%s" $prefix $key [lindex $val 2]] [lindex $val 3]"
+        }
+        puts $file_handle ""
+}
diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index c16bd00eba9f5ba424682d0c64a759cb1b638891..4ea5abdc0d142eb510ada8de83bfb61a84287352 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -339,20 +339,27 @@ class InferStreamingMaxPool(Transformation):
         graph = model.graph
         node_ind = 0
         graph_modified = False
-        for n in graph.node:
+        for node in graph.node:
             node_ind += 1
-            if n.op_type == "MaxPoolNHWC":
-                mp_input = n.input[0]
-                mp_output = n.output[0]
+            if node.op_type == "MaxPoolNHWC":
+                mp_input = node.input[0]
+                mp_output = node.output[0]
                 mp_in_shape = model.get_tensor_shape(mp_input)
                 # mp_out_shape = model.get_tensor_shape(mp_output)
                 dt = model.get_tensor_datatype(mp_input)
-                mp_inst = getCustomOp(n)
+                mp_inst = getCustomOp(node)
                 k_h, k_w = mp_inst.get_nodeattr("kernel_shape")
                 ifm_ch = mp_in_shape[-1]
                 ifm_dim_h = mp_in_shape[1]
                 ifm_dim_w = mp_in_shape[2]
-                if ifm_dim_h % k_h == 0 and ifm_dim_w % k_w == 0:
+                pe = 1
+                ceil_mode = mp_inst.get_nodeattr("ceil_mode")
+                is_1d = (ifm_dim_h == 1 and k_h == 1) or (ifm_dim_w == 1 and k_w == 1)
+                is_divisable = (ifm_dim_h % k_h == 0) or (ifm_dim_w % k_w == 0)
+                is_bipolar = dt == DataType["BIPOLAR"]
+                pass_1d = is_1d and (not is_bipolar)
+                pass_2d = (not is_1d) and is_divisable
+                if pass_1d or pass_2d:
                     # create equivalent StreamingMaxPool_Batch node
                     new_node = helper.make_node(
                         "StreamingMaxPool_Batch",
@@ -364,12 +371,16 @@ class InferStreamingMaxPool(Transformation):
                         NumChannels=ifm_ch,
                         ImgDim=(ifm_dim_h, ifm_dim_w),
                         dataType=dt.name,
-                        name="StreamingMaxPool_Batch_" + n.name,
+                        PE=pe,
+                        CeilMode=ceil_mode,
+                        name="StreamingMaxPool_Batch_" + node.name,
                     )
                     graph.node.insert(node_ind, new_node)
                     # remove old nodes
-                    graph.node.remove(n)
+                    graph.node.remove(node)
                     graph_modified = True
+                else:
+                    warnings.warn(node.name + ": could not convert to HLS")
         if graph_modified:
             model = model.transform(InferShapes())
             model = model.transform(InferDataTypes())
diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py
index 618ed0780a860b74eed02a76964d882d04d7ecde..b19ef170f4547747d196978d08b8eacc7963d1ce 100644
--- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py
+++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py
@@ -26,11 +26,14 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pkg_resources as pk
+
 import json
 import multiprocessing as mp
 import os
 import subprocess
 import warnings
+from shutil import copytree
 
 from finn.custom_op.registry import getCustomOp
 from finn.transformation.base import Transformation
@@ -61,7 +64,9 @@ def is_external_output(model, node, i):
     # indicate whether output i of node should be made external
     # True only if output is unconnected
     consumers = model.find_consumers(node.output[i])
-    if consumers is None:
+    if consumers == []:
+        # TODO should ideally check if tensor is in top-level
+        # outputs
         return True
     return False
 
@@ -160,6 +165,16 @@ class CreateStitchedIP(Transformation):
             self.connect_cmds.append(
                 "set_property name m_axi_gmem0 [get_bd_intf_ports m_axi_gmem_0]"
             )
+            self.connect_cmds.append("assign_bd_address")
+            seg_name = "%s/Data_m_axi_gmem/SEG_m_axi_gmem0_Reg" % (inst_name)
+            self.connect_cmds.append(
+                "set_property offset 0 [get_bd_addr_segs {%s}]" % (seg_name)
+            )
+            # TODO should propagate this information from the node instead of 4G
+            self.connect_cmds.append(
+                "set_property range 4G [get_bd_addr_segs {%s}]" % (seg_name)
+            )
+
             self.intf_names["aximm"] = [("m_axi_gmem0", aximm_intf_name[0][1])]
             assert self.has_aximm is False, "Currently limited to one AXI-MM interface"
             self.has_aximm = True
@@ -215,7 +230,7 @@ class CreateStitchedIP(Transformation):
         model = model.transform(ReplaceVerilogRelPaths())
         ip_dirs = ["list"]
         # add RTL streamer IP
-        ip_dirs.append("/workspace/finn/finn-rtllib/memstream")
+        ip_dirs.append("$::env(FINN_ROOT)/finn/finn-rtllib/memstream")
         if model.graph.node[0].op_type not in ["StreamingFIFO", "IODMA"]:
             warnings.warn(
                 """First node is not StreamingFIFO or IODMA.
@@ -257,7 +272,7 @@ class CreateStitchedIP(Transformation):
         for input in model.graph.input:
             inp_name = input.name
             inp_cons = model.find_consumers(inp_name)
-            assert inp_cons is not None, "No consumer for input " + inp_name
+            assert inp_cons != [], "No consumer for input " + inp_name
             assert len(inp_cons) == 1, "Multiple consumers for input " + inp_name
             node = inp_cons[0]
             node_inst = getCustomOp(node)
@@ -432,6 +447,21 @@ class CreateStitchedIP(Transformation):
                 "ipx::add_file dcp/%s.dcp "
                 "[ipx::get_file_groups xilinx_simulationcheckpoint]" % block_name
             )
+        # add a rudimentary driver mdd to get correct ranges in xparameters.h later on
+        example_data_dir = pk.resource_filename("finn.qnn-data", "mdd-data/")
+        copytree(example_data_dir, vivado_stitch_proj_dir + "/data")
+        tcl.append("file copy -force data ip/")
+        tcl.append("ipx::add_file_group -type software_driver {} [ipx::current_core]")
+        tcl.append(
+            "set_property type mdd [ipx::add_file data/finn_design.mdd "
+            "[ipx::get_file_groups xilinx_softwaredriver -of_objects "
+            "[ipx::current_core]]]"
+        )
+        tcl.append(
+            "set_property type tclSource [ipx::add_file data/finn_design.tcl "
+            "[ipx::get_file_groups xilinx_softwaredriver -of_objects "
+            "[ipx::current_core]]]"
+        )
         tcl.append("ipx::update_checksums [ipx::find_open_core %s]" % block_vlnv)
         tcl.append("ipx::save_core [ipx::find_open_core %s]" % block_vlnv)
         # export list of used Verilog files (for rtlsim later on)
diff --git a/src/finn/transformation/fpgadataflow/insert_dwc.py b/src/finn/transformation/fpgadataflow/insert_dwc.py
index 4a0d0a89c4a6bb5809887ffcfffb2068ccebaa48..afc889f5bc90a551efddc6232f5689504fe1bb29 100644
--- a/src/finn/transformation/fpgadataflow/insert_dwc.py
+++ b/src/finn/transformation/fpgadataflow/insert_dwc.py
@@ -45,7 +45,7 @@ class InsertDWC(Transformation):
             if _suitable_node(n):
                 for output_name in n.output:
                     consumers = model.find_consumers(output_name)
-                    if consumers is None:
+                    if consumers == []:
                         continue
                     assert len(consumers) == 1, (
                         n.name
diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py
index b5ae2da47a19af5b6bbf44a2a65cbef4c3bbc4dd..26613849060e361a6bc93483e3e1d8416e1fd97f 100644
--- a/src/finn/transformation/fpgadataflow/insert_fifo.py
+++ b/src/finn/transformation/fpgadataflow/insert_fifo.py
@@ -62,7 +62,7 @@ class InsertFIFO(Transformation):
             if _suitable_node(first_node):
                 for n_output in first_node.output:
                     consumers = model.find_consumers(n_output)
-                    if consumers is None:
+                    if consumers == []:
                         continue
                     if len(consumers) > 1:
                         warnings.warn(
diff --git a/src/finn/transformation/fpgadataflow/insert_tlastmarker.py b/src/finn/transformation/fpgadataflow/insert_tlastmarker.py
index 34cb61346dcd5bcd6f41a4272748764cf385a524..a33cee4640a7498f478962767ac4260d9c2bed90 100644
--- a/src/finn/transformation/fpgadataflow/insert_tlastmarker.py
+++ b/src/finn/transformation/fpgadataflow/insert_tlastmarker.py
@@ -97,7 +97,7 @@ class InsertTLastMarker(Transformation):
                 first_node = model.find_consumers(graph_in_name)
                 # skip if no consumers (this may be the case for unused initializers)
                 # TODO: fix this with a cleanup transform
-                if first_node is None:
+                if first_node == []:
                     continue
                 assert len(first_node) == 1, "Input fans out to multiple nodes"
                 first_node = first_node[0]
diff --git a/src/finn/transformation/fpgadataflow/make_zynq_proj.py b/src/finn/transformation/fpgadataflow/make_zynq_proj.py
index 80ce8f0163a23293423ac208451c901eb645643c..0b92f1777373a78cf09466dc3aea6a2802ec98fe 100644
--- a/src/finn/transformation/fpgadataflow/make_zynq_proj.py
+++ b/src/finn/transformation/fpgadataflow/make_zynq_proj.py
@@ -68,7 +68,7 @@ def collect_ip_dirs(model, ipstitch_path):
     ip_dirs += [ipstitch_path + "/ip"]
     if need_memstreamer:
         # add RTL streamer IP
-        ip_dirs.append("/workspace/finn/finn-rtllib/memstream")
+        ip_dirs.append("$::env(FINN_ROOT)/finn/finn-rtllib/memstream")
     return ip_dirs
 
 
@@ -152,11 +152,13 @@ class MakeZYNQProject(Transformation):
             # define kernel instances
             # name kernels connected to graph inputs as idmaxx
             # name kernels connected to graph outputs as odmaxx
-            if producer is None or consumer is None:
+            if (producer is None) or (consumer == []):
+                # TODO not a good way of checking for external inputs & outputs
+                # should look at the list of top-level in/out instead
                 if producer is None:
                     instance_names[node.name] = "idma" + str(idma_idx)
                     idma_idx += 1
-                elif consumer is None:
+                elif consumer == []:
                     instance_names[node.name] = "odma" + str(odma_idx)
                     odma_idx += 1
                 config.append(
diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py
index 39eb049565475b462ea0df9d88b46e3598e6cdd9..28f74b5292d14947f4f8a27f2723af4f04590ec8 100644
--- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py
+++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py
@@ -99,7 +99,7 @@ class RemoveShallowFIFOs(Transformation):
                 # bypass shallow fifos
                 shallow_fifos.append(node)
                 consumers = model.find_consumers(node.output[0])
-                if consumers is None:
+                if consumers == []:
                     producer = model.find_producer(node.input[0])
                     for idx, inp in enumerate(producer.output):
                         if inp == node.input[0]:
@@ -222,7 +222,7 @@ class InsertAndSetFIFODepths(Transformation):
         fpgapart,
         clk_ns=10.0,
         max_qsrl_depth=256,
-        max_depth=2 ** 14,
+        max_depth=2**14,
         swg_exception=True,
         vivado_ram_style="auto",
     ):
diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py
index a12f359c7d3f1c29a17694ef4987a1a349286234..ba1d757b75ff46ef1f78075bc8f3fe07c11551c8 100644
--- a/src/finn/transformation/fpgadataflow/templates.py
+++ b/src/finn/transformation/fpgadataflow/templates.py
@@ -103,8 +103,8 @@ create_project finn_zynq_link ./ -part $FPGA_PART
 # set board part repo paths to find PYNQ-Z1/Z2
 set paths_prop [get_property BOARD_PART_REPO_PATHS [current_project]]
 set paths_param [get_param board.repoPaths]
-lappend paths_prop /workspace/board_files
-lappend paths_param /workspace/board_files
+lappend paths_prop $::env(FINN_ROOT)/board_files
+lappend paths_param $::env(FINN_ROOT)/board_files
 set_property BOARD_PART_REPO_PATHS $paths_prop [current_project]
 set_param board.repoPaths $paths_param
 
diff --git a/src/finn/transformation/fpgadataflow/vitis_build.py b/src/finn/transformation/fpgadataflow/vitis_build.py
index 365632cd5a02eae6e19e670e0b676c521e460507..4dce3ab16c38bfe5dd43f3e23b14ea2ec571f68c 100644
--- a/src/finn/transformation/fpgadataflow/vitis_build.py
+++ b/src/finn/transformation/fpgadataflow/vitis_build.py
@@ -213,11 +213,13 @@ class VitisLink(Transformation):
             # define kernel instances
             # name kernels connected to graph inputs as idmaxx
             # name kernels connected to graph inputs as odmaxx
+            # TODO not a good way of checking for external in/out
+            # check top-level in/out list instead
             if producer is None:
                 instance_names[node.name] = "idma" + str(idma_idx)
                 config.append("nk=%s:1:%s" % (node.name, instance_names[node.name]))
                 idma_idx += 1
-            elif consumer is None:
+            elif consumer == []:
                 instance_names[node.name] = "odma" + str(odma_idx)
                 config.append("nk=%s:1:%s" % (node.name, instance_names[node.name]))
                 odma_idx += 1
diff --git a/src/finn/transformation/qonnx/fold_quant_weights.py b/src/finn/transformation/qonnx/fold_quant_weights.py
index c81085e7a74eb3e08fbee447d7b9b027a436671e..e8a0f418ae5eb587d6aabae57d8b379357d3a0ca 100644
--- a/src/finn/transformation/qonnx/fold_quant_weights.py
+++ b/src/finn/transformation/qonnx/fold_quant_weights.py
@@ -146,11 +146,14 @@ class FoldQuantWeights(Transformation):
                         model.set_initializer(mul_tensor.name, scale)
 
                         successor = model.find_consumers(node_out)
-                        if successor is None:
+                        if successor == []:
                             raise RuntimeError(
                                 "Can only constant fold scaled Quant weights "
                                 "if a successor exists."
                             )
+                        assert (
+                            len(successor) == 1
+                        ), "Only implemented for a single consumer"
                         successor = successor[0]
                         succ_output_name = successor.output[0]
 
diff --git a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py
index faad31fa06e76b245f25b6f0aa583fec5c0da29a..c234bd38d9679f72b6df73e81df57fba3e8d4554 100644
--- a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py
+++ b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py
@@ -230,7 +230,7 @@ class AvgPoolAndTruncToQuantAvgPool(Transformation):
                         # 7c2603a95e90e4de2575020e575c24eab6a15889/src/finn/custom_op/
                         # general/quantavgpool2d.py#L94
                         ibits = math.floor(
-                            math.log(2 ** trunc_in_bits / (k_s * k_s), 2)
+                            math.log(2**trunc_in_bits / (k_s * k_s), 2)
                         )
                         # Get sign
                         signed = _get_signed_from_upstream(model, t_node)
diff --git a/src/finn/transformation/qonnx/qonnx_activation_handlers.py b/src/finn/transformation/qonnx/qonnx_activation_handlers.py
index 3336b1eee7fa9d54092cd56b9ba0edaf9d0884b1..c8bde7fea8ae8195001a7eccfd48baa4c48997ae 100644
--- a/src/finn/transformation/qonnx/qonnx_activation_handlers.py
+++ b/src/finn/transformation/qonnx/qonnx_activation_handlers.py
@@ -333,7 +333,7 @@ class QuantReluHandler(QuantActBaseHandler):
         # Calculate thersholds, see: https://github.com/Xilinx/brevitas/blob/
         # a5bfd6dc5e030f0047ac1ee47932b60e8e873e17/src/brevitas/export/
         # onnx/finn/handler/act.py#L21
-        num_distinct_values = 2 ** bit_width
+        num_distinct_values = 2**bit_width
         num_thresholds = int(num_distinct_values - 1)
         flat_scale = quant_scale.flatten().astype(np.float32)
         num_scale_channels = flat_scale.shape[0]
@@ -468,9 +468,9 @@ class QuantIdentityHandler(QuantActBaseHandler):
             return thresholds
         else:
             if narrow:
-                num_distinct_values = 2 ** bit_width - 1
+                num_distinct_values = 2**bit_width - 1
             else:
-                num_distinct_values = 2 ** bit_width
+                num_distinct_values = 2**bit_width
 
             num_thresholds = int(num_distinct_values - 1)
             flat_scale = quant_scale.flatten()
diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py
index 97ae3b51a849a4174c9853cb41c0d6d72bdf8dad..32e539d87045520044378b94fd0e3c71486990c7 100644
--- a/src/finn/transformation/streamline/absorb.py
+++ b/src/finn/transformation/streamline/absorb.py
@@ -627,10 +627,9 @@ class AbsorbTransposeIntoResize(Transformation):
                         graph.node.insert(node_ind + 1, new_transpose)
                         # rewire nodes
                         final_t_cands = model.find_consumers(mt_cand.output[0])
-                        if final_t_cands is not None:
-                            # rewire next nodes' inputs
-                            for final_t_cand in final_t_cands:
-                                final_t_cand.input[0] = trans_output
+                        # rewire next nodes' inputs
+                        for final_t_cand in final_t_cands:
+                            final_t_cand.input[0] = trans_output
                         mt_cand.output[0] = trans_input
                         graph_modified = True
         if graph_modified:
diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py
index 0cdd6651d982426b1d81d7313346dcd899294bf7..e922dffe37691a39434e9ebafa5df6d1a11d389e 100644
--- a/src/finn/transformation/streamline/reorder.py
+++ b/src/finn/transformation/streamline/reorder.py
@@ -670,6 +670,13 @@ class MakeMaxPoolNHWC(Transformation):
                 if consumer is not None and consumer.op_type == "Transpose":
                     perms = list(get_by_name(consumer.attribute, "perm").ints)
                     if perms == [0, 2, 3, 1]:
+                        ceil_mode = get_by_name(n.attribute, "ceil_mode")
+                        if ceil_mode is not None:
+                            ceil_mode = ceil_mode.i
+                        else:
+                            ceil_mode = (
+                                0  # default to ceil_mode=0 (equivalent to np.floor)
+                            )
                         n.op_type = "MaxPoolNHWC"
                         n.domain = "finn.custom_op.general"
                         start_name = n.input[0]
@@ -683,12 +690,20 @@ class MakeMaxPoolNHWC(Transformation):
                         n.output[0] = end_name
                         model.set_tensor_shape(mid_name, (b, hi, wi, c))
                         model.set_tensor_shape(end_name, (b, ho, wo, c))
+                        getCustomOp(n).set_nodeattr("ceil_mode", ceil_mode)
                         graph.node.remove(consumer)
                         graph.node.insert(node_ind - 1, consumer)
                         graph_modified = True
                 elif producer is not None and producer.op_type == "Transpose":
                     perms = list(get_by_name(producer.attribute, "perm").ints)
                     if perms == [0, 3, 1, 2]:
+                        ceil_mode = get_by_name(n.attribute, "ceil_mode")
+                        if ceil_mode is not None:
+                            ceil_mode = ceil_mode.i
+                        else:
+                            ceil_mode = (
+                                0  # default to ceil_mode=0 (equivalent to np.floor)
+                            )
                         n.op_type = "MaxPoolNHWC"
                         n.domain = "finn.custom_op.general"
                         start_name = producer.input[0]
@@ -702,6 +717,7 @@ class MakeMaxPoolNHWC(Transformation):
                         n.output[0] = mid_name
                         model.set_tensor_shape(mid_name, (b, ho, wo, c))
                         model.set_tensor_shape(end_name, (b, c, ho, wo))
+                        getCustomOp(n).set_nodeattr("ceil_mode", ceil_mode)
                         graph.node.remove(producer)
                         graph.node.insert(node_ind, producer)
                         graph_modified = True
@@ -739,6 +755,7 @@ class MoveOpPastFork(Transformation):
                 # Check case when branches are empty and go
                 # to the same node
                 consumers = model.find_consumers(n.output[0])
+                assert len(consumers) > 1, "Must have >1 consumer"
                 unique_consumer = True
                 for consum_node in consumers[1:]:
                     if consumers[0] != consum_node:
diff --git a/tests/brevitas/test_brevitas_avg_pool_export.py b/tests/brevitas/test_brevitas_avg_pool_export.py
index 1b38914a83e7c5d68bb004df7545b518d6a93ddd..6d0c68f0f456c05ab60ffa043277409730b695ce 100644
--- a/tests/brevitas/test_brevitas_avg_pool_export.py
+++ b/tests/brevitas/test_brevitas_avg_pool_export.py
@@ -47,6 +47,7 @@ from finn.util.basic import gen_finn_dt_tensor
 base_export_onnx_path = "test_brevitas_avg_pool_export.onnx"
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("QONNX_export", [False, True])
 @pytest.mark.parametrize("kernel_size", [2, 3])
 @pytest.mark.parametrize("stride", [1, 2])
diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py
index 78ca361366902b37f826b575904126c783adbece..2592d381173ee2112565f17d6631dd98f05e221a 100644
--- a/tests/brevitas/test_brevitas_cnv.py
+++ b/tests/brevitas/test_brevitas_cnv.py
@@ -47,7 +47,7 @@ from finn.util.test import get_test_model_trained
 
 export_onnx_path = "test_brevitas_cnv.onnx"
 
-
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("abits", [1, 2])
 @pytest.mark.parametrize("wbits", [1, 2])
 @pytest.mark.parametrize("QONNX_export", [False, True])
diff --git a/tests/brevitas/test_brevitas_debug.py b/tests/brevitas/test_brevitas_debug.py
index e42b93babefd9ca6a7a86def18a5cbb21d795c4c..3db1a208456f7209623530681d96d6aa35928900 100644
--- a/tests/brevitas/test_brevitas_debug.py
+++ b/tests/brevitas/test_brevitas_debug.py
@@ -47,6 +47,7 @@ from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
 from finn.util.test import get_test_model_trained
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("QONNX_export", [False, True])
 @pytest.mark.parametrize("QONNX_FINN_conversion", [False, True])
 def test_brevitas_debug(QONNX_export, QONNX_FINN_conversion):
diff --git a/tests/brevitas/test_brevitas_fc.py b/tests/brevitas/test_brevitas_fc.py
index 8e1e3de8d06b24ce946fb0a6726d875d0e75736e..fc0f24b9172eb7882197026420ede8fe5d69bee5 100644
--- a/tests/brevitas/test_brevitas_fc.py
+++ b/tests/brevitas/test_brevitas_fc.py
@@ -49,6 +49,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path = make_build_dir("test_brevitas_fc_")
 
 
+@pytest.mark.brevitas_export
 # act bits
 @pytest.mark.parametrize("abits", [1, 2])
 # weight bits
diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py
index 108c97c2e83b7f3ca9dd6ead746b3ef8b4d10af5..189ca1da6c2862db6239186c7eb234a992a66472 100644
--- a/tests/brevitas/test_brevitas_mobilenet.py
+++ b/tests/brevitas/test_brevitas_mobilenet.py
@@ -52,7 +52,8 @@ from finn.util.basic import make_build_dir
 from finn.util.pytorch import NormalizePreProc
 from finn.util.test import crop_center, get_test_model_trained, resize_smaller_side
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.xfail
 def test_brevitas_mobilenet():
     # get single image as input and prepare image
diff --git a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py
index b530b4bd84c548319549a8b16e0c3a79584e075d..4f9d2778028223d85882839ef7243e170ef90dd6 100644
--- a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py
+++ b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py
@@ -47,7 +47,8 @@ from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
 
 export_onnx_path = "test_brevitas_non_scaled_QuantHardTanh_export.onnx"
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("abits", [1, 2, 4, 8])
 @pytest.mark.parametrize("narrow_range", [False, True])
 @pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7)])
diff --git a/tests/brevitas/test_brevitas_qconv2d.py b/tests/brevitas/test_brevitas_qconv2d.py
index beaea4e51ecdd4cff9f0d4d0c16735cdecad207c..4d9bd14ae3500fd8c0e78e6c4d377ce1f234d168 100644
--- a/tests/brevitas/test_brevitas_qconv2d.py
+++ b/tests/brevitas/test_brevitas_qconv2d.py
@@ -49,7 +49,8 @@ from finn.util.basic import gen_finn_dt_tensor
 
 export_onnx_path = "test_brevitas_conv.onnx"
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("dw", [False, True])
 @pytest.mark.parametrize("bias", [True, False])
 @pytest.mark.parametrize("in_channels", [32])
diff --git a/tests/brevitas/test_brevitas_qlinear.py b/tests/brevitas/test_brevitas_qlinear.py
index 1099d3ec83336e5cd07707b35baea112b7a2aee6..e78262fcb24a1fec1fa876a39c67bd3aa850299c 100644
--- a/tests/brevitas/test_brevitas_qlinear.py
+++ b/tests/brevitas/test_brevitas_qlinear.py
@@ -46,7 +46,8 @@ from finn.util.basic import gen_finn_dt_tensor
 
 export_onnx_path = "test_brevitas_qlinear.onnx"
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("bias", [False, True])
 @pytest.mark.parametrize("out_features", [4])
 @pytest.mark.parametrize("in_features", [3])
diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py
index 57ead3b6c047220e90d4276620cc14b8f795fe08..01ba7f382535ea8a12a60f211b7718ca57164db4 100644
--- a/tests/brevitas/test_brevitas_relu_act_export.py
+++ b/tests/brevitas/test_brevitas_relu_act_export.py
@@ -47,7 +47,8 @@ from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
 
 export_onnx_path = "test_brevitas_relu_act_export.onnx"
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("abits", [2, 4, 8])
 @pytest.mark.parametrize("max_val", [1.0, 1.5, 1 - 2 ** (-7)])
 @pytest.mark.parametrize(
@@ -110,7 +110,8 @@ scaling_impl.learned_value": torch.tensor(
     assert np.isclose(produced, expected, atol=1e-3).all()
     os.remove(export_onnx_path)
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("abits", [2, 4, 8])
 @pytest.mark.parametrize("max_val", [1.0, 1.5, 1 - 2 ** (-7)])
 @pytest.mark.parametrize("scaling_per_channel", [True, False])
diff --git a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py
index c6da2e2e971ee97cb73243284920cc87e8b4d7bb..9f17c0f26c42058f314a25c066c8ba37a06e0b65 100644
--- a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py
+++ b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py
@@ -47,7 +47,8 @@ from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
 
 export_onnx_path = "test_brevitas_scaled_QHardTanh_export.onnx"
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.parametrize("abits", [2, 4, 8])
 @pytest.mark.parametrize("narrow_range", [False, True])
 @pytest.mark.parametrize("min_val", [-1.0, -(1 - 2 ** (-7)), -2])
diff --git a/tests/brevitas/test_brevitas_validate_mobilenet.py b/tests/brevitas/test_brevitas_validate_mobilenet.py
index 12e7e7aff2ec2ebae3e2ec7713a24046553dc5f2..67e6b785a70c81717adadd3d2695017e0382edda 100644
--- a/tests/brevitas/test_brevitas_validate_mobilenet.py
+++ b/tests/brevitas/test_brevitas_validate_mobilenet.py
@@ -61,7 +61,8 @@ mean = [0.485, 0.456, 0.406]
 std = 0.226
 ch = 3
 
 
+@pytest.mark.brevitas_export
 def test_brevitas_mobilenet_preproc():
     if "IMAGENET_VAL_PATH" not in os.environ.keys():
         pytest.skip("Can't do validation without IMAGENET_VAL_PATH")
@@ -98,6 +98,7 @@ def test_brevitas_mobilenet_preproc():
         assert (finn_img == pyt_img).all()
 
 
+@pytest.mark.brevitas_export
 @pytest.mark.slow
 # marked as XFAIL until Brevitas export issues are resolved:
 # https://github.com/Xilinx/brevitas/issues/173
diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index bc14b9dee5de0b1c0a5d05292935ab91c1dd1553..b74875e10fc8c539b7a5f3eced5f1f11df3b5f94 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -765,7 +765,7 @@ class TestEnd2End:
         ret = dict()
         # try a range of batch sizes, some may fail due to insufficient DMA
         # buffers
-        bsize_range_in = [8 ** i for i in range(5)]
+        bsize_range_in = [8**i for i in range(5)]
         bsize_range = []
         for bsize in bsize_range_in:
             res = throughput_test_remote(model, bsize)
diff --git a/tests/fpgadataflow/test_code_gen_trafo.py b/tests/fpgadataflow/test_code_gen_trafo.py
index 5ddff3d36f03d17833e17bc98649a64dabf31577..837173b6772ce968c0c618b40e23f6c0f810015a 100644
--- a/tests/fpgadataflow/test_code_gen_trafo.py
+++ b/tests/fpgadataflow/test_code_gen_trafo.py
@@ -37,6 +37,7 @@ from finn.core.modelwrapper import ModelWrapper
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_code_gen_trafo():
     idt = wdt = odt = DataType["BIPOLAR"]
diff --git a/tests/fpgadataflow/test_compilation_trafo.py b/tests/fpgadataflow/test_compilation_trafo.py
index 81e2ff9a7c5829982cdb6121378e9e9e3af81632..e36bce7e9abc8c5d8f815e4559cbce52ca186934 100644
--- a/tests/fpgadataflow/test_compilation_trafo.py
+++ b/tests/fpgadataflow/test_compilation_trafo.py
@@ -38,6 +38,7 @@ from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_compilation_trafo():
     idt = wdt = odt = DataType["BIPOLAR"]
diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py
index 5cc5f8fa6c1ccd3e5a9e154b6fb2773caf4668a9..95c340694a71eb52c0f8dd4b00e06df244f4d651 100644
--- a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py
@@ -67,6 +67,7 @@ from finn.util.basic import gen_finn_dt_tensor
 )
 @pytest.mark.parametrize("depthwise", [False, True])
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode):
diff --git a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py
index bf690d1d68bc0f580663735c3596c1dfc0a651e8..946b748e583297c2e2fa52d73fed5f13fcba14ab 100644
--- a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py
@@ -89,6 +89,7 @@ def make_single_maxpool_modelwrapper(onnx_op_name, ishape, idt, pdt, pshape):
 @pytest.mark.parametrize("scalar_param", [True, False])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_convert_to_hls_channelwise_layer(
diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
index 9b0f3d68aed655f0b36857d50a085093ea94aecb..005ec40288bed58d62993e99a84e3ca63cdfa679 100755
--- a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py
@@ -75,6 +75,7 @@ def get_multithreshold_rand_params(channels, num_of_thres, seed=None):
 )
 @pytest.mark.parametrize("depthwise", [False, True])
 @pytest.mark.parametrize("use_reshape", [False, True])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape):
diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
index d96bc987567cdcfcd18a404986c954c7527c7354..cc1bac1ed199ba5b4eabcb4535f329772ea1ce35 100644
--- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
+++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py
@@ -58,6 +58,7 @@ from finn.util.basic import gen_finn_dt_tensor
 )
 @pytest.mark.parametrize("depthwise", [False, True])
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode):
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
index 3357ee6d6c1e540818549f2d0df8b8554690ca3c..292a2c8f7a7e0af757e7967e51204db81f79767c 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py
@@ -55,6 +55,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path_cnv = "test_convert_to_hls_layers_cnv.onnx"
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 # Standalone or fused thresholding-based activation
 @pytest.mark.parametrize("fused_activation", [True, False])
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py
index a1dc11e0eee5aab462beb0ec34b8771ced20a379..f5e069a3e5486ee1771f6417a93dbafecaaa77d7 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py
@@ -55,6 +55,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path = "test_convert_to_hls_layers_fc.onnx"
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_convert_to_hls_layers_tfc_w1a1():
     tfc = get_test_model_trained("TFC", 1, 1)
@@ -125,6 +126,7 @@ def test_convert_to_hls_layers_tfc_w1a1():
     os.remove(export_onnx_path)
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_convert_to_hls_layers_tfc_w1a2():
     tfc = get_test_model_trained("TFC", 1, 2)
diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
index 6089901566cb412e63cd8acc7a8260081248ba52..06b0367507ea5c9df4c8280090900bc20548c541 100644
--- a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
+++ b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py
@@ -143,6 +143,7 @@ def make_model(ch, ifmdim):
 @pytest.mark.parametrize("ch", [16])
 # ifmdim
 @pytest.mark.parametrize("ifmdim", [5])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_convert_to_hls_layers_synthetic(ch, ifmdim, idt):
diff --git a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
index 0dd9991b2ff07a35c923afeda854352213f8ca09..7595275c3be34e947f40415d050c0f3e4a9a7a58 100644
--- a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
+++ b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py
@@ -140,6 +140,7 @@ def prepare_inputs(input_tensor):
 @pytest.mark.parametrize("op_type", ["QuantAvgPool2d", "MaxPool", "MaxPool1D"])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_convert_to_hls_pool_batch(
diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py
index 633db668d3bc5de815a313743c06cd74a7166c9c..11b358da3e20ecafa6b575a961bc24e496942ad4 100644
--- a/tests/fpgadataflow/test_depthwise_convolution.py
+++ b/tests/fpgadataflow/test_depthwise_convolution.py
@@ -168,6 +168,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding):
 @pytest.mark.parametrize("stride", [1, 2])
 # padding
 @pytest.mark.parametrize("padding", [0, 1])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding):
@@ -210,6 +211,7 @@ def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding):
 @pytest.mark.parametrize("stride", [1, 2])
 # padding
 @pytest.mark.parametrize("padding", [0, 1])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_depthwise_conv_hls_rtlsim(act, pe, k, stride, padding):
diff --git a/tests/fpgadataflow/test_fpgadataflow_addstreams.py b/tests/fpgadataflow/test_fpgadataflow_addstreams.py
index 8cbf54ec188b12c67e02a33e3540718e9b08f382..a3927cd2aa6a9e87c32068f986ab6030fbacc559 100644
--- a/tests/fpgadataflow/test_fpgadataflow_addstreams.py
+++ b/tests/fpgadataflow/test_fpgadataflow_addstreams.py
@@ -89,6 +89,7 @@ def prepare_inputs(input1, input2):
 @pytest.mark.parametrize("fold", [-1, 2, 1])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_addstreams(idt, ch, fold, exec_mode):
     if fold == -1:
diff --git a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
index 949046d4ae313b852471e7d8a93e44fea48f7b0f..f774a4ff53c636419d8eb7dcfba866fd601f0c98 100644
--- a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
+++ b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
@@ -100,6 +100,7 @@ def make_modelwrapper(C, pe, idt, odt, pdt, func, vecs):
 @pytest.mark.parametrize("func", ["add", "mul"])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_fpgadataflow_channelwise_ops(idt, act, pdt, nf, ich, func, vecs, exec_mode):
diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
index 47cd7e7ba1df76cc793cd0946581239a6883874e..afac8dc6f30982b63827dcd5a9ee4b70c92235ae 100644
--- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
+++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py
@@ -149,6 +149,7 @@ def prepare_inputs(input_tensor):
 @pytest.mark.parametrize("simd", [1, 2])
 # depthwise
 @pytest.mark.parametrize("dw", [0, 1])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_slidingwindow(
diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py
index 27e1907508f94e6d65ac57d313a9b3e9dd824f5a..0d8b26632307b2b514c2aacaa96b28989286cd0d 100644
--- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py
+++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py
@@ -169,6 +169,7 @@ def prepare_inputs(input_tensor):
 @pytest.mark.parametrize("flip", [False, True])
 # Use parallel window output variant
 @pytest.mark.parametrize("parallel_window", [False, True])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_slidingwindow_1d(
diff --git a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
index 1faf647df225853cf026a49adbfc6bb9d8f1b670..838dec81d32799d5a2afa6cfda8db632b2ac3355 100644
--- a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
+++ b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py
@@ -103,6 +103,7 @@ def prepare_inputs(input_tensor, idt):
 @pytest.mark.parametrize("n_dupl", [2, 3])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_duplicatestreams(idt, ch, fold, imdim, n_dupl, exec_mode):
     if fold == -1:
diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py
index 248b591eb48d7cfd6f121738a9bca525c38a45f8..973bfcca2e9862769b2b973365682cbfbc4b4512 100644
--- a/tests/fpgadataflow/test_fpgadataflow_dwc.py
+++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py
@@ -83,6 +83,7 @@ def prepare_inputs(input_tensor, dt):
 @pytest.mark.parametrize("OUTWidth", [2, 4])
 # finn_dtype
 @pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"], DataType["INT2"]])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_dwc_rtlsim(Shape, INWidth, OUTWidth, finn_dtype):
diff --git a/tests/fpgadataflow/test_fpgadataflow_fclayer.py b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
index 02c3a3dc9506152fe999873df0612e76a5c9cefd..41bd5a6d0be4fdd82a40fbdcfc2a307f501b8c07 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fclayer.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fclayer.py
@@ -146,6 +146,7 @@ def prepare_inputs(input_tensor, idt, wdt):
 @pytest.mark.parametrize("mw", [16])
 # HLS matrix height (output features)
 @pytest.mark.parametrize("mh", [16])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_fclayer_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
@@ -233,6 +234,7 @@ def test_fpgadataflow_fclayer_cppsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
 @pytest.mark.parametrize("mw", [16])
 # HLS matrix height (output features)
 @pytest.mark.parametrize("mh", [16])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
@@ -332,6 +334,7 @@ def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
 @pytest.mark.parametrize("mw", [128])
 # HLS matrix height (output features)
 @pytest.mark.parametrize("mh", [128])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim(
     mem_mode, idt, wdt, act, nf, sf, mw, mh
diff --git a/tests/fpgadataflow/test_fpgadataflow_fifo.py b/tests/fpgadataflow/test_fpgadataflow_fifo.py
index 4d3074fe14617df4386f060b6a476734931fb4ca..15e7f594ee4916559324f35d42b07de9acc5a2c6 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fifo.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fifo.py
@@ -87,6 +87,7 @@ def prepare_inputs(input_tensor, dt):
 @pytest.mark.parametrize("depth", [16])
 # finn_dtype
 @pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"]])  # , DataType["INT2"]])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_fifo_rtlsim(Shape, folded_shape, depth, finn_dtype):
diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
index b564273c0927938859dc438dce619e7067a7ad74..ce21ea0c321587b4d73b64dbd2729090f141cce8 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
+++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py
@@ -111,6 +111,7 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty
 @pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]])
 # execution mode
 @pytest.mark.parametrize("mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode):
diff --git a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py
index 2299cc6e8f397df718d2fd65be8a562c2457e42d..fc622b10e9abcc3b050e30fc275ca927b89c7d9c 100644
--- a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py
+++ b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py
@@ -87,6 +87,7 @@ def prepare_inputs(input_tensor, idt):
 @pytest.mark.parametrize("imdim", [7])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_globalaccpool(idt, ch, fold, imdim, exec_mode):
     if fold == -1:
diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py
index 3f7a700dbc1157ad403d7117171b0dee5884166f..381ff3a09f667c326bfad43b8fc7ece538a1213e 100644
--- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py
+++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py
@@ -201,6 +201,7 @@ def create_two_fc_model(mem_mode="decoupled"):
 
 
 @pytest.mark.parametrize("mem_mode", ["const", "decoupled"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_ipstitch_gen_model(mem_mode):
     model = create_one_fc_model(mem_mode)
@@ -222,6 +223,7 @@ def test_fpgadataflow_ipstitch_gen_model(mem_mode):
 
 
 @pytest.mark.parametrize("mem_mode", ["const", "decoupled"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_ipstitch_do_stitch(mem_mode):
     model = load_test_checkpoint_or_skip(
@@ -239,6 +241,7 @@ def test_fpgadataflow_ipstitch_do_stitch(mem_mode):
 
 
 @pytest.mark.parametrize("mem_mode", ["const", "decoupled"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_ipstitch_rtlsim(mem_mode):
     model = load_test_checkpoint_or_skip(
@@ -287,6 +290,7 @@ def test_fpgadataflow_ipstitch_rtlsim(mem_mode):
 
 
 @pytest.mark.parametrize("mem_mode", ["const", "decoupled"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_fpgadataflow_ipstitch_synth_ooc(mem_mode):
@@ -307,7 +311,7 @@ def test_fpgadataflow_ipstitch_synth_ooc(mem_mode):
     assert ret["BRAM"] == 0
     assert ret["fmax_mhz"] > 100
 
-
+@pytest.mark.fpgadataflow
 def test_fpgadataflow_ipstitch_iodma_floorplan():
     model = create_one_fc_model()
     if model.graph.node[0].op_type == "StreamingDataflowPartition":
@@ -330,6 +334,7 @@ def test_fpgadataflow_ipstitch_iodma_floorplan():
 @pytest.mark.parametrize("period_ns", [5])
 # override mem_mode to external
 @pytest.mark.parametrize("extw", [True, False])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 @pytest.mark.vitis
@@ -353,6 +358,7 @@ def test_fpgadataflow_ipstitch_vitis_end2end(board, period_ns, extw):
 
 # board
 @pytest.mark.parametrize("board", ["Pynq-Z1"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_ipstitch_zynqbuild_end2end(board):
diff --git a/tests/fpgadataflow/test_fpgadataflow_labelselect.py b/tests/fpgadataflow/test_fpgadataflow_labelselect.py
index 8ed06c8bdf1c0dbfab2f8141bf724132f4a24705..2858426d1ee4b1f91f5de807ccded4ffe35a3a40 100644
--- a/tests/fpgadataflow/test_fpgadataflow_labelselect.py
+++ b/tests/fpgadataflow/test_fpgadataflow_labelselect.py
@@ -92,6 +92,7 @@ def prepare_inputs(input_tensor, idt):
 @pytest.mark.parametrize("k", [1, 5])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_fpgadataflow_labelselect(idt, labels, fold, k, exec_mode):
     np.random.seed(0)
diff --git a/tests/fpgadataflow/test_fpgadataflow_lookup.py b/tests/fpgadataflow/test_fpgadataflow_lookup.py
index 45678bbdf22c21d794777aba27d9070b42238267..0c284a530319290eb406c6b54a80e4f52d7ed1fa 100644
--- a/tests/fpgadataflow/test_fpgadataflow_lookup.py
+++ b/tests/fpgadataflow/test_fpgadataflow_lookup.py
@@ -36,8 +36,10 @@ from torch import nn
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
 from finn.core.onnx_exec import execute_onnx
+from finn.custom_op.registry import getCustomOp
 from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
 from finn.transformation.fpgadataflow.convert_to_hls_layers import InferLookupLayer
+from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP
 from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
@@ -87,6 +89,7 @@ def make_lookup_model(embeddings, ishape, idt, edt):
 )
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_fpgadataflow_lookup(edt, embedding_cfg, exec_mode):
@@ -124,9 +127,57 @@ def test_fpgadataflow_lookup(edt, embedding_cfg, exec_mode):
         model = model.transform(SetExecMode("cppsim"))
     elif exec_mode == "rtlsim":
         model = model.transform(GiveUniqueNodeNames())
-        model = model.transform(PrepareIP("xc7z020clg400-1", 10))
+        model = model.transform(PrepareIP("xczu3eg-sbva484-1-e", 10))
         model = model.transform(HLSSynthIP())
         model = model.transform(SetExecMode("rtlsim"))
         model = model.transform(PrepareRTLSim())
     ret_sim = execute_onnx(model, {iname: itensor})
     assert (exp_out == ret_sim[oname]).all()
+
+
+@pytest.mark.fpgadataflow
+@pytest.mark.vivado
+@pytest.mark.slow
+def test_fpgadataflow_lookup_external():
+    fpga_part = "xczu3eg-sbva484-1-e"
+    edt = DataType["INT8"]
+    embedding_cfg = (200000, DataType["UINT32"], 300)
+    ishape = (1, 600)
+    num_embeddings, idt, embedding_dim = embedding_cfg
+    eshape = (num_embeddings, embedding_dim)
+    exp_oshape = tuple(list(ishape) + [embedding_dim])
+    embeddings = gen_finn_dt_tensor(edt, eshape)
+    model = make_lookup_model(embeddings, ishape, idt, edt)
+    assert len(model.graph.node) == 1
+    assert model.graph.node[0].op_type == "Gather"
+    iname = model.graph.input[0].name
+    ename = model.graph.node[0].input[0]
+    oname = model.graph.output[0].name
+    assert model.get_tensor_datatype(iname) == idt
+    assert model.get_tensor_datatype(ename) == edt
+    assert model.get_tensor_datatype(oname) == edt
+    assert tuple(model.get_tensor_shape(ename)) == eshape
+    assert tuple(model.get_tensor_shape(oname)) == exp_oshape
+    assert (model.get_initializer(ename) == embeddings).all()
+    # itensor = gen_finn_dt_tensor(idt, ishape).astype(np.int64)
+    # itensor = np.clip(itensor, 0, num_embeddings - 1)
+    # ret = execute_onnx(model, {iname: itensor})
+    # exp_out = np.take(embeddings, itensor, axis=0)
+    # assert (exp_out == ret[oname]).all()
+    # call transformation to convert to HLS and verify conversion
+    model = model.transform(InferLookupLayer())
+    assert model.graph.node[0].op_type == "Lookup"
+    assert model.graph.node[0].input[0] == iname
+    assert model.graph.node[0].input[1] == ename
+    assert model.graph.node[0].output[0] == oname
+    getCustomOp(model.graph.node[0]).set_nodeattr("mem_mode", "external")
+    model = model.transform(GiveUniqueNodeNames())
+    model = model.transform(PrepareIP(fpga_part, 10))
+    model = model.transform(HLSSynthIP())
+    model = model.transform(CreateStitchedIP(fpga_part, 10.0))
+    ifnames = eval(model.get_metadata_prop("vivado_stitch_ifnames"))
+    # check some generated files/interfaces for the generated stitched IP
+    assert ifnames["aximm"] == [["m_axi_gmem0", 32]]
+    assert ifnames["s_axis"] == [["s_axis_0", 32]]
+    assert ifnames["m_axis"] == [["m_axis_0", 32]]
+    assert ifnames["axilite"] == ["s_axi_control_0"]
diff --git a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py
index fe52a73fc07df8551442e975c5eb378c132a56d7..951843a6585c842cf1d4ac93241b3e34554100a5 100644
--- a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py
+++ b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 from onnx import TensorProto, helper
 
 from finn.analysis.fpgadataflow.res_estimation import (
@@ -49,7 +51,8 @@ def check_two_dict_for_equality(dict1, dict2):
 
     return True
 
 
+@pytest.mark.fpgadataflow
 def test_res_estimate():
     mw = mh = 4
     simd = 1
diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding.py b/tests/fpgadataflow/test_fpgadataflow_thresholding.py
index 341bd3f37041c9b5a1526e99b2c4bad4d3dd3029..4cfdbe82d5b9ea533519b97a85c3a09c3bffc97b 100644
--- a/tests/fpgadataflow/test_fpgadataflow_thresholding.py
+++ b/tests/fpgadataflow/test_fpgadataflow_thresholding.py
@@ -52,15 +52,21 @@ from finn.transformation.general import GiveUniqueNodeNames
 from finn.util.basic import gen_finn_dt_tensor
 from finn.util.pyverilator import axilite_read, axilite_write
 
-test_fpga_part = "xc7z020clg400-1"
+test_fpga_part = "xczu3eg-sbva484-1-e"
 target_clk_ns = 5
 
 
-def make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode):
+def make_single_thresholding_modelwrapper(
+    T, pe, idt, odt, actval, mem_mode, n_inp_vecs
+):
     NumChannels = T.shape[0]
 
-    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, NumChannels])
-    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, NumChannels])
+    inp = helper.make_tensor_value_info(
+        "inp", TensorProto.FLOAT, n_inp_vecs + [NumChannels]
+    )
+    outp = helper.make_tensor_value_info(
+        "outp", TensorProto.FLOAT, n_inp_vecs + [NumChannels]
+    )
 
     node_inp_list = ["inp", "thresh"]
 
@@ -78,6 +84,7 @@ def make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode):
         outputDataType=odt.name,
         ActVal=actval,
         mem_mode=mem_mode,
+        numInputVectors=n_inp_vecs,
     )
     graph = helper.make_graph(
         nodes=[Thresholding_node],
@@ -109,16 +116,18 @@ def make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode):
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
 # memory mode
 @pytest.mark.parametrize("mem_mode", ["const", "decoupled"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode):
     if nf == -1:
         nf = ich
     pe = ich // nf
+    n_inp_vecs = [1, 2, 2]
     assert ich % pe == 0
 
     # generate input data
-    x = gen_finn_dt_tensor(idt, (1, ich))
+    x = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ich]))
 
     odt = act
     n_steps = act.get_num_possible_values() - 1
@@ -135,7 +144,9 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode):
     else:
         actval = odt.min()
 
-    model = make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode)
+    model = make_single_thresholding_modelwrapper(
+        T, pe, idt, odt, actval, mem_mode, n_inp_vecs
+    )
 
     if exec_mode == "cppsim":
         model = model.transform(PrepareCppSim())
@@ -153,7 +164,10 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode):
     # package input data as dictionary
     input_dict = {"inp": x}
 
-    y = multithreshold(x, T)
+    # multithreshold util fxn wants NCHW input, not NHWC
+    y = multithreshold(np.transpose(x, (0, 3, 1, 2)), T)
+    # convert back to NHWC for comparison to hw outputs
+    y = np.transpose(y, (0, 2, 3, 1))
     if act == DataType["BIPOLAR"]:
         # binary to bipolar
         y = 2 * y - 1
@@ -183,8 +197,10 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode):
         assert exp_cycles != 0
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_runtime_thresholds_single_layer():
+    n_inp_vecs = [1, 2, 2]
     mem_mode = "decoupled"
     act = DataType["INT4"]
     idt = DataType["INT16"]
@@ -194,7 +210,7 @@ def test_runtime_thresholds_single_layer():
     assert ich % pe == 0
 
     # generate input data
-    in_tensor = gen_finn_dt_tensor(idt, (1, ich))
+    in_tensor = gen_finn_dt_tensor(idt, tuple(n_inp_vecs + [ich]))
 
     odt = act
     n_steps = act.get_num_possible_values() - 1
@@ -207,7 +223,9 @@ def test_runtime_thresholds_single_layer():
     else:
         actval = odt.min()
 
-    model = make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode)
+    model = make_single_thresholding_modelwrapper(
+        T, pe, idt, odt, actval, mem_mode, n_inp_vecs
+    )
     op_inst = getCustomOp(model.graph.node[0])
     op_inst.set_nodeattr("runtime_writeable_weights", 1)
     op_inst.make_weight_file(T, "decoupled_runtime", "old_weights.dat")
@@ -244,7 +262,11 @@ def test_runtime_thresholds_single_layer():
     # only use second batch element in output; first will be invalid due to
     # old weights (see above)
     y = exec_ctx["outp"][1]
-    expected = multithreshold(in_tensor, T)[1]
+
+    # multithreshold util fxn wants NCHW input, not NHWC
+    expected = multithreshold(np.transpose(in_tensor, (0, 3, 1, 2)), T)
+    # convert back to NHWC for comparison to hw outputs
+    expected = np.transpose(expected, (0, 2, 3, 1))[1]
     if act == DataType["BIPOLAR"]:
         # binary to bipolar
         expected = 2 * expected - 1
diff --git a/tests/fpgadataflow/test_fpgadataflow_upsampler.py b/tests/fpgadataflow/test_fpgadataflow_upsampler.py
index 1709cfe32904a5ed369f8399150a8a1d05f4b781..362d9def1028c46a8ebf1d79649971156b1d57a3 100644
--- a/tests/fpgadataflow/test_fpgadataflow_upsampler.py
+++ b/tests/fpgadataflow/test_fpgadataflow_upsampler.py
@@ -125,6 +125,7 @@ class PyTorchTestModel(nn.Module):
 @pytest.mark.parametrize("NumChannels", [4])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 @pytest.mark.slow
 def test_fpgadataflow_upsampler(dt, IFMDim, scale, NumChannels, exec_mode):
diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py
index 6f39994bf27594a063a1e66c5bba7867eaabef6e..46cb23a520b524ef4063916179bb33a6810ef7c8 100644
--- a/tests/fpgadataflow/test_fpgadataflow_vvau.py
+++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py
@@ -62,8 +62,8 @@ def _infer_sparse_weight_tensor(W_conv, k_h, k_w, channels):
 def _calculate_dot_prod_range(dt_a, dt_b, len):
     """Returns the (min,max) values a dot product between two (un)signed vectors of
     types dt_a and dt_b of len elements can take."""
-    min_prod = 2 ** 30
-    max_prod = -(2 ** 30)
+    min_prod = 2**30
+    max_prod = -(2**30)
     for a_val in [dt_a.min(), dt_a.max()]:
         for b_val in [dt_b.min(), dt_b.max()]:
             prod = a_val * b_val * len
@@ -158,6 +158,7 @@ def prepare_inputs(input_tensor):
 @pytest.mark.parametrize("channels", [3, 4])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_vvau(
diff --git a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
index f18fd8d1019337e7b87ae9e47ba3a5b53ec849f7..494aea4dad000ff6d6bf61e9e38440b727d90dc7 100644
--- a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
+++ b/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py
@@ -35,19 +35,22 @@ import finn.core.onnx_exec as oxe
 from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer
 from finn.core.datatype import DataType
 from finn.core.modelwrapper import ModelWrapper
+from finn.custom_op.general.maxpoolnhwc import compute_pool_output_dim
 
 # from finn.custom_op.registry import getCustomOp
 from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
+from finn.transformation.fpgadataflow.convert_to_hls_layers import InferStreamingMaxPool
 from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
 from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
 from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
 from finn.transformation.general import GiveUniqueNodeNames
+from finn.transformation.infer_shapes import InferShapes
 from finn.util.basic import gen_finn_dt_tensor
 
 
-def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt):
+def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_mode):
     k_h, k_w = k
     ifm_dim_h, ifm_dim_w = ifm_dim
     ofm_dim_h, ofm_dim_w = ofm_dim
@@ -66,6 +69,7 @@ def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt):
         domain="finn.custom_op.general",
         kernel_shape=[k_h, k_w],
         strides=[k_h, k_w],
+        ceil_mode=ceil_mode,
         pads=[0, 0, 0, 0],
     )
     graph = helper.make_graph(
@@ -81,7 +85,9 @@ def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt):
     return model
 
 
-def make_single_streamingmaxpool_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt):
+def make_single_streamingmaxpool_modelwrapper(
+    k, ifm_ch, pe, ifm_dim, ofm_dim, idt, ceil_mode
+):
     k_h, k_w = k
     ifm_dim_h, ifm_dim_w = ifm_dim
     ofm_dim_h, ofm_dim_w = ofm_dim
@@ -101,7 +107,9 @@ def make_single_streamingmaxpool_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt):
         backend="fpgadataflow",
         PoolDim=[k_h, k_w],
         NumChannels=ifm_ch,
+        PE=pe,
         ImgDim=[ifm_dim_h, ifm_dim_w],
+        CeilMode=ceil_mode,
         dataType=idt.name,
     )
     graph = helper.make_graph(
@@ -128,14 +136,21 @@ def prepare_inputs(input_tensor):
 # kernel size
 @pytest.mark.parametrize("k", [2, 4])
 # input dimension
-@pytest.mark.parametrize("ifm_dim", [4, 8])
+@pytest.mark.parametrize("ifm_dim", [4, 10])
 # input channels
 @pytest.mark.parametrize("ifm_ch", [1, 3])  # 1,3
+# pe
+@pytest.mark.parametrize("pe", [1, 3])
+# ceil mode
+@pytest.mark.parametrize("ceil_mode", [1])
 # execution mode
 @pytest.mark.parametrize("exec_mode", ["rtlsim", "cppsim"])
+@pytest.mark.fpgadataflow
 @pytest.mark.slow
 @pytest.mark.vivado
-def test_fpgadataflow_streamingmaxpool(idt, dim_1d, k, ifm_dim, ifm_ch, exec_mode):
+def test_fpgadataflow_streamingmaxpool(
+    idt, dim_1d, k, ifm_dim, ifm_ch, pe, ceil_mode, exec_mode
+):
     ifm_dim_h = ifm_dim
     k_h = k
     if dim_1d:
@@ -149,22 +164,31 @@ def test_fpgadataflow_streamingmaxpool(idt, dim_1d, k, ifm_dim, ifm_ch, exec_mod
 
     stride_h = k_h
     stride_w = k_w
-    ofm_dim_h = int(((ifm_dim_h - k_h) / stride_h) + 1)
-    ofm_dim_w = int(((ifm_dim_w - k_w) / stride_w) + 1)
+    ofm_dim_h = compute_pool_output_dim(ifm_dim_h, k_h, stride_h, 0, ceil_mode)
+    ofm_dim_w = compute_pool_output_dim(ifm_dim_w, k_w, stride_w, 0, ceil_mode)
     ofm_dim = (ofm_dim_h, ofm_dim_w)
     if idt == DataType["BIPOLAR"] and dim_1d:
         pytest.skip("Skipping binary StreamingMaxPool_1d (not implemented)")
-    if ifm_dim_h % k_h != 0 or ifm_dim_w % k_w != 0:
-        pytest.skip("Skipping StreamingMaxPool test w/ ImgDim % PoolDim != 0")
+    if (ifm_dim_h % k_h != 0 or ifm_dim_w % k_w != 0) and (not dim_1d):
+        pytest.skip("StreamingMaxPool_2d test w/ ImgDim % PoolDim != 0 not implemented")
+    if pe > ifm_ch:
+        pytest.skip("PE cannot be larger than number of input channels")
+    if pe > 1 and (not dim_1d):
+        pytest.skip("PE>1 only supported for StreamingMaxPool_1d")
 
     x = gen_finn_dt_tensor(idt, (1, ifm_dim_h, ifm_dim_w, ifm_ch))
     # prepare input data
     input_dict = prepare_inputs(x)
 
-    golden = make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt)
+    golden = make_single_maxpoolnhwc_modelwrapper(
+        k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_mode
+    )
     y_expected = oxe.execute_onnx(golden, input_dict)["outp"]
 
-    model = make_single_streamingmaxpool_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt)
+    model = golden.transform(InferStreamingMaxPool())
+    model = model.transform(InferShapes())
+
+    assert model.graph.node[0].op_type == "StreamingMaxPool_Batch"
 
     if exec_mode == "cppsim":
         model = model.transform(SetExecMode("cppsim"))
@@ -173,7 +197,7 @@ def test_fpgadataflow_streamingmaxpool(idt, dim_1d, k, ifm_dim, ifm_ch, exec_mod
     elif exec_mode == "rtlsim":
         model = model.transform(SetExecMode("rtlsim"))
         model = model.transform(GiveUniqueNodeNames())
-        model = model.transform(PrepareIP("xc7z020clg400-1", 5))
+        model = model.transform(PrepareIP("xczu3eg-sbva484-1-e", 5))
         model = model.transform(HLSSynthIP())
         model = model.transform(PrepareRTLSim())
     else:
diff --git a/tests/fpgadataflow/test_runtime_weights.py b/tests/fpgadataflow/test_runtime_weights.py
index 0196a78d5c4254d7cb116641f946bcccb9e1ebc9..8d0976cad2f3c1deb6947db7a06a669ed34dc5d7 100644
--- a/tests/fpgadataflow/test_runtime_weights.py
+++ b/tests/fpgadataflow/test_runtime_weights.py
@@ -43,10 +43,11 @@ from finn.util.basic import gen_finn_dt_tensor
 from finn.util.create import hls_random_mlp_maker
 from finn.util.pyverilator import axilite_read, axilite_write
 
-test_fpga_part = "xc7z020clg400-1"
+test_fpga_part = "xczu3eg-sbva484-1-e"
 target_clk_ns = 5
 
 
+@pytest.mark.fpgadataflow
 @pytest.mark.vivado
 def test_runtime_weights_single_layer():
     idt = DataType["UINT32"]
diff --git a/tests/fpgadataflow/test_set_folding.py b/tests/fpgadataflow/test_set_folding.py
index 66fd5b43a1b8b8c8986bf9c9b9d0e9efd7a744a6..b3d5458ff29edb18e01c33501db758587f865b35 100644
--- a/tests/fpgadataflow/test_set_folding.py
+++ b/tests/fpgadataflow/test_set_folding.py
@@ -109,9 +109,10 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes):
 
 
 # desired frames per second
-@pytest.mark.parametrize("target_fps", [30, 10 ** 5, 10 ** 7])
+@pytest.mark.parametrize("target_fps", [30, 10**5, 10**7])
 # target chip or board
 @pytest.mark.parametrize("platform", ["Pynq-Z1", "Ultra96", "U200"])
+@pytest.mark.fpgadataflow
 def test_set_folding(target_fps, platform):
 
     model = make_multi_fclayer_model(
@@ -126,7 +127,7 @@ def test_set_folding(target_fps, platform):
     dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename)
 
     clk_ns = 5
-    target_cycles_per_frame = int((10 ** 9 / clk_ns) / target_fps)
+    target_cycles_per_frame = int((10**9 / clk_ns) / target_fps)
     dataflow_model = dataflow_model.transform(SetFolding(target_cycles_per_frame))
 
     exp_cycles_dict = dataflow_model.analysis(exp_cycles_per_layer)
diff --git a/tests/transformation/streamline/test_absorb_mul_into_topk.py b/tests/transformation/streamline/test_absorb_mul_into_topk.py
index bc9a31d49c7edfc20ca3e932efd00df939f1135f..e75f2d21db5cb2fe1b2f93e43ee0e61c7a7681c9 100644
--- a/tests/transformation/streamline/test_absorb_mul_into_topk.py
+++ b/tests/transformation/streamline/test_absorb_mul_into_topk.py
@@ -39,6 +39,7 @@ from finn.transformation.insert_topk import InsertTopK
 from finn.transformation.streamline.absorb import AbsorbScalarMulAddIntoTopK
 
 
+@pytest.mark.streamline
 # parameter to indicate if mul parameter is negative or positive
 @pytest.mark.parametrize("mul_positive", [True, False])
 # parameter to indicate if mul parameter is scalar or not
diff --git a/tests/transformation/streamline/test_absorb_opposite_transposes.py b/tests/transformation/streamline/test_absorb_opposite_transposes.py
index 859e691277a261f01b559e2e166763e402c5d689..ca5ed6ba6a85935604750ab35df0ccf30e032c2c 100644
--- a/tests/transformation/streamline/test_absorb_opposite_transposes.py
+++ b/tests/transformation/streamline/test_absorb_opposite_transposes.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import numpy as np
 import onnx.helper as oh
 from onnx import TensorProto
@@ -36,6 +38,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline.absorb import AbsorbConsecutiveTransposes
 
 
+@pytest.mark.streamline
 def test_absorb_opposite_transposes():
     np.random.seed(0)
     input_shape = [1, 3, 4, 2]
diff --git a/tests/transformation/streamline/test_absorb_transp_into_flatten.py b/tests/transformation/streamline/test_absorb_transp_into_flatten.py
index 1e5d5fe5806d2e3f418438b260d2257f5ae31adf..533dc693da0774e89d2dbb44aac52a6bef038990 100644
--- a/tests/transformation/streamline/test_absorb_transp_into_flatten.py
+++ b/tests/transformation/streamline/test_absorb_transp_into_flatten.py
@@ -13,6 +13,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline.absorb import AbsorbTransposeIntoFlatten
 
 
+@pytest.mark.streamline
 # permutation of transpose node
 @pytest.mark.parametrize("perm", [[0, 2, 3, 1], [0, 1, 3, 2], [3, 2, 0, 1]])
 # reshape or flatten
diff --git a/tests/transformation/streamline/test_collapse_repeated_op.py b/tests/transformation/streamline/test_collapse_repeated_op.py
index 1741ab6b8f4fc1c3e806a8868f329cd7753eac4d..d48d4ad3c2a30e005c1ccc02eee4f7edcaa8a57b 100644
--- a/tests/transformation/streamline/test_collapse_repeated_op.py
+++ b/tests/transformation/streamline/test_collapse_repeated_op.py
@@ -38,6 +38,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline import CollapseRepeatedAdd, CollapseRepeatedMul
 
 
+@pytest.mark.streamline
 def test_collapse_repeated_op():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
     add_param_0 = oh.make_tensor_value_info("add_param_0", TensorProto.FLOAT, [2])
@@ -74,6 +75,7 @@ def test_collapse_repeated_op():
     assert new_model.graph.node[1].op_type == "Mul"
 
 
+@pytest.mark.streamline
 @pytest.mark.parametrize(
     "test_args",
     [("Add", CollapseRepeatedAdd()), ("Mul", CollapseRepeatedMul())],
diff --git a/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py b/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py
index fca073f5a05e10bd721a18538dada05b4ad0d774..2e5ed2eebfcf7ac7c39ccd8c0f105dee8fb389a8 100644
--- a/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py
+++ b/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import numpy as np
 import onnx.helper as oh
 from onnx import TensorProto
@@ -36,6 +38,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline import FactorOutMulSignMagnitude
 
 
+@pytest.mark.streamline
 def test_factor_out_mul_sign_magnitude():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [1, 2])
     mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [1, 2])
diff --git a/tests/transformation/streamline/test_linear_past_eltwise.py b/tests/transformation/streamline/test_linear_past_eltwise.py
index 098b3f9d4f67a2cbc1a87fbb67a313d00e229777..0e4ad6237b3f293c2ee32dcb4963423f6e8d9f19 100644
--- a/tests/transformation/streamline/test_linear_past_eltwise.py
+++ b/tests/transformation/streamline/test_linear_past_eltwise.py
@@ -89,6 +89,7 @@ def make_model(shape):
     return model
 
 
+@pytest.mark.streamline
 # channels
 @pytest.mark.parametrize("ch", [64])
 # ifmdim
@@ -133,6 +134,7 @@ def test_linear_past_eltwise_add(ch, ifmdim):
     os.remove(export_onnx_path)
 
 
+@pytest.mark.streamline
 @pytest.mark.parametrize("ch", [64, 1])
 # ifmdim
 @pytest.mark.parametrize("ifmdim", [-1, 7])
diff --git a/tests/transformation/streamline/test_maxpool_nhwc.py b/tests/transformation/streamline/test_maxpool_nhwc.py
new file mode 100644
index 0000000000000000000000000000000000000000..446302be94d7c5e9c06da1c1fc926de7a3bff578
--- /dev/null
+++ b/tests/transformation/streamline/test_maxpool_nhwc.py
@@ -0,0 +1,109 @@
+import pytest
+
+import onnx
+import onnx.helper as oh
+from onnx import TensorProto
+
+import finn.core.onnx_exec as oxe
+from finn.core.datatype import DataType
+from finn.core.modelwrapper import ModelWrapper
+from finn.custom_op.general.maxpoolnhwc import compute_pool_output_dim
+from finn.transformation.infer_shapes import InferShapes
+from finn.transformation.streamline.reorder import MakeMaxPoolNHWC
+from finn.util.basic import gen_finn_dt_tensor
+
+
+def create_maxpool(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt):
+    ofm_dim_h = compute_pool_output_dim(
+        ifm_dim[0], kernel_shape[0], strides[0], pads[0], ceil_mode
+    )
+    ofm_dim_w = compute_pool_output_dim(
+        ifm_dim[1], kernel_shape[1], strides[1], pads[1], ceil_mode
+    )
+    inp = oh.make_tensor_value_info(
+        "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]]
+    )
+    outp_mp = oh.make_tensor_value_info(
+        "out_mp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w]
+    )
+    outp = oh.make_tensor_value_info(
+        "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch]
+    )
+
+    maxpool_node = oh.make_node(
+        "MaxPool",
+        inputs=["inp"],
+        outputs=["out_mp"],
+        ceil_mode=ceil_mode,
+        kernel_shape=kernel_shape,
+        pads=pads,
+        strides=strides,
+    )
+
+    transpose_node = onnx.helper.make_node(
+        "Transpose",
+        inputs=["out_mp"],
+        outputs=["outp"],
+        name="Transpose1",
+        perm=[0, 2, 3, 1],
+    )
+
+    graph = oh.make_graph(
+        nodes=[maxpool_node, transpose_node],
+        name="maxpool_graph",
+        inputs=[inp],
+        outputs=[outp],
+        value_info=[outp_mp],
+    )
+
+    model = oh.make_model(graph, producer_name="maxpool_model")
+    model = ModelWrapper(model)
+    model.set_tensor_datatype("inp", idt)
+    model.set_tensor_datatype("outp", idt)
+
+    model = model.transform(InferShapes())
+
+    return model
+
+
+@pytest.mark.streamline
+# input dimension
+@pytest.mark.parametrize("ifm_dim", [[8, 8], [9, 9]])
+# input channels
+@pytest.mark.parametrize("ifm_ch", [3])
+# kernel shape
+@pytest.mark.parametrize("kernel_shape", [[2, 2]])
+# padding
+@pytest.mark.parametrize("pads", [[0, 0, 0, 0], [1, 1, 1, 1]])
+# strides
+@pytest.mark.parametrize("strides", [[2, 2]])
+# ceil_mode
+@pytest.mark.parametrize("ceil_mode", [0, 1])
+# input datatype
+@pytest.mark.parametrize("idt", [DataType["INT4"]])
+def test_maxpool_nhwc(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt):
+    # create MaxPool node
+    maxpool_model = create_maxpool(
+        ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt
+    )
+
+    # generate input tensor for testing
+    input_tensor = gen_finn_dt_tensor(idt, [1, ifm_ch, ifm_dim[0], ifm_dim[1]])
+    input_dict = {"inp": input_tensor}
+
+    # execute first model
+    output_dict = oxe.execute_onnx(maxpool_model, input_dict)
+    expected = output_dict["outp"]
+
+    # transform MaxPool into MaxPoolNHWC
+    maxpool_model = maxpool_model.transform(MakeMaxPoolNHWC())
+
+    # execute transformed model
+    output_node_name = maxpool_model.graph.output[0].name
+    output_dict = oxe.execute_onnx(
+        maxpool_model, input_dict, return_full_exec_context=False
+    )
+    output = output_dict[output_node_name]
+
+    # compare outputs
+    assert (expected == output).all()
diff --git a/tests/transformation/streamline/test_move_add_past_mul.py b/tests/transformation/streamline/test_move_add_past_mul.py
index 163b9d310a5f12bd0b854f9aa46f53a549bf109e..e0ee449734e523b1e1742c85dd6b9d1bbdd32537 100644
--- a/tests/transformation/streamline/test_move_add_past_mul.py
+++ b/tests/transformation/streamline/test_move_add_past_mul.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import numpy as np
 import onnx.helper as oh
 from onnx import TensorProto
@@ -36,6 +38,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline import MoveAddPastMul
 
 
+@pytest.mark.streamline
 def test_move_add_past_mul_single():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
     add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
@@ -65,6 +68,7 @@ def test_move_add_past_mul_single():
     assert new_model.graph.node[0].output[0] == new_model.graph.node[1].input[0]
 
 
+@pytest.mark.streamline
 def test_move_add_past_mul_multi():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
     add_param_0 = oh.make_tensor_value_info("add_param_0", TensorProto.FLOAT, [2])
@@ -103,6 +107,7 @@ def test_move_add_past_mul_multi():
         assert new_model.graph.node[i].output[0] == new_model.graph.node[i + 1].input[0]
 
 
+@pytest.mark.streamline
 def test_move_add_past_mul_only_if_linear():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
     top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
diff --git a/tests/transformation/streamline/test_move_chw_add_past_conv.py b/tests/transformation/streamline/test_move_chw_add_past_conv.py
index e4be8fc3836f18bf95eb193516937c2e9334e2ff..d43531fa7d48a67ed91d1e7843bbdfd726fcf14d 100644
--- a/tests/transformation/streamline/test_move_chw_add_past_conv.py
+++ b/tests/transformation/streamline/test_move_chw_add_past_conv.py
@@ -38,6 +38,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline.reorder import MoveAddPastConv
 
 
+@pytest.mark.streamline
 # input dimension
 @pytest.mark.parametrize("idim", [4, 7])
 # kernel size
diff --git a/tests/transformation/streamline/test_move_flatten_past_affine.py b/tests/transformation/streamline/test_move_flatten_past_affine.py
index ef01436dc9435676b562e2b635a8cf12e901046b..1a4cecf1c46fddcb4427975cbf7e31a25628bf9a 100644
--- a/tests/transformation/streamline/test_move_flatten_past_affine.py
+++ b/tests/transformation/streamline/test_move_flatten_past_affine.py
@@ -42,6 +42,7 @@ from finn.transformation.streamline.reorder import MoveFlattenPastAffine
 from finn.util.basic import gen_finn_dt_tensor
 
 
+@pytest.mark.streamline
 # data layout
 @pytest.mark.parametrize("data_layout", [DataLayout.NHWC, DataLayout.NCHW])
 # batch size
diff --git a/tests/transformation/streamline/test_move_flatten_past_topk.py b/tests/transformation/streamline/test_move_flatten_past_topk.py
index 6086f7804eda4447de8f5948f521f0b003f65020..e3d8c65434871ecfa87784e69c76d99330c3f554 100644
--- a/tests/transformation/streamline/test_move_flatten_past_topk.py
+++ b/tests/transformation/streamline/test_move_flatten_past_topk.py
@@ -42,6 +42,7 @@ from finn.transformation.streamline.reorder import MoveFlattenPastTopK
 from finn.util.basic import gen_finn_dt_tensor
 
 
+@pytest.mark.streamline
 # data layout
 @pytest.mark.parametrize("data_layout", [DataLayout.NHWC, DataLayout.NCHW])
 # batch size
diff --git a/tests/transformation/streamline/test_move_identical_op_past_join_op.py b/tests/transformation/streamline/test_move_identical_op_past_join_op.py
index 60e76b8b07e06048ecf1a15c72134fecf5c97346..1d840ec15403e7a70c8da67a6f57076d8521d587 100644
--- a/tests/transformation/streamline/test_move_identical_op_past_join_op.py
+++ b/tests/transformation/streamline/test_move_identical_op_past_join_op.py
@@ -60,6 +60,7 @@ def create_model(perm):
     return model
 
 
+@pytest.mark.streamline
 # Permutation of transpose node
 @pytest.mark.parametrize("perm", [[0, 3, 1, 2], [0, 2, 3, 1]])
 def test_move_identical_op_past_join_op(perm):
diff --git a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py
index fca05afa5b155e6a293857c14c10c4a9b80eeaf4..127f0fde7bc8423d7135a94f0d6f2ff1317bff76 100644
--- a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py
+++ b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py
@@ -1,3 +1,5 @@
+import pytest
+
 import numpy as np
 from onnx import TensorProto, helper
 
@@ -17,7 +19,8 @@ def get_multithreshold_rand_params(channels, num_of_thres, seed=None):
     thres = ((thres - bias) * steps).astype(np.float32)
     return thres
 
 
+@pytest.mark.streamline
 def test_move_maxpool_past_multithreshold():
     # generate test vectors of correct shape
     ch = 64
diff --git a/tests/transformation/streamline/test_move_mul_past_dw_conv.py b/tests/transformation/streamline/test_move_mul_past_dw_conv.py
index e9e956d845ef8e56d2078bcd738ad3bb0ff72bfa..ee7f840bb4461b9b32f25048c0678da9a68526b5 100644
--- a/tests/transformation/streamline/test_move_mul_past_dw_conv.py
+++ b/tests/transformation/streamline/test_move_mul_past_dw_conv.py
@@ -12,6 +12,7 @@ from finn.transformation.streamline.reorder import MoveMulPastDWConv
 from finn.util.basic import gen_finn_dt_tensor
 
 
+@pytest.mark.streamline
 # input dimension
 @pytest.mark.parametrize("ifm_dim", [4, 7])
 # input channels
diff --git a/tests/transformation/streamline/test_move_mul_past_maxpool.py b/tests/transformation/streamline/test_move_mul_past_maxpool.py
index 2c51aaf36a79591fd0fd0cea368d5e23da0d07c3..5f92c514c05b8ea9d75e6c3813dfee998fd8b08b 100755
--- a/tests/transformation/streamline/test_move_mul_past_maxpool.py
+++ b/tests/transformation/streamline/test_move_mul_past_maxpool.py
@@ -13,6 +13,7 @@ from finn.transformation.streamline.reorder import MoveMulPastMaxPool
 from finn.util.basic import gen_finn_dt_tensor
 
 
+@pytest.mark.streamline
 # input dimension
 @pytest.mark.parametrize("ifm_dim", [4, 7])
 # input channels
diff --git a/tests/transformation/streamline/test_move_past_fork.py b/tests/transformation/streamline/test_move_past_fork.py
index 364590f933ac27539fd546d64e25325032c885c9..f578234d6200936502e2e00c841b49707a99656b 100644
--- a/tests/transformation/streamline/test_move_past_fork.py
+++ b/tests/transformation/streamline/test_move_past_fork.py
@@ -9,6 +9,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline.reorder import MoveLinearPastFork
 
 
+@pytest.mark.streamline
 @pytest.mark.parametrize("ch", [64, 1])
 # ifmdim
 @pytest.mark.parametrize("ifmdim", [-1, 7])
diff --git a/tests/transformation/streamline/test_move_scalar_past_conv.py b/tests/transformation/streamline/test_move_scalar_past_conv.py
index 5e2ded0174e9aa7a02551ed6b658f97ff070a523..8f725db91a4dadc938fb9296606e7214f02dcb6e 100644
--- a/tests/transformation/streamline/test_move_scalar_past_conv.py
+++ b/tests/transformation/streamline/test_move_scalar_past_conv.py
@@ -10,6 +10,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline import MoveAddPastConv, MoveScalarMulPastConv
 
 
+@pytest.mark.streamline
 @pytest.mark.parametrize("padding", [False, True])
 @pytest.mark.parametrize(
     "test_args",
@@ -90,6 +91,7 @@ def test_move_scalar_past_conv(test_args, padding):
         assert new_model.graph.node[2].op_type == scalar_op
 
 
+@pytest.mark.streamline
 @pytest.mark.parametrize(
     "test_args",
     [("Add", MoveAddPastConv()), ("Mul", MoveScalarMulPastConv())],
diff --git a/tests/transformation/streamline/test_move_scalar_past_matmul.py b/tests/transformation/streamline/test_move_scalar_past_matmul.py
index b15f84303b0dc2e00bd51397543871cfeb99c1f9..4d6dd95173485c234fd6d231e524d30b50ab56de 100644
--- a/tests/transformation/streamline/test_move_scalar_past_matmul.py
+++ b/tests/transformation/streamline/test_move_scalar_past_matmul.py
@@ -41,6 +41,7 @@ from finn.transformation.streamline import (
 )
 
 
+@pytest.mark.streamline
 def test_move_scalar_mul_past_matmul():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [1, 2])
     mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [1, 1])
@@ -72,6 +73,7 @@ def test_move_scalar_mul_past_matmul():
     assert new_model.graph.node[0].output[0] == new_model.graph.node[1].input[0]
 
 
+@pytest.mark.streamline
 def test_move_scalar_add_past_matmul():
     top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [1, 2])
     add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [1, 1])
@@ -103,6 +105,7 @@ def test_move_scalar_add_past_matmul():
     assert new_model.graph.node[0].output[0] == new_model.graph.node[1].input[0]
 
 
+@pytest.mark.streamline
 @pytest.mark.parametrize(
     "test_args",
     [("Add", MoveScalarAddPastMatMul()), ("Mul", MoveScalarMulPastMatMul())],
diff --git a/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py b/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py
index 9110ede98da81a627127767276db33362503ef84..ad174a4909202f2d62fa2a3c31a7da8ead900e0b 100644
--- a/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py
+++ b/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py
@@ -13,6 +13,7 @@ from finn.transformation.infer_shapes import InferShapes
 from finn.transformation.streamline.reorder import MoveTransposePastScalarMul
 
 
+@pytest.mark.streamline
 # permutation of transpose node
 @pytest.mark.parametrize("perm", [[0, 2, 3, 1], [0, 1, 3, 2], [3, 2, 0, 1]])
 # scalar mul
diff --git a/tests/transformation/streamline/test_round_thresholds.py b/tests/transformation/streamline/test_round_thresholds.py
index 2e57f1c85f6ac197ca7a4cf15e595c34cc0fb564..3a533b0694fa81bae846d2d2f6e8dbcb41a8ee6c 100644
--- a/tests/transformation/streamline/test_round_thresholds.py
+++ b/tests/transformation/streamline/test_round_thresholds.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import numpy as np
 from onnx import TensorProto, helper
 
@@ -35,6 +37,7 @@ from finn.core.modelwrapper import ModelWrapper
 from finn.transformation.streamline import RoundAndClipThresholds
 
 
+@pytest.mark.streamline
 def test_round_thresholds():
     v = helper.make_tensor_value_info("v", TensorProto.FLOAT, [1, 4])
     thresholds = helper.make_tensor_value_info("thresholds", TensorProto.FLOAT, [4, 1])
diff --git a/tests/transformation/streamline/test_sign_to_thres.py b/tests/transformation/streamline/test_sign_to_thres.py
index 2ffb5713c0363b115dee5c41484fb5826faf803a..aa9254e8d605bbcd1d8a61da4d79cc6d582a1764 100644
--- a/tests/transformation/streamline/test_sign_to_thres.py
+++ b/tests/transformation/streamline/test_sign_to_thres.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import brevitas.onnx as bo
 import onnx
 import onnx.numpy_helper as nph
@@ -42,6 +44,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path = "test_sign_to_thres.onnx"
 
 
+@pytest.mark.streamline
 def test_sign_to_thres():
     lfc = get_test_model_trained("LFC", 1, 1)
     bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py
index ed2595330323bfc8a576af36ae3fea27522ec66c..f2c4921c9ae55fa2206abbbb2661fe20e6068b93 100644
--- a/tests/transformation/streamline/test_streamline_cnv.py
+++ b/tests/transformation/streamline/test_streamline_cnv.py
@@ -50,6 +50,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path = make_build_dir("test_streamline_cnv_")
 
 
+@pytest.mark.streamline
 # act bits
 @pytest.mark.parametrize("abits", [1, 2])
 # weight bits
diff --git a/tests/transformation/streamline/test_streamline_fc.py b/tests/transformation/streamline/test_streamline_fc.py
index 3563b87c45a7ffe99fe6e9bdfd9f54a39e89cb68..875a1c46029b83f59211556dc79c9bac26ff927f 100644
--- a/tests/transformation/streamline/test_streamline_fc.py
+++ b/tests/transformation/streamline/test_streamline_fc.py
@@ -51,6 +51,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path = make_build_dir("test_streamline_fc_")
 
 
+@pytest.mark.streamline
 # act bits
 @pytest.mark.parametrize("abits", [1, 2])
 # weight bits
diff --git a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py
index 300ef85faacf664b89c7b949ea2e462f110eef85..bdb988e2aa508ed7464aee33d30b671fa38ebacb 100644
--- a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py
+++ b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import pkg_resources as pk
 
 import brevitas.onnx as bo
@@ -44,7 +46,8 @@ from finn.util.test import get_test_model_trained
 
 export_onnx_path = "test_output_bn2affine.onnx"
 
 
+@pytest.mark.transform
 def test_batchnorm_to_affine_cnv_w1a1():
     lfc = get_test_model_trained("CNV", 1, 1)
     bo.export_finn_onnx(lfc, (1, 3, 32, 32), export_onnx_path)
@@ -69,6 +72,7 @@ def test_batchnorm_to_affine_cnv_w1a1():
     os.remove(export_onnx_path)
 
 
+@pytest.mark.transform
 def test_batchnorm_to_affine_lfc_w1a1():
     lfc = get_test_model_trained("LFC", 1, 1)
     bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
diff --git a/tests/transformation/test_infer_data_layouts_cnv.py b/tests/transformation/test_infer_data_layouts_cnv.py
index 10bc687d13d4a85ce64955cb38c1c0dfdc6d53da..fc26a7edce02198e2534dcb5bf56c500719ccec1 100644
--- a/tests/transformation/test_infer_data_layouts_cnv.py
+++ b/tests/transformation/test_infer_data_layouts_cnv.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import brevitas.onnx as bo
 import os
 
@@ -45,7 +47,8 @@ from finn.util.test import get_test_model_trained
 
 export_onnx_path_cnv = "test_infer_data_layouts.onnx"
 
 
+@pytest.mark.transform
 def test_infer_data_layouts_cnv():
     cnv = get_test_model_trained("CNV", 1, 1)
     bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path_cnv)
diff --git a/tests/transformation/test_infer_datatypes_lfc.py b/tests/transformation/test_infer_datatypes_lfc.py
index 8883dac7a54eafaaa768c8ae991b2030e385b318..3758485860cf0176143fe6f55b71508327ffe762 100644
--- a/tests/transformation/test_infer_datatypes_lfc.py
+++ b/tests/transformation/test_infer_datatypes_lfc.py
@@ -26,6 +26,8 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+
 import brevitas.onnx as bo
 import os
 
@@ -40,6 +42,7 @@ from finn.util.test import get_test_model_trained
 export_onnx_path = "test_infer_datatypes.onnx"
 
 
+@pytest.mark.transform
 def test_infer_datatypes_lfc():
     lfc = get_test_model_trained("LFC", 1, 1)
     bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py
index df7d63e3d2e139077f0fa20b10714c0a43a24e47..d9443e381677273d15bcb06832b009990a6ad11a 100644
--- a/tests/transformation/test_qonnx_to_finn.py
+++ b/tests/transformation/test_qonnx_to_finn.py
@@ -88,6 +88,7 @@ def analysis_testing_for_no_quant_nodes(model):
     return dict()
 
 
+@pytest.mark.transform
 # This test currently takes about 4 min and 20 seconds
 @pytest.mark.parametrize("abits", [1, 2])
 @pytest.mark.parametrize("wbits", [1, 2])
diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py
index de1b3abcc314c0c1451bd86bab8a7b93600ca697..d33a4f2fd6c974b13ac315c7ef621eacb04002c4 100644
--- a/tests/util/test_build_dataflow.py
+++ b/tests/util/test_build_dataflow.py
@@ -39,7 +39,7 @@ from finn.util.basic import make_build_dir
 
 @pytest.mark.slow
 @pytest.mark.vivado
-def test_build_dataflow_directory():
+def test_end2end_build_dataflow_directory():
     test_dir = make_build_dir("test_build_dataflow_directory_")
     target_dir = test_dir + "/build_dataflow"
     example_data_dir = pk.resource_filename("finn.qnn-data", "build_dataflow/")
diff --git a/tests/util/test_create.py b/tests/util/test_create.py
index c11e60175ea3ac94b6686ec5f8401a7c134fe53e..655c01f06eecca84d414ce3b995cfe4d1ba58170 100644
--- a/tests/util/test_create.py
+++ b/tests/util/test_create.py
@@ -32,6 +32,7 @@ import finn.util.create as create
 from finn.core.datatype import DataType
 
 
+@pytest.mark.util
 @pytest.mark.parametrize(
     "bitwidth", [DataType["BIPOLAR"], DataType["INT2"], DataType["INT4"]]
 )
diff --git a/tests/util/test_data_packing_hls.py b/tests/util/test_data_packing_hls.py
index e2b5dc4c301b6d159018f5ac13a719785baf443e..2b67ce26e10f0d672f49d10169adb0b3fa5427fd 100644
--- a/tests/util/test_data_packing_hls.py
+++ b/tests/util/test_data_packing_hls.py
@@ -38,6 +38,7 @@ from finn.core.datatype import DataType
 from finn.util.data_packing import numpy_to_hls_code
 
 
+@pytest.mark.util
 @pytest.mark.parametrize(
     "dtype",
     [
@@ -123,6 +124,7 @@ g++ -o test_npy2apintstream test.cpp /workspace/cnpy/cnpy.cpp \
     assert success
 
 
+@pytest.mark.util
 def test_numpy_to_hls_code():
     def remove_all_whitespace(s):
         return "".join(s.split())