diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index 960abd675bbb185ce2fadfab954ec2b4fd6ff94e..051fd506cab6ae2da0cbea8badff342361734562 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -11,7 +11,7 @@ jobs: test: name: Run quicktest on PR branch - runs-on: ubuntu-16.04 + runs-on: ubuntu-18.04 steps: - name: checkout diff --git a/AUTHORS.rst b/AUTHORS.rst index eb1e06e54b7eb6deedd3e7f8392bb3aa257e7dc6..533ed62e1dbda2799f74805f2100769f9c4fecfc 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -2,9 +2,14 @@ Contributors ============ -* Yaman Umuroglu (@maltanar) +* Yaman Umuroglu (@maltanar) (maintainer) * Jakoba Petri-Koenig (@auphelia) * Andrea Rigoni (@AndreaRigoni) * Hendrik Borras (@HenniOVP) * Lucian Petrica (@quetric) * Tobias Alonso (@Tobi-Alonso) +* Felix Paul Jentzsch (@felixpj) +* Mirza Mrahorovic (@mmrahorovic) +* Suranga Mahesh (@surangamh) +* Peter Lehnhardt (@pete-lennart) +* Neil Kim Nielsen (@neilkimn) diff --git a/docker/Dockerfile.finn_ci b/docker/Dockerfile.finn_ci index 269fa38b73a2edd904bc6aad52522cddb3d33d25..c4248919696a1bbff0a1fd5a2264f4edadd4b112 100644 --- a/docker/Dockerfile.finn_ci +++ b/docker/Dockerfile.finn_ci @@ -27,22 +27,44 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FROM pytorch/pytorch:1.1.0-cuda10.0-cudnn7.5-devel -MAINTAINER Yaman Umuroglu <yamanu@xilinx.com> +LABEL maintainer="Yaman Umuroglu <yamanu@xilinx.com>" WORKDIR /workspace +# some Vitis deps require a timezone to be specified, which hangs in Docker +# use workaround from https://grigorkh.medium.com/fix-tzdata-hangs-docker-image-build-cdb52cc3360d +ENV TZ="Europe/Dublin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + RUN apt-get update RUN apt-get -y upgrade -RUN apt-get install -y build-essential libglib2.0-0 libsm6 libxext6 libxrender-dev -RUN apt-get install -y verilator zsh nano rsync -RUN apt-get install -y sshpass wget unzip +RUN apt-get install -y build-essential +RUN apt-get install -y libglib2.0-0 +RUN apt-get install -y libsm6 +RUN apt-get install -y libxext6 +RUN apt-get install -y libxrender-dev +RUN apt-get install -y verilator +RUN apt-get install -y nano +RUN apt-get install -y zsh +RUN apt-get install -y rsync +RUN apt-get install -y git +RUN apt-get install -y sshpass +RUN apt-get install -y wget +RUN apt-get install -y unzip +RUN apt-get install -y zip RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config # XRT deps -RUN wget https://raw.githubusercontent.com/Xilinx/XRT/master/src/runtime_src/tools/scripts/xrtdeps.sh -RUN apt-get update -RUN bash xrtdeps.sh -docker -RUN rm xrtdeps.sh +# install vitis deps if required +ARG INSTALL_XRT_DEPS="0" +ARG XRT_DEB_VERSION="xrt_202010.2.7.766_18.04-amd64-xrt" +RUN if [ "$INSTALL_XRT_DEPS" = "1" ] ; then \ + echo "Installing XRT: $XRT_DEB_VERSION"; \ + wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb; \ + apt install -y /tmp/$XRT_DEB_VERSION.deb; \ + else \ + echo "Skipping installation of XRT dependencies"; \ + fi # cloning dependency repos # finn-base diff --git a/docker/Dockerfile.finn_dev b/docker/Dockerfile.finn_dev index 6e100a8792bee7b766caed107a4171e39bb4576d..1feeb51a5308638b404cd0a8c70550f95ccbc450 100644 --- a/docker/Dockerfile.finn_dev +++ b/docker/Dockerfile.finn_dev @@ -26,8 +26,8 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
POSSIBILITY OF SUCH DAMAGE. -FROM pytorch/pytorch:1.1.0-cuda10.0-cudnn7.5-devel -MAINTAINER Yaman Umuroglu <yamanu@xilinx.com> +FROM pytorch/pytorch:1.7.1-cuda11.0-cudnn8-runtime +LABEL maintainer="Yaman Umuroglu <yamanu@xilinx.com>" ARG GID ARG GNAME ARG UNAME @@ -36,11 +36,28 @@ ARG PASSWD WORKDIR /workspace +# some Vitis deps require a timezone to be specified, which hangs in Docker +# use workaround from https://grigorkh.medium.com/fix-tzdata-hangs-docker-image-build-cdb52cc3360d +ENV TZ="Europe/Dublin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + + RUN apt-get update RUN apt-get -y upgrade -RUN apt-get install -y build-essential libglib2.0-0 libsm6 libxext6 libxrender-dev -RUN apt-get install -y verilator nano zsh rsync -RUN apt-get -y install sshpass wget unzip +RUN apt-get install -y build-essential +RUN apt-get install -y libglib2.0-0 +RUN apt-get install -y libsm6 +RUN apt-get install -y libxext6 +RUN apt-get install -y libxrender-dev +RUN apt-get install -y verilator +RUN apt-get install -y nano +RUN apt-get install -y zsh +RUN apt-get install -y rsync +RUN apt-get install -y git +RUN apt-get install -y sshpass +RUN apt-get install -y wget +RUN apt-get install -y unzip +RUN apt-get install -y zip RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config COPY requirements.txt . @@ -60,16 +77,18 @@ RUN pip install scikit-learn==0.24.1 RUN pip install tqdm==4.31.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading + # switch user RUN groupadd -g $GID $GNAME RUN useradd -M -u $UID $UNAME -g $GNAME RUN usermod -aG sudo $UNAME RUN echo "$UNAME:$PASSWD" | chpasswd RUN echo "root:$PASSWD" | chpasswd +RUN chown -R $UNAME:$GNAME /workspace RUN ln -s /workspace /home/$UNAME -RUN chown -R $UNAME:$GNAME /home/$UNAME USER $UNAME + # cloning dependency repos (as user) # finn-base RUN git clone https://github.com/Xilinx/finn-base.git /workspace/finn-base @@ -103,12 +122,11 @@ RUN chmod 755 /usr/local/bin/finn_entrypoint.sh RUN chmod 755 /usr/local/bin/quicktest.sh # install vitis deps if required ARG INSTALL_XRT_DEPS +ARG XRT_DEB_VERSION RUN if [ "$INSTALL_XRT_DEPS" = "1" ] ; then \ - echo "Installing XRT dependencies"; \ - wget https://raw.githubusercontent.com/Xilinx/XRT/master/src/runtime_src/tools/scripts/xrtdeps.sh; \ - apt-get update; \ - bash xrtdeps.sh -docker; \ - rm xrtdeps.sh; \ + echo "Installing XRT: $XRT_DEB_VERSION"; \ + wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb; \ + apt install -y /tmp/$XRT_DEB_VERSION.deb; \ else \ echo "Skipping installation of XRT dependencies"; \ fi diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index 8e6d054d4fee5330d88ded9506d73568d21bf2e0..7edb3f9a9e831e005440dd69b36998272d727da5 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -4,21 +4,27 @@ export SHELL=/bin/bash export FINN_ROOT=/workspace/finn GREEN='\033[0;32m' +RED='\033[0;31m' NC='\033[0m' # No Color gecho () { echo -e "${GREEN}$1${NC}" } +recho () { + echo -e "${RED}ERROR: $1${NC}" +} + # checkout the correct dependency repo commits # the repos themselves are cloned in the Dockerfile -FINN_BASE_COMMIT=8908c6a3f6674c4fa790954bd41c23ee5bf053df +FINN_BASE_COMMIT=ac0b86a63eb937b869bfa453a996a8a8b8506546 FINN_EXP_COMMIT=f82c0d9868bb88ea045dfadb28508d327d287221 -BREVITAS_COMMIT=aff49758ec445d77c75721c7de3091a2a1797ca8 +BREVITAS_COMMIT=d7ded80fa9557da2998ea310669edee7fb2d9526 
CNPY_COMMIT=4e8810b1a8637695171ed346ce68f6984e585ef4 -HLSLIB_COMMIT=2e49322d1bbc4969ca293843bda1f3f9c05456fc +HLSLIB_COMMIT=4d74baefa79df48b5a0348d63f39a26df075de51 PYVERILATOR_COMMIT=e2ff74030de3992dcac54bf1b6aad2915946e8cb OMX_COMMIT=1bae737669901e762f581af73348332b5c4b2ada +AVNET_BDF_COMMIT=2d49cfc25766f07792c0b314489f21fe916b639b gecho "Setting up known-good commit versions for FINN dependencies" # finn-base @@ -80,11 +86,12 @@ if [ ! -d "/workspace/finn/board_files" ]; then rm pynq-z2.zip cd $OLD_PWD fi -if [ ! -d "/workspace/finn/board_files/ultra96v1" ]; then - gecho "Downloading Avnet BDF files into board_files" +if [ ! -d "/workspace/finn/board_files/ultra96v2" ]; then + gecho "Downloading Avnet BDF files from known-good commit into board_files" OLD_PWD=$(pwd) cd /workspace/finn git clone https://github.com/Avnet/bdf.git + git -C /workspace/finn/bdf checkout $AVNET_BDF_COMMIT --quiet mv /workspace/finn/bdf/* /workspace/finn/board_files/ rm -rf /workspace/finn/bdf cd $OLD_PWD @@ -92,10 +99,13 @@ fi if [ ! -z "$VITIS_PATH" ];then # source Vitis env.vars export XILINX_VITIS=$VITIS_PATH + export XILINX_XRT=/opt/xilinx/xrt source $VITIS_PATH/settings64.sh if [ ! -z "$XILINX_XRT" ];then # source XRT source $XILINX_XRT/setup.sh + else + recho "XRT not found on $XILINX_XRT, did the installation fail?" fi fi exec "$@" diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst index 25538a3ad1fa21b8e841b5f3fadc5388137d68bb..f7ca6af31c7660e52eca4b2a8f6dc1be482e3606 100644 --- a/docs/finn/getting_started.rst +++ b/docs/finn/getting_started.rst @@ -118,7 +118,8 @@ Prior to running the `run-docker.sh` script, there are several environment varia These are summarized below: * ``VIVADO_PATH`` points to your Vivado installation on the host -* (optional, for Vitis & Alveo only) ``VITIS_PATH``, ``PLATFORM_REPO_PATHS`` and ``XILINX_XRT`` respectively point to your Vitis installation, the Vitis platform files, and Xilinx XRT +* (optional, for Vitis & Alveo only) ``VITIS_PATH``, and ``PLATFORM_REPO_PATHS`` respectively point to your Vitis installation, and the Vitis platform files. +* (optional, for Vitis & Alveo only) ``XRT_DEB_VERSION`` specifies the .deb to be installed for XRT inside the container (see default value in ``run-docker.sh``). * (optional) ``JUPYTER_PORT`` (default 8888) changes the port for Jupyter inside Docker * (optional) ``JUPYTER_PASSWD_HASH`` (default "") Set the Jupyter notebook password hash. If set to empty string, token authentication will be used (token printed in terminal on launch). * (optional) ``LOCALHOST_URL`` (default localhost) sets the base URL for accessing e.g. Netron from inside the container. Useful when running FINN remotely. @@ -162,7 +163,7 @@ We use *host* to refer to the PC running the FINN Docker environment, which will On the target side: -1. Install Xilinx XRT and set up the ``XILINX_XRT`` environment variable to point to your installation, for instance ``/opt/xilinx/xrt``. +1. Install Xilinx XRT. 2. Install the Vitis platform files for Alveo and set up the ``PLATFORM_REPO_PATHS`` environment variable to point to your installation, for instance ``/opt/xilinx/platforms``. 3. Create a conda environment named *finn-pynq-alveo* by following this guide `to set up PYNQ for Alveo <https://pynq.readthedocs.io/en/latest/getting_started/alveo_getting_started.html>`_. It's best to follow the recommended environment.yml (set of package versions) in this guide. 4. 
Activate the environment with `conda activate finn-pynq-alveo` and install the bitstring package with ``pip install bitstring``. @@ -173,7 +174,7 @@ On the target side: On the host side: 1. Install Vitis 2020.1 and set up the ``VITIS_PATH`` environment variable to point to your installation. -2. Install Xilinx XRT and set up the ``XILINX_XRT`` environment variable to point to your installation. *This must be the same path as the target's XRT (target step 1)* +2. Install Xilinx XRT. Ensure that the ``XRT_DEB_VERSION`` environment variable reflects which version of XRT you have installed. 3. Install the Vitis platform files for Alveo and set up the ``PLATFORM_REPO_PATHS`` environment variable to point to your installation. *This must be the same path as the target's platform files (target step 2)* 4. Set up the ``ALVEO_*`` environment variables accordingly for your target, see description of environment variables above. 5. `Set up public key authentication <https://www.digitalocean.com/community/tutorials/how-to-configure-ssh-key-based-authentication-on-a-linux-server>`_. Copy your private key to the ``finn/ssh_keys`` folder on the host to get password-less deployment and remote execution. diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index ff4c5704002219ca18bb07eeb8c768f860f3ffbf..e0ce00c1beefe8172ac5fd2aeaaa076b9bb574c1 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -103,23 +103,23 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "--2021-02-24 16:57:33-- https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1\n", + "--2021-05-10 18:14:00-- https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1\n", "Resolving zenodo.org (zenodo.org)... 137.138.76.77\n", "Connecting to zenodo.org (zenodo.org)|137.138.76.77|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 13391907 (13M) [application/octet-stream]\n", - "Saving to: 'unsw_nb15_binarized.npz'\n", + "Saving to: ‘unsw_nb15_binarized.npz’\n", "\n", - "unsw_nb15_binarized 100%[===================>] 12.77M 2.17MB/s in 8.9s \n", + "unsw_nb15_binarized 100%[===================>] 12.77M 3.96MB/s in 3.4s \n", "\n", - "2021-02-24 16:57:44 (1.44 MB/s) - 'unsw_nb15_binarized.npz' saved [13391907/13391907]\n", + "2021-05-10 18:14:04 (3.77 MB/s) - ‘unsw_nb15_binarized.npz’ saved [13391907/13391907]\n", "\n" ] } @@ -422,7 +422,9 @@ "name": "stderr", "output_type": "stream", "text": [ - "Training loss = 0.132480 test accuracy = 0.797989: 100%|██████████| 10/10 [00:58<00:00, 5.70s/it]\n" + "Training loss: 0%| | 0/10 [00:00<?, ?it/s]/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py:130: UserWarning: CUDA initialization: Found no NVIDIA driver on your system. 
Please check that you have an NVIDIA GPU and installed a driver from http://www.nvidia.com/Download/index.aspx (Triggered internally at /opt/conda/conda-bld/pytorch_1607370172916/work/c10/cuda/CUDAFunctions.cpp:100.)\n", + " Variable._execution_engine.run_backward(\n", + "Training loss = 0.131708 test accuracy = 0.805398: 100%|██████████| 10/10 [01:04<00:00, 6.42s/it]\n" ] } ], @@ -457,7 +459,7 @@ "outputs": [ { "data": { - "image/png": "<base64-encoded PNG plot output elided>\n", + "image/png": "<base64-encoded PNG plot output (regenerated) elided>\n",
=\n", "text/plain": [ "<Figure size 432x288 with 1 Axes>" ] @@ -483,7 +485,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAo3UlEQVR4nO3deXxddZ3/8dcnW5OmbbokXUiXtHRnLYRSBCq0LBWQVaEgiz8XRkeY0XEZdNRxYFTGUdFRRgcRF0CQRZjKIFvCJhZoS8vSJt2XJKVJuiZps+fz++Oe0tv2Jr1tc3PuTd7Px+M+mrPdfO5Fzzvn+z3n+zV3R0RE5EBpYRcgIiLJSQEhIiIxKSBERCQmBYSIiMSkgBARkZgUECIiEpMCQkREYlJASMozs4aoV4eZNUYtf+II3u8lM/tMImoVSSUZYRcgcrTcfcDen81sA/AZd38hvIoSy8wy3L0t7Dqk99MVhPRaZpZmZreZ2Voz22Zmj5jZ0GBbtpk9EKzfaWaLzGyEmX0XOBv4eXAF8vNO3vtRM9tiZrvM7BUzOy5qW46Z/cjMNgbb/2pmOcG2s8zsb8HvrDCzTwbr97tqMbNPmtlfo5bdzL5gZquB1cG6nwbvUWdmS8zs7Kj9083sG8Fnrw+2jzGzu83sRwd8lgVm9qWj/8alt1FASG92K3A58GHgGGAHcHew7SYgDxgDDAM+BzS6+78ArwK3uPsAd7+lk/f+CzAJGA68BTwYte2HwKnAh4ChwNeADjMbFxz3M6AAOBlYdhif53LgdGB6sLwoeI+hwB+AR80sO9j2T8C1wEXAIOBTwB7gd8C1ZpYGYGb5wHnB8SL7UROT9GafI3KirwQws+8Am8zsBqCVSDBMdPd3gCWH88buft/en4P33WFmeUA9kZPxLHevCnb5W7DfdcAL7v5QsH5b8IrX9919e1QND0Rt+5GZfROYArwNfAb4mruvDLa/vfd3mtkuYC7wPDAfeMndqw+jDukjdAUhvdk44ImgOWcnUAa0AyOA+4FngYfNbLOZ/cDMMuN506D55s6g+aYO2BBsyg9e2cDaGIeO6WR9vCoOqOMrZlYWNGPtJHJFlB/H7/odcH3w8/VEvguRgyggpDerAD7i7oOjXtnuXuXure7+b+4+nUhT0CXAjcFxhxri+DrgMiJNM3lAUbDegK1AE3BsJ/XEWg+wG+gftTwyxj4f1BX0N3wNuBoY4u6DgV1BDYf6XQ8Al5nZScA04MlO9pM+TgEhvdkvge8Gbf+YWYGZXRb8fK6ZnWBm6UAdkSanjuC4amBCF+87EGgm0jzUH/je3g3u3gHcB/zYzI4JrjbOMLN+RPopzjOzq80sw8yGmdnJwaHLgCvNrL+ZTQQ+fYjPNhBoA2qBDDP7NpG+hr3uBe4ws0kWcaKZDQtqrCTSf3E/8Li7Nx7id0kfpYCQ3uynwALgOTOrB14n0skLkb/QHyMSDmXAy+xravkp8DEz22Fm/xXjfX8PbASqgBXB+0b7CvAukZPwduA/gDR330Sk0/jLwfplwEnBMXcBLUTC6Xfs3+kdy7PAM8CqoJYm9m+C+jHwCPBc8Bl/DeREbf8dcAJqXpIumCYMEul7zGw2kaamca6TgHRCVxAifUzQGf+PwL0KB+mKAkKkDzGzacBOYBTwk1CLkaSnJiYREYlJVxAiIhJTr3mSOj8/34uKisIuQ0QkpSxZsmSruxfE2tZrAqKoqIjFixeHXYaISEoxs42dbVMTk4iIxKSAEBGRmBQQIiISU0IDwszmmdlKM1tjZrfF2D7WzF40s6Vm9o6ZXRS17evBcSvN7MJE1ikiIgdLWCd1MAja3cD5QCWwyMwWuPuKqN2+CTzi7r8ws+nA00BR8PN84DgiE728YGaT3b09UfWKiMj+EnkFMRNY4+7r3L0FeJjIEMnRnH0jUOYBm4OfLwMedvdmd18PrAneT0REekgiA6KQ/UeXrAzWRfsOcL2ZVRK5erj1MI4VEZEECvs5iGuB37r7j8zsDOB+Mzs+3oPN7GbgZoCxY8cmqEQRkeTS1t5B5Y5G1m/dzbqtu8nJTOe607v/HJjIgKgiMu3hXqODddE+DcwDcPeFwYTr+XEei7vfA9wDUFxcrEGlRKTXcHdqG5pZX7v7gyBYV7ub9Vsb2LR9D63t+055p4wdnHIBsQiYZGbjiZzc5xOZqjHaJiKTp/82GGUym8gMWQuAP5jZj4l0Uk8C3kxgrSIioWhobmPDBwHQwPqtkUBYX7ub+ua2D/bLykijaFh/Jg4fwAXHjWR8fi4T8nOZUDCAIf3jmk79sCUsINy9zcxuITLzVTpwn7svN7PbgcXuvoDIzFq/MrMvEemw/mQwPv1yM3uEyGxdbcAXdAeTiKSq1vYOKrbviVwJ1EbCYP3WSBhU1zV/sJ8ZHJOXw4SCXK48pZDx+bmMLxjAhPxcjhmcQ3qadfFbul+vGe67uLjYNRaTiITF3ampbw4CoGG/pqFN2/fQ3rHvXDs0Nyty8g9exxbkMj5/AOOG9Sc7M71H6zazJe5eHGtb2J3UIiIpoaWtg227m6mt3/favKspuCqIXA3sadnX0JGdmUbRsFymjRrIxSeMCq4GIs1Cg/tnhfhJ4qeAEJE+q73D2bGnZb+Tfm1DM1uDf6PX7dzTetDxaQajh/RnfH4uM8cPZUJ+5EpgfEEuowZlk9bDTULdTQEhIr2Ku1PX1Lbfyb22vpmtDc0HBcG2hmY6YrSy52SmM3xQP/IH9OPYggHMmjCMgoH9Iq8B/ciP+jkro/cOaaeAEJGU0tDcxmtrtlJT10RtQ0vMv/xb2joOOi4z3cgfEDmxj8rL5sTReR+c9PeuLwj+ze2nUyMoIEQkRayqruf+hRv501uV7A7a+s1gWG7WByf4Ywty9zvRf/DvwH7k5WRiltpNPj1NASEiSau1vYPnlldz/+sbeH3ddrLS07jkxFFcfdoYJuTnMjQ3i4z03tvEEzYFhIgknZq6Jv7w5iYeenMT1XXNFA7O4Z/nTeXq4tEMG9Av7PL6DAWEiCQFd+eN9du5f+FGnl2+hbYO58OTC/ju5eM4d+rwHn9ITBQQIhKyhuY2nnirkvtf38iq6gYGZWfwyQ8Vcf2scRTl54ZdXp+mgBCRUKyuruf+1zfyp7eqaGhu4/jCQfzgqhP56EnHkJPVs08TS2wKCBHpMa3tHTy/oprfL9y/0/n6M8YxY8xg3WWUZBQQIpJwNXVNPPRmBX94c+MHnc5fmzeFa4rHqNM5iSkgRCQh3J0312/n969v5Nn3Ip3Os9XpnFIUECLSrRqa23hiaRUPLNzIyup6BmVncFPQ6Txenc4pRQEhIt1iTU3kSefHg07n444ZxH9cdQKXnlSoTucUpYAQSXItbR0sXLeN5tZ2BuVkMig7k0E5GeTlZJKblRHqiKFtH3Q6b2Thum1kpa
dx8YmjuH7WOE4Zq07nVKeAEElCHR3Okk07eGJpFU+/+37MoaYhMtz0wKjAGJQdeeXlRNZFwqST5exMsjPTjugkvrfT+aE3N7GlronCwTl89cIpXHPaGPLV6dxrKCBEksiamnqeXLqZJ5dVUbmjkZzMdC48bgSXnVxIwcB+1DW1UtfYSl1jG3VNrexqDJab2qhrjCyv29pAXWMbuxpbaWzteqbezHT7IFAG5mQyKDtjvwDZGyqRgMmko8N5/K1Kngk6nc+elM8dlx/PHHU690oKCJGQ1dQ1seDtSCi8V1VHmsHZkwr4ygVTOH/6iKMaerqlrYP6vUESFSKRoDk4ZHY1tlK1o/GD9a3tB0+WsLfT+ROnj2VCwYCj+eiS5BQQIiFoaG7jueVbeGJpFa+t2UqHw4mj8/j2JdO55KRRDB+Y3S2/JysjjWED+h3RswbuTlNrx34h0tjazqnjhtA/S6eOvkD/lUV6SGt7B39dvZUnllbx3IotNLV2MGZoDrecO5HLZhRybJL9NW5m5GSlk5OVzohB3RNYkloUECIJ5O4sq9jJk0ureOqd99m2u4XB/TP52KmjuWJGIaeMHaI7fSRpKSBEEmDD1t08uayKJ5dWsWHbHvplpHHe9BFccXIhsycX9Op5jKX3UECIdJNtDc089c77PLG0imUVOzGDMyYM4+/Pnci840cyKDsz7BJFDosCQuQoNLa083xZNU8ureLlVbW0dzjTRg3iGxdN5dKTChmZp7Z7SV0KCJHD1N7h/G1tpLP52fe2sLulnVF52Xz27AlcPuMYpo4cFHaJIt1CASESB3dn+eY6nlhaxZ/f3kxNfTMDszP46EnHcPmMQmYWDQ11yAuRRFBAiHTh/V2N/OmtKp5YWsWamgYy041zpwznihmFnDt1ONmZGoROei8FhEgMDc1t/PeLa7j3r+tpaetgZtFQvnfFCVx0wkgG988KuzyRHqGAEInS3uE8tqSC/3x2FVsbmrlyRiFfPG8yY4f1D7s0kR6ngBAJLFy7jTueWsGK9+s4ddwQ7r2pmJPHDA67LJHQKCCkz9uwdTffe7qM51ZUUzg4h59dO4NLThylJ5ylz1NASJ9V19TKz0vX8JvX1pOZnsZXL5zCp88ar45nkYACQvqctvYOHl5UwY+fX8WOPS18/NTRfOWCKQzXgHQi+1FASJ/y6upa7nhqBauqGzh9/FC+dcl0ji/MC7sskaSU0IAws3nAT4F04F53v/OA7XcB5waL/YHh7j442NYOvBts2+TulyayVund1tQ08L2nyygtr2Hs0P788vpTuPC4kepnEOlCwgLCzNKBu4HzgUpgkZktcPcVe/dx9y9F7X8rMCPqLRrd/eRE1Sd9w849LfzkhdU88PpGsjPT+fpHpvLJM4vol6F+BpFDSeQVxExgjbuvAzCzh4HLgBWd7H8t8K8JrEf6kNb2Dh58fSN3vbCa+qZW5s8cyz+dP5n8I5hZTaSvSmRAFAIVUcuVwOmxdjSzccB4oDRqdbaZLQbagDvd/ckYx90M3AwwduzY7qlaUpq78+LKGr77f2Wsrd3NWRPz+eYl0zSAnsgRSJZO6vnAY+7eHrVunLtXmdkEoNTM3nX3tdEHufs9wD0AxcXFB8+uLn3Kqup67nhqBa+u3sqE/Fx+fVMxc6YOVz+DyBFKZEBUAWOilkcH62KZD3wheoW7VwX/rjOzl4j0T6w9+FDp67Y1NHPXC6v4wxubGNAvg29dMp0bZo3TrG0iRymRAbEImGRm44kEw3zgugN3MrOpwBBgYdS6IcAed282s3zgTOAHCaxVUlBLWwe/X7iBn5asZk9LOzfMGscXz5vMkFwNpifSHRIWEO7eZma3AM8Suc31Pndfbma3A4vdfUGw63zgYXePbiKaBvyPmXUAaUT6IDrr3JY+xt15bkU133+6jA3b9nDOlAK+efE0Jg4fGHZpIr2K7X9eTl3FxcW+ePHisMuQBFuxuY47nlrBwnXbmDh8AN+8eBrnTBkedlkiKcvMlrh7caxtydJJLdKl2vpmfvTcSv64uILBOZncftlxXDdzLBnp6mcQSRQFhCS1ptZ27nttPf/94lqaWtv59JnjuXXOJPL6Z4Zdmkivp4CQpOTuPP3uFr7/lzIqdzRy3rQRfOOiqUwoGBB2aSJ9hgJCksr23S08ubSKRxZXUL6lnqkjB/LgZ07nzIn5YZcm0ucoICR07R3Oq6treXRxJc+vqKalvYMTR+fxg6tO5KpTR5OepgfdRMKggJDQbNq2h0eXVPDYkkre39XEkP6ZfGLWWK4uHsO0URoaQyRsCgjpUU2t7Tzz3hb+uKiCheu2YQazJxXwzYunc9704RplVSSJKCAk4dydd6t28cdFFSx4ezP1TW2MGZrDl8+fzFWnjuaYwTlhlygiMSggJGEO7HDul5HGRSeM4uPFo5k1fhhp6lsQSWoKCOlWezucH1lcwfMrqmltd04ance/X348Hz3pGPJy9PyCSKpQQEi3iNXhfMOsIq4+bbTmYhBJUQoIOWKNLe08s/x9/riogtfXbSfNYPbkAr59yXTmThuh4bZFUpwCQg6Lu/NO5S4eWVzBgmWbqW9uY+zQ/nzlgkiH86g8dTiL9BYKCInL9t0tPLG0ikeDDufszDQuOn4UHy8ew+njh6rDWaQXUkBIp9o7nFdW1/JodIfzmMF894pIh/OgbHU4i/RmCgg5yMZtu3l0cSWPLalkS10TQ3OzuPGMIq4uHsOUkZqUR6SvUEDIfp55730+98BbpBl8eHIB37l0OnOmqsNZpC9SQMh+HllcSeHgHB7//IcYmZcddjkiEiL9WSgfaGxp57U1W7nguBEKBxFRQMg+f1u7lea2DuZOHRF2KSKSBDptYjKzK+M4vsndn+7GeiREJeU15GalM3P80LBLEZEk0FUfxK+A/wW6usF9NqCA6AXcndKyGmZPLlCHtIgAXQfEX9z9U10dbGYPdHM9EpLlm+vYUtfEnKnDwy5FRJJEp38quvv1hzo4nn0kNZSW12AG50xRQIhIRNxtCWY20cweMLPHzeyMRBYlPa+kvIaTRg+mYGC/sEsRkSTRaUCY2YH3Od4BfB34IvCLBNYkPay2vpm3K3Zy3jRdPYjIPl1dQfzZzG6MWm4FioBxQHsii5Ke9eLKGgDm6PZWEYnSVUDMAwaZ2TNmNhv4CnAhcAXwiZ4oTnpGSVk1o/KymTZK4yyJyD6d3sXk7u3Az83sfuBbwOeBb7r72p4qThKvua2dV1dv5YoZhZhpyG4R2aerB+VOB74KtADfAxqB75pZFXCHu+/skQolod5Yt509Le3MVf+DiBygq+cg/ge4CBgA/MbdzwTmm9mHgT8SaW6SFFdaXkN2ZhofOjY/7FJEJMl0FRBtRDqlc4lcRQDg7i8DLye2LOkJ7k5JeTVnHptPdmZ62OWISJLpqpP6OuAqYA5wYxf7SYpaU9NAxfZG5k7T3UsicrCuOqlXAV/uwVqkh5WU7729Vf0PInKwrh6Ue+pQBx9qHzObZ2YrzWyNmd0WY/tdZrYseK0ys51R224ys9XB6
6ZD1SKHr6SsmuOOGaS5H0Qkpq76IM4yswVdbDdgeqcbzdKBu4HzgUpgkZktcPcVe/dx9y9F7X8rMCP4eSjwr0Ax4MCS4Ngdh/5IEo8du1tYsnEHt5w7MexSRCRJdRUQl8VxfEsX22YCa9x9HYCZPRy854pO9r+WSChA5A6p5919e3Ds80Qe3HsojpokDi+vqqXDYY76H0SkE131QRztnUqFQEXUciVweqwdzWwcMB4o7eLYwhjH3QzcDDB27NijLLdvKSmvIX9AFicW5oVdiogkqWSZGWY+8Fjw9Hbc3P0edy929+KCgoIEldb7tLZ38PLKGs6dMpy0ND09LSKxJTIgqoAxUcujg3WxzGf/5qPDOVYO05KNO6hratPT0yLSpUMGhJl91MyOJEgWAZPMbLyZZREJgYM6vc1sKjAEWBi1+lngAjMbYmZDgAuCddINSstryEpP46xJuuoSkc7Fc+K/BlhtZj8ITuZxcfc24BYiJ/Yy4BF3X25mt5vZpVG7zgcednePOnY7kfknFgWv2/d2WMvRe6GsmtMnDGVAv67uURCRvu6QZwh3v97MBhG5y+i3ZubAb4CH3L3+EMc+DTx9wLpvH7D8nU6OvQ+471D1yeFZv3U362p3c+OscWGXIiJJLq6mI3evAx4DHgZGEZkT4q3g2QVJIaXlmhxIROITTx/EpWb2BPASkAnMdPePACehoThSTml5NZOGD2DssP5hlyIiSS6eRuirgLvc/ZXole6+x8w+nZiyJBHqm1p5Y912Pn32+LBLEZEUEE9AfAd4f++CmeUAI9x9g7uXJKow6X6vrt5KW4czV81LIhKHePogHgU6opbbg3WSYkrKahjcP5NTxg4OuxQRSQHxBESGu0dPGNQCZCWuJEmE9g7nxZU1nDO5gIz0ZHmAXkSSWTxnitro5xbM7DJga+JKkkRYVrGT7btbNDifiMQtnj6IzwEPmtnPiQzxXYFmmEs5peXVpKcZH9bT0yISp3gelFsLzDKzAcFyQ8Krkm5XUlZD8bgh5PXPDLsUEUkRcY21YGYXA8cB2WaR0T/d/fYE1iXdqGpnI+Vb6vnGRXGPlCIiEteDcr8kMh7TrUSamD4OaJyGFKKnp0XkSMTTSf0hd78R2OHu/wacAUxObFnSnUrKqika1p9jC3LDLkVEUkg8AdEU/LvHzI4BWomMxyQpYE9LG39bu405U0ewt3lQRCQe8fRB/NnMBgP/CbwFOPCrRBYl3ee1NdtoaevQ5EAicti6DIhgoqASd98JPG5mTwHZ7r6rJ4qTo1daXs2AfhmcVjQ07FJEJMV02cTk7h3A3VHLzQqH1OHulJTVMHtyPlkZenpaRA5PPGeNEjO7ytSAnXKWb66jpr5Zdy+JyBGJJyD+jsjgfM1mVmdm9WZWl+C6pBuUlNVgBudM0dPTInL44nmSemBPFCLdr6S8mhljBpM/oF/YpYhICjpkQJjZ7FjrD5xASJJLTV0T71Tu4qsXTgm7FBFJUfHc5vrVqJ+zgZnAEmBOQiqSbvHiyr1PT+v2VhE5MvE0MX00etnMxgA/SVRB0j1Kymo4Ji+bqSPVQigiR+ZI7n2sBKZ1dyHSfZpa2/nrmq3MmTZcT0+LyBGLpw/iZ0SenoZIoJxM5IlqSVJvrN/OnpZ2zT0tIkclnj6IxVE/twEPuftrCapHukFpWTXZmWmcceywsEsRkRQWT0A8BjS5ezuAmaWbWX9335PY0uRIuDsvlNVw1sQCsjPTwy5HRFJYXE9SAzlRyznAC4kpR47WquoGqnY2anA+ETlq8QREdvQ0o8HP/RNXkhyNkvJqAM6dooAQkaMTT0DsNrNT9i6Y2alAY+JKkqNRWlbD8YWDGJmXHXYpIpLi4umD+CLwqJltJjLl6EgiU5BKktm+u4W3Nu3gljmTwi5FRHqBeB6UW2RmU4G9YzasdPfWxJYlR+LlVTV0OMzV09Mi0g0O2cRkZl8Act39PXd/DxhgZn+f+NLkcL1QVkPBwH6cUJgXdiki0gvE0wfx2WBGOQDcfQfw2YRVJEektb2DV1bWMmfKcNLS9PS0iBy9eAIiPXqyIDNLB7ISV5IciUUbtlPf3MYc3d4qIt0knoB4Bvijmc01s7nAQ8G6QzKzeWa20szWmNltnexztZmtMLPlZvaHqPXtZrYseC2I5/f1ZaVlNWSlp3HWxPywSxGRXiKeu5j+GbgZ+Hyw/Dzwq0MdFFxp3A2cT2SAv0VmtsDdV0TtMwn4OnCmu+8ws+g/fxvd/eS4PoVQWl7DrGOHkdsvnv+kIiKHdsgrCHfvcPdfuvvH3P1jwArgZ3G890xgjbuvc/cW4GHgsgP2+Sxwd9CvgbvXHF75ArCutoF1W3fr7iUR6VZxDfdtZjPM7AdmtgG4HSiP47BCoCJquTJYF20yMNnMXjOz181sXtS2bDNbHKy/vJO6bg72WVxbWxvPR+mVSss1OZCIdL9O2yPMbDJwbfDaCvwRMHc/t5t//yTgHGA08IqZnRDcNTXO3avMbAJQambvuvva6IPd/R7gHoDi4mKnjyopq2HyiAGMGaoRUESk+3R1BVFOZFrRS9z9LHf/GdB+GO9dBYyJWh4drItWCSxw91Z3Xw+sIhIYuHtV8O864CVgxmH87j5jV2MrizZsZ+40zf0gIt2rq4C4EngfeNHMfhXcwXQ4N9gvAiaZ2XgzywLmAwfejfQkkasHzCyfSJPTOjMbYmb9otafSaTvQw7w6upa2jpc/Q8i0u06DQh3f9Ld5wNTgReJjMk03Mx+YWYXHOqN3b0NuAV4FigDHnH35WZ2u5ldGuz2LLDNzFYEv+Or7r6NyJSmi83s7WD9ndF3P8k+pWU1DO6fyYyxQ8IuRUR6GXOPv+nezIYAHweucfe5CavqCBQXF/vixYsPvWMv0t7hFP/785wzZTh3XXNy2OWISAoysyXuXhxrW1x3Me3l7jvc/Z5kC4e+alnFDnbsadXdSyKSEIcVEJJcSspqSE8zZk8uCLsUEemFFBAprKSshtOKhpCXkxl2KSLSCykgUlTF9j2srK5n7lTd3ioiiaGASFEvrow8PT1Xo7eKSIIoIFJUSVkN4/NzmVAwIOxSRKSXUkCkoN3NbSxcu013L4lIQikgUtBra7bS0t6hp6dFJKEUECmotLyGgf0yKC4aGnYpItKLKSBSTEeHU1Jew+zJBWRl6D+fiCSOzjAp5r3Nu6itb1b/g4gknAIixZSU1WAG5yogRCTBFBApprS8hlPGDmFoblbYpYhIL6eASCHVdU28W7VLzUsi0iMUECnkxXI9PS0iPUcBkUJeKKuhcHAOU0YMDLsUEekDFBApoqm1ndfWbGXO1OGYHc7MryIiR0YBkSIWrttGY2s7c9S8JCI9RAGRIkrLasjJTOeMCcPCLkVE+ggFRApwd0rLazhrUj7ZmelhlyMifYQCIgWsrK6namejBucTkR6lgEgBJWWR21v19LSI9CQFRAooKavmhMI8RgzKDrsUEelDFBBJbltDM0srdurpaRHpcQqIJPfSylrc9fS0iPQ8BUSSKy2vYfjAfhx/TF7Y
pYhIH6OASGItbR28sqqWOVOHk5amp6dFpGcpIJLY4g3bqW9uU/+DiIRCAZHESspryMpI48yJ+WGXIiJ9kAIiSbk7JWXVnDFhGLn9MsIuR0T6IAVEklq3dTcbtu3R3UsiEhoFRJIqDZ6eVv+DiIRFAZGkSsqrmTpyIKOH9A+7FBHpoxQQSWhXYyuLNuzQ1YOIhCqhAWFm88xspZmtMbPbOtnnajNbYWbLzewPUetvMrPVweumRNaZbF5ZVUt7h6v/QURClbDbY8wsHbgbOB+oBBaZ2QJ3XxG1zyTg68CZ7r7DzIYH64cC/woUAw4sCY7dkah6k0lpeQ1D+mdy8pghYZciIn1YIq8gZgJr3H2du7cADwOXHbDPZ4G795743b0mWH8h8Ly7bw+2PQ/MS2CtSaOtvYMXV9Zw7pThpOvpaREJUSIDohCoiFquDNZFmwxMNrPXzOx1M5t3GMdiZjeb2WIzW1xbW9uNpYdnacVOdu5p1dzTIhK6sDupM4BJwDnAtcCvzGxwvAe7+z3uXuzuxQUFBYmpsIeVlNWQkWbMntw7Po+IpK5EBkQVMCZqeXSwLlolsMDdW919PbCKSGDEc2yvVFpezczxQxmUnRl2KSLSxyUyIBYBk8xsvJllAfOBBQfs8ySRqwfMLJ9Ik9M64FngAjMbYmZDgAuCdb1axfY9rKpu0O2tIpIUEnYXk7u3mdktRE7s6cB97r7czG4HFrv7AvYFwQqgHfiqu28DMLM7iIQMwO3uvj1RtSaL0vJIH/3caSNCrkREBMzdw66hWxQXF/vixYvDLuOo3PDrN6ja0UjpV84JuxQR6SPMbIm7F8faFnYntQQamtt4Y912NS+JSNLQONJJ4KWVNdzx1Apa2juYd/zIsMsREQEUEKFaW9vAd/+vjNLyGoqG9efeG4spLhoadlkiIoACIhS7Glv5r5LV/O5vG8jOTOcbF03lpg8V0S8jPezSREQ+oIDoQe0dzsOLNvGj51axY08L1xSP4csXTKFgYL+wSxMROYgCoocsXLuN259aQdn7dcwsGsq3Pzqd4wvzwi5LRKRTCogE27RtD997uoxnlm+hcHAOd193ChedMBIzDcQnIslNAZEgDc1t/PeLa7j3r+tJN+PL50/ms7MnkJ2pfgYRSQ0KiG7W0eH8aWkVP3imnJr6Zq6YUcg/z5vKyLzssEsTETksCohutGTjDm7/83LertzFSWMG88sbTuWUsZr0R0RSkwKiG7y/q5E7/1LO/y7bzIhB/fjx1Sdx+cmFpGnCHxFJYQqIo9DY0s49r6zjFy+vocPhlnMn8vlzjiW3n75WEUl9OpMdAXfnqXfe5/tPl7F5VxMXnzCK2z4ylTFD+4ddmohIt1FAHKZ3K3dx+1PLWbRhB9NHDeLH15zMrAnDwi5LRKTbKSDiVFPfxA+fXcmjSyoZ2j+L7195AlcXjyFd/Qwi0kspIA6hua2d37y2gZ+XrqG5rZ3Pnj2BW+ZM1JSgItLrKSA64e48t6Ka7z1dxsZtezhv2nD+5eLpjM/PDbs0EZEeoYCIYeWWem5/ajmvrdnGpOED+P2nZjJ7ckHYZYmI9CgFRJTtu1u46/lVPPjGRgZmZ/Jvlx7HJ04fS0a6Jt4Tkb5HAQG0tndw/8KN/OSFVexuaeeGWeP44nmTGZKbFXZpIiKh6fMBUbF9D5/8zZusrd3N2ZPy+dYl05k8YmDYZYmIhK7PB8SIQdmMG5bL1z8yjbnThmsYbhGRQJ8PiKyMNO775GlhlyEiknTU+yoiIjEpIEREJCYFhIiIxKSAEBGRmBQQIiISkwJCRERiUkCIiEhMCggREYnJ3D3sGrqFmdUCG4/iLfKBrd1UTqrTd7E/fR/70/exT2/4Lsa5e8zhqntNQBwtM1vs7sVh15EM9F3sT9/H/vR97NPbvws1MYmISEwKCBERiUkBsc89YReQRPRd7E/fx/70fezTq78L9UGIiEhMuoIQEZGYFBAiIhJTnw8IM5tnZivNbI2Z3RZ2PWEyszFm9qKZrTCz5Wb2j2HXFDYzSzezpWb2VNi1hM3MBpvZY2ZWbmZlZnZG2DWFycy+FPz/5D0ze8jMssOuqbv16YAws3TgbuAjwHTgWjObHm5VoWoDvuzu04FZwBf6+PcB8I9AWdhFJImfAs+4+1TgJPrw92JmhcA/AMXufjyQDswPt6ru16cDApgJrHH3de7eAjwMXBZyTaFx9/fd/a3g53oiJ4DCcKsKj5mNBi4G7g27lrCZWR4wG/g1gLu3uPvOUIsKXwaQY2YZQH9gc8j1dLu+HhCFQEXUciV9+IQYzcyKgBnAGyGXEqafAF8DOkKuIxmMB2qB3wRNbveaWW7YRYXF3auAHwKbgPeBXe7+XLhVdb++HhASg5kNAB4HvujudWHXEwYzuwSocfclYdeSJDKAU4BfuPsMYDfQZ/vszGwIkdaG8cAxQK6ZXR9uVd2vrwdEFTAmanl0sK7PMrNMIuHwoLv/Kex6QnQmcKmZbSDS9DjHzB4It6RQVQKV7r73ivIxIoHRV50HrHf3WndvBf4EfCjkmrpdXw+IRcAkMxtvZllEOpkWhFxTaMzMiLQxl7n7j8OuJ0zu/nV3H+3uRUT+d1Hq7r3uL8R4ufsWoMLMpgSr5gIrQiwpbJuAWWbWP/j/zVx6Yad9RtgFhMnd28zsFuBZInch3Ofuy0MuK0xnAjcA75rZsmDdN9z96fBKkiRyK/Bg8MfUOuD/hVxPaNz9DTN7DHiLyN1/S+mFw25oqA0REYmprzcxiYhIJxQQIiISkwJCRERiUkCIiEhMCggREYlJASESMLOG4N8iM7uum9/7Gwcs/607318kERQQIgcrAg4rIIIB27qyX0C4e6976lZ6HwWEyMHuBM42s2XBmP/pZvafZrbIzN4xs78DMLNzzOxVM1tA8FSxmT1pZkuCeQJuDtbdSWTUz2Vm9mCwbu/VigXv/Z6ZvWtm10S990tR8y88GDyxi5ndGczZ8Y6Z/bDHvx3pM/r0k9QinbgN+Iq7XwIQnOh3uftpZtYPeM3M9o7ceQpwvLuvD5Y/5e7bzSwHWGRmj7v7bWZ2i7ufHON3XQmcTGR+hfzgmFeCbTOA44gMI/0acKaZlQFXAFPd3c1scPd+dJF9dAUhcmgXADcGw4+8AQwDJgXb3owKB4B/MLO3gdeJDAQ5ia6dBTzk7u3uXg28DJwW9d6V7t4BLCPS9LULaAJ+bWZXAnuO8rOJdEoBIXJoBtzq7icHr/FRY//v/mAns3OIjPJ5hrufRGR8nqOZhrI56ud2IMPd24hMdPUYcAnwzFG8v0iXFBAiB6sHBkYtPwt8PhgKHTOb3MlkOXnADnffY2ZTiUzbulfr3uMP8CpwTdDPUUBk1rY3OyssmKsjLxhA8UtEmqZEEkJ9ECIHewdoD5qKfktkLuYi4K2go7gWuDzGcc8Anwv6CVYSaWba6x7gHTN7y90/EbX+CeAM4G3Aga+5+5YgYGIZCPyvmWUTubL5pyP6hCJx0GiuIiISk5qYREQkJgWEiIjEpIAQEZGYFBAiIhKTAkJERGJ
SQIiISEwKCBERien/A0GEoRH/AAAdAAAAAElFTkSuQmCC\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAn/klEQVR4nO3de5xdZ13v8c937pOZyaXJ5J4maZs2DRRSGCtYxQtQq2KL4MEUEVCx4KGAqHCKx4NYjp56AeFgX0qBKsqlQFEMnkoFBURuJoVy6eyWpmmhSXaSyXXPJHOf3/ljrZnsTPbM7DTZWXvv+b5fr51Z61nrWfs3+5VZv72eZ63nUURgZmY2XUPWAZiZWXVygjAzs5KcIMzMrCQnCDMzK8kJwszMSnKCMDOzkpwgzMysJCcIq3mSBopeE5IGi9Z/+Ukc7wuSXlWJWM1qSVPWAZidq4jonFyW9Djwqoj4XHYRVZakpogYyzoOq3++grC6JalB0q2SHpV0WNLHJV2UbmuT9KG0/JikHZJWSPoj4MeAv0yvQP5yhmN/QtJ+Sccl/YekpxRta5f0DknfT7f/p6T2dNuPSvpK+p5PSHplWn7aVYukV0r6z6L1kPRaSY8Aj6Rl706PUZB0v6QfK9q/UdLvpb97f7p9naQ7JL1j2u+yXdIbz/0Tt3rjBGH17HXAC4EfB1YDR4E70m2vABYB64ClwGuAwYj4n8CXgFsiojMibpnh2P8CbAKWA98APly07c+BZwI/AlwEvBmYkLQ+rfceoBvYCjxwFr/PC4EfBrak6zvSY1wEfAT4hKS2dNtvAzcBPwssBH4NOAl8ELhJUgOApGXA89L6ZqdxE5PVs9eQnOj3AEh6G/ADSb8CjJIkhssi4tvA/Wdz4Ii4a3I5Pe5RSYuAfpKT8bMiYm+6y1fS/V4KfC4iPpqWH05f5fo/EXGkKIYPFW17h6TfB64AvgW8CnhzRDycbv/W5HtKOg48F/gssA34QkQcOIs4bJ7wFYTVs/XAP6bNOceAHDAOrAD+HrgPuFvSPkl/Kqm5nIOmzTe3p803BeDxdNOy9NUGPFqi6roZysv1xLQ4fldSLm3GOkZyRbSsjPf6IPCydPllJJ+F2RmcIKyePQH8TEQsLnq1RcTeiBiNiD+MiC0kTUEvAF6e1ptriOOXAjeSNM0sAjak5QIOAUPApTPEU6oc4ASwoGh9ZYl9puJK+xveDLwEWBIRi4HjaQxzvdeHgBslPR24EvjUDPvZPOcEYfXsr4E/Stv+kdQt6cZ0+SclXSWpESiQNDlNpPUOAJfMctwuYJikeWgB8MeTGyJiArgLeKek1enVxrMltZL0UzxP0kskNUlaKmlrWvUB4EWSFki6DPj1OX63LmAM6AOaJL2VpK9h0vuBt0vapMTTJC1NY9xD0n/x98AnI2JwjveyecoJwurZu4HtwL9K6ge+RtLJC8k39HtIkkMO+CKnmlreDfyipKOS/m+J4/4d8H1gL9CbHrfY7wLfITkJHwH+BGiIiB+QdBr/Tlr+APD0tM5fACMkyemDnN7pXcp9wGeA76WxDHF6E9Q7gY8D/5r+jh8A2ou2fxC4Cjcv2SzkCYPM5h9JzyFpalofPgnYDHwFYTbPpJ3xbwDe7+Rgs3GCMJtHJF0JHANWAe/KNBirem5iMjOzknwFYWZmJdXNk9TLli2LDRs2ZB2GmVlNuf/++w9FRHepbXWTIDZs2MDOnTuzDsPMrKZI+v5M29zEZGZmJTlBmJlZSU4QZmZWkhOEmZmV5ARhZmYlOUGYmVlJThBmZlZS3TwHYWb1ISIYGp2gf2iUwtAYhaFR+ofGkvXB5OfI2ATdXa2sWtzOqkVtrFzUxsK2siYErBsTE8GhE8McLAwzEcHT1i4+7+9R0QQh6XqSsfUbSUaOvH3a9otJxqVfnO5za0Tcm257C8mkKePA6yPivkrGambnx8jYqZN7/7ST++TJvvikf/r6GIXBUcYmzn6MuM7WJlYuamNV+lq5qH1qedWi9jSJNCFp7oNlKCIoDI1xsDDE/sIQBwrDHCgMTb32F4Y5WBiir3946nN6+rrF/NNrrz3vsVQsQaQzdd0BPB/YA+yQtD0ieot2+33g4xHxV5K2APcCG9LlbcBTgNXA5yRdHhHjlYrXzE6JCAaGxzg8MMLhE8McGhhJlgeGTz+ZT0sEhcFRhscm5jx+Z2sTC9ua6GprpqutieVdbVza3URXWrYwLe9qa2Jhe/Np+y5sa6a5sYGD/UPsPz5E/vgQ+eOD5I8n6/uOD/G9A30c7B9m+likHS2NaRJpPz2RLE4TycJ2FrZXLokMjY6nJ/ph9heGOFh00j+Qru8vDDE0euZnuKi9mRULW1mxsI1Ny5dNLa9Y2MbaJe0l3u3cVfIK4hpgV0TsBpB0N8k8vsUJIjg1TeIiYF+6fCNwd0QMA49J2pUe76sVjNesrg2PjXPkRHKiPzQwPHXyT9ZPLR8eGObQiRFGZjjRtzU3FJ3Am1nU3szaJe1TJ/HpJ/Opk3578rOztYnGhnM/Aa9dsoC1SxbMuH10fIKD/cPsT5NH/tjpyeRLjxziYP8Q0y9WFkwlkVOJZOWiNlanVyGrFrWxqL35tCQyNj7BoYGR9Bv/0Azf/oc5Pjh6RpytTQ2sXJSc6K9au5jndbWyclEbyxe2sWJyuauN9pbGc/7MzlYlE8QaTp8CcQ+npnuc9DaS6SBfB3SQTAI/Wbd4Gsc9adlpJN0M3Axw8cUXn5egzWrFxERwbHA0OaGXOMEfnkoCSULoHxoreZyWpgaWdbSwtLOVpZ0tXL6ii2WdLSztbGFpR1K2rLOVZZ2tLOloprXpwp+onozmxgbWLG5nzeKZv12PpUlkMnFMvyL58q5DHCicmUTamxtZtaiNBa2NHCgMc2jgzKuVxgaxvKuV5Qvb2LC0g2ddsnTqG3/xt/9qbvbKupP6JuBvI+Idkp4N/L2kp5ZbOSLuBO4E6Onp8cQWVhcmOx/3HRsif2yQvceSk9WhgeGpb/6HBkY4enKE8RJt9RJctODUCf4pqxeyrLOVpUVJYFnRyb+ztXpPUJXW1NjA6sXtrF7cDiwpuc/Y+AR9A8NFVyGnEsnJkTGesmoRKxalJ/2utvTbfytLO1rPy5VSliqZIPYC64rW16ZlxX4duB4gIr4qqQ1YVmZds5pUGBpl37FB8seG0pP/IPuODSVl6bfX0fHTT/xtzQ0s72pjaWcLa5csYOu6xSW/4S/tbGHJgpaaPzFVk6bGhrSpqR3mWUNFJRPEDmCTpI0kJ/dtwEun7fMD4LnA36ZTIbYBfcB24COS3knSSb0J+K8Kxmp2XgyPjbP/eHriT0/6+45PnvyTRDAwfHpTT2ODWLkwadveum4xP3PVStYsbp9q/16zuJ3FC5rn7bd8y07FEkREjEm6BbiP5BbWuyLiQUm3ATsjYjvwO8D7JL2RpMP6lekk6g9K+jhJh/YY8FrfwWRZm5gI+gaGk5N+2
tQwlQjSk/+hgeEz6i3taGHV4qQd+kcuXcbqxUnn5+rFbaxe3M7yrjZ/47eqVDdzUvf09IQnDLJzNTY+wQ+OnOTRvhPsOjjAroMDPHH0JPuODXKgMHRG009HSyOr0jbs1YuSE/7kt/7Jh7jammujU9fmJ0n3R0RPqW1Zd1KbZWJwZJxH+wZ4tC9JApM/Hz90kpHxU7d3rljYyvqLOuhZvyQ5+S9uZ83kFcCiyt4zb5Y1Jwira0dOjEyd/Cdfj/YNsPfY4NRtiQ2C9Us7uLS7k5/avILLlndyaXcHly7vnHfDN5gVc4KwmjcxEew7Ppie/JOmoUcPDrCrb4AjJ0am9mtrbuCSZZ084+IlvKRnHZct7+Sy5Z2sX7qgZu7tN7uQnCCsZoyMTfD9wydOuxLY1TfAowdPMDh66h6GJQuauWx5J9dtSa8GlndyWXcnaxa30+DOYLOyOUFYVTpyYoTPP3SQXZN9BAcH+P6Rk6c9GLZmcTuXLu/kh665KLka6E6uCJZ2tmYYuVn9cIKwqvS//7mXf/jmXpoaxIZlHVy+ooufvWoVly7v4LLuLi7p7qCj1f99zSrJf2FWlb699zjPubybD7yih+ZGz2tllgX/5VnVGRodZ3ffAFvXLnJyMMuQ//qs6nzvQD8TAVeuWjj3zmZWMU4QVnVy+QLgBGGWNScIqzq5fD8dLY1cfNHMk8GYWeU5QVjV6d1X4IqVXX5mwSxjThBWVSKC3P4CW1a7ecksa04QVlX2HB2kf2jM/Q9mVcAJwqqKO6jNqocThFWV3nwBCTav7Mo6FLN5zwnCqkouX2Dj0g4WtPghf7OsOUFYVcnl+928ZFYlnCCsavQPjfKDIye5cpWbl8yqQUUThKTrJT0saZekW0ts/wtJD6Sv70k6VrRtvGjb9krGadXh4f39gDuozapFxRp6JTUCdwDPB/YAOyRtj4jeyX0i4o1F+78OuLroEIMRsbVS8Vn16fUdTGZVpZJXENcAuyJid0SMAHcDN86y/03ARysYj1W5XL7A4gXNrFrUlnUoZkZlE8Qa4Imi9T1p2RkkrQc2Av9eVNwmaaekr0l64Qz1bk732dnX13eewras9Ob7uXLlQiQPsWFWDaqlk3obcE9EjBeVrY+IHuClwLskXTq9UkTcGRE9EdHT3d19oWK1ChifCB7eX3DzklkVqWSC2AusK1pfm5aVso1pzUsRsTf9uRv4Aqf3T1idefzwCYZGJ3wHk1kVqWSC2AFskrRRUgtJEjjjbiRJm4ElwFeLypZIak2XlwHXAr3T61r96N3nDmqzalOxu5giYkzSLcB9QCNwV0Q8KOk2YGdETCaLbcDdERFF1a8E3itpgiSJ3V5895PVn1y+QFOD2LSiM+tQzCxV0fEMIuJe4N5pZW+dtv62EvW+AlxVydisuuTyBS5b3klrU2PWoZhZqlo6qW2e8xAbZtXHCcIyd+TECPsLQ+6gNqsyThCWuck5ILasWpRxJGZWzAnCMndqkiBfQZhVEycIy1xvvsDyrlaWdrZmHYqZFXGCsMy5g9qsOjlBWKZGxibYddAJwqwaOUFYpnYdHGB0PNiy2gnCrNo4QVimTt3B5A5qs2rjBGGZyuULtDY1sGFpR9ahmNk0ThCWqd58gStWdtHU6P+KZtXGf5WWmYggly9w5Ur3P5hVIycIy8yBwjBHT466g9qsSjlBWGZOPUHtBGFWjZwgLDO9aYLY7DuYzKqSE4RlpjdfYO2Sdha2NWcdipmV4ARhmcnlC2xx85JZ1XKCsEwMjozz+KET7n8wq2JOEJaJhw/0MxHuoDarZhVNEJKul/SwpF2Sbi2x/S8kPZC+vifpWNG2V0h6JH29opJx2oV3aogNJwizatVUqQNLagTuAJ4P7AF2SNoeEb2T+0TEG4v2fx1wdbp8EfAHQA8QwP1p3aOVitcurN59BTpbm1i7pD3rUMxsBpW8grgG2BURuyNiBLgbuHGW/W8CPpou/zTw2Yg4kiaFzwLXVzBWu8By+QJXruqioUFZh2JmM6hkglgDPFG0victO4Ok9cBG4N/Ppq6kmyXtlLSzr6/vvARtlTcxETy033NAmFW7aumk3gbcExHjZ1MpIu6MiJ6I6Onu7q5QaHa+7Tk6yMDwmBOEWZWrZILYC6wrWl+blpWyjVPNS2db12pMb/444DuYzKpdJRPEDmCTpI2SWkiSwPbpO0naDCwBvlpUfB9wnaQlkpYA16VlVgd68/00CK5Y4SE2zKpZxe5iiogxSbeQnNgbgbsi4kFJtwE7I2IyWWwD7o6IKKp7RNLbSZIMwG0RcaRSsdqFlcsX2Lisg/aWxqxDMbNZVCxBAETEvcC908reOm39bTPUvQu4q2LBWWZy+QJb1y3OOgwzm0O1dFLbPHF8cJQ9Rwfd/2BWA5wg7IJ6yE9Qm9UMJwi7oKaG2PAscmZVb8Y+CEkvKqP+UNrPYFaWXL6fizpaWN7VmnUoZjaH2Tqp3wf8EzDbWAjPYVontNlscvuTITYkD7FhVu1mSxD/EhG/NltlSR86z/FYHRsbn+Ch/f28/Fnrsw7FzMowYx9ERLxsrsrl7GM26bFDJxgZm/AdTGY1ouxOakmXSfqQpE9KenYlg7L61OsOarOaMlsndVtEDBUVvR14c7r8aWBrBeOyOpTL99PcKC7t7sw6FDMrw2xXEJ+W9PKi9VFgA7AeOKtRV80gucX1suVdtDT57mqzWjDbX+r1wEJJn5H0HOB3SSby+QXgly9EcFZfetNJgsysNszYxJTOzfCXkv4e+F/AbwK/HxGPXqjgrH4cGhimr3/YT1Cb1ZDZ+iB+GHgTMAL8MTAI/JGkvcDbI+LYBYnQ6kLOQ2yY1ZzZnoN4L/CzQCfwNxFxLbBN0o8DHyNpbjIry2SC8C2uZrVjtgQxRtIp3UFyFQFARHwR+GJlw7J6k8v3s3JhG0s6WrIOxczKNFuCeCnwapLk8PJZ9jObU+8+d1Cb1ZrZOqm/B/zOBYzF6tTw2DiP9g3wvC3Lsw7FzM7CjLe5SvrnuSqXs4/ZIwcGGJsI9z+Y1ZjZmph+VNL2WbYL2HKe47E65A5qs9o0W4K4sYz6I7NtlHQ98G6gEXh/RNxeYp+XAG8DAvhWRLw0LR8HvpPu9oOIuKGMeKwK9eYLtDU3sGFpR9ahmNlZmK0P4pzuVJLUCNwBPB/YA+yQtD0ieov22QS8Bbg2Io5KKm6kHoyIrecSg1WHXL7AFSsX0tjgOSDMakklB8W5BtgVEbsjYgS4mzOvSn4DuCMijgJExMEKxmMZiAhy+X4/IGdWgyqZINYATxSt70nLil0OXC7py5K+ljZJTWqTtDMtf2GpN5B0c7rPzr6+vvMavJ0f+eNDHB8cZYtvcTWrOXMmCEk/L6lSiaQJ2AT8BHAT8D5Ji9Nt6yOih+R5jHdJunR65Yi4MyJ6IqKnu7u7QiHauXAHtVntKufE/0vAI5L+VNLmszj2XmBd0fratKzYHmB7RIxGxGPA90gSBhGxN/25G/gCcPVZvLdVid59SYLY7ARhVnPmTBDptKJXA48Cfyvpq2nTzlxt
BjuATZI2SmoBtgHTb5v9FMnVA5KWkTQ57Za0RFJrUfm1QC9Wc3L7C6xfuoDO1tlumDOzalRW01FEFIB7SDqaV5HMCfENSa+bpc4YcAtwH5ADPh4RD0q6TdLkLav3AYcl9QKfB94UEYeBK4Gdkr6Vlt9efPeT1Y5cvp8rV/rqwawWzfm1Lj2Z/ypwGfB3wDURcVDSApJv9e+ZqW5E3AvcO63srUXLAfx2+ire5yvAVeX/GlaNTo6M8fjhE7xw6/R7E8ysFpRz3f9i4C8i4j+KCyPipKRfr0xYVg8e2t9PBB6kz6xGlZMg3gbkJ1cktQMrIuLxiPi3SgVmtW+yg9p3MJnVpnL6ID4BTBStj6dlZrPK5Qt0tTWxdkl71qGY2ZNQToJoSp+EBiBd9qwvNqdcvsCVqxYieYgNs1pUToLoK7rrCEk3AocqF5LVg4mJ4KH9HmLDrJaV0wfxGuDDkv6SZIjvJ/AMczaH7x85ycmRcXdQm9WwORNERDwKPEtSZ7o+UPGorOZ5iA2z2lfW462Sfg54CskAegBExG0VjMtqXC5foLFBXL7CVxBmtaqcwfr+mmQ8pteRNDH9N2B9heOyGpfLF7hkWQdtzY1Zh2JmT1I5ndQ/EhEvB45GxB8CzyYZM8lsRrl8v5uXzGpcOQliKP15UtJqYJRkPCazko6dHGHvsUEnCLMaV04fxKfTORr+DPgGydzR76tkUFbbcvl+ALasdoIwq2WzJoh0oqB/i4hjwCcl/TPQFhHHL0RwVptO3cHkDmqzWjZrE1NETAB3FK0POznYXHL5Ass6W1je1ZZ1KGZ2Dsrpg/g3SS+Wx0uwMvWmQ2yYWW0rJ0G8mmRwvmFJBUn9kgoVjstq1Oj4BI8cGHCCMKsD5TxJ7YZkK9vuvhOMjE94DCazOlDOjHLPKVU+fQIhM/AQG2b1pJwmpjcVvf4X8GmSSYTmJOl6SQ9L2iXp1hn2eYmkXkkPSvpIUfkrJD2Svl5RzvtZ9nL5Ai2NDVzS3ZF1KGZ2jsppYvr54nVJ64B3zVVPUiPJHVDPB/YAOyRtj4jeon02AW8Bro2Io5KWp+UXAX8A9JA8d3F/Wvdoub+YZaM3X2DTik6aG8v57mFm1ezJ/BXvAa4sY79rgF0RsTudZOhu4MZp+/wGcMfkiT8iDqblPw18NiKOpNs+C1z/JGK1CyznO5jM6kY5fRDvIfkWD0lC2UryRPVc1pDMHTFpD/DD0/a5PH2PLwONwNsi4jMz1F1TIrabgZsBLr744jJCsko62D/EoYERd1Cb1YlyhtrYWbQ8Bnw0Ir58Ht9/E/ATwFrgPyRdVW7liLgTuBOgp6cn5tjdKmxyiA1fQZjVh3ISxD3AUESMQ9K3IGlBRJyco95eYF3R+tq0rNge4OsRMQo8Jul7JAljL0nSKK77hTJitQxN3sHkKwiz+lDWk9RAe9F6O/C5MurtADZJ2iipBdgGbJ+2z6dIE4GkZSRNTruB+4DrJC2RtAS4Li2zKta7r8DqRW0sWtCcdShmdh6UcwXRVjzNaEQMSFowV6WIGJN0C8mJvRG4KyIelHQbsDMitnMqEfQC48CbIuIwgKS3kyQZgNsi4shZ/WZ2weXyBY/galZHykkQJyQ9IyK+ASDpmcBgOQePiHuBe6eVvbVoOYDfTl/T694F3FXO+1j2hkbH2X3oBNc/dWXWoZjZeVJOgvgt4BOS9pFMObqSZApSsymPHBhgfCLcQW1WR8p5UG6HpM3AFWnRw2mnstmU3nwyCrwThFn9mLOTWtJrgY6I+G5EfBfolPTfKx+a1ZJcvp8FLY2sv2jO7ikzqxHl3MX0G+mMcgCkTzb/RsUisprUmy+weWUXDQ2eNsSsXpSTIBqLJwtKx1hqqVxIVmsiwkNsmNWhcjqpPwN8TNJ70/VXp2VmAOw9Nkj/0JgThFmdKSdB/A+S8Y5+M13/LPC+ikVkNad3n+eAMKtHczYxRcRERPx1RPxiRPwi0Au8p/KhWa3I5fuRYPNKTz5oVk/KuYJA0tXATcBLgMeAf6hkUFZbcvkCG5Z20NFa1n8nM6sRM/5FS7qcJCncBBwCPgYoIn7yAsVmNSK3v8BTPMSGWd2ZrYnpIeCngBdExI9GxHtIxksymzIwPMb3D5/kypVOEGb1ZrYE8SIgD3xe0vskPZdkqA2zKQ/l3UFtVq9mTBAR8amI2AZsBj5PMibTckl/Jem6CxSfVbmpOSDcxGRWd8q5i+lERHwkIn6eZOKeb5Lc+mpGb76fRe3NrFrUlnUoZnaelfMk9ZSIOBoRd0bEcysVkNWW5AnqLooetjezOnFWCcKs2PhE8NB+D7FhVq+cIOxJe/zwCYZGJ5wgzOqUE4Q9aVMd1E4QZnXJCcKetFy+QFOD2LSiM+tQzKwCKpogJF0v6WFJuyTdWmL7KyX1SXogfb2qaNt4Ufn2SsZpT04u38+l3Z20NjVmHYqZVUDFBs9J5424A3g+sAfYIWl7RPRO2/VjEXFLiUMMRsTWSsVn5653X4FnXXJR1mGYWYVU8griGmBXROyOiBHgbuDGCr6fXUBHT4ywvzDkDmqzOlbJBLEGeKJofU9aNt2LJX1b0j2S1hWVt0naKelrkl5Y6g0k3Zzus7Ovr+/8RW5z8hPUZvUv607qTwMbIuJpJBMRfbBo2/qI6AFeCrxL0qXTK6cP7fVERE93d/eFidiAZA5q8BhMZvWskgliL1B8RbA2LZsSEYcjYjhdfT/wzKJte9Ofu4EvAFdXMFY7S735At1drSzrbM06FDOrkEomiB3AJkkbJbUA24DT7kaStKpo9QYgl5YvkdSaLi8DriWZyc6qRC7f76sHszpXsbuYImJM0i3AfUAjcFdEPCjpNmBnRGwHXi/pBmAMOAK8Mq1+JfBeSRMkSez2Enc/WUZGxibYdbCfH7/czXpm9ayic0RGxL3AvdPK3lq0/BbgLSXqfQW4qpKx2ZP3aN8Ao+PBlas8B7VZPcu6k9pqkIfYMJsfnCDsrPXuK9DS1MDGZR1Zh2JmFeQEYWctt7/AFSu6aGr0fx+zeua/cDsrEUEu3+/mJbN5wAnCzsrB/mGOnBhxB7XZPOAEYWfFT1CbzR9OEHZWevclCWKzE4RZ3XOCsLOSyxdYs7idRe3NWYdiZhXmBGFnJZcveARXs3nCCcLKNjgyzmOHTrj/wWyecIKwsj18oJ+JgC2+g8lsXnCCsLLlfAeT2bziBGFly+ULdLY2sW7JgqxDMbMLwAnCypbLF9i8souGBmUdipldAE4QVpaJifAkQWbzjBOElWXP0UEGhsecIMzmEScIK8upITZ8B5PZfOEEYWXJ5Qs0CDav9BWE2XzhBGFlyeULbFjWQXtLY9ahmNkFUtEEIel6SQ9L2iXp1hLbXympT9ID6etVRdteIemR9PWKSsZpc+vNF9z/YDbPNFXqwJIagTuA5wN7gB2StkdE77RdPxYRt0yrexHwB0APEMD9ad2jlYrXZlYYGmXP0UFuuubirEMxswuoklcQ1wC7ImJ3RIwAdwM3lln3p4HPRsSRNCl8Fri+QnHaHB7K9wN4Fjm
zeaaSCWIN8ETR+p60bLoXS/q2pHskrTubupJulrRT0s6+vr7zFbdN4yE2zOanrDupPw1siIinkVwlfPBsKkfEnRHRExE93d3dFQnQkgSxZEEzKxa2Zh2KmV1AlUwQe4F1Retr07IpEXE4IobT1fcDzyy3rl04kx3UkofYMJtPKpkgdgCbJG2U1AJsA7YX7yBpVdHqDUAuXb4PuE7SEklLgOvSMrvAxsYneHi/h9gwm48qdhdTRIxJuoXkxN4I3BURD0q6DdgZEduB10u6ARgDjgCvTOsekfR2kiQDcFtEHKlUrDazxw+fYHhswh3UZvNQxRIEQETcC9w7reytRctvAd4yQ927gLsqGZ/NrTe9g8lXEGbzT9ad1FblcvkCzY3isuWdWYdiZheYE4TNqndfgUu7O2lp8n8Vs/nGf/U2q1y+4P4Hs3nKCcJmdHhgmIP9w2xZ7QRhNh85QdiMcu6gNpvXnCBsRr3544AThNl85QRhM8rl+1mxsJWLOlqyDsXMMuAEYTNyB7XZ/OYEYSUNj42z6+CAm5fM5jEnCCtp18EBxibCCcJsHnOCsJJ693kOCLP5zgnCSsrl+2lrbmDjso6sQzGzjDhBWEm5fIErVi6kscFzQJjNV04QdoaIILe/wJZVXVmHYmYZcoKwM+wvDHHs5Kj7H8zmOScIO4M7qM0MnCCshFw+SRCbV7qJyWw+c4KwM+Ty/Vx80QK62pqzDsXMMlTRBCHpekkPS9ol6dZZ9nuxpJDUk65vkDQo6YH09deVjNNOl8sXuNId1GbzXsXmpJbUCNwBPB/YA+yQtD0ieqft1wW8Afj6tEM8GhFbKxWflXZyZIzHDp/ghq2rsw7FzDJWySuIa4BdEbE7IkaAu4EbS+z3duBPgKEKxmJlemh/PxHuoDazCl5BAGuAJ4rW9wA/XLyDpGcA6yLi/0l607T6GyV9EygAvx8RX6pEkEdPjPCC9/znaWXSDMtohvLi/VWyfHrBTHUiIvk59U/yIyImV4mAIIg4tV5cP4rKJvcrrju5Nlle/J6jYxMAHsXVzCqaIGYlqQF4J/DKEpvzwMURcVjSM4FPSXpKRBSmHeNm4GaAiy+++EnF0dQonnXJ0qn1U6dhmGFx6oR6Znnp/Werc/p7RJKE0nwhkuQxmT6kU2WT20/tq6ntp/bVVCJTurNOO/appFd87NWL21i7pB0zm98qmSD2AuuK1temZZO6gKcCX0hPeCuB7ZJuiIidwDBARNwv6VHgcmBn8RtExJ3AnQA9PT3Tz8ll6Wpr5h0vefqTqWpmVtcq2QexA9gkaaOkFmAbsH1yY0Qcj4hlEbEhIjYAXwNuiIidkrrTTm4kXQJsAnZXMFYzM5umYlcQETEm6RbgPqARuCsiHpR0G7AzIrbPUv05wG2SRoEJ4DURcaRSsZqZ2ZlU3DZey3p6emLnzp1z72hmZlMk3R8RPaW2+UlqMzMryQnCzMxKcoIwM7OSnCDMzKwkJwgzMyupbu5iktQHfP8cDrEMOHSewql1/ixO58/jdP48TqmHz2J9RHSX2lA3CeJcSdo5061e840/i9P58zidP49T6v2zcBOTmZmV5ARhZmYlOUGccmfWAVQRfxan8+dxOn8ep9T1Z+E+CDMzK8lXEGZmVpIThJmZlTTvE4Sk6yU9LGmXpFuzjidLktZJ+rykXkkPSnpD1jFlTVKjpG9K+uesY8mapMWS7pH0kKScpGdnHVOWJL0x/Tv5rqSPSmrLOqbzbV4niHRSojuAnwG2ADdJ2pJtVJkaA34nIrYAzwJeO88/D4A3ALmsg6gS7wY+ExGbgaczjz8XSWuA1wM9EfFUkjlvtmUb1fk3rxMEcA2wKyJ2R8QIcDdwY8YxZSYi8hHxjXS5n+QEsCbbqLIjaS3wc8D7s44la5IWkUzk9QGAiBiJiGOZBpW9JqBdUhOwANiXcTzn3XxPEGuAJ4rW9zCPT4jFJG0Arga+nnEoWXoX8GaSWQ3nu41AH/A3aZPb+yV1ZB1UViJiL/DnwA+APHA8Iv4126jOv/meIKwESZ3AJ4HfiohC1vFkQdILgIMRcX/WsVSJJuAZwF9FxNXACWDe9tlJWkLS2rARWA10SHpZtlGdf/M9QewF1hWtr03L5i1JzSTJ4cMR8Q9Zx5Oha4EbJD1O0vT4U5I+lG1ImdoD7ImIySvKe0gSxnz1POCxiOiLiFHgH4AfyTim826+J4gdwCZJGyW1kHQybc84psxIEkkbcy4i3pl1PFmKiLdExNqI2EDy/+LfI6LuviGWKyL2A09IuiItei7Qm2FIWfsB8CxJC9K/m+dSh532TVkHkKWIGJN0C3AfyV0Id0XEgxmHlaVrgV8BviPpgbTs9yLi3uxCsiryOuDD6Zep3cCvZhxPZiLi65LuAb5BcvffN6nDYTc81IaZmZU035uYzMxsBk4QZmZWkhOEmZmV5ARhZmYlOUGYmVlJThBmKUkD6c8Nkl56no/9e9PWv3I+j29WCU4QZmfaAJxVgkgHbJvNaQkiIuruqVurP04QZme6HfgxSQ+kY/43SvozSTskfVvSqwEk/YSkL0naTvpUsaRPSbo/nSfg5rTsdpJRPx+Q9OG0bPJqRemxvyvpO5J+qejYXyiaf+HD6RO7SLo9nbPj25L+/IJ/OjZvzOsnqc1mcCvwuxHxAoD0RH88In5IUivwZUmTI3c+A3hqRDyWrv9aRByR1A7skPTJiLhV0i0RsbXEe70I2Eoyv8KytM5/pNuuBp5CMoz0l4FrJeWAXwA2R0RIWnx+f3WzU3wFYTa364CXp8OPfB1YCmxKt/1XUXIAeL2kbwFfIxkIchOz+1HgoxExHhEHgC8CP1R07D0RMQE8QNL0dRwYAj4g6UXAyXP83cxm5ARhNjcBr4uIrelrY9HY/yemdpJ+gmSUz2dHxNNJxuc5l2koh4uWx4GmiBgjmejqHuAFwGfO4fhms3KCMDtTP9BVtH4f8JvpUOhIunyGyXIWAUcj4qSkzSTTtk4anaw/zZeAX0r7ObpJZm37r5kCS+fqWJQOoPhGkqYps4pwH4TZmb4NjKdNRX9LMhfzBuAbaUdxH/DCEvU+A7wm7Sd4mKSZadKdwLclfSMifrmo/B+BZwPfAgJ4c0TsTxNMKV3AP0lqI7my+e0n9RualcGjuZqZWUluYjIzs5KcIMzMrCQnCDMzK8kJwszMSnKCMDOzkpwgzMysJCcIMzMr6f8DtnEhqj6H3isAAAAASUVORK5CYII=\n", "text/plain": [ "<Figure size 432x288 with 1 Axes>" ] @@ -507,7 +509,7 @@ { "data": { "text/plain": [ - "0.7979886313948404" + "0.8053976582616722" ] }, "execution_count": 15, @@ -546,7 +548,7 @@ { "data": { "text/plain": [ - "IncompatibleKeys(missing_keys=[], unexpected_keys=[])" + "<All keys 
     ]
    },
    "execution_count": 17,
@@ -773,12 +775,14 @@
    "# Export to FINN-ONNX <a id=\"export_finn_onnx\" ></a>\n",
    "\n",
    "\n",
-   "[ONNX](https://onnx.ai/) is an open format built to represent machine learning models, and the FINN compiler expects an ONNX model as input. We'll now export our network into ONNX to be imported and used in FINN for the next notebooks. Note that the particular ONNX representation used for FINN differs from standard ONNX, you can read more about this [here](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-finn-onnx)."
+   "[ONNX](https://onnx.ai/) is an open format built to represent machine learning models, and the FINN compiler expects an ONNX model as input. We'll now export our network into ONNX to be imported and used in FINN for the next notebooks. Note that the particular ONNX representation used for FINN differs from standard ONNX, you can read more about this [here](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-finn-onnx).\n",
+   "\n",
+   "You can see below how we export a trained network in Brevitas into a FINN-compatible ONNX representation. Note how we create a `QuantTensor` instance with dummy data to tell Brevitas what our inputs look like, which will be used to set the input quantization annotation on the exported model."
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 25,
+  "execution_count": 27,
   "metadata": {
    "scrolled": true
   },
@@ -787,69 +791,38 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Model saved to cybsec-mlp.onnx\n"
+     "Model saved to cybsec-mlp-ready.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:15: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
-     "  from ipykernel import kernelapp as app\n"
+     "<ipython-input-22-78c27bb59095>:15: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
+     "  x = (x + torch.tensor([1.0])) / 2.0\n"
     ]
    }
   ],
   "source": [
    "import brevitas.onnx as bo\n",
+   "from brevitas.quant_tensor import QuantTensor\n",
    "\n",
-   "export_onnx_path = \"cybsec-mlp.onnx\"\n",
+   "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n",
    "input_shape = (1, 600)\n",
-   "bo.export_finn_onnx(model_for_export, input_shape, export_onnx_path)\n",
-   "\n",
-   "print(\"Model saved to %s\" % export_onnx_path)"
   ]
  },
  {
-  "cell_type": "markdown",
-  "metadata": {},
-  "source": [
-   "## One final fix: input datatype\n",
-   "\n",
-   "There's one more thing we'll do: we will mark the input tensor datatype as `DataType.BIPOLAR`, which will be used by the compiler later on.
To do this, we'll utilize the `ModelWrapper` component from FINN, which lets us examine and manipulate the ONNX graph in an easier way.\n", - "\n", - "*In the near future it will be possible to add this information to the model [while exporting](https://github.com/Xilinx/brevitas/issues/232), instead of having to add it manually.*" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Input tensor name: 0\n", - "Input tensor shape: [1, 600]\n", - "Input tensor datatype: DataType.BIPOLAR\n" - ] - } - ], - "source": [ - "from finn.core.modelwrapper import ModelWrapper\n", - "from finn.core.datatype import DataType\n", + "# create a QuantTensor instance to mark input as bipolar during export\n", + "input_a = np.random.randint(0, 1, size=input_shape).astype(np.float32)\n", + "input_a = 2 * input_a - 1\n", + "scale = 1.0\n", + "input_t = torch.from_numpy(input_a * scale)\n", + "input_qt = QuantTensor(\n", + " input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True\n", + ")\n", "\n", - "finn_model = ModelWrapper(export_onnx_path)\n", + "bo.export_finn_onnx(\n", + " model_for_export, export_path=ready_model_filename, input_t=input_qt\n", + ")\n", "\n", - "finnonnx_in_tensor_name = finn_model.graph.input[0].name\n", - "finnonnx_model_in_shape = finn_model.get_tensor_shape(finnonnx_in_tensor_name)\n", - "finn_model.set_tensor_datatype(finnonnx_in_tensor_name, DataType.BIPOLAR)\n", - "print(\"Input tensor name: %s\" % finnonnx_in_tensor_name)\n", - "print(\"Input tensor shape: %s\" % str(finnonnx_model_in_shape))\n", - "print(\"Input tensor datatype: %s\" % str(finn_model.get_tensor_datatype(finnonnx_in_tensor_name)))\n", - "\n", - "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n", - "finn_model.save(ready_model_filename)" + "print(\"Model saved to %s\" % ready_model_filename)" ] }, { @@ -870,7 +843,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 28, "metadata": {}, "outputs": [ { @@ -894,10 +867,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f77214fa630>" + "<IPython.lib.display.IFrame at 0x7f49738bffa0>" ] }, - "execution_count": 27, + "execution_count": 28, "metadata": {}, "output_type": "execute_result" } @@ -940,7 +913,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.8.5" } }, "nbformat": 4, diff --git a/requirements.txt b/requirements.txt index 6dd4b5724782d01fc2958cc56c04cbc8e70af31f..de007ace503de9f110027b4190d5db6188633575 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ future==0.18.2 gspread==3.6.0 numpy==1.18.0 onnx==1.7.0 +onnxoptimizer onnxruntime==1.4.0 pre-commit==2.6.0 scipy==1.5.2 diff --git a/run-docker.sh b/run-docker.sh index 99f337c24116d56c2015ae0c21b044699fcb8f7a..6e8439810c40da4d78229300b20d4728119a92ab 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -59,11 +59,6 @@ else recho "This is required to be able to use Vitis." exit -1 fi - if [ -z "$XILINX_XRT" ];then - recho "Please set XILINX_XRT pointing to your XRT installation." - recho "This is required to be able to use Vitis." 
-    exit -1
-  fi
fi

DOCKER_GID=$(id -g)
@@ -74,7 +69,13 @@ DOCKER_PASSWD="finn"
# generate a random number per-run to allow multiple
# containers from the same user
DOCKER_RND=$(shuf -i0-32768 -n1)
-DOCKER_TAG="finn_dev_${DOCKER_UNAME}"
+# create a separate tag when Vitis is enabled, since the Docker image needs
+# additional dependencies installed
+if [ ! -z "$VITIS_PATH" ];then
+  DOCKER_TAG="finn_dev_vitis_${DOCKER_UNAME}"
+else
+  DOCKER_TAG="finn_dev_${DOCKER_UNAME}"
+fi
# uncomment to run multiple instances with different names
# DOCKER_INST_NAME="finn_${DOCKER_UNAME}_${DOCKER_RND}"
DOCKER_INST_NAME="finn_dev_${DOCKER_UNAME}"
@@ -102,8 +103,8 @@ SCRIPTPATH=$(dirname "$SCRIPT")
: ${ALVEO_PASSWORD=""}
: ${ALVEO_BOARD="U250"}
: ${ALVEO_TARGET_DIR="/tmp"}
-: ${XILINX_XRT="/opt/xilinx/xrt"}
: ${PLATFORM_REPO_PATHS="/opt/xilinx/platforms"}
+: ${XRT_DEB_VERSION="xrt_202010.2.7.766_18.04-amd64-xrt"}
: ${FINN_HOST_BUILD_DIR="/tmp/$DOCKER_INST_NAME"}

DOCKER_INTERACTIVE=""

@@ -178,6 +179,7 @@ docker build -f docker/Dockerfile.finn_dev --tag=$DOCKER_TAG \
    --build-arg UID=$DOCKER_UID \
    --build-arg PASSWD=$DOCKER_PASSWD \
    --build-arg INSTALL_XRT_DEPS=$INSTALL_XRT_DEPS \
+   --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION \
    .
cd $OLD_PWD
# Launch container with current directory mounted
@@ -213,16 +215,10 @@ if [ ! -z "$VITIS_PATH" ];then
    recho "PLATFORM_REPO_PATHS must be set for Vitis/Alveo flows"
    exit -1
  fi
-  if [ -z "$XILINX_XRT" ];then
-    recho "XILINX_XRT must be set for Vitis/Alveo flows"
-    exit -1
-  fi
  DOCKER_EXEC+="-v $VITIS_PATH:$VITIS_PATH "
  DOCKER_EXEC+="-v $PLATFORM_REPO_PATHS:$PLATFORM_REPO_PATHS "
-  DOCKER_EXEC+="-v $XILINX_XRT:$XILINX_XRT "
  DOCKER_EXEC+="-e VITIS_PATH=$VITIS_PATH "
  DOCKER_EXEC+="-e PLATFORM_REPO_PATHS=$PLATFORM_REPO_PATHS "
-  DOCKER_EXEC+="-e XILINX_XRT=$XILINX_XRT "
  DOCKER_EXEC+="-e ALVEO_IP=$ALVEO_IP "
  DOCKER_EXEC+="-e ALVEO_USERNAME=$ALVEO_USERNAME "
  DOCKER_EXEC+="-e ALVEO_PASSWORD=$ALVEO_PASSWORD "
diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py
index 1c1861e5286e92abcf983056f8263daae14334e8..50cd9ed4ff663044433c5d8997f2fcd9337a0dd6 100644
--- a/src/finn/builder/build_dataflow_steps.py
+++ b/src/finn/builder/build_dataflow_steps.py
@@ -46,10 +46,8 @@ from finn.transformation.streamline import Streamline
 from finn.transformation.infer_data_layouts import InferDataLayouts
 from finn.transformation.move_reshape import RemoveCNVtoFCFlatten
 from finn.transformation.lower_convs_to_matmul import LowerConvsToMatMul
-from finn.transformation.streamline.reorder import (
-    MakeMaxPoolNHWC,
-    MoveScalarLinearPastInvariants,
-)
+from finn.transformation.streamline.reorder import MakeMaxPoolNHWC
+
 from shutil import copy, copytree
 from finn.transformation.fpgadataflow.insert_dwc import InsertDWC
 from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO
@@ -158,13 +156,14 @@ def step_streamline(model: ModelWrapper, cfg: DataflowBuildConfig):
     topologies.
""" - model = model.transform(MoveScalarLinearPastInvariants()) + model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold()) model = model.transform(Streamline()) need_lowering = len(model.get_nodes_by_op_type("Conv")) > 0 if need_lowering: model = model.transform(LowerConvsToMatMul()) model = model.transform(MakeMaxPoolNHWC()) model = model.transform(absorb.AbsorbTransposeIntoMultiThreshold()) + model = model.transform(MakeMaxPoolNHWC()) model = model.transform(ConvertBipolarMatMulToXnorPopcount()) model = model.transform(Streamline()) # absorb final add-mul nodes into TopK @@ -181,7 +180,7 @@ def step_streamline(model: ModelWrapper, cfg: DataflowBuildConfig): def step_convert_to_hls(model: ModelWrapper, cfg: DataflowBuildConfig): """Convert eligible nodes to `HLSCustomOp` subclasses that represent HLS layers. Which nodes and particular configurations can be converted to HLS - is limited, see the source code of the `convert_to_hls` module for more. """ + is limited, see the source code of the `convert_to_hls` module for more.""" mem_mode = cfg.default_mem_mode.value if cfg.standalone_thresholds: diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py index 068950b89ae543f5a37c28d83d87ecfa605eaab4..b20b652254028c4ee5dc2edd1f1302ea3359019b 100644 --- a/src/finn/custom_op/fpgadataflow/__init__.py +++ b/src/finn/custom_op/fpgadataflow/__init__.py @@ -29,6 +29,9 @@ from finn.custom_op.fpgadataflow.convolutioninputgenerator import ( ConvolutionInputGenerator, ) +from finn.custom_op.fpgadataflow.convolutioninputgenerator1d import ( + ConvolutionInputGenerator1D, +) from finn.custom_op.fpgadataflow.downsampler import DownSampler from finn.custom_op.fpgadataflow.streamingfclayer_batch import StreamingFCLayer_Batch from finn.custom_op.fpgadataflow.streamingmaxpool_batch import StreamingMaxPool_Batch @@ -49,6 +52,9 @@ from finn.custom_op.fpgadataflow.vector_vector_activate_batch import ( ) from finn.custom_op.fpgadataflow.channelwise_op_batch import ChannelwiseOp_Batch from finn.custom_op.fpgadataflow.iodma import IODMA +from finn.custom_op.fpgadataflow.streamingdataflowpartition import ( + StreamingDataflowPartition, +) custom_op = dict() @@ -58,6 +64,7 @@ custom_op["DownSampler"] = DownSampler custom_op["StreamingMaxPool_Batch"] = StreamingMaxPool_Batch custom_op["StreamingFCLayer_Batch"] = StreamingFCLayer_Batch custom_op["ConvolutionInputGenerator"] = ConvolutionInputGenerator +custom_op["ConvolutionInputGenerator1D"] = ConvolutionInputGenerator1D custom_op["TLastMarker"] = TLastMarker custom_op["StreamingDataWidthConverter_Batch"] = StreamingDataWidthConverter_Batch custom_op["StreamingFIFO"] = StreamingFIFO @@ -71,3 +78,4 @@ custom_op["DuplicateStreams_Batch"] = DuplicateStreams_Batch custom_op["Vector_Vector_Activate_Batch"] = Vector_Vector_Activate_Batch custom_op["ChannelwiseOp_Batch"] = ChannelwiseOp_Batch custom_op["IODMA"] = IODMA +custom_op["StreamingDataflowPartition"] = StreamingDataflowPartition diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index 3f400053df8de6ec1e53e39fb5a3edee15f3ab30..6e77cd3da7328fd81dccc2ff171a9ae84723d165 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -61,12 +61,14 @@ class ConvolutionInputGenerator(HLSCustomOp): def get_nodeattr_types(self): my_attrs = { - "ConvKernelDim": ("i", True, 0), + "ConvKernelDim": ("ints", True, 
[]), # [H, W] = [Y, X] "IFMChannels": ("i", True, 0), - "IFMDim": ("i", True, 0), - "OFMDim": ("i", True, 0), + "IFMDim": ("ints", True, []), # [H, W] = [Y, X] + "OFMDim": ("ints", True, []), # [H, W] = [Y, X] "SIMD": ("i", True, 0), - "Stride": ("i", True, 0), + "Stride": ("ints", True, [1, 1]), # [H, W] = [Y, X] + # note: only dilation=1 supported for now + "Dilation": ("ints", True, [1, 1]), # [H, W] = [Y, X] # FINN DataTypes for inputs, weights, outputs "inputDataType": ("s", True, ""), "outputDataType": ("s", True, ""), @@ -86,44 +88,59 @@ class ConvolutionInputGenerator(HLSCustomOp): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_normal_input_shape(self): + def get_nodeattr(self, name): + # overriding get_nodeattr to check for square kernel/img.. requirement + # since this can't be done with the attribute restriction in nodeattr_types + # TODO non-square can be enabled in theory but needs testing + ret = super().get_nodeattr(name) + props_to_check = ["ConvKernelDim", "IFMDim", "OFMDim", "Stride", "Dilation"] + if name in props_to_check: + is_square = ret[0] == ret[1] + assert is_square, "Only square %s supported" % name + if name == "Dilation": + assert ret[0] == ret[1] == 1, "Only dilation=1 supported" + return ret - ifm_dim = self.get_nodeattr("IFMDim") + def get_normal_input_shape(self): + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") - - ishape = (1, ifm_dim, ifm_dim, ifm_ch) + ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) return ishape def get_folded_input_shape(self): - ifm_dim = self.get_nodeattr("IFMDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") simd = self.get_nodeattr("SIMD") assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" wf = int(ifm_ch / simd) - folded_ishape = (1, ifm_dim, ifm_dim, wf, simd) + folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) return folded_ishape def get_normal_output_shape(self): - k = self.get_nodeattr("ConvKernelDim") - ifm_dim = self.get_nodeattr("IFMDim") + k_h, k_w = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") - stride = self.get_nodeattr("Stride") + stride_h, stride_w = self.get_nodeattr("Stride") + dilation_h, dilation_w = self.get_nodeattr("Dilation") pad = 0 - ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad) - oshape = (1, ofm_dim, ofm_dim, k * k * ifm_ch) + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) + oshape = (1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch) return oshape def get_folded_output_shape(self): - k = self.get_nodeattr("ConvKernelDim") - ifm_dim = self.get_nodeattr("IFMDim") + k_h, k_w = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") - stride = self.get_nodeattr("Stride") + stride_h, stride_w = self.get_nodeattr("Stride") + dilation_h, dilation_w = self.get_nodeattr("Dilation") simd = self.get_nodeattr("SIMD") pad = 0 - ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, pad) + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" - wf = int((k * k * ifm_ch) // simd) - folded_oshape = (1, ofm_dim, ofm_dim, wf, simd) + wf = int((k_h * k_w * ifm_ch) 
// simd) + folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, simd) return folded_oshape def make_shape_compatible_op(self, model): @@ -186,26 +203,31 @@ class ConvolutionInputGenerator(HLSCustomOp): def get_exp_cycles(self): simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") - k = self.get_nodeattr("ConvKernelDim") - ifm_dim = self.get_nodeattr("IFMDim") - ofm_dim = self.get_nodeattr("OFMDim") - stride = self.get_nodeattr("Stride") + k_h, k_w = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ofm_dim_h, ofm_dim_w = self.get_nodeattr("OFMDim") + stride_h, stride_w = self.get_nodeattr("Stride") + dilation_h, dilation_w = self.get_nodeattr("Dilation") + # since mmv != 1 is not supported yet, we set mmv for now to 1 mmv = 1 # see https://github.com/Xilinx/finn-hlslib/blob/master/slidingwindow.h - cycles_write_block = (ofm_dim * k * k * (ifm_ch / simd)) / mmv - cycles_read_block = stride * ifm_dim * (ifm_ch / simd) + cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / mmv + cycles_read_block = stride_w * ifm_dim_w * (ifm_ch / simd) max_cycles = max(cycles_write_block, cycles_read_block) - exp_cycles = ifm_dim * k * (ifm_ch / simd) + ofm_dim * max_cycles + exp_cycles = ( + ifm_dim_w * k_h * dilation_h * (ifm_ch / simd) + ofm_dim_h * max_cycles + ) return int(exp_cycles) def bram_estimation(self): + # NOTE: only tested with a square convolution simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") - ifm_dim = self.get_nodeattr("IFMDim") - k = self.get_nodeattr("ConvKernelDim") - stride = self.get_nodeattr("Stride") + ifm_dim = self.get_nodeattr("IFMDim")[0] + k = self.get_nodeattr("ConvKernelDim")[0] + stride = self.get_nodeattr("Stride")[0] ram_style = self.get_nodeattr("ram_style") if ram_style == "block" or ram_style == "auto": ram_depth = ifm_dim * ifm_ch / simd @@ -232,11 +254,12 @@ class ConvolutionInputGenerator(HLSCustomOp): return 0 def lut_estimation(self): + # NOTE: only tested with a square convolution simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") - ifm_dim = self.get_nodeattr("IFMDim") - k = self.get_nodeattr("ConvKernelDim") - stride = self.get_nodeattr("Stride") + ifm_dim = self.get_nodeattr("IFMDim")[0] + k = self.get_nodeattr("ConvKernelDim")[0] + stride = self.get_nodeattr("Stride")[0] ram_style = self.get_nodeattr("ram_style") if ram_style == "distributed": ram_luts = int( @@ -252,11 +275,12 @@ class ConvolutionInputGenerator(HLSCustomOp): return 300 + ram_luts def uram_estimation(self): + # NOTE: only tested with a square convolution simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") - ifm_dim = self.get_nodeattr("IFMDim") - k = self.get_nodeattr("ConvKernelDim") - stride = self.get_nodeattr("Stride") + ifm_dim = self.get_nodeattr("IFMDim")[0] + k = self.get_nodeattr("ConvKernelDim")[0] + stride = self.get_nodeattr("Stride")[0] ram_style = self.get_nodeattr("ram_style") if ram_style == "ultra": return int( @@ -295,7 +319,7 @@ class ConvolutionInputGenerator(HLSCustomOp): assert ( inp.shape == exp_ishape ), """Input shape doesn't - match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" + match expected shape (1, ifm_dim_h, ifm_dim_w, ifm_ch).""" if self.get_input_datatype() == DataType.BIPOLAR: # store bipolar activations as binary inp = (inp + 1) / 2 @@ -354,26 +378,27 @@ class ConvolutionInputGenerator(HLSCustomOp): assert ( context[node.output[0]].shape == exp_oshape ), """Output - shape doesn't match expected shape (1, ofm_dim, 
ofm_dim, k*k*ifm_ch).""" + shape doesn't match expected shape (1, ofm_dim_h, ofm_dim_w, k_h*k_w*ifm_ch).""" def global_includes(self): self.code_gen_dict["$GLOBALS$"] = ['#include "slidingwindow.h"'] def defines(self, var): numReps = 1 + ifm_dim = self.get_nodeattr("IFMDim")[0] + ifm_ch = self.get_nodeattr("IFMChannels") + ofm_dim = self.get_nodeattr("OFMDim")[0] + k = self.get_nodeattr("ConvKernelDim")[0] + stride = self.get_nodeattr("Stride")[0] + simd = self.get_nodeattr("SIMD") + ifm_precision = self.get_input_datatype().bitwidth() + self.code_gen_dict["$DEFINES$"] = [ """#define ConvKernelDim1 {}\n #define IFMChannels1 {}\n #define Input_precision1 {}\n #define IFMDim1 {}\n #define OFMDim1 {}\n #define SIMD1 {}\n #define Stride1 {}\n #define numReps {}""".format( - self.get_nodeattr("ConvKernelDim"), - self.get_nodeattr("IFMChannels"), - self.get_input_datatype().bitwidth(), - self.get_nodeattr("IFMDim"), - self.get_nodeattr("OFMDim"), - self.get_nodeattr("SIMD"), - self.get_nodeattr("Stride"), - numReps, + k, ifm_ch, ifm_precision, ifm_dim, ofm_dim, simd, stride, numReps ) ] @@ -415,9 +440,11 @@ class ConvolutionInputGenerator(HLSCustomOp): } hls_ram_style = map_to_hls_ram_style[ram_style] hls_call = node.op_type - # check if non optimized ConvolutionInputGenerator is needed - k = self.get_nodeattr("ConvKernelDim") - stride = self.get_nodeattr("Stride") + + # check which ConvolutionInputGenerator is needed + k = self.get_nodeattr("ConvKernelDim")[0] + stride = self.get_nodeattr("Stride")[0] + if k % stride != 0: hls_call += "_kernel_stride" diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py new file mode 100644 index 0000000000000000000000000000000000000000..782655b31b7f4add4c886a46845506af875190bc --- /dev/null +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -0,0 +1,616 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
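+# Editorial sketch, not part of the op implementation: the module comments
+# below describe two different hlslib output layouts (non-depthwise vs.
+# depthwise SWG). Assuming hypothetical sizes ofm_h, ofm_w, k_h, k_w, ifm_ch
+# and simd, a flattened non-depthwise result can be rearranged into the
+# depthwise ordering with plain numpy along these lines:
+#
+#   x = x.reshape(1, ofm_h, ofm_w, k_h * k_w, ifm_ch // simd, simd)
+#   x = x.transpose(0, 1, 2, 4, 3, 5)  # swap kernel and channel-group axes
+#   x = x.reshape(1, ofm_h, ofm_w, ifm_ch * k_h * k_w)
+#
+# test_fpgadataflow_slidingwindow.py remains the authoritative reference.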
+ +import os + +import math +import numpy as np + +from finn.core.datatype import DataType +from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp +from finn.custom_op.general.im2col import compute_conv_output_dim +from onnx import TensorProto, helper +from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy + +# This operation should only be used for 1D convolutions. Either the +# IFMDim_H or IFMDim_W should be '1', which represents the so-called +# dummy-dimension + +# ONNX i/o tensor shape assumptions for ConvolutionInputGenerator1D: +# input 0 is the input tensor, shape NHWC = (1, IFMDim_H, IFMDim_W, IFMChannels) +# output 0 is the output tensor, shape NHWC: +# = (1, OFMDim_H, OFMDim_W, (ConvKernelDim_H*ConvKernelDim_W)*IFMChannels) + +# note: the actual data layout produced by the hlslib kernels is different +# for depthwise and non-depthwise ops. +# * non-depthwise SWG: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD) +# * depthwise SWG: (1, OFMDim_H, OFMDim_W, IFMChannels/SIMD, K_H, K_W, SIMD) +# see test_fpgadataflow_slidingwindow.py for an example of how to transform +# between the two layouts + + +class ConvolutionInputGenerator1D(HLSCustomOp): + """Class that corresponds to one of the 1D finn-hlslib ConvolutionInputGenerator + (sliding window) function variants. Depending on the combination of + attributes (e.g. depthwise or not, whether dilation is 0) a different + variant will be picked for the actual HLS implementation.""" + + def __init__(self, onnx_node): + super().__init__(onnx_node) + + def get_nodeattr_types(self): + my_attrs = { + "ConvKernelDim": ("ints", True, []), # [H, W] = [Y, X] + "IFMChannels": ("i", True, 0), + "IFMDim": ("ints", True, []), # [H, W] = [Y, X] + "OFMDim": ("ints", True, []), # [H, W] = [Y, X] + "SIMD": ("i", True, 0), + "Stride": ("ints", True, []), # [H, W] = [Y, X] + "Dilation": ("ints", True, []), # [H, W] = [Y, X] + # FINN DataTypes for inputs, weights, outputs + "inputDataType": ("s", True, ""), + "outputDataType": ("s", True, ""), + "depthwise": ("i", False, 0, {0, 1}), + # FPGA resource type for ConvolutionInputGenerator input buffer + # auto -- let Vivado HLS decide + # block -- use BRAM + # distributed -- use LUTRAM + # ultra -- use URAM + "ram_style": ( + "s", + False, + "distributed", + {"auto", "block", "distributed", "ultra"}, + ), + } + my_attrs.update(super().get_nodeattr_types()) + return my_attrs + + def get_normal_input_shape(self): + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) + return ishape + + def get_folded_input_shape(self): + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + simd = self.get_nodeattr("SIMD") + assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" + wf = int(ifm_ch / simd) + folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) + return folded_ishape + + def get_normal_output_shape(self): + k_h, k_w = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + stride_h, stride_w = self.get_nodeattr("Stride") + dilation_h, dilation_w = self.get_nodeattr("Dilation") + pad = 0 + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) + oshape = (1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch) + return oshape + + def get_folded_output_shape(self): + k_h, k_w = 
self.get_nodeattr("ConvKernelDim")
+        ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim")
+        ifm_ch = self.get_nodeattr("IFMChannels")
+        stride_h, stride_w = self.get_nodeattr("Stride")
+        dilation_h, dilation_w = self.get_nodeattr("Dilation")
+        simd = self.get_nodeattr("SIMD")
+        pad = 0
+        ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h)
+        ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w)
+        assert ifm_ch % simd == 0, "SIMD must divide IFMChannels"
+        wf = int((k_h * k_w * ifm_ch) // simd)
+        folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, simd)
+        return folded_oshape
+
+    def make_shape_compatible_op(self, model):
+        exp_ishape = self.get_normal_input_shape()
+        oshape = self.get_normal_output_shape()
+        ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
+        assert ishape == exp_ishape, "Unexpected input shape for ConvInpGen."
+        # implement tensor with correct shape
+        values = np.random.randn(*oshape).astype(np.float32)
+        return helper.make_node(
+            "Constant",
+            inputs=[],
+            outputs=[self.onnx_node.output[0]],
+            value=helper.make_tensor(
+                name="const_tensor",
+                data_type=TensorProto.FLOAT,
+                dims=values.shape,
+                vals=values.flatten().astype(float),
+            ),
+        )
+
+    def infer_node_datatype(self, model):
+        node = self.onnx_node
+        # data type stays the same
+        dtype = model.get_tensor_datatype(node.input[0])
+        model.set_tensor_datatype(node.output[0], dtype)
+
+    def verify_node(self):
+        pass
+
+    def get_input_datatype(self):
+        """Returns FINN DataType of input."""
+        return DataType[self.get_nodeattr("inputDataType")]
+
+    def get_output_datatype(self):
+        """Returns FINN DataType of output."""
+        return DataType[self.get_nodeattr("outputDataType")]
+
+    def get_instream_width(self):
+        """Returns stream width; input and output stream widths are equal for
+        the sliding window function."""
+        ibits = self.get_input_datatype().bitwidth()
+        simd = self.get_nodeattr("SIMD")
+        ifm_ch = self.get_nodeattr("IFMChannels")
+        assert ifm_ch % simd == 0, "SIMD must divide IFMChannels"
+        in_width = simd * ibits
+        return in_width
+
+    def get_outstream_width(self):
+        """Returns stream width; input and output stream widths are equal for
+        the sliding window function, so the function to determine the input
+        stream width can be reused."""
+        return self.get_instream_width()
+
+    def get_number_output_values(self):
+        folded_oshape = self.get_folded_output_shape()
+        num_output_elems = np.prod(folded_oshape[:-1])
+        return num_output_elems
+
+    def get_1d_conv_attrs_normalized(self):
+        # support both (1, D) and (D, 1) cases transparently:
+        # For the kernel, presenting the input data of size D as
+        # [H, W] = [Y, X] = [1, D] or [D, 1]
+        # effectively gives the same result. Because the
+        # ConvolutionInputGenerator_NonSquare_Dilated(_dws) kernel currently only
+        # supports dilation>1 along the X-axis and the
+        # ConvolutionInputGenerator_NonSquare only works for stride>1 along the
+        # X-axis, we are working with the following assumption:
+        # the dummy ('1') dimension is the Y-dimension, i.e.
+ # images and kernels (and their attributes) of dimension + # [H, W] = [Y, X] = [D, 1] or [1, D] are always mapped to [1, D] + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + ofm_dim = self.get_nodeattr("OFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + + # see defines() for an explanation + if ifm_dim[1] == 1: + ifm_dim = ifm_dim[::-1] + ofm_dim = ofm_dim[::-1] + k = k[::-1] + stride = stride[::-1] + dilation = dilation[::-1] + + return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation) + + def get_exp_cycles(self): + simd = self.get_nodeattr("SIMD") + ( + ifm_ch, + ifm_dim, + ofm_dim, + k, + stride, + dilation, + ) = self.get_1d_conv_attrs_normalized() + ifm_dim_h, ifm_dim_w = ifm_dim + ofm_dim_h, ofm_dim_w = ofm_dim + k_h, k_w = k + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + + # since mmv != 1 is not supported yet, we set mmv for now to 1 + mmv = 1 + # see https://github.com/Xilinx/finn-hlslib/blob/master/slidingwindow.h + cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / mmv + cycles_read_block = stride_w * ifm_dim_w * (ifm_ch / simd) + max_cycles = max(cycles_write_block, cycles_read_block) + exp_cycles = ( + ifm_dim_w * k_h * dilation_h * (ifm_ch / simd) + ofm_dim_h * max_cycles + ) + + return int(exp_cycles) + + def bram_estimation(self): + # NOTE: not tested for correctness + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + ifm_dim = np.prod(self.get_nodeattr("IFMDim")) + k = np.prod(self.get_nodeattr("ConvKernelDim")) + stride = np.prod(self.get_nodeattr("Stride")) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "block" or ram_style == "auto": + ram_depth = ifm_dim * ifm_ch / simd + if ram_depth <= 512: + ram_width = 36 + elif ram_depth <= 1024: + ram_width = 18 + elif ram_depth <= 2048: + ram_width = 9 + elif ram_depth <= 4096: + ram_width = 4 + elif ram_depth <= 8192: + ram_width = 2 + else: + ram_width = 1 + return int( + (k + stride) + * ( + math.ceil(simd * self.get_input_datatype().bitwidth() / ram_width) + * math.ceil(ifm_dim * ifm_ch / simd / ram_depth) + ) + ) + else: + return 0 + + def lut_estimation(self): + # NOTE: not tested for correctness + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + ifm_dim = np.prod(self.get_nodeattr("IFMDim")) + k = np.prod(self.get_nodeattr("ConvKernelDim")) + stride = np.prod(self.get_nodeattr("Stride")) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "distributed": + ram_luts = int( + (k + stride) + * ( + simd + * self.get_input_datatype().bitwidth() + * math.ceil(ifm_dim * ifm_ch / simd / 64) + ) + ) + else: + ram_luts = 0 + return 300 + ram_luts + + def uram_estimation(self): + # NOTE: not tested for correctness + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + ifm_dim = np.prod(self.get_nodeattr("IFMDim")) + k = np.prod(self.get_nodeattr("ConvKernelDim")) + stride = np.prod(self.get_nodeattr("Stride")) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "ultra": + return int( + (k + stride) + * ( + math.ceil(simd * self.get_input_datatype().bitwidth() / 64) + * math.ceil(ifm_dim * ifm_ch / simd / 4096) + ) + ) + else: + return 0 + + def execute_node(self, context, graph): + mode = self.get_nodeattr("exec_mode") + node = self.onnx_node + exp_ishape = self.get_normal_input_shape() + exp_oshape = self.get_normal_output_shape() + folded_ishape = 
self.get_folded_input_shape()
+        folded_oshape = self.get_folded_output_shape()
+
+        # TODO ensure codegen dir exists
+        if mode == "cppsim":
+            code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
+        elif mode == "rtlsim":
+            code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
+        else:
+            raise Exception(
+                """Invalid value for attribute exec_mode! Is currently set to: {}
+                has to be set to one of the following values ("cppsim", "rtlsim")""".format(
+                    mode
+                )
+            )
+
+        inp = context[node.input[0]]
+        assert str(inp.dtype) == "float32", "Input datatype is not float32"
+        assert (
+            inp.shape == exp_ishape
+        ), """Input shape doesn't
+        match expected shape (1, ifm_dim_h, ifm_dim_w, ifm_ch)."""
+        if self.get_input_datatype() == DataType.BIPOLAR:
+            # store bipolar activations as binary
+            inp = (inp + 1) / 2
+            export_idt = DataType.BINARY
+        else:
+            export_idt = self.get_input_datatype()
+        # reshape input into folded form
+        inp = inp.reshape(folded_ishape)
+        # make copy before saving array
+        reshaped_input = inp.copy()
+        np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
+
+        if mode == "cppsim":
+            # execute the precompiled model
+            super().exec_precompiled_singlenode_model()
+            # load output npy file
+            super().npy_to_dynamic_output(context)
+            assert (
+                context[node.output[0]].shape == folded_oshape
+            ), "cppsim did not produce expected folded output shape"
+            context[node.output[0]] = context[node.output[0]].reshape(*exp_oshape)
+        elif mode == "rtlsim":
+            sim = self.get_rtlsim()
+            nbits = self.get_instream_width()
+            rtlsim_inp = npy_to_rtlsim_input(
+                "{}/input_0.npy".format(code_gen_dir), export_idt, nbits
+            )
+            super().reset_rtlsim(sim)
+            super().toggle_clk(sim)
+            rtlsim_output = self.rtlsim(sim, rtlsim_inp)
+            odt = export_idt
+            target_bits = odt.bitwidth()
+            packed_bits = self.get_outstream_width()
+            out_npy_path = "{}/output.npy".format(code_gen_dir)
+            out_shape = self.get_folded_output_shape()
+            rtlsim_output_to_npy(
+                rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits
+            )
+            # load and reshape output
+            output = np.load(out_npy_path)
+            output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape)
+            context[node.output[0]] = output
+        else:
+            raise Exception(
+                """Invalid value for attribute exec_mode! Is currently set to: {}
+                has to be set to one of the following values ("cppsim", "rtlsim")""".format(
+                    mode
+                )
+            )
+        # binary -> bipolar if needed
+        if self.get_output_datatype() == DataType.BIPOLAR:
+            out = context[node.output[0]]
+            out = 2 * out - 1
+            context[node.output[0]] = out
+        assert (
+            context[node.output[0]].shape == exp_oshape
+        ), """Output
+        shape doesn't match expected shape (1, ofm_dim_h, ofm_dim_w, k_h*k_w*ifm_ch)."""
+
+    def global_includes(self):
+        self.code_gen_dict["$GLOBALS$"] = ['#include "slidingwindow.h"']
+
+    def defines(self, var):
+        numReps = 1
+        (
+            ifm_ch,
+            ifm_dim,
+            ofm_dim,
+            k,
+            stride,
+            dilation,
+        ) = self.get_1d_conv_attrs_normalized()
+        simd = self.get_nodeattr("SIMD")
+        ifm_precision = self.get_input_datatype().bitwidth()
+        ifm_dim_y, ifm_dim_x = ifm_dim
+        ofm_dim_y, ofm_dim_x = ofm_dim
+        k_y, k_x = k
+        dilation_y, dilation_x = dilation
+        # For a 1d convolution with stride=[S,1] or [1,S], the finn-hlslib function
+        # of ConvInpGen must be created with [stride_y, stride_x] = [S, S].
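+        # Worked example (editorial, assuming a hypothetical stride of [1, 2]
+        # or [2, 1]): np.prod(stride) evaluates to 2, so Stride1_y and
+        # Stride1_x are both emitted as 2 below; since the dummy dimension has
+        # size 1, the y-stride value cannot affect the generated output.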
+ # TODO: changes in finn-hlslib (slidingwindow.h) + stride_y = np.prod(stride) + stride_x = np.prod(stride) + + if dilation_x > 1: + assert ( + dilation_y == 1 + ), "Dilation value greater than 1 along y-axis is not yet supported" + self.code_gen_dict["$DEFINES$"] = [ + """ + #define ConvKernelDim1_x {}\n + #define ConvKernelDim1_y {}\n + #define IFMChannels1 {}\n + #define Input_precision1 {}\n + #define IFMDim1_x {}\n + #define IFMDim1_y {}\n + #define OFMDim1_x {}\n + #define OFMDim1_y {}\n + #define SIMD1 {}\n + #define Stride1_x {}\n + #define Stride1_y {}\n + #define Dilation1_x {}\n + #define Dilation1_y {}\n + #define numReps {} + """.format( + k_x, + k_y, + ifm_ch, + ifm_precision, + ifm_dim_x, + ifm_dim_y, + ofm_dim_x, + ofm_dim_y, + simd, + stride_x, + stride_y, + dilation_x, + dilation_y, + numReps, + ) + ] + else: + ofm_dim = self.get_nodeattr("OFMDim") + self.code_gen_dict["$DEFINES$"] = [ + """ + #define ConvKernelDim1_x {}\n + #define ConvKernelDim1_y {}\n + #define IFMChannels1 {}\n + #define Input_precision1 {}\n + #define IFMDim1_x {}\n + #define IFMDim1_y {}\n + #define OFMDim1_x {}\n + #define OFMDim1_y {}\n + #define SIMD1 {}\n + #define Stride1_x {}\n + #define Stride1_y {}\n + #define numReps {} + """.format( + k_x, + k_y, + ifm_ch, + ifm_precision, + ifm_dim_x, + ifm_dim_y, + ofm_dim_x, + ofm_dim_y, + simd, + stride_x, + stride_y, + numReps, + ) + ] + + def read_npy_data(self): + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + dtype = self.get_input_datatype() + if dtype == DataType.BIPOLAR: + # use binary for bipolar storage + dtype = DataType.BINARY + elem_bits = dtype.bitwidth() + packed_bits = self.get_instream_width() + packed_hls_type = "ap_uint<%d>" % packed_bits + elem_hls_type = dtype.get_hls_datatype_str() + npy_type = "float" + npy_in = "%s/input_0.npy" % code_gen_dir + self.code_gen_dict["$READNPYDATA$"] = [] + self.code_gen_dict["$READNPYDATA$"].append( + 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' + % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + ) + + def strm_decl(self): + self.code_gen_dict["$STREAMDECLARATIONS$"] = [] + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream<ap_uint<{}>> in0 ("in0");'.format(self.get_instream_width()) + ) + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream<ap_uint<{}>> out ("out");'.format(self.get_outstream_width()) + ) + + def docompute(self): + ram_style = self.get_nodeattr("ram_style") + map_to_hls_ram_style = { + "auto": "ap_resource_dflt()", + "block": "ap_resource_bram()", + "distributed": "ap_resource_lutram()", + "ultra": "ap_resource_uram()", + } + hls_ram_style = map_to_hls_ram_style[ram_style] + hls_call = "ConvolutionInputGenerator" + # check which ConvolutionInputGenerator is needed + dilation_h, dilation_w = self.get_nodeattr("Dilation") + + hls_call += "_NonSquare" + if dilation_h > 1 or dilation_w > 1: + hls_call += "_Dilated" + if self.get_nodeattr("depthwise") == 1: + hls_call += "_dws" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{}<ConvKernelDim1_x, ConvKernelDim1_y, IFMChannels1, Input_precision1, + IFMDim1_x, IFMDim1_y, OFMDim1_x, OFMDim1_y, SIMD1, Stride1_x, Stride1_y, + Dilation1_x, Dilation1_y> (in0, out, numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + elif self.get_nodeattr("depthwise") == 1: + hls_call += "_dws" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{}<ConvKernelDim1_x, ConvKernelDim1_y, IFMChannels1, Input_precision1, + IFMDim1_x, IFMDim1_y, OFMDim1_x, OFMDim1_y, SIMD1, Stride1_x, Stride1_y> + (in0, out, 
numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + else: + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{}<ConvKernelDim1_x, ConvKernelDim1_y, IFMChannels1, Input_precision1, + IFMDim1_x, IFMDim1_y, OFMDim1_x, OFMDim1_y, SIMD1, Stride1_x, Stride1_y> + (in0, out, numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + + def dataoutstrm(self): + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + dtype = self.get_output_datatype() + if dtype == DataType.BIPOLAR: + # use binary for bipolar storage + dtype = DataType.BINARY + elem_bits = dtype.bitwidth() + packed_bits = self.get_outstream_width() + packed_hls_type = "ap_uint<%d>" % packed_bits + elem_hls_type = dtype.get_hls_datatype_str() + npy_type = "float" + npy_out = "%s/output.npy" % code_gen_dir + oshape = self.get_folded_output_shape() + oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") + + self.code_gen_dict["$DATAOUTSTREAM$"] = [ + 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + oshape_cpp_str, + npy_out, + ) + ] + + def save_as_npy(self): + self.code_gen_dict["$SAVEASCNPY$"] = [] + + def blackboxfunction(self): + self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ + """void {}(hls::stream<ap_uint<SIMD1*Input_precision1>> &in0, + hls::stream<ap_uint<SIMD1*Input_precision1>> &out)""".format( + self.onnx_node.name + ) + ] + + def pragmas(self): + self.code_gen_dict["$PRAGMAS$"] = ["#pragma HLS INTERFACE axis port=in0"] + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE axis port=out") + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS INTERFACE ap_ctrl_none port=return" + ) diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index 27dfab54ec6d483d948dd383e54a44117d7c1a65..99f959bf59f06d6ab5b71dd7245d657f4964cca4 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -17,9 +17,17 @@ class FMPadding_Batch(HLSCustomOp): def get_nodeattr_types(self): my_attrs = { # spatial size of input images - "ImgDim": ("i", True, 0), + "ImgDim": ("ints", True, []), # [H, W] = [Y, X] # total padding (per dimension) to apply - "Padding": ("i", True, 2), + # NOTE: Current padding scheme that is applied tries to pad the same + # amount of zeros in front and behind the image for each dimension. + # As an example, a padding scheme such as [1, x, 3, x] is equal + # to [2, x, 2, x] + "Padding": ( + "ints", + True, + [1, 1, 1, 1], + ), # [H_begin, W_begin, H_end, W_end] = [Y_begin, X_begin, Y_end, X_end] # number of channels in input image "NumChannels": ("i", True, 0), # SIMD Input parallelism @@ -38,31 +46,33 @@ class FMPadding_Batch(HLSCustomOp): def get_padded_odim(self): "Return the padded spatial size of the output." 
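+        # Worked example (editorial note, assuming hypothetical values): with
+        # ImgDim = [28, 28] and Padding = [1, 1, 1, 1], pad_h = pad_w = 2 and
+        # this method returns [30, 30].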
- - idim = self.get_nodeattr("ImgDim") + idim_h, idim_w = self.get_nodeattr("ImgDim") pad = self.get_nodeattr("Padding") - return idim + pad + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + odim_h = idim_h + pad_h + odim_w = idim_w + pad_w + return [odim_h, odim_w] def get_exp_cycles(self): - odim = self.get_padded_odim() + odim_h, odim_w = self.get_padded_odim() channels = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") batch_size = self.get_nodeattr("numInputVectors") - exp_cycles = (channels / simd) * batch_size * odim * odim + exp_cycles = (channels / simd) * batch_size * odim_h * odim_w return int(exp_cycles) def get_normal_input_shape(self): - idim = self.get_nodeattr("ImgDim") + idim_h, idim_w = self.get_nodeattr("ImgDim") num_ch = self.get_nodeattr("NumChannels") - - ishape = (1, idim, idim, num_ch) + ishape = (1, idim_h, idim_w, num_ch) return ishape def get_normal_output_shape(self): - odim = self.get_padded_odim() + odim_h, odim_w = self.get_padded_odim() num_ch = self.get_nodeattr("NumChannels") - oshape = (1, odim, odim, num_ch) + oshape = (1, odim_h, odim_w, num_ch) return oshape def get_folded_input_shape(self): @@ -148,20 +158,53 @@ class FMPadding_Batch(HLSCustomOp): self.code_gen_dict["$GLOBALS$"] = ['#include "streamtools.h"'] def defines(self, var): - self.code_gen_dict["$DEFINES$"] = [ - """#define ImgDim1 {}\n#define OutputDim1 {}\n - #define Padding1 {}\n#define NumChannels1 {}\n - #define PaddingStyle1 {}\n#define numReps {} - #define SIMD1 {}\n""".format( - self.get_nodeattr("ImgDim"), - self.get_padded_odim(), - self.get_nodeattr("Padding"), - self.get_nodeattr("NumChannels"), - self.get_nodeattr("PaddingStyle"), - self.get_nodeattr("numInputVectors"), - self.get_nodeattr("SIMD"), - ) - ] + idim_h, idim_w = self.get_nodeattr("ImgDim") + odim_h, odim_w = self.get_padded_odim() + pad = self.get_nodeattr("Padding") + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + is_square = idim_h == idim_w + + if is_square: + assert ( + pad_h == pad_w + ), "Only equal padding along the dimensions for square images is supported" + self.code_gen_dict["$DEFINES$"] = [ + """#define ImgDim1 {}\n#define OutputDim1 {}\n + #define Padding1 {}\n#define NumChannels1 {}\n + #define SIMD1 {}\n#define PaddingStyle1 {}\n + #define numReps {}\n""".format( + idim_h, + odim_h, + pad_h, + self.get_nodeattr("NumChannels"), + self.get_nodeattr("SIMD"), + self.get_nodeattr("PaddingStyle"), + self.get_nodeattr("numInputVectors"), + ) + ] + else: + self.code_gen_dict["$DEFINES$"] = [ + """ + #define OutputDim1_x {}\n + #define OutputDim1_y {}\n + #define Padding1_x {}\n + #define Padding1_y {}\n + #define NumChannels1 {}\n + #define SIMD1 {}\n + #define PaddingStyle1 {}\n + #define numReps {}\n + """.format( + odim_w, + odim_h, + pad_w, + pad_h, + self.get_nodeattr("NumChannels"), + self.get_nodeattr("SIMD"), + self.get_nodeattr("PaddingStyle"), + self.get_nodeattr("numInputVectors"), + ) + ] def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -193,12 +236,26 @@ class FMPadding_Batch(HLSCustomOp): def docompute(self): in_t = self.get_input_datatype().get_hls_datatype_str() node = self.onnx_node - self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<ImgDim1, OutputDim1, Padding1, NumChannels1,SIMD1, - {}, PaddingStyle1> (in0, out, numReps);""".format( - node.op_type, in_t - ) - ] + + idim_h, idim_w = self.get_nodeattr("ImgDim") + is_square = idim_h == idim_w + + if is_square: + hls_call = node.op_type + self.code_gen_dict["$DOCOMPUTE$"] = [ + 
"""{}<ImgDim1, OutputDim1, Padding1, NumChannels1,SIMD1, + {}, PaddingStyle1> (in0, out, numReps);""".format( + hls_call, in_t + ) + ] + else: + hls_call = "FMPadding_nonsquare_Batch" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{}<OutputDim1_x, OutputDim1_y, Padding1_x, Padding1_y, NumChannels1, + SIMD1, {}, PaddingStyle1> (in0, out, numReps);""".format( + hls_call, in_t + ) + ] def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -270,7 +327,7 @@ class FMPadding_Batch(HLSCustomOp): assert ( inp.shape == exp_ishape ), """Input shape doesn't - match expected shape (1, ImgDim, ImgDim, NumChannels).""" + match expected shape (1, ImgDim_h, ImgDim_w, NumChannels).""" export_idt = self.get_input_datatype() reshaped_input = inp.reshape(folded_ishape) @@ -316,4 +373,4 @@ class FMPadding_Batch(HLSCustomOp): assert ( context[node.output[0]].shape == exp_oshape ), """Output shape doesn't match expected shape - (1, OutputDim, OutputDim, NumChannels).""" + (1, OutputDim_H, OutputDim_W, NumChannels).""" diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 2ab070b2fdc059a554930345a81abc368c29bfa7..c07188430244b635ab6b1ec192337da74550d57d 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -38,11 +38,11 @@ from finn.util.basic import ( roundup_to_integer_multiple, get_rtlsim_trace_depth, ) -from finn.util.fpgadataflow import ( - IPGenBuilder, +from finn.util.pyverilator import ( pyverilate_get_liveness_threshold_cycles, rtlsim_multi_io, ) +from finn.util.hls import CallHLS from . import templates try: @@ -310,11 +310,11 @@ class HLSCustomOp(CustomOp): return [] def ipgen_singlenode_code(self): - """Builds the bash script for ip generation using the IPGenBuilder from - finn.util.fpgadataflow.""" + """Builds the bash script for ip generation using the CallHLS from + finn.util.hls.""" node = self.onnx_node code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - builder = IPGenBuilder() + builder = CallHLS() builder.append_tcl(code_gen_dir + "/hls_syn_{}.tcl".format(node.name)) builder.set_ipgen_path(code_gen_dir + "/project_{}".format(node.name)) builder.build(code_gen_dir) diff --git a/src/finn/custom_op/fpgadataflow/streamingdataflowpartition.py b/src/finn/custom_op/fpgadataflow/streamingdataflowpartition.py new file mode 100644 index 0000000000000000000000000000000000000000..53446ff1f2aba30e69bf188c1673c738440567fb --- /dev/null +++ b/src/finn/custom_op/fpgadataflow/streamingdataflowpartition.py @@ -0,0 +1,94 @@ +# Copyright (c) 2020 Xilinx, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of Xilinx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from finn.custom_op.base import CustomOp
+
+# TODO move StreamingDataflowPartition to HLSCustomOp base class
+
+
+class StreamingDataflowPartition(CustomOp):
+    """Class that corresponds to the meta/container node StreamingDataflowPartition
+    which is a placeholder for a group of fpgadataflow nodes that have been separated
+    out into a FINN-ONNX model of its own. Note that it does not produce any HLS or
+    bitfile by itself."""
+
+    def get_nodeattr_types(self):
+        return {
+            "model": ("s", True, ""),
+            "res_estimate": ("s", False, ""),
+            "res_hls": ("s", False, ""),
+            "res_synth": ("s", False, ""),
+            "slr": ("i", False, -1),
+            "partition_id": ("i", False, 0),
+            "device_id": ("i", False, 0),
+            "mem_port": ("s", False, ""),
+        }
+
+    def make_shape_compatible_op(self, model):
+        pass
+
+    def infer_node_datatype(self, model):
+        pass
+
+    def execute_node(self, context, graph):
+        # TODO add RPC execution with synthesized bitfile?
+        # whole-design rtlsim with PyVerilator may also be an alternative
+        pass
+
+    def verify_node(self):
+        info_messages = []
+
+        # verify number of attributes
+        num_of_attr = 1
+        if len(self.onnx_node.attribute) == num_of_attr:
+            info_messages.append("The number of attributes is correct")
+        else:
+            info_messages.append(
+                """The number of attributes is incorrect,
+            {} should have {} attributes""".format(
+                    self.onnx_node.op_type, num_of_attr
+                )
+            )
+        # verify that all necessary attributes exist
+        try:
+            self.get_nodeattr("model")
+            info_messages.append("All necessary attributes exist")
+        except Exception:
+            info_messages.append(
+                """The necessary attributes do not exist.
+ StreamingDataflowPartition needs the following attribute(s): + model""" + ) + + # verify the number of inputs + if len(self.onnx_node.input) >= 1: + info_messages.append("The number of inputs is correct") + else: + info_messages.append("StreamingDataflowPartition needs 1 data input") + + return info_messages diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py index 9a897d9fa16064017dfc02f500d2360ae8431b4a..fead30650c60d38f9cd70de8f1515f847e15276f 100644 --- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py +++ b/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py @@ -26,9 +26,9 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): def get_nodeattr_types(self): my_attrs = { "PE": ("i", True, 0), - "Dim": ("i", True, 0), + "Dim": ("ints", True, []), # [H, W] "Channels": ("i", True, 0), - "Kernel": ("i", True, 0), + "Kernel": ("ints", True, []), # [H, W] "resType": ("s", False, "auto", {"auto", "lut", "dsp"}), "ActVal": ("i", False, 0), # FINN DataTypes for inputs, weights, outputs @@ -45,10 +45,10 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): def minimize_accumulator_width(self, model): weights = model.get_initializer(self.onnx_node.input[1]) - k = self.get_nodeattr("Kernel") + k_h, k_w = self.get_nodeattr("Kernel") fm = self.get_nodeattr("Channels") # put weights into the shape expected by calculate_matvec_accumulator_range - weights = weights.reshape(fm, k * k).transpose() + weights = weights.reshape(fm, k_h * k_w).transpose() if len(self.onnx_node.input) > 2: thresholds = model.get_initializer(self.onnx_node.input[2]) else: @@ -85,9 +85,11 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): tdt = DataType.get_smallest_possible(0 - tdt_max) else: tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)(threshold_tensor).all(), ( - "Thresholds in %s can't be expressed with type %s" - % (self.onnx_node.name, str(tdt)) + assert np.vectorize(tdt.allowed)( + threshold_tensor + ).all(), "Thresholds in %s can't be expressed with type %s" % ( + self.onnx_node.name, + str(tdt), ) self.set_nodeattr("accDataType", tdt.name) else: @@ -110,9 +112,9 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): def calc_wmem(self): """Calculates and returns WMEM.""" ch = self.get_nodeattr("Channels") - k = self.get_nodeattr("Kernel") + k_h, k_w = self.get_nodeattr("Kernel") pe = self.get_nodeattr("PE") - wmem = k * k * ch // pe + wmem = k_h * k_w * ch // pe return wmem def calc_tmem(self): @@ -181,34 +183,34 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): return out_width def get_folded_input_shape(self): - k = self.get_nodeattr("Kernel") - sf = k * k - dim = self.get_nodeattr("Dim") + k_h, k_w = self.get_nodeattr("Kernel") + sf = k_h * k_w + dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") nf = ch // pe - folded_input_shape = tuple([1, dim, dim, sf * nf, pe]) + folded_input_shape = tuple([1, dim_h, dim_w, sf * nf, pe]) return folded_input_shape def get_folded_output_shape(self): ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") nf = ch // pe - dim = self.get_nodeattr("Dim") - folded_output_shape = tuple([1, dim, dim, nf, pe]) + dim_h, dim_w = self.get_nodeattr("Dim") + folded_output_shape = tuple([1, dim_h, dim_w, nf, pe]) return folded_output_shape def get_normal_input_shape(self): - dim = self.get_nodeattr("Dim") + dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") - k 
= self.get_nodeattr("Kernel") - normal_input_shape = tuple([1, dim, dim, k * k * ch]) + k_h, k_w = self.get_nodeattr("Kernel") + normal_input_shape = tuple([1, dim_h, dim_w, k_h * k_w * ch]) return normal_input_shape def get_normal_output_shape(self): ch = self.get_nodeattr("Channels") - dim = self.get_nodeattr("Dim") - normal_output_shape = tuple([1, dim, dim, ch]) + dim_h, dim_w = self.get_nodeattr("Dim") + normal_output_shape = tuple([1, dim_h, dim_w, ch]) return normal_output_shape def get_number_output_values(self): @@ -218,13 +220,13 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): def get_exp_cycles(self): pe = self.get_nodeattr("PE") ch = self.get_nodeattr("Channels") - dim = self.get_nodeattr("Dim") - k = self.get_nodeattr("Kernel") + dim_h, dim_w = self.get_nodeattr("Dim") + k_h, k_w = self.get_nodeattr("Kernel") # currently FINN supports for vvau a batch size of 1 batch_size = 1 # since mmv != 1 is not supported yet, we set mmv for now to 1 mmv = 1 - exp_cycles = ((ch * k * k) / pe) * batch_size * (dim * dim) / mmv + exp_cycles = ((ch * k_h * k_w) / pe) * batch_size * (dim_h * dim_w) / mmv return int(exp_cycles) def get_template_param_values(self): @@ -251,17 +253,17 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): def get_hls_compatible_weight_tensor(self, orig_weight_matrix): pe = self.get_nodeattr("PE") ch = self.get_nodeattr("Channels") - k = self.get_nodeattr("Kernel") + k_h, k_w = self.get_nodeattr("Kernel") wmem = self.calc_wmem() assert orig_weight_matrix.shape == ( ch, 1, - k, - k, + k_h, + k_w, ), """Weights matrix doesn't have expected shape (channels, 1, kernel_size, kernel_size)""" ret = orig_weight_matrix - ret = ret.reshape(ch, k * k) + ret = ret.reshape(ch, k_h * k_w) # distribute rows between PEs ret = interleave_matrix_outer_dim_from_partitions(ret, pe) ret = ret.reshape(1, pe, wmem, 1) @@ -338,9 +340,11 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) # get computed threshold datatype from attribute tdt = DataType[self.get_nodeattr("accDataType")] - assert np.vectorize(tdt.allowed)(threshold_tensor).all(), ( - "Thresholds in %s can't be expressed with type %s" - % (self.onnx_node.name, str(tdt)) + assert np.vectorize(tdt.allowed)( + threshold_tensor + ).all(), "Thresholds in %s can't be expressed with type %s" % ( + self.onnx_node.name, + str(tdt), ) thresholds_hls_code = numpy_to_hls_code( threshold_tensor, tdt, "thresholds", False, True @@ -455,10 +459,10 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): self.code_gen_dict["$GLOBALS$"] += ['#include "thresh.h"'] def defines(self, var): - dim = self.get_nodeattr("Dim") - numReps = 1 * dim * dim - kernel = self.get_nodeattr("Kernel") - innerProdDim = kernel * kernel + dim_h, dim_w = self.get_nodeattr("Dim") + numReps = 1 * dim_h * dim_w + k_h, k_w = self.get_nodeattr("Kernel") + innerProdDim = k_h * k_w self.code_gen_dict["$DEFINES$"] = [ """#define Channels1 {}\n #define InnerProdDim {}\n #define SIMD1 1\n #define PE1 {}\n #define numReps {}""".format( @@ -664,8 +668,8 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): else: mult_luts = (2 * math.ceil((W + A) / 6) - 1) * (W + A) # accumulator - k = self.get_nodeattr("Kernel") - acc_bits = W + A + math.ceil(math.log(k * k, 2)) + k_h, k_w = self.get_nodeattr("Kernel") + acc_bits = W + A + math.ceil(math.log(k_h * k_w, 2)) acc_luts = acc_bits # thresholds and threshold comparators thr_luts = 0 @@ -694,20 +698,20 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): return 
int(mult_dsp) def get_op_and_param_counts(self): - k = self.get_nodeattr("Kernel") + k_h, k_w = self.get_nodeattr("Kernel") fm = self.get_nodeattr("Channels") - dim = self.get_nodeattr("Dim") + dim_h, dim_w = self.get_nodeattr("Dim") weight_bits = self.get_weight_datatype().bitwidth() inp_bits = self.get_input_datatype().bitwidth() - num_repetitions = int(dim * dim) - mac_count = k * k * fm * num_repetitions + num_repetitions = int(dim_h * dim_w) + mac_count = k_h * k_w * fm * num_repetitions # cannonicalize op type: highest bitwidth operand first s.t. # e.g. mac_8bx4b and mac_4bx8b don't appear as two different op types bw1 = min(inp_bits, weight_bits) bw2 = max(inp_bits, weight_bits) mac_op_type = "op_mac_%dbx%db" % (bw1, bw2) weight_param_type = "param_weight_%db" % (weight_bits) - weight_count = k * k * fm + weight_count = k_h * k_w * fm ret_dict = {mac_op_type: mac_count, weight_param_type: weight_count} if self.get_nodeattr("noActivation") == 0: tdt = DataType[self.get_nodeattr("accDataType")] diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index a221b510ab8d22f4daca1c32e717a9b482246712..1f3d40e929e29d16790a491bbfd7a4a5033f866f 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -64,30 +64,28 @@ class InferConvInpGen(Transformation): warnings.warn("Input is not int. Can't infer ConvInpGen") continue i2c_inst = getCustomOp(n) - stride = i2c_inst.get_nodeattr("stride") - k_attr = i2c_inst.get_nodeattr("kernel_size") - k_h = k_attr[0] - k_w = k_attr[1] + stride_h, stride_w = i2c_inst.get_nodeattr("stride") + k_h, k_w = i2c_inst.get_nodeattr("kernel_size") pad_attr = i2c_inst.get_nodeattr("pad_amount") pad_h = pad_attr[0] + pad_attr[2] pad_w = pad_attr[1] + pad_attr[3] + dilation_h, dilation_w = i2c_inst.get_nodeattr("dilations") # temporary checks until non-square conv support is finalized - assert pad_h == pad_w, "Non-square images not yet supported." - assert k_h == k_w, "Non-square kernels not yet supported." 
- k = k_h - pad = pad_attr[0] pad_val = i2c_inst.get_nodeattr("pad_value") depthwise = i2c_inst.get_nodeattr("depthwise") ifm_ch = i2c_in_shape[-1] - ifm_dim = i2c_in_shape[1] - ofm_dim = i2c_out_shape[1] + ifm_dim_h = i2c_in_shape[1] + ifm_dim_w = i2c_in_shape[2] + ofm_dim_h = i2c_out_shape[1] + ofm_dim_w = i2c_out_shape[2] # default params for ConvolutionInputGenerator ConvInpGen_node_idx = node_ind ConvInpGen_input = i2c_input - ConvInpGen_idim = ifm_dim + ConvInpGen_idim_h = ifm_dim_h + ConvInpGen_idim_w = ifm_dim_w - if pad > 0: + if pad_h > 0 or pad_w > 0: # if padding enabled, ensure pad_val supported by DataType # assert dt.allowed(pad_val),"""FMPadding_Batch DataType # must support pad_val""" @@ -95,12 +93,13 @@ class InferConvInpGen(Transformation): pad_val == 0 ), "FMPadding_Batch doesn't currently support pad_val!= 0" - odim_padding = ifm_dim + 2 * pad + odim_padding_h = ifm_dim_h + pad_h + odim_padding_w = ifm_dim_w + pad_w padding_out = helper.make_tensor_value_info( model.make_new_valueinfo_name(), TensorProto.FLOAT, - (1, odim_padding, odim_padding, ifm_ch), + (1, odim_padding_h, odim_padding_w, ifm_ch), ) graph.value_info.append(padding_out) padding_out = padding_out.name @@ -108,7 +107,8 @@ class InferConvInpGen(Transformation): ConvInpGen_node_idx += 1 ConvInpGen_input = padding_out - ConvInpGen_idim = odim_padding + ConvInpGen_idim_h = odim_padding_h + ConvInpGen_idim_w = odim_padding_w padding_node = helper.make_node( "FMPadding_Batch", @@ -116,15 +116,31 @@ class InferConvInpGen(Transformation): [padding_out], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", - ImgDim=ifm_dim, - Padding=2 * pad, + ImgDim=[ifm_dim_h, ifm_dim_w], + Padding=pad_attr, NumChannels=ifm_ch, inputDataType=dt.name, SIMD=ifm_ch, ) graph.node.insert(node_ind, padding_node) - if stride > 1 and k == 1: + # Ensure that only supported HLS nodes are inserted + is_square_image = ConvInpGen_idim_h == ConvInpGen_idim_w + is_square_kernel = k_h == k_w + is_kernel_pointwise = k_h == 1 and k_w == 1 + is_equal_stride = stride_h == stride_w + is_1d_convolution = (k_h == 1 and k_w > 1 and ifm_dim_h == 1) or ( + k_h > 1 and k_w == 1 and ifm_dim_w == 1 + ) + + if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: + assert ( + is_square_image + ), "DownSampler currently only supports square input images." 
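+                # e.g. (hypothetical values): stride=[2, 2] with a 1x1 kernel
+                # on a square input maps onto a DownSampler node, while a
+                # non-equal stride such as [2, 1] is rejected by the check below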
+ assert is_equal_stride, """DownSampler currently only supports equal stride value + along different axes.""" + ConvInpGen_idim = ConvInpGen_idim_h + stride = stride_h # create DownSampler node ConvInpGen_node = helper.make_node( "DownSampler", @@ -141,22 +157,58 @@ class InferConvInpGen(Transformation): graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) else: # create equivalent ConvolutionInputGenerator node - ConvInpGen_node = helper.make_node( - "ConvolutionInputGenerator", - [ConvInpGen_input], - [i2c_output], - domain="finn.custom_op.fpgadataflow", - backend="fpgadataflow", - ConvKernelDim=k, - IFMChannels=ifm_ch, - IFMDim=ConvInpGen_idim, - OFMDim=ofm_dim, - SIMD=ifm_ch, - Stride=stride, - inputDataType=dt.name, - outputDataType=dt.name, - depthwise=depthwise, - ) + if ( + is_square_image and is_square_kernel + ): # square images and square kernels + assert is_equal_stride, """Non-equal strides along different axes is not supported + for (non-)square convolutions""" + assert ( + dilation_h == 1 and dilation_w == 1 + ), """Dilation value != 1 is not supported + for square convolutions""" + ConvInpGen_node = helper.make_node( + "ConvolutionInputGenerator", + [ConvInpGen_input], + [i2c_output], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + SIMD=ifm_ch, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=dt.name, + outputDataType=dt.name, + depthwise=depthwise, + ) + else: # non-square images and/or kernels + assert ( + is_1d_convolution + ), "ConvultionInputGenerator1D works only for 1D convolutions" + if dilation_h > 1 or dilation_w > 1: + assert ( + stride_h == 1 and stride_w == 1 + ), """Stride value of greater than 1 is not supported for convolutions + with dilation value greater than 1""" + ConvInpGen_node = helper.make_node( + "ConvolutionInputGenerator1D", + [ConvInpGen_input], + [i2c_output], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + SIMD=ifm_ch, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=dt.name, + outputDataType=dt.name, + depthwise=depthwise, + ) graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) # remove old nodes graph.node.remove(n) @@ -338,7 +390,7 @@ class InferPool_Batch(Transformation): [im2col_in], [im2col_out], domain="finn.custom_op.general", - stride=stride, + stride=[stride, stride], kernel_size=[k, k], pad_amount=[pad, pad, pad, pad], pad_value=pad_value, @@ -684,7 +736,7 @@ class InferVVAU(Transformation): ): sparsity = model.get_tensor_sparsity(n.input[1]) try: - k = sparsity["dw"]["kernel_shape"] + k_h, k_w = sparsity["dw"]["kernel_shape"] except KeyError: raise Exception( """Sparsity doesn't indicate that MatMul @@ -702,25 +754,25 @@ class InferVVAU(Transformation): mm_output = n.output[0] W = model.get_initializer(mm_weight) # infer dense weight tensor from sparse weight matrix - # kernel size k which was extracted above and the value of + # kernel size (k_h, k_w) which was extracted above and the value of # the channels is used. 
- # the weight matrix has a shape of (k * k * Channels, Channels) + # the weight matrix has a shape of (k_h * k_w * Channels, Channels) # we need to reverse the creation of the sparse weight matrix - # to achieve a weight tensor of shape (Channels, 1, k, k) + # to achieve a weight tensor of shape (Channels, 1, k_h, k_w) channels = int(W.shape[1]) - # transpose to achieve a shape of (k * k * Channels, Channels) + # transpose to achieve a shape of (k_h * k_w * Channels, Channels) W = W.T - # reshape to (Channels, k, k, Channels) to transpose afterwards - # to (Channels, Channels, k, k) - W = W.reshape(channels, k, k, channels) + # reshape to (Channels, k_h, k_w, Channels) to transpose afterwards + # to (Channels, Channels, k_h, k_w) + W = W.reshape(channels, k_h, k_w, channels) W = W.transpose(0, 3, 1, 2) # now we can extract the values using a for loop over the channels # and fill a zero numpy array in the correct shape - w_tensor = np.zeros((channels, 1, k, k)) + w_tensor = np.zeros((channels, 1, k_h, k_w)) for ch in range(channels): w_tensor[ch][0] = W[ch][ch] model.set_initializer(mm_weight, w_tensor) - model.set_tensor_shape(mm_weight, (channels, 1, k, k)) + model.set_tensor_shape(mm_weight, (channels, 1, k_h, k_w)) # create node with pe=channels as default pe = channels assert ( @@ -762,9 +814,9 @@ class InferVVAU(Transformation): backend="fpgadataflow", resType="lut", PE=pe, - Dim=mm_in_shape[1], + Dim=[mm_in_shape[1], mm_in_shape[2]], Channels=channels, - Kernel=k, + Kernel=[k_h, k_w], inputDataType=idt.name, weightDataType=wdt.name, outputDataType=odt.name, @@ -790,9 +842,9 @@ class InferVVAU(Transformation): backend="fpgadataflow", resType="lut", PE=pe, - Dim=mm_in_shape[1], + Dim=[mm_in_shape[1], mm_in_shape[2]], Channels=channels, - Kernel=k, + Kernel=[k_h, k_w], inputDataType=idt.name, weightDataType=wdt.name, outputDataType=odt.name, @@ -1345,7 +1397,11 @@ class InferGlobalAccPoolLayer(Transformation): ) model.graph.value_info.append(mul_value) model.set_initializer(mul_value.name, np.array(1 / (vecs[1] * vecs[2]))) - new_mul = helper.make_node("Mul", [pool_out, mul_value.name], [result],) + new_mul = helper.make_node( + "Mul", + [pool_out, mul_value.name], + [result], + ) graph.node.insert(insert_point, new_pool) graph.node.insert(insert_point + 1, new_mul) node_ind += 1 diff --git a/src/finn/transformation/fpgadataflow/create_dataflow_partition.py b/src/finn/transformation/fpgadataflow/create_dataflow_partition.py index 56bfb4306e555c716a9156d6f0949c339193eb38..419a6d8c494651862f55e63e6829a61fe8040599 100644 --- a/src/finn/transformation/fpgadataflow/create_dataflow_partition.py +++ b/src/finn/transformation/fpgadataflow/create_dataflow_partition.py @@ -148,7 +148,7 @@ class CreateDataflowPartition(Transformation): [df_out], # use the model attribute to mark the df model model=df_model_filename, - domain="finn.custom_op.general", + domain="finn.custom_op.fpgadataflow", partition_id=target_partition_id, slr=slr, mem_port=mem_port, diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index c0ac1319dd520794afd66f187b35e529739e5cd7..1ce936cd79c2257897e74430d00e5082c51c9320 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -29,11 +29,9 @@ def _suitable_node(node): def _suitable_folded_shapes(ishape, oshape): - i_dummy = np.random.rand(*ishape) - o_dummy = np.random.rand(*oshape) - ishape_canonical = np.squeeze(i_dummy).shape - 
oshape_canonical = np.squeeze(o_dummy).shape
-    return ishape_canonical == oshape_canonical
+    matching_stream_width = ishape[-1] == oshape[-1]
+    matching_size = np.prod(ishape) == np.prod(oshape)
+    return matching_stream_width and matching_size


 class InsertFIFO(Transformation):
diff --git a/src/finn/transformation/fpgadataflow/make_zynq_proj.py b/src/finn/transformation/fpgadataflow/make_zynq_proj.py
index 3dab426ccf9bab73ddac83299bdc47f89ea46bdc..f2f172139ed9144eccdfbe37d0ae1a0695d049e4 100644
--- a/src/finn/transformation/fpgadataflow/make_zynq_proj.py
+++ b/src/finn/transformation/fpgadataflow/make_zynq_proj.py
@@ -265,7 +265,10 @@ class MakeZYNQProject(Transformation):
             vivado_pynq_proj_dir + "/finn_zynq_link.runs/impl_1/top_wrapper.bit"
         )
         if not os.path.isfile(bitfile_name):
-            raise Exception("Synthesis failed, no bitfile found")
+            raise Exception(
+                "Synthesis failed, no bitfile found. Check logs under %s"
+                % vivado_pynq_proj_dir
+            )
         deploy_bitfile_name = vivado_pynq_proj_dir + "/resizer.bit"
         copy(bitfile_name, deploy_bitfile_name)
         # set bitfile attribute
@@ -275,7 +278,10 @@ class MakeZYNQProject(Transformation):
             + "/finn_zynq_link.srcs/sources_1/bd/top/hw_handoff/top.hwh"
         )
         if not os.path.isfile(hwh_name):
-            raise Exception("Synthesis failed, no hardware handoff file found")
+            raise Exception(
+                "Synthesis failed, no hardware handoff file found. Check logs under %s"
+                % vivado_pynq_proj_dir
+            )
         deploy_hwh_name = vivado_pynq_proj_dir + "/resizer.hwh"
         copy(hwh_name, deploy_hwh_name)
         model.set_metadata_prop("hw_handoff", deploy_hwh_name)
diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py
index f7d59978d8f8866aefb3028d570bb6b434df33b4..ea27eee04db6f90b50a58296ceaf6f6ed58602ac 100644
--- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py
+++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py
@@ -39,8 +39,8 @@ from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP
 from finn.transformation.fpgadataflow.insert_dwc import InsertDWC
 from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO
 from finn.transformation.general import GiveUniqueNodeNames, GiveReadableTensorNames
-from finn.util.fpgadataflow import pyverilate_stitched_ip, is_fpgadataflow_node
-from finn.util.pyverilator import reset_rtlsim, toggle_clk
+from finn.util.fpgadataflow import is_fpgadataflow_node
+from finn.util.pyverilator import pyverilate_stitched_ip, reset_rtlsim, toggle_clk


 def reset_implementation(node):
diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py
index 9c0169a98f515d0b32e10bdfc834eca5fb681ffd..bd5de66cf575cd2e4eee0bf064a611e736a4b76e 100644
--- a/src/finn/transformation/fpgadataflow/templates.py
+++ b/src/finn/transformation/fpgadataflow/templates.py
@@ -115,7 +115,7 @@ if {$BOARD == "ZCU104"} {
     set_property board_part xilinx.com:zcu102:part0:3.3 [current_project]
     set ZYNQ_TYPE "zynq_us+"
 } elseif {$BOARD == "Ultra96"} {
-    set_property board_part em.avnet.com:ultra96v1:part0:1.2 [current_project]
+    set_property board_part avnet.com:ultra96v1:part0:1.2 [current_project]
     set ZYNQ_TYPE "zynq_us+"
 } elseif {$BOARD == "Pynq-Z2"} {
     set ZYNQ_TYPE "zynq_7000"
diff --git a/src/finn/transformation/move_reshape.py b/src/finn/transformation/move_reshape.py
index cb8deaeec4b79d3c47d7705ff8f9bf72a085dfc0..990b858ad62aec00be4be4e0dd30bef3eb9e3ce3 100644
--- a/src/finn/transformation/move_reshape.py
+++ b/src/finn/transformation/move_reshape.py
@@
-1,5 +1,7 @@ from finn.transformation.base import Transformation from finn.util.basic import get_by_name, is_finn_op +from finn.custom_op.registry import getCustomOp +import warnings def _is_fpgadataflow_node(node): @@ -18,33 +20,66 @@ def _is_fpgadataflow_node(node): class RemoveCNVtoFCFlatten(Transformation): - """Removes a node that implements a (1, -1) reshape if it is - between two fpgadataflow nodes""" + """Removes a flatten node if it is between two fpgadataflow nodes. + For an NHWC-Conv to FC transition, the preceding transpose is absorbed. + The flatten operation can also be implemented by a reshape node.""" def apply(self, model): - graph = model.graph graph_modified = False for n in graph.node: - if n.op_type == "Reshape": - shape = model.get_initializer(n.input[1]) - if (shape == [1, -1]).all(): + # also support implicit flatten via reshape, e.g. reshape(1,-1) + if n.op_type == "Flatten" or n.op_type == "Reshape": + ishape = model.get_tensor_shape(n.input[0]) + oshape = model.get_tensor_shape(n.output[0]) + if len(oshape) == 2 and ishape[0] == oshape[0]: producer = model.find_producer(n.input[0]) if _is_fpgadataflow_node(producer) is True: + # standalone flatten, remove consumer = model.find_consumer(n.output[0]) if _is_fpgadataflow_node(consumer) is True: graph_modified = True consumer.input[0] = n.input[0] graph.node.remove(n) elif producer.op_type == "Transpose": + # transpose + flatten, absorb into following node transp_node = producer - producer = model.find_producer(transp_node.input[0]) - if _is_fpgadataflow_node(producer) is True: - consumer = model.find_consumer(n.output[0]) - if _is_fpgadataflow_node(consumer) is True: - graph_modified = True - consumer.input[0] = transp_node.input[0] - graph.node.remove(n) - graph.node.remove(transp_node) + # check if transpose converts NHWC to NCHW + perms = list(get_by_name(transp_node.attribute, "perm").ints) + if perms == [0, 3, 1, 2]: + producer = model.find_producer(transp_node.input[0]) + if _is_fpgadataflow_node(producer) is True: + consumer = model.find_consumer(n.output[0]) + if consumer.op_type == "StreamingFCLayer_Batch": + fc_inst = getCustomOp(consumer) + mw = fc_inst.get_nodeattr("MW") + mh = fc_inst.get_nodeattr("MH") + (b, h, w, c) = model.get_tensor_shape( + transp_node.input[0] + ) + # absorb transpose into weight matrix, + # allowing FC layer to operate on the NHWC input + W = model.get_initializer(consumer.input[1]) + assert ( + W is not None + ), "Initializer for matmul weights is not set." 
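+                                # shape walk-through with assumed example sizes
+                                # c=64, h=w=4, mh=10 (so mw = c*h*w = 1024):
+                                # (1024, 10) -> (64, 4, 4, 10) -> (4, 4, 64, 10)
+                                # -> (1024, 10), i.e. the weight rows are
+                                # reordered from CHW to HWC order to match the
+                                # NHWC input that now feeds the FC layer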
+ W_new = W.reshape(c, h, w, mh) + W_new = W_new.transpose((1, 2, 0, 3)) + W_new = W_new.reshape(mw, mh) + model.set_initializer(consumer.input[1], W_new) + # remove transpose & flatten nodes + consumer.input[0] = transp_node.input[0] + graph.node.remove(n) + graph.node.remove(transp_node) + graph_modified = True + else: + warnings.warn( + "Could not absorb transpose->flatten \ + into subsequent node" + ) + else: + warnings.warn( + "Unsupported transpose node before flatten layer" + ) return (model, graph_modified) diff --git a/src/finn/transformation/streamline/__init__.py b/src/finn/transformation/streamline/__init__.py index 876f8892dbc9c42189ee8dc06ff5eb407f7a0946..97cd957ce166255c1442a544f6e865b42c33a1df 100644 --- a/src/finn/transformation/streamline/__init__.py +++ b/src/finn/transformation/streamline/__init__.py @@ -60,6 +60,7 @@ from finn.transformation.streamline.reorder import ( MoveAddPastConv, MoveScalarMulPastConv, MoveMulPastMaxPool, + MoveScalarLinearPastInvariants, ) from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds @@ -78,6 +79,7 @@ class Streamline(Transformation): BatchNormToAffine(), ConvertSignToThres(), MoveMulPastMaxPool(), + MoveScalarLinearPastInvariants(), AbsorbSignBiasIntoMultiThreshold(), MoveAddPastMul(), MoveScalarAddPastMatMul(), diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py index fa2d7a714ad894ebb19099c7ed73e42e12ffdf44..8237d8bf2f05ab93c61f4f6c58d0e049195e223d 100644 --- a/src/finn/transformation/streamline/absorb.py +++ b/src/finn/transformation/streamline/absorb.py @@ -308,8 +308,8 @@ class Absorb1BitMulIntoConv(Transformation): class AbsorbTransposeIntoMultiThreshold(Transformation): - """Change (NHWCTranpose -> MultiThreshold -> NCHWTranspose) to (MultiThreshold) - with NHWC mode.""" + """Change (NCHWTranspose -> MultiThreshold -> NHWCTranspose) to (MultiThreshold) + with NHWC mode. 
For (NCHWTranspose -> MultiThreshold) move Transpose past MT."""

     def apply(self, model):
         graph = model.graph
@@ -338,24 +338,26 @@ class AbsorbTransposeIntoMultiThreshold(Transformation):
                         graph.node.remove(n)
                         graph.node.remove(final_t_cand)
                         graph_modified = True
-                    elif final_t_cand.op_type == "Reshape":
-                        oshape = model.get_tensor_shape(final_t_cand.output[0])
-                        if len(oshape) == 2:
-                            # transition to FC part, can still use NHWC
-                            mt = getCustomOp(mt_cand)
-                            mt.set_nodeattr("data_layout", "NHWC")
-                            # get rid of first tranpose node
-                            mt_cand.input[0] = n.input[0]
-                            # fix output shape for MultiThreshold
-                            mt_ishape = model.get_tensor_shape(mt_cand.input[0])
-                            (b, h, w, c) = mt_ishape
-                            assert (
-                                h == 1 and w == 1
-                            ), """Untested spatial dim
-                            in conv->fc transition, proceed with caution!"""
-                            model.set_tensor_shape(mt_cand.output[0], mt_ishape)
-                            graph.node.remove(n)
-                            graph_modified = True
+                    else:
+                        mt = getCustomOp(mt_cand)
+                        mt.set_nodeattr("data_layout", "NHWC")
+                        # get rid of first transpose node
+                        mt_cand.input[0] = n.input[0]
+                        graph.node.remove(n)
+                        # fix output shape for MultiThreshold
+                        mt_ishape = model.get_tensor_shape(mt_cand.input[0])
+                        model.set_tensor_shape(mt_cand.output[0], mt_ishape)
+                        # re-insert Transpose behind MultiThreshold
+                        transpose_output = model.make_new_valueinfo_name()
+                        new_transpose = oh.make_node(
+                            "Transpose",
+                            [mt_cand.output[0]],
+                            [transpose_output],
+                            perm=[0, 3, 1, 2],
+                        )
+                        graph.node.insert(node_ind + 1, new_transpose)
+                        final_t_cand.input[0] = transpose_output
+                        graph_modified = True
         if graph_modified:
             model = model.transform(InferDataTypes())
         return (model, graph_modified)
diff --git a/src/finn/transformation/streamline/remove.py b/src/finn/transformation/streamline/remove.py
index 12c6984c6e66e1917d2a1e0a74c8620ccb6afabc..0abcf441f9636a52f9194df325874d293530f78a 100644
--- a/src/finn/transformation/streamline/remove.py
+++ b/src/finn/transformation/streamline/remove.py
@@ -32,8 +32,31 @@ from finn.transformation.infer_shapes import InferShapes
 import numpy as np


+def _remove_node_and_rewire(model, node):
+    producer = model.find_producer(node.input[0])
+    if producer is not None:
+        # wire output tensor to
+        # output of producer node
+        producer.output[0] = node.output[0]
+    else:
+        # node is first in graph
+        consumer = model.find_consumer(node.output[0])
+        assert consumer is not None, "Whole graph is identity"
+        assert consumer.input[0] == node.output[0]
+        # rewire consumer's input directly to graph input
+        consumer.input[0] = node.input[0]
+    # remove node
+    model.graph.node.remove(node)
+
+
 class RemoveIdentityOps(Transformation):
-    """Remove identity ops like Add/Sub with zero or Mul/Div with one"""
+    """Remove identity ops like Add/Sub with zero or Mul/Div with one.
A tolerance + value (defaults to 1e-05) can be specified during init for the comparison + to zero/one.""" + + def __init__(self, atol=1e-05): + super().__init__() + self.atol = atol def apply(self, model): graph = model.graph @@ -47,12 +70,11 @@ class RemoveIdentityOps(Transformation): and not model.is_join_node(n) ): A = model.get_initializer(n.input[1]) - if A is not None and (A == np.zeros_like(A)).all(): - producer = model.find_producer(n.input[0]) - # remove node and wire output tensor to - # output of producer node - producer.output[0] = n.output[0] - graph.node.remove(n) + if ( + A is not None + and np.isclose(A, np.zeros_like(A), atol=self.atol).all() + ): + _remove_node_and_rewire(model, n) elif ( n.op_type in ["Mul", "Div"] @@ -60,11 +82,10 @@ class RemoveIdentityOps(Transformation): and not model.is_join_node(n) ): A = model.get_initializer(n.input[1]) - if A is not None and (A == np.ones_like(A)).all(): - producer = model.find_producer(n.input[0]) - # remove node and wire output tensor to - # output of producer node - producer.output[0] = n.output[0] - graph.node.remove(n) + if ( + A is not None + and np.isclose(A, np.ones_like(A), atol=self.atol).all() + ): + _remove_node_and_rewire(model, n) model = model.transform(InferShapes()) return (model, graph_modified) diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index 7163a95c4dbbe5c8bcee4ebeea87c5e9611c179e..4049d7bc8b555dce6f3ab6f9475f6aebf41fbc2c 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -645,7 +645,8 @@ class MoveScalarLinearPastInvariants(Transformation): class MakeMaxPoolNHWC(Transformation): - """Convert (MaxPool, NHWCTranpose) into (MaxPoolNHWC).""" + """Convert (MaxPool, NHWCTranspose) into (NHWCTranspose, MaxPoolNHWC) + and (NCHWTranspose, MaxPool) into (MaxPoolNHWC, NCHWTranspose).""" def apply(self, model): graph = model.graph @@ -655,6 +656,7 @@ class MakeMaxPoolNHWC(Transformation): node_ind += 1 if n.op_type == "MaxPool": consumer = model.find_consumer(n.output[0]) + producer = model.find_producer(n.input[0]) if consumer is not None and consumer.op_type == "Transpose": perms = list(get_by_name(consumer.attribute, "perm").ints) if perms == [0, 2, 3, 1]: @@ -674,6 +676,25 @@ class MakeMaxPoolNHWC(Transformation): graph.node.remove(consumer) graph.node.insert(node_ind - 1, consumer) graph_modified = True + elif producer is not None and producer.op_type == "Transpose": + perms = list(get_by_name(producer.attribute, "perm").ints) + if perms == [0, 3, 1, 2]: + n.op_type = "MaxPoolNHWC" + n.domain = "finn.custom_op.general" + start_name = producer.input[0] + mid_name = n.input[0] + end_name = n.output[0] + (b, hi, wi, c) = model.get_tensor_shape(start_name) + (b, c, ho, wo) = model.get_tensor_shape(end_name) + producer.input[0] = mid_name + producer.output[0] = end_name + n.input[0] = start_name + n.output[0] = mid_name + model.set_tensor_shape(mid_name, (b, ho, wo, c)) + model.set_tensor_shape(end_name, (b, c, ho, wo)) + graph.node.remove(producer) + graph.node.insert(node_ind, producer) + graph_modified = True return (model, graph_modified) diff --git a/tests/brevitas/test_brevitas_QConv2d.py b/tests/brevitas/test_brevitas_QConv2d.py index 198f1e7961a9e160589989b8b34b45b5fda53817..5f124690d7c1f266f074351abe690abdd3ae5a2c 100644 --- a/tests/brevitas/test_brevitas_QConv2d.py +++ b/tests/brevitas/test_brevitas_QConv2d.py @@ -1,3 +1,31 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + import pytest import os import numpy as np @@ -19,8 +47,9 @@ export_onnx_path = "test_brevitas_conv.onnx" @pytest.mark.parametrize("dw", [False, True]) +@pytest.mark.parametrize("bias", [True, False]) @pytest.mark.parametrize("in_channels", [32]) -def test_brevitas_QConv2d(dw, in_channels): +def test_brevitas_QConv2d(dw, bias, in_channels): ishape = (1, 32, 111, 111) if dw is True: groups = in_channels @@ -45,10 +74,8 @@ def test_brevitas_QConv2d(dw, in_channels): kernel_size=kernel_size, padding=padding, stride=stride, - bias=False, + bias=bias, bias_quant_type=QuantType.FP, - compute_output_bit_width=False, - compute_output_scale=False, weight_bit_width=4, weight_quant_type=QuantType.INT, weight_scaling_impl_type=ScalingImplType.STATS, @@ -60,7 +87,7 @@ def test_brevitas_QConv2d(dw, in_channels): ) weight_tensor = gen_finn_dt_tensor(DataType.INT4, w_shape) b_conv.weight = torch.nn.Parameter(torch.from_numpy(weight_tensor).float()) - + b_conv.eval() bo.export_finn_onnx(b_conv, ishape, export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) @@ -69,7 +96,6 @@ def test_brevitas_QConv2d(dw, in_channels): odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] inp_tensor = torch.from_numpy(inp_tensor).float() - b_conv.eval() expected = b_conv.forward(inp_tensor).detach().numpy() assert np.isclose(produced, expected, atol=1e-3).all() diff --git a/tests/brevitas/test_brevitas_avg_pool_export.py b/tests/brevitas/test_brevitas_avg_pool_export.py index f3d6c5dde7179bec8fe97e2a6c791afb5733514c..4b88b0f787fb7780079ee82d6802d5cbff410748 100644 --- a/tests/brevitas/test_brevitas_avg_pool_export.py +++ b/tests/brevitas/test_brevitas_avg_pool_export.py @@ -1,27 +1,53 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os -import onnx # noqa import torch import numpy as np -import brevitas.onnx as bo -from brevitas.nn import QuantAvgPool2d -from brevitas.quant_tensor import pack_quant_tensor -from brevitas.core.quant import QuantType +import pytest +import finn.core.onnx_exec as oxe from finn.core.modelwrapper import ModelWrapper from finn.core.datatype import DataType from finn.transformation.infer_shapes import InferShapes from finn.transformation.infer_datatypes import InferDataTypes from finn.util.basic import gen_finn_dt_tensor -import finn.core.onnx_exec as oxe -import pytest +from brevitas.export import FINNManager +from brevitas.nn import QuantAvgPool2d +from brevitas.quant_tensor import QuantTensor + export_onnx_path = "test_brevitas_avg_pool_export.onnx" @pytest.mark.parametrize("kernel_size", [2, 3]) @pytest.mark.parametrize("stride", [1, 2]) -@pytest.mark.parametrize("signed", [False, True]) +@pytest.mark.parametrize("signed", [True, False]) @pytest.mark.parametrize("bit_width", [2, 4]) @pytest.mark.parametrize("input_bit_width", [4, 8, 16]) @pytest.mark.parametrize("channels", [2, 4]) @@ -29,73 +55,46 @@ export_onnx_path = "test_brevitas_avg_pool_export.onnx" def test_brevitas_avg_pool_export( kernel_size, stride, signed, bit_width, input_bit_width, channels, idim ): - ishape = (1, channels, idim, idim) - ibw_tensor = torch.Tensor([input_bit_width]) - b_avgpool = QuantAvgPool2d( - kernel_size=kernel_size, - stride=stride, - bit_width=bit_width, - quant_type=QuantType.INT, - ) - # call forward pass manually once to cache scale factor and bitwidth - input_tensor = torch.from_numpy(np.zeros(ishape)).float() - scale = np.ones((1, channels, 1, 1)) - output_scale = torch.from_numpy(scale).float() - input_quant_tensor = pack_quant_tensor( - tensor=input_tensor, scale=output_scale, bit_width=ibw_tensor, signed=signed + quant_avgpool = QuantAvgPool2d( + kernel_size=kernel_size, stride=stride, bit_width=bit_width ) - bo.export_finn_onnx(b_avgpool, ishape, export_onnx_path, input_t=input_quant_tensor) - model = ModelWrapper(export_onnx_path) + 
quant_avgpool.eval() - # determine input FINN datatype - if signed is True: - prefix = "INT" - else: - prefix = "UINT" + # determine input + prefix = "INT" if signed else "UINT" dt_name = prefix + str(input_bit_width) dtype = DataType[dt_name] - model = model.transform(InferShapes()) - model = model.transform(InferDataTypes()) - - # execution with input tensor using integers and scale = 1 - # calculate golden output - inp = gen_finn_dt_tensor(dtype, ishape) - input_tensor = torch.from_numpy(inp).float() - input_quant_tensor = pack_quant_tensor( - tensor=input_tensor, scale=output_scale, bit_width=ibw_tensor, signed=signed - ) - b_avgpool.eval() - expected = b_avgpool.forward(input_quant_tensor).tensor.detach().numpy() - - # finn execution - idict = {model.graph.input[0].name: inp} - odict = oxe.execute_onnx(model, idict, True) - produced = odict[model.graph.output[0].name] - assert (expected == produced).all() - - # execution with input tensor using float and scale != 1 - scale = np.random.uniform(low=0, high=1, size=(1, channels, 1, 1)).astype( + input_shape = (1, channels, idim, idim) + input_array = gen_finn_dt_tensor(dtype, input_shape) + # Brevitas QuantAvgPool layers need QuantTensors to export correctly + # which requires setting up a QuantTensor instance with the scale + # factor, zero point, bitwidth and signedness + scale_array = np.random.uniform(low=0, high=1, size=(1, channels, 1, 1)).astype( np.float32 ) - inp_tensor = inp * scale - input_tensor = torch.from_numpy(inp_tensor).float() - input_scale = torch.from_numpy(scale).float() - input_quant_tensor = pack_quant_tensor( - tensor=input_tensor, scale=input_scale, bit_width=ibw_tensor, signed=signed + input_tensor = torch.from_numpy(input_array * scale_array).float() + scale_tensor = torch.from_numpy(scale_array).float() + zp = torch.tensor(0.0) + input_quant_tensor = QuantTensor( + input_tensor, scale_tensor, zp, input_bit_width, signed + ) + + # export + FINNManager.export( + quant_avgpool, export_path=export_onnx_path, input_t=input_quant_tensor ) - # export again to set the scale values correctly - bo.export_finn_onnx(b_avgpool, ishape, export_onnx_path, input_t=input_quant_tensor) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) - b_avgpool.eval() - expected = b_avgpool.forward(input_quant_tensor).tensor.detach().numpy() - # finn execution - idict = {model.graph.input[0].name: inp_tensor} - odict = oxe.execute_onnx(model, idict, True) - produced = odict[model.graph.output[0].name] - - assert np.isclose(expected, produced).all() + # reference brevitas output + ref_output_array = quant_avgpool(input_quant_tensor).tensor.detach().numpy() + # finn output + idict = {model.graph.input[0].name: input_array} + odict = oxe.execute_onnx(model, idict, True) + finn_output = odict[model.graph.output[0].name] + # compare outputs + assert np.isclose(ref_output_array, finn_output).all() + # cleanup os.remove(export_onnx_path) diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index 94f937ef2afc9eb86665e26d703be9f01e2163a0..98a18403e79366aca184559a8b868f30aa27c35e 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -1,3 +1,31 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + from PIL import Image import numpy as np import brevitas.onnx as bo diff --git a/tests/brevitas/test_brevitas_non_scaled_QuantHardTanh_export.py b/tests/brevitas/test_brevitas_non_scaled_QuantHardTanh_export.py index 9c7296b7b3b6d36cfb43b6d9e96e7fba6bbce49a..37ea12ac0f6387b1ab6669bce4aaaaaa60e87b58 100644 --- a/tests/brevitas/test_brevitas_non_scaled_QuantHardTanh_export.py +++ b/tests/brevitas/test_brevitas_non_scaled_QuantHardTanh_export.py @@ -1,3 +1,31 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ import os import onnx # noqa import numpy as np diff --git a/tests/brevitas/test_brevitas_qlinear.py b/tests/brevitas/test_brevitas_qlinear.py new file mode 100644 index 0000000000000000000000000000000000000000..62ed358dc9030c35e865921ca7cf9e80c34020fd --- /dev/null +++ b/tests/brevitas/test_brevitas_qlinear.py @@ -0,0 +1,78 @@ +# Copyright (c) 2021, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
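+# This test exports a weight-quantized Brevitas QuantLinear layer to FINN-ONNX,
+# executes the exported model with FINN's onnx_exec and compares the result
+# against the Brevitas forward pass (atol=1e-3).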
+ +import pytest +import os +import numpy as np +import torch +import brevitas.onnx as bo +from brevitas.nn import QuantLinear +from brevitas.core.quant import QuantType +from finn.core.modelwrapper import ModelWrapper +from finn.core.datatype import DataType +import finn.core.onnx_exec as oxe +from finn.transformation.infer_shapes import InferShapes +from finn.util.basic import gen_finn_dt_tensor + +export_onnx_path = "test_brevitas_qlinear.onnx" + + +@pytest.mark.parametrize("bias", [False, True]) +@pytest.mark.parametrize("out_features", [4]) +@pytest.mark.parametrize("in_features", [3]) +@pytest.mark.parametrize("w_bits", [4]) +@pytest.mark.parametrize("i_dtype", [DataType.UINT4]) +def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype): + i_shape = (1, in_features) + w_shape = (out_features, in_features) + b_linear = QuantLinear( + out_features=out_features, + in_features=in_features, + bias=bias, + bias_quant_type=QuantType.FP, + weight_bit_width=w_bits, + weight_quant_type=QuantType.INT, + weight_scaling_per_output_channel=True, + ) + weight_tensor_fp = np.random.uniform(low=-1.0, high=1.0, size=w_shape).astype( + np.float32 + ) + b_linear.weight.data = torch.from_numpy(weight_tensor_fp) + b_linear.eval() + bo.export_finn_onnx(b_linear, i_shape, export_onnx_path) + model = ModelWrapper(export_onnx_path) + model = model.transform(InferShapes()) + inp_tensor = gen_finn_dt_tensor(i_dtype, i_shape) + idict = {model.graph.input[0].name: inp_tensor} + odict = oxe.execute_onnx(model, idict, True) + produced = odict[model.graph.output[0].name] + inp_tensor = torch.from_numpy(inp_tensor).float() + expected = b_linear.forward(inp_tensor).detach().numpy() + + assert np.isclose(produced, expected, atol=1e-3).all() + os.remove(export_onnx_path) diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index fa114585d31fca629aa759e386aa3fbd04280a2e..278f05a4a9f264d69461e555d28437cdc83e1a71 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ b/tests/brevitas/test_brevitas_relu_act_export.py @@ -1,3 +1,31 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + import os import onnx # noqa import numpy as np diff --git a/tests/brevitas/test_brevitas_scaled_QHardTanh_export.py b/tests/brevitas/test_brevitas_scaled_QHardTanh_export.py index e0ec82ebed44e2e984be9f62e02bc1721a7f9c33..b1652c1cdce881157c6e97dcfde4257902678c8f 100644 --- a/tests/brevitas/test_brevitas_scaled_QHardTanh_export.py +++ b/tests/brevitas/test_brevitas_scaled_QHardTanh_export.py @@ -1,3 +1,31 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + import onnx # noqa import os import numpy as np diff --git a/tests/brevitas/test_brevitas_validate_mobilenet.py b/tests/brevitas/test_brevitas_validate_mobilenet.py index 42bc3942d1a4f5fbdf70dbb1f1b5e853357abff8..dd079fe2e27ace85b9a08e699fa437e93f8e7f3d 100644 --- a/tests/brevitas/test_brevitas_validate_mobilenet.py +++ b/tests/brevitas/test_brevitas_validate_mobilenet.py @@ -1,3 +1,31 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+#
+# * Neither the name of FINN nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
 import os
 import csv
 import numpy as np
diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index ddea2dafce02c181a279d9c95759b97dee00a504..a6e7ad642222936775293ec145e845ef111dd4d3 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -129,12 +129,7 @@ def update_dashboard_data(topology, wbits, abits, key, val):
 def fold_tfc(model):
     fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch")
     # (PE, SIMD, ramstyle) for each layer
-    config = [
-        (16, 49, "block"),
-        (8, 8, "auto"),
-        (8, 8, "auto"),
-        (10, 8, "distributed"),
-    ]
+    config = [(16, 49, "block"), (8, 8, "auto"), (8, 8, "auto"), (10, 8, "distributed")]
     for fcl, (pe, simd, ramstyle) in zip(fc_layers, config):
         fcl_inst = getCustomOp(fcl)
         fcl_inst.set_nodeattr("PE", pe)
@@ -312,8 +307,8 @@ class TestEnd2End:
     def test_export(self, topology, wbits, abits):
         if wbits > abits:
             pytest.skip("No wbits > abits end2end network configs for now")
-        if topology == "lfc" and wbits > 1:
-            pytest.skip("Skipping non-existing lfc configs")
+        if topology == "lfc" and not (wbits == 1 and abits == 1):
+            pytest.skip("Skipping lfc configs other than w1a1")
         (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits)
         chkpt_name = get_checkpoint_name(topology, wbits, abits, "export")
         bo.export_finn_onnx(model, ishape, chkpt_name)
@@ -352,6 +347,8 @@ class TestEnd2End:
         assert os.path.isfile(chkpt_preproc_name)
         # join preprocessing and core model
         pre_model = ModelWrapper(chkpt_preproc_name)
+        pre_model = pre_model.transform(InferShapes())
+        pre_model = pre_model.transform(FoldConstants())
         model = model.transform(MergeONNXModels(pre_model))
         # add input quantization annotation: UINT8 for all BNN-PYNQ models
         global_inp_name = model.graph.input[0].name
@@ -372,6 +369,7 @@ class TestEnd2End:
     def test_streamline(self, topology, wbits, abits):
         prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "pre_post")
         model = load_test_checkpoint_or_skip(prev_chkpt_name)
+        model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold())
         # move past any reshapes to be able to streamline input scaling
         model = model.transform(MoveScalarLinearPastInvariants())
         model = model.transform(Streamline())
diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py
index 4dba19f586e3235a582e72d4c3936a60ebc4a703..63d6a91e37586030a15c9a7c2523679875444f3f 100644
--- a/tests/end2end/test_end2end_cybsec_mlp.py
+++ b/tests/end2end/test_end2end_cybsec_mlp.py
@@ -28,6 +28,7 @@
 import torch
 from brevitas.nn import QuantLinear, QuantReLU
+from brevitas.quant_tensor import QuantTensor
 import torch.nn as nn
 import numpy as np
 from brevitas.core.quant import QuantType
@@ -115,19 +116,32 @@ def test_end2end_cybsec_mlp_export():
     model_for_export = CybSecMLPForExport(model)
     export_onnx_path = get_checkpoint_name("export")
     input_shape = (1, 600)
-    bo.export_finn_onnx(model_for_export, input_shape, export_onnx_path)
+    # create a QuantTensor instance to mark the input as bipolar during export
+    input_a = np.random.randint(0, 2, size=input_shape).astype(np.float32)
+    input_a = 2 * input_a - 1
+    scale = 1.0
+    input_t = torch.from_numpy(input_a * scale)
+    input_qt = QuantTensor(
+        input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True
+    )
+
+    bo.export_finn_onnx(
+        model_for_export, export_path=export_onnx_path, input_t=input_qt
+    )
     assert os.path.isfile(export_onnx_path)
     # fix input datatype
     finn_model = ModelWrapper(export_onnx_path)
     finnonnx_in_tensor_name = finn_model.graph.input[0].name
-    finn_model.set_tensor_datatype(finnonnx_in_tensor_name, DataType.BIPOLAR)
-    finn_model.save(export_onnx_path)
     assert tuple(finn_model.get_tensor_shape(finnonnx_in_tensor_name)) == (1, 600)
-    assert len(finn_model.graph.node) == 30
-    assert finn_model.graph.node[0].op_type == "Add"
-    assert finn_model.graph.node[1].op_type == "Div"
-    assert finn_model.graph.node[2].op_type == "MatMul"
+    # verify a few exported ops
+    assert finn_model.graph.node[1].op_type == "Add"
+    assert finn_model.graph.node[2].op_type == "Div"
+    assert finn_model.graph.node[3].op_type == "MatMul"
     assert finn_model.graph.node[-1].op_type == "MultiThreshold"
+    # verify datatypes on some tensors
+    assert finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType.BIPOLAR
+    first_matmul_w_name = finn_model.graph.node[3].input[1]
+    assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType.INT2
 
 
 @pytest.mark.slow
diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py
index c23749829a9d75c9a9519663a872aa1281bd46d3..5bfe8e1ea1b48ed77c40a584d624cc0ecdedb668 100644
--- a/tests/end2end/test_end2end_mobilenet_v1.py
+++ b/tests/end2end/test_end2end_mobilenet_v1.py
@@ -74,18 +74,8 @@ from finn.transformation.fpgadataflow.create_dataflow_partition import (
 from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
-from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
-from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
-from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
-from finn.transformation.fpgadataflow.replace_verilog_relpaths import (
-    ReplaceVerilogRelPaths,
-)
-from finn.transformation.fpgadataflow.annotate_resources import AnnotateResources
-from finn.transformation.fpgadataflow.set_fifo_depths import InsertAndSetFIFODepths
 from finn.core.onnx_exec import execute_onnx
 from finn.util.basic import alveo_part_map, alveo_default_platform
-from finn.util.config import extract_model_config_to_json
-from finn.transformation.fpgadataflow.vitis_build import VitisBuild, VitisOptStrategy
 
 build_dir = os.environ["FINN_BUILD_DIR"]
 
@@ -111,6 +101,7 @@ def test_end2end_mobilenet_export():
     # set input finn datatype to UINT8
     preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType.UINT8)
     preproc_model = preproc_model.transform(InferShapes())
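(Aside on the CybSec export change above: wrapping the dummy input in a 1-bit signed QuantTensor is what lets export_finn_onnx annotate the graph input as BIPOLAR directly, replacing the old post-export set_tensor_datatype fix. A condensed sketch of just the wrapping step, with a scale of 1.0 as in the test:)

import numpy as np
import torch
from brevitas.quant_tensor import QuantTensor

input_shape = (1, 600)
# draw {0, 1} values and map them to {-1, +1} to obtain a bipolar dummy input
input_a = 2 * np.random.randint(0, 2, size=input_shape).astype(np.float32) - 1
input_qt = QuantTensor(
    torch.from_numpy(input_a),
    scale=torch.tensor(1.0),
    bit_width=torch.tensor(1.0),  # 1 bit + signed is read as BIPOLAR by FINN
    signed=True,
)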
+ preproc_model = preproc_model.transform(FoldConstants()) preproc_model = preproc_model.transform(GiveUniqueNodeNames()) preproc_model = preproc_model.transform(GiveUniqueParameterTensors()) preproc_model = preproc_model.transform(GiveReadableTensorNames()) @@ -197,6 +188,10 @@ def test_end2end_mobilenet_streamline(): model = model.transform(GiveReadableTensorNames()) model = model.transform(InferDataTypes()) model.save(build_dir + "/end2end_mobilenet_streamlined.onnx") + assert ( + len(model.get_nodes_by_op_type("Add")) == 1 + ) # only final quantized bias Add op remains + assert len(model.get_nodes_by_op_type("Mul")) == 0 # no Mul ops remain def test_end2end_mobilenet_lowering(): @@ -334,101 +329,3 @@ def test_end2end_mobilenet_cppsim(): assert (golden == res_cppsim).all() assert np.isclose(golden_prob, res_cppsim_prob).all() - - -@pytest.mark.slow -@pytest.mark.vivado -def test_end2end_mobilenet_gen_hls_ip(): - model = load_test_checkpoint_or_skip( - build_dir + "/end2end_mobilenet_dataflow_model.onnx" - ) - start = time.time() - model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) - model = model.transform(HLSSynthIP()) - model = model.transform(ReplaceVerilogRelPaths()) - end = time.time() - elapsed_time = end - start - f = open(build_dir + "/end2end_mobilenet_ipgen_time.txt", "w+") - f.write("Execution time in seconds: " + str(elapsed_time)) - f.close() - - model = model.transform(AnnotateResources("hls")) - model.save(build_dir + "/end2end_mobilenet_ipgen.onnx") - - -@pytest.mark.slow -@pytest.mark.vivado -@pytest.mark.xfail -def test_end2end_mobilenet_rtlsim(): - model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_ipgen.onnx") - x = np.load(build_dir + "/end2end_mobilenet_input.npy") - inp_name = model.graph.input[0].name - out_name = model.graph.output[0].name - inp_dict = {inp_name: x} - # node-by-node rtlsim - model = model.transform(SetExecMode("rtlsim")) - model = model.transform(PrepareRTLSim()) - model.save(build_dir + "/end2end_mobilenet_ipgen_nodebynode_rtlsim.onnx") - ret_rtlsim_nodebynode = execute_onnx(model, inp_dict, True) - res_rtlsim_nodebynode = ret_rtlsim_nodebynode[out_name] - np.save( - build_dir + "/end2end_mobilenet_result_rtlsim_nodebynode.npy", - res_rtlsim_nodebynode, - ) - a0 = np.load(build_dir + "/end2end_mobilenet_topk_scale.npy") - res_rtlsim_nodebynode_prob = ( - ret_rtlsim_nodebynode[model.graph.node[-2].output[0]] * a0 - ) - np.save( - build_dir + "/end2end_mobilenet_result_rtlsim_nodebynode_prob.npy", - res_rtlsim_nodebynode_prob, - ) - - # check result with golden values - golden = np.load(build_dir + "/end2end_mobilenet_golden_top5.npy") - golden_prob = np.load(build_dir + "/end2end_mobilenet_golden_top5_prob.npy") - - assert (golden == res_rtlsim_nodebynode).all() - assert np.isclose(golden_prob, res_rtlsim_nodebynode_prob).all() - - -@pytest.mark.slow -@pytest.mark.vivado -def test_end2end_mobilenet_set_fifo_depths(): - model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_ipgen.onnx") - start = time.time() - model = model.transform( - InsertAndSetFIFODepths( - test_fpga_part, target_clk_ns, vivado_ram_style=large_fifo_ram_style - ) - ) - end = time.time() - elapsed_time = end - start - f = open(build_dir + "/end2end_mobilenet_fifoset_time.txt", "w+") - f.write("Execution time in seconds: " + str(elapsed_time)) - f.close() - extract_model_config_to_json( - model, - build_dir + "/end2end_mobilenet_folded_and_fifo_config.json", - ["PE", "SIMD", "impl_style", "ram_style", "depth"], - ) - 
model.save(build_dir + "/end2end_mobilenet_fifodepth.onnx") - - -@pytest.mark.slow -@pytest.mark.vitis -def test_end2end_mobilenet_build(): - model = load_test_checkpoint_or_skip( - build_dir + "/end2end_mobilenet_fifodepth.onnx" - ) - model = model.transform( - VitisBuild( - test_fpga_part, - target_clk_ns, - test_platform, - strategy=VitisOptStrategy.PERFORMANCE_BEST, - ) - ) - model.save(build_dir + "/end2end_mobilenet_build.onnx") - model = model.transform(AnnotateResources("synth")) - model.save(build_dir + "/end2end_mobilenet_final.onnx") diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..dfdb21fa72cbbeeb503f7ecc447b659ef7934fb9 --- /dev/null +++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py @@ -0,0 +1,189 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
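(Aside: the new 1D convolution test that follows sizes all of its tensors via compute_conv_output_dim. Assuming it implements the textbook output-size formula out = (in + pad_total - dilation * (k - 1) - 1) // stride + 1, the parametrized configs work out as below; a quick self-check in plain Python, stated as an illustration rather than FINN's exact implementation:)

def conv_out_dim(i, k, stride, pad_total, dilation=1):
    # textbook output-size arithmetic for one spatial axis of a convolution
    return (i + pad_total - dilation * (k - 1) - 1) // stride + 1

# H axis of the [[0, 0, 0, 0], [4, 1], [1, 1], [1, 1]] config: 10 -> 7
assert conv_out_dim(10, 4, stride=1, pad_total=0) == 7
# H axis of the [[1, 0, 1, 0], [4, 1], [2, 1], [1, 1]] config: 10 -> 5
assert conv_out_dim(10, 4, stride=2, pad_total=2) == 5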
+ +from onnx import TensorProto, helper +import numpy as np +import pytest + +from finn.core.datatype import DataType +from finn.transformation.infer_shapes import InferShapes +from finn.transformation.infer_datatypes import InferDataTypes +from finn.transformation.general import GiveUniqueNodeNames +from finn.transformation.lower_convs_to_matmul import LowerConvsToMatMul + +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +import finn.core.onnx_exec as oxe +from finn.core.modelwrapper import ModelWrapper +from finn.util.basic import gen_finn_dt_tensor +import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls + +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.custom_op.general.im2col import compute_conv_output_dim +from finn.custom_op.registry import getCustomOp +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer + + +# conv_config: +# [pad_h_begin, pad_w_begin, pad_h_end, pad_w_end] +# [kernel_size_h, kernel_size_w] +# [stride_h, stride_w] +# [dilation_h, dilation_w] +@pytest.mark.parametrize( + "conv_config", + [ + [[0, 0, 0, 0], [4, 1], [1, 1], [1, 1]], + [[1, 0, 1, 0], [4, 1], [1, 1], [1, 1]], + [[1, 0, 1, 0], [4, 1], [2, 1], [1, 1]], + # [[1, 0, 1, 0], [4, 1], [1, 1], [2, 1]] + ], +) +@pytest.mark.parametrize("depthwise", [False, True]) +@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +@pytest.mark.slow +@pytest.mark.vivado +def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode): + pad, kernel_size, stride, dilation = conv_config + np.random.seed(0) + idt = DataType.UINT4 + + in_feature_dim_h, in_feature_dim_w = [10, 1] + in_chn = 16 + + k_h, k_w = kernel_size + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + + if depthwise is True: + group = out_chn = in_chn + conv_param_shape = [out_chn, 1, k_h, k_w] + else: + group = 1 + out_chn = 20 + conv_param_shape = [out_chn, in_chn, k_h, k_w] + + out_feature_dim_h = compute_conv_output_dim( + in_feature_dim_h, k_h, stride_h, pad_h, dilation_h + ) + out_feature_dim_w = compute_conv_output_dim( + in_feature_dim_w, k_w, stride_w, pad_w, dilation_w + ) + + input_shape = [1, in_chn, in_feature_dim_h, in_feature_dim_w] + output_shape = [1, out_chn, out_feature_dim_h, out_feature_dim_w] + + conv_weight_dt = DataType.UINT4 + + conv_config = {} + conv_config["dilations"] = [dilation_h, dilation_w] + conv_config["group"] = group + conv_config["kernel_shape"] = [k_h, k_w] + conv_config["pads"] = pad + conv_config["strides"] = [stride_h, stride_w] + + top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape) + top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape) + value_info = [ + helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape) + ] + + modelproto = helper.make_model( + helper.make_graph( + name="conv_test", + inputs=[top_in], + outputs=[top_out], + value_info=value_info, + nodes=[ + helper.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config) + ], + ) + ) + + model = ModelWrapper(modelproto) + model.set_tensor_datatype("top_in", idt) + model.set_tensor_datatype("top_out", idt) + 
model.set_tensor_datatype("p1", conv_weight_dt) + model.set_initializer("p1", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape)) + + model = model.transform(InferShapes()) + model = model.transform(InferDataTypes()) + + new_model = model.transform(LowerConvsToMatMul()) + new_model = new_model.transform(to_hls.InferConvInpGen()) + if depthwise is True: + new_model = new_model.transform(to_hls.InferVVAU()) + else: + new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer()) + fc_node = new_model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0] + fc_inst = getCustomOp(fc_node) + mw = fc_inst.get_nodeattr("MW") + mh = fc_inst.get_nodeattr("MH") + pe_cands = list(filter(lambda x: mh % x == 0, range(2, mh + 1))) + simd_cands = list(filter(lambda x: mw % x == 0, range(2, mw + 1))) + fc_inst.set_nodeattr("PE", pe_cands[0]) + fc_inst.set_nodeattr("SIMD", simd_cands[0]) + + new_model = new_model.transform(GiveUniqueNodeNames()) + new_model = new_model.transform(InferShapes()) + new_model = new_model.transform(InferDataTypes()) + + if exec_mode == "cppsim": + new_model = new_model.transform(PrepareCppSim()) + new_model = new_model.transform(CompileCppSim()) + new_model = new_model.transform(SetExecMode("cppsim")) + elif exec_mode == "rtlsim": + new_model = new_model.transform(SetExecMode("rtlsim")) + new_model = new_model.transform(GiveUniqueNodeNames()) + new_model = new_model.transform(PrepareIP("xc7z020clg400-1", 5)) + new_model = new_model.transform(HLSSynthIP()) + new_model = new_model.transform(PrepareRTLSim()) + else: + raise Exception("Unknown exec_mode") + + x = gen_finn_dt_tensor(idt, input_shape) + inp_dict = {model.graph.input[0].name: x} + assert oxe.compare_execution(model, new_model, inp_dict) + + if pad_h == 1 and pad_w == 1: + padding_node = new_model.get_nodes_by_op_type("FMPadding_Batch")[0] + padding_inst = getCustomOp(padding_node) + assert padding_inst.get_nodeattr("SIMD") == in_chn + + if depthwise is True and exec_mode == "rtlsim": + node = new_model.get_nodes_by_op_type("Vector_Vector_Activate_Batch")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + exp_cycles_dict = new_model.analysis(exp_cycles_per_layer) + exp_cycles = exp_cycles_dict[node.name] + assert np.isclose(exp_cycles, cycles_rtlsim, atol=11) + assert exp_cycles != 0 diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py new file mode 100755 index 0000000000000000000000000000000000000000..8b7b0a4b6a93cde690a3d87eb3a2f1a0a55a85f8 --- /dev/null +++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py @@ -0,0 +1,250 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from onnx import TensorProto, helper +import numpy as np +import pytest + +from finn.core.datatype import DataType +from finn.transformation.infer_shapes import InferShapes +from finn.transformation.infer_datatypes import InferDataTypes +from finn.transformation.general import GiveUniqueNodeNames +from finn.transformation.lower_convs_to_matmul import LowerConvsToMatMul + +import finn.core.onnx_exec as oxe +from finn.core.modelwrapper import ModelWrapper +from finn.util.basic import gen_finn_dt_tensor +import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls + +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.custom_op.general.im2col import compute_conv_output_dim + +import finn.transformation.streamline.absorb as absorb +from finn.transformation.general import RemoveUnusedTensors +from finn.transformation.streamline import Streamline +from finn.transformation.infer_data_layouts import InferDataLayouts +from finn.transformation.move_reshape import RemoveCNVtoFCFlatten +from finn.transformation.streamline.reorder import MoveScalarLinearPastInvariants + +import finn.core.data_layout as DataLayout + + +def get_multithreshold_rand_params(channels, num_of_thres, seed=None): + if seed is not None: + np.random.seed(seed) + steps = np.random.rand(channels, 1) * 30 + bias = np.random.rand(channels, 1) * -10 + thres = [np.arange(num_of_thres) for chn in range(channels)] + thres = ((thres + bias) * steps).astype(np.float32) + thres = np.round(thres) + return thres + + +# conv_config: input_shape, kernel_shape, stride, pad +@pytest.mark.parametrize( + "conv_config", + [ + ((6, 6), (3, 3), (1, 1), (1, 1)), + # TODO: enable 1d conv test cases + # ((12, 1), (3, 1), (1, 1), (1, 0)), + # ((1, 15), (1, 5), (1, 1), (0, 2)), + ], +) +@pytest.mark.parametrize("depthwise", [False, True]) +@pytest.mark.parametrize("use_reshape", [False, True]) +@pytest.mark.vivado +@pytest.mark.slow +def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): + np.random.seed(0) + idt = DataType.UINT4 + odt = DataType.UINT4 + conv_weight_dt = DataType.INT4 + fc_weight_dt = DataType.INT4 + + input_shape, kernel_shape, stride, pad = conv_config + kernel_size_h, kernel_size_w = kernel_shape + input_size_h, input_size_w = input_shape + stride_h, stride_w = stride + pad_h, pad_w = pad + + in_chn = 4 + fc_filters = 16 + + if depthwise is True: + group = out_chn = in_chn + conv_param_shape = [out_chn, 1, kernel_size_h, kernel_size_w] + else: + group = 1 + out_chn = 8 + conv_param_shape = [out_chn, in_chn, kernel_size_h, kernel_size_w] + + 
output_size_h = compute_conv_output_dim( + input_size_h, kernel_size_h, stride_h, 2 * pad_h + ) + output_size_w = compute_conv_output_dim( + input_size_w, kernel_size_w, stride_w, 2 * pad_w + ) + + input_shape = [1, in_chn, input_size_h, input_size_w] + fc_param_shape = [out_chn * output_size_h * output_size_w, fc_filters] + output_shape = [1, fc_filters] + + conv_config = {} + conv_config["dilations"] = [1, 1] + conv_config["group"] = group + conv_config["kernel_shape"] = [kernel_size_h, kernel_size_w] + conv_config["pads"] = [pad_h, pad_w, pad_h, pad_w] + conv_config["strides"] = [stride_h, stride_w] + + global_in = helper.make_tensor_value_info( + "global_in", TensorProto.FLOAT, input_shape + ) + global_out = helper.make_tensor_value_info( + "global_out", TensorProto.FLOAT, output_shape + ) + value_info = [ + helper.make_tensor_value_info( + "conv_param", TensorProto.FLOAT, conv_param_shape + ), + helper.make_tensor_value_info("thres1_param", TensorProto.FLOAT, (out_chn, 15)), + helper.make_tensor_value_info( + "matmul_param", TensorProto.FLOAT, fc_param_shape + ), + helper.make_tensor_value_info( + "thres2_param", TensorProto.FLOAT, (fc_filters, 15) + ), + helper.make_tensor_value_info("reshape_shape", TensorProto.INT64, []), + ] + + if use_reshape: + flatten_node = helper.make_node( + "Reshape", ["thres1_out", "reshape_shape"], ["flatten_out"] + ) + else: + flatten_node = helper.make_node( + "Flatten", ["thres1_out"], ["flatten_out"], axis=1 + ) + + modelproto = helper.make_model( + helper.make_graph( + name="test", + inputs=[global_in], + outputs=[global_out], + value_info=value_info, + nodes=[ + helper.make_node( + "Conv", ["global_in", "conv_param"], ["conv_out"], **conv_config + ), + helper.make_node( + "MultiThreshold", + ["conv_out", "thres1_param"], + ["thres1_out"], + domain="finn.custom_op.general", + out_dtype="UINT4", + ), + flatten_node, + helper.make_node( + "MatMul", ["flatten_out", "matmul_param"], ["matmul_out"] + ), + helper.make_node( + "MultiThreshold", + ["matmul_out", "thres2_param"], + ["global_out"], + domain="finn.custom_op.general", + out_dtype="UINT4", + ), + ], + ) + ) + + model = ModelWrapper(modelproto) + model.set_tensor_datatype("global_in", idt) + model.set_tensor_layout("global_in", DataLayout.NCHW) + model.set_tensor_datatype("global_out", odt) + model.set_tensor_datatype("conv_param", conv_weight_dt) + model.set_tensor_datatype("matmul_param", fc_weight_dt) + model.set_tensor_datatype("thres1_param", DataType.INT32) + model.set_tensor_datatype("thres2_param", DataType.INT32) + + model.set_initializer( + "conv_param", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape) + ) + model.set_initializer( + "thres1_param", get_multithreshold_rand_params(out_chn, 15, seed=0) + ) + model.set_initializer( + "thres2_param", get_multithreshold_rand_params(fc_filters, 15, seed=0) + ) + model.set_initializer( + "matmul_param", gen_finn_dt_tensor(fc_weight_dt, fc_param_shape) + ) + model.set_initializer("reshape_shape", np.array([1, -1])) + + model = model.transform(InferShapes()) + model = model.transform(InferDataTypes()) + model = model.transform(InferDataLayouts()) + + # streamlining + new_model = model.transform(MoveScalarLinearPastInvariants()) + new_model = new_model.transform(Streamline()) + new_model = new_model.transform(LowerConvsToMatMul()) + new_model = new_model.transform(absorb.AbsorbTransposeIntoMultiThreshold()) + new_model = new_model.transform(Streamline()) + new_model = new_model.transform(InferDataLayouts()) + new_model = 
new_model.transform(RemoveUnusedTensors()) + + # convert_to_hls + if depthwise is True: + new_model = new_model.transform(to_hls.InferVVAU()) + new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer()) + new_model = new_model.transform(to_hls.InferThresholdingLayer()) + new_model = new_model.transform(to_hls.InferConvInpGen()) + new_model = new_model.transform(to_hls.InferStreamingMaxPool()) + new_model = new_model.transform(RemoveCNVtoFCFlatten()) + new_model = new_model.transform(absorb.AbsorbConsecutiveTransposes()) + new_model = new_model.transform(GiveUniqueNodeNames()) + new_model = new_model.transform(InferDataLayouts()) + + # prepare cppsim + new_model = new_model.transform(PrepareCppSim()) + new_model = new_model.transform(CompileCppSim()) + new_model = new_model.transform(SetExecMode("cppsim")) + + # check for correct execution + x = gen_finn_dt_tensor(idt, input_shape) + inp_dict = {model.graph.input[0].name: x} + assert oxe.compare_execution(model, new_model, inp_dict) + + num_transpose = len(new_model.get_nodes_by_op_type("Transpose")) + num_flatten = len(new_model.get_nodes_by_op_type("Flatten")) + num_reshape = len(new_model.get_nodes_by_op_type("Reshape")) + + # check if transpose->flatten was removed + assert num_transpose == 1 and num_flatten == 0 and num_reshape == 0 diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index 20751a5877a879eeabf1ed6b67a7573208cf9367..15bf160799826b0d50a0f043a56dd1fc2accdd12 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -115,8 +115,8 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): thr_nodes = model.get_nodes_by_op_type("Thresholding_Batch") assert len(thr_nodes) == 8 non_finn_nodes = model.get_non_finn_nodes() - assert len(non_finn_nodes) == 4 - exp_non_finn_nodes = ["Transpose", "Reshape", "Mul", "Add"] + assert len(non_finn_nodes) == 5 + exp_non_finn_nodes = ["Transpose", "Transpose", "Reshape", "Mul", "Add"] assert [x.op_type for x in non_finn_nodes] == exp_non_finn_nodes fc_nodes = model.get_nodes_by_op_type("StreamingFCLayer_Batch") assert len(fc_nodes) == 9 diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py index c406d78158c52226fea881c48bc178139653fea5..3efeacb6e6875c6defa799eb7154e02ce880e16a 100644 --- a/tests/fpgadataflow/test_depthwise_convolution.py +++ b/tests/fpgadataflow/test_depthwise_convolution.py @@ -98,7 +98,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding): inputs=["inp"], outputs=["im2col_out"], kernel_size=[k, k], - stride=stride, + stride=[stride, stride], pad_amount=[padding, padding, padding, padding], input_shape="(1, {}, {}, {})".format(ifm_dim, ifm_dim, ifm_ch), depthwise=1, @@ -142,7 +142,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding): W_matrix = W_matrix.reshape(ofm_ch, ifm_ch * k * k) model.set_initializer("W_sparse", W_matrix.T) - sparsity = {"dw": {"kernel_shape": k}} + sparsity = {"dw": {"kernel_shape": [k, k]}} model.set_tensor_sparsity("W_sparse", sparsity) if act is not None: diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py index 4e0e8c7c35a8fc8a30e0ba4c27a7c0d637e24d1f..1ec12263e22a199ac7da55fdf3418185cd38e555 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py +++ 
b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py @@ -47,7 +47,9 @@ from finn.custom_op.registry import getCustomOp from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer -def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, idt): +def make_single_im2col_modelwrapper( + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt +): odt = idt inp = helper.make_tensor_value_info( "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch] @@ -61,12 +63,12 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, i ["inp"], ["outp"], domain="finn.custom_op.general", - backend="fpgadataflow", - stride=stride, + stride=[stride, stride], kernel_size=[k, k], input_shape=str((1, ifm_dim, ifm_dim, ifm_ch)), pad_amount=[0, 0, 0, 0], pad_value=0, + dilations=[dilation, dilation], ) graph = helper.make_graph( nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] @@ -82,7 +84,7 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, i def make_single_slidingwindow_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, idt, dw=0 + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt, dw=0 ): odt = idt inp = helper.make_tensor_value_info( @@ -98,12 +100,13 @@ def make_single_slidingwindow_modelwrapper( ["outp"], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", - ConvKernelDim=k, + ConvKernelDim=[k, k], IFMChannels=ifm_ch, - IFMDim=ifm_dim, - OFMDim=ofm_dim, + IFMDim=[ifm_dim, ifm_dim], + OFMDim=[ofm_dim, ofm_dim], SIMD=simd, - Stride=stride, + Stride=[stride, stride], + Dilation=[dilation, dilation], inputDataType=idt.name, outputDataType=odt.name, depthwise=dw, @@ -138,6 +141,9 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("ifm_ch", [2, 4]) # Stride @pytest.mark.parametrize("stride", [1, 2]) +# Dilation +# Currently only dilation value of 1 is supported +@pytest.mark.parametrize("dilation", [1]) # execution mode @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) # input channel parallelism ("SIMD") @@ -147,13 +153,13 @@ def prepare_inputs(input_tensor): @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_slidingwindow( - idt, k, ifm_dim, ifm_ch, stride, exec_mode, simd, dw + idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, dw ): ofm_dim = int(((ifm_dim - k) / stride) + 1) x = gen_finn_dt_tensor(idt, (1, ifm_dim, ifm_dim, ifm_ch)) model = make_single_slidingwindow_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, idt, dw + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt, dw ) if exec_mode == "cppsim": @@ -174,9 +180,10 @@ def test_fpgadataflow_slidingwindow( # execute model y_produced = oxe.execute_onnx(model, input_dict)["outp"] golden = make_single_im2col_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, idt + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt ) y_expected = oxe.execute_onnx(golden, input_dict)["outp"] + if dw == 0: assert (y_produced == y_expected).all() else: diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py new file mode 100644 index 0000000000000000000000000000000000000000..6c83aab0d683cdb3888aca3c46bb339bd6330917 --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py @@ -0,0 +1,256 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import pytest +import numpy as np + +from onnx import TensorProto, helper + +import finn.core.onnx_exec as oxe +from finn.core.datatype import DataType +from finn.core.modelwrapper import ModelWrapper +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.general import GiveUniqueNodeNames +from finn.util.basic import gen_finn_dt_tensor + +from finn.custom_op.registry import getCustomOp +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer +from finn.custom_op.general.im2col import compute_conv_output_dim + + +def make_single_im2col_modelwrapper( + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt +): + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h, ofm_dim_w = ofm_dim + + odt = idt + inp = helper.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] + ) + outp = helper.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] + ) + + im2col_node = helper.make_node( + "Im2Col", + ["inp"], + ["outp"], + domain="finn.custom_op.general", + stride=[stride_h, stride_w], + kernel_size=[k_h, k_w], + input_shape=str((1, ifm_dim_h, ifm_dim_w, ifm_ch)), + dilations=[dilation_h, dilation_w], + pad_amount=[0, 0, 0, 0], + pad_value=0, + ) + graph = helper.make_graph( + nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] + ) + + model = helper.make_model(graph, producer_name="im2col-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + return model + + +def 
make_single_slidingwindow_modelwrapper( + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt, dw=0 +): + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h, ofm_dim_w = ofm_dim + + odt = idt + inp = helper.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] + ) + outp = helper.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] + ) + + SlidingWindow_node = helper.make_node( + "ConvolutionInputGenerator1D", + ["inp"], + ["outp"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ifm_dim_h, ifm_dim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + SIMD=simd, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=idt.name, + outputDataType=odt.name, + depthwise=dw, + ) + graph = helper.make_graph( + nodes=[SlidingWindow_node], + name="slidingwindow_graph", + inputs=[inp], + outputs=[outp], + ) + + model = helper.make_model(graph, producer_name="slidingwindow-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + return model + + +def prepare_inputs(input_tensor): + return {"inp": input_tensor} + + +# input datatype +# @pytest.mark.parametrize("idt", [DataType.BIPOLAR, DataType.INT8]) +@pytest.mark.parametrize("idt", [DataType.INT8]) +# kernel size +@pytest.mark.parametrize("k", [[4, 1]]) +# input dimension +@pytest.mark.parametrize("ifm_dim", [[10, 1]]) +# input channels +@pytest.mark.parametrize("ifm_ch", [1, 4]) +# Stride +@pytest.mark.parametrize("stride", [[1, 1], [2, 1]]) +# Dilation +# @pytest.mark.parametrize("dilation", [[1, 1], [2, 1]]) +@pytest.mark.parametrize("dilation", [[1, 1]]) +# execution mode +@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +# input channel parallelism ("SIMD") +@pytest.mark.parametrize("simd", [1, 4]) +# depthwise +@pytest.mark.parametrize("dw", [0, 1]) +# Flip dimensions +@pytest.mark.parametrize("flip", [False, True]) +@pytest.mark.slow +@pytest.mark.vivado +def test_fpgadataflow_slidingwindow_1d( + idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, dw, flip +): + if flip: + k = k[::-1] + ifm_dim = ifm_dim[::-1] + stride = stride[::-1] + dilation = dilation[::-1] + + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + + if (dilation_h > 1 or dilation_w > 1) and (stride_h > 1 or stride_w > 1): + pytest.skip( + """Dilation value greater than 1 and stride greater than 1 + currently not supported for 1D convolutions""" + ) + if simd > ifm_ch: + pytest.skip("SIMD cannot be larger than number of input channels") + + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) + ofm_dim = [ofm_dim_h, ofm_dim_w] + + x = gen_finn_dt_tensor(idt, (1, ifm_dim_h, ifm_dim_w, ifm_ch)) + model = make_single_slidingwindow_modelwrapper( + k=k, + ifm_ch=ifm_ch, + ifm_dim=ifm_dim, + ofm_dim=ofm_dim, + simd=simd, + stride=stride, + dilation=dilation, + idt=idt, + dw=dw, + ) + + if exec_mode == "cppsim": + model = model.transform(SetExecMode("cppsim")) + model = model.transform(PrepareCppSim()) + model = model.transform(CompileCppSim()) + elif exec_mode == "rtlsim": + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(GiveUniqueNodeNames()) + model = 
model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = model.transform(PrepareRTLSim()) + else: + raise Exception("Unknown exec_mode in test_fpgadataflow_slidingwindow") + + # prepare input data + input_dict = prepare_inputs(x) + # execute model + y_produced = oxe.execute_onnx(model, input_dict)["outp"] + golden = make_single_im2col_modelwrapper( + k=k, + ifm_ch=ifm_ch, + ifm_dim=ifm_dim, + ofm_dim=ofm_dim, + simd=simd, + stride=stride, + dilation=dilation, + idt=idt, + ) + y_expected = oxe.execute_onnx(golden, input_dict)["outp"] + + if dw == 0: + assert (y_produced == y_expected).all() + else: + y_expected = y_expected.reshape( + 1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd + ) + y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) + y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) + assert (y_produced == y_expected).all() + + if exec_mode == "rtlsim": + node = model.get_nodes_by_op_type("ConvolutionInputGenerator1D")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + exp_cycles_dict = model.analysis(exp_cycles_per_layer) + exp_cycles = exp_cycles_dict[node.name] + assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + assert exp_cycles != 0 diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index b2835d578b03ee689330d53a9a7b233c9b9f4222..ab47b300136bd95622f064b30e3bbaae76a61597 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -54,15 +54,20 @@ target_clk_ns = 10 def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_style): + pad_h = padding[0] + padding[2] + pad_w = padding[1] + padding[3] + idim_h, idim_w = idim + assert pad_style == 2, "only pad_style == 2 supported in hlslib" - assert padding > 0, "Output dim should be greater than input dim" - odim = idim + padding + assert pad_h > 0 or pad_w > 0, "Output dim should be greater than input dim" + odim_h = idim_h + pad_h + odim_w = idim_w + pad_w inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, idim, idim, num_ch] + "inp", TensorProto.FLOAT, [1, idim_h, idim_w, num_ch] ) outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, odim, odim, num_ch] + "outp", TensorProto.FLOAT, [1, odim_h, odim_w, num_ch] ) FMPadding = helper.make_node( @@ -94,9 +99,9 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty # input image dimension -@pytest.mark.parametrize("idim", [8]) +@pytest.mark.parametrize("idim", [[8, 8], [10, 8]]) # number of rows and number of cols to add -@pytest.mark.parametrize("pad", [2, 3]) +@pytest.mark.parametrize("pad", [[1, 1, 1, 1], [1, 1, 2, 2], [1, 3, 2, 3]]) # number of channels @pytest.mark.parametrize("num_ch", [2, 4]) # Input parallelism @@ -112,10 +117,22 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): if num_ch % simd != 0: pytest.skip(" num_ch % simd != 0, skipping") + + idim_h, idim_w = idim + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + + if idim_h == idim_w and pad_h != pad_w: + pytest.skip( + """Only equal padding along the dimensions for square images + is supported, skipping""" + ) + # generate input data - x = gen_finn_dt_tensor(idt, [1, idim, idim, num_ch]) + x = gen_finn_dt_tensor(idt, [1, idim_h, idim_w, num_ch]) input_dict = {"inp": x} - odim = idim 
+ pad + odim_h = idim_h + pad_h + odim_w = idim_w + pad_w model = make_single_fmpadding_modelwrapper(idim, pad, num_ch, simd, idt, pad_style) model = model.transform(InferShapes()) @@ -129,24 +146,26 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) y_produced = oxe.execute_onnx(model, input_dict)["outp"] - expected_oshape = (1, odim, odim, num_ch) + expected_oshape = (1, odim_h, odim_w, num_ch) assert y_produced.shape == expected_oshape # calculate reference # calculate correct pad according to parameters if pad_style == 2: - if pad % 2 == 0: - pad_up = pad // 2 - pad_left = pad // 2 + if pad_h % 2 == 0: + pad_up = pad_h // 2 + else: + pad_up = pad_h // 2 + 1 + if pad_w % 2 == 0: + pad_left = pad_w // 2 else: - pad_up = pad // 2 + 1 - pad_left = pad // 2 + 1 + pad_left = pad_w // 2 + 1 else: - pad_up = pad // 2 - pad_left = pad // 2 + pad_up = pad_h // 2 + pad_left = pad_w // 2 - pad_down = pad - pad_up - pad_right = pad - pad_left + pad_down = pad_h - pad_up + pad_right = pad_w - pad_left y_expected = np.pad( x, ((0, 0), (pad_up, pad_down), (pad_left, pad_right), (0, 0)), "constant" diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index 23d7610dfdf434602f326e1117b072f312962295..4fa780548a544d92e02b28486ae1e325ff1f9a9b 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -52,7 +52,7 @@ from finn.util.basic import ( alveo_part_map, alveo_default_platform, ) -from finn.util.fpgadataflow import pyverilate_stitched_ip +from finn.util.pyverilator import pyverilate_stitched_ip from finn.util.test import load_test_checkpoint_or_skip from finn.transformation.fpgadataflow.synth_ooc import SynthOutOfContext from finn.transformation.infer_data_layouts import InferDataLayouts diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py new file mode 100644 index 0000000000000000000000000000000000000000..4756d4fe18ccd4934b4041c70bf2f3a1bb577ec7 --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -0,0 +1,242 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import pytest + +import numpy as np +from onnx import TensorProto, helper + +import finn.core.onnx_exec as oxe +from finn.core.datatype import DataType +from finn.core.modelwrapper import ModelWrapper +from finn.util.basic import gen_finn_dt_tensor +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.general import GiveUniqueNodeNames +from finn.custom_op.general.multithreshold import multithreshold + +from finn.custom_op.registry import getCustomOp +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer + + +def _infer_sparse_weight_tensor(W_conv, k_h, k_w, channels): + W_sparse = np.zeros((channels, channels, k_h, k_w)) + for ch in range(channels): + W_sparse[ch][ch] = W_conv[ch][0] + W_conv = W_sparse.astype(np.float32) + W_matmul = W_conv.transpose(0, 2, 3, 1) + W_matmul = W_matmul.reshape(channels, channels * k_h * k_w) + W_matmul = W_matmul.T + + return W_matmul + + +def _calculate_dot_prod_range(dt_a, dt_b, len): + """Returns the (min,max) values a dot product between two (un)signed vectors of + types dt_a and dt_b of len elements can take.""" + min_prod = 2 ** 30 + max_prod = -(2 ** 30) + for a_val in [dt_a.min(), dt_a.max()]: + for b_val in [dt_b.min(), dt_b.max()]: + prod = a_val * b_val * len + if prod < min_prod: + min_prod = prod + if prod > max_prod: + max_prod = prod + return (min_prod, max_prod) + + +def _make_single_vvau_modelwrapper( + W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T=None, tdt=None +): + in_shape = [1, dim_h, dim_w, k_h * k_w * channels] # [N, H, W, K*K*CH] + out_shape = [ + 1, + dim_h, + dim_w, + channels, + ] # [N, H, W, OFM_CH] (OFM_CH=IFM_CH because depthwise convolution) + + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, in_shape) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, out_shape) + + if T is not None: + no_act = 0 + node_inp_list = ["inp", "weights", "thresh"] + actval = odt.min() + else: + no_act = 1 + node_inp_list = ["inp", "weights"] + actval = 0 + + VVAU_node = helper.make_node( + "Vector_Vector_Activate_Batch", + node_inp_list, + ["outp"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + PE=pe, + Dim=[dim_h, dim_w], + Channels=channels, + Kernel=[k_h, k_w], + resType="lut", + ActVal=actval, + inputDataType=idt.name, + weightDataType=wdt.name, + outputDataType=odt.name, + noActivation=no_act, + ) + + graph = helper.make_graph( + nodes=[VVAU_node], name="vvau_graph", inputs=[inp], outputs=[outp] + ) + + model = helper.make_model(graph, producer_name="vvau-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", 
idt)
+    model.set_tensor_datatype("outp", odt)
+    model.set_tensor_datatype("weights", wdt)
+
+    model.set_initializer("weights", W)
+    model.set_tensor_shape("weights", (channels, 1, k_h, k_w))
+
+    if T is not None:
+        model.set_tensor_datatype("thresh", tdt)
+        model.set_initializer("thresh", T)
+
+    return model
+
+
+def prepare_inputs(input_tensor):
+    return {"inp": input_tensor}
+
+
+# input datatype
+@pytest.mark.parametrize("idt", [DataType.UINT4, DataType.UINT8])
+# weight datatype
+@pytest.mark.parametrize("wdt", [DataType.INT4])
+# activation: None or DataType
+@pytest.mark.parametrize("act", [DataType.UINT4, None])
+# PE
+@pytest.mark.parametrize("pe", [1, "channels"])
+# Input image shape
+@pytest.mark.parametrize("dim_h", [10])
+@pytest.mark.parametrize("dim_w", [10, 1])
+# Kernel shape
+@pytest.mark.parametrize("k_h", [3])
+@pytest.mark.parametrize("k_w", [3, 1])
+# Number of input and output channels
+@pytest.mark.parametrize("channels", [3, 4])
+# execution mode
+@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"])
+@pytest.mark.slow
+@pytest.mark.vivado
+def test_fpgadataflow_vvau(
+    idt, wdt, act, pe, dim_h, dim_w, k_h, k_w, channels, exec_mode
+):
+    if pe == "channels":
+        pe = channels
+
+    if dim_w == 1 and k_w != 1:
+        pytest.skip("1D image requires 1D kernel, skipping.")
+
+    if channels % pe != 0:
+        pytest.skip("Requirement Channels divisible by PE is violated.")
+
+    # Generate weights in expected shape for ONNX and HLS node
+    W = gen_finn_dt_tensor(wdt, (channels, 1, k_h, k_w))  # shape: [channels, 1, k, k]
+    W_onnx = _infer_sparse_weight_tensor(
+        W, k_h, k_w, channels
+    )  # shape: [k*k*channels, channels]
+
+    # Generate inputs in expected format for ONNX and HLS node
+    x = gen_finn_dt_tensor(idt, (1, dim_h, dim_w, k_h * k_w * channels))
+    x_vvau = x.reshape(1, dim_h, dim_w, k_h * k_w, channels // pe, pe)
+    x_vvau = x_vvau.transpose(0, 1, 2, 4, 3, 5)
+    x_vvau = x_vvau.reshape(1, dim_h, dim_w, channels * k_h * k_w)
+
+    if act is None:
+        T = None
+        tdt = None
+        odt = DataType.INT32
+    else:
+        odt = act
+        (min_v, max_v) = _calculate_dot_prod_range(idt, wdt, k_h * k_w * channels)
+        n_steps = act.get_num_possible_values() - 1
+        T = np.random.randint(min_v, max_v - 1, (channels, n_steps)).astype(np.float32)
+        T = np.sort(T, axis=1)
+        tdt = DataType.INT32
+
+    model = _make_single_vvau_modelwrapper(
+        W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt
+    )
+
+    if exec_mode == "cppsim":
+        model = model.transform(SetExecMode("cppsim"))
+        model = model.transform(PrepareCppSim())
+        model = model.transform(CompileCppSim())
+    elif exec_mode == "rtlsim":
+        model = model.transform(SetExecMode("rtlsim"))
+        model = model.transform(GiveUniqueNodeNames())
+        model = model.transform(PrepareIP("xc7z020clg400-1", 5))
+        model = model.transform(HLSSynthIP())
+        model = model.transform(PrepareRTLSim())
+    else:
+        raise Exception("Unknown exec_mode in test_fpgadataflow_vvau")
+
+    input_dict = prepare_inputs(x_vvau)
+
+    # Calculate output
+    y_expected = np.matmul(x, W_onnx)  # Y is in [N, H, W, C] format
+    if T is not None:
+        # Reshape Y, as multithreshold expects Y to be in [N, C, H, W] format
+        y_expected = np.transpose(y_expected, (0, 3, 1, 2))
+        y_expected = multithreshold(y_expected, T)
+        y_expected = np.transpose(y_expected, (0, 2, 3, 1))
+        # signed offset
+        y_expected += act.min()
+
+    y_produced = oxe.execute_onnx(model, input_dict, return_full_exec_context=False)[
+        "outp"
+    ]
+
+    assert (y_produced == y_expected).all(), "simulation output mismatch"
+
+    if exec_mode == "rtlsim":
+        
node = model.get_nodes_by_op_type("Vector_Vector_Activate_Batch")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + exp_cycles_dict = model.analysis(exp_cycles_per_layer) + exp_cycles = exp_cycles_dict[node.name] + assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + assert exp_cycles != 0 diff --git a/tests/transformation/streamline/test_remove_identity_ops.py b/tests/transformation/streamline/test_remove_identity_ops.py index 536c1ab0b48fa44388da23f45b528da3c5f3b2f2..d02e1d39755bf4783cd5dbdc2b88ca0931e02874 100644 --- a/tests/transformation/streamline/test_remove_identity_ops.py +++ b/tests/transformation/streamline/test_remove_identity_ops.py @@ -11,18 +11,29 @@ from finn.transformation.streamline.remove import RemoveIdentityOps from finn.util.basic import gen_finn_dt_tensor -def insert_identity_op(model, op): +def insert_identity_op(model, op, as_first_node, approx): + if approx: + zero_val = 0.000001 + one_val = 0.999999 + else: + zero_val = 0.0 + one_val = 1.0 if op in ["Add", "Sub"]: - val = np.asarray([0.0], dtype=np.float32) + val = np.asarray([zero_val], dtype=np.float32) elif op in ["Mul", "Div"]: - val = np.asarray([1.0], dtype=np.float32) + val = np.asarray([one_val], dtype=np.float32) else: return - identity_node = helper.make_node(op, ["div_out", "value"], ["ident_out"]) graph = model.graph - graph.node.insert(3, identity_node) - graph.node[-1].input[0] = "ident_out" + if as_first_node: + identity_node = helper.make_node(op, ["inp", "value"], ["ident_out"]) + graph.node.insert(0, identity_node) + graph.node[1].input[0] = "ident_out" + else: + identity_node = helper.make_node(op, ["div_out", "value"], ["ident_out"]) + graph.node.insert(3, identity_node) + graph.node[-1].input[0] = "ident_out" model.set_initializer("value", val) return model @@ -30,7 +41,9 @@ def insert_identity_op(model, op): # identity operations to be inserted @pytest.mark.parametrize("op", ["Add", "Sub", "Mul", "Div"]) -def test_remove_identity_ops(op): +@pytest.mark.parametrize("approx", [False, True]) +@pytest.mark.parametrize("as_first_node", [False, True]) +def test_remove_identity_ops(op, as_first_node, approx): # set up onnx model inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4, 1, 1]) @@ -64,7 +77,7 @@ def test_remove_identity_ops(op): model.set_initializer("shape", shape_values) model.set_initializer("div", div_values) model.set_initializer("matmul", matmul_values) - insert_identity_op(model, op) + insert_identity_op(model, op, as_first_node, approx) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) idict = {"inp": inp_values} @@ -78,4 +91,4 @@ def test_remove_identity_ops(op): odict = oxe.execute_onnx(model, idict) out_after = odict["outp"] - assert (out_before == out_after).all() + assert np.isclose(out_before, out_after, atol=1e-3).all()
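(A closing note on the relaxed assertion in test_remove_identity_ops: with approx=True the inserted op is only approximately an identity, an Add of 0.000001 or a Mul by 0.999999, so removing it perturbs the output slightly and exact equality can no longer be expected. A small numeric illustration of why np.isclose with atol=1e-3 is the appropriate check:)

import numpy as np

x = np.random.rand(1, 4, 1, 1).astype(np.float32)
out_before = x * 0.999999  # output with the near-identity Mul still in the graph
out_after = x              # output after RemoveIdentityOps folds the Mul away
# per-element deviation is on the order of 1e-7, well inside the 1e-3 tolerance
assert np.isclose(out_before, out_after, atol=1e-3).all()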