diff --git a/notebooks/end2end_example/tfc_end2end_example.ipynb b/notebooks/end2end_example/tfc_end2end_example.ipynb index 27c5c3eead98a030276bfa515cf3dc836c91d721..3505d6f5180be96b778c01a0d3b2365ca2e38e4d 100644 --- a/notebooks/end2end_example/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/tfc_end2end_example.ipynb @@ -42,7 +42,7 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -95,18 +95,9 @@ }, { "cell_type": "code", - "execution_count": 75, + "execution_count": 6, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/brevitas_cnv_lfc/training_scripts/models/TFC.py:73: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " x = 2.0 * x - torch.tensor([1.0])\n" - ] - } - ], + "outputs": [], "source": [ "import onnx\n", "from finn.util.test import get_test_model_trained\n", @@ -126,7 +117,7 @@ }, { "cell_type": "code", - "execution_count": 82, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -152,10 +143,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc62d60e5c0>" + "<IPython.lib.display.IFrame at 0x7f186ccfbe10>" ] }, - "execution_count": 82, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -173,7 +164,7 @@ }, { "cell_type": "code", - "execution_count": 83, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -258,7 +249,7 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -285,7 +276,7 @@ }, { "cell_type": "code", - "execution_count": 85, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -311,10 +302,10 @@ " " ], 
"text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc6c4430828>" + "<IPython.lib.display.IFrame at 0x7f186e386240>" ] }, - "execution_count": 85, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -335,7 +326,7 @@ }, { "cell_type": "code", - "execution_count": 86, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -350,6 +341,7 @@ " ConvertSubToAdd(),\n", " BatchNormToAffine(),\n", " ConvertSignToThres(),\n", + " MoveAddPastMul(),\n", " MoveScalarAddPastMatMul(),\n", " MoveScalarMulPastMatMul(),\n", " MoveAddPastMul(),\n", @@ -387,7 +379,7 @@ }, { "cell_type": "code", - "execution_count": 104, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -413,10 +405,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc65422bf60>" + "<IPython.lib.display.IFrame at 0x7f186cd470b8>" ] }, - "execution_count": 104, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -441,7 +433,7 @@ }, { "cell_type": "code", - "execution_count": 105, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -467,10 +459,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc64d8d9f98>" + "<IPython.lib.display.IFrame at 0x7f17f04bbc18>" ] }, - "execution_count": 105, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -508,7 +500,7 @@ }, { "cell_type": "code", - "execution_count": 90, + "execution_count": 14, "metadata": { "scrolled": false }, @@ -536,10 +528,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc62d60eb70>" + "<IPython.lib.display.IFrame at 0x7f1868061eb8>" ] }, - "execution_count": 90, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -570,7 +562,7 @@ }, { "cell_type": "code", - "execution_count": 91, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -596,10 +588,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc62d60e1d0>" + "<IPython.lib.display.IFrame at 0x7f186cc55e48>" ] }, - 
"execution_count": 91, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -622,7 +614,7 @@ }, { "cell_type": "code", - "execution_count": 92, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -631,7 +623,7 @@ "text": [ "\n", "Stopping http://0.0.0.0:8081\n", - "Serving '/tmp/finn_maltanar/dataflow_partition_l2y9b77c/df_model.onnx' at http://0.0.0.0:8081\n" + "Serving '/tmp/finn_maltanar/dataflow_partition_h1c4i5gn/df_model.onnx' at http://0.0.0.0:8081\n" ] }, { @@ -648,10 +640,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc62d60e320>" + "<IPython.lib.display.IFrame at 0x7f17f04c70f0>" ] }, - "execution_count": 92, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -672,7 +664,7 @@ }, { "cell_type": "code", - "execution_count": 93, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -692,7 +684,7 @@ }, { "cell_type": "code", - "execution_count": 94, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -726,7 +718,7 @@ }, { "cell_type": "code", - "execution_count": 95, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -762,7 +754,7 @@ " 'rtlsim_trace': ('s', False, '')}" ] }, - "execution_count": 95, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -788,7 +780,7 @@ }, { "cell_type": "code", - "execution_count": 96, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -822,7 +814,7 @@ }, { "cell_type": "code", - "execution_count": 98, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -848,10 +840,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc654223780>" + "<IPython.lib.display.IFrame at 0x7f1868061d30>" ] }, - "execution_count": 98, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -884,7 +876,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -903,7 +895,7 @@ }, { "cell_type": "code", 
- "execution_count": 3, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -934,7 +926,7 @@ }, { "cell_type": "code", - "execution_count": 99, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -956,7 +948,7 @@ }, { "cell_type": "code", - "execution_count": 100, + "execution_count": 25, "metadata": {}, "outputs": [], "source": [ @@ -975,7 +967,7 @@ }, { "cell_type": "code", - "execution_count": 106, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -1001,10 +993,10 @@ " " ], "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc65422be48>" + "<IPython.lib.display.IFrame at 0x7f17f04c9470>" ] }, - "execution_count": 106, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -1026,7 +1018,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 27, "metadata": {}, "outputs": [ { @@ -1055,7 +1047,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 28, "metadata": {}, "outputs": [ { @@ -1063,8 +1055,8 @@ "output_type": "stream", "text": [ "#!/bin/bash \r\n", - "cd /tmp/finn_maltanar/code_gen_ipgen_StreamingFCLayer_Batch_hc367wg4\r\n", - "vivado_hls /tmp/finn_maltanar/code_gen_ipgen_StreamingFCLayer_Batch_hc367wg4/hls_syn_StreamingFCLayer_Batch_0.tcl\r\n", + "cd /tmp/finn_maltanar/code_gen_ipgen_StreamingFCLayer_Batch_5f0hmok_\r\n", + "vivado_hls /tmp/finn_maltanar/code_gen_ipgen_StreamingFCLayer_Batch_5f0hmok_/hls_syn_StreamingFCLayer_Batch_0.tcl\r\n", "cd /workspace/finn\r\n" ] } @@ -1085,7 +1077,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -1095,7 +1087,7 @@ "\r\n", "set config_proj_name project_StreamingFCLayer_Batch_0\r\n", "puts \"HLS project: $config_proj_name\"\r\n", - "set config_hwsrcdir \"/tmp/finn_maltanar/code_gen_ipgen_StreamingFCLayer_Batch_hc367wg4\"\r\n", + "set config_hwsrcdir \"/tmp/finn_maltanar/code_gen_ipgen_StreamingFCLayer_Batch_5f0hmok_\"\r\n", "puts \"HW 
source dir: $config_hwsrcdir\"\r\n", "set config_proj_part \"xczu3eg-sbva484-1-e\"\r\n", "\r\n", @@ -1146,7 +1138,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 30, "metadata": {}, "outputs": [], "source": [ @@ -1166,22 +1158,22 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 31, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[key: \"vivado_stitch_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo\"\n", ", key: \"vivado_stitch_vlnv\"\n", "value: \"xilinx_finn:finn:finn_design:1.0\"\n", ", key: \"wrapper_filename\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", "]" ] }, - "execution_count": 33, + "execution_count": 31, "metadata": {}, "output_type": "execute_result" } @@ -1192,16 +1184,16 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke'" + "'/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo'" ] }, - "execution_count": 34, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } @@ -1226,7 +1218,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 33, "metadata": {}, "outputs": [], "source": [ @@ -1266,7 +1258,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 34, "metadata": { "scrolled": true }, @@ -1275,17 +1267,17 @@ "data": { "text/plain": [ "[key: \"vivado_stitch_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo\"\n", ", key: \"vivado_stitch_vlnv\"\n", "value: 
\"xilinx_finn:finn:finn_design:1.0\"\n", ", key: \"wrapper_filename\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", ", key: \"vivado_pynq_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hqlnpt5q\"\n", + "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hq9mfroo\"\n", "]" ] }, - "execution_count": 36, + "execution_count": 34, "metadata": {}, "output_type": "execute_result" } @@ -1299,7 +1291,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 35, "metadata": {}, "outputs": [ { @@ -1325,7 +1317,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -1348,26 +1340,26 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 37, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[key: \"vivado_stitch_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo\"\n", ", key: \"vivado_stitch_vlnv\"\n", "value: \"xilinx_finn:finn:finn_design:1.0\"\n", ", key: \"wrapper_filename\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", ", key: \"vivado_pynq_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hqlnpt5q\"\n", + "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hq9mfroo\"\n", ", key: \"vivado_pynq_bitfile\"\n", - "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hqlnpt5q/resizer.bit\"\n", + "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hq9mfroo/resizer.bit\"\n", "]" ] }, - 
"execution_count": 39, + "execution_count": 37, "metadata": {}, "output_type": "execute_result" } @@ -1381,7 +1373,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -1399,7 +1391,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ @@ -1417,7 +1409,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 40, "metadata": {}, "outputs": [ { @@ -1511,7 +1503,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ @@ -1533,24 +1525,24 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 42, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[key: \"vivado_stitch_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo\"\n", ", key: \"vivado_stitch_vlnv\"\n", "value: \"xilinx_finn:finn:finn_design:1.0\"\n", ", key: \"wrapper_filename\"\n", - "value: \"/tmp/finn_maltanar/vivado_stitch_proj_n3me5eke/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", + "value: \"/tmp/finn_maltanar/vivado_stitch_proj_oo2lpoeo/finn_vivado_stitch_proj.srcs/sources_1/bd/finn_design/hdl/finn_design_wrapper.v\"\n", ", key: \"vivado_pynq_proj\"\n", - "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hqlnpt5q\"\n", + "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hq9mfroo\"\n", ", key: \"vivado_pynq_bitfile\"\n", - "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hqlnpt5q/resizer.bit\"\n", + "value: \"/tmp/finn_maltanar/vivado_pynq_proj_hq9mfroo/resizer.bit\"\n", ", key: \"pynq_driver_dir\"\n", - "value: \"/tmp/finn_maltanar/pynq_driver_yu_l_jao\"\n", + "value: \"/tmp/finn_maltanar/pynq_driver_25t8u9sd\"\n", ", key: \"pynq_ip\"\n", "value: \"192.168.3.1\"\n", ", key: \"pynq_username\"\n", @@ -1560,15 +1552,15 @@ ", key: 
\"pynq_target_dir\"\n", "value: \"/home/xilinx/finn_tfc_end2end_example\"\n", ", key: \"pynq_deployment_dir\"\n", - "value: \"/tmp/finn_maltanar/pynq_deployment_1oyo7x66\"\n", + "value: \"/tmp/finn_maltanar/pynq_deployment_mpyziv7h\"\n", ", key: \"pynq_deploy_dir\"\n", - "value: \"/tmp/finn_maltanar/pynq_deployment_1oyo7x66\"\n", + "value: \"/tmp/finn_maltanar/pynq_deployment_mpyziv7h\"\n", ", key: \"exec_mode\"\n", "value: \"remote_pynq\"\n", "]" ] }, - "execution_count": 49, + "execution_count": 42, "metadata": {}, "output_type": "execute_result" } @@ -1579,13 +1571,14 @@ }, { "cell_type": "code", - "execution_count": 103, + "execution_count": 43, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ + "/home/xilinx/finn_tfc_end2end_example/pynq_deployment_1oyo7x66:\r\n", "total 5820\r\n", "-rw-r--r-- 1 xilinx xilinx 1934 Feb 13 13:36 driver.py\r\n", "drwxr-xr-x 4 xilinx xilinx 4096 Feb 13 13:36 finn\r\n", @@ -1593,7 +1586,14 @@ "-rw-r--r-- 1 root root 120 Feb 13 14:24 output.npy\r\n", "-rw-r--r-- 1 xilinx xilinx 5568787 Feb 13 13:36 resizer.bit\r\n", "-rw-r--r-- 1 xilinx xilinx 368173 Feb 13 13:36 resizer.hwh\r\n", - "-rw-r--r-- 1 root root 32 Feb 13 14:24 sds_trace_data.dat\r\n" + "-rw-r--r-- 1 root root 32 Feb 13 14:24 sds_trace_data.dat\r\n", + "\r\n", + "/home/xilinx/finn_tfc_end2end_example/pynq_deployment_mpyziv7h:\r\n", + "total 5808\r\n", + "-rw-r--r-- 1 xilinx xilinx 1934 Feb 28 16:09 driver.py\r\n", + "drwxr-xr-x 4 xilinx xilinx 4096 Feb 28 16:09 finn\r\n", + "-rw-r--r-- 1 xilinx xilinx 5568787 Feb 28 16:09 resizer.bit\r\n", + "-rw-r--r-- 1 xilinx xilinx 368173 Feb 28 16:09 resizer.hwh\r\n" ] } ], @@ -1610,30 +1610,18 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 44, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "<matplotlib.image.AxesImage at 0x7fc62d83dbe0>" + "<matplotlib.image.AxesImage at 0x7f17e0a82e10>" ] }, - "execution_count": 52, + "execution_count": 44, "metadata": {}, 
"output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAARX0lEQVR4nO3dfYyVZXrH8d/FoDAw8iYRCaisG/5QqmUbgk1KyOKmxlUMbKJm/aPauAmarMmqTVqz/UOSaqJVa/pH3YStL9CsmiWoq0a7a82mWo1GNFQQW1CULGR4E5H3t+HqH/NgZ3We6549z3nOc9z7+0kmM3Ouec65OTM/zsv13Pdt7i4Af/xGNT0AAJ1B2IFMEHYgE4QdyARhBzIxupM3Zma89Z+ZUaPKH09OnTpV23VXvf6enp6wPjAw0PJ1183dbbjLK4XdzK6U9M+SeiT9q7vfV+X6cmU27O/mS6k/6ip/eKNHx38CqcCk6r29vaW1Q4cOhcem9PX1hfUDBw6U1lIt50mTJoX1zz77LKx3o5afxptZj6R/kfR9SRdLusHMLm7XwAC0V5XX7PMlfeTuW9z9uKSnJS1pz7AAtFuVsM+Q9Lsh328rLvs9ZrbMzNaa2doKtwWgotrfoHP3FZJWSLxBBzSpyiP7dknnDfl+ZnEZgC5UJezvSJptZt8yszMl/VDS8+0ZFoB2a/lpvLufNLPbJP1ag623x9z9g7aNLCPjx48P6wcPHmz5useMGRPWjx07FtZTbcFx48aF9ai9lmoppqSOj9prqT76vn37WhpTN6v0mt3dX5L0UpvGAqBGnC4LZIKwA5kg7EAmCDuQCcIOZIKwA5mwTq4um+vpsqled6qXffTo0bA+duzYlo9Nia676vWfffbZYb3qNNLofp06dWp47O7du8N6amrwyZMnw3qdyuaz88gOZIKwA5kg7EAmCDuQCcIOZIKwA5mg9fYNkGrNVfkd1nnddUtNDa6yem1q6m5qanCTS03TegMyR9iBTBB2IBOEHcgEYQcyQdiBTBB2IBP02TvgrLPOCuvRbqOSNHHixLB+4sSJ0lpqN9LUFNbPP/88rC9YsCCs33rrraW1VC/6jjvuCOtbt24N601OM20SfXYgc4QdyARhBzJB2IFMEHYgE4QdyARhBzJBn/0b4JFHHgnrUS871Wuuuox1b29vWI+ktk2+5JJLwvqmTZvC+vHjx0trZ5xxRnhsdO6ClP53HzlyJKzXqazPXmnLZjP7VNIBSQOSTrr7vCrXB6A+lcJeWOTue9pwPQBqxGt2IBNVw+6SfmNm75rZsuF+wMyWmdlaM1tb8bYAVFD1afwCd99uZudIesXM/sfdXxv6A+6+QtIKiTfogCZVemR39+3F512SnpU0vx2DAtB+LYfdzMab2Vmnv5Z0haQN7RoYgPaq8jR+mqRniz7taElPuvu/t2VUf2RSWzYvWrQorF922WVhPeqVHzx4MDw21W/u6+sL66nzNKI566m11x999NGWr1uS7rzzztLaW2+9FR5b93bSTWg57O6+RdKftnEsAGpE6w3IBGEHMkHYgUwQdiAThB3IBFNcu0Bqqubs2bPD+v79+0trEyZMCI+NpoFK6SmwVbZ8TrX9UlJLcO/du7e0tnTp0vDYdevWhfVUSzLV8qwTS0kDmSPsQCYIO5AJwg5kgrADmSDsQCYIO5CJdiw42TFRT7fOfnBK6thU/ZZbbgnrq1atCuszZ85s+bZTffZ77rknrK9evTqsn3nmmaW1K664Ijz2wQcfDOuprbCj2168eHF47LZt28L6nj3fvDVWeWQHMkHYgUwQdiAThB3IBGEHMkHYgUwQdiATHZ/Pnup3Rzo51naqOvd54cKFYf2iiy4qrY0bNy48dvTo+FSLNWvWhPUtW7aE9SpSyz3PmTM
nrKfu90jq75T57AC6FmEHMkHYgUwQdiAThB3IBGEHMkHYgUx0vM8+alT5/y9V54XXqcpc+lOnTlW67eg+S9VPnjwZHjt+/PiwfujQobCe2o46+p2l5tJfffXVYf3pp58O61X67Kk17VP3a5Na7rOb2WNmtsvMNgy5bIqZvWJmm4vPk9s5WADtN5Kn8U9IuvIrl90l6VV3ny3p1eJ7AF0sGXZ3f03SV/fRWSJpZfH1SknxXjoAGtfqGnTT3L2/+HqHpGllP2hmyyQta/F2ALRJ5QUn3d2jDRvdfYWkFRIbOwJNarX1ttPMpktS8XlX+4YEoA6thv15STcVX98k6VftGQ6AuiT77Gb2lKTvSpoqaaekuyU9J+mXks6XtFXS9e5evhn2/19XbU/jq64bX7UeSfVkU3uoR/uvV9Xb2xvWjxw5EtZT5wBUOcfgwgsvDOsff/xxy9edGldqTfqUw4cPVzq+irI+e/I1u7vfUFL6XqURAegoTpcFMkHYgUwQdiAThB3IBGEHMsGWzYVUC3JgYCCsR3p6esJ61WWHozZRqsWUmsKakrr+aNvkqCZJixYtamlMp0W/0xMnToTHpqa4Vvl7aAqP7EAmCDuQCcIOZIKwA5kg7EAmCDuQCcIOZKKr+ux1budcdTnnKuq+7QMHDpTWUv3iVK87dXyqTx8tF51axvq6664L60ePHg3rY8eOLa2l+uyp31mTWzK3ikd2IBOEHcgEYQcyQdiBTBB2IBOEHcgEYQcy0fE+ezS3u5t75dGSyanllFPq3Fb50ksvDY+dM2dOWE8tJf3cc8+F9UjUB5ekhQsXhvUqW3inlqGOzl2Qqi/B3QQe2YFMEHYgE4QdyARhBzJB2IFMEHYgE4QdyETH++zRnPU6++ipufKped1RT3j06PhuXLp0aVhPHb9kyZKwPmbMmNLa3Llzw2MnTZoU1lO97Ndff73l42fPnh0em1qbPdXrXr9+fWnt8ssvD4+N7lOpO/voKclHdjN7zMx2mdmGIZctN7PtZrau+Liq3mECqGokT+OfkHTlMJc/7O5zi4+X2jssAO2WDLu7vyZpbwfGAqBGVd6gu83M3i+e5k8u+yEzW2Zma81sbYXbAlBRq2H/maRvS5orqV/SQ2U/6O4r3H2eu89r8bYAtEFLYXf3ne4+4O6nJP1c0vz2DgtAu7UUdjObPuTbH0jaUPazALqDpfqoZvaUpO9Kmippp6S7i+/nSnJJn0q6xd37kzdmFt5Yqt+cmvcdmTVrVli/5pprwvrixYtLa6l516l526m509H+61K8hnlfX194bErVed3R7/SLL74Ij504cWJYT9m8eXNpbdWqVeGxDz1U+spUUnf32d192JNKkifVuPsNw1z8aOURAegoTpcFMkHYgUwQdiAThB3IBGEHMpFsvbX1xsw8Wna5zimud999d1hfvnx5WN+zZ09pberUqa0M6UuprYf37o2nJkT1Cy64IDw21RZMbdmccuzYsdJaahpp6u8h1YqNpi2ntlx++eWXw/rNN98c1pvc0rms9cYjO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmeh4nz2qV9maODXVMtX3rLLt8q5du8L61q1bw/oDDzwQ1levXh3W580rXwTo4YcfDo9Nbdk8eXLpimOSpG3btoX16Hf6xBNPhMd+8sknYf3aa68N69HU46rTa1988cWwnpoyXSf67EDmCDuQCcIOZIKwA5kg7EAmCDuQCcIOZKKjffZRo0Z5ND/6+PHj4fHnnHNOaW337t3hsak+e2rudNQvTm0HvWnTprA+ZcqUsJ5atjha7vn8888Pj03NZ08t771v376wfuONN5bWXnjhhfDYlNQ6AtFy0YsWLQqPTa0xkLpfUst/14k+O5A5wg5kgrADmSDsQCYIO5AJwg5kgrADmeiq+exVpPqeK1euDOvXX399y9d/+PDh8Nhx48aF9dS2yKl5/gMDA6W11Lrvb775Zlh/8sknw/q6devC+ht
vvFFaS51fkOrhp37n0Xkb8+fPD499++23w/rjjz8e1lPrytep5T67mZ1nZr81s41m9oGZ/aS4fIqZvWJmm4vP8SoHABo1kqfxJyX9jbtfLOnPJf3YzC6WdJekV919tqRXi+8BdKlk2N29393fK74+IOlDSTMkLZF0+rnxSklL6xokgOriFz1fYWazJH1H0tuSprl7f1HaIWlayTHLJC1rfYgA2mHE78abWZ+kNZJud/f9Q2s++C7fsG++ufsKd5/n7uWrIgKo3YjCbmZnaDDov3D3Z4qLd5rZ9KI+XVK8xCqARiVbbzY4f3OlpL3ufvuQyx+Q9Jm732dmd0ma4u5/m7iu8MbOPffccCw7duwI65Fo+15JmjlzZli/9957S2szZswIj01tuZzaujjaLlqS7r///tLaxo0bw2NTU1xT2yKnpKYtR1JtwxMnToT1aOpx6u9+woQJYb3qlOk6lbXeRvKa/S8k/ZWk9WZ2uqn6U0n3Sfqlmf1I0lZJcaMaQKOSYXf3/5JU9l/k99o7HAB14XRZIBOEHcgEYQcyQdiBTBB2IBMdneLa09PjUV83NVU06n3u37+/tCZJfX19YT3VN416vlX6vVK655s6RyDqZad6+MeOHQvrVUW/79Ryzampwam/lyq/s5SqY6sTS0kDmSPsQCYIO5AJwg5kgrADmSDsQCYIO5CJrlpKOjWHOOqlp5YVrjove/r06aW1/v7+0tpI9Pb2hvXUls11XndqGetDhw6F9SpzylNGjYofq6rMKW/6/IQq6LMDmSPsQCYIO5AJwg5kgrADmSDsQCYIO5CJruqzA6iOPjuQOcIOZIKwA5kg7EAmCDuQCcIOZIKwA5lIht3MzjOz35rZRjP7wMx+Uly+3My2m9m64uOq+ocLoFXJk2rMbLqk6e7+npmdJeldSUs1uB/7QXd/cMQ3xkk1QO3KTqoZyf7s/ZL6i68PmNmHkma0d3gA6vYHvWY3s1mSviPp7eKi28zsfTN7zMwmlxyzzMzWmtnaSiMFUMmIz403sz5J/ynpXnd/xsymSdojySX9gwaf6t+cuA6exgM1K3saP6Kwm9kZkl6U9Gt3/6dh6rMkvejuf5K4HsIO1KzliTA2uDzoo5I+HBr04o27034gaUPVQQKoz0jejV8g6XVJ6yWdXpv3p5JukDRXg0/jP5V0S/FmXnRdPLIDNav0NL5dCDtQP+azA5kj7EAmCDuQCcIOZIKwA5kg7EAmCDuQCcIOZIKwA5kg7EAmCDuQCcIOZIKwA5kg7EAmkgtOttkeSVuHfD+1uKwbdevYunVcEmNrVTvHdkFZoaPz2b9242Zr3X1eYwMIdOvYunVcEmNrVafGxtN4IBOEHchE02Ff0fDtR7p1bN06LomxtaojY2v0NTuAzmn6kR1AhxB2IBONhN3MrjSz/zWzj8zsribGUMbMPjWz9cU21I3uT1fsobfLzDYMuWyKmb1iZpuLz8PusdfQ2LpiG+9gm/FG77umtz/v+Gt2M+uRtEnSX0raJukdSTe4+8aODqSEmX0qaZ67N34ChpktlHRQ0qrTW2uZ2T9K2uvu9xX/UU5297/rkrEt1x+4jXdNYyvbZvyv1eB9187tz1vRxCP7fEkfufsWdz8u6WlJSxoYR9dz99ck7f3KxUskrSy+XqnBP5aOKxlbV3D3fnd/r/j6gKTT24w3et8F4+qIJsI+Q9Lvhny/Td2137tL+o2ZvWtmy5oezDCmDdlma4ekaU0OZhjJbbw76SvbjHfNfdfK9udV8Qbd1y1w9z+T9H1JPy6ernYlH3wN1k29059J+rYG9wDsl/RQk4MpthlfI+l2d98/tNbkfTfMuDpyvzUR9u2Szhvy/czisq7g7tuLz7skPavBlx3dZOfpHXSLz7saHs+X3H2nuw+4+ylJP1eD912xzfgaSb9w92eKixu/74YbV6futybC/o6k2Wb2LTM7U9IPJT3fwDi+xszGF2+cyMzGS7pC3bcV9fOSbiq+vkn
Srxocy+/plm28y7YZV8P3XePbn7t7xz8kXaXBd+Q/lvT3TYyhZFwXSvrv4uODpscm6SkNPq07ocH3Nn4k6WxJr0raLOk/JE3porH9mwa39n5fg8Ga3tDYFmjwKfr7ktYVH1c1fd8F4+rI/cbpskAmeIMOyARhBzJB2IFMEHYgE4QdyARhBzJB2IFM/B+tIjCppYWKvAAAAABJRU5ErkJggg==\n", - "text/plain": [ - "<Figure size 432x288 with 1 Axes>" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" } ], "source": [ @@ -1655,7 +1643,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 45, "metadata": {}, "outputs": [], "source": [ @@ -1675,7 +1663,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 48, "metadata": {}, "outputs": [], "source": [ @@ -1697,7 +1685,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 49, "metadata": {}, "outputs": [ { @@ -1706,13 +1694,13 @@ "<BarContainer object of 10 artists>" ] }, - "execution_count": 62, + "execution_count": 49, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD4CAYAAAD8Zh1EAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAMoUlEQVR4nO3cf6jd913H8edryercD1sxV9AkLgEzNQyl5dJVC1pshbSV5A9FGqjoKMs/y6yuKJlKHfWfzcn8gXUa5xzO2azWIcFGI7iKILbkdp11SYxcstrcrNK7rtYfQ7Pg2z/uiZzd3ptzkp57T/u+zwcEzvf7/XC+75ObPDn3e36kqpAkvfa9btoDSJImw6BLUhMGXZKaMOiS1IRBl6QmNk/rxFu2bKkdO3ZM6/SS9Jr05JNPfqmqZlY6NrWg79ixg7m5uWmdXpJek5L8y2rHvOQiSU0YdElqwqBLUhMjg57kY0meT/L5VY4nyW8mmU/ydJIbJj+mJGmUcZ6hfxzYc5njtwO7Bn8OAB955WNJkq7UyKBX1d8CX77Mkn3AH9aSx4HrknzLpAaUJI1nEtfQtwLnhrYXBvteJsmBJHNJ5hYXFydwaknSJev6omhVHa6q2aqanZlZ8X3xkqSrNImgnwe2D21vG+yTJK2jSXxS9ChwMMkR4B3AS1X13ATuV8vsOPTomp/jmQ/cuebnkLQ2RgY9yUPALcCWJAvALwGvB6iq3wGOAXcA88BXgHeu1bCSpNWNDHpV7R9xvIB3T2wiSdJV8ZOiktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1MRYQU+yJ8mZJPNJDq1w/N
uSPJbkqSRPJ7lj8qNKki5nZNCTbAIeBG4HdgP7k+xetuwXgYer6nrgLuC3Jz2oJOnyxnmGfiMwX1Vnq+oCcATYt2xNAd8wuH0t8MXJjShJGsc4Qd8KnBvaXhjsG/Z+4O4kC8Ax4D0r3VGSA0nmkswtLi5exbiSpNVM6kXR/cDHq2obcAfwiSQvu++qOlxVs1U1OzMzM6FTS5JgvKCfB7YPbW8b7Bt2D/AwQFX9PfAGYMskBpQkjWecoJ8AdiXZmeQall70PLpszbPArQBJvouloHtNRZLW0cigV9VF4CBwHDjN0rtZTiZ5IMnewbL7gHcl+QfgIeAnq6rWamhJ0sttHmdRVR1j6cXO4X33D90+Bdw82dEkSVfCT4pKUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSE2MFPcmeJGeSzCc5tMqaH0tyKsnJJH882TElSaNsHrUgySbgQeCHgAXgRJKjVXVqaM0u4H3AzVX1YpJvXquBJUkrG+cZ+o3AfFWdraoLwBFg37I17wIerKoXAarq+cmOKUkaZZygbwXODW0vDPYNexvwtiR/l+TxJHtWuqMkB5LMJZlbXFy8uoklSSua1Iuim4FdwC3AfuD3kly3fFFVHa6q2aqanZmZmdCpJUkwXtDPA9uHtrcN9g1bAI5W1Ver6gvAP7MUeEnSOhkn6CeAXUl2JrkGuAs4umzNn7H07JwkW1i6BHN2gnNKkkYYGfSquggcBI4Dp4GHq+pkkgeS7B0sOw68kOQU8Bjws1X1wloNLUl6uZFvWwSoqmPAsWX77h+6XcB7B38kSVPgJ0UlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpibGCnmRPkjNJ5pMcusy6H0lSSWYnN6IkaRwjg55kE/AgcDuwG9ifZPcK694C3As8MekhJUmjjfMM/UZgvqrOVtUF4Aiwb4V1vwx8EPjvCc4nSRrTOEHfCpwb2l4Y7Pt/SW4AtlfVo5e7oyQHkswlmVtcXLziYSVJq3vFL4omeR3wYeC+UWur6nBVzVbV7MzMzCs9tSRpyDhBPw9sH9reNth3yVuAtwN/k+QZ4CbgqC+MStL6GifoJ4BdSXYmuQa4Czh66WBVvVRVW6pqR1XtAB4H9lbV3JpMLEla0cigV9VF4CBwHDgNPFxVJ5M8kGTvWg8oSRrP5nEWVdUx4NiyffevsvaWVz6WJOlK+UlRSWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJamKsoCfZk+RMkvkkh1Y4/t4kp5I8neSvk7x18qNKki5nZNCTbAIeBG4HdgP7k+xetuwpYLaqvht4BPiVSQ8qSbq8cZ6h3wjMV9XZqroAHAH2DS+oqseq6iuDzceBbZMdU5I0yjhB3wqcG9peGOxbzT3AX6x0IMmBJHNJ5hYXF8efUpI00kRfFE1yNzALfGil41V1uKpmq2p2ZmZmkqeWpA1v8xhrzgPbh7a3DfZ9jSS3Ab8A/EBV/c9kxpMkjWucZ+gngF1Jdia5BrgLODq8IMn1wO8Ce6vq+cmPKU
kaZWTQq+oicBA4DpwGHq6qk0keSLJ3sOxDwJuBP0nyuSRHV7k7SdIaGeeSC1V1DDi2bN/9Q7dvm/BckqQr5CdFJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqYmxgp5kT5IzSeaTHFrh+Ncl+dTg+BNJdkx6UEnS5Y0MepJNwIPA7cBuYH+S3cuW3QO8WFXfDvwa8MFJDypJurzNY6y5EZivqrMASY4A+4BTQ2v2Ae8f3H4E+K0kqaqa4Kyaoh2HHl3zczzzgTvX/ByvNWv99+7feS/jBH0rcG5oewF4x2prqupikpeAbwK+NLwoyQHgwGDzP5OcuZqhr9KW5fNsEFf0uDPF360mfG5/3mOY5s97wjbSz/utqx0YJ+gTU1WHgcPrec5LksxV1ew0zj1NPu6Nxce9sY3zouh5YPvQ9rbBvhXXJNkMXAu8MIkBJUnjGSfoJ4BdSXYmuQa4Czi6bM1R4CcGt38U+IzXzyVpfY285DK4Jn4QOA5sAj5WVSeTPADMVdVR4PeBTySZB77MUvRfbaZyqedVwMe9sfi4N7D4RFqSevCTopLUhEGXpCbaB33U1xZ0lGR7kseSnEpyMsm9055pPSXZlOSpJH8+7VnWU5LrkjyS5J+SnE7yvdOeaT0k+ZnBv/PPJ3koyRumPdO0tA76mF9b0NFF4L6q2g3cBLx7gzzuS+4FTk97iCn4DeAvq+o7ge9hA/wdJNkK/BQwW1VvZ+mNG6/GN2Wsi9ZBZ+hrC6rqAnDpawtaq6rnquqzg9v/wdJ/7K3TnWp9JNkG3Al8dNqzrKck1wLfz9I7zqiqC1X1b9Odat1sBr5+8BmYNwJfnPI8U9M96Ct9bcGGCNslg2++vB54YrqTrJtfB34O+N9pD7LOdgKLwB8MLjd9NMmbpj3UWquq88CvAs8CzwEvVdVfTXeq6eke9A0tyZuBPwV+uqr+fdrzrLUkPww8X1VPTnuWKdgM3AB8pKquB/4LaP+aUZJvZOm37p3AtwJvSnL3dKeanu5BH+drC1pK8nqWYv7Jqvr0tOdZJzcDe5M8w9LltR9M8kfTHWndLAALVXXpN7FHWAp8d7cBX6iqxar6KvBp4PumPNPUdA/6OF9b0E6SsHQt9XRVfXja86yXqnpfVW2rqh0s/aw/U1Ub4tlaVf0rcC7Jdwx23crXfsV1V88CNyV54+Df/a1sgBeDV7Ou37a43lb72oIpj7UebgZ+HPjHJJ8b7Pv5qjo2xZm09t4DfHLw5OUs8M4pz7PmquqJJI8An2Xp3V1PsYG/BsCP/ktSE90vuUjShmHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUxP8BwjHuoBhu1y0AAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAD4CAYAAAD8Zh1EAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAMoUlEQVR4nO3cf6jd913H8edryercD1sxV9AkLgEzNQyl5dJVC1pshbSV5A9FGqjoKMs/y6yuKJlKHfWfzcn8gXUa5xzO2azWIcFGI7iKILbkdp11SYxcstrcrNK7rtYfQ7Pg2z/uiZzd3ptzkp57T/u+zwcEzvf7/XC+75ObPDn3e36kqpAkvfa9btoDSJImw6BLUhMGXZKaMOiS1IRBl6QmNk/rxFu2bKkdO3ZM6/SS9Jr05JNPfqmqZlY6NrWg79ixg7m5uWmdXpJek5L8y2rHvOQiSU0YdElqwqBLUhMjg57kY0meT/L5VY4nyW8mmU/ydJIbJj+mJGmUcZ6hfxzYc5njtwO7Bn8OAB955WNJkq7UyKBX1d8CX77Mkn3AH9aSx4HrknzLpAaUJI1nEtfQtwLnhrYXBvteJsmBJHNJ5hYXFydwaknSJev6omhVHa6q2aqanZlZ8X3xkqSrNImgnwe2D21vG+yTJK2jSXxS9ChwMMkR4B3AS1X13ATuV8vsOPTomp/jmQ/cuebnkLQ2RgY9yUPALcCWJAvALwGvB6iq3wGOAXcA88BXgHeu1bCSpNWNDHpV7R9xvIB3T2wiSdJV8ZOiktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1MRYQU+yJ8mZJPNJDq1w/NuSPJbkqSRPJ7lj8qNKki5nZNCTbAIeBG4HdgP7k+xetuwXgYer6nrgLuC3Jz2oJOnyxnmGfiMwX1Vnq+oCcATYt2xNAd8wuH0t8MXJjShJGsc4Qd8KnBvaXhjsG/Z+4O4kC8Ax4D0r3VGSA0nmkswtLi5exbiSpNVM6kXR/cDHq2obcAfwiSQvu++qOlxVs1U1OzMzM6FTS5JgvKCfB7YPbW8b7Bt2D/AwQFX9PfAGYMskBpQkjWecoJ8AdiXZmeQall70PLpszbPArQBJvouloHtNRZLW0cigV9VF4CBwHDjN0rtZTiZ5IMnewbL7gHcl+QfgIeAnq6rWamhJ0sttHmdRVR1j6cXO4X33D90+Bdw82dEkSVfCT4pKUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSE2MFPcmeJGeSzCc5tMqaH0tyKsnJJH882TElSaNsHrUgySbgQeCHgAXgRJKjVXVqaM0u4H3AzVX1YpJvXquBJUkrG+cZ+o3AfFWdraoLwBFg37I17wIerKoXAarq+cmOKUkaZZygbwXODW0vDPYNexvwtiR/l+TxJHtWuqMkB5LMJZlbXFy8uoklSSua1Iuim4FdwC3AfuD3kly3fFFVHa6q2aqanZmZmdCpJUkwXtDPA9uHtrcN9g1bAI5W1Ver6gvAP7MUeEnSOhkn6CeAXUl2JrkGuAs4umzNn7H07JwkW1i6BHN2gnNKkkYYGfSquggcBI4Dp4GHq+pkkgeS7B0sOw68kOQU8Bjws1X1wloNLUl6uZFvWwSoqmPAsWX77h+6XcB7B38kSVPgJ0UlqQmDLklNGHRJasKgS1ITBl2
SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpibGCnmRPkjNJ5pMcusy6H0lSSWYnN6IkaRwjg55kE/AgcDuwG9ifZPcK694C3As8MekhJUmjjfMM/UZgvqrOVtUF4Aiwb4V1vwx8EPjvCc4nSRrTOEHfCpwb2l4Y7Pt/SW4AtlfVo5e7oyQHkswlmVtcXLziYSVJq3vFL4omeR3wYeC+UWur6nBVzVbV7MzMzCs9tSRpyDhBPw9sH9reNth3yVuAtwN/k+QZ4CbgqC+MStL6GifoJ4BdSXYmuQa4Czh66WBVvVRVW6pqR1XtAB4H9lbV3JpMLEla0cigV9VF4CBwHDgNPFxVJ5M8kGTvWg8oSRrP5nEWVdUx4NiyffevsvaWVz6WJOlK+UlRSWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJamKsoCfZk+RMkvkkh1Y4/t4kp5I8neSvk7x18qNKki5nZNCTbAIeBG4HdgP7k+xetuwpYLaqvht4BPiVSQ8qSbq8cZ6h3wjMV9XZqroAHAH2DS+oqseq6iuDzceBbZMdU5I0yjhB3wqcG9peGOxbzT3AX6x0IMmBJHNJ5hYXF8efUpI00kRfFE1yNzALfGil41V1uKpmq2p2ZmZmkqeWpA1v8xhrzgPbh7a3DfZ9jSS3Ab8A/EBV/c9kxpMkjWucZ+gngF1Jdia5BrgLODq8IMn1wO8Ce6vq+cmPKUkaZWTQq+oicBA4DpwGHq6qk0keSLJ3sOxDwJuBP0nyuSRHV7k7SdIaGeeSC1V1DDi2bN/9Q7dvm/BckqQr5CdFJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqQmDLklNGHRJasKgS1ITBl2SmjDoktSEQZekJgy6JDVh0CWpCYMuSU0YdElqwqBLUhMGXZKaMOiS1IRBl6QmDLokNWHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUhEGXpCYMuiQ1YdAlqYmxgp5kT5IzSeaTHFrh+Ncl+dTg+BNJdkx6UEnS5Y0MepJNwIPA7cBuYH+S3cuW3QO8WFXfDvwa8MFJDypJurzNY6y5EZivqrMASY4A+4BTQ2v2Ae8f3H4E+K0kqaqa4Kyaoh2HHl3zczzzgTvX/ByvNWv99+7feS/jBH0rcG5oewF4x2prqupikpeAbwK+NLwoyQHgwGDzP5OcuZqhr9KW5fNsEFf0uDPF360mfG5/3mOY5s97wjbSz/utqx0YJ+gTU1WHgcPrec5LksxV1ew0zj1NPu6Nxce9sY3zouh5YPvQ9rbBvhXXJNkMXAu8MIkBJUnjGSfoJ4BdSXYmuQa4Czi6bM1R4CcGt38U+IzXzyVpfY285DK4Jn4QOA5sAj5WVSeTPADMVdVR4PeBTySZB77MUvRfbaZyqedVwMe9sfi4N7D4RFqSevCTopLUhEGXpCbaB33U1xZ0lGR7kseSnEpyMsm9055pPSXZlOSpJH8+7VnWU5LrkjyS5J+SnE7yvdOeaT0k+ZnBv/PPJ3koyRumPdO0tA76mF9b0NFF4L6q2g3cBLx7gzzuS+4FTk97iCn4DeAvq+o7ge9hA/wdJNkK/BQwW1VvZ+mNG6/GN2Wsi9ZBZ+hrC6rqAnDpawtaq6rnquqzg9v/wdJ/7K3TnWp9JNkG3Al8dNqzrKck1wLfz9I7zqiqC1X1b9Odat1sBr5+8BmYNwJ
fnPI8U9M96Ct9bcGGCNslg2++vB54YrqTrJtfB34O+N9pD7LOdgKLwB8MLjd9NMmbpj3UWquq88CvAs8CzwEvVdVfTXeq6eke9A0tyZuBPwV+uqr+fdrzrLUkPww8X1VPTnuWKdgM3AB8pKquB/4LaP+aUZJvZOm37p3AtwJvSnL3dKeanu5BH+drC1pK8nqWYv7Jqvr0tOdZJzcDe5M8w9LltR9M8kfTHWndLAALVXXpN7FHWAp8d7cBX6iqxar6KvBp4PumPNPUdA/6OF9b0E6SsHQt9XRVfXja86yXqnpfVW2rqh0s/aw/U1Ub4tlaVf0rcC7Jdwx23crXfsV1V88CNyV54+Df/a1sgBeDV7Ou37a43lb72oIpj7UebgZ+HPjHJJ8b7Pv5qjo2xZm09t4DfHLw5OUs8M4pz7PmquqJJI8An2Xp3V1PsYG/BsCP/ktSE90vuUjShmHQJakJgy5JTRh0SWrCoEtSEwZdkpow6JLUxP8BwjHuoBhu1y0AAAAASUVORK5CYII=\n", "text/plain": [ "<Figure size 432x288 with 1 Axes>" ] diff --git a/src/finn/custom_op/fpgadataflow/templates.py b/src/finn/custom_op/fpgadataflow/templates.py index e2f43f4edf206b32974adaf02fd479f0af522702..dccbb2533940305f90082f1d902e4713b2febfb6 100644 --- a/src/finn/custom_op/fpgadataflow/templates.py +++ b/src/finn/custom_op/fpgadataflow/templates.py @@ -1,5 +1,6 @@ # template for single node execution docompute_template = """ +#define AP_INT_MAX_W 4096 #include "cnpy.h" #include "npy2apintstream.hpp" #include <vector> @@ -30,6 +31,7 @@ $SAVEASCNPY$ # cpp file ipgen_template = """ +#define AP_INT_MAX_W 4096 #include "bnn-library.h" // includes for network parameters $GLOBALS$ diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 153cee77c4d572723ebc08a9a92a3fc15a4d77b2..3572b683b3c3375fda60feb6858dd512f5726c99 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -2,10 +2,11 @@ from onnx import helper from finn.core.datatype import DataType from finn.transformation import Transformation +from finn.custom_op.registry import getCustomOp class InferBinaryStreamingFCLayer(Transformation): - """Convert pairs of binary XnorPopcountMatMul layers to + """Convert XnorPopcountMatMul layers to StreamingFCLayer_Batch layers. 
Any immediately following MultiThreshold layers will also be absorbed into the MVTU.""" @@ -18,9 +19,13 @@ class InferBinaryStreamingFCLayer(Transformation): if n.op_type == "XnorPopcountMatMul": mm_input = n.input[0] mm_weight = n.input[1] - assert model.get_tensor_datatype(mm_input) == DataType.BINARY, """First + assert ( + model.get_tensor_datatype(mm_input) == DataType.BINARY + ), """First input for xnorpopcount is not set to FINN DataType BINARY.""" - assert model.get_tensor_datatype(mm_weight) == DataType.BINARY, """Second + assert ( + model.get_tensor_datatype(mm_weight) == DataType.BINARY + ), """Second input (weights) for xnorpopcount is not set to FINN DataType BINARY.""" idt = DataType.BINARY wdt = DataType.BINARY @@ -38,7 +43,9 @@ class InferBinaryStreamingFCLayer(Transformation): assert mh % pe == 0, "Requirement MH divisable by PE is violated." assert mw % simd == 0, "Requirement MW divisable by SIMD is violated." wmem = mw * mh // (pe * simd) - assert mw * mh == wmem * pe * simd, """Requirement (MW * MH) divisiable by + assert ( + mw * mh == wmem * pe * simd + ), """Requirement (MW * MH) divisiable by (WMEM * PE * SIMD) is violated.""" # see if we have any following thresholds consumer = model.find_consumer(mm_output) @@ -48,7 +55,9 @@ class InferBinaryStreamingFCLayer(Transformation): mt_output = consumer.output[0] mt_thres = consumer.input[1] T = model.get_initializer(mt_thres) - assert T.shape[0] == 1 or T.shape[0] == mh, """First dimension of + assert ( + T.shape[0] == 1 or T.shape[0] == mh + ), """First dimension of thresholds neither 1 nor MH.""" odt = model.get_tensor_datatype(mt_output) if odt.bitwidth() == 1: @@ -116,3 +125,126 @@ class InferBinaryStreamingFCLayer(Transformation): graph_modified = True return (model, graph_modified) + + +class InferQuantizedStreamingFCLayer(Transformation): + """Convert MatMul layers with quantized inputs and weights to + StreamingFCLayer_Batch layers. 
Any immediately following MultiThreshold + layers will also be absorbed into the MVTU.""" + + def apply(self, model): + graph = model.graph + node_ind = 0 + graph_modified = False + for n in graph.node: + node_ind += 1 + if n.op_type == "MatMul": + mm_input = n.input[0] + mm_weight = n.input[1] + idt = model.get_tensor_datatype(mm_input) + wdt = model.get_tensor_datatype(mm_weight) + if idt.is_integer() and wdt.is_integer(): + mm_output = n.output[0] + W = model.get_initializer(mm_weight) + # extract weight shape, note that ONNX and finn-hlslib + # make different assumptions about dim order here + # ONNX assumes W has (in, out) shape + # finn-hlslib assumes W has (out, in) shape + mh = int(W.shape[1]) + mw = int(W.shape[0]) + # create node with no parallelization first + pe = 1 + simd = 1 + assert mh % pe == 0, "Requirement MH divisable by PE is violated." + assert ( + mw % simd == 0 + ), "Requirement MW divisable by SIMD is violated." + wmem = mw * mh // (pe * simd) + assert ( + mw * mh == wmem * pe * simd + ), """Requirement (MW * MH) divisiable by + (WMEM * PE * SIMD) is violated.""" + # see if we have any following thresholds + consumer = model.find_consumer(mm_output) + if consumer is not None and consumer.op_type == "MultiThreshold": + # TODO ensure integer thresholds? + # create MVTU (i.e. including activation) + mt_output = consumer.output[0] + mt_thres = consumer.input[1] + T = model.get_initializer(mt_thres) + assert ( + T.shape[0] == 1 or T.shape[0] == mh + ), """First dimension of + thresholds neither 1 nor MH.""" + odt = model.get_tensor_datatype(mt_output) + scale = getCustomOp(consumer).get_nodeattr("out_scale") + assert ( + scale == 1.0 + ), "out_scale must be equal to 1.0 for HLS conversion." + actval = getCustomOp(consumer).get_nodeattr("out_bias") + assert ( + int(actval) == actval + ), "out_bias must be integer for HLS conversion." 
+ actval = int(actval) + assert (not odt.signed()) or ( + actval < 0 + ), "Signed output requres actval < 0" + in_shape = [1, mw] + out_shape = [1, mh] + model.set_tensor_shape(mm_input, in_shape) + model.set_tensor_shape(mt_output, out_shape) + # create and insert new StreamingFCLayer node + new_node = helper.make_node( + "StreamingFCLayer_Batch", + [mm_input, mm_weight, mt_thres], + [mt_output], + domain="finn", + backend="fpgadataflow", + resType="ap_resource_lut()", + MW=mw, + MH=mh, + SIMD=simd, + PE=pe, + inputDataType=idt.name, + weightDataType=wdt.name, + outputDataType=odt.name, + ActVal=actval, + binaryXnorMode=0, + noActivation=0, + ) + graph.node.insert(node_ind, new_node) + # remove old nodes + graph.node.remove(n) + graph.node.remove(consumer) + graph_modified = True + else: + # no activation, matmul only + in_shape = [1, mw] + out_shape = [1, mh] + odt = model.get_tensor_datatype(mm_output) + model.set_tensor_shape(mm_input, in_shape) + model.set_tensor_shape(mm_output, out_shape) + # create and insert new StreamingFCLayer node + new_node = helper.make_node( + "StreamingFCLayer_Batch", + [mm_input, mm_weight], + [mm_output], + domain="finn", + backend="fpgadataflow", + resType="ap_resource_lut()", + MW=mw, + MH=mh, + SIMD=simd, + PE=pe, + inputDataType=idt.name, + weightDataType=wdt.name, + outputDataType=odt.name, + ActVal=0, + binaryXnorMode=0, + noActivation=1, + ) + graph.node.insert(node_ind, new_node) + # remove old node + graph.node.remove(n) + graph_modified = True + return (model, graph_modified) diff --git a/src/finn/transformation/streamline/__init__.py b/src/finn/transformation/streamline/__init__.py index 09065e740debb5c458cb1de263c5648df3046c41..b2a492fc62ed091aea72b76a66232ca62473e047 100644 --- a/src/finn/transformation/streamline/__init__.py +++ b/src/finn/transformation/streamline/__init__.py @@ -37,6 +37,7 @@ class Streamline(Transformation): ConvertSubToAdd(), BatchNormToAffine(), ConvertSignToThres(), + MoveAddPastMul(), 
MoveScalarAddPastMatMul(), MoveScalarMulPastMatMul(), MoveAddPastMul(), diff --git a/tests/end2end/test_end2end_tfc.py b/tests/end2end/test_end2end_tfc_w1a1.py similarity index 73% rename from tests/end2end/test_end2end_tfc.py rename to tests/end2end/test_end2end_tfc_w1a1.py index 63e4f13c9fb3735749eec9bf9de42c817344e229..41324f19e394af697f863908e074d77d570dafa5 100644 --- a/tests/end2end/test_end2end_tfc.py +++ b/tests/end2end/test_end2end_tfc_w1a1.py @@ -48,53 +48,53 @@ test_fpga_part = pynq_part_map[test_pynq_board] target_clk_ns = 5 -def test_end2end_tfc_export(): +def test_end2end_tfc_w1a1_export(): import brevitas.onnx as bo tfc = get_test_model_trained("TFC", 1, 1) bo.export_finn_onnx( - tfc, (1, 1, 28, 28), build_dir + "/end2end_tfc_w1_a1_export.onnx" + tfc, (1, 1, 28, 28), build_dir + "/end2end_tfc_w1a1_export.onnx" ) -def test_end2end_tfc_import_and_tidy(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_export.onnx") +def test_end2end_tfc_w1a1_import_and_tidy(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_export.onnx") model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) model = model.transform(InferDataTypes()) - model.save(build_dir + "/end2end_tfc_w1_a1_tidy.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_tidy.onnx") -def test_end2end_tfc_streamline(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_tidy.onnx") +def test_end2end_tfc_w1a1_streamline(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_tidy.onnx") model = model.transform(Streamline()) - model.save(build_dir + "/end2end_tfc_w1_a1_streamlined.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") -def test_end2end_tfc_convert_to_hls_layers(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_streamlined.onnx") +def test_end2end_tfc_w1a1_convert_to_hls_layers(): + model = ModelWrapper(build_dir + 
"/end2end_tfc_w1a1_streamlined.onnx") model = model.transform(ConvertBipolarMatMulToXnorPopcount()) model = model.transform(absorb.AbsorbAddIntoMultiThreshold()) model = model.transform(absorb.AbsorbMulIntoMultiThreshold()) model = model.transform(RoundAndClipThresholds()) model = model.transform(to_hls.InferBinaryStreamingFCLayer()) - model.save(build_dir + "/end2end_tfc_w1_a1_hls_layers.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_hls_layers.onnx") -def test_end2end_tfc_create_dataflow_partition(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_hls_layers.onnx") +def test_end2end_tfc_w1a1_create_dataflow_partition(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_hls_layers.onnx") parent_model = model.transform(CreateDataflowPartition()) - parent_model.save(build_dir + "/end2end_tfc_w1_a1_dataflow_parent.onnx") + parent_model.save(build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx") sdp_node = getCustomOp(parent_model.graph.node[2]) dataflow_model_filename = sdp_node.get_nodeattr("model") dataflow_model = ModelWrapper(dataflow_model_filename) - dataflow_model.save(build_dir + "/end2end_tfc_w1_a1_dataflow_model.onnx") + dataflow_model.save(build_dir + "/end2end_tfc_w1a1_dataflow_model.onnx") -def test_end2end_tfc_fold_and_tlastmarker(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_dataflow_model.onnx") +def test_end2end_tfc_w1a1_fold_and_tlastmarker(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_dataflow_model.onnx") fc0 = model.graph.node[0] fc1 = model.graph.node[1] fc2 = model.graph.node[2] @@ -117,26 +117,26 @@ def test_end2end_tfc_fold_and_tlastmarker(): fc3w.set_nodeattr("PE", 10) fc3w.set_nodeattr("outFIFODepth", 50) model = model.transform(InsertTLastMarker()) - model.save(build_dir + "/end2end_tfc_w1_a1_folded.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_folded.onnx") -def test_end2end_tfc_gen_hls_ip(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_folded.onnx") +def 
test_end2end_tfc_w1a1_gen_hls_ip(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_folded.onnx") model = model.transform(GiveUniqueNodeNames()) model = model.transform(CodeGen_ipgen(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynth_IPGen()) - model.save(build_dir + "/end2end_tfc_w1_a1_ipgen.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_ipgen.onnx") -def test_end2end_tfc_ip_stitch(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_ipgen.onnx") +def test_end2end_tfc_w1a1_ip_stitch(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_ipgen.onnx") model = model.transform(ReplaceVerilogRelPaths()) model = model.transform(CodeGen_ipstitch(test_fpga_part)) - model.save(build_dir + "/end2end_tfc_w1_a1_ipstitch.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") -def test_end2end_tfc_verify_dataflow_part(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_ipstitch.onnx") +def test_end2end_tfc_w1a1_verify_dataflow_part(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") x = np.zeros((1, 784), dtype=np.float32) inp_name = model.graph.input[0].name out_name = model.graph.output[0].name @@ -145,7 +145,7 @@ def test_end2end_tfc_verify_dataflow_part(): model = model.transform(CodeGen_npysim()) model = model.transform(Compile()) model = model.transform(SetExecMode("npysim")) - model.save(build_dir + "/end2end_tfc_w1_a1_ipstitch_npysim.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_ipstitch_npysim.onnx") ret_npysim = execute_onnx(model, inp_dict, True) res_npysim = ret_npysim[out_name] # node-by-node rtlsim @@ -154,22 +154,22 @@ def test_end2end_tfc_verify_dataflow_part(): getCustomOp(model.graph.node[1]).set_nodeattr("rtlsim_trace", "default") getCustomOp(model.graph.node[2]).set_nodeattr("rtlsim_trace", "default") getCustomOp(model.graph.node[3]).set_nodeattr("rtlsim_trace", "default") - model.save(build_dir + "/end2end_tfc_w1_a1_ipstitch_nodebynode_rtlsim.onnx") + model.save(build_dir + 
"/end2end_tfc_w1a1_ipstitch_nodebynode_rtlsim.onnx") ret_rtlsim_nodebynode = execute_onnx(model, inp_dict, True) res_rtlsim_nodebynode = ret_rtlsim_nodebynode[out_name] # whole-network (ip-stitched) rtlsim model.set_metadata_prop("exec_mode", "rtlsim") model.set_metadata_prop("rtlsim_trace", "whole_trace.vcd") - model.save(build_dir + "/end2end_tfc_w1_a1_ipstitch_whole_rtlsim.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_ipstitch_whole_rtlsim.onnx") ret_rtlsim_whole = execute_onnx(model, inp_dict, True) res_rtlsim_whole = ret_rtlsim_whole[out_name] assert np.isclose(res_npysim, res_rtlsim_nodebynode).all() assert np.isclose(res_npysim, res_rtlsim_whole).all() -def test_end2end_tfc_verify_all(): +def test_end2end_tfc_w1a1_verify_all(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_streamlined.onnx") + golden = ModelWrapper(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -180,25 +180,23 @@ def test_end2end_tfc_verify_all(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_dataflow_parent.onnx") + parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx") iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name # produce results with npysim sdp_node = getCustomOp(parent_model.graph.node[2]) - sdp_node.set_nodeattr( - "model", build_dir + "/end2end_tfc_w1_a1_ipstitch_npysim.onnx" - ) + sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a1_ipstitch_npysim.onnx") ret_npysim = execute_onnx(parent_model, {iname: x}, True) y_npysim = ret_npysim[oname] # produce results with node-by-node rtlsim sdp_node.set_nodeattr( - "model", build_dir + 
"/end2end_tfc_w1_a1_ipstitch_nodebynode_rtlsim.onnx" + "model", build_dir + "/end2end_tfc_w1a1_ipstitch_nodebynode_rtlsim.onnx" ) ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True) y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname] # produce results with whole-network (stitched ip) rtlsim sdp_node.set_nodeattr( - "model", build_dir + "/end2end_tfc_w1_a1_ipstitch_whole_rtlsim.onnx" + "model", build_dir + "/end2end_tfc_w1a1_ipstitch_whole_rtlsim.onnx" ) ret_whole_rtlsim = execute_onnx(parent_model, {iname: x}, True) y_whole_rtlsim = ret_whole_rtlsim[oname] @@ -207,26 +205,26 @@ def test_end2end_tfc_verify_all(): assert np.isclose(y_golden, y_whole_rtlsim).all() -def test_end2end_tfc_make_pynq_proj(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_ipstitch.onnx") +def test_end2end_tfc_w1a1_make_pynq_proj(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_ipstitch.onnx") model = model.transform(MakePYNQProject(test_pynq_board)) - model.save(build_dir + "/end2end_tfc_w1_a1_pynq_project.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_pynq_project.onnx") def test_end2end_synth_pynq_project(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_pynq_project.onnx") + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_pynq_project.onnx") model = model.transform(SynthPYNQProject()) - model.save(build_dir + "/end2end_tfc_w1_a1_synth.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_synth.onnx") -def test_end2end_tfc_make_driver(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_synth.onnx") +def test_end2end_tfc_w1a1_make_driver(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_synth.onnx") model = model.transform(MakePYNQDriver()) - model.save(build_dir + "/end2end_tfc_w1_a1_pynq_driver.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_pynq_driver.onnx") -def test_end2end_tfc_deploy_on_pynq(): - model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_pynq_driver.onnx") +def test_end2end_tfc_w1a1_deploy_on_pynq(): + model = 
ModelWrapper(build_dir + "/end2end_tfc_w1a1_pynq_driver.onnx") try: ip = os.environ["PYNQ_IP"] # no fault for this one; skip if not defined if ip == "": @@ -236,14 +234,14 @@ def test_end2end_tfc_deploy_on_pynq(): target_dir = os.getenv("PYNQ_TARGET_DIR", "/home/xilinx/finn") model = model.transform(DeployToPYNQ(ip, username, password, target_dir)) # save the model to be able to link it to the parent - model.save(build_dir + "/end2end_tfc_w1_a1_pynq_deploy.onnx") + model.save(build_dir + "/end2end_tfc_w1a1_pynq_deploy.onnx") except KeyError: pytest.skip("PYNQ board IP address not specified") -def test_end2end_tfc_run_on_pynq(): +def test_end2end_tfc_w1a1_run_on_pynq(): # use the streamlined model as the "golden" model for right answers - golden = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_streamlined.onnx") + golden = ModelWrapper(build_dir + "/end2end_tfc_w1a1_streamlined.onnx") iname = golden.graph.input[0].name oname = golden.graph.output[0].name raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") @@ -255,7 +253,7 @@ def test_end2end_tfc_run_on_pynq(): y_golden = ret_golden[oname] # set up parent+child graph to test # we'll use models from the previous step as the child model - parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1_a1_dataflow_parent.onnx") + parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a1_dataflow_parent.onnx") iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name try: @@ -264,9 +262,7 @@ def test_end2end_tfc_run_on_pynq(): pytest.skip("PYNQ board IP address not specified") # produce results with npysim sdp_node = getCustomOp(parent_model.graph.node[2]) - sdp_node.set_nodeattr( - "model", build_dir + "/end2end_tfc_w1_a1_pynq_deploy.onnx" - ) + sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a1_pynq_deploy.onnx") ret = execute_onnx(parent_model, {iname: x}, True) y = ret[oname] assert np.isclose(y, y_golden).all() diff --git a/tests/end2end/test_end2end_tfc_w1a2.py 
b/tests/end2end/test_end2end_tfc_w1a2.py new file mode 100644 index 0000000000000000000000000000000000000000..8f8d543d408b5ac3a60aa03f2866922525288836 --- /dev/null +++ b/tests/end2end/test_end2end_tfc_w1a2.py @@ -0,0 +1,269 @@ +import os +from pkgutil import get_data + +import pytest + +import numpy as np + +# as of Feb'20 there is a bug that segfaults ONNX shape inference if we +# import pytorch before onnx, so we make sure to import onnx first +import onnx # NOQA +import onnx.numpy_helper as nph + +import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls +from finn.core.modelwrapper import ModelWrapper +from finn.core.onnx_exec import execute_onnx +from finn.custom_op.registry import getCustomOp +from finn.transformation.fold_constants import FoldConstants +from finn.transformation.fpgadataflow.codegen_ipgen import CodeGen_ipgen +from finn.transformation.fpgadataflow.codegen_ipstitch import CodeGen_ipstitch +from finn.transformation.fpgadataflow.codegen_npysim import CodeGen_npysim +from finn.transformation.fpgadataflow.compile import Compile +from finn.transformation.fpgadataflow.create_dataflow_partition import ( + CreateDataflowPartition, +) +from finn.transformation.fpgadataflow.hlssynth_ipgen import HLSSynth_IPGen +from finn.transformation.fpgadataflow.insert_tlastmarker import InsertTLastMarker +from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ +from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver +from finn.transformation.fpgadataflow.make_pynq_proj import MakePYNQProject +from finn.transformation.fpgadataflow.replace_verilog_relpaths import ( + ReplaceVerilogRelPaths, +) +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.fpgadataflow.synth_pynq_proj import SynthPYNQProject +from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames +from finn.transformation.infer_datatypes import InferDataTypes +from 
finn.transformation.infer_shapes import InferShapes +from finn.transformation.streamline import Streamline +from finn.util.basic import pynq_part_map +from finn.util.test import get_test_model_trained + +build_dir = "/tmp/" + os.environ["FINN_INST_NAME"] +test_pynq_board = os.getenv("PYNQ_BOARD", default="Pynq-Z1") +test_fpga_part = pynq_part_map[test_pynq_board] +target_clk_ns = 5 + + +def test_end2end_tfc_w1a2_export(): + import brevitas.onnx as bo + + tfc = get_test_model_trained("TFC", 1, 2) + bo.export_finn_onnx( + tfc, (1, 1, 28, 28), build_dir + "/end2end_tfc_w1a2_export.onnx" + ) + + +def test_end2end_tfc_w1a2_import_and_tidy(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_export.onnx") + model = model.transform(InferShapes()) + model = model.transform(FoldConstants()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveReadableTensorNames()) + model = model.transform(InferDataTypes()) + model.save(build_dir + "/end2end_tfc_w1a2_tidy.onnx") + + +def test_end2end_tfc_w1a2_streamline(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_tidy.onnx") + model = model.transform(Streamline()) + model.save(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + + +def test_end2end_tfc_w1a2_convert_to_hls_layers(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + # model = model.transform(ConvertBipolarMatMulToXnorPopcount()) + # model = model.transform(absorb.AbsorbAddIntoMultiThreshold()) + # model = model.transform(absorb.AbsorbMulIntoMultiThreshold()) + # model = model.transform(RoundAndClipThresholds()) + # model = model.transform(to_hls.InferBinaryStreamingFCLayer()) + model = model.transform(to_hls.InferQuantizedStreamingFCLayer()) + model.save(build_dir + "/end2end_tfc_w1a2_hls_layers.onnx") + + +def test_end2end_tfc_w1a2_create_dataflow_partition(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_hls_layers.onnx") + parent_model = model.transform(CreateDataflowPartition()) + 
parent_model.save(build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx") + sdp_node = getCustomOp(parent_model.graph.node[2]) + dataflow_model_filename = sdp_node.get_nodeattr("model") + dataflow_model = ModelWrapper(dataflow_model_filename) + dataflow_model.save(build_dir + "/end2end_tfc_w1a2_dataflow_model.onnx") + + +def test_end2end_tfc_w1a2_fold_and_tlastmarker(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_dataflow_model.onnx") + fc0 = model.graph.node[0] + fc1 = model.graph.node[1] + fc2 = model.graph.node[2] + fc3 = model.graph.node[3] + fc0w = getCustomOp(fc0) + fc1w = getCustomOp(fc1) + fc2w = getCustomOp(fc2) + fc3w = getCustomOp(fc3) + fc0w.set_nodeattr("inFIFODepth", 50) + fc0w.set_nodeattr("SIMD", 8) + fc0w.set_nodeattr("PE", 16) + fc0w.set_nodeattr("outFIFODepth", 4) + fc1w.set_nodeattr("SIMD", 16) + fc1w.set_nodeattr("PE", 16) + fc1w.set_nodeattr("outFIFODepth", 4) + fc2w.set_nodeattr("SIMD", 16) + fc2w.set_nodeattr("PE", 16) + fc2w.set_nodeattr("outFIFODepth", 4) + fc3w.set_nodeattr("SIMD", 16) + fc3w.set_nodeattr("PE", 10) + fc3w.set_nodeattr("outFIFODepth", 50) + model = model.transform(InsertTLastMarker()) + model.save(build_dir + "/end2end_tfc_w1a2_folded.onnx") + + +def test_end2end_tfc_w1a2_gen_hls_ip(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_folded.onnx") + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(CodeGen_ipgen(test_fpga_part, target_clk_ns)) + model = model.transform(HLSSynth_IPGen()) + model.save(build_dir + "/end2end_tfc_w1a2_ipgen.onnx") + + +def test_end2end_tfc_w1a2_ip_stitch(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_ipgen.onnx") + model = model.transform(ReplaceVerilogRelPaths()) + model = model.transform(CodeGen_ipstitch(test_fpga_part)) + model.save(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") + + +def test_end2end_tfc_w1a2_verify_dataflow_part(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") + x = np.zeros((1, 784), dtype=np.float32) + 
inp_name = model.graph.input[0].name + out_name = model.graph.output[0].name + inp_dict = {inp_name: x} + # npysim + model = model.transform(CodeGen_npysim()) + model = model.transform(Compile()) + model = model.transform(SetExecMode("npysim")) + model.save(build_dir + "/end2end_tfc_w1a2_ipstitch_npysim.onnx") + ret_npysim = execute_onnx(model, inp_dict, True) + res_npysim = ret_npysim[out_name] + # node-by-node rtlsim + model = model.transform(SetExecMode("rtlsim")) + getCustomOp(model.graph.node[0]).set_nodeattr("rtlsim_trace", "default") + getCustomOp(model.graph.node[1]).set_nodeattr("rtlsim_trace", "default") + getCustomOp(model.graph.node[2]).set_nodeattr("rtlsim_trace", "default") + getCustomOp(model.graph.node[3]).set_nodeattr("rtlsim_trace", "default") + model.save(build_dir + "/end2end_tfc_w1a2_ipstitch_nodebynode_rtlsim.onnx") + ret_rtlsim_nodebynode = execute_onnx(model, inp_dict, True) + res_rtlsim_nodebynode = ret_rtlsim_nodebynode[out_name] + # whole-network (ip-stitched) rtlsim + model.set_metadata_prop("exec_mode", "rtlsim") + model.set_metadata_prop("rtlsim_trace", "whole_trace.vcd") + model.save(build_dir + "/end2end_tfc_w1a2_ipstitch_whole_rtlsim.onnx") + ret_rtlsim_whole = execute_onnx(model, inp_dict, True) + res_rtlsim_whole = ret_rtlsim_whole[out_name] + assert np.isclose(res_npysim, res_rtlsim_nodebynode).all() + assert np.isclose(res_npysim, res_rtlsim_whole).all() + + +def test_end2end_tfc_w1a2_verify_all(): + # use the streamlined model as the "golden" model for right answers + golden = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + iname = golden.graph.input[0].name + oname = golden.graph.output[0].name + raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") + input_tensor = onnx.load_tensor_from_string(raw_i) + x = nph.to_array(input_tensor) + # x = np.zeros(ishape, dtype=np.float32) + ret_golden = execute_onnx(golden, {iname: x}, True) + y_golden = ret_golden[oname] + # set up parent+child graph 
to test + # we'll use models from the previous step as the child model + parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx") + iname = parent_model.graph.input[0].name + oname = parent_model.graph.output[0].name + # produce results with npysim + sdp_node = getCustomOp(parent_model.graph.node[2]) + sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a2_ipstitch_npysim.onnx") + ret_npysim = execute_onnx(parent_model, {iname: x}, True) + y_npysim = ret_npysim[oname] + # produce results with node-by-node rtlsim + sdp_node.set_nodeattr( + "model", build_dir + "/end2end_tfc_w1a2_ipstitch_nodebynode_rtlsim.onnx" + ) + ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True) + y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname] + # produce results with whole-network (stitched ip) rtlsim + sdp_node.set_nodeattr( + "model", build_dir + "/end2end_tfc_w1a2_ipstitch_whole_rtlsim.onnx" + ) + ret_whole_rtlsim = execute_onnx(parent_model, {iname: x}, True) + y_whole_rtlsim = ret_whole_rtlsim[oname] + assert np.isclose(y_golden, y_npysim).all() + assert np.isclose(y_golden, y_nodebynode_rtlsim).all() + assert np.isclose(y_golden, y_whole_rtlsim).all() + + +def test_end2end_tfc_w1a2_make_pynq_proj(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_ipstitch.onnx") + model = model.transform(MakePYNQProject(test_pynq_board)) + model.save(build_dir + "/end2end_tfc_w1a2_pynq_project.onnx") + + +def test_end2end_synth_pynq_project(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_pynq_project.onnx") + model = model.transform(SynthPYNQProject()) + model.save(build_dir + "/end2end_tfc_w1a2_synth.onnx") + + +def test_end2end_tfc_w1a2_make_driver(): + model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_synth.onnx") + model = model.transform(MakePYNQDriver()) + model.save(build_dir + "/end2end_tfc_w1a2_pynq_driver.onnx") + + +def test_end2end_tfc_w1a2_deploy_on_pynq(): + model = ModelWrapper(build_dir + 
"/end2end_tfc_w1a2_pynq_driver.onnx") + try: + ip = os.environ["PYNQ_IP"] # no fault for this one; skip if not defined + if ip == "": + pytest.skip("PYNQ board IP address not specified") + username = os.getenv("PYNQ_USERNAME", "xilinx") + password = os.getenv("PYNQ_PASSWORD", "xilinx") + target_dir = os.getenv("PYNQ_TARGET_DIR", "/home/xilinx/finn") + model = model.transform(DeployToPYNQ(ip, username, password, target_dir)) + # save the model to be able to link it to the parent + model.save(build_dir + "/end2end_tfc_w1a2_pynq_deploy.onnx") + except KeyError: + pytest.skip("PYNQ board IP address not specified") + + +def test_end2end_tfc_w1a2_run_on_pynq(): + # use the streamlined model as the "golden" model for right answers + golden = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx") + iname = golden.graph.input[0].name + oname = golden.graph.output[0].name + raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") + input_tensor = onnx.load_tensor_from_string(raw_i) + x = nph.to_array(input_tensor) + # x = np.zeros(ishape, dtype=np.float32) + # run using FINN-based execution + ret_golden = execute_onnx(golden, {iname: x}, True) + y_golden = ret_golden[oname] + # set up parent+child graph to test + # we'll use models from the previous step as the child model + parent_model = ModelWrapper(build_dir + "/end2end_tfc_w1a2_dataflow_parent.onnx") + iname = parent_model.graph.input[0].name + oname = parent_model.graph.output[0].name + try: + ip = os.environ["PYNQ_IP"] # NOQA + if ip == "": + pytest.skip("PYNQ board IP address not specified") + # produce results with npysim + sdp_node = getCustomOp(parent_model.graph.node[2]) + sdp_node.set_nodeattr("model", build_dir + "/end2end_tfc_w1a2_pynq_deploy.onnx") + ret = execute_onnx(parent_model, {iname: x}, True) + y = ret[oname] + assert np.isclose(y, y_golden).all() + + except KeyError: + pytest.skip("PYNQ board IP address not specified") diff --git 
a/tests/fpgadataflow/test_convert_to_hls_layers.py b/tests/fpgadataflow/test_convert_to_hls_layers.py index 1f012b136eb031dc3a98e3a533df2b23787ea265..0c5a1b50d72f783f74f34e6990e4af76b42d92db 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers.py @@ -27,8 +27,8 @@ export_onnx_path = "test_output_tfc.onnx" def test_convert_to_hls_layers_tfc_w1a1(): - lfc = get_test_model_trained("TFC", 1, 1) - bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path) + tfc = get_test_model_trained("TFC", 1, 1) + bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -91,5 +91,72 @@ def test_convert_to_hls_layers_tfc_w1a1(): input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float() assert input_tensor.shape == (1, 1, 28, 28) # do forward pass in PyTorch/Brevitas - expected = lfc.forward(input_tensor).detach().numpy() + expected = tfc.forward(input_tensor).detach().numpy() assert np.isclose(produced, expected, atol=1e-3).all() + + +def test_convert_to_hls_layers_tfc_w1a2(): + tfc = get_test_model_trained("TFC", 1, 2) + bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path) + model = ModelWrapper(export_onnx_path) + model = model.transform(InferShapes()) + model = model.transform(FoldConstants()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveReadableTensorNames()) + model = model.transform(Streamline()) + from finn.transformation.fpgadataflow.convert_to_hls_layers import ( + InferQuantizedStreamingFCLayer, + ) + + model = model.transform(InferQuantizedStreamingFCLayer()) + + fc0 = model.graph.node[2] + assert fc0.op_type == "StreamingFCLayer_Batch" + assert model.get_tensor_shape(fc0.input[0]) == [1, 784] + assert model.get_tensor_shape(fc0.input[1]) == [784, 64] + assert model.get_tensor_shape(fc0.input[2]) == [64, 2] + fc1 = model.graph.node[3] + assert 
fc1.op_type == "StreamingFCLayer_Batch" + assert model.get_tensor_shape(fc1.input[0]) == [1, 64] + assert model.get_tensor_shape(fc1.input[1]) == [64, 64] + assert model.get_tensor_shape(fc1.input[2]) == [64, 2] + fc2 = model.graph.node[4] + assert fc2.op_type == "StreamingFCLayer_Batch" + assert model.get_tensor_shape(fc2.input[0]) == [1, 64] + assert model.get_tensor_shape(fc2.input[1]) == [64, 64] + assert model.get_tensor_shape(fc2.input[2]) == [64, 2] + fc3 = model.graph.node[5] + assert fc3.op_type == "StreamingFCLayer_Batch" + assert model.get_tensor_shape(fc3.input[0]) == [1, 64] + assert model.get_tensor_shape(fc3.input[1]) == [64, 10] + fc0w = getCustomOp(fc0) + fc0w.set_nodeattr("SIMD", 784) + fc0w.set_nodeattr("PE", 16) + fc1w = getCustomOp(fc1) + fc1w.set_nodeattr("SIMD", 16) + fc1w.set_nodeattr("PE", 16) + fc2w = getCustomOp(fc2) + fc2w.set_nodeattr("SIMD", 16) + fc2w.set_nodeattr("PE", 16) + fc3w = getCustomOp(fc3) + fc3w.set_nodeattr("SIMD", 16) + fc3w.set_nodeattr("PE", 10) + model = model.transform(CodeGen_npysim()) + model = model.transform(Compile()) + model = model.transform(SetExecMode("npysim")) + raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") + input_tensor = onnx.load_tensor_from_string(raw_i) + # run using FINN-based execution + input_dict = {"global_in": nph.to_array(input_tensor)} + output_dict = oxe.execute_onnx(model, input_dict, True) + produced = output_dict[model.graph.output[0].name] + model = ModelWrapper(export_onnx_path) + model = model.transform(InferShapes()) + model = model.transform(FoldConstants()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveReadableTensorNames()) + model = model.transform(Streamline()) + golden_output_dict = oxe.execute_onnx(model, input_dict, True) + expected = golden_output_dict[model.graph.output[0].name] + assert np.isclose(produced, expected, atol=1e-3).all() + os.remove(export_onnx_path) diff --git 
a/tests/transformation/streamline/test_streamline_lfc_w1a1.py b/tests/transformation/streamline/test_streamline_fc.py similarity index 68% rename from tests/transformation/streamline/test_streamline_lfc_w1a1.py rename to tests/transformation/streamline/test_streamline_fc.py index 357223f6433b57fd6045b828f89d625ca0c37225..3114ac70363d6aa302f35a4911ffe1d49775d099 100644 --- a/tests/transformation/streamline/test_streamline_lfc_w1a1.py +++ b/tests/transformation/streamline/test_streamline_fc.py @@ -1,10 +1,10 @@ -import os from pkgutil import get_data import brevitas.onnx as bo import numpy as np import onnx import onnx.numpy_helper as nph +import pytest import finn.core.onnx_exec as oxe from finn.core.modelwrapper import ModelWrapper @@ -13,18 +13,22 @@ from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeN from finn.transformation.infer_shapes import InferShapes from finn.transformation.streamline import Streamline from finn.util.test import get_test_model_trained +from finn.util.basic import make_build_dir -export_onnx_path = "test_output_lfc.onnx" -# TODO get from config instead, hardcoded to Docker path for now -trained_lfc_w1a1_checkpoint = ( - "/workspace/brevitas_cnv_lfc/pretrained_models/LFC_1W1A/checkpoints/best.tar" -) +export_onnx_path = make_build_dir("test_streamline_fc_") - -def test_streamline_lfc_w1a1(): - lfc = get_test_model_trained("LFC", 1, 1) - bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path) - model = ModelWrapper(export_onnx_path) +# model size ("TFC", "SFC" or "LFC") +@pytest.mark.parametrize("size", ["TFC", "SFC", "LFC"]) +# weight bits +@pytest.mark.parametrize("wbits", [1]) +# act bits +@pytest.mark.parametrize("abits", [1, 2]) +def test_streamline_fc(size, wbits, abits): + nname = "%s_%dW%dA" % (size, wbits, abits) + finn_onnx = export_onnx_path + "/%s.onnx" % nname + fc = get_test_model_trained(size, wbits, abits) + bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx) + model = ModelWrapper(finn_onnx) model =
model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) @@ -40,4 +44,3 @@ def test_streamline_lfc_w1a1(): produced_ctx = oxe.execute_onnx(model, input_dict, True) produced = produced_ctx[model.graph.output[0].name] assert np.isclose(expected, produced, atol=1e-3).all() - os.remove(export_onnx_path) diff --git a/tests/transformation/streamline/test_streamline_lfc_w1a2.py b/tests/transformation/streamline/test_streamline_lfc_w1a2.py deleted file mode 100644 index 1181227a9ba38d769c97c00a3b1c74bb82389980..0000000000000000000000000000000000000000 --- a/tests/transformation/streamline/test_streamline_lfc_w1a2.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -from pkgutil import get_data - -import brevitas.onnx as bo -import numpy as np -import onnx -import onnx.numpy_helper as nph - -import finn.core.onnx_exec as oxe -from finn.core.modelwrapper import ModelWrapper -from finn.transformation.fold_constants import FoldConstants -from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames -from finn.transformation.infer_shapes import InferShapes -from finn.transformation.streamline import Streamline -from finn.util.test import get_test_model_trained - -export_onnx_path = "test_output_lfc.onnx" - - -def test_streamline_lfc_w1a2(): - lfc = get_test_model_trained("LFC", 1, 1) - bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path) - model = ModelWrapper(export_onnx_path) - model = model.transform(InferShapes()) - model = model.transform(FoldConstants()) - model = model.transform(GiveUniqueNodeNames()) - model = model.transform(GiveReadableTensorNames()) - # load one of the test vectors - raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb") - input_tensor = onnx.load_tensor_from_string(raw_i) - # run using FINN-based execution - input_dict = {"global_in": nph.to_array(input_tensor)} - expected_ctx = oxe.execute_onnx(model, input_dict, True) - expected = 
expected_ctx[model.graph.output[0].name] - model = model.transform(Streamline()) - produced_ctx = oxe.execute_onnx(model, input_dict, True) - produced = produced_ctx[model.graph.output[0].name] - assert np.isclose(expected, produced, atol=1e-3).all() - os.remove(export_onnx_path)