diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dfc83ba618eb905fe5579231542d14d529503ac2..f5998e98d00f7ea2e89ae3f0fcddd5862454f876 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -68,3 +68,11 @@ repos: # black-compatible flake-8 config args: ['--max-line-length=88', # black default '--extend-ignore=E203'] # E203 is not PEP8 compliant + +- repo: local + hooks: + - id: jupyter-nb-clear-output + name: jupyter-nb-clear-output + files: \.ipynb$ + language: system + entry: jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace diff --git a/docs/finn/faq.rst b/docs/finn/faq.rst index e426bdb4e28dd02c83b47d59b59c318840815f78..3ddd13664432ceefdd0379004d856abd096f93ff 100644 --- a/docs/finn/faq.rst +++ b/docs/finn/faq.rst @@ -75,7 +75,7 @@ Why does FINN-generated architectures need FIFOs between layers? See https://github.com/Xilinx/finn/discussions/383 How do I tell FINN to utilize DSPs instead of LUTs for MAC operations in particular layers? - This is done with the ``resType="dsp"`` attribute on ``StreamingFCLayer`` and ``Vector_Vector_Activate`` instances. + This is done with the ``resType="dsp"`` attribute on ``MatrixVectorActivation`` and ``Vector_Vector_Activate`` instances. When using the ``build_dataflow`` system, this can be specified at a per layer basis by specifying it as part of one or more layers’ folding config (:py:mod:`finn.builder.build_dataflow_config.DataflowBuildConfig.folding_config_file`). This is a good idea for layers with more weight/input act bits and high PE*SIMD. @@ -84,7 +84,7 @@ How do I tell FINN to utilize DSPs instead of LUTs for MAC operations in particu How do I tell FINN to utilize a particular type of memory resource in particular layers? This is done with the ``ram_style`` attribute. Check the particular ``HLSCustomOp`` attribute definition to see - which modes are supported (`example for StreamingFCLayer <https://github.com/Xilinx/finn/blob/dev/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py#L95>`_). + which modes are supported (`example for MatrixVectorActivation <https://github.com/Xilinx/finn/blob/dev/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py#L101>`_). When using the ``build_dataflow`` system, this can be specified at a per layer basis by specifying it as part of one or more layers’ folding config (:py:mod:`finn.builder.build_dataflow_config.DataflowBuildConfig.folding_config_file`). See the `MobileNet-v1 build config for ZCU104 in finn-examples <https://github.com/Xilinx/finn-examples/blob/main/build/mobilenet-v1/folding_config/ZCU104_folding_config.json#L15>`_ for reference. diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index e03873d3925dcea4a0a3c28e98878c5cb8e5a7fe..e28874145d6d61232b0d63b0e53e4dd5dcdc4cfc 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -146,10 +146,10 @@ A transformation passes changes (transforms) the given model, it gets the model .. _mem_mode: -StreamingFCLayer *mem_mode* +MatrixVectorActivation *mem_mode* =========================== -FINN supports two types of the so-called *mem_mode* attrıbute for the node StreamingFCLayer. This mode controls how the weight values are accessed during the execution. That means the mode setting has direct influence on the resulting circuit. Currently two settings for the *mem_mode* are supported in FINN: +FINN supports two types of the so-called *mem_mode* attrıbute for the node MatrixVectorActivation. This mode controls how the weight values are accessed during the execution. 
That means the mode setting has direct influence on the resulting circuit. Currently two settings for the *mem_mode* are supported in FINN: * "const" @@ -163,7 +163,7 @@ The following picture shows the idea behind the two modes. Const mode ---------- -In *const* mode the weights are "baked in" into the Matrix-Vector-Activate-Unit (MVAU), which means they are part of the HLS code. During the IP block generation the weight values are integrated as *params.h* file in the HLS code and synthesized together with it. For the *const* mode IP block generation the `StreamingFCLayer_Batch function <https://github.com/Xilinx/finn-hlslib/blob/07a8353f6cdfd8bcdd81e309a5581044c2a93d3b/fclayer.h#L94>`_ from the finn-hls library is used, which implements a standard MVAU. The resulting IP block has an input and an output stream, as shown in the above picture on the left. FIFOs in the form of verilog components are connected to these. +In *const* mode the weights are "baked in" into the Matrix-Vector-Activate-Unit (MVAU), which means they are part of the HLS code. During the IP block generation the weight values are integrated as *params.h* file in the HLS code and synthesized together with it. For the *const* mode IP block generation the `Matrix_Vector_Activate_Batch function <https://github.com/Xilinx/finn-hlslib/blob/19fa1197c09bca24a0f77a7fa04b8d7cb5cc1c1d/mvau.hpp#L93>`_ from the finn-hls library is used, which implements a standard MVAU. The resulting IP block has an input and an output stream, as shown in the above picture on the left. FIFOs in the form of verilog components are connected to these. Advantages: @@ -185,7 +185,7 @@ In *decoupled* mode a different variant of the MVAU with three ports is used. Be Advantages: -* better control over the used memory primivites used (see the ram_style attribute in StreamingFCLayer) +* better control over the used memory primivites used (see the ram_style attribute in MatrixVectorActivation) * potentially faster HLS synthesis time since weight array shape is no longer part of HLS synthesis diff --git a/docs/finn/nw_prep.rst b/docs/finn/nw_prep.rst index 22d0f92f9396c4896ec0581d7adb3f0210ef5077..8d0403fc9bb6a45fae60f14c0fb0acf862792abb 100644 --- a/docs/finn/nw_prep.rst +++ b/docs/finn/nw_prep.rst @@ -35,7 +35,7 @@ After this transformation the ONNX model is streamlined and contains now custom Convert to HLS Layers ===================== -Pairs of binary XNORPopcountMatMul layers are converted to StreamingFCLayers and following Multithreshold layers are absorbed into the Matrix-Vector-Activate-Unit (MVAU). The result is a model consisting of a mixture of HLS and non-HLS layers. For more details, see :py:mod:`finn.transformation.fpgadataflow.convert_to_hls_layers`. The MVAU can be implemented in two different modes, *const* and *decoupled*, see chapter :ref:`mem_mode`. +Pairs of binary XNORPopcountMatMul layers are converted to MatrixVectorActivation layers and following Multithreshold layers are absorbed into the Matrix-Vector-Activate-Unit (MVAU). The result is a model consisting of a mixture of HLS and non-HLS layers. For more details, see :py:mod:`finn.transformation.fpgadataflow.convert_to_hls_layers`. The MVAU can be implemented in two different modes, *const* and *decoupled*, see chapter :ref:`mem_mode`. Dataflow Partitioning ===================== @@ -43,7 +43,7 @@ Dataflow Partitioning In the next step the graph is split and the part consisting of HLS layers is further processed in the FINN flow. The parent graph containing the non-HLS layers remains. 
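For illustration, a minimal sketch of these two steps, assuming a streamlined input model (the file name below is a placeholder) and following the API used in the cnv_end2end_example notebook updated later in this patch:

.. code-block:: python

   import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
   from finn.transformation.fpgadataflow.create_dataflow_partition import (
       CreateDataflowPartition,
   )
   from qonnx.core.modelwrapper import ModelWrapper

   # weight access mode for the MVAU, "const" or "decoupled" (see the mem_mode chapter)
   mem_mode = "decoupled"
   # placeholder path: any streamlined FINN ONNX model
   model = ModelWrapper("streamlined_model.onnx")
   # convert binary XNORPopcountMatMul pairs (and quantized MatMuls) plus the
   # following MultiThreshold nodes into MatrixVectorActivation HLS layers
   model = model.transform(to_hls.InferBinaryMatrixVectorActivation(mem_mode))
   model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode))
   # split the graph: the HLS layers move into a child dataflow partition,
   # the returned parent model keeps the remaining non-HLS layers
   parent_model = model.transform(CreateDataflowPartition())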
The PE and SIMD are set to 1 by default, so the result is a network of only HLS layers with maximum folding. The model can be verified using the *cppsim* simulation. It is a simulation using C++ and is described in more detail in chapter :ref:`verification`. Folding -======= +========= To adjust the folding, the values for PE and SIMD can be increased to achieve also an increase in the performance. The result can be verified using the same simulation flow as for the network with maximum folding (*cppsim* using C++), for details please have a look at chapter :ref:`verification`. diff --git a/docs/finn/source_code/finn.custom_op.fpgadataflow.rst b/docs/finn/source_code/finn.custom_op.fpgadataflow.rst index 34a6285f227690c87c568855e7ca70ddb9b2764c..7de038248d418e1964effd7678bc1cad4cb48c14 100644 --- a/docs/finn/source_code/finn.custom_op.fpgadataflow.rst +++ b/docs/finn/source_code/finn.custom_op.fpgadataflow.rst @@ -127,10 +127,10 @@ finn.custom\_op.fpgadataflow.streamingdatawidthconverter\_batch :undoc-members: :show-inheritance: -finn.custom\_op.fpgadataflow.streamingfclayer\_batch +finn.custom\_op.fpgadataflow.matrixvectoractivation ----------------------------------------------------------- -.. automodule:: finn.custom_op.fpgadataflow.streamingfclayer_batch +.. automodule:: finn.custom_op.fpgadataflow.matrixvectoractivation :members: :undoc-members: :show-inheritance: @@ -184,10 +184,10 @@ finn.custom\_op.fpgadataflow.upsampler :undoc-members: :show-inheritance: -finn.custom\_op.fpgadataflow.vector\_vector\_activate\_batch +finn.custom\_op.fpgadataflow.vectorvectoractivation ----------------------------------------------- -.. automodule:: finn.custom_op.fpgadataflow.vector_vector_activate_batch +.. automodule:: finn.custom_op.fpgadataflow.vectorvectoractivation :members: :undoc-members: :show-inheritance: diff --git a/fetch-repos.sh b/fetch-repos.sh index ae3ad4320de4a5ba9cd296d7b8c22a241d8f37ae..31a636c2f33081bfb84c87f981dceeb025cfe357 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,11 +28,11 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
QONNX_COMMIT="4a4826641db8d34619d31eac155fe95af11692eb" -FINN_EXP_COMMIT="af6102769226b82b639f243dc36f065340991513" +FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="269410aa217389fc02e69bd7de210cd026f10971" +HLSLIB_COMMIT="5db5c8d480ae82bbbd05dd216b85272b6c6af091" OMX_COMMIT="a97f0bf145a2f7e57ca416ea76c9e45df4e9aa37" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" EXP_BOARD_FILES_MD5="ac1811ae93b03f5f09a505283ff989a3" diff --git a/notebooks/FCLayer_graph.onnx b/notebooks/FCLayer_graph.onnx deleted file mode 100644 index 950c78a9de7224b83ff46da4920da1baa5d80d61..0000000000000000000000000000000000000000 Binary files a/notebooks/FCLayer_graph.onnx and /dev/null differ diff --git a/notebooks/advanced/0_custom_analysis_pass.ipynb b/notebooks/advanced/0_custom_analysis_pass.ipynb index 2768c123da21d585df20ffe93f1222814b037cb4..a6e06921516fd624ad9e8e1884677c7791f5734a 100644 --- a/notebooks/advanced/0_custom_analysis_pass.ipynb +++ b/notebooks/advanced/0_custom_analysis_pass.ipynb @@ -13,7 +13,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -48,38 +48,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '../LFCW1A1.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f14142de3c8>" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "showInNetron(\"../LFCW1A1.onnx\")" ] @@ -93,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -110,7 +81,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -140,20 +111,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " def analysis(self, analysis_fxn):\n", - " \"\"\"Runs given anaylsis_fxn on this model and return resulting dict.\"\"\"\n", - " return analysis_fxn(self)\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "showSrc(ModelWrapper.analysis)" ] @@ -167,17 +127,9 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'Shape': 1, 'Gather': 1, 'Unsqueeze': 5, 'Concat': 1, 'Reshape': 1, 'Mul': 5, 'Sub': 1, 'Sign': 4, 'MatMul': 4, 'BatchNormalization': 3, 'Squeeze': 3}\n" - ] - } - ], + "outputs": [], "source": [ "print(model.analysis(count_equal_nodes))" ] diff --git a/notebooks/advanced/1_custom_transformation_pass.ipynb b/notebooks/advanced/1_custom_transformation_pass.ipynb index fe6390501e20d53e5c43e12a1dcc60d56a9024c5..7f78bea9e57e7145a75cd8c9f822ac5f57bcdf5f 100644 --- a/notebooks/advanced/1_custom_transformation_pass.ipynb +++ b/notebooks/advanced/1_custom_transformation_pass.ipynb @@ -13,7 +13,7 @@ 
}, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -42,32 +42,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " def transform(self, transformation, make_deepcopy=True):\n", - " \"\"\"Applies given Transformation repeatedly until no more changes can be made\n", - " and returns a transformed ModelWrapper instance.\n", - "\n", - " If make_deepcopy is specified, operates on a new (deep)copy of model.\n", - " \"\"\"\n", - " transformed_model = self\n", - " if make_deepcopy:\n", - " transformed_model = copy.deepcopy(self)\n", - " model_was_changed = True\n", - " while model_was_changed:\n", - " (transformed_model, model_was_changed) = transformation.apply(\n", - " transformed_model\n", - " )\n", - " return transformed_model\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", "showSrc(ModelWrapper.transform)" @@ -98,27 +75,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "class Transformation(ABC):\n", - " \"\"\"Transformation class all transformations are based on. Contains only\n", - " abstract method apply() every transformation has to fill.\"\"\"\n", - "\n", - " def __init__(self):\n", - " super().__init__()\n", - "\n", - " @abstractmethod\n", - " def apply(self, model):\n", - " pass\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from qonnx.transformation.base import Transformation\n", "\n", @@ -145,7 +104,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -157,45 +116,16 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '../LFCW1A1.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc625ac0a20>" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "showInNetron('../LFCW1A1.onnx')" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -232,7 +162,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -242,40 +172,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/tmp/LFCW1A1_changed.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fc625ac09b0>" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ 
"showInNetron('/tmp/LFCW1A1_changed.onnx')" ] @@ -291,66 +190,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "class NodeLocalTransformation(Transformation):\n", - " \"\"\"\n", - " Parent class for transformations, which can be executed locally to one node\n", - " by accessing and modifying the attributes of only that node.\n", - " This class can then automatically parallelize the transformation.\n", - " Transformations sublcassing NodeLocalTransformation must implement the\n", - " abstract method applyNodeLocal().\n", - "\n", - " To control the degree of parallelization, specify the num_workers argument\n", - " in the constructor, using one of the following values:\n", - " * None: use NUM_DEFAULT_WORKERS environment variable\n", - " * 0: use all available CPU cores\n", - " * (any other int>0): set number of parallel workers\n", - " \"\"\"\n", - "\n", - " def __init__(self, num_workers=None):\n", - " super().__init__()\n", - " if num_workers is None:\n", - " self._num_workers = get_num_default_workers()\n", - " else:\n", - " self._num_workers = num_workers\n", - " assert self._num_workers >= 0, \"Number of workers must be nonnegative.\"\n", - " if self._num_workers == 0:\n", - " self._num_workers = mp.cpu_count()\n", - "\n", - " @abstractmethod\n", - " def applyNodeLocal(self, node):\n", - " pass\n", - "\n", - " def apply(self, model):\n", - " # Remove old nodes from the current model\n", - " old_nodes = []\n", - " for i in range(len(model.graph.node)):\n", - " old_nodes.append(model.graph.node.pop())\n", - "\n", - " # Execute transformation in parallel\n", - " with mp.Pool(self._num_workers) as p:\n", - " new_nodes_and_bool = p.map(self.applyNodeLocal, old_nodes, chunksize=1)\n", - "\n", - " # extract nodes and check if the transformation needs to run again\n", - " # Note: .pop() had initially reversed the node order\n", - " run_again = False\n", - " for node, run in reversed(new_nodes_and_bool):\n", - " # Reattach new nodes to old model\n", - " model.graph.node.append(node)\n", - " if run is True:\n", - " run_again = True\n", - "\n", - " return (model, run_again)\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from qonnx.transformation.base import NodeLocalTransformation\n", "\n", @@ -370,59 +212,9 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "class CompileCppSim(NodeLocalTransformation):\n", - " \"\"\"For every node: compile C++ code in node attribute \"code_gen_dir_cppsim\"\n", - " and save path to executables in node attribute \"executable_path\".\n", - " All nodes in the graph must have the fpgadataflow backend attribute.\n", - "\n", - " To use these executables, exec_mode must be set to \"cppsim\" (using transformation\n", - " SetExecMode) and the model has to be executed using execute_onnx() from\n", - " finn.core.onnx_exec\n", - "\n", - " * num_workers (int or None) number of parallel workers, see documentation in\n", - " NodeLocalTransformation for more details.\n", - " \"\"\"\n", - "\n", - " def __init__(self, num_workers=None):\n", - " super().__init__(num_workers=num_workers)\n", - "\n", - " def applyNodeLocal(self, node):\n", - " op_type = node.op_type\n", - " if is_fpgadataflow_node(node) is True:\n", - " try:\n", - " # lookup op_type in registry of CustomOps\n", - " inst = registry.getCustomOp(node)\n", - " # 
ensure that code is generated\n", - " assert (\n", - " inst.get_nodeattr(\"code_gen_dir_cppsim\") != \"\"\n", - " ), \"\"\"Node\n", - " attribute \"code_gen_dir_cppsim\" is not set. Please run\n", - " Transformation PrepareCppSim first.\"\"\"\n", - " # call the compilation function for this node\n", - " inst.compile_singlenode_code()\n", - " # ensure that executable path is now set\n", - " assert (\n", - " inst.get_nodeattr(\"executable_path\") != \"\"\n", - " ), \"\"\"Transformation\n", - " compile was not successful, there is no path to executables set\n", - " in node attribute \"executable_path\".\"\"\"\n", - " except KeyError:\n", - " # exception if op_type is not supported\n", - " raise Exception(\n", - " \"Custom op_type %s is currently not supported.\" % op_type\n", - " )\n", - " return (node, False)\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim\n", "\n", diff --git a/notebooks/advanced/2_custom_op.ipynb b/notebooks/advanced/2_custom_op.ipynb index 8595a10d6ee9194a45ea16e30f9f787c5aeed88c..e3b5d8cf0bd01bf2588331d346e706b3a36fed10 100644 --- a/notebooks/advanced/2_custom_op.ipynb +++ b/notebooks/advanced/2_custom_op.ipynb @@ -28,57 +28,9 @@ }, { "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['__abstractmethods__',\n", - " '__class__',\n", - " '__delattr__',\n", - " '__dict__',\n", - " '__dir__',\n", - " '__doc__',\n", - " '__eq__',\n", - " '__format__',\n", - " '__ge__',\n", - " '__getattribute__',\n", - " '__gt__',\n", - " '__hash__',\n", - " '__init__',\n", - " '__init_subclass__',\n", - " '__le__',\n", - " '__lt__',\n", - " '__module__',\n", - " '__ne__',\n", - " '__new__',\n", - " '__reduce__',\n", - " '__reduce_ex__',\n", - " '__repr__',\n", - " '__setattr__',\n", - " '__sizeof__',\n", - " '__slots__',\n", - " '__str__',\n", - " '__subclasshook__',\n", - " '__weakref__',\n", - " '_abc_impl',\n", - " 'execute_node',\n", - " 'get_nodeattr',\n", - " 'get_nodeattr_allowed_values',\n", - " 'get_nodeattr_def',\n", - " 'get_nodeattr_types',\n", - " 'infer_node_datatype',\n", - " 'make_shape_compatible_op',\n", - " 'set_nodeattr',\n", - " 'verify_node']" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.custom_op.base import CustomOp\n", "dir(CustomOp)" @@ -95,7 +47,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -183,7 +135,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -200,27 +152,9 @@ }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'DebugMarker': qonnx.custom_op.general.debugmarker.DebugMarker,\n", - " 'QuantAvgPool2d': qonnx.custom_op.general.quantavgpool2d.QuantAvgPool2d,\n", - " 'MaxPoolNHWC': qonnx.custom_op.general.maxpoolnhwc.MaxPoolNHWC,\n", - " 'GenericPartition': qonnx.custom_op.general.genericpartition.GenericPartition,\n", - " 'MultiThreshold': qonnx.custom_op.general.multithreshold.MultiThreshold,\n", - " 'XnorPopcountMatMul': qonnx.custom_op.general.xnorpopcount.XnorPopcountMatMul,\n", - " 'Im2Col': qonnx.custom_op.general.im2col.Im2Col,\n", - " 'MyPythonPowerOp': __main__.MyPythonPowerOp}" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": 
"execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "general.custom_op" ] @@ -238,7 +172,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -283,34 +217,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[input: \"inp\"\n", - "output: \"outp\"\n", - "op_type: \"MyPythonPowerOp\"\n", - "attribute {\n", - " name: \"exec_mode\"\n", - " s: \"python\"\n", - " type: STRING\n", - "}\n", - "attribute {\n", - " name: \"exponent\"\n", - " i: 2\n", - " type: INT\n", - "}\n", - "domain: \"qonnx.custom_op.general\"\n", - "]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# generate a small graph with our custom op\n", "input_shape = (1, 2, 4)\n", @@ -327,21 +236,9 @@ }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[[ 0., -3., 1., -8.],\n", - " [ 2., -2., -4., -8.]]], dtype=float32)" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.core.datatype import DataType\n", "from qonnx.util.basic import gen_finn_dt_tensor\n", @@ -360,21 +257,9 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'outp': array([[[ 0., 9., 1., 64.],\n", - " [ 4., 4., 16., 64.]]], dtype=float32)}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.core.onnx_exec import execute_onnx\n", "\n", @@ -406,7 +291,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -521,34 +406,9 @@ }, { "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[input: \"inp\"\n", - "output: \"outp\"\n", - "op_type: \"MyMixedPowerOp\"\n", - "attribute {\n", - " name: \"exec_mode\"\n", - " s: \"python\"\n", - " type: STRING\n", - "}\n", - "attribute {\n", - " name: \"exponent\"\n", - " i: 2\n", - " type: INT\n", - "}\n", - "domain: \"qonnx.custom_op.general\"\n", - "]" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# register our new op\n", "general.custom_op[\"MyMixedPowerOp\"] = MyMixedPowerOp\n", @@ -567,19 +427,9 @@ }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Available functions: ['__abstractmethods__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '__weakref__', '_abc_impl', 'execute_node', 'get_nodeattr', 'get_nodeattr_allowed_values', 'get_nodeattr_def', 'get_nodeattr_types', 'infer_node_datatype', 'make_shape_compatible_op', 'my_custom_cpp_gen', 'onnx_node', 'set_nodeattr', 
'verify_node']\n", - "codegen_dir: \n", - "exec_mode: python\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.custom_op.registry import getCustomOp\n", "\n", @@ -602,7 +452,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -641,7 +491,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -657,17 +507,9 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/tmp/finn_dev_maltanar/my_custom_oppswiou3i\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "new_op_inst = getCustomOp(mixedop_graph_new.graph.node[0])\n", "codegen_dir = new_op_inst.get_nodeattr(\"codegen_dir\")\n", @@ -683,17 +525,9 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "compile.sh node_model\ttop.cpp\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! ls {codegen_dir}" ] @@ -707,39 +541,9 @@ }, { "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\r\n", - "#include <iostream>\r\n", - "#include <fstream>\r\n", - "using namespace std;\r\n", - "#define EXPONENT 2\r\n", - "\r\n", - "int main(int argc, char **argv) {\r\n", - " ifstream infile(\"input.txt\");\r\n", - " ofstream outfile(\"output.txt\");\r\n", - " \r\n", - " float elem;\r\n", - " while (infile >> elem)\r\n", - " {\r\n", - " float res = 1.0;\r\n", - " for(int i=0; i < EXPONENT; i++) {\r\n", - " res *= elem;\r\n", - " }\r\n", - " outfile << res << \"\\n\";\r\n", - " }\r\n", - "\r\n", - " return 0;\r\n", - "}\r\n", - " " - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! cat {codegen_dir}/top.cpp" ] @@ -757,7 +561,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -766,7 +570,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -775,26 +579,16 @@ }, { "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "49\r\n", - "64\r\n", - "81\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! 
cat {codegen_dir}/output.txt" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -812,21 +606,9 @@ }, { "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[[-6., 3., 2., -5.],\n", - " [ 5., 2., 0., -2.]]], dtype=float32)" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# generate a random input of e.g signed 4-bit values\n", "random_input = gen_finn_dt_tensor(DataType[\"INT4\"], input_shape)\n", @@ -842,21 +624,9 @@ }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'outp': array([[[36., 9., 4., 25.],\n", - " [25., 4., 0., 4.]]], dtype=float32)}" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# run with FINN's execute_onnx, custom node will use Python execution\n", "new_op_inst.set_nodeattr(\"exec_mode\", \"python\")\n", @@ -874,21 +644,9 @@ }, { "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'outp': array([[[36., 9., 4., 25.],\n", - " [25., 4., 0., 4.]]])}" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# run with FINN's execute_onnx, custom node will use c++ execution\n", "new_op_inst.set_nodeattr(\"exec_mode\", \"c++\")\n", diff --git a/notebooks/basics/0_how_to_work_with_onnx.ipynb b/notebooks/basics/0_how_to_work_with_onnx.ipynb index 727181f583dc8c8c9780f5f72a281a4c528e90e7..a4ea75fe38aac6720671a9b51de0ef31951cccb0 100644 --- a/notebooks/basics/0_how_to_work_with_onnx.ipynb +++ b/notebooks/basics/0_how_to_work_with_onnx.ipynb @@ -31,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -56,7 +56,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -98,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -119,7 +119,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -154,7 +154,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -171,7 +171,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -180,40 +180,9 @@ }, { "cell_type": "code", - "execution_count": 7, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '/tmp/simple_model.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fcdfc956b70>" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], 
"source": [ "showInNetron('/tmp/simple_model.onnx')" ] @@ -229,7 +198,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -252,7 +221,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -270,7 +239,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -289,7 +258,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -308,29 +277,9 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The output of the ONNX model is: \n", - "[[22. 13. 21. 8.]\n", - " [ 0. 8. 11. 1.]\n", - " [ 3. 12. 8. 2.]\n", - " [ 0. 6. 1. 4.]]\n", - "\n", - "The output of the reference function is: \n", - "[[22. 13. 21. 8.]\n", - " [ 0. 8. 11. 1.]\n", - " [ 3. 12. 8. 2.]\n", - " [ 0. 6. 1. 4.]]\n", - "\n", - "The results are the same!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "ref_output= expected_output(in1_values, in2_values, in3_values)\n", "print(\"The output of the ONNX model is: \\n{}\".format(output[0]))\n", @@ -369,7 +318,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -386,7 +335,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -410,7 +359,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -433,19 +382,9 @@ }, { "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found adder node: Add1\n", - "Found adder node: Add2\n", - "Found adder node: Add3\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "add_nodes = identify_adder_nodes(finn_model)\n", "for node in add_nodes:\n", @@ -461,7 +400,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -490,7 +429,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -520,19 +459,9 @@ }, { "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found following pair that could be replaced by a sum node:\n", - "Add1\n", - "Add2\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "for node in add_nodes:\n", " add_pairs = adder_pair(finn_model, node)\n", @@ -556,18 +485,9 @@ }, { "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The new node gets the following inputs: \n", - "['in1', 'in2', 'in3']\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "input_list = []\n", "for i in range(len(substitute_pair)):\n", @@ -591,7 +511,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -607,7 +527,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, "outputs": 
[], "source": [ @@ -628,7 +548,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -656,7 +576,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -666,40 +586,9 @@ }, { "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/tmp/simple_model1.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fcdfc130cc0>" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron('/tmp/simple_model1.onnx')" ] @@ -713,7 +602,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -723,29 +612,9 @@ }, { "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The output of the manipulated ONNX model is: \n", - "[[22. 13. 21. 8.]\n", - " [ 0. 8. 11. 1.]\n", - " [ 3. 12. 8. 2.]\n", - " [ 0. 6. 1. 4.]]\n", - "\n", - "The output of the reference function is: \n", - "[[22. 13. 21. 8.]\n", - " [ 0. 8. 11. 1.]\n", - " [ 3. 12. 8. 2.]\n", - " [ 0. 6. 1. 4.]]\n", - "\n", - "The results are the same!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(\"The output of the manipulated ONNX model is: \\n{}\".format(output[0]))\n", "print(\"\\nThe output of the reference function is: \\n{}\".format(ref_output))\n", diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index c75bd9069d6df897d7dba3d2453d3b0495420c1c..b628fa455a27649791c2b6f72409b85f71f7c704 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -55,14 +55,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from finn.util.basic import make_build_dir\n", "from finn.util.visualization import showInNetron\n", + "import os\n", " \n", - "build_dir = \"/workspace/finn\"" + "build_dir = os.environ[\"FINN_ROOT\"]" ] }, { @@ -76,20 +77,9 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/brevitas/src/brevitas_examples/bnn_pynq/models/CNV.py:106: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " x = 2.0 * x - torch.tensor([1.0], device=x.device)\n", - "/workspace/brevitas/src/brevitas/quant_tensor/__init__.py:74: TracerWarning: torch.tensor results are registered as constants in the trace. 
You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " training = torch.tensor(training, dtype=torch.bool)\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import onnx\n", "from finn.util.test import get_test_model_trained\n", @@ -119,38 +109,9 @@ }, { "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '/workspace/finn/end2end_cnv_w1a1_tidy.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f912af76550>" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/end2end_cnv_w1a1_tidy.onnx\")" ] @@ -173,18 +134,9 @@ }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/finn-base/src/finn/transformation/infer_data_layouts.py:114: UserWarning: Assuming 4D input is NCHW\n", - " warnings.warn(\"Assuming 4D input is NCHW\")\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.util.pytorch import ToTensor\n", "from qonnx.transformation.merge_onnx_models import MergeONNXModels\n", @@ -208,7 +160,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -230,39 +182,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/end2end_cnv_w1a1_pre_post.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f8ffd85a760>" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/end2end_cnv_w1a1_pre_post.onnx\")" ] @@ -285,7 +207,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -328,39 +250,9 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/end2end_cnv_w1a1_streamlined.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f91ac6e6f70>" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + 
"execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/end2end_cnv_w1a1_streamlined.onnx\")" ] @@ -376,18 +268,9 @@ }, { "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/finn/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py:591: UserWarning: Clipping some thresholds in \n", - " warnings.warn(\"Clipping some thresholds in %s\" % self.onnx_node.name)\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls\n", "from finn.transformation.fpgadataflow.create_dataflow_partition import (\n", @@ -401,8 +284,8 @@ "mem_mode = \"decoupled\"\n", "\n", "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_streamlined.onnx\")\n", - "model = model.transform(to_hls.InferBinaryStreamingFCLayer(mem_mode))\n", - "model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode))\n", + "model = model.transform(to_hls.InferBinaryMatrixVectorActivation(mem_mode))\n", + "model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode))\n", "# TopK to LabelSelect\n", "model = model.transform(to_hls.InferLabelSelectLayer())\n", "# input quantization (if any) to standalone thresholding\n", @@ -429,46 +312,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Notice the additional `RemoveCNVtoFCFlatten` transformation that was not used for TFC-w1a1. In the last Netron visualization you may have noticed a `Reshape` operation towards the end of the network where the convolutional part of the network ends and the fully-connected layers started. That `Reshape` is essentialy a tensor flattening operation, which we can remove for the purposes of hardware implementation. We can examine the contents of the dataflow partition with Netron, and observe the `ConvolutionInputGenerator`, `StreamingFCLayer_Batch` and `StreamingMaxPool_Batch` nodes that implement the sliding window, matrix multiply and maxpool operations in hlslib. *Note that the StreamingFCLayer instances following the ConvolutionInputGenerator nodes are really implementing the convolutions, despite the name. The final three StreamingFCLayer instances implement actual FC layers.*" + "Notice the additional `RemoveCNVtoFCFlatten` transformation that was not used for TFC-w1a1. In the last Netron visualization you may have noticed a `Reshape` operation towards the end of the network where the convolutional part of the network ends and the fully-connected layers started. That `Reshape` is essentialy a tensor flattening operation, which we can remove for the purposes of hardware implementation. We can examine the contents of the dataflow partition with Netron, and observe the `ConvolutionInputGenerator`, `MatrixVectorActivation` and `StreamingMaxPool_Batch` nodes that implement the sliding window, matrix multiply and maxpool operations in hlslib. *Note that the MatrixVectorActivation instances following the ConvolutionInputGenerator nodes are really implementing the convolutions, despite the name. 
The final three MatrixVectorActivation instances implement actual FC layers.*" ] }, { "cell_type": "code", - "execution_count": 10, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/end2end_cnv_w1a1_dataflow_parent.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f8ffd85ae20>" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir + \"/end2end_cnv_w1a1_dataflow_parent.onnx\")" ] @@ -482,39 +333,9 @@ }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/end2end_cnv_w1a1_dataflow_model.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f8ffd832280>" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir + \"/end2end_cnv_w1a1_dataflow_model.onnx\")" ] @@ -528,12 +349,12 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_dataflow_model.onnx\")\n", - "fc_layers = model.get_nodes_by_op_type(\"StreamingFCLayer_Batch\")\n", + "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", "# each tuple is (PE, SIMD, in_fifo_depth) for a layer\n", "folding = [\n", " (16, 3, 128),\n", @@ -567,44 +388,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Below we visualize in Netron to observe the `StreamingDataWidthConverter` and `StreamingFIFO` nodes that have been inserted into graph, as well as the folding factors in the `PE` and `SIMD` attributes of each `StreamingFCLayer_Batch`." + "Below we visualize in Netron to observe the `StreamingDataWidthConverter` and `StreamingFIFO` nodes that have been inserted into graph, as well as the folding factors in the `PE` and `SIMD` attributes of each `MatrixVectorActivation`." 
] }, { "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/end2end_cnv_w1a1_folded.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f8ff1243af0>" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir + \"/end2end_cnv_w1a1_folded.onnx\")" ] @@ -627,22 +418,9 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/finn/src/finn/transformation/fpgadataflow/floorplan.py:107: UserWarning: 32 nodes have no entry in the provided floorplan, SLR was set to -1\n", - " warnings.warn(\n", - "/workspace/finn/src/finn/transformation/fpgadataflow/insert_fifo.py:154: UserWarning: Overriding input FIFO depth to 32\n", - " warnings.warn(\"Overriding input FIFO depth to 32\")\n", - "/workspace/finn/src/finn/transformation/fpgadataflow/insert_fifo.py:200: UserWarning: Overriding output FIFO depth to 32\n", - " warnings.warn(\"Overriding output FIFO depth to 32\")\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "test_pynq_board = \"Pynq-Z2\"\n", "target_clk_ns = 10\n", @@ -666,22 +444,9 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Welcome to PYNQ Linux, based on Ubuntu 18.04 (GNU/Linux 4.19.0-xilinx-v2019.1 armv7l)\r\n", - "\r\n", - " * Super-optimized for small spaces - read how we shrank the memory\r\n", - " footprint of MicroK8s to make it the smallest full K8s around.\r\n", - "\r\n", - " https://ubuntu.com/blog/microk8s-memory-optimisation\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import os\n", "\n", @@ -701,7 +466,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -714,20 +479,9 @@ }, { "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'/home/xilinx/finn_dev_jduarte/pynq_deployment_yrxnwrak'" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "target_dir_pynq = target_dir + \"/\" + model.get_metadata_prop(\"pynq_deployment_dir\").split(\"/\")[-1]\n", "target_dir_pynq" @@ -735,24 +489,9 @@ }, { "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "total 4240\r\n", - "-rw-rw-r-- 1 xilinx xilinx 18616 Jun 28 20:42 driver_base.py\r\n", - "-rw-r--r-- 1 xilinx xilinx 4868 Jun 28 20:42 driver.py\r\n", - "drwxr-xr-x 4 xilinx xilinx 4096 Jun 28 20:42 finn\r\n", - "-rw-r--r-- 1 xilinx xilinx 4045671 Jun 28 20:42 resizer.bit\r\n", - "-rw-r--r-- 1 xilinx xilinx 247083 Jun 28 20:42 resizer.hwh\r\n", - "drwxr-xr-x 2 xilinx xilinx 4096 Jun 28 20:42 runtime_weights\r\n", - "-rw-rw-r-- 1 xilinx xilinx 4107 Jun 28 20:42 
validate.py\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! ssh {options} {username}@{ip} -p {port} 'ls -l {target_dir_pynq}'" ] @@ -766,32 +505,9 @@ }, { "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "<matplotlib.image.AxesImage at 0x7f917faeb6d0>" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD5CAYAAADhukOtAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAe8klEQVR4nO2da4yc53Xf/2feuex9Z5dLLpdXURJlRVZiSqFVO1EV2akDRUkgGwhcu4ChAEYUBBEQA+kHwQVqF+gHp6ht+EPhgq5VK4ZrWbUtSEiE1LYcRDDsSKJu1IW6ULxIJJdcksu97+zcTj/MyKXU5//sksudpf38fwDB2efs875nnnnPvLPPf8455u4QQvz6k1tvB4QQnUHBLkQiKNiFSAQFuxCJoGAXIhEU7EIkQn41k83sDgBfA5AB+B/u/qXY7/d3533DQDF8rPh5Ltq3mKTo4Lbouci06PH40eJGj70Px/wP2yx2MjIHAGLK7KXJttyP2NHcL/4aaB2TrQenGX3Sl+ZH7NkxSzPiBvNxer6OxaVG0MlLDnYzywD8NwAfA3AcwNNm9qi7v8LmbBgo4gv/7vrw8bxJz1UshN20HA+IanWJ2uqNGj9XMfxmBACNZthHj7wqlmtQWy6jJnitlx8T/JiFYiU4nkVeastx/xvNOrXV6vw1azZJUBj3ox6+RgEAS+x4WC5wwz7G3tSrVX59NBqRdYxcw7nIa1Yl19U8X3osVMPH+/ZPTkR8uHRuAXDI3Q+7exXAgwDuWsXxhBBryGqCfSuAty/4+Xh7TAhxBbLmG3Rmdo+Z7Tez/XOLkc8lQog1ZTXBfgLA9gt+3tYeexfuvs/d97r73r7uVe0HCiFWwWqC/WkAu81sl5kVAXwKwKOXxy0hxOXmkm+17l43s3sB/B+0pLf73f3l6BwYquT9xX2RTyS7lSXwHesc+FZ3Ph/ZIb8ExcsKfNJStUpt9WbEx4j0lkV28fNkmjX5DjPqXLmI7SI3I/5XrSs43shKfE7seA2+HtbkPhpRE7oir1neuC2XjygXtcgaG/8T1skae0RnyLKwjzFlYlWfq939MQCPreYYQojOoG/QCZEICnYhEkHBLkQiKNiFSAQFuxCJ0OFvuTicJVY4l3+8EZ5jDS7VNGtc8sq6IzIOeDIDk7yaEemnWChQW925rVmLPLfI+er1sM0imVy5iMxnGU8M8iwsrwHAYiMssZ06x+Wp+Sr3cW6Oz8ucr0d/V3gdi8Zf54GebmrrLnEJrZnj11wuKqOFfeRXB1BjyVcR7U13diESQcEuRCIo2IVIBAW7EImgYBciETq6G2/uyDfIrnsW2S0mSRylLJIfn49tS0YSHUiCAQCaCFOPFQvLcT8KRb7ru/mq66htZuostZ09txA+V57vqucQSU6p80tk0bn/B4+FffTSMJ1Ty3hiU7WP7/zPTU9S24mJqeB4X4k/r8ap8BwA2DHK13FDP1/HrnysnFX4Oi5GLuEGUSBi5bZ0ZxciERTsQiSCgl2IRFCwC5EICnYhEkHBLkQirEO517A0YPkyn0HkhHqsA0eOy3LVOk9YKEZqpDUapFZYJDEFESmkGKmD9q/+zceo7Zmf/4LaTk6dC47PRyS0eoNLXseOn6G2Iyd495FSeSw4vm10F53jpX5qq+b561Lo20ht9cpccPzcxEk6p6fM5cHjc6eprUJqJQLAaD9Pa+kphBNhGrWwjAoArIlPpJOX7uxCpIKCXYhEULALkQgKdiESQcEuRCIo2IVIhFVJb2Z2FMAsgAaAurvvjf1+03JYyoXllemFHjqvQdoTDfVxeW0g43JYPlKPrRmR5ZisQevqIZ5Ft7Bwntp++vePUNvpKV6v7/Rc+HzHTvBzHRt/m9qyrj5qa2QD1NY7MBIcL/Tw4+W7eBZdKdKSqSvHpcOz1XBbsbFtO+icyuI8tR05wqW3yekKtWXGn/dVG8O2QoNLecbqMkak3suhs3/E3XnOpRDiikAf44VIhNUGuwP4kZk9Y2b3XA6HhBBrw2o/xt/q7ifMbBOAH5vZq+7+xIW/0H4TuAcAhvp5lQ8hxNqyqju7u59o/z8B4GEAtwR+Z5+773X3vX3d6/BVfCEEgFUEu5n1mln/O48B/AGAly6XY0KIy8tqbrWjAB5ub/XnAfwvd//H2IR603BmMZzhM1kr03lP/Pyfg+O/sZtLLh95f1j6AYChSHHLJslsA4AcadOTy/GMpobztkURNQlHjh2htslFngHmPUPB8ayPSz+5oVlq6y4PUlu1wqWmKmmvNDDEX7OBPm6bOHWK2mbO84KT/cXwJd7VzWW+t85zcanQv4nazpx6i9r6TvM13jwQ9qXbIpmKpAgrIrLyJQe7ux8G8IFLnS+E6CyS3oRIBAW7EImgYBciERTsQiSCgl2IROhsr7eshPxguODgwjn+vlMrhgsKTi6EpTAAWKjy3mADRZ7Z1iR9t9rG4HCW8Yy9SpVLPGd48hrOznIJMFYQcWhjOJtrvjlD54yA+5hFMtGqBb6Olfmw1FSZ437sHN1AbQtEQgOACZLZBgBWCMuU05O8mCMiBUQX53lGXFbk18HEDM86HCfZcjtH+PWdYwlxsRaH3CSE+HVCwS5EIijYhUgEBbsQiaBgFyIROrob39Xdi/f91v+XBQsAOP4vr9F5fYPh3fhbPhw+FgD0ZMeorUp2igEgl+dJLVYI70w3vEzn9G/aTm3PHzhEbX1lvjO9def7qc1z4d3nQmTnvLkUbhkFANVqpMVWZK0yksTx8gsH6JyBUqRFUi9PkumN1LU7eSpcM65OlBUAyMgOPgAM9XN1YrrBk57OT3LbkVPTwfEto5vpnDxTlCLZVbqzC5EICnYhEkHBLkQiKNiFSAQFuxCJoGAXIhE6Kr3lsjx6BsOS0s6rr6PzFolqsWPXtXTOSI1LK1NHuCxXiyTCNOrhRIdbbvs4nbPjat4Ra9dvHqW2Z557gdqG+rgkc3IiXD8t77yMd6nAJS/wZcRcJClkmtSFG+rl54qcCo2IVDayMSzNAsBSLfx6nj0flrsAwCItu/ojdfLyGQ+naoUn3hx++3
hwfGOZy3y7t4XbqHnk/q07uxCJoGAXIhEU7EIkgoJdiERQsAuRCAp2IRJhWenNzO4H8McAJtz9xvbYMIDvAbgKwFEAn3R3XmTrnWPlcshK4Qylk6cP0nl7fvuDwfHeQV7zK5s9QW2NeqRFTqTW2eG3w9lytw6F6+oBAHq2UVN/L5djuvI8k6s7Uuusq0gytiJ11bZuGaO2V958k9qKRV7nb2Y2vFZXbdtN51x3/Q3UNjnJL6++gTK1nTw1ERy3HK/vVh7iNf6mI7Xksohk191TprbF2fB1cIhcbwDQXQyfq1aPZClSy//jWwDueM/YfQAed/fdAB5v/yyEuIJZNtjb/dbf+w2JuwA80H78AICPX163hBCXm0v9m33U3cfbj0+h1dFVCHEFs+oNOnd3RL7paGb3mNl+M9s/Pc1rhgsh1pZLDfbTZjYGAO3/w7sgANx9n7vvdfe9g4MDl3g6IcRqudRgfxTA3e3HdwN45PK4I4RYK1YivX0XwO0ARszsOIAvAPgSgIfM7LMAjgH45EpOZpah0BW+u1cqvCDi0lI47a0QkaB6evmniN5IS6NSxrPe+vLhfk3f2vdNOudP/u291FaYP0VtxVIkeynHfdx19dbg+MTkSTqnMsez1zZvGqG2yRkuHS5Vw6/n1dfyTMVrruWZj9PPPUtt87Nz1DYzH/ax3uAS1eJiuB0TAJTLg9TWcC6VDZR5tl+9Gn49sxzvD3Z8PPxhukqy/IAVBLu7f5qYfn+5uUKIKwd9g06IRFCwC5EICnYhEkHBLkQiKNiFSISOFpyEGSwLSxALEfmnsrAYHC9EenLNnuNZXsi49FYAL0Q4Vg5nSr1xkPdsO3mc27DA5bBjx49S202beY+7rTvDxSi3TPBvNM8f4gU4h0tlausvc1nu8OGjwfGxLWFpEACmZvg3LGsRqez0Gd6rrukWHLdIcciFiPRmOX5dhc/UojdSqBLNcJZd0cLXPQBUz4VlW4+U7dSdXYhEULALkQgKdiESQcEuRCIo2IVIBAW7EInQWenNAZCeXZlzaWVsJNwfrqeLS28/PcALJQ5FivLtHubZSV2lsOxSzHOp5szEUWprLvHihTuu4UUss8jz7hkYCo6PjPLCl+cmedbYdCSzrRFRNzeS/mv5iFxaIdlfQDyba7HCs8PqxEk2DgCVJZ6BWa/z++OGkU3UZsavq6KFr5+SRfoOejjjsxApeqk7uxCJoGAXIhEU7EIkgoJdiERQsAuRCB3djTcDCvlwMslgH09OKfeHbdbku5UzzhMPzp7nKQsj/XxJeovhHdVGLlwjDwCOnjxKbaNDvJ7Zzmt5K6QKPx2eeibcRuvEON/57+8L7+ADQKHAWzy9fOgt7gi5jzQj95elyG783DxPCikP83ZNdZIIM36aFkRGbz9/XfIZTzTp6eE1EYusLRcA1MKJPI35KTpldFN/cDxf4G2tdGcXIhEU7EIkgoJdiERQsAuRCAp2IRJBwS5EIqyk/dP9AP4YwIS739ge+yKAPwdwpv1rn3f3x1ZywszCUsjmTeHaaS0niYwTSYAY28YTSfZH5LAp45KdZ+E6eYMjPKlicIAnQBS6wvIJAFwVkd76BsOJQQDwP+//dnB8IbJWM4uT1LawyGsDFiJXz+ah8POuTPJ6d/Mk0QgABgf46/Lqa29Q2+nTZ4LjM5GWUeUyf2IDvX3UljnXRAtVvo4ZqUW4sZcfb7ArHEf5yO17JXf2bwG4IzD+VXff0/63okAXQqwfywa7uz8BgL/1CyF+JVjN3+z3mtkBM7vfzPhXsIQQVwSXGuxfB3ANgD0AxgF8mf2imd1jZvvNbP/U1NQlnk4IsVouKdjd/bS7N9y9CeAbAGjXAnff5+573X1vuVy+RDeFEKvlkoLdzMYu+PETAF66PO4IIdaKlUhv3wVwO4ARMzsO4AsAbjezPWhVlTsK4C9WcrJcLkezfwaGuPRWb4TdLOV5JtF1u3ZQ2/5nuOQ1U7iW2po2Gxwf3crltVcO/gu1/c7v/Rm1/eLnfN78fKRNUvVscHzi1Nt0Tuw9f67GbXlwaWgoF86y29rNfZ8+wyW0esa3hUY3cVujEc6kW4y0eKos8rp785EaevUml/NqlRPUtqkQzujb0sez6Jbq4Tmxu/eywe7unw4Mf3O5eUKIKwt9g06IRFCwC5EICnYhEkHBLkQiKNiFSISOFpzM5XLo7QtnLw2NjNB5dQu7WckV6ZyuvgFqK5d5QcG33j5Fbbd+8P1hP+Z4O6me/nDWFQCMnzhObYdef53a6g3enihH6g3Oz0zTOf0bxqhteprLUIN9vBjl+667MTj+9Auv0jnPvnqU2m69/Q+prVDkEtXhQ4eC49Oz/HnFimJWFrm8tnOUS7rdvbyg6vBweJ7neQHOejVc+NJJVimgO7sQyaBgFyIRFOxCJIKCXYhEULALkQgKdiESoaPSm3sTzXpY8hgc5oX85hfDhQgXGrzvVpbx97Ed27dR2+sv88yr6YWwxNbXyzPstl9DTTj2Oi++eOLkOLV9+MMfpLaFhbA01L9lK50zvIUX53xrkktli0tcciz2hvuvDWzcTufc1M9flzNnwv3QAODosReobX4xLFNOTXMJbePGjdQ26Px12dnHJdFNA7wHW8HCmYDVGu9v10skthx4TOjOLkQiKNiFSAQFuxCJoGAXIhEU7EIkQkd345v1GmbPhXczuyO1vZYq4V1Oa3L3zfiu5Mgwb5/0eu4wtU1Mhlv4nMv4rvRgH6+td/2NPCHn8DFeM67GuyRhaiasduzevZvO2b2LSwbHxnkCzcsvv0ht586Gk1OKJa66DPXxRJLjL3NV4NQ5XtfOSLJUFmm9FWsdtpPnmWBHP08M6srxpJalSvj6aTZ5bcNanRyPX/a6swuRCgp2IRJBwS5EIijYhUgEBbsQiaBgFyIRVtL+aTuAvwMwitbG/j53/5qZDQP4HoCr0GoB9Ul3D/f8abO0tITDh8LS1o7dv0HndeXC0luzyhMF8l0RGSRi6+/n0lDfQLiu3fXXv4/O+cmPHqO2hWle765neBO1HTo+QW3bt4WTcna972Y6p1Tkl8HVO3iSz9Qkf7lfORhOKGo61w1PTPFEkhmSDAUAlQaXbWemwlLkps086eatc7w+3fB2LpeeK3E/0OTPbaoefm6e59fpEjleFTzhZiV39jqAv3H3GwB8CMBfmdkNAO4D8Li77wbwePtnIcQVyrLB7u7j7v5s+/EsgIMAtgK4C8AD7V97AMDH18hHIcRl4KL+ZjezqwDcBOBJAKPuv0zuPYXWx3whxBXKioPdzPoA/ADA59z9Xd9PdHcH+aKemd1jZvvNbP/sLC8YIIRYW1YU7GZWQCvQv+PuP2wPnzazsbZ9DEBw18jd97n7XnffG9v8EkKsLcsGu5kZWv3YD7r7Vy4wPQrg7vbjuwE8cvndE0JcLlaS9fa7AD4D4EUze7499nkAXwLwkJl9FsAxAJ9c7kALS3U8fygsG+248RY6r4lwtpmxzB8AaPL0n5nZWWqbmjpLbRuG9wTH77zjI3TOng9cT20P/fBha
jPjEsrg4BC1bd0SlpT6Bsp0TlYPry8ADG/ml8jYrhq1TXeHZaPnXuD14sbneEqZF3g7r8HNPItx5JqwVJZFZK2Gcz9e83D7MgA4dIrLg8WMH3OxUgmOL0Qu73ozfH3MNnh24LLB7u4/A8A8/f3l5gshrgz0DTohEkHBLkQiKNiFSAQFuxCJoGAXIhE6WnCy0jC8Pt0dtJ1t8AKAXghLE7kqL4boRJoAgFyO27aM8Wyzf/074cyxrgKXXHbt5G2X/uhPP0Vt33/4H6jt7Cn+vMenw8ULK5VDdE4RXOOZXOS2Q8d41h6qYVnOR3iG4NCmcJFKAGhGKim2vvNF5nWFj9m0cCFKAKhF2opNN/i5ugr8mF15Lr3NWzjLrlbg5/JmeH0bEclWd3YhEkHBLkQiKNiFSAQFuxCJoGAXIhEU7EIkQkelt6WG4fWp8PvLIz/jfcP27BwJjm8u8gyknkIkW2sz7782NsKzq665mhQpdF5McPzMOWq7/0Eurz37/CvUxnrfAQBNBHT+vu4NfrxGia9HI8eloTzCEms9Ig3Vc+E5ANAVu1IjWWqVavh5e47PyUcy4rIm7+vnFS5T1sHnFZphHzPjr1m1FvY/0uJQd3YhUkHBLkQiKNiFSAQFuxCJoGAXIhE6uhvfgGEuF04WePzZ1+m8N94Mt4y647dvoHOu2cLb9Bw5HG5NBAC3ffBGausiiQmzVb7D/NA/Pk1tz71yktoW6pFWQpHd4lwh/P7djNTkyxnfRY7tWjeaPAFoieww1xp8jhmvabeESFKI8+eWz5Od7ozf53p6eEJLEdz/Bt9wR8N4qDXIxHqNvy7F/nJw3HL8PLqzC5EICnYhEkHBLkQiKNiFSAQFuxCJoGAXIhGWld7MbDuAv0OrJbMD2OfuXzOzLwL4cwBn2r/6eXd/LHqyfB4bRjYGbZPnuXwyfn4qOP7zF3irm0ZtZ8QTLq1s3EySXQBYFpbDntr/Ep3zDz/9BbUtNXnNNeS59JbLXfx7dGOJJ7t4RJZrRuS1mOTFWigV8vySs4xLmMj4a5aPzMuy8PliTUazyPrmnMuDjUiyUTMiHTLNbvNmLh/3D4Rtb5Yi68Q9+CV1AH/j7s+aWT+AZ8zsx23bV939v67gGEKIdWYlvd7GAYy3H8+a2UEAvGSqEOKK5KI+D5rZVQBuAvBke+heMztgZvebGW8tKoRYd1Yc7GbWB+AHAD7n7jMAvg7gGgB70Lrzf5nMu8fM9pvZ/voib5UshFhbVhTs1qrC/wMA33H3HwKAu59294a7NwF8A0Cwwbq773P3ve6+N9/NG0EIIdaWZYPdzAzANwEcdPevXDA+dsGvfQIA35IWQqw7K9mN/10AnwHwopk93x77PIBPm9ketOS4owD+YrkDmRmVSQoFLjXVK2E54ejpGTpnaf4gtd1283XU1l0eo7bpSlgi+ecn99M5FeeZS7U6l3FKJZ7Z1ozUQVtYCLcSipFFMrKMJ70h0pEJJSJ5xbKyELFZicuU3d28dl2eSH21SEbZ7Pw8tTUiMuVSnb8ug0PhOooAMDoWtvVFCu8tzob/JPbItbGS3fifAQi95FFNXQhxZaFv0AmRCAp2IRJBwS5EIijYhUgEBbsQidDRgpNwR7NOsqhiGUNZWIaqgmc7TcwtUduzr/FCj3cucGll1sNyx4nz/JuBpT6eXVVf4P5Xlrj/PT0RqYm0vYodz3Lcj1ykXVMsg82JjOaR+0shIjfO1Xj2XbXOpTImy8Uy9mIS2nyk9VZfmctr5Y285Vi1Hj7ma6/yrM4CyUasVbl/urMLkQgKdiESQcEuRCIo2IVIBAW7EImgYBciETosvQFgWUPO5Y4sCxfrazqXhRo5XuDv6ASXyu5/iOf3fPT2vcHxIyfPBMcBYKERK0IYkaG6eOHArMhtPaSHWbGby1qLs1y6imWHeUSiKpCMrSzPX7PYubJIUclYH7vFhbmLnhM7V3lomNo2jPKMybPnJqlt6uyp8PhbvCfhtbt2hQ0RSVF3diESQcEuRCIo2IVIBAW7EImgYBciERTsQiRCR6W3LJ9huFwO2ioVLofNL4YzeYoZz/6qR2ShXKS45RNPHaC2IyfD2XLT87xw5OTcIrWRZCcAQG9vJFsuUlSwVAo/t3xEruvq5hllWSQjLl/gx2yQ+0g9InlZxObOfWzU+PpXa+FF7u7iUuTIhg3UNjTC5bVqJHNzqRgpHkn6szXzXD6er4Svq2ZEwtadXYhEULALkQgKdiESQcEuRCIo2IVIhGV3482sC8ATAErt3/++u3/BzHYBeBDABgDPAPiMu0f2lwFvOpbILmIp8raz1AjvthYyvhtc55vI8Bw/Wa6b74IfIwkvuUhyR73Gd5hjikGlUqG2+Uh7ohx5bmyXHgB6i3zXtzuSQJPLcf+LXeHzdffw9a1WeSLM2UmeSNIEn5cvhNdjaKCXzhkdLlPb5s08EWZqntf5m506T21z01PB8fIwP9fZM2eD4/VIMtFK7uxLAD7q7h9Aqz3zHWb2IQB/C+Cr7n4tgPMAPruCYwkh1ollg91bvJMnWGj/cwAfBfD99vgDAD6+Fg4KIS4PK+3PnrU7uE4A+DGANwFMuf+yRelxAFvXxEMhxGVhRcHu7g133wNgG4BbAFy/0hOY2T1mtt/M9tcWeItlIcTaclG78e4+BeCfAHwYQNnsl429twE4Qebsc/e97r630DOwGl+FEKtg2WA3s41mVm4/7gbwMQAH0Qr6P23/2t0AHlkjH4UQl4GVJMKMAXjAzDK03hwecve/N7NXADxoZv8ZwHMAvrncgZrNJpYWw5JSKTM6r4d42azxJJNI1yI0wSWjWCJBk7SbqlcjCRwN/rxiLYhitmYkEYZJb+fPc+lnMrKOA31cohqM1GMbILXwusClvEaTS1d5iyTrlPiLvVQJH7OU569L7Fz1hemIjfs/N3WO2pokWaerxCXRCquTZ5HnRS1t3P0AgJsC44fR+vtdCPErgL5BJ0QiKNiFSAQFuxCJoGAXIhEU7EIkgsUknst+MrMzAI61fxwBEE7d6Szy493Ij3fzq+bHTnffGDJ0NNjfdWKz/e4ebp4mP+SH/LjsfuhjvBCJoGAXIhHWM9j3reO5L0R+vBv58W5+bfxYt7/ZhRCdRR/jhUiEdQl2M7vDzF4zs0Nmdt96+ND246iZvWhmz5vZ/g6e934zmzCzly4YGzazH5vZG+3/h9bJjy+a2Yn2mjxvZnd2wI/tZvZPZvaKmb1sZn/dHu/omkT86OiamFmXmT1lZi+0/fhP7fFdZvZkO26+Z2a84moId+/oPwAZWmWtrgZQBPACgBs67Ufbl6MARtbhvLcBuBnASxeM/RcA97Uf3wfgb9fJjy8C+PcdXo8xADe3H/cDeB3ADZ1ek4gfHV0TAAagr/24AOBJAB8C8BCAT7XH/zuAv7yY467Hnf0WAIfc/bC3Sk8/COCudfBj3XD3JwC8tzbyXWgV7gQ6VMCT+NFx3H3c3Z9tP55FqzjKVnR4TSJ+dBRvcdmLvK5HsG8F8PYFP69nsUoH8CMze8bM7lknH95h
1N3H249PARhdR1/uNbMD7Y/5a/7nxIWY2VVo1U94Euu4Ju/xA+jwmqxFkdfUN+hudfebAfwhgL8ys9vW2yGg9c6O1hvRevB1ANeg1SNgHMCXO3ViM+sD8AMAn3P3d1Un7eSaBPzo+Jr4Koq8MtYj2E8A2H7Bz7RY5Vrj7ifa/08AeBjrW3nntJmNAUD7/4n1cMLdT7cvtCaAb6BDa2JmBbQC7Dvu/sP2cMfXJOTHeq1J+9xTuMgir4z1CPanAexu7ywWAXwKwKOddsLMes2s/53HAP4AwEvxWWvKo2gV7gTWsYDnO8HV5hPowJqYmaFVw/Cgu3/lAlNH14T50ek1WbMir53aYXzPbuOdaO10vgngP6yTD1ejpQS8AODlTvoB4LtofRysofW312fR6pn3OIA3APwEwPA6+fFtAC8COIBWsI11wI9b0fqIfgDA8+1/d3Z6TSJ+dHRNAPwWWkVcD6D1xvIfL7hmnwJwCMD/BlC6mOPqG3RCJELqG3RCJIOCXYhEULALkQgKdiESQcEuRCIo2IVIBAW7EImgYBciEf4vt7E0CllzrOkAAAAASUVORK5CYII=\n", - "text/plain": [ - "<Figure size 432x288 with 1 Axes>" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import pkg_resources as pk\n", "import matplotlib.pyplot as plt\n", @@ -812,7 +528,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -829,20 +545,9 @@ }, { "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[3.]], dtype=float32)" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "ret[oname]" ] @@ -874,20 +579,9 @@ }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[sudo] password for xilinx: Requirement already satisfied: dataset_loading from git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading in /usr/local/lib/python3.6/dist-packages\n", - "Requirement already satisfied: Pillow in /usr/lib/python3/dist-packages (from dataset_loading)\n", - "Requirement already satisfied: scipy in /usr/lib/python3/dist-packages (from dataset_loading)\n", - "Connection to 99.121.248.96 closed.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! ssh {options} -t {username}@{ip} -p {port} 'echo {password} | sudo -S pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading'" ] @@ -905,31 +599,9 @@ }, { "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[sudo] password for xilinx: Tar File found in dest_dir. Not Downloading again\n", - "Extracting Python CIFAR10 data.\n", - "Files extracted\n", - "batch 1 / 10 : total OK 851 NOK 149\n", - "batch 2 / 10 : total OK 1683 NOK 317\n", - "batch 3 / 10 : total OK 2522 NOK 478\n", - "batch 4 / 10 : total OK 3370 NOK 630\n", - "batch 5 / 10 : total OK 4207 NOK 793\n", - "batch 6 / 10 : total OK 5044 NOK 956\n", - "batch 7 / 10 : total OK 5887 NOK 1113\n", - "batch 8 / 10 : total OK 6728 NOK 1272\n", - "batch 9 / 10 : total OK 7570 NOK 1430\n", - "batch 10 / 10 : total OK 8419 NOK 1581\n", - "Final accuracy: 84.190000\n", - "Connection to 99.121.248.96 closed.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! 
ssh {options} -t {username}@{ip} -p {port} 'cd {target_dir_pynq}; echo {password} | sudo -S python3.6 validate.py --dataset cifar10 --batchsize 1000'" ] diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index 34834117d4378baf5c00c4230b30f7a5b0546eac..5501e030e28e3cbd52d226d7d9b8014974ca38a9 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -42,15 +42,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from finn.util.visualization import showSrc, showInNetron\n", "from finn.util.basic import make_build_dir\n", - "\n", + "import os\n", " \n", - "build_dir = \"/workspace/finn\"" + "build_dir = os.environ[\"FINN_ROOT\"]" ] }, { @@ -77,27 +77,16 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading: \"https://github.com/Xilinx/brevitas/releases/download/bnn_pynq-r1/tfc_1w1a-45185b4d.pth\" to /home/maltanar/.cache/torch/checkpoints/tfc_1w1a-45185b4d.pth\n", - "100%|██████████| 249073/249073 [00:00<00:00, 767315.58it/s]\n", - "/workspace/brevitas/brevitas_examples/bnn_pynq/models/FC.py:84: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n", - " x = 2.0 * x - torch.tensor([1.0], device=x.device)\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import onnx\n", "from finn.util.test import get_test_model_trained\n", "import brevitas.onnx as bo\n", "\n", "tfc = get_test_model_trained(\"TFC\", 1, 1)\n", - "bo.export_finn_onnx(tfc, (1, 1, 28, 28), build_dir+\"/tfc_w1_a1.onnx\")" + "bo.export_finn_onnx(tfc, (1, 1, 28, 28), build_dir+\"/tfc_w1_a1.onnx\"); # semicolon added to suppress log" ] }, { @@ -110,38 +99,9 @@ }, { "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '/workspace/finn/tfc_w1_a1.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe30c65e828>" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/tfc_w1_a1.onnx\")" ] @@ -155,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -243,7 +203,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -271,40 +231,9 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_tidy.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " 
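# Sketch (not part of the original notebooks): a quick way to see what each export/tidy
# step produced without opening Netron. It only relies on calls used elsewhere in these
# notebooks (ModelWrapper, model.graph.node); the ModelWrapper import path is the qonnx
# one assumed from the surrounding cells.
from collections import Counter
from qonnx.core.modelwrapper import ModelWrapper

def summarize_ops(onnx_path):
    """Count the node op_types in a saved ONNX model."""
    model = ModelWrapper(onnx_path)
    counts = Counter(node.op_type for node in model.graph.node)
    for op_type, n in sorted(counts.items()):
        print(f"{op_type}: {n}")
    return counts

# e.g. summarize_ops(build_dir + "/tfc_w1_a1_tidy.onnx")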
width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2d26a7da0>" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/tfc_w1_a1_tidy.onnx\")" ] @@ -324,48 +253,9 @@ }, { "cell_type": "code", - "execution_count": 109, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_with_preproc.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/finn/src/finn/transformation/infer_data_layouts.py:113: UserWarning: Assuming 4D input is NCHW\n", - " warnings.warn(\"Assuming 4D input is NCHW\")\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe264171f98>" - ] - }, - "execution_count": 109, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.util.pytorch import ToTensor\n", "from qonnx.transformation.merge_onnx_models import MergeONNXModels\n", @@ -401,40 +291,9 @@ }, { "cell_type": "code", - "execution_count": 110, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_pre_post.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2640f4588>" - ] - }, - "execution_count": 110, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.transformation.insert_topk import InsertTopK\n", "\n", @@ -472,49 +331,9 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "class Streamline(Transformation):\n", - " \"\"\"Apply the streamlining transform, see arXiv:1709.04060.\"\"\"\n", - "\n", - " def apply(self, model):\n", - " streamline_transformations = [\n", - " ConvertSubToAdd(),\n", - " ConvertDivToMul(),\n", - " BatchNormToAffine(),\n", - " ConvertSignToThres(),\n", - " AbsorbSignBiasIntoMultiThreshold(),\n", - " MoveAddPastMul(),\n", - " MoveScalarAddPastMatMul(),\n", - " MoveAddPastConv(),\n", - " MoveScalarMulPastMatMul(),\n", - " MoveScalarMulPastConv(),\n", - " MoveAddPastMul(),\n", - " CollapseRepeatedAdd(),\n", - " CollapseRepeatedMul(),\n", - " AbsorbAddIntoMultiThreshold(),\n", - " FactorOutMulSignMagnitude(),\n", - " AbsorbMulIntoMultiThreshold(),\n", - " Absorb1BitMulIntoMatMul(),\n", - " Absorb1BitMulIntoConv(),\n", - " RoundAndClipThresholds(),\n", - " ]\n", - " for trn in streamline_transformations:\n", - " model = model.transform(trn)\n", - " model = model.transform(RemoveIdentityOps())\n", - " model = 
model.transform(GiveUniqueNodeNames())\n", - " model = model.transform(GiveReadableTensorNames())\n", - " model = model.transform(InferDataTypes())\n", - " return (model, False)\n", - "\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.transformation.streamline import Streamline\n", "showSrc(Streamline)" @@ -531,40 +350,9 @@ }, { "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_streamlined.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2640f4d30>" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.transformation.streamline.reorder import MoveScalarLinearPastInvariants\n", "import finn.transformation.streamline.absorb as absorb\n", @@ -591,40 +379,9 @@ }, { "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1a1_ready_for_hls_conversion.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe30c65e898>" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount\n", "from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds\n", @@ -658,60 +415,27 @@ "metadata": {}, "source": [ "### Conversion to HLS layers <a id='hls_layers'></a>\n", - "Converts the nodes to HLS layers that correspond to the functions in [finn-hls library](https://finn-hlslib.readthedocs.io/en/latest/). In our case this transformation converts pairs of binary XnorPopcountMatMul layers to StreamingFCLayer_Batch layers. Any immediately following MultiThreshold layers will also be absorbed into the MVTU.\n", + "Converts the nodes to HLS layers that correspond to the functions in [finn-hls library](https://finn-hlslib.readthedocs.io/en/latest/). In our case this transformation converts pairs of binary XnorPopcountMatMul layers to MatrixVectorActivation layers. Any immediately following MultiThreshold layers will also be absorbed into the MVTU.\n", "\n", - "Below is the code for the transformation and the network is visualized using netron to create the new structure with `StreamingFCLayer_Batch` nodes, which will correspond to a function call from the [finn-hlslib](https://finn-hlslib.readthedocs.io/en/latest/library/fclayer.html#_CPPv4I_j_j_j_j000_i_i000E22StreamingFCLayer_BatchvRN3hls6streamI7ap_uintI9InStreamWEEERN3hls6streamI7ap_uintI10OutStreamWEEERK2TWRK2TAKjRK1R) library." 
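# Sketch (not part of the original notebooks): the streamlining steps shown above collected
# into one helper, so the "ready for HLS conversion" checkpoint is explicit. The imports are
# the ones used in the surrounding cells; the exact ordering in the notebook may differ.
from finn.transformation.streamline import Streamline
from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds
from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount

def streamline_for_hls(model):
    model = model.transform(Streamline())
    model = model.transform(ConvertBipolarMatMulToXnorPopcount())
    model = model.transform(RoundAndClipThresholds())
    # after this, the compute path should consist of XnorPopcountMatMul + MultiThreshold pairs
    print(sorted({n.op_type for n in model.graph.node}))
    return model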
+ "Below is the code for the transformation and the network is visualized using netron to create the new structure with `MatrixVectorActivation` nodes, which will correspond to a function call from the [finn-hlslib](https://finn-hlslib.readthedocs.io/en/latest/library/matrixvector.html) library." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Note:** The transformation `to_hls.InferBinaryStreamingFCLayer` gets the string \"decoupled\" as argument, this indicates the `mem_mode` for the weights. In FINN there are different options to set the way the weights are stored and accessed. For details please have a look on the [FINN readthedocs website](https://finn.readthedocs.io/) under Internals." + "**Note:** The transformation `to_hls.InferBinaryMatrixVectorActivation` gets the string \"decoupled\" as argument, this indicates the `mem_mode` for the weights. In FINN there are different options to set the way the weights are stored and accessed. For details please have a look on the [FINN readthedocs website](https://finn.readthedocs.io/) under Internals." ] }, { "cell_type": "code", - "execution_count": 29, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_hls_layers.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe30c65e748>" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls\n", "model = ModelWrapper(build_dir+\"/tfc_w1a1_ready_for_hls_conversion.onnx\")\n", - "model = model.transform(to_hls.InferBinaryStreamingFCLayer(\"decoupled\"))\n", + "model = model.transform(to_hls.InferBinaryMatrixVectorActivation(\"decoupled\"))\n", "# TopK to LabelSelect\n", "model = model.transform(to_hls.InferLabelSelectLayer())\n", "# input quantization (if any) to standalone thresholding\n", @@ -724,7 +448,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Each StreamingFCLayer_Batch node has two attributes that specify the degree of folding, PE and SIMD. In all nodes the values for these attributes are set as default to 1, which would correspond to a maximum folding (time multiplexing) and thus minimum performance. We will shortly cover how these can be adjusted, but first we want to separate the HLS layers from the non-HLS layers in this network." + "Each MatrixVectorActivation node has two attributes that specify the degree of folding, PE and SIMD. In all nodes the values for these attributes are set as default to 1, which would correspond to a maximum folding (time multiplexing) and thus minimum performance. We will shortly cover how these can be adjusted, but first we want to separate the HLS layers from the non-HLS layers in this network." ] }, { @@ -733,45 +457,14 @@ "source": [ "### Creating a Dataflow Partition <a id='dataflow_partition'></a>\n", "\n", - "In the graph above, you can see that there is a mixture of FINN HLS layers (StreamingFCLayer_Batch) with regular ONNX layers (Reshape, Mul, Add). To create a bitstream, FINN needs a model with only HLS layers. 
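# Sketch (not part of the original notebooks): the "decoupled" string passed to
# to_hls.InferBinaryMatrixVectorActivation ends up as a per-node setting, so it can be
# read back with the same getCustomOp/get_nodeattr helpers used below. The attribute
# name "mem_mode" is an assumption here (it is the mode discussed in the FINN docs).
from qonnx.custom_op.registry import getCustomOp

def print_mem_modes(model):
    for node in model.get_nodes_by_op_type("MatrixVectorActivation"):
        print(node.name, "->", getCustomOp(node).get_nodeattr("mem_mode"))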
In order to achieve this, we will use the `CreateDataflowPartition` transformation to create a \"dataflow partition\" in this graph, separating out the HLS layers into another model, and replacing them with a placeholder layer called StreamingDataflowPartition:" + "In the graph above, you can see that there is a mixture of FINN HLS layers (MatrixVectorActivation) with regular ONNX layers (Reshape, Mul, Add). To create a bitstream, FINN needs a model with only HLS layers. In order to achieve this, we will use the `CreateDataflowPartition` transformation to create a \"dataflow partition\" in this graph, separating out the HLS layers into another model, and replacing them with a placeholder layer called StreamingDataflowPartition:" ] }, { "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_dataflow_parent.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2640abc88>" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.transformation.fpgadataflow.create_dataflow_partition import CreateDataflowPartition\n", "\n", @@ -785,45 +478,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see that the StreamingFCLayer instances have all been replaced with a single `StreamingDataflowPartition`, which has an attribute `model` that points to the extracted, HLS dataflow-only graph:" + "We can see that the MatrixVectorActivation instances have all been replaced with a single `StreamingDataflowPartition`, which has an attribute `model` that points to the extracted, HLS dataflow-only graph:" ] }, { "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/tmp/finn_dev_maltanar/dataflow_partition0_q1ym9aul/df_model.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe264098f60>" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.custom_op.registry import getCustomOp\n", "sdp_node = parent_model.get_nodes_by_op_type(\"StreamingDataflowPartition\")[0]\n", @@ -836,12 +498,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see all the extracted `StreamingFCLayer` instances have been moved to the child (dataflow) model. We will load the child model with `ModelWrapper` and continue working on it." + "We can see all the extracted `MatrixVectorActivation` instances have been moved to the child (dataflow) model. We will load the child model with `ModelWrapper` and continue working on it." 
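# Sketch (not part of the original notebooks): a consistency check after
# CreateDataflowPartition, reusing the parent/child access pattern from the cells around
# this point: the parent should contain StreamingDataflowPartition placeholders, and the
# child graph they point to should contain only HLS-layer nodes.
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.custom_op.registry import getCustomOp

def check_partition(parent_model):
    sdp_nodes = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")
    print("placeholder nodes in parent:", len(sdp_nodes))
    child = ModelWrapper(getCustomOp(sdp_nodes[0]).get_nodeattr("model"))
    print("child op types:", sorted({n.op_type for n in child.graph.node}))
    return child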
] }, { "cell_type": "code", - "execution_count": 34, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -856,7 +518,7 @@ "\n", "*Folding* in FINN describes how much a layer is time-multiplexed in terms of execution resources. There are several *folding factors* for each layer, controlled by the PE (parallelization over outputs) and SIMD (parallelization over inputs) parameters as described by the original [FINN paper](https://arxiv.org/pdf/1612.07119). The higher the PE and SIMD values are set, the faster the generated accelerator will run, and the more FPGA resources it will consume. \n", "\n", - "Since the folding parameters are node attributes, they can be easily accessed and changed using a helper function of the `ModelWrapper`. But first we take a closer look at one of the nodes that implement a StreamingFCLayer_Batch operation. This is where the Netron visualization helps us, in the above diagram we can see that the first four nodes are StreamingFCLayer_Batch. So as an example we extract the first node." + "Since the folding parameters are node attributes, they can be easily accessed and changed using a helper function of the `ModelWrapper`. But first we take a closer look at one of the nodes that implement a MatrixVectorActivation operation. This is where the Netron visualization helps us, in the above diagram we can see that the first four nodes are MatrixVectorActivation. So as an example we extract the first node." ] }, { @@ -868,51 +530,9 @@ }, { "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "CustomOp wrapper is of class Thresholding_Batch\n" - ] - }, - { - "data": { - "text/plain": [ - "{'PE': ('i', True, 0),\n", - " 'NumChannels': ('i', True, 0),\n", - " 'ram_style': ('s', False, 'distributed'),\n", - " 'inputDataType': ('s', True, ''),\n", - " 'outputDataType': ('s', True, ''),\n", - " 'inFIFODepth': ('i', False, 2),\n", - " 'outFIFODepth': ('i', False, 2),\n", - " 'numInputVectors': ('ints', False, [1]),\n", - " 'ActVal': ('i', False, 0),\n", - " 'backend': ('s', True, 'fpgadataflow'),\n", - " 'code_gen_dir_cppsim': ('s', False, ''),\n", - " 'code_gen_dir_ipgen': ('s', False, ''),\n", - " 'executable_path': ('s', False, ''),\n", - " 'ipgen_path': ('s', False, ''),\n", - " 'ip_path': ('s', False, ''),\n", - " 'ip_vlnv': ('s', False, ''),\n", - " 'exec_mode': ('s', False, ''),\n", - " 'cycles_rtlsim': ('i', False, 0),\n", - " 'cycles_estimate': ('i', False, 0),\n", - " 'rtlsim_trace': ('s', False, ''),\n", - " 'res_estimate': ('s', False, ''),\n", - " 'res_hls': ('s', False, ''),\n", - " 'res_synth': ('s', False, ''),\n", - " 'rtlsim_so': ('s', False, ''),\n", - " 'partition_id': ('i', False, 0)}" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "fc0 = model.graph.node[0]\n", "fc0w = getCustomOp(fc0)\n", @@ -932,11 +552,11 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "fc_layers = model.get_nodes_by_op_type(\"StreamingFCLayer_Batch\")\n", + "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n", "config = [\n", " (16, 49, 16, 64, \"block\"),\n", @@ -977,42 +597,9 @@ }, { "cell_type": "code", - "execution_count": 42, - "metadata": { - "scrolled": true - }, - "outputs": [ - { 
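# Sketch (not part of the original notebooks): how one (PE, SIMD, in_fifo_depth,
# out_fifo_depth, ramstyle) tuple from the `config` list above maps onto node attributes.
# The notebook's own loop (not visible in this hunk) does essentially this; the attribute
# names match the ones listed for the HLS custom ops earlier in this notebook.
from qonnx.custom_op.registry import getCustomOp

def apply_folding(model, config):
    fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation")
    assert len(fc_layers) == len(config), "expected one config tuple per layer"
    for node, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):
        inst = getCustomOp(node)
        inst.set_nodeattr("PE", pe)
        inst.set_nodeattr("SIMD", simd)
        inst.set_nodeattr("inFIFODepth", ififo)
        inst.set_nodeattr("outFIFODepth", ofifo)
        inst.set_nodeattr("ram_style", ramstyle)
    return model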
- "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_set_folding_factors.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2640712e8>" - ] - }, - "execution_count": 42, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model.save(build_dir+\"/tfc_w1_a1_set_folding_factors.onnx\")\n", "showInNetron(build_dir+\"/tfc_w1_a1_set_folding_factors.onnx\")" @@ -1038,17 +625,9 @@ }, { "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "dict_keys(['Ultra96', 'Pynq-Z1', 'Pynq-Z2', 'ZCU102', 'ZCU104'])\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# print the names of the supported PYNQ boards\n", "from finn.util.basic import pynq_part_map\n", @@ -1057,7 +636,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1076,7 +655,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1087,7 +666,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1105,40 +684,9 @@ }, { "cell_type": "code", - "execution_count": 99, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/workspace/finn/tfc_w1_a1_post_synthesis.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2ef58eb00>" - ] - }, - "execution_count": 99, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir + \"/tfc_w1_a1_post_synthesis.onnx\")" ] @@ -1152,40 +700,9 @@ }, { "cell_type": "code", - "execution_count": 102, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Stopping http://0.0.0.0:8081\n", - "Serving '/tmp/finn_dev_maltanar/dataflow_partition2_b6c72_s0/df_model.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7fe2ef5a0e48>" - ] - }, - "execution_count": 102, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model = ModelWrapper(build_dir + \"/tfc_w1_a1_post_synthesis.onnx\")\n", "sdp_node_middle = getCustomOp(model.graph.node[1])\n", @@ -1203,34 +720,9 @@ }, { "cell_type": "code", - "execution_count": 103, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[key: 
\"pynq_driver_dir\"\n", - "value: \"/tmp/finn_dev_maltanar/pynq_driver_kl300vbh\"\n", - ", key: \"vivado_stitch_proj\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_stitch_proj_yy5ixo91\"\n", - ", key: \"clk_ns\"\n", - "value: \"10\"\n", - ", key: \"wrapper_filename\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_stitch_proj_yy5ixo91/finn_vivado_stitch_proj.srcs/sources_1/bd/StreamingDataflowPartition_1/hdl/StreamingDataflowPartition_1_wrapper.v\"\n", - ", key: \"vivado_stitch_vlnv\"\n", - "value: \"xilinx_finn:finn:StreamingDataflowPartition_1:1.0\"\n", - ", key: \"vivado_stitch_ifnames\"\n", - "value: \"{\\'clk\\': [\\'ap_clk\\'], \\'rst\\': [\\'ap_rst_n\\'], \\'s_axis\\': [\\'s_axis_0\\'], \\'m_axis\\': [\\'m_axis_0\\'], \\'aximm\\': [], \\'axilite\\': []}\"\n", - ", key: \"platform\"\n", - "value: \"zynq-iodma\"\n", - "]" - ] - }, - "execution_count": 103, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model = ModelWrapper(postsynth_layers)\n", "model.model.metadata_props" @@ -1252,32 +744,9 @@ }, { "cell_type": "code", - "execution_count": 97, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[key: \"pynq_driver_dir\"\n", - "value: \"/tmp/finn_dev_maltanar/pynq_driver_kl300vbh\"\n", - ", key: \"vivado_pynq_proj\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f\"\n", - ", key: \"bitfile\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f/resizer.bit\"\n", - ", key: \"hw_handoff\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f/resizer.hwh\"\n", - ", key: \"vivado_synth_rpt\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f/synth_report.xml\"\n", - ", key: \"platform\"\n", - "value: \"zynq-iodma\"\n", - "]" - ] - }, - "execution_count": 97, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model = ModelWrapper(build_dir + \"/tfc_w1_a1_post_synthesis.onnx\")\n", "model.model.metadata_props" @@ -1292,20 +761,9 @@ }, { "cell_type": "code", - "execution_count": 98, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "NA\t\t\t finn_zynq_link.runs resizer.bit\t vivado.jou\r\n", - "finn_zynq_link.cache\t finn_zynq_link.srcs resizer.hwh\t vivado.log\r\n", - "finn_zynq_link.hw\t finn_zynq_link.xpr synth_project.sh\r\n", - "finn_zynq_link.ip_user_files ip_config.tcl\t synth_report.xml\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! 
ls {model.get_metadata_prop(\"vivado_pynq_proj\")}" ] @@ -1344,21 +802,9 @@ }, { "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Welcome to PYNQ Linux, based on Ubuntu 18.04 (GNU/Linux 5.4.0-xilinx-v2020.1 armv7l)\r\n", - "\r\n", - " * Pure upstream Kubernetes 1.21, smallest, simplest cluster ops!\r\n", - "\r\n", - " https://microk8s.io/\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import os\n", "\n", @@ -1378,7 +824,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1397,68 +843,18 @@ }, { "cell_type": "code", - "execution_count": 48, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[key: \"pynq_driver_dir\"\n", - "value: \"/tmp/finn_dev_maltanar/pynq_driver_kl300vbh\"\n", - ", key: \"vivado_pynq_proj\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f\"\n", - ", key: \"bitfile\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f/resizer.bit\"\n", - ", key: \"hw_handoff\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f/resizer.hwh\"\n", - ", key: \"vivado_synth_rpt\"\n", - "value: \"/tmp/finn_dev_maltanar/vivado_zynq_proj_kdf60v6f/synth_report.xml\"\n", - ", key: \"platform\"\n", - "value: \"zynq-iodma\"\n", - ", key: \"pynq_ip\"\n", - "value: \"192.168.2.99\"\n", - ", key: \"pynq_port\"\n", - "value: \"22\"\n", - ", key: \"pynq_username\"\n", - "value: \"xilinx\"\n", - ", key: \"pynq_password\"\n", - "value: \"xilinx\"\n", - ", key: \"pynq_target_dir\"\n", - "value: \"/home/xilinx/finn_tfc_end2end_example\"\n", - ", key: \"pynq_deployment_dir\"\n", - "value: \"/tmp/finn_dev_maltanar/pynq_deployment_3wrnn2sp\"\n", - ", key: \"pynq_deploy_dir\"\n", - "value: \"/tmp/finn_dev_maltanar/pynq_deployment_3wrnn2sp\"\n", - ", key: \"exec_mode\"\n", - "value: \"remote_pynq\"\n", - "]" - ] - }, - "execution_count": 48, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model.model.metadata_props" ] }, { "cell_type": "code", - "execution_count": 106, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'/home/xilinx/finn_tfc_end2end_example/pynq_deployment_3wrnn2sp'" - ] - }, - "execution_count": 106, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "target_dir_pynq = target_dir + \"/\" + model.get_metadata_prop(\"pynq_deployment_dir\").split(\"/\")[-1]\n", "target_dir_pynq" @@ -1466,27 +862,9 @@ }, { "cell_type": "code", - "execution_count": 107, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "total 4236\r\n", - "-rw-r--r-- 1 xilinx xilinx 8490 Sep 21 11:06 driver.py\r\n", - "drwxr-xr-x 4 xilinx xilinx 4096 Sep 21 11:06 finn\r\n", - "-rw-r--r-- 1 xilinx xilinx 3264 Sep 21 12:05 input.npy\r\n", - "-rw-r--r-- 1 root root 205 Sep 21 12:34 nw_metrics.txt\r\n", - "-rw-r--r-- 1 root root 84 Sep 21 12:06 output.npy\r\n", - "drwxrwxr-x 2 xilinx xilinx 4096 Sep 21 11:34 __pycache__\r\n", - "-rw-r--r-- 1 xilinx xilinx 4045671 Sep 21 11:06 resizer.bit\r\n", - "-rw-r--r-- 1 xilinx xilinx 246211 Sep 21 11:06 resizer.hwh\r\n", - "-rw-r--r-- 1 root root 32 Sep 21 12:34 sds_trace_data.dat\r\n", - "-rw-r--r-- 1 xilinx xilinx 1727 Sep 21 11:06 validate.py\r\n" - ] - } - ], + "execution_count": 
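# Sketch (not part of the original notebooks): the synthesis and deployment artifacts
# listed in the metadata above can be collected with get_metadata_prop, the same accessor
# the shell cells here rely on. The key names are taken from the metadata_props printed
# in this notebook.
def build_artifacts(model):
    keys = ["bitfile", "hw_handoff", "pynq_driver_dir", "vivado_pynq_proj"]
    return {k: model.get_metadata_prop(k) for k in keys}

# e.g. build_artifacts(ModelWrapper(build_dir + "/tfc_w1_a1_post_synthesis.onnx"))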
null, + "metadata": {}, + "outputs": [], "source": [ "! ssh {options} {username}@{ip} -p {port} 'ls -l {target_dir_pynq}'" ] @@ -1500,32 +878,9 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "<matplotlib.image.AxesImage at 0x7fcb96004cc0>" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAARYElEQVR4nO3dfYyVZXrH8d/FoDAw8iYRCaisG/5QqmUbgk1KyOKmxlUMbKJm/aPauAmarMmqTVqz/UOSaqJVa/pH3YStL9CsmiWoq0a7a82mWo1GNFQQW1CULGR4E5H3t+HqH/NgZ3We6549z3nOc9z7+0kmM3Ouec65OTM/zsv13Pdt7i4Af/xGNT0AAJ1B2IFMEHYgE4QdyARhBzIxupM3Zma89Z+ZUaPKH09OnTpV23VXvf6enp6wPjAw0PJ1183dbbjLK4XdzK6U9M+SeiT9q7vfV+X6cmU27O/mS6k/6ip/eKNHx38CqcCk6r29vaW1Q4cOhcem9PX1hfUDBw6U1lIt50mTJoX1zz77LKx3o5afxptZj6R/kfR9SRdLusHMLm7XwAC0V5XX7PMlfeTuW9z9uKSnJS1pz7AAtFuVsM+Q9Lsh328rLvs9ZrbMzNaa2doKtwWgotrfoHP3FZJWSLxBBzSpyiP7dknnDfl+ZnEZgC5UJezvSJptZt8yszMl/VDS8+0ZFoB2a/lpvLufNLPbJP1ag623x9z9g7aNLCPjx48P6wcPHmz5useMGRPWjx07FtZTbcFx48aF9ai9lmoppqSOj9prqT76vn37WhlSV6v0mt3dX5L0UpvGAqBGnC4LZIKwA5kg7EAmCDuQCcIOZIKwA5mwTq4um+vpsqled6qXffTo0bA+duzYlo9Nia676vWfffbZYb3qNNLofp06dWp47O7du8N6amrwyZMnw3qdyuaz88gOZIKwA5kg7EAmCDuQCcIOZIKwA5mg9fYNkGrNVfkd1nnddUtNDa6yem1q6m5qanCTS03TegMyR9iBTBB2IBOEHcgEYQcyQdiBTBB2IBP02TvgrLPOCuvRbqOSNHHixLB+4sSJ0lpqN9LUFNbPP/88rC9YsCCs33rrraW1VC/6jjvuCOtbt24N601OM20SfXYgc4QdyARhBzJB2IFMEHYgE4QdyARhBzJBn/0b4JFHHgnrUS871Wuuuox1b29vWI+ktk2+5JJLwvqmTZvC+vHjx0trZ5xxRnhsdO6ClP53HzlyJKzXqazPXmnLZjP7VNIBSQOSTrr7vCrXB6A+lcJeWOTue9pwPQBqxGt2IBNVw+6SfmNm75rZsuF+wMyWmdlaM1tb8bYAVFD1afwCd99uZudIesXM/sfdXxv6A+6+QtIKiTfogCZVemR39+3F512SnpU0vx2DAtB+LYfdzMab2Vmnv5Z0haQN7RoYgPaq8jR+mqRniz7taElPuvu/t2VUf2RSWzYvWrQorF922WVhPeqVHzx4MDw21W/u6+sL66nzNKI566m11x999NGWr1uS7rzzztLaW2+9FR5b93bSTWg57O6+RdKftnEsAGpE6w3IBGEHMkHYgUwQdiAThB3IBFNcu0Bqqubs2bPD+v79+0trEyZMCI+NpoFK6SmwVbZ8TrX9UlJLcO/du7e0tnTp0vDYdevWhfVUSzLV8qwTS0kDmSPsQCYIO5AJwg5kgrADmSDsQCYIO5CJdiw42TFRT7fOfnBK6thU/ZZbbgnrq1atCuszZ85s+bZTffZ77rknrK9evTqsn3nmmaW1K664Ijz2wQcfDOuprbCj2168eHF47LZt28L6nj3fvDVWeWQHMkHYgUwQdiAThB3IBGEHMkHYgUwQdiATHZ/Pnup3Rzo51naqOvd54cKFYf2iiy4qrY0bNy48dvTo+FSLNWvWhPUtW7aE9SpSyz3PmTMnrKfu90jq75T57AC6FmEHMkHYgUwQdiAThB3IBGEHMkHYgUx0vM8+alT5/y9V54XXqcpc+lOnTlW67eg+S9VPnjwZHjt+/PiwfujQobCe2o46+p2l5tJfffXVYf3pp58O61X67Kk17VP3a5Na7rOb2WNmtsvMNgy5bIqZvWJmm4vPk9s5WADtN5Kn8U9IuvIrl90l6VV3ny3p1eJ7AF0sGXZ3f03SV/fRWSJpZfH1SklL2zssAO3W6hp009y9v/h6h6RpZT9oZsskLWvxdgC0SeUFJ93dow0b3X2FpBUSGzsCTWq19bbTzKZLUvF5V/uGBKAOrYb9eUk3FV/fJOlX7RkOgLok++xm9pSk70qaKmmnpLslPSfpl5LOl7RV0vXuXr4Z9v9fV21P46uuG1+1Hkn1ZFN7qEf7r1fV29sb1o8cORLWU+cAVDnH4MILLwzrH3/8ccvXnRpXak36lMOHD1c6voqyPnvyNbu731BS+l6lEQHoKE6XBTJB2IFMEHYgE4QdyARhBzLBls2FVAtyYGAgrEd6enrCetVlh6M2UarFlJrCmpK6/mjb5KgmSYsWLWppTKdFv9MTJ06Ex6amuFb5e2gKj+xAJgg7kAnCDmSCsAOZIOxAJgg7kAnCDmSiq/rsdW7nXHU55yrqvu0DBw6U1lL94lSvO3V8qk8fLRedWsb6uuuuC+tHjx4N62PHji2tpfrsqd9Zk1syt4pHdiAThB3IBGEHMkHYgUwQdiAThB3IBGEHMtHxPns0t7ube+XRksmp5ZRT6txW+dJLLw2PnTNnTlhPLSX93HPPhfVI1AeXpIULF4b1Klt4p5ahjs5dkKovwd0EHtmBTBB2IBOEHcgEYQcyQdiBTBB2IBOEHchEx/vs0Zz1OvvoqbnyqXndUU949Oj4bly6dGlYTx2/ZMmSsD5mzJjS2ty5c8NjJ02aFNZTvezXX3+95eNnz54dHptamz3V616/fn1p7fLLLw+Pje5TqTv76CnJR3Yze8zMdpnZhiGXLTez7Wa2rvi4qt5hAqhqJE/jn5B05TCXP+zuc4uPl9o7LADtlgy7u78maW8HxgKgRlXeoLvNzN4vnuZPLvshM1tmZmvNbG2F2wJQUath/5mkb0uaK6lf0kNlP+juK9x9nrvPa/G2ALRBS2F3953uPuDupyT9XNL89g4LQLu1FHYzmz7k2x9I2lD2swC6g6
X6qGb2lKTvSpoqaaeku4vv50pySZ9KusXd+5M3ZhbeWKrfnJr3HZk1a1ZYv+aaa8L64sWLS2upedepedupudPR/utSvIZ5X19feGxK1Xnd0e/0iy++CI+dOHFiWE/ZvHlzaW3VqlXhsQ89VPrKVFJ399ndfdiTSpIn1bj7DcNc/GjlEQHoKE6XBTJB2IFMEHYgE4QdyARhBzKRbL219cbMPFp2uc4prnfffXdYX758eVjfs2dPaW3q1KmtDOlLqa2H9+6NpyZE9QsuuCA8NtUWTG3ZnHLs2LHSWmoaaervIdWKjaYtp7Zcfvnll8P6zTffHNab3NK5rPXGIzuQCcIOZIKwA5kg7EAmCDuQCcIOZIKwA5noeJ89qlfZmjg11TLV96yy7fKuXbvC+tatW8P6Aw88ENZXr14d1ufNK18E6OGHHw6PTW3ZPHly6YpjkqRt27aF9eh3+sQTT4THfvLJJ2H92muvDevR1OOq02tffPHFsJ6aMl0n+uxA5gg7kAnCDmSCsAOZIOxAJgg7kAnCDmSio332UaNGeTQ/+vjx4+Hx55xzTmlt9+7d4bGpPntq7nTUL05tB71p06awPmXKlLCeWrY4Wu75/PPPD49NzWdPLe+9b9++sH7jjTeW1l544YXw2JTUOgLRctGLFi0Kj02tMZC6X1LLf9eJPjuQOcIOZIKwA5kg7EAmCDuQCcIOZIKwA5noqvnsVaT6nitXrgzr119/fcvXf/jw4fDYcePGhfXUtsipef4DAwOltdS672+++WZYf/LJJ8P6unXrwvobb7xRWkudX5Dq4ad+59F5G/Pnzw+Pffvtt8P6448/HtZT68rXqeU+u5mdZ2a/NbONZvaBmf2kuHyKmb1iZpuLz/EqBwAaNZKn8Scl/Y27XyzpzyX92MwulnSXpFfdfbakV4vvAXSpZNjdvd/d3yu+PiDpQ0kzJC2RdPq58UpJS2saI4A2iF/0fIWZzZL0HUlvS5rm7v1FaYekaSXHLJO0rMIYAbTBiN+NN7M+SWsk3e7u+4fWfPBdvmHffHP3Fe4+z93LV0UEULsRhd3MztBg0H/h7s8UF+80s+lFfbqkeIlVAI1Ktt5scP7mSkl73f32IZc/IOkzd7/PzO6SNMXd/zZxXeGNnXvuueFYduzYEdYj0fa9kjRz5sywfu+995bWZsyYER6b2nI5tXVxtF20JN1///2ltY0bN4bHpqa4prZFTklNW46k2oYnTpwI69HU49Tf/YQJE8J61SnTdSprvY3kNftfSPorSevNbF1x2U8l3Sfpl2b2I0lbJcWNagCNSobd3f9LUtl/kd9r73AA1IXTZYFMEHYgE4QdyARhBzJB2IFMdHSKa09Pj0d93dRU0aj3uX///tKaJPX19YX1VN806vlW6fdK6Z5v6hyBqJed6uEfO3YsrFcV/b5TyzWnpgan/l6q/M5Sqo6tTiwlDWSOsAOZIOxAJgg7kAnCDmSCsAOZIOxAJrpqKenUHOKol55aVrjqvOzp06eX1vr7+0trI9Hb2xvWU1s213ndqWWsDx06FNarzClPGTUqfqyqMqe86fMTqqDPDmSOsAOZIOxAJgg7kAnCDmSCsAOZIOxAJrqqzw6gOvrsQOYIO5AJwg5kgrADmSDsQCYIO5AJwg5kIhl2MzvPzH5rZhvN7AMz+0lx+XIz225m64qPq+ofLoBWJU+qMbPpkqa7+3tmdpakdyUt1eB+7Afd/cER3xgn1QC1KzupZiT7s/dL6i++PmBmH0qa0d7hAajbH/Sa3cxmSfqOpLeLi24zs/fN7DEzm1xyzDIzW2tma6sNFUAVIz433sz6JP2npHvd/RkzmyZpjySX9A8afKp/c+I6eBoP1KzsafyIwm5mZ0h6UdKv3f2fhqnPkvSiu/9J4noIO1CzlifC2ODyoI9K+nBo0Is37k77gaQNVQcJoD4jeTd+gaTXJa2XdHpt3p9KukHSXA0+jf9U0i3Fm3nRdfHIDtSs0tP4diHsQP2Yzw5kjrADmSDsQCYIO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmUguONlmeyRtHfL91OKybtStY+vWcUmMrVXtHNsFZYWOzmf/2o2brXX3eY0NINCtY+vWcUmMrVWdGhtP44FMEHYgE02HfUXDtx/p1rF167gkxtaqjoyt0dfsADqn6Ud2AB1C2IFMNBJ2M7vSzP7XzD4ys7uaGEMZM/vUzNYX21A3uj9dsYfeLjPbMOSyKWb2ipltLj4Pu8deQ2Prim28g23GG73vmt7+vOOv2c2sR9ImSX8paZukdyTd4O4bOzqQEmb2qaR57t74CRhmtlDSQUmrTm+tZWb/KGmvu99X/Ec52d3/rkvGtlx/4DbeNY2tbJvxv1aD9107tz9vRROP7PMlfeTuW9z9uKSnJS1pYBxdz91fk7T3KxcvkbSy+HqlBv9YOq5kbF3B3fvd/b3i6wOSTm8z3uh9F4yrI5oI+wxJvxvy/TZ1137vLuk3ZvaumS1rejDDmDZkm60dkqY1OZhhJLfx7qSvbDPeNfddK9ufV8UbdF+3wN3/TNL3Jf24eLralXzwNVg39U5/JunbGtwDsF/SQ00OpthmfI2k2919/9Bak/fdMOPqyP3WRNi3SzpvyPczi8u6grtvLz7vkvSsBl92dJOdp3fQLT7vang8X3L3ne4+4O6nJP1cDd53xTbjayT9wt2fKS5u/L4bblydut+aCPs7kmab2bfM7ExJP5T0fAPj+BozG1+8cSIzGy/pCnXfVtTPS7qp+PomSb9qcCy/p1u28S7bZlwN33eNb3/u7h3/kHSVBt+R/1jS3zcxhpJxXSjpv4uPD5oem6SnNPi07oQG39v4kaSzJb0qabOk/5A0pYvG9m8a3Nr7fQ0Ga3pDY1ugwafo70taV3xc1fR9F4yrI/cbp8sCmeANOiAThB3IBGEHMkHYgUwQdiAThB3IBGEHMvF/rSIwqVQD1iIAAAAASUVORK5CYII=\n", - "text/plain": [ - "<Figure size 432x288 with 1 Axes>" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from pkgutil import get_data\n", "import onnx.numpy_helper as nph\n", @@ -1538,17 +893,9 @@ }, { "cell_type": "code", - "execution_count": 92, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Expected network input shape is [1, 784]\n" - ] - } - ], + 
"execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model = ModelWrapper(build_dir + \"/tfc_w1_a1_pynq_deploy.onnx\")\n", "iname = model.graph.input[0].name\n", @@ -1566,7 +913,7 @@ }, { "cell_type": "code", - "execution_count": 95, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1579,20 +926,9 @@ }, { "cell_type": "code", - "execution_count": 96, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[2.]], dtype=float32)" - ] - }, - "execution_count": 96, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "ret[oname]" ] @@ -1624,22 +960,9 @@ }, { "cell_type": "code", - "execution_count": 75, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[sudo] password for xilinx: Collecting git+https://github.com/fbcotter/dataset_loading.git@0.0.4\n", - " Cloning https://github.com/fbcotter/dataset_loading.git (to 0.0.4) to /tmp/pip-hhwx4j3n-build\n", - " Requirement already satisfied (use --upgrade to upgrade): dataset-loading==0.0.4 from git+https://github.com/fbcotter/dataset_loading.git@0.0.4 in /usr/local/lib/python3.6/dist-packages\n", - "Requirement already satisfied: Pillow in /usr/lib/python3/dist-packages (from dataset-loading==0.0.4)\n", - "Requirement already satisfied: scipy in /usr/lib/python3/dist-packages (from dataset-loading==0.0.4)\n", - "Connection to 192.168.2.99 closed.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! ssh {options} -t {username}@{ip} -p {port} 'echo {password} | sudo -S pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading'" ] @@ -1657,36 +980,9 @@ }, { "cell_type": "code", - "execution_count": 108, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[sudo] password for xilinx: Looking for Train Imgs\n", - "Tar File found in data_dir. Not Downloading again\n", - "Looking for Train Labels\n", - "Tar File found in data_dir. Not Downloading again\n", - "Looking for Test Imgs\n", - "Tar File found in data_dir. Not Downloading again\n", - "Looking for Test Labels\n", - "Tar File found in data_dir. Not Downloading again\n", - "batch 0 / 10 : total OK 913 NOK 87\n", - "batch 1 / 10 : total OK 1800 NOK 200\n", - "batch 2 / 10 : total OK 2714 NOK 286\n", - "batch 3 / 10 : total OK 3619 NOK 381\n", - "batch 4 / 10 : total OK 4535 NOK 465\n", - "batch 5 / 10 : total OK 5488 NOK 512\n", - "batch 6 / 10 : total OK 6438 NOK 562\n", - "batch 7 / 10 : total OK 7399 NOK 601\n", - "batch 8 / 10 : total OK 8371 NOK 629\n", - "batch 9 / 10 : total OK 9296 NOK 704\n", - "Final accuracy: 92.960000\n", - "Connection to 192.168.2.99 closed.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "! 
ssh {options} -t {username}@{ip} -p {port} 'cd {target_dir_pynq}; echo {password} | sudo -S python3.6 validate.py --dataset mnist --batchsize 1000'" ] @@ -1709,23 +1005,9 @@ }, { "cell_type": "code", - "execution_count": 104, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Network metrics:\n", - "runtime[ms]: 10.43391227722168\n", - "throughput[images/s]: 958413.2714850444\n", - "DRAM_in_bandwidth[Mb/s]: 751.3960048442748\n", - "DRAM_out_bandwidth[Mb/s]: 0.9584132714850445\n", - "fclk[mhz]: 100.0\n", - "N: 10000\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.core.throughput_test import throughput_test_remote\n", "\n", @@ -1745,17 +1027,9 @@ }, { "cell_type": "code", - "execution_count": 105, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "We reach approximately 61% of the ideal performance.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "II = 64\n", "# frequency in MHz\n", diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb index 0f0f60e8f34af10f25822087156d90d906652400..1e07781b66a8eaa816921a5ff721756bf418a26c 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb @@ -28,14 +28,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from finn.util.basic import make_build_dir\n", "from finn.util.visualization import showSrc, showInNetron\n", - " \n", - "build_dir = \"/workspace/finn\"" + "import os\n", + "\n", + "build_dir = os.environ[\"FINN_ROOT\"]" ] }, { @@ -47,22 +48,9 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[-1.119972 , -1.7596636, 0.8423852, -1.0705007, -1.3218282,\n", - " -1.5030646, -1.4598225, -1.2803943, -1.0334575, -1.7878995]],\n", - " dtype=float32)" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from pkgutil import get_data\n", "import onnx\n", @@ -91,42 +79,9 @@ }, { "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "def xnorpopcountmatmul(inp0, inp1):\n", - " \"\"\"Simulates XNOR-popcount matrix multiplication as a regular bipolar\n", - " matrix multiplication followed by some post processing.\"\"\"\n", - " # extract the operand shapes\n", - " # (M, K0) = inp0.shape\n", - " # (K1, N) = inp1.shape\n", - " K0 = inp0.shape[-1]\n", - " K1 = inp1.shape[0]\n", - " # make sure shapes are compatible with matmul\n", - " assert K0 == K1, \"Matrix shapes are not compatible with matmul.\"\n", - " K = K0\n", - " # convert binary inputs to bipolar\n", - " inp0_bipolar = 2.0 * inp0 - 1.0\n", - " inp1_bipolar = 2.0 * inp1 - 1.0\n", - " # call regular numpy matrix multiplication\n", - " out = np.matmul(inp0_bipolar, inp1_bipolar)\n", - " # XNOR-popcount does not produce the regular dot product result --\n", - " # it returns the number of +1s after XNOR. let P be the number of +1s\n", - " # and N be the number of -1s. 
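# Numeric aside (not part of the original notebooks): a tiny check of the correction
# derived in the listing above. For binary inputs, XNOR-popcount returns P (the number of
# matching positions), the bipolar dot product returns P - N with P + N = K, hence
# P = (out + K) / 2. xnorpopcountmatmul is the function imported in this notebook.
import numpy as np
from qonnx.custom_op.general.xnorpopcount import xnorpopcountmatmul

a = np.array([[1.0, 0.0, 1.0, 1.0]])        # binary row vector, K = 4
b = np.array([[1.0], [0.0], [0.0], [1.0]])  # binary column vector
print(xnorpopcountmatmul(a, b))             # [[3.]] -> 3 of the 4 positions match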
XNOR-popcount returns P, whereas the\n", - " # regular dot product result from numpy is P-N, so we need to apply\n", - " # some correction.\n", - " # out = P-N\n", - " # K = P+N\n", - " # out + K = 2P, so P = (out + K)/2\n", - " return (out + K) * 0.5\n", - "\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.custom_op.general.xnorpopcount import xnorpopcountmatmul\n", "showSrc(xnorpopcountmatmul)" @@ -145,7 +100,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -158,25 +113,17 @@ }, { "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Results are the same!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import finn.core.onnx_exec as oxe\n", - "output_dict = oxe.execute_onnx(model_for_sim, input_dict)\n", + "output_dict = oxe.execute_onnx(model_for_sim, input_dict, return_full_exec_context=False)\n", "output_pysim = output_dict[list(output_dict.keys())[0]]\n", "\n", "\n", "\n", - "if np.isclose(output_pysim, output_golden, atol=1e-3).all():\n", + "if np.isclose(output_pysim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", " print(\"Results are the same!\")\n", "else:\n", " print(\"The results are not the same!\")" @@ -200,7 +147,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -218,7 +165,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -240,38 +187,9 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '/workspace/finn/tfc_w1_a1_for_cppsim.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://0.0.0.0:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f3cac09d978>" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model_for_cppsim.save(build_dir+\"/tfc_w1_a1_for_cppsim.onnx\")\n", "showInNetron(build_dir+\"/tfc_w1_a1_for_cppsim.onnx\")" @@ -290,18 +208,9 @@ }, { "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "compile.sh\t\t\t memblock_0.dat thresh.h\r\n", - "execute_StreamingFCLayer_Batch.cpp node_model\t weights.npy\r\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from qonnx.custom_op.registry import getCustomOp\n", "\n", @@ -327,7 +236,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -348,26 +257,18 @@ }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Results are the same!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "parent_model = ModelWrapper(build_dir+\"/tfc_w1_a1_dataflow_parent.onnx\")\n", - "sdp_node = parent_model.graph.node[2]\n", + "sdp_node = 
parent_model.graph.node[1]\n", "child_model = build_dir + \"/tfc_w1_a1_for_cppsim.onnx\"\n", "getCustomOp(sdp_node).set_nodeattr(\"model\", child_model)\n", "output_dict = oxe.execute_onnx(parent_model, input_dict)\n", "output_cppsim = output_dict[list(output_dict.keys())[0]]\n", "\n", - "if np.isclose(output_cppsim, output_golden, atol=1e-3).all():\n", + "if np.isclose(output_cppsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", " print(\"Results are the same!\")\n", "else:\n", " print(\"The results are not the same!\")" @@ -404,7 +305,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -433,14 +334,14 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# parent model\n", "model_for_rtlsim = ModelWrapper(build_dir + \"/tfc_w1_a1_dataflow_parent.onnx\")\n", "# reference child model\n", - "sdp_node = getCustomOp(model_for_rtlsim.graph.node[2])\n", + "sdp_node = getCustomOp(model_for_rtlsim.graph.node[1])\n", "sdp_node.set_nodeattr(\"model\", build_dir + \"/tfc_w1_a1_dataflow_child.onnx\")\n", "\n", "model_for_rtlsim = model_for_rtlsim.transform(SetExecMode(\"rtlsim\"))" @@ -455,22 +356,14 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Results are the same!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "output_dict = oxe.execute_onnx(model_for_rtlsim, input_dict)\n", "output_rtlsim = output_dict[list(output_dict.keys())[0]]\n", "\n", - "if np.isclose(output_rtlsim, output_golden, atol=1e-3).all():\n", + "if np.isclose(output_rtlsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", " print(\"Results are the same!\")\n", "else:\n", " print(\"The results are not the same!\")" @@ -487,24 +380,9 @@ }, { "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/finn/src/finn/transformation/fpgadataflow/hlssynth_ip.py:70: UserWarning: Using pre-existing IP for StreamingFCLayer_Batch_3\n", - " warnings.warn(\"Using pre-existing IP for %s\" % node.name)\n", - "/workspace/finn/src/finn/transformation/fpgadataflow/hlssynth_ip.py:70: UserWarning: Using pre-existing IP for StreamingFCLayer_Batch_1\n", - " warnings.warn(\"Using pre-existing IP for %s\" % node.name)\n", - "/workspace/finn/src/finn/transformation/fpgadataflow/hlssynth_ip.py:70: UserWarning: Using pre-existing IP for StreamingFCLayer_Batch_2\n", - " warnings.warn(\"Using pre-existing IP for %s\" % node.name)\n", - "/workspace/finn/src/finn/transformation/fpgadataflow/hlssynth_ip.py:70: UserWarning: Using pre-existing IP for StreamingFCLayer_Batch_0\n", - " warnings.warn(\"Using pre-existing IP for %s\" % node.name)\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from finn.transformation.fpgadataflow.insert_dwc import InsertDWC\n", "from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO\n", @@ -519,51 +397,36 @@ "child_model = child_model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns))\n", "child_model = child_model.transform(PrepareRTLSim())\n", "child_model.set_metadata_prop(\"exec_mode\",\"rtlsim\")\n", - "child_model.save(build_dir + \"/tfc_w1_a1_dataflow_child.onnx\")" + "child_model.save(build_dir + 
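# Sketch (not part of the original notebooks): the comparison idiom used throughout this
# notebook, wrapped in one helper. The simulated dataflow model ends in a TopK/LabelSelect
# stage and returns a class index, while output_golden holds the raw Brevitas logits, so
# the golden side is reduced to its argmax (same np.where/np.amax idiom as the cells here).
import numpy as np

def matches_golden(sim_out, output_golden, atol=1e-3):
    golden_top1 = np.where(output_golden[0] == np.amax(output_golden[0]))
    return np.isclose(sim_out, golden_top1, atol=atol).all()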
\"/tfc_w1_a1_dataflow_child.onnx\");" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# parent model\n", "model_for_rtlsim = ModelWrapper(build_dir + \"/tfc_w1_a1_dataflow_parent.onnx\")\n", "# reference child model\n", - "sdp_node = getCustomOp(model_for_rtlsim.graph.node[2])\n", + "sdp_node = getCustomOp(model_for_rtlsim.graph.node[1])\n", "sdp_node.set_nodeattr(\"model\", build_dir + \"/tfc_w1_a1_dataflow_child.onnx\")" ] }, { "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Results are the same!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "output_dict = oxe.execute_onnx(model_for_rtlsim, input_dict)\n", "output_rtlsim = output_dict[list(output_dict.keys())[0]]\n", "\n", - "if np.isclose(output_rtlsim, output_golden, atol=1e-3).all():\n", + "if np.isclose(output_rtlsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", " print(\"Results are the same!\")\n", "else:\n", " print(\"The results are not the same!\")" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -582,7 +445,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.8.5" } }, "nbformat": 4, diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index 06bc735af09b270457c294e200dd4c3c987e3527..68b345ed348f7a3f6fff507e1a4e45f6942a6a60 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -57,7 +57,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -156,7 +156,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -171,18 +171,9 @@ }, { "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Input shape for 1 batch: torch.Size([1000, 593])\n", - "Label shape for 1 batch: torch.Size([1000])\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "count = 0\n", "for x,y in train_quantized_loader:\n", @@ -204,17 +195,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Target device: cuda\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "print(\"Target device: \" + str(device))" @@ -236,7 +219,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -304,7 +287,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -334,7 +317,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -388,7 +371,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -406,7 +389,7 @@ 
}, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -417,19 +400,9 @@ }, { "cell_type": "code", - "execution_count": 13, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Training loss = 0.131165 test accuracy = 0.809102: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10/10 [02:24<00:00, 14.43s/it]\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import numpy as np\n", "from sklearn.metrics import accuracy_score\n", @@ -454,24 +427,9 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEWCAYAAABxMXBSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAoTklEQVR4nO3de5hdd13v8fdnrslkLjuXSZPOnjRpCW0jnQkYCrUcLlY8bVUC+qitWKtSS32sioJS+AM5h8M5FQHxHAsl1GJVsKK0Nmqh3IQCvZi0JGl6SUnTtJkkTSbXmSSTzO17/thrkp3JTrJ3Mit7z57P63m2e63fuuzv3pb5ZK3fWr+liMDMzKxYNeUuwMzMJhcHh5mZlcTBYWZmJXFwmJlZSRwcZmZWEgeHmZmVxMFhdgYkfU3SjRO9bok1vFVSz0Tv1+x06spdgNm5IulA3mwTcAQYSebfGxFfKnZfEXFNGuuaTQYODpsyIqJ5bFrSZuCmiPjW+PUk1UXE8LmszWwy8akqm/LGTvlI+qCkV4AvSpop6d8l9Uram0xn87b5rqSbkunflPQDSZ9M1n1R0jVnuO4iSQ9L6pf0LUl3SPqHIr/Hpcln7ZP0tKR35C27VtIzyX63SvpA0j4n+W77JO2R9H1J/rtgp+T/QMxy5gGzgAuAm8n9b+OLyfwCYAD461Ns/wZgAzAH+ATwN5J0But+GfgvYDbwUeCGYoqXVA/8G/ANYC7w+8CXJF2crPI35E7HtQCvAb6TtL8f6AHagfOADwMeh8hOycFhljMK/FlEHImIgYjYHRFfjYhDEdEPfBx4yym2fykivhARI8A9wHxyf4iLXlfSAuD1wEciYjAifgCsLLL+NwLNwO3Jtt8B/h24Plk+BCyR1BoReyPiybz2+cAFETEUEd8PD2Bnp+HgMMvpjYjDYzOSmiR9XtJLkvqAh4GMpNqTbP/K2EREHEomm0tc93xgT14bwJYi6z8f2BIRo3ltLwEdyfQvAdcCL0n6nqQrkva/ADYC35C0SdJtRX6eTWEODrOc8f/Kfj9wMfCGiGgF3py0n+z000TYDsyS1JTX1lnkttuAznH9EwuArQARsSoilpM7jfWvwFeS9v6IeH9EXAj8AvDHkq46u69h1c7BYVZYC7l+jX2SZgF/lvYHRsRLwGrgo5IakqOCXyhy88eBg8CfSqqX9NZk23uTfb1bUltEDAF9JJchS/p5Sa9K+ljG2kcKfoJZwsFhVthngOnALuAx4Ovn6HPfDVwB7Ab+F/BP5O43OaWIGATeAVxDrubPAr8REc8lq9wAbE5Ou90C/HrSvhj4FnAAeBT4bER8d6K+jFUnuR/MrHJJ+ifguYhI/YjHrFg+4jCrIJJeL+kiSTWSrgaWk+uTMKsYvnPcrLLMA+4jdx9HD/C7EfGj8pZkdjyfqjIzs5L4VJWZmZVkSpyqmjNnTixcuLDcZZiZTSpPPPHErohoH98+JYJj4cKFrF69utxlmJlNKpJeKtTuU1VmZlYSB4eZmZXEwWFmZiVxcJiZWUkcHGZmVhIHh5mZlcTBYWZmJXFwnMJ/btjJZ7+7sdxlmJlVFAfHKTyycRef+daPGRwePf3KZmZTRKrBIelqSRskbSz0LGNJyyWtk7RG0mpJbzrdtpJmSfqmpB8n7zPTqr+7M8Pg8CgbXulP6yPMzCad1IJDUi1wB7knki0Brpe0ZNxq3wa6I2Ip8NvAXUVsexvw7YhYnGx/QiBNlO5sBoC1PfvS+ggzs0knzSOOy4GNEbEpeazlveQeSnNURByIY+O6zwCiiG2XA/ck0/cA70zrC2RnTmfWjAbWbtmX1keYmU06aQZHB7Alb74naTuOpHdJeg74D3JHHafb9ryI2A6QvM+d4Lrza6Mr28a6nv1pfYSZ2aSTZnCoQNsJT42KiPsj4hJyRw4fK2XbU364dHPSb7K6t7e3lE2P053N8OOd/Rw8MnzG+zAzqyZpBkcP0Jk3nwW2nWzliHgYuEjSnNNsu0PSfIDkfedJ9rciIpZFxLL29hOGky9ad2cbowHrt/qow8wM0g2OVcBiSYskNQDXASvzV5D0KklKpl8HNAC7T7PtSuDGZPpG4IEUvwNd7iA3MztOag9yiohhSbcCDwG1wN0R8bSkW5LldwK/BPyGpCFgAPjVpLO84LbJrm8HviLpPcDLwC+n9R0A5jQ3kp05nbVbfMRhZgYpPwEwIh4EHhzXdmfe9J8Df17stkn7buCqia301LqzGR9xmJklfOd4Ebo72+jZO8DuA0fKXYqZWdk5OIow1s/hy3LNzBwcRbmso40awRrfCGhm5uAoxozGOl41t5l17ucwM3NwFCvXQb6fYyOkmJlNTQ6OInV3ZthzcJCevQPlLsXMrKwcHEXySLlmZjkOjiJdPK+Fhroaj5RrZlOeg6NIDXU1LJnfylpfkmtmU5yDowRLOzOs37qfkVF3kJvZ1OXgKEFXto1DgyNs3Hmg3KWYmZWNg6ME3Z0ZAPdzmNmU5uAowaLZM2iZVscaX1llZlOYg6MENTVjj5LdV+5SzMzKxsFRou5shue293N4aKTcpZiZlYWDo0Rd2QzDo8Ez2/vKXYqZWVk4OEq01B3kZjbFOThKNK9tGnNbGv1sDjObshwcZ6C7M+MjDjObshwcZ2BpZ4ZNuw6yf2Co3KWYmZ1zqQaHpKslbZC0UdJtBZa/W9K65PWIpO6k
/WJJa/JefZLelyz7qKStecuuTfM7FNKVbQPgKZ+uMrMpqC6tHUuqBe4A3g70AKskrYyIZ/JWexF4S0TslXQNsAJ4Q0RsAJbm7WcrcH/edn8ZEZ9Mq/bT6erIALkh1t+0eE65yjAzK4s0jzguBzZGxKaIGATuBZbnrxARj0TE3mT2MSBbYD9XAS9ExEsp1lqStqZ6Fs2Z4X4OM5uS0gyODmBL3nxP0nYy7wG+VqD9OuAfx7XdmpzeulvSzEI7k3SzpNWSVvf29pZSd1G6s21+qJOZTUlpBocKtBUcj1zS28gFxwfHtTcA7wD+Oa/5c8BF5E5lbQc+VWifEbEiIpZFxLL29vaSiz+drmyGHX1HeGX/4Qnft5lZJUszOHqAzrz5LLBt/EqSuoC7gOURsXvc4muAJyNix1hDROyIiJGIGAW+QO6U2Dl3dKRcH3WY2RSTZnCsAhZLWpQcOVwHrMxfQdIC4D7ghoh4vsA+rmfcaSpJ8/Nm3wWsn9Cqi/QT57dSVyP3c5jZlJPaVVURMSzpVuAhoBa4OyKelnRLsvxO4CPAbOCzkgCGI2IZgKQmcldkvXfcrj8haSm5016bCyw/J6bV13LxvBbfQW5mU05qwQEQEQ8CD45ruzNv+ibgppNse4hcqIxvv2GCyzxj3Z0Z/m3tNkZHg5qaQl06ZmbVx3eOn4XubBv9h4fZvPtguUsxMztnHBxnwR3kZjYVOTjOwuK5LTQ11LJ2i/s5zGzqcHCchdoa8ZrzfSOgmU0tDo6z1N3ZxtPb+hgcHi13KWZm54SD4yx1ZTMMDo/y/I7+cpdiZnZOODjO0tijZNf4RkAzmyIcHGcpO3M6M5vqWed+DjObIhwcZ0lS8ihZX1llZlODg2MCdGcz/HhnPwePDJe7FDOz1Dk4JkB3ZxujAeu3+qjDzKqfg2MCdGUzgO8gN7OpwcExAeY0N9KRmc5aj5RrZlOAg2OCLO3M+NkcZjYlODgmSFe2jZ69A+w+cKTcpZiZpcrBMUHGRsr1g53MrNo5OCbIZR1t1Mgd5GZW/RwcE2RGYx2vmtvsfg4zq3oOjgnUnc2wrmc/EVHuUszMUuPgmEBdnRl2HxykZ+9AuUsxM0tNqsEh6WpJGyRtlHRbgeXvlrQueT0iqTtv2WZJT0laI2l1XvssSd+U9OPkfWaa36EUS30joJlNAakFh6Ra4A7gGmAJcL2kJeNWexF4S0R0AR8DVoxb/raIWBoRy/LabgO+HRGLgW8n8xXh4nktNNTW+MoqM6tqaR5xXA5sjIhNETEI3Assz18hIh6JiL3J7GNAtoj9LgfuSabvAd45MeWevYa6Gpac3+pnc5hZVUszODqALXnzPUnbybwH+FrefADfkPSEpJvz2s+LiO0AyfvcQjuTdLOk1ZJW9/b2ntEXOBNLOzOs37qfkVF3kJtZdUozOFSgreBfU0lvIxccH8xrvjIiXkfuVNfvSXpzKR8eESsiYllELGtvby9l07PSlW3j0OAIG3ceOGefaWZ2LqUZHD1AZ958Ftg2fiVJXcBdwPKI2D3WHhHbkvedwP3kTn0B7JA0P9l2PrAzlerP0Ngd5O4gN7NqlWZwrAIWS1okqQG4DliZv4KkBcB9wA0R8Xxe+wxJLWPTwM8C65PFK4Ebk+kbgQdS/A4lWzR7Bi2Ndb4R0MyqVl1aO46IYUm3Ag8BtcDdEfG0pFuS5XcCHwFmA5+VBDCcXEF1HnB/0lYHfDkivp7s+nbgK5LeA7wM/HJa3+FM1NSIrs42H3GYWdVKLTgAIuJB4MFxbXfmTd8E3FRgu01A9/j2ZNlu4KqJrXRidWUzfOHhTRweGmFafW25yzEzm1C+czwF3dkMw6PBM9v7yl2KmdmEc3CkYOnYEOvu5zCzKuTgSMG8tmnMbWn0o2TNrCo5OFLS3ZlxB7mZVSUHR0q6s21s6j3I/oGhcpdiZjahHBwpGbsR8CmfrjKzKuPgSElXRwbwHeRmVn0cHClpa6pn0ZwZvoPczKqOgyNF3dk2P5vDzKqOgyNFXdkMr/QdZkff4XKXYmY2YRwcKTo6Uq5PV5lZFXFwpOgnzm+lrkbuIDezquLgSNG0+lountfC2i3u5zCz6uHgSFlXNsO6nn2M+lGyZlYlHBwpW9rZRt/hYTbvPljuUszMJoSDI2VjHeS+LNfMqoWDI2Wvam9men0ta3xllZlVCQdHyupqa7iso411vrLKzKqEg+Mc6Mq2sX5bH0Mjo+UuxczsrKUaHJKulrRB0kZJtxVY/m5J65LXI5K6k/ZOSf8p6VlJT0v6w7xtPippq6Q1yevaNL/DROjuzDA4PMqGV/rLXYqZ2VlLLTgk1QJ3ANcAS4DrJS0Zt9qLwFsiogv4GLAiaR8G3h8RlwJvBH5v3LZ/GRFLk9eDaX2HidKdzQAeKdfMqkOaRxyXAxsjYlNEDAL3AsvzV4iIRyJibzL7GJBN2rdHxJPJdD/wLNCRYq2p6pw1nZlN9R56xMyqQprB0QFsyZvv4dR//N8DfG18o6SFwGuBx/Oab01Ob90taWahnUm6WdJqSat7e3tLLn4iSaK7M+NLcs2sKhQVHJJmSKpJpl8t6R2S6k+3WYG2grdPS3obueD44Lj2ZuCrwPsioi9p/hxwEbAU2A58qtA+I2JFRCyLiGXt7e2nKTV9XdkMz+/o59DgcLlLMTM7K8UecTwMTJPUAXwb+C3gb0+zTQ/QmTefBbaNX0lSF3AXsDwidue115MLjS9FxH1j7RGxIyJGImIU+AK5U2IVb2lnG6MB67f2nX5lM7MKVmxwKCIOAb8I/L+IeBe5Du9TWQUslrRIUgNwHbDyuJ1KC4D7gBsi4vm8dgF/AzwbEZ8et838vNl3AeuL/A5l1TXWQe5+DjOb5OqKXE+SrgDeTe6U0mm3jYhhSbcCDwG1wN0R8bSkW5LldwIfAWYDn81lBcMRsQy4ErgBeErSmmSXH06uoPqEpKXkTnttBt5b5HcoqznNjXRkprPGV1aZ2SRXbHC8D/gQcH/yx/9C4D9Pt1Hyh/7BcW135k3fBNxUYLsfULiPhIi4ociaK053p+8gN7PJr6jgiIjvAd8DSDrJd0XEH6RZWDXqzmZ48KlX2H3gCLObG8tdjpnZGSn2qqovS2qVNAN4Btgg6U/SLa36HB0pd6svyzWzyavYzvElyeWw7yR36mkBuT4IK8FrOtqQ3EFuZpNbscFRn1we+07ggYgY4iT3ZNjJNTfWsXhus28ENLNJrdjg+Dy5K5hmAA9LugDwDQlnoCubYe2WfUQ4d81scioqOCLi/0ZER0RcGzkvAW9Lubaq1N2ZYffBQXr2DpS7FDOzM1Js53ibpE+Pjf0k6VPkjj6sRN3ZNsCPkjWzyavYU1V3A/3ArySvPuCLaRVVzS6Z10pDbY2HWDezSavYGwAviohfypv/H3l3dFsJGupqWHJ+q6+sMrNJq9gjjgFJbxqbkXQl4JP0Z6g728ZTW/czMuoOcjObfIoNjluAOyRtlrQZ+GsmyRhRlai7M8OhwRFe6D1Q7lLMzEpW7FVVayOiG+gCuiLitcBPp1p
ZFRsbKXeNT1eZ2SRU0hMAI6Iv74FKf5xCPVPChXNm0NJY534OM5uUzubRsQVHr7XTq6kRl2XbfEmumU1KZxMc7tk9C92dGZ7d3sfhoZFyl2JmVpJTXo4rqZ/CASFgeioVTRHd2QzDo8Gz2/t47YKZ5S7HzKxop3uKX8u5KmSq6e7M3UG+dss+B4eZTSpnc6rKzsK81mnMbWl0P4eZTToOjjKRRFc242eQm9mk4+Aoo6WdbWzqPcj+gaFyl2JmVrRUg0PS1ZI2SNoo6bYCy98taV3yekRS9+m2lTRL0jcl/Th5n7QdBGM3Aq73o2TNbBJJLTgk1QJ3ANcAS4DrJS0Zt9qLwFsiogv4GLCiiG1vA74dEYuBbyfzk1JXMsS67yA3s8kkzSOOy4GNEbEpIgaBe4Hl+StExCMRsTeZfQzIFrHtcuCeZPoeco+znZQyTQ0smjODde7nMLNJJM3g6AC25M33JG0n8x7ga0Vse15EbAdI3ucW2pmkm8cePNXb23sG5Z8bXdk21m7xqSozmzzSDI5CQ5IUvNtc0tvIBccHS932ZCJiRUQsi4hl7e3tpWx6TnVnM7zSd5gdfYfLXYqZWVHSDI4eoDNvPgtsG7+SpC7gLmB5ROwuYtsdkuYn284Hdk5w3edU/o2AZmaTQZrBsQpYLGmRpAbgOmBl/gqSFgD3ATdExPNFbrsSuDGZvhF4IMXvkLqfOL+N2hr5UbJmNmkU++jYkkXEsKRbgYeAWuDuiHha0i3J8juBjwCzgc9KAhhOTi8V3DbZ9e3AVyS9B3gZ+OW0vsO5MK2+lovPa/Ed5GY2aaQWHAAR8SDw4Li2O/OmbwJuKnbbpH03cNXEVlpe3Z0Z/mPdNiKCJEDNzCqW7xyvAEs72+g7PMzm3YfKXYqZ2Wk5OCrA2B3k7iA3s8nAwVEBFs9tZnp9rTvIzWxScHBUgLraGl7T0eojDjObFBwcFaI7m+HpbX0MjYyWuxQzs1NycFSIrs4MR4ZH2fBKf7lLMTM7JQdHhVg61kHufg4zq3AOjgrROWs6M5vqWecBD82swjk4KsTYo2R9xGFmlc7BUUG6OzM8v6OfQ4PD5S7FzOykHBwVpDvbxmjA+q195S7FzOykHBwVZOwOcj8R0MwqmYOjgrS3NNKRme5nkJtZRXNwVJjuzjZ3kJtZRXNwVJjubIYtewbYc3Cw3KWYmRXk4KgwXb4R0MwqnIOjwlyWbUPCNwKaWcVycFSY5sY6XtXe7CMOM6tYDo4K1N2ZYV3PPiKi3KWYmZ3AwVGBurNt7DowyNZ9A+UuxczsBKkGh6SrJW2QtFHSbQWWXyLpUUlHJH0gr/1iSWvyXn2S3pcs+6ikrXnLrk3zO5RDd2cGgLXu5zCzClSX1o4l1QJ3AG8HeoBVklZGxDN5q+0B/gB4Z/62EbEBWJq3n63A/Xmr/GVEfDKt2svtknmtNNTWsK5nHz/XNb/c5ZiZHSfNI47LgY0RsSkiBoF7geX5K0TEzohYBQydYj9XAS9ExEvplVpZGupquPT8Vt9BbmYVKc3g6AC25M33JG2lug74x3Ftt0paJ+luSTMLbSTpZkmrJa3u7e09g48tr6XZNtZv3c/IqDvIzayypBkcKtBW0l9BSQ3AO4B/zmv+HHARuVNZ24FPFdo2IlZExLKIWNbe3l7Kx1aErmyGg4MjvNB7oNylmJkdJ83g6AE68+azwLYS93EN8GRE7BhriIgdETESEaPAF8idEqs6xzrI95W1DjOz8dIMjlXAYkmLkiOH64CVJe7jesadppKU31v8LmD9WVVZoS6cM4OWxjrfCGhmFSe1q6oiYljSrcBDQC1wd0Q8LemWZPmdkuYBq4FWYDS55HZJRPRJaiJ3RdZ7x+36E5KWkjvttbnA8qpQUyMuy7b5klwzqzipBQdARDwIPDiu7c686VfIncIqtO0hYHaB9hsmuMyK1d2Z4a7vb+Lw0AjT6mvLXY6ZGeA7xytad7aNoZHg2e1+lKyZVQ4HRwUb6yBf1+PTVWZWORwcFWxe6zTaWxp9ZZWZVRQHRwWTRHc24yurzKyiODgqXHe2jRd6D9J3+FSjspiZnTsOjgo31s/xlPs5zKxCODgqXFe2DfAzyM2scjg4KlymqYGFs5v4lyd6+O6GnX4qoJmVnYNjErjtmks5eGSY3/ziKq75q+/z1Sd6GBweLXdZZjZFaSr8C3bZsmWxevXqcpdxVgaHR3lgzVa+8P1NPL/jAPNap/Hbb1rI9ZcvoGVafbnLM7MqJOmJiFh2QruDY3KJCL67oZfPP/wCj23aQ0tjHb/2hgX81pWLmNc2rdzlmVkVcXBUSXDkW9ezj88/vImvPbWd2hrxju4Obn7zhVw8r6XcpZlZFXBwVGFwjHl59yHu/uGL/NOqLQwMjfDWi9u5+c0XcsWFs5EKPU/LzOz0HBxVHBxj9h4c5B8ee4l7Ht3MrgODXNbRxs1vvpBrXjOPulpfB2FmpXFwTIHgGHN4aIT7ntzKXd/fxKZdB+mcNZ33XLmIX3l9J00NqY6kb2ZVxMExhYJjzOho8M1nd7Di4U088dJeMk313PDGC7jxpxYyp7mx3OWZWYVzcEzB4Mi3evMePv/wJr717A7qa2v4pddl+Z3/togL25vLXZqZVaiTBYfPW0wRyxbOYtnCWbzQe4C7vv8iX32yh3tXvczbLz2P977lQn7yglnlLtHMJgkfcUxRvf1H+LtHN/N3j77E/oEhfvKCmdz85gt5+6XnUVPjK7HM7ORHHKleaiPpakkbJG2UdFuB5ZdIelTSEUkfGLdss6SnJK2RtDqvfZakb0r6cfI+M83vUK3aWxp5/89ezKMf+mk++gtL2NF3mPf+/RP8zKe/x5cff5nDQyPlLtHMKlRqRxySaoHngbcDPcAq4PqIeCZvnbnABcA7gb0R8cm8ZZuBZRGxa9x+PwHsiYjbkzCaGREfPFUtPuI4veGRUb62/hVWPLyJp7buZ05zAzdesZAbrriATFNDucszszIoxxHH5cDGiNgUEYPAvcDy/BUiYmdErAJKeUrRcuCeZPoecqFjZ6mutoZf6D6flbdeyZd/5w28pqONT33zea74P9/hoyufZsueQ+Uu0cwqRJqd4x3Alrz5HuANJWwfwDckBfD5iFiRtJ8XEdsBImJ7ctRyAkk3AzcDLFiwoNTapyxJ/NRFc/ipi+aw4ZV+Vjy8iS89/hJ/9+hmrr1sPj932Xwund/KgllN7gsxm6LSDI5Cf1VKOS92ZURsS4Lhm5Kei4iHi904CZoVkDtVVcLnWuLieS186le6+ZP/fjFf/OGLfPnxl/n3ddsBmNFQyyXzW7l0fgtL5rdx6fwWLpnXyvSG2jJXbWZpSzM4eoDOvPkssK3YjSNiW/K+U9L95E59PQzskDQ/OdqYD+ycwJqtgHlt0/jQtZfyR29/Nc/v6OeZbX08u72PZ7f388CPtvEPj70MQI1g4ZwZXDq/lSVjr/NbmdvS6DGzzKpImsGxClgsaRGwFbgO+LViNpQ0A6iJiP5k+meB/5ksXgncCNyevD8w0YVbYdPqa+nKZu
jKZo62RQQ9ewd4+miY9LF2yz7+IzkyAZg1o4ElY0cn57dy6fxWLmpvpt7jZ5lNSqnexyHpWuAzQC1wd0R8XNItABFxp6R5wGqgFRgFDgBLgDnA/clu6oAvR8THk33OBr4CLABeBn45Ivacqg5fVXXu7R8Y4rkkSJ5Jjk427Og/+uTChtoaFp/XfPToZOy9rckPpTKrFB5yxMFRdsMjo2zadTAXJtvGAqWPXQcGj67TkZme9JskYXJ+K50z3RFvVg4ecsTKrq62hlef18Krz2th+dKOo+07+w/z7Pb8vpM+vvPcTkaTf9PMaKjl0iRILk1OeS2aM4O26fXuOzErAx9xWEU6PDRyQkf8s9v76D8yfHSdlsY6OmZOp3NWE9mZ0+mcmbwn834Wu9nZ8RGHTSqn6oh/dnsfL+85RM/eAXr2HuLl3Yf44cZdHBo8fpiUTFP90UAZHy7ZmU2+dNjsDDk4bNKQROesXAiMFxHsPTREz95DbNmTC5QtyfTzO/r5znM7OZJ0zI+Z09xAdtxRyliwdMycTmOdg8WsEAeHVQVJzJrRwKwZDccdpYyJCHoPHDkaKmNHK1v2DPDU1v089PQrDI1E3v7gvJZpBUOlc1YT89qm+XJim7IcHDYlSGJuyzTmtkzjJy84cUDlkdFgR99hevYOsCU5DbZl7yF69h7iv17cwwNrBo521gPU1ojzWhrJNDXQOr2Otun1x71ax79PO7asoc6BY5Obg8OMXBCcn5nO+ZnpXL7oxIdaDY2M8sr+w7kw2ZMLlW37DrN/YJD9A0Ns3nWI/QND7B8YYuA0Q9JPr69NQqXuuKDJD5f80Ml/Tauv8ZVkVnYODrMi1NfWHOtfuejU6w4Oj7J/YIi+w0NHw6Rv3Hv+a9u+3OXIfQNDx101VrgOnXAkM7OpnvaWxtwRVWvjcdMtjXUOGptwDg6zCdZQV0N7S+4PeKmGR0bpPzx8XOjkAmd43HwumPYeGuSF3gP09h85ofMfYFp9zbEgaWnMvVqnHa1vbrJs9owG32RpRXNwmFWQutoaZs5oYOaM0h6eFRH0HR6mt/8wO/uOsLP/CDuT6d4DR9jZd4Tnd/Tzg4276D984lFNbY2Y09xwNGDGQqW99fjAmdPc4KvNzMFhVg0kHe0HedXcllOuOzA4Qm8SLLn3YyGzs/8I2/YfZm3PPnYfHKTQ/cGZpvqjRyq5cGmkvbmRWTMaaGqoZXpDXe69vpYZjcl0Qy1N9bXU+Uq0quDgMJtipjfUsmB2Ewtmn3g/TL7hkVF2HxxMAuVwLmDypnv7j/DiroPs7D983KXMp9JQW5MLkbEwaailqb7uhLYZDfltdTTV5y+vK7j9ub5aLSKIOPaQoYgg4GjYVvPVcw4OMyuorraG81qncV7rNKDtpOtFBPsODbFvYIhDg8MMDI5wKHkNDA1z8MjIsbahY8tz78McGhxh76FBtu47vq1Qn80p660R0xtqqa+tOe6P+Nh0rliS9uP/yAd5IVCgLX8fxY7S1NRQm+tLam482qfU3tx49AKG9uZcX9Ps5oZJd0+Qg8PMzoqkM+qXOZ2R0WBgaOTEMBoc4eBxbcn0UG7ZSHLDjZR7DOnYVWVjF5cJ5S3LW578n/zlx/ZzfBvSse2TZWP7kHLhsn9g6OiR2Y93HuCRF3azf2Co4HedNaPhaMCM9THlh83YdKUM7OngMLOKVFsjmhvraG6snj9TR4ZH2HVgMNe31HeY3gO5YDn6OnCEx188SO+BI0efXZOvoTZ3ldyc8Ucy+UczSfu0+vQuYqie/4+YmVW4xrpaOjLT6chMP+V6x66SOxYo44OmZ+8h1mzZe9KLGFqm1dHe0sj/ftdlvPHC2RP6PRwcZmYV5vir5JpPue7YRQzjj1zGrpzLpPBUTQeHmdkkdvxFDOfG5OrKNzOzsnNwmJlZSVINDklXS9ogaaOk2wosv0TSo5KOSPpAXnunpP+U9KykpyX9Yd6yj0raKmlN8ro2ze9gZmbHS62PQ1ItcAfwdqAHWCVpZUQ8k7faHuAPgHeO23wYeH9EPCmpBXhC0jfztv3LiPhkWrWbmdnJpXnEcTmwMSI2RcQgcC+wPH+FiNgZEauAoXHt2yPiyWS6H3gW6EixVjMzK1KawdEBbMmb7+EM/vhLWgi8Fng8r/lWSesk3S3pxMe55ba7WdJqSat7e3tL/VgzMzuJNIOj0H3xRY7ykuxAaga+CrwvIvqS5s+Re5TOUmA78KlC20bEiohYFhHL2tvbS/lYMzM7hTSDowfozJvPAtuK3VhSPbnQ+FJE3DfWHhE7ImIkIkaBL5A7JWZmZudImjcArgIWS1oEbAWuA36tmA2VG8Xrb4BnI+LT45bNj4jtyey7gPWn298TTzyxS9JLpRSfZw6w6wy3rUb+PY7xb3E8/x7Hq4bf44JCjYpixwg+A8mlsp8BaoG7I+Ljkm4BiIg7Jc0DVgOtwChwAFgCdAHfB55K2gE+HBEPSvp7cqepAtgMvDcvSNL4DqsjYlla+59s/Hsc49/ieP49jlfNv0eqQ45ExIPAg+Pa7sybfoXcKazxfkDhPhIi4oaJrNHMzErjO8fNzKwkDo7TW1HuAiqMf49j/Fscz7/H8ar290i1j8PMzKqPjzjMzKwkDg4zMyuJg+MUTje671RxqtGKpzJJtZJ+JOnfy11LuUnKSPoXSc8l/51cUe6aykXSHyX/O1kv6R8lnbsnLJ0jDo6TyBvd9xpy95ZcL2lJeasqm7HRii8F3gj83hT+LfL9IbkBOA3+Cvh6RFwCdDNFfxdJHeRG/F4WEa8hdw/bdeWtauI5OE7utKP7ThUerfhEkrLAzwF3lbuWcpPUCryZ3GgPRMRgROwra1HlVQdMl1QHNFHCUEuThYPj5CZkdN9qc5LRiqeizwB/yrGRDaayC4Fe4IvJqbu7JM0od1HlEBFbgU8CL5MbhHV/RHyjvFVNPAfHyZ316L7V5iSjFU85kn4e2BkRT5S7lgpRB7wO+FxEvBY4CEzJPsHkMQ/LgUXA+cAMSb9e3qomnoPj5M5qdN9qc7LRiqeoK4F3SNpM7hTmT0v6h/KWVFY9QE9EjB2F/gu5IJmKfgZ4MSJ6I2IIuA/4qTLXNOEcHCd3dHRfSQ3kOrhWlrmmsjjVaMVTUUR8KCKyEbGQ3H8X34mIqvtXZbGSMee2SLo4aboKeOYUm1Szl4E3SmpK/ndzFVV4oUCqgxxOZhExLOlW4CGOje77dJnLKpcrgRuApyStSdo+nAxiaQbw+8CXkn9kbQJ+q8z1lEVEPC7pX4AnyV2N+COqcOgRDzliZmYl8akqMzMriYPDzMxK4uAwM7OSODjMzKwkDg4zMyuJg8OsCJIOJO8LJf3aBO/7w+PmH5nI/ZtNNAeHWWkWAiUFRzLS8qkcFxwRUXV3Glt1cXCYleZ24L9JWpM8d6FW0l9IWiVpnaT3Akh6a/IMky8DTyVt/yrpieRZDTcnbbeTG0l1jaQvJW1jRzdK9r1e0lOSfjVv39/Ne/7Fl5K7lJF0u6Rnklo+ec5/HZsSfOe4WWluAz4QET8PkATA/
oh4vaRG4IeSxkZDvRx4TUS8mMz/dkTskTQdWCXpqxFxm6RbI2Jpgc/6RWApuedbzEm2eThZ9lrgJ8iNn/ZD4EpJzwDvAi6JiJCUmdivbpbjIw6zs/OzwG8kQ7E8DswGFifL/isvNAD+QNJa4DFyA2gu5tTeBPxjRIxExA7ge8Dr8/bdExGjwBpyp9D6gMPAXZJ+ETh0lt/NrCAHh9nZEfD7EbE0eS3Ke/7CwaMrSW8lN3LqFRHRTW4Mo9M9UrTQ0P5jjuRNjwB1ETFM7ijnq8A7ga+X8D3MiubgMCtNP9CSN/8Q8LvJsPNIevVJHmLUBuyNiEOSLiH3CN4xQ2Pbj/Mw8KtJP0o7uafs/dfJCkuel9KWDD75PnKnucwmnPs4zEqzDhhOTjn9LblnbS8Enkw6qHvJ/Wt/vK8Dt0haB2wgd7pqzApgnaQnI+Ldee33A1cAa8k9ROxPI+KVJHgKaQEekDSN3NHKH53RNzQ7DY+Oa2ZmJfGpKjMzK4mDw8zMSuLgMDOzkjg4zMysJA4OMzMriYPDzMxK4uAwM7OS/H98iA8C+mcx5AAAAABJRU5ErkJggg==\n", - "text/plain": [ - "<Figure size 432x288 with 1 Axes>" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", @@ -482,22 +440,9 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAvfUlEQVR4nO3dd3yV5f3/8dcnCRDC3nsPQURWBBFRcFRcRa1WcFWr4gBX+21rd/tr+63ftlZxFdG6B04UWwcuwIFKgLAFwkxAIKwAgZD1+f1xDnqMh3CAnNwnyfv5eORh7nHd53NO6Xnnuu5xmbsjIiJSVlLQBYiISGJSQIiISFQKCBERiUoBISIiUSkgREQkKgWEiIhEpYAQEZGoFBBS5ZnZnoifUjPbF7F8+REcb4aZXRePWkWqkpSgCxA5Wu5e/8DvZrYWuM7d3wuuovgysxR3Lw66Dqn+1IOQasvMkszsTjNbZWbbzOxFM2sa3pZqZs+E1+80szlm1srM/gIMBx4I90AeOMixXzKzTWaWZ2azzKxPxLa6Zna3ma0Lb//YzOqGt51sZp+GXzPbzK4Or/9Wr8XMrjazjyOW3czGm9lKYGV43cTwMXaZ2VwzGx6xf7KZ/Sr83neHt3cwswfN7O4y7+UNM7v9qD9wqXYUEFKd3QpcAJwKtAV2AA+Gt/0IaAR0AJoBNwL73P3XwEfABHev7+4TDnLst4AeQEtgHvBsxLZ/AIOAk4CmwM+BUjPrGG53P9AC6A9kHsb7uQAYAhwbXp4TPkZT4DngJTNLDW/7CTAWOAdoCPwY2As8CYw1syQAM2sOnA48fxh1SA2hISapzm4g9EWfA2BmfwDWm9mVQBGhYOju7guBuYdzYHd/7MDv4ePuMLNGwG5CX8YnuvuG8C6fhve7HHjP3Q98GW8L/8Tqr+6+PaKGZyK23W1mvwGOARYA1wE/d/fl4e0LDrymmeURCoV3gTHADHfffBh1SA2hHoRUZ52AqeHhnJ3AMqAEaAU8DbwDTDGzjWb2NzOrFctBw8M3d4WHb3YBa8Obmod/UoFVUZp2OMj6WGWXqeOnZrYsPIy1k1CPqHkMr/UkcEX49ysIfRYi36GAkOosGzjb3RtH/KS6+wZ3L3L3P7r7sYSGgs4Drgq3O9Qjji8DRgNnEPpS7hxeb8BWoADodpB6oq0HyAfSIpZbR9nn67rC5xt+AfwQaOLujYG8cA2Heq1ngNFm1g/oDbx2kP2khlNASHU2CfiLmXUCMLMWZjY6/PtIM+trZsnALkJDTiXhdpuBruUctwGwn9DwUBrwvwc2uHsp8BjwTzNrG+5tDDWzOoTOU5xhZj80sxQza2Zm/cNNM4GLzCzNzLoD1x7ivTUAioFcIMXMfkfoXMMBjwJ/MrMeFnK8mTUL15hD6PzF08Ar7r7vEK8lNZQCQqqzicA0YLqZ7QY+I3SSF0J/ob9MKByWATMJ/WV9oN3FZrbDzO6LctyngHXABmBp+LiR/gdYROhLeDvwf0CSu68ndNL4p+H1mUC/cJt7gEJC4fQk3z7pHc07hE54rwjXUsC3h6D+CbwITA+/x38DdSO2Pwn0RcNLUg7ThEEiNY+ZnUIoEDuHez0i36EehEgNEz4ZfxvwqMJByqOAEKlBzKw3sBNoA9wbaDGS8DTEJCIiUakHISIiUVWrO6mbN2/unTt3DroMEZEqY+7cuVvdvUW0bdUqIDp37kxGRkbQZYiIVBlmtu5g2zTEJCIiUSkgREQkKgWEiIhEpYAQEZGoFBAiIhKVAkJERKJSQIiISFQKCBGRKmzmilye+GQNRSUV/9xFBYSISBVVWurc9daXPDl73ddTCVYkBYSISBX1zpJNLPtqF7ee3p2U5Ir/Oo9rQJjZKDNbbmZZZnZnlO2NzOwNM1tgZkvM7JpY24qI1GSlpc69762ka4t6fL9fu7i8RtwCIjzX74PA2cCxwFgzO7bMbuOBpe7eDxgB3G1mtWNsKyJSY725+CuWb97N7Wf0JDkpHgNM8e1BDAay3H21uxcCU4DRZfZxoIGZGVCf0Dy9xTG2FRGpkUrCvYeerepzbt82cXudeAZEO749iXpOeF2kB4DewEZCk7zfFp4CMZa2IiI10n8WbiRryx5uOz1+vQeIb0BEq7rs9HVnAZlAW6A/8ICZNYyxbehFzMaZWYaZZeTm5h55tSIiVUBxSSkT31tJr9YNOPu41nF9rXgGRA7QIWK5PaGeQqRrgFc9JAtYA/SKsS0A7j7Z3dPdPb1Fi6hzXoiIVBvTFmxk9dZ8bj+jJ0lx7D1AfANiDtDDzLqYWW1gDDCtzD7rgdMBzKwVcAywOsa2IiI1SnFJKRPfX0mftg05q0+ruL9e3GaUc/diM5sAvAMkA4+5+xIzuzG8fRLwJ+AJM1tEaFjpF+6+FSBa23jVKiJSFbw6fwPrtu3l0avSCV3bE19xnXLU3d8E3iyzblLE7xuB78XaVkSkpioqKeW+91dyfPtGnN67ZaW8pu6kFhGpAl6em0POjn3ccUbPSuk9gAJCRCThFRaX8sAHWf
Tv0JgRx1TexTgKCBGRBPdiRjYbdu7jJ2dWXu8BFBAiIgmtoKiEBz/MYlCnJgzv0bxSX1sBISKSwF6Yk81XeQWV3nsABYSISMI60HsY3KUpJ3VrVumvr4AQEUlQz32+ni279wfSewAFhIhIQtpXWMJDM1ZxUrdmnNi18nsPoIAQEUlIz3y2jq179nPHmT0Dq0EBISKSYPL3FzNp5iqG92jOCZ2bBlaHAkJEJME8NXsd2/ILuf2M4HoPoIAQEUkoe/YXM3nWKkYc04JBnZoEWosCQkQkgTz56Vp27C3ijoB7D6CAEBFJGLsKipg8azWn92pJvw6Ngy5HASEikige/3gtefuKAr1yKZICQkQkAeTtK+LRj1fzvWNbcVy7RkGXAyggREQSwr8/XsPuguLAr1yKpIAQEQnYzr2FPPbxGs7p25pj2zYMupyvKSBERAL2yEeryS8s5rbTE6f3AAoIEZFAbc8v5IlP1nJu3zYc07pB0OV8iwJCRCRAk2etZm9RCbef0SPoUr4jrgFhZqPMbLmZZZnZnVG2/8zMMsM/i82sxMyahretNbNF4W0Z8axTRCQIW/fs58lP1zK6X1u6t0ys3gNASrwObGbJwIPAmUAOMMfMprn70gP7uPvfgb+H9z8fuMPdt0ccZqS7b41XjSIiQXp45ir2F5dw6+mJ13uA+PYgBgNZ7r7a3QuBKcDocvYfCzwfx3pERBLGll0FPDV7HRcMaEfXFvWDLieqeAZEOyA7YjknvO47zCwNGAW8ErHagelmNtfMxh3sRcxsnJllmFlGbm5uBZQtIhJ//5q5iuJS59bTErP3APENiGjz4/lB9j0f+KTM8NIwdx8InA2MN7NTojV098nunu7u6S1atDi6ikVEKsHmXQU8+/l6fjCwHZ2b1wu6nIOKZ0DkAB0iltsDGw+y7xjKDC+5+8bwf7cAUwkNWYmIVHkPfZhFaalzSwL3HiC+ATEH6GFmXcysNqEQmFZ2JzNrBJwKvB6xrp6ZNTjwO/A9YHEcaxURqRQbd+7j+S+yuSS9PR2apgVdTrnidhWTuxeb2QTgHSAZeMzdl5jZjeHtk8K7XghMd/f8iOatgKlmdqDG59z97XjVKiJSWR78MAvHGT+ye9ClHFLcAgLA3d8E3iyzblKZ5SeAJ8qsWw30i2dtIiKVLWfHXl7MyObSEzrQvkli9x5Ad1KLiFSaBz/MwrAq0XsABYSISKVYv20vL2XkcNmQjrRpVDfocmKigBARqQT3f7CS5CTjphHdgi4lZgoIEZE4W7M1n1fnb+DyIZ1o1TA16HJipoAQEYmz+99fSa1k48YRXYMu5bAoIERE4mhV7h5ey9zAVUM707JB1ek9gAJCRCSu7nt/Jam1krnhlKrVewAFhIhI3KzcvJtpCzZy1dDONKtfJ+hyDpsCQkQkTu59fyVptZIZVwV7D6CAEBGJiy837eK/C7/immFdaFqvdtDlHBEFhIhIHEx8byUN6qRw3fAuQZdyxBQQIiIVbMnGPN5avIlrTu5C47Sq2XsABYSISIW7972VNEhN4dqTq27vARQQIiIValFOHu8u3cz1w7vSqG6toMs5KgoIEZEKdO97K2hUtxbXDOscdClHTQEhIlJBMrN38v6XWxh3SlcapFbt3gMoIEREKsw9766gSVotfnRS56BLqRAKCBGRCjB33XZmrsjlhlO7Ub9OXCfrrDQKCBGRCnDPuytpVq82Vw3tFHQpFUYBISJylL5Ys52Ps7Zy04hupNWuHr0HiHNAmNkoM1tuZllmdmeU7T8zs8zwz2IzKzGzprG0FRFJFPe8u4IWDepw+ZDq03uAOAaEmSUDDwJnA8cCY83s2Mh93P3v7t7f3fsDvwRmuvv2WNqKiCSCT1dtZfbqbdx0ajfq1k4OupwKFc8exGAgy91Xu3shMAUYXc7+Y4Hnj7CtiEilc3fufXclrRrW4bIhHYMup8LFMyDaAdkRyznhdd9hZmnAKOCVI2g7zswyzCwjNzf3qIsWEYnVJ1nb+GLtdsaP7E5qrerVe4D4BoRFWecH2fd84BN33364bd19srunu3t6ixYtjqBMEZHD5+7c894K2jRK5dITOgRdTlzEMyBygMhPrT2w8SD7juGb4aXDbSsiUqncnac/W8fcdTsYP7I7dVKqX+8BIJ7XY80BephZF2ADoRC4rOxOZtYIOBW44nDbiohUtry9Rfxq6iL+u+grhnVvxg/Tq2fvAcoJCDO7KIb2Be7+ZrQN7l5sZhOAd4Bk4DF3X2JmN4a3TwrveiEw3d3zD9U2pnckIhInn6/exh0vZLJl935+MaoX407pSnJStBHx6sHco58WMLNtwOtEPx9wwCnu3i0ehR2J9PR0z8jICLoMEalmikpKmfjeSh6ckUWnpmlMHDOAfh0aB11WhTCzue6eHm1beUNMb7n7jw9x4GeOqjIRkQS3bls+t03JJDN7J5cMas8fvt+HetXkWUuHctB36e5XHGzb4ewjIlIVuTtT52/gt68tJinJeOCyAZx3fNugy6pUMcegmXUH/gDUBf7h7rPjVZSISJB2FRTxm6mLmbZgI4M7N+WeMf1p17hu0GVVuvJOUqe6e0HEqj8Bvyd0P8JLQP/4liYiUvky1m7ntimZbNpVwE/P7MnNI7tX6xPR5SmvB/GGmT3l7k+Hl4uAzoQCoiTehYmIVKbiklLu/yCL+z9YSfsmabx041AGdmwSdFmBKi8gRgE3mdnbwF+A/wFuBdKAyyuhNhGRSpG9fS+3v5DJ3HU7uGhAO/44uk+1mDL0aJV3kroEeMDMngZ+B7QBfuvuqyqrOBGReHs9cwO/mboYgIlj+jO6f9THvtVI5Z2DGAL8DCgE/hfYB/zFzHKAP7l7XuWUKCJS8XYXFPH715fw6vwNDOrUhHsv7U+HpmlBl5VQyhtimgRcDNQHHnb3YcAYMzsVeBE4qxLqExGpcPPX7+C2KZnk7NjL7Wf0YMLI7qQka4LNssoLiBJCJ6XTCPUiAHD3mcDM+JYlIlLxSkqdhz7M4t73V9K6YSov3jCU9M5Ngy4rYZUXEJcBNxAKh6sqpxwRkfjYsHMfd0zJ5Iu12/l+v7b86YLjaFRXJ6LLU95J6hXATyuxFhGRuPjPwo386tVFlJQ6//xhPy4c0A6zmnlvw+E46KCbmf3nUI1j2UdEJCj5+4v52UsLmPDcfLq2qM+btw3nooHtFQ4xKm+I6WQzm1bOdgOOreB6REQqxILsndw2ZT7rtu9lwsju3HZGD2rpRPRhKS8gRsfQvvDQu4iIVJ6SUufhWav45/QVtGxQhynXn8iQrs2CLqtKKu8chK5UEpEq5au8ffzkhQXMXr2Nc/u24X8v7EujNJ2IPlI146HmUuWUlDrb9uynZcPUoEuRKuLtxV/xi1cWUVRSyt8uPp5LBulcw9HSgJwkpD/9Zykn3fUB/134VdClSILbW1jML19dyI3PzKNTszT+e+twfpjeQeFQAQ7ZgzCz84A33b20EuoRYVNeAc99vp6UZOOW5+ext/B4LqnGE8PLkVu8IY9bp8xnzdZ8bhrRjTvO6
EntFP3dW1Fi+STHACvN7G9m1jveBYk8PGsVJe68Pv5kTurWnJ+9vJCnZ68NuixJIKWlziOzVnPhQ5+wd38Jz143hF+M6qVwqGCH7EG4+xVm1hAYCzxuZg48Djzv7rvLa2tmo4CJQDLwqLvfFWWfEcC9QC1gq7ufGl6/FthN6JEfxQebVFuqly27Q72Hiwa045jWDXj0R+lMeG4ev319CfmFJdx4aregS6yxNuUV8NCMLLblhy5eNPh6GCf0O1HX8/V6+9Y+hNtYeMnsm2NQZl8rc4wvv9rNF2u3M6pPa/56UV+a1Ktd4e9XYjxJ7e67zOwVQtON3g5cCPzMzO5z9/ujtTGzZOBB4EwgB5hjZtPcfWnEPo2Bh4BR7r7ezFqWOcxId996mO9JqrBHZq2mqKSU8SO7A5BaK5l/XTGIO17I5K63vmTv/mLuOLOnxpcrUUmp89Tstdw9fQWFJaV0aFIXh9DUYd/8B3eP+B0OLLmHfojY70A7//oYHvF75P5l14cW6qQk89eL+jLmBJ1riKdYzkGcD/wY6AY8DQx29y1mlgYsA6IGBDAYyHL31eHjTCF0b8XSiH0uA1519/UA7r7lSN+IVH1b9+znmc/Wc0H/dnRuXu/r9bWSk5g4ZgB1ayVz3wdZ5BeW8Jtze+uLoRIsysnjV1MXsWhDHqf0bMGfRvehU7N6h24o1UIsPYhLgHvcfVbkSnffa2Y/LqddOyA7YjkHGFJmn55ALTObATQAJrr7UwdeApgeHtJ62N0nR3sRMxsHjAPo2LFjDG9HEtWjH62hoLiE8ad1/8625CTj/35wPPXqpPDvj9ewt7CEv1xwHEk1dK7geNtdUMTd01fw1Oy1NKtfh/vHDuC849solGuYWALi98DX1xqaWV2glbuvdff3y2kX7V+Sl1lOAQYBpxMavpptZp+FHxQ4zN03hoed3jWzL8uGFEA4OCYDpKenlz2+VBHb8wt5avZazj++Ld1a1I+6T1KS8fvzj6Vu7WT+NWMVBUUl/P3i4/Uc/wrk7ry9eBN/eGMJW3bv54ohnfifs47RU09rqFgC4iXgpIjlkvC6Ew7RLgeIvDaxPbAxyj5b3T0fyDezWUA/YIW7b4TQsJOZTSU0ZPWdgJDq4bGP17CvqIQJUXoPkcyMX4zqRf06Kfz9neXsLSzmvrEDqJOSXEmVVl/Z2/fy+2lL+ODLLfRu05BJVwxiQMcmQZclAYrlT68Ud4+cMKgQiOWSgTlADzPrYma1CV0uW/bhf68Dw80sJXxOYwiwzMzqmVkDADOrB3wPWBzDa0oVlLe3iCc+Xcs5x7WhZ6sGMbUZP7I7vz3vWN5ZsplxT81lX2FJnKusvopKSpk0cxXfu2cWn63exm/O7c0bE4YpHCSmHkSumX3f3acBmNlo4JBXFrl7sZlNAN4hdJnrY+6+xMxuDG+f5O7LzOxtYCFQSuhS2MVm1hWYGh7vTAGec/e3j+QNSuJ77JM17NlffMjeQ1nXntyFerWT+eXURVz9+Bf8++oTqF9HT485HHPXbefXUxfz5abdnHlsK/7w/T60a1w36LIkQZh7+cP2ZtYNeBZoS+i8QjZwlbtnxb+8w5Oenu4ZGRlBlyGHYVdBEcPu+oCTujXj4SuP7FaX1zM38JMXF9C3XSOevGawHs4Wg7y9Rdz19pc8/8V62jRK5Q/f78NZfVoHXZYEwMzmHuw+s1hulFsFnGhm9QkFSrk3x4kcjic/WcvugmJuOa3HER9jdP92pNZK5pbn5jPmkc94+trBNK9fpwKrrD7cndczN/Ln/y5le34h157chTvO7Kmel0QV078KMzsX6AOkHrjMzd3/Xxzrkhpgz/5iHv14DWf0bslx7Rod1bHO6tOaR36Uzg1PZ3Dpw7N59roTad1IT4KNtGZrPr99bTEfZ22lX/tGPHHN4KP+3KV6O+RJajObBFwK3EJoiOkSoFOc65Ia4KnZa8nbV3RUvYdIp/ZswZPXDGbzrv1c8vCnZG/fWyHHrer2F5cw8b2VnHXvLBZk7+T/je7DqzcPUzjIIcVyFdNJ7n4VsMPd/wgM5duXr4octvz9xTz60RpGHNOCfh0aV9hxh3RtxjPXDWHXvmIumTSbVbl7KuzYVdHsVds4e+JH3PPeCs48thXv/fRUrhramWTdYCgxiCUgCsL/3WtmbYEioEv8SpKa4NnP17E9v7DCeg+R+ndozJRxJ1JcWsqlD89m2Ve7Kvw1Et22Pfv5yYuZjH3kM4pKSnnimhN48LKBtNIETHIYYgmIN8IP1fs7MA9YCzwfx5qkmttXWMLkWasZ3qM5gzrF51r73m0a8sINQ0lJSmLM5M/IzN4Zl9dJNKWlzgtz1nP6P2cyLXMjN4/oxvTbT2XEMWWfgylyaOUGhJklAe+7+053f4XQuYde7v67SqlOqqXnvljP1j2F3Hp6xfceInVrUZ+XbhxKo7q1uPyRz/h89ba4vl7QVm7ezZjJn/GLVxbRo2V93rxtOD8f1Yu6tXWXuRyZcgMiPIvc3RHL+909L+5VSbVVUFTCpJmrGNq1GSd0bhr31+vQNI0XbxhK60ap/OjxL5i5Ijfur1nZ9hWW8Pd3vuSc+z5i+ebd/N8P+vLCuKEx35UucjCxDDFNN7MfmB7jKBXghTnZ5O7eH/feQ6TWjVJ54YahdG1en+ufzOCdJZsq7bXjbcbyLXzv3pk8+OEqzj++Le//9FQuPaGjnnIrFSKWgPgJoYfz7TezXWa228xq3lk/OWr7i0v414xVDO7clBO7xr/3EKl5/To8f/2JHNu2ITc/O4/XMzdU6utXtC27Chj/3DyufnwOtZKSeO66Ifzz0v66QVAqVCx3UqufKhXipYwcNu0q4B+X9AtkXoFGabV45rohXPfkHG5/IZO9hSWMHVy15hApKXWe/Xwdf397OfuLS7n9jB7cNKKbnmYrcRHLjHKnRFsfbW4GkYMpLC7lXzNWMbBjY4Z1bxZYHfXrpPDENYO58Zm5/PLVRewtLOHak6vGVdtLNubxq6mLWZC9k5O6NePPFxxH14PMnSFSEWJ51MbPIn5PJTQvw1zgtLhUJNXSq/Ny2LBzH3+58LjAZyVLrZXMw1cO4rbnM/nTf5ayr7CY8SO7B15XWe5O9vZ9zF2/nU+ytjF1/gYa163FPZf244L+7RKuXql+YhliOj9y2cw6AH+LW0VS7RSVlPLgjCz6tW/EqT1bBF0OEJr0/oHLBvCzlxfyj+kryC8s4ednHRPol25BUQlLNuYxd92O8M9Otu7ZD4R6Ppee0IGfn3UMjdNimY5F5OgdySMcc4DjKroQqb5em7+B7O37+MP5fRLqr96U5CTuvqTf11OY7t1fzO/P71NpVwBt2V3AvHU7mbtuO3PX7WDxhl0UlpQC0LFpGsN7NGdgpyakd2pCz1YN9HgMqXSxnIO4n2/mkk4C+gML4liTVCPFJaU8+GEWfdo25LReiXc3b1KS8ZcLjiOtVjKPfryGvYUl3PWD4yv8y7ik1Fm+aTdz1+9gXriHsD78MMHayUn0bd+Iq4d1ZmDHJgzs1JiWDfRIDAle
LD2IyBl4ioHn3f2TONUj1cwbCzeydtteHr5yUEL1HiKZGb8+tzf16qQw8f2V7Csq4Z5L+1MrOZarwKPbVVDE/PU7mbsuFAjz1+8gPzwtavP6dUjv1IQrT+zEwE5NOK5dQ12FJAkploB4GShw9xIAM0s2szR317OUpVwlpc79H2TRq3UDzuzdKuhyymVm3HFmT9JqJ/PXt76koKiEBy4bSGqtQ39xuzvrtu0NnTcI9xCWb96NOyQZ9GrdkIsGtmdQpyYM6tSE9k3qJmxYikSKJSDeB84ADjw3uS4wHTgpXkVJ9fDfRV+xOjefhy4fWGXu7L3h1G6k1U7mt68v4don5/DIVemk1f72/00KikpYtOGbk8nz1u1gW34hAA1SUxjYsQnn9G3DoE5N6NehsWZrkyorln+5qe7+9UP13X2PmaXFsSapBkpLnfvfX0mPlvUZVcXmOr5yaGfq1k7h5y8v4Kp/f8H/XXx86PxBOBCWbMyjqCR0Wq5r83qM7NXy695B9xb1q0wYihxKLAGRb2YD3X0egJkNAvbFcnAzGwVMBJKBR939rij7jADuBWoBW9391FjbSuJ6e8kmVm7Zw31jB1TJL8yLB7Wnbq1kbpsyn9PvnglAnZQk+rVvzLUnd2VQpyYM7NiYZnq0hVRjsQTE7cBLZrYxvNyG0BSk5TKzZOBB4ExCl8bOMbNp7r40Yp/GwEPAKHdfb2YtY20riau01Lnv/ZV0bVGPc/u2CbqcI3bu8W1o3SiVRTk76d+xCce2aUjtlCM/cS1S1cRyo9wcM+sFHENoTuov3b0ohmMPBrLcfTWAmU0BRgORX/KXAa+6+/rwa205jLaSoN5dtpkvN+3mnkv7Vflr9w8MHYnURIf8c8jMxgP13H2xuy8C6pvZzTEcux2QHbGcE14XqSfQxMxmmNlcM7vqMNoeqG+cmWWYWUZubvV71n9V4x7qPXRulsb5x7cNuhwROQqx9Jevd/edBxbcfQdwfQztov3p6GWWU4BBwLnAWcBvzaxnjG0P1DPZ3dPdPb1Fi8R4jENN9sGXW1iycRfjR3Yn5SjuIxCR4MVyDiLJzMzdHb4+PxDLw2BygA4Ry+2BjVH22eru+YROhs8C+sXYVhLMgd5Dh6Z1uWBA1A6fiFQhsfyJ9w7wopmdbmanAc8Db8fQbg7Qw8y6mFltYAwwrcw+rwPDzSwlfOnsEGBZjG0lwcxckcuCnDzGj+h+VHchi0hiiKUH8QtgHHAToaGf6cAjh2rk7sVmNoFQwCQDj7n7EjO7Mbx9krsvM7O3gYVAKaHLWRcDRGt72O9OKo27M/H9lbRrXJeLBrYPuhwRqQAWHjmKvYHZycBYdx8fn5KOXHp6umdkZBx6R6lwH6/cyhX//pw/X3AcV5zYKehyRCRGZjbX3dOjbYvpGQBm1h8YS+j+hzXAqxVWnVR5od7DClo3TOWSdPUeRKqLgwZE+GqiMYSCYRvwAqEex8hKqk2qiM9Wb2fO2h388ft99FRSkWqkvB7El8BHwPnungVgZndUSlVSpdz3/kpaNKjDpSd0OPTOIlJllHepyQ+ATcCHZvaImZ1O9PsTpAb7Ys12Zq/exg2ndI3p0dgiUnUcNCDcfaq7Xwr0AmYAdwCtzOxfZva9SqpPEtz9H6ykef3aXD5EJ6ZFqptDXqzu7vnu/qy7n0fohrVM4M54FyaJb+66HXy0civXD+9K3drqPYhUN4d1N5O7b3f3h939tHgVJFXH/R+spElaLV3WKlJN6XZXOSILsncyY3ku1w3vSj3NmCZSLSkg5Ijc/8FKGtWtxVVD1XsQqa4UEHLYFm/I471lW7j25C40SK0VdDkiEicKCDls93+wkgapKfzopM5BlyIicaSAkMOy7KtdvLNkM9cM60Kjuuo9iFRnCgg5LA98kEX9Oin8eFjnoEsRkThTQEjMVmzezZuLv+JHJ3WicVosc0aJSFWmgJCYPfBBFnVrJXPtyV2DLkVEKoECQmKStWUPbyzcyJVDO9G0nnoPIjWBAkJi8tCHWdRJSeL64eo9iNQUCgg5pLVb83ktcwNXDOlE8/p1gi5HRCqJAkIO6aEZWdRKTmLcKeo9iNQkCggpV/b2vbw6bwNjB3ekZcPUoMsRkUoU14Aws1FmttzMsszsO48IN7MRZpZnZpnhn99FbFtrZovC6zPiWacc3EMzVpFkxo2ndgu6FBGpZHF7DKeZJQMPAmcCOcAcM5vm7kvL7PpReK6JaEa6+9Z41Sjl27BzHy/PzWbMCR1p3Ui9B5GaJp49iMFAlruvdvdCYAowOo6vJxVs0oxVANw4Qr0HkZoongHRDsiOWM4JrytrqJktMLO3zKxPxHoHppvZXDMbd7AXMbNxZpZhZhm5ubkVU7mwKa+AF+Zkc/GgDrRrXDfockQkAPGc6cWirPMyy/OATu6+x8zOAV4DeoS3DXP3jWbWEnjXzL5091nfOaD7ZGAyQHp6etnjyxGaNHMVpe7crN6DSI0Vzx5EDtAhYrk9sDFyB3ff5e57wr+/CdQys+bh5Y3h/24BphIaspJKsGVXAc9/sZ6LBrajQ9O0oMsRkYDEMyDmAD3MrIuZ1QbGANMidzCz1mZm4d8Hh+vZZmb1zKxBeH094HvA4jjWKhEmz1pNcakzfmT3oEsRkQDFbYjJ3YvNbALwDpAMPObuS8zsxvD2ScDFwE1mVgzsA8a4u5tZK2BqODtSgOfc/e141SrfWL5pN898vo7R/dvSqVm9oMsRkQCZe/UZtk9PT/eMDN0ycSQWZO/koRlZTF+6mXq1U3jjlpPp0lwBIVLdmdlcd0+Pti2eJ6klwbk7n67axkMzsvgkaxsNU1OYMLI7V5/UmWZ65pJIjaeAqIFKS513l23moRmrWJC9kxYN6vDLs3tx2ZCONEjVNKIiEqKAqEGKSkqZlrmRSTNXsXLLHjo0rcufLziOiwe1J7VWctDliUiCUUDUAAVFJbwwJ5vJs1azYec+erVuwMQx/Tm3bxtSkvW8RhGJTgFRjeXtK+KZz9bx2Mdr2JZfyKBOTfh/o/twWq+WhK8QExE5KAVENZS7ez+PfbKGZ2avY/f+Yk7p2YLxI7oxuEtTBYOIxEwBUY1kb9/L5FmreTEjm8KSUs45rg03jejGce0aBV2aiFRBCohqYMXm3fxrxiqmLdhIksFFA9pzw6ld6dqiftCliUgVpoCowuav38FDM1bx7tLN1K2VzNUndea64V1o00hPXxWRo6eAqGLcnY+ztvLQh6uYvXobjerW4tbTe3D1SZ1pWq920OWJSDWigKgiSkudd5Zs4qEZq1i0IY+WDerw63N6M3ZIR+rX0f+MIlLx9M2S4AqLS3ktcwOTZq5idW4+nZql8deL+nLRwHbUSdHNbSISPwqIBLWvsIQpc9bzyKzVbMwroHebhtw/dgDn9G1DcpIuVRWR+FNAJJi8vUU8NXstj3+6lu35hZzQuQl/ubAvI45poXsYRKRSKSASxL7CEu77YCVPz17Hnv3FjDymBTeP7M4JnZsGXZqI1FAKiASwKncP45+dx5ebdnPe8aGb2/q01c1
tIhIsBUTA3liwkTtfWUjtlCSeuOYERhzTMuiSREQABURg9heX8Of/LOPpz9YxqFMT7h87gLaNdYObiCQOBUQAsrfv5eZn57FoQx7XD+/Cz0f1opYeuy0iCUYBUcneXbqZn76YiQMPXzmIs/q0DrokEZGo4vpnq5mNMrPlZpZlZndG2T7CzPLMLDP887tY21Y1RSWl/O+by7j+qQw6Nkvjv7cMVziISEKLWw/CzJKBB4EzgRxgjplNc/elZXb9yN3PO8K2VcJXefuY8Nx85q7bwZUnduLX5/bWFJ8ikvDiOcQ0GMhy99UAZjYFGA3E8iV/NG0TyqwVudz+QiYFRSVMHNOf0f3bBV2SiEhM4jnE1A7IjljOCa8ra6iZLTCzt8ysz2G2xczGmVmGmWXk5uZWRN0VoqTU+ef05fzo8S9oUb8O0yacrHAQkSolnj2IaM+F8DLL84BO7r7HzM4BXgN6xNg2tNJ9MjAZID09Peo+lS13935umzKfT1dt4+JB7fnT6OOoW1tDSiJStcQzIHKADhHL7YGNkTu4+66I3980s4fMrHksbRPVZ6u3ccvz89m1r4i/XXw8P0zvcOhGIiIJKJ4BMQfoYWZdgA3AGOCyyB3MrDWw2d3dzAYTGvLaBuw8VNtEU1rq/GvmKu6evpzOzerx1I8H07tNw6DLEhE5YnELCHcvNrMJwDtAMvCYuy8xsxvD2ycBFwM3mVkxsA8Y4+4ORG0br1qP1o78Qn7yYiYfLs/l3OPbcNdFfWmQWivoskREjoqFvo+rh/T0dM/IyKjU15y/fgcTnptP7u79/Pa83lxxYic9lltEqgwzm+vu6dG26U7qI+TuPP7JWv761jJaNUzl5ZuGcnz7xkGXJSJSYRQQR2BXQRG/eHkhby3exBm9W3H3Jf1olKYhJRGpXhQQh2nxhjzGPzePnB37+NU5vbh+eFcNKYlItaSAiJG78/wX2fzhjSU0TavNC+NOJF2zvYlINaaAiEH+/mJ+89pips7fwPAezbn30v40q18n6LJEROJKAXEIKzfv5qZn57Eqdw8/ObMn40d2JzlJQ0oiUv0pIMrx6rwcfj11MfXqJPPMtUMY1r150CWJiFQaBUQUBUUl/PGNJTz/RTaDuzTlgbEDaNkwNeiyREQqlQKijLVb87n52Xks/WoXN4/oxk/O7EmKpgMVkRpIARHhrUVf8bOXF5KcZDx2dTqn9WoVdEkiIoFRQACFxaX89a1lPP7JWvp3aMwDlw2gfZO0oMsSEQlUjQ+IvL1FXPX4FyzI3smPh3XhzrN7UTtFQ0oiIjU+IBqkptCpaRo3ntKVs/u2CbocEZGEUeMDIinJuG/sgKDLEBFJOBpLERGRqBQQIiISlQJCRESiUkCIiEhUCggREYlKASEiIlEpIEREJCoFhIiIRGXuHnQNFcbMcoF1R9i8ObC1AsupyvRZfJs+j2/T5/GN6vBZdHL3FtE2VKuAOBpmluHu6UHXkQj0WXybPo9v0+fxjer+WWiISUREolJAiIhIVAqIb0wOuoAEos/i2/R5fJs+j29U689C5yBERCQq9SBERCQqBYSIiERV4wPCzEaZ2XIzyzKzO4OuJ0hm1sHMPjSzZWa2xMxuC7qmoJlZspnNN7P/BF1L0MyssZm9bGZfhv+NDA26piCZ2R3h/58sNrPnzSw16JoqWo0OCDNLBh4EzgaOBcaa2bHBVhWoYuCn7t4bOBEYX8M/D4DbgGVBF5EgJgJvu3svoB81+HMxs3bArUC6ux8HJANjgq2q4tXogAAGA1nuvtrdC4EpwOiAawqMu3/l7vPCv+8m9AXQLtiqgmNm7YFzgUeDriVoZtYQOAX4N4C7F7r7zkCLCl4KUNfMUoA0YGPA9VS4mh4Q7YDsiOUcavAXYiQz6wwMAD4PuJQg3Qv8HCgNuI5E0BXIBR4PD7k9amb1gi4qKO6+AfgHsB74Cshz9+nBVlXxanpAWJR1Nf66XzOrD7wC3O7uu4KuJwhmdh6wxd3nBl1LgkgBBgL/cvcBQD5QY8/ZmVkTQqMNXYC2QD0zuyLYqipeTQ+IHKBDxHJ7qmE38XCYWS1C4fCsu78adD0BGgZ838zWEhp6PM3Mngm2pEDlADnufqBH+TKhwKipzgDWuHuuuxcBrwInBVxThavpATEH6GFmXcysNqGTTNMCrikwZmaExpiXufs/g64nSO7+S3dv7+6dCf27+MDdq91fiLFy901AtpkdE151OrA0wJKCth440czSwv+/OZ1qeNI+JegCguTuxWY2AXiH0FUIj7n7koDLCtIw4EpgkZllhtf9yt3fDK4kSSC3AM+G/5haDVwTcD2BcffPzexlYB6hq//mUw0fu6FHbYiISFQ1fYhJREQOQgEhIiJRKSBERCQqBYSIiESlgBARkagUECJhZrYn/N/OZnZZBR/7V2WWP63I44vEgwJC5Ls6A4cVEOEnA5fnWwHh7tXurlupfhQQIt91FzDczDLDz/xPNrO/m9kcM1toZjcAmNmI8PwZzwGLwuteM7O54XkCxoXX3UXoqZ+ZZvZseN2B3oqFj73YzBaZ2aURx54RMf/Cs+E7djGzu8xsabiWf1T6pyM1Ro2+k1rkIO4E/sfdzwMIf9HnufsJZlYH+MTMDjy5czBwnLuvCS//2N23m1ldYI6ZveLud5rZBHfvH+W1LgL6E5pfoXm4zazwtgFAH0LPB/sEGGZmS4ELgV7u7mbWuGLfusg31IMQObTvAVeFHz/yOdAM6BHe9kVEOADcamYLgM8IPQiyB+U7GXje3UvcfTMwEzgh4tg57l4KZBIa+toFFACPmtlFwN6jfG8iB6WAEDk0A25x9/7hny4Rz/7P/3onsxGEnvI51N37EXo+z6GmoYz2yPkD9kf8XgKkuHsxoV7LK8AFwNuH8T5EDosCQuS7dgMNIpbfAW4KPwodM+t5kMlyGgE73H2vmfUiNG3rAUUH2pcxC7g0fJ6jBaFZ2744WGHhuToahR+geDuh4SmRuNA5CJHvWggUh4eKniA0F3NnYF74RHEuob/ey3obuNHMFgLLCQ0zHTAZWGhm89z98oj1U4GhwAJCk1X93N03hQMmmgbA62aWSqj3cccRvUORGOhpriIiEpWGmEREJCoFhIiIRKWAEBGRqBQQIiISlQJCRESiUkCIiEhUCggREYnq/wPNsrzQPvNmRgAAAABJRU5ErkJggg==\n", - "text/plain": [ - "<Figure size 432x288 with 1 Axes>" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "acc_per_epoch = [np.mean(acc_per_epoch) for acc_per_epoch in running_test_acc]\n", 
"display_loss_plot(acc_per_epoch, title=\"Test accuracy\", ylabel=\"Accuracy [%]\")" @@ -505,27 +450,16 @@ }, { "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.8091021716950881" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "test(model, test_quantized_loader)" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -544,20 +478,9 @@ }, { "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "<All keys matched successfully>" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import torch\n", "\n", @@ -572,22 +495,9 @@ }, { "cell_type": "code", - "execution_count": 19, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/plain": [ - "0.9188772287810328" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Move the model back to it's target device\n", "model.to(device)\n", @@ -614,7 +524,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -631,20 +541,9 @@ }, { "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(64, 593)" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from copy import deepcopy\n", "\n", @@ -656,20 +555,9 @@ }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(64, 600)" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import numpy as np\n", "\n", @@ -680,20 +568,9 @@ }, { "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "torch.Size([64, 600])" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "modified_model[0].weight.data = torch.from_numpy(W_new)\n", "modified_model[0].weight.shape" @@ -743,7 +620,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -774,20 +651,9 @@ }, { "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.9188772287810328" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "test_padded_bipolar(model_for_export, test_quantized_loader)" ] @@ -806,19 +672,9 @@ }, { "cell_type": "code", - "execution_count": 27, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model saved to cybsec-mlp-ready.onnx\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import brevitas.onnx as bo\n", "from 
brevitas.quant_tensor import QuantTensor\n", diff --git a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb index 0ffb19d8b407957f359c91a58509aa3450f1641d..3e116b1adbcfddcd3cf61d8ad11130988fc4e2d4 100644 --- a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb +++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb @@ -20,7 +20,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -58,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -77,85 +77,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['__class__',\n", - " '__delattr__',\n", - " '__dict__',\n", - " '__dir__',\n", - " '__doc__',\n", - " '__eq__',\n", - " '__format__',\n", - " '__ge__',\n", - " '__getattribute__',\n", - " '__gt__',\n", - " '__hash__',\n", - " '__init__',\n", - " '__init_subclass__',\n", - " '__le__',\n", - " '__lt__',\n", - " '__module__',\n", - " '__ne__',\n", - " '__new__',\n", - " '__reduce__',\n", - " '__reduce_ex__',\n", - " '__repr__',\n", - " '__setattr__',\n", - " '__sizeof__',\n", - " '__str__',\n", - " '__subclasshook__',\n", - " '__weakref__',\n", - " '_model_proto',\n", - " 'analysis',\n", - " 'check_all_tensor_shapes_specified',\n", - " 'check_compatibility',\n", - " 'cleanup',\n", - " 'find_consumer',\n", - " 'find_consumers',\n", - " 'find_direct_predecessors',\n", - " 'find_direct_successors',\n", - " 'find_producer',\n", - " 'find_upstream',\n", - " 'get_all_tensor_names',\n", - " 'get_finn_nodes',\n", - " 'get_initializer',\n", - " 'get_metadata_prop',\n", - " 'get_node_index',\n", - " 'get_nodes_by_op_type',\n", - " 'get_non_finn_nodes',\n", - " 'get_tensor_datatype',\n", - " 'get_tensor_fanout',\n", - " 'get_tensor_layout',\n", - " 'get_tensor_shape',\n", - " 'get_tensor_sparsity',\n", - " 'get_tensor_valueinfo',\n", - " 'graph',\n", - " 'is_fork_node',\n", - " 'is_join_node',\n", - " 'make_empty_exec_context',\n", - " 'make_new_valueinfo_name',\n", - " 'model',\n", - " 'rename_tensor',\n", - " 'save',\n", - " 'set_initializer',\n", - " 'set_metadata_prop',\n", - " 'set_tensor_datatype',\n", - " 'set_tensor_layout',\n", - " 'set_tensor_shape',\n", - " 'set_tensor_sparsity',\n", - " 'temporary_fix_oldstyle_domain',\n", - " 'transform']" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "dir(model_for_sim)" ] @@ -169,24 +93,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Input tensor name: 0\n", - "Output tensor name: 73\n", - "Input tensor shape: [1, 600]\n", - "Output tensor shape: [1, 1]\n", - "Input tensor datatype: BIPOLAR\n", - "Output tensor datatype: FLOAT32\n", - "List of node operator types in the graph: \n", - "['Mul', 'Add', 'Div', 'MatMul', 'Mul', 'Add', 'BatchNormalization', 'MultiThreshold', 'Mul', 'MatMul', 'Mul', 'Add', 'BatchNormalization', 'MultiThreshold', 'Mul', 'MatMul', 'Mul', 'Add', 'BatchNormalization', 'MultiThreshold', 'Mul', 'MatMul', 'Mul', 'Add', 'MultiThreshold']\n" - ] - } - ], + "outputs": [], "source": [ "from qonnx.core.datatype import DataType\n", "\n", @@ 
-226,7 +135,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -262,38 +171,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving 'cybsec-mlp-verification.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " <iframe\n", - " width=\"100%\"\n", - " height=\"400\"\n", - " src=\"http://localhost:8081/\"\n", - " frameborder=\"0\"\n", - " allowfullscreen\n", - " ></iframe>\n", - " " - ], - "text/plain": [ - "<IPython.lib.display.IFrame at 0x7f3be619b2b0>" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from finn.util.visualization import showInNetron\n", "\n", @@ -311,20 +191,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "torch.Size([100, 593])" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import numpy as np\n", "from torch.utils.data import TensorDataset\n", @@ -356,20 +225,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "<All keys matched successfully>" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "input_size = 593 \n", "hidden1 = 64 \n", @@ -409,7 +267,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -441,7 +299,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -476,17 +334,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "ok 100 nok 0: 100%|██████████| 100/100 [00:21<00:00, 4.72it/s]\n" - ] - } - ], + "outputs": [], "source": [ "import numpy as np\n", "from tqdm import trange\n", @@ -511,17 +361,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Verification succeeded. Brevitas and FINN-ONNX execution outputs are identical\n" - ] - } - ], + "outputs": [], "source": [ "if ok == n_verification_inputs:\n", " print(\"Verification succeeded. 
Brevitas and FINN-ONNX execution outputs are identical\")\n", diff --git a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb index 551c321534cfefa13b8d34b7f1e7685000702ec0..980a770fe2b47aebd9da2fe2fdb8943b542c07b2 100644 --- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb +++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb @@ -106,17 +106,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Previous run results deleted!\n" - ] - } - ], + "outputs": [], "source": [ "import finn.builder.build_dataflow as build\n", "import finn.builder.build_dataflow_config as build_cfg\n", @@ -148,40 +140,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Building dataflow accelerator from cybsec-mlp-ready.onnx\n", - "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n", - "Final outputs will be generated in output_estimates_only\n", - "Build log is at output_estimates_only/build_dataflow.log\n", - "Running step: step_tidy_up [1/7]\n", - "Running step: step_streamline [2/7]\n", - "Running step: step_convert_to_hls [3/7]\n", - "Running step: step_create_dataflow_partition [4/7]\n", - "Running step: step_target_fps_parallelization [5/7]\n", - "Running step: step_apply_folding_config [6/7]\n", - "Running step: step_generate_estimate_reports [7/7]\n", - "Completed successfully\n", - "CPU times: user 1.84 s, sys: 599 ms, total: 2.44 s\n", - "Wall time: 1.77 s\n" - ] - }, - { - "data": { - "text/plain": [ - "0" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "%%time\n", "build.build_dataflow_cfg(model_file, cfg_estimates)" @@ -196,36 +157,18 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "build_dataflow.log intermediate_models report time_per_step.json\r\n" - ] - } - ], + "outputs": [], "source": [ "! ls {estimates_output_dir}" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "estimate_layer_config_alternatives.json estimate_network_performance.json\r\n", - "estimate_layer_cycles.json\t\t op_and_param_counts.json\r\n", - "estimate_layer_resources.json\r\n" - ] - } - ], + "outputs": [], "source": [ "! ls {estimates_output_dir}/report" ] @@ -239,23 +182,9 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\r\n", - " \"critical_path_cycles\": 252,\r\n", - " \"max_cycles\": 64,\r\n", - " \"max_cycles_node_name\": \"StreamingFCLayer_Batch_1\",\r\n", - " \"estimated_throughput_fps\": 1562500.0,\r\n", - " \"estimated_latency_ns\": 2520.0\r\n", - "}" - ] - } - ], + "outputs": [], "source": [ "! 
cat {estimates_output_dir}/report/estimate_network_performance.json" ] @@ -269,7 +198,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -282,23 +211,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'StreamingFCLayer_Batch_0': 60,\n", - " 'StreamingFCLayer_Batch_1': 64,\n", - " 'StreamingFCLayer_Batch_2': 64,\n", - " 'StreamingFCLayer_Batch_3': 64}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "read_json_dict(estimates_output_dir + \"/report/estimate_layer_cycles.json\")" ] @@ -314,44 +229,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'StreamingFCLayer_Batch_0': {'BRAM_18K': 36,\n", - " 'BRAM_efficiency': 0.11574074074074074,\n", - " 'LUT': 8184,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'StreamingFCLayer_Batch_1': {'BRAM_18K': 4,\n", - " 'BRAM_efficiency': 0.1111111111111111,\n", - " 'LUT': 1217,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'StreamingFCLayer_Batch_2': {'BRAM_18K': 4,\n", - " 'BRAM_efficiency': 0.1111111111111111,\n", - " 'LUT': 1217,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'StreamingFCLayer_Batch_3': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.006944444444444444,\n", - " 'LUT': 341,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'total': {'BRAM_18K': 45.0, 'LUT': 10959.0, 'URAM': 0.0, 'DSP': 0.0}}" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "read_json_dict(estimates_output_dir + \"/report/estimate_layer_resources.json\")" ] @@ -375,7 +255,7 @@ "\n", "<font color=\"red\">**Live FINN tutorial:** These next builds will take about 10 minutes to complete since multiple calls to Vivado and a call to RTL simulation are involved. 
While this is running, you can examine the generated files with noVNC -- it is running on **(your AWS URL):6080/vnc.html**\n", "\n", - "* Once the `step_hls_codegen [8/16]` below is completed, you can view the generated HLS code under its own folder for each layer: `/tmp/finn_dev_ubuntu/code_gen_ipgen_StreamingFCLayer_Batch_XXXXXX`\n", + "* Once the `step_hls_codegen [8/16]` below is completed, you can view the generated HLS code under its own folder for each layer: `/tmp/finn_dev_ubuntu/code_gen_ipgen_MatrixVectorActivation_XXXXXX`\n", " \n", "* Once the `step_create_stitched_ip [11/16]` below is completed, you can view the generated stitched IP in Vivado under `/home/ubuntu/finn/notebooks/end2end_example/cybersecurity/output_ipstitch_ooc_rtlsim/stitched_ip`\n", "</font> " @@ -383,17 +263,9 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Previous run results deleted!\n" - ] - } - ], + "outputs": [], "source": [ "import finn.builder.build_dataflow as build\n", "import finn.builder.build_dataflow_config as build_cfg\n", @@ -425,49 +297,9 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Building dataflow accelerator from cybsec-mlp-ready.onnx\n", - "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n", - "Final outputs will be generated in output_ipstitch_ooc_rtlsim\n", - "Build log is at output_ipstitch_ooc_rtlsim/build_dataflow.log\n", - "Running step: step_tidy_up [1/16]\n", - "Running step: step_streamline [2/16]\n", - "Running step: step_convert_to_hls [3/16]\n", - "Running step: step_create_dataflow_partition [4/16]\n", - "Running step: step_target_fps_parallelization [5/16]\n", - "Running step: step_apply_folding_config [6/16]\n", - "Running step: step_generate_estimate_reports [7/16]\n", - "Running step: step_hls_codegen [8/16]\n", - "Running step: step_hls_ipgen [9/16]\n", - "Running step: step_set_fifo_depths [10/16]\n", - "Running step: step_create_stitched_ip [11/16]\n", - "Running step: step_measure_rtlsim_performance [12/16]\n", - "Running step: step_make_pynq_driver [13/16]\n", - "Running step: step_out_of_context_synthesis [14/16]\n", - "Running step: step_synthesize_bitfile [15/16]\n", - "Running step: step_deployment_package [16/16]\n", - "Completed successfully\n", - "CPU times: user 4.76 s, sys: 710 ms, total: 5.47 s\n", - "Wall time: 8min 5s\n" - ] - }, - { - "data": { - "text/plain": [ - "0" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "%%time\n", "build.build_dataflow_cfg(model_file, cfg_stitched_ip)" @@ -489,22 +321,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "all_verilog_srcs.txt\t\t finn_vivado_stitch_proj.xpr\r\n", - "finn_vivado_stitch_proj.cache\t ip\r\n", - "finn_vivado_stitch_proj.hw\t make_project.sh\r\n", - "finn_vivado_stitch_proj.ip_user_files make_project.tcl\r\n", - "finn_vivado_stitch_proj.sim\t vivado.jou\r\n", - "finn_vivado_stitch_proj.srcs\t vivado.log\r\n" - ] - } - ], + "outputs": [], "source": [ "! 
ls {rtlsim_output_dir}/stitched_ip" ] @@ -518,18 +337,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "estimate_layer_resources_hls.json rtlsim_performance.json\r\n", - "ooc_synth_and_timing.json\r\n" - ] - } - ], + "outputs": [], "source": [ "! ls {rtlsim_output_dir}/report" ] @@ -543,27 +353,9 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\r\n", - " \"vivado_proj_folder\": \"/tmp/finn_dev_ubuntu/synth_out_of_context_iut077er/results_finn_design_wrapper\",\r\n", - " \"LUT\": 8667.0,\r\n", - " \"FF\": 9063.0,\r\n", - " \"DSP\": 0.0,\r\n", - " \"BRAM\": 22.0,\r\n", - " \"WNS\": 0.946,\r\n", - " \"\": 0,\r\n", - " \"fmax_mhz\": 110.44842058758559,\r\n", - " \"estimated_throughput_fps\": 1725756.5716810247\r\n", - "}" - ] - } - ], + "outputs": [], "source": [ "! cat {rtlsim_output_dir}/report/ooc_synth_and_timing.json" ] @@ -577,26 +369,9 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\r\n", - " \"cycles\": 643,\r\n", - " \"runtime[ms]\": 0.00643,\r\n", - " \"throughput[images/s]\": 1088646.967340591,\r\n", - " \"DRAM_in_bandwidth[Mb/s]\": 81.64852255054431,\r\n", - " \"DRAM_out_bandwidth[Mb/s]\": 0.13608087091757387,\r\n", - " \"fclk[mhz]\": 100.0,\r\n", - " \"N\": 7,\r\n", - " \"latency_cycles\": 211\r\n", - "}" - ] - } - ], + "outputs": [], "source": [ "! cat {rtlsim_output_dir}/report/rtlsim_performance.json" ] @@ -610,62 +385,9 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\r\n", - " \"Defaults\": {},\r\n", - " \"StreamingFIFO_0\": {\r\n", - " \"ram_style\": \"auto\",\r\n", - " \"depth\": 32,\r\n", - " \"impl_style\": \"rtl\"\r\n", - " },\r\n", - " \"StreamingFCLayer_Batch_0\": {\r\n", - " \"PE\": 16,\r\n", - " \"SIMD\": 40,\r\n", - " \"ram_style\": \"auto\",\r\n", - " \"resType\": \"lut\",\r\n", - " \"mem_mode\": \"decoupled\",\r\n", - " \"runtime_writeable_weights\": 0\r\n", - " },\r\n", - " \"StreamingDataWidthConverter_Batch_0\": {\r\n", - " \"impl_style\": \"hls\"\r\n", - " },\r\n", - " \"StreamingFCLayer_Batch_1\": {\r\n", - " \"PE\": 1,\r\n", - " \"SIMD\": 64,\r\n", - " \"ram_style\": \"auto\",\r\n", - " \"resType\": \"lut\",\r\n", - " \"mem_mode\": \"decoupled\",\r\n", - " \"runtime_writeable_weights\": 0\r\n", - " },\r\n", - " \"StreamingDataWidthConverter_Batch_1\": {\r\n", - " \"impl_style\": \"hls\"\r\n", - " },\r\n", - " \"StreamingFCLayer_Batch_2\": {\r\n", - " \"PE\": 1,\r\n", - " \"SIMD\": 64,\r\n", - " \"ram_style\": \"auto\",\r\n", - " \"resType\": \"lut\",\r\n", - " \"mem_mode\": \"decoupled\",\r\n", - " \"runtime_writeable_weights\": 0\r\n", - " },\r\n", - " \"StreamingFCLayer_Batch_3\": {\r\n", - " \"PE\": 1,\r\n", - " \"SIMD\": 1,\r\n", - " \"ram_style\": \"auto\",\r\n", - " \"resType\": \"lut\",\r\n", - " \"mem_mode\": \"decoupled\",\r\n", - " \"runtime_writeable_weights\": 0\r\n", - " }\r\n", - "}" - ] - } - ], + "outputs": [], "source": [ "! 
cat {rtlsim_output_dir}/final_hw_config.json" ] @@ -681,7 +403,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -716,49 +438,9 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Building dataflow accelerator from cybsec-mlp-ready.onnx\n", - "Intermediate outputs will be generated in /tmp/finn_dev_ubuntu\n", - "Final outputs will be generated in output_final\n", - "Build log is at output_final/build_dataflow.log\n", - "Running step: step_tidy_up [1/16]\n", - "Running step: step_streamline [2/16]\n", - "Running step: step_convert_to_hls [3/16]\n", - "Running step: step_create_dataflow_partition [4/16]\n", - "Running step: step_target_fps_parallelization [5/16]\n", - "Running step: step_apply_folding_config [6/16]\n", - "Running step: step_generate_estimate_reports [7/16]\n", - "Running step: step_hls_codegen [8/16]\n", - "Running step: step_hls_ipgen [9/16]\n", - "Running step: step_set_fifo_depths [10/16]\n", - "Running step: step_create_stitched_ip [11/16]\n", - "Running step: step_measure_rtlsim_performance [12/16]\n", - "Running step: step_make_pynq_driver [13/16]\n", - "Running step: step_out_of_context_synthesis [14/16]\n", - "Running step: step_synthesize_bitfile [15/16]\n", - "Running step: step_deployment_package [16/16]\n", - "Completed successfully\n", - "CPU times: user 4.47 s, sys: 766 ms, total: 5.24 s\n", - "Wall time: 22min 13s\n" - ] - }, - { - "data": { - "text/plain": [ - "0" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "#%%time\n", "#build.build_dataflow_cfg(model_file, cfg)" @@ -773,17 +455,9 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "finn-accel.bit\tfinn-accel.hwh\r\n" - ] - } - ], + "outputs": [], "source": [ "#! ls {final_output_dir}/bitfile" ] @@ -797,17 +471,9 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "driver.py driver_base.py finn runtime_weights validate.py\r\n" - ] - } - ], + "outputs": [], "source": [ "#! ls {final_output_dir}/driver" ] @@ -821,18 +487,9 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "estimate_layer_resources_hls.json post_synth_resources.xml\r\n", - "post_route_timing.rpt\r\n" - ] - } - ], + "outputs": [], "source": [ "#! ls {final_output_dir}/report" ] @@ -846,17 +503,9 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "bitfile driver\r\n" - ] - } - ], + "outputs": [], "source": [ "#! 
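# final_hw_config.json records the folding that was actually used (PE, SIMD,
# ram_style, mem_mode per MatrixVectorActivation/FIFO node). A sketch of feeding
# it back into a later bitfile build so the same folding is reused; the board and
# shell values are assumptions for a PYNQ-Z1 target, other names as in
# finn.builder.build_dataflow_config:
cfg_bitfile = build_cfg.DataflowBuildConfig(
    output_dir="output_final",
    folding_config_file=rtlsim_output_dir + "/final_hw_config.json",
    synth_clk_period_ns=10.0,
    board="Pynq-Z1",
    shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
    generate_outputs=[
        build_cfg.DataflowOutputType.BITFILE,
        build_cfg.DataflowOutputType.PYNQ_DRIVER,
        build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
    ],
)
# build.build_dataflow_cfg(model_file, cfg_bitfile)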
ls {final_output_dir}/deploy" ] @@ -874,7 +523,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -883,7 +532,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -892,38 +541,18 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "driver.py\tfinn\t\t unsw_nb15_binarized.npz validate.py\r\n", - "driver_base.py\truntime_weights validate-unsw-nb15.py\r\n" - ] - } - ], + "outputs": [], "source": [ "#! ls {final_output_dir}/deploy/driver" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'/workspace/finn/notebooks/end2end_example/cybersecurity/deploy-on-pynq.zip'" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "#from shutil import make_archive\n", "#make_archive('deploy-on-pynq', 'zip', final_output_dir+\"/deploy\")" @@ -1016,7 +645,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.8.5" } }, "nbformat": 4, diff --git a/requirements.txt b/requirements.txt index f9d451c2a1a353d57edccb9688c504a5600c2492..3bab23fb7d6c6cc80155b9f4b42c5db48ab0723e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,6 +9,7 @@ onnx==1.11.0 onnxoptimizer onnxruntime==1.11.1 pre-commit==2.9.2 +protobuf==3.20.1 pyscaffold==3.2.1 scipy==1.5.2 setupext-janitor>=1.1.2 diff --git a/src/finn/analysis/fpgadataflow/res_estimation.py b/src/finn/analysis/fpgadataflow/res_estimation.py index 3c8c8e7c0b16c3ad312ad14bc7fac758e717cd04..406496bc0e873de0df484f457c0fe8d97b94e434 100644 --- a/src/finn/analysis/fpgadataflow/res_estimation.py +++ b/src/finn/analysis/fpgadataflow/res_estimation.py @@ -63,8 +63,8 @@ def res_estimation_complete(model): op_type = node.op_type inst = registry.getCustomOp(node) if ( - op_type == "StreamingFCLayer_Batch" - or op_type == "Vector_Vector_Activate_Batch" + op_type == "MatrixVectorActivation" + or op_type == "VectorVectorActivation" ): orig_restype = inst.get_nodeattr("resType") res_dict[node.name] = [] diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index ff49b601c28618c31758a46d3f03ea5c88c3cf6c..381dfe91a22a95ac056ea61f7de1d1b9d176ab17 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -59,7 +59,7 @@ class DataflowOutputType(str, Enum): class ComputeEngineMemMode(str, Enum): """Memory mode for generated compute engines. See - https://finn.readthedocs.io/en/latest/internals.html#streamingfclayer-mem-mode + https://finn.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode for more information.""" CONST = "const" @@ -222,8 +222,8 @@ class DataflowBuildConfig: #: (Optional) Whether thresholding layers (which implement quantized #: activations in FINN) will be implemented as stand-alone HLS layers, - #: instead of being part of StreamingFCLayer. This gives larger flexibility, - #: and makes it possible to have runtime-writable thresholds. + #: instead of being part of MatrixVectorActivation layer. This gives larger + #: flexibility, and makes it possible to have runtime-writable thresholds. 
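# A short usage sketch of the attribute documented above; field and enum names
# follow this module (DataflowBuildConfig / DataflowOutputType), the output_dir
# value is illustrative only:
from finn.builder.build_dataflow_config import DataflowBuildConfig, DataflowOutputType

cfg_estimates = DataflowBuildConfig(
    output_dir="output_estimates_only",
    synth_clk_period_ns=10.0,
    standalone_thresholds=True,  # quantized activations become stand-alone Thresholding_Batch nodes
    generate_outputs=[DataflowOutputType.ESTIMATE_REPORTS],
)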
standalone_thresholds: Optional[bool] = False #: Target board, only needed for generating full bitfiles where the FINN diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 926c3131d59cce0b7a0d8a31972adec932c4b527..59f77650da5c3c3f9db0ea65e2288544b376bec3 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -292,9 +292,9 @@ def step_convert_to_hls(model: ModelWrapper, cfg: DataflowBuildConfig): # doing this first causes all threshold layers to be standalone model = model.transform(to_hls.InferThresholdingLayer()) # needed for bipolar MatMul layers - model = model.transform(to_hls.InferBinaryStreamingFCLayer(mem_mode)) + model = model.transform(to_hls.InferBinaryMatrixVectorActivation(mem_mode)) # needed for non-bipolar MatMul layers - model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode)) + model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode)) # TopK to LabelSelect model = model.transform(to_hls.InferLabelSelectLayer()) # input quantization (if any) as standalone threshold diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py index 7ddfbf9610a47d4918d9ebcd06c4aef6fc006558..755a0e056858e4db5c4392a77fc458a267948912 100644 --- a/src/finn/custom_op/fpgadataflow/__init__.py +++ b/src/finn/custom_op/fpgadataflow/__init__.py @@ -43,6 +43,7 @@ from finn.custom_op.fpgadataflow.globalaccpool_batch import GlobalAccPool_Batch from finn.custom_op.fpgadataflow.iodma import IODMA from finn.custom_op.fpgadataflow.labelselect_batch import LabelSelect_Batch from finn.custom_op.fpgadataflow.lookup import Lookup +from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation from finn.custom_op.fpgadataflow.pool_batch import Pool_Batch from finn.custom_op.fpgadataflow.streamingdataflowpartition import ( StreamingDataflowPartition, @@ -50,15 +51,12 @@ from finn.custom_op.fpgadataflow.streamingdataflowpartition import ( from finn.custom_op.fpgadataflow.streamingdatawidthconverter_batch import ( StreamingDataWidthConverter_Batch, ) -from finn.custom_op.fpgadataflow.streamingfclayer_batch import StreamingFCLayer_Batch from finn.custom_op.fpgadataflow.streamingfifo import StreamingFIFO from finn.custom_op.fpgadataflow.streamingmaxpool_batch import StreamingMaxPool_Batch from finn.custom_op.fpgadataflow.thresholding_batch import Thresholding_Batch from finn.custom_op.fpgadataflow.tlastmarker import TLastMarker from finn.custom_op.fpgadataflow.upsampler import UpsampleNearestNeighbour_Batch -from finn.custom_op.fpgadataflow.vector_vector_activate_batch import ( - Vector_Vector_Activate_Batch, -) +from finn.custom_op.fpgadataflow.vectorvectoractivation import VectorVectorActivation custom_op = dict() @@ -66,7 +64,7 @@ custom_op = dict() # registered and plug in correctly into the infrastructure custom_op["DownSampler"] = DownSampler custom_op["StreamingMaxPool_Batch"] = StreamingMaxPool_Batch -custom_op["StreamingFCLayer_Batch"] = StreamingFCLayer_Batch +custom_op["MatrixVectorActivation"] = MatrixVectorActivation custom_op["ConvolutionInputGenerator"] = ConvolutionInputGenerator custom_op["ConvolutionInputGenerator1D"] = ConvolutionInputGenerator1D custom_op["TLastMarker"] = TLastMarker @@ -79,7 +77,7 @@ custom_op["Thresholding_Batch"] = Thresholding_Batch custom_op["AddStreams_Batch"] = AddStreams_Batch custom_op["LabelSelect_Batch"] = LabelSelect_Batch custom_op["DuplicateStreams_Batch"] = 
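# For orientation, the conversion order in step_convert_to_hls above matters:
# thresholding is inferred first (so activations can become stand-alone layers),
# then bipolar and quantized MatMuls become MatrixVectorActivation nodes, then
# TopK becomes LabelSelect. A condensed sketch; "model" is assumed to be a
# streamlined ModelWrapper and mem_mode comes from the build config:
import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls

mem_mode = "decoupled"
model = model.transform(to_hls.InferThresholdingLayer())
model = model.transform(to_hls.InferBinaryMatrixVectorActivation(mem_mode))
model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode))
model = model.transform(to_hls.InferLabelSelectLayer())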
DuplicateStreams_Batch -custom_op["Vector_Vector_Activate_Batch"] = Vector_Vector_Activate_Batch +custom_op["VectorVectorActivation"] = VectorVectorActivation custom_op["ChannelwiseOp_Batch"] = ChannelwiseOp_Batch custom_op["IODMA"] = IODMA custom_op["StreamingDataflowPartition"] = StreamingDataflowPartition diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index ddeda8c86f96a3007b1d0861337799bd02903560..c337398ebb82ecf75cf25386f354c29a925858a1 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -329,10 +329,10 @@ class HLSCustomOp(CustomOp): ], "vitis_hls": [ "set_param hls.enable_hidden_option_error false", - "config_compile -disable_unroll_code_size_check", + "config_compile -disable_unroll_code_size_check -pipeline_style flp", "config_interface -m_axi_addr64", - "config_rtl -auto_prefix", - "config_export -disable_deadlock_detection", + "config_rtl -module_auto_prefix", + "config_rtl -deadlock_detection none", ], } return default_directives[hls_version] @@ -658,7 +658,7 @@ compilation transformations? be filled by every node. var: makes it possible to reuse the function for different c++ code generation. - I.e. if set to "ipgen" in StreamingFCLayer_Batch additional PRAGMA defines are + I.e. if set to "ipgen" in MatrixVectorActivation additional PRAGMA defines are added.""" pass diff --git a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py similarity index 98% rename from src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py rename to src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index cdcff47022deb5196ffafc9c2bf28af5db45b9ae..ed88048a2dd827685a8d923f3dacee8f4541e0ce 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfclayer_batch.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -48,7 +48,7 @@ from finn.util.data_packing import ( from . import templates -# ONNX i/o tensor shape assumptions for StreamingFCLayer: +# ONNX i/o tensor shape assumptions for MatrixVectorActivation: # input 0 is the input tensor, shape (.., i_size) = (..., MW) # input 1 is the weight tensor, shape (i_size, o_size) = (MW, MH) # (optional) input 2 is the thresholds tensor, shape (o_size, n_thres) @@ -56,8 +56,9 @@ from . import templates # the ... 
here can be any shape (representing groups of vectors) -class StreamingFCLayer_Batch(HLSCustomOp): - """Class that corresponds to finn-hls StreamingFCLayer_Batch function.""" +class MatrixVectorActivation(HLSCustomOp): + """Class that corresponds to finn-hls Matrix_Vector_Activate(_Stream)_Batch + function.""" def __init__(self, onnx_node): super().__init__(onnx_node) @@ -192,7 +193,7 @@ class StreamingFCLayer_Batch(HLSCustomOp): info_messages.append("All necessary attributes exist") except Exception: info_messages.append( - """The required StreamingFCLayer attributes do not exist.""" + """The required MatrixVectorActivation attributes do not exist.""" ) # verify the number of inputs depending on noActivation value @@ -204,7 +205,7 @@ class StreamingFCLayer_Batch(HLSCustomOp): info_messages.append("The number of inputs is correct") else: info_messages.append( - """StreamingFCLayer_Batch needs in no + """MatrixVectorActivation needs in no activation mode 2 inputs (data input and weights)""" ) elif no_act == 0: @@ -212,7 +213,7 @@ class StreamingFCLayer_Batch(HLSCustomOp): info_messages.append("The number of inputs is correct") else: info_messages.append( - """StreamingFCLayer_Batch needs 3 inputs + """MatrixVectorActivation needs 3 inputs (data input and weights and threshold values)""" ) else: @@ -940,7 +941,7 @@ class StreamingFCLayer_Batch(HLSCustomOp): reshaped_input, ) elif in_ind > 2: - raise Exception("Unexpected input found for StreamingFCLayer") + raise Exception("Unexpected input found for MatrixVectorActivation") in_ind += 1 if mode == "cppsim": @@ -1010,16 +1011,12 @@ class StreamingFCLayer_Batch(HLSCustomOp): self.code_gen_dict["$GLOBALS$"] += ['#include "activations.hpp"'] mem_mode = self.get_nodeattr("mem_mode") - if mem_mode == "const": - # self.code_gen_dict["$GLOBALS$"] += ['#include "params.h"'] - pass - elif mem_mode == "decoupled" or mem_mode == "external": - self.code_gen_dict["$GLOBALS$"] += ['#include "mvau.hpp"'] - else: + if mem_mode not in ["const", "decoupled", "external"]: raise Exception( """Please set mem_mode to "const", "decoupled", or "external", currently no other parameter value is supported!""" ) + self.code_gen_dict["$GLOBALS$"] += ['#include "mvau.hpp"'] if self.calc_tmem() != 0: # TODO find a better way of checking for no pregenerated thresholds self.code_gen_dict["$GLOBALS$"] += ['#include "thresh.h"'] @@ -1031,7 +1028,7 @@ class StreamingFCLayer_Batch(HLSCustomOp): MW = self.get_nodeattr("MW") condition = SIMD >= (MW / 1024) msg = ( - f"HLS synthesis of StreamingFCLayer_Batch requires: " + f"HLS synthesis of MatrixVectorActivation requires: " f"SIMD >= MW / 1024. This is not fulfilled with: SIMD={SIMD} " f"and MW={MW} for node: {self.onnx_node.name}." 
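# Worked illustration of the constraint above: MW is the fan-in dimension of the
# layer, so e.g. MW=600 needs SIMD >= 1 while MW=4096 needs SIMD >= 4. A throwaway
# helper (name hypothetical, not part of this class):
def min_simd_for_hls(mw, max_unroll=1024):
    # smallest SIMD satisfying SIMD >= MW / 1024, i.e. ceil(MW / 1024)
    return max(1, -(-mw // max_unroll))

assert min_simd_for_hls(600) == 1
assert min_simd_for_hls(4096) == 4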
) @@ -1123,11 +1120,9 @@ class StreamingFCLayer_Batch(HLSCustomOp): else: threshs = "threshs" if mem_mode == "const": - node = self.onnx_node self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<MW1, MH1, SIMD1, PE1, {}, {}, {}> + """Matrix_Vector_Activate_Batch<MW1, MH1, SIMD1, PE1, 1, {}, {}, {}> (in0, out, weights, {}, numReps, {});""".format( - node.op_type, tmpl_args["TSrcI"], tmpl_args["TDstI"], tmpl_args["TWeightI"], @@ -1426,7 +1421,7 @@ class StreamingFCLayer_Batch(HLSCustomOp): # base class impl sufficient for const/external modes return super().code_generation_ipi() else: - raise Exception("Unrecognized mem_mode for StreamingFCLayer") + raise Exception("Unrecognized mem_mode for MatrixVectorActivation") return cmd def get_verilog_top_module_intf_names(self): diff --git a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py similarity index 98% rename from src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py rename to src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index dabcd45f51bb90ad3baa7838caffed81b25e3b32..27b23dd32835c265759a8cabfd2a3412844077ca 100644 --- a/src/finn/custom_op/fpgadataflow/vector_vector_activate_batch.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -45,7 +45,7 @@ from finn.util.data_packing import ( ) -class Vector_Vector_Activate_Batch(HLSCustomOp): +class VectorVectorActivation(HLSCustomOp): """Class that corresponds to finn-hlslib Vector_Vector_Activate_Batch function""" def __init__(self, onnx_node): @@ -422,9 +422,7 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): reshaped_input, ) elif in_ind > 2: - raise Exception( - "Unexpected input found for Vector_Vector_Activate_Unit" - ) + raise Exception("Unexpected input found for VectorVectorActivation") in_ind += 1 if mode == "cppsim": @@ -523,11 +521,9 @@ class Vector_Vector_Activate_Batch(HLSCustomOp): threshs = "PassThroughActivation<%s>()" % odtype_hls_str else: threshs = "threshs" - node = self.onnx_node self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<Channels1, InnerProdDim, SIMD1, PE1, 1, {}, {}, {}> + """Vector_Vector_Activate_Batch<Channels1, InnerProdDim, SIMD1, PE1, 1, {}, {}, {}> (in0, out, weights, {}, numReps, {});""".format( - node.op_type, tmpl_args["TSrcI"], tmpl_args["TDstI"], tmpl_args["TWeightI"], diff --git a/src/finn/qnn-data/build_dataflow/folding_config.json b/src/finn/qnn-data/build_dataflow/folding_config.json index 1fbe289608f68c296c4d86fd0dbe4a07e3d70277..95167f1a306f1edefc9deb460413b16768dc96d5 100644 --- a/src/finn/qnn-data/build_dataflow/folding_config.json +++ b/src/finn/qnn-data/build_dataflow/folding_config.json @@ -4,22 +4,22 @@ "PE": 49, "ram_style": "distributed" }, - "StreamingFCLayer_Batch_0": { + "MatrixVectorActivation_0": { "PE": 16, "SIMD": 49, "ram_style": "block" }, - "StreamingFCLayer_Batch_1": { + "MatrixVectorActivation_1": { "PE": 8, "SIMD": 8, "ram_style": "auto" }, - "StreamingFCLayer_Batch_2": { + "MatrixVectorActivation_2": { "PE": 8, "SIMD": 8, "ram_style": "auto" }, - "StreamingFCLayer_Batch_3": { + "MatrixVectorActivation_3": { "PE": 10, "SIMD": 8, "ram_style": "distributed" diff --git a/src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json b/src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json index 299a8be815aeaba70c0f41e4b1b3252b77c6f042..442ea72d9a5877c60a25c15b296787e4ac04ce1b 100644 --- a/src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json +++ b/src/finn/qnn-data/test_ext_weights/tfc-w1a1-extw.json @@ -4,22 +4,22 @@ "PE": 49, 
"ram_style": "distributed" }, - "StreamingFCLayer_Batch_0": { + "MatrixVectorActivation_0": { "PE": 16, "SIMD": 49, "ram_style": "block" }, - "StreamingFCLayer_Batch_1": { + "MatrixVectorActivation_1": { "PE": 8, "SIMD": 8, "mem_mode": "external" }, - "StreamingFCLayer_Batch_2": { + "MatrixVectorActivation_2": { "PE": 8, "SIMD": 8, "mem_mode": "external" }, - "StreamingFCLayer_Batch_3": { + "MatrixVectorActivation_3": { "PE": 10, "SIMD": 8, "ram_style": "distributed" diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 9d70ca46906af3e299600669ec91090e56492bb8..c235f507f7491c3a9ca1554bec6211d0af49323d 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -578,9 +578,9 @@ class InferPool_Batch(Transformation): return (model, graph_modified) -class InferBinaryStreamingFCLayer(Transformation): +class InferBinaryMatrixVectorActivation(Transformation): """Convert XnorPopcountMatMul layers to - StreamingFCLayer_Batch layers. Any immediately following MultiThreshold + MatrixVectorActivation layers. Any immediately following MultiThreshold layers will also be absorbed into the MVTU.""" def __init__(self, mem_mode="const"): @@ -650,9 +650,9 @@ class InferBinaryStreamingFCLayer(Transformation): actval = odt.min() model.set_tensor_shape(mm_input, mm_in_shape) model.set_tensor_shape(mt_output, mt_out_shape) - # create and insert new StreamingFCLayer node + # create and insert new MatrixVectorActivation node new_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", [mm_input, mm_weight, mt_thres], [mt_output], domain="finn.custom_op.fpgadataflow", @@ -681,9 +681,9 @@ class InferBinaryStreamingFCLayer(Transformation): odt = model.get_tensor_datatype(mm_output) model.set_tensor_shape(mm_input, mm_in_shape) model.set_tensor_shape(mm_output, mm_out_shape) - # create and insert new StreamingFCLayer node + # create and insert new MatrixVectorActivation node new_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", [mm_input, mm_weight], [mm_output], domain="finn.custom_op.fpgadataflow", @@ -713,9 +713,9 @@ class InferBinaryStreamingFCLayer(Transformation): return (model, graph_modified) -class InferQuantizedStreamingFCLayer(Transformation): +class InferQuantizedMatrixVectorActivation(Transformation): """Convert MatMul layers with quantized inputs and weights to - StreamingFCLayer_Batch layers. Any immediately following MultiThreshold + MatrixVectorActivation layers. 
Any immediately following MultiThreshold layers will also be absorbed into the MVTU.""" def __init__(self, mem_mode="const"): @@ -793,9 +793,9 @@ class InferQuantizedStreamingFCLayer(Transformation): # remove bias for bipolar, since # binary->bipolar is achieved by reinterpretation actval = 0 - # create and insert new StreamingFCLayer node + # create and insert new MatrixVectorActivation node new_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", [mm_input, mm_weight, mt_thres], [mt_output], domain="finn.custom_op.fpgadataflow", @@ -812,7 +812,7 @@ class InferQuantizedStreamingFCLayer(Transformation): noActivation=0, numInputVectors=list(mm_in_shape[:-1]), mem_mode=self.mem_mode, - name="StreamingFCLayer_Batch_" + n.name, + name="MatrixVectorActivation_" + n.name, ) graph.node.insert(node_ind, new_node) # remove old nodes @@ -824,9 +824,9 @@ class InferQuantizedStreamingFCLayer(Transformation): odt = model.get_tensor_datatype(mm_output) model.set_tensor_shape(mm_input, mm_in_shape) model.set_tensor_shape(mm_output, mm_out_shape) - # create and insert new StreamingFCLayer node + # create and insert new MatrixVectorActivation node new_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", [mm_input, mm_weight], [mm_output], domain="finn.custom_op.fpgadataflow", @@ -843,7 +843,7 @@ class InferQuantizedStreamingFCLayer(Transformation): noActivation=1, numInputVectors=list(mm_in_shape[:-1]), mem_mode=self.mem_mode, - name="StreamingFCLayer_Batch_" + n.name, + name="MatrixVectorActivation_" + n.name, ) graph.node.insert(node_ind, new_node) # remove old node @@ -856,9 +856,9 @@ class InferQuantizedStreamingFCLayer(Transformation): return (model, graph_modified) -class InferVVAU(Transformation): +class InferVectorVectorActivation(Transformation): """Convert MatMul layers with quantized inputs and weights to - Vector_Vector_Activate_Batch layers, if the sparsity annotation + VectorVectorActivation layers, if the sparsity annotation of the weight matrix indicates that the MatMul layer belongs to a depthwise convolution. 
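# The depthwise path referenced above, condensed: convolutions are first lowered
# to MatMuls (which attaches the sparsity annotation for depthwise convs), the
# sliding-window generator is inferred, and only annotated weight matrices become
# VectorVectorActivation nodes, while the remaining MatMuls become
# MatrixVectorActivation. Sketch over a ModelWrapper "model"; the
# LowerConvsToMatMul import path may vary with the finn-base version:
import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
from finn.transformation.lower_convs_to_matmul import LowerConvsToMatMul

model = model.transform(LowerConvsToMatMul())
model = model.transform(to_hls.InferConvInpGen())
model = model.transform(to_hls.InferVectorVectorActivation())
model = model.transform(to_hls.InferQuantizedMatrixVectorActivation())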
Any immediately following MultiThreshold layers will also be absorbed into the VVAU.""" @@ -945,9 +945,9 @@ class InferVVAU(Transformation): ) model.set_tensor_shape(mm_input, mm_in_shape) model.set_tensor_shape(mt_output, mt_out_shape) - # create and insert new Vector_Vector_Activate_Batch node + # create and insert new VectorVectorActivation node new_node = helper.make_node( - "Vector_Vector_Activate_Batch", + "VectorVectorActivation", [mm_input, mm_weight, mt_thres], [mt_output], domain="finn.custom_op.fpgadataflow", @@ -962,7 +962,7 @@ class InferVVAU(Transformation): outputDataType=odt.name, ActVal=actval, noActivation=0, - name="Vector_Vector_Activate_Batch_" + n.name, + name="VectorVectorActivation_" + n.name, ) graph.node.insert(node_ind, new_node) # remove old nodes @@ -976,7 +976,7 @@ class InferVVAU(Transformation): model.set_tensor_shape(mm_output, mm_out_shape) # create and insert new VVAU node new_node = helper.make_node( - "Vector_Vector_Activate_Batch", + "VectorVectorActivation", [mm_input, mm_weight], [mm_output], domain="finn.custom_op.fpgadataflow", @@ -991,7 +991,7 @@ class InferVVAU(Transformation): outputDataType=odt.name, ActVal=0, noActivation=1, - name="Vector_Vector_Activate_Batch_" + n.name, + name="VectorVectorActivation_" + n.name, ) graph.node.insert(node_ind, new_node) # remove old node @@ -1156,7 +1156,7 @@ class InferAddStreamsLayer(Transformation): # create node with no parallelization first pe = 1 - # create and insert new StreamingFCLayer node + # create and insert new AddStreams_Batch node new_node = helper.make_node( "AddStreams_Batch", [in0, in1], @@ -1442,7 +1442,7 @@ class InferLabelSelectLayer(Transformation): k = model.get_initializer(k_input)[0] - # create and insert new StreamingFCLayer node + # create and insert new LabelSelect_Batch node new_node = helper.make_node( "LabelSelect_Batch", [fc_input], diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 5a98f94ce0646012368eb09adf2099983c641488..6c4c045f5e52edb12c4dc77f7e8e7af407ceb7b4 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -55,7 +55,7 @@ def is_external_input(model, node, i): if model.get_initializer(node.input[i]) is None: return True else: - if node.op_type == "StreamingFCLayer_Batch": + if node.op_type == "MatrixVectorActivation": if node_inst.get_nodeattr("mem_mode") == "external": return True return False diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index 179a57664455878421dd322ed7e7c86585ac8702..67920172231e685a4f5dd72f037f64fe6baf8449 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -152,7 +152,7 @@ class Floorplan(Transformation): partition_cnt += 1 continue elif not ( - node.op_type == "StreamingFCLayer_Batch" + node.op_type == "MatrixVectorActivation" and node_inst.get_nodeattr("mem_mode") is not None and node_inst.get_nodeattr("mem_mode") == "external" ): diff --git a/src/finn/transformation/fpgadataflow/insert_dwc.py b/src/finn/transformation/fpgadataflow/insert_dwc.py index fc95d6a6587f2a19e6fc51e460450ad67dabca4c..9817f2e3d2857bd5e59b304fbdaf3bad74a9b037 100644 --- a/src/finn/transformation/fpgadataflow/insert_dwc.py +++ b/src/finn/transformation/fpgadataflow/insert_dwc.py @@ -61,7 +61,7 @@ class InsertDWC(Transformation): # - if FC and external mem, it 
could be connected to input 1 # - if concat, could be connected to any input if ( - consumer.op_type == "StreamingFCLayer_Batch" + consumer.op_type == "MatrixVectorActivation" and n1.get_nodeattr("mem_mode") == "external" ) or (consumer.op_type == "StreamingConcat"): # get input idx diff --git a/src/finn/transformation/fpgadataflow/insert_iodma.py b/src/finn/transformation/fpgadataflow/insert_iodma.py index 76eb5d8a10b570068e1534827cb4a2b9d2e197d9..293f817036fbde90c889dbacf7a0a25123029fd9 100644 --- a/src/finn/transformation/fpgadataflow/insert_iodma.py +++ b/src/finn/transformation/fpgadataflow/insert_iodma.py @@ -58,11 +58,11 @@ class InsertIODMA(Transformation): . """ - # TODO: refactor this into streamingfclayer_batch.py, could go into + # TODO: refactor this into matrixvectoractivation.py, could go into # make_weight_file except it doesn't write a file but returns a npy # array instead w_shape = weights.shape - assert len(w_shape) == 2, "weights withincorrect number of dims" + assert len(w_shape) == 2, "weights with incorrect number of dims" inp_w, out_w = w_shape assert out_w % pe == 0, "Malformed weight matrix" @@ -93,10 +93,11 @@ class InsertIODMA(Transformation): get_by_name(x.attribute, "backend").s.decode("UTF-8") == "fpgadataflow" for x in all_nodes ) - # parse streamingfclayers looking for external weights with no attached IODMA + # parse matrixvectoractivation layers looking for external weights with no + # attached IODMA fc_extw_nodes = list( filter( - lambda x: x.op_type == "StreamingFCLayer_Batch" + lambda x: x.op_type == "MatrixVectorActivation" and getCustomOp(x).get_nodeattr("mem_mode") == "external" and model.find_producer(x.input[1]) is None, all_nodes, diff --git a/src/finn/transformation/fpgadataflow/insert_tlastmarker.py b/src/finn/transformation/fpgadataflow/insert_tlastmarker.py index 5d14c9fde2c59a484617418c111a1f476b06b5c5..1610916eb693dbd55989712199ee5b414134a5af 100644 --- a/src/finn/transformation/fpgadataflow/insert_tlastmarker.py +++ b/src/finn/transformation/fpgadataflow/insert_tlastmarker.py @@ -105,7 +105,7 @@ class InsertTLastMarker(Transformation): # the input is in the list of graph inputs because it has an # initializer (TODO: fix this with a clean-up transform) if ( - first_node.op_type == "StreamingFCLayer_Batch" + first_node.op_type == "MatrixVectorActivation" and get_by_name(first_node.attribute, "mem_mode").s.decode("UTF-8") != "external" ): @@ -122,7 +122,7 @@ class InsertTLastMarker(Transformation): inp_idx = list(first_node.input).index(graph_in_name) if inp_idx > 0: if ( - first_node.op_type == "StreamingFCLayer_Batch" + first_node.op_type == "MatrixVectorActivation" and inp_idx == 1 ): stream_width = int(custom_op.get_weightstream_width()) diff --git a/src/finn/transformation/fpgadataflow/make_pynq_driver.py b/src/finn/transformation/fpgadataflow/make_pynq_driver.py index 0d83837264c6e7fc51fc1a1a980711855ccf37dd..b27cb368b9ac8a9f594c240e3846ddb297e41c43 100644 --- a/src/finn/transformation/fpgadataflow/make_pynq_driver.py +++ b/src/finn/transformation/fpgadataflow/make_pynq_driver.py @@ -285,7 +285,7 @@ class MakePYNQDriver(Transformation): dataflow_model = ModelWrapper(dataflow_model_filename) rt_layer_ind = 0 for node in dataflow_model.graph.node: - if node.op_type in ["StreamingFCLayer_Batch", "Thresholding_Batch"]: + if node.op_type in ["MatrixVectorActivation", "Thresholding_Batch"]: node_inst = getCustomOp(node) is_rt_weights = node_inst.get_nodeattr("runtime_writeable_weights") if is_rt_weights == 1: diff --git 
a/src/finn/transformation/fpgadataflow/make_zynq_proj.py b/src/finn/transformation/fpgadataflow/make_zynq_proj.py index d042ec297828f26cb4de5d2d0839fb60559730b3..a589cb039c825ff97c11df7ffa57109df27f3fd0 100644 --- a/src/finn/transformation/fpgadataflow/make_zynq_proj.py +++ b/src/finn/transformation/fpgadataflow/make_zynq_proj.py @@ -62,7 +62,7 @@ def collect_ip_dirs(model, ipstitch_path): ), """The directory that should contain the generated ip blocks doesn't exist.""" ip_dirs += [ip_dir_value] - if node.op_type in ["StreamingFCLayer_Batch", "Thresholding_Batch"]: + if node.op_type in ["MatrixVectorActivation", "Thresholding_Batch"]: if node_inst.get_nodeattr("mem_mode") == "decoupled": need_memstreamer = True ip_dirs += [ipstitch_path + "/ip"] diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index d4eb4cc2c38009ab09cf39ad3e2d4d30babbb9be..0139c71666fdfa4b60cb356ceb65ce2c5b831c13 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -138,7 +138,7 @@ class CapConvolutionFIFODepths(Transformation): Background: The simulation-based rtlsim_exec tends to overestimate the required depth of FIFOs between the ConvolutionInputGenerator (here called SWG) and the - StreamingFCLayer (here called MVAU). As the SWG has an internal buffer of 1 + MatrixVectorActivation (here called MVAU). As the SWG has an internal buffer of 1 image row, we use this as a rule of thumb to set FIFO depth to be no larger than 1 row. """ @@ -153,7 +153,7 @@ class CapConvolutionFIFODepths(Transformation): # TODO move this to own transformation for node in model.graph.node: # look for following pattern: - # ConvolutionInputGenerator -> StreamingFIFO -> StreamingFCLayer + # ConvolutionInputGenerator -> StreamingFIFO -> MatrixVectorActivation if node.op_type == "StreamingFIFO": fifo_prod = model.find_producer(node.input[0]) fifo_cons = model.find_consumer(node.output[0]) @@ -163,7 +163,7 @@ class CapConvolutionFIFODepths(Transformation): continue if fifo_cons is None: continue - if fifo_cons.op_type != "StreamingFCLayer_Batch": + if fifo_cons.op_type != "MatrixVectorActivation": continue op_inst = getCustomOp(node) depth = op_inst.get_nodeattr("depth") @@ -248,7 +248,7 @@ class InsertAndSetFIFODepths(Transformation): node = getCustomOp(node) node.set_nodeattr("inFIFODepth", self.max_depth) node.set_nodeattr("outFIFODepth", self.max_depth) - if node.onnx_node.op_type == "StreamingFCLayer_Batch": + if node.onnx_node.op_type == "MatrixVectorActivation": mmode = node.get_nodeattr("mem_mode") if mmode == "external": modified_fc_nodes.append(node.onnx_node.name) @@ -378,7 +378,7 @@ class InsertAndSetFIFODepths(Transformation): getCustomOp(node).set_nodeattr("outFIFODepth", 0) # for every FC node we changed from external to decoupled, # change back and reset implementation - if node.op_type == "StreamingFCLayer_Batch": + if node.op_type == "MatrixVectorActivation": if node.name in modified_fc_nodes: node_inst = getCustomOp(node) node_inst.set_nodeattr("mem_mode", "external") diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index c371d5c256db58bf60b5f99eb2323f5f52999900..23943084ab99d6ab880a69975e0b4a49756905a7 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -62,13 +62,13 @@ class SetFolding(Transformation): Notable exceptions and 
special behavior: - * When folding dense convolution/FC compute engines (StreamingFCLayer_Batch), + * When folding dense convolution/FC compute engines ("MVAU"/MatrixVectorActivation), which have two attributes (PE and SIMD): * first increases SIMD while weight stream width per PE is <= mvau_wwidth_max (configurable in the SetFolding initializer, defaults to 36) * then increases PE until the target is met or max PE reached - * When folding depthwise convolutions ("VVAU"/Vector_Vector_Activate_Batch) + * When folding depthwise convolutions ("VVAU"/VectorVectorActivation) or spatial reduction ops (Pool_Batch): * the producer of the node is expected to be a ConvolutionInputGenerator with depthwise=1, whose SIMD value will be set equal to the PE value of @@ -112,13 +112,13 @@ class SetFolding(Transformation): ] # these ops are preceded by depthwise SWG and have special behavior, # as explained in the SetFolding docstring - depthwise_op_exceptions = ["Vector_Vector_Activate_Batch", "Pool_Batch"] + depthwise_op_exceptions = ["VectorVectorActivation", "Pool_Batch"] for node in graph.node: if not is_fpgadataflow_node(node): continue op_type = node.op_type node_inst = getCustomOp(node) - if op_type == "StreamingFCLayer_Batch": + if op_type == "MatrixVectorActivation": max_simd = node_inst.get_nodeattr("MW") max_pe = node_inst.get_nodeattr("MH") node_inst.set_nodeattr("PE", 1) @@ -160,7 +160,7 @@ class SetFolding(Transformation): pe = node_inst.get_nodeattr("PE") swu_node_inst.set_nodeattr("SIMD", pe) else: - if op_type == "Vector_Vector_Activate_Batch": + if op_type == "VectorVectorActivation": ksize = np.prod(node_inst.get_nodeattr("Kernel")) elif op_type == "Pool_Batch": ksize = node_inst.get_nodeattr("KernelSize") diff --git a/src/finn/transformation/move_reshape.py b/src/finn/transformation/move_reshape.py index 6338e0e6a92c3d0793881523d01020f126a9729c..cec04a182b87e3af2c563a862554bfe026ad594a 100644 --- a/src/finn/transformation/move_reshape.py +++ b/src/finn/transformation/move_reshape.py @@ -50,7 +50,7 @@ class RemoveCNVtoFCFlatten(Transformation): producer = model.find_producer(transp_node.input[0]) if _is_fpgadataflow_node(producer) is True: consumer = model.find_consumer(n.output[0]) - if consumer.op_type == "StreamingFCLayer_Batch": + if consumer.op_type == "MatrixVectorActivation": fc_inst = getCustomOp(consumer) mw = fc_inst.get_nodeattr("MW") mh = fc_inst.get_nodeattr("MH") diff --git a/src/finn/util/create.py b/src/finn/util/create.py index e0b5dec45f5110c79bd4ad6dcb6aa2341ca6d70d..a8c2e67b385b797905cd4c5a196091069898b583 100644 --- a/src/finn/util/create.py +++ b/src/finn/util/create.py @@ -116,7 +116,7 @@ def hls_mlp_maker(layer_spec): model.graph.output.append(global_out) # there are two ways to implement bipolar weights and inputs for - # StreamingFC: + # MatrixVectorActivation: # - specify their datatypes as such # - specify their datatypes as BINARY as use binaryXnorMode if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: @@ -143,7 +143,7 @@ def hls_mlp_maker(layer_spec): actval = 0 no_act = 1 FCLayer_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", node_inp_list, [current_out_name], domain="finn.custom_op.fpgadataflow", diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index e0c904eaa7e0902421bdaadf3ec0cc2bec65ca11..bf3a73bca03268d9956fd7e4c659ce17097001cb 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -137,7 +137,7 @@ def 
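# Stand-alone sketch of the MVAU folding heuristic described in the SetFolding
# docstring above (illustration only, not the transformation's actual code; the
# 36-bit default mirrors mvau_wwidth_max):
def fold_mvau(mw, mh, wbits, mvau_wwidth_max=36, target_cycles=1000):
    divisors = lambda n: [d for d in range(1, n + 1) if n % d == 0]
    cycles = lambda simd, pe: (mw // simd) * (mh // pe)  # expected cycles per input
    simd, pe = 1, 1
    # 1) widen SIMD first, while the weight stream per PE stays narrow enough
    for s in divisors(mw):
        if s * wbits > mvau_wwidth_max:
            break
        simd = s
        if cycles(simd, pe) <= target_cycles:
            return simd, pe
    # 2) then spend PEs until the cycle target is met (or PE == MH)
    for p in divisors(mh):
        pe = p
        if cycles(simd, pe) <= target_cycles:
            break
    return simd, pe

# e.g. fold_mvau(784, 64, wbits=1) widens SIMD up to 28 before adding PEs; for a
# depthwise VVAU the preceding SWG's SIMD would then be set equal to the PE value.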
update_dashboard_data(topology, wbits, abits, key, val): def fold_tfc(model): - fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") + fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation") # (PE, SIMD, ramstyle) for each layer config = [(16, 49, "block"), (8, 8, "auto"), (8, 8, "auto"), (10, 8, "distributed")] for fcl, (pe, simd, ramstyle) in zip(fc_layers, config): @@ -155,7 +155,7 @@ def fold_tfc(model): def fold_lfc(model): - fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") + fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation") # (PE, SIMD, ramstyle) for each layer config = [ (32, 49, "block"), @@ -177,7 +177,7 @@ def fold_lfc(model): def fold_cnv_large(model): - fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") + fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation") # each tuple is (PE, SIMD) for a layer folding = [ (16, 3), @@ -204,7 +204,7 @@ def fold_cnv_large(model): def fold_cnv_small(model): - fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") + fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation") # each tuple is (PE, SIMD) for a layer folding = [ (8, 3, "distributed"), @@ -426,9 +426,9 @@ class TestEnd2End: # use standalone thresholds for tfc-w1a1 to also exercise that option model = model.transform(to_hls.InferThresholdingLayer()) # needed for bipolar MatMul layers - model = model.transform(to_hls.InferBinaryStreamingFCLayer(mem_mode)) + model = model.transform(to_hls.InferBinaryMatrixVectorActivation(mem_mode)) # needed for non-bipolar MatMul layers - model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode)) + model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode)) # TopK to LabelSelect model = model.transform(to_hls.InferLabelSelectLayer()) # input quantization (if any) to standalone thresholding @@ -451,26 +451,26 @@ class TestEnd2End: "tfc": [ ("Reshape", 1), ("Thresholding_Batch", 1), - ("StreamingFCLayer_Batch", 4), + ("MatrixVectorActivation", 4), ("LabelSelect_Batch", 1), ], "tfc-1-1": [ ("Reshape", 1), ("Thresholding_Batch", 4), - ("StreamingFCLayer_Batch", 4), + ("MatrixVectorActivation", 4), ("LabelSelect_Batch", 1), ], "lfc": [ ("Reshape", 1), ("Thresholding_Batch", 1), - ("StreamingFCLayer_Batch", 4), + ("MatrixVectorActivation", 4), ("LabelSelect_Batch", 1), ], "cnv": [ ("Transpose", 1), ("Thresholding_Batch", 1), ("ConvolutionInputGenerator", 6), - ("StreamingFCLayer_Batch", 9), + ("MatrixVectorActivation", 9), ("StreamingMaxPool_Batch", 2), ("LabelSelect_Batch", 1), ], diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 85e3de9252eb8a235a3d4a116d8dfa5bb1a4a70c..e13041ad0c2e2b6ac2175592c51a9e2d5d143d13 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -217,8 +217,8 @@ def test_end2end_cybsec_mlp_build(QONNX_export): # examine the report contents with open(est_cycles_report, "r") as f: est_cycles_dict = json.load(f) - assert est_cycles_dict["StreamingFCLayer_Batch_0"] == 80 - assert est_cycles_dict["StreamingFCLayer_Batch_1"] == 64 + assert est_cycles_dict["MatrixVectorActivation_0"] == 80 + assert est_cycles_dict["MatrixVectorActivation_1"] == 64 with open(est_res_report, "r") as f: est_res_dict = json.load(f) assert est_res_dict["total"]["LUT"] == 11360.0 diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py index 
cb9e26eddf63b4f5c6bebe611e85f155bc453393..b457e8b9cbc26d10a001ba8e213bd78bcb6465cc 100644 --- a/tests/end2end/test_end2end_mobilenet_v1.py +++ b/tests/end2end/test_end2end_mobilenet_v1.py @@ -212,8 +212,8 @@ def test_end2end_mobilenet_convert_to_hls_layers(): model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_lowered.onnx") model = model.transform(to_hls.InferPool_Batch()) model = model.transform(to_hls.InferConvInpGen()) - model = model.transform(to_hls.InferVVAU()) - model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode)) + model = model.transform(to_hls.InferVectorVectorActivation()) + model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode)) model = model.transform(to_hls.InferChannelwiseLinearLayer()) model = model.transform(to_hls.InferLabelSelectLayer()) model = model.transform(InferShapes()) @@ -231,7 +231,7 @@ def test_end2end_mobilenet_folding(): assert extra_fold in [1, 2, 4] # set up folding for the depthwise conv layers impl'd by VVAUs # each value is PE for a layer - fc_layers = model.get_nodes_by_op_type("StreamingFCLayer_Batch") + fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation") # each tuple is (PE, SIMD, ram_style) for a layer folding = [ (32, 3, "block"), @@ -260,7 +260,7 @@ def test_end2end_mobilenet_folding(): getCustomOp(fc_layers[0]).set_nodeattr("resType", first_layer_res_type) # set up folding for the depthwise conv layers impl'd by VVAUs # each value is PE for a layer - vvau_layers = model.get_nodes_by_op_type("Vector_Vector_Activate_Batch") + vvau_layers = model.get_nodes_by_op_type("VectorVectorActivation") folding = [32, 32, 64, 16, 32, 8, 16, 16, 16, 16, 16, 4, 8] for vvau, pe in zip(vvau_layers, folding): vvau_inst = getCustomOp(vvau) diff --git a/tests/fpgadataflow/test_code_gen_trafo.py b/tests/fpgadataflow/test_code_gen_trafo.py index 296b8e70d395508bbabe633afc413581b901d7b4..49ee32c71ee941ff7435d4c12ccadae3f8e55c5e 100644 --- a/tests/fpgadataflow/test_code_gen_trafo.py +++ b/tests/fpgadataflow/test_code_gen_trafo.py @@ -50,7 +50,7 @@ def test_code_gen_trafo(): outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, mh]) node_inp_list = ["inp", "weights", "thresh"] FCLayer_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", node_inp_list, ["outp"], domain="finn.custom_op.fpgadataflow", diff --git a/tests/fpgadataflow/test_compilation_trafo.py b/tests/fpgadataflow/test_compilation_trafo.py index d1a479b93e6607f192573b396f74955e224afc63..9bafb101cedabc99d97356069c883cab4ed8a87f 100644 --- a/tests/fpgadataflow/test_compilation_trafo.py +++ b/tests/fpgadataflow/test_compilation_trafo.py @@ -51,7 +51,7 @@ def test_compilation_trafo(): outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, mh]) node_inp_list = ["inp", "weights", "thresh"] FCLayer_node = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", node_inp_list, ["outp"], domain="finn.custom_op.fpgadataflow", diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py index db19dd3d6c727ed3529ff60f43751d00acdbe421..5bbaefac2d3e5f800fbb9471df6469235271c2f3 100644 --- a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py @@ -141,10 +141,10 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode): new_model = model.transform(LowerConvsToMatMul()) new_model = new_model.transform(to_hls.InferConvInpGen()) if 
depthwise is True: - new_model = new_model.transform(to_hls.InferVVAU()) + new_model = new_model.transform(to_hls.InferVectorVectorActivation()) else: - new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer()) - fc_node = new_model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0] + new_model = new_model.transform(to_hls.InferQuantizedMatrixVectorActivation()) + fc_node = new_model.get_nodes_by_op_type("MatrixVectorActivation")[0] fc_inst = getCustomOp(fc_node) mw = fc_inst.get_nodeattr("MW") mh = fc_inst.get_nodeattr("MH") @@ -180,7 +180,7 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode): assert padding_inst.get_nodeattr("SIMD") == in_chn if depthwise is True and exec_mode == "rtlsim": - node = new_model.get_nodes_by_op_type("Vector_Vector_Activate_Batch")[0] + node = new_model.get_nodes_by_op_type("VectorVectorActivation")[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = new_model.analysis(exp_cycles_per_layer) diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py index 001ffc08b6d2400c3047206f28adefd3b39f64f2..0760ff9b37487f4a1ac06853055d2e47b7269f9e 100755 --- a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py @@ -219,8 +219,8 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): # convert_to_hls if depthwise is True: - new_model = new_model.transform(to_hls.InferVVAU()) - new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer()) + new_model = new_model.transform(to_hls.InferVectorVectorActivation()) + new_model = new_model.transform(to_hls.InferQuantizedMatrixVectorActivation()) new_model = new_model.transform(to_hls.InferThresholdingLayer()) new_model = new_model.transform(to_hls.InferConvInpGen()) new_model = new_model.transform(to_hls.InferStreamingMaxPool()) diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 362e6b4d9b9debdfa269902661ac911e6e048171..55dc77cafb898ead28a7cbb9641e0b40db276919 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -124,10 +124,10 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode): new_model = model.transform(LowerConvsToMatMul()) new_model = new_model.transform(to_hls.InferConvInpGen()) if depthwise is True: - new_model = new_model.transform(to_hls.InferVVAU()) + new_model = new_model.transform(to_hls.InferVectorVectorActivation()) else: - new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer()) - fc_node = new_model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0] + new_model = new_model.transform(to_hls.InferQuantizedMatrixVectorActivation()) + fc_node = new_model.get_nodes_by_op_type("MatrixVectorActivation")[0] fc_inst = getCustomOp(fc_node) mw = fc_inst.get_nodeattr("MW") mh = fc_inst.get_nodeattr("MH") @@ -173,7 +173,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode): assert padding_inst.get_nodeattr("SIMD") == in_chn if depthwise is True and exec_mode == "rtlsim": - node = new_model.get_nodes_by_op_type("Vector_Vector_Activate_Batch")[0] + node = new_model.get_nodes_by_op_type("VectorVectorActivation")[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = new_model.analysis(exp_cycles_per_layer) diff 
--git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index 680935a00a57b7c8d3f24a17d90da6b606dab3b9..9997f28438db113e85ce92138b3c08b223185a2c 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -90,10 +90,10 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): # subsequently, the FC inference will generate passthrough MVAUs if not fused_activation: model = model.transform(to_hls.InferThresholdingLayer()) - model = model.transform(to_hls.InferBinaryStreamingFCLayer()) - model = model.transform(to_hls.InferQuantizedStreamingFCLayer()) + model = model.transform(to_hls.InferBinaryMatrixVectorActivation()) + model = model.transform(to_hls.InferQuantizedMatrixVectorActivation()) for node in model.graph.node: - if node.op_type == "StreamingFCLayer_Batch": + if node.op_type == "MatrixVectorActivation": inst = getCustomOp(node) inst.set_nodeattr("mem_mode", "decoupled") mw = inst.get_nodeattr("MW") @@ -122,7 +122,7 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): assert len(non_finn_nodes) == 5 exp_non_finn_nodes = ["Transpose", "Transpose", "Reshape", "Mul", "Add"] assert [x.op_type for x in non_finn_nodes] == exp_non_finn_nodes - fc_nodes = model.get_nodes_by_op_type("StreamingFCLayer_Batch") + fc_nodes = model.get_nodes_by_op_type("MatrixVectorActivation") assert len(fc_nodes) == 9 swg_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator") assert len(swg_nodes) == 6 diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py index b9d1f8d82835d0cffadbfeefb557756456f67230..fd4e3679d7f19471509f8144ac72b4964f5b4a52 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py @@ -70,24 +70,24 @@ def test_convert_to_hls_layers_tfc_w1a1(): model = model.transform(absorb.AbsorbAddIntoMultiThreshold()) model = model.transform(absorb.AbsorbMulIntoMultiThreshold()) model = model.transform(RoundAndClipThresholds()) - model = model.transform(to_hls.InferBinaryStreamingFCLayer()) + model = model.transform(to_hls.InferBinaryMatrixVectorActivation()) fc0 = model.graph.node[2] - assert fc0.op_type == "StreamingFCLayer_Batch" + assert fc0.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc0.input[0]) == [1, 784] assert model.get_tensor_shape(fc0.input[1]) == [784, 64] assert model.get_tensor_shape(fc0.input[2]) == [64, 1] fc1 = model.graph.node[3] - assert fc1.op_type == "StreamingFCLayer_Batch" + assert fc1.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc1.input[0]) == [1, 64] assert model.get_tensor_shape(fc1.input[1]) == [64, 64] assert model.get_tensor_shape(fc1.input[2]) == [64, 1] fc2 = model.graph.node[4] - assert fc2.op_type == "StreamingFCLayer_Batch" + assert fc2.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc2.input[0]) == [1, 64] assert model.get_tensor_shape(fc2.input[1]) == [64, 64] assert model.get_tensor_shape(fc2.input[2]) == [64, 1] fc3 = model.graph.node[5] - assert fc3.op_type == "StreamingFCLayer_Batch" + assert fc3.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc3.input[0]) == [1, 64] assert model.get_tensor_shape(fc3.input[1]) == [64, 10] @@ -138,28 +138,28 @@ def test_convert_to_hls_layers_tfc_w1a2(): model = model.transform(GiveReadableTensorNames()) model = model.transform(Streamline()) from 
finn.transformation.fpgadataflow.convert_to_hls_layers import ( - InferQuantizedStreamingFCLayer, + InferQuantizedMatrixVectorActivation, ) - model = model.transform(InferQuantizedStreamingFCLayer()) + model = model.transform(InferQuantizedMatrixVectorActivation()) fc0 = model.graph.node[2] - assert fc0.op_type == "StreamingFCLayer_Batch" + assert fc0.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc0.input[0]) == [1, 784] assert model.get_tensor_shape(fc0.input[1]) == [784, 64] assert model.get_tensor_shape(fc0.input[2]) == [64, 2] fc1 = model.graph.node[3] - assert fc1.op_type == "StreamingFCLayer_Batch" + assert fc1.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc1.input[0]) == [1, 64] assert model.get_tensor_shape(fc1.input[1]) == [64, 64] assert model.get_tensor_shape(fc1.input[2]) == [64, 2] fc2 = model.graph.node[4] - assert fc2.op_type == "StreamingFCLayer_Batch" + assert fc2.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc2.input[0]) == [1, 64] assert model.get_tensor_shape(fc2.input[1]) == [64, 64] assert model.get_tensor_shape(fc2.input[2]) == [64, 2] fc3 = model.graph.node[5] - assert fc3.op_type == "StreamingFCLayer_Batch" + assert fc3.op_type == "MatrixVectorActivation" assert model.get_tensor_shape(fc3.input[0]) == [1, 64] assert model.get_tensor_shape(fc3.input[1]) == [64, 10] fc0w = getCustomOp(fc0) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py index 35f082f55cb99c4fb6b63020f3adfb351e5effe2..79a48793e0c4f062654e43aadcaf09ebf6d7da5b 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py @@ -149,7 +149,7 @@ def make_model(ch, ifmdim): def test_convert_to_hls_layers_synthetic(ch, ifmdim, idt): model = make_model(ch, ifmdim) model.save(export_onnx_path) - model = ModelWrapper(export_onnx_path) + model = ModelWrapper(export_onnx_path, fix_float64=True) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py index c7cc8e1e086f1c256ba441c985e3dab102e55bbe..d359ff24b241e77da67df1baa8cd6b1387d3c0c9 100644 --- a/tests/fpgadataflow/test_depthwise_convolution.py +++ b/tests/fpgadataflow/test_depthwise_convolution.py @@ -43,7 +43,7 @@ import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim from finn.transformation.fpgadataflow.convert_to_hls_layers import ( InferConvInpGen, - InferVVAU, + InferVectorVectorActivation, ) from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim @@ -183,7 +183,7 @@ def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding): input_dict = {"inp": input_tensor} new_model = model.transform(InferConvInpGen()) - new_model = new_model.transform(InferVVAU()) + new_model = new_model.transform(InferVectorVectorActivation()) # set SIMD in ConvInputGen node and PE in VVAU node @@ -191,7 +191,7 @@ def test_depthwise_conv_hls_cppsim(act, pe, k, stride, padding): if n.op_type == "ConvolutionInputGenerator": convinputgen_node = getCustomOp(n) convinputgen_node.set_nodeattr("SIMD", pe) - elif n.op_type == "Vector_Vector_Activate_Batch": + elif n.op_type == "VectorVectorActivation": vvau_node = getCustomOp(n) 
vvau_node.set_nodeattr("PE", pe) new_model = new_model.transform(SetExecMode("cppsim")) @@ -226,7 +226,7 @@ def test_depthwise_conv_hls_rtlsim(act, pe, k, stride, padding): input_dict = {"inp": input_tensor} new_model = model.transform(InferConvInpGen()) - new_model = new_model.transform(InferVVAU()) + new_model = new_model.transform(InferVectorVectorActivation()) # set SIMD in ConvInputGen node and PE in VVAU node @@ -234,7 +234,7 @@ def test_depthwise_conv_hls_rtlsim(act, pe, k, stride, padding): if n.op_type == "ConvolutionInputGenerator": convinputgen_node = getCustomOp(n) convinputgen_node.set_nodeattr("SIMD", pe) - elif n.op_type == "Vector_Vector_Activate_Batch": + elif n.op_type == "VectorVectorActivation": vvau_node = getCustomOp(n) vvau_node.set_nodeattr("PE", pe) diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index 8411c5c816034d842265305ffcc5cc6058437bb8..65917b5f0fab4f6178cedb9857a020002fca1c5b 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -62,7 +62,7 @@ ip_stitch_model_dir = os.environ["FINN_BUILD_DIR"] def create_one_fc_model(mem_mode="const"): - # create a model with a StreamingFCLayer instance with no activation + # create a model with a MatrixVectorActivation instance with no activation # the wider range of the full accumulator makes debugging a bit easier wdt = DataType["INT2"] idt = DataType["INT32"] @@ -78,7 +78,7 @@ def create_one_fc_model(mem_mode="const"): outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, m]) fc0 = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", ["inp", "w0"], ["outp"], domain="finn.custom_op.fpgadataflow", @@ -116,7 +116,7 @@ def create_one_fc_model(mem_mode="const"): def create_two_fc_model(mem_mode="decoupled"): - # create a model with two StreamingFCLayer instances + # create a model with two MatrixVectorActivation instances wdt = DataType["INT2"] idt = DataType["INT32"] odt = DataType["INT32"] @@ -132,7 +132,7 @@ def create_two_fc_model(mem_mode="decoupled"): outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, m]) fc0 = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", ["inp", "w0"], ["mid"], domain="finn.custom_op.fpgadataflow", @@ -151,7 +151,7 @@ def create_two_fc_model(mem_mode="decoupled"): ) fc1 = helper.make_node( - "StreamingFCLayer_Batch", + "MatrixVectorActivation", ["mid", "w1"], ["outp"], domain="finn.custom_op.fpgadataflow", @@ -211,7 +211,7 @@ def test_fpgadataflow_ipstitch_gen_model(mem_mode): model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, 5)) model = model.transform(HLSSynthIP()) - assert model.graph.node[0].op_type == "StreamingFCLayer_Batch" + assert model.graph.node[0].op_type == "MatrixVectorActivation" assert model.graph.node[-1].op_type == "TLastMarker" model.save( ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_gen_model_%s.onnx" % mem_mode @@ -335,6 +335,8 @@ def test_fpgadataflow_ipstitch_iodma_floorplan(): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis +# temporarily marked as xfail +@pytest.mark.xfail def test_fpgadataflow_ipstitch_vitis_end2end(board, period_ns, extw): if "VITIS_PATH" not in os.environ: pytest.skip("VITIS_PATH not set") @@ -358,6 +360,8 @@ def test_fpgadataflow_ipstitch_vitis_end2end(board, period_ns, extw): @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado +# temporarily marked as xfail 
+@pytest.mark.xfail
 def test_fpgadataflow_ipstitch_zynqbuild_end2end(board):
     model = create_two_fc_model()
     if model.graph.node[0].op_type == "StreamingDataflowPartition":
diff --git a/tests/fpgadataflow/test_fpgadataflow_fclayer.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py
similarity index 98%
rename from tests/fpgadataflow/test_fpgadataflow_fclayer.py
rename to tests/fpgadataflow/test_fpgadataflow_mvau.py
index 396dbdb17502c24892359c7fb1f09a307175322d..d1895a12675dce69070d280381a9982060e20c21 100644
--- a/tests/fpgadataflow/test_fpgadataflow_fclayer.py
+++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py
@@ -56,7 +56,7 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=Non
     assert mw % simd == 0
 
     # there are two ways to implement bipolar weights and inputs for
-    # StreamingFC:
+    # MatrixVectorActivation:
     # - specify their datatypes as such
     # - specify their datatypes as BINARY as use binaryXnorMode
     if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]:
@@ -85,7 +85,7 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=Non
         actval = 0
         no_act = 1
     FCLayer_node = helper.make_node(
-        "StreamingFCLayer_Batch",
+        "MatrixVectorActivation",
         node_inp_list,
         ["outp"],
         domain="finn.custom_op.fpgadataflow",
@@ -307,9 +307,9 @@ def test_fpgadataflow_fclayer_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh):
     assert (y_produced.reshape(y_expected.shape) == y_expected).all(), "rtlsim failed"
 
     hls_synt_res_est = model.analysis(hls_synth_res_estimation)
-    assert "StreamingFCLayer_Batch_0" in hls_synt_res_est
+    assert "MatrixVectorActivation_0" in hls_synt_res_est
 
-    node = model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0]
+    node = model.get_nodes_by_op_type("MatrixVectorActivation")[0]
     inst = getCustomOp(node)
     cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
     exp_cycles_dict = model.analysis(exp_cycles_per_layer)
@@ -408,9 +408,9 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim(
     assert (y_produced.reshape(y_expected.shape) == y_expected).all(), "rtlsim failed"
 
     hls_synt_res_est = model.analysis(hls_synth_res_estimation)
-    assert "StreamingFCLayer_Batch_0" in hls_synt_res_est
+    assert "MatrixVectorActivation_0" in hls_synt_res_est
 
-    node = model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0]
+    node = model.get_nodes_by_op_type("MatrixVectorActivation")[0]
     inst = getCustomOp(node)
     cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
     exp_cycles_dict = model.analysis(exp_cycles_per_layer)
diff --git a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py
index a18755743d096fdb7571df221e953271bb0c3771..e3c79fa44fb57718d359b58d1a8716746f6668fb 100644
--- a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py
+++ b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py
@@ -67,7 +67,7 @@ def test_res_estimate():
     node_inp_list = ["inp", "weights", "thresh"]
 
     FCLayer_node = helper.make_node(
-        "StreamingFCLayer_Batch",
+        "MatrixVectorActivation",
         node_inp_list,
         ["outp"],
         domain="finn.custom_op.fpgadataflow",
@@ -97,7 +97,7 @@ def test_res_estimate():
     model = model.transform(GiveUniqueNodeNames())
     prod_resource_estimation = model.analysis(res_estimation)
     expect_resource_estimation = {
-        "StreamingFCLayer_Batch_0": {
+        "MatrixVectorActivation_0": {
             "BRAM_18K": 0,
             "BRAM_efficiency": 1,
             "LUT": 357,
@@ -114,7 +114,7 @@ def test_res_estimate():
 
     prod_resource_estimation = model.analysis(res_estimation_complete)
     expect_resource_estimation = {
-        "StreamingFCLayer_Batch_0": [
+        "MatrixVectorActivation_0": [
             {
                 "BRAM_18K": 0,
                 "BRAM_efficiency": 1,
diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py
index 54e0bcf7777c43056bbf74f0fa182815c71b2240..154b475be8246d7941218e75fd2bc665ae71df6c 100644
--- a/tests/fpgadataflow/test_fpgadataflow_vvau.py
+++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py
@@ -98,7 +98,7 @@ def _make_single_vvau_modelwrapper(
         actval = 0
 
     VVAU_node = helper.make_node(
-        "Vector_Vector_Activate_Batch",
+        "VectorVectorActivation",
         node_inp_list,
         ["outp"],
         domain="finn.custom_op.fpgadataflow",
@@ -233,7 +233,7 @@ def test_fpgadataflow_vvau(
     assert (y_produced == y_expected).all(), "cppsim failed"
 
     if exec_mode == "rtlsim":
-        node = model.get_nodes_by_op_type("Vector_Vector_Activate_Batch")[0]
+        node = model.get_nodes_by_op_type("VectorVectorActivation")[0]
         inst = getCustomOp(node)
         cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
         exp_cycles_dict = model.analysis(exp_cycles_per_layer)
diff --git a/tests/fpgadataflow/test_runtime_weights.py b/tests/fpgadataflow/test_runtime_weights.py
index 0f58749dbd09bd70dbbfb6603b87f11dfbc2c84c..16fed5c3cb5c54a052b1a1b2ef2723d116243171 100644
--- a/tests/fpgadataflow/test_runtime_weights.py
+++ b/tests/fpgadataflow/test_runtime_weights.py
@@ -68,7 +68,7 @@ def test_runtime_weights_single_layer():
     }
     layer_spec_list = [layer_spec]
     model = hls_random_mlp_maker(layer_spec_list)
-    fcl = model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0]
+    fcl = model.get_nodes_by_op_type("MatrixVectorActivation")[0]
     op_inst = getCustomOp(fcl)
     op_inst.set_nodeattr("mem_mode", "decoupled")
     op_inst.set_nodeattr("runtime_writeable_weights", 1)
diff --git a/tests/fpgadataflow/test_set_folding.py b/tests/fpgadataflow/test_set_folding.py
index 7ebf5e118235dfcd070bbde441c5abaaa6d064b8..8ea0e18f2cace10b6fefae50ce1e28845ab24050 100644
--- a/tests/fpgadataflow/test_set_folding.py
+++ b/tests/fpgadataflow/test_set_folding.py
@@ -66,7 +66,7 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes):
         simd = 1
         FCLayer_nodes += [
             helper.make_node(
-                "StreamingFCLayer_Batch",
+                "MatrixVectorActivation",
                 [tensors[i].name, "weights_" + str(i), "thresh_" + str(i)],
                 [tensors[i + 1].name],
                 domain="finn.custom_op.fpgadataflow",
diff --git a/tests/transformation/test_infer_data_layouts_cnv.py b/tests/transformation/test_infer_data_layouts_cnv.py
index 99808164f27a33040ed717a7a84ec79e0d7142e2..952ce306a447ba0b4d46256ec6e80e5da79be4bc 100644
--- a/tests/transformation/test_infer_data_layouts_cnv.py
+++ b/tests/transformation/test_infer_data_layouts_cnv.py
@@ -90,8 +90,8 @@ def test_infer_data_layouts_cnv():
     model = model.transform(absorb.AbsorbTransposeIntoMultiThreshold())
     model = model.transform(ConvertBipolarMatMulToXnorPopcount())
     model = model.transform(Streamline())
-    model = model.transform(to_hls.InferBinaryStreamingFCLayer())
-    model = model.transform(to_hls.InferQuantizedStreamingFCLayer())
+    model = model.transform(to_hls.InferBinaryMatrixVectorActivation())
+    model = model.transform(to_hls.InferQuantizedMatrixVectorActivation())
     model = model.transform(to_hls.InferConvInpGen())
     model = model.transform(to_hls.InferStreamingMaxPool())
     model = model.transform(GiveUniqueNodeNames())
@@ -106,9 +106,9 @@ def test_infer_data_layouts_cnv():
     assert (
         model.get_tensor_layout("ConvolutionInputGenerator_0_out0") == DataLayout.NHWC
     )
-    assert model.get_tensor_layout("StreamingFCLayer_Batch_3_out0") == DataLayout.NHWC
+    assert model.get_tensor_layout("MatrixVectorActivation_3_out0") == DataLayout.NHWC
     assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC
-    assert model.get_tensor_layout("StreamingFCLayer_Batch_6_out0") == DataLayout.NC
+    assert model.get_tensor_layout("MatrixVectorActivation_6_out0") == DataLayout.NC
     assert model.get_tensor_layout("global_out") == DataLayout.NC
 
     os.remove(export_onnx_path_cnv)
diff --git a/tutorials/fpga_flow/folding_config.json b/tutorials/fpga_flow/folding_config.json
index b244d5953202b669b2ffc7c6e35699eefcaade0a..642200d02b39cf0d5572b3629cf071f29eba20f4 100644
--- a/tutorials/fpga_flow/folding_config.json
+++ b/tutorials/fpga_flow/folding_config.json
@@ -4,22 +4,22 @@
     "PE": 49,
     "ram_style": "block"
   },
-  "StreamingFCLayer_Batch_0": {
+  "MatrixVectorActivation_0": {
     "PE": 16,
     "SIMD": 49,
     "ram_style": "block"
   },
-  "StreamingFCLayer_Batch_1": {
+  "MatrixVectorActivation_1": {
     "PE": 8,
     "SIMD": 8,
     "ram_style": "auto"
   },
-  "StreamingFCLayer_Batch_2": {
+  "MatrixVectorActivation_2": {
     "PE": 8,
     "SIMD": 8,
     "ram_style": "auto"
   },
-  "StreamingFCLayer_Batch_3": {
+  "MatrixVectorActivation_3": {
     "PE": 10,
     "SIMD": 8,
     "ram_style": "distributed"