diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb
index a2747e3921dc8e5a8427b4d5d9b7f143a57b018f..28155d6f3eacd4dfd77aefbc73fc4ed3ef12f1dd 100644
--- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb
+++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb
@@ -359,21 +359,21 @@
     "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n",
     "# each tuple is (PE, SIMD, in_fifo_depth) for a layer\n",
     "folding = [\n",
-    "    (16, 3, 128),\n",
-    "    (32, 32, 128),\n",
-    "    (16, 32, 128),\n",
-    "    (16, 32, 128),\n",
-    "    (4, 32, 81),\n",
-    "    (1, 32, 2),\n",
-    "    (1, 4, 2),\n",
-    "    (1, 8, 128),\n",
-    "    (5, 1, 3),\n",
+    "    (16, 3, [128]),\n",
+    "    (32, 32, [128]),\n",
+    "    (16, 32, [128]),\n",
+    "    (16, 32, [128]),\n",
+    "    (4, 32, [81]),\n",
+    "    (1, 32, [2]),\n",
+    "    (1, 4, [2]),\n",
+    "    (1, 8, [128]),\n",
+    "    (5, 1, [3]),\n",
     "]\n",
     "for fcl, (pe, simd, ififodepth) in zip(fc_layers, folding):\n",
     "    fcl_inst = getCustomOp(fcl)\n",
     "    fcl_inst.set_nodeattr(\"PE\", pe)\n",
     "    fcl_inst.set_nodeattr(\"SIMD\", simd)\n",
-    "    fcl_inst.set_nodeattr(\"inFIFODepth\", ififodepth)\n",
+    "    fcl_inst.set_nodeattr(\"inFIFODepths\", ififodepth)\n",
     "\n",
     "# use same SIMD values for the sliding window operators\n",
     "swg_layers = model.get_nodes_by_op_type(\"ConvolutionInputGenerator\")\n",
diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb
index a6f05df30925250df1704afb6f9ff9dc7dc17dc0..c4fc92b97c91d6b1dfadc41ac3c23d014bd9fada 100644
--- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb
+++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb
@@ -559,17 +559,17 @@
     "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n",
     "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n",
     "config = [\n",
-    "    (16, 49, 16, 64, \"block\"),\n",
-    "    (8, 8, 64, 64, \"auto\"),\n",
-    "    (8, 8, 64, 64, \"auto\"),\n",
-    "    (10, 8, 64, 10, \"distributed\"),\n",
+    "    (16, 49, [16], [64], \"block\"),\n",
+    "    (8, 8, [64], [64], \"auto\"),\n",
+    "    (8, 8, [64], [64], \"auto\"),\n",
+    "    (10, 8, [64], [10], \"distributed\"),\n",
     "]\n",
     "for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):\n",
     "    fcl_inst = getCustomOp(fcl)\n",
     "    fcl_inst.set_nodeattr(\"PE\", pe)\n",
     "    fcl_inst.set_nodeattr(\"SIMD\", simd)\n",
-    "    fcl_inst.set_nodeattr(\"inFIFODepth\", ififo)\n",
-    "    fcl_inst.set_nodeattr(\"outFIFODepth\", ofifo)\n",
+    "    fcl_inst.set_nodeattr(\"inFIFODepths\", ififo)\n",
+    "    fcl_inst.set_nodeattr(\"outFIFODepths\", ofifo)\n",
     "    fcl_inst.set_nodeattr(\"ram_style\", ramstyle)\n",
     "    \n",
     "# set parallelism for input quantizer to be same as first layer's SIMD\n",
@@ -590,7 +590,7 @@
    "metadata": {},
    "source": [
     "Besides PE and SIMD three other node attributes are set. `ram_style` specifies how the weights are to be stored (BRAM, LUTRAM, and so on). It can be selected explicitly or with the option `auto` you can let Vivado decide.\n",
-    "`inFIFODepth` and `outFIFODepth` specifies the FIFO depths that is needed by the node from the surrounding FIFOs. These attributes are used in the transformation 'InsertFIFO' to insert the appropriate FIFOs between the nodes, which will be automatically called as part of the hardware build process.\n",
+    "`inFIFODepths` and `outFIFODepths` specify the FIFO depths that the node needs from the surrounding FIFOs. These attributes are used by the 'InsertFIFO' transformation to insert the appropriate FIFOs between the nodes; this transformation is called automatically as part of the hardware build process.\n",
     "\n",
     "In previous versions of FINN we had to call transformations to insert data width converters, FIFOs and `TLastMarker` manually at this step. This is no longer needed, as all this is taken care of by the `ZynqBuild` or `VitisBuild` transformations."
    ]
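
For reference, a minimal sketch of how the renamed, list-valued FIFO depth attributes are applied after this change, mirroring the tfc hunk above. This is an illustration rather than notebook content: it assumes `getCustomOp` and `ModelWrapper` are importable from qonnx as in recent FINN releases, and the ONNX checkpoint filename is a placeholder for whichever dataflow model the notebook has produced at this point.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.custom_op.registry import getCustomOp

# placeholder checkpoint path; substitute the model saved earlier in the notebook
model = ModelWrapper("tfc_w1a1_dataflow_model.onnx")

# (PE, SIMD, inFIFODepths, outFIFODepths, ram_style) per layer; the FIFO depths
# are now lists with one entry per input/output stream of the node
config = [
    (16, 49, [16], [64], "block"),
    (8, 8, [64], [64], "auto"),
    (8, 8, [64], [64], "auto"),
    (10, 8, [64], [10], "distributed"),
]

fc_layers = model.get_nodes_by_op_type("MatrixVectorActivation")
for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):
    fcl_inst = getCustomOp(fcl)
    fcl_inst.set_nodeattr("PE", pe)
    fcl_inst.set_nodeattr("SIMD", simd)
    fcl_inst.set_nodeattr("inFIFODepths", ififo)   # was scalar "inFIFODepth"
    fcl_inst.set_nodeattr("outFIFODepths", ofifo)  # was scalar "outFIFODepth"
    fcl_inst.set_nodeattr("ram_style", ramstyle)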