diff --git a/AUTHORS.rst b/AUTHORS.rst index 1d42d35a3b269176fcab79d8239b84ac8442fa43..d011ce3d7ad74125b7013b7a7e987eb22e70a9f3 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -2,8 +2,9 @@ Contributors ============ -* Yaman Umuroglu (@maltanar) (maintainer) -* Jakoba Petri-Koenig (@auphelia) +* Jakoba Petri-Koenig (@auphelia) (maintainer) +* Thomas Preusser (@preusser) +* Yaman Umuroglu (@maltanar) * Andrea Rigoni (@AndreaRigoni) * Hendrik Borras (@HenniOVP) * Lucian Petrica (@quetric) @@ -22,3 +23,6 @@ Contributors * Javier Duarte (@jmduarte) * Uma Maheshwari (@umav1511) * José Rosa (@pinxau1000) +* Aziz Bahri (@azizb-xlnx) +* Fionn O'Donohoe (@fionnodonohoe-xlnx) +* Matthias Gehre (@mgehre-amd) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index deed89651db34d3821df35c8a1eb0f85b72f23a5..d376a1b42b0f1f3856f40b3993533785fb254a9b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ We welcome contributions to FINN. Please follow the steps below and be sure that your contribution complies with our guidelines. -1. Share your proposal via <a href="https://github.com/Xilinx/finn/issues" target="_blank">Github issues</a>. If you are looking for some issues to get started with, we have a list of <a href="https://github.com/Xilinx/finn/labels/good%20first%20issue">good first issues</a> in the issue tracker. Feel free to ask questions on the <a href="https://gitter.im/xilinx-finn/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge">FINN gitter channel as well</a>. +1. Share your proposal via <a href="https://github.com/Xilinx/finn/issues" target="_blank">Github issues</a>. If you are looking for some issues to get started with, we have a list of <a href="https://github.com/Xilinx/finn/labels/good%20first%20issue">good first issues</a> in the issue tracker. Feel free to ask questions in the <a href="https://github.com/Xilinx/finn/discussions">FINN GitHub discussions</a> as well. 
We welcome submissions to: @@ -31,4 +31,4 @@ Please follow the steps below and be sure that your contribution complies with o 3. We will review your contribution and, if any additional fixes or modifications are necessary, may provide feedback to guide you. When accepted, your pull request will -be merged to the repository. If you have more questions please contact us via the <a href="https://gitter.im/xilinx-finn/community" target="_blank">FINN gitter channel</a>. +be merged to the repository. If you have more questions please contact us. diff --git a/README.md b/README.md index 4cc995fc8c991ccc851e95fd30897aeea8ca266a..1b8efc8f19d0b664a17320585f5ea60acbe03eb4 100644 --- a/README.md +++ b/README.md @@ -24,9 +24,7 @@ Please see the [Getting Started](https://finn.readthedocs.io/en/latest/getting_s ## What's New in FINN? -* **2021-11-05:** v0.7 is released, introducing QONNX support, three new example networks and many other improvements. Read more on the [v0.7 release blog post](https://xilinx.github.io/finn//2021/11/05/finn-v07-is-released.html). -* **2021-06-15:** v0.6 is released, with ResNet-50 on U250 and ZCU104 MobileNet-v1 in finn-examples showcasing new features plus a lot more. Read more on the [v0.6 release blog post](https://xilinx.github.io/finn//2021/06/15/finn-v06-is-released.html). -* **2020-12-17:** v0.5b (beta) is released, with a new [examples repo](https://github.com/Xilinx/finn-examples) including MobileNet-v1. Read more on the <a href="https://xilinx.github.io/finn/2020/12/17/finn-v05b-beta-is-released.html">release blog post</a>. +* Please find all news under [GitHub discussions Announcements](https://github.com/Xilinx/finn/discussions/categories/announcements). 
## Documentation diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 850d637de4d1384231a90dc5cdca532cb4dec5fd..a3f40d52ef6c8a5b79f46c1bb70f83fb61218fc9 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -29,8 +29,7 @@ FROM pytorch/pytorch:1.7.1-cuda11.0-cudnn8-runtime LABEL maintainer="Yaman Umuroglu <yamanu@xilinx.com>" -# XRT version to be installed -ARG XRT_DEB_VERSION="xrt_202010.2.7.766_18.04-amd64-xrt" +ARG XRT_DEB_VERSION="xrt_202210.2.13.466_18.04-amd64-xrt" WORKDIR /workspace diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index ad533efa5d8bbab68837e6092f91c4767cde60f7..e3e5b5f7f93c312269f2c96942e44318875903e0 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -9,7 +9,7 @@ node { "FINN_XILINX_VERSION=2022.1", "FINN_DOCKER_TAG=xilinx/finn:jenkins", "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", - "PLATFORM_REPO_PATHS=/opt/xilinx/dsa" + "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" ]){ parallel firstBranch: { stage('Brevitas export') { diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst index 8a8a803a3d9c2a0bc780e8fd6b33cd20060a28a6..40425c119fafdcd03292b05c7a7e71310f767239 100644 --- a/docs/finn/getting_started.rst +++ b/docs/finn/getting_started.rst @@ -143,7 +143,7 @@ Supported FPGA Hardware **Shell-integrated accelerator + driver:** For quick deployment, we target boards supported by `PYNQ <http://www.pynq.io/>`_ . For these platforms, we can build a full bitfile including DMAs to move data into and out of the FINN-generated accelerator, as well as a Python driver to launch the accelerator. We support the Pynq-Z1, Pynq-Z2, Ultra96, ZCU102 and ZCU104 boards. .. warning:: - In previous FINN versions (v0.4b - v0.7) we had preliminary support for `Xilinx Alveo boards <https://www.xilinx.com/products/boards-and-kits/alveo.html>`_ using PYNQ and Vitis 2020.1, see instructions below for Alveo setup that works with older versions. 
Please note that with the new release with Vitis 2022.1, we do not have support for an automatic deployment on Alveo cards. + In previous FINN versions (v0.4b - v0.7) we had support for `Xilinx Alveo boards <https://www.xilinx.com/products/boards-and-kits/alveo.html>`_ using PYNQ and Vitis 2020.1, see instructions below for Alveo setup that works with older versions. Please note that with the new release with Vitis 2022.1, we only have experimental support for automatic deployment on Alveo cards. **Vivado IPI support for any Xilinx FPGA:** FINN generates a Vivado IP Integrator (IPI) design from the neural network with AXI stream (FIFO) in-out interfaces, which can be integrated onto any Xilinx FPGA as part of a larger system. It's up to you to take the FINN-generated accelerator (what we call "stitched IP" in the tutorials), wire it up to your FPGA design and send/receive neural network data to/from the accelerator. @@ -167,9 +167,6 @@ Continue on the host side (replace the ``<PYNQ_IP>`` and ``<PYNQ_USERNAME>`` wit Alveo first-time setup ********************** -.. warning:: - Alveo cards are not automatically supported in the new FINN release with Vitis 2022.1. If you are looking for a build flow for Alveo inside of FINN, you will need to use older FINN versions (v0.4b - v0.7) with Vitis 2020.1. - We use *host* to refer to the PC running the FINN Docker environment, which will build the accelerator+driver and package it up, and *target* to refer to the PC where the Alveo card is installed. These two can be the same PC, or connected over the network -- FINN includes some utilities to make it easier to test on remote PCs too. 
Prior to first usage, you need to set up both the host and the target in the following manner: On the target side: diff --git a/docs/finn/hw_build.rst b/docs/finn/hw_build.rst index e1e5411adb4078636ddd4c0087245f8c2a58c372..2a64b87943075ff004f79c9d457136e41e27723d 100644 --- a/docs/finn/hw_build.rst +++ b/docs/finn/hw_build.rst @@ -12,10 +12,6 @@ A model where all layers have been converted to HLS layers can be processed by FINN to build a bitfile and driver targeting a Zynq system or to generate a Vivado IP Integrator (IPI) design with AXI stream (FIFO) in-out interfaces, which can be integrated onto any Xilinx FPGA as part of a larger system. -.. warning:: - With the new FINN release, we do not offer out-of-the box support for Alveo cards anymore. - Please use an older FINN version (v04b - v0.7) and Vitis 2020.1 in case you want to use `VitisBuild`. The description for the `VitisBuild` below is still valid for older versions. - Hardware Build ============== diff --git a/fetch-repos.sh b/fetch-repos.sh index 88f0a3822a36df7d5ff3a86df31f5f3e9bb2181c..1fb830e34930f64d6c275f814498eb3205a29b23 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -33,7 +33,7 @@ BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="e9946e5e56acd85837e8e79224d2bb60764bed69" -OMX_COMMIT="a97f0bf145a2f7e57ca416ea76c9e45df4e9aa37" +OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" diff --git a/run-docker.sh b/run-docker.sh index 3a83004a0ec18168c015a5152a18ec5bcfa47c9b..381be35293dddbabe077be2aeae609f8c5621842 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -86,7 +86,7 @@ SCRIPTPATH=$(dirname "$SCRIPT") : ${ALVEO_BOARD="U250"} : ${ALVEO_TARGET_DIR="/tmp"} : 
${PLATFORM_REPO_PATHS="/opt/xilinx/platforms"} -: ${XRT_DEB_VERSION="xrt_202010.2.7.766_18.04-amd64-xrt"} +: ${XRT_DEB_VERSION="xrt_202210.2.13.466_18.04-amd64-xrt"} : ${FINN_HOST_BUILD_DIR="/tmp/$DOCKER_INST_NAME"} : ${FINN_DOCKER_TAG="xilinx/finn:$(git describe --always --tags --dirty).$XRT_DEB_VERSION"} : ${FINN_DOCKER_PREBUILT="0"} diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py index d9ffea4d9cd8895fdf55a497e8c7d0e49808ac95..882b40a0aaf542e6dcaf427ca3567ae78394ede5 100755 --- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py @@ -138,14 +138,22 @@ class StreamingMaxPool_Batch(HLSCustomOp): def get_exp_cycles(self): # derived from StreamingMaxPool_Batch loop nest ifm_dim, k, ifm_ch = self.get_1d_attrs_normalized() - _, _, ofm_dim_w, nf, _ = self.get_folded_output_shape() + warnings.warn( + """Estimated latency for layer {} can be lower than + actual latency!""".format( + self.onnx_node.name + ) + ) if self.is_1d(): - exp_cycles = ofm_dim_w * nf * (k[1] + 1) + _, _, _, nf, _ = self.get_folded_output_shape() + ceil_mode = self.get_nodeattr("CeilMode") + ofm_dim = compute_pool_output_dim(ifm_dim[1], k[1], k[1], 0, ceil_mode) + exp_cycles = ofm_dim * nf * (k[1] + 1) return int(exp_cycles) else: # TODO: adjust inaccurate formula - return int(ifm_dim[1] * (ifm_dim[1] + (ifm_dim[1] / k[1]))) + return int(ifm_dim[1] * ifm_dim[1] * (1 + 1 / (k[1] * k[1]))) def get_instream_width(self): dt_bits = self.get_input_datatype().bitwidth() diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 7c978cf61a465cacb4d562634d950311ed992021..892ab09fdf41947f86e2bf122e057e94585dfa8c 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -534,8 +534,9 @@ class 
CreateStitchedIP(Transformation): tcl.append("ipx::save_core [ipx::find_open_core %s]" % block_vlnv) # export list of used Verilog files (for rtlsim later on) tcl.append( - "set all_v_files [get_files -filter {FILE_TYPE == Verilog " - + "&& USED_IN_SYNTHESIS == 1} ]" + "set all_v_files [get_files -filter {USED_IN_SYNTHESIS == 1 " + + "&& (FILE_TYPE == Verilog || FILE_TYPE == SystemVerilog " + + '|| FILE_TYPE =="Verilog Header")}]' ) v_file_list = "%s/all_verilog_srcs.txt" % vivado_stitch_proj_dir tcl.append("set fp [open %s w]" % v_file_list) diff --git a/src/finn/transformation/fpgadataflow/synth_ooc.py b/src/finn/transformation/fpgadataflow/synth_ooc.py index 8d4aec259c440e311f6e3a6fb4d0359d55d738ca..6070cce636f50473545ab8a33c7867b7e1eb7f9c 100644 --- a/src/finn/transformation/fpgadataflow/synth_ooc.py +++ b/src/finn/transformation/fpgadataflow/synth_ooc.py @@ -52,7 +52,7 @@ class SynthOutOfContext(Transformation): top_module_name = model.get_metadata_prop("wrapper_filename") top_module_name = file_to_basename(top_module_name).strip(".v") build_dir = make_build_dir("synth_out_of_context_") - verilog_extensions = [".v", ".vh"] + verilog_extensions = [".v", ".sv", ".vh"] with open(vivado_stitch_proj_dir + "/all_verilog_srcs.txt", "r") as f: all_verilog_srcs = f.read().split() for file in all_verilog_srcs: diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index c90985ebc9932c56c840e34464b838f3141c79a8..4aba87216c8999612f748e989a945ceff33da167 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -61,7 +61,7 @@ alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e" alveo_default_platform = dict() alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3" alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2" -alveo_default_platform["U250"] = "xilinx_u250_xdma_201830_2" +alveo_default_platform["U250"] = "xilinx_u250_gen3x16_xdma_2_1_202010_1" alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3" diff --git 
a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 3396561e06f553785e842ec0b6626bc405d262c5..f6a51da8e44ea60ae5693cdd033b39bdf51376ac 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -74,7 +74,9 @@ def pyverilate_stitched_ip( # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list - all_verilog_files = list(set(filter(lambda x: x.endswith(".v"), all_verilog_srcs))) + all_verilog_files = list( + set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) + ) # remove all but one instances of regslice_core.v filtered_verilog_files = [] diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 782e18fbc38ec8e1a9cc1d2facaba5b38d3c947d..103f18b514c23c4e1ad35a85d020dc0481aa9c47 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -673,9 +673,6 @@ class TestEnd2End: @pytest.mark.vitis @pytest.mark.parametrize("kind", ["zynq", "alveo"]) def test_build(self, topology, wbits, abits, QONNX_export, kind): - # temporarily adding skip for alveo builds - if kind == "alveo": - pytest.skip("Alveo tests temporarily excluded") if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( @@ -698,9 +695,6 @@ class TestEnd2End: @pytest.mark.vitis @pytest.mark.parametrize("kind", ["zynq", "alveo"]) def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, kind): - # temporarily adding skip for alveo builds - if kind == "alveo": - pytest.skip("Alveo tests temporarily excluded") if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( @@ -715,9 +709,6 @@ class TestEnd2End: @pytest.mark.parametrize("kind", ["zynq", "alveo"]) def test_deploy(self, topology, wbits, abits, QONNX_export, kind): - # temporarily adding skip for 
alveo builds - if kind == "alveo": - pytest.skip("Alveo tests temporarily excluded") prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "driver_" + kind ) @@ -741,9 +732,6 @@ class TestEnd2End: @pytest.mark.parametrize("kind", ["zynq", "alveo"]) def test_run_on_hw(self, topology, wbits, abits, QONNX_export, kind): - # temporarily adding skip for alveo builds - if kind == "alveo": - pytest.skip("Alveo tests temporarily excluded") prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "deploy_" + kind ) @@ -768,9 +756,6 @@ class TestEnd2End: @pytest.mark.parametrize("kind", ["zynq", "alveo"]) def test_throughput_hw(self, topology, wbits, abits, QONNX_export, kind): - # temporarily adding skip for alveo builds - if kind == "alveo": - pytest.skip("Alveo tests temporarily excluded") prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "deploy_" + kind ) diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index a3809e61304ef031407e7fbec0f9037382d999ad..80f2d724ad7ccbf563c23076155313bad1ecb336 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -335,8 +335,6 @@ def test_fpgadataflow_ipstitch_iodma_floorplan(): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis -# temporarily marked as xfail -@pytest.mark.xfail def test_fpgadataflow_ipstitch_vitis_end2end(board, period_ns, extw): if "VITIS_PATH" not in os.environ: pytest.skip("VITIS_PATH not set") @@ -348,6 +346,8 @@ def test_fpgadataflow_ipstitch_vitis_end2end(board, period_ns, extw): assert sdp_node.__class__.__name__ == "StreamingDataflowPartition" assert os.path.isfile(sdp_node.get_nodeattr("model")) model = load_test_checkpoint_or_skip(sdp_node.get_nodeattr("model")) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP(fpga_part, period_ns)) model = 
model.transform(VitisBuild(fpga_part, period_ns, platform)) model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_vitis.onnx") assert model.get_metadata_prop("platform") == "alveo" diff --git a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py similarity index 84% rename from tests/fpgadataflow/test_layer_streaming_maxpool_batch.py rename to tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py index 55c90644dfbb23fbc2da10cf969461abe6d38bf3..a3968cf79704092ffb5ec53c887842372b625f4d 100644 --- a/tests/fpgadataflow/test_layer_streaming_maxpool_batch.py +++ b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py @@ -32,6 +32,7 @@ from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.maxpoolnhwc import compute_pool_output_dim +from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.basic import gen_finn_dt_tensor @@ -82,46 +83,6 @@ def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_ return model -def make_single_streamingmaxpool_modelwrapper( - k, ifm_ch, pe, ifm_dim, ofm_dim, idt, ceil_mode -): - k_h, k_w = k - ifm_dim_h, ifm_dim_w = ifm_dim - ofm_dim_h, ofm_dim_w = ofm_dim - odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) - outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] - ) - - smp_node = helper.make_node( - "StreamingMaxPool_Batch", - ["inp"], - ["outp"], - domain="finn.custom_op.fpgadataflow", - backend="fpgadataflow", - PoolDim=[k_h, k_w], - NumChannels=ifm_ch, - PE=pe, - ImgDim=[ifm_dim_h, ifm_dim_w], - CeilMode=ceil_mode, - dataType=idt.name, - ) - graph = helper.make_graph( - nodes=[smp_node], 
name="smp_graph", inputs=[inp], outputs=[outp] - ) - - model = helper.make_model(graph, producer_name="smp-model") - model = ModelWrapper(model) - - model.set_tensor_datatype("inp", idt) - model.set_tensor_datatype("outp", odt) - - return model - - def prepare_inputs(input_tensor): return {"inp": input_tensor} @@ -187,6 +148,10 @@ def test_fpgadataflow_streamingmaxpool( assert model.graph.node[0].op_type == "StreamingMaxPool_Batch" + # Ensure PE value is set + streamingmaxpool_node = model.get_nodes_by_op_type("StreamingMaxPool_Batch")[0] + getCustomOp(streamingmaxpool_node).set_nodeattr("PE", pe) + if exec_mode == "cppsim": model = model.transform(SetExecMode("cppsim")) model = model.transform(PrepareCppSim()) @@ -198,7 +163,7 @@ def test_fpgadataflow_streamingmaxpool( model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) else: - raise Exception("Unknown exec_mode in test_layer_streaming_maxpool_batch") + raise Exception("Unknown exec_mode in test_fpgadataflow_streamingmaxpool") # execute model y_produced = oxe.execute_onnx(model, input_dict)["outp"] @@ -211,6 +176,7 @@ def test_fpgadataflow_streamingmaxpool( exp_cycles_dict = model.analysis(exp_cycles_per_layer) exp_cycles = exp_cycles_dict[node.name] # FIXME: maxpool cycles prediction needs a fix - # mostl likely due to some loops not flattening + # most likely due to inaccurate cycle prediction of + # nested for-loops # assert np.isclose(exp_cycles, cycles_rtlsim, atol=15) assert exp_cycles != 0 diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index d33a4f2fd6c974b13ac315c7ef621eacb04002c4..cdf69aebddc4d6af2288774acbff5dd8a52512b3 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -39,6 +39,7 @@ from finn.util.basic import make_build_dir @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.end2end def test_end2end_build_dataflow_directory(): test_dir = make_build_dir("test_build_dataflow_directory_") target_dir = 
test_dir + "/build_dataflow"